{
"best_metric": 3.3066227436065674,
"best_model_checkpoint": "/scratch/cl5625/exceptions/models/100M_8397/checkpoint-90000",
"epoch": 10.0,
"eval_steps": 1000,
"global_step": 92910,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.005381552039608223,
"grad_norm": 1.1552565097808838,
"learning_rate": 0.0003,
"loss": 8.6283,
"step": 50
},
{
"epoch": 0.010763104079216447,
"grad_norm": 1.5108792781829834,
"learning_rate": 0.0006,
"loss": 6.9523,
"step": 100
},
{
"epoch": 0.01614465611882467,
"grad_norm": 2.4516749382019043,
"learning_rate": 0.0005996767589699385,
"loss": 6.514,
"step": 150
},
{
"epoch": 0.021526208158432893,
"grad_norm": 1.227023959159851,
"learning_rate": 0.0005993535179398771,
"loss": 6.2311,
"step": 200
},
{
"epoch": 0.026907760198041114,
"grad_norm": 1.2729941606521606,
"learning_rate": 0.0005990302769098158,
"loss": 6.0782,
"step": 250
},
{
"epoch": 0.03228931223764934,
"grad_norm": 2.2429041862487793,
"learning_rate": 0.0005987070358797543,
"loss": 5.9519,
"step": 300
},
{
"epoch": 0.03767086427725756,
"grad_norm": 1.0578374862670898,
"learning_rate": 0.0005983837948496929,
"loss": 5.8611,
"step": 350
},
{
"epoch": 0.04305241631686579,
"grad_norm": 1.2515922784805298,
"learning_rate": 0.0005980605538196314,
"loss": 5.7841,
"step": 400
},
{
"epoch": 0.048433968356474004,
"grad_norm": 1.5836460590362549,
"learning_rate": 0.0005977373127895701,
"loss": 5.7439,
"step": 450
},
{
"epoch": 0.05381552039608223,
"grad_norm": 1.2947415113449097,
"learning_rate": 0.0005974140717595086,
"loss": 5.6449,
"step": 500
},
{
"epoch": 0.05919707243569045,
"grad_norm": 1.0450425148010254,
"learning_rate": 0.0005970908307294472,
"loss": 5.5658,
"step": 550
},
{
"epoch": 0.06457862447529868,
"grad_norm": 1.5436805486679077,
"learning_rate": 0.0005967675896993858,
"loss": 5.5045,
"step": 600
},
{
"epoch": 0.0699601765149069,
"grad_norm": 1.5273220539093018,
"learning_rate": 0.0005964443486693243,
"loss": 5.4187,
"step": 650
},
{
"epoch": 0.07534172855451512,
"grad_norm": 0.809162974357605,
"learning_rate": 0.000596121107639263,
"loss": 5.3605,
"step": 700
},
{
"epoch": 0.08072328059412334,
"grad_norm": 1.2222342491149902,
"learning_rate": 0.0005957978666092015,
"loss": 5.3092,
"step": 750
},
{
"epoch": 0.08610483263373157,
"grad_norm": 1.3174521923065186,
"learning_rate": 0.0005954746255791401,
"loss": 5.273,
"step": 800
},
{
"epoch": 0.09148638467333979,
"grad_norm": 1.2672854661941528,
"learning_rate": 0.0005951513845490787,
"loss": 5.2322,
"step": 850
},
{
"epoch": 0.09686793671294801,
"grad_norm": 1.2101420164108276,
"learning_rate": 0.0005948281435190174,
"loss": 5.1579,
"step": 900
},
{
"epoch": 0.10224948875255624,
"grad_norm": 1.4577302932739258,
"learning_rate": 0.0005945049024889559,
"loss": 5.1254,
"step": 950
},
{
"epoch": 0.10763104079216446,
"grad_norm": 1.1115610599517822,
"learning_rate": 0.0005941816614588944,
"loss": 5.0941,
"step": 1000
},
{
"epoch": 0.10763104079216446,
"eval_accuracy": 0.22676085857946562,
"eval_loss": 5.024355888366699,
"eval_runtime": 186.3243,
"eval_samples_per_second": 96.665,
"eval_steps_per_second": 6.043,
"step": 1000
},
{
"epoch": 0.11301259283177269,
"grad_norm": 1.2621811628341675,
"learning_rate": 0.000593858420428833,
"loss": 5.0471,
"step": 1050
},
{
"epoch": 0.1183941448713809,
"grad_norm": 1.290496587753296,
"learning_rate": 0.0005935351793987716,
"loss": 5.0281,
"step": 1100
},
{
"epoch": 0.12377569691098914,
"grad_norm": 1.2553092241287231,
"learning_rate": 0.0005932119383687103,
"loss": 4.9935,
"step": 1150
},
{
"epoch": 0.12915724895059735,
"grad_norm": 1.1223114728927612,
"learning_rate": 0.0005928886973386488,
"loss": 4.9514,
"step": 1200
},
{
"epoch": 0.13453880099020557,
"grad_norm": 0.9260576963424683,
"learning_rate": 0.0005925654563085874,
"loss": 4.9153,
"step": 1250
},
{
"epoch": 0.1399203530298138,
"grad_norm": 1.0368200540542603,
"learning_rate": 0.000592242215278526,
"loss": 4.8948,
"step": 1300
},
{
"epoch": 0.14530190506942203,
"grad_norm": 1.1109979152679443,
"learning_rate": 0.0005919189742484645,
"loss": 4.8731,
"step": 1350
},
{
"epoch": 0.15068345710903025,
"grad_norm": 1.4146970510482788,
"learning_rate": 0.0005915957332184032,
"loss": 4.8403,
"step": 1400
},
{
"epoch": 0.15606500914863847,
"grad_norm": 1.0073134899139404,
"learning_rate": 0.0005912724921883417,
"loss": 4.8369,
"step": 1450
},
{
"epoch": 0.16144656118824668,
"grad_norm": 0.950721025466919,
"learning_rate": 0.0005909492511582803,
"loss": 4.7835,
"step": 1500
},
{
"epoch": 0.1668281132278549,
"grad_norm": 0.9828820824623108,
"learning_rate": 0.0005906260101282189,
"loss": 4.7715,
"step": 1550
},
{
"epoch": 0.17220966526746315,
"grad_norm": 0.9663535356521606,
"learning_rate": 0.0005903027690981575,
"loss": 4.751,
"step": 1600
},
{
"epoch": 0.17759121730707136,
"grad_norm": 0.8463447093963623,
"learning_rate": 0.000589979528068096,
"loss": 4.7339,
"step": 1650
},
{
"epoch": 0.18297276934667958,
"grad_norm": 0.9118891358375549,
"learning_rate": 0.0005896562870380347,
"loss": 4.6909,
"step": 1700
},
{
"epoch": 0.1883543213862878,
"grad_norm": 0.8051866888999939,
"learning_rate": 0.0005893330460079732,
"loss": 4.7067,
"step": 1750
},
{
"epoch": 0.19373587342589602,
"grad_norm": 0.9270610809326172,
"learning_rate": 0.0005890098049779118,
"loss": 4.6868,
"step": 1800
},
{
"epoch": 0.19911742546550426,
"grad_norm": 0.7820844650268555,
"learning_rate": 0.0005886865639478504,
"loss": 4.6451,
"step": 1850
},
{
"epoch": 0.20449897750511248,
"grad_norm": 0.9605551362037659,
"learning_rate": 0.0005883633229177889,
"loss": 4.6169,
"step": 1900
},
{
"epoch": 0.2098805295447207,
"grad_norm": 0.8132458925247192,
"learning_rate": 0.0005880400818877276,
"loss": 4.611,
"step": 1950
},
{
"epoch": 0.2152620815843289,
"grad_norm": 0.952850341796875,
"learning_rate": 0.0005877168408576662,
"loss": 4.5795,
"step": 2000
},
{
"epoch": 0.2152620815843289,
"eval_accuracy": 0.27085241776035757,
"eval_loss": 4.507682800292969,
"eval_runtime": 184.2381,
"eval_samples_per_second": 97.759,
"eval_steps_per_second": 6.112,
"step": 2000
},
{
"epoch": 0.22064363362393713,
"grad_norm": 0.9736102223396301,
"learning_rate": 0.0005873935998276048,
"loss": 4.5552,
"step": 2050
},
{
"epoch": 0.22602518566354537,
"grad_norm": 0.8927708268165588,
"learning_rate": 0.0005870703587975433,
"loss": 4.5378,
"step": 2100
},
{
"epoch": 0.2314067377031536,
"grad_norm": 1.0510213375091553,
"learning_rate": 0.0005867471177674818,
"loss": 4.539,
"step": 2150
},
{
"epoch": 0.2367882897427618,
"grad_norm": 0.9041006565093994,
"learning_rate": 0.0005864238767374205,
"loss": 4.5068,
"step": 2200
},
{
"epoch": 0.24216984178237003,
"grad_norm": 0.9170295596122742,
"learning_rate": 0.0005861006357073591,
"loss": 4.4987,
"step": 2250
},
{
"epoch": 0.24755139382197827,
"grad_norm": 1.4727824926376343,
"learning_rate": 0.0005857773946772977,
"loss": 4.4592,
"step": 2300
},
{
"epoch": 0.2529329458615865,
"grad_norm": 1.0264145135879517,
"learning_rate": 0.0005854541536472362,
"loss": 4.4577,
"step": 2350
},
{
"epoch": 0.2583144979011947,
"grad_norm": 0.8357675671577454,
"learning_rate": 0.0005851309126171749,
"loss": 4.4338,
"step": 2400
},
{
"epoch": 0.2636960499408029,
"grad_norm": 0.8532307744026184,
"learning_rate": 0.0005848076715871134,
"loss": 4.4297,
"step": 2450
},
{
"epoch": 0.26907760198041114,
"grad_norm": 0.7271891236305237,
"learning_rate": 0.000584484430557052,
"loss": 4.4147,
"step": 2500
},
{
"epoch": 0.27445915402001936,
"grad_norm": 0.7774198651313782,
"learning_rate": 0.0005841611895269906,
"loss": 4.4133,
"step": 2550
},
{
"epoch": 0.2798407060596276,
"grad_norm": 0.8901899456977844,
"learning_rate": 0.0005838379484969291,
"loss": 4.4158,
"step": 2600
},
{
"epoch": 0.2852222580992358,
"grad_norm": 1.0149247646331787,
"learning_rate": 0.0005835147074668678,
"loss": 4.3799,
"step": 2650
},
{
"epoch": 0.29060381013884407,
"grad_norm": 0.7539450526237488,
"learning_rate": 0.0005831914664368063,
"loss": 4.3692,
"step": 2700
},
{
"epoch": 0.2959853621784523,
"grad_norm": 0.8020486831665039,
"learning_rate": 0.0005828682254067449,
"loss": 4.3373,
"step": 2750
},
{
"epoch": 0.3013669142180605,
"grad_norm": 0.9698400497436523,
"learning_rate": 0.0005825449843766835,
"loss": 4.3398,
"step": 2800
},
{
"epoch": 0.3067484662576687,
"grad_norm": 0.9938604831695557,
"learning_rate": 0.0005822217433466221,
"loss": 4.3472,
"step": 2850
},
{
"epoch": 0.31213001829727693,
"grad_norm": 0.8308948278427124,
"learning_rate": 0.0005818985023165607,
"loss": 4.3363,
"step": 2900
},
{
"epoch": 0.31751157033688515,
"grad_norm": 0.8094744086265564,
"learning_rate": 0.0005815752612864992,
"loss": 4.3147,
"step": 2950
},
{
"epoch": 0.32289312237649337,
"grad_norm": 0.8576586246490479,
"learning_rate": 0.0005812520202564378,
"loss": 4.3173,
"step": 3000
},
{
"epoch": 0.32289312237649337,
"eval_accuracy": 0.2982191465728543,
"eval_loss": 4.243260860443115,
"eval_runtime": 184.3833,
"eval_samples_per_second": 97.682,
"eval_steps_per_second": 6.107,
"step": 3000
},
{
"epoch": 0.3282746744161016,
"grad_norm": 0.8846203088760376,
"learning_rate": 0.0005809287792263764,
"loss": 4.301,
"step": 3050
},
{
"epoch": 0.3336562264557098,
"grad_norm": 0.8314961791038513,
"learning_rate": 0.0005806055381963151,
"loss": 4.3089,
"step": 3100
},
{
"epoch": 0.3390377784953181,
"grad_norm": 0.8715083003044128,
"learning_rate": 0.0005802822971662536,
"loss": 4.2974,
"step": 3150
},
{
"epoch": 0.3444193305349263,
"grad_norm": 0.9279531836509705,
"learning_rate": 0.0005799590561361922,
"loss": 4.3041,
"step": 3200
},
{
"epoch": 0.3498008825745345,
"grad_norm": 0.8645572662353516,
"learning_rate": 0.0005796358151061307,
"loss": 4.287,
"step": 3250
},
{
"epoch": 0.35518243461414273,
"grad_norm": 0.9392287731170654,
"learning_rate": 0.0005793125740760694,
"loss": 4.2519,
"step": 3300
},
{
"epoch": 0.36056398665375095,
"grad_norm": 0.7573714852333069,
"learning_rate": 0.0005789893330460079,
"loss": 4.2413,
"step": 3350
},
{
"epoch": 0.36594553869335916,
"grad_norm": 0.6203613877296448,
"learning_rate": 0.0005786660920159465,
"loss": 4.2458,
"step": 3400
},
{
"epoch": 0.3713270907329674,
"grad_norm": 0.8517956137657166,
"learning_rate": 0.0005783428509858851,
"loss": 4.2433,
"step": 3450
},
{
"epoch": 0.3767086427725756,
"grad_norm": 0.832983136177063,
"learning_rate": 0.0005780196099558237,
"loss": 4.2253,
"step": 3500
},
{
"epoch": 0.3820901948121838,
"grad_norm": 0.5542912483215332,
"learning_rate": 0.0005776963689257623,
"loss": 4.2293,
"step": 3550
},
{
"epoch": 0.38747174685179203,
"grad_norm": 0.6788548827171326,
"learning_rate": 0.0005773731278957008,
"loss": 4.236,
"step": 3600
},
{
"epoch": 0.3928532988914003,
"grad_norm": 0.6434756517410278,
"learning_rate": 0.0005770498868656394,
"loss": 4.2193,
"step": 3650
},
{
"epoch": 0.3982348509310085,
"grad_norm": 0.7149718403816223,
"learning_rate": 0.000576726645835578,
"loss": 4.223,
"step": 3700
},
{
"epoch": 0.40361640297061674,
"grad_norm": 0.6300427913665771,
"learning_rate": 0.0005764034048055167,
"loss": 4.2004,
"step": 3750
},
{
"epoch": 0.40899795501022496,
"grad_norm": 0.8285446763038635,
"learning_rate": 0.0005760801637754552,
"loss": 4.1971,
"step": 3800
},
{
"epoch": 0.4143795070498332,
"grad_norm": 0.6956706643104553,
"learning_rate": 0.0005757569227453937,
"loss": 4.1895,
"step": 3850
},
{
"epoch": 0.4197610590894414,
"grad_norm": 0.770892858505249,
"learning_rate": 0.0005754336817153324,
"loss": 4.182,
"step": 3900
},
{
"epoch": 0.4251426111290496,
"grad_norm": 0.5891619920730591,
"learning_rate": 0.0005751104406852709,
"loss": 4.1666,
"step": 3950
},
{
"epoch": 0.4305241631686578,
"grad_norm": 0.6791778802871704,
"learning_rate": 0.0005747871996552096,
"loss": 4.1689,
"step": 4000
},
{
"epoch": 0.4305241631686578,
"eval_accuracy": 0.31222939319209253,
"eval_loss": 4.0884881019592285,
"eval_runtime": 184.3094,
"eval_samples_per_second": 97.722,
"eval_steps_per_second": 6.109,
"step": 4000
},
{
"epoch": 0.43590571520826604,
"grad_norm": 0.6507901549339294,
"learning_rate": 0.0005744639586251481,
"loss": 4.1549,
"step": 4050
},
{
"epoch": 0.44128726724787426,
"grad_norm": 0.7162640690803528,
"learning_rate": 0.0005741407175950867,
"loss": 4.1497,
"step": 4100
},
{
"epoch": 0.44666881928748253,
"grad_norm": 0.7263709902763367,
"learning_rate": 0.0005738174765650253,
"loss": 4.1271,
"step": 4150
},
{
"epoch": 0.45205037132709075,
"grad_norm": 0.6067292094230652,
"learning_rate": 0.0005734942355349638,
"loss": 4.1378,
"step": 4200
},
{
"epoch": 0.45743192336669897,
"grad_norm": 0.7140578031539917,
"learning_rate": 0.0005731709945049025,
"loss": 4.1394,
"step": 4250
},
{
"epoch": 0.4628134754063072,
"grad_norm": 0.8744107484817505,
"learning_rate": 0.000572847753474841,
"loss": 4.1203,
"step": 4300
},
{
"epoch": 0.4681950274459154,
"grad_norm": 0.6609825491905212,
"learning_rate": 0.0005725245124447796,
"loss": 4.1102,
"step": 4350
},
{
"epoch": 0.4735765794855236,
"grad_norm": 0.6945375204086304,
"learning_rate": 0.0005722012714147182,
"loss": 4.1125,
"step": 4400
},
{
"epoch": 0.47895813152513184,
"grad_norm": 0.7473881840705872,
"learning_rate": 0.0005718780303846568,
"loss": 4.1133,
"step": 4450
},
{
"epoch": 0.48433968356474005,
"grad_norm": 0.6308166980743408,
"learning_rate": 0.0005715547893545953,
"loss": 4.1187,
"step": 4500
},
{
"epoch": 0.48972123560434827,
"grad_norm": 0.8003607988357544,
"learning_rate": 0.000571231548324534,
"loss": 4.1168,
"step": 4550
},
{
"epoch": 0.49510278764395654,
"grad_norm": 0.6896440386772156,
"learning_rate": 0.0005709083072944725,
"loss": 4.1019,
"step": 4600
},
{
"epoch": 0.5004843396835648,
"grad_norm": 0.5829487442970276,
"learning_rate": 0.0005705850662644111,
"loss": 4.0929,
"step": 4650
},
{
"epoch": 0.505865891723173,
"grad_norm": 0.6313600540161133,
"learning_rate": 0.0005702618252343497,
"loss": 4.0804,
"step": 4700
},
{
"epoch": 0.5112474437627812,
"grad_norm": 0.6282418966293335,
"learning_rate": 0.0005699385842042882,
"loss": 4.1074,
"step": 4750
},
{
"epoch": 0.5166289958023894,
"grad_norm": 0.5928139090538025,
"learning_rate": 0.0005696153431742269,
"loss": 4.0956,
"step": 4800
},
{
"epoch": 0.5220105478419976,
"grad_norm": 0.6114786267280579,
"learning_rate": 0.0005692921021441655,
"loss": 4.0867,
"step": 4850
},
{
"epoch": 0.5273920998816058,
"grad_norm": 0.6260164380073547,
"learning_rate": 0.0005689688611141041,
"loss": 4.07,
"step": 4900
},
{
"epoch": 0.5327736519212141,
"grad_norm": 0.7232828140258789,
"learning_rate": 0.0005686456200840426,
"loss": 4.0782,
"step": 4950
},
{
"epoch": 0.5381552039608223,
"grad_norm": 0.6681451201438904,
"learning_rate": 0.0005683223790539811,
"loss": 4.0569,
"step": 5000
},
{
"epoch": 0.5381552039608223,
"eval_accuracy": 0.3208234020066667,
"eval_loss": 3.996339797973633,
"eval_runtime": 184.1238,
"eval_samples_per_second": 97.82,
"eval_steps_per_second": 6.115,
"step": 5000
},
{
"epoch": 0.5435367560004305,
"grad_norm": 0.6551739573478699,
"learning_rate": 0.0005679991380239198,
"loss": 4.0632,
"step": 5050
},
{
"epoch": 0.5489183080400387,
"grad_norm": 0.6114643812179565,
"learning_rate": 0.0005676758969938584,
"loss": 4.0459,
"step": 5100
},
{
"epoch": 0.5542998600796469,
"grad_norm": 0.6146873831748962,
"learning_rate": 0.000567352655963797,
"loss": 4.0601,
"step": 5150
},
{
"epoch": 0.5596814121192552,
"grad_norm": 0.6343026757240295,
"learning_rate": 0.0005670294149337355,
"loss": 4.044,
"step": 5200
},
{
"epoch": 0.5650629641588634,
"grad_norm": 0.6887774467468262,
"learning_rate": 0.0005667061739036742,
"loss": 4.0369,
"step": 5250
},
{
"epoch": 0.5704445161984716,
"grad_norm": 0.775775671005249,
"learning_rate": 0.0005663829328736127,
"loss": 4.041,
"step": 5300
},
{
"epoch": 0.5758260682380799,
"grad_norm": 0.646882176399231,
"learning_rate": 0.0005660596918435512,
"loss": 4.0477,
"step": 5350
},
{
"epoch": 0.5812076202776881,
"grad_norm": 0.645609438419342,
"learning_rate": 0.0005657364508134899,
"loss": 4.0269,
"step": 5400
},
{
"epoch": 0.5865891723172963,
"grad_norm": 0.637544572353363,
"learning_rate": 0.0005654132097834284,
"loss": 4.0299,
"step": 5450
},
{
"epoch": 0.5919707243569046,
"grad_norm": 0.5601037740707397,
"learning_rate": 0.0005650899687533671,
"loss": 4.0364,
"step": 5500
},
{
"epoch": 0.5973522763965128,
"grad_norm": 0.6552086472511292,
"learning_rate": 0.0005647667277233056,
"loss": 4.0319,
"step": 5550
},
{
"epoch": 0.602733828436121,
"grad_norm": 0.6226367950439453,
"learning_rate": 0.0005644434866932442,
"loss": 4.0239,
"step": 5600
},
{
"epoch": 0.6081153804757292,
"grad_norm": 0.6150733828544617,
"learning_rate": 0.0005641202456631828,
"loss": 4.0096,
"step": 5650
},
{
"epoch": 0.6134969325153374,
"grad_norm": 0.6194325685501099,
"learning_rate": 0.0005637970046331214,
"loss": 4.0067,
"step": 5700
},
{
"epoch": 0.6188784845549457,
"grad_norm": 0.6258173584938049,
"learning_rate": 0.00056347376360306,
"loss": 4.0021,
"step": 5750
},
{
"epoch": 0.6242600365945539,
"grad_norm": 0.5563567876815796,
"learning_rate": 0.0005631505225729985,
"loss": 3.9966,
"step": 5800
},
{
"epoch": 0.6296415886341621,
"grad_norm": 0.7610234618186951,
"learning_rate": 0.0005628272815429371,
"loss": 3.9795,
"step": 5850
},
{
"epoch": 0.6350231406737703,
"grad_norm": 0.6821996569633484,
"learning_rate": 0.0005625040405128757,
"loss": 3.9997,
"step": 5900
},
{
"epoch": 0.6404046927133785,
"grad_norm": 0.6400163173675537,
"learning_rate": 0.0005621807994828143,
"loss": 4.0132,
"step": 5950
},
{
"epoch": 0.6457862447529867,
"grad_norm": 0.7269625067710876,
"learning_rate": 0.0005618575584527529,
"loss": 4.0125,
"step": 6000
},
{
"epoch": 0.6457862447529867,
"eval_accuracy": 0.3269717429694248,
"eval_loss": 3.925267457962036,
"eval_runtime": 184.1043,
"eval_samples_per_second": 97.83,
"eval_steps_per_second": 6.116,
"step": 6000
},
{
"epoch": 0.651167796792595,
"grad_norm": 0.6363526582717896,
"learning_rate": 0.0005615343174226915,
"loss": 3.9904,
"step": 6050
},
{
"epoch": 0.6565493488322032,
"grad_norm": 0.6771544218063354,
"learning_rate": 0.00056121107639263,
"loss": 3.9856,
"step": 6100
},
{
"epoch": 0.6619309008718114,
"grad_norm": 0.6212124824523926,
"learning_rate": 0.0005608878353625687,
"loss": 3.9796,
"step": 6150
},
{
"epoch": 0.6673124529114196,
"grad_norm": 0.7166991233825684,
"learning_rate": 0.0005605645943325072,
"loss": 3.9659,
"step": 6200
},
{
"epoch": 0.6726940049510278,
"grad_norm": 0.6104118824005127,
"learning_rate": 0.0005602413533024458,
"loss": 3.9671,
"step": 6250
},
{
"epoch": 0.6780755569906362,
"grad_norm": 0.6991214156150818,
"learning_rate": 0.0005599181122723844,
"loss": 3.9859,
"step": 6300
},
{
"epoch": 0.6834571090302444,
"grad_norm": 0.6045164465904236,
"learning_rate": 0.000559594871242323,
"loss": 3.9565,
"step": 6350
},
{
"epoch": 0.6888386610698526,
"grad_norm": 0.6517415046691895,
"learning_rate": 0.0005592716302122616,
"loss": 3.9662,
"step": 6400
},
{
"epoch": 0.6942202131094608,
"grad_norm": 0.6841729283332825,
"learning_rate": 0.0005589483891822001,
"loss": 3.9579,
"step": 6450
},
{
"epoch": 0.699601765149069,
"grad_norm": 0.591302216053009,
"learning_rate": 0.0005586251481521387,
"loss": 3.9575,
"step": 6500
},
{
"epoch": 0.7049833171886772,
"grad_norm": 0.6354752779006958,
"learning_rate": 0.0005583083719426786,
"loss": 3.9505,
"step": 6550
},
{
"epoch": 0.7103648692282855,
"grad_norm": 0.6361892223358154,
"learning_rate": 0.0005579851309126171,
"loss": 3.9571,
"step": 6600
},
{
"epoch": 0.7157464212678937,
"grad_norm": 0.5964643955230713,
"learning_rate": 0.0005576618898825558,
"loss": 3.9424,
"step": 6650
},
{
"epoch": 0.7211279733075019,
"grad_norm": 0.718136727809906,
"learning_rate": 0.0005573386488524943,
"loss": 3.9508,
"step": 6700
},
{
"epoch": 0.7265095253471101,
"grad_norm": 0.6186783909797668,
"learning_rate": 0.0005570154078224328,
"loss": 3.9547,
"step": 6750
},
{
"epoch": 0.7318910773867183,
"grad_norm": 0.5839592814445496,
"learning_rate": 0.0005566921667923715,
"loss": 3.9387,
"step": 6800
},
{
"epoch": 0.7372726294263265,
"grad_norm": 0.5628191232681274,
"learning_rate": 0.00055636892576231,
"loss": 3.9408,
"step": 6850
},
{
"epoch": 0.7426541814659348,
"grad_norm": 0.7687479853630066,
"learning_rate": 0.0005560456847322487,
"loss": 3.9491,
"step": 6900
},
{
"epoch": 0.748035733505543,
"grad_norm": 0.5638557076454163,
"learning_rate": 0.0005557224437021872,
"loss": 3.9488,
"step": 6950
},
{
"epoch": 0.7534172855451512,
"grad_norm": 0.6913768649101257,
"learning_rate": 0.0005553992026721258,
"loss": 3.9257,
"step": 7000
},
{
"epoch": 0.7534172855451512,
"eval_accuracy": 0.3327180682472692,
"eval_loss": 3.864708662033081,
"eval_runtime": 184.286,
"eval_samples_per_second": 97.734,
"eval_steps_per_second": 6.11,
"step": 7000
},
{
"epoch": 0.7587988375847594,
"grad_norm": 0.5453241467475891,
"learning_rate": 0.0005550759616420644,
"loss": 3.9302,
"step": 7050
},
{
"epoch": 0.7641803896243676,
"grad_norm": 0.6204462051391602,
"learning_rate": 0.000554752720612003,
"loss": 3.9324,
"step": 7100
},
{
"epoch": 0.7695619416639758,
"grad_norm": 0.5680400133132935,
"learning_rate": 0.0005544294795819415,
"loss": 3.9254,
"step": 7150
},
{
"epoch": 0.7749434937035841,
"grad_norm": 0.6182464957237244,
"learning_rate": 0.0005541062385518801,
"loss": 3.9359,
"step": 7200
},
{
"epoch": 0.7803250457431924,
"grad_norm": 0.6028207540512085,
"learning_rate": 0.0005537829975218188,
"loss": 3.9335,
"step": 7250
},
{
"epoch": 0.7857065977828006,
"grad_norm": 0.5548726320266724,
"learning_rate": 0.0005534597564917573,
"loss": 3.913,
"step": 7300
},
{
"epoch": 0.7910881498224088,
"grad_norm": 0.5448580384254456,
"learning_rate": 0.0005531365154616959,
"loss": 3.9274,
"step": 7350
},
{
"epoch": 0.796469701862017,
"grad_norm": 0.686208963394165,
"learning_rate": 0.0005528132744316344,
"loss": 3.9146,
"step": 7400
},
{
"epoch": 0.8018512539016253,
"grad_norm": 0.6147581934928894,
"learning_rate": 0.0005524900334015731,
"loss": 3.9209,
"step": 7450
},
{
"epoch": 0.8072328059412335,
"grad_norm": 0.6350402235984802,
"learning_rate": 0.0005521667923715117,
"loss": 3.9114,
"step": 7500
},
{
"epoch": 0.8126143579808417,
"grad_norm": 0.598574161529541,
"learning_rate": 0.0005518435513414502,
"loss": 3.9369,
"step": 7550
},
{
"epoch": 0.8179959100204499,
"grad_norm": 0.6298902630805969,
"learning_rate": 0.0005515203103113888,
"loss": 3.9029,
"step": 7600
},
{
"epoch": 0.8233774620600581,
"grad_norm": 0.5852333307266235,
"learning_rate": 0.0005511970692813274,
"loss": 3.8858,
"step": 7650
},
{
"epoch": 0.8287590140996663,
"grad_norm": 0.6372036337852478,
"learning_rate": 0.000550873828251266,
"loss": 3.9097,
"step": 7700
},
{
"epoch": 0.8341405661392746,
"grad_norm": 0.6588336825370789,
"learning_rate": 0.0005505505872212045,
"loss": 3.9069,
"step": 7750
},
{
"epoch": 0.8395221181788828,
"grad_norm": 0.6206655502319336,
"learning_rate": 0.0005502273461911432,
"loss": 3.8922,
"step": 7800
},
{
"epoch": 0.844903670218491,
"grad_norm": 0.6114910244941711,
"learning_rate": 0.0005499041051610817,
"loss": 3.8982,
"step": 7850
},
{
"epoch": 0.8502852222580992,
"grad_norm": 0.6116674542427063,
"learning_rate": 0.0005495808641310204,
"loss": 3.8957,
"step": 7900
},
{
"epoch": 0.8556667742977074,
"grad_norm": 0.6183781623840332,
"learning_rate": 0.0005492576231009589,
"loss": 3.8894,
"step": 7950
},
{
"epoch": 0.8610483263373157,
"grad_norm": 0.6823594570159912,
"learning_rate": 0.0005489343820708974,
"loss": 3.8987,
"step": 8000
},
{
"epoch": 0.8610483263373157,
"eval_accuracy": 0.3365249394776252,
"eval_loss": 3.825028419494629,
"eval_runtime": 184.1616,
"eval_samples_per_second": 97.8,
"eval_steps_per_second": 6.114,
"step": 8000
},
{
"epoch": 0.8664298783769239,
"grad_norm": 0.5773827433586121,
"learning_rate": 0.0005486111410408361,
"loss": 3.8826,
"step": 8050
},
{
"epoch": 0.8718114304165321,
"grad_norm": 0.6269072890281677,
"learning_rate": 0.0005482879000107746,
"loss": 3.8726,
"step": 8100
},
{
"epoch": 0.8771929824561403,
"grad_norm": 0.5973522663116455,
"learning_rate": 0.0005479646589807133,
"loss": 3.87,
"step": 8150
},
{
"epoch": 0.8825745344957485,
"grad_norm": 0.6495493650436401,
"learning_rate": 0.0005476414179506518,
"loss": 3.8642,
"step": 8200
},
{
"epoch": 0.8879560865353568,
"grad_norm": 0.5791492462158203,
"learning_rate": 0.0005473181769205904,
"loss": 3.8504,
"step": 8250
},
{
"epoch": 0.8933376385749651,
"grad_norm": 0.5900397300720215,
"learning_rate": 0.000546994935890529,
"loss": 3.8612,
"step": 8300
},
{
"epoch": 0.8987191906145733,
"grad_norm": 0.5566937327384949,
"learning_rate": 0.0005466716948604677,
"loss": 3.8794,
"step": 8350
},
{
"epoch": 0.9041007426541815,
"grad_norm": 0.6604649424552917,
"learning_rate": 0.0005463484538304062,
"loss": 3.8659,
"step": 8400
},
{
"epoch": 0.9094822946937897,
"grad_norm": 0.5881569385528564,
"learning_rate": 0.0005460252128003447,
"loss": 3.8646,
"step": 8450
},
{
"epoch": 0.9148638467333979,
"grad_norm": 0.586227297782898,
"learning_rate": 0.0005457019717702833,
"loss": 3.8677,
"step": 8500
},
{
"epoch": 0.9202453987730062,
"grad_norm": 0.5569677352905273,
"learning_rate": 0.0005453787307402219,
"loss": 3.8548,
"step": 8550
},
{
"epoch": 0.9256269508126144,
"grad_norm": 0.5891553163528442,
"learning_rate": 0.0005450554897101605,
"loss": 3.8741,
"step": 8600
},
{
"epoch": 0.9310085028522226,
"grad_norm": 0.5274822115898132,
"learning_rate": 0.0005447322486800991,
"loss": 3.8376,
"step": 8650
},
{
"epoch": 0.9363900548918308,
"grad_norm": 0.6486146450042725,
"learning_rate": 0.0005444090076500377,
"loss": 3.8607,
"step": 8700
},
{
"epoch": 0.941771606931439,
"grad_norm": 0.5777039527893066,
"learning_rate": 0.0005440922314405775,
"loss": 3.8713,
"step": 8750
},
{
"epoch": 0.9471531589710472,
"grad_norm": 0.5529994964599609,
"learning_rate": 0.0005437689904105161,
"loss": 3.8491,
"step": 8800
},
{
"epoch": 0.9525347110106555,
"grad_norm": 0.5525511503219604,
"learning_rate": 0.0005434457493804546,
"loss": 3.8521,
"step": 8850
},
{
"epoch": 0.9579162630502637,
"grad_norm": 0.6336971521377563,
"learning_rate": 0.0005431225083503932,
"loss": 3.8456,
"step": 8900
},
{
"epoch": 0.9632978150898719,
"grad_norm": 0.6398906707763672,
"learning_rate": 0.0005427992673203318,
"loss": 3.8494,
"step": 8950
},
{
"epoch": 0.9686793671294801,
"grad_norm": 0.5508920550346375,
"learning_rate": 0.0005424760262902704,
"loss": 3.837,
"step": 9000
},
{
"epoch": 0.9686793671294801,
"eval_accuracy": 0.34044165877756155,
"eval_loss": 3.782215118408203,
"eval_runtime": 183.9619,
"eval_samples_per_second": 97.906,
"eval_steps_per_second": 6.121,
"step": 9000
},
{
"epoch": 0.9740609191690883,
"grad_norm": 0.5972535014152527,
"learning_rate": 0.000542152785260209,
"loss": 3.8537,
"step": 9050
},
{
"epoch": 0.9794424712086965,
"grad_norm": 0.6076335310935974,
"learning_rate": 0.0005418295442301476,
"loss": 3.8602,
"step": 9100
},
{
"epoch": 0.9848240232483048,
"grad_norm": 0.6718324422836304,
"learning_rate": 0.0005415063032000861,
"loss": 3.8268,
"step": 9150
},
{
"epoch": 0.9902055752879131,
"grad_norm": 0.5986764430999756,
"learning_rate": 0.0005411830621700248,
"loss": 3.8535,
"step": 9200
},
{
"epoch": 0.9955871273275213,
"grad_norm": 0.5580224990844727,
"learning_rate": 0.0005408598211399633,
"loss": 3.8519,
"step": 9250
},
{
"epoch": 1.0009686793671295,
"grad_norm": 0.5752728581428528,
"learning_rate": 0.0005405365801099019,
"loss": 3.8272,
"step": 9300
},
{
"epoch": 1.0063502314067376,
"grad_norm": 0.6517691016197205,
"learning_rate": 0.0005402133390798405,
"loss": 3.7748,
"step": 9350
},
{
"epoch": 1.011731783446346,
"grad_norm": 0.5505602955818176,
"learning_rate": 0.000539890098049779,
"loss": 3.773,
"step": 9400
},
{
"epoch": 1.017113335485954,
"grad_norm": 0.5624109506607056,
"learning_rate": 0.0005395668570197177,
"loss": 3.784,
"step": 9450
},
{
"epoch": 1.0224948875255624,
"grad_norm": 0.5721527338027954,
"learning_rate": 0.0005392436159896562,
"loss": 3.7722,
"step": 9500
},
{
"epoch": 1.0278764395651705,
"grad_norm": 0.5518069863319397,
"learning_rate": 0.0005389203749595948,
"loss": 3.7686,
"step": 9550
},
{
"epoch": 1.0332579916047788,
"grad_norm": 0.5892334580421448,
"learning_rate": 0.0005385971339295334,
"loss": 3.768,
"step": 9600
},
{
"epoch": 1.0386395436443872,
"grad_norm": 0.5937067866325378,
"learning_rate": 0.000538273892899472,
"loss": 3.7844,
"step": 9650
},
{
"epoch": 1.0440210956839953,
"grad_norm": 0.6311096549034119,
"learning_rate": 0.0005379506518694106,
"loss": 3.7654,
"step": 9700
},
{
"epoch": 1.0494026477236036,
"grad_norm": 0.5864768624305725,
"learning_rate": 0.0005376274108393491,
"loss": 3.7586,
"step": 9750
},
{
"epoch": 1.0547841997632117,
"grad_norm": 0.5333934426307678,
"learning_rate": 0.0005373041698092877,
"loss": 3.7562,
"step": 9800
},
{
"epoch": 1.06016575180282,
"grad_norm": 0.6055988073348999,
"learning_rate": 0.0005369809287792263,
"loss": 3.7772,
"step": 9850
},
{
"epoch": 1.0655473038424281,
"grad_norm": 0.534672737121582,
"learning_rate": 0.000536657687749165,
"loss": 3.7656,
"step": 9900
},
{
"epoch": 1.0709288558820365,
"grad_norm": 0.644813597202301,
"learning_rate": 0.0005363344467191035,
"loss": 3.7714,
"step": 9950
},
{
"epoch": 1.0763104079216446,
"grad_norm": 0.6052290797233582,
"learning_rate": 0.000536011205689042,
"loss": 3.7725,
"step": 10000
},
{
"epoch": 1.0763104079216446,
"eval_accuracy": 0.34394756150867145,
"eval_loss": 3.754002809524536,
"eval_runtime": 184.3547,
"eval_samples_per_second": 97.698,
"eval_steps_per_second": 6.108,
"step": 10000
},
{
"epoch": 1.081691959961253,
"grad_norm": 0.5823466777801514,
"learning_rate": 0.0005356879646589807,
"loss": 3.7756,
"step": 10050
},
{
"epoch": 1.087073512000861,
"grad_norm": 0.5655180811882019,
"learning_rate": 0.0005353647236289192,
"loss": 3.7547,
"step": 10100
},
{
"epoch": 1.0924550640404693,
"grad_norm": 0.5540060997009277,
"learning_rate": 0.0005350414825988579,
"loss": 3.7692,
"step": 10150
},
{
"epoch": 1.0978366160800774,
"grad_norm": 0.5735357999801636,
"learning_rate": 0.0005347182415687964,
"loss": 3.7782,
"step": 10200
},
{
"epoch": 1.1032181681196858,
"grad_norm": 0.6658727526664734,
"learning_rate": 0.000534395000538735,
"loss": 3.7972,
"step": 10250
},
{
"epoch": 1.1085997201592939,
"grad_norm": 0.6762692332267761,
"learning_rate": 0.0005340717595086736,
"loss": 3.7446,
"step": 10300
},
{
"epoch": 1.1139812721989022,
"grad_norm": 0.5580800175666809,
"learning_rate": 0.0005337485184786122,
"loss": 3.7644,
"step": 10350
},
{
"epoch": 1.1193628242385103,
"grad_norm": 0.6203590631484985,
"learning_rate": 0.0005334252774485507,
"loss": 3.77,
"step": 10400
},
{
"epoch": 1.1247443762781186,
"grad_norm": 0.5936480760574341,
"learning_rate": 0.0005331020364184894,
"loss": 3.7849,
"step": 10450
},
{
"epoch": 1.1301259283177267,
"grad_norm": 0.5875517129898071,
"learning_rate": 0.0005327787953884279,
"loss": 3.7623,
"step": 10500
},
{
"epoch": 1.135507480357335,
"grad_norm": 0.5761053562164307,
"learning_rate": 0.0005324555543583665,
"loss": 3.7619,
"step": 10550
},
{
"epoch": 1.1408890323969434,
"grad_norm": 0.6478322744369507,
"learning_rate": 0.0005321323133283051,
"loss": 3.749,
"step": 10600
},
{
"epoch": 1.1462705844365515,
"grad_norm": 0.6216648817062378,
"learning_rate": 0.0005318090722982436,
"loss": 3.7505,
"step": 10650
},
{
"epoch": 1.1516521364761596,
"grad_norm": 0.5962103009223938,
"learning_rate": 0.0005314858312681823,
"loss": 3.7563,
"step": 10700
},
{
"epoch": 1.157033688515768,
"grad_norm": 0.6087831854820251,
"learning_rate": 0.0005311625902381209,
"loss": 3.7601,
"step": 10750
},
{
"epoch": 1.1624152405553763,
"grad_norm": 0.6549679636955261,
"learning_rate": 0.0005308393492080595,
"loss": 3.7554,
"step": 10800
},
{
"epoch": 1.1677967925949844,
"grad_norm": 0.5932186841964722,
"learning_rate": 0.000530516108177998,
"loss": 3.7631,
"step": 10850
},
{
"epoch": 1.1731783446345927,
"grad_norm": 0.5829725861549377,
"learning_rate": 0.0005301928671479365,
"loss": 3.7619,
"step": 10900
},
{
"epoch": 1.1785598966742008,
"grad_norm": 0.6248167753219604,
"learning_rate": 0.0005298696261178752,
"loss": 3.7551,
"step": 10950
},
{
"epoch": 1.1839414487138091,
"grad_norm": 0.5710045695304871,
"learning_rate": 0.0005295463850878138,
"loss": 3.7577,
"step": 11000
},
{
"epoch": 1.1839414487138091,
"eval_accuracy": 0.3465084014215709,
"eval_loss": 3.7283456325531006,
"eval_runtime": 184.2095,
"eval_samples_per_second": 97.775,
"eval_steps_per_second": 6.113,
"step": 11000
},
{
"epoch": 1.1893230007534172,
"grad_norm": 0.5649476051330566,
"learning_rate": 0.0005292296088783535,
"loss": 3.7498,
"step": 11050
},
{
"epoch": 1.1947045527930256,
"grad_norm": 0.5573504567146301,
"learning_rate": 0.0005289063678482922,
"loss": 3.7595,
"step": 11100
},
{
"epoch": 1.2000861048326337,
"grad_norm": 0.5430361032485962,
"learning_rate": 0.0005285831268182307,
"loss": 3.7529,
"step": 11150
},
{
"epoch": 1.205467656872242,
"grad_norm": 0.5485645532608032,
"learning_rate": 0.0005282598857881694,
"loss": 3.7594,
"step": 11200
},
{
"epoch": 1.21084920891185,
"grad_norm": 0.5611818432807922,
"learning_rate": 0.0005279366447581079,
"loss": 3.7436,
"step": 11250
},
{
"epoch": 1.2162307609514584,
"grad_norm": 0.5610353350639343,
"learning_rate": 0.0005276134037280465,
"loss": 3.7635,
"step": 11300
},
{
"epoch": 1.2216123129910665,
"grad_norm": 0.5841692686080933,
"learning_rate": 0.0005272901626979851,
"loss": 3.7718,
"step": 11350
},
{
"epoch": 1.2269938650306749,
"grad_norm": 0.5393407940864563,
"learning_rate": 0.0005269669216679236,
"loss": 3.7475,
"step": 11400
},
{
"epoch": 1.232375417070283,
"grad_norm": 0.561508059501648,
"learning_rate": 0.0005266436806378623,
"loss": 3.7624,
"step": 11450
},
{
"epoch": 1.2377569691098913,
"grad_norm": 0.6286839842796326,
"learning_rate": 0.0005263204396078008,
"loss": 3.7587,
"step": 11500
},
{
"epoch": 1.2431385211494996,
"grad_norm": 0.6179966926574707,
"learning_rate": 0.0005259971985777394,
"loss": 3.7298,
"step": 11550
},
{
"epoch": 1.2485200731891077,
"grad_norm": 0.5545424222946167,
"learning_rate": 0.000525673957547678,
"loss": 3.7309,
"step": 11600
},
{
"epoch": 1.2539016252287158,
"grad_norm": 0.5041254162788391,
"learning_rate": 0.0005253507165176167,
"loss": 3.7386,
"step": 11650
},
{
"epoch": 1.2592831772683242,
"grad_norm": 0.5765021443367004,
"learning_rate": 0.0005250274754875552,
"loss": 3.7517,
"step": 11700
},
{
"epoch": 1.2646647293079325,
"grad_norm": 0.5996841788291931,
"learning_rate": 0.0005247042344574938,
"loss": 3.7354,
"step": 11750
},
{
"epoch": 1.2700462813475406,
"grad_norm": 0.5650532841682434,
"learning_rate": 0.0005243809934274323,
"loss": 3.7446,
"step": 11800
},
{
"epoch": 1.275427833387149,
"grad_norm": 0.6425164937973022,
"learning_rate": 0.0005240577523973709,
"loss": 3.7483,
"step": 11850
},
{
"epoch": 1.280809385426757,
"grad_norm": 0.6343678832054138,
"learning_rate": 0.0005237345113673095,
"loss": 3.7166,
"step": 11900
},
{
"epoch": 1.2861909374663654,
"grad_norm": 0.6143035888671875,
"learning_rate": 0.0005234112703372481,
"loss": 3.7269,
"step": 11950
},
{
"epoch": 1.2915724895059735,
"grad_norm": 0.5740583539009094,
"learning_rate": 0.0005230880293071867,
"loss": 3.7502,
"step": 12000
},
{
"epoch": 1.2915724895059735,
"eval_accuracy": 0.34853836332460886,
"eval_loss": 3.703268051147461,
"eval_runtime": 184.8299,
"eval_samples_per_second": 97.446,
"eval_steps_per_second": 6.092,
"step": 12000
},
{
"epoch": 1.2969540415455818,
"grad_norm": 0.5388001799583435,
"learning_rate": 0.0005227647882771253,
"loss": 3.7413,
"step": 12050
},
{
"epoch": 1.30233559358519,
"grad_norm": 0.5561287999153137,
"learning_rate": 0.0005224415472470639,
"loss": 3.7277,
"step": 12100
},
{
"epoch": 1.3077171456247982,
"grad_norm": 0.5779213309288025,
"learning_rate": 0.0005221183062170024,
"loss": 3.7232,
"step": 12150
},
{
"epoch": 1.3130986976644063,
"grad_norm": 0.5532439351081848,
"learning_rate": 0.0005217950651869409,
"loss": 3.742,
"step": 12200
},
{
"epoch": 1.3184802497040147,
"grad_norm": 0.619836151599884,
"learning_rate": 0.0005214718241568796,
"loss": 3.7342,
"step": 12250
},
{
"epoch": 1.3238618017436228,
"grad_norm": 0.5282240509986877,
"learning_rate": 0.0005211485831268182,
"loss": 3.7343,
"step": 12300
},
{
"epoch": 1.329243353783231,
"grad_norm": 0.5425903797149658,
"learning_rate": 0.0005208253420967568,
"loss": 3.724,
"step": 12350
},
{
"epoch": 1.3346249058228392,
"grad_norm": 0.5502631664276123,
"learning_rate": 0.0005205021010666953,
"loss": 3.7427,
"step": 12400
},
{
"epoch": 1.3400064578624475,
"grad_norm": 0.5649226307868958,
"learning_rate": 0.0005201788600366339,
"loss": 3.7308,
"step": 12450
},
{
"epoch": 1.3453880099020559,
"grad_norm": 0.6057882905006409,
"learning_rate": 0.0005198556190065725,
"loss": 3.7329,
"step": 12500
},
{
"epoch": 1.350769561941664,
"grad_norm": 0.5616216659545898,
"learning_rate": 0.0005195323779765112,
"loss": 3.7356,
"step": 12550
},
{
"epoch": 1.356151113981272,
"grad_norm": 0.5632114410400391,
"learning_rate": 0.0005192091369464497,
"loss": 3.7176,
"step": 12600
},
{
"epoch": 1.3615326660208804,
"grad_norm": 0.5245296359062195,
"learning_rate": 0.0005188858959163882,
"loss": 3.7434,
"step": 12650
},
{
"epoch": 1.3669142180604887,
"grad_norm": 0.566827654838562,
"learning_rate": 0.0005185626548863269,
"loss": 3.7283,
"step": 12700
},
{
"epoch": 1.3722957701000968,
"grad_norm": 0.5973430871963501,
"learning_rate": 0.0005182394138562654,
"loss": 3.7161,
"step": 12750
},
{
"epoch": 1.3776773221397052,
"grad_norm": 0.6542244553565979,
"learning_rate": 0.0005179161728262041,
"loss": 3.7091,
"step": 12800
},
{
"epoch": 1.3830588741793133,
"grad_norm": 0.5695278644561768,
"learning_rate": 0.0005175929317961426,
"loss": 3.7389,
"step": 12850
},
{
"epoch": 1.3884404262189216,
"grad_norm": 0.5513305068016052,
"learning_rate": 0.0005172696907660812,
"loss": 3.7244,
"step": 12900
},
{
"epoch": 1.3938219782585297,
"grad_norm": 0.6421329379081726,
"learning_rate": 0.0005169464497360198,
"loss": 3.721,
"step": 12950
},
{
"epoch": 1.399203530298138,
"grad_norm": 0.5805631279945374,
"learning_rate": 0.0005166232087059583,
"loss": 3.7256,
"step": 13000
},
{
"epoch": 1.399203530298138,
"eval_accuracy": 0.3509969608700749,
"eval_loss": 3.6784543991088867,
"eval_runtime": 184.6324,
"eval_samples_per_second": 97.551,
"eval_steps_per_second": 6.099,
"step": 13000
},
{
"epoch": 1.4045850823377461,
"grad_norm": 0.5761812925338745,
"learning_rate": 0.0005162999676758969,
"loss": 3.716,
"step": 13050
},
{
"epoch": 1.4099666343773545,
"grad_norm": 0.6273037791252136,
"learning_rate": 0.0005159767266458355,
"loss": 3.7195,
"step": 13100
},
{
"epoch": 1.4153481864169626,
"grad_norm": 0.5659797787666321,
"learning_rate": 0.0005156534856157741,
"loss": 3.7202,
"step": 13150
},
{
"epoch": 1.420729738456571,
"grad_norm": 0.5564277768135071,
"learning_rate": 0.0005153302445857127,
"loss": 3.7025,
"step": 13200
},
{
"epoch": 1.426111290496179,
"grad_norm": 0.5497375130653381,
"learning_rate": 0.0005150070035556513,
"loss": 3.7169,
"step": 13250
},
{
"epoch": 1.4314928425357873,
"grad_norm": 0.5665987730026245,
"learning_rate": 0.0005146902273461911,
"loss": 3.7145,
"step": 13300
},
{
"epoch": 1.4368743945753955,
"grad_norm": 0.5927955508232117,
"learning_rate": 0.0005143669863161297,
"loss": 3.7128,
"step": 13350
},
{
"epoch": 1.4422559466150038,
"grad_norm": 0.553808867931366,
"learning_rate": 0.0005140437452860683,
"loss": 3.7183,
"step": 13400
},
{
"epoch": 1.447637498654612,
"grad_norm": 0.5321818590164185,
"learning_rate": 0.0005137205042560069,
"loss": 3.7137,
"step": 13450
},
{
"epoch": 1.4530190506942202,
"grad_norm": 0.6058953404426575,
"learning_rate": 0.0005133972632259455,
"loss": 3.7225,
"step": 13500
},
{
"epoch": 1.4584006027338283,
"grad_norm": 0.5817305445671082,
"learning_rate": 0.000513074022195884,
"loss": 3.7164,
"step": 13550
},
{
"epoch": 1.4637821547734367,
"grad_norm": 0.565757155418396,
"learning_rate": 0.0005127507811658226,
"loss": 3.7019,
"step": 13600
},
{
"epoch": 1.469163706813045,
"grad_norm": 0.5681242942810059,
"learning_rate": 0.0005124275401357612,
"loss": 3.7118,
"step": 13650
},
{
"epoch": 1.474545258852653,
"grad_norm": 0.5758519768714905,
"learning_rate": 0.0005121042991056997,
"loss": 3.7026,
"step": 13700
},
{
"epoch": 1.4799268108922612,
"grad_norm": 0.6025715470314026,
"learning_rate": 0.0005117810580756384,
"loss": 3.6834,
"step": 13750
},
{
"epoch": 1.4853083629318695,
"grad_norm": 0.5520561337471008,
"learning_rate": 0.0005114578170455769,
"loss": 3.6964,
"step": 13800
},
{
"epoch": 1.4906899149714778,
"grad_norm": 0.604680061340332,
"learning_rate": 0.0005111345760155156,
"loss": 3.7189,
"step": 13850
},
{
"epoch": 1.496071467011086,
"grad_norm": 0.7172315716743469,
"learning_rate": 0.0005108113349854541,
"loss": 3.7077,
"step": 13900
},
{
"epoch": 1.501453019050694,
"grad_norm": 0.5513233542442322,
"learning_rate": 0.0005104880939553926,
"loss": 3.6821,
"step": 13950
},
{
"epoch": 1.5068345710903024,
"grad_norm": 0.6223646402359009,
"learning_rate": 0.0005101648529253313,
"loss": 3.7026,
"step": 14000
},
{
"epoch": 1.5068345710903024,
"eval_accuracy": 0.3527702846520951,
"eval_loss": 3.66027569770813,
"eval_runtime": 184.9365,
"eval_samples_per_second": 97.39,
"eval_steps_per_second": 6.089,
"step": 14000
},
{
"epoch": 1.5122161231299107,
"grad_norm": 0.5914530754089355,
"learning_rate": 0.0005098416118952699,
"loss": 3.6923,
"step": 14050
},
{
"epoch": 1.5175976751695188,
"grad_norm": 0.5675052404403687,
"learning_rate": 0.0005095183708652085,
"loss": 3.7059,
"step": 14100
},
{
"epoch": 1.5229792272091272,
"grad_norm": 0.5429747700691223,
"learning_rate": 0.000509195129835147,
"loss": 3.6907,
"step": 14150
},
{
"epoch": 1.5283607792487355,
"grad_norm": 0.5908409953117371,
"learning_rate": 0.0005088718888050856,
"loss": 3.6958,
"step": 14200
},
{
"epoch": 1.5337423312883436,
"grad_norm": 0.600124180316925,
"learning_rate": 0.0005085486477750242,
"loss": 3.702,
"step": 14250
},
{
"epoch": 1.5391238833279517,
"grad_norm": 0.5889090895652771,
"learning_rate": 0.0005082254067449629,
"loss": 3.704,
"step": 14300
},
{
"epoch": 1.54450543536756,
"grad_norm": 0.5812370777130127,
"learning_rate": 0.0005079021657149014,
"loss": 3.7089,
"step": 14350
},
{
"epoch": 1.5498869874071683,
"grad_norm": 0.640620231628418,
"learning_rate": 0.0005075789246848399,
"loss": 3.676,
"step": 14400
},
{
"epoch": 1.5552685394467765,
"grad_norm": 0.5642813444137573,
"learning_rate": 0.0005072556836547785,
"loss": 3.7029,
"step": 14450
},
{
"epoch": 1.5606500914863846,
"grad_norm": 0.5681257247924805,
"learning_rate": 0.0005069324426247171,
"loss": 3.6864,
"step": 14500
},
{
"epoch": 1.566031643525993,
"grad_norm": 0.5891586542129517,
"learning_rate": 0.0005066092015946557,
"loss": 3.6999,
"step": 14550
},
{
"epoch": 1.5714131955656012,
"grad_norm": 0.5758991837501526,
"learning_rate": 0.0005062859605645943,
"loss": 3.6825,
"step": 14600
},
{
"epoch": 1.5767947476052093,
"grad_norm": 0.5694742202758789,
"learning_rate": 0.0005059627195345329,
"loss": 3.6859,
"step": 14650
},
{
"epoch": 1.5821762996448174,
"grad_norm": 0.5646113157272339,
"learning_rate": 0.0005056394785044715,
"loss": 3.6862,
"step": 14700
},
{
"epoch": 1.5875578516844258,
"grad_norm": 0.5336816310882568,
"learning_rate": 0.00050531623747441,
"loss": 3.6966,
"step": 14750
},
{
"epoch": 1.592939403724034,
"grad_norm": 0.546383798122406,
"learning_rate": 0.0005049929964443486,
"loss": 3.6899,
"step": 14800
},
{
"epoch": 1.5983209557636422,
"grad_norm": 0.6023898124694824,
"learning_rate": 0.0005046697554142871,
"loss": 3.6788,
"step": 14850
},
{
"epoch": 1.6037025078032503,
"grad_norm": 0.5576519966125488,
"learning_rate": 0.0005043465143842258,
"loss": 3.708,
"step": 14900
},
{
"epoch": 1.6090840598428586,
"grad_norm": 0.5140047073364258,
"learning_rate": 0.0005040232733541644,
"loss": 3.6735,
"step": 14950
},
{
"epoch": 1.614465611882467,
"grad_norm": 0.5628222823143005,
"learning_rate": 0.000503700032324103,
"loss": 3.6954,
"step": 15000
},
{
"epoch": 1.614465611882467,
"eval_accuracy": 0.35463226919057184,
"eval_loss": 3.6429481506347656,
"eval_runtime": 184.8414,
"eval_samples_per_second": 97.44,
"eval_steps_per_second": 6.092,
"step": 15000
},
{
"epoch": 1.619847163922075,
"grad_norm": 0.5722090601921082,
"learning_rate": 0.0005033767912940415,
"loss": 3.6689,
"step": 15050
},
{
"epoch": 1.6252287159616834,
"grad_norm": 0.6029437184333801,
"learning_rate": 0.0005030535502639802,
"loss": 3.682,
"step": 15100
},
{
"epoch": 1.6306102680012917,
"grad_norm": 0.5833781957626343,
"learning_rate": 0.0005027303092339187,
"loss": 3.6812,
"step": 15150
},
{
"epoch": 1.6359918200408998,
"grad_norm": 0.5960364937782288,
"learning_rate": 0.0005024135330244585,
"loss": 3.6792,
"step": 15200
},
{
"epoch": 1.641373372080508,
"grad_norm": 0.5771504044532776,
"learning_rate": 0.0005020902919943972,
"loss": 3.6862,
"step": 15250
},
{
"epoch": 1.6467549241201163,
"grad_norm": 0.6127772331237793,
"learning_rate": 0.0005017670509643357,
"loss": 3.6975,
"step": 15300
},
{
"epoch": 1.6521364761597246,
"grad_norm": 0.5885007381439209,
"learning_rate": 0.0005014438099342743,
"loss": 3.68,
"step": 15350
},
{
"epoch": 1.6575180281993327,
"grad_norm": 0.6041487455368042,
"learning_rate": 0.0005011205689042129,
"loss": 3.6602,
"step": 15400
},
{
"epoch": 1.6628995802389408,
"grad_norm": 0.5758805871009827,
"learning_rate": 0.0005007973278741514,
"loss": 3.6751,
"step": 15450
},
{
"epoch": 1.6682811322785491,
"grad_norm": 0.5186255574226379,
"learning_rate": 0.00050047408684409,
"loss": 3.6795,
"step": 15500
},
{
"epoch": 1.6736626843181575,
"grad_norm": 0.5514072179794312,
"learning_rate": 0.0005001508458140286,
"loss": 3.6774,
"step": 15550
},
{
"epoch": 1.6790442363577656,
"grad_norm": 0.5339574813842773,
"learning_rate": 0.0004998276047839673,
"loss": 3.6791,
"step": 15600
},
{
"epoch": 1.6844257883973737,
"grad_norm": 0.5713083148002625,
"learning_rate": 0.0004995043637539058,
"loss": 3.685,
"step": 15650
},
{
"epoch": 1.689807340436982,
"grad_norm": 0.543732762336731,
"learning_rate": 0.0004991811227238443,
"loss": 3.6788,
"step": 15700
},
{
"epoch": 1.6951888924765903,
"grad_norm": 0.5611950755119324,
"learning_rate": 0.0004988578816937829,
"loss": 3.6788,
"step": 15750
},
{
"epoch": 1.7005704445161984,
"grad_norm": 1.1558012962341309,
"learning_rate": 0.0004985346406637215,
"loss": 3.6853,
"step": 15800
},
{
"epoch": 1.7059519965558065,
"grad_norm": 0.5830097794532776,
"learning_rate": 0.0004982113996336602,
"loss": 3.6819,
"step": 15850
},
{
"epoch": 1.7113335485954149,
"grad_norm": 0.5888424515724182,
"learning_rate": 0.0004978881586035987,
"loss": 3.6499,
"step": 15900
},
{
"epoch": 1.7167151006350232,
"grad_norm": 0.5985537171363831,
"learning_rate": 0.0004975649175735373,
"loss": 3.6656,
"step": 15950
},
{
"epoch": 1.7220966526746313,
"grad_norm": 0.5927515625953674,
"learning_rate": 0.0004972416765434759,
"loss": 3.6735,
"step": 16000
},
{
"epoch": 1.7220966526746313,
"eval_accuracy": 0.3565279361242711,
"eval_loss": 3.6250205039978027,
"eval_runtime": 184.8324,
"eval_samples_per_second": 97.445,
"eval_steps_per_second": 6.092,
"step": 16000
},
{
"epoch": 1.7274782047142396,
"grad_norm": 0.5919635891914368,
"learning_rate": 0.0004969184355134145,
"loss": 3.6675,
"step": 16050
},
{
"epoch": 1.732859756753848,
"grad_norm": 0.541195809841156,
"learning_rate": 0.0004965951944833531,
"loss": 3.6654,
"step": 16100
},
{
"epoch": 1.738241308793456,
"grad_norm": 0.5535009503364563,
"learning_rate": 0.0004962719534532916,
"loss": 3.6582,
"step": 16150
},
{
"epoch": 1.7436228608330642,
"grad_norm": 0.5698593854904175,
"learning_rate": 0.0004959487124232302,
"loss": 3.6677,
"step": 16200
},
{
"epoch": 1.7490044128726725,
"grad_norm": 0.5787349343299866,
"learning_rate": 0.0004956254713931688,
"loss": 3.665,
"step": 16250
},
{
"epoch": 1.7543859649122808,
"grad_norm": 0.61586993932724,
"learning_rate": 0.0004953022303631074,
"loss": 3.6734,
"step": 16300
},
{
"epoch": 1.759767516951889,
"grad_norm": 0.5861958861351013,
"learning_rate": 0.0004949789893330459,
"loss": 3.6602,
"step": 16350
},
{
"epoch": 1.765149068991497,
"grad_norm": 0.5748381614685059,
"learning_rate": 0.0004946557483029846,
"loss": 3.6709,
"step": 16400
},
{
"epoch": 1.7705306210311054,
"grad_norm": 0.5469369888305664,
"learning_rate": 0.0004943325072729231,
"loss": 3.6531,
"step": 16450
},
{
"epoch": 1.7759121730707137,
"grad_norm": 0.5762391686439514,
"learning_rate": 0.0004940092662428617,
"loss": 3.664,
"step": 16500
},
{
"epoch": 1.7812937251103218,
"grad_norm": 0.5528908967971802,
"learning_rate": 0.0004936860252128003,
"loss": 3.6412,
"step": 16550
},
{
"epoch": 1.78667527714993,
"grad_norm": 0.5524837970733643,
"learning_rate": 0.0004933627841827388,
"loss": 3.6725,
"step": 16600
},
{
"epoch": 1.7920568291895382,
"grad_norm": 0.5726085305213928,
"learning_rate": 0.0004930395431526775,
"loss": 3.6769,
"step": 16650
},
{
"epoch": 1.7974383812291466,
"grad_norm": 0.5498278737068176,
"learning_rate": 0.0004927163021226161,
"loss": 3.6422,
"step": 16700
},
{
"epoch": 1.8028199332687547,
"grad_norm": 0.5690969824790955,
"learning_rate": 0.0004923930610925547,
"loss": 3.653,
"step": 16750
},
{
"epoch": 1.8082014853083628,
"grad_norm": 0.5563623905181885,
"learning_rate": 0.0004920698200624932,
"loss": 3.6843,
"step": 16800
},
{
"epoch": 1.813583037347971,
"grad_norm": 0.559473991394043,
"learning_rate": 0.0004917465790324317,
"loss": 3.656,
"step": 16850
},
{
"epoch": 1.8189645893875794,
"grad_norm": 1.0415881872177124,
"learning_rate": 0.0004914233380023704,
"loss": 3.6526,
"step": 16900
},
{
"epoch": 1.8243461414271875,
"grad_norm": 0.5609972476959229,
"learning_rate": 0.0004911000969723089,
"loss": 3.6581,
"step": 16950
},
{
"epoch": 1.8297276934667959,
"grad_norm": 0.5753895044326782,
"learning_rate": 0.0004907768559422476,
"loss": 3.6405,
"step": 17000
},
{
"epoch": 1.8297276934667959,
"eval_accuracy": 0.3581517535326585,
"eval_loss": 3.6095125675201416,
"eval_runtime": 185.0185,
"eval_samples_per_second": 97.347,
"eval_steps_per_second": 6.086,
"step": 17000
},
{
"epoch": 1.8351092455064042,
"grad_norm": 0.5633401870727539,
"learning_rate": 0.0004904536149121861,
"loss": 3.6531,
"step": 17050
},
{
"epoch": 1.8404907975460123,
"grad_norm": 0.5552918910980225,
"learning_rate": 0.0004901303738821248,
"loss": 3.647,
"step": 17100
},
{
"epoch": 1.8458723495856204,
"grad_norm": 0.5768826603889465,
"learning_rate": 0.0004898071328520633,
"loss": 3.6472,
"step": 17150
},
{
"epoch": 1.8512539016252287,
"grad_norm": 0.5807743072509766,
"learning_rate": 0.0004894838918220019,
"loss": 3.6456,
"step": 17200
},
{
"epoch": 1.856635453664837,
"grad_norm": 0.6019062399864197,
"learning_rate": 0.0004891606507919405,
"loss": 3.6529,
"step": 17250
},
{
"epoch": 1.8620170057044452,
"grad_norm": 0.5893161296844482,
"learning_rate": 0.000488837409761879,
"loss": 3.6711,
"step": 17300
},
{
"epoch": 1.8673985577440533,
"grad_norm": 0.5603693723678589,
"learning_rate": 0.0004885141687318177,
"loss": 3.6614,
"step": 17350
},
{
"epoch": 1.8727801097836616,
"grad_norm": 0.631818413734436,
"learning_rate": 0.00048819092770175623,
"loss": 3.6642,
"step": 17400
},
{
"epoch": 1.87816166182327,
"grad_norm": 0.5838807821273804,
"learning_rate": 0.0004878676866716948,
"loss": 3.6581,
"step": 17450
},
{
"epoch": 1.883543213862878,
"grad_norm": 0.5909974575042725,
"learning_rate": 0.00048754444564163337,
"loss": 3.6499,
"step": 17500
},
{
"epoch": 1.8889247659024861,
"grad_norm": 0.5818292498588562,
"learning_rate": 0.000487221204611572,
"loss": 3.6563,
"step": 17550
},
{
"epoch": 1.8943063179420945,
"grad_norm": 0.6259658336639404,
"learning_rate": 0.00048689796358151056,
"loss": 3.6557,
"step": 17600
},
{
"epoch": 1.8996878699817028,
"grad_norm": 0.5432948470115662,
"learning_rate": 0.00048657472255144915,
"loss": 3.6522,
"step": 17650
},
{
"epoch": 1.905069422021311,
"grad_norm": 0.5726639032363892,
"learning_rate": 0.00048625148152138775,
"loss": 3.6559,
"step": 17700
},
{
"epoch": 1.910450974060919,
"grad_norm": 0.6220903396606445,
"learning_rate": 0.0004859282404913263,
"loss": 3.6441,
"step": 17750
},
{
"epoch": 1.9158325261005273,
"grad_norm": 0.5544300079345703,
"learning_rate": 0.0004856049994612649,
"loss": 3.643,
"step": 17800
},
{
"epoch": 1.9212140781401357,
"grad_norm": 0.6041421294212341,
"learning_rate": 0.00048528175843120353,
"loss": 3.6461,
"step": 17850
},
{
"epoch": 1.9265956301797438,
"grad_norm": 0.5323398113250732,
"learning_rate": 0.0004849585174011421,
"loss": 3.6522,
"step": 17900
},
{
"epoch": 1.931977182219352,
"grad_norm": 0.5484367609024048,
"learning_rate": 0.00048463527637108067,
"loss": 3.6477,
"step": 17950
},
{
"epoch": 1.9373587342589604,
"grad_norm": 0.6333380937576294,
"learning_rate": 0.0004843120353410192,
"loss": 3.6439,
"step": 18000
},
{
"epoch": 1.9373587342589604,
"eval_accuracy": 0.3589290562920833,
"eval_loss": 3.5955045223236084,
"eval_runtime": 184.9578,
"eval_samples_per_second": 97.379,
"eval_steps_per_second": 6.088,
"step": 18000
},
{
"epoch": 1.9427402862985685,
"grad_norm": 0.5808707475662231,
"learning_rate": 0.0004839887943109578,
"loss": 3.64,
"step": 18050
},
{
"epoch": 1.9481218383381766,
"grad_norm": 0.5633965134620667,
"learning_rate": 0.00048366555328089645,
"loss": 3.629,
"step": 18100
},
{
"epoch": 1.953503390377785,
"grad_norm": 0.5531556606292725,
"learning_rate": 0.000483342312250835,
"loss": 3.6406,
"step": 18150
},
{
"epoch": 1.9588849424173933,
"grad_norm": 0.635948896408081,
"learning_rate": 0.0004830190712207736,
"loss": 3.6499,
"step": 18200
},
{
"epoch": 1.9642664944570014,
"grad_norm": 0.5666787624359131,
"learning_rate": 0.0004826958301907122,
"loss": 3.6559,
"step": 18250
},
{
"epoch": 1.9696480464966095,
"grad_norm": 0.5713225603103638,
"learning_rate": 0.0004823725891606507,
"loss": 3.6499,
"step": 18300
},
{
"epoch": 1.9750295985362178,
"grad_norm": 0.6086699962615967,
"learning_rate": 0.0004820493481305893,
"loss": 3.6176,
"step": 18350
},
{
"epoch": 1.9804111505758262,
"grad_norm": 0.577656626701355,
"learning_rate": 0.00048172610710052797,
"loss": 3.6473,
"step": 18400
},
{
"epoch": 1.9857927026154343,
"grad_norm": 0.5730327367782593,
"learning_rate": 0.0004814028660704665,
"loss": 3.6484,
"step": 18450
},
{
"epoch": 1.9911742546550424,
"grad_norm": 0.542858362197876,
"learning_rate": 0.0004810796250404051,
"loss": 3.6421,
"step": 18500
},
{
"epoch": 1.9965558066946507,
"grad_norm": 0.5895008444786072,
"learning_rate": 0.00048075638401034364,
"loss": 3.6335,
"step": 18550
},
{
"epoch": 2.001937358734259,
"grad_norm": 0.5415585041046143,
"learning_rate": 0.00048043314298028224,
"loss": 3.6123,
"step": 18600
},
{
"epoch": 2.007318910773867,
"grad_norm": 0.5612041354179382,
"learning_rate": 0.00048010990195022083,
"loss": 3.5606,
"step": 18650
},
{
"epoch": 2.0127004628134753,
"grad_norm": 0.5955508351325989,
"learning_rate": 0.0004797866609201594,
"loss": 3.5534,
"step": 18700
},
{
"epoch": 2.018082014853084,
"grad_norm": 0.5306816101074219,
"learning_rate": 0.000479463419890098,
"loss": 3.5606,
"step": 18750
},
{
"epoch": 2.023463566892692,
"grad_norm": 0.6179097890853882,
"learning_rate": 0.0004791401788600366,
"loss": 3.5398,
"step": 18800
},
{
"epoch": 2.0288451189323,
"grad_norm": 0.5494289994239807,
"learning_rate": 0.00047881693782997515,
"loss": 3.5486,
"step": 18850
},
{
"epoch": 2.034226670971908,
"grad_norm": 0.6472309827804565,
"learning_rate": 0.00047849369679991375,
"loss": 3.5584,
"step": 18900
},
{
"epoch": 2.0396082230115167,
"grad_norm": 0.5931106209754944,
"learning_rate": 0.0004781704557698523,
"loss": 3.5716,
"step": 18950
},
{
"epoch": 2.044989775051125,
"grad_norm": 0.555692732334137,
"learning_rate": 0.00047784721473979094,
"loss": 3.5585,
"step": 19000
},
{
"epoch": 2.044989775051125,
"eval_accuracy": 0.3609611912528775,
"eval_loss": 3.585639476776123,
"eval_runtime": 184.5705,
"eval_samples_per_second": 97.583,
"eval_steps_per_second": 6.101,
"step": 19000
},
{
"epoch": 2.050371327090733,
"grad_norm": 0.5866180062294006,
"learning_rate": 0.00047752397370972953,
"loss": 3.5712,
"step": 19050
},
{
"epoch": 2.055752879130341,
"grad_norm": 0.6226833462715149,
"learning_rate": 0.0004772007326796681,
"loss": 3.5541,
"step": 19100
},
{
"epoch": 2.0611344311699495,
"grad_norm": 0.622482180595398,
"learning_rate": 0.00047687749164960667,
"loss": 3.5497,
"step": 19150
},
{
"epoch": 2.0665159832095576,
"grad_norm": 0.5930569171905518,
"learning_rate": 0.00047655425061954526,
"loss": 3.543,
"step": 19200
},
{
"epoch": 2.0718975352491658,
"grad_norm": 0.6076319217681885,
"learning_rate": 0.00047623100958948386,
"loss": 3.5663,
"step": 19250
},
{
"epoch": 2.0772790872887743,
"grad_norm": 0.5735373497009277,
"learning_rate": 0.00047590776855942245,
"loss": 3.5541,
"step": 19300
},
{
"epoch": 2.0826606393283824,
"grad_norm": 0.6126927137374878,
"learning_rate": 0.00047559099234996226,
"loss": 3.559,
"step": 19350
},
{
"epoch": 2.0880421913679905,
"grad_norm": 0.5821768641471863,
"learning_rate": 0.00047526775131990085,
"loss": 3.5555,
"step": 19400
},
{
"epoch": 2.0934237434075986,
"grad_norm": 0.5864294171333313,
"learning_rate": 0.0004749445102898394,
"loss": 3.5562,
"step": 19450
},
{
"epoch": 2.098805295447207,
"grad_norm": 0.6452410817146301,
"learning_rate": 0.000474621269259778,
"loss": 3.5593,
"step": 19500
},
{
"epoch": 2.1041868474868153,
"grad_norm": 0.5725786089897156,
"learning_rate": 0.0004742980282297166,
"loss": 3.5832,
"step": 19550
},
{
"epoch": 2.1095683995264234,
"grad_norm": 0.6051315665245056,
"learning_rate": 0.0004739747871996551,
"loss": 3.5634,
"step": 19600
},
{
"epoch": 2.1149499515660315,
"grad_norm": 0.5349266529083252,
"learning_rate": 0.00047365154616959377,
"loss": 3.5558,
"step": 19650
},
{
"epoch": 2.12033150360564,
"grad_norm": 0.5834625363349915,
"learning_rate": 0.00047332830513953237,
"loss": 3.5382,
"step": 19700
},
{
"epoch": 2.125713055645248,
"grad_norm": 0.5570130944252014,
"learning_rate": 0.0004730050641094709,
"loss": 3.5871,
"step": 19750
},
{
"epoch": 2.1310946076848563,
"grad_norm": 0.6045011281967163,
"learning_rate": 0.0004726818230794095,
"loss": 3.5578,
"step": 19800
},
{
"epoch": 2.1364761597244644,
"grad_norm": 0.5597689747810364,
"learning_rate": 0.00047235858204934804,
"loss": 3.5797,
"step": 19850
},
{
"epoch": 2.141857711764073,
"grad_norm": 0.582958996295929,
"learning_rate": 0.0004720353410192867,
"loss": 3.5748,
"step": 19900
},
{
"epoch": 2.147239263803681,
"grad_norm": 0.5808094143867493,
"learning_rate": 0.0004717120999892253,
"loss": 3.5758,
"step": 19950
},
{
"epoch": 2.152620815843289,
"grad_norm": 0.6471470594406128,
"learning_rate": 0.0004713888589591638,
"loss": 3.5596,
"step": 20000
},
{
"epoch": 2.152620815843289,
"eval_accuracy": 0.36173936323540484,
"eval_loss": 3.5779969692230225,
"eval_runtime": 184.816,
"eval_samples_per_second": 97.454,
"eval_steps_per_second": 6.093,
"step": 20000
},
{
"epoch": 2.1580023678828972,
"grad_norm": 0.5732181668281555,
"learning_rate": 0.0004710720827497037,
"loss": 3.5622,
"step": 20050
},
{
"epoch": 2.163383919922506,
"grad_norm": 0.582992672920227,
"learning_rate": 0.00047074884171964223,
"loss": 3.5469,
"step": 20100
},
{
"epoch": 2.168765471962114,
"grad_norm": 0.5529500842094421,
"learning_rate": 0.0004704256006895808,
"loss": 3.5583,
"step": 20150
},
{
"epoch": 2.174147024001722,
"grad_norm": 0.587487518787384,
"learning_rate": 0.00047010235965951936,
"loss": 3.5612,
"step": 20200
},
{
"epoch": 2.1795285760413305,
"grad_norm": 0.5818561911582947,
"learning_rate": 0.00046977911862945796,
"loss": 3.5628,
"step": 20250
},
{
"epoch": 2.1849101280809387,
"grad_norm": 0.5599380731582642,
"learning_rate": 0.0004694558775993966,
"loss": 3.5771,
"step": 20300
},
{
"epoch": 2.1902916801205468,
"grad_norm": 0.5862258076667786,
"learning_rate": 0.00046913263656933515,
"loss": 3.5635,
"step": 20350
},
{
"epoch": 2.195673232160155,
"grad_norm": 0.5630679726600647,
"learning_rate": 0.00046880939553927374,
"loss": 3.5593,
"step": 20400
},
{
"epoch": 2.2010547841997634,
"grad_norm": 0.6166080832481384,
"learning_rate": 0.00046848615450921234,
"loss": 3.5634,
"step": 20450
},
{
"epoch": 2.2064363362393715,
"grad_norm": 0.5633708834648132,
"learning_rate": 0.0004681629134791509,
"loss": 3.5678,
"step": 20500
},
{
"epoch": 2.2118178882789796,
"grad_norm": 0.5323472023010254,
"learning_rate": 0.00046783967244908947,
"loss": 3.577,
"step": 20550
},
{
"epoch": 2.2171994403185877,
"grad_norm": 0.5587198138237,
"learning_rate": 0.0004675164314190281,
"loss": 3.5656,
"step": 20600
},
{
"epoch": 2.2225809923581963,
"grad_norm": 0.6203996539115906,
"learning_rate": 0.00046719319038896666,
"loss": 3.5725,
"step": 20650
},
{
"epoch": 2.2279625443978044,
"grad_norm": 0.6120006442070007,
"learning_rate": 0.00046686994935890526,
"loss": 3.5511,
"step": 20700
},
{
"epoch": 2.2333440964374125,
"grad_norm": 0.5724984407424927,
"learning_rate": 0.0004665467083288438,
"loss": 3.5857,
"step": 20750
},
{
"epoch": 2.2387256484770206,
"grad_norm": 0.5859040021896362,
"learning_rate": 0.0004662234672987824,
"loss": 3.5626,
"step": 20800
},
{
"epoch": 2.244107200516629,
"grad_norm": 0.6283140778541565,
"learning_rate": 0.00046590022626872104,
"loss": 3.554,
"step": 20850
},
{
"epoch": 2.2494887525562373,
"grad_norm": 0.6019113659858704,
"learning_rate": 0.0004655769852386596,
"loss": 3.5601,
"step": 20900
},
{
"epoch": 2.2548703045958454,
"grad_norm": 0.5885460376739502,
"learning_rate": 0.0004652537442085982,
"loss": 3.5968,
"step": 20950
},
{
"epoch": 2.2602518566354535,
"grad_norm": 0.5559438467025757,
"learning_rate": 0.00046493050317853677,
"loss": 3.5619,
"step": 21000
},
{
"epoch": 2.2602518566354535,
"eval_accuracy": 0.362556867563321,
"eval_loss": 3.565824508666992,
"eval_runtime": 184.5191,
"eval_samples_per_second": 97.61,
"eval_steps_per_second": 6.102,
"step": 21000
},
{
"epoch": 2.265633408675062,
"grad_norm": 0.5775765776634216,
"learning_rate": 0.0004646072621484753,
"loss": 3.5615,
"step": 21050
},
{
"epoch": 2.27101496071467,
"grad_norm": 0.669364869594574,
"learning_rate": 0.0004642840211184139,
"loss": 3.5574,
"step": 21100
},
{
"epoch": 2.2763965127542782,
"grad_norm": 0.5841721892356873,
"learning_rate": 0.00046396078008835255,
"loss": 3.5482,
"step": 21150
},
{
"epoch": 2.281778064793887,
"grad_norm": 0.6061359643936157,
"learning_rate": 0.0004636375390582911,
"loss": 3.5743,
"step": 21200
},
{
"epoch": 2.287159616833495,
"grad_norm": 0.629613995552063,
"learning_rate": 0.0004633142980282297,
"loss": 3.5631,
"step": 21250
},
{
"epoch": 2.292541168873103,
"grad_norm": 0.5746256113052368,
"learning_rate": 0.00046299105699816823,
"loss": 3.5674,
"step": 21300
},
{
"epoch": 2.297922720912711,
"grad_norm": 0.5825964212417603,
"learning_rate": 0.0004626678159681068,
"loss": 3.5444,
"step": 21350
},
{
"epoch": 2.303304272952319,
"grad_norm": 0.6356550455093384,
"learning_rate": 0.0004623445749380454,
"loss": 3.5491,
"step": 21400
},
{
"epoch": 2.3086858249919278,
"grad_norm": 0.5417643189430237,
"learning_rate": 0.000462021333907984,
"loss": 3.5494,
"step": 21450
},
{
"epoch": 2.314067377031536,
"grad_norm": 0.543899416923523,
"learning_rate": 0.0004616980928779226,
"loss": 3.5541,
"step": 21500
},
{
"epoch": 2.319448929071144,
"grad_norm": 0.6250973343849182,
"learning_rate": 0.0004613748518478612,
"loss": 3.5749,
"step": 21550
},
{
"epoch": 2.3248304811107525,
"grad_norm": 0.6272781491279602,
"learning_rate": 0.00046105161081779974,
"loss": 3.574,
"step": 21600
},
{
"epoch": 2.3302120331503606,
"grad_norm": 0.6016408205032349,
"learning_rate": 0.00046072836978773834,
"loss": 3.5511,
"step": 21650
},
{
"epoch": 2.3355935851899687,
"grad_norm": 0.5845016837120056,
"learning_rate": 0.000460405128757677,
"loss": 3.5522,
"step": 21700
},
{
"epoch": 2.340975137229577,
"grad_norm": 0.5752265453338623,
"learning_rate": 0.0004600818877276155,
"loss": 3.5494,
"step": 21750
},
{
"epoch": 2.3463566892691854,
"grad_norm": 0.6028287410736084,
"learning_rate": 0.0004597586466975541,
"loss": 3.5626,
"step": 21800
},
{
"epoch": 2.3517382413087935,
"grad_norm": 0.5635079145431519,
"learning_rate": 0.00045943540566749266,
"loss": 3.5678,
"step": 21850
},
{
"epoch": 2.3571197933484016,
"grad_norm": 0.6141257286071777,
"learning_rate": 0.00045911216463743126,
"loss": 3.5413,
"step": 21900
},
{
"epoch": 2.3625013453880097,
"grad_norm": 0.6080167293548584,
"learning_rate": 0.00045878892360736985,
"loss": 3.5746,
"step": 21950
},
{
"epoch": 2.3678828974276183,
"grad_norm": 0.6267144083976746,
"learning_rate": 0.00045846568257730845,
"loss": 3.5593,
"step": 22000
},
{
"epoch": 2.3678828974276183,
"eval_accuracy": 0.3641260325691377,
"eval_loss": 3.552825689315796,
"eval_runtime": 185.2813,
"eval_samples_per_second": 97.209,
"eval_steps_per_second": 6.077,
"step": 22000
},
{
"epoch": 2.3732644494672264,
"grad_norm": 0.591131865978241,
"learning_rate": 0.00045814244154724704,
"loss": 3.5471,
"step": 22050
},
{
"epoch": 2.3786460015068345,
"grad_norm": 0.5588999390602112,
"learning_rate": 0.00045781920051718563,
"loss": 3.5592,
"step": 22100
},
{
"epoch": 2.384027553546443,
"grad_norm": 0.5897203087806702,
"learning_rate": 0.0004574959594871242,
"loss": 3.5639,
"step": 22150
},
{
"epoch": 2.389409105586051,
"grad_norm": 0.5783429741859436,
"learning_rate": 0.00045717271845706277,
"loss": 3.5625,
"step": 22200
},
{
"epoch": 2.3947906576256592,
"grad_norm": 0.595344603061676,
"learning_rate": 0.0004568494774270013,
"loss": 3.5561,
"step": 22250
},
{
"epoch": 2.4001722096652673,
"grad_norm": 0.6287717819213867,
"learning_rate": 0.00045652623639693996,
"loss": 3.5577,
"step": 22300
},
{
"epoch": 2.4055537617048754,
"grad_norm": 0.7863216996192932,
"learning_rate": 0.00045620299536687855,
"loss": 3.5778,
"step": 22350
},
{
"epoch": 2.410935313744484,
"grad_norm": 0.5969358086585999,
"learning_rate": 0.0004558797543368171,
"loss": 3.5605,
"step": 22400
},
{
"epoch": 2.416316865784092,
"grad_norm": 0.6180084347724915,
"learning_rate": 0.0004555565133067557,
"loss": 3.5457,
"step": 22450
},
{
"epoch": 2.4216984178237,
"grad_norm": 0.6318864226341248,
"learning_rate": 0.0004552332722766943,
"loss": 3.5685,
"step": 22500
},
{
"epoch": 2.4270799698633088,
"grad_norm": 0.6047439575195312,
"learning_rate": 0.0004549100312466328,
"loss": 3.5711,
"step": 22550
},
{
"epoch": 2.432461521902917,
"grad_norm": 0.5744851231575012,
"learning_rate": 0.00045459325503717263,
"loss": 3.5702,
"step": 22600
},
{
"epoch": 2.437843073942525,
"grad_norm": 0.5994943976402283,
"learning_rate": 0.0004542700140071113,
"loss": 3.569,
"step": 22650
},
{
"epoch": 2.443224625982133,
"grad_norm": 0.5537005662918091,
"learning_rate": 0.0004539467729770499,
"loss": 3.5438,
"step": 22700
},
{
"epoch": 2.4486061780217416,
"grad_norm": 0.576801061630249,
"learning_rate": 0.0004536235319469884,
"loss": 3.5601,
"step": 22750
},
{
"epoch": 2.4539877300613497,
"grad_norm": 0.7851975560188293,
"learning_rate": 0.000453300290916927,
"loss": 3.5645,
"step": 22800
},
{
"epoch": 2.459369282100958,
"grad_norm": 0.5843862891197205,
"learning_rate": 0.0004529770498868656,
"loss": 3.5571,
"step": 22850
},
{
"epoch": 2.464750834140566,
"grad_norm": 0.5837774872779846,
"learning_rate": 0.00045265380885680414,
"loss": 3.5662,
"step": 22900
},
{
"epoch": 2.4701323861801745,
"grad_norm": 0.5811747312545776,
"learning_rate": 0.0004523305678267428,
"loss": 3.57,
"step": 22950
},
{
"epoch": 2.4755139382197826,
"grad_norm": 0.6275676488876343,
"learning_rate": 0.0004520073267966814,
"loss": 3.5547,
"step": 23000
},
{
"epoch": 2.4755139382197826,
"eval_accuracy": 0.3650710953873481,
"eval_loss": 3.5427539348602295,
"eval_runtime": 184.4438,
"eval_samples_per_second": 97.65,
"eval_steps_per_second": 6.105,
"step": 23000
},
{
"epoch": 2.4808954902593907,
"grad_norm": 0.6013798117637634,
"learning_rate": 0.00045168408576661993,
"loss": 3.5469,
"step": 23050
},
{
"epoch": 2.4862770422989993,
"grad_norm": 0.6493346095085144,
"learning_rate": 0.0004513608447365585,
"loss": 3.561,
"step": 23100
},
{
"epoch": 2.4916585943386074,
"grad_norm": 0.5985797643661499,
"learning_rate": 0.00045103760370649706,
"loss": 3.5711,
"step": 23150
},
{
"epoch": 2.4970401463782155,
"grad_norm": 0.6186266541481018,
"learning_rate": 0.00045071436267643566,
"loss": 3.5492,
"step": 23200
},
{
"epoch": 2.5024216984178236,
"grad_norm": 0.560845136642456,
"learning_rate": 0.0004503911216463743,
"loss": 3.5565,
"step": 23250
},
{
"epoch": 2.5078032504574317,
"grad_norm": 0.5958519577980042,
"learning_rate": 0.00045006788061631285,
"loss": 3.568,
"step": 23300
},
{
"epoch": 2.5131848024970402,
"grad_norm": 0.5718041062355042,
"learning_rate": 0.00044974463958625144,
"loss": 3.5392,
"step": 23350
},
{
"epoch": 2.5185663545366483,
"grad_norm": 0.6036562323570251,
"learning_rate": 0.00044942139855619004,
"loss": 3.567,
"step": 23400
},
{
"epoch": 2.5239479065762565,
"grad_norm": 0.5902794003486633,
"learning_rate": 0.0004490981575261286,
"loss": 3.551,
"step": 23450
},
{
"epoch": 2.529329458615865,
"grad_norm": 0.5769206285476685,
"learning_rate": 0.0004487749164960672,
"loss": 3.557,
"step": 23500
},
{
"epoch": 2.534711010655473,
"grad_norm": 0.5881948471069336,
"learning_rate": 0.0004484516754660058,
"loss": 3.5472,
"step": 23550
},
{
"epoch": 2.540092562695081,
"grad_norm": 0.5914930105209351,
"learning_rate": 0.00044812843443594436,
"loss": 3.5434,
"step": 23600
},
{
"epoch": 2.5454741147346893,
"grad_norm": 0.5718581080436707,
"learning_rate": 0.00044780519340588296,
"loss": 3.559,
"step": 23650
},
{
"epoch": 2.550855666774298,
"grad_norm": 0.5887609720230103,
"learning_rate": 0.0004474819523758215,
"loss": 3.5548,
"step": 23700
},
{
"epoch": 2.556237218813906,
"grad_norm": 0.57635498046875,
"learning_rate": 0.0004471587113457601,
"loss": 3.5431,
"step": 23750
},
{
"epoch": 2.561618770853514,
"grad_norm": 0.6003997325897217,
"learning_rate": 0.00044683547031569874,
"loss": 3.5628,
"step": 23800
},
{
"epoch": 2.567000322893122,
"grad_norm": 0.6113025546073914,
"learning_rate": 0.0004465122292856373,
"loss": 3.5497,
"step": 23850
},
{
"epoch": 2.5723818749327307,
"grad_norm": 0.5799350738525391,
"learning_rate": 0.0004461889882555759,
"loss": 3.5676,
"step": 23900
},
{
"epoch": 2.577763426972339,
"grad_norm": 0.5788649916648865,
"learning_rate": 0.00044586574722551447,
"loss": 3.5609,
"step": 23950
},
{
"epoch": 2.583144979011947,
"grad_norm": 0.5850538611412048,
"learning_rate": 0.000445542506195453,
"loss": 3.5571,
"step": 24000
},
{
"epoch": 2.583144979011947,
"eval_accuracy": 0.3657125820370048,
"eval_loss": 3.534817695617676,
"eval_runtime": 185.0176,
"eval_samples_per_second": 97.347,
"eval_steps_per_second": 6.086,
"step": 24000
},
{
"epoch": 2.5885265310515555,
"grad_norm": 0.6198786497116089,
"learning_rate": 0.0004452192651653916,
"loss": 3.5545,
"step": 24050
},
{
"epoch": 2.5939080830911636,
"grad_norm": 0.5845891833305359,
"learning_rate": 0.00044489602413533025,
"loss": 3.54,
"step": 24100
},
{
"epoch": 2.5992896351307717,
"grad_norm": 0.5993114113807678,
"learning_rate": 0.0004445727831052688,
"loss": 3.5377,
"step": 24150
},
{
"epoch": 2.60467118717038,
"grad_norm": 0.5982822775840759,
"learning_rate": 0.0004442495420752074,
"loss": 3.5527,
"step": 24200
},
{
"epoch": 2.610052739209988,
"grad_norm": 0.6251617670059204,
"learning_rate": 0.00044392630104514593,
"loss": 3.5481,
"step": 24250
},
{
"epoch": 2.6154342912495965,
"grad_norm": 0.617463231086731,
"learning_rate": 0.0004436030600150845,
"loss": 3.5421,
"step": 24300
},
{
"epoch": 2.6208158432892046,
"grad_norm": 0.5983661413192749,
"learning_rate": 0.0004432798189850231,
"loss": 3.5651,
"step": 24350
},
{
"epoch": 2.6261973953288127,
"grad_norm": 0.6203315258026123,
"learning_rate": 0.0004429565779549617,
"loss": 3.5403,
"step": 24400
},
{
"epoch": 2.6315789473684212,
"grad_norm": 0.5848509073257446,
"learning_rate": 0.0004426333369249003,
"loss": 3.5581,
"step": 24450
},
{
"epoch": 2.6369604994080293,
"grad_norm": 0.5791871547698975,
"learning_rate": 0.0004423100958948389,
"loss": 3.5359,
"step": 24500
},
{
"epoch": 2.6423420514476375,
"grad_norm": 0.634689211845398,
"learning_rate": 0.00044198685486477744,
"loss": 3.5436,
"step": 24550
},
{
"epoch": 2.6477236034872456,
"grad_norm": 0.5859078764915466,
"learning_rate": 0.00044166361383471604,
"loss": 3.5412,
"step": 24600
},
{
"epoch": 2.653105155526854,
"grad_norm": 0.6053293943405151,
"learning_rate": 0.0004413403728046547,
"loss": 3.5539,
"step": 24650
},
{
"epoch": 2.658486707566462,
"grad_norm": 0.6215761303901672,
"learning_rate": 0.0004410171317745932,
"loss": 3.5523,
"step": 24700
},
{
"epoch": 2.6638682596060703,
"grad_norm": 0.5783427357673645,
"learning_rate": 0.0004406938907445318,
"loss": 3.5632,
"step": 24750
},
{
"epoch": 2.6692498116456784,
"grad_norm": 0.6102443933486938,
"learning_rate": 0.00044037064971447036,
"loss": 3.5519,
"step": 24800
},
{
"epoch": 2.674631363685287,
"grad_norm": 0.61601322889328,
"learning_rate": 0.00044004740868440896,
"loss": 3.5425,
"step": 24850
},
{
"epoch": 2.680012915724895,
"grad_norm": 0.5947862863540649,
"learning_rate": 0.00043972416765434755,
"loss": 3.5461,
"step": 24900
},
{
"epoch": 2.685394467764503,
"grad_norm": 0.6124972105026245,
"learning_rate": 0.00043940092662428615,
"loss": 3.547,
"step": 24950
},
{
"epoch": 2.6907760198041117,
"grad_norm": 0.5721360445022583,
"learning_rate": 0.00043907768559422474,
"loss": 3.5501,
"step": 25000
},
{
"epoch": 2.6907760198041117,
"eval_accuracy": 0.3670556403832796,
"eval_loss": 3.5255908966064453,
"eval_runtime": 184.7747,
"eval_samples_per_second": 97.475,
"eval_steps_per_second": 6.094,
"step": 25000
},
{
"epoch": 2.69615757184372,
"grad_norm": 0.6443188190460205,
"learning_rate": 0.00043875444456416334,
"loss": 3.5278,
"step": 25050
},
{
"epoch": 2.701539123883328,
"grad_norm": 0.5788137316703796,
"learning_rate": 0.0004384312035341019,
"loss": 3.5573,
"step": 25100
},
{
"epoch": 2.706920675922936,
"grad_norm": 0.5494847297668457,
"learning_rate": 0.00043810796250404047,
"loss": 3.5508,
"step": 25150
},
{
"epoch": 2.712302227962544,
"grad_norm": 0.5972323417663574,
"learning_rate": 0.000437784721473979,
"loss": 3.535,
"step": 25200
},
{
"epoch": 2.7176837800021527,
"grad_norm": 0.6219172477722168,
"learning_rate": 0.00043746148044391766,
"loss": 3.5357,
"step": 25250
},
{
"epoch": 2.723065332041761,
"grad_norm": 0.5936007499694824,
"learning_rate": 0.00043713823941385625,
"loss": 3.5371,
"step": 25300
},
{
"epoch": 2.728446884081369,
"grad_norm": 0.5830941200256348,
"learning_rate": 0.0004368149983837948,
"loss": 3.5308,
"step": 25350
},
{
"epoch": 2.7338284361209775,
"grad_norm": 0.5860649943351746,
"learning_rate": 0.0004364917573537334,
"loss": 3.5441,
"step": 25400
},
{
"epoch": 2.7392099881605856,
"grad_norm": 0.6084650158882141,
"learning_rate": 0.00043616851632367193,
"loss": 3.5439,
"step": 25450
},
{
"epoch": 2.7445915402001937,
"grad_norm": 0.6109269857406616,
"learning_rate": 0.0004358452752936106,
"loss": 3.5458,
"step": 25500
},
{
"epoch": 2.749973092239802,
"grad_norm": 0.5673308372497559,
"learning_rate": 0.0004355220342635492,
"loss": 3.545,
"step": 25550
},
{
"epoch": 2.7553546442794103,
"grad_norm": 0.6352481842041016,
"learning_rate": 0.00043519879323348777,
"loss": 3.5535,
"step": 25600
},
{
"epoch": 2.7607361963190185,
"grad_norm": 0.6171466112136841,
"learning_rate": 0.0004348755522034263,
"loss": 3.549,
"step": 25650
},
{
"epoch": 2.7661177483586266,
"grad_norm": 0.5649158358573914,
"learning_rate": 0.0004345523111733649,
"loss": 3.55,
"step": 25700
},
{
"epoch": 2.7714993003982347,
"grad_norm": 0.6140876412391663,
"learning_rate": 0.00043422907014330344,
"loss": 3.5487,
"step": 25750
},
{
"epoch": 2.776880852437843,
"grad_norm": 0.5735211968421936,
"learning_rate": 0.0004339058291132421,
"loss": 3.5289,
"step": 25800
},
{
"epoch": 2.7822624044774513,
"grad_norm": 0.596508800983429,
"learning_rate": 0.0004335825880831807,
"loss": 3.522,
"step": 25850
},
{
"epoch": 2.7876439565170594,
"grad_norm": 0.5887395739555359,
"learning_rate": 0.00043325934705311923,
"loss": 3.5414,
"step": 25900
},
{
"epoch": 2.793025508556668,
"grad_norm": 0.6502258777618408,
"learning_rate": 0.0004329361060230578,
"loss": 3.5267,
"step": 25950
},
{
"epoch": 2.798407060596276,
"grad_norm": 0.5818353295326233,
"learning_rate": 0.00043261286499299636,
"loss": 3.5323,
"step": 26000
},
{
"epoch": 2.798407060596276,
"eval_accuracy": 0.3677229864202361,
"eval_loss": 3.5174596309661865,
"eval_runtime": 185.1441,
"eval_samples_per_second": 97.281,
"eval_steps_per_second": 6.082,
"step": 26000
},
{
"epoch": 2.803788612635884,
"grad_norm": 0.641747236251831,
"learning_rate": 0.00043228962396293496,
"loss": 3.5431,
"step": 26050
},
{
"epoch": 2.8091701646754923,
"grad_norm": 0.6401011943817139,
"learning_rate": 0.0004319663829328736,
"loss": 3.5387,
"step": 26100
},
{
"epoch": 2.8145517167151004,
"grad_norm": 0.6039129495620728,
"learning_rate": 0.00043164314190281215,
"loss": 3.5148,
"step": 26150
},
{
"epoch": 2.819933268754709,
"grad_norm": 0.5612286925315857,
"learning_rate": 0.00043131990087275074,
"loss": 3.5341,
"step": 26200
},
{
"epoch": 2.825314820794317,
"grad_norm": 0.5970532894134521,
"learning_rate": 0.00043099665984268934,
"loss": 3.5381,
"step": 26250
},
{
"epoch": 2.830696372833925,
"grad_norm": 0.5767912864685059,
"learning_rate": 0.0004306734188126279,
"loss": 3.5239,
"step": 26300
},
{
"epoch": 2.8360779248735337,
"grad_norm": 0.5821993350982666,
"learning_rate": 0.00043035017778256647,
"loss": 3.5205,
"step": 26350
},
{
"epoch": 2.841459476913142,
"grad_norm": 0.6017364263534546,
"learning_rate": 0.0004300269367525051,
"loss": 3.538,
"step": 26400
},
{
"epoch": 2.84684102895275,
"grad_norm": 0.5902091860771179,
"learning_rate": 0.00042970369572244366,
"loss": 3.5355,
"step": 26450
},
{
"epoch": 2.852222580992358,
"grad_norm": 0.6088672280311584,
"learning_rate": 0.00042938045469238226,
"loss": 3.5265,
"step": 26500
},
{
"epoch": 2.857604133031966,
"grad_norm": 0.5895884037017822,
"learning_rate": 0.00042906367848292206,
"loss": 3.5467,
"step": 26550
},
{
"epoch": 2.8629856850715747,
"grad_norm": 0.5715399980545044,
"learning_rate": 0.00042874043745286066,
"loss": 3.5379,
"step": 26600
},
{
"epoch": 2.868367237111183,
"grad_norm": 0.6140602231025696,
"learning_rate": 0.0004284171964227992,
"loss": 3.539,
"step": 26650
},
{
"epoch": 2.873748789150791,
"grad_norm": 0.5857102274894714,
"learning_rate": 0.0004280939553927378,
"loss": 3.5439,
"step": 26700
},
{
"epoch": 2.8791303411903995,
"grad_norm": 0.6274014711380005,
"learning_rate": 0.00042777071436267644,
"loss": 3.5369,
"step": 26750
},
{
"epoch": 2.8845118932300076,
"grad_norm": 0.5715806484222412,
"learning_rate": 0.000427447473332615,
"loss": 3.5394,
"step": 26800
},
{
"epoch": 2.8898934452696157,
"grad_norm": 0.5876508951187134,
"learning_rate": 0.0004271242323025536,
"loss": 3.5329,
"step": 26850
},
{
"epoch": 2.895274997309224,
"grad_norm": 0.5981003642082214,
"learning_rate": 0.00042680099127249217,
"loss": 3.5231,
"step": 26900
},
{
"epoch": 2.9006565493488323,
"grad_norm": 0.5943262577056885,
"learning_rate": 0.0004264777502424307,
"loss": 3.538,
"step": 26950
},
{
"epoch": 2.9060381013884404,
"grad_norm": 0.8639909029006958,
"learning_rate": 0.0004261545092123693,
"loss": 3.536,
"step": 27000
},
{
"epoch": 2.9060381013884404,
"eval_accuracy": 0.3691632891011049,
"eval_loss": 3.5098328590393066,
"eval_runtime": 184.886,
"eval_samples_per_second": 97.417,
"eval_steps_per_second": 6.09,
"step": 27000
},
{
"epoch": 2.9114196534280485,
"grad_norm": 0.5836945176124573,
"learning_rate": 0.00042583126818230795,
"loss": 3.5395,
"step": 27050
},
{
"epoch": 2.9168012054676566,
"grad_norm": 0.6182237267494202,
"learning_rate": 0.0004255080271522465,
"loss": 3.5359,
"step": 27100
},
{
"epoch": 2.922182757507265,
"grad_norm": 0.6002602577209473,
"learning_rate": 0.0004251847861221851,
"loss": 3.5197,
"step": 27150
},
{
"epoch": 2.9275643095468733,
"grad_norm": 0.5895917415618896,
"learning_rate": 0.00042486154509212363,
"loss": 3.5076,
"step": 27200
},
{
"epoch": 2.9329458615864814,
"grad_norm": 0.5710264444351196,
"learning_rate": 0.0004245383040620622,
"loss": 3.5327,
"step": 27250
},
{
"epoch": 2.93832741362609,
"grad_norm": 0.5726727247238159,
"learning_rate": 0.0004242150630320009,
"loss": 3.5243,
"step": 27300
},
{
"epoch": 2.943708965665698,
"grad_norm": 0.5930039286613464,
"learning_rate": 0.0004238918220019394,
"loss": 3.5362,
"step": 27350
},
{
"epoch": 2.949090517705306,
"grad_norm": 0.54901522397995,
"learning_rate": 0.000423568580971878,
"loss": 3.5241,
"step": 27400
},
{
"epoch": 2.9544720697449143,
"grad_norm": 0.6401501893997192,
"learning_rate": 0.00042324533994181655,
"loss": 3.5304,
"step": 27450
},
{
"epoch": 2.9598536217845224,
"grad_norm": 0.5905794501304626,
"learning_rate": 0.00042292209891175514,
"loss": 3.5298,
"step": 27500
},
{
"epoch": 2.965235173824131,
"grad_norm": 0.5865811109542847,
"learning_rate": 0.00042259885788169374,
"loss": 3.5374,
"step": 27550
},
{
"epoch": 2.970616725863739,
"grad_norm": 0.5635622143745422,
"learning_rate": 0.00042227561685163233,
"loss": 3.5246,
"step": 27600
},
{
"epoch": 2.975998277903347,
"grad_norm": 0.5960383415222168,
"learning_rate": 0.00042195237582157093,
"loss": 3.5413,
"step": 27650
},
{
"epoch": 2.9813798299429557,
"grad_norm": 0.6016942262649536,
"learning_rate": 0.0004216291347915095,
"loss": 3.5191,
"step": 27700
},
{
"epoch": 2.986761381982564,
"grad_norm": 0.56882244348526,
"learning_rate": 0.00042130589376144806,
"loss": 3.5143,
"step": 27750
},
{
"epoch": 2.992142934022172,
"grad_norm": 0.589320182800293,
"learning_rate": 0.00042098265273138666,
"loss": 3.5266,
"step": 27800
},
{
"epoch": 2.9975244860617805,
"grad_norm": 0.6223992109298706,
"learning_rate": 0.0004206594117013252,
"loss": 3.5296,
"step": 27850
},
{
"epoch": 3.0029060381013886,
"grad_norm": 0.5933626294136047,
"learning_rate": 0.00042033617067126385,
"loss": 3.4722,
"step": 27900
},
{
"epoch": 3.0082875901409967,
"grad_norm": 0.643946647644043,
"learning_rate": 0.00042001292964120244,
"loss": 3.4474,
"step": 27950
},
{
"epoch": 3.0136691421806048,
"grad_norm": 0.6281617283821106,
"learning_rate": 0.000419689688611141,
"loss": 3.4438,
"step": 28000
},
{
"epoch": 3.0136691421806048,
"eval_accuracy": 0.36952868876282496,
"eval_loss": 3.505746841430664,
"eval_runtime": 185.2261,
"eval_samples_per_second": 97.238,
"eval_steps_per_second": 6.079,
"step": 28000
},
{
"epoch": 3.0190506942202133,
"grad_norm": 0.5724912881851196,
"learning_rate": 0.0004193664475810796,
"loss": 3.4348,
"step": 28050
},
{
"epoch": 3.0244322462598214,
"grad_norm": 0.5964477062225342,
"learning_rate": 0.00041904320655101817,
"loss": 3.4444,
"step": 28100
},
{
"epoch": 3.0298137982994295,
"grad_norm": 0.6437315344810486,
"learning_rate": 0.0004187199655209567,
"loss": 3.4424,
"step": 28150
},
{
"epoch": 3.0351953503390376,
"grad_norm": 0.7275903820991516,
"learning_rate": 0.00041839672449089536,
"loss": 3.4293,
"step": 28200
},
{
"epoch": 3.040576902378646,
"grad_norm": 0.5967044830322266,
"learning_rate": 0.00041807348346083395,
"loss": 3.4381,
"step": 28250
},
{
"epoch": 3.0459584544182543,
"grad_norm": 0.6012422442436218,
"learning_rate": 0.0004177502424307725,
"loss": 3.4267,
"step": 28300
},
{
"epoch": 3.0513400064578624,
"grad_norm": 0.6100385785102844,
"learning_rate": 0.0004174270014007111,
"loss": 3.4447,
"step": 28350
},
{
"epoch": 3.0567215584974705,
"grad_norm": 0.6521284580230713,
"learning_rate": 0.00041710376037064963,
"loss": 3.4316,
"step": 28400
},
{
"epoch": 3.062103110537079,
"grad_norm": 0.5814330577850342,
"learning_rate": 0.0004167805193405883,
"loss": 3.4391,
"step": 28450
},
{
"epoch": 3.067484662576687,
"grad_norm": 0.6042176485061646,
"learning_rate": 0.0004164572783105269,
"loss": 3.4447,
"step": 28500
},
{
"epoch": 3.0728662146162953,
"grad_norm": 0.6039173603057861,
"learning_rate": 0.0004161340372804654,
"loss": 3.4407,
"step": 28550
},
{
"epoch": 3.0782477666559034,
"grad_norm": 0.624396800994873,
"learning_rate": 0.000415810796250404,
"loss": 3.4477,
"step": 28600
},
{
"epoch": 3.083629318695512,
"grad_norm": 0.6126594543457031,
"learning_rate": 0.0004154875552203426,
"loss": 3.4456,
"step": 28650
},
{
"epoch": 3.08901087073512,
"grad_norm": 0.6176913380622864,
"learning_rate": 0.00041516431419028114,
"loss": 3.4701,
"step": 28700
},
{
"epoch": 3.094392422774728,
"grad_norm": 0.624182403087616,
"learning_rate": 0.0004148410731602198,
"loss": 3.4544,
"step": 28750
},
{
"epoch": 3.0997739748143363,
"grad_norm": 0.5999160408973694,
"learning_rate": 0.0004145178321301584,
"loss": 3.4638,
"step": 28800
},
{
"epoch": 3.105155526853945,
"grad_norm": 0.5691066980361938,
"learning_rate": 0.00041419459110009693,
"loss": 3.4382,
"step": 28850
},
{
"epoch": 3.110537078893553,
"grad_norm": 0.5719393491744995,
"learning_rate": 0.0004138713500700355,
"loss": 3.4553,
"step": 28900
},
{
"epoch": 3.115918630933161,
"grad_norm": 0.585585355758667,
"learning_rate": 0.00041354810903997406,
"loss": 3.4603,
"step": 28950
},
{
"epoch": 3.121300182972769,
"grad_norm": 0.5965518355369568,
"learning_rate": 0.00041322486800991266,
"loss": 3.4492,
"step": 29000
},
{
"epoch": 3.121300182972769,
"eval_accuracy": 0.3698451946250286,
"eval_loss": 3.5004117488861084,
"eval_runtime": 184.5771,
"eval_samples_per_second": 97.58,
"eval_steps_per_second": 6.1,
"step": 29000
},
{
"epoch": 3.1266817350123777,
"grad_norm": 0.559124231338501,
"learning_rate": 0.0004129016269798513,
"loss": 3.4561,
"step": 29050
},
{
"epoch": 3.132063287051986,
"grad_norm": 0.6254011392593384,
"learning_rate": 0.00041257838594978985,
"loss": 3.4717,
"step": 29100
},
{
"epoch": 3.137444839091594,
"grad_norm": 0.5621007084846497,
"learning_rate": 0.00041225514491972844,
"loss": 3.458,
"step": 29150
},
{
"epoch": 3.1428263911312024,
"grad_norm": 0.60045325756073,
"learning_rate": 0.00041193190388966704,
"loss": 3.4385,
"step": 29200
},
{
"epoch": 3.1482079431708105,
"grad_norm": 0.5860231518745422,
"learning_rate": 0.0004116086628596056,
"loss": 3.459,
"step": 29250
},
{
"epoch": 3.1535894952104186,
"grad_norm": 0.5977693796157837,
"learning_rate": 0.0004112854218295442,
"loss": 3.4527,
"step": 29300
},
{
"epoch": 3.1589710472500268,
"grad_norm": 0.6494083404541016,
"learning_rate": 0.0004109621807994828,
"loss": 3.4596,
"step": 29350
},
{
"epoch": 3.1643525992896353,
"grad_norm": 0.6031472086906433,
"learning_rate": 0.00041063893976942136,
"loss": 3.4657,
"step": 29400
},
{
"epoch": 3.1697341513292434,
"grad_norm": 0.6102636456489563,
"learning_rate": 0.00041031569873935996,
"loss": 3.4436,
"step": 29450
},
{
"epoch": 3.1751157033688515,
"grad_norm": 0.6388329863548279,
"learning_rate": 0.0004099924577092985,
"loss": 3.4628,
"step": 29500
},
{
"epoch": 3.1804972554084596,
"grad_norm": 0.5955821871757507,
"learning_rate": 0.0004096692166792371,
"loss": 3.4558,
"step": 29550
},
{
"epoch": 3.185878807448068,
"grad_norm": 0.5899354815483093,
"learning_rate": 0.00040934597564917574,
"loss": 3.4744,
"step": 29600
},
{
"epoch": 3.1912603594876763,
"grad_norm": 0.6263838410377502,
"learning_rate": 0.0004090227346191143,
"loss": 3.4617,
"step": 29650
},
{
"epoch": 3.1966419115272844,
"grad_norm": 0.6136727333068848,
"learning_rate": 0.0004086994935890529,
"loss": 3.462,
"step": 29700
},
{
"epoch": 3.2020234635668925,
"grad_norm": 0.6539897918701172,
"learning_rate": 0.00040837625255899147,
"loss": 3.4374,
"step": 29750
},
{
"epoch": 3.207405015606501,
"grad_norm": 0.643663763999939,
"learning_rate": 0.00040805301152893,
"loss": 3.4465,
"step": 29800
},
{
"epoch": 3.212786567646109,
"grad_norm": 0.6039734482765198,
"learning_rate": 0.0004077297704988686,
"loss": 3.4573,
"step": 29850
},
{
"epoch": 3.2181681196857173,
"grad_norm": 0.6215190291404724,
"learning_rate": 0.00040740652946880725,
"loss": 3.4581,
"step": 29900
},
{
"epoch": 3.2235496717253254,
"grad_norm": 0.6273991465568542,
"learning_rate": 0.0004070832884387458,
"loss": 3.4525,
"step": 29950
},
{
"epoch": 3.228931223764934,
"grad_norm": 0.6081337928771973,
"learning_rate": 0.0004067600474086844,
"loss": 3.4644,
"step": 30000
},
{
"epoch": 3.228931223764934,
"eval_accuracy": 0.3708994535954925,
"eval_loss": 3.497182607650757,
"eval_runtime": 185.0908,
"eval_samples_per_second": 97.309,
"eval_steps_per_second": 6.084,
"step": 30000
},
{
"epoch": 3.234312775804542,
"grad_norm": 0.7007036805152893,
"learning_rate": 0.00040643680637862293,
"loss": 3.4624,
"step": 30050
},
{
"epoch": 3.23969432784415,
"grad_norm": 0.5929351449012756,
"learning_rate": 0.0004061135653485615,
"loss": 3.4567,
"step": 30100
},
{
"epoch": 3.2450758798837587,
"grad_norm": 0.6256581544876099,
"learning_rate": 0.0004057903243185001,
"loss": 3.4358,
"step": 30150
},
{
"epoch": 3.250457431923367,
"grad_norm": 0.5989969968795776,
"learning_rate": 0.0004054670832884387,
"loss": 3.436,
"step": 30200
},
{
"epoch": 3.255838983962975,
"grad_norm": 0.623378574848175,
"learning_rate": 0.0004051438422583773,
"loss": 3.4733,
"step": 30250
},
{
"epoch": 3.261220536002583,
"grad_norm": 0.6182704567909241,
"learning_rate": 0.0004048206012283159,
"loss": 3.4613,
"step": 30300
},
{
"epoch": 3.2666020880421915,
"grad_norm": 0.6270396113395691,
"learning_rate": 0.00040449736019825444,
"loss": 3.4558,
"step": 30350
},
{
"epoch": 3.2719836400817996,
"grad_norm": 0.6733869314193726,
"learning_rate": 0.00040417411916819304,
"loss": 3.4665,
"step": 30400
},
{
"epoch": 3.2773651921214078,
"grad_norm": 0.6124371886253357,
"learning_rate": 0.0004038508781381317,
"loss": 3.4659,
"step": 30450
},
{
"epoch": 3.282746744161016,
"grad_norm": 0.6001761555671692,
"learning_rate": 0.0004035276371080702,
"loss": 3.4618,
"step": 30500
},
{
"epoch": 3.2881282962006244,
"grad_norm": 0.6276858448982239,
"learning_rate": 0.00040321086089861003,
"loss": 3.4604,
"step": 30550
},
{
"epoch": 3.2935098482402325,
"grad_norm": 0.5796586275100708,
"learning_rate": 0.00040288761986854863,
"loss": 3.4587,
"step": 30600
},
{
"epoch": 3.2988914002798406,
"grad_norm": 0.6277230978012085,
"learning_rate": 0.0004025643788384872,
"loss": 3.4438,
"step": 30650
},
{
"epoch": 3.304272952319449,
"grad_norm": 0.6220398545265198,
"learning_rate": 0.00040224113780842576,
"loss": 3.4767,
"step": 30700
},
{
"epoch": 3.3096545043590573,
"grad_norm": 0.6023754477500916,
"learning_rate": 0.00040191789677836436,
"loss": 3.4706,
"step": 30750
},
{
"epoch": 3.3150360563986654,
"grad_norm": 0.6297191977500916,
"learning_rate": 0.0004015946557483029,
"loss": 3.447,
"step": 30800
},
{
"epoch": 3.3204176084382735,
"grad_norm": 0.597350537776947,
"learning_rate": 0.00040127141471824155,
"loss": 3.4861,
"step": 30850
},
{
"epoch": 3.3257991604778816,
"grad_norm": 0.6226769685745239,
"learning_rate": 0.00040094817368818014,
"loss": 3.4636,
"step": 30900
},
{
"epoch": 3.33118071251749,
"grad_norm": 0.5647996664047241,
"learning_rate": 0.0004006249326581187,
"loss": 3.4799,
"step": 30950
},
{
"epoch": 3.3365622645570983,
"grad_norm": 0.6258048415184021,
"learning_rate": 0.0004003016916280573,
"loss": 3.4772,
"step": 31000
},
{
"epoch": 3.3365622645570983,
"eval_accuracy": 0.3716034156556425,
"eval_loss": 3.4863648414611816,
"eval_runtime": 185.2474,
"eval_samples_per_second": 97.227,
"eval_steps_per_second": 6.078,
"step": 31000
},
{
"epoch": 3.3419438165967064,
"grad_norm": 0.617977499961853,
"learning_rate": 0.00039997845059799587,
"loss": 3.4577,
"step": 31050
},
{
"epoch": 3.347325368636315,
"grad_norm": 0.6409074664115906,
"learning_rate": 0.00039965520956793447,
"loss": 3.4636,
"step": 31100
},
{
"epoch": 3.352706920675923,
"grad_norm": 0.5829642415046692,
"learning_rate": 0.00039933196853787306,
"loss": 3.4648,
"step": 31150
},
{
"epoch": 3.358088472715531,
"grad_norm": 0.5979641675949097,
"learning_rate": 0.00039900872750781166,
"loss": 3.4759,
"step": 31200
},
{
"epoch": 3.3634700247551392,
"grad_norm": 0.6275162100791931,
"learning_rate": 0.0003986854864777502,
"loss": 3.4603,
"step": 31250
},
{
"epoch": 3.368851576794748,
"grad_norm": 0.675043523311615,
"learning_rate": 0.0003983622454476888,
"loss": 3.4723,
"step": 31300
},
{
"epoch": 3.374233128834356,
"grad_norm": 0.6001876592636108,
"learning_rate": 0.00039803900441762733,
"loss": 3.4641,
"step": 31350
},
{
"epoch": 3.379614680873964,
"grad_norm": 0.7512485384941101,
"learning_rate": 0.000397715763387566,
"loss": 3.4683,
"step": 31400
},
{
"epoch": 3.384996232913572,
"grad_norm": 0.6349093914031982,
"learning_rate": 0.0003973925223575046,
"loss": 3.441,
"step": 31450
},
{
"epoch": 3.3903777849531807,
"grad_norm": 0.6452442407608032,
"learning_rate": 0.0003970692813274431,
"loss": 3.4758,
"step": 31500
},
{
"epoch": 3.3957593369927888,
"grad_norm": 0.8671071529388428,
"learning_rate": 0.0003967460402973817,
"loss": 3.4781,
"step": 31550
},
{
"epoch": 3.401140889032397,
"grad_norm": 0.6597549319267273,
"learning_rate": 0.0003964227992673203,
"loss": 3.4631,
"step": 31600
},
{
"epoch": 3.4065224410720054,
"grad_norm": 0.6139020919799805,
"learning_rate": 0.00039609955823725884,
"loss": 3.4709,
"step": 31650
},
{
"epoch": 3.4119039931116135,
"grad_norm": 0.6076966524124146,
"learning_rate": 0.0003957763172071975,
"loss": 3.4493,
"step": 31700
},
{
"epoch": 3.4172855451512216,
"grad_norm": 0.6119977831840515,
"learning_rate": 0.0003954530761771361,
"loss": 3.4535,
"step": 31750
},
{
"epoch": 3.4226670971908297,
"grad_norm": 0.6142314672470093,
"learning_rate": 0.00039512983514707463,
"loss": 3.4606,
"step": 31800
},
{
"epoch": 3.428048649230438,
"grad_norm": 0.6000369191169739,
"learning_rate": 0.0003948065941170132,
"loss": 3.4516,
"step": 31850
},
{
"epoch": 3.4334302012700464,
"grad_norm": 0.5796836018562317,
"learning_rate": 0.00039448335308695176,
"loss": 3.4648,
"step": 31900
},
{
"epoch": 3.4388117533096545,
"grad_norm": 0.5938079357147217,
"learning_rate": 0.00039416011205689036,
"loss": 3.4688,
"step": 31950
},
{
"epoch": 3.4441933053492626,
"grad_norm": 0.6108924150466919,
"learning_rate": 0.000393836871026829,
"loss": 3.4697,
"step": 32000
},
{
"epoch": 3.4441933053492626,
"eval_accuracy": 0.3729599469600063,
"eval_loss": 3.4790189266204834,
"eval_runtime": 184.7239,
"eval_samples_per_second": 97.502,
"eval_steps_per_second": 6.096,
"step": 32000
},
{
"epoch": 3.449574857388871,
"grad_norm": 0.6282352209091187,
"learning_rate": 0.00039351362999676755,
"loss": 3.4755,
"step": 32050
},
{
"epoch": 3.4549564094284793,
"grad_norm": 0.5978196859359741,
"learning_rate": 0.00039319038896670614,
"loss": 3.4675,
"step": 32100
},
{
"epoch": 3.4603379614680874,
"grad_norm": 0.603289008140564,
"learning_rate": 0.00039286714793664474,
"loss": 3.4656,
"step": 32150
},
{
"epoch": 3.4657195135076955,
"grad_norm": 0.6244550943374634,
"learning_rate": 0.0003925439069065833,
"loss": 3.439,
"step": 32200
},
{
"epoch": 3.471101065547304,
"grad_norm": 0.6003792881965637,
"learning_rate": 0.0003922206658765219,
"loss": 3.4448,
"step": 32250
},
{
"epoch": 3.476482617586912,
"grad_norm": 0.5921474099159241,
"learning_rate": 0.0003918974248464605,
"loss": 3.464,
"step": 32300
},
{
"epoch": 3.4818641696265202,
"grad_norm": 0.5765640735626221,
"learning_rate": 0.00039157418381639906,
"loss": 3.4772,
"step": 32350
},
{
"epoch": 3.4872457216661283,
"grad_norm": 0.6701759099960327,
"learning_rate": 0.00039125094278633766,
"loss": 3.4741,
"step": 32400
},
{
"epoch": 3.492627273705737,
"grad_norm": 0.6599100828170776,
"learning_rate": 0.0003909277017562762,
"loss": 3.4774,
"step": 32450
},
{
"epoch": 3.498008825745345,
"grad_norm": 0.5839077830314636,
"learning_rate": 0.0003906044607262148,
"loss": 3.4744,
"step": 32500
},
{
"epoch": 3.503390377784953,
"grad_norm": 0.6445351839065552,
"learning_rate": 0.0003902876845167546,
"loss": 3.467,
"step": 32550
},
{
"epoch": 3.5087719298245617,
"grad_norm": 0.6389232277870178,
"learning_rate": 0.0003899644434866932,
"loss": 3.4601,
"step": 32600
},
{
"epoch": 3.5141534818641698,
"grad_norm": 0.6152642965316772,
"learning_rate": 0.00038964120245663184,
"loss": 3.46,
"step": 32650
},
{
"epoch": 3.519535033903778,
"grad_norm": 0.6026628613471985,
"learning_rate": 0.0003893179614265704,
"loss": 3.4665,
"step": 32700
},
{
"epoch": 3.524916585943386,
"grad_norm": 0.658417820930481,
"learning_rate": 0.000388994720396509,
"loss": 3.4735,
"step": 32750
},
{
"epoch": 3.530298137982994,
"grad_norm": 0.6930371522903442,
"learning_rate": 0.0003886714793664475,
"loss": 3.4673,
"step": 32800
},
{
"epoch": 3.5356796900226026,
"grad_norm": 0.6179644465446472,
"learning_rate": 0.0003883482383363861,
"loss": 3.4923,
"step": 32850
},
{
"epoch": 3.5410612420622107,
"grad_norm": 0.6348962783813477,
"learning_rate": 0.00038802499730632476,
"loss": 3.4828,
"step": 32900
},
{
"epoch": 3.546442794101819,
"grad_norm": 0.6657530665397644,
"learning_rate": 0.0003877082210968645,
"loss": 3.4599,
"step": 32950
},
{
"epoch": 3.5518243461414274,
"grad_norm": 0.6153150200843811,
"learning_rate": 0.00038738498006680316,
"loss": 3.4765,
"step": 33000
},
{
"epoch": 3.5518243461414274,
"eval_accuracy": 0.37310097840838946,
"eval_loss": 3.472996473312378,
"eval_runtime": 184.7663,
"eval_samples_per_second": 97.48,
"eval_steps_per_second": 6.094,
"step": 33000
},
{
"epoch": 3.5572058981810355,
"grad_norm": 0.621093213558197,
"learning_rate": 0.0003870617390367417,
"loss": 3.4507,
"step": 33050
},
{
"epoch": 3.5625874502206436,
"grad_norm": 0.6191295981407166,
"learning_rate": 0.0003867384980066803,
"loss": 3.4579,
"step": 33100
},
{
"epoch": 3.5679690022602517,
"grad_norm": 0.6550074815750122,
"learning_rate": 0.00038641525697661884,
"loss": 3.4754,
"step": 33150
},
{
"epoch": 3.57335055429986,
"grad_norm": 0.6438351273536682,
"learning_rate": 0.00038609201594655743,
"loss": 3.4556,
"step": 33200
},
{
"epoch": 3.5787321063394684,
"grad_norm": 0.6011756062507629,
"learning_rate": 0.000385768774916496,
"loss": 3.4769,
"step": 33250
},
{
"epoch": 3.5841136583790765,
"grad_norm": 0.5651047825813293,
"learning_rate": 0.0003854455338864346,
"loss": 3.4487,
"step": 33300
},
{
"epoch": 3.5894952104186846,
"grad_norm": 0.6038775444030762,
"learning_rate": 0.0003851222928563732,
"loss": 3.4777,
"step": 33350
},
{
"epoch": 3.594876762458293,
"grad_norm": 0.6581705212593079,
"learning_rate": 0.0003847990518263118,
"loss": 3.4427,
"step": 33400
},
{
"epoch": 3.6002583144979012,
"grad_norm": 0.61561119556427,
"learning_rate": 0.00038447581079625035,
"loss": 3.476,
"step": 33450
},
{
"epoch": 3.6056398665375093,
"grad_norm": 0.6354854702949524,
"learning_rate": 0.00038415256976618895,
"loss": 3.4703,
"step": 33500
},
{
"epoch": 3.611021418577118,
"grad_norm": 0.6150094866752625,
"learning_rate": 0.0003838293287361275,
"loss": 3.4711,
"step": 33550
},
{
"epoch": 3.616402970616726,
"grad_norm": 0.611519455909729,
"learning_rate": 0.00038350608770606613,
"loss": 3.4708,
"step": 33600
},
{
"epoch": 3.621784522656334,
"grad_norm": 0.6256572604179382,
"learning_rate": 0.00038318284667600473,
"loss": 3.4477,
"step": 33650
},
{
"epoch": 3.627166074695942,
"grad_norm": 0.6752513647079468,
"learning_rate": 0.00038285960564594327,
"loss": 3.4523,
"step": 33700
},
{
"epoch": 3.6325476267355503,
"grad_norm": 0.6451570391654968,
"learning_rate": 0.00038253636461588186,
"loss": 3.4629,
"step": 33750
},
{
"epoch": 3.637929178775159,
"grad_norm": 0.6020840406417847,
"learning_rate": 0.00038221312358582046,
"loss": 3.4861,
"step": 33800
},
{
"epoch": 3.643310730814767,
"grad_norm": 0.5956219434738159,
"learning_rate": 0.00038188988255575905,
"loss": 3.4752,
"step": 33850
},
{
"epoch": 3.648692282854375,
"grad_norm": 0.6432216763496399,
"learning_rate": 0.00038156664152569765,
"loss": 3.4618,
"step": 33900
},
{
"epoch": 3.6540738348939836,
"grad_norm": 0.6001318097114563,
"learning_rate": 0.00038124340049563624,
"loss": 3.4456,
"step": 33950
},
{
"epoch": 3.6594553869335917,
"grad_norm": 0.5723239183425903,
"learning_rate": 0.0003809201594655748,
"loss": 3.458,
"step": 34000
},
{
"epoch": 3.6594553869335917,
"eval_accuracy": 0.37387969365535584,
"eval_loss": 3.4675986766815186,
"eval_runtime": 185.1386,
"eval_samples_per_second": 97.284,
"eval_steps_per_second": 6.082,
"step": 34000
},
{
"epoch": 3.6648369389732,
"grad_norm": 0.6167240142822266,
"learning_rate": 0.0003805969184355134,
"loss": 3.4585,
"step": 34050
},
{
"epoch": 3.670218491012808,
"grad_norm": 0.6382555961608887,
"learning_rate": 0.0003802736774054519,
"loss": 3.4491,
"step": 34100
},
{
"epoch": 3.675600043052416,
"grad_norm": 0.5925253033638,
"learning_rate": 0.00037995043637539057,
"loss": 3.4558,
"step": 34150
},
{
"epoch": 3.6809815950920246,
"grad_norm": 0.5835258960723877,
"learning_rate": 0.00037962719534532916,
"loss": 3.4596,
"step": 34200
},
{
"epoch": 3.6863631471316327,
"grad_norm": 0.8861356377601624,
"learning_rate": 0.0003793039543152677,
"loss": 3.4475,
"step": 34250
},
{
"epoch": 3.691744699171241,
"grad_norm": 0.6378123760223389,
"learning_rate": 0.0003789807132852063,
"loss": 3.4609,
"step": 34300
},
{
"epoch": 3.6971262512108494,
"grad_norm": 0.6498599648475647,
"learning_rate": 0.0003786574722551449,
"loss": 3.4649,
"step": 34350
},
{
"epoch": 3.7025078032504575,
"grad_norm": 0.6097577810287476,
"learning_rate": 0.00037833423122508343,
"loss": 3.4591,
"step": 34400
},
{
"epoch": 3.7078893552900656,
"grad_norm": 0.6025764346122742,
"learning_rate": 0.0003780109901950221,
"loss": 3.4636,
"step": 34450
},
{
"epoch": 3.713270907329674,
"grad_norm": 0.6137334108352661,
"learning_rate": 0.0003776877491649607,
"loss": 3.4743,
"step": 34500
},
{
"epoch": 3.7186524593692822,
"grad_norm": 0.6193282604217529,
"learning_rate": 0.0003773645081348992,
"loss": 3.4563,
"step": 34550
},
{
"epoch": 3.7240340114088903,
"grad_norm": 0.6524245738983154,
"learning_rate": 0.0003770412671048378,
"loss": 3.4657,
"step": 34600
},
{
"epoch": 3.7294155634484984,
"grad_norm": 0.6087914705276489,
"learning_rate": 0.00037671802607477635,
"loss": 3.4607,
"step": 34650
},
{
"epoch": 3.7347971154881066,
"grad_norm": 0.6221836805343628,
"learning_rate": 0.000376394785044715,
"loss": 3.4561,
"step": 34700
},
{
"epoch": 3.740178667527715,
"grad_norm": 0.6097055077552795,
"learning_rate": 0.0003760715440146536,
"loss": 3.452,
"step": 34750
},
{
"epoch": 3.745560219567323,
"grad_norm": 0.655005693435669,
"learning_rate": 0.00037574830298459214,
"loss": 3.4711,
"step": 34800
},
{
"epoch": 3.7509417716069313,
"grad_norm": 0.6379830837249756,
"learning_rate": 0.00037542506195453073,
"loss": 3.4451,
"step": 34850
},
{
"epoch": 3.75632332364654,
"grad_norm": 0.6179572939872742,
"learning_rate": 0.0003751018209244693,
"loss": 3.4654,
"step": 34900
},
{
"epoch": 3.761704875686148,
"grad_norm": 0.612697184085846,
"learning_rate": 0.00037477857989440787,
"loss": 3.4587,
"step": 34950
},
{
"epoch": 3.767086427725756,
"grad_norm": 0.6860320568084717,
"learning_rate": 0.0003744553388643465,
"loss": 3.4454,
"step": 35000
},
{
"epoch": 3.767086427725756,
"eval_accuracy": 0.37395770642880666,
"eval_loss": 3.4650323390960693,
"eval_runtime": 184.8816,
"eval_samples_per_second": 97.419,
"eval_steps_per_second": 6.09,
"step": 35000
},
{
"epoch": 3.772467979765364,
"grad_norm": 0.6109294295310974,
"learning_rate": 0.0003741320978342851,
"loss": 3.4285,
"step": 35050
},
{
"epoch": 3.7778495318049723,
"grad_norm": 0.6423705816268921,
"learning_rate": 0.00037380885680422365,
"loss": 3.4731,
"step": 35100
},
{
"epoch": 3.783231083844581,
"grad_norm": 0.6706321835517883,
"learning_rate": 0.00037348561577416224,
"loss": 3.4448,
"step": 35150
},
{
"epoch": 3.788612635884189,
"grad_norm": 0.5894853472709656,
"learning_rate": 0.0003731623747441008,
"loss": 3.4727,
"step": 35200
},
{
"epoch": 3.793994187923797,
"grad_norm": 0.6134861707687378,
"learning_rate": 0.0003728391337140394,
"loss": 3.4464,
"step": 35250
},
{
"epoch": 3.7993757399634056,
"grad_norm": 0.620707094669342,
"learning_rate": 0.00037251589268397803,
"loss": 3.4611,
"step": 35300
},
{
"epoch": 3.8047572920030137,
"grad_norm": 0.6619590520858765,
"learning_rate": 0.00037219265165391657,
"loss": 3.4473,
"step": 35350
},
{
"epoch": 3.810138844042622,
"grad_norm": 0.6385608315467834,
"learning_rate": 0.00037186941062385516,
"loss": 3.4468,
"step": 35400
},
{
"epoch": 3.8155203960822304,
"grad_norm": 0.5904738903045654,
"learning_rate": 0.0003715461695937937,
"loss": 3.4418,
"step": 35450
},
{
"epoch": 3.8209019481218385,
"grad_norm": 0.62429279088974,
"learning_rate": 0.0003712229285637323,
"loss": 3.4486,
"step": 35500
},
{
"epoch": 3.8262835001614466,
"grad_norm": 0.6489128470420837,
"learning_rate": 0.0003708996875336709,
"loss": 3.4485,
"step": 35550
},
{
"epoch": 3.8316650522010547,
"grad_norm": 0.6631107926368713,
"learning_rate": 0.0003705764465036095,
"loss": 3.4504,
"step": 35600
},
{
"epoch": 3.837046604240663,
"grad_norm": 0.6365602612495422,
"learning_rate": 0.0003702532054735481,
"loss": 3.4585,
"step": 35650
},
{
"epoch": 3.8424281562802713,
"grad_norm": 0.6052917838096619,
"learning_rate": 0.0003699299644434867,
"loss": 3.4588,
"step": 35700
},
{
"epoch": 3.8478097083198795,
"grad_norm": 0.60471510887146,
"learning_rate": 0.0003696067234134252,
"loss": 3.463,
"step": 35750
},
{
"epoch": 3.8531912603594876,
"grad_norm": 0.6473640203475952,
"learning_rate": 0.0003692834823833638,
"loss": 3.4513,
"step": 35800
},
{
"epoch": 3.858572812399096,
"grad_norm": 0.6347455978393555,
"learning_rate": 0.00036896024135330246,
"loss": 3.4592,
"step": 35850
},
{
"epoch": 3.863954364438704,
"grad_norm": 0.646809995174408,
"learning_rate": 0.000368637000323241,
"loss": 3.4763,
"step": 35900
},
{
"epoch": 3.8693359164783123,
"grad_norm": 0.6334096193313599,
"learning_rate": 0.0003683137592931796,
"loss": 3.4571,
"step": 35950
},
{
"epoch": 3.8747174685179204,
"grad_norm": 0.6145724654197693,
"learning_rate": 0.00036799051826311814,
"loss": 3.4562,
"step": 36000
},
{
"epoch": 3.8747174685179204,
"eval_accuracy": 0.3749936030612299,
"eval_loss": 3.4543886184692383,
"eval_runtime": 185.0342,
"eval_samples_per_second": 97.339,
"eval_steps_per_second": 6.085,
"step": 36000
},
{
"epoch": 3.8800990205575285,
"grad_norm": 0.640128493309021,
"learning_rate": 0.00036766727723305673,
"loss": 3.4502,
"step": 36050
},
{
"epoch": 3.885480572597137,
"grad_norm": 0.6229842305183411,
"learning_rate": 0.0003673440362029953,
"loss": 3.4506,
"step": 36100
},
{
"epoch": 3.890862124636745,
"grad_norm": 0.6296483278274536,
"learning_rate": 0.0003670207951729339,
"loss": 3.469,
"step": 36150
},
{
"epoch": 3.8962436766763533,
"grad_norm": 0.6755225658416748,
"learning_rate": 0.0003666975541428725,
"loss": 3.4531,
"step": 36200
},
{
"epoch": 3.901625228715962,
"grad_norm": 0.6031575798988342,
"learning_rate": 0.0003663743131128111,
"loss": 3.4584,
"step": 36250
},
{
"epoch": 3.90700678075557,
"grad_norm": 0.6367796063423157,
"learning_rate": 0.00036605107208274965,
"loss": 3.4711,
"step": 36300
},
{
"epoch": 3.912388332795178,
"grad_norm": 0.6142009496688843,
"learning_rate": 0.00036572783105268824,
"loss": 3.4706,
"step": 36350
},
{
"epoch": 3.9177698848347866,
"grad_norm": 0.5969540476799011,
"learning_rate": 0.0003654045900226268,
"loss": 3.457,
"step": 36400
},
{
"epoch": 3.9231514368743947,
"grad_norm": 0.6135761737823486,
"learning_rate": 0.00036508134899256543,
"loss": 3.4594,
"step": 36450
},
{
"epoch": 3.928532988914003,
"grad_norm": 0.6303659677505493,
"learning_rate": 0.00036475810796250403,
"loss": 3.4445,
"step": 36500
},
{
"epoch": 3.933914540953611,
"grad_norm": 0.6449431777000427,
"learning_rate": 0.00036443486693244257,
"loss": 3.4505,
"step": 36550
},
{
"epoch": 3.939296092993219,
"grad_norm": 0.6396144032478333,
"learning_rate": 0.00036411162590238116,
"loss": 3.4268,
"step": 36600
},
{
"epoch": 3.9446776450328276,
"grad_norm": 0.6068968176841736,
"learning_rate": 0.00036378838487231976,
"loss": 3.4544,
"step": 36650
},
{
"epoch": 3.9500591970724357,
"grad_norm": 0.6107643842697144,
"learning_rate": 0.00036346514384225835,
"loss": 3.4429,
"step": 36700
},
{
"epoch": 3.955440749112044,
"grad_norm": 0.6284364461898804,
"learning_rate": 0.00036314190281219695,
"loss": 3.438,
"step": 36750
},
{
"epoch": 3.9608223011516523,
"grad_norm": 0.6059133410453796,
"learning_rate": 0.00036281866178213554,
"loss": 3.4578,
"step": 36800
},
{
"epoch": 3.9662038531912605,
"grad_norm": 0.5825189352035522,
"learning_rate": 0.0003624954207520741,
"loss": 3.4431,
"step": 36850
},
{
"epoch": 3.9715854052308686,
"grad_norm": 0.6884462833404541,
"learning_rate": 0.0003621721797220127,
"loss": 3.4577,
"step": 36900
},
{
"epoch": 3.9769669572704767,
"grad_norm": 0.6246378421783447,
"learning_rate": 0.0003618554035125525,
"loss": 3.4424,
"step": 36950
},
{
"epoch": 3.9823485093100848,
"grad_norm": 0.6227812767028809,
"learning_rate": 0.0003615321624824911,
"loss": 3.444,
"step": 37000
},
{
"epoch": 3.9823485093100848,
"eval_accuracy": 0.37545526918155364,
"eval_loss": 3.4485981464385986,
"eval_runtime": 186.1677,
"eval_samples_per_second": 96.746,
"eval_steps_per_second": 6.048,
"step": 37000
},
{
"epoch": 3.9877300613496933,
"grad_norm": 0.665170431137085,
"learning_rate": 0.0003612089214524296,
"loss": 3.4387,
"step": 37050
},
{
"epoch": 3.9931116133893014,
"grad_norm": 0.6156587600708008,
"learning_rate": 0.0003608921452429695,
"loss": 3.4548,
"step": 37100
},
{
"epoch": 3.9984931654289095,
"grad_norm": 0.6279550194740295,
"learning_rate": 0.000360568904212908,
"loss": 3.461,
"step": 37150
},
{
"epoch": 4.003874717468518,
"grad_norm": 0.6261448860168457,
"learning_rate": 0.00036024566318284667,
"loss": 3.3728,
"step": 37200
},
{
"epoch": 4.009256269508126,
"grad_norm": 0.6143143177032471,
"learning_rate": 0.00035992242215278526,
"loss": 3.3296,
"step": 37250
},
{
"epoch": 4.014637821547734,
"grad_norm": 0.655872642993927,
"learning_rate": 0.0003595991811227238,
"loss": 3.3614,
"step": 37300
},
{
"epoch": 4.020019373587343,
"grad_norm": 0.6455563306808472,
"learning_rate": 0.0003592759400926624,
"loss": 3.3377,
"step": 37350
},
{
"epoch": 4.0254009256269505,
"grad_norm": 0.6441978216171265,
"learning_rate": 0.00035895269906260094,
"loss": 3.3616,
"step": 37400
},
{
"epoch": 4.030782477666559,
"grad_norm": 0.6673616766929626,
"learning_rate": 0.0003586294580325396,
"loss": 3.3547,
"step": 37450
},
{
"epoch": 4.036164029706168,
"grad_norm": 0.6724904775619507,
"learning_rate": 0.0003583062170024782,
"loss": 3.3415,
"step": 37500
},
{
"epoch": 4.041545581745775,
"grad_norm": 0.6894497275352478,
"learning_rate": 0.0003579829759724167,
"loss": 3.3525,
"step": 37550
},
{
"epoch": 4.046927133785384,
"grad_norm": 0.6247220635414124,
"learning_rate": 0.0003576597349423553,
"loss": 3.3583,
"step": 37600
},
{
"epoch": 4.0523086858249915,
"grad_norm": 0.644463062286377,
"learning_rate": 0.0003573364939122939,
"loss": 3.363,
"step": 37650
},
{
"epoch": 4.0576902378646,
"grad_norm": 0.6603822112083435,
"learning_rate": 0.00035701325288223245,
"loss": 3.3778,
"step": 37700
},
{
"epoch": 4.063071789904209,
"grad_norm": 0.652593731880188,
"learning_rate": 0.0003566900118521711,
"loss": 3.3786,
"step": 37750
},
{
"epoch": 4.068453341943816,
"grad_norm": 0.6232748627662659,
"learning_rate": 0.0003563667708221097,
"loss": 3.3588,
"step": 37800
},
{
"epoch": 4.073834893983425,
"grad_norm": 0.6294955015182495,
"learning_rate": 0.00035604352979204824,
"loss": 3.3854,
"step": 37850
},
{
"epoch": 4.079216446023033,
"grad_norm": 0.6011383533477783,
"learning_rate": 0.00035572028876198683,
"loss": 3.3782,
"step": 37900
},
{
"epoch": 4.084597998062641,
"grad_norm": 0.6583512425422668,
"learning_rate": 0.00035539704773192537,
"loss": 3.3833,
"step": 37950
},
{
"epoch": 4.08997955010225,
"grad_norm": 0.6446003317832947,
"learning_rate": 0.00035507380670186397,
"loss": 3.3707,
"step": 38000
},
{
"epoch": 4.08997955010225,
"eval_accuracy": 0.3760752425594231,
"eval_loss": 3.4499671459198,
"eval_runtime": 184.8025,
"eval_samples_per_second": 97.461,
"eval_steps_per_second": 6.093,
"step": 38000
},
{
"epoch": 4.095361102141858,
"grad_norm": 0.6237815022468567,
"learning_rate": 0.0003547505656718026,
"loss": 3.3644,
"step": 38050
},
{
"epoch": 4.100742654181466,
"grad_norm": 0.6614598631858826,
"learning_rate": 0.00035442732464174116,
"loss": 3.3779,
"step": 38100
},
{
"epoch": 4.106124206221074,
"grad_norm": 0.6301232576370239,
"learning_rate": 0.00035410408361167975,
"loss": 3.3746,
"step": 38150
},
{
"epoch": 4.111505758260682,
"grad_norm": 0.6670769453048706,
"learning_rate": 0.0003537808425816183,
"loss": 3.38,
"step": 38200
},
{
"epoch": 4.1168873103002905,
"grad_norm": 0.673061728477478,
"learning_rate": 0.0003534576015515569,
"loss": 3.3393,
"step": 38250
},
{
"epoch": 4.122268862339899,
"grad_norm": 0.6556143164634705,
"learning_rate": 0.00035313436052149553,
"loss": 3.3838,
"step": 38300
},
{
"epoch": 4.127650414379507,
"grad_norm": 0.6522071957588196,
"learning_rate": 0.0003528111194914341,
"loss": 3.3699,
"step": 38350
},
{
"epoch": 4.133031966419115,
"grad_norm": 0.7275823354721069,
"learning_rate": 0.00035248787846137267,
"loss": 3.3934,
"step": 38400
},
{
"epoch": 4.138413518458724,
"grad_norm": 0.6600149273872375,
"learning_rate": 0.00035216463743131126,
"loss": 3.399,
"step": 38450
},
{
"epoch": 4.1437950704983315,
"grad_norm": 0.6132925748825073,
"learning_rate": 0.0003518413964012498,
"loss": 3.3967,
"step": 38500
},
{
"epoch": 4.14917662253794,
"grad_norm": 0.616926908493042,
"learning_rate": 0.0003515181553711884,
"loss": 3.3699,
"step": 38550
},
{
"epoch": 4.154558174577549,
"grad_norm": 0.6914690732955933,
"learning_rate": 0.00035119491434112705,
"loss": 3.3801,
"step": 38600
},
{
"epoch": 4.159939726617156,
"grad_norm": 0.631589412689209,
"learning_rate": 0.0003508716733110656,
"loss": 3.3869,
"step": 38650
},
{
"epoch": 4.165321278656765,
"grad_norm": 0.6257556676864624,
"learning_rate": 0.0003505484322810042,
"loss": 3.3908,
"step": 38700
},
{
"epoch": 4.1707028306963725,
"grad_norm": 0.6427106261253357,
"learning_rate": 0.0003502251912509427,
"loss": 3.3834,
"step": 38750
},
{
"epoch": 4.176084382735981,
"grad_norm": 0.6590670347213745,
"learning_rate": 0.0003499019502208813,
"loss": 3.3694,
"step": 38800
},
{
"epoch": 4.18146593477559,
"grad_norm": 0.6249048113822937,
"learning_rate": 0.0003495787091908199,
"loss": 3.3961,
"step": 38850
},
{
"epoch": 4.186847486815197,
"grad_norm": 0.6336603760719299,
"learning_rate": 0.0003492554681607585,
"loss": 3.3878,
"step": 38900
},
{
"epoch": 4.192229038854806,
"grad_norm": 0.6488654613494873,
"learning_rate": 0.0003489322271306971,
"loss": 3.3747,
"step": 38950
},
{
"epoch": 4.197610590894414,
"grad_norm": 0.6409652829170227,
"learning_rate": 0.0003486089861006357,
"loss": 3.3754,
"step": 39000
},
{
"epoch": 4.197610590894414,
"eval_accuracy": 0.3763153654414931,
"eval_loss": 3.4473249912261963,
"eval_runtime": 184.9125,
"eval_samples_per_second": 97.403,
"eval_steps_per_second": 6.089,
"step": 39000
},
{
"epoch": 4.202992142934022,
"grad_norm": 0.6476558446884155,
"learning_rate": 0.00034828574507057424,
"loss": 3.3746,
"step": 39050
},
{
"epoch": 4.208373694973631,
"grad_norm": 0.6456419825553894,
"learning_rate": 0.00034796250404051283,
"loss": 3.4059,
"step": 39100
},
{
"epoch": 4.213755247013238,
"grad_norm": 0.6619618535041809,
"learning_rate": 0.00034763926301045137,
"loss": 3.3704,
"step": 39150
},
{
"epoch": 4.219136799052847,
"grad_norm": 0.6563810706138611,
"learning_rate": 0.00034731602198039,
"loss": 3.3754,
"step": 39200
},
{
"epoch": 4.224518351092455,
"grad_norm": 0.6438670754432678,
"learning_rate": 0.0003469927809503286,
"loss": 3.3833,
"step": 39250
},
{
"epoch": 4.229899903132063,
"grad_norm": 0.6422169208526611,
"learning_rate": 0.00034666953992026716,
"loss": 3.3859,
"step": 39300
},
{
"epoch": 4.2352814551716715,
"grad_norm": 0.7038455009460449,
"learning_rate": 0.00034634629889020575,
"loss": 3.3857,
"step": 39350
},
{
"epoch": 4.24066300721128,
"grad_norm": 0.6076750159263611,
"learning_rate": 0.00034602305786014435,
"loss": 3.3918,
"step": 39400
},
{
"epoch": 4.246044559250888,
"grad_norm": 0.68123859167099,
"learning_rate": 0.00034569981683008294,
"loss": 3.3651,
"step": 39450
},
{
"epoch": 4.251426111290496,
"grad_norm": 0.6171448826789856,
"learning_rate": 0.00034537657580002154,
"loss": 3.3783,
"step": 39500
},
{
"epoch": 4.256807663330104,
"grad_norm": 0.710161030292511,
"learning_rate": 0.00034505333476996013,
"loss": 3.3814,
"step": 39550
},
{
"epoch": 4.2621892153697125,
"grad_norm": 0.636483371257782,
"learning_rate": 0.00034473009373989867,
"loss": 3.3792,
"step": 39600
},
{
"epoch": 4.267570767409321,
"grad_norm": 0.5944780707359314,
"learning_rate": 0.00034440685270983727,
"loss": 3.399,
"step": 39650
},
{
"epoch": 4.272952319448929,
"grad_norm": 0.7163312435150146,
"learning_rate": 0.0003440836116797758,
"loss": 3.3993,
"step": 39700
},
{
"epoch": 4.278333871488537,
"grad_norm": 0.6651536226272583,
"learning_rate": 0.00034376037064971445,
"loss": 3.3949,
"step": 39750
},
{
"epoch": 4.283715423528146,
"grad_norm": 0.6634711623191833,
"learning_rate": 0.00034343712961965305,
"loss": 3.3995,
"step": 39800
},
{
"epoch": 4.2890969755677535,
"grad_norm": 0.6421599984169006,
"learning_rate": 0.0003431138885895916,
"loss": 3.3957,
"step": 39850
},
{
"epoch": 4.294478527607362,
"grad_norm": 0.6643863320350647,
"learning_rate": 0.0003427906475595302,
"loss": 3.3875,
"step": 39900
},
{
"epoch": 4.299860079646971,
"grad_norm": 0.6470743417739868,
"learning_rate": 0.0003424674065294688,
"loss": 3.3964,
"step": 39950
},
{
"epoch": 4.305241631686578,
"grad_norm": 0.6629840135574341,
"learning_rate": 0.0003421441654994073,
"loss": 3.392,
"step": 40000
},
{
"epoch": 4.305241631686578,
"eval_accuracy": 0.37669543324306815,
"eval_loss": 3.4432873725891113,
"eval_runtime": 184.8006,
"eval_samples_per_second": 97.462,
"eval_steps_per_second": 6.093,
"step": 40000
},
{
"epoch": 4.310623183726187,
"grad_norm": 0.6477255821228027,
"learning_rate": 0.00034182092446934597,
"loss": 3.4029,
"step": 40050
},
{
"epoch": 4.3160047357657945,
"grad_norm": 0.6865015029907227,
"learning_rate": 0.00034149768343928456,
"loss": 3.3759,
"step": 40100
},
{
"epoch": 4.321386287805403,
"grad_norm": 0.8546903729438782,
"learning_rate": 0.0003411744424092231,
"loss": 3.4042,
"step": 40150
},
{
"epoch": 4.326767839845012,
"grad_norm": 0.6472812294960022,
"learning_rate": 0.0003408512013791617,
"loss": 3.3882,
"step": 40200
},
{
"epoch": 4.332149391884619,
"grad_norm": 0.6662764549255371,
"learning_rate": 0.00034052796034910024,
"loss": 3.3868,
"step": 40250
},
{
"epoch": 4.337530943924228,
"grad_norm": 0.6494865417480469,
"learning_rate": 0.0003402047193190389,
"loss": 3.3874,
"step": 40300
},
{
"epoch": 4.342912495963836,
"grad_norm": 0.7330858111381531,
"learning_rate": 0.0003398814782889775,
"loss": 3.3871,
"step": 40350
},
{
"epoch": 4.348294048003444,
"grad_norm": 0.6532103419303894,
"learning_rate": 0.000339558237258916,
"loss": 3.395,
"step": 40400
},
{
"epoch": 4.3536756000430525,
"grad_norm": 0.652320384979248,
"learning_rate": 0.0003392349962288546,
"loss": 3.3932,
"step": 40450
},
{
"epoch": 4.359057152082661,
"grad_norm": 0.6824901700019836,
"learning_rate": 0.0003389117551987932,
"loss": 3.364,
"step": 40500
},
{
"epoch": 4.364438704122269,
"grad_norm": 0.6938865780830383,
"learning_rate": 0.00033858851416873175,
"loss": 3.3868,
"step": 40550
},
{
"epoch": 4.369820256161877,
"grad_norm": 0.7002043724060059,
"learning_rate": 0.0003382652731386704,
"loss": 3.3905,
"step": 40600
},
{
"epoch": 4.375201808201485,
"grad_norm": 0.6329423785209656,
"learning_rate": 0.000337942032108609,
"loss": 3.3952,
"step": 40650
},
{
"epoch": 4.3805833602410935,
"grad_norm": 0.6581116914749146,
"learning_rate": 0.00033761879107854754,
"loss": 3.3918,
"step": 40700
},
{
"epoch": 4.385964912280702,
"grad_norm": 0.6352985501289368,
"learning_rate": 0.00033729555004848613,
"loss": 3.3855,
"step": 40750
},
{
"epoch": 4.39134646432031,
"grad_norm": 0.6230785846710205,
"learning_rate": 0.00033697230901842467,
"loss": 3.4084,
"step": 40800
},
{
"epoch": 4.396728016359918,
"grad_norm": 0.6846745610237122,
"learning_rate": 0.00033664906798836327,
"loss": 3.3767,
"step": 40850
},
{
"epoch": 4.402109568399527,
"grad_norm": 0.6545602083206177,
"learning_rate": 0.0003363258269583019,
"loss": 3.3906,
"step": 40900
},
{
"epoch": 4.4074911204391345,
"grad_norm": 0.6751523017883301,
"learning_rate": 0.00033600258592824046,
"loss": 3.403,
"step": 40950
},
{
"epoch": 4.412872672478743,
"grad_norm": 0.6432384252548218,
"learning_rate": 0.00033567934489817905,
"loss": 3.3969,
"step": 41000
},
{
"epoch": 4.412872672478743,
"eval_accuracy": 0.37734398233043276,
"eval_loss": 3.4374191761016846,
"eval_runtime": 186.1551,
"eval_samples_per_second": 96.753,
"eval_steps_per_second": 6.049,
"step": 41000
},
{
"epoch": 4.418254224518351,
"grad_norm": 0.6344847679138184,
"learning_rate": 0.00033535610386811764,
"loss": 3.4161,
"step": 41050
},
{
"epoch": 4.423635776557959,
"grad_norm": 0.6220810413360596,
"learning_rate": 0.00033503932765865745,
"loss": 3.402,
"step": 41100
},
{
"epoch": 4.429017328597568,
"grad_norm": 0.6427842974662781,
"learning_rate": 0.000334716086628596,
"loss": 3.3949,
"step": 41150
},
{
"epoch": 4.4343988806371755,
"grad_norm": 0.6630915999412537,
"learning_rate": 0.0003343928455985346,
"loss": 3.3724,
"step": 41200
},
{
"epoch": 4.439780432676784,
"grad_norm": 0.6749280691146851,
"learning_rate": 0.00033406960456847324,
"loss": 3.4069,
"step": 41250
},
{
"epoch": 4.445161984716393,
"grad_norm": 0.6909694075584412,
"learning_rate": 0.0003337463635384118,
"loss": 3.3812,
"step": 41300
},
{
"epoch": 4.450543536756,
"grad_norm": 0.6568173170089722,
"learning_rate": 0.00033342312250835037,
"loss": 3.4003,
"step": 41350
},
{
"epoch": 4.455925088795609,
"grad_norm": 0.6784096956253052,
"learning_rate": 0.00033309988147828896,
"loss": 3.3896,
"step": 41400
},
{
"epoch": 4.461306640835216,
"grad_norm": 0.6052407622337341,
"learning_rate": 0.0003327766404482275,
"loss": 3.4065,
"step": 41450
},
{
"epoch": 4.466688192874825,
"grad_norm": 0.6263163089752197,
"learning_rate": 0.0003324533994181661,
"loss": 3.4116,
"step": 41500
},
{
"epoch": 4.4720697449144335,
"grad_norm": 0.6299830675125122,
"learning_rate": 0.00033213015838810475,
"loss": 3.3998,
"step": 41550
},
{
"epoch": 4.477451296954041,
"grad_norm": 0.6599326729774475,
"learning_rate": 0.0003318069173580433,
"loss": 3.396,
"step": 41600
},
{
"epoch": 4.48283284899365,
"grad_norm": 0.68622887134552,
"learning_rate": 0.0003314901411485831,
"loss": 3.3886,
"step": 41650
},
{
"epoch": 4.488214401033258,
"grad_norm": 0.6320132613182068,
"learning_rate": 0.0003311669001185217,
"loss": 3.3932,
"step": 41700
},
{
"epoch": 4.493595953072866,
"grad_norm": 0.6479349136352539,
"learning_rate": 0.0003308436590884603,
"loss": 3.3889,
"step": 41750
},
{
"epoch": 4.4989775051124745,
"grad_norm": 0.6223969459533691,
"learning_rate": 0.0003305204180583988,
"loss": 3.3739,
"step": 41800
},
{
"epoch": 4.504359057152083,
"grad_norm": 0.7559693455696106,
"learning_rate": 0.0003301971770283374,
"loss": 3.3848,
"step": 41850
},
{
"epoch": 4.509740609191691,
"grad_norm": 0.7084282040596008,
"learning_rate": 0.00032987393599827607,
"loss": 3.3905,
"step": 41900
},
{
"epoch": 4.515122161231299,
"grad_norm": 0.6577198505401611,
"learning_rate": 0.0003295506949682146,
"loss": 3.3904,
"step": 41950
},
{
"epoch": 4.520503713270907,
"grad_norm": 0.7152112126350403,
"learning_rate": 0.0003292274539381532,
"loss": 3.4062,
"step": 42000
},
{
"epoch": 4.520503713270907,
"eval_accuracy": 0.3782491695388152,
"eval_loss": 3.432380199432373,
"eval_runtime": 184.5835,
"eval_samples_per_second": 97.576,
"eval_steps_per_second": 6.1,
"step": 42000
},
{
"epoch": 4.5258852653105155,
"grad_norm": 0.6525520086288452,
"learning_rate": 0.00032890421290809174,
"loss": 3.4211,
"step": 42050
},
{
"epoch": 4.531266817350124,
"grad_norm": 0.6888226270675659,
"learning_rate": 0.00032858097187803034,
"loss": 3.4038,
"step": 42100
},
{
"epoch": 4.536648369389732,
"grad_norm": 0.6215112209320068,
"learning_rate": 0.00032825773084796893,
"loss": 3.3989,
"step": 42150
},
{
"epoch": 4.54202992142934,
"grad_norm": 0.6531518697738647,
"learning_rate": 0.00032793448981790753,
"loss": 3.4025,
"step": 42200
},
{
"epoch": 4.547411473468949,
"grad_norm": 0.6420837640762329,
"learning_rate": 0.0003276112487878461,
"loss": 3.3903,
"step": 42250
},
{
"epoch": 4.5527930255085565,
"grad_norm": 0.5992629528045654,
"learning_rate": 0.0003272880077577847,
"loss": 3.3813,
"step": 42300
},
{
"epoch": 4.558174577548165,
"grad_norm": 0.7291805148124695,
"learning_rate": 0.00032696476672772326,
"loss": 3.3928,
"step": 42350
},
{
"epoch": 4.563556129587774,
"grad_norm": 0.6690374612808228,
"learning_rate": 0.00032664152569766185,
"loss": 3.3947,
"step": 42400
},
{
"epoch": 4.568937681627381,
"grad_norm": 0.6393318176269531,
"learning_rate": 0.0003263182846676004,
"loss": 3.3933,
"step": 42450
},
{
"epoch": 4.57431923366699,
"grad_norm": 0.7334935665130615,
"learning_rate": 0.00032599504363753904,
"loss": 3.405,
"step": 42500
},
{
"epoch": 4.579700785706597,
"grad_norm": 0.6425918936729431,
"learning_rate": 0.00032567180260747764,
"loss": 3.3993,
"step": 42550
},
{
"epoch": 4.585082337746206,
"grad_norm": 0.625737726688385,
"learning_rate": 0.0003253485615774162,
"loss": 3.4049,
"step": 42600
},
{
"epoch": 4.5904638897858145,
"grad_norm": 0.6439979076385498,
"learning_rate": 0.00032502532054735477,
"loss": 3.4023,
"step": 42650
},
{
"epoch": 4.595845441825422,
"grad_norm": 0.666843831539154,
"learning_rate": 0.00032470207951729337,
"loss": 3.3937,
"step": 42700
},
{
"epoch": 4.601226993865031,
"grad_norm": 0.6379914283752441,
"learning_rate": 0.0003243788384872319,
"loss": 3.4084,
"step": 42750
},
{
"epoch": 4.606608545904638,
"grad_norm": 0.7040283679962158,
"learning_rate": 0.00032405559745717056,
"loss": 3.3822,
"step": 42800
},
{
"epoch": 4.611990097944247,
"grad_norm": 0.6507775187492371,
"learning_rate": 0.00032373235642710915,
"loss": 3.4123,
"step": 42850
},
{
"epoch": 4.6173716499838555,
"grad_norm": 0.6584706902503967,
"learning_rate": 0.0003234091153970477,
"loss": 3.3987,
"step": 42900
},
{
"epoch": 4.622753202023463,
"grad_norm": 0.7082355618476868,
"learning_rate": 0.0003230858743669863,
"loss": 3.3717,
"step": 42950
},
{
"epoch": 4.628134754063072,
"grad_norm": 0.6571763753890991,
"learning_rate": 0.0003227626333369248,
"loss": 3.3706,
"step": 43000
},
{
"epoch": 4.628134754063072,
"eval_accuracy": 0.3783615166248154,
"eval_loss": 3.4268484115600586,
"eval_runtime": 185.1004,
"eval_samples_per_second": 97.304,
"eval_steps_per_second": 6.083,
"step": 43000
},
{
"epoch": 4.63351630610268,
"grad_norm": 0.6413716077804565,
"learning_rate": 0.0003224393923068635,
"loss": 3.4092,
"step": 43050
},
{
"epoch": 4.638897858142288,
"grad_norm": 0.6072510480880737,
"learning_rate": 0.00032211615127680207,
"loss": 3.3956,
"step": 43100
},
{
"epoch": 4.6442794101818965,
"grad_norm": 0.672419011592865,
"learning_rate": 0.0003217929102467406,
"loss": 3.4054,
"step": 43150
},
{
"epoch": 4.649660962221505,
"grad_norm": 0.6509859561920166,
"learning_rate": 0.0003214696692166792,
"loss": 3.3917,
"step": 43200
},
{
"epoch": 4.655042514261113,
"grad_norm": 0.689818263053894,
"learning_rate": 0.0003211464281866178,
"loss": 3.3953,
"step": 43250
},
{
"epoch": 4.660424066300721,
"grad_norm": 0.6377256512641907,
"learning_rate": 0.00032082318715655634,
"loss": 3.3956,
"step": 43300
},
{
"epoch": 4.665805618340329,
"grad_norm": 0.680729329586029,
"learning_rate": 0.000320499946126495,
"loss": 3.383,
"step": 43350
},
{
"epoch": 4.6711871703799375,
"grad_norm": 0.7217500805854797,
"learning_rate": 0.0003201767050964336,
"loss": 3.4109,
"step": 43400
},
{
"epoch": 4.676568722419546,
"grad_norm": 0.6791247129440308,
"learning_rate": 0.0003198534640663721,
"loss": 3.4091,
"step": 43450
},
{
"epoch": 4.681950274459154,
"grad_norm": 0.6612512469291687,
"learning_rate": 0.0003195302230363107,
"loss": 3.3867,
"step": 43500
},
{
"epoch": 4.687331826498762,
"grad_norm": 0.6829741597175598,
"learning_rate": 0.00031920698200624926,
"loss": 3.4031,
"step": 43550
},
{
"epoch": 4.692713378538371,
"grad_norm": 0.6728960871696472,
"learning_rate": 0.00031888374097618785,
"loss": 3.3919,
"step": 43600
},
{
"epoch": 4.6980949305779784,
"grad_norm": 0.6731211543083191,
"learning_rate": 0.0003185604999461265,
"loss": 3.3839,
"step": 43650
},
{
"epoch": 4.703476482617587,
"grad_norm": 0.6127499341964722,
"learning_rate": 0.00031823725891606504,
"loss": 3.3901,
"step": 43700
},
{
"epoch": 4.7088580346571955,
"grad_norm": 0.6518672704696655,
"learning_rate": 0.00031791401788600364,
"loss": 3.392,
"step": 43750
},
{
"epoch": 4.714239586696803,
"grad_norm": 0.6531979441642761,
"learning_rate": 0.00031759077685594223,
"loss": 3.41,
"step": 43800
},
{
"epoch": 4.719621138736412,
"grad_norm": 0.6991766691207886,
"learning_rate": 0.00031726753582588077,
"loss": 3.3793,
"step": 43850
},
{
"epoch": 4.725002690776019,
"grad_norm": 0.704139232635498,
"learning_rate": 0.0003169442947958194,
"loss": 3.4015,
"step": 43900
},
{
"epoch": 4.730384242815628,
"grad_norm": 0.6892104148864746,
"learning_rate": 0.000316621053765758,
"loss": 3.3967,
"step": 43950
},
{
"epoch": 4.7357657948552365,
"grad_norm": 0.6997895240783691,
"learning_rate": 0.00031629781273569656,
"loss": 3.407,
"step": 44000
},
{
"epoch": 4.7357657948552365,
"eval_accuracy": 0.37935634246564476,
"eval_loss": 3.4216456413269043,
"eval_runtime": 184.8084,
"eval_samples_per_second": 97.458,
"eval_steps_per_second": 6.093,
"step": 44000
},
{
"epoch": 4.741147346894844,
"grad_norm": 0.6774421334266663,
"learning_rate": 0.00031597457170563515,
"loss": 3.3999,
"step": 44050
},
{
"epoch": 4.746528898934453,
"grad_norm": 0.7047556042671204,
"learning_rate": 0.0003156513306755737,
"loss": 3.3915,
"step": 44100
},
{
"epoch": 4.751910450974061,
"grad_norm": 0.6317178606987,
"learning_rate": 0.0003153280896455123,
"loss": 3.39,
"step": 44150
},
{
"epoch": 4.757292003013669,
"grad_norm": 0.6738790273666382,
"learning_rate": 0.00031500484861545094,
"loss": 3.3896,
"step": 44200
},
{
"epoch": 4.7626735550532775,
"grad_norm": 0.7213401794433594,
"learning_rate": 0.0003146816075853895,
"loss": 3.3808,
"step": 44250
},
{
"epoch": 4.768055107092886,
"grad_norm": 0.6467810273170471,
"learning_rate": 0.00031435836655532807,
"loss": 3.3878,
"step": 44300
},
{
"epoch": 4.773436659132494,
"grad_norm": 0.6356862187385559,
"learning_rate": 0.00031403512552526667,
"loss": 3.3864,
"step": 44350
},
{
"epoch": 4.778818211172102,
"grad_norm": 0.7657820582389832,
"learning_rate": 0.0003137118844952052,
"loss": 3.3988,
"step": 44400
},
{
"epoch": 4.78419976321171,
"grad_norm": 0.7307938933372498,
"learning_rate": 0.0003133886434651438,
"loss": 3.3934,
"step": 44450
},
{
"epoch": 4.7895813152513185,
"grad_norm": 0.6736586689949036,
"learning_rate": 0.00031306540243508245,
"loss": 3.4102,
"step": 44500
},
{
"epoch": 4.794962867290927,
"grad_norm": 0.6513236165046692,
"learning_rate": 0.000312742161405021,
"loss": 3.3931,
"step": 44550
},
{
"epoch": 4.800344419330535,
"grad_norm": 0.6173604130744934,
"learning_rate": 0.0003124189203749596,
"loss": 3.3887,
"step": 44600
},
{
"epoch": 4.805725971370143,
"grad_norm": 0.6931024789810181,
"learning_rate": 0.0003120956793448981,
"loss": 3.3973,
"step": 44650
},
{
"epoch": 4.811107523409751,
"grad_norm": 0.6727322936058044,
"learning_rate": 0.0003117724383148367,
"loss": 3.3839,
"step": 44700
},
{
"epoch": 4.8164890754493594,
"grad_norm": 0.6649513244628906,
"learning_rate": 0.00031144919728477526,
"loss": 3.3763,
"step": 44750
},
{
"epoch": 4.821870627488968,
"grad_norm": 0.6292411088943481,
"learning_rate": 0.0003111259562547139,
"loss": 3.3963,
"step": 44800
},
{
"epoch": 4.827252179528576,
"grad_norm": 0.6615663170814514,
"learning_rate": 0.0003108027152246525,
"loss": 3.3864,
"step": 44850
},
{
"epoch": 4.832633731568184,
"grad_norm": 0.6625530123710632,
"learning_rate": 0.0003104794741945911,
"loss": 3.4006,
"step": 44900
},
{
"epoch": 4.838015283607793,
"grad_norm": 0.6565783023834229,
"learning_rate": 0.00031015623316452964,
"loss": 3.3776,
"step": 44950
},
{
"epoch": 4.8433968356474,
"grad_norm": 0.7287229895591736,
"learning_rate": 0.00030983299213446823,
"loss": 3.385,
"step": 45000
},
{
"epoch": 4.8433968356474,
"eval_accuracy": 0.37947618660090415,
"eval_loss": 3.4171833992004395,
"eval_runtime": 184.7333,
"eval_samples_per_second": 97.497,
"eval_steps_per_second": 6.095,
"step": 45000
},
{
"epoch": 4.848778387687009,
"grad_norm": 0.6722894310951233,
"learning_rate": 0.0003095097511044069,
"loss": 3.3969,
"step": 45050
},
{
"epoch": 4.8541599397266175,
"grad_norm": 0.6436869502067566,
"learning_rate": 0.0003091865100743454,
"loss": 3.4128,
"step": 45100
},
{
"epoch": 4.859541491766225,
"grad_norm": 0.654567539691925,
"learning_rate": 0.000308863269044284,
"loss": 3.4056,
"step": 45150
},
{
"epoch": 4.864923043805834,
"grad_norm": 0.6916000247001648,
"learning_rate": 0.00030854002801422256,
"loss": 3.4162,
"step": 45200
},
{
"epoch": 4.870304595845441,
"grad_norm": 0.6123929023742676,
"learning_rate": 0.00030821678698416115,
"loss": 3.3915,
"step": 45250
},
{
"epoch": 4.87568614788505,
"grad_norm": 0.6771581172943115,
"learning_rate": 0.0003078935459540997,
"loss": 3.3876,
"step": 45300
},
{
"epoch": 4.8810676999246585,
"grad_norm": 0.6943243741989136,
"learning_rate": 0.00030757030492403834,
"loss": 3.3773,
"step": 45350
},
{
"epoch": 4.886449251964266,
"grad_norm": 0.7478858828544617,
"learning_rate": 0.00030724706389397694,
"loss": 3.3974,
"step": 45400
},
{
"epoch": 4.891830804003875,
"grad_norm": 0.6267790794372559,
"learning_rate": 0.0003069238228639155,
"loss": 3.3862,
"step": 45450
},
{
"epoch": 4.897212356043483,
"grad_norm": 0.6570143103599548,
"learning_rate": 0.00030660058183385407,
"loss": 3.3973,
"step": 45500
},
{
"epoch": 4.902593908083091,
"grad_norm": 0.6861966848373413,
"learning_rate": 0.00030627734080379267,
"loss": 3.399,
"step": 45550
},
{
"epoch": 4.9079754601226995,
"grad_norm": 0.6667155623435974,
"learning_rate": 0.0003059540997737312,
"loss": 3.4026,
"step": 45600
},
{
"epoch": 4.913357012162308,
"grad_norm": 0.7267022132873535,
"learning_rate": 0.00030563732356427107,
"loss": 3.3965,
"step": 45650
},
{
"epoch": 4.918738564201916,
"grad_norm": 0.7066245079040527,
"learning_rate": 0.00030531408253420966,
"loss": 3.4037,
"step": 45700
},
{
"epoch": 4.924120116241524,
"grad_norm": 0.693480372428894,
"learning_rate": 0.00030499084150414826,
"loss": 3.3913,
"step": 45750
},
{
"epoch": 4.929501668281132,
"grad_norm": 0.6911986470222473,
"learning_rate": 0.00030466760047408685,
"loss": 3.3884,
"step": 45800
},
{
"epoch": 4.9348832203207404,
"grad_norm": 0.6638094782829285,
"learning_rate": 0.0003043443594440254,
"loss": 3.3795,
"step": 45850
},
{
"epoch": 4.940264772360349,
"grad_norm": 0.6978832483291626,
"learning_rate": 0.000304021118413964,
"loss": 3.3869,
"step": 45900
},
{
"epoch": 4.945646324399957,
"grad_norm": 0.7054188251495361,
"learning_rate": 0.0003036978773839025,
"loss": 3.3958,
"step": 45950
},
{
"epoch": 4.951027876439565,
"grad_norm": 0.7118246555328369,
"learning_rate": 0.0003033746363538412,
"loss": 3.3928,
"step": 46000
},
{
"epoch": 4.951027876439565,
"eval_accuracy": 0.38003716146069033,
"eval_loss": 3.4111993312835693,
"eval_runtime": 185.018,
"eval_samples_per_second": 97.347,
"eval_steps_per_second": 6.086,
"step": 46000
},
{
"epoch": 4.956409428479174,
"grad_norm": 0.7064889073371887,
"learning_rate": 0.00030305139532377977,
"loss": 3.4139,
"step": 46050
},
{
"epoch": 4.961790980518781,
"grad_norm": 0.6596596240997314,
"learning_rate": 0.0003027281542937183,
"loss": 3.3863,
"step": 46100
},
{
"epoch": 4.96717253255839,
"grad_norm": 0.6461761593818665,
"learning_rate": 0.0003024049132636569,
"loss": 3.4073,
"step": 46150
},
{
"epoch": 4.9725540845979985,
"grad_norm": 0.7342297434806824,
"learning_rate": 0.0003020816722335955,
"loss": 3.3922,
"step": 46200
},
{
"epoch": 4.977935636637606,
"grad_norm": 0.7432610988616943,
"learning_rate": 0.00030175843120353404,
"loss": 3.4053,
"step": 46250
},
{
"epoch": 4.983317188677215,
"grad_norm": 0.6522065997123718,
"learning_rate": 0.0003014351901734727,
"loss": 3.406,
"step": 46300
},
{
"epoch": 4.988698740716822,
"grad_norm": 0.6344789862632751,
"learning_rate": 0.0003011119491434113,
"loss": 3.3822,
"step": 46350
},
{
"epoch": 4.994080292756431,
"grad_norm": 0.6894682049751282,
"learning_rate": 0.0003007887081133498,
"loss": 3.3982,
"step": 46400
},
{
"epoch": 4.9994618447960395,
"grad_norm": 0.6706180572509766,
"learning_rate": 0.0003004654670832884,
"loss": 3.3969,
"step": 46450
},
{
"epoch": 5.004843396835647,
"grad_norm": 0.6677836179733276,
"learning_rate": 0.00030014222605322696,
"loss": 3.3245,
"step": 46500
},
{
"epoch": 5.010224948875256,
"grad_norm": 0.6664152145385742,
"learning_rate": 0.00029981898502316555,
"loss": 3.3235,
"step": 46550
},
{
"epoch": 5.015606500914864,
"grad_norm": 0.6685128211975098,
"learning_rate": 0.00029949574399310415,
"loss": 3.3007,
"step": 46600
},
{
"epoch": 5.020988052954472,
"grad_norm": 0.6446495056152344,
"learning_rate": 0.00029917250296304274,
"loss": 3.2914,
"step": 46650
},
{
"epoch": 5.0263696049940805,
"grad_norm": 0.6748939752578735,
"learning_rate": 0.00029884926193298134,
"loss": 3.3175,
"step": 46700
},
{
"epoch": 5.031751157033688,
"grad_norm": 0.7316281199455261,
"learning_rate": 0.0002985260209029199,
"loss": 3.3109,
"step": 46750
},
{
"epoch": 5.037132709073297,
"grad_norm": 0.6529657244682312,
"learning_rate": 0.00029820277987285853,
"loss": 3.3074,
"step": 46800
},
{
"epoch": 5.042514261112905,
"grad_norm": 0.6704086065292358,
"learning_rate": 0.00029787953884279707,
"loss": 3.3047,
"step": 46850
},
{
"epoch": 5.047895813152513,
"grad_norm": 0.6949445009231567,
"learning_rate": 0.00029755629781273566,
"loss": 3.3009,
"step": 46900
},
{
"epoch": 5.0532773651921215,
"grad_norm": 0.6553006768226624,
"learning_rate": 0.00029723305678267426,
"loss": 3.3224,
"step": 46950
},
{
"epoch": 5.05865891723173,
"grad_norm": 0.7208836674690247,
"learning_rate": 0.00029690981575261285,
"loss": 3.3105,
"step": 47000
},
{
"epoch": 5.05865891723173,
"eval_accuracy": 0.38008953215261687,
"eval_loss": 3.415787935256958,
"eval_runtime": 184.7813,
"eval_samples_per_second": 97.472,
"eval_steps_per_second": 6.094,
"step": 47000
},
{
"epoch": 5.064040469271338,
"grad_norm": 0.7050295472145081,
"learning_rate": 0.0002965865747225514,
"loss": 3.3034,
"step": 47050
},
{
"epoch": 5.069422021310946,
"grad_norm": 0.6400262117385864,
"learning_rate": 0.00029626333369249,
"loss": 3.2999,
"step": 47100
},
{
"epoch": 5.074803573350554,
"grad_norm": 0.6437999606132507,
"learning_rate": 0.0002959400926624286,
"loss": 3.2865,
"step": 47150
},
{
"epoch": 5.080185125390162,
"grad_norm": 0.6724359393119812,
"learning_rate": 0.0002956168516323672,
"loss": 3.3,
"step": 47200
},
{
"epoch": 5.085566677429771,
"grad_norm": 0.6594730019569397,
"learning_rate": 0.00029529361060230577,
"loss": 3.3093,
"step": 47250
},
{
"epoch": 5.090948229469379,
"grad_norm": 0.687857449054718,
"learning_rate": 0.0002949703695722443,
"loss": 3.3153,
"step": 47300
},
{
"epoch": 5.096329781508987,
"grad_norm": 0.6695648431777954,
"learning_rate": 0.00029464712854218296,
"loss": 3.3269,
"step": 47350
},
{
"epoch": 5.101711333548596,
"grad_norm": 0.6603997945785522,
"learning_rate": 0.0002943238875121215,
"loss": 3.3302,
"step": 47400
},
{
"epoch": 5.107092885588203,
"grad_norm": 0.651153028011322,
"learning_rate": 0.0002940006464820601,
"loss": 3.3208,
"step": 47450
},
{
"epoch": 5.112474437627812,
"grad_norm": 0.7518373131752014,
"learning_rate": 0.0002936774054519987,
"loss": 3.3356,
"step": 47500
},
{
"epoch": 5.1178559896674205,
"grad_norm": 0.738702654838562,
"learning_rate": 0.0002933541644219373,
"loss": 3.3095,
"step": 47550
},
{
"epoch": 5.123237541707028,
"grad_norm": 0.6973130106925964,
"learning_rate": 0.0002930309233918758,
"loss": 3.3263,
"step": 47600
},
{
"epoch": 5.128619093746637,
"grad_norm": 0.6830120086669922,
"learning_rate": 0.0002927076823618144,
"loss": 3.3239,
"step": 47650
},
{
"epoch": 5.134000645786244,
"grad_norm": 0.6565729379653931,
"learning_rate": 0.0002923909061523542,
"loss": 3.3204,
"step": 47700
},
{
"epoch": 5.139382197825853,
"grad_norm": 0.7066057324409485,
"learning_rate": 0.0002920676651222928,
"loss": 3.3097,
"step": 47750
},
{
"epoch": 5.1447637498654615,
"grad_norm": 0.6757489442825317,
"learning_rate": 0.0002917444240922314,
"loss": 3.3308,
"step": 47800
},
{
"epoch": 5.150145301905069,
"grad_norm": 0.6399307250976562,
"learning_rate": 0.00029142118306216996,
"loss": 3.3083,
"step": 47850
},
{
"epoch": 5.155526853944678,
"grad_norm": 0.6833632588386536,
"learning_rate": 0.0002910979420321086,
"loss": 3.3093,
"step": 47900
},
{
"epoch": 5.160908405984286,
"grad_norm": 0.6751337051391602,
"learning_rate": 0.00029077470100204715,
"loss": 3.3321,
"step": 47950
},
{
"epoch": 5.166289958023894,
"grad_norm": 0.6862357258796692,
"learning_rate": 0.00029045145997198574,
"loss": 3.3207,
"step": 48000
},
{
"epoch": 5.166289958023894,
"eval_accuracy": 0.3803372607368339,
"eval_loss": 3.4145004749298096,
"eval_runtime": 184.805,
"eval_samples_per_second": 97.459,
"eval_steps_per_second": 6.093,
"step": 48000
},
{
"epoch": 5.1716715100635025,
"grad_norm": 0.6574402451515198,
"learning_rate": 0.00029012821894192433,
"loss": 3.3211,
"step": 48050
},
{
"epoch": 5.17705306210311,
"grad_norm": 0.691375732421875,
"learning_rate": 0.00028980497791186293,
"loss": 3.3103,
"step": 48100
},
{
"epoch": 5.182434614142719,
"grad_norm": 0.6582377552986145,
"learning_rate": 0.0002894817368818015,
"loss": 3.3237,
"step": 48150
},
{
"epoch": 5.187816166182327,
"grad_norm": 0.74177485704422,
"learning_rate": 0.00028915849585174006,
"loss": 3.3264,
"step": 48200
},
{
"epoch": 5.193197718221935,
"grad_norm": 0.6446400284767151,
"learning_rate": 0.00028883525482167866,
"loss": 3.3225,
"step": 48250
},
{
"epoch": 5.198579270261543,
"grad_norm": 0.6987661719322205,
"learning_rate": 0.00028851201379161725,
"loss": 3.3068,
"step": 48300
},
{
"epoch": 5.203960822301152,
"grad_norm": 0.6338952779769897,
"learning_rate": 0.00028818877276155585,
"loss": 3.3243,
"step": 48350
},
{
"epoch": 5.20934237434076,
"grad_norm": 0.6728731989860535,
"learning_rate": 0.0002878655317314944,
"loss": 3.3331,
"step": 48400
},
{
"epoch": 5.214723926380368,
"grad_norm": 0.7184383869171143,
"learning_rate": 0.00028754229070143304,
"loss": 3.3179,
"step": 48450
},
{
"epoch": 5.220105478419977,
"grad_norm": 0.6783042550086975,
"learning_rate": 0.0002872190496713716,
"loss": 3.3045,
"step": 48500
},
{
"epoch": 5.225487030459584,
"grad_norm": 0.6732574701309204,
"learning_rate": 0.0002868958086413102,
"loss": 3.3205,
"step": 48550
},
{
"epoch": 5.230868582499193,
"grad_norm": 0.7296816110610962,
"learning_rate": 0.00028657256761124877,
"loss": 3.3154,
"step": 48600
},
{
"epoch": 5.236250134538801,
"grad_norm": 0.6376650929450989,
"learning_rate": 0.00028624932658118736,
"loss": 3.3214,
"step": 48650
},
{
"epoch": 5.241631686578409,
"grad_norm": 0.6608768105506897,
"learning_rate": 0.0002859260855511259,
"loss": 3.3387,
"step": 48700
},
{
"epoch": 5.247013238618018,
"grad_norm": 0.6965283155441284,
"learning_rate": 0.0002856028445210645,
"loss": 3.3452,
"step": 48750
},
{
"epoch": 5.252394790657625,
"grad_norm": 0.6676567792892456,
"learning_rate": 0.0002852796034910031,
"loss": 3.3208,
"step": 48800
},
{
"epoch": 5.257776342697234,
"grad_norm": 0.6948692202568054,
"learning_rate": 0.0002849563624609417,
"loss": 3.3475,
"step": 48850
},
{
"epoch": 5.2631578947368425,
"grad_norm": 0.6433426737785339,
"learning_rate": 0.0002846331214308803,
"loss": 3.333,
"step": 48900
},
{
"epoch": 5.26853944677645,
"grad_norm": 0.6406604647636414,
"learning_rate": 0.0002843098804008188,
"loss": 3.3265,
"step": 48950
},
{
"epoch": 5.273920998816059,
"grad_norm": 0.7309980988502502,
"learning_rate": 0.00028398663937075747,
"loss": 3.3359,
"step": 49000
},
{
"epoch": 5.273920998816059,
"eval_accuracy": 0.380725477504995,
"eval_loss": 3.4107563495635986,
"eval_runtime": 184.8872,
"eval_samples_per_second": 97.416,
"eval_steps_per_second": 6.09,
"step": 49000
},
{
"epoch": 5.279302550855666,
"grad_norm": 0.6788413524627686,
"learning_rate": 0.000283663398340696,
"loss": 3.3424,
"step": 49050
},
{
"epoch": 5.284684102895275,
"grad_norm": 0.7102241516113281,
"learning_rate": 0.0002833401573106346,
"loss": 3.3221,
"step": 49100
},
{
"epoch": 5.2900656549348835,
"grad_norm": 0.675389289855957,
"learning_rate": 0.0002830169162805732,
"loss": 3.3225,
"step": 49150
},
{
"epoch": 5.295447206974491,
"grad_norm": 0.6687794327735901,
"learning_rate": 0.0002826936752505118,
"loss": 3.3299,
"step": 49200
},
{
"epoch": 5.3008287590141,
"grad_norm": 0.7226663827896118,
"learning_rate": 0.00028237043422045034,
"loss": 3.3284,
"step": 49250
},
{
"epoch": 5.306210311053708,
"grad_norm": 0.6959290504455566,
"learning_rate": 0.00028204719319038893,
"loss": 3.3362,
"step": 49300
},
{
"epoch": 5.311591863093316,
"grad_norm": 0.7100611925125122,
"learning_rate": 0.0002817239521603275,
"loss": 3.3298,
"step": 49350
},
{
"epoch": 5.316973415132924,
"grad_norm": 0.6422631144523621,
"learning_rate": 0.0002814007111302661,
"loss": 3.3177,
"step": 49400
},
{
"epoch": 5.322354967172533,
"grad_norm": 0.7156828045845032,
"learning_rate": 0.0002810774701002047,
"loss": 3.3182,
"step": 49450
},
{
"epoch": 5.327736519212141,
"grad_norm": 0.6757283806800842,
"learning_rate": 0.00028075422907014325,
"loss": 3.3563,
"step": 49500
},
{
"epoch": 5.333118071251749,
"grad_norm": 0.7245064973831177,
"learning_rate": 0.00028043098804008185,
"loss": 3.3438,
"step": 49550
},
{
"epoch": 5.338499623291357,
"grad_norm": 0.7414877414703369,
"learning_rate": 0.00028010774701002044,
"loss": 3.3352,
"step": 49600
},
{
"epoch": 5.343881175330965,
"grad_norm": 0.7045677900314331,
"learning_rate": 0.00027978450597995904,
"loss": 3.3261,
"step": 49650
},
{
"epoch": 5.349262727370574,
"grad_norm": 0.6863226294517517,
"learning_rate": 0.00027946772977049885,
"loss": 3.3259,
"step": 49700
},
{
"epoch": 5.354644279410182,
"grad_norm": 0.7018098831176758,
"learning_rate": 0.00027914448874043744,
"loss": 3.3174,
"step": 49750
},
{
"epoch": 5.36002583144979,
"grad_norm": 0.6944161057472229,
"learning_rate": 0.00027882124771037603,
"loss": 3.3304,
"step": 49800
},
{
"epoch": 5.365407383489399,
"grad_norm": 0.6603707671165466,
"learning_rate": 0.0002784980066803146,
"loss": 3.3176,
"step": 49850
},
{
"epoch": 5.370788935529006,
"grad_norm": 0.7112104892730713,
"learning_rate": 0.00027817476565025317,
"loss": 3.3343,
"step": 49900
},
{
"epoch": 5.376170487568615,
"grad_norm": 0.6899374127388,
"learning_rate": 0.00027785152462019176,
"loss": 3.34,
"step": 49950
},
{
"epoch": 5.3815520396082235,
"grad_norm": 0.7424962520599365,
"learning_rate": 0.00027752828359013036,
"loss": 3.3431,
"step": 50000
},
{
"epoch": 5.3815520396082235,
"eval_accuracy": 0.38131611460315457,
"eval_loss": 3.4052765369415283,
"eval_runtime": 184.8419,
"eval_samples_per_second": 97.44,
"eval_steps_per_second": 6.092,
"step": 50000
},
{
"epoch": 5.386933591647831,
"grad_norm": 0.694149911403656,
"learning_rate": 0.0002772050425600689,
"loss": 3.3057,
"step": 50050
},
{
"epoch": 5.39231514368744,
"grad_norm": 0.7085953950881958,
"learning_rate": 0.00027688180153000755,
"loss": 3.3414,
"step": 50100
},
{
"epoch": 5.397696695727047,
"grad_norm": 0.691828727722168,
"learning_rate": 0.0002765585604999461,
"loss": 3.3414,
"step": 50150
},
{
"epoch": 5.403078247766656,
"grad_norm": 0.7001767158508301,
"learning_rate": 0.0002762353194698847,
"loss": 3.3379,
"step": 50200
},
{
"epoch": 5.4084597998062645,
"grad_norm": 0.676909327507019,
"learning_rate": 0.0002759120784398233,
"loss": 3.3329,
"step": 50250
},
{
"epoch": 5.413841351845872,
"grad_norm": 0.7085362076759338,
"learning_rate": 0.00027558883740976187,
"loss": 3.3346,
"step": 50300
},
{
"epoch": 5.419222903885481,
"grad_norm": 0.7403079867362976,
"learning_rate": 0.0002752655963797004,
"loss": 3.3504,
"step": 50350
},
{
"epoch": 5.424604455925088,
"grad_norm": 0.6892008781433105,
"learning_rate": 0.000274942355349639,
"loss": 3.343,
"step": 50400
},
{
"epoch": 5.429986007964697,
"grad_norm": 0.6684857606887817,
"learning_rate": 0.0002746191143195776,
"loss": 3.3485,
"step": 50450
},
{
"epoch": 5.435367560004305,
"grad_norm": 0.7241749167442322,
"learning_rate": 0.0002742958732895162,
"loss": 3.3344,
"step": 50500
},
{
"epoch": 5.440749112043913,
"grad_norm": 0.6599351167678833,
"learning_rate": 0.0002739726322594548,
"loss": 3.3182,
"step": 50550
},
{
"epoch": 5.446130664083522,
"grad_norm": 0.6936551928520203,
"learning_rate": 0.00027364939122939333,
"loss": 3.321,
"step": 50600
},
{
"epoch": 5.45151221612313,
"grad_norm": 0.6684977412223816,
"learning_rate": 0.0002733261501993319,
"loss": 3.3305,
"step": 50650
},
{
"epoch": 5.456893768162738,
"grad_norm": 0.6817634701728821,
"learning_rate": 0.0002730029091692705,
"loss": 3.339,
"step": 50700
},
{
"epoch": 5.462275320202346,
"grad_norm": 0.6818822026252747,
"learning_rate": 0.0002726796681392091,
"loss": 3.3326,
"step": 50750
},
{
"epoch": 5.467656872241955,
"grad_norm": 0.6656324863433838,
"learning_rate": 0.0002723564271091477,
"loss": 3.3483,
"step": 50800
},
{
"epoch": 5.473038424281563,
"grad_norm": 0.6781908869743347,
"learning_rate": 0.0002720331860790863,
"loss": 3.333,
"step": 50850
},
{
"epoch": 5.478419976321171,
"grad_norm": 0.7217748165130615,
"learning_rate": 0.00027170994504902485,
"loss": 3.3446,
"step": 50900
},
{
"epoch": 5.483801528360779,
"grad_norm": 0.7343464493751526,
"learning_rate": 0.00027138670401896344,
"loss": 3.355,
"step": 50950
},
{
"epoch": 5.489183080400387,
"grad_norm": 0.6531841158866882,
"learning_rate": 0.00027106346298890204,
"loss": 3.3256,
"step": 51000
},
{
"epoch": 5.489183080400387,
"eval_accuracy": 0.3820636464713182,
"eval_loss": 3.3998801708221436,
"eval_runtime": 184.5563,
"eval_samples_per_second": 97.591,
"eval_steps_per_second": 6.101,
"step": 51000
},
{
"epoch": 5.494564632439996,
"grad_norm": 0.6916175484657288,
"learning_rate": 0.00027074022195884063,
"loss": 3.3435,
"step": 51050
},
{
"epoch": 5.499946184479604,
"grad_norm": 0.6753712296485901,
"learning_rate": 0.0002704169809287792,
"loss": 3.3426,
"step": 51100
},
{
"epoch": 5.505327736519212,
"grad_norm": 0.6608497500419617,
"learning_rate": 0.00027009373989871776,
"loss": 3.3304,
"step": 51150
},
{
"epoch": 5.510709288558821,
"grad_norm": 0.678605854511261,
"learning_rate": 0.00026977049886865636,
"loss": 3.3325,
"step": 51200
},
{
"epoch": 5.516090840598428,
"grad_norm": 0.7043835520744324,
"learning_rate": 0.00026944725783859495,
"loss": 3.3457,
"step": 51250
},
{
"epoch": 5.521472392638037,
"grad_norm": 0.7066404223442078,
"learning_rate": 0.00026912401680853355,
"loss": 3.3602,
"step": 51300
},
{
"epoch": 5.5268539446776455,
"grad_norm": 0.6896523833274841,
"learning_rate": 0.0002688007757784721,
"loss": 3.3234,
"step": 51350
},
{
"epoch": 5.532235496717253,
"grad_norm": 0.6568740606307983,
"learning_rate": 0.00026847753474841074,
"loss": 3.3371,
"step": 51400
},
{
"epoch": 5.537617048756862,
"grad_norm": 0.7369204163551331,
"learning_rate": 0.0002681542937183493,
"loss": 3.3357,
"step": 51450
},
{
"epoch": 5.542998600796469,
"grad_norm": 0.687563419342041,
"learning_rate": 0.0002678310526882879,
"loss": 3.3575,
"step": 51500
},
{
"epoch": 5.548380152836078,
"grad_norm": 0.6815269589424133,
"learning_rate": 0.00026750781165822647,
"loss": 3.3286,
"step": 51550
},
{
"epoch": 5.553761704875686,
"grad_norm": 0.7188419699668884,
"learning_rate": 0.00026718457062816506,
"loss": 3.3391,
"step": 51600
},
{
"epoch": 5.559143256915294,
"grad_norm": 0.7285634279251099,
"learning_rate": 0.00026686132959810366,
"loss": 3.3365,
"step": 51650
},
{
"epoch": 5.564524808954903,
"grad_norm": 0.6750437617301941,
"learning_rate": 0.0002665445533886434,
"loss": 3.3578,
"step": 51700
},
{
"epoch": 5.569906360994511,
"grad_norm": 0.6828053593635559,
"learning_rate": 0.00026622131235858206,
"loss": 3.327,
"step": 51750
},
{
"epoch": 5.575287913034119,
"grad_norm": 0.7370911836624146,
"learning_rate": 0.0002658980713285206,
"loss": 3.3642,
"step": 51800
},
{
"epoch": 5.580669465073727,
"grad_norm": 0.7274966835975647,
"learning_rate": 0.0002655748302984592,
"loss": 3.3163,
"step": 51850
},
{
"epoch": 5.586051017113336,
"grad_norm": 0.724192202091217,
"learning_rate": 0.0002652515892683978,
"loss": 3.3457,
"step": 51900
},
{
"epoch": 5.591432569152944,
"grad_norm": 0.7059313058853149,
"learning_rate": 0.0002649283482383364,
"loss": 3.3399,
"step": 51950
},
{
"epoch": 5.596814121192552,
"grad_norm": 0.6701476573944092,
"learning_rate": 0.0002646051072082749,
"loss": 3.3417,
"step": 52000
},
{
"epoch": 5.596814121192552,
"eval_accuracy": 0.38217947044972844,
"eval_loss": 3.397221088409424,
"eval_runtime": 184.8285,
"eval_samples_per_second": 97.447,
"eval_steps_per_second": 6.092,
"step": 52000
},
{
"epoch": 5.60219567323216,
"grad_norm": 0.7342854738235474,
"learning_rate": 0.0002642818661782135,
"loss": 3.3522,
"step": 52050
},
{
"epoch": 5.607577225271768,
"grad_norm": 0.7041910886764526,
"learning_rate": 0.0002639586251481521,
"loss": 3.3441,
"step": 52100
},
{
"epoch": 5.612958777311377,
"grad_norm": 0.6980746984481812,
"learning_rate": 0.0002636353841180907,
"loss": 3.3438,
"step": 52150
},
{
"epoch": 5.618340329350985,
"grad_norm": 0.7169170379638672,
"learning_rate": 0.0002633121430880293,
"loss": 3.3183,
"step": 52200
},
{
"epoch": 5.623721881390593,
"grad_norm": 0.6377230286598206,
"learning_rate": 0.00026298890205796784,
"loss": 3.3293,
"step": 52250
},
{
"epoch": 5.629103433430201,
"grad_norm": 0.6815519332885742,
"learning_rate": 0.00026266566102790644,
"loss": 3.3181,
"step": 52300
},
{
"epoch": 5.634484985469809,
"grad_norm": 0.660933256149292,
"learning_rate": 0.00026234241999784503,
"loss": 3.3399,
"step": 52350
},
{
"epoch": 5.639866537509418,
"grad_norm": 0.6373748779296875,
"learning_rate": 0.0002620191789677836,
"loss": 3.3355,
"step": 52400
},
{
"epoch": 5.645248089549026,
"grad_norm": 0.6971507668495178,
"learning_rate": 0.00026169593793772217,
"loss": 3.341,
"step": 52450
},
{
"epoch": 5.650629641588634,
"grad_norm": 0.7280732989311218,
"learning_rate": 0.0002613726969076608,
"loss": 3.356,
"step": 52500
},
{
"epoch": 5.656011193628243,
"grad_norm": 0.6992477178573608,
"learning_rate": 0.00026104945587759936,
"loss": 3.337,
"step": 52550
},
{
"epoch": 5.66139274566785,
"grad_norm": 0.696068525314331,
"learning_rate": 0.00026072621484753795,
"loss": 3.3223,
"step": 52600
},
{
"epoch": 5.666774297707459,
"grad_norm": 0.7363685965538025,
"learning_rate": 0.00026040297381747655,
"loss": 3.3556,
"step": 52650
},
{
"epoch": 5.672155849747067,
"grad_norm": 0.682386040687561,
"learning_rate": 0.00026007973278741514,
"loss": 3.3368,
"step": 52700
},
{
"epoch": 5.677537401786675,
"grad_norm": 0.6552140116691589,
"learning_rate": 0.00025975649175735373,
"loss": 3.3458,
"step": 52750
},
{
"epoch": 5.682918953826284,
"grad_norm": 0.7018849849700928,
"learning_rate": 0.0002594332507272923,
"loss": 3.3377,
"step": 52800
},
{
"epoch": 5.688300505865891,
"grad_norm": 0.7437209486961365,
"learning_rate": 0.00025911000969723087,
"loss": 3.3348,
"step": 52850
},
{
"epoch": 5.6936820579055,
"grad_norm": 0.7024593949317932,
"learning_rate": 0.00025878676866716946,
"loss": 3.3426,
"step": 52900
},
{
"epoch": 5.699063609945108,
"grad_norm": 0.6867077350616455,
"learning_rate": 0.00025846352763710806,
"loss": 3.3245,
"step": 52950
},
{
"epoch": 5.704445161984716,
"grad_norm": 0.7076590061187744,
"learning_rate": 0.0002581402866070466,
"loss": 3.3248,
"step": 53000
},
{
"epoch": 5.704445161984716,
"eval_accuracy": 0.38262896744661695,
"eval_loss": 3.3937745094299316,
"eval_runtime": 184.9379,
"eval_samples_per_second": 97.389,
"eval_steps_per_second": 6.089,
"step": 53000
},
{
"epoch": 5.709826714024325,
"grad_norm": 0.8061707019805908,
"learning_rate": 0.00025781704557698525,
"loss": 3.337,
"step": 53050
},
{
"epoch": 5.715208266063933,
"grad_norm": 0.7249623537063599,
"learning_rate": 0.0002574938045469238,
"loss": 3.3559,
"step": 53100
},
{
"epoch": 5.720589818103541,
"grad_norm": 0.7043891549110413,
"learning_rate": 0.0002571705635168624,
"loss": 3.3466,
"step": 53150
},
{
"epoch": 5.725971370143149,
"grad_norm": 0.706001877784729,
"learning_rate": 0.000256847322486801,
"loss": 3.3386,
"step": 53200
},
{
"epoch": 5.731352922182758,
"grad_norm": 0.7397270798683167,
"learning_rate": 0.0002565240814567396,
"loss": 3.336,
"step": 53250
},
{
"epoch": 5.736734474222366,
"grad_norm": 0.7014256715774536,
"learning_rate": 0.0002562008404266781,
"loss": 3.3473,
"step": 53300
},
{
"epoch": 5.742116026261974,
"grad_norm": 0.7000493407249451,
"learning_rate": 0.0002558775993966167,
"loss": 3.3464,
"step": 53350
},
{
"epoch": 5.747497578301582,
"grad_norm": 0.6832889318466187,
"learning_rate": 0.0002555543583665553,
"loss": 3.3479,
"step": 53400
},
{
"epoch": 5.75287913034119,
"grad_norm": 0.6802942156791687,
"learning_rate": 0.0002552311173364939,
"loss": 3.3327,
"step": 53450
},
{
"epoch": 5.758260682380799,
"grad_norm": 0.6792773604393005,
"learning_rate": 0.0002549078763064325,
"loss": 3.3537,
"step": 53500
},
{
"epoch": 5.763642234420407,
"grad_norm": 0.7272350788116455,
"learning_rate": 0.00025458463527637103,
"loss": 3.3429,
"step": 53550
},
{
"epoch": 5.769023786460015,
"grad_norm": 0.6929247975349426,
"learning_rate": 0.0002542613942463097,
"loss": 3.3457,
"step": 53600
},
{
"epoch": 5.774405338499624,
"grad_norm": 0.6766125559806824,
"learning_rate": 0.0002539381532162482,
"loss": 3.334,
"step": 53650
},
{
"epoch": 5.779786890539231,
"grad_norm": 0.6857972741127014,
"learning_rate": 0.00025362137700678803,
"loss": 3.3444,
"step": 53700
},
{
"epoch": 5.78516844257884,
"grad_norm": 0.6698341369628906,
"learning_rate": 0.0002532981359767266,
"loss": 3.3559,
"step": 53750
},
{
"epoch": 5.790549994618448,
"grad_norm": 0.6908291578292847,
"learning_rate": 0.0002529748949466652,
"loss": 3.3184,
"step": 53800
},
{
"epoch": 5.795931546658056,
"grad_norm": 0.6863468289375305,
"learning_rate": 0.0002526516539166038,
"loss": 3.3631,
"step": 53850
},
{
"epoch": 5.801313098697665,
"grad_norm": 0.7525630593299866,
"learning_rate": 0.00025232841288654235,
"loss": 3.3399,
"step": 53900
},
{
"epoch": 5.806694650737272,
"grad_norm": 0.7373597025871277,
"learning_rate": 0.00025200517185648095,
"loss": 3.343,
"step": 53950
},
{
"epoch": 5.812076202776881,
"grad_norm": 0.7358316779136658,
"learning_rate": 0.00025168193082641954,
"loss": 3.3469,
"step": 54000
},
{
"epoch": 5.812076202776881,
"eval_accuracy": 0.38326252243546316,
"eval_loss": 3.3860888481140137,
"eval_runtime": 184.3203,
"eval_samples_per_second": 97.716,
"eval_steps_per_second": 6.109,
"step": 54000
},
{
"epoch": 5.817457754816489,
"grad_norm": 0.6819138526916504,
"learning_rate": 0.00025135868979635814,
"loss": 3.3147,
"step": 54050
},
{
"epoch": 5.822839306856097,
"grad_norm": 0.7241456508636475,
"learning_rate": 0.0002510354487662967,
"loss": 3.3323,
"step": 54100
},
{
"epoch": 5.828220858895706,
"grad_norm": 0.6862399578094482,
"learning_rate": 0.0002507122077362353,
"loss": 3.3485,
"step": 54150
},
{
"epoch": 5.833602410935313,
"grad_norm": 0.7358222007751465,
"learning_rate": 0.00025038896670617387,
"loss": 3.3481,
"step": 54200
},
{
"epoch": 5.838983962974922,
"grad_norm": 0.6974659562110901,
"learning_rate": 0.00025006572567611246,
"loss": 3.3291,
"step": 54250
},
{
"epoch": 5.84436551501453,
"grad_norm": 0.7777621746063232,
"learning_rate": 0.00024974248464605106,
"loss": 3.3385,
"step": 54300
},
{
"epoch": 5.849747067054138,
"grad_norm": 0.727279543876648,
"learning_rate": 0.00024941924361598965,
"loss": 3.3501,
"step": 54350
},
{
"epoch": 5.855128619093747,
"grad_norm": 0.6796432137489319,
"learning_rate": 0.00024909600258592825,
"loss": 3.3435,
"step": 54400
},
{
"epoch": 5.860510171133355,
"grad_norm": 0.7217870950698853,
"learning_rate": 0.0002487727615558668,
"loss": 3.3396,
"step": 54450
},
{
"epoch": 5.865891723172963,
"grad_norm": 0.744452714920044,
"learning_rate": 0.0002484495205258054,
"loss": 3.336,
"step": 54500
},
{
"epoch": 5.871273275212571,
"grad_norm": 0.7230072617530823,
"learning_rate": 0.000248126279495744,
"loss": 3.3416,
"step": 54550
},
{
"epoch": 5.87665482725218,
"grad_norm": 0.7871853113174438,
"learning_rate": 0.00024780303846568257,
"loss": 3.3303,
"step": 54600
},
{
"epoch": 5.882036379291788,
"grad_norm": 0.7021961212158203,
"learning_rate": 0.0002474797974356211,
"loss": 3.3474,
"step": 54650
},
{
"epoch": 5.887417931331396,
"grad_norm": 0.7060352563858032,
"learning_rate": 0.00024715655640555976,
"loss": 3.3321,
"step": 54700
},
{
"epoch": 5.892799483371004,
"grad_norm": 0.71165931224823,
"learning_rate": 0.0002468333153754983,
"loss": 3.3456,
"step": 54750
},
{
"epoch": 5.898181035410612,
"grad_norm": 0.683138906955719,
"learning_rate": 0.0002465100743454369,
"loss": 3.3308,
"step": 54800
},
{
"epoch": 5.903562587450221,
"grad_norm": 0.7074604034423828,
"learning_rate": 0.0002461868333153755,
"loss": 3.3447,
"step": 54850
},
{
"epoch": 5.9089441394898286,
"grad_norm": 0.722017765045166,
"learning_rate": 0.0002458635922853141,
"loss": 3.3308,
"step": 54900
},
{
"epoch": 5.914325691529437,
"grad_norm": 0.7072309255599976,
"learning_rate": 0.0002455403512552526,
"loss": 3.3635,
"step": 54950
},
{
"epoch": 5.919707243569046,
"grad_norm": 0.7123068571090698,
"learning_rate": 0.0002452171102251912,
"loss": 3.3445,
"step": 55000
},
{
"epoch": 5.919707243569046,
"eval_accuracy": 0.38317081939814773,
"eval_loss": 3.3848025798797607,
"eval_runtime": 184.0505,
"eval_samples_per_second": 97.859,
"eval_steps_per_second": 6.118,
"step": 55000
},
{
"epoch": 5.925088795608653,
"grad_norm": 0.7227998971939087,
"learning_rate": 0.0002448938691951298,
"loss": 3.3381,
"step": 55050
},
{
"epoch": 5.930470347648262,
"grad_norm": 0.6625151634216309,
"learning_rate": 0.0002445706281650684,
"loss": 3.3566,
"step": 55100
},
{
"epoch": 5.93585189968787,
"grad_norm": 0.6930627226829529,
"learning_rate": 0.000244247387135007,
"loss": 3.3258,
"step": 55150
},
{
"epoch": 5.941233451727478,
"grad_norm": 0.7274208664894104,
"learning_rate": 0.00024392414610494557,
"loss": 3.327,
"step": 55200
},
{
"epoch": 5.946615003767087,
"grad_norm": 0.7061982154846191,
"learning_rate": 0.00024360090507488414,
"loss": 3.3288,
"step": 55250
},
{
"epoch": 5.951996555806694,
"grad_norm": 0.684239387512207,
"learning_rate": 0.00024327766404482273,
"loss": 3.3363,
"step": 55300
},
{
"epoch": 5.957378107846303,
"grad_norm": 0.6876581907272339,
"learning_rate": 0.0002429544230147613,
"loss": 3.3406,
"step": 55350
},
{
"epoch": 5.962759659885911,
"grad_norm": 0.7436810731887817,
"learning_rate": 0.00024263118198469992,
"loss": 3.344,
"step": 55400
},
{
"epoch": 5.968141211925519,
"grad_norm": 0.7856045365333557,
"learning_rate": 0.0002423079409546385,
"loss": 3.3348,
"step": 55450
},
{
"epoch": 5.973522763965128,
"grad_norm": 0.7595300674438477,
"learning_rate": 0.00024198469992457706,
"loss": 3.336,
"step": 55500
},
{
"epoch": 5.978904316004736,
"grad_norm": 0.8078129887580872,
"learning_rate": 0.00024166145889451568,
"loss": 3.3391,
"step": 55550
},
{
"epoch": 5.984285868044344,
"grad_norm": 0.7111245393753052,
"learning_rate": 0.00024133821786445425,
"loss": 3.3372,
"step": 55600
},
{
"epoch": 5.989667420083952,
"grad_norm": 0.7171187996864319,
"learning_rate": 0.0002410149768343928,
"loss": 3.3492,
"step": 55650
},
{
"epoch": 5.995048972123561,
"grad_norm": 0.7238572239875793,
"learning_rate": 0.00024069820062493265,
"loss": 3.3415,
"step": 55700
},
{
"epoch": 6.000430524163169,
"grad_norm": 0.7295876741409302,
"learning_rate": 0.00024037495959487121,
"loss": 3.33,
"step": 55750
},
{
"epoch": 6.005812076202777,
"grad_norm": 0.6892274618148804,
"learning_rate": 0.0002400517185648098,
"loss": 3.247,
"step": 55800
},
{
"epoch": 6.011193628242385,
"grad_norm": 0.708806037902832,
"learning_rate": 0.00023972847753474838,
"loss": 3.2382,
"step": 55850
},
{
"epoch": 6.016575180281993,
"grad_norm": 0.732413113117218,
"learning_rate": 0.00023940523650468697,
"loss": 3.2495,
"step": 55900
},
{
"epoch": 6.021956732321602,
"grad_norm": 0.7050142288208008,
"learning_rate": 0.00023908199547462557,
"loss": 3.2437,
"step": 55950
},
{
"epoch": 6.0273382843612096,
"grad_norm": 0.7132096886634827,
"learning_rate": 0.00023875875444456413,
"loss": 3.2471,
"step": 56000
},
{
"epoch": 6.0273382843612096,
"eval_accuracy": 0.3839456231411528,
"eval_loss": 3.3877761363983154,
"eval_runtime": 184.3787,
"eval_samples_per_second": 97.685,
"eval_steps_per_second": 6.107,
"step": 56000
},
{
"epoch": 6.032719836400818,
"grad_norm": 0.6656051278114319,
"learning_rate": 0.0002384355134145027,
"loss": 3.2627,
"step": 56050
},
{
"epoch": 6.038101388440427,
"grad_norm": 0.6595709919929504,
"learning_rate": 0.00023811227238444132,
"loss": 3.2699,
"step": 56100
},
{
"epoch": 6.043482940480034,
"grad_norm": 0.760237455368042,
"learning_rate": 0.0002377890313543799,
"loss": 3.2639,
"step": 56150
},
{
"epoch": 6.048864492519643,
"grad_norm": 0.6955994367599487,
"learning_rate": 0.00023746579032431849,
"loss": 3.2432,
"step": 56200
},
{
"epoch": 6.0542460445592505,
"grad_norm": 0.7197161912918091,
"learning_rate": 0.0002371490141148583,
"loss": 3.246,
"step": 56250
},
{
"epoch": 6.059627596598859,
"grad_norm": 0.6739785075187683,
"learning_rate": 0.00023682577308479689,
"loss": 3.274,
"step": 56300
},
{
"epoch": 6.065009148638468,
"grad_norm": 0.7220432758331299,
"learning_rate": 0.00023650253205473545,
"loss": 3.2443,
"step": 56350
},
{
"epoch": 6.070390700678075,
"grad_norm": 0.6965115666389465,
"learning_rate": 0.00023617929102467402,
"loss": 3.2513,
"step": 56400
},
{
"epoch": 6.075772252717684,
"grad_norm": 0.7645218372344971,
"learning_rate": 0.00023585604999461264,
"loss": 3.2559,
"step": 56450
},
{
"epoch": 6.081153804757292,
"grad_norm": 0.7593476176261902,
"learning_rate": 0.0002355328089645512,
"loss": 3.2585,
"step": 56500
},
{
"epoch": 6.0865353567969,
"grad_norm": 0.6853207945823669,
"learning_rate": 0.00023520956793448978,
"loss": 3.277,
"step": 56550
},
{
"epoch": 6.091916908836509,
"grad_norm": 0.680121898651123,
"learning_rate": 0.0002348863269044284,
"loss": 3.2643,
"step": 56600
},
{
"epoch": 6.097298460876116,
"grad_norm": 0.7078647017478943,
"learning_rate": 0.00023456308587436697,
"loss": 3.258,
"step": 56650
},
{
"epoch": 6.102680012915725,
"grad_norm": 0.6985390782356262,
"learning_rate": 0.00023423984484430554,
"loss": 3.2553,
"step": 56700
},
{
"epoch": 6.108061564955333,
"grad_norm": 0.7196840047836304,
"learning_rate": 0.00023391660381424413,
"loss": 3.2634,
"step": 56750
},
{
"epoch": 6.113443116994941,
"grad_norm": 0.7238477468490601,
"learning_rate": 0.00023359336278418272,
"loss": 3.2565,
"step": 56800
},
{
"epoch": 6.11882466903455,
"grad_norm": 0.7099041938781738,
"learning_rate": 0.0002332701217541213,
"loss": 3.2683,
"step": 56850
},
{
"epoch": 6.124206221074158,
"grad_norm": 0.7175658345222473,
"learning_rate": 0.0002329468807240599,
"loss": 3.275,
"step": 56900
},
{
"epoch": 6.129587773113766,
"grad_norm": 0.7077184319496155,
"learning_rate": 0.00023262363969399845,
"loss": 3.2594,
"step": 56950
},
{
"epoch": 6.134969325153374,
"grad_norm": 0.6981788873672485,
"learning_rate": 0.00023230039866393708,
"loss": 3.2697,
"step": 57000
},
{
"epoch": 6.134969325153374,
"eval_accuracy": 0.38385218165763235,
"eval_loss": 3.3861021995544434,
"eval_runtime": 184.0749,
"eval_samples_per_second": 97.846,
"eval_steps_per_second": 6.117,
"step": 57000
},
{
"epoch": 6.140350877192983,
"grad_norm": 0.7192840576171875,
"learning_rate": 0.00023197715763387564,
"loss": 3.2669,
"step": 57050
},
{
"epoch": 6.1457324292325906,
"grad_norm": 0.7335977554321289,
"learning_rate": 0.0002316539166038142,
"loss": 3.2646,
"step": 57100
},
{
"epoch": 6.151113981272199,
"grad_norm": 0.7761592268943787,
"learning_rate": 0.00023133067557375283,
"loss": 3.2544,
"step": 57150
},
{
"epoch": 6.156495533311807,
"grad_norm": 0.7091419696807861,
"learning_rate": 0.0002310074345436914,
"loss": 3.2679,
"step": 57200
},
{
"epoch": 6.161877085351415,
"grad_norm": 0.7654346823692322,
"learning_rate": 0.00023068419351362997,
"loss": 3.2741,
"step": 57250
},
{
"epoch": 6.167258637391024,
"grad_norm": 0.7190260291099548,
"learning_rate": 0.00023036095248356856,
"loss": 3.2763,
"step": 57300
},
{
"epoch": 6.1726401894306315,
"grad_norm": 0.7094807028770447,
"learning_rate": 0.00023003771145350716,
"loss": 3.2687,
"step": 57350
},
{
"epoch": 6.17802174147024,
"grad_norm": 0.6916371583938599,
"learning_rate": 0.00022971447042344572,
"loss": 3.2652,
"step": 57400
},
{
"epoch": 6.183403293509849,
"grad_norm": 0.766798734664917,
"learning_rate": 0.00022939122939338432,
"loss": 3.2899,
"step": 57450
},
{
"epoch": 6.188784845549456,
"grad_norm": 0.7512463927268982,
"learning_rate": 0.0002290679883633229,
"loss": 3.2665,
"step": 57500
},
{
"epoch": 6.194166397589065,
"grad_norm": 0.6769281625747681,
"learning_rate": 0.00022874474733326148,
"loss": 3.2634,
"step": 57550
},
{
"epoch": 6.1995479496286725,
"grad_norm": 0.6847677826881409,
"learning_rate": 0.00022842150630320008,
"loss": 3.2695,
"step": 57600
},
{
"epoch": 6.204929501668281,
"grad_norm": 0.7338321208953857,
"learning_rate": 0.00022809826527313864,
"loss": 3.2808,
"step": 57650
},
{
"epoch": 6.21031105370789,
"grad_norm": 0.7830347418785095,
"learning_rate": 0.0002277750242430772,
"loss": 3.255,
"step": 57700
},
{
"epoch": 6.215692605747497,
"grad_norm": 0.7542902827262878,
"learning_rate": 0.00022745178321301583,
"loss": 3.2679,
"step": 57750
},
{
"epoch": 6.221074157787106,
"grad_norm": 0.7709856033325195,
"learning_rate": 0.0002271285421829544,
"loss": 3.2809,
"step": 57800
},
{
"epoch": 6.226455709826714,
"grad_norm": 0.7455905675888062,
"learning_rate": 0.00022680530115289297,
"loss": 3.2939,
"step": 57850
},
{
"epoch": 6.231837261866322,
"grad_norm": 0.7592281699180603,
"learning_rate": 0.0002264820601228316,
"loss": 3.2679,
"step": 57900
},
{
"epoch": 6.237218813905931,
"grad_norm": 0.7581020593643188,
"learning_rate": 0.00022615881909277016,
"loss": 3.2665,
"step": 57950
},
{
"epoch": 6.242600365945538,
"grad_norm": 0.7076690196990967,
"learning_rate": 0.00022583557806270875,
"loss": 3.2705,
"step": 58000
},
{
"epoch": 6.242600365945538,
"eval_accuracy": 0.3839192204894139,
"eval_loss": 3.3843679428100586,
"eval_runtime": 184.5165,
"eval_samples_per_second": 97.612,
"eval_steps_per_second": 6.102,
"step": 58000
},
{
"epoch": 6.247981917985147,
"grad_norm": 0.755380392074585,
"learning_rate": 0.00022551233703264732,
"loss": 3.2734,
"step": 58050
},
{
"epoch": 6.253363470024755,
"grad_norm": 0.7202139496803284,
"learning_rate": 0.0002251890960025859,
"loss": 3.2963,
"step": 58100
},
{
"epoch": 6.258745022064363,
"grad_norm": 0.7247757911682129,
"learning_rate": 0.0002248658549725245,
"loss": 3.28,
"step": 58150
},
{
"epoch": 6.264126574103972,
"grad_norm": 0.7414571046829224,
"learning_rate": 0.00022454261394246308,
"loss": 3.2758,
"step": 58200
},
{
"epoch": 6.26950812614358,
"grad_norm": 0.6735288500785828,
"learning_rate": 0.00022421937291240164,
"loss": 3.2868,
"step": 58250
},
{
"epoch": 6.274889678183188,
"grad_norm": 0.7054392695426941,
"learning_rate": 0.00022389613188234027,
"loss": 3.2836,
"step": 58300
},
{
"epoch": 6.280271230222796,
"grad_norm": 0.710004985332489,
"learning_rate": 0.00022357289085227883,
"loss": 3.2973,
"step": 58350
},
{
"epoch": 6.285652782262405,
"grad_norm": 0.7532511949539185,
"learning_rate": 0.0002232496498222174,
"loss": 3.2859,
"step": 58400
},
{
"epoch": 6.2910343343020125,
"grad_norm": 0.7729986310005188,
"learning_rate": 0.000222926408792156,
"loss": 3.2755,
"step": 58450
},
{
"epoch": 6.296415886341621,
"grad_norm": 0.7198432683944702,
"learning_rate": 0.0002226031677620946,
"loss": 3.2798,
"step": 58500
},
{
"epoch": 6.301797438381229,
"grad_norm": 0.730705201625824,
"learning_rate": 0.00022227992673203316,
"loss": 3.2788,
"step": 58550
},
{
"epoch": 6.307178990420837,
"grad_norm": 0.6701096296310425,
"learning_rate": 0.00022195668570197175,
"loss": 3.2919,
"step": 58600
},
{
"epoch": 6.312560542460446,
"grad_norm": 0.791893720626831,
"learning_rate": 0.00022163344467191032,
"loss": 3.2969,
"step": 58650
},
{
"epoch": 6.3179420945000535,
"grad_norm": 0.7149146199226379,
"learning_rate": 0.00022131020364184891,
"loss": 3.2738,
"step": 58700
},
{
"epoch": 6.323323646539662,
"grad_norm": 0.6918314099311829,
"learning_rate": 0.0002209869626117875,
"loss": 3.2761,
"step": 58750
},
{
"epoch": 6.328705198579271,
"grad_norm": 0.7772107124328613,
"learning_rate": 0.00022066372158172608,
"loss": 3.287,
"step": 58800
},
{
"epoch": 6.334086750618878,
"grad_norm": 0.7206048369407654,
"learning_rate": 0.00022034048055166464,
"loss": 3.2821,
"step": 58850
},
{
"epoch": 6.339468302658487,
"grad_norm": 0.6840312480926514,
"learning_rate": 0.00022001723952160327,
"loss": 3.288,
"step": 58900
},
{
"epoch": 6.344849854698095,
"grad_norm": 0.7471789717674255,
"learning_rate": 0.00021969399849154183,
"loss": 3.2853,
"step": 58950
},
{
"epoch": 6.350231406737703,
"grad_norm": 0.7397593855857849,
"learning_rate": 0.00021937075746148043,
"loss": 3.2824,
"step": 59000
},
{
"epoch": 6.350231406737703,
"eval_accuracy": 0.38456961667587136,
"eval_loss": 3.3814733028411865,
"eval_runtime": 184.1551,
"eval_samples_per_second": 97.803,
"eval_steps_per_second": 6.114,
"step": 59000
},
{
"epoch": 6.355612958777312,
"grad_norm": 0.7210568189620972,
"learning_rate": 0.00021904751643141902,
"loss": 3.2614,
"step": 59050
},
{
"epoch": 6.360994510816919,
"grad_norm": 0.6994883418083191,
"learning_rate": 0.0002187242754013576,
"loss": 3.2758,
"step": 59100
},
{
"epoch": 6.366376062856528,
"grad_norm": 0.7228302359580994,
"learning_rate": 0.00021840103437129619,
"loss": 3.3003,
"step": 59150
},
{
"epoch": 6.371757614896136,
"grad_norm": 0.7233319282531738,
"learning_rate": 0.00021807779334123475,
"loss": 3.2775,
"step": 59200
},
{
"epoch": 6.377139166935744,
"grad_norm": 0.7645636796951294,
"learning_rate": 0.00021775455231117335,
"loss": 3.2802,
"step": 59250
},
{
"epoch": 6.382520718975353,
"grad_norm": 0.7440745234489441,
"learning_rate": 0.00021743131128111194,
"loss": 3.2961,
"step": 59300
},
{
"epoch": 6.387902271014961,
"grad_norm": 0.762554943561554,
"learning_rate": 0.00021711453507165172,
"loss": 3.2985,
"step": 59350
},
{
"epoch": 6.393283823054569,
"grad_norm": 0.7303590774536133,
"learning_rate": 0.00021679129404159034,
"loss": 3.2798,
"step": 59400
},
{
"epoch": 6.398665375094177,
"grad_norm": 0.7145769000053406,
"learning_rate": 0.0002164680530115289,
"loss": 3.2863,
"step": 59450
},
{
"epoch": 6.404046927133785,
"grad_norm": 0.7159892916679382,
"learning_rate": 0.00021614481198146748,
"loss": 3.2711,
"step": 59500
},
{
"epoch": 6.4094284791733935,
"grad_norm": 0.745442271232605,
"learning_rate": 0.00021582157095140607,
"loss": 3.2881,
"step": 59550
},
{
"epoch": 6.414810031213002,
"grad_norm": 0.7081004977226257,
"learning_rate": 0.00021549832992134467,
"loss": 3.2733,
"step": 59600
},
{
"epoch": 6.42019158325261,
"grad_norm": 0.6986952424049377,
"learning_rate": 0.00021517508889128324,
"loss": 3.2899,
"step": 59650
},
{
"epoch": 6.425573135292218,
"grad_norm": 0.7033957242965698,
"learning_rate": 0.00021485184786122183,
"loss": 3.3002,
"step": 59700
},
{
"epoch": 6.430954687331827,
"grad_norm": 0.7274535298347473,
"learning_rate": 0.0002145286068311604,
"loss": 3.2904,
"step": 59750
},
{
"epoch": 6.4363362393714345,
"grad_norm": 0.7411662936210632,
"learning_rate": 0.00021420536580109902,
"loss": 3.281,
"step": 59800
},
{
"epoch": 6.441717791411043,
"grad_norm": 0.774648129940033,
"learning_rate": 0.0002138821247710376,
"loss": 3.2846,
"step": 59850
},
{
"epoch": 6.447099343450651,
"grad_norm": 0.7479146122932434,
"learning_rate": 0.00021355888374097615,
"loss": 3.3097,
"step": 59900
},
{
"epoch": 6.452480895490259,
"grad_norm": 0.7600090503692627,
"learning_rate": 0.00021323564271091478,
"loss": 3.2781,
"step": 59950
},
{
"epoch": 6.457862447529868,
"grad_norm": 0.723557710647583,
"learning_rate": 0.00021291240168085334,
"loss": 3.2848,
"step": 60000
},
{
"epoch": 6.457862447529868,
"eval_accuracy": 0.3852529346873366,
"eval_loss": 3.373725652694702,
"eval_runtime": 184.4028,
"eval_samples_per_second": 97.672,
"eval_steps_per_second": 6.106,
"step": 60000
},
{
"epoch": 6.4632439995694755,
"grad_norm": 0.7417320609092712,
"learning_rate": 0.0002125891606507919,
"loss": 3.2836,
"step": 60050
},
{
"epoch": 6.468625551609084,
"grad_norm": 0.7455304861068726,
"learning_rate": 0.0002122659196207305,
"loss": 3.2872,
"step": 60100
},
{
"epoch": 6.474007103648693,
"grad_norm": 0.7251283526420593,
"learning_rate": 0.0002119426785906691,
"loss": 3.2821,
"step": 60150
},
{
"epoch": 6.4793886556883,
"grad_norm": 0.70667964220047,
"learning_rate": 0.00021161943756060767,
"loss": 3.2856,
"step": 60200
},
{
"epoch": 6.484770207727909,
"grad_norm": 0.7194522619247437,
"learning_rate": 0.00021129619653054626,
"loss": 3.297,
"step": 60250
},
{
"epoch": 6.490151759767517,
"grad_norm": 0.7400850057601929,
"learning_rate": 0.00021097295550048483,
"loss": 3.3113,
"step": 60300
},
{
"epoch": 6.495533311807125,
"grad_norm": 0.7215319871902466,
"learning_rate": 0.00021064971447042343,
"loss": 3.2953,
"step": 60350
},
{
"epoch": 6.500914863846734,
"grad_norm": 0.7370033860206604,
"learning_rate": 0.00021032647344036202,
"loss": 3.2912,
"step": 60400
},
{
"epoch": 6.506296415886341,
"grad_norm": 0.7307314872741699,
"learning_rate": 0.0002100032324103006,
"loss": 3.2959,
"step": 60450
},
{
"epoch": 6.51167796792595,
"grad_norm": 0.7162298560142517,
"learning_rate": 0.00020967999138023916,
"loss": 3.2938,
"step": 60500
},
{
"epoch": 6.517059519965558,
"grad_norm": 0.7373604774475098,
"learning_rate": 0.00020935675035017778,
"loss": 3.2815,
"step": 60550
},
{
"epoch": 6.522441072005166,
"grad_norm": 0.7468721866607666,
"learning_rate": 0.00020903350932011634,
"loss": 3.2863,
"step": 60600
},
{
"epoch": 6.5278226240447745,
"grad_norm": 0.7195272445678711,
"learning_rate": 0.0002087102682900549,
"loss": 3.2734,
"step": 60650
},
{
"epoch": 6.533204176084383,
"grad_norm": 0.7415298223495483,
"learning_rate": 0.00020838702725999353,
"loss": 3.2863,
"step": 60700
},
{
"epoch": 6.538585728123991,
"grad_norm": 0.7255674004554749,
"learning_rate": 0.0002080637862299321,
"loss": 3.2876,
"step": 60750
},
{
"epoch": 6.543967280163599,
"grad_norm": 0.7130120396614075,
"learning_rate": 0.0002077405451998707,
"loss": 3.2908,
"step": 60800
},
{
"epoch": 6.549348832203208,
"grad_norm": 0.7552233934402466,
"learning_rate": 0.00020741730416980926,
"loss": 3.2829,
"step": 60850
},
{
"epoch": 6.5547303842428155,
"grad_norm": 0.6897677779197693,
"learning_rate": 0.00020709406313974786,
"loss": 3.2917,
"step": 60900
},
{
"epoch": 6.560111936282424,
"grad_norm": 0.933058500289917,
"learning_rate": 0.00020677082210968645,
"loss": 3.289,
"step": 60950
},
{
"epoch": 6.565493488322032,
"grad_norm": 0.7120639681816101,
"learning_rate": 0.00020644758107962502,
"loss": 3.2926,
"step": 61000
},
{
"epoch": 6.565493488322032,
"eval_accuracy": 0.385314214916064,
"eval_loss": 3.3742878437042236,
"eval_runtime": 186.1309,
"eval_samples_per_second": 96.765,
"eval_steps_per_second": 6.05,
"step": 61000
},
{
"epoch": 6.57087504036164,
"grad_norm": 0.7277801036834717,
"learning_rate": 0.0002061243400495636,
"loss": 3.2919,
"step": 61050
},
{
"epoch": 6.576256592401249,
"grad_norm": 0.8280491828918457,
"learning_rate": 0.0002058010990195022,
"loss": 3.2829,
"step": 61100
},
{
"epoch": 6.5816381444408565,
"grad_norm": 0.7976410984992981,
"learning_rate": 0.00020547785798944078,
"loss": 3.2889,
"step": 61150
},
{
"epoch": 6.587019696480465,
"grad_norm": 0.7580078840255737,
"learning_rate": 0.00020515461695937934,
"loss": 3.2738,
"step": 61200
},
{
"epoch": 6.592401248520073,
"grad_norm": 0.8833340406417847,
"learning_rate": 0.00020483137592931797,
"loss": 3.2927,
"step": 61250
},
{
"epoch": 6.597782800559681,
"grad_norm": 0.7139707803726196,
"learning_rate": 0.00020450813489925653,
"loss": 3.2959,
"step": 61300
},
{
"epoch": 6.60316435259929,
"grad_norm": 0.7467029094696045,
"learning_rate": 0.0002041848938691951,
"loss": 3.2858,
"step": 61350
},
{
"epoch": 6.608545904638898,
"grad_norm": 0.7808436751365662,
"learning_rate": 0.0002038616528391337,
"loss": 3.2803,
"step": 61400
},
{
"epoch": 6.613927456678506,
"grad_norm": 0.7623021006584167,
"learning_rate": 0.00020353841180907226,
"loss": 3.3034,
"step": 61450
},
{
"epoch": 6.619309008718115,
"grad_norm": 0.7227434515953064,
"learning_rate": 0.00020321517077901086,
"loss": 3.3085,
"step": 61500
},
{
"epoch": 6.624690560757722,
"grad_norm": 0.7426319718360901,
"learning_rate": 0.00020289192974894945,
"loss": 3.2961,
"step": 61550
},
{
"epoch": 6.630072112797331,
"grad_norm": 0.7512562870979309,
"learning_rate": 0.00020256868871888802,
"loss": 3.2782,
"step": 61600
},
{
"epoch": 6.635453664836939,
"grad_norm": 0.7073656916618347,
"learning_rate": 0.00020224544768882664,
"loss": 3.28,
"step": 61650
},
{
"epoch": 6.640835216876547,
"grad_norm": 0.7550458908081055,
"learning_rate": 0.0002019222066587652,
"loss": 3.2992,
"step": 61700
},
{
"epoch": 6.6462167689161555,
"grad_norm": 0.7247327566146851,
"learning_rate": 0.00020159896562870378,
"loss": 3.2897,
"step": 61750
},
{
"epoch": 6.651598320955763,
"grad_norm": 0.7218628525733948,
"learning_rate": 0.00020127572459864237,
"loss": 3.2825,
"step": 61800
},
{
"epoch": 6.656979872995372,
"grad_norm": 0.7239559292793274,
"learning_rate": 0.00020095248356858097,
"loss": 3.2896,
"step": 61850
},
{
"epoch": 6.66236142503498,
"grad_norm": 0.7311209440231323,
"learning_rate": 0.00020062924253851953,
"loss": 3.2835,
"step": 61900
},
{
"epoch": 6.667742977074588,
"grad_norm": 0.6923702359199524,
"learning_rate": 0.00020030600150845813,
"loss": 3.2869,
"step": 61950
},
{
"epoch": 6.6731245291141965,
"grad_norm": 0.703928530216217,
"learning_rate": 0.0001999827604783967,
"loss": 3.3001,
"step": 62000
},
{
"epoch": 6.6731245291141965,
"eval_accuracy": 0.3858626946937515,
"eval_loss": 3.3671865463256836,
"eval_runtime": 185.6838,
"eval_samples_per_second": 96.998,
"eval_steps_per_second": 6.064,
"step": 62000
},
{
"epoch": 6.678506081153805,
"grad_norm": 0.7426213026046753,
"learning_rate": 0.0001996595194483353,
"loss": 3.3055,
"step": 62050
},
{
"epoch": 6.683887633193413,
"grad_norm": 0.7367209196090698,
"learning_rate": 0.00019933627841827389,
"loss": 3.2921,
"step": 62100
},
{
"epoch": 6.689269185233021,
"grad_norm": 0.7438285946846008,
"learning_rate": 0.00019901303738821245,
"loss": 3.2854,
"step": 62150
},
{
"epoch": 6.69465073727263,
"grad_norm": 0.750081479549408,
"learning_rate": 0.00019868979635815102,
"loss": 3.2727,
"step": 62200
},
{
"epoch": 6.7000322893122375,
"grad_norm": 0.7430285215377808,
"learning_rate": 0.00019836655532808964,
"loss": 3.2853,
"step": 62250
},
{
"epoch": 6.705413841351846,
"grad_norm": 0.7340359687805176,
"learning_rate": 0.0001980433142980282,
"loss": 3.2936,
"step": 62300
},
{
"epoch": 6.710795393391454,
"grad_norm": 0.7362263798713684,
"learning_rate": 0.00019772007326796678,
"loss": 3.2875,
"step": 62350
},
{
"epoch": 6.716176945431062,
"grad_norm": 0.8167773485183716,
"learning_rate": 0.0001973968322379054,
"loss": 3.2949,
"step": 62400
},
{
"epoch": 6.721558497470671,
"grad_norm": 0.7202277779579163,
"learning_rate": 0.00019707359120784397,
"loss": 3.293,
"step": 62450
},
{
"epoch": 6.7269400495102785,
"grad_norm": 0.6944708228111267,
"learning_rate": 0.00019675035017778253,
"loss": 3.2817,
"step": 62500
},
{
"epoch": 6.732321601549887,
"grad_norm": 0.715620756149292,
"learning_rate": 0.00019642710914772113,
"loss": 3.2855,
"step": 62550
},
{
"epoch": 6.737703153589496,
"grad_norm": 0.7600197792053223,
"learning_rate": 0.00019610386811765972,
"loss": 3.2856,
"step": 62600
},
{
"epoch": 6.743084705629103,
"grad_norm": 0.7726536393165588,
"learning_rate": 0.00019578062708759832,
"loss": 3.27,
"step": 62650
},
{
"epoch": 6.748466257668712,
"grad_norm": 0.7212598919868469,
"learning_rate": 0.00019545738605753689,
"loss": 3.2962,
"step": 62700
},
{
"epoch": 6.75384780970832,
"grad_norm": 0.7723383903503418,
"learning_rate": 0.00019513414502747545,
"loss": 3.3036,
"step": 62750
},
{
"epoch": 6.759229361747928,
"grad_norm": 0.7511371970176697,
"learning_rate": 0.00019481090399741408,
"loss": 3.2864,
"step": 62800
},
{
"epoch": 6.7646109137875365,
"grad_norm": 0.7517921924591064,
"learning_rate": 0.00019448766296735264,
"loss": 3.3052,
"step": 62850
},
{
"epoch": 6.769992465827144,
"grad_norm": 0.7510790824890137,
"learning_rate": 0.0001941644219372912,
"loss": 3.2835,
"step": 62900
},
{
"epoch": 6.775374017866753,
"grad_norm": 0.7443214058876038,
"learning_rate": 0.00019384118090722983,
"loss": 3.299,
"step": 62950
},
{
"epoch": 6.780755569906361,
"grad_norm": 0.7521921992301941,
"learning_rate": 0.0001935179398771684,
"loss": 3.285,
"step": 63000
},
{
"epoch": 6.780755569906361,
"eval_accuracy": 0.3861808303492723,
"eval_loss": 3.3636913299560547,
"eval_runtime": 185.9174,
"eval_samples_per_second": 96.876,
"eval_steps_per_second": 6.056,
"step": 63000
},
{
"epoch": 6.786137121945969,
"grad_norm": 0.7525462508201599,
"learning_rate": 0.00019319469884710697,
"loss": 3.288,
"step": 63050
},
{
"epoch": 6.7915186739855775,
"grad_norm": 0.7926684021949768,
"learning_rate": 0.00019287145781704556,
"loss": 3.2791,
"step": 63100
},
{
"epoch": 6.796900226025185,
"grad_norm": 0.7578175067901611,
"learning_rate": 0.00019254821678698416,
"loss": 3.2871,
"step": 63150
},
{
"epoch": 6.802281778064794,
"grad_norm": 0.7035473585128784,
"learning_rate": 0.00019222497575692272,
"loss": 3.2867,
"step": 63200
},
{
"epoch": 6.807663330104402,
"grad_norm": 0.7578956484794617,
"learning_rate": 0.00019190819954746253,
"loss": 3.2833,
"step": 63250
},
{
"epoch": 6.813044882144011,
"grad_norm": 0.7775467038154602,
"learning_rate": 0.0001915849585174011,
"loss": 3.3052,
"step": 63300
},
{
"epoch": 6.8184264341836185,
"grad_norm": 0.7119178771972656,
"learning_rate": 0.00019126171748733972,
"loss": 3.3001,
"step": 63350
},
{
"epoch": 6.823807986223227,
"grad_norm": 0.7036906480789185,
"learning_rate": 0.0001909384764572783,
"loss": 3.3091,
"step": 63400
},
{
"epoch": 6.829189538262835,
"grad_norm": 0.7361630201339722,
"learning_rate": 0.00019061523542721688,
"loss": 3.2839,
"step": 63450
},
{
"epoch": 6.834571090302443,
"grad_norm": 0.7481423616409302,
"learning_rate": 0.00019029199439715548,
"loss": 3.2791,
"step": 63500
},
{
"epoch": 6.839952642342052,
"grad_norm": 0.7738127708435059,
"learning_rate": 0.00018996875336709404,
"loss": 3.3003,
"step": 63550
},
{
"epoch": 6.8453341943816595,
"grad_norm": 0.7231500744819641,
"learning_rate": 0.00018964551233703264,
"loss": 3.2839,
"step": 63600
},
{
"epoch": 6.850715746421268,
"grad_norm": 0.803739070892334,
"learning_rate": 0.0001893222713069712,
"loss": 3.2965,
"step": 63650
},
{
"epoch": 6.856097298460876,
"grad_norm": 0.7869791388511658,
"learning_rate": 0.0001889990302769098,
"loss": 3.2808,
"step": 63700
},
{
"epoch": 6.861478850500484,
"grad_norm": 0.7621977925300598,
"learning_rate": 0.0001886757892468484,
"loss": 3.2896,
"step": 63750
},
{
"epoch": 6.866860402540093,
"grad_norm": 0.7234262228012085,
"learning_rate": 0.00018835254821678696,
"loss": 3.2922,
"step": 63800
},
{
"epoch": 6.8722419545797,
"grad_norm": 0.7651438117027283,
"learning_rate": 0.00018802930718672553,
"loss": 3.2865,
"step": 63850
},
{
"epoch": 6.877623506619309,
"grad_norm": 0.752111554145813,
"learning_rate": 0.00018770606615666415,
"loss": 3.2797,
"step": 63900
},
{
"epoch": 6.8830050586589175,
"grad_norm": 0.7573847770690918,
"learning_rate": 0.00018738282512660272,
"loss": 3.2812,
"step": 63950
},
{
"epoch": 6.888386610698525,
"grad_norm": 0.7495539784431458,
"learning_rate": 0.0001870595840965413,
"loss": 3.2812,
"step": 64000
},
{
"epoch": 6.888386610698525,
"eval_accuracy": 0.3865049419136229,
"eval_loss": 3.359797477722168,
"eval_runtime": 185.9009,
"eval_samples_per_second": 96.885,
"eval_steps_per_second": 6.057,
"step": 64000
},
{
"epoch": 6.893768162738134,
"grad_norm": 0.7728069424629211,
"learning_rate": 0.0001867363430664799,
"loss": 3.2643,
"step": 64050
},
{
"epoch": 6.899149714777742,
"grad_norm": 0.7521748542785645,
"learning_rate": 0.00018641310203641848,
"loss": 3.2905,
"step": 64100
},
{
"epoch": 6.90453126681735,
"grad_norm": 0.7777418494224548,
"learning_rate": 0.00018608986100635705,
"loss": 3.2872,
"step": 64150
},
{
"epoch": 6.9099128188569585,
"grad_norm": 0.7546526789665222,
"learning_rate": 0.00018576661997629564,
"loss": 3.2953,
"step": 64200
},
{
"epoch": 6.915294370896566,
"grad_norm": 0.7118765711784363,
"learning_rate": 0.00018544337894623423,
"loss": 3.275,
"step": 64250
},
{
"epoch": 6.920675922936175,
"grad_norm": 0.7455823421478271,
"learning_rate": 0.0001851201379161728,
"loss": 3.2854,
"step": 64300
},
{
"epoch": 6.926057474975783,
"grad_norm": 0.747428834438324,
"learning_rate": 0.0001847968968861114,
"loss": 3.3062,
"step": 64350
},
{
"epoch": 6.931439027015391,
"grad_norm": 0.7414302229881287,
"learning_rate": 0.00018447365585604996,
"loss": 3.2951,
"step": 64400
},
{
"epoch": 6.9368205790549995,
"grad_norm": 0.6974478960037231,
"learning_rate": 0.00018415041482598859,
"loss": 3.2932,
"step": 64450
},
{
"epoch": 6.942202131094608,
"grad_norm": 0.7721847891807556,
"learning_rate": 0.00018382717379592715,
"loss": 3.2928,
"step": 64500
},
{
"epoch": 6.947583683134216,
"grad_norm": 0.7501277923583984,
"learning_rate": 0.00018350393276586572,
"loss": 3.2793,
"step": 64550
},
{
"epoch": 6.952965235173824,
"grad_norm": 0.7129700183868408,
"learning_rate": 0.00018318069173580434,
"loss": 3.2818,
"step": 64600
},
{
"epoch": 6.958346787213433,
"grad_norm": 0.81559157371521,
"learning_rate": 0.0001828574507057429,
"loss": 3.288,
"step": 64650
},
{
"epoch": 6.9637283392530405,
"grad_norm": 0.750301718711853,
"learning_rate": 0.00018253420967568148,
"loss": 3.2954,
"step": 64700
},
{
"epoch": 6.969109891292649,
"grad_norm": 0.7174051403999329,
"learning_rate": 0.00018221096864562007,
"loss": 3.2705,
"step": 64750
},
{
"epoch": 6.974491443332257,
"grad_norm": 0.7266606688499451,
"learning_rate": 0.00018188772761555867,
"loss": 3.2906,
"step": 64800
},
{
"epoch": 6.979872995371865,
"grad_norm": 0.7052478790283203,
"learning_rate": 0.00018156448658549723,
"loss": 3.2925,
"step": 64850
},
{
"epoch": 6.985254547411474,
"grad_norm": 0.8231821656227112,
"learning_rate": 0.00018124124555543583,
"loss": 3.2771,
"step": 64900
},
{
"epoch": 6.990636099451081,
"grad_norm": 0.8024710416793823,
"learning_rate": 0.0001809180045253744,
"loss": 3.3017,
"step": 64950
},
{
"epoch": 6.99601765149069,
"grad_norm": 0.8686951398849487,
"learning_rate": 0.00018059476349531296,
"loss": 3.2788,
"step": 65000
},
{
"epoch": 6.99601765149069,
"eval_accuracy": 0.3870885165740745,
"eval_loss": 3.35601806640625,
"eval_runtime": 185.3686,
"eval_samples_per_second": 97.163,
"eval_steps_per_second": 6.074,
"step": 65000
},
{
"epoch": 7.0013992035302985,
"grad_norm": 0.7666776180267334,
"learning_rate": 0.00018027152246525159,
"loss": 3.2632,
"step": 65050
},
{
"epoch": 7.006780755569906,
"grad_norm": 0.7738795280456543,
"learning_rate": 0.00017994828143519015,
"loss": 3.2048,
"step": 65100
},
{
"epoch": 7.012162307609515,
"grad_norm": 0.7600313425064087,
"learning_rate": 0.00017962504040512872,
"loss": 3.2028,
"step": 65150
},
{
"epoch": 7.017543859649122,
"grad_norm": 0.7462911605834961,
"learning_rate": 0.00017930179937506734,
"loss": 3.1916,
"step": 65200
},
{
"epoch": 7.022925411688731,
"grad_norm": 0.7242733836174011,
"learning_rate": 0.0001789785583450059,
"loss": 3.1975,
"step": 65250
},
{
"epoch": 7.0283069637283395,
"grad_norm": 0.7007710337638855,
"learning_rate": 0.00017865531731494448,
"loss": 3.1913,
"step": 65300
},
{
"epoch": 7.033688515767947,
"grad_norm": 0.7678819298744202,
"learning_rate": 0.00017833207628488307,
"loss": 3.213,
"step": 65350
},
{
"epoch": 7.039070067807556,
"grad_norm": 0.7424229979515076,
"learning_rate": 0.00017800883525482167,
"loss": 3.2039,
"step": 65400
},
{
"epoch": 7.044451619847164,
"grad_norm": 0.7469813823699951,
"learning_rate": 0.00017768559422476026,
"loss": 3.1909,
"step": 65450
},
{
"epoch": 7.049833171886772,
"grad_norm": 0.7923039793968201,
"learning_rate": 0.00017736235319469883,
"loss": 3.205,
"step": 65500
},
{
"epoch": 7.0552147239263805,
"grad_norm": 0.8204380869865417,
"learning_rate": 0.0001770391121646374,
"loss": 3.2063,
"step": 65550
},
{
"epoch": 7.060596275965988,
"grad_norm": 0.7640592455863953,
"learning_rate": 0.00017671587113457602,
"loss": 3.1972,
"step": 65600
},
{
"epoch": 7.065977828005597,
"grad_norm": 0.797031581401825,
"learning_rate": 0.0001763926301045146,
"loss": 3.2047,
"step": 65650
},
{
"epoch": 7.071359380045205,
"grad_norm": 0.7652319669723511,
"learning_rate": 0.00017606938907445315,
"loss": 3.2061,
"step": 65700
},
{
"epoch": 7.076740932084813,
"grad_norm": 0.7750967144966125,
"learning_rate": 0.00017574614804439178,
"loss": 3.2077,
"step": 65750
},
{
"epoch": 7.0821224841244215,
"grad_norm": 0.7342427968978882,
"learning_rate": 0.00017542290701433034,
"loss": 3.2122,
"step": 65800
},
{
"epoch": 7.08750403616403,
"grad_norm": 0.7213613390922546,
"learning_rate": 0.0001750996659842689,
"loss": 3.2194,
"step": 65850
},
{
"epoch": 7.092885588203638,
"grad_norm": 0.7914056181907654,
"learning_rate": 0.0001747764249542075,
"loss": 3.2036,
"step": 65900
},
{
"epoch": 7.098267140243246,
"grad_norm": 0.7841677069664001,
"learning_rate": 0.0001744531839241461,
"loss": 3.2226,
"step": 65950
},
{
"epoch": 7.103648692282855,
"grad_norm": 0.7444742918014526,
"learning_rate": 0.00017412994289408467,
"loss": 3.2324,
"step": 66000
},
{
"epoch": 7.103648692282855,
"eval_accuracy": 0.38705374764997386,
"eval_loss": 3.3600642681121826,
"eval_runtime": 185.3852,
"eval_samples_per_second": 97.154,
"eval_steps_per_second": 6.074,
"step": 66000
},
{
"epoch": 7.109030244322462,
"grad_norm": 0.7269986867904663,
"learning_rate": 0.00017380670186402326,
"loss": 3.222,
"step": 66050
},
{
"epoch": 7.114411796362071,
"grad_norm": 0.7909741401672363,
"learning_rate": 0.00017348346083396183,
"loss": 3.2207,
"step": 66100
},
{
"epoch": 7.119793348401679,
"grad_norm": 0.7954514026641846,
"learning_rate": 0.00017316021980390042,
"loss": 3.2228,
"step": 66150
},
{
"epoch": 7.125174900441287,
"grad_norm": 0.7739808559417725,
"learning_rate": 0.00017283697877383902,
"loss": 3.2292,
"step": 66200
},
{
"epoch": 7.130556452480896,
"grad_norm": 0.7709057927131653,
"learning_rate": 0.0001725137377437776,
"loss": 3.2097,
"step": 66250
},
{
"epoch": 7.135938004520503,
"grad_norm": 0.7626157402992249,
"learning_rate": 0.00017219049671371615,
"loss": 3.2097,
"step": 66300
},
{
"epoch": 7.141319556560112,
"grad_norm": 0.7553810477256775,
"learning_rate": 0.00017186725568365478,
"loss": 3.2228,
"step": 66350
},
{
"epoch": 7.1467011085997205,
"grad_norm": 0.7737429141998291,
"learning_rate": 0.00017154401465359334,
"loss": 3.2253,
"step": 66400
},
{
"epoch": 7.152082660639328,
"grad_norm": 0.828437328338623,
"learning_rate": 0.00017122077362353194,
"loss": 3.2324,
"step": 66450
},
{
"epoch": 7.157464212678937,
"grad_norm": 0.7904108166694641,
"learning_rate": 0.00017089753259347053,
"loss": 3.236,
"step": 66500
},
{
"epoch": 7.162845764718545,
"grad_norm": 0.8120779395103455,
"learning_rate": 0.0001705742915634091,
"loss": 3.2139,
"step": 66550
},
{
"epoch": 7.168227316758153,
"grad_norm": 0.7788743376731873,
"learning_rate": 0.0001702510505333477,
"loss": 3.2075,
"step": 66600
},
{
"epoch": 7.1736088687977615,
"grad_norm": 0.7754313945770264,
"learning_rate": 0.00016992780950328626,
"loss": 3.2088,
"step": 66650
},
{
"epoch": 7.178990420837369,
"grad_norm": 0.7604042291641235,
"learning_rate": 0.00016960456847322486,
"loss": 3.2142,
"step": 66700
},
{
"epoch": 7.184371972876978,
"grad_norm": 0.7660232186317444,
"learning_rate": 0.00016928132744316345,
"loss": 3.2324,
"step": 66750
},
{
"epoch": 7.189753524916586,
"grad_norm": 0.8033111095428467,
"learning_rate": 0.00016895808641310202,
"loss": 3.2294,
"step": 66800
},
{
"epoch": 7.195135076956194,
"grad_norm": 0.7543028593063354,
"learning_rate": 0.0001686348453830406,
"loss": 3.2363,
"step": 66850
},
{
"epoch": 7.2005166289958025,
"grad_norm": 0.7488883137702942,
"learning_rate": 0.0001683116043529792,
"loss": 3.2186,
"step": 66900
},
{
"epoch": 7.205898181035411,
"grad_norm": 0.7356674075126648,
"learning_rate": 0.000167994828143519,
"loss": 3.2357,
"step": 66950
},
{
"epoch": 7.211279733075019,
"grad_norm": 0.8123546838760376,
"learning_rate": 0.00016767158711345758,
"loss": 3.2299,
"step": 67000
},
{
"epoch": 7.211279733075019,
"eval_accuracy": 0.3872414998401173,
"eval_loss": 3.3597795963287354,
"eval_runtime": 185.6336,
"eval_samples_per_second": 97.024,
"eval_steps_per_second": 6.066,
"step": 67000
},
{
"epoch": 7.216661285114627,
"grad_norm": 0.7803553938865662,
"learning_rate": 0.00016734834608339618,
"loss": 3.2275,
"step": 67050
},
{
"epoch": 7.222042837154235,
"grad_norm": 0.7761536240577698,
"learning_rate": 0.00016702510505333475,
"loss": 3.2226,
"step": 67100
},
{
"epoch": 7.2274243891938434,
"grad_norm": 0.7861804962158203,
"learning_rate": 0.00016670186402327334,
"loss": 3.2236,
"step": 67150
},
{
"epoch": 7.232805941233452,
"grad_norm": 0.7782896161079407,
"learning_rate": 0.0001663786229932119,
"loss": 3.2238,
"step": 67200
},
{
"epoch": 7.23818749327306,
"grad_norm": 0.788238525390625,
"learning_rate": 0.00016605538196315053,
"loss": 3.2344,
"step": 67250
},
{
"epoch": 7.243569045312668,
"grad_norm": 0.7603457570075989,
"learning_rate": 0.0001657321409330891,
"loss": 3.2303,
"step": 67300
},
{
"epoch": 7.248950597352277,
"grad_norm": 0.8009949922561646,
"learning_rate": 0.00016540889990302766,
"loss": 3.2343,
"step": 67350
},
{
"epoch": 7.254332149391884,
"grad_norm": 0.8290765285491943,
"learning_rate": 0.00016508565887296629,
"loss": 3.2391,
"step": 67400
},
{
"epoch": 7.259713701431493,
"grad_norm": 0.7603195905685425,
"learning_rate": 0.00016476241784290485,
"loss": 3.2145,
"step": 67450
},
{
"epoch": 7.265095253471101,
"grad_norm": 0.7419003248214722,
"learning_rate": 0.00016443917681284342,
"loss": 3.2315,
"step": 67500
},
{
"epoch": 7.270476805510709,
"grad_norm": 0.7349012494087219,
"learning_rate": 0.00016411593578278202,
"loss": 3.2342,
"step": 67550
},
{
"epoch": 7.275858357550318,
"grad_norm": 0.7769827842712402,
"learning_rate": 0.0001637926947527206,
"loss": 3.2128,
"step": 67600
},
{
"epoch": 7.281239909589925,
"grad_norm": 0.779949426651001,
"learning_rate": 0.00016346945372265918,
"loss": 3.224,
"step": 67650
},
{
"epoch": 7.286621461629534,
"grad_norm": 0.7693045139312744,
"learning_rate": 0.00016314621269259777,
"loss": 3.2385,
"step": 67700
},
{
"epoch": 7.2920030136691425,
"grad_norm": 0.7848260402679443,
"learning_rate": 0.00016282297166253634,
"loss": 3.2205,
"step": 67750
},
{
"epoch": 7.29738456570875,
"grad_norm": 0.7522407174110413,
"learning_rate": 0.00016249973063247494,
"loss": 3.2086,
"step": 67800
},
{
"epoch": 7.302766117748359,
"grad_norm": 0.799242377281189,
"learning_rate": 0.00016217648960241353,
"loss": 3.2311,
"step": 67850
},
{
"epoch": 7.308147669787967,
"grad_norm": 0.7705872654914856,
"learning_rate": 0.0001618532485723521,
"loss": 3.2347,
"step": 67900
},
{
"epoch": 7.313529221827575,
"grad_norm": 0.7869008183479309,
"learning_rate": 0.00016153000754229067,
"loss": 3.2402,
"step": 67950
},
{
"epoch": 7.3189107738671835,
"grad_norm": 0.7905930280685425,
"learning_rate": 0.0001612067665122293,
"loss": 3.2138,
"step": 68000
},
{
"epoch": 7.3189107738671835,
"eval_accuracy": 0.38763482329400567,
"eval_loss": 3.3565242290496826,
"eval_runtime": 185.2952,
"eval_samples_per_second": 97.202,
"eval_steps_per_second": 6.077,
"step": 68000
},
{
"epoch": 7.324292325906791,
"grad_norm": 0.7964014410972595,
"learning_rate": 0.00016088352548216785,
"loss": 3.2405,
"step": 68050
},
{
"epoch": 7.3296738779464,
"grad_norm": 0.779831051826477,
"learning_rate": 0.00016056028445210642,
"loss": 3.2321,
"step": 68100
},
{
"epoch": 7.335055429986008,
"grad_norm": 0.7957444190979004,
"learning_rate": 0.00016023704342204504,
"loss": 3.2332,
"step": 68150
},
{
"epoch": 7.340436982025616,
"grad_norm": 0.7503647208213806,
"learning_rate": 0.0001599138023919836,
"loss": 3.247,
"step": 68200
},
{
"epoch": 7.3458185340652244,
"grad_norm": 0.8236241340637207,
"learning_rate": 0.0001595905613619222,
"loss": 3.2457,
"step": 68250
},
{
"epoch": 7.351200086104833,
"grad_norm": 0.7641984820365906,
"learning_rate": 0.00015926732033186077,
"loss": 3.2195,
"step": 68300
},
{
"epoch": 7.356581638144441,
"grad_norm": 0.7976327538490295,
"learning_rate": 0.00015894407930179934,
"loss": 3.227,
"step": 68350
},
{
"epoch": 7.361963190184049,
"grad_norm": 0.8243280649185181,
"learning_rate": 0.00015862083827173796,
"loss": 3.2295,
"step": 68400
},
{
"epoch": 7.367344742223658,
"grad_norm": 0.7711389660835266,
"learning_rate": 0.00015829759724167653,
"loss": 3.2366,
"step": 68450
},
{
"epoch": 7.372726294263265,
"grad_norm": 0.7629539966583252,
"learning_rate": 0.0001579743562116151,
"loss": 3.2337,
"step": 68500
},
{
"epoch": 7.378107846302874,
"grad_norm": 0.7828649878501892,
"learning_rate": 0.00015765111518155372,
"loss": 3.2379,
"step": 68550
},
{
"epoch": 7.383489398342482,
"grad_norm": 0.7742972373962402,
"learning_rate": 0.0001573278741514923,
"loss": 3.2245,
"step": 68600
},
{
"epoch": 7.38887095038209,
"grad_norm": 0.7311100959777832,
"learning_rate": 0.00015700463312143085,
"loss": 3.2345,
"step": 68650
},
{
"epoch": 7.394252502421699,
"grad_norm": 0.811392605304718,
"learning_rate": 0.00015668139209136945,
"loss": 3.2358,
"step": 68700
},
{
"epoch": 7.399634054461306,
"grad_norm": 0.7674997448921204,
"learning_rate": 0.00015635815106130804,
"loss": 3.2327,
"step": 68750
},
{
"epoch": 7.405015606500915,
"grad_norm": 0.7893558740615845,
"learning_rate": 0.0001560349100312466,
"loss": 3.2267,
"step": 68800
},
{
"epoch": 7.4103971585405235,
"grad_norm": 0.7651101350784302,
"learning_rate": 0.0001557116690011852,
"loss": 3.2281,
"step": 68850
},
{
"epoch": 7.415778710580131,
"grad_norm": 0.8185087442398071,
"learning_rate": 0.00015538842797112377,
"loss": 3.2409,
"step": 68900
},
{
"epoch": 7.42116026261974,
"grad_norm": 0.7831454277038574,
"learning_rate": 0.00015506518694106237,
"loss": 3.2436,
"step": 68950
},
{
"epoch": 7.426541814659347,
"grad_norm": 0.8313812613487244,
"learning_rate": 0.00015474194591100096,
"loss": 3.2307,
"step": 69000
},
{
"epoch": 7.426541814659347,
"eval_accuracy": 0.38814179766854806,
"eval_loss": 3.351215124130249,
"eval_runtime": 185.4234,
"eval_samples_per_second": 97.134,
"eval_steps_per_second": 6.073,
"step": 69000
},
{
"epoch": 7.431923366698956,
"grad_norm": 0.762865424156189,
"learning_rate": 0.00015441870488093953,
"loss": 3.2376,
"step": 69050
},
{
"epoch": 7.4373049187385645,
"grad_norm": 0.7708355188369751,
"learning_rate": 0.0001540954638508781,
"loss": 3.2271,
"step": 69100
},
{
"epoch": 7.442686470778172,
"grad_norm": 0.7354069352149963,
"learning_rate": 0.00015377222282081672,
"loss": 3.2354,
"step": 69150
},
{
"epoch": 7.448068022817781,
"grad_norm": 0.7922617793083191,
"learning_rate": 0.0001534489817907553,
"loss": 3.2525,
"step": 69200
},
{
"epoch": 7.453449574857389,
"grad_norm": 0.7759140133857727,
"learning_rate": 0.00015312574076069388,
"loss": 3.229,
"step": 69250
},
{
"epoch": 7.458831126896997,
"grad_norm": 0.7401615977287292,
"learning_rate": 0.00015280249973063248,
"loss": 3.2231,
"step": 69300
},
{
"epoch": 7.4642126789366054,
"grad_norm": 0.8062000870704651,
"learning_rate": 0.00015247925870057104,
"loss": 3.2304,
"step": 69350
},
{
"epoch": 7.469594230976213,
"grad_norm": 0.7673658132553101,
"learning_rate": 0.00015215601767050964,
"loss": 3.2379,
"step": 69400
},
{
"epoch": 7.474975783015822,
"grad_norm": 0.7671270370483398,
"learning_rate": 0.0001518327766404482,
"loss": 3.2405,
"step": 69450
},
{
"epoch": 7.48035733505543,
"grad_norm": 0.8334165811538696,
"learning_rate": 0.0001515095356103868,
"loss": 3.2467,
"step": 69500
},
{
"epoch": 7.485738887095038,
"grad_norm": 0.7375505566596985,
"learning_rate": 0.0001511862945803254,
"loss": 3.2518,
"step": 69550
},
{
"epoch": 7.491120439134646,
"grad_norm": 0.7733789682388306,
"learning_rate": 0.00015086305355026396,
"loss": 3.2537,
"step": 69600
},
{
"epoch": 7.496501991174255,
"grad_norm": 0.7866163849830627,
"learning_rate": 0.00015053981252020253,
"loss": 3.2355,
"step": 69650
},
{
"epoch": 7.501883543213863,
"grad_norm": 0.7521885633468628,
"learning_rate": 0.00015021657149014115,
"loss": 3.2372,
"step": 69700
},
{
"epoch": 7.507265095253471,
"grad_norm": 0.7642475962638855,
"learning_rate": 0.00014989333046007972,
"loss": 3.234,
"step": 69750
},
{
"epoch": 7.51264664729308,
"grad_norm": 0.8261973261833191,
"learning_rate": 0.00014957008943001832,
"loss": 3.2418,
"step": 69800
},
{
"epoch": 7.518028199332687,
"grad_norm": 0.7892765998840332,
"learning_rate": 0.00014924684839995688,
"loss": 3.2449,
"step": 69850
},
{
"epoch": 7.523409751372296,
"grad_norm": 0.876653790473938,
"learning_rate": 0.00014892360736989548,
"loss": 3.2268,
"step": 69900
},
{
"epoch": 7.528791303411904,
"grad_norm": 0.7362908124923706,
"learning_rate": 0.00014860036633983407,
"loss": 3.2202,
"step": 69950
},
{
"epoch": 7.534172855451512,
"grad_norm": 0.7930489182472229,
"learning_rate": 0.00014827712530977264,
"loss": 3.2237,
"step": 70000
},
{
"epoch": 7.534172855451512,
"eval_accuracy": 0.3885306663540361,
"eval_loss": 3.347409248352051,
"eval_runtime": 184.9947,
"eval_samples_per_second": 97.36,
"eval_steps_per_second": 6.087,
"step": 70000
},
{
"epoch": 7.539554407491121,
"grad_norm": 0.8036817312240601,
"learning_rate": 0.00014795388427971123,
"loss": 3.2342,
"step": 70050
},
{
"epoch": 7.544935959530728,
"grad_norm": 0.7615011930465698,
"learning_rate": 0.0001476306432496498,
"loss": 3.2196,
"step": 70100
},
{
"epoch": 7.550317511570337,
"grad_norm": 0.7539505958557129,
"learning_rate": 0.0001473074022195884,
"loss": 3.2305,
"step": 70150
},
{
"epoch": 7.5556990636099455,
"grad_norm": 0.8157840967178345,
"learning_rate": 0.00014698416118952696,
"loss": 3.2403,
"step": 70200
},
{
"epoch": 7.561080615649553,
"grad_norm": 0.7653469443321228,
"learning_rate": 0.00014666092015946556,
"loss": 3.245,
"step": 70250
},
{
"epoch": 7.566462167689162,
"grad_norm": 0.798906147480011,
"learning_rate": 0.00014633767912940415,
"loss": 3.2421,
"step": 70300
},
{
"epoch": 7.57184371972877,
"grad_norm": 0.7808455228805542,
"learning_rate": 0.00014601443809934272,
"loss": 3.2254,
"step": 70350
},
{
"epoch": 7.577225271768378,
"grad_norm": 0.8109095692634583,
"learning_rate": 0.00014569119706928132,
"loss": 3.2344,
"step": 70400
},
{
"epoch": 7.5826068238079865,
"grad_norm": 0.8093851804733276,
"learning_rate": 0.0001453679560392199,
"loss": 3.2406,
"step": 70450
},
{
"epoch": 7.587988375847594,
"grad_norm": 0.7726173400878906,
"learning_rate": 0.00014504471500915848,
"loss": 3.2277,
"step": 70500
},
{
"epoch": 7.593369927887203,
"grad_norm": 0.7869266271591187,
"learning_rate": 0.00014472147397909707,
"loss": 3.2524,
"step": 70550
},
{
"epoch": 7.598751479926811,
"grad_norm": 0.8031958341598511,
"learning_rate": 0.00014440469776963688,
"loss": 3.2369,
"step": 70600
},
{
"epoch": 7.604133031966419,
"grad_norm": 0.8056939840316772,
"learning_rate": 0.00014408145673957545,
"loss": 3.2352,
"step": 70650
},
{
"epoch": 7.609514584006027,
"grad_norm": 0.8427897095680237,
"learning_rate": 0.00014375821570951404,
"loss": 3.2355,
"step": 70700
},
{
"epoch": 7.614896136045635,
"grad_norm": 0.8212729692459106,
"learning_rate": 0.00014343497467945264,
"loss": 3.2493,
"step": 70750
},
{
"epoch": 7.620277688085244,
"grad_norm": 0.8241793513298035,
"learning_rate": 0.00014311173364939123,
"loss": 3.2543,
"step": 70800
},
{
"epoch": 7.625659240124852,
"grad_norm": 0.7650026679039001,
"learning_rate": 0.0001427884926193298,
"loss": 3.2323,
"step": 70850
},
{
"epoch": 7.63104079216446,
"grad_norm": 0.8155238032341003,
"learning_rate": 0.0001424652515892684,
"loss": 3.242,
"step": 70900
},
{
"epoch": 7.636422344204068,
"grad_norm": 0.7850176692008972,
"learning_rate": 0.000142142010559207,
"loss": 3.2581,
"step": 70950
},
{
"epoch": 7.641803896243677,
"grad_norm": 0.785377562046051,
"learning_rate": 0.00014181876952914555,
"loss": 3.2459,
"step": 71000
},
{
"epoch": 7.641803896243677,
"eval_accuracy": 0.3885870572028118,
"eval_loss": 3.3447868824005127,
"eval_runtime": 185.3202,
"eval_samples_per_second": 97.189,
"eval_steps_per_second": 6.076,
"step": 71000
},
{
"epoch": 7.647185448283285,
"grad_norm": 0.7669951319694519,
"learning_rate": 0.00014149552849908415,
"loss": 3.23,
"step": 71050
},
{
"epoch": 7.652567000322893,
"grad_norm": 0.8189201354980469,
"learning_rate": 0.00014117228746902272,
"loss": 3.244,
"step": 71100
},
{
"epoch": 7.657948552362502,
"grad_norm": 0.7769984602928162,
"learning_rate": 0.0001408490464389613,
"loss": 3.2315,
"step": 71150
},
{
"epoch": 7.663330104402109,
"grad_norm": 0.7977750301361084,
"learning_rate": 0.00014052580540889988,
"loss": 3.2309,
"step": 71200
},
{
"epoch": 7.668711656441718,
"grad_norm": 0.7878773808479309,
"learning_rate": 0.00014020256437883847,
"loss": 3.2351,
"step": 71250
},
{
"epoch": 7.674093208481326,
"grad_norm": 0.7823585867881775,
"learning_rate": 0.00013987932334877707,
"loss": 3.2523,
"step": 71300
},
{
"epoch": 7.679474760520934,
"grad_norm": 0.7985871434211731,
"learning_rate": 0.00013955608231871564,
"loss": 3.2374,
"step": 71350
},
{
"epoch": 7.684856312560543,
"grad_norm": 0.8083027005195618,
"learning_rate": 0.00013923284128865423,
"loss": 3.2564,
"step": 71400
},
{
"epoch": 7.69023786460015,
"grad_norm": 0.8058404922485352,
"learning_rate": 0.00013890960025859283,
"loss": 3.2503,
"step": 71450
},
{
"epoch": 7.695619416639759,
"grad_norm": 0.8511492013931274,
"learning_rate": 0.0001385863592285314,
"loss": 3.2375,
"step": 71500
},
{
"epoch": 7.7010009686793675,
"grad_norm": 0.7996093034744263,
"learning_rate": 0.00013826311819847,
"loss": 3.2436,
"step": 71550
},
{
"epoch": 7.706382520718975,
"grad_norm": 0.8188735246658325,
"learning_rate": 0.00013793987716840858,
"loss": 3.2353,
"step": 71600
},
{
"epoch": 7.711764072758584,
"grad_norm": 0.8392016291618347,
"learning_rate": 0.00013761663613834715,
"loss": 3.2451,
"step": 71650
},
{
"epoch": 7.717145624798192,
"grad_norm": 0.7832477688789368,
"learning_rate": 0.00013729339510828572,
"loss": 3.252,
"step": 71700
},
{
"epoch": 7.7225271768378,
"grad_norm": 0.8292062282562256,
"learning_rate": 0.0001369701540782243,
"loss": 3.2424,
"step": 71750
},
{
"epoch": 7.727908728877408,
"grad_norm": 0.786185085773468,
"learning_rate": 0.0001366469130481629,
"loss": 3.2195,
"step": 71800
},
{
"epoch": 7.733290280917016,
"grad_norm": 0.7632396817207336,
"learning_rate": 0.00013632367201810147,
"loss": 3.2411,
"step": 71850
},
{
"epoch": 7.738671832956625,
"grad_norm": 0.7751836776733398,
"learning_rate": 0.00013600043098804007,
"loss": 3.2415,
"step": 71900
},
{
"epoch": 7.744053384996233,
"grad_norm": 0.7656434774398804,
"learning_rate": 0.00013567718995797866,
"loss": 3.2421,
"step": 71950
},
{
"epoch": 7.749434937035841,
"grad_norm": 0.7917135953903198,
"learning_rate": 0.00013535394892791723,
"loss": 3.2524,
"step": 72000
},
{
"epoch": 7.749434937035841,
"eval_accuracy": 0.3892775463048728,
"eval_loss": 3.3401355743408203,
"eval_runtime": 185.1628,
"eval_samples_per_second": 97.271,
"eval_steps_per_second": 6.081,
"step": 72000
},
{
"epoch": 7.754816489075449,
"grad_norm": 0.801364541053772,
"learning_rate": 0.00013503070789785583,
"loss": 3.2339,
"step": 72050
},
{
"epoch": 7.760198041115058,
"grad_norm": 0.8408780097961426,
"learning_rate": 0.00013470746686779442,
"loss": 3.2441,
"step": 72100
},
{
"epoch": 7.765579593154666,
"grad_norm": 0.8276086449623108,
"learning_rate": 0.000134384225837733,
"loss": 3.2443,
"step": 72150
},
{
"epoch": 7.770961145194274,
"grad_norm": 0.8465146422386169,
"learning_rate": 0.00013406098480767158,
"loss": 3.2322,
"step": 72200
},
{
"epoch": 7.776342697233883,
"grad_norm": 0.8557305335998535,
"learning_rate": 0.00013373774377761015,
"loss": 3.2372,
"step": 72250
},
{
"epoch": 7.78172424927349,
"grad_norm": 0.8262193202972412,
"learning_rate": 0.00013341450274754874,
"loss": 3.2308,
"step": 72300
},
{
"epoch": 7.787105801313099,
"grad_norm": 0.8111556768417358,
"learning_rate": 0.0001330912617174873,
"loss": 3.238,
"step": 72350
},
{
"epoch": 7.792487353352707,
"grad_norm": 0.7550597190856934,
"learning_rate": 0.0001327680206874259,
"loss": 3.24,
"step": 72400
},
{
"epoch": 7.797868905392315,
"grad_norm": 0.7710828185081482,
"learning_rate": 0.0001324447796573645,
"loss": 3.2414,
"step": 72450
},
{
"epoch": 7.803250457431924,
"grad_norm": 0.8323572278022766,
"learning_rate": 0.00013212153862730307,
"loss": 3.2446,
"step": 72500
},
{
"epoch": 7.808632009471531,
"grad_norm": 0.7747483849525452,
"learning_rate": 0.00013179829759724166,
"loss": 3.2418,
"step": 72550
},
{
"epoch": 7.81401356151114,
"grad_norm": 0.8585454821586609,
"learning_rate": 0.00013147505656718026,
"loss": 3.2396,
"step": 72600
},
{
"epoch": 7.819395113550748,
"grad_norm": 0.7940674424171448,
"learning_rate": 0.00013115181553711883,
"loss": 3.235,
"step": 72650
},
{
"epoch": 7.824776665590356,
"grad_norm": 0.8253437280654907,
"learning_rate": 0.00013082857450705742,
"loss": 3.2329,
"step": 72700
},
{
"epoch": 7.830158217629965,
"grad_norm": 0.7802859544754028,
"learning_rate": 0.00013050533347699602,
"loss": 3.2448,
"step": 72750
},
{
"epoch": 7.835539769669572,
"grad_norm": 0.8375236988067627,
"learning_rate": 0.00013018209244693458,
"loss": 3.2488,
"step": 72800
},
{
"epoch": 7.840921321709181,
"grad_norm": 0.7941522002220154,
"learning_rate": 0.00012985885141687318,
"loss": 3.2526,
"step": 72850
},
{
"epoch": 7.846302873748789,
"grad_norm": 0.7875718474388123,
"learning_rate": 0.00012953561038681175,
"loss": 3.2378,
"step": 72900
},
{
"epoch": 7.851684425788397,
"grad_norm": 0.7901372313499451,
"learning_rate": 0.00012921236935675034,
"loss": 3.2463,
"step": 72950
},
{
"epoch": 7.857065977828006,
"grad_norm": 0.7917222380638123,
"learning_rate": 0.0001288891283266889,
"loss": 3.2448,
"step": 73000
},
{
"epoch": 7.857065977828006,
"eval_accuracy": 0.3890915325609344,
"eval_loss": 3.338547468185425,
"eval_runtime": 184.9196,
"eval_samples_per_second": 97.399,
"eval_steps_per_second": 6.089,
"step": 73000
},
{
"epoch": 7.862447529867614,
"grad_norm": 0.7870434522628784,
"learning_rate": 0.0001285658872966275,
"loss": 3.2372,
"step": 73050
},
{
"epoch": 7.867829081907222,
"grad_norm": 0.8214866518974304,
"learning_rate": 0.0001282426462665661,
"loss": 3.2345,
"step": 73100
},
{
"epoch": 7.87321063394683,
"grad_norm": 0.7990794777870178,
"learning_rate": 0.00012791940523650466,
"loss": 3.2531,
"step": 73150
},
{
"epoch": 7.878592185986438,
"grad_norm": 0.7697991132736206,
"learning_rate": 0.00012759616420644326,
"loss": 3.235,
"step": 73200
},
{
"epoch": 7.883973738026047,
"grad_norm": 0.8018404245376587,
"learning_rate": 0.00012727292317638185,
"loss": 3.2478,
"step": 73250
},
{
"epoch": 7.889355290065655,
"grad_norm": 0.7848626971244812,
"learning_rate": 0.00012694968214632045,
"loss": 3.2291,
"step": 73300
},
{
"epoch": 7.894736842105263,
"grad_norm": 0.8082217574119568,
"learning_rate": 0.00012662644111625902,
"loss": 3.2291,
"step": 73350
},
{
"epoch": 7.900118394144871,
"grad_norm": 0.8116781115531921,
"learning_rate": 0.0001263032000861976,
"loss": 3.243,
"step": 73400
},
{
"epoch": 7.90549994618448,
"grad_norm": 0.8129122257232666,
"learning_rate": 0.00012597995905613618,
"loss": 3.2448,
"step": 73450
},
{
"epoch": 7.910881498224088,
"grad_norm": 0.7559465765953064,
"learning_rate": 0.00012565671802607477,
"loss": 3.2353,
"step": 73500
},
{
"epoch": 7.916263050263696,
"grad_norm": 0.8014430403709412,
"learning_rate": 0.00012533347699601334,
"loss": 3.2305,
"step": 73550
},
{
"epoch": 7.921644602303305,
"grad_norm": 0.8275867104530334,
"learning_rate": 0.00012501023596595193,
"loss": 3.2334,
"step": 73600
},
{
"epoch": 7.927026154342912,
"grad_norm": 0.8044885993003845,
"learning_rate": 0.0001246869949358905,
"loss": 3.2502,
"step": 73650
},
{
"epoch": 7.932407706382521,
"grad_norm": 0.7925671339035034,
"learning_rate": 0.0001243637539058291,
"loss": 3.254,
"step": 73700
},
{
"epoch": 7.937789258422129,
"grad_norm": 0.7803763747215271,
"learning_rate": 0.0001240405128757677,
"loss": 3.2513,
"step": 73750
},
{
"epoch": 7.943170810461737,
"grad_norm": 0.8105255365371704,
"learning_rate": 0.00012371727184570629,
"loss": 3.2366,
"step": 73800
},
{
"epoch": 7.948552362501346,
"grad_norm": 0.8192880153656006,
"learning_rate": 0.00012339403081564485,
"loss": 3.249,
"step": 73850
},
{
"epoch": 7.953933914540953,
"grad_norm": 0.8156821727752686,
"learning_rate": 0.00012307078978558345,
"loss": 3.2375,
"step": 73900
},
{
"epoch": 7.959315466580562,
"grad_norm": 0.759685754776001,
"learning_rate": 0.00012274754875552202,
"loss": 3.2364,
"step": 73950
},
{
"epoch": 7.96469701862017,
"grad_norm": 0.8244112133979797,
"learning_rate": 0.0001224243077254606,
"loss": 3.2453,
"step": 74000
},
{
"epoch": 7.96469701862017,
"eval_accuracy": 0.3897680054404674,
"eval_loss": 3.334453821182251,
"eval_runtime": 185.0218,
"eval_samples_per_second": 97.345,
"eval_steps_per_second": 6.086,
"step": 74000
},
{
"epoch": 7.970078570659778,
"grad_norm": 0.8168348073959351,
"learning_rate": 0.00012210106669539918,
"loss": 3.2585,
"step": 74050
},
{
"epoch": 7.975460122699387,
"grad_norm": 0.8332703113555908,
"learning_rate": 0.00012177782566533779,
"loss": 3.2674,
"step": 74100
},
{
"epoch": 7.980841674738995,
"grad_norm": 0.805260956287384,
"learning_rate": 0.00012145458463527635,
"loss": 3.2452,
"step": 74150
},
{
"epoch": 7.986223226778603,
"grad_norm": 0.7721812129020691,
"learning_rate": 0.00012113134360521495,
"loss": 3.2402,
"step": 74200
},
{
"epoch": 7.991604778818211,
"grad_norm": 0.7534312009811401,
"learning_rate": 0.00012080810257515353,
"loss": 3.2406,
"step": 74250
},
{
"epoch": 7.996986330857819,
"grad_norm": 0.8354355096817017,
"learning_rate": 0.00012048486154509212,
"loss": 3.2524,
"step": 74300
},
{
"epoch": 8.002367882897428,
"grad_norm": 0.7531010508537292,
"learning_rate": 0.00012016162051503069,
"loss": 3.2025,
"step": 74350
},
{
"epoch": 8.007749434937036,
"grad_norm": 0.7959233522415161,
"learning_rate": 0.00011983837948496929,
"loss": 3.1688,
"step": 74400
},
{
"epoch": 8.013130986976645,
"grad_norm": 0.8078753352165222,
"learning_rate": 0.00011951513845490787,
"loss": 3.1317,
"step": 74450
},
{
"epoch": 8.018512539016251,
"grad_norm": 0.8000760078430176,
"learning_rate": 0.00011919189742484645,
"loss": 3.1572,
"step": 74500
},
{
"epoch": 8.02389409105586,
"grad_norm": 0.8154542446136475,
"learning_rate": 0.00011886865639478503,
"loss": 3.1778,
"step": 74550
},
{
"epoch": 8.029275643095469,
"grad_norm": 0.787812352180481,
"learning_rate": 0.00011855188018532485,
"loss": 3.1613,
"step": 74600
},
{
"epoch": 8.034657195135077,
"grad_norm": 0.7934473156929016,
"learning_rate": 0.00011822863915526343,
"loss": 3.1818,
"step": 74650
},
{
"epoch": 8.040038747174686,
"grad_norm": 0.7993143796920776,
"learning_rate": 0.00011790539812520201,
"loss": 3.1479,
"step": 74700
},
{
"epoch": 8.045420299214294,
"grad_norm": 0.8254367709159851,
"learning_rate": 0.00011758215709514061,
"loss": 3.1685,
"step": 74750
},
{
"epoch": 8.050801851253901,
"grad_norm": 0.858529269695282,
"learning_rate": 0.00011725891606507917,
"loss": 3.1728,
"step": 74800
},
{
"epoch": 8.05618340329351,
"grad_norm": 1.1026115417480469,
"learning_rate": 0.00011693567503501777,
"loss": 3.1755,
"step": 74850
},
{
"epoch": 8.061564955333118,
"grad_norm": 0.7949498891830444,
"learning_rate": 0.00011661243400495635,
"loss": 3.1708,
"step": 74900
},
{
"epoch": 8.066946507372727,
"grad_norm": 0.7937607765197754,
"learning_rate": 0.00011628919297489493,
"loss": 3.1623,
"step": 74950
},
{
"epoch": 8.072328059412335,
"grad_norm": 0.852537214756012,
"learning_rate": 0.00011596595194483351,
"loss": 3.1701,
"step": 75000
},
{
"epoch": 8.072328059412335,
"eval_accuracy": 0.3898147261822276,
"eval_loss": 3.3382134437561035,
"eval_runtime": 184.9993,
"eval_samples_per_second": 97.357,
"eval_steps_per_second": 6.087,
"step": 75000
},
{
"epoch": 8.077709611451942,
"grad_norm": 0.8193413019180298,
"learning_rate": 0.00011564271091477211,
"loss": 3.174,
"step": 75050
},
{
"epoch": 8.08309116349155,
"grad_norm": 0.8629480004310608,
"learning_rate": 0.0001153194698847107,
"loss": 3.173,
"step": 75100
},
{
"epoch": 8.088472715531159,
"grad_norm": 0.8033722043037415,
"learning_rate": 0.00011499622885464927,
"loss": 3.1637,
"step": 75150
},
{
"epoch": 8.093854267570768,
"grad_norm": 0.8152185082435608,
"learning_rate": 0.00011467298782458786,
"loss": 3.1754,
"step": 75200
},
{
"epoch": 8.099235819610376,
"grad_norm": 0.8210130929946899,
"learning_rate": 0.00011434974679452645,
"loss": 3.173,
"step": 75250
},
{
"epoch": 8.104617371649983,
"grad_norm": 0.7966681718826294,
"learning_rate": 0.00011402650576446503,
"loss": 3.1861,
"step": 75300
},
{
"epoch": 8.109998923689592,
"grad_norm": 0.8028509616851807,
"learning_rate": 0.00011370326473440361,
"loss": 3.1731,
"step": 75350
},
{
"epoch": 8.1153804757292,
"grad_norm": 0.8054764866828918,
"learning_rate": 0.0001133800237043422,
"loss": 3.1566,
"step": 75400
},
{
"epoch": 8.120762027768809,
"grad_norm": 0.8137140274047852,
"learning_rate": 0.00011305678267428077,
"loss": 3.1735,
"step": 75450
},
{
"epoch": 8.126143579808417,
"grad_norm": 0.8169977068901062,
"learning_rate": 0.00011273354164421936,
"loss": 3.1752,
"step": 75500
},
{
"epoch": 8.131525131848026,
"grad_norm": 0.7922982573509216,
"learning_rate": 0.00011241030061415795,
"loss": 3.1688,
"step": 75550
},
{
"epoch": 8.136906683887632,
"grad_norm": 0.8188258409500122,
"learning_rate": 0.00011208705958409654,
"loss": 3.1865,
"step": 75600
},
{
"epoch": 8.142288235927241,
"grad_norm": 0.8402687311172485,
"learning_rate": 0.00011176381855403511,
"loss": 3.1962,
"step": 75650
},
{
"epoch": 8.14766978796685,
"grad_norm": 0.8304385542869568,
"learning_rate": 0.0001114405775239737,
"loss": 3.1542,
"step": 75700
},
{
"epoch": 8.153051340006458,
"grad_norm": 0.8148844242095947,
"learning_rate": 0.00011111733649391228,
"loss": 3.1785,
"step": 75750
},
{
"epoch": 8.158432892046067,
"grad_norm": 0.8117761611938477,
"learning_rate": 0.00011079409546385086,
"loss": 3.1802,
"step": 75800
},
{
"epoch": 8.163814444085673,
"grad_norm": 0.821329653263092,
"learning_rate": 0.00011047731925439068,
"loss": 3.1793,
"step": 75850
},
{
"epoch": 8.169195996125282,
"grad_norm": 0.8583831191062927,
"learning_rate": 0.00011015407822432928,
"loss": 3.1839,
"step": 75900
},
{
"epoch": 8.17457754816489,
"grad_norm": 0.8019603490829468,
"learning_rate": 0.00010983083719426785,
"loss": 3.1867,
"step": 75950
},
{
"epoch": 8.1799591002045,
"grad_norm": 0.8235881328582764,
"learning_rate": 0.00010950759616420644,
"loss": 3.1943,
"step": 76000
},
{
"epoch": 8.1799591002045,
"eval_accuracy": 0.38993489627615047,
"eval_loss": 3.338373899459839,
"eval_runtime": 186.0672,
"eval_samples_per_second": 96.798,
"eval_steps_per_second": 6.052,
"step": 76000
},
{
"epoch": 8.185340652244108,
"grad_norm": 0.83317631483078,
"learning_rate": 0.00010918435513414502,
"loss": 3.1712,
"step": 76050
},
{
"epoch": 8.190722204283716,
"grad_norm": 0.758842408657074,
"learning_rate": 0.00010886111410408359,
"loss": 3.1701,
"step": 76100
},
{
"epoch": 8.196103756323323,
"grad_norm": 0.803379476070404,
"learning_rate": 0.00010853787307402218,
"loss": 3.1913,
"step": 76150
},
{
"epoch": 8.201485308362932,
"grad_norm": 0.8195414543151855,
"learning_rate": 0.00010821463204396078,
"loss": 3.1836,
"step": 76200
},
{
"epoch": 8.20686686040254,
"grad_norm": 0.8033959865570068,
"learning_rate": 0.00010789139101389935,
"loss": 3.1796,
"step": 76250
},
{
"epoch": 8.212248412442149,
"grad_norm": 0.7738995552062988,
"learning_rate": 0.00010756814998383794,
"loss": 3.1808,
"step": 76300
},
{
"epoch": 8.217629964481757,
"grad_norm": 0.8362164497375488,
"learning_rate": 0.00010724490895377652,
"loss": 3.1813,
"step": 76350
},
{
"epoch": 8.223011516521364,
"grad_norm": 0.8498926162719727,
"learning_rate": 0.00010692166792371512,
"loss": 3.1778,
"step": 76400
},
{
"epoch": 8.228393068560973,
"grad_norm": 0.8118336796760559,
"learning_rate": 0.00010659842689365368,
"loss": 3.202,
"step": 76450
},
{
"epoch": 8.233774620600581,
"grad_norm": 0.8127890229225159,
"learning_rate": 0.00010627518586359228,
"loss": 3.1597,
"step": 76500
},
{
"epoch": 8.23915617264019,
"grad_norm": 0.8220996260643005,
"learning_rate": 0.00010595194483353086,
"loss": 3.1678,
"step": 76550
},
{
"epoch": 8.244537724679798,
"grad_norm": 0.7945106029510498,
"learning_rate": 0.00010562870380346944,
"loss": 3.1966,
"step": 76600
},
{
"epoch": 8.249919276719407,
"grad_norm": 0.8012769818305969,
"learning_rate": 0.00010530546277340802,
"loss": 3.1733,
"step": 76650
},
{
"epoch": 8.255300828759013,
"grad_norm": 0.8267130851745605,
"learning_rate": 0.00010498222174334662,
"loss": 3.1683,
"step": 76700
},
{
"epoch": 8.260682380798622,
"grad_norm": 0.8328855037689209,
"learning_rate": 0.00010465898071328519,
"loss": 3.181,
"step": 76750
},
{
"epoch": 8.26606393283823,
"grad_norm": 0.7733327746391296,
"learning_rate": 0.00010433573968322378,
"loss": 3.1747,
"step": 76800
},
{
"epoch": 8.27144548487784,
"grad_norm": 0.8119755387306213,
"learning_rate": 0.00010401249865316237,
"loss": 3.2023,
"step": 76850
},
{
"epoch": 8.276827036917448,
"grad_norm": 0.8023077845573425,
"learning_rate": 0.00010368925762310096,
"loss": 3.1885,
"step": 76900
},
{
"epoch": 8.282208588957054,
"grad_norm": 0.7848573327064514,
"learning_rate": 0.00010336601659303954,
"loss": 3.2007,
"step": 76950
},
{
"epoch": 8.287590140996663,
"grad_norm": 0.7905171513557434,
"learning_rate": 0.00010304277556297812,
"loss": 3.1906,
"step": 77000
},
{
"epoch": 8.287590140996663,
"eval_accuracy": 0.39015557029130166,
"eval_loss": 3.3344521522521973,
"eval_runtime": 184.7107,
"eval_samples_per_second": 97.509,
"eval_steps_per_second": 6.096,
"step": 77000
},
{
"epoch": 8.292971693036272,
"grad_norm": 0.8187376260757446,
"learning_rate": 0.00010271953453291671,
"loss": 3.1843,
"step": 77050
},
{
"epoch": 8.29835324507588,
"grad_norm": 0.8553321361541748,
"learning_rate": 0.00010239629350285528,
"loss": 3.1856,
"step": 77100
},
{
"epoch": 8.303734797115489,
"grad_norm": 0.7859914302825928,
"learning_rate": 0.00010207305247279387,
"loss": 3.1817,
"step": 77150
},
{
"epoch": 8.309116349155097,
"grad_norm": 0.8525058627128601,
"learning_rate": 0.00010174981144273246,
"loss": 3.1928,
"step": 77200
},
{
"epoch": 8.314497901194704,
"grad_norm": 0.8467106819152832,
"learning_rate": 0.00010142657041267104,
"loss": 3.1828,
"step": 77250
},
{
"epoch": 8.319879453234313,
"grad_norm": 0.854286789894104,
"learning_rate": 0.00010110332938260962,
"loss": 3.1902,
"step": 77300
},
{
"epoch": 8.325261005273921,
"grad_norm": 0.8719087243080139,
"learning_rate": 0.00010078008835254821,
"loss": 3.1861,
"step": 77350
},
{
"epoch": 8.33064255731353,
"grad_norm": 0.8280075788497925,
"learning_rate": 0.0001004568473224868,
"loss": 3.1662,
"step": 77400
},
{
"epoch": 8.336024109353138,
"grad_norm": 0.8436982035636902,
"learning_rate": 0.00010013360629242537,
"loss": 3.1986,
"step": 77450
},
{
"epoch": 8.341405661392745,
"grad_norm": 0.8524004817008972,
"learning_rate": 9.981036526236396e-05,
"loss": 3.1873,
"step": 77500
},
{
"epoch": 8.346787213432354,
"grad_norm": 0.7993379831314087,
"learning_rate": 9.948712423230255e-05,
"loss": 3.1881,
"step": 77550
},
{
"epoch": 8.352168765471962,
"grad_norm": 0.822976291179657,
"learning_rate": 9.916388320224112e-05,
"loss": 3.1826,
"step": 77600
},
{
"epoch": 8.35755031751157,
"grad_norm": 0.8318803310394287,
"learning_rate": 9.884064217217971e-05,
"loss": 3.1908,
"step": 77650
},
{
"epoch": 8.36293186955118,
"grad_norm": 0.8046072125434875,
"learning_rate": 9.851740114211831e-05,
"loss": 3.1683,
"step": 77700
},
{
"epoch": 8.368313421590786,
"grad_norm": 0.8725241422653198,
"learning_rate": 9.819416011205688e-05,
"loss": 3.1804,
"step": 77750
},
{
"epoch": 8.373694973630395,
"grad_norm": 0.8241723775863647,
"learning_rate": 9.787091908199547e-05,
"loss": 3.1809,
"step": 77800
},
{
"epoch": 8.379076525670003,
"grad_norm": 0.8239020109176636,
"learning_rate": 9.754767805193405e-05,
"loss": 3.18,
"step": 77850
},
{
"epoch": 8.384458077709612,
"grad_norm": 0.8390723466873169,
"learning_rate": 9.722443702187265e-05,
"loss": 3.1922,
"step": 77900
},
{
"epoch": 8.38983962974922,
"grad_norm": 0.8563929796218872,
"learning_rate": 9.690119599181121e-05,
"loss": 3.1725,
"step": 77950
},
{
"epoch": 8.395221181788829,
"grad_norm": 0.7796242833137512,
"learning_rate": 9.657795496174981e-05,
"loss": 3.1759,
"step": 78000
},
{
"epoch": 8.395221181788829,
"eval_accuracy": 0.3906213652213623,
"eval_loss": 3.331331729888916,
"eval_runtime": 184.5428,
"eval_samples_per_second": 97.598,
"eval_steps_per_second": 6.102,
"step": 78000
},
{
"epoch": 8.400602733828435,
"grad_norm": 0.8204180598258972,
"learning_rate": 9.625471393168839e-05,
"loss": 3.1812,
"step": 78050
},
{
"epoch": 8.405984285868044,
"grad_norm": 0.7976886034011841,
"learning_rate": 9.593147290162697e-05,
"loss": 3.1918,
"step": 78100
},
{
"epoch": 8.411365837907653,
"grad_norm": 0.8261345624923706,
"learning_rate": 9.560823187156555e-05,
"loss": 3.1952,
"step": 78150
},
{
"epoch": 8.416747389947261,
"grad_norm": 0.8616110682487488,
"learning_rate": 9.528499084150415e-05,
"loss": 3.1779,
"step": 78200
},
{
"epoch": 8.42212894198687,
"grad_norm": 0.7845104932785034,
"learning_rate": 9.496174981144271e-05,
"loss": 3.1962,
"step": 78250
},
{
"epoch": 8.427510494026476,
"grad_norm": 0.873723566532135,
"learning_rate": 9.463850878138131e-05,
"loss": 3.1786,
"step": 78300
},
{
"epoch": 8.432892046066085,
"grad_norm": 0.8098871111869812,
"learning_rate": 9.431526775131989e-05,
"loss": 3.1858,
"step": 78350
},
{
"epoch": 8.438273598105694,
"grad_norm": 0.8407077789306641,
"learning_rate": 9.399202672125848e-05,
"loss": 3.1914,
"step": 78400
},
{
"epoch": 8.443655150145302,
"grad_norm": 0.8190973401069641,
"learning_rate": 9.366878569119705e-05,
"loss": 3.1817,
"step": 78450
},
{
"epoch": 8.44903670218491,
"grad_norm": 0.8429524302482605,
"learning_rate": 9.334554466113565e-05,
"loss": 3.1981,
"step": 78500
},
{
"epoch": 8.45441825422452,
"grad_norm": 0.7990863919258118,
"learning_rate": 9.302230363107424e-05,
"loss": 3.1671,
"step": 78550
},
{
"epoch": 8.459799806264126,
"grad_norm": 0.8443186283111572,
"learning_rate": 9.269906260101281e-05,
"loss": 3.2079,
"step": 78600
},
{
"epoch": 8.465181358303735,
"grad_norm": 0.8222746849060059,
"learning_rate": 9.23758215709514e-05,
"loss": 3.2124,
"step": 78650
},
{
"epoch": 8.470562910343343,
"grad_norm": 0.8287258744239807,
"learning_rate": 9.205258054088998e-05,
"loss": 3.1891,
"step": 78700
},
{
"epoch": 8.475944462382952,
"grad_norm": 0.8466224670410156,
"learning_rate": 9.172933951082856e-05,
"loss": 3.1759,
"step": 78750
},
{
"epoch": 8.48132601442256,
"grad_norm": 0.8561066389083862,
"learning_rate": 9.140609848076715e-05,
"loss": 3.187,
"step": 78800
},
{
"epoch": 8.486707566462167,
"grad_norm": 0.7976627349853516,
"learning_rate": 9.108285745070574e-05,
"loss": 3.1839,
"step": 78850
},
{
"epoch": 8.492089118501776,
"grad_norm": 0.8352649807929993,
"learning_rate": 9.075961642064432e-05,
"loss": 3.1816,
"step": 78900
},
{
"epoch": 8.497470670541384,
"grad_norm": 0.818343460559845,
"learning_rate": 9.04363753905829e-05,
"loss": 3.1888,
"step": 78950
},
{
"epoch": 8.502852222580993,
"grad_norm": 0.8282904624938965,
"learning_rate": 9.011313436052148e-05,
"loss": 3.1915,
"step": 79000
},
{
"epoch": 8.502852222580993,
"eval_accuracy": 0.3909855697013165,
"eval_loss": 3.3280069828033447,
"eval_runtime": 184.8504,
"eval_samples_per_second": 97.436,
"eval_steps_per_second": 6.091,
"step": 79000
},
{
"epoch": 8.508233774620601,
"grad_norm": 0.779547393321991,
"learning_rate": 8.978989333046008e-05,
"loss": 3.2,
"step": 79050
},
{
"epoch": 8.513615326660208,
"grad_norm": 0.8538345098495483,
"learning_rate": 8.946665230039865e-05,
"loss": 3.1964,
"step": 79100
},
{
"epoch": 8.518996878699816,
"grad_norm": 0.7864282727241516,
"learning_rate": 8.914341127033724e-05,
"loss": 3.1979,
"step": 79150
},
{
"epoch": 8.524378430739425,
"grad_norm": 0.8223557472229004,
"learning_rate": 8.882017024027582e-05,
"loss": 3.186,
"step": 79200
},
{
"epoch": 8.529759982779034,
"grad_norm": 0.8408324122428894,
"learning_rate": 8.84969292102144e-05,
"loss": 3.1804,
"step": 79250
},
{
"epoch": 8.535141534818642,
"grad_norm": 0.8488681316375732,
"learning_rate": 8.817368818015298e-05,
"loss": 3.1583,
"step": 79300
},
{
"epoch": 8.54052308685825,
"grad_norm": 0.9019160270690918,
"learning_rate": 8.785044715009158e-05,
"loss": 3.183,
"step": 79350
},
{
"epoch": 8.545904638897857,
"grad_norm": 0.8224218487739563,
"learning_rate": 8.752720612003017e-05,
"loss": 3.1904,
"step": 79400
},
{
"epoch": 8.551286190937466,
"grad_norm": 0.8656252026557922,
"learning_rate": 8.720396508996874e-05,
"loss": 3.1863,
"step": 79450
},
{
"epoch": 8.556667742977075,
"grad_norm": 0.8077045679092407,
"learning_rate": 8.688072405990734e-05,
"loss": 3.1836,
"step": 79500
},
{
"epoch": 8.562049295016683,
"grad_norm": 0.8300409913063049,
"learning_rate": 8.656394785044713e-05,
"loss": 3.1885,
"step": 79550
},
{
"epoch": 8.567430847056292,
"grad_norm": 0.8392258882522583,
"learning_rate": 8.624070682038572e-05,
"loss": 3.1947,
"step": 79600
},
{
"epoch": 8.572812399095898,
"grad_norm": 0.8185868263244629,
"learning_rate": 8.591746579032432e-05,
"loss": 3.1776,
"step": 79650
},
{
"epoch": 8.578193951135507,
"grad_norm": 0.8567453622817993,
"learning_rate": 8.55942247602629e-05,
"loss": 3.1977,
"step": 79700
},
{
"epoch": 8.583575503175116,
"grad_norm": 0.8524957299232483,
"learning_rate": 8.527098373020148e-05,
"loss": 3.2041,
"step": 79750
},
{
"epoch": 8.588957055214724,
"grad_norm": 0.8346071243286133,
"learning_rate": 8.494774270014006e-05,
"loss": 3.1833,
"step": 79800
},
{
"epoch": 8.594338607254333,
"grad_norm": 0.8146092295646667,
"learning_rate": 8.462450167007866e-05,
"loss": 3.2014,
"step": 79850
},
{
"epoch": 8.599720159293941,
"grad_norm": 0.8573397994041443,
"learning_rate": 8.430126064001722e-05,
"loss": 3.1881,
"step": 79900
},
{
"epoch": 8.605101711333548,
"grad_norm": 0.8767163753509521,
"learning_rate": 8.397801960995582e-05,
"loss": 3.1895,
"step": 79950
},
{
"epoch": 8.610483263373157,
"grad_norm": 0.8038421273231506,
"learning_rate": 8.36547785798944e-05,
"loss": 3.1977,
"step": 80000
},
{
"epoch": 8.610483263373157,
"eval_accuracy": 0.3914517992429284,
"eval_loss": 3.3241639137268066,
"eval_runtime": 184.6396,
"eval_samples_per_second": 97.547,
"eval_steps_per_second": 6.098,
"step": 80000
},
{
"epoch": 8.615864815412765,
"grad_norm": 0.8378307819366455,
"learning_rate": 8.333153754983298e-05,
"loss": 3.1845,
"step": 80050
},
{
"epoch": 8.621246367452374,
"grad_norm": 0.846358060836792,
"learning_rate": 8.300829651977156e-05,
"loss": 3.1763,
"step": 80100
},
{
"epoch": 8.626627919491982,
"grad_norm": 0.9070448875427246,
"learning_rate": 8.268505548971016e-05,
"loss": 3.2014,
"step": 80150
},
{
"epoch": 8.632009471531589,
"grad_norm": 0.8281485438346863,
"learning_rate": 8.236181445964875e-05,
"loss": 3.1819,
"step": 80200
},
{
"epoch": 8.637391023571197,
"grad_norm": 0.8226301074028015,
"learning_rate": 8.203857342958732e-05,
"loss": 3.1888,
"step": 80250
},
{
"epoch": 8.642772575610806,
"grad_norm": 0.817070484161377,
"learning_rate": 8.171533239952591e-05,
"loss": 3.1826,
"step": 80300
},
{
"epoch": 8.648154127650415,
"grad_norm": 0.8424062132835388,
"learning_rate": 8.13920913694645e-05,
"loss": 3.1891,
"step": 80350
},
{
"epoch": 8.653535679690023,
"grad_norm": 1.12106192111969,
"learning_rate": 8.106885033940308e-05,
"loss": 3.1823,
"step": 80400
},
{
"epoch": 8.658917231729632,
"grad_norm": 0.7957353591918945,
"learning_rate": 8.074560930934166e-05,
"loss": 3.1792,
"step": 80450
},
{
"epoch": 8.664298783769238,
"grad_norm": 0.8199249505996704,
"learning_rate": 8.042236827928025e-05,
"loss": 3.2027,
"step": 80500
},
{
"epoch": 8.669680335808847,
"grad_norm": 0.8268219232559204,
"learning_rate": 8.009912724921882e-05,
"loss": 3.1962,
"step": 80550
},
{
"epoch": 8.675061887848456,
"grad_norm": 0.8875090479850769,
"learning_rate": 7.977588621915741e-05,
"loss": 3.1878,
"step": 80600
},
{
"epoch": 8.680443439888064,
"grad_norm": 0.8505129218101501,
"learning_rate": 7.9452645189096e-05,
"loss": 3.1794,
"step": 80650
},
{
"epoch": 8.685824991927673,
"grad_norm": 0.8001469969749451,
"learning_rate": 7.91358689796358e-05,
"loss": 3.1733,
"step": 80700
},
{
"epoch": 8.69120654396728,
"grad_norm": 0.7973533868789673,
"learning_rate": 7.88126279495744e-05,
"loss": 3.187,
"step": 80750
},
{
"epoch": 8.696588096006888,
"grad_norm": 0.8615847229957581,
"learning_rate": 7.848938691951298e-05,
"loss": 3.1679,
"step": 80800
},
{
"epoch": 8.701969648046497,
"grad_norm": 0.8752439618110657,
"learning_rate": 7.816614588945156e-05,
"loss": 3.1839,
"step": 80850
},
{
"epoch": 8.707351200086105,
"grad_norm": 0.846650242805481,
"learning_rate": 7.784290485939014e-05,
"loss": 3.1935,
"step": 80900
},
{
"epoch": 8.712732752125714,
"grad_norm": 0.831285297870636,
"learning_rate": 7.751966382932873e-05,
"loss": 3.1917,
"step": 80950
},
{
"epoch": 8.718114304165322,
"grad_norm": 0.8393616080284119,
"learning_rate": 7.719642279926731e-05,
"loss": 3.1806,
"step": 81000
},
{
"epoch": 8.718114304165322,
"eval_accuracy": 0.39170126627335045,
"eval_loss": 3.3225531578063965,
"eval_runtime": 184.5825,
"eval_samples_per_second": 97.577,
"eval_steps_per_second": 6.1,
"step": 81000
},
{
"epoch": 8.723495856204929,
"grad_norm": 0.8248891234397888,
"learning_rate": 7.68731817692059e-05,
"loss": 3.1781,
"step": 81050
},
{
"epoch": 8.728877408244538,
"grad_norm": 0.8192293643951416,
"learning_rate": 7.654994073914448e-05,
"loss": 3.1876,
"step": 81100
},
{
"epoch": 8.734258960284146,
"grad_norm": 0.8089962601661682,
"learning_rate": 7.622669970908307e-05,
"loss": 3.1831,
"step": 81150
},
{
"epoch": 8.739640512323755,
"grad_norm": 0.8610677123069763,
"learning_rate": 7.590345867902164e-05,
"loss": 3.1931,
"step": 81200
},
{
"epoch": 8.745022064363363,
"grad_norm": 0.8550341725349426,
"learning_rate": 7.558021764896023e-05,
"loss": 3.1866,
"step": 81250
},
{
"epoch": 8.75040361640297,
"grad_norm": 0.8591922521591187,
"learning_rate": 7.525697661889883e-05,
"loss": 3.1865,
"step": 81300
},
{
"epoch": 8.755785168442578,
"grad_norm": 0.8985189199447632,
"learning_rate": 7.493373558883741e-05,
"loss": 3.1987,
"step": 81350
},
{
"epoch": 8.761166720482187,
"grad_norm": 0.8182072639465332,
"learning_rate": 7.461049455877599e-05,
"loss": 3.1902,
"step": 81400
},
{
"epoch": 8.766548272521796,
"grad_norm": 0.8501378893852234,
"learning_rate": 7.428725352871457e-05,
"loss": 3.1873,
"step": 81450
},
{
"epoch": 8.771929824561404,
"grad_norm": 0.862420916557312,
"learning_rate": 7.396401249865315e-05,
"loss": 3.1824,
"step": 81500
},
{
"epoch": 8.777311376601011,
"grad_norm": 0.8630905151367188,
"learning_rate": 7.364077146859173e-05,
"loss": 3.194,
"step": 81550
},
{
"epoch": 8.78269292864062,
"grad_norm": 0.838539719581604,
"learning_rate": 7.331753043853033e-05,
"loss": 3.1888,
"step": 81600
},
{
"epoch": 8.788074480680228,
"grad_norm": 0.8231281638145447,
"learning_rate": 7.299428940846891e-05,
"loss": 3.1906,
"step": 81650
},
{
"epoch": 8.793456032719837,
"grad_norm": 0.8844528198242188,
"learning_rate": 7.267104837840749e-05,
"loss": 3.1923,
"step": 81700
},
{
"epoch": 8.798837584759445,
"grad_norm": 0.8540237545967102,
"learning_rate": 7.234780734834607e-05,
"loss": 3.1919,
"step": 81750
},
{
"epoch": 8.804219136799054,
"grad_norm": 0.9054052829742432,
"learning_rate": 7.202456631828465e-05,
"loss": 3.1701,
"step": 81800
},
{
"epoch": 8.80960068883866,
"grad_norm": 0.8349437117576599,
"learning_rate": 7.170132528822325e-05,
"loss": 3.1747,
"step": 81850
},
{
"epoch": 8.814982240878269,
"grad_norm": 0.8228332996368408,
"learning_rate": 7.137808425816183e-05,
"loss": 3.1834,
"step": 81900
},
{
"epoch": 8.820363792917878,
"grad_norm": 0.792899489402771,
"learning_rate": 7.105484322810041e-05,
"loss": 3.1799,
"step": 81950
},
{
"epoch": 8.825745344957486,
"grad_norm": 0.8734421730041504,
"learning_rate": 7.073160219803899e-05,
"loss": 3.2022,
"step": 82000
},
{
"epoch": 8.825745344957486,
"eval_accuracy": 0.39199636751665456,
"eval_loss": 3.3189969062805176,
"eval_runtime": 184.5397,
"eval_samples_per_second": 97.6,
"eval_steps_per_second": 6.102,
"step": 82000
},
{
"epoch": 8.831126896997095,
"grad_norm": 0.8499659299850464,
"learning_rate": 7.040836116797757e-05,
"loss": 3.1784,
"step": 82050
},
{
"epoch": 8.836508449036701,
"grad_norm": 0.8386522531509399,
"learning_rate": 7.008512013791617e-05,
"loss": 3.1999,
"step": 82100
},
{
"epoch": 8.84189000107631,
"grad_norm": 0.7806944847106934,
"learning_rate": 6.976187910785475e-05,
"loss": 3.187,
"step": 82150
},
{
"epoch": 8.847271553115919,
"grad_norm": 0.8628476858139038,
"learning_rate": 6.943863807779334e-05,
"loss": 3.186,
"step": 82200
},
{
"epoch": 8.852653105155527,
"grad_norm": 0.8431470394134521,
"learning_rate": 6.911539704773192e-05,
"loss": 3.1932,
"step": 82250
},
{
"epoch": 8.858034657195136,
"grad_norm": 0.8196133971214294,
"learning_rate": 6.87921560176705e-05,
"loss": 3.1826,
"step": 82300
},
{
"epoch": 8.863416209234742,
"grad_norm": 0.7967073321342468,
"learning_rate": 6.846891498760909e-05,
"loss": 3.1747,
"step": 82350
},
{
"epoch": 8.868797761274351,
"grad_norm": 0.7723034620285034,
"learning_rate": 6.814567395754767e-05,
"loss": 3.1753,
"step": 82400
},
{
"epoch": 8.87417931331396,
"grad_norm": 0.7974772453308105,
"learning_rate": 6.782243292748626e-05,
"loss": 3.2064,
"step": 82450
},
{
"epoch": 8.879560865353568,
"grad_norm": 0.8252553939819336,
"learning_rate": 6.749919189742484e-05,
"loss": 3.1969,
"step": 82500
},
{
"epoch": 8.884942417393177,
"grad_norm": 0.8222091197967529,
"learning_rate": 6.717595086736342e-05,
"loss": 3.1896,
"step": 82550
},
{
"epoch": 8.890323969432785,
"grad_norm": 0.8507568836212158,
"learning_rate": 6.6852709837302e-05,
"loss": 3.1828,
"step": 82600
},
{
"epoch": 8.895705521472392,
"grad_norm": 0.851717472076416,
"learning_rate": 6.652946880724059e-05,
"loss": 3.1905,
"step": 82650
},
{
"epoch": 8.901087073512,
"grad_norm": 0.8440418839454651,
"learning_rate": 6.620622777717918e-05,
"loss": 3.2065,
"step": 82700
},
{
"epoch": 8.906468625551609,
"grad_norm": 0.8326337933540344,
"learning_rate": 6.588298674711776e-05,
"loss": 3.177,
"step": 82750
},
{
"epoch": 8.911850177591218,
"grad_norm": 0.8331674933433533,
"learning_rate": 6.555974571705636e-05,
"loss": 3.1813,
"step": 82800
},
{
"epoch": 8.917231729630826,
"grad_norm": 0.8243589401245117,
"learning_rate": 6.523650468699494e-05,
"loss": 3.2014,
"step": 82850
},
{
"epoch": 8.922613281670433,
"grad_norm": 0.8393963575363159,
"learning_rate": 6.491326365693352e-05,
"loss": 3.2035,
"step": 82900
},
{
"epoch": 8.927994833710041,
"grad_norm": 0.8549140095710754,
"learning_rate": 6.45900226268721e-05,
"loss": 3.2009,
"step": 82950
},
{
"epoch": 8.93337638574965,
"grad_norm": 0.8491566181182861,
"learning_rate": 6.426678159681068e-05,
"loss": 3.1851,
"step": 83000
},
{
"epoch": 8.93337638574965,
"eval_accuracy": 0.3924175061098235,
"eval_loss": 3.3162879943847656,
"eval_runtime": 184.6451,
"eval_samples_per_second": 97.544,
"eval_steps_per_second": 6.098,
"step": 83000
},
{
"epoch": 8.938757937789259,
"grad_norm": 0.8390645384788513,
"learning_rate": 6.394354056674928e-05,
"loss": 3.1912,
"step": 83050
},
{
"epoch": 8.944139489828867,
"grad_norm": 0.83738112449646,
"learning_rate": 6.362029953668786e-05,
"loss": 3.1936,
"step": 83100
},
{
"epoch": 8.949521041868476,
"grad_norm": 0.8717303276062012,
"learning_rate": 6.329705850662644e-05,
"loss": 3.1696,
"step": 83150
},
{
"epoch": 8.954902593908082,
"grad_norm": 0.8360523581504822,
"learning_rate": 6.297381747656502e-05,
"loss": 3.1865,
"step": 83200
},
{
"epoch": 8.960284145947691,
"grad_norm": 0.8245041370391846,
"learning_rate": 6.26505764465036e-05,
"loss": 3.1871,
"step": 83250
},
{
"epoch": 8.9656656979873,
"grad_norm": 0.8196189999580383,
"learning_rate": 6.23273354164422e-05,
"loss": 3.2,
"step": 83300
},
{
"epoch": 8.971047250026908,
"grad_norm": 0.819350004196167,
"learning_rate": 6.200409438638078e-05,
"loss": 3.204,
"step": 83350
},
{
"epoch": 8.976428802066517,
"grad_norm": 0.8296644687652588,
"learning_rate": 6.168085335631936e-05,
"loss": 3.1848,
"step": 83400
},
{
"epoch": 8.981810354106123,
"grad_norm": 0.848055362701416,
"learning_rate": 6.135761232625794e-05,
"loss": 3.208,
"step": 83450
},
{
"epoch": 8.987191906145732,
"grad_norm": 0.831109344959259,
"learning_rate": 6.1034371296196526e-05,
"loss": 3.1622,
"step": 83500
},
{
"epoch": 8.99257345818534,
"grad_norm": 0.8709787726402283,
"learning_rate": 6.0711130266135114e-05,
"loss": 3.1891,
"step": 83550
},
{
"epoch": 8.997955010224949,
"grad_norm": 0.8544104695320129,
"learning_rate": 6.0387889236073695e-05,
"loss": 3.2029,
"step": 83600
},
{
"epoch": 9.003336562264558,
"grad_norm": 0.8114558458328247,
"learning_rate": 6.0064648206012276e-05,
"loss": 3.1443,
"step": 83650
},
{
"epoch": 9.008718114304166,
"grad_norm": 0.8070032596588135,
"learning_rate": 5.9741407175950864e-05,
"loss": 3.1218,
"step": 83700
},
{
"epoch": 9.014099666343773,
"grad_norm": 0.8356350064277649,
"learning_rate": 5.9418166145889445e-05,
"loss": 3.137,
"step": 83750
},
{
"epoch": 9.019481218383381,
"grad_norm": 0.850892186164856,
"learning_rate": 5.909492511582803e-05,
"loss": 3.1288,
"step": 83800
},
{
"epoch": 9.02486277042299,
"grad_norm": 0.8735014200210571,
"learning_rate": 5.8771684085766614e-05,
"loss": 3.1357,
"step": 83850
},
{
"epoch": 9.030244322462599,
"grad_norm": 0.8315800428390503,
"learning_rate": 5.8448443055705195e-05,
"loss": 3.1184,
"step": 83900
},
{
"epoch": 9.035625874502207,
"grad_norm": 0.8260695338249207,
"learning_rate": 5.812520202564378e-05,
"loss": 3.1394,
"step": 83950
},
{
"epoch": 9.041007426541814,
"grad_norm": 0.85528963804245,
"learning_rate": 5.7801960995582364e-05,
"loss": 3.1297,
"step": 84000
},
{
"epoch": 9.041007426541814,
"eval_accuracy": 0.39232688960138623,
"eval_loss": 3.318115472793579,
"eval_runtime": 184.4628,
"eval_samples_per_second": 97.64,
"eval_steps_per_second": 6.104,
"step": 84000
},
{
"epoch": 9.046388978581422,
"grad_norm": 0.8493125438690186,
"learning_rate": 5.747871996552096e-05,
"loss": 3.134,
"step": 84050
},
{
"epoch": 9.051770530621031,
"grad_norm": 0.8698522448539734,
"learning_rate": 5.715547893545954e-05,
"loss": 3.1345,
"step": 84100
},
{
"epoch": 9.05715208266064,
"grad_norm": 0.838257372379303,
"learning_rate": 5.683223790539812e-05,
"loss": 3.1236,
"step": 84150
},
{
"epoch": 9.062533634700248,
"grad_norm": 0.8330464363098145,
"learning_rate": 5.650899687533671e-05,
"loss": 3.1374,
"step": 84200
},
{
"epoch": 9.067915186739857,
"grad_norm": 0.8760532736778259,
"learning_rate": 5.618575584527529e-05,
"loss": 3.1379,
"step": 84250
},
{
"epoch": 9.073296738779463,
"grad_norm": 0.8745752573013306,
"learning_rate": 5.586251481521388e-05,
"loss": 3.1318,
"step": 84300
},
{
"epoch": 9.078678290819072,
"grad_norm": 0.8654460906982422,
"learning_rate": 5.553927378515246e-05,
"loss": 3.1433,
"step": 84350
},
{
"epoch": 9.08405984285868,
"grad_norm": 0.8354806900024414,
"learning_rate": 5.521603275509104e-05,
"loss": 3.1573,
"step": 84400
},
{
"epoch": 9.089441394898289,
"grad_norm": 0.826752781867981,
"learning_rate": 5.489279172502963e-05,
"loss": 3.1278,
"step": 84450
},
{
"epoch": 9.094822946937898,
"grad_norm": 0.8850333094596863,
"learning_rate": 5.456955069496821e-05,
"loss": 3.1349,
"step": 84500
},
{
"epoch": 9.100204498977504,
"grad_norm": 0.9492641687393188,
"learning_rate": 5.42463096649068e-05,
"loss": 3.126,
"step": 84550
},
{
"epoch": 9.105586051017113,
"grad_norm": 0.8297050595283508,
"learning_rate": 5.392306863484538e-05,
"loss": 3.1188,
"step": 84600
},
{
"epoch": 9.110967603056721,
"grad_norm": 0.8881982564926147,
"learning_rate": 5.359982760478396e-05,
"loss": 3.1177,
"step": 84650
},
{
"epoch": 9.11634915509633,
"grad_norm": 0.8270519971847534,
"learning_rate": 5.327658657472255e-05,
"loss": 3.1435,
"step": 84700
},
{
"epoch": 9.121730707135939,
"grad_norm": 0.8376482129096985,
"learning_rate": 5.295334554466113e-05,
"loss": 3.1478,
"step": 84750
},
{
"epoch": 9.127112259175545,
"grad_norm": 0.7974318265914917,
"learning_rate": 5.2630104514599716e-05,
"loss": 3.1418,
"step": 84800
},
{
"epoch": 9.132493811215154,
"grad_norm": 0.852908194065094,
"learning_rate": 5.23068634845383e-05,
"loss": 3.1387,
"step": 84850
},
{
"epoch": 9.137875363254762,
"grad_norm": 0.8228384256362915,
"learning_rate": 5.198362245447688e-05,
"loss": 3.143,
"step": 84900
},
{
"epoch": 9.143256915294371,
"grad_norm": 0.8243878483772278,
"learning_rate": 5.166038142441547e-05,
"loss": 3.1274,
"step": 84950
},
{
"epoch": 9.14863846733398,
"grad_norm": 0.8277369737625122,
"learning_rate": 5.1337140394354054e-05,
"loss": 3.1309,
"step": 85000
},
{
"epoch": 9.14863846733398,
"eval_accuracy": 0.3925001909574503,
"eval_loss": 3.3175320625305176,
"eval_runtime": 184.4687,
"eval_samples_per_second": 97.637,
"eval_steps_per_second": 6.104,
"step": 85000
},
{
"epoch": 9.154020019373588,
"grad_norm": 0.8655103445053101,
"learning_rate": 5.101389936429264e-05,
"loss": 3.1261,
"step": 85050
},
{
"epoch": 9.159401571413195,
"grad_norm": 0.8685607314109802,
"learning_rate": 5.069065833423122e-05,
"loss": 3.1284,
"step": 85100
},
{
"epoch": 9.164783123452803,
"grad_norm": 0.8760268092155457,
"learning_rate": 5.0367417304169804e-05,
"loss": 3.1325,
"step": 85150
},
{
"epoch": 9.170164675492412,
"grad_norm": 0.8456857204437256,
"learning_rate": 5.004417627410839e-05,
"loss": 3.1454,
"step": 85200
},
{
"epoch": 9.17554622753202,
"grad_norm": 0.8488378524780273,
"learning_rate": 4.972093524404697e-05,
"loss": 3.1419,
"step": 85250
},
{
"epoch": 9.180927779571629,
"grad_norm": 0.8161391615867615,
"learning_rate": 4.939769421398556e-05,
"loss": 3.1268,
"step": 85300
},
{
"epoch": 9.186309331611236,
"grad_norm": 0.831362247467041,
"learning_rate": 4.907445318392414e-05,
"loss": 3.1351,
"step": 85350
},
{
"epoch": 9.191690883650844,
"grad_norm": 0.8824942111968994,
"learning_rate": 4.875121215386272e-05,
"loss": 3.1441,
"step": 85400
},
{
"epoch": 9.197072435690453,
"grad_norm": 0.8387179970741272,
"learning_rate": 4.842797112380131e-05,
"loss": 3.1401,
"step": 85450
},
{
"epoch": 9.202453987730062,
"grad_norm": 0.8176819682121277,
"learning_rate": 4.810473009373989e-05,
"loss": 3.1501,
"step": 85500
},
{
"epoch": 9.20783553976967,
"grad_norm": 0.8273627161979675,
"learning_rate": 4.778148906367848e-05,
"loss": 3.1534,
"step": 85550
},
{
"epoch": 9.213217091809279,
"grad_norm": 0.8555848598480225,
"learning_rate": 4.745824803361706e-05,
"loss": 3.1264,
"step": 85600
},
{
"epoch": 9.218598643848885,
"grad_norm": 0.8379252552986145,
"learning_rate": 4.713500700355564e-05,
"loss": 3.1297,
"step": 85650
},
{
"epoch": 9.223980195888494,
"grad_norm": 0.8088021278381348,
"learning_rate": 4.681176597349424e-05,
"loss": 3.1484,
"step": 85700
},
{
"epoch": 9.229361747928102,
"grad_norm": 0.8791375160217285,
"learning_rate": 4.648852494343282e-05,
"loss": 3.108,
"step": 85750
},
{
"epoch": 9.234743299967711,
"grad_norm": 0.874676525592804,
"learning_rate": 4.6165283913371406e-05,
"loss": 3.1435,
"step": 85800
},
{
"epoch": 9.24012485200732,
"grad_norm": 0.8440104722976685,
"learning_rate": 4.584204288330999e-05,
"loss": 3.1426,
"step": 85850
},
{
"epoch": 9.245506404046926,
"grad_norm": 0.8481955528259277,
"learning_rate": 4.551880185324857e-05,
"loss": 3.1367,
"step": 85900
},
{
"epoch": 9.250887956086535,
"grad_norm": 0.8442366123199463,
"learning_rate": 4.5195560823187156e-05,
"loss": 3.1375,
"step": 85950
},
{
"epoch": 9.256269508126143,
"grad_norm": 0.8529907464981079,
"learning_rate": 4.487231979312574e-05,
"loss": 3.1235,
"step": 86000
},
{
"epoch": 9.256269508126143,
"eval_accuracy": 0.3927857307466268,
"eval_loss": 3.315546989440918,
"eval_runtime": 184.656,
"eval_samples_per_second": 97.538,
"eval_steps_per_second": 6.098,
"step": 86000
},
{
"epoch": 9.261651060165752,
"grad_norm": 0.8316681981086731,
"learning_rate": 4.4549078763064325e-05,
"loss": 3.1276,
"step": 86050
},
{
"epoch": 9.26703261220536,
"grad_norm": 0.8408940434455872,
"learning_rate": 4.4225837733002906e-05,
"loss": 3.1184,
"step": 86100
},
{
"epoch": 9.272414164244967,
"grad_norm": 0.8253659605979919,
"learning_rate": 4.390259670294149e-05,
"loss": 3.1218,
"step": 86150
},
{
"epoch": 9.277795716284576,
"grad_norm": 0.8289502859115601,
"learning_rate": 4.3579355672880075e-05,
"loss": 3.1409,
"step": 86200
},
{
"epoch": 9.283177268324184,
"grad_norm": 0.8685707449913025,
"learning_rate": 4.3256114642818656e-05,
"loss": 3.1158,
"step": 86250
},
{
"epoch": 9.288558820363793,
"grad_norm": 0.8403062224388123,
"learning_rate": 4.2932873612757244e-05,
"loss": 3.1321,
"step": 86300
},
{
"epoch": 9.293940372403402,
"grad_norm": 0.8375023007392883,
"learning_rate": 4.2609632582695825e-05,
"loss": 3.1274,
"step": 86350
},
{
"epoch": 9.29932192444301,
"grad_norm": 0.8399428725242615,
"learning_rate": 4.2286391552634406e-05,
"loss": 3.124,
"step": 86400
},
{
"epoch": 9.304703476482617,
"grad_norm": 0.8669325113296509,
"learning_rate": 4.1963150522572994e-05,
"loss": 3.1222,
"step": 86450
},
{
"epoch": 9.310085028522225,
"grad_norm": 0.8724686503410339,
"learning_rate": 4.1639909492511575e-05,
"loss": 3.1573,
"step": 86500
},
{
"epoch": 9.315466580561834,
"grad_norm": 0.8969683647155762,
"learning_rate": 4.131666846245017e-05,
"loss": 3.1455,
"step": 86550
},
{
"epoch": 9.320848132601443,
"grad_norm": 0.8751991987228394,
"learning_rate": 4.099342743238875e-05,
"loss": 3.1269,
"step": 86600
},
{
"epoch": 9.326229684641051,
"grad_norm": 0.8700101971626282,
"learning_rate": 4.067018640232733e-05,
"loss": 3.1314,
"step": 86650
},
{
"epoch": 9.331611236680658,
"grad_norm": 0.8855168223381042,
"learning_rate": 4.035341019286714e-05,
"loss": 3.1481,
"step": 86700
},
{
"epoch": 9.336992788720266,
"grad_norm": 0.8036063313484192,
"learning_rate": 4.003016916280573e-05,
"loss": 3.121,
"step": 86750
},
{
"epoch": 9.342374340759875,
"grad_norm": 0.8808650374412537,
"learning_rate": 3.9706928132744314e-05,
"loss": 3.1303,
"step": 86800
},
{
"epoch": 9.347755892799483,
"grad_norm": 0.8660295605659485,
"learning_rate": 3.93836871026829e-05,
"loss": 3.1483,
"step": 86850
},
{
"epoch": 9.353137444839092,
"grad_norm": 0.8451541066169739,
"learning_rate": 3.906044607262148e-05,
"loss": 3.1565,
"step": 86900
},
{
"epoch": 9.3585189968787,
"grad_norm": 0.818123996257782,
"learning_rate": 3.8737205042560064e-05,
"loss": 3.1348,
"step": 86950
},
{
"epoch": 9.363900548918307,
"grad_norm": 0.8490729928016663,
"learning_rate": 3.841396401249865e-05,
"loss": 3.1488,
"step": 87000
},
{
"epoch": 9.363900548918307,
"eval_accuracy": 0.39300531823289986,
"eval_loss": 3.313007116317749,
"eval_runtime": 184.3608,
"eval_samples_per_second": 97.694,
"eval_steps_per_second": 6.108,
"step": 87000
},
{
"epoch": 9.369282100957916,
"grad_norm": 0.8371396064758301,
"learning_rate": 3.809072298243723e-05,
"loss": 3.1313,
"step": 87050
},
{
"epoch": 9.374663652997524,
"grad_norm": 0.8060377240180969,
"learning_rate": 3.776748195237582e-05,
"loss": 3.1325,
"step": 87100
},
{
"epoch": 9.380045205037133,
"grad_norm": 0.8614924550056458,
"learning_rate": 3.74442409223144e-05,
"loss": 3.1245,
"step": 87150
},
{
"epoch": 9.385426757076742,
"grad_norm": 0.848238468170166,
"learning_rate": 3.7127464712854216e-05,
"loss": 3.1259,
"step": 87200
},
{
"epoch": 9.390808309116348,
"grad_norm": 0.8356297612190247,
"learning_rate": 3.6804223682792803e-05,
"loss": 3.1552,
"step": 87250
},
{
"epoch": 9.396189861155957,
"grad_norm": 0.8634541034698486,
"learning_rate": 3.6480982652731385e-05,
"loss": 3.1351,
"step": 87300
},
{
"epoch": 9.401571413195565,
"grad_norm": 0.9080988168716431,
"learning_rate": 3.6157741622669966e-05,
"loss": 3.1374,
"step": 87350
},
{
"epoch": 9.406952965235174,
"grad_norm": 0.8959322571754456,
"learning_rate": 3.5834500592608554e-05,
"loss": 3.1358,
"step": 87400
},
{
"epoch": 9.412334517274783,
"grad_norm": 0.8156682848930359,
"learning_rate": 3.5511259562547135e-05,
"loss": 3.1316,
"step": 87450
},
{
"epoch": 9.417716069314391,
"grad_norm": 0.8381271362304688,
"learning_rate": 3.518801853248572e-05,
"loss": 3.14,
"step": 87500
},
{
"epoch": 9.423097621353998,
"grad_norm": 0.8428494334220886,
"learning_rate": 3.4864777502424304e-05,
"loss": 3.145,
"step": 87550
},
{
"epoch": 9.428479173393606,
"grad_norm": 0.8386251330375671,
"learning_rate": 3.4541536472362885e-05,
"loss": 3.1372,
"step": 87600
},
{
"epoch": 9.433860725433215,
"grad_norm": 0.8217976093292236,
"learning_rate": 3.421829544230147e-05,
"loss": 3.1445,
"step": 87650
},
{
"epoch": 9.439242277472824,
"grad_norm": 0.8642744421958923,
"learning_rate": 3.389505441224006e-05,
"loss": 3.1265,
"step": 87700
},
{
"epoch": 9.444623829512432,
"grad_norm": 0.8612553477287292,
"learning_rate": 3.357181338217864e-05,
"loss": 3.1228,
"step": 87750
},
{
"epoch": 9.450005381552039,
"grad_norm": 0.8542048335075378,
"learning_rate": 3.324857235211722e-05,
"loss": 3.1372,
"step": 87800
},
{
"epoch": 9.455386933591647,
"grad_norm": 0.882955014705658,
"learning_rate": 3.292533132205581e-05,
"loss": 3.1426,
"step": 87850
},
{
"epoch": 9.460768485631256,
"grad_norm": 0.9134492874145508,
"learning_rate": 3.260209029199439e-05,
"loss": 3.1432,
"step": 87900
},
{
"epoch": 9.466150037670864,
"grad_norm": 0.902438223361969,
"learning_rate": 3.227884926193298e-05,
"loss": 3.1252,
"step": 87950
},
{
"epoch": 9.471531589710473,
"grad_norm": 0.8667796850204468,
"learning_rate": 3.195560823187157e-05,
"loss": 3.1455,
"step": 88000
},
{
"epoch": 9.471531589710473,
"eval_accuracy": 0.3932915099394032,
"eval_loss": 3.3105454444885254,
"eval_runtime": 184.2551,
"eval_samples_per_second": 97.75,
"eval_steps_per_second": 6.111,
"step": 88000
},
{
"epoch": 9.476913141750082,
"grad_norm": 0.8128182888031006,
"learning_rate": 3.163236720181014e-05,
"loss": 3.1338,
"step": 88050
},
{
"epoch": 9.482294693789688,
"grad_norm": 0.8941981792449951,
"learning_rate": 3.130912617174873e-05,
"loss": 3.1472,
"step": 88100
},
{
"epoch": 9.487676245829297,
"grad_norm": 0.808533251285553,
"learning_rate": 3.098588514168732e-05,
"loss": 3.151,
"step": 88150
},
{
"epoch": 9.493057797868905,
"grad_norm": 0.8551432490348816,
"learning_rate": 3.06626441116259e-05,
"loss": 3.1357,
"step": 88200
},
{
"epoch": 9.498439349908514,
"grad_norm": 0.848689615726471,
"learning_rate": 3.0339403081564487e-05,
"loss": 3.1529,
"step": 88250
},
{
"epoch": 9.503820901948123,
"grad_norm": 0.8379674553871155,
"learning_rate": 3.0016162051503068e-05,
"loss": 3.1468,
"step": 88300
},
{
"epoch": 9.50920245398773,
"grad_norm": 0.8962308168411255,
"learning_rate": 2.9692921021441652e-05,
"loss": 3.148,
"step": 88350
},
{
"epoch": 9.514584006027338,
"grad_norm": 0.8571812510490417,
"learning_rate": 2.9369679991380237e-05,
"loss": 3.147,
"step": 88400
},
{
"epoch": 9.519965558066946,
"grad_norm": 0.8555566668510437,
"learning_rate": 2.904643896131882e-05,
"loss": 3.1502,
"step": 88450
},
{
"epoch": 9.525347110106555,
"grad_norm": 0.8368306756019592,
"learning_rate": 2.8723197931257406e-05,
"loss": 3.1455,
"step": 88500
},
{
"epoch": 9.530728662146164,
"grad_norm": 0.8342770338058472,
"learning_rate": 2.8399956901195987e-05,
"loss": 3.1193,
"step": 88550
},
{
"epoch": 9.536110214185772,
"grad_norm": 0.829977810382843,
"learning_rate": 2.807671587113457e-05,
"loss": 3.1288,
"step": 88600
},
{
"epoch": 9.541491766225379,
"grad_norm": 0.8537443280220032,
"learning_rate": 2.775347484107316e-05,
"loss": 3.1352,
"step": 88650
},
{
"epoch": 9.546873318264987,
"grad_norm": 0.8419883847236633,
"learning_rate": 2.7430233811011744e-05,
"loss": 3.1337,
"step": 88700
},
{
"epoch": 9.552254870304596,
"grad_norm": 0.8567074537277222,
"learning_rate": 2.7106992780950328e-05,
"loss": 3.1389,
"step": 88750
},
{
"epoch": 9.557636422344205,
"grad_norm": 0.8512192964553833,
"learning_rate": 2.678375175088891e-05,
"loss": 3.1232,
"step": 88800
},
{
"epoch": 9.563017974383813,
"grad_norm": 0.8137180805206299,
"learning_rate": 2.6460510720827494e-05,
"loss": 3.1321,
"step": 88850
},
{
"epoch": 9.56839952642342,
"grad_norm": 0.8155874609947205,
"learning_rate": 2.6137269690766078e-05,
"loss": 3.1375,
"step": 88900
},
{
"epoch": 9.573781078463028,
"grad_norm": 0.8336650133132935,
"learning_rate": 2.5814028660704663e-05,
"loss": 3.1384,
"step": 88950
},
{
"epoch": 9.579162630502637,
"grad_norm": 0.880803644657135,
"learning_rate": 2.5490787630643247e-05,
"loss": 3.1375,
"step": 89000
},
{
"epoch": 9.579162630502637,
"eval_accuracy": 0.39352620017708245,
"eval_loss": 3.3093018531799316,
"eval_runtime": 184.3935,
"eval_samples_per_second": 97.677,
"eval_steps_per_second": 6.107,
"step": 89000
},
{
"epoch": 9.584544182542245,
"grad_norm": 0.8568444848060608,
"learning_rate": 2.5167546600581828e-05,
"loss": 3.1341,
"step": 89050
},
{
"epoch": 9.589925734581854,
"grad_norm": 0.8514227867126465,
"learning_rate": 2.4844305570520416e-05,
"loss": 3.127,
"step": 89100
},
{
"epoch": 9.59530728662146,
"grad_norm": 0.8377872109413147,
"learning_rate": 2.4521064540459e-05,
"loss": 3.1369,
"step": 89150
},
{
"epoch": 9.60068883866107,
"grad_norm": 0.8574662804603577,
"learning_rate": 2.4197823510397585e-05,
"loss": 3.1345,
"step": 89200
},
{
"epoch": 9.606070390700678,
"grad_norm": 0.8137006759643555,
"learning_rate": 2.387458248033617e-05,
"loss": 3.1332,
"step": 89250
},
{
"epoch": 9.611451942740286,
"grad_norm": 0.871265709400177,
"learning_rate": 2.355134145027475e-05,
"loss": 3.1301,
"step": 89300
},
{
"epoch": 9.616833494779895,
"grad_norm": 0.8703457713127136,
"learning_rate": 2.3228100420213335e-05,
"loss": 3.1333,
"step": 89350
},
{
"epoch": 9.622215046819504,
"grad_norm": 0.8446807861328125,
"learning_rate": 2.290485939015192e-05,
"loss": 3.1298,
"step": 89400
},
{
"epoch": 9.62759659885911,
"grad_norm": 0.8325860500335693,
"learning_rate": 2.2581618360090508e-05,
"loss": 3.132,
"step": 89450
},
{
"epoch": 9.632978150898719,
"grad_norm": 0.8514713644981384,
"learning_rate": 2.2264842150630318e-05,
"loss": 3.136,
"step": 89500
},
{
"epoch": 9.638359702938327,
"grad_norm": 0.876413881778717,
"learning_rate": 2.1941601120568905e-05,
"loss": 3.1412,
"step": 89550
},
{
"epoch": 9.643741254977936,
"grad_norm": 0.8323884010314941,
"learning_rate": 2.1618360090507483e-05,
"loss": 3.1538,
"step": 89600
},
{
"epoch": 9.649122807017545,
"grad_norm": 0.8908365964889526,
"learning_rate": 2.129511906044607e-05,
"loss": 3.1279,
"step": 89650
},
{
"epoch": 9.654504359057151,
"grad_norm": 0.8492385149002075,
"learning_rate": 2.0971878030384655e-05,
"loss": 3.1357,
"step": 89700
},
{
"epoch": 9.65988591109676,
"grad_norm": 0.8621118664741516,
"learning_rate": 2.064863700032324e-05,
"loss": 3.1436,
"step": 89750
},
{
"epoch": 9.665267463136368,
"grad_norm": 0.8707040548324585,
"learning_rate": 2.0325395970261824e-05,
"loss": 3.1463,
"step": 89800
},
{
"epoch": 9.670649015175977,
"grad_norm": 0.8553931713104248,
"learning_rate": 2.0002154940200406e-05,
"loss": 3.1213,
"step": 89850
},
{
"epoch": 9.676030567215586,
"grad_norm": 0.8200343251228333,
"learning_rate": 1.967891391013899e-05,
"loss": 3.1342,
"step": 89900
},
{
"epoch": 9.681412119255192,
"grad_norm": 0.860724925994873,
"learning_rate": 1.9355672880077575e-05,
"loss": 3.1524,
"step": 89950
},
{
"epoch": 9.6867936712948,
"grad_norm": 0.8658027648925781,
"learning_rate": 1.9032431850016162e-05,
"loss": 3.1281,
"step": 90000
},
{
"epoch": 9.6867936712948,
"eval_accuracy": 0.3939400590267678,
"eval_loss": 3.3066227436065674,
"eval_runtime": 184.6006,
"eval_samples_per_second": 97.567,
"eval_steps_per_second": 6.1,
"step": 90000
},
{
"epoch": 9.69217522333441,
"grad_norm": 0.8631778955459595,
"learning_rate": 1.8709190819954744e-05,
"loss": 3.143,
"step": 90050
},
{
"epoch": 9.697556775374018,
"grad_norm": 0.8524464964866638,
"learning_rate": 1.8385949789893328e-05,
"loss": 3.1402,
"step": 90100
},
{
"epoch": 9.702938327413626,
"grad_norm": 0.8782649040222168,
"learning_rate": 1.8062708759831913e-05,
"loss": 3.1413,
"step": 90150
},
{
"epoch": 9.708319879453235,
"grad_norm": 0.8450905084609985,
"learning_rate": 1.7739467729770497e-05,
"loss": 3.1314,
"step": 90200
},
{
"epoch": 9.713701431492842,
"grad_norm": 0.8762593269348145,
"learning_rate": 1.741622669970908e-05,
"loss": 3.1401,
"step": 90250
},
{
"epoch": 9.71908298353245,
"grad_norm": 0.8233435750007629,
"learning_rate": 1.7092985669647666e-05,
"loss": 3.1364,
"step": 90300
},
{
"epoch": 9.724464535572059,
"grad_norm": 0.8614866733551025,
"learning_rate": 1.676974463958625e-05,
"loss": 3.1457,
"step": 90350
},
{
"epoch": 9.729846087611667,
"grad_norm": 0.8564322590827942,
"learning_rate": 1.6446503609524835e-05,
"loss": 3.1397,
"step": 90400
},
{
"epoch": 9.735227639651276,
"grad_norm": 0.850202202796936,
"learning_rate": 1.612326257946342e-05,
"loss": 3.1339,
"step": 90450
},
{
"epoch": 9.740609191690883,
"grad_norm": 0.8573711514472961,
"learning_rate": 1.5800021549402004e-05,
"loss": 3.1491,
"step": 90500
},
{
"epoch": 9.745990743730491,
"grad_norm": 0.8609964847564697,
"learning_rate": 1.5476780519340585e-05,
"loss": 3.1383,
"step": 90550
},
{
"epoch": 9.7513722957701,
"grad_norm": 0.8481546640396118,
"learning_rate": 1.5153539489279171e-05,
"loss": 3.1178,
"step": 90600
},
{
"epoch": 9.756753847809708,
"grad_norm": 0.8250866532325745,
"learning_rate": 1.4830298459217756e-05,
"loss": 3.1341,
"step": 90650
},
{
"epoch": 9.762135399849317,
"grad_norm": 0.8713231086730957,
"learning_rate": 1.4507057429156339e-05,
"loss": 3.1359,
"step": 90700
},
{
"epoch": 9.767516951888926,
"grad_norm": 0.8582589030265808,
"learning_rate": 1.4183816399094925e-05,
"loss": 3.1271,
"step": 90750
},
{
"epoch": 9.772898503928532,
"grad_norm": 0.8594558238983154,
"learning_rate": 1.3860575369033508e-05,
"loss": 3.141,
"step": 90800
},
{
"epoch": 9.77828005596814,
"grad_norm": 0.832633912563324,
"learning_rate": 1.3537334338972092e-05,
"loss": 3.1495,
"step": 90850
},
{
"epoch": 9.78366160800775,
"grad_norm": 0.8840243220329285,
"learning_rate": 1.3214093308910678e-05,
"loss": 3.1235,
"step": 90900
},
{
"epoch": 9.789043160047358,
"grad_norm": 0.8464884161949158,
"learning_rate": 1.2890852278849261e-05,
"loss": 3.1395,
"step": 90950
},
{
"epoch": 9.794424712086967,
"grad_norm": 0.8295110464096069,
"learning_rate": 1.2567611248787846e-05,
"loss": 3.14,
"step": 91000
},
{
"epoch": 9.794424712086967,
"eval_accuracy": 0.39398591054542553,
"eval_loss": 3.3052074909210205,
"eval_runtime": 184.568,
"eval_samples_per_second": 97.585,
"eval_steps_per_second": 6.101,
"step": 91000
},
{
"epoch": 9.799806264126573,
"grad_norm": 0.8717403411865234,
"learning_rate": 1.2244370218726428e-05,
"loss": 3.1482,
"step": 91050
},
{
"epoch": 9.805187816166182,
"grad_norm": 0.878490149974823,
"learning_rate": 1.1921129188665013e-05,
"loss": 3.1342,
"step": 91100
},
{
"epoch": 9.81056936820579,
"grad_norm": 0.8271324634552002,
"learning_rate": 1.1597888158603599e-05,
"loss": 3.1386,
"step": 91150
},
{
"epoch": 9.815950920245399,
"grad_norm": 0.8197286128997803,
"learning_rate": 1.1274647128542182e-05,
"loss": 3.1268,
"step": 91200
},
{
"epoch": 9.821332472285007,
"grad_norm": 0.8513312935829163,
"learning_rate": 1.0951406098480766e-05,
"loss": 3.1306,
"step": 91250
},
{
"epoch": 9.826714024324616,
"grad_norm": 0.8409889340400696,
"learning_rate": 1.0628165068419349e-05,
"loss": 3.1581,
"step": 91300
},
{
"epoch": 9.832095576364223,
"grad_norm": 0.8312180638313293,
"learning_rate": 1.0304924038357935e-05,
"loss": 3.132,
"step": 91350
},
{
"epoch": 9.837477128403831,
"grad_norm": 0.8290335536003113,
"learning_rate": 9.98168300829652e-06,
"loss": 3.1575,
"step": 91400
},
{
"epoch": 9.84285868044344,
"grad_norm": 0.887336015701294,
"learning_rate": 9.658441978235103e-06,
"loss": 3.1239,
"step": 91450
},
{
"epoch": 9.848240232483048,
"grad_norm": 0.8567714095115662,
"learning_rate": 9.335200948173687e-06,
"loss": 3.145,
"step": 91500
},
{
"epoch": 9.853621784522657,
"grad_norm": 0.8222310543060303,
"learning_rate": 9.011959918112272e-06,
"loss": 3.1271,
"step": 91550
},
{
"epoch": 9.859003336562264,
"grad_norm": 0.8363861441612244,
"learning_rate": 8.688718888050856e-06,
"loss": 3.1345,
"step": 91600
},
{
"epoch": 9.864384888601872,
"grad_norm": 0.8393396735191345,
"learning_rate": 8.36547785798944e-06,
"loss": 3.1483,
"step": 91650
},
{
"epoch": 9.869766440641481,
"grad_norm": 0.8519042134284973,
"learning_rate": 8.042236827928023e-06,
"loss": 3.1135,
"step": 91700
},
{
"epoch": 9.87514799268109,
"grad_norm": 0.8198532462120056,
"learning_rate": 7.71899579786661e-06,
"loss": 3.1369,
"step": 91750
},
{
"epoch": 9.880529544720698,
"grad_norm": 0.8474845290184021,
"learning_rate": 7.395754767805193e-06,
"loss": 3.1332,
"step": 91800
},
{
"epoch": 9.885911096760307,
"grad_norm": 0.843837559223175,
"learning_rate": 7.072513737743777e-06,
"loss": 3.1413,
"step": 91850
},
{
"epoch": 9.891292648799913,
"grad_norm": 0.8688272833824158,
"learning_rate": 6.749272707682361e-06,
"loss": 3.1395,
"step": 91900
},
{
"epoch": 9.896674200839522,
"grad_norm": 0.834823489189148,
"learning_rate": 6.426031677620945e-06,
"loss": 3.1182,
"step": 91950
},
{
"epoch": 9.90205575287913,
"grad_norm": 0.8303846120834351,
"learning_rate": 6.10279064755953e-06,
"loss": 3.1462,
"step": 92000
},
{
"epoch": 9.90205575287913,
"eval_accuracy": 0.39428514059846664,
"eval_loss": 3.3026177883148193,
"eval_runtime": 184.592,
"eval_samples_per_second": 97.572,
"eval_steps_per_second": 6.1,
"step": 92000
},
{
"epoch": 9.907437304918739,
"grad_norm": 0.8718098402023315,
"learning_rate": 5.779549617498114e-06,
"loss": 3.1523,
"step": 92050
},
{
"epoch": 9.912818856958348,
"grad_norm": 0.8723344802856445,
"learning_rate": 5.4563085874366985e-06,
"loss": 3.1466,
"step": 92100
},
{
"epoch": 9.918200408997954,
"grad_norm": 0.8737227320671082,
"learning_rate": 5.133067557375282e-06,
"loss": 3.152,
"step": 92150
},
{
"epoch": 9.923581961037563,
"grad_norm": 0.8384645581245422,
"learning_rate": 4.809826527313866e-06,
"loss": 3.1369,
"step": 92200
},
{
"epoch": 9.928963513077171,
"grad_norm": 0.8230083584785461,
"learning_rate": 4.48658549725245e-06,
"loss": 3.1199,
"step": 92250
},
{
"epoch": 9.93434506511678,
"grad_norm": 0.8617830872535706,
"learning_rate": 4.163344467191035e-06,
"loss": 3.1382,
"step": 92300
},
{
"epoch": 9.939726617156388,
"grad_norm": 0.8417300581932068,
"learning_rate": 3.840103437129619e-06,
"loss": 3.1341,
"step": 92350
},
{
"epoch": 9.945108169195997,
"grad_norm": 0.8135554194450378,
"learning_rate": 3.5168624070682038e-06,
"loss": 3.136,
"step": 92400
},
{
"epoch": 9.950489721235604,
"grad_norm": 0.832378089427948,
"learning_rate": 3.193621377006788e-06,
"loss": 3.1483,
"step": 92450
},
{
"epoch": 9.955871273275212,
"grad_norm": 0.8610823750495911,
"learning_rate": 2.870380346945372e-06,
"loss": 3.1366,
"step": 92500
},
{
"epoch": 9.961252825314821,
"grad_norm": 0.84225994348526,
"learning_rate": 2.5471393168839564e-06,
"loss": 3.1237,
"step": 92550
},
{
"epoch": 9.96663437735443,
"grad_norm": 0.8493366241455078,
"learning_rate": 2.2238982868225405e-06,
"loss": 3.1305,
"step": 92600
},
{
"epoch": 9.972015929394038,
"grad_norm": 0.8750933408737183,
"learning_rate": 1.9006572567611246e-06,
"loss": 3.1303,
"step": 92650
},
{
"epoch": 9.977397481433645,
"grad_norm": 0.8312073349952698,
"learning_rate": 1.5774162266997088e-06,
"loss": 3.1224,
"step": 92700
},
{
"epoch": 9.982779033473253,
"grad_norm": 0.8882536888122559,
"learning_rate": 1.2541751966382931e-06,
"loss": 3.1166,
"step": 92750
},
{
"epoch": 9.988160585512862,
"grad_norm": 0.8545377254486084,
"learning_rate": 9.309341665768773e-07,
"loss": 3.1243,
"step": 92800
},
{
"epoch": 9.99354213755247,
"grad_norm": 0.874809980392456,
"learning_rate": 6.076931365154616e-07,
"loss": 3.1167,
"step": 92850
},
{
"epoch": 9.998923689592079,
"grad_norm": 0.8542647361755371,
"learning_rate": 2.8445210645404587e-07,
"loss": 3.1182,
"step": 92900
},
{
"epoch": 10.0,
"step": 92910,
"total_flos": 7.7681075945472e+17,
"train_loss": 3.4603127508396456,
"train_runtime": 79796.1096,
"train_samples_per_second": 37.257,
"train_steps_per_second": 1.164
}
],
"logging_steps": 50,
"max_steps": 92910,
"num_input_tokens_seen": 0,
"num_train_epochs": 10,
"save_steps": 10000,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 7.7681075945472e+17,
"train_batch_size": 32,
"trial_name": null,
"trial_params": null
}