{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9994447529150472,
  "eval_steps": 100,
  "global_step": 1350,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0037016472330186935,
      "grad_norm": 2.6190161309557647,
      "learning_rate": 7.407407407407407e-07,
      "loss": 1.1051,
      "step": 5
    },
    {
      "epoch": 0.007403294466037387,
      "grad_norm": 2.617573716089078,
      "learning_rate": 1.4814814814814815e-06,
      "loss": 1.0488,
      "step": 10
    },
    {
      "epoch": 0.01110494169905608,
      "grad_norm": 2.188167315916446,
      "learning_rate": 2.222222222222222e-06,
      "loss": 1.0738,
      "step": 15
    },
    {
      "epoch": 0.014806588932074774,
      "grad_norm": 1.6667692225000028,
      "learning_rate": 2.962962962962963e-06,
      "loss": 1.0593,
      "step": 20
    },
    {
      "epoch": 0.018508236165093468,
      "grad_norm": 1.654227504239446,
      "learning_rate": 3.7037037037037037e-06,
      "loss": 1.0202,
      "step": 25
    },
    {
      "epoch": 0.02220988339811216,
      "grad_norm": 1.2548959657686796,
      "learning_rate": 4.444444444444444e-06,
      "loss": 1.0001,
      "step": 30
    },
    {
      "epoch": 0.025911530631130855,
      "grad_norm": 1.1833827152770533,
      "learning_rate": 5.185185185185185e-06,
      "loss": 0.937,
      "step": 35
    },
    {
      "epoch": 0.029613177864149548,
      "grad_norm": 1.0883900510725615,
      "learning_rate": 5.925925925925926e-06,
      "loss": 0.9428,
      "step": 40
    },
    {
      "epoch": 0.03331482509716824,
      "grad_norm": 1.0419522343447962,
      "learning_rate": 6.666666666666667e-06,
      "loss": 0.9189,
      "step": 45
    },
    {
      "epoch": 0.037016472330186935,
      "grad_norm": 0.9923021285070018,
      "learning_rate": 7.4074074074074075e-06,
      "loss": 0.8983,
      "step": 50
    },
    {
      "epoch": 0.040718119563205625,
      "grad_norm": 0.949359665064221,
      "learning_rate": 8.148148148148148e-06,
      "loss": 0.8726,
      "step": 55
    },
    {
      "epoch": 0.04441976679622432,
      "grad_norm": 0.9124655284576202,
      "learning_rate": 8.888888888888888e-06,
      "loss": 0.8745,
      "step": 60
    },
    {
      "epoch": 0.04812141402924301,
      "grad_norm": 0.9036479282174604,
      "learning_rate": 9.62962962962963e-06,
      "loss": 0.8342,
      "step": 65
    },
    {
      "epoch": 0.05182306126226171,
      "grad_norm": 1.055447514473688,
      "learning_rate": 1.037037037037037e-05,
      "loss": 0.8618,
      "step": 70
    },
    {
      "epoch": 0.0555247084952804,
      "grad_norm": 0.9121775150471593,
      "learning_rate": 1.1111111111111113e-05,
      "loss": 0.8508,
      "step": 75
    },
    {
      "epoch": 0.059226355728299096,
      "grad_norm": 0.9467767452776054,
      "learning_rate": 1.1851851851851852e-05,
      "loss": 0.8151,
      "step": 80
    },
    {
      "epoch": 0.06292800296131779,
      "grad_norm": 0.8970016443602813,
      "learning_rate": 1.2592592592592593e-05,
      "loss": 0.8292,
      "step": 85
    },
    {
      "epoch": 0.06662965019433648,
      "grad_norm": 0.9353391005460929,
      "learning_rate": 1.3333333333333333e-05,
      "loss": 0.8196,
      "step": 90
    },
    {
      "epoch": 0.07033129742735518,
      "grad_norm": 0.9292437461938907,
      "learning_rate": 1.4074074074074075e-05,
      "loss": 0.8431,
      "step": 95
    },
    {
      "epoch": 0.07403294466037387,
      "grad_norm": 0.9139153798915853,
      "learning_rate": 1.4814814814814815e-05,
      "loss": 0.8403,
      "step": 100
    },
    {
      "epoch": 0.07403294466037387,
      "eval_loss": 0.8454068303108215,
      "eval_runtime": 14.3147,
      "eval_samples_per_second": 8.942,
      "eval_steps_per_second": 2.235,
      "step": 100
    },
    {
      "epoch": 0.07773459189339256,
      "grad_norm": 1.0020926055129713,
      "learning_rate": 1.555555555555556e-05,
      "loss": 0.837,
      "step": 105
    },
    {
      "epoch": 0.08143623912641125,
      "grad_norm": 0.9562456885916061,
      "learning_rate": 1.6296296296296297e-05,
      "loss": 0.8401,
      "step": 110
    },
    {
      "epoch": 0.08513788635942994,
      "grad_norm": 1.038284162697189,
      "learning_rate": 1.7037037037037038e-05,
      "loss": 0.8056,
      "step": 115
    },
    {
      "epoch": 0.08883953359244864,
      "grad_norm": 1.0582758839194197,
      "learning_rate": 1.7777777777777777e-05,
      "loss": 0.8279,
      "step": 120
    },
    {
      "epoch": 0.09254118082546733,
      "grad_norm": 0.9684707427457332,
      "learning_rate": 1.851851851851852e-05,
      "loss": 0.7864,
      "step": 125
    },
    {
      "epoch": 0.09624282805848602,
      "grad_norm": 0.9673150329418422,
      "learning_rate": 1.925925925925926e-05,
      "loss": 0.8441,
      "step": 130
    },
    {
      "epoch": 0.09994447529150471,
      "grad_norm": 1.0579924708844666,
      "learning_rate": 2e-05,
      "loss": 0.7993,
      "step": 135
    },
    {
      "epoch": 0.10364612252452342,
      "grad_norm": 1.0677906786127234,
      "learning_rate": 1.9999164298554375e-05,
      "loss": 0.8282,
      "step": 140
    },
    {
      "epoch": 0.10734776975754211,
      "grad_norm": 1.0132922833799443,
      "learning_rate": 1.9996657333896875e-05,
      "loss": 0.8313,
      "step": 145
    },
    {
      "epoch": 0.1110494169905608,
      "grad_norm": 1.1704172102766746,
      "learning_rate": 1.9992479525042305e-05,
      "loss": 0.788,
      "step": 150
    },
    {
      "epoch": 0.11475106422357949,
      "grad_norm": 1.0851017037390138,
      "learning_rate": 1.9986631570270835e-05,
      "loss": 0.8224,
      "step": 155
    },
    {
      "epoch": 0.11845271145659819,
      "grad_norm": 0.96343536824137,
      "learning_rate": 1.9979114447011323e-05,
      "loss": 0.7772,
      "step": 160
    },
    {
      "epoch": 0.12215435868961688,
      "grad_norm": 0.883696584269749,
      "learning_rate": 1.996992941167792e-05,
      "loss": 0.7918,
      "step": 165
    },
    {
      "epoch": 0.12585600592263557,
      "grad_norm": 0.9253292123835586,
      "learning_rate": 1.9959077999460094e-05,
      "loss": 0.7888,
      "step": 170
    },
    {
      "epoch": 0.12955765315565426,
      "grad_norm": 0.9860462178783356,
      "learning_rate": 1.9946562024066018e-05,
      "loss": 0.7817,
      "step": 175
    },
    {
      "epoch": 0.13325930038867295,
      "grad_norm": 0.9675122183243109,
      "learning_rate": 1.9932383577419432e-05,
      "loss": 0.8005,
      "step": 180
    },
    {
      "epoch": 0.13696094762169164,
      "grad_norm": 0.997564966420069,
      "learning_rate": 1.991654502931001e-05,
      "loss": 0.7749,
      "step": 185
    },
    {
      "epoch": 0.14066259485471036,
      "grad_norm": 1.0707004475294264,
      "learning_rate": 1.9899049026997272e-05,
      "loss": 0.7994,
      "step": 190
    },
    {
      "epoch": 0.14436424208772905,
      "grad_norm": 0.8879974025424234,
      "learning_rate": 1.9879898494768093e-05,
      "loss": 0.8145,
      "step": 195
    },
    {
      "epoch": 0.14806588932074774,
      "grad_norm": 1.0280629400513372,
      "learning_rate": 1.9859096633447965e-05,
      "loss": 0.811,
      "step": 200
    },
    {
      "epoch": 0.14806588932074774,
      "eval_loss": 0.8159765005111694,
      "eval_runtime": 14.2933,
      "eval_samples_per_second": 8.955,
      "eval_steps_per_second": 2.239,
      "step": 200
    },
    {
      "epoch": 0.15176753655376643,
      "grad_norm": 0.9767690095329316,
      "learning_rate": 1.9836646919866012e-05,
      "loss": 0.7803,
      "step": 205
    },
    {
      "epoch": 0.15546918378678512,
      "grad_norm": 0.9869907646781438,
      "learning_rate": 1.9812553106273848e-05,
      "loss": 0.797,
      "step": 210
    },
    {
      "epoch": 0.1591708310198038,
      "grad_norm": 0.9676536231152361,
      "learning_rate": 1.9786819219718443e-05,
      "loss": 0.7733,
      "step": 215
    },
    {
      "epoch": 0.1628724782528225,
      "grad_norm": 0.8842322753775609,
      "learning_rate": 1.9759449561369036e-05,
      "loss": 0.7914,
      "step": 220
    },
    {
      "epoch": 0.1665741254858412,
      "grad_norm": 0.9394648601954673,
      "learning_rate": 1.973044870579824e-05,
      "loss": 0.7997,
      "step": 225
    },
    {
      "epoch": 0.17027577271885988,
      "grad_norm": 0.8304504280713702,
      "learning_rate": 1.9699821500217436e-05,
      "loss": 0.7226,
      "step": 230
    },
    {
      "epoch": 0.1739774199518786,
      "grad_norm": 1.0726438115973416,
      "learning_rate": 1.9667573063666622e-05,
      "loss": 0.7977,
      "step": 235
    },
    {
      "epoch": 0.1776790671848973,
      "grad_norm": 0.8897537319506557,
      "learning_rate": 1.9633708786158803e-05,
      "loss": 0.7823,
      "step": 240
    },
    {
      "epoch": 0.18138071441791598,
      "grad_norm": 0.9221732420888282,
      "learning_rate": 1.959823432777912e-05,
      "loss": 0.8141,
      "step": 245
    },
    {
      "epoch": 0.18508236165093467,
      "grad_norm": 0.9036288550881341,
      "learning_rate": 1.95611556177388e-05,
      "loss": 0.7629,
      "step": 250
    },
    {
      "epoch": 0.18878400888395336,
      "grad_norm": 0.8388970534683132,
      "learning_rate": 1.9522478853384154e-05,
      "loss": 0.7579,
      "step": 255
    },
    {
      "epoch": 0.19248565611697205,
      "grad_norm": 0.949588184172495,
      "learning_rate": 1.9482210499160767e-05,
      "loss": 0.8039,
      "step": 260
    },
    {
      "epoch": 0.19618730334999074,
      "grad_norm": 0.8650881046969451,
      "learning_rate": 1.9440357285533e-05,
      "loss": 0.7428,
      "step": 265
    },
    {
      "epoch": 0.19988895058300943,
      "grad_norm": 0.8580467714762627,
      "learning_rate": 1.9396926207859085e-05,
      "loss": 0.793,
      "step": 270
    },
    {
      "epoch": 0.20359059781602815,
      "grad_norm": 0.9843752995987342,
      "learning_rate": 1.93519245252219e-05,
      "loss": 0.793,
      "step": 275
    },
    {
      "epoch": 0.20729224504904684,
      "grad_norm": 0.8962094587951618,
      "learning_rate": 1.9305359759215686e-05,
      "loss": 0.79,
      "step": 280
    },
    {
      "epoch": 0.21099389228206553,
      "grad_norm": 0.905499144650914,
      "learning_rate": 1.9257239692688907e-05,
      "loss": 0.7603,
      "step": 285
    },
    {
      "epoch": 0.21469553951508422,
      "grad_norm": 0.9040645563886123,
      "learning_rate": 1.9207572368443386e-05,
      "loss": 0.7863,
      "step": 290
    },
    {
      "epoch": 0.2183971867481029,
      "grad_norm": 1.0639739918515576,
      "learning_rate": 1.9156366087890062e-05,
      "loss": 0.7878,
      "step": 295
    },
    {
      "epoch": 0.2220988339811216,
      "grad_norm": 0.9628865239491529,
      "learning_rate": 1.9103629409661468e-05,
      "loss": 0.7511,
      "step": 300
    },
    {
      "epoch": 0.2220988339811216,
      "eval_loss": 0.7993099689483643,
      "eval_runtime": 14.2853,
      "eval_samples_per_second": 8.96,
      "eval_steps_per_second": 2.24,
      "step": 300
    },
    {
      "epoch": 0.2258004812141403,
      "grad_norm": 0.8937071844485754,
      "learning_rate": 1.9049371148181253e-05,
      "loss": 0.7874,
      "step": 305
    },
    {
      "epoch": 0.22950212844715898,
      "grad_norm": 0.9474641234506681,
      "learning_rate": 1.8993600372190933e-05,
      "loss": 0.8109,
      "step": 310
    },
    {
      "epoch": 0.23320377568017767,
      "grad_norm": 0.9457596208875108,
      "learning_rate": 1.8936326403234125e-05,
      "loss": 0.8028,
      "step": 315
    },
    {
      "epoch": 0.23690542291319638,
      "grad_norm": 1.0148882167066917,
      "learning_rate": 1.8877558814098564e-05,
      "loss": 0.8011,
      "step": 320
    },
    {
      "epoch": 0.24060707014621507,
      "grad_norm": 0.8803198233632789,
      "learning_rate": 1.881730742721608e-05,
      "loss": 0.7722,
      "step": 325
    },
    {
      "epoch": 0.24430871737923376,
      "grad_norm": 0.8992984482099479,
      "learning_rate": 1.8755582313020912e-05,
      "loss": 0.7789,
      "step": 330
    },
    {
      "epoch": 0.24801036461225245,
      "grad_norm": 0.9029136320220847,
      "learning_rate": 1.8692393788266477e-05,
      "loss": 0.79,
      "step": 335
    },
    {
      "epoch": 0.25171201184527114,
      "grad_norm": 0.9033190903919935,
      "learning_rate": 1.8627752414301087e-05,
      "loss": 0.7952,
      "step": 340
    },
    {
      "epoch": 0.25541365907828983,
      "grad_norm": 0.8970075260774104,
      "learning_rate": 1.8561668995302668e-05,
      "loss": 0.79,
      "step": 345
    },
    {
      "epoch": 0.2591153063113085,
      "grad_norm": 0.8419522769107057,
      "learning_rate": 1.8494154576472976e-05,
      "loss": 0.7695,
      "step": 350
    },
    {
      "epoch": 0.2628169535443272,
      "grad_norm": 0.8272814106701719,
      "learning_rate": 1.8425220442191496e-05,
      "loss": 0.7631,
      "step": 355
    },
    {
      "epoch": 0.2665186007773459,
      "grad_norm": 0.8905409087258471,
      "learning_rate": 1.8354878114129368e-05,
      "loss": 0.7396,
      "step": 360
    },
    {
      "epoch": 0.2702202480103646,
      "grad_norm": 0.974755957648857,
      "learning_rate": 1.8283139349323632e-05,
      "loss": 0.8032,
      "step": 365
    },
    {
      "epoch": 0.2739218952433833,
      "grad_norm": 0.8281032066004408,
      "learning_rate": 1.8210016138212186e-05,
      "loss": 0.7463,
      "step": 370
    },
    {
      "epoch": 0.277623542476402,
      "grad_norm": 0.7538888666458481,
      "learning_rate": 1.8135520702629677e-05,
      "loss": 0.7172,
      "step": 375
    },
    {
      "epoch": 0.2813251897094207,
      "grad_norm": 0.9059177473066354,
      "learning_rate": 1.8059665493764745e-05,
      "loss": 0.7906,
      "step": 380
    },
    {
      "epoch": 0.2850268369424394,
      "grad_norm": 0.9003001812735215,
      "learning_rate": 1.7982463190078928e-05,
      "loss": 0.7423,
      "step": 385
    },
    {
      "epoch": 0.2887284841754581,
      "grad_norm": 0.8611806092964468,
      "learning_rate": 1.7903926695187595e-05,
      "loss": 0.7973,
      "step": 390
    },
    {
      "epoch": 0.2924301314084768,
      "grad_norm": 0.8831696562894742,
      "learning_rate": 1.78240691357032e-05,
      "loss": 0.7655,
      "step": 395
    },
    {
      "epoch": 0.2961317786414955,
      "grad_norm": 0.8323221100213534,
      "learning_rate": 1.7742903859041324e-05,
      "loss": 0.776,
      "step": 400
    },
    {
      "epoch": 0.2961317786414955,
      "eval_loss": 0.7871907949447632,
      "eval_runtime": 14.2617,
      "eval_samples_per_second": 8.975,
      "eval_steps_per_second": 2.244,
      "step": 400
    },
    {
      "epoch": 0.29983342587451417,
      "grad_norm": 0.8049305774182498,
      "learning_rate": 1.766044443118978e-05,
      "loss": 0.735,
      "step": 405
    },
    {
      "epoch": 0.30353507310753286,
      "grad_norm": 0.8769829762188076,
      "learning_rate": 1.757670463444118e-05,
      "loss": 0.8049,
      "step": 410
    },
    {
      "epoch": 0.30723672034055155,
      "grad_norm": 0.864360277829185,
      "learning_rate": 1.749169846508936e-05,
      "loss": 0.7376,
      "step": 415
    },
    {
      "epoch": 0.31093836757357024,
      "grad_norm": 0.9009580756822332,
      "learning_rate": 1.740544013109005e-05,
      "loss": 0.7685,
      "step": 420
    },
    {
      "epoch": 0.31464001480658893,
      "grad_norm": 0.9585985415142408,
      "learning_rate": 1.7317944049686125e-05,
      "loss": 0.7572,
      "step": 425
    },
    {
      "epoch": 0.3183416620396076,
      "grad_norm": 0.9192232956038318,
      "learning_rate": 1.722922484499793e-05,
      "loss": 0.754,
      "step": 430
    },
    {
      "epoch": 0.3220433092726263,
      "grad_norm": 0.8217963401694658,
      "learning_rate": 1.7139297345578992e-05,
      "loss": 0.7306,
      "step": 435
    },
    {
      "epoch": 0.325744956505645,
      "grad_norm": 0.867222349629964,
      "learning_rate": 1.7048176581937562e-05,
      "loss": 0.7863,
      "step": 440
    },
    {
      "epoch": 0.3294466037386637,
      "grad_norm": 0.9313710295824902,
      "learning_rate": 1.6955877784024418e-05,
      "loss": 0.7536,
      "step": 445
    },
    {
      "epoch": 0.3331482509716824,
      "grad_norm": 0.8454148296361755,
      "learning_rate": 1.686241637868734e-05,
      "loss": 0.7906,
      "step": 450
    },
    {
      "epoch": 0.33684989820470107,
      "grad_norm": 0.9365890079239012,
      "learning_rate": 1.676780798709262e-05,
      "loss": 0.7732,
      "step": 455
    },
    {
      "epoch": 0.34055154543771976,
      "grad_norm": 0.8187371665418082,
      "learning_rate": 1.6672068422114195e-05,
      "loss": 0.8161,
      "step": 460
    },
    {
      "epoch": 0.3442531926707385,
      "grad_norm": 0.9282608988682542,
      "learning_rate": 1.657521368569064e-05,
      "loss": 0.7727,
      "step": 465
    },
    {
      "epoch": 0.3479548399037572,
      "grad_norm": 0.9417161012583897,
      "learning_rate": 1.647725996615059e-05,
      "loss": 0.7392,
      "step": 470
    },
    {
      "epoch": 0.3516564871367759,
      "grad_norm": 0.8273496274966264,
      "learning_rate": 1.637822363550706e-05,
      "loss": 0.7566,
      "step": 475
    },
    {
      "epoch": 0.3553581343697946,
      "grad_norm": 0.8333503706819086,
      "learning_rate": 1.627812124672099e-05,
      "loss": 0.7615,
      "step": 480
    },
    {
      "epoch": 0.35905978160281327,
      "grad_norm": 0.8838562817691503,
      "learning_rate": 1.6176969530934573e-05,
      "loss": 0.7365,
      "step": 485
    },
    {
      "epoch": 0.36276142883583196,
      "grad_norm": 0.8973618042999694,
      "learning_rate": 1.6074785394674835e-05,
      "loss": 0.7624,
      "step": 490
    },
    {
      "epoch": 0.36646307606885065,
      "grad_norm": 0.8761004580717847,
      "learning_rate": 1.5971585917027864e-05,
      "loss": 0.7429,
      "step": 495
    },
    {
      "epoch": 0.37016472330186934,
      "grad_norm": 0.8619944583073996,
      "learning_rate": 1.586738834678418e-05,
      "loss": 0.7878,
      "step": 500
    },
    {
      "epoch": 0.37016472330186934,
      "eval_loss": 0.7774370908737183,
      "eval_runtime": 14.2807,
      "eval_samples_per_second": 8.963,
      "eval_steps_per_second": 2.241,
      "step": 500
    },
    {
      "epoch": 0.373866370534888,
      "grad_norm": 0.8202880479359111,
      "learning_rate": 1.5762210099555804e-05,
      "loss": 0.728,
      "step": 505
    },
    {
      "epoch": 0.3775680177679067,
      "grad_norm": 0.8822671386817853,
      "learning_rate": 1.5656068754865388e-05,
      "loss": 0.7477,
      "step": 510
    },
    {
      "epoch": 0.3812696650009254,
      "grad_norm": 0.8486610570948868,
      "learning_rate": 1.554898205320797e-05,
      "loss": 0.719,
      "step": 515
    },
    {
      "epoch": 0.3849713122339441,
      "grad_norm": 0.8240752169293275,
      "learning_rate": 1.5440967893085827e-05,
      "loss": 0.7419,
      "step": 520
    },
    {
      "epoch": 0.3886729594669628,
      "grad_norm": 0.8316656751994306,
      "learning_rate": 1.5332044328016916e-05,
      "loss": 0.7428,
      "step": 525
    },
    {
      "epoch": 0.3923746066999815,
      "grad_norm": 0.8087681319089429,
      "learning_rate": 1.5222229563517385e-05,
      "loss": 0.7658,
      "step": 530
    },
    {
      "epoch": 0.39607625393300017,
      "grad_norm": 0.7538109023098745,
      "learning_rate": 1.5111541954058733e-05,
      "loss": 0.7554,
      "step": 535
    },
    {
      "epoch": 0.39977790116601886,
      "grad_norm": 0.9099819099198067,
      "learning_rate": 1.5000000000000002e-05,
      "loss": 0.7137,
      "step": 540
    },
    {
      "epoch": 0.40347954839903755,
      "grad_norm": 0.903536929993487,
      "learning_rate": 1.4887622344495643e-05,
      "loss": 0.7659,
      "step": 545
    },
    {
      "epoch": 0.4071811956320563,
      "grad_norm": 0.8457480185886588,
      "learning_rate": 1.4774427770379492e-05,
      "loss": 0.7526,
      "step": 550
    },
    {
      "epoch": 0.410882842865075,
      "grad_norm": 0.7789008578079015,
      "learning_rate": 1.4660435197025391e-05,
      "loss": 0.7303,
      "step": 555
    },
    {
      "epoch": 0.41458449009809367,
      "grad_norm": 0.8748325021299536,
      "learning_rate": 1.4545663677185007e-05,
      "loss": 0.7508,
      "step": 560
    },
    {
      "epoch": 0.41828613733111236,
      "grad_norm": 0.9201816263076547,
      "learning_rate": 1.4430132393803353e-05,
      "loss": 0.7576,
      "step": 565
    },
    {
      "epoch": 0.42198778456413105,
      "grad_norm": 0.8717802358680588,
      "learning_rate": 1.4313860656812537e-05,
      "loss": 0.745,
      "step": 570
    },
    {
      "epoch": 0.42568943179714974,
      "grad_norm": 0.9437971373387479,
      "learning_rate": 1.4196867899904292e-05,
      "loss": 0.7626,
      "step": 575
    },
    {
      "epoch": 0.42939107903016843,
      "grad_norm": 0.9108698247572669,
      "learning_rate": 1.4079173677281836e-05,
      "loss": 0.7959,
      "step": 580
    },
    {
      "epoch": 0.4330927262631871,
      "grad_norm": 0.857828540495448,
      "learning_rate": 1.396079766039157e-05,
      "loss": 0.7425,
      "step": 585
    },
    {
      "epoch": 0.4367943734962058,
      "grad_norm": 0.8176732343058545,
      "learning_rate": 1.3841759634635177e-05,
      "loss": 0.7352,
      "step": 590
    },
    {
      "epoch": 0.4404960207292245,
      "grad_norm": 0.8191927860207149,
      "learning_rate": 1.3722079496062702e-05,
      "loss": 0.7599,
      "step": 595
    },
    {
      "epoch": 0.4441976679622432,
      "grad_norm": 0.9392342604351928,
      "learning_rate": 1.3601777248047105e-05,
      "loss": 0.7595,
      "step": 600
    },
    {
      "epoch": 0.4441976679622432,
      "eval_loss": 0.7718663811683655,
      "eval_runtime": 14.2597,
      "eval_samples_per_second": 8.976,
      "eval_steps_per_second": 2.244,
      "step": 600
    },
    {
      "epoch": 0.4478993151952619,
      "grad_norm": 0.9422724492877115,
      "learning_rate": 1.3480872997940906e-05,
      "loss": 0.739,
      "step": 605
    },
    {
      "epoch": 0.4516009624282806,
      "grad_norm": 0.8664419234133783,
      "learning_rate": 1.3359386953715423e-05,
      "loss": 0.7624,
      "step": 610
    },
    {
      "epoch": 0.45530260966129926,
      "grad_norm": 0.8268673861634072,
      "learning_rate": 1.3237339420583213e-05,
      "loss": 0.7116,
      "step": 615
    },
    {
      "epoch": 0.45900425689431795,
      "grad_norm": 0.8019512288624171,
      "learning_rate": 1.3114750797604248e-05,
      "loss": 0.7472,
      "step": 620
    },
    {
      "epoch": 0.46270590412733664,
      "grad_norm": 0.8280253216841055,
      "learning_rate": 1.2991641574276419e-05,
      "loss": 0.7417,
      "step": 625
    },
    {
      "epoch": 0.46640755136035533,
      "grad_norm": 0.9004418297788057,
      "learning_rate": 1.2868032327110904e-05,
      "loss": 0.7519,
      "step": 630
    },
    {
      "epoch": 0.4701091985933741,
      "grad_norm": 0.9104424493686898,
      "learning_rate": 1.2743943716193017e-05,
      "loss": 0.7402,
      "step": 635
    },
    {
      "epoch": 0.47381084582639277,
      "grad_norm": 0.8552077552123197,
      "learning_rate": 1.261939648172906e-05,
      "loss": 0.7225,
      "step": 640
    },
    {
      "epoch": 0.47751249305941146,
      "grad_norm": 0.7961294819198029,
      "learning_rate": 1.2494411440579814e-05,
      "loss": 0.7342,
      "step": 645
    },
    {
      "epoch": 0.48121414029243015,
      "grad_norm": 0.8480774387053995,
      "learning_rate": 1.2369009482781191e-05,
      "loss": 0.7365,
      "step": 650
    },
    {
      "epoch": 0.48491578752544884,
      "grad_norm": 0.7885755366389152,
      "learning_rate": 1.2243211568052678e-05,
      "loss": 0.7552,
      "step": 655
    },
    {
      "epoch": 0.48861743475846753,
      "grad_norm": 0.7906865082372576,
      "learning_rate": 1.211703872229411e-05,
      "loss": 0.7365,
      "step": 660
    },
    {
      "epoch": 0.4923190819914862,
      "grad_norm": 0.819623561884998,
      "learning_rate": 1.1990512034071407e-05,
      "loss": 0.7355,
      "step": 665
    },
    {
      "epoch": 0.4960207292245049,
      "grad_norm": 0.8742039076741506,
      "learning_rate": 1.1863652651091824e-05,
      "loss": 0.7483,
      "step": 670
    },
    {
      "epoch": 0.4997223764575236,
      "grad_norm": 0.79128012709554,
      "learning_rate": 1.1736481776669307e-05,
      "loss": 0.7624,
      "step": 675
    },
    {
      "epoch": 0.5034240236905423,
      "grad_norm": 0.8488152261113262,
      "learning_rate": 1.1609020666180574e-05,
      "loss": 0.7282,
      "step": 680
    },
    {
      "epoch": 0.507125670923561,
      "grad_norm": 0.8897308949587664,
      "learning_rate": 1.1481290623512491e-05,
      "loss": 0.7677,
      "step": 685
    },
    {
      "epoch": 0.5108273181565797,
      "grad_norm": 0.8967740652858772,
      "learning_rate": 1.1353312997501313e-05,
      "loss": 0.7078,
      "step": 690
    },
    {
      "epoch": 0.5145289653895984,
      "grad_norm": 0.8371045946121705,
      "learning_rate": 1.1225109178364456e-05,
      "loss": 0.748,
      "step": 695
    },
    {
      "epoch": 0.518230612622617,
      "grad_norm": 0.8356367511625883,
      "learning_rate": 1.1096700594125318e-05,
      "loss": 0.7021,
      "step": 700
    },
    {
      "epoch": 0.518230612622617,
      "eval_loss": 0.7633776664733887,
      "eval_runtime": 14.2769,
      "eval_samples_per_second": 8.966,
      "eval_steps_per_second": 2.241,
      "step": 700
    },
    {
      "epoch": 0.5219322598556357,
      "grad_norm": 0.9272941513398387,
      "learning_rate": 1.0968108707031792e-05,
      "loss": 0.7329,
      "step": 705
    },
    {
      "epoch": 0.5256339070886544,
      "grad_norm": 0.8184878745101959,
      "learning_rate": 1.0839355009969068e-05,
      "loss": 0.7513,
      "step": 710
    },
    {
      "epoch": 0.5293355543216731,
      "grad_norm": 0.8378444140394703,
      "learning_rate": 1.0710461022867303e-05,
      "loss": 0.7793,
      "step": 715
    },
    {
      "epoch": 0.5330372015546918,
      "grad_norm": 0.8269985738310317,
      "learning_rate": 1.0581448289104759e-05,
      "loss": 0.7407,
      "step": 720
    },
    {
      "epoch": 0.5367388487877105,
      "grad_norm": 0.9030617257540957,
      "learning_rate": 1.0452338371907065e-05,
      "loss": 0.7517,
      "step": 725
    },
    {
      "epoch": 0.5404404960207292,
      "grad_norm": 0.8998005141400585,
      "learning_rate": 1.0323152850743107e-05,
      "loss": 0.7481,
      "step": 730
    },
    {
      "epoch": 0.5441421432537479,
      "grad_norm": 0.8279469371102903,
      "learning_rate": 1.0193913317718245e-05,
      "loss": 0.7384,
      "step": 735
    },
    {
      "epoch": 0.5478437904867666,
      "grad_norm": 0.7963542084416609,
      "learning_rate": 1.0064641373965394e-05,
      "loss": 0.7362,
      "step": 740
    },
    {
      "epoch": 0.5515454377197853,
      "grad_norm": 0.9484030675845825,
      "learning_rate": 9.935358626034607e-06,
      "loss": 0.7559,
      "step": 745
    },
    {
      "epoch": 0.555247084952804,
      "grad_norm": 0.8648077990419285,
      "learning_rate": 9.806086682281759e-06,
      "loss": 0.7544,
      "step": 750
    },
    {
      "epoch": 0.5589487321858226,
      "grad_norm": 0.789146267358829,
      "learning_rate": 9.676847149256894e-06,
      "loss": 0.7529,
      "step": 755
    },
    {
      "epoch": 0.5626503794188414,
      "grad_norm": 0.8480815345018301,
      "learning_rate": 9.547661628092938e-06,
      "loss": 0.7387,
      "step": 760
    },
    {
      "epoch": 0.5663520266518601,
      "grad_norm": 0.8476699553679603,
      "learning_rate": 9.418551710895243e-06,
      "loss": 0.7317,
      "step": 765
    },
    {
      "epoch": 0.5700536738848788,
      "grad_norm": 0.9000817486943193,
      "learning_rate": 9.289538977132702e-06,
      "loss": 0.6862,
      "step": 770
    },
    {
      "epoch": 0.5737553211178975,
      "grad_norm": 0.8395859096767366,
      "learning_rate": 9.160644990030932e-06,
      "loss": 0.7557,
      "step": 775
    },
    {
      "epoch": 0.5774569683509162,
      "grad_norm": 0.7566339188724619,
      "learning_rate": 9.03189129296821e-06,
      "loss": 0.7266,
      "step": 780
    },
    {
      "epoch": 0.5811586155839349,
      "grad_norm": 0.7966256156650591,
      "learning_rate": 8.903299405874685e-06,
      "loss": 0.7243,
      "step": 785
    },
    {
      "epoch": 0.5848602628169536,
      "grad_norm": 0.8848790598873679,
      "learning_rate": 8.774890821635548e-06,
      "loss": 0.7152,
      "step": 790
    },
    {
      "epoch": 0.5885619100499723,
      "grad_norm": 0.7756335423304778,
      "learning_rate": 8.646687002498692e-06,
      "loss": 0.7145,
      "step": 795
    },
    {
      "epoch": 0.592263557282991,
      "grad_norm": 0.8560744348197757,
      "learning_rate": 8.518709376487515e-06,
      "loss": 0.7355,
      "step": 800
    },
    {
      "epoch": 0.592263557282991,
      "eval_loss": 0.7575986385345459,
      "eval_runtime": 14.2817,
      "eval_samples_per_second": 8.963,
      "eval_steps_per_second": 2.241,
      "step": 800
    },
    {
      "epoch": 0.5959652045160097,
      "grad_norm": 0.8288143886892615,
      "learning_rate": 8.390979333819427e-06,
      "loss": 0.7084,
      "step": 805
    },
    {
      "epoch": 0.5996668517490283,
      "grad_norm": 0.8153497698516867,
      "learning_rate": 8.263518223330698e-06,
      "loss": 0.7151,
      "step": 810
    },
    {
      "epoch": 0.603368498982047,
      "grad_norm": 0.8669494090426968,
      "learning_rate": 8.13634734890818e-06,
      "loss": 0.7269,
      "step": 815
    },
    {
      "epoch": 0.6070701462150657,
      "grad_norm": 0.8796270235656223,
      "learning_rate": 8.009487965928597e-06,
      "loss": 0.7553,
      "step": 820
    },
    {
      "epoch": 0.6107717934480844,
      "grad_norm": 0.8475892652162709,
      "learning_rate": 7.882961277705897e-06,
      "loss": 0.7446,
      "step": 825
    },
    {
      "epoch": 0.6144734406811031,
      "grad_norm": 0.7537468893152552,
      "learning_rate": 7.756788431947327e-06,
      "loss": 0.6972,
      "step": 830
    },
    {
      "epoch": 0.6181750879141218,
      "grad_norm": 0.8556469029063232,
      "learning_rate": 7.630990517218809e-06,
      "loss": 0.737,
      "step": 835
    },
    {
      "epoch": 0.6218767351471405,
      "grad_norm": 0.844268000072148,
      "learning_rate": 7.505588559420188e-06,
      "loss": 0.7342,
      "step": 840
    },
    {
      "epoch": 0.6255783823801592,
      "grad_norm": 0.9222919394028835,
      "learning_rate": 7.380603518270942e-06,
      "loss": 0.714,
      "step": 845
    },
    {
      "epoch": 0.6292800296131779,
      "grad_norm": 0.8847989728686334,
      "learning_rate": 7.256056283806987e-06,
      "loss": 0.7432,
      "step": 850
    },
    {
      "epoch": 0.6329816768461966,
      "grad_norm": 0.8394983300816217,
      "learning_rate": 7.131967672889101e-06,
      "loss": 0.7492,
      "step": 855
    },
    {
      "epoch": 0.6366833240792152,
      "grad_norm": 0.8096772768405788,
      "learning_rate": 7.008358425723586e-06,
      "loss": 0.7348,
      "step": 860
    },
    {
      "epoch": 0.6403849713122339,
      "grad_norm": 0.801645629087954,
      "learning_rate": 6.885249202395754e-06,
      "loss": 0.7447,
      "step": 865
    },
    {
      "epoch": 0.6440866185452526,
      "grad_norm": 0.8243514079201689,
      "learning_rate": 6.762660579416791e-06,
      "loss": 0.7353,
      "step": 870
    },
    {
      "epoch": 0.6477882657782713,
      "grad_norm": 0.8323727881143189,
      "learning_rate": 6.640613046284581e-06,
      "loss": 0.7276,
      "step": 875
    },
    {
      "epoch": 0.65148991301129,
      "grad_norm": 0.8287297297936415,
      "learning_rate": 6.519127002059096e-06,
      "loss": 0.7446,
      "step": 880
    },
    {
      "epoch": 0.6551915602443087,
      "grad_norm": 0.7996423574225279,
      "learning_rate": 6.3982227519528986e-06,
      "loss": 0.7356,
      "step": 885
    },
    {
      "epoch": 0.6588932074773274,
      "grad_norm": 0.8318179852048597,
      "learning_rate": 6.277920503937303e-06,
      "loss": 0.7469,
      "step": 890
    },
    {
      "epoch": 0.6625948547103461,
      "grad_norm": 0.8130320177499363,
      "learning_rate": 6.158240365364823e-06,
      "loss": 0.7145,
      "step": 895
    },
    {
      "epoch": 0.6662965019433648,
      "grad_norm": 0.7783067507555986,
      "learning_rate": 6.039202339608432e-06,
      "loss": 0.7262,
      "step": 900
    },
    {
      "epoch": 0.6662965019433648,
      "eval_loss": 0.7520912885665894,
      "eval_runtime": 14.2681,
      "eval_samples_per_second": 8.971,
      "eval_steps_per_second": 2.243,
      "step": 900
    },
    {
      "epoch": 0.6699981491763835,
      "grad_norm": 0.8995639130446902,
      "learning_rate": 5.920826322718165e-06,
      "loss": 0.7664,
      "step": 905
    },
    {
      "epoch": 0.6736997964094021,
      "grad_norm": 0.8342262543730588,
      "learning_rate": 5.80313210009571e-06,
      "loss": 0.7186,
      "step": 910
    },
    {
      "epoch": 0.6774014436424208,
      "grad_norm": 0.7658273926634057,
      "learning_rate": 5.686139343187468e-06,
      "loss": 0.6973,
      "step": 915
    },
    {
      "epoch": 0.6811030908754395,
      "grad_norm": 0.7858008938996333,
      "learning_rate": 5.569867606196652e-06,
      "loss": 0.7278,
      "step": 920
    },
    {
      "epoch": 0.6848047381084582,
      "grad_norm": 0.7365636269558681,
      "learning_rate": 5.454336322814995e-06,
      "loss": 0.7035,
      "step": 925
    },
    {
      "epoch": 0.688506385341477,
      "grad_norm": 0.7765336159889873,
      "learning_rate": 5.339564802974615e-06,
      "loss": 0.7156,
      "step": 930
    },
    {
      "epoch": 0.6922080325744957,
      "grad_norm": 0.8109558110462189,
      "learning_rate": 5.2255722296205104e-06,
      "loss": 0.7155,
      "step": 935
    },
    {
      "epoch": 0.6959096798075144,
      "grad_norm": 0.8066106385142293,
      "learning_rate": 5.112377655504359e-06,
      "loss": 0.7258,
      "step": 940
    },
    {
      "epoch": 0.6996113270405331,
      "grad_norm": 0.8330960608993025,
      "learning_rate": 5.000000000000003e-06,
      "loss": 0.7381,
      "step": 945
    },
    {
      "epoch": 0.7033129742735518,
      "grad_norm": 0.7923011687772826,
      "learning_rate": 4.888458045941269e-06,
      "loss": 0.718,
      "step": 950
    },
    {
      "epoch": 0.7070146215065705,
      "grad_norm": 0.7403061016696155,
      "learning_rate": 4.7777704364826175e-06,
      "loss": 0.7283,
      "step": 955
    },
    {
      "epoch": 0.7107162687395892,
      "grad_norm": 0.7502671442178295,
      "learning_rate": 4.66795567198309e-06,
      "loss": 0.6978,
      "step": 960
    },
    {
      "epoch": 0.7144179159726078,
      "grad_norm": 0.8005011603840454,
      "learning_rate": 4.559032106914173e-06,
      "loss": 0.6945,
      "step": 965
    },
    {
      "epoch": 0.7181195632056265,
      "grad_norm": 0.8021903626529494,
      "learning_rate": 4.4510179467920325e-06,
      "loss": 0.7208,
      "step": 970
    },
    {
      "epoch": 0.7218212104386452,
      "grad_norm": 0.7496333253248526,
      "learning_rate": 4.343931245134616e-06,
      "loss": 0.7019,
      "step": 975
    },
    {
      "epoch": 0.7255228576716639,
      "grad_norm": 0.7306867866282613,
      "learning_rate": 4.237789900444197e-06,
      "loss": 0.7509,
      "step": 980
    },
    {
      "epoch": 0.7292245049046826,
      "grad_norm": 0.794740513892548,
      "learning_rate": 4.132611653215822e-06,
      "loss": 0.7209,
      "step": 985
    },
    {
      "epoch": 0.7329261521377013,
      "grad_norm": 0.8323839400101082,
      "learning_rate": 4.028414082972141e-06,
      "loss": 0.7169,
      "step": 990
    },
    {
      "epoch": 0.73662779937072,
      "grad_norm": 0.7868952327952299,
      "learning_rate": 3.925214605325164e-06,
      "loss": 0.7325,
      "step": 995
    },
    {
      "epoch": 0.7403294466037387,
      "grad_norm": 0.8502475718106594,
      "learning_rate": 3.823030469065431e-06,
      "loss": 0.7241,
      "step": 1000
    },
    {
      "epoch": 0.7403294466037387,
      "eval_loss": 0.7482771873474121,
      "eval_runtime": 14.3279,
      "eval_samples_per_second": 8.934,
      "eval_steps_per_second": 2.233,
      "step": 1000
    },
    {
      "epoch": 0.7440310938367574,
      "grad_norm": 0.7564293493522903,
      "learning_rate": 3.7218787532790167e-06,
      "loss": 0.7545,
      "step": 1005
    },
    {
      "epoch": 0.747732741069776,
      "grad_norm": 0.8080770614952582,
      "learning_rate": 3.6217763644929393e-06,
      "loss": 0.67,
      "step": 1010
    },
    {
      "epoch": 0.7514343883027947,
      "grad_norm": 0.9151222461443353,
      "learning_rate": 3.522740033849411e-06,
      "loss": 0.7272,
      "step": 1015
    },
    {
      "epoch": 0.7551360355358134,
      "grad_norm": 0.7866669956934906,
      "learning_rate": 3.424786314309365e-06,
      "loss": 0.7038,
      "step": 1020
    },
    {
      "epoch": 0.7588376827688321,
      "grad_norm": 0.8019128518787991,
      "learning_rate": 3.3279315778858034e-06,
      "loss": 0.7572,
      "step": 1025
    },
    {
      "epoch": 0.7625393300018508,
      "grad_norm": 0.7964612775903888,
      "learning_rate": 3.2321920129073815e-06,
      "loss": 0.7439,
      "step": 1030
    },
    {
      "epoch": 0.7662409772348695,
      "grad_norm": 0.7639197988273,
      "learning_rate": 3.1375836213126653e-06,
      "loss": 0.7061,
      "step": 1035
    },
    {
      "epoch": 0.7699426244678882,
      "grad_norm": 0.7942875517461166,
      "learning_rate": 3.04412221597558e-06,
      "loss": 0.7088,
      "step": 1040
    },
    {
      "epoch": 0.7736442717009069,
      "grad_norm": 0.7540670094250499,
      "learning_rate": 2.9518234180624393e-06,
      "loss": 0.7006,
      "step": 1045
    },
    {
      "epoch": 0.7773459189339256,
      "grad_norm": 0.7803211907263926,
      "learning_rate": 2.8607026544210115e-06,
      "loss": 0.712,
      "step": 1050
    },
    {
      "epoch": 0.7810475661669443,
      "grad_norm": 0.8209958734370267,
      "learning_rate": 2.770775155002071e-06,
      "loss": 0.7188,
      "step": 1055
    },
    {
      "epoch": 0.784749213399963,
      "grad_norm": 0.7881600050355753,
      "learning_rate": 2.6820559503138797e-06,
      "loss": 0.7395,
      "step": 1060
    },
    {
      "epoch": 0.7884508606329816,
      "grad_norm": 0.765277203264815,
      "learning_rate": 2.594559868909956e-06,
      "loss": 0.7631,
      "step": 1065
    },
    {
      "epoch": 0.7921525078660003,
      "grad_norm": 0.7941335474151308,
      "learning_rate": 2.50830153491064e-06,
      "loss": 0.7057,
      "step": 1070
    },
    {
      "epoch": 0.795854155099019,
      "grad_norm": 0.7729898104805705,
      "learning_rate": 2.423295365558821e-06,
      "loss": 0.7205,
      "step": 1075
    },
    {
      "epoch": 0.7995558023320377,
      "grad_norm": 0.8980427433975159,
      "learning_rate": 2.339555568810221e-06,
      "loss": 0.7287,
      "step": 1080
    },
    {
      "epoch": 0.8032574495650564,
      "grad_norm": 0.8104280836983825,
      "learning_rate": 2.2570961409586756e-06,
      "loss": 0.7159,
      "step": 1085
    },
    {
      "epoch": 0.8069590967980751,
      "grad_norm": 0.8406044981202091,
      "learning_rate": 2.1759308642968024e-06,
      "loss": 0.7356,
      "step": 1090
    },
    {
      "epoch": 0.8106607440310938,
      "grad_norm": 0.768755465033822,
      "learning_rate": 2.0960733048124082e-06,
      "loss": 0.7105,
      "step": 1095
    },
    {
      "epoch": 0.8143623912641126,
      "grad_norm": 0.7210365270405806,
      "learning_rate": 2.01753680992107e-06,
      "loss": 0.7248,
      "step": 1100
    },
    {
      "epoch": 0.8143623912641126,
      "eval_loss": 0.7455551624298096,
      "eval_runtime": 14.2968,
      "eval_samples_per_second": 8.953,
      "eval_steps_per_second": 2.238,
      "step": 1100
    },
    {
      "epoch": 0.8180640384971313,
      "grad_norm": 0.7924363499963953,
      "learning_rate": 1.9403345062352574e-06,
      "loss": 0.7131,
      "step": 1105
    },
    {
      "epoch": 0.82176568573015,
      "grad_norm": 0.7868992820086619,
      "learning_rate": 1.8644792973703252e-06,
      "loss": 0.7348,
      "step": 1110
    },
    {
      "epoch": 0.8254673329631687,
      "grad_norm": 0.809772262855919,
      "learning_rate": 1.7899838617878163e-06,
      "loss": 0.6728,
      "step": 1115
    },
    {
      "epoch": 0.8291689801961873,
      "grad_norm": 0.8179198294868835,
      "learning_rate": 1.7168606506763696e-06,
      "loss": 0.7105,
      "step": 1120
    },
    {
      "epoch": 0.832870627429206,
      "grad_norm": 0.7920423737698461,
      "learning_rate": 1.6451218858706374e-06,
      "loss": 0.7077,
      "step": 1125
    },
    {
      "epoch": 0.8365722746622247,
      "grad_norm": 0.775464355534586,
      "learning_rate": 1.5747795578085046e-06,
      "loss": 0.734,
      "step": 1130
    },
    {
      "epoch": 0.8402739218952434,
      "grad_norm": 0.8452812565386641,
      "learning_rate": 1.505845423527027e-06,
      "loss": 0.7525,
      "step": 1135
    },
    {
      "epoch": 0.8439755691282621,
      "grad_norm": 0.7466403819167539,
      "learning_rate": 1.4383310046973365e-06,
      "loss": 0.7297,
      "step": 1140
    },
    {
      "epoch": 0.8476772163612808,
      "grad_norm": 0.7351450960338738,
      "learning_rate": 1.372247585698916e-06,
      "loss": 0.7155,
      "step": 1145
    },
    {
      "epoch": 0.8513788635942995,
      "grad_norm": 0.8877131801895142,
      "learning_rate": 1.307606211733522e-06,
      "loss": 0.7074,
      "step": 1150
    },
    {
      "epoch": 0.8550805108273182,
      "grad_norm": 0.7457958546738195,
      "learning_rate": 1.2444176869790925e-06,
      "loss": 0.6875,
      "step": 1155
    },
    {
      "epoch": 0.8587821580603369,
      "grad_norm": 0.7576965976506596,
      "learning_rate": 1.18269257278392e-06,
      "loss": 0.7265,
      "step": 1160
    },
    {
      "epoch": 0.8624838052933556,
      "grad_norm": 0.7653232826664952,
      "learning_rate": 1.1224411859014417e-06,
      "loss": 0.7492,
      "step": 1165
    },
    {
      "epoch": 0.8661854525263742,
      "grad_norm": 0.8267243510852378,
      "learning_rate": 1.0636735967658785e-06,
      "loss": 0.7016,
      "step": 1170
    },
    {
      "epoch": 0.8698870997593929,
      "grad_norm": 0.8003633848555943,
      "learning_rate": 1.0063996278090704e-06,
      "loss": 0.7471,
      "step": 1175
    },
    {
      "epoch": 0.8735887469924116,
      "grad_norm": 0.7409525497221457,
      "learning_rate": 9.506288518187468e-07,
      "loss": 0.7417,
      "step": 1180
    },
    {
      "epoch": 0.8772903942254303,
      "grad_norm": 0.8295453068439365,
      "learning_rate": 8.963705903385344e-07,
      "loss": 0.7302,
      "step": 1185
    },
    {
      "epoch": 0.880992041458449,
      "grad_norm": 0.7228829739431897,
      "learning_rate": 8.436339121099413e-07,
      "loss": 0.6954,
      "step": 1190
    },
    {
      "epoch": 0.8846936886914677,
      "grad_norm": 0.7661785054153895,
      "learning_rate": 7.924276315566171e-07,
      "loss": 0.7202,
      "step": 1195
    },
    {
      "epoch": 0.8883953359244864,
      "grad_norm": 0.7378588721148888,
      "learning_rate": 7.427603073110967e-07,
      "loss": 0.7492,
      "step": 1200
    },
    {
      "epoch": 0.8883953359244864,
      "eval_loss": 0.7441431283950806,
      "eval_runtime": 14.2681,
      "eval_samples_per_second": 8.971,
      "eval_steps_per_second": 2.243,
      "step": 1200
    },
    {
      "epoch": 0.8920969831575051,
      "grad_norm": 0.7694484037001753,
      "learning_rate": 6.946402407843156e-07,
      "loss": 0.7268,
      "step": 1205
    },
    {
      "epoch": 0.8957986303905238,
      "grad_norm": 0.8126254616112653,
      "learning_rate": 6.480754747781037e-07,
      "loss": 0.7146,
      "step": 1210
    },
    {
      "epoch": 0.8995002776235425,
      "grad_norm": 0.7650595723224202,
      "learning_rate": 6.030737921409169e-07,
      "loss": 0.7182,
      "step": 1215
    },
    {
      "epoch": 0.9032019248565611,
      "grad_norm": 0.7431305102510244,
      "learning_rate": 5.596427144670002e-07,
      "loss": 0.6765,
      "step": 1220
    },
    {
      "epoch": 0.9069035720895798,
      "grad_norm": 0.7841618982958003,
      "learning_rate": 5.177895008392353e-07,
      "loss": 0.7341,
      "step": 1225
    },
    {
      "epoch": 0.9106052193225985,
      "grad_norm": 0.7773777505162962,
      "learning_rate": 4.775211466158469e-07,
      "loss": 0.7582,
      "step": 1230
    },
    {
      "epoch": 0.9143068665556172,
      "grad_norm": 0.7946948428274566,
      "learning_rate": 4.388443822612043e-07,
      "loss": 0.7332,
      "step": 1235
    },
    {
      "epoch": 0.9180085137886359,
      "grad_norm": 0.8151810800637177,
      "learning_rate": 4.017656722208807e-07,
      "loss": 0.7366,
      "step": 1240
    },
    {
      "epoch": 0.9217101610216546,
      "grad_norm": 0.9027905284832103,
      "learning_rate": 3.662912138411967e-07,
      "loss": 0.7396,
      "step": 1245
    },
    {
      "epoch": 0.9254118082546733,
      "grad_norm": 0.725716815368628,
      "learning_rate": 3.3242693633337986e-07,
      "loss": 0.7226,
      "step": 1250
    },
    {
      "epoch": 0.929113455487692,
      "grad_norm": 0.7771745605835194,
      "learning_rate": 3.001784997825652e-07,
      "loss": 0.7033,
      "step": 1255
    },
    {
      "epoch": 0.9328151027207107,
      "grad_norm": 0.7151872727161339,
      "learning_rate": 2.6955129420176193e-07,
      "loss": 0.7266,
      "step": 1260
    },
    {
      "epoch": 0.9365167499537294,
      "grad_norm": 0.765823657800921,
      "learning_rate": 2.405504386309643e-07,
      "loss": 0.7362,
      "step": 1265
    },
    {
      "epoch": 0.9402183971867482,
      "grad_norm": 0.8218718960756801,
      "learning_rate": 2.1318078028155886e-07,
      "loss": 0.7237,
      "step": 1270
    },
    {
      "epoch": 0.9439200444197668,
      "grad_norm": 0.8337523698451327,
      "learning_rate": 1.874468937261531e-07,
      "loss": 0.7303,
      "step": 1275
    },
    {
      "epoch": 0.9476216916527855,
      "grad_norm": 0.7695117064285074,
      "learning_rate": 1.6335308013398888e-07,
      "loss": 0.7093,
      "step": 1280
    },
    {
      "epoch": 0.9513233388858042,
      "grad_norm": 0.8218511606318669,
      "learning_rate": 1.409033665520354e-07,
      "loss": 0.7018,
      "step": 1285
    },
    {
      "epoch": 0.9550249861188229,
      "grad_norm": 0.707438521727017,
      "learning_rate": 1.201015052319099e-07,
      "loss": 0.7045,
      "step": 1290
    },
    {
      "epoch": 0.9587266333518416,
      "grad_norm": 0.7380126202252467,
      "learning_rate": 1.0095097300273026e-07,
      "loss": 0.6796,
      "step": 1295
    },
    {
      "epoch": 0.9624282805848603,
      "grad_norm": 0.7403710844221856,
      "learning_rate": 8.345497068998897e-08,
      "loss": 0.7355,
      "step": 1300
    },
    {
      "epoch": 0.9624282805848603,
      "eval_loss": 0.7434929609298706,
      "eval_runtime": 14.2784,
      "eval_samples_per_second": 8.965,
      "eval_steps_per_second": 2.241,
      "step": 1300
    },
    {
      "epoch": 0.966129927817879,
      "grad_norm": 0.7352871953677099,
      "learning_rate": 6.761642258056977e-08,
      "loss": 0.7367,
      "step": 1305
    },
    {
      "epoch": 0.9698315750508977,
      "grad_norm": 0.7627715822652762,
      "learning_rate": 5.3437975933985366e-08,
      "loss": 0.7092,
      "step": 1310
    },
    {
      "epoch": 0.9735332222839164,
      "grad_norm": 0.7854895643723886,
      "learning_rate": 4.0922000539906914e-08,
      "loss": 0.6743,
      "step": 1315
    },
    {
      "epoch": 0.9772348695169351,
      "grad_norm": 0.7610445137278696,
      "learning_rate": 3.0070588322079765e-08,
      "loss": 0.7196,
      "step": 1320
    },
    {
      "epoch": 0.9809365167499537,
      "grad_norm": 0.7513309573352382,
      "learning_rate": 2.088555298867978e-08,
      "loss": 0.7254,
      "step": 1325
    },
    {
      "epoch": 0.9846381639829724,
      "grad_norm": 0.7508563384954318,
      "learning_rate": 1.3368429729168075e-08,
      "loss": 0.7286,
      "step": 1330
    },
    {
      "epoch": 0.9883398112159911,
      "grad_norm": 0.7778870969688703,
      "learning_rate": 7.520474957699586e-09,
      "loss": 0.7453,
      "step": 1335
    },
    {
      "epoch": 0.9920414584490098,
      "grad_norm": 0.8791645230895945,
      "learning_rate": 3.3426661031255024e-09,
      "loss": 0.7285,
      "step": 1340
    },
    {
      "epoch": 0.9957431056820285,
      "grad_norm": 0.8318565179722118,
      "learning_rate": 8.357014456272794e-10,
      "loss": 0.6923,
      "step": 1345
    },
    {
      "epoch": 0.9994447529150472,
      "grad_norm": 0.7524398232226177,
      "learning_rate": 0.0,
      "loss": 0.7212,
      "step": 1350
    },
    {
      "epoch": 0.9994447529150472,
      "step": 1350,
      "total_flos": 76902580617216.0,
      "train_loss": 0.7593590865311799,
      "train_runtime": 10140.5208,
      "train_samples_per_second": 2.131,
      "train_steps_per_second": 0.133
    }
  ],
  "logging_steps": 5,
  "max_steps": 1350,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": false,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 76902580617216.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}