{
  "best_global_step": 100,
  "best_metric": 0.01389834564179182,
  "best_model_checkpoint": "/teamspace/studios/this_studio/DATN/output/medgemma_finetuned/checkpoint-100",
  "epoch": 2.3346303501945527,
  "eval_steps": 100,
  "global_step": 600,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.019455252918287938,
      "grad_norm": 3.0237326622009277,
      "learning_rate": 1.777777777777778e-06,
      "loss": 0.8809,
      "step": 5
    },
    {
      "epoch": 0.038910505836575876,
      "grad_norm": 2.4512810707092285,
      "learning_rate": 4e-06,
      "loss": 0.8569,
      "step": 10
    },
    {
      "epoch": 0.058365758754863814,
      "grad_norm": 1.5967055559158325,
      "learning_rate": 6.222222222222222e-06,
      "loss": 0.7725,
      "step": 15
    },
    {
      "epoch": 0.07782101167315175,
      "grad_norm": 1.2497001886367798,
      "learning_rate": 8.444444444444446e-06,
      "loss": 0.6516,
      "step": 20
    },
    {
      "epoch": 0.09727626459143969,
      "grad_norm": 1.2455090284347534,
      "learning_rate": 1.0666666666666667e-05,
      "loss": 0.5238,
      "step": 25
    },
    {
      "epoch": 0.11673151750972763,
      "grad_norm": 1.361525535583496,
      "learning_rate": 1.2888888888888889e-05,
      "loss": 0.3777,
      "step": 30
    },
    {
      "epoch": 0.13618677042801555,
      "grad_norm": 1.6556775569915771,
      "learning_rate": 1.511111111111111e-05,
      "loss": 0.2116,
      "step": 35
    },
    {
      "epoch": 0.1556420233463035,
      "grad_norm": 0.6078555583953857,
      "learning_rate": 1.7333333333333332e-05,
      "loss": 0.0794,
      "step": 40
    },
    {
      "epoch": 0.17509727626459143,
      "grad_norm": 0.31556975841522217,
      "learning_rate": 1.9555555555555557e-05,
      "loss": 0.0343,
      "step": 45
    },
    {
      "epoch": 0.19455252918287938,
      "grad_norm": 0.23063282668590546,
      "learning_rate": 2.177777777777778e-05,
      "loss": 0.0227,
      "step": 50
    },
    {
      "epoch": 0.2140077821011673,
      "grad_norm": 0.10897089540958405,
      "learning_rate": 2.4e-05,
      "loss": 0.0159,
      "step": 55
    },
    {
      "epoch": 0.23346303501945526,
      "grad_norm": 0.08667729049921036,
      "learning_rate": 2.6222222222222226e-05,
      "loss": 0.0155,
      "step": 60
    },
    {
      "epoch": 0.2529182879377432,
      "grad_norm": 0.07056345790624619,
      "learning_rate": 2.8444444444444447e-05,
      "loss": 0.0133,
      "step": 65
    },
    {
      "epoch": 0.2723735408560311,
      "grad_norm": 0.119380883872509,
      "learning_rate": 3.066666666666666e-05,
      "loss": 0.0113,
      "step": 70
    },
    {
      "epoch": 0.2918287937743191,
      "grad_norm": 0.10328345745801926,
      "learning_rate": 3.288888888888889e-05,
      "loss": 0.0074,
      "step": 75
    },
    {
      "epoch": 0.311284046692607,
      "grad_norm": 0.08840714395046234,
      "learning_rate": 3.511111111111111e-05,
      "loss": 0.0069,
      "step": 80
    },
    {
      "epoch": 0.33073929961089493,
      "grad_norm": 0.1998119205236435,
      "learning_rate": 3.733333333333334e-05,
      "loss": 0.0069,
      "step": 85
    },
    {
      "epoch": 0.35019455252918286,
      "grad_norm": 0.08085718750953674,
      "learning_rate": 3.9555555555555556e-05,
      "loss": 0.0072,
      "step": 90
    },
    {
      "epoch": 0.36964980544747084,
      "grad_norm": 0.10597972571849823,
      "learning_rate": 4.177777777777778e-05,
      "loss": 0.0072,
      "step": 95
    },
    {
      "epoch": 0.38910505836575876,
      "grad_norm": 0.04654397815465927,
      "learning_rate": 4.4e-05,
      "loss": 0.0065,
      "step": 100
    },
    {
      "epoch": 0.38910505836575876,
      "eval_loss": 0.01389834564179182,
      "eval_runtime": 174.9649,
      "eval_samples_per_second": 2.915,
      "eval_steps_per_second": 0.732,
      "step": 100
    },
    {
      "epoch": 0.4085603112840467,
      "grad_norm": 0.08657707273960114,
      "learning_rate": 4.6222222222222224e-05,
      "loss": 0.0071,
      "step": 105
    },
    {
      "epoch": 0.4280155642023346,
      "grad_norm": 0.09846911579370499,
      "learning_rate": 4.844444444444445e-05,
      "loss": 0.0069,
      "step": 110
    },
    {
      "epoch": 0.4474708171206226,
      "grad_norm": 0.07289458811283112,
      "learning_rate": 5.066666666666667e-05,
      "loss": 0.0072,
      "step": 115
    },
    {
      "epoch": 0.4669260700389105,
      "grad_norm": 0.04821142926812172,
      "learning_rate": 5.288888888888889e-05,
      "loss": 0.0063,
      "step": 120
    },
    {
      "epoch": 0.48638132295719844,
      "grad_norm": 0.06011726334691048,
      "learning_rate": 5.511111111111111e-05,
      "loss": 0.0068,
      "step": 125
    },
    {
      "epoch": 0.5058365758754864,
      "grad_norm": 0.05461547151207924,
      "learning_rate": 5.7333333333333336e-05,
      "loss": 0.0065,
      "step": 130
    },
    {
      "epoch": 0.5252918287937743,
      "grad_norm": 0.05819641426205635,
      "learning_rate": 5.9555555555555554e-05,
      "loss": 0.0061,
      "step": 135
    },
    {
      "epoch": 0.5447470817120622,
      "grad_norm": 0.048746656626462936,
      "learning_rate": 6.177777777777779e-05,
      "loss": 0.0065,
      "step": 140
    },
    {
      "epoch": 0.5642023346303502,
      "grad_norm": 0.04917529225349426,
      "learning_rate": 6.4e-05,
      "loss": 0.0066,
      "step": 145
    },
    {
      "epoch": 0.5836575875486382,
      "grad_norm": 0.062472034245729446,
      "learning_rate": 6.622222222222222e-05,
      "loss": 0.0067,
      "step": 150
    },
    {
      "epoch": 0.603112840466926,
      "grad_norm": 0.07727159559726715,
      "learning_rate": 6.844444444444445e-05,
      "loss": 0.0068,
      "step": 155
    },
    {
      "epoch": 0.622568093385214,
      "grad_norm": 0.036719705909490585,
      "learning_rate": 7.066666666666667e-05,
      "loss": 0.0062,
      "step": 160
    },
    {
      "epoch": 0.642023346303502,
      "grad_norm": 0.04453688859939575,
      "learning_rate": 7.288888888888888e-05,
      "loss": 0.0064,
      "step": 165
    },
    {
      "epoch": 0.6614785992217899,
      "grad_norm": 0.06014733761548996,
      "learning_rate": 7.511111111111111e-05,
      "loss": 0.0068,
      "step": 170
    },
    {
      "epoch": 0.6809338521400778,
      "grad_norm": 0.06567176431417465,
      "learning_rate": 7.733333333333333e-05,
      "loss": 0.0069,
      "step": 175
    },
    {
      "epoch": 0.7003891050583657,
      "grad_norm": 0.058225322514772415,
      "learning_rate": 7.955555555555556e-05,
      "loss": 0.007,
      "step": 180
    },
    {
      "epoch": 0.7198443579766537,
      "grad_norm": 0.04187316447496414,
      "learning_rate": 8.177777777777778e-05,
      "loss": 0.0065,
      "step": 185
    },
    {
      "epoch": 0.7392996108949417,
      "grad_norm": 0.03578794747591019,
      "learning_rate": 8.4e-05,
      "loss": 0.0061,
      "step": 190
    },
    {
      "epoch": 0.7587548638132295,
      "grad_norm": 0.0358467772603035,
      "learning_rate": 8.622222222222223e-05,
      "loss": 0.0065,
      "step": 195
    },
    {
      "epoch": 0.7782101167315175,
      "grad_norm": 0.027109306305646896,
      "learning_rate": 8.844444444444445e-05,
      "loss": 0.0063,
      "step": 200
    },
    {
      "epoch": 0.7782101167315175,
      "eval_loss": 0.01398194208741188,
      "eval_runtime": 152.8483,
      "eval_samples_per_second": 3.337,
      "eval_steps_per_second": 0.837,
      "step": 200
    },
    {
      "epoch": 0.7976653696498055,
      "grad_norm": 0.031359098851680756,
      "learning_rate": 9.066666666666667e-05,
      "loss": 0.0061,
      "step": 205
    },
    {
      "epoch": 0.8171206225680934,
      "grad_norm": 0.041419435292482376,
      "learning_rate": 9.288888888888888e-05,
      "loss": 0.0062,
      "step": 210
    },
    {
      "epoch": 0.8365758754863813,
      "grad_norm": 0.03226594254374504,
      "learning_rate": 9.511111111111112e-05,
      "loss": 0.0059,
      "step": 215
    },
    {
      "epoch": 0.8560311284046692,
      "grad_norm": 0.07033108919858932,
      "learning_rate": 9.733333333333333e-05,
      "loss": 0.0061,
      "step": 220
    },
    {
      "epoch": 0.8754863813229572,
      "grad_norm": 0.05067949742078781,
      "learning_rate": 9.955555555555556e-05,
      "loss": 0.0059,
      "step": 225
    },
    {
      "epoch": 0.8949416342412452,
      "grad_norm": 0.025263365358114243,
      "learning_rate": 0.00010177777777777777,
      "loss": 0.0059,
      "step": 230
    },
    {
      "epoch": 0.914396887159533,
      "grad_norm": 0.03773004561662674,
      "learning_rate": 0.00010400000000000001,
      "loss": 0.0062,
      "step": 235
    },
    {
      "epoch": 0.933852140077821,
      "grad_norm": 0.03331644833087921,
      "learning_rate": 0.00010622222222222222,
      "loss": 0.006,
      "step": 240
    },
    {
      "epoch": 0.953307392996109,
      "grad_norm": 0.03881971910595894,
      "learning_rate": 0.00010844444444444444,
      "loss": 0.0059,
      "step": 245
    },
    {
      "epoch": 0.9727626459143969,
      "grad_norm": 0.0403914675116539,
      "learning_rate": 0.00011066666666666668,
      "loss": 0.0062,
      "step": 250
    },
    {
      "epoch": 0.9922178988326849,
      "grad_norm": 0.03234079107642174,
      "learning_rate": 0.0001128888888888889,
      "loss": 0.0064,
      "step": 255
    },
    {
      "epoch": 1.0116731517509727,
      "grad_norm": 0.0188930481672287,
      "learning_rate": 0.00011511111111111112,
      "loss": 0.0061,
      "step": 260
    },
    {
      "epoch": 1.0311284046692606,
      "grad_norm": 0.02896421030163765,
      "learning_rate": 0.00011733333333333333,
      "loss": 0.0059,
      "step": 265
    },
    {
      "epoch": 1.0505836575875487,
      "grad_norm": 0.038550637662410736,
      "learning_rate": 0.00011955555555555557,
      "loss": 0.0066,
      "step": 270
    },
    {
      "epoch": 1.0700389105058365,
      "grad_norm": 0.024153664708137512,
      "learning_rate": 0.00011999797360750958,
      "loss": 0.0062,
      "step": 275
    },
    {
      "epoch": 1.0894941634241244,
      "grad_norm": 0.027283893898129463,
      "learning_rate": 0.00011998974162260325,
      "loss": 0.006,
      "step": 280
    },
    {
      "epoch": 1.1089494163424125,
      "grad_norm": 0.035498056560754776,
      "learning_rate": 0.00011997517826389341,
      "loss": 0.0064,
      "step": 285
    },
    {
      "epoch": 1.1284046692607004,
      "grad_norm": 0.021591784432530403,
      "learning_rate": 0.00011995428506841069,
      "loss": 0.0061,
      "step": 290
    },
    {
      "epoch": 1.1478599221789882,
      "grad_norm": 0.016961926594376564,
      "learning_rate": 0.00011992706424124257,
      "loss": 0.006,
      "step": 295
    },
    {
      "epoch": 1.1673151750972763,
      "grad_norm": 0.03020872175693512,
      "learning_rate": 0.00011989351865530078,
      "loss": 0.0063,
      "step": 300
    },
    {
      "epoch": 1.1673151750972763,
      "eval_loss": 0.014270401559770107,
      "eval_runtime": 152.9753,
      "eval_samples_per_second": 3.334,
      "eval_steps_per_second": 0.837,
      "step": 300
    },
    {
      "epoch": 1.1867704280155642,
      "grad_norm": 0.025152679532766342,
      "learning_rate": 0.00011985365185101807,
      "loss": 0.0063,
      "step": 305
    },
    {
      "epoch": 1.206225680933852,
      "grad_norm": 0.030001649633049965,
      "learning_rate": 0.00011980746803597448,
      "loss": 0.0063,
      "step": 310
    },
    {
      "epoch": 1.2256809338521402,
      "grad_norm": 0.031324416399002075,
      "learning_rate": 0.0001197549720844533,
      "loss": 0.0065,
      "step": 315
    },
    {
      "epoch": 1.245136186770428,
      "grad_norm": 0.030990222468972206,
      "learning_rate": 0.00011969616953692672,
      "loss": 0.0066,
      "step": 320
    },
    {
      "epoch": 1.264591439688716,
      "grad_norm": 0.02283373288810253,
      "learning_rate": 0.00011963106659947091,
      "loss": 0.0062,
      "step": 325
    },
    {
      "epoch": 1.2840466926070038,
      "grad_norm": 0.027848919853568077,
      "learning_rate": 0.00011955967014311121,
      "loss": 0.0061,
      "step": 330
    },
    {
      "epoch": 1.3035019455252919,
      "grad_norm": 0.0324862040579319,
      "learning_rate": 0.00011948198770309682,
      "loss": 0.0064,
      "step": 335
    },
    {
      "epoch": 1.3229571984435797,
      "grad_norm": 0.021669326350092888,
      "learning_rate": 0.00011939802747810558,
      "loss": 0.0061,
      "step": 340
    },
    {
      "epoch": 1.3424124513618678,
      "grad_norm": 0.023976098746061325,
      "learning_rate": 0.0001193077983293787,
      "loss": 0.006,
      "step": 345
    },
    {
      "epoch": 1.3618677042801557,
      "grad_norm": 0.022651424631476402,
      "learning_rate": 0.00011921130977978545,
      "loss": 0.0062,
      "step": 350
    },
    {
      "epoch": 1.3813229571984436,
      "grad_norm": 0.021479440852999687,
      "learning_rate": 0.0001191085720128182,
      "loss": 0.0062,
      "step": 355
    },
    {
      "epoch": 1.4007782101167314,
      "grad_norm": 0.1814209520816803,
      "learning_rate": 0.00011899959587151756,
      "loss": 0.0061,
      "step": 360
    },
    {
      "epoch": 1.4202334630350195,
      "grad_norm": 1.4026191234588623,
      "learning_rate": 0.00011888439285732813,
      "loss": 0.0261,
      "step": 365
    },
    {
      "epoch": 1.4396887159533074,
      "grad_norm": 1.3291703462600708,
      "learning_rate": 0.00011876297512888443,
      "loss": 0.0267,
      "step": 370
    },
    {
      "epoch": 1.4591439688715953,
      "grad_norm": 1.7693599462509155,
      "learning_rate": 0.00011863535550072783,
      "loss": 0.0212,
      "step": 375
    },
    {
      "epoch": 1.4785992217898833,
      "grad_norm": 0.9990978240966797,
      "learning_rate": 0.00011850154744195403,
      "loss": 0.0218,
      "step": 380
    },
    {
      "epoch": 1.4980544747081712,
      "grad_norm": 0.29475995898246765,
      "learning_rate": 0.0001183615650747915,
      "loss": 0.0117,
      "step": 385
    },
    {
      "epoch": 1.517509727626459,
      "grad_norm": 0.04740586131811142,
      "learning_rate": 0.00011821542317311106,
      "loss": 0.0084,
      "step": 390
    },
    {
      "epoch": 1.536964980544747,
      "grad_norm": 0.04320238158106804,
      "learning_rate": 0.00011806313716086658,
      "loss": 0.0067,
      "step": 395
    },
    {
      "epoch": 1.556420233463035,
      "grad_norm": 0.05465610325336456,
      "learning_rate": 0.00011790472311046715,
      "loss": 0.0065,
      "step": 400
    },
    {
      "epoch": 1.556420233463035,
      "eval_loss": 0.07703334093093872,
      "eval_runtime": 152.4522,
      "eval_samples_per_second": 3.345,
      "eval_steps_per_second": 0.84,
      "step": 400
    },
    {
      "epoch": 1.575875486381323,
      "grad_norm": 0.027148762717843056,
      "learning_rate": 0.00011774019774108079,
      "loss": 0.0063,
      "step": 405
    },
    {
      "epoch": 1.595330739299611,
      "grad_norm": 0.03351860120892525,
      "learning_rate": 0.00011756957841686985,
      "loss": 0.0067,
      "step": 410
    },
    {
      "epoch": 1.6147859922178989,
      "grad_norm": 0.023354800418019295,
      "learning_rate": 0.00011739288314515842,
      "loss": 0.0065,
      "step": 415
    },
    {
      "epoch": 1.6342412451361867,
      "grad_norm": 0.030715681612491608,
      "learning_rate": 0.00011721013057453183,
      "loss": 0.0061,
      "step": 420
    },
    {
      "epoch": 1.6536964980544746,
      "grad_norm": 0.01966220512986183,
      "learning_rate": 0.0001170213399928684,
      "loss": 0.0062,
      "step": 425
    },
    {
      "epoch": 1.6731517509727627,
      "grad_norm": 0.02200680784881115,
      "learning_rate": 0.00011682653132530386,
      "loss": 0.0061,
      "step": 430
    },
    {
      "epoch": 1.6926070038910506,
      "grad_norm": 0.026331638917326927,
      "learning_rate": 0.00011662572513212835,
      "loss": 0.0064,
      "step": 435
    },
    {
      "epoch": 1.7120622568093387,
      "grad_norm": 0.026765212416648865,
      "learning_rate": 0.00011641894260661656,
      "loss": 0.006,
      "step": 440
    },
    {
      "epoch": 1.7315175097276265,
      "grad_norm": 0.019284185022115707,
      "learning_rate": 0.00011620620557279086,
      "loss": 0.0059,
      "step": 445
    },
    {
      "epoch": 1.7509727626459144,
      "grad_norm": 0.1872239112854004,
      "learning_rate": 0.0001159875364831181,
      "loss": 0.0063,
      "step": 450
    },
    {
      "epoch": 1.7704280155642023,
      "grad_norm": 0.027414845302700996,
      "learning_rate": 0.00011576295841613982,
      "loss": 0.0064,
      "step": 455
    },
    {
      "epoch": 1.7898832684824901,
      "grad_norm": 0.03878985717892647,
      "learning_rate": 0.00011553249507403663,
      "loss": 0.0063,
      "step": 460
    },
    {
      "epoch": 1.8093385214007782,
      "grad_norm": 0.023719098418951035,
      "learning_rate": 0.00011529617078012663,
      "loss": 0.0061,
      "step": 465
    },
    {
      "epoch": 1.8287937743190663,
      "grad_norm": 0.03357016295194626,
      "learning_rate": 0.00011505401047629824,
      "loss": 0.0065,
      "step": 470
    },
    {
      "epoch": 1.8482490272373542,
      "grad_norm": 0.019986068829894066,
      "learning_rate": 0.00011480603972037789,
      "loss": 0.006,
      "step": 475
    },
    {
      "epoch": 1.867704280155642,
      "grad_norm": 0.028719456866383553,
      "learning_rate": 0.00011455228468343263,
      "loss": 0.0062,
      "step": 480
    },
    {
      "epoch": 1.88715953307393,
      "grad_norm": 0.023534944280982018,
      "learning_rate": 0.00011429277214700788,
      "loss": 0.0061,
      "step": 485
    },
    {
      "epoch": 1.9066147859922178,
      "grad_norm": 0.022944800555706024,
      "learning_rate": 0.00011402752950030108,
      "loss": 0.006,
      "step": 490
    },
    {
      "epoch": 1.9260700389105059,
      "grad_norm": 0.03216244652867317,
      "learning_rate": 0.00011375658473727081,
      "loss": 0.0061,
      "step": 495
    },
    {
      "epoch": 1.9455252918287937,
      "grad_norm": 0.030108436942100525,
      "learning_rate": 0.00011347996645368247,
      "loss": 0.0063,
      "step": 500
    },
    {
      "epoch": 1.9455252918287937,
      "eval_loss": 0.051376283168792725,
      "eval_runtime": 152.6704,
      "eval_samples_per_second": 3.341,
      "eval_steps_per_second": 0.838,
      "step": 500
    },
    {
      "epoch": 1.9649805447470818,
      "grad_norm": 0.029736053198575974,
      "learning_rate": 0.00011319770384409007,
      "loss": 0.006,
      "step": 505
    },
    {
      "epoch": 1.9844357976653697,
      "grad_norm": 0.016025669872760773,
      "learning_rate": 0.00011290982669875517,
      "loss": 0.0059,
      "step": 510
    },
    {
      "epoch": 2.0038910505836576,
      "grad_norm": 0.0349261574447155,
      "learning_rate": 0.00011261636540050267,
      "loss": 0.0062,
      "step": 515
    },
    {
      "epoch": 2.0233463035019454,
      "grad_norm": 0.02828766591846943,
      "learning_rate": 0.00011231735092151423,
      "loss": 0.006,
      "step": 520
    },
    {
      "epoch": 2.0428015564202333,
      "grad_norm": 0.015425251796841621,
      "learning_rate": 0.00011201281482005949,
      "loss": 0.0059,
      "step": 525
    },
    {
      "epoch": 2.062256809338521,
      "grad_norm": 0.018080057576298714,
      "learning_rate": 0.00011170278923716524,
      "loss": 0.0059,
      "step": 530
    },
    {
      "epoch": 2.0817120622568095,
      "grad_norm": 0.027067864313721657,
      "learning_rate": 0.00011138730689322337,
      "loss": 0.0058,
      "step": 535
    },
    {
      "epoch": 2.1011673151750974,
      "grad_norm": 0.023079361766576767,
      "learning_rate": 0.00011106640108453743,
      "loss": 0.0063,
      "step": 540
    },
    {
      "epoch": 2.1206225680933852,
      "grad_norm": 0.019412392750382423,
      "learning_rate": 0.00011074010567980857,
      "loss": 0.0058,
      "step": 545
    },
    {
      "epoch": 2.140077821011673,
      "grad_norm": 0.017525188624858856,
      "learning_rate": 0.00011040845511656096,
      "loss": 0.0058,
      "step": 550
    },
    {
      "epoch": 2.159533073929961,
      "grad_norm": 0.023526394739747047,
      "learning_rate": 0.00011007148439750726,
      "loss": 0.0061,
      "step": 555
    },
    {
      "epoch": 2.178988326848249,
      "grad_norm": 0.01878705434501171,
      "learning_rate": 0.00010972922908685439,
      "loss": 0.0059,
      "step": 560
    },
    {
      "epoch": 2.198443579766537,
      "grad_norm": 0.016807595267891884,
      "learning_rate": 0.00010938172530655,
      "loss": 0.0058,
      "step": 565
    },
    {
      "epoch": 2.217898832684825,
      "grad_norm": 0.013794473372399807,
      "learning_rate": 0.00010902900973247018,
      "loss": 0.0058,
      "step": 570
    },
    {
      "epoch": 2.237354085603113,
      "grad_norm": 0.018339036032557487,
      "learning_rate": 0.0001086711195905487,
      "loss": 0.0061,
      "step": 575
    },
    {
      "epoch": 2.2568093385214008,
      "grad_norm": 0.0149207953363657,
      "learning_rate": 0.00010830809265284802,
      "loss": 0.0062,
      "step": 580
    },
    {
      "epoch": 2.2762645914396886,
      "grad_norm": 0.01599634811282158,
      "learning_rate": 0.00010793996723357291,
      "loss": 0.0057,
      "step": 585
    },
    {
      "epoch": 2.2957198443579765,
      "grad_norm": 0.027164485305547714,
      "learning_rate": 0.00010756678218502662,
      "loss": 0.0061,
      "step": 590
    },
    {
      "epoch": 2.3151750972762644,
      "grad_norm": 0.03339695930480957,
      "learning_rate": 0.0001071885768935105,
      "loss": 0.0064,
      "step": 595
    },
    {
      "epoch": 2.3346303501945527,
      "grad_norm": 0.017755091190338135,
      "learning_rate": 0.00010680539127516707,
      "loss": 0.006,
      "step": 600
    },
    {
      "epoch": 2.3346303501945527,
      "eval_loss": 0.04756447672843933,
      "eval_runtime": 152.6843,
      "eval_samples_per_second": 3.34,
      "eval_steps_per_second": 0.838,
      "step": 600
    }
  ],
  "logging_steps": 5,
  "max_steps": 1799,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 7,
  "save_steps": 100,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 30,
        "early_stopping_threshold": 0.001
      },
      "attributes": {
        "early_stopping_patience_counter": 5
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 4.0851838292772096e+17,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}
|
|