{
  "best_global_step": 100,
  "best_metric": 0.01389834564179182,
  "best_model_checkpoint": "/teamspace/studios/this_studio/DATN/output/medgemma_finetuned/checkpoint-100",
  "epoch": 1.1673151750972763,
  "eval_steps": 100,
  "global_step": 300,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.019455252918287938,
      "grad_norm": 3.0237326622009277,
      "learning_rate": 1.777777777777778e-06,
      "loss": 0.8809,
      "step": 5
    },
    {
      "epoch": 0.038910505836575876,
      "grad_norm": 2.4512810707092285,
      "learning_rate": 4e-06,
      "loss": 0.8569,
      "step": 10
    },
    {
      "epoch": 0.058365758754863814,
      "grad_norm": 1.5967055559158325,
      "learning_rate": 6.222222222222222e-06,
      "loss": 0.7725,
      "step": 15
    },
    {
      "epoch": 0.07782101167315175,
      "grad_norm": 1.2497001886367798,
      "learning_rate": 8.444444444444446e-06,
      "loss": 0.6516,
      "step": 20
    },
    {
      "epoch": 0.09727626459143969,
      "grad_norm": 1.2455090284347534,
      "learning_rate": 1.0666666666666667e-05,
      "loss": 0.5238,
      "step": 25
    },
    {
      "epoch": 0.11673151750972763,
      "grad_norm": 1.361525535583496,
      "learning_rate": 1.2888888888888889e-05,
      "loss": 0.3777,
      "step": 30
    },
    {
      "epoch": 0.13618677042801555,
      "grad_norm": 1.6556775569915771,
      "learning_rate": 1.511111111111111e-05,
      "loss": 0.2116,
      "step": 35
    },
    {
      "epoch": 0.1556420233463035,
      "grad_norm": 0.6078555583953857,
      "learning_rate": 1.7333333333333332e-05,
      "loss": 0.0794,
      "step": 40
    },
    {
      "epoch": 0.17509727626459143,
      "grad_norm": 0.31556975841522217,
      "learning_rate": 1.9555555555555557e-05,
      "loss": 0.0343,
      "step": 45
    },
    {
      "epoch": 0.19455252918287938,
      "grad_norm": 0.23063282668590546,
      "learning_rate": 2.177777777777778e-05,
      "loss": 0.0227,
      "step": 50
    },
    {
      "epoch": 0.2140077821011673,
      "grad_norm": 0.10897089540958405,
      "learning_rate": 2.4e-05,
      "loss": 0.0159,
      "step": 55
    },
    {
      "epoch": 0.23346303501945526,
      "grad_norm": 0.08667729049921036,
      "learning_rate": 2.6222222222222226e-05,
      "loss": 0.0155,
      "step": 60
    },
    {
      "epoch": 0.2529182879377432,
      "grad_norm": 0.07056345790624619,
      "learning_rate": 2.8444444444444447e-05,
      "loss": 0.0133,
      "step": 65
    },
    {
      "epoch": 0.2723735408560311,
      "grad_norm": 0.119380883872509,
      "learning_rate": 3.066666666666666e-05,
      "loss": 0.0113,
      "step": 70
    },
    {
      "epoch": 0.2918287937743191,
      "grad_norm": 0.10328345745801926,
      "learning_rate": 3.288888888888889e-05,
      "loss": 0.0074,
      "step": 75
    },
    {
      "epoch": 0.311284046692607,
      "grad_norm": 0.08840714395046234,
      "learning_rate": 3.511111111111111e-05,
      "loss": 0.0069,
      "step": 80
    },
    {
      "epoch": 0.33073929961089493,
      "grad_norm": 0.1998119205236435,
      "learning_rate": 3.733333333333334e-05,
      "loss": 0.0069,
      "step": 85
    },
    {
      "epoch": 0.35019455252918286,
      "grad_norm": 0.08085718750953674,
      "learning_rate": 3.9555555555555556e-05,
      "loss": 0.0072,
      "step": 90
    },
    {
      "epoch": 0.36964980544747084,
      "grad_norm": 0.10597972571849823,
      "learning_rate": 4.177777777777778e-05,
      "loss": 0.0072,
      "step": 95
    },
    {
      "epoch": 0.38910505836575876,
      "grad_norm": 0.04654397815465927,
      "learning_rate": 4.4e-05,
      "loss": 0.0065,
      "step": 100
    },
    {
      "epoch": 0.38910505836575876,
      "eval_loss": 0.01389834564179182,
      "eval_runtime": 174.9649,
      "eval_samples_per_second": 2.915,
      "eval_steps_per_second": 0.732,
      "step": 100
    },
    {
      "epoch": 0.4085603112840467,
      "grad_norm": 0.08657707273960114,
      "learning_rate": 4.6222222222222224e-05,
      "loss": 0.0071,
      "step": 105
    },
    {
      "epoch": 0.4280155642023346,
      "grad_norm": 0.09846911579370499,
      "learning_rate": 4.844444444444445e-05,
      "loss": 0.0069,
      "step": 110
    },
    {
      "epoch": 0.4474708171206226,
      "grad_norm": 0.07289458811283112,
      "learning_rate": 5.066666666666667e-05,
      "loss": 0.0072,
      "step": 115
    },
    {
      "epoch": 0.4669260700389105,
      "grad_norm": 0.04821142926812172,
      "learning_rate": 5.288888888888889e-05,
      "loss": 0.0063,
      "step": 120
    },
    {
      "epoch": 0.48638132295719844,
      "grad_norm": 0.06011726334691048,
      "learning_rate": 5.511111111111111e-05,
      "loss": 0.0068,
      "step": 125
    },
    {
      "epoch": 0.5058365758754864,
      "grad_norm": 0.05461547151207924,
      "learning_rate": 5.7333333333333336e-05,
      "loss": 0.0065,
      "step": 130
    },
    {
      "epoch": 0.5252918287937743,
      "grad_norm": 0.05819641426205635,
      "learning_rate": 5.9555555555555554e-05,
      "loss": 0.0061,
      "step": 135
    },
    {
      "epoch": 0.5447470817120622,
      "grad_norm": 0.048746656626462936,
      "learning_rate": 6.177777777777779e-05,
      "loss": 0.0065,
      "step": 140
    },
    {
      "epoch": 0.5642023346303502,
      "grad_norm": 0.04917529225349426,
      "learning_rate": 6.4e-05,
      "loss": 0.0066,
      "step": 145
    },
    {
      "epoch": 0.5836575875486382,
      "grad_norm": 0.062472034245729446,
      "learning_rate": 6.622222222222222e-05,
      "loss": 0.0067,
      "step": 150
    },
    {
      "epoch": 0.603112840466926,
      "grad_norm": 0.07727159559726715,
      "learning_rate": 6.844444444444445e-05,
      "loss": 0.0068,
      "step": 155
    },
    {
      "epoch": 0.622568093385214,
      "grad_norm": 0.036719705909490585,
      "learning_rate": 7.066666666666667e-05,
      "loss": 0.0062,
      "step": 160
    },
    {
      "epoch": 0.642023346303502,
      "grad_norm": 0.04453688859939575,
      "learning_rate": 7.288888888888888e-05,
      "loss": 0.0064,
      "step": 165
    },
    {
      "epoch": 0.6614785992217899,
      "grad_norm": 0.06014733761548996,
      "learning_rate": 7.511111111111111e-05,
      "loss": 0.0068,
      "step": 170
    },
    {
      "epoch": 0.6809338521400778,
      "grad_norm": 0.06567176431417465,
      "learning_rate": 7.733333333333333e-05,
      "loss": 0.0069,
      "step": 175
    },
    {
      "epoch": 0.7003891050583657,
      "grad_norm": 0.058225322514772415,
      "learning_rate": 7.955555555555556e-05,
      "loss": 0.007,
      "step": 180
    },
    {
      "epoch": 0.7198443579766537,
      "grad_norm": 0.04187316447496414,
      "learning_rate": 8.177777777777778e-05,
      "loss": 0.0065,
      "step": 185
    },
    {
      "epoch": 0.7392996108949417,
      "grad_norm": 0.03578794747591019,
      "learning_rate": 8.4e-05,
      "loss": 0.0061,
      "step": 190
    },
    {
      "epoch": 0.7587548638132295,
      "grad_norm": 0.0358467772603035,
      "learning_rate": 8.622222222222223e-05,
      "loss": 0.0065,
      "step": 195
    },
    {
      "epoch": 0.7782101167315175,
      "grad_norm": 0.027109306305646896,
      "learning_rate": 8.844444444444445e-05,
      "loss": 0.0063,
      "step": 200
    },
    {
      "epoch": 0.7782101167315175,
      "eval_loss": 0.01398194208741188,
      "eval_runtime": 152.8483,
      "eval_samples_per_second": 3.337,
      "eval_steps_per_second": 0.837,
      "step": 200
    },
    {
      "epoch": 0.7976653696498055,
      "grad_norm": 0.031359098851680756,
      "learning_rate": 9.066666666666667e-05,
      "loss": 0.0061,
      "step": 205
    },
    {
      "epoch": 0.8171206225680934,
      "grad_norm": 0.041419435292482376,
      "learning_rate": 9.288888888888888e-05,
      "loss": 0.0062,
      "step": 210
    },
    {
      "epoch": 0.8365758754863813,
      "grad_norm": 0.03226594254374504,
      "learning_rate": 9.511111111111112e-05,
      "loss": 0.0059,
      "step": 215
    },
    {
      "epoch": 0.8560311284046692,
      "grad_norm": 0.07033108919858932,
      "learning_rate": 9.733333333333333e-05,
      "loss": 0.0061,
      "step": 220
    },
    {
      "epoch": 0.8754863813229572,
      "grad_norm": 0.05067949742078781,
      "learning_rate": 9.955555555555556e-05,
      "loss": 0.0059,
      "step": 225
    },
    {
      "epoch": 0.8949416342412452,
      "grad_norm": 0.025263365358114243,
      "learning_rate": 0.00010177777777777777,
      "loss": 0.0059,
      "step": 230
    },
    {
      "epoch": 0.914396887159533,
      "grad_norm": 0.03773004561662674,
      "learning_rate": 0.00010400000000000001,
      "loss": 0.0062,
      "step": 235
    },
    {
      "epoch": 0.933852140077821,
      "grad_norm": 0.03331644833087921,
      "learning_rate": 0.00010622222222222222,
      "loss": 0.006,
      "step": 240
    },
    {
      "epoch": 0.953307392996109,
      "grad_norm": 0.03881971910595894,
      "learning_rate": 0.00010844444444444444,
      "loss": 0.0059,
      "step": 245
    },
    {
      "epoch": 0.9727626459143969,
      "grad_norm": 0.0403914675116539,
      "learning_rate": 0.00011066666666666668,
      "loss": 0.0062,
      "step": 250
    },
    {
      "epoch": 0.9922178988326849,
      "grad_norm": 0.03234079107642174,
      "learning_rate": 0.0001128888888888889,
      "loss": 0.0064,
      "step": 255
    },
    {
      "epoch": 1.0116731517509727,
      "grad_norm": 0.0188930481672287,
      "learning_rate": 0.00011511111111111112,
      "loss": 0.0061,
      "step": 260
    },
    {
      "epoch": 1.0311284046692606,
      "grad_norm": 0.02896421030163765,
      "learning_rate": 0.00011733333333333333,
      "loss": 0.0059,
      "step": 265
    },
    {
      "epoch": 1.0505836575875487,
      "grad_norm": 0.038550637662410736,
      "learning_rate": 0.00011955555555555557,
      "loss": 0.0066,
      "step": 270
    },
    {
      "epoch": 1.0700389105058365,
      "grad_norm": 0.024153664708137512,
      "learning_rate": 0.00011999797360750958,
      "loss": 0.0062,
      "step": 275
    },
    {
      "epoch": 1.0894941634241244,
      "grad_norm": 0.027283893898129463,
      "learning_rate": 0.00011998974162260325,
      "loss": 0.006,
      "step": 280
    },
    {
      "epoch": 1.1089494163424125,
      "grad_norm": 0.035498056560754776,
      "learning_rate": 0.00011997517826389341,
      "loss": 0.0064,
      "step": 285
    },
    {
      "epoch": 1.1284046692607004,
      "grad_norm": 0.021591784432530403,
      "learning_rate": 0.00011995428506841069,
      "loss": 0.0061,
      "step": 290
    },
    {
      "epoch": 1.1478599221789882,
      "grad_norm": 0.016961926594376564,
      "learning_rate": 0.00011992706424124257,
      "loss": 0.006,
      "step": 295
    },
    {
      "epoch": 1.1673151750972763,
      "grad_norm": 0.03020872175693512,
      "learning_rate": 0.00011989351865530078,
      "loss": 0.0063,
      "step": 300
    },
    {
      "epoch": 1.1673151750972763,
      "eval_loss": 0.014270401559770107,
      "eval_runtime": 152.9753,
      "eval_samples_per_second": 3.334,
      "eval_steps_per_second": 0.837,
      "step": 300
    }
  ],
  "logging_steps": 5,
  "max_steps": 1799,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 7,
  "save_steps": 100,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 30,
        "early_stopping_threshold": 0.001
      },
      "attributes": {
        "early_stopping_patience_counter": 2
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 2.0425665990873542e+17,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}
|
|