{
  "best_metric": 0.9483693001512903,
  "best_model_checkpoint": "deit-base-patch16-224-finetuned-stroke-binary/checkpoint-1700",
  "epoch": 10.496124031007753,
  "eval_steps": 100,
  "global_step": 1700,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.31007751937984496,
      "grad_norm": 1.1434299945831299,
      "learning_rate": 1.29366106080207e-06,
      "loss": 0.1628,
      "step": 50
    },
    {
      "epoch": 0.6201550387596899,
      "grad_norm": 0.9698119163513184,
      "learning_rate": 2.58732212160414e-06,
      "loss": 0.1646,
      "step": 100
    },
    {
      "epoch": 0.6201550387596899,
      "eval_accuracy": 0.9430122116689281,
      "eval_f1": 0.9424717047287801,
      "eval_loss": 0.15877728164196014,
      "eval_precision": 0.9442262324920166,
      "eval_recall": 0.9430122116689281,
      "eval_runtime": 9.3645,
      "eval_samples_per_second": 236.103,
      "eval_steps_per_second": 29.58,
      "step": 100
    },
    {
      "epoch": 0.9302325581395349,
      "grad_norm": 1.4945017099380493,
      "learning_rate": 3.88098318240621e-06,
      "loss": 0.1355,
      "step": 150
    },
    {
      "epoch": 1.235658914728682,
      "grad_norm": 1.1095311641693115,
      "learning_rate": 5.17464424320828e-06,
      "loss": 0.1417,
      "step": 200
    },
    {
      "epoch": 1.235658914728682,
      "eval_accuracy": 0.9439167797376753,
      "eval_f1": 0.9432839027033096,
      "eval_loss": 0.16397197544574738,
      "eval_precision": 0.9457751540323792,
      "eval_recall": 0.9439167797376753,
      "eval_runtime": 9.5146,
      "eval_samples_per_second": 232.38,
      "eval_steps_per_second": 29.113,
      "step": 200
    },
    {
      "epoch": 1.5457364341085271,
      "grad_norm": 2.9033849239349365,
      "learning_rate": 6.468305304010349e-06,
      "loss": 0.1576,
      "step": 250
    },
    {
      "epoch": 1.8558139534883722,
      "grad_norm": 0.8375345468521118,
      "learning_rate": 7.76196636481242e-06,
      "loss": 0.1681,
      "step": 300
    },
    {
      "epoch": 1.8558139534883722,
      "eval_accuracy": 0.945273631840796,
      "eval_f1": 0.9446810798244,
      "eval_loss": 0.16215302050113678,
      "eval_precision": 0.9469984434420068,
      "eval_recall": 0.945273631840796,
      "eval_runtime": 9.4184,
      "eval_samples_per_second": 234.752,
      "eval_steps_per_second": 29.41,
      "step": 300
    },
    {
      "epoch": 2.1612403100775195,
      "grad_norm": 1.2844864130020142,
      "learning_rate": 9.055627425614489e-06,
      "loss": 0.1487,
      "step": 350
    },
    {
      "epoch": 2.471317829457364,
      "grad_norm": 2.3872156143188477,
      "learning_rate": 1.034928848641656e-05,
      "loss": 0.1512,
      "step": 400
    },
    {
      "epoch": 2.471317829457364,
      "eval_accuracy": 0.9434644957033017,
      "eval_f1": 0.9430495459530136,
      "eval_loss": 0.15099050104618073,
      "eval_precision": 0.9440827208056073,
      "eval_recall": 0.9434644957033017,
      "eval_runtime": 9.4764,
      "eval_samples_per_second": 233.316,
      "eval_steps_per_second": 29.23,
      "step": 400
    },
    {
      "epoch": 2.781395348837209,
      "grad_norm": 1.7295209169387817,
      "learning_rate": 1.164294954721863e-05,
      "loss": 0.161,
      "step": 450
    },
    {
      "epoch": 3.0868217054263565,
      "grad_norm": 3.4684560298919678,
      "learning_rate": 1.2936610608020698e-05,
      "loss": 0.1506,
      "step": 500
    },
    {
      "epoch": 3.0868217054263565,
      "eval_accuracy": 0.9339665309814563,
      "eval_f1": 0.9326951378602786,
      "eval_loss": 0.19129638373851776,
      "eval_precision": 0.9391386500820929,
      "eval_recall": 0.9339665309814563,
      "eval_runtime": 9.3995,
      "eval_samples_per_second": 235.226,
      "eval_steps_per_second": 29.47,
      "step": 500
    },
    {
      "epoch": 3.3968992248062015,
      "grad_norm": 2.042165517807007,
      "learning_rate": 1.423027166882277e-05,
      "loss": 0.1263,
      "step": 550
    },
    {
      "epoch": 3.7069767441860466,
      "grad_norm": 0.8381805419921875,
      "learning_rate": 1.552393272962484e-05,
      "loss": 0.1654,
      "step": 600
    },
    {
      "epoch": 3.7069767441860466,
      "eval_accuracy": 0.9425599276345545,
      "eval_f1": 0.9419379928735436,
      "eval_loss": 0.1679351031780243,
      "eval_precision": 0.9442161451256601,
      "eval_recall": 0.9425599276345545,
      "eval_runtime": 9.4346,
      "eval_samples_per_second": 234.351,
      "eval_steps_per_second": 29.36,
      "step": 600
    },
    {
      "epoch": 4.0124031007751935,
      "grad_norm": 2.1265010833740234,
      "learning_rate": 1.6817593790426908e-05,
      "loss": 0.1507,
      "step": 650
    },
    {
      "epoch": 4.322480620155039,
      "grad_norm": 2.001044511795044,
      "learning_rate": 1.8111254851228977e-05,
      "loss": 0.1482,
      "step": 700
    },
    {
      "epoch": 4.322480620155039,
      "eval_accuracy": 0.9402985074626866,
      "eval_f1": 0.9402037869673187,
      "eval_loss": 0.1550845354795456,
      "eval_precision": 0.9401885824772098,
      "eval_recall": 0.9402985074626866,
      "eval_runtime": 9.4957,
      "eval_samples_per_second": 232.841,
      "eval_steps_per_second": 29.171,
      "step": 700
    },
    {
      "epoch": 4.632558139534884,
      "grad_norm": 0.40356162190437317,
      "learning_rate": 1.940491591203105e-05,
      "loss": 0.1451,
      "step": 750
    },
    {
      "epoch": 4.942635658914728,
      "grad_norm": 0.9926322102546692,
      "learning_rate": 1.999925630026586e-05,
      "loss": 0.1599,
      "step": 800
    },
    {
      "epoch": 4.942635658914728,
      "eval_accuracy": 0.9461781999095432,
      "eval_f1": 0.9457377367508388,
      "eval_loss": 0.14886853098869324,
      "eval_precision": 0.9470663350522793,
      "eval_recall": 0.9461781999095432,
      "eval_runtime": 9.4883,
      "eval_samples_per_second": 233.025,
      "eval_steps_per_second": 29.194,
      "step": 800
    },
    {
      "epoch": 5.248062015503876,
      "grad_norm": 1.0929352045059204,
      "learning_rate": 1.9993951980962474e-05,
      "loss": 0.1598,
      "step": 850
    },
    {
      "epoch": 5.558139534883721,
      "grad_norm": 1.2597020864486694,
      "learning_rate": 1.9983550078926357e-05,
      "loss": 0.1477,
      "step": 900
    },
    {
      "epoch": 5.558139534883721,
      "eval_accuracy": 0.9425599276345545,
      "eval_f1": 0.9423650667942064,
      "eval_loss": 0.14367012679576874,
      "eval_precision": 0.942514608438234,
      "eval_recall": 0.9425599276345545,
      "eval_runtime": 9.4876,
      "eval_samples_per_second": 233.041,
      "eval_steps_per_second": 29.196,
      "step": 900
    },
    {
      "epoch": 5.868217054263566,
      "grad_norm": 1.0700503587722778,
      "learning_rate": 1.9968055899822005e-05,
      "loss": 0.1432,
      "step": 950
    },
    {
      "epoch": 6.173643410852713,
      "grad_norm": 2.013948440551758,
      "learning_rate": 1.9947477346715192e-05,
      "loss": 0.1308,
      "step": 1000
    },
    {
      "epoch": 6.173643410852713,
      "eval_accuracy": 0.9416553595658074,
      "eval_f1": 0.9414428935954616,
      "eval_loss": 0.15271881222724915,
      "eval_precision": 0.9416239036557209,
      "eval_recall": 0.9416553595658074,
      "eval_runtime": 9.4259,
      "eval_samples_per_second": 234.566,
      "eval_steps_per_second": 29.387,
      "step": 1000
    },
    {
      "epoch": 6.4837209302325585,
      "grad_norm": 2.9245336055755615,
      "learning_rate": 1.9921824916041882e-05,
      "loss": 0.1355,
      "step": 1050
    },
    {
      "epoch": 6.793798449612403,
      "grad_norm": 2.073958396911621,
      "learning_rate": 1.9891111692254346e-05,
      "loss": 0.1362,
      "step": 1100
    },
    {
      "epoch": 6.793798449612403,
      "eval_accuracy": 0.9425599276345545,
      "eval_f1": 0.9421222945979488,
      "eval_loss": 0.16084641218185425,
      "eval_precision": 0.943232512538474,
      "eval_recall": 0.9425599276345545,
      "eval_runtime": 9.4085,
      "eval_samples_per_second": 235.0,
      "eval_steps_per_second": 29.441,
      "step": 1100
    },
    {
      "epoch": 7.09922480620155,
      "grad_norm": 3.3903515338897705,
      "learning_rate": 1.98553533411472e-05,
      "loss": 0.1437,
      "step": 1150
    },
    {
      "epoch": 7.409302325581395,
      "grad_norm": 0.6468881964683533,
      "learning_rate": 1.9814568101866843e-05,
      "loss": 0.1494,
      "step": 1200
    },
    {
      "epoch": 7.409302325581395,
      "eval_accuracy": 0.9434644957033017,
      "eval_f1": 0.9428523551904958,
      "eval_loss": 0.16010308265686035,
      "eval_precision": 0.9451435778977758,
      "eval_recall": 0.9434644957033017,
      "eval_runtime": 9.4239,
      "eval_samples_per_second": 234.617,
      "eval_steps_per_second": 29.394,
      "step": 1200
    },
    {
      "epoch": 7.7193798449612405,
      "grad_norm": 1.8461512327194214,
      "learning_rate": 1.9768776777608227e-05,
      "loss": 0.1437,
      "step": 1250
    },
    {
      "epoch": 8.024806201550387,
      "grad_norm": 1.100502371788025,
      "learning_rate": 1.971800272500388e-05,
      "loss": 0.1592,
      "step": 1300
    },
    {
      "epoch": 8.024806201550387,
      "eval_accuracy": 0.9430122116689281,
      "eval_f1": 0.9428538886036008,
      "eval_loss": 0.1429978460073471,
      "eval_precision": 0.9429310401027086,
      "eval_recall": 0.9430122116689281,
      "eval_runtime": 9.449,
      "eval_samples_per_second": 233.992,
      "eval_steps_per_second": 29.315,
      "step": 1300
    },
    {
      "epoch": 8.334883720930232,
      "grad_norm": 4.64835786819458,
      "learning_rate": 1.9662271842210433e-05,
      "loss": 0.131,
      "step": 1350
    },
    {
      "epoch": 8.644961240310078,
      "grad_norm": 1.8132234811782837,
      "learning_rate": 1.960161255569886e-05,
      "loss": 0.16,
      "step": 1400
    },
    {
      "epoch": 8.644961240310078,
      "eval_accuracy": 0.9457259158751696,
      "eval_f1": 0.9451300205802559,
      "eval_loss": 0.1504276692867279,
      "eval_precision": 0.9475179562129199,
      "eval_recall": 0.9457259158751696,
      "eval_runtime": 9.4602,
      "eval_samples_per_second": 233.715,
      "eval_steps_per_second": 29.28,
      "step": 1400
    },
    {
      "epoch": 8.955038759689922,
      "grad_norm": 1.1923645734786987,
      "learning_rate": 1.9536055805755044e-05,
      "loss": 0.1105,
      "step": 1450
    },
    {
      "epoch": 9.26046511627907,
      "grad_norm": 2.665161609649658,
      "learning_rate": 1.9465635030698203e-05,
      "loss": 0.1245,
      "step": 1500
    },
    {
      "epoch": 9.26046511627907,
      "eval_accuracy": 0.9461781999095432,
      "eval_f1": 0.9457529907245916,
      "eval_loss": 0.1505969762802124,
      "eval_precision": 0.9469878940244384,
      "eval_recall": 0.9461781999095432,
      "eval_runtime": 9.4828,
      "eval_samples_per_second": 233.159,
      "eval_steps_per_second": 29.211,
      "step": 1500
    },
    {
      "epoch": 9.570542635658915,
      "grad_norm": 0.45807984471321106,
      "learning_rate": 1.939038614982509e-05,
      "loss": 0.1342,
      "step": 1550
    },
    {
      "epoch": 9.88062015503876,
      "grad_norm": 1.0274362564086914,
      "learning_rate": 1.9310347545088764e-05,
      "loss": 0.1397,
      "step": 1600
    },
    {
      "epoch": 9.88062015503876,
      "eval_accuracy": 0.9312528267752148,
      "eval_f1": 0.9299781453592552,
      "eval_loss": 0.19713716208934784,
      "eval_precision": 0.9359197905804808,
      "eval_recall": 0.9312528267752148,
      "eval_runtime": 9.4563,
      "eval_samples_per_second": 233.811,
      "eval_steps_per_second": 29.293,
      "step": 1600
    },
    {
      "epoch": 10.186046511627907,
      "grad_norm": 2.8293967247009277,
      "learning_rate": 1.9225560041521225e-05,
      "loss": 0.1615,
      "step": 1650
    },
    {
      "epoch": 10.496124031007753,
      "grad_norm": 0.916153609752655,
      "learning_rate": 1.913606688640993e-05,
      "loss": 0.1396,
      "step": 1700
    },
    {
      "epoch": 10.496124031007753,
      "eval_accuracy": 0.9488919041157847,
      "eval_f1": 0.9483693001512903,
      "eval_loss": 0.15268893539905548,
      "eval_precision": 0.9504887785210339,
      "eval_recall": 0.9488919041157847,
      "eval_runtime": 9.4488,
      "eval_samples_per_second": 233.998,
      "eval_steps_per_second": 29.316,
      "step": 1700
    },
    {
      "epoch": 10.496124031007753,
      "step": 1700,
      "total_flos": 5.998049534242161e+18,
      "train_loss": 0.14669023766237146,
      "train_runtime": 774.0157,
      "train_samples_per_second": 319.869,
      "train_steps_per_second": 9.984
    }
  ],
  "logging_steps": 50,
  "max_steps": 7728,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 48,
  "save_steps": 100,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 5.998049534242161e+18,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}