{
  "best_global_step": 524,
  "best_metric": 1.0,
  "best_model_checkpoint": "wav2vec2_frog_classifier_sew_d/checkpoint-524",
  "epoch": 4.0,
  "eval_steps": 500,
  "global_step": 524,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.07633587786259542,
      "grad_norm": 11.720809936523438,
      "learning_rate": 2.2900763358778629e-07,
      "loss": 0.7534,
      "step": 10
    },
    {
      "epoch": 0.15267175572519084,
      "grad_norm": 7.877044200897217,
      "learning_rate": 4.834605597964377e-07,
      "loss": 0.6457,
      "step": 20
    },
    {
      "epoch": 0.22900763358778625,
      "grad_norm": 6.020662784576416,
      "learning_rate": 7.37913486005089e-07,
      "loss": 0.5034,
      "step": 30
    },
    {
      "epoch": 0.3053435114503817,
      "grad_norm": 11.8593111038208,
      "learning_rate": 9.923664122137404e-07,
      "loss": 0.3672,
      "step": 40
    },
    {
      "epoch": 0.3816793893129771,
      "grad_norm": 19.238187789916992,
      "learning_rate": 1.246819338422392e-06,
      "loss": 0.3239,
      "step": 50
    },
    {
      "epoch": 0.4580152671755725,
      "grad_norm": 29.412681579589844,
      "learning_rate": 1.5012722646310435e-06,
      "loss": 0.4459,
      "step": 60
    },
    {
      "epoch": 0.5343511450381679,
      "grad_norm": 23.955923080444336,
      "learning_rate": 1.7557251908396948e-06,
      "loss": 0.3843,
      "step": 70
    },
    {
      "epoch": 0.6106870229007634,
      "grad_norm": 33.24127960205078,
      "learning_rate": 2.010178117048346e-06,
      "loss": 0.1934,
      "step": 80
    },
    {
      "epoch": 0.6870229007633588,
      "grad_norm": 8.673649787902832,
      "learning_rate": 2.2646310432569978e-06,
      "loss": 0.2595,
      "step": 90
    },
    {
      "epoch": 0.7633587786259542,
      "grad_norm": 1.9229257106781006,
      "learning_rate": 2.5190839694656487e-06,
      "loss": 0.2245,
      "step": 100
    },
    {
      "epoch": 0.8396946564885496,
      "grad_norm": 13.961572647094727,
      "learning_rate": 2.7735368956743004e-06,
      "loss": 0.2427,
      "step": 110
    },
    {
      "epoch": 0.916030534351145,
      "grad_norm": 5.140026092529297,
      "learning_rate": 3.0279898218829517e-06,
      "loss": 0.1903,
      "step": 120
    },
    {
      "epoch": 0.9923664122137404,
      "grad_norm": 0.7627769112586975,
      "learning_rate": 3.2824427480916034e-06,
      "loss": 0.1594,
      "step": 130
    },
    {
      "epoch": 1.0,
      "eval_f1": 0.9463087248322147,
      "eval_fbeta": 0.9778085139994803,
      "eval_loss": 0.08893176913261414,
      "eval_precision": 1.0,
      "eval_recall": 0.8980891719745223,
      "eval_runtime": 13.2655,
      "eval_samples_per_second": 67.468,
      "eval_steps_per_second": 8.443,
      "step": 131
    },
    {
      "epoch": 1.0687022900763359,
      "grad_norm": 0.26965653896331787,
      "learning_rate": 3.5368956743002547e-06,
      "loss": 0.0149,
      "step": 140
    },
    {
      "epoch": 1.1450381679389312,
      "grad_norm": 0.1886816918849945,
      "learning_rate": 3.791348600508906e-06,
      "loss": 0.1284,
      "step": 150
    },
    {
      "epoch": 1.2213740458015268,
      "grad_norm": 36.349491119384766,
      "learning_rate": 4.045801526717557e-06,
      "loss": 0.1203,
      "step": 160
    },
    {
      "epoch": 1.297709923664122,
      "grad_norm": 0.12028646469116211,
      "learning_rate": 4.300254452926209e-06,
      "loss": 0.0069,
      "step": 170
    },
    {
      "epoch": 1.3740458015267176,
      "grad_norm": 0.1057005226612091,
      "learning_rate": 4.554707379134861e-06,
      "loss": 0.0351,
      "step": 180
    },
    {
      "epoch": 1.450381679389313,
      "grad_norm": 0.13390018045902252,
      "learning_rate": 4.8091603053435125e-06,
      "loss": 0.0688,
      "step": 190
    },
    {
      "epoch": 1.5267175572519083,
      "grad_norm": 188.4539031982422,
      "learning_rate": 5.063613231552163e-06,
      "loss": 0.1442,
      "step": 200
    },
    {
      "epoch": 1.6030534351145038,
      "grad_norm": 0.15123459696769714,
      "learning_rate": 5.318066157760815e-06,
      "loss": 0.0292,
      "step": 210
    },
    {
      "epoch": 1.6793893129770994,
      "grad_norm": 0.10352899134159088,
      "learning_rate": 5.572519083969467e-06,
      "loss": 0.0328,
      "step": 220
    },
    {
      "epoch": 1.7557251908396947,
      "grad_norm": 0.2473313808441162,
      "learning_rate": 5.826972010178118e-06,
      "loss": 0.0049,
      "step": 230
    },
    {
      "epoch": 1.83206106870229,
      "grad_norm": 0.10870244354009628,
      "learning_rate": 6.081424936386769e-06,
      "loss": 0.0361,
      "step": 240
    },
    {
      "epoch": 1.9083969465648853,
      "grad_norm": 0.0995243638753891,
      "learning_rate": 6.335877862595419e-06,
      "loss": 0.0387,
      "step": 250
    },
    {
      "epoch": 1.984732824427481,
      "grad_norm": 0.07525702565908432,
      "learning_rate": 6.590330788804071e-06,
      "loss": 0.0037,
      "step": 260
    },
    {
      "epoch": 2.0,
      "eval_f1": 0.9968253968253968,
      "eval_fbeta": 0.9949302118129412,
      "eval_loss": 0.00947254616767168,
      "eval_precision": 0.9936708860759493,
      "eval_recall": 1.0,
      "eval_runtime": 13.3328,
      "eval_samples_per_second": 67.128,
      "eval_steps_per_second": 8.4,
      "step": 262
    },
    {
      "epoch": 2.0610687022900764,
      "grad_norm": 0.06131064146757126,
      "learning_rate": 6.844783715012723e-06,
      "loss": 0.0032,
      "step": 270
    },
    {
      "epoch": 2.1374045801526718,
      "grad_norm": 0.05664181336760521,
      "learning_rate": 7.0992366412213746e-06,
      "loss": 0.0029,
      "step": 280
    },
    {
      "epoch": 2.213740458015267,
      "grad_norm": 0.051850203424692154,
      "learning_rate": 7.3536895674300254e-06,
      "loss": 0.0026,
      "step": 290
    },
    {
      "epoch": 2.2900763358778624,
      "grad_norm": 0.04679035767912865,
      "learning_rate": 7.608142493638677e-06,
      "loss": 0.0024,
      "step": 300
    },
    {
      "epoch": 2.366412213740458,
      "grad_norm": 0.043534696102142334,
      "learning_rate": 7.862595419847328e-06,
      "loss": 0.0022,
      "step": 310
    },
    {
      "epoch": 2.4427480916030535,
      "grad_norm": 0.06631364673376083,
      "learning_rate": 8.11704834605598e-06,
      "loss": 0.0501,
      "step": 320
    },
    {
      "epoch": 2.519083969465649,
      "grad_norm": 0.2406100630760193,
      "learning_rate": 8.371501272264631e-06,
      "loss": 0.1185,
      "step": 330
    },
    {
      "epoch": 2.595419847328244,
      "grad_norm": 0.15562430024147034,
      "learning_rate": 8.625954198473283e-06,
      "loss": 0.1555,
      "step": 340
    },
    {
      "epoch": 2.67175572519084,
      "grad_norm": 0.07086818665266037,
      "learning_rate": 8.880407124681935e-06,
      "loss": 0.0396,
      "step": 350
    },
    {
      "epoch": 2.7480916030534353,
      "grad_norm": 3.227670192718506,
      "learning_rate": 9.134860050890587e-06,
      "loss": 0.1098,
      "step": 360
    },
    {
      "epoch": 2.8244274809160306,
      "grad_norm": 2.484853744506836,
      "learning_rate": 9.389312977099237e-06,
      "loss": 0.2296,
      "step": 370
    },
    {
      "epoch": 2.900763358778626,
      "grad_norm": 1.2200217247009277,
      "learning_rate": 9.643765903307888e-06,
      "loss": 0.0674,
      "step": 380
    },
    {
      "epoch": 2.9770992366412212,
      "grad_norm": 0.7877931594848633,
      "learning_rate": 9.89821882951654e-06,
      "loss": 0.1377,
      "step": 390
    },
    {
      "epoch": 3.0,
      "eval_f1": 0.9354838709677419,
      "eval_fbeta": 0.9427827536107456,
      "eval_loss": 0.1109568178653717,
      "eval_precision": 0.9477124183006536,
      "eval_recall": 0.9235668789808917,
      "eval_runtime": 13.3172,
      "eval_samples_per_second": 67.206,
      "eval_steps_per_second": 8.41,
      "step": 393
    },
    {
      "epoch": 3.053435114503817,
      "grad_norm": 0.1212519109249115,
      "learning_rate": 9.961832061068703e-06,
      "loss": 0.0141,
      "step": 400
    },
    {
      "epoch": 3.1297709923664123,
      "grad_norm": 0.18422600626945496,
      "learning_rate": 9.89821882951654e-06,
      "loss": 0.1976,
      "step": 410
    },
    {
      "epoch": 3.2061068702290076,
      "grad_norm": 3.665578842163086,
      "learning_rate": 9.834605597964377e-06,
      "loss": 0.0717,
      "step": 420
    },
    {
      "epoch": 3.282442748091603,
      "grad_norm": 0.1137157678604126,
      "learning_rate": 9.770992366412214e-06,
      "loss": 0.0075,
      "step": 430
    },
    {
      "epoch": 3.3587786259541983,
      "grad_norm": 0.0800420492887497,
      "learning_rate": 9.707379134860051e-06,
      "loss": 0.0049,
      "step": 440
    },
    {
      "epoch": 3.435114503816794,
      "grad_norm": 0.06329266726970673,
      "learning_rate": 9.643765903307888e-06,
      "loss": 0.0036,
      "step": 450
    },
    {
      "epoch": 3.5114503816793894,
      "grad_norm": 0.058372776955366135,
      "learning_rate": 9.580152671755725e-06,
      "loss": 0.0391,
      "step": 460
    },
    {
      "epoch": 3.5877862595419847,
      "grad_norm": 0.05802847445011139,
      "learning_rate": 9.516539440203563e-06,
      "loss": 0.0029,
      "step": 470
    },
    {
      "epoch": 3.66412213740458,
      "grad_norm": 0.050598662346601486,
      "learning_rate": 9.452926208651401e-06,
      "loss": 0.0026,
      "step": 480
    },
    {
      "epoch": 3.7404580152671754,
      "grad_norm": 0.045668765902519226,
      "learning_rate": 9.389312977099237e-06,
      "loss": 0.0023,
      "step": 490
    },
    {
      "epoch": 3.816793893129771,
      "grad_norm": 0.040798306465148926,
      "learning_rate": 9.325699745547074e-06,
      "loss": 0.0021,
      "step": 500
    },
    {
      "epoch": 3.8931297709923665,
      "grad_norm": 0.038592200726270676,
      "learning_rate": 9.26208651399491e-06,
      "loss": 0.0019,
      "step": 510
    },
    {
      "epoch": 3.969465648854962,
      "grad_norm": 0.03555968776345253,
      "learning_rate": 9.19847328244275e-06,
      "loss": 0.0017,
      "step": 520
    },
    {
      "epoch": 4.0,
      "eval_f1": 1.0,
      "eval_fbeta": 0.9999999200000064,
      "eval_loss": 0.0016206938307732344,
      "eval_precision": 1.0,
      "eval_recall": 1.0,
      "eval_runtime": 13.3404,
      "eval_samples_per_second": 67.089,
      "eval_steps_per_second": 8.396,
      "step": 524
    }
  ],
  "logging_steps": 10,
  "max_steps": 1965,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 15,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 4.07357231514624e+17,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
}