{
  "best_metric": 0.7391304347826086,
  "best_model_checkpoint": "swinv2-tiny-patch4-window8-256-DMAE-da-colab/checkpoint-138",
  "epoch": 38.26086956521739,
  "eval_steps": 500,
  "global_step": 440,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.8695652173913043,
      "grad_norm": 16.909873962402344,
      "learning_rate": 9.090909090909091e-06,
      "loss": 1.3823,
      "step": 10
    },
    {
      "epoch": 0.9565217391304348,
      "eval_accuracy": 0.1956521739130435,
      "eval_loss": 1.4058117866516113,
      "eval_runtime": 0.9924,
      "eval_samples_per_second": 46.35,
      "eval_steps_per_second": 3.023,
      "step": 11
    },
    {
      "epoch": 1.7391304347826086,
      "grad_norm": 19.725204467773438,
      "learning_rate": 1.8181818181818182e-05,
      "loss": 1.3366,
      "step": 20
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.1956521739130435,
      "eval_loss": 1.4482489824295044,
      "eval_runtime": 0.7643,
      "eval_samples_per_second": 60.185,
      "eval_steps_per_second": 3.925,
      "step": 23
    },
    {
      "epoch": 2.608695652173913,
      "grad_norm": 25.69033432006836,
      "learning_rate": 2.7272727272727273e-05,
      "loss": 1.2352,
      "step": 30
    },
    {
      "epoch": 2.9565217391304346,
      "eval_accuracy": 0.45652173913043476,
      "eval_loss": 1.230912685394287,
      "eval_runtime": 0.8016,
      "eval_samples_per_second": 57.387,
      "eval_steps_per_second": 3.743,
      "step": 34
    },
    {
      "epoch": 3.4782608695652173,
      "grad_norm": 22.049612045288086,
      "learning_rate": 3.6363636363636364e-05,
      "loss": 1.1374,
      "step": 40
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.6086956521739131,
      "eval_loss": 1.1031465530395508,
      "eval_runtime": 0.9885,
      "eval_samples_per_second": 46.536,
      "eval_steps_per_second": 3.035,
      "step": 46
    },
    {
      "epoch": 4.3478260869565215,
      "grad_norm": 29.715892791748047,
      "learning_rate": 3.93939393939394e-05,
      "loss": 1.0344,
      "step": 50
    },
    {
      "epoch": 4.956521739130435,
      "eval_accuracy": 0.5869565217391305,
      "eval_loss": 1.0230214595794678,
      "eval_runtime": 0.7523,
      "eval_samples_per_second": 61.145,
      "eval_steps_per_second": 3.988,
      "step": 57
    },
    {
      "epoch": 5.217391304347826,
      "grad_norm": 42.69334030151367,
      "learning_rate": 3.838383838383839e-05,
      "loss": 0.8772,
      "step": 60
    },
    {
      "epoch": 6.0,
      "eval_accuracy": 0.6521739130434783,
      "eval_loss": 0.9114828705787659,
      "eval_runtime": 0.772,
      "eval_samples_per_second": 59.587,
      "eval_steps_per_second": 3.886,
      "step": 69
    },
    {
      "epoch": 6.086956521739131,
      "grad_norm": 43.39430618286133,
      "learning_rate": 3.7373737373737376e-05,
      "loss": 0.8105,
      "step": 70
    },
    {
      "epoch": 6.956521739130435,
      "grad_norm": 46.86561584472656,
      "learning_rate": 3.6363636363636364e-05,
      "loss": 0.7321,
      "step": 80
    },
    {
      "epoch": 6.956521739130435,
      "eval_accuracy": 0.6521739130434783,
      "eval_loss": 0.8857656717300415,
      "eval_runtime": 1.0277,
      "eval_samples_per_second": 44.76,
      "eval_steps_per_second": 2.919,
      "step": 80
    },
    {
      "epoch": 7.826086956521739,
      "grad_norm": 42.23720169067383,
      "learning_rate": 3.535353535353536e-05,
      "loss": 0.6319,
      "step": 90
    },
    {
      "epoch": 8.0,
      "eval_accuracy": 0.6521739130434783,
      "eval_loss": 0.866457998752594,
      "eval_runtime": 0.7747,
      "eval_samples_per_second": 59.38,
      "eval_steps_per_second": 3.873,
      "step": 92
    },
    {
      "epoch": 8.695652173913043,
      "grad_norm": 42.64775466918945,
      "learning_rate": 3.434343434343435e-05,
      "loss": 0.6438,
      "step": 100
    },
    {
      "epoch": 8.956521739130435,
      "eval_accuracy": 0.717391304347826,
      "eval_loss": 0.7737635970115662,
      "eval_runtime": 0.7744,
      "eval_samples_per_second": 59.4,
      "eval_steps_per_second": 3.874,
      "step": 103
    },
    {
      "epoch": 9.565217391304348,
      "grad_norm": 60.89689636230469,
      "learning_rate": 3.3333333333333335e-05,
      "loss": 0.4714,
      "step": 110
    },
    {
      "epoch": 10.0,
      "eval_accuracy": 0.6304347826086957,
      "eval_loss": 0.8491604924201965,
      "eval_runtime": 1.0319,
      "eval_samples_per_second": 44.578,
      "eval_steps_per_second": 2.907,
      "step": 115
    },
    {
      "epoch": 10.434782608695652,
      "grad_norm": 47.14131546020508,
      "learning_rate": 3.232323232323232e-05,
      "loss": 0.433,
      "step": 120
    },
    {
      "epoch": 10.956521739130435,
      "eval_accuracy": 0.6956521739130435,
      "eval_loss": 0.8385751247406006,
      "eval_runtime": 0.7991,
      "eval_samples_per_second": 57.561,
      "eval_steps_per_second": 3.754,
      "step": 126
    },
    {
      "epoch": 11.304347826086957,
      "grad_norm": 40.993927001953125,
      "learning_rate": 3.131313131313132e-05,
      "loss": 0.4793,
      "step": 130
    },
    {
      "epoch": 12.0,
      "eval_accuracy": 0.7391304347826086,
      "eval_loss": 0.939383327960968,
      "eval_runtime": 0.7851,
      "eval_samples_per_second": 58.594,
      "eval_steps_per_second": 3.821,
      "step": 138
    },
    {
      "epoch": 12.173913043478262,
      "grad_norm": 48.550899505615234,
      "learning_rate": 3.0303030303030306e-05,
      "loss": 0.4769,
      "step": 140
    },
    {
      "epoch": 12.956521739130435,
      "eval_accuracy": 0.6521739130434783,
      "eval_loss": 0.9470547437667847,
      "eval_runtime": 1.0593,
      "eval_samples_per_second": 43.423,
      "eval_steps_per_second": 2.832,
      "step": 149
    },
    {
      "epoch": 13.043478260869565,
      "grad_norm": 84.14537811279297,
      "learning_rate": 2.9292929292929297e-05,
      "loss": 0.4308,
      "step": 150
    },
    {
      "epoch": 13.91304347826087,
      "grad_norm": 54.65965270996094,
      "learning_rate": 2.8282828282828285e-05,
      "loss": 0.3872,
      "step": 160
    },
    {
      "epoch": 14.0,
      "eval_accuracy": 0.6086956521739131,
      "eval_loss": 1.1526373624801636,
      "eval_runtime": 0.7735,
      "eval_samples_per_second": 59.47,
      "eval_steps_per_second": 3.878,
      "step": 161
    },
    {
      "epoch": 14.782608695652174,
      "grad_norm": 63.24810791015625,
      "learning_rate": 2.7272727272727273e-05,
      "loss": 0.3906,
      "step": 170
    },
    {
      "epoch": 14.956521739130435,
      "eval_accuracy": 0.6521739130434783,
      "eval_loss": 1.057466983795166,
      "eval_runtime": 0.7776,
      "eval_samples_per_second": 59.156,
      "eval_steps_per_second": 3.858,
      "step": 172
    },
    {
      "epoch": 15.652173913043478,
      "grad_norm": 76.85520935058594,
      "learning_rate": 2.6262626262626265e-05,
      "loss": 0.3798,
      "step": 180
    },
    {
      "epoch": 16.0,
      "eval_accuracy": 0.6956521739130435,
      "eval_loss": 1.0592772960662842,
      "eval_runtime": 0.8046,
      "eval_samples_per_second": 57.172,
      "eval_steps_per_second": 3.729,
      "step": 184
    },
    {
      "epoch": 16.52173913043478,
      "grad_norm": 43.57295608520508,
      "learning_rate": 2.5252525252525253e-05,
      "loss": 0.3377,
      "step": 190
    },
    {
      "epoch": 16.956521739130434,
      "eval_accuracy": 0.6086956521739131,
      "eval_loss": 1.0782535076141357,
      "eval_runtime": 0.7945,
      "eval_samples_per_second": 57.9,
      "eval_steps_per_second": 3.776,
      "step": 195
    },
    {
      "epoch": 17.391304347826086,
      "grad_norm": 55.173805236816406,
      "learning_rate": 2.4242424242424244e-05,
      "loss": 0.3919,
      "step": 200
    },
    {
      "epoch": 18.0,
      "eval_accuracy": 0.6521739130434783,
      "eval_loss": 1.1066713333129883,
      "eval_runtime": 0.7682,
      "eval_samples_per_second": 59.879,
      "eval_steps_per_second": 3.905,
      "step": 207
    },
    {
      "epoch": 18.26086956521739,
      "grad_norm": 44.900856018066406,
      "learning_rate": 2.3232323232323232e-05,
      "loss": 0.3631,
      "step": 210
    },
    {
      "epoch": 18.956521739130434,
      "eval_accuracy": 0.6739130434782609,
      "eval_loss": 1.1017966270446777,
      "eval_runtime": 0.7687,
      "eval_samples_per_second": 59.84,
      "eval_steps_per_second": 3.903,
      "step": 218
    },
    {
      "epoch": 19.130434782608695,
      "grad_norm": 45.883216857910156,
      "learning_rate": 2.2222222222222227e-05,
      "loss": 0.3433,
      "step": 220
    },
    {
      "epoch": 20.0,
      "grad_norm": 29.03622055053711,
      "learning_rate": 2.121212121212121e-05,
      "loss": 0.2762,
      "step": 230
    },
    {
      "epoch": 20.0,
      "eval_accuracy": 0.6521739130434783,
      "eval_loss": 1.147935152053833,
      "eval_runtime": 1.0114,
      "eval_samples_per_second": 45.482,
      "eval_steps_per_second": 2.966,
      "step": 230
    },
    {
      "epoch": 20.869565217391305,
      "grad_norm": 49.36585235595703,
      "learning_rate": 2.0202020202020206e-05,
      "loss": 0.2935,
      "step": 240
    },
    {
      "epoch": 20.956521739130434,
      "eval_accuracy": 0.6956521739130435,
      "eval_loss": 1.1054612398147583,
      "eval_runtime": 0.7595,
      "eval_samples_per_second": 60.565,
      "eval_steps_per_second": 3.95,
      "step": 241
    },
    {
      "epoch": 21.73913043478261,
      "grad_norm": 34.57924270629883,
      "learning_rate": 1.9191919191919194e-05,
      "loss": 0.3029,
      "step": 250
    },
    {
      "epoch": 22.0,
      "eval_accuracy": 0.6739130434782609,
      "eval_loss": 1.120306372642517,
      "eval_runtime": 0.7563,
      "eval_samples_per_second": 60.82,
      "eval_steps_per_second": 3.967,
      "step": 253
    },
    {
      "epoch": 22.608695652173914,
      "grad_norm": 50.57252502441406,
      "learning_rate": 1.8181818181818182e-05,
      "loss": 0.2857,
      "step": 260
    },
    {
      "epoch": 22.956521739130434,
      "eval_accuracy": 0.6304347826086957,
      "eval_loss": 1.2819756269454956,
      "eval_runtime": 0.7606,
      "eval_samples_per_second": 60.476,
      "eval_steps_per_second": 3.944,
      "step": 264
    },
    {
      "epoch": 23.47826086956522,
      "grad_norm": 32.393211364746094,
      "learning_rate": 1.7171717171717173e-05,
      "loss": 0.2603,
      "step": 270
    },
    {
      "epoch": 24.0,
      "eval_accuracy": 0.6304347826086957,
      "eval_loss": 1.2550427913665771,
      "eval_runtime": 0.7542,
      "eval_samples_per_second": 60.991,
      "eval_steps_per_second": 3.978,
      "step": 276
    },
    {
      "epoch": 24.347826086956523,
      "grad_norm": 53.81830596923828,
      "learning_rate": 1.616161616161616e-05,
      "loss": 0.2162,
      "step": 280
    },
    {
      "epoch": 24.956521739130434,
      "eval_accuracy": 0.6739130434782609,
      "eval_loss": 1.1655433177947998,
      "eval_runtime": 0.7904,
      "eval_samples_per_second": 58.201,
      "eval_steps_per_second": 3.796,
      "step": 287
    },
    {
      "epoch": 25.217391304347824,
      "grad_norm": 61.60272216796875,
      "learning_rate": 1.5151515151515153e-05,
      "loss": 0.2465,
      "step": 290
    },
    {
      "epoch": 26.0,
      "eval_accuracy": 0.6739130434782609,
      "eval_loss": 1.2510898113250732,
      "eval_runtime": 0.796,
      "eval_samples_per_second": 57.789,
      "eval_steps_per_second": 3.769,
      "step": 299
    },
    {
      "epoch": 26.08695652173913,
      "grad_norm": 50.68645477294922,
      "learning_rate": 1.4141414141414143e-05,
      "loss": 0.2503,
      "step": 300
    },
    {
      "epoch": 26.956521739130434,
      "grad_norm": 62.43329620361328,
      "learning_rate": 1.3131313131313132e-05,
      "loss": 0.2238,
      "step": 310
    },
    {
      "epoch": 26.956521739130434,
      "eval_accuracy": 0.6304347826086957,
      "eval_loss": 1.3460850715637207,
      "eval_runtime": 1.0048,
      "eval_samples_per_second": 45.782,
      "eval_steps_per_second": 2.986,
      "step": 310
    },
    {
      "epoch": 27.82608695652174,
      "grad_norm": 56.05590057373047,
      "learning_rate": 1.2121212121212122e-05,
      "loss": 0.2271,
      "step": 320
    },
    {
      "epoch": 28.0,
      "eval_accuracy": 0.6304347826086957,
      "eval_loss": 1.3471860885620117,
      "eval_runtime": 0.7754,
      "eval_samples_per_second": 59.324,
      "eval_steps_per_second": 3.869,
      "step": 322
    },
    {
      "epoch": 28.695652173913043,
      "grad_norm": 36.40557098388672,
      "learning_rate": 1.1111111111111113e-05,
      "loss": 0.2694,
      "step": 330
    },
    {
      "epoch": 28.956521739130434,
      "eval_accuracy": 0.6304347826086957,
      "eval_loss": 1.4501123428344727,
      "eval_runtime": 0.746,
      "eval_samples_per_second": 61.663,
      "eval_steps_per_second": 4.022,
      "step": 333
    },
    {
      "epoch": 29.565217391304348,
      "grad_norm": 46.565956115722656,
      "learning_rate": 1.0101010101010103e-05,
      "loss": 0.1903,
      "step": 340
    },
    {
      "epoch": 30.0,
      "eval_accuracy": 0.6304347826086957,
      "eval_loss": 1.462881326675415,
      "eval_runtime": 1.0409,
      "eval_samples_per_second": 44.193,
      "eval_steps_per_second": 2.882,
      "step": 345
    },
    {
      "epoch": 30.434782608695652,
      "grad_norm": 92.46195220947266,
      "learning_rate": 9.090909090909091e-06,
      "loss": 0.2054,
      "step": 350
    },
    {
      "epoch": 30.956521739130434,
      "eval_accuracy": 0.6304347826086957,
      "eval_loss": 1.4671651124954224,
      "eval_runtime": 0.7747,
      "eval_samples_per_second": 59.374,
      "eval_steps_per_second": 3.872,
      "step": 356
    },
    {
      "epoch": 31.304347826086957,
      "grad_norm": 44.79200744628906,
      "learning_rate": 8.08080808080808e-06,
      "loss": 0.199,
      "step": 360
    },
    {
      "epoch": 32.0,
      "eval_accuracy": 0.6304347826086957,
      "eval_loss": 1.472485899925232,
      "eval_runtime": 0.7531,
      "eval_samples_per_second": 61.078,
      "eval_steps_per_second": 3.983,
      "step": 368
    },
    {
      "epoch": 32.17391304347826,
      "grad_norm": 34.2342643737793,
      "learning_rate": 7.070707070707071e-06,
      "loss": 0.2034,
      "step": 370
    },
    {
      "epoch": 32.95652173913044,
      "eval_accuracy": 0.6521739130434783,
      "eval_loss": 1.4506837129592896,
      "eval_runtime": 0.9808,
      "eval_samples_per_second": 46.899,
      "eval_steps_per_second": 3.059,
      "step": 379
    },
    {
      "epoch": 33.04347826086956,
      "grad_norm": 28.347145080566406,
      "learning_rate": 6.060606060606061e-06,
      "loss": 0.1959,
      "step": 380
    },
    {
      "epoch": 33.91304347826087,
      "grad_norm": 44.34228515625,
      "learning_rate": 5.0505050505050515e-06,
      "loss": 0.2048,
      "step": 390
    },
    {
      "epoch": 34.0,
      "eval_accuracy": 0.6304347826086957,
      "eval_loss": 1.4330270290374756,
      "eval_runtime": 0.9488,
      "eval_samples_per_second": 48.484,
      "eval_steps_per_second": 3.162,
      "step": 391
    },
    {
      "epoch": 34.78260869565217,
      "grad_norm": 38.90620803833008,
      "learning_rate": 4.04040404040404e-06,
      "loss": 0.1767,
      "step": 400
    },
    {
      "epoch": 34.95652173913044,
      "eval_accuracy": 0.6304347826086957,
      "eval_loss": 1.4638035297393799,
      "eval_runtime": 0.7554,
      "eval_samples_per_second": 60.894,
      "eval_steps_per_second": 3.971,
      "step": 402
    },
    {
      "epoch": 35.65217391304348,
      "grad_norm": 32.936920166015625,
      "learning_rate": 3.0303030303030305e-06,
      "loss": 0.1799,
      "step": 410
    },
    {
      "epoch": 36.0,
      "eval_accuracy": 0.6304347826086957,
      "eval_loss": 1.4231865406036377,
      "eval_runtime": 0.7731,
      "eval_samples_per_second": 59.501,
      "eval_steps_per_second": 3.88,
      "step": 414
    },
    {
      "epoch": 36.52173913043478,
      "grad_norm": 32.154483795166016,
      "learning_rate": 2.02020202020202e-06,
      "loss": 0.1903,
      "step": 420
    },
    {
      "epoch": 36.95652173913044,
      "eval_accuracy": 0.6304347826086957,
      "eval_loss": 1.450764536857605,
      "eval_runtime": 1.0455,
      "eval_samples_per_second": 43.997,
      "eval_steps_per_second": 2.869,
      "step": 425
    },
    {
      "epoch": 37.391304347826086,
      "grad_norm": 32.06462860107422,
      "learning_rate": 1.01010101010101e-06,
      "loss": 0.1864,
      "step": 430
    },
    {
      "epoch": 38.0,
      "eval_accuracy": 0.6304347826086957,
      "eval_loss": 1.4460225105285645,
      "eval_runtime": 0.78,
      "eval_samples_per_second": 58.972,
      "eval_steps_per_second": 3.846,
      "step": 437
    },
    {
      "epoch": 38.26086956521739,
      "grad_norm": 44.23124694824219,
      "learning_rate": 0.0,
      "loss": 0.1818,
      "step": 440
    },
    {
      "epoch": 38.26086956521739,
      "eval_accuracy": 0.6304347826086957,
      "eval_loss": 1.4456393718719482,
      "eval_runtime": 0.9819,
      "eval_samples_per_second": 46.85,
      "eval_steps_per_second": 3.055,
      "step": 440
    },
    {
      "epoch": 38.26086956521739,
      "step": 440,
      "total_flos": 8.989085534729011e+17,
      "train_loss": 0.4470971633087505,
      "train_runtime": 894.3938,
      "train_samples_per_second": 32.29,
      "train_steps_per_second": 0.492
    }
  ],
  "logging_steps": 10,
  "max_steps": 440,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 40,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 8.989085534729011e+17,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
}