Eric03 committed
Commit c60f76e · verified · Parent: a061151

Add files using upload-large-folder tool
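The commit message above is the default message generated by the Hugging Face upload-large-folder tool, which pushes a large local folder to a Hub repository in resumable, multi-commit chunks. As a minimal sketch of the equivalent call via huggingface_hub (assuming this repo is a dataset; the repo ID and local path below are placeholders, not taken from this commit):

    from huggingface_hub import HfApi

    api = HfApi()
    # Resumable, multi-commit upload suited to folders with many files.
    # repo_id and folder_path are placeholder values, not from this commit.
    api.upload_large_folder(
        repo_id="Eric03/<repo-name>",
        folder_path="./local_folder",
        repo_type="dataset",
    )

The same upload can be run from the command line with: huggingface-cli upload-large-folder <repo-id> <local-path> --repo-type=dataset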

This view is limited to 50 files because the commit contains too many changes.

Files changed (50)
  1. 2006.14804/main_diagram/main_diagram.drawio +1 -0
  2. 2006.14804/main_diagram/main_diagram.pdf +0 -0
  3. 2006.14804/paper_text/intro_method.md +115 -0
  4. 2103.10379/main_diagram/main_diagram.drawio +1 -0
  5. 2103.10379/paper_text/intro_method.md +140 -0
  6. 2106.02940/main_diagram/main_diagram.drawio +0 -0
  7. 2106.02940/paper_text/intro_method.md +29 -0
  8. 2108.09079/main_diagram/main_diagram.drawio +1 -0
  9. 2108.09079/main_diagram/main_diagram.pdf +0 -0
  10. 2108.09079/paper_text/intro_method.md +226 -0
  11. 2203.09275/main_diagram/main_diagram.drawio +0 -0
  12. 2203.09275/paper_text/intro_method.md +107 -0
  13. 2203.13920/main_diagram/main_diagram.drawio +1 -0
  14. 2203.13920/main_diagram/main_diagram.pdf +0 -0
  15. 2203.13920/paper_text/intro_method.md +50 -0
  16. 2204.10670/paper_text/intro_method.md +13 -0
  17. 2206.07837/main_diagram/main_diagram.drawio +1 -0
  18. 2206.07837/main_diagram/main_diagram.pdf +0 -0
  19. 2206.07837/paper_text/intro_method.md +173 -0
  20. 2208.08631/main_diagram/main_diagram.drawio +1 -0
  21. 2208.08631/main_diagram/main_diagram.pdf +0 -0
  22. 2208.08631/paper_text/intro_method.md +139 -0
  23. 2208.13614/main_diagram/main_diagram.drawio +1 -0
  24. 2208.13614/main_diagram/main_diagram.pdf +0 -0
  25. 2208.13614/paper_text/intro_method.md +498 -0
  26. 2210.06742/main_diagram/main_diagram.drawio +0 -0
  27. 2210.06742/paper_text/intro_method.md +142 -0
  28. 2301.00437/main_diagram/main_diagram.drawio +1 -0
  29. 2301.00437/main_diagram/main_diagram.pdf +0 -0
  30. 2301.00437/paper_text/intro_method.md +449 -0
  31. 2302.08956/main_diagram/main_diagram.drawio +1 -0
  32. 2302.08956/main_diagram/main_diagram.pdf +0 -0
  33. 2302.08956/paper_text/intro_method.md +100 -0
  34. 2303.02430/main_diagram/main_diagram.drawio +1 -0
  35. 2303.02430/paper_text/intro_method.md +159 -0
  36. 2303.06635/main_diagram/main_diagram.drawio +0 -0
  37. 2303.06635/paper_text/intro_method.md +135 -0
  38. 2303.09429/main_diagram/main_diagram.drawio +0 -0
  39. 2303.09429/paper_text/intro_method.md +61 -0
  40. 2304.04395/main_diagram/main_diagram.drawio +0 -0
  41. 2304.04395/paper_text/intro_method.md +112 -0
  42. 2305.00909/main_diagram/main_diagram.drawio +406 -0
  43. 2305.00909/paper_text/intro_method.md +96 -0
  44. 2305.13286/main_diagram/main_diagram.drawio +16 -0
  45. 2305.13286/main_diagram/main_diagram.pdf +0 -0
  46. 2305.13286/paper_text/intro_method.md +59 -0
  47. 2305.15616/main_diagram/main_diagram.drawio +160 -0
  48. 2305.15616/main_diagram/main_diagram.pdf +0 -0
  49. 2305.15616/paper_text/intro_method.md +152 -0
  50. 2305.19922/main_diagram/main_diagram.drawio +1 -0
2006.14804/main_diagram/main_diagram.drawio ADDED
@@ -0,0 +1 @@
+ <mxfile host="app.diagrams.net" modified="2021-02-05T03:15:21.475Z" agent="5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/14.0.2 Safari/605.1.15" etag="iRIuKMbucwN61AWunc9F" version="14.2.9"><diagram id="rmXPvQNwLDfYRC06J5c-" name="Page-1">[deflate+base64-compressed diagram payload]</diagram></mxfile>
DpJ84dhD3YSPEWi6c71o5765y8hEhBRQ84ge5LIrkFwUKUYQUB5TwAXLcZoJ7LQOz9dX+TshO4dDpI0WUmxWUyKB6NJFGC9kvA6ATXUVI+89cMDL4D2+bgcaWliSKjyE/sCwVDh1rDe323qV2kMzuQZAcxo2FkVIWhtMcfC5h40L6OP8+BzJ3nsitYS/R22zRLs5bMT/vHFgl2SODOQfPzjqxf9jYuYUHNBBIBqw6PkGdp4yPwcOhg9F0Bh3kPK+CCxckPjemYkrcrj1fStB5Je/QRNyG7ociusbIT1Som/dqc7J8tvLnYT9B0m1QpYLIo3BE60R9fhNT8egcErUO3iupC1lEQWYhQbSJXNGAUC+qEvdXpiZHxd80bf6qze3hvrJAyF81SC+JKXkJIOLUg538Ei87eMUPWhHusL7Knn2fwuGmR8+k/ZnRFN7avyi/9jsLoh6lJXJ9kNp67rjW7jkijBeMT4KyThg+B8uvPThm3netQzVpCJ5BgPpqtijKMo7JK3e6OykwzcY/E0nlbut5sTEgCNp+803yNFce5PBAjEnXSfQDQ1PmPPoQsRb2MXXoa5g8hSAtlL4+UbwsEdHiyu+E6m068cl8T+dnsH3TaPa5eVDJQJY8SORoQpkk/qtO2XsLY2b+1dCt1oUUB/5Tf7s+6cgRhRiy6qMnU7WqZvd4jN8hWJ+SYeZtDbjKXVGdEUfm8ZtxxdG1e7ARPWzIka1MkHGCyMsP+IyIh2gIbEAxDZdksXAlRZx5m1BHsvc9gpTRWyZdvj2Tg8+gKw4+Ogri2Bk42/fKkQyPSbwqDjzKOli7KHzxAJrHo3Bn1v+ib4/mwp+5YniKBUwxUIWruJYjNEo031BUsuWW/uaAvHlJ4eT48m1ZLmCuHD4xg5OdF7s5Qb6y8eGP3mrTfGd78XoPMvKs3cKpj3bnSb5B6k5AdeLg44fijnyt0lkK+N/k5pt7trTSLIqjHHZXA+Dp1oA49vMwud97vToaahooDToWt+kHGFtWr4c7MfimZn48bJ0YCfOneZyiMIPyt+Ad03ds6FbpBenLYMKX/ME4rybep1Ov1Ys5ljpAaTROSuOOod5+Y5colsAM0OlupBg9eF7tHI8lTVT50SOr+0C/GCeZHbq48ezlNzViOFI0BVPAyazhaEMmUttifbHRQH0hsX9ceiw1Ebrw5m3IRRzFD+NmgoLww/hl0jOnK9Orjw+OjarshpjPrX60sylSXeftwPaDTPa1CkEGoLnFjQ17ZHFp7N0l+x39Q3l4KFtDbyNhaOYgQB5oZXSgNs+N1lUI4n+Cqobyst68pOwZnYdE9rqFwptW1cXbKIALh6dTudodUHX0aXhZPSJ2fx0SPM3ETmCJWmUvhmFbqBaYRWyJblwi4bBcRSaeVdEKM7y+KYES4xqY77SPwLfDCoFw2RYxTAYM9GqEMx852wMi1vT9kT/lQFCSs6VTCusWCGQ+eI2X8kq2GYgXTD6PMLNWjgcyqQ/KlKCvvm/vTX/IYvurxjt528uj1fn8Gg5rrBuQcjPNBe0HY1+te7VezVsQK/zkDqLhF/mx2ej7d9DYo2PDped3ozl5PZq2sSq9sQkO52oByyrKcy/uJXUg+O7rj8V5vwep8hxGHraP+0HobebzVQEww2wQIC+7m2K1+W4bpB+gvLrbcLwqpYsG1gG5FQ79ht9ETCuvqRMttUb61Vb5NzoFnqgJ2fDiswjwaQaoyGnIed7NG9LLCVIgJmXYsIHX1FhNO2rfAzLl47h0eaA5DW051eJhs+D4Ymv3owvyRV9jYU9EiNr1sPWM47ZjHihK+Ny/Xu2p6y0FOuu4MjVk7wYv09jzQaoq4yRM5bOcInD1fByMI1Bk45gQsa7v6c3inRe/8VosXruiXVbLjeZ/o+qv+WY/aY8/NEU2XMZbi1DO9f7zgw18aDvfonYErtI8JWLwKios42cwDRvJz/WC5a9WukGhtrvhJAYY2XwYkP6ZLJIfLVmwRgt7oFwdSS7vDO86guKO8AEOESztfqB+8riZh29d0mHIwmjGC9mGr0r05a2yKtUBOQYJtd8PjqSxVX2n1wwcrpJq+xxhOmHAef4IUBGcTH49cHKJxRQIcEBbGSUoZaJu2jWY1vGTs+2syEqkFbTj+dW00gzDxW1gmjaVTdV2GE39yiR3kH39EfnobegOZN/06HVGIMY07ooM8zMwX3d8dT/h4F+5+zHIVuu3lceYX/oVblIB434SaL39QwrPr+q998B4qNinqHYC2cU8XsJt0f/2IIsuE7M2wxoZMGeChaBm8ANa1h7jON4vJrTD+Xvm56haAHRHrMIIhiHwWqEYC6HRaWrUcZyjrpv2h4C1wmV+DXxGKpclCJvgR6DDi7Gf4gf/GEjuftmKFKq4kt5D8+hwXr+qjYz0CLKar7g4hPBiMsgZ/NdgiJVstZlvOXWeHpREQUjbvHpZocXnyRjV0bzErvRAPMJL1g72VQK2rPnw23Dl7kx9PeabYJ3BVROhDaGei6n3oiBoDQ5MSRJhvTYaIRXibsdKmt8OTa4MSmcSCE2YIlbv6e1Ts7Wuy07NO8nhzx0sQZZGlB0Kglp2g5U0jcp448d/mOlulTksm7jhRY875U1z7Gn2fOGsk1JE+OhMuFWKHH3lW89SE4l0rBG3/HHm9znBCh8FncUVotGKEbEHTXKMbFsFfv9eKpFjAv3SpkfwWAKNGY+tk5AzDSK+MII+Iak2BFPBXmLlXi7E7K0W9fV6kPlnJsR4i4SJve9vGh3xiWKK14DyA92tWdhPoXO+1Nk2jdhSaPUcvkT+yk/h1zjhvs8ji+BXTlxTQEcYyXsNmBUvnJ5SwCYKnL4CArC29DHhlqvQQYwy5bkJr86tmDeHLDHoF/Jto5jpnVM6rgUlViCoAWCsrPvNppXEQP7j0XT3I1U3A7F6smCTCksjRV1RJx07z1aCzFSBkXEQpnbBWXfCqwfxhJdZla/fgSgeH1lmJk/M+Dx5pDxCTpCe19tHFvAvQ6AlxxHjzkiqMlEqNhOUKedFglmiS5R/A1Zkv9EpSNRiBPK+pfbqDoEr+MvlexveJPWUvoB9nb47Fm+peqCUWPmND/EjNwAoAvUSV7jBpVS3b3z5kpD1sb1zK5Hf7Tv4ueB1oHL4duOz7IoX6svWBkHvGu/e4xjGzvg4cukUDF5IJPWSFdU6Ht85gQnOMdu9k5tEr1HaPhbrUiKr4M9eKUT7VWtEGiEaOtpWP4xK0q9C4o2C+QSw4DLjduzA3ySQe3M+6X8x21dO67OQurdJBVdL+rB58Ufq6dw7UjbKxs8QJTXsfM/yU48KUgqE+mZhtf28abslMtfgeqxV8vzrgUoOTmpJkMM4gLzAX3PCflVxWc6nGT5/i8drP1jrO/AWXByrXef8F8JelaJvRa/7VI4z8cdVeqp+9oP4TkbMal8KPSA1EtT6QYu3zYYS2oVdVSn1e6PbB7hhh+EGLROwTsBCEayuUc9rzPg95YpPnOprzTo4sVC80cHpiowiYPGjg/PotUSvVEzOw4uYs2MD+xmXtdrlVp
g/m7aCE3HFALFYFoENtt94OWIshIm6CRrGine22bYttdFejtUNr37eAFWbaoHjsUU+Dx7WkU0UXquDr2SWeC6WnFN+fx4vQ2mK5Hm+rprMA7bujYJNUdA1Ym6rnmP6rs4VgsEoS8xewrU89q5oUlq6ZUdcQXhQyKA3PrefHWpo4eXdm5kRd6lqahqeL0Z2ErZ5cFWTiGAWXMHCUc4WWnUe3m20BfvefUj8wkLvvg+7dxQ6uVMniHNbVYiVsWZzZSFpfdynUI3ipiyCzhCc7A2MY+uW2AZ2+rDkF3lWH5Z9pJRhLQYLOmjuFwqC+sFHcFHQCst5H1j8vZsr6SKZlSBWKYy2uktZfKsEgE96HxjvYux2gNpgkhE5zXZ0wd6wlOGQxTMrthRkrPYWr0ssDKrb895JwTUkyOSYg7PGTelPoHVkYHEv7YXHLKvdj9E8vzOAU9ZCn0nCfnhCIOk5wdxuf+9k6164bMufoGvHNHj4OYyP3wVXlBMuYAkl9l9f3GF6DBuJPLnuHGW8onp3gLcPGb+/pVnBzOQmOCAxpKZfB9l6KYg91snqOQSnvyrDqMaCuU5t5lgAWkKI8iWuUpnR+kD2lGTETMBu2DYaNOmSyKLhx3+24cPPyd6B6tK2MaRV1+RnEySYOy+kidD3Fon03QZ8kvWt9bhHwJv5r5kVVJo3bKvJqnViqf3KZAJ7bPxwliB42sjM4vDfQr0hDGdkMXyh35ZeK9uq3+756j6n4fd+99gLQRipR2npydN7h61P2avGOJdFN/IVYeOroMcr52Ku73mszWDgzBmL/mjjwfBKv557SsyXcmLMYjQPzBgQO+rtkb16Z7hZqJNZ7Z0FOXwfYEtT+lKl8KzJ8sze6V1dSS0bjazYuL6gc7ZDem5PtlyDocM74FIRb7lj69uXPHJSHP/OHzgNO4FrBmz1bH1iWYvgflPKZ9cbNSzfJnEGBmsaTuc+DE6epaqt4xMTm+PL78QeduQ2R0JT1OuriAd5UN1FqoyBuUaLlJXdWFP4ZViz84rFRy1GFl8qvj7G5fTJRA24Q19yLKp+VC5IHiH0Dw9EIDLp0n0eJe1QPhuRPoLRewDWYLxtHkbmyX3LshkhXWEn/CGqQ/sVZpVSG/awj19Cc2RD00kasw/CwuJy1HsvhXyOeutsZqMkJAwPgr59TJmCiBbOxP3MOH2oAjZz5arq+L6RBVe335eLsa0qZ/TvDOEY9tDVlJQUWvi7NUdnZr/M+eUF7lCmZw+QWYFskPQG9fFgpDhonzIX3t2/zckMOb0pLnZ6dz98vRpaow+fC/HsgMgNVqWdrfdhlHwhxfQW/HykBmK0+IbbIbhTfTPcqEN9h1k/8rN06afHZrjNbTxp8czA1igTkIr8VHd61039UgZWqnir+EBNg08TKvh8PbyC1tcICX7nCCqxjmWeDVtkbFf5gYoIFke97GW/v1q+gz1I0k9QWQqcGp4Uj1fOhZ9wtqWtyhTpmIYi3avANntX2z++MBuEGQdQFOjbZL0l2knRd/J+iQzaXu2DzrI4YoXyGxdqlYhyD8RNaJPJjYV2RFxJej+GWXL79yQOrwdHGCn/4eZYXPK5+ryX1cCIoUcGF/t2O21EwrvTz8rmqx0L8jR1QOujPfa6Ija40S6KXdhGyWYrOS8mXQt0fEJi8zmXhqLRNygoSCQyLlESm5z3EI7nNg/eJaLSrHvrhF2FYM/vSy20wHrNWMibXBWbQHWE/n2atookpu3XRKqhPMdc1qOu7HKMAH1dfG7+dl35AiaQuOl1IAxvSR+rfkWSCHDz4XzUDGRaJarivXsWHl7oMAN+BHp2GOeiNaToLaULRO6BzBk3fcr6MfLQ1cvAfC6f3BpXzhX4/jV/ztr08/QTDqJw+4+wu+NsOe51LIQ4QR5nMhLCPujqcJiLFWXZLaRFoLLvs8xr7oc1lkzke4TIbLWp38kEgzWl92H1KkAgvJMljlh+Lar5o5hiAmX1xNbMaMs36dOynymaH8GVewvD8IPAIklbtau/0OZfNqz8naGe0dryTrD+kGnZPhkpsuX2OECMXPFOQ17XrLm/eO/AZeXICMuiQuQWuJvI+RtL6vOqkHz0dlKeqqo9yqlXSXkRFrkTfrXvlb5UacT+enBZAirC8msr7dw52MmnR9NQ+BQEIE8gPpDCXSKQFCv28k0ZQF2JM9RQxCEhOE0Zw1EMfOtGCOTwbH6q9IoxF4YpXzJreZSFevSH9EYZNN8Ur+u32xefrnV3mjKhVJfCGAlJgg6ilaCJI5e/vI7hhf6VHOIqk2vWOXVVzf27Fj6qzdln274NRdDZ9lbSo8rK0mKLsny/V/IFwfwnXm3xY39cFcuVZlzoHn07dKeFF4jaNvX8K+D4Zfm2zV3QdG9FYRf17iLwX06XgxbddKiHaKY3+ZfvB+Tg26Tql1guVBl/kaa/EML8/tJkEbllOMM0OpN/V7du/XWmmpAGAr6vKVkXYYVbK12xxbmuyfrCeGoUzg6a4dZgBzDWrsHwLR1F0d1EquzNYkQ4t5pBxo5U9hIdPi4MFYt/n2dQhwL8LAh7IWS+Fjl2mSuNEyDGW4q5cD/E9Xc8ffKyshQBo294kEyyAAwiqleJL+rZeXamYp7beBKzHoqEC4MBwowAVBr3zq3rZ8zulvq80uwNWgnf8BVVF2y8cYKOVvukm3BkzUgmBC9zV74w5aFHYUiM08Ddd32HI7OabEjTiq5645maNmupwTBM3uvKhc89UGnZrda2UnpQZC3qlkihEd9wGjnE4EqHTisQu4Xodd03/NktNC4cMGBZ7PhjeZHBEPuzOBEQGUdSu+kXRDLk8pvLGXT4tGE1VlPfRRehzy7JurCUN4b9qs4fb3kwaIlTfYA+Tm7pRZXBSfnMIt4pZSezDclWCiuJpwMTPxH2MtgqQmzeOoY2bGE3pUZWSh7tBhkcvZyMAhfyAmIhTzkiYMx+wyCfvzsgibE8lA/6QXl7DwhC0+MY8sHeqy1wfaw8TNWD4Zpl/uJEQDO6am0fZ4HRt3kTFGfaqE980ZmhwpiicheNjgn6dYjfGCgmwSKB2vULRzwd7V8ypPdJjtMYOW+r3DRpqXT3heOL0HJizW2zdmIBxYsTTyo8OlSv9JE02kbbYidws0f1rXZAdaXJyKv96QiFHTy84vDKAycks1UI02V+VmGWw495BL7kG1ZLjE19x9+Xc6LK693BZb38KCkfsmkCfs8RH75kccmljpdumR5OU2sVJn7xXjAgb3eqGWlX0XjErJFGihZm8KJpnXUwf3meFD7eBVLgBju0WYcAjZG0xM/tqnF/+71EIl5ZS1dKlMHMJRkzHhnbu6XPrbLLzujbG/Wv5Hzn1wMzHhheCmaP4xX+ui/yQTa+c7wshRJcZ3+JfaZrZsZ9TrK7zRyfjfwK/Zjo9PBUI2RzcfzsfYxXmEORxaryjfh6kATImN4vsHz6ZY9LfE51qXAPhszH62WKFarlHvrNwRuIhKfG0yj0CmfY7GUt+giECWsfe88OuHEiyG1LGlrHJAw+kR0C4A/xrUYCxX/h+
mpDrWJYpvGLyzXGb5S5qCb1fCkk/FN09VGRkThl6RMLbo73S+y3JxxuTFR2fVkUoCKAbSvTmlw2P2+GQQ3mhHLjvt3Rf9GWclIyRXfbxaLU2F1hEs7SK0fffN9F9X3F7tCFpQ/JD/kFT8QKMr88HpB3Vwl4t4jdw4a5X76ABMtBXogp1Wp1N7Y5YGOKG14cCuSjOlkjj1j1KHb5lvNzsqmLKWq7a6wfKYnc+TInIm7twjt/z5qjWD54gllYo0dDZ+REHVkFX7GL850vtPSjAg2UUpV6TFdcDn5BPI8yYaTX13eNJ+vQWDNwtlAjWCjGIYd8X3AwfKicpz5ZhghCkVsq0jzEAmSVlHw1KiR5hc0LGOmGUWkNYktN0kO2aMxF9PKH7s6RN8FSWESNxw9g0F+JGyGSpd9dLhXPYc9MGOxyNtZ+T9qvqWmfzD0qFsiCzNXOvp3Kx1sD2e+SLHPCNdn8IRYot7YZQUuXsO7aF2MNKLHKe9O8smQF1WuQri/dhMc1WCw14k2coiAUqMq+g+J8HBFi/BHWDU1X4IRbh7tQQJR+aVzPsubN1prwi6nRADrWYyskqU0vHv+3LhjVYGEYPhgY1Rl4Z6Krx/QIwjHjMwAc7oZDCKMhH+zrXkG4+aIO8NHyagGsIupcyog7MydF64KFJAykBLdpIu9X/o3W6XaKfIqqJrMoI8gacXttLAKCc1UmnRiWJzhUOhQKkWg6Dj0+LesZed/XeWHts8lVabYZiqbd/RuMs0Uwnf5qtEDSRjMVG7g9NizvSpes9eQMcjTMpChm/xvOcu6+cJV4A4d9N30T9CE5OaGp1xHDxBHgw2KUJfDyIXwbmBt9FyAiDYnpN0s3wEaMu6GYZPb1Pc18w2RSd14x4GYPVH68olj1k1J9HmzQPLKkPUqEtd92WgSHOWRLaCIBVorfkQ6KDh65TgWhtrGzfs+9DEE4q1IQS26SLihjpzKOwYrHGskyo9dtK2T9pygLmy8Mr8vceZ1szWkhSV8GweZZZ2mHRuLgws7YL+jwYgkct7QqfmhtGj+sxNahb1J2jIxk5GK00yjzvJSxIANcNmL9ys7ZHO473cy+zn0E6alNRdwPKtv6/tIMk7UCO4kChJc/5F2fALMS3APQa6D2JJvJqK6xFARC75pHEHquTlbqZt9y9u7VvlpDSWmPsQymZB5ZF1n1lHRVV0YIz205TA+Sd1YRotQLSt8OvunSX7Y3FI7IkG0nwT5WwzsujefQjEapLmQtY9ppf1IGRpJoS9rocE0iNT/YzDtG+WrzSe5EOxzI2MAUGFnEtOn91GjR2mCCpYOredkMNVi/Zk58/Dhnt5OcJuTT1tBWUhUqBa+CoOAvkXrW81ssyy55EL71FwQ6TR5clSCZw3gsFo5dSpr1u1ot4aMkaRV/c23lx41lGA6IIsvz4lJbZ7tmnLzlk0GPHM3EksRcNQcxLLLI+SNNFg9yYhVjXzN7HQp8dhl0M2pb9bTyysG5Ej2EsSkj3w97ZRIrZHaGlZU0ehxtp68VT2/viBNIOAhm4YF8n8Xi6WYYmfVx+4BJfOxiOdUGO5iH1M2ne/rQ4Efd7FLGQ++7W+4qR88jm4hirWuudk4nBAclBg3CGkqzz9nG3LWKLT0U5NPVe3A9nIFFYXaGIDHWhb/xBg56KhJSk5GNR0dl4AC25PDbnc+Ly4d9ZXqpa1mF8M+sXgdtN3sWhvbVEP+lh8qZEb5BEX2qVhE2cHRagLvfnuYgDDfnFJiV/r1M8mmp74TCXOa70l5RWh+IaK6sIf1i27GvffddjGGJ3XlFOlL4Xa8HO0pEDEupYAYBLuA5+l16Em8aY1mmx8uL12HwI69dD6dIpk223xXtD3V0QDq6Y3h6LNKDyb42/a7GQvZ2p98zvAUDD9iEWVULg8/Z0smj+HDGjfzmMsI6atSyAdw4bfrIHDEg3Jucnoff17a/UukB1di4CfAsq4YdmzTYJ7908wc/NI79wGfbWE83Watf/WB8RxpUka0zcKggfD8Qv9nvG5LOR+sPjBchnzYv1p6gUhRFej6E4jdf3u6P6kGGv2IzMk2QVTgxyxF2YPMYses/ZadalxcDcjHbulfU5ozzADm9t7Dg535cbz+FxPqLhu8MQpx3DfqTlIWeTo/fnuXsso77KCLjaMzXqD/iaPhD32YhKI6gfqX9ID0ogFylVxYUE33lX64T18q0WVblODloVCTQE0Y97PTEPsAC162tv+DKQRhMSOQyPdL9GERIgD4PaBLm4sTxHEmyXorOkj94YXaJIE/LBKdsqP4Y1y0L8dnbxdWb9Em9mfYqN2PTHsJQj5axN+vIvt9w/1iBAbfckyrY6bymuwE+34hUD14fbg/t7gYdahb5aNkbKZRmG4z0cAzD6AzAsESiJ0TAAxqB4ldYClB5bEQmxolM4Dv37is0jddIBOfIi5LaQG0gQlb8tq+DOEassZnHqgO8XJ9YHcHn+izwxmD129J42uaVLCybG75woQXjKEQHp3PRIhJ9Bf7Ybr3Zp0FSDI5ggAFAiWRvyjjIK7D9evXoNkLfRcmFIJKkTRbS8HNkllj/Tg5WP0MfOOtXUGOkTuglivRHjR/oZETjunA49xtigf0HR6M2OrpADq+7Adr6rRoriSiA5gd/trT8APRAvDoQNcAAGElAsxx0yy4ty0YPaQv8KtjoHOEzhtLcciWxgasEtqJzffVFPjME68YaeYl7MK5U3HCwrGpMFTkK20au+l4yQ3F/rY1lhVjKjgd6oXsYi+q1J+j72rIuu2ApjRB4SRmPQJ27ZMTwLX4xokDHXytgR6Q+ykGCbk5s5o4e3Mdw9g0Lscq5ozqNeBGkhMkExo6r+xVB/OqW+Cmla83ClFzMIYrCv5EZFAZIf3rgEq9gH75E1SWoSpK7ZGxEUArx9iX9KBKqpRKK7b9RCVRKH+ye3DEzgyNXRE+hG5a4vBfU5g+CPJLawmkp321fRX/TL3qIqHfPs4tfswMlDCrnGBULTAqbeFwM984LbL8pbP38QXw8ldlwytoDtYSr053H6FvgjADMKy+aRFE7TIHv3ktS4IfeTdcREEogDGROUWSb/AWlgCH4dauEKEH/7jwbT+T9GVL3SL9pKGYMV2QkwFJSGfL6EES+RI0ih/xNFl6ZVPYLNR38tPOL9HW9YYIQCNRmu8NwmCIQKyfpL8IwrrWV4wpEHX/DfgcjK72q8PVyDwQmt5T1QGS+6VoX+Yn7q0HT6JykyTvRRJe8B8olxUS5a7I0nLEdCNZuliW06TnlRHLP/ZqHqgmod9CjpjDvY7Dnozty8PEGBdC5sfwiLKs53pJsLNAywNioT0ugQoluIb0e0ZR95fKwUSmGMEl53A8XJ//OWmTx35Dif7NcqF9fwate7ADoOo5gSoNQfgoPfyuKD+QO02EI9NS2AYlFzXPxBniiv73+WKnnOtfjeWPtkcpKe8G9ziYcUCs1nCRzEVzOPssJdh4a0bD9RL4eoE13HEVXKjXLIZVhfb3Eg7zt3UErsiYrYwVV/2VmeMS+fviI5t+dAvRG8nMh
Lrklj+Di3mxvn1mhPjaUuytrBARN5PakEZ4FU3Ao74ElRjwh2gg9iEM5OBx+kG1hFa5FHL+p7Z+xuamqT73eOhj41/2oYA5U1ePDUZnKwrDGKb2u5aq5HmV8Ch2zUjLx+vgPsG5oEFMvouylqydN06qI3iI7qAiuToj7wKYjYgG4b7F3BOc1/MO4EAxszKKwp0a2hHc/TgREHT+kT7npRerGGh18XgRpCb0e2wPcG9mqUCAOVprm+OTJb0Lr6a0gGxkTwcGItLvGjM8aR8to2WPs+5/nVnU6tIzlqDQ8BSPt2FRPn62DwEa2oB9TPHXja2vBlii+K+sfmrjkdxvYBQTV1e7bHEInZqAxTMLRr115D5whqgArp2gCnV9zHaMHWU15vhiWIqBvBoHSMpVk0scdPtsEUS4ODJs6YEu2AmdkwpBl6COko0ncsF6QP4InxHrZaRa6M4z2DumWClhQX6ME8Wz3VXVnff0JPtURSfaHtZr36zByuRiaDZh076u+tufRW7Ys2C3GoJEMumokewN4EjerT88dyOYrwlAJDMqzItVIE200FTzZjTImMEuuk+LGd6lgU8ee2wo6ZnySN6rK3IHzwbQ40A54N0X0rNi2Cj/5BTmTusOGb+BjRo3LR0f1HWV1OuejI8rnQnOIlXZTrn6DnfZMi9HUjSLNIYseZnDNb8P5l/zU0/bDL5Z0ktVtRw5oNfhmviyljiXnrPpQ0I9ZQag1QueIt4NJiPc6bOoHBiIzJHzPAhUlOcfvr4rU1u9nIgPSBBLFBnOSt1tRHVcT+aAvSRppzxzyqFHrGKaHOLNi1pxuCckYw4qI68JTp+CVdl6ld6Zbs7V/y/vWnU7XKjL3I+Lu7XhReWzxSA+o1xfuEmBF8FoagJh/zR6Y31TS35u3e0wVQvL+utuo61CaR1dFj9RO/GDQQkHPbwRCljDgQbRqtEjhWJTqZXWKLKqdVeSgjYr9buehN2+Q02yi9xtEbzSUDKlsjRDVzUlyceNlXCceZ51XX+ZEM08JQ06w1BAml+sRJhgzl5hepJBvELc6rhabtExwXOQ70Mh3guIqfb9bReUrJsIS7ta2XrbDq7ai+21GHOczAbsz+msTxkinNbWSlJiwG6YApb2iNLwMiEd4jeY8LKkJBYz7cD6laMIn7ZNnsBdUp2HTrvdeQp+4+SYC0lfntLBazB5sM5q83n4nZ0TaSHCwUYkaIKPxsDBGof1OcN6OrzXBrvBMztqvxjoRywIREhGpD7YMP1xaOvWNwzJwRnkK88mCkcuOZLgyymFAxfD0IPPkCOAJ/SZBQIdi4jqUxQyvh6YVdUHG6ysBa0Ngqs+OG2ccB9OlsouM26p8RfM3rs21Zgq1rI4kSySTvkaqhZpKZFgbAR+vM0ZbSepqmIz/2imv8QXFY8aHeV3LKXxLHny1WMOSIgmw964j5p1G5EufX36Ml2wnieL0pa2Ra83X9jbeAo4Wjc9FvMLw3rJ5Si98zY/weU1VyvRhLROwbIE2Puv9kI3ki5lnlPQUfF+o2KuPvSmI1n0B4zzDSRPfPLbdplXX3qSLmTtVUYs+9ENJ+GE6cb056kqGVu8IE/Zxxnae4HRl6ZLW5nl8tm/bokoXlQY72KwmcDmu4FVVUHPjwzwfpfuERUo3gJa/SpkVw/glZSkqYs7oDYp87mrrnTlQEfOcTLmepRq1r5Qx6l03uOZhsTVrBAextuMapdiCMM6JGRLmsojGQPoPrlebnj1uAal42bUjTDOpg/QcCx+HZXlVGlt5Bh/pLyqBFIiNFUEr4uFRuL0MuUM/cbhDZwjObGJECIz2doTCFqgTh9krz1544w10p83Br4dymJqSueWJ9ITncUHmqMoHwfYQOQj6NqVHc05eiIoH6t+pEm2DVV+izir+dNoqs3itXfpddSyml7p5wT1+4Ff1xV75edcsqnhLqaig0gl9110Aje0lpqzgPqujHMy7EVZOehmOnejYVNO/ZkqFUcHJkJfFeQ+t2WBzQmWD8bwdDhjB4iCr0tvdZJOAejGW4WFd6hosItjhUarxei6rjIQb1CwwUNfXBFP+Xd+6LklVen2F/aR/Q0JjKgFlKvbRExlBBrStUfQ11M53GQmLYc5SuqmHvDcZ9W3qJav5kZagBYw+FaUvsH3c5cmChMgSr61FgUPDYzTD7ItDt25k6DJJeCOWEyR6+nu9I4Vyo3h1Nf7i4tVSAJbp/azFzGU8EuL0PpS1XGmTUwPLaq7d5F/lF7I6AbMBuIX4MlQwil0RDukMkpr7hmWWDLKTAGOQ7giVG1XYchttJ5MXYJBMjZIrOsNrFsru/PDpDWSKAA1aFWaHIL6diwnFLNPkYstKqvtyiNOODi6h8TlYTx6cu84KM318aTPK63aAI1Lupkc7feNwums4kbjQ6kjA64a1e7OKApkdPJiEG2jV009qPsLXwQBuIm+ELma/cEjrOB7OYinocE/wDf605oamOEWWdn0cZ7cQBMgISrr3vl+AujwoU/cgPOSxFYFCJ4nuwyTbVBLE2qCg6RLrCJzfIFbhEGQyExGiPg5Vy0JkvpygG434zSamIBHsW2l2+Y6SOXknl1wn4eDidmhyI4Pf6efK7W0rypIm1lm7MRS0aLDGKIAAwVsYXpdwrpp7Q6lffQStIrxXH47LifEtMni1x6jJb8zs41pAFn5R5j8UO6SxlHMxRpwFOmFUCUA1vvuAz1BFbu+2pQZmAH+R7SFkieUBAe2CN1zmo2/JpdxChPgbBnMi5Pi1oxWPyszfZY2v46MKiGB5SXRPWzQyRyYOS493+Q1WGuFK/dGuNnOU+hx5WERe7sRH4kEetsXpJImNw44gQZkHYHFoPCVJtKAIS/lxrYChsDuFYJJEzLCsDkZEKevXCn0dLc0+0qXQaUpmRy8mJEbPwmp+S9bDAb9HsWBRqbeUfnO3vcve/txtfQixryjq55MJPPgKjUoIAdUdeeUCt0RUo9zKQmx5Zpi8HNFeV2/EDnmOTJOZ/DqvCFnSacAvxOLvZ8XbPU/jNDPEQJc91rSWZiCfT7A+4vabLJRmzsMvojqNaQfGhpT4DR4e+r5KifMeVByQHj9x7SzllArLuDOGawL5DH68OPSXAKZMlh+Yx07wu4VYg1tKnpHsYzJrdrNdQfYmR/ISCb0WR1HiW1agwtrYvCgMgWzxbAtDfd/Lslhn2m1LHBQFo6xPhYoVG8yaeI11sTv8sn/TCd2HgZK3F9DvlD5Bfp/T2PVox17dX4MO16uiJbnbQ5276WPJQ1Gr1fkX6WL6/ArCLlgslTwOpj9ilL5TjhcQJ0shiZcpH4z+Zif1l4ARSMAJeqskbAJErvcuAJ19bPAWOlt3eMxYMus3NdDFsCIEWSDwUz+mhLHttBBfi+CQ8wdY2ArDZgkIDGXxjG9RKVR7XEGvBLrbbwFmdrCzRaGOWMLlYERLgY7UlJxWbVn1O5hiJ+CzLBLTI+QLwVI88253dEZncv1j+Cja3MU0YlGbuSkUtzEuDxa/GVo8SOdLFkFbpPNmrejNFHurWBCkbfQVtht
y8RDBrF1pRpD1Sj41ZzDbbHbSa8vdB7XoHH4hHop+wboTsTMfvwnR6P4rdKH/qDBJ+uT65tPBkXtCxe4NlA/LvwgJnhdf/6atyzOsyqXEWtpQ89eFm7A2/7Fe20XGTgaLa7u8zfV+iAq+HQCyfblsuK+g8AHenWJYiigKOiubGUSZUXunOSXRzQn6l+LrxIpZRQ2SJNY97LcEvXi7yl0wN+cDIi8hOnRiWK7IHCZUwUUNvX6QoyUhg8HPMmswz2tc914yiZShVzjk8wtF7ostI+BUKOKsx3LnvhuepJQ7Sec3lS0bDJVkIQj/tifHj+7ASt9YELT9BZnVHFvKga+4m956oHKDK1kFyWxFISKmRxytY+l8qeGEyiDpuHc3mHM1BQYRpihLFuuFTs8ecE6mLOBLFMC/mR6vjK91y8BaLhntpZvykCeNeqBd73x1CK/fN0bsrfVWDeWdHC9LfC19KjICBWLWbacDcY5tOlPC1YwyqZkxxbeLwsrYlVbzFwpTkMWsU1LGX3OO5cAw41NChbRmCpOxhrwaf/3xkFmGAL+hbyvHrluEh8ekWaNnItqDSx84u3L4uNF3XYYVY717iC2dO0+cJZLPVGaNBwBSLyhVBKp6DT0iDW3T7T7gZo4abowaQBw4FojV2MIB+RG+8r4YLEuMPvwOWiUe7I7QI4U5n/Rg/CpEXyk+NanqXhVwKBlv00MrL6BmSPid583Ptu9CQfCvAhlQzL/4dmuph8TO2St9pEd5s8yvip5zBOk/bzCYSheT+BLDprp+l6qHif/nc9T6ZJXZsywj/3/ZqyJW//qaDGYBOwtC0NvyiVdd63q/ejgMw6qujwvzR62q6n+h/O/v83CPMYH2Yl6LE1xBwOxfcGlK5mJY/49LqPC/UK4/pWLsH5P54CTo3y9gJP5fBAn9959/H3D9/RRF8b/XR5Ov9b9rOPR3rS6aqv73JQiE/Rf5773J8net+u8vA9G7v1sA9vHkiq77zx39/o9ATf73O2Ve06SuH4zU/28DYU61JbP/TSP/HjTptuLvfX8XlvXq/l0ohpyZ5/F4XmVdsixN9qxPvfbPF/Hw899yHNbXY/Selwj2vC7OZg3Bz/4Lgol/r6PnNfRfBET/e82DNYL+8+L692JZ5/FTBP8WBPn3Y7uYm+dpi/nfu/5usMir4v++Mc9DjNucFf+Xx6eI/3kH/489+Z+25D/X5qJL1mb//97H/7RH/77BHhswQ/c/AkIT/zcBwf7/m/73NP8+4/8p7+qa1ISh6K/po5l8A4/r2rd22pnOdPqaFVRmkVCIs+6/770RlA9XV9ePmS4PCiRcYs45l5OAusN9EJYxH7cTjOteMGfKeeIGwTyJth3xAV6J47wq7SoH21+j+rJIXfKrMB6vlxKG+12SpVn2aDNb+mMF5HqazLaU6ZSwyfixPuIHxEsd9oGiNVNbVWd+uYzYtaIk6AKoaECEjnbLUO+aRHpIL8GJCK8lePnZgAkFET1lCT2AQkKf74NCklBeCwq1Bwqdubo/OpjovyvbFIwqn2zB6FMui/WuENbm+P47rVYGT/91XWQmhxRl8yYy3uHzwTdVB9hDR7suwCZL5znmfuhwzMFjhCOdmuyhLlimcYyHj8sEWmaefChEtcDs4ntNjb8ovKqalbOb1vvQF4BXScx1oVaAM8V/G+xKUHFCqZARpxpf2QB3zvZecq8Fuf5k6gvCgECbtgs/7oFCovkQEtRncC1UgpNMUG5zZHtsqoWHiXUh6ZoYccAgURKxxuSgQRpRcEzsoEPa54cAivL1T3tj47ZUs7kL5beaWBf0UY3sNq7iqADuZbgCedCR0zMNVxAdCgtJ6LbWK7wPm8Hu857dF/85mcU9ySwCRnTYspeyd+nr+Zb3shliwjWTBkzyUMFrN6yI4Kyt5cbcjm7M7TfYGByjYyvFgx7bohBniOIu5JZ3JTe4dt0eO+kuucWZqVpxJLd4w48ofls6M0pP8YPsND9Ys384qXIBXycDEEELn2auoskSnBG1b7wrSCiGpAE8txMWF3d3jL5jLqJamAJX06VBkbW7tD/ccbZo7f1mnpLsJwxq/CBLTJ6sc3YJFTIsGJvp89wDuM9f+5M9VEUydbXQTbMxS9cI+bhuz2ThXFH5MR/O8k/jXJJ0avNZCtQoydTi81Sxcfh8KO7Hr4xVSJKRyeORQQrkdvk6Yv6uIj7QUlpo6YjxkBT426cfJgRj4PT7k040HFAAzjjEf7vzBPD9Z8Bh706QoIbFdxsnWOMf</diagram></mxfile>
2006.14804/main_diagram/main_diagram.pdf ADDED
Binary file (86.3 kB).
 
2006.14804/paper_text/intro_method.md ADDED
@@ -0,0 +1,115 @@
1
+ # Introduction
2
+
3
+ Deep reinforcement learning (DRL) algorithms have achieved many successes in solving problems with high-dimensional state and action spaces [\[27,](#page-11-0) [3\]](#page-9-0). However, DRL's performance is limited by its sample (in)efficiency. It is often impractical to collect millions of training samples as required by standard DRL algorithms. One way to tame this problem is to leverage additional human guidance by following the general paradigm of Human-in-the-Loop Reinforcement Learning (HIRL) [\[51\]](#page-12-0). It often allows the agent to achieve better performance and higher sample efficiency.
4
+
5
+ <sup>∗</sup> Preprint of a paper accepted to NeurIPS 2021 as a Spotlight presentation
6
+
7
+ One popular form of human guidance in HIRL is the binary evaluative feedback [\[15\]](#page-10-0), in which humans provide a "good" or "bad" judgment for a queried state-action pair. This framework allows non-expert humans to provide feedback, but its sample efficiency could be further improved by asking humans to provide stronger guidance. For example, the binary feedback does not tell the agent why it made a mistake. If humans can explain the "why" behind the evaluative feedback, then it is possible to further improve the sample efficiency and performance.
8
+
9
+ Taking the training of an autonomous driving agent as a motivating example, humans can "explain" the correct action of "apply-brake" by pointing out to the agent that the "STOP" sign is an essential signal. One way to convey this information is through *saliency information*, in which humans highlight the important (salient) regions of the visual environment state. The visual explanation will indicate which visual features matter the most for the decision in the current state. Note that requiring human trainers to provide explanations on their evaluations does not necessarily require any more expertise on their part than is needed for providing only binary feedback. In our driving agent example, the human trainers may not know things like the optimal angle of the steering wheel; however, we can expect them to be able to tell whether an observed action is good or bad, and what visual objects matter for that decision.
10
+
11
+ In fact, the use of human visual explanation has been investigated by recent works in several supervised learning tasks [\[39,](#page-11-1) [35,](#page-11-2) [34\]](#page-11-3). They show that the generalization of a convolutional neural network can be improved by forcing the model to output the same saliency map as humans; in other words, it forces the model to make the right prediction for the right reasons. However, it remains unclear how to effectively incorporate the domain knowledge in visual explanations into deep reinforcement learning.
12
+
13
+ In this work, we present EXPAND, which leverages EXPlanation AugmeNted feeDback (Fig. [1](#page-1-0)) to support efficient human guidance for deep reinforcement learning. This raises two immediate challenges: (i) how to make it easy for humans to provide the visual explanations, and (ii) how to amplify the sparse human feedback. EXPAND employs a novel context-aware data augmentation method, which amplifies the difference between relevant and irrelevant regions
14
+
15
+ <span id="page-1-0"></span>![](_page_1_Figure_4.jpeg)
16
+
17
+ Figure 1: Overview of EXPAND. The agent queries the human with a sampled trajectory for binary evaluative feedback on the action and saliency annotation on the state. The Perturbation Module supplements the saliency explanation by perturbing irrelevant regions. The agent then consumes the integrated feedback and updates its parameters. This loop continues until the agent is trained with feedback queried every N<sup>f</sup> episodes. The domain shown for "Visual Explanation" is *Pixel Taxi*.
18
+
19
+ in human explanation by applying multiple perturbations to irrelevant parts. To reduce the human effort needed to provide visual explanations, EXPAND uses off-the-shelf object detectors and trackers to allow humans to give explanations in terms of salient objects in the visual state, which are then automatically converted to saliency regions to be used by the RL system. We show that EXPAND agents require fewer interactions with the environment (*environment sample efficiency*) and over 30% fewer human signals (*human feedback sample efficiency*) by leveraging human visual explanation.
20
+
21
+ We highlight the main contributions of EXPAND below:
22
+
23
+ - This is the first work that leverages human visual explanation in human-in-the-loop reinforcement learning tasks. We show that EXPAND is the state-of-the-art method to learn from human evaluative feedback in terms of environment sample efficiency and human feedback sample efficiency.
24
+ - We benchmark our context-aware data augmentation against standard context-agnostic data augmentation techniques. Our experiment results help shed light on the limitations of existing data augmentations and can benefit future research in accelerating RL with augmented data.
25
+ - We show that human visual explanation in a sequential decision-making task can be collected in a low-effort and semi-automated way by using an off-the-shelf object detector and tracker.
26
+
27
+ We note that EXPAND agents have applicability beyond improving the efficiency of human-in-the-loop RL with sparse environment rewards. In particular, they can be equally useful in accepting human guidance to align the RL system to human preferences. They can be used to incorporate rewards from human feedback, for example, when the environment reward is not defined upfront or to accommodate additional constraints that are not reflected in the existing reward signal. EXPAND, thus, can also be useful in aligning the objectives of humans in the loop with the agent's [\[15,](#page-10-0) [5\]](#page-10-1). Finally, EXPAND's approach of allowing humans to provide their explanations and guidance in terms of objects of relevance to them, and automatically converting those to saliency regions, is an instance of the general approach for explainable and advisable AI systems that use symbols as a *lingua franca* for communicating with humans, independent of whether they use symbolic reasoning internally [\[13\]](#page-10-2).
28
+
29
+ # Method
30
+
31
+ EXPAND aims to improve data efficiency and performance with explanation augmented feedback. In the following, we first describe how EXPAND simultaneously learns from both environment reward and binary evaluative feedback. Then we introduce our novel context-aware data augmentation that utilizes the domain knowledge in human explanation. Algorithm 1 in Appendix A presents the train-interaction loop of EXPAND. Within an episode, the agent interacts with the environment and stores its transition experiences. Every few episodes, it collects human feedback queried on a trajectory sampled from the most recent policy. All the human feedback is stored in a feedback buffer similar to the replay buffer in off-policy DRL [27]. The weights of the RL agent are updated twice in a single training step: first using sampled environment data as in the usual RL update, and then using human feedback data. In our experiments, both EXPAND and the baselines follow the same train-interaction loop.
32
+
33
+ The underlying RL algorithm of EXPAND can be any off-policy Q-learning-based algorithm. In this work, we use Deep Q-Networks [27] combined with multi-step returns, reward clipping, soft-target network updates, and prioritized experience replay [38]. We refer to this RL approach as Efficient DQN.
34
+
35
+ To learn from binary evaluative feedback, we propose a new method that doesn't require additional parameters to explicitly approximate human feedback. We use the advantage value to formulate the feedback loss function, called the *advantage loss*. The advantage value is the difference between the Q-value of the action upon which the feedback was given and the Q-value of the current optimal action calculated by the neural network. Given the agent's current policy $\pi$, state $s$, and action $a$ for which the feedback is given, the advantage value is defined as:
36
+
37
+ $$A^{\pi}(s,a) = Q^{\pi}(s,a) - V^{\pi}(s) = Q^{\pi}(s,a) - Q^{\pi}(s,\pi(s))$$
38
+ (1)
39
+
40
+ Hence, the advantage value quantifies the possible (dis)advantage the agent would have if some other action were chosen instead of the current-best. It can be viewed as the agent's judgment on the optimality of an action. Positive feedback means the human trainer expects the advantage value of the annotated action to be zero. Therefore, we define a loss function, i.e., the advantage loss, which forces the network to have the same judgment on the optimality of action as the human trainer. Intuitively, we penalize the policy-approximator when a marked "good" action is not chosen as the best action, or when a marked "bad" action is chosen as the best action.
41
+
42
+ For a feedback $h=(x^h,b^h,x,a,s)$ , when the label is "good", i.e., $b^h=1$ , we expect the network to output a target value $\hat{A}(s,a)=0$ , so the loss can be defined as $|\hat{A}(s,a)-A^{\pi}(s,a)|=Q^{\pi}(s,\pi(s))-Q^{\pi}(s,a)$ . When the label is "bad", i.e., $b^h=-1$ , we expect the network to output an advantage value $A^{\pi}(s,a)<0$ . Since here we do not have a specific target value for $A^{\pi}(s,a)$ , we resort to the idea of large margin classification loss [31], which forces $Q^{\pi}(s,a)$ to be at least a margin $l_m$ lower than
43
+
44
+ the Q-value of the second-best action, i.e., $\max_{a' \neq a} Q^{\pi}(s, a')$. One advantage of this interpretation of human feedback is that it directly shapes the Q-values with the feedback information and does not require additional parameters to model human feedback. This kind of large margin loss has been shown to be effective in practice by previous works like DQfD [\[12\]](#page-10-17).
45
+
46
+ Formally, for human feedback $h = (x^h, b^h, x, a, s)$ and the corresponding advantage value $A_{s,a} = A^{\pi}(s, a)$, the advantage loss is:
47
+
48
+ $$L_A(s, a, h) = L_A^{Good}(s, a, h) + L_A^{Bad}(s, a, h)$$
49
+ (2)
50
+
51
+ where
52
+
53
+ $$L_A^{Good}(s, a, h; b^h = 1) = \begin{cases} 0 & ; A_{s,a} = 0 \\ Q^{\pi}(s, \pi(s)) - Q^{\pi}(s, a) & ; \text{ otherwise} \end{cases}$$
54
+ (3)
55
+
56
+ and,
57
+
58
+ $$L_A^{Bad}(s, a, h; b^h = -1) = \begin{cases} 0 & ; A_{s,a} < 0 \\ Q^{\pi}(s, a) - (\max_{a' \neq a} Q^{\pi}(s, a') - l_m) & ; A_{s,a} = 0 \end{cases}$$
59
+ (4)
60
+
61
+ Note that the COACH framework [\[25\]](#page-11-7) also interprets human feedback as the advantage function. In COACH's formulation, the advantage value is exactly the advantage term in the policy gradient equation: human trainers are supposed to provide positive/negative policy-dependent feedback only when the agent performs an action better/worse than that in the current policy. Thus, such a formulation is restricted to on-policy policy-gradient methods. In contrast, the advantage function in EXPAND aims to capture the relative utility of all the actions. Here human feedback is direct policy advice as in the Advice framework [\[11\]](#page-10-9), indicating whether an action is preferable regardless of the agent's current policy. This property makes the advantage loss in EXPAND a better fit for off-policy value-based methods.
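+
+ To make the advantage loss concrete, the following is a minimal PyTorch-style sketch of Equations (2)-(4). It is an illustrative reading of the formulas, not the authors' released code; the tensor layout, function name, and margin value are assumptions.
+
+ ```python
+ import torch
+
+ def advantage_loss(q_values: torch.Tensor, action: int, label: int,
+                    margin: float = 0.05) -> torch.Tensor:
+     """Advantage loss for one feedback (Eqs. 2-4).
+
+     q_values: Q^pi(s, .) for all actions, shape [num_actions]
+     action:   the action the feedback was given on
+     label:    b^h, +1 for "good" and -1 for "bad"
+     margin:   the large-margin constant l_m (illustrative value)
+     """
+     q_a = q_values[action]
+     if label == 1:
+         # Eq. (3): zero when a is already the greedy action (A(s, a) = 0),
+         # otherwise Q(s, pi(s)) - Q(s, a).
+         return q_values.max() - q_a
+     if q_a < q_values.max():
+         return torch.zeros((), dtype=q_values.dtype)  # Eq. (4): A(s, a) < 0
+     # Otherwise push Q(s, a) at least `margin` below the best other action.
+     others = torch.cat([q_values[:action], q_values[action + 1:]])
+     return q_a - (others.max() - margin)
+ ```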
62
+
63
+ Human visual explanation informs the agent about which parts of the state matter for making the right decision. These "parts" of the state could be specific regions or objects. In EXPAND, each saliency map consists of a set of bounding boxes over the images, marking a region's importance in making the decision. The intuition is that the agent's internal representation must correctly capture the relevant features before it can make an optimal decision. Based on this, we propose a novel context-aware data augmentation technique, which applies multiple perturbations to irrelevant visual regions in the image and forces the model to be invariant under these transformations. The key idea here is that manipulations to irrelevant regions should not affect the agent's policy.
64
+
65
+ To get more insights on how our data augmentation method benefits policy learning, we follow the causal-graph interpretation of representation learning with data augmentation visualized in Fig. [2a](#page-4-0) [\[26\]](#page-11-12). In this model, each image observation X can be expressed by *content* variables and *style* variables. The content variable C contains all the necessary information to make the correct prediction, while the style variable S contains all other information that doesn't influence the current downstream task. That said, only content is causally related to the current downstream target of interest Y, and content C is independent of style S. The goal of representation learning is to accurately approximate the invariant part (content) and ignore the varying parts (style). Based on this formulation, data augmentation is essentially a way to emulate style variability by performing interventions on the style variables S.
66
+
67
+ <span id="page-4-0"></span>![](_page_4_Figure_11.jpeg)
68
+
69
+ Figure 2: (a) Modeling representation learning problem with data augmentation using a causal graph. Figure from [\[26\]](#page-11-12). (b) Example state of Pixel-Taxi, the top image represents the original state and the bottom represents a sample augmented state.
70
+
73
+ However, since the choice of data augmentations implicitly defines which aspects of the data are designated as style and which as content, existing data augmentation methods in RL are limited to conservative transformations, such as random translation or random cropping, to make sure the content information C is preserved. Previous works show that transformations altering the content C can even be detrimental, which is referred to as aggressive augmentation in [32].
74
+
75
+ Different from standard data augmentations in RL, EXPAND obtains prior domain knowledge about the state content from human explanations. Hence, we can resort to a wider range of transformations, and thereby more informatively highlight the content variable C, by applying various transformations only to regions marked as irrelevant (style variable S) while keeping the relevant parts (content variable C) unchanged. Figure 2b shows an example state from the Pixel-Taxi domain, where the top image is the original state with the gray cell as the taxi, red as the passenger, and black as the destination. The bottom image shows an example image used by EXPAND, which contrastively highlights the taxi and passenger by performing a Gaussian blur over the remainder of the image observation. This illustrates why the proposed context-aware augmentation can be more informative than standard data augmentations.
76
+
77
+ We choose to use Gaussian blurring to perturb the irrelevant regions, which is also used in a previous Explainable RL work [10]. The reason we use Gaussian blurring is that it effectively perturbs objects in the image while not introducing new information. As a counterexample, transformations like *cutout* might significantly change the state content in environments where small black blocks have particular semantic meaning.
78
+
79
+ Formally, consider a feedback $h=(x^h,b^h,x,a,s)$; we need to convert the state $s$ into augmented states $f(s)$ with perturbations on irrelevant regions. Let $\mathbb{M}(x,i,j)$ denote a mask over relevant regions, where $x(i,j)$ denotes the pixel at index $(i,j)$ of image $x$. We then have:
80
+
81
+ $$\mathbb{M}(x,i,j) = \begin{cases} 1 & \text{if } (i, j) \text{ lies in some Box} \in b^h \\ 0 & \text{otherwise} \end{cases}$$
82
+ (5)
83
+
84
+ $$\phi(x, \mathbb{M}, i, j) = x \odot \mathbb{M}(x, i, j) + G(x, \sigma_G) \odot (1 - \mathbb{M}(x, i, j))$$
85
+
86
+ $$\tag{6}$$
87
+
88
+ Then we can perturb pixel (i, j) in image observation x according to mask $\mathbb{M}$ using the function $\phi$ defined above, where $\odot$ is the Hadamard product and the function $G(x, \sigma_G)$ is the Gaussian blur of the observation x. Hence, we can get an augmented state f(s) with perturbed irrelevant regions by applying $\phi(x, \mathbb{M})$ to each stacked frame x with the corresponding mask $\mathbb{M}$.
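+
+ The following is a minimal sketch of this mask-and-blur operation, assuming NumPy/SciPy and boxes given as (x1, y1, x2, y2) pixel corners; the helper name and box format are illustrative, not taken from the paper.
+
+ ```python
+ import numpy as np
+ from scipy.ndimage import gaussian_filter
+
+ def perturb_irrelevant(x: np.ndarray, boxes, sigma: float = 3.0) -> np.ndarray:
+     """Blur everything outside the human-annotated salient boxes (Eqs. 5-6).
+
+     x:     one frame of the image observation, shape [H, W]
+     boxes: iterable of (x1, y1, x2, y2) salient regions from b^h
+     sigma: blur strength sigma_G; varying it yields multiple augmentations
+     """
+     # Eq. (5): M = 1 inside some annotated box, 0 otherwise.
+     relevant = np.zeros(x.shape, dtype=bool)
+     for (x1, y1, x2, y2) in boxes:
+         relevant[y1:y2, x1:x2] = True
+     # Eq. (6): keep x on relevant pixels, Gaussian-blur the rest.
+     return np.where(relevant, x, gaussian_filter(x, sigma=sigma))
+ ```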
89
+
90
+ We use the context-aware data augmentation in the following two ways:
91
+
92
+ - 1. For any human feedback $h=(x^h,b^h,x,a,s)$ , we train the model by calculating the advantage loss with the original data h as well as the augmented feedback $h'=(x^h,b^h,x,a,f(s))$ . Note that one human feedback can be augmented to multiple feedbacks by varying the parameters of f(s). In EXPAND, we use Gaussian perturbations of various filter sizes and variances (see Appendix B for detailed settings).
93
+ - 2. To encourage the RL model's internal representation to accurately capture relevant visual features and ignore other irrelevant parts, we enforce an explicit *invariance constraint* via an auxiliary regularization loss:
94
+
95
+ $$L_{I} = \frac{1}{g} \sum_{i=1}^{g} \frac{1}{|\mathcal{A}|} \sum_{a \in \mathcal{A}} \|Q(s, a) - Q(f_{i}(s), a)\|_{2}$$
96
+
97
+ $$(7)$$
98
+
99
+ where $\mathcal{A}$ is the action set and $g$ is the number of perturbations.
100
+
101
+ We linearly combine all the losses to obtain the overall feedback loss:
102
+
103
+ $$L_F = \lambda_A L_A + \lambda_I L_I \tag{8}$$
104
+
105
+ where $\lambda_A$ and $\lambda_I$ are the weights of the advantage loss and the invariance loss, respectively. In all experiments, we set $\lambda_A$ to 1.0 without doing hyper-parameter tuning. We set $\lambda_I$ to 0.1 as suggested in [33], which also applies an invariance constraint on the RL model but with standard data augmentations. The agent is trained with the usual DQN loss as well as $L_F$. Note that in EXPAND, the advantage loss is computed with both original human feedback and augmented human feedback. We refer to a baseline agent that doesn't utilize human visual explanation (trained only with the usual DQN loss and the advantage loss on original human feedback) as DQN-Feedback.
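+
+ A sketch of the invariance term and the combined feedback loss, again PyTorch-style with illustrative names (`q_net` is assumed to map a state to a vector of Q-values):
+
+ ```python
+ def invariance_loss(q_net, state, perturbed_states):
+     """L_I (Eq. 7): mean Q-value discrepancy across perturbed copies of s."""
+     q_orig = q_net(state)  # shape [num_actions]
+     # (1/g) * sum_i (1/|A|) * sum_a |Q(s, a) - Q(f_i(s), a)|
+     diffs = [(q_orig - q_net(fs)).abs().mean() for fs in perturbed_states]
+     return sum(diffs) / len(diffs)
+
+ def feedback_loss(l_advantage, l_invariance, lam_a=1.0, lam_i=0.1):
+     """L_F (Eq. 8) with the weights reported above."""
+     return lam_a * l_advantage + lam_i * l_invariance
+ ```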
106
+
107
+ Though human visual explanation offers a strong way to make human-agent interaction more natural and effective, it might impose an excessive workload if we require the trainer to annotate the image for every single query. To account for possible overheads and reduce human effort, we design an object-oriented interface that enables the human to provide guidance by effortlessly pointing out the labels of salient objects. Note that this design is in line with the idea of using a "symbolic" interface in human-advisable AI, which argues that humans are more comfortable with symbol-level communication, and the agents are free to learn their own internal representation as long as the user interface can ground the human advice (e.g., to pixel space) [\[13\]](#page-10-2). Technically, our object-oriented interface contains a tracking and detection module, with
108
+
109
+ <span id="page-6-0"></span>![](_page_6_Picture_2.jpeg)
110
+
111
+ Figure 3: An example of object detection in car driving game Enduro. Users were allowed to remove or add other bounding regions at will.
112
+
113
+ which the human trainers only need to annotate at the beginning or when the tracker/detector works imperfectly. For example, in the car driving game Enduro (Fig. [3\)](#page-6-0), all the lanes and cars are automatically highlighted and tracked, so the human trainers only need to deselect irrelevant objects in the image.
114
+
115
+ A screenshot of our video-player like interface can be found in Appendix [F.](#page-16-0) Trainers can start/pause the replay of the queried trajectory and provide explanations by directly drawing bounding boxes on the screen. The trainers can also adjust the "video" frame rate based on their needs. Similar to Deep-TAMER [\[45\]](#page-12-3), we also applied each received feedback and explanation to frames that were displayed between 2 and 0.2 seconds before the feedback occurred; we assume that within this 1.8-second window, the salient regions/objects should be the same.
2103.10379/main_diagram/main_diagram.drawio ADDED
@@ -0,0 +1 @@
1
+ <mxfile host="app.diagrams.net" modified="2020-09-10T03:09:00.720Z" agent="5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.83 Safari/537.36" etag="ES7kW5XmGJCbOEve9Koq" version="13.5.5" type="google"><diagram id="vxFLJJwmx3dD1RZkKw80" name="Page-1">7Vxdc5s4FP01mdl9MCPE92OSJulDs+02nWn7lJFBxjSAKMiJs79+JUDmS8Q2Acdp4hkncAUCdI/OPboSPtHOo/VVipLlNfFweAKBtz7RPpxAqFqOyf5xy2NhMYBTGPw08MqDKsNN8B8ujaC0rgIPZ40DKSEhDZKm0SVxjF3asKE0JQ/NwxYkbF41QT7uGG5cFHat3wOPLgurbYDK/hEH/lJcWQVlSYTEwaUhWyKPPNRM2sWJdp4SQoutaH2OQ954ol2K8y57Sjc3luKY7nLClXO1OL1zbm+uo/jH8l8NeMndDBa13KNwVT5webP0UbQAu++EbwZR3lRn9zilAWugT2iOwy8kC2hAYlY+J5SSiB0Q8oIz5N75KVnF3jkJSZpXpS3yT62O0zDw+bmUJMyKsqRw4SJYY3bXZ/klT4UVCAvbXlLKAXDKHxderpKQIE95CO6CCHsBUkjqMzPfT/g+23ZJFJE4Y1s2/3rsz5cUZwxcMb09Qym73dvPcxQh5Vfiswt1W7dscH7reF0zla19hUmEafrIDimhrqumAo3irIcKO6padollHTd6eSAq8epvqqtcyjZKr+7hYW27h3HsnfKuwvbcEGVZ4LIGWNKIXeeDyjbxOqA/ats/uTMUo9z7sC59k+88ip2Y3fyP+k5xFjTEfnVevidOXARhWIOMZ2Db05k9oym5w7USG84103zKVRlZpS7ejn7stTiAotTH9KlGhXLP19wsGKLuZWFLcYhocN+8qMzz5RW+kIA9GgQl7HTBMQJoNmhWUTx4eVadEloVQatZkQZbKCwaolNRDsTNYw/HpjECNkfC2cJ2sevKcDa3Dd0AtW4A9ugGvdjsYG4rWF8Wc9BpQsUwB2KO0Z/i2BYEhmFrlqpadrNeVVMMwBDt6JZqqqZ1UECaEkCaIeVwIflDVMg0f6+IKJhluW5h0QioerKuCtmWX/7Pa5kLw0eS8eoQv1jAQugJZDcMNoel4ri/eI0zoM4g/FuUsiebtytmtuIOhbnVjVjIos2O08R5TGLc6hSlCZUh2mUIxqkkdkeB5/HLnD0sA4pvEpSj+IHJQWbLFQCP5XlvSAlFpVp4snPsEGPL0pndgqUjj7m6pGdA0N8JnhVynalCLhwWc619Yy7C9kLKhaZr4/niWTFX2y/GdpjypQhQ01qx0hpIgHor6BrgsEFXjKwkJOcF90M4DgiOqIwlW/QS4ch0ek3mQchPgoBBlTVA7KYYZTgfBLIhQT7yY97Jcp41UcS5KZ5nSZNCt9Pqxpw3VdP6aluvCjLWfkFG3hry0COLDjXCq4UGTXdkcWeEaKFqTbFhCm6oUQiUhQptqlChygbg+8HJOBycjJ3gdMOvn3e4CPEGQH6KccS9dgSd7+ha68g6n6pP1fdaSk13JH1PlhqZru+NmBoRIquUaqJIrtP2T3FUyq4m5n42tJxc2Q3XaXBXmSYf0L6UTFMNswkz21Ja+mqwUIOdqqaWavrzEbq/tq8wDeuYBk9i+vkDgiMBUMfrxtDkmtOsx1QPix17O3beU/v7BrPSmbbeDV6Hzuurb8+/Gv9a7M9liPxbsrj95/TbZyW7H8mrprmjV21rKqfKhgOvz6muFysuuxZrkgXzVMZ2Imamy1U0535EC3+J4iCjKOaylh0yswFQkvi5jhQcrvdweN2xmqFAq+tbZyq5qb0ZQsaxcDb3Lv9qtT77dcVECsp7LbxUIQDJetYtK8FQPB9JPZzWnk3TAPuIWyyKixIAypJC3ogpezgarhQdONWngTJLtyQo0xVbwiDToWyM5HNPHnn8NLI8zW0NnFMbMfdsH5UmNVpjZ0sdqEnNlriF7YomFqX683PP4Niyp9fojueZE8w6UsRiGutNfDlStMl49U/raTOoH0XSB06V9HGg4tQ/egvHkqk6R9KPzKlUtL6Dit53hL1DOkeeNXp6hN2bA9pQ9ug5IDGGfHVJILW1WsEcukLGaE4YWO16pubL/umBkVnsYp2wMXPGb4h1/7zqLm1RklMbSXCKKN5ObvYYaxa20NgUtDVrwceyZOMHrYvfdjgdj6dGyFU/h6f2yATWeAq+89TTPAVaiwr09rhgZ2HXXgdzaKKSZaqnlFs8cwBQkuB8irFLQvkygBT/XuFsBxX2WolK285Th53P1ncYfr6CJMeLJbAeBTHYqqJLxDFkpKp3/dnu7aP50xhBH/dEkQnnSOGB5khVeZpgh0BU9JOXiztN3lDB0LijCzyImlShVA61hLxfIb/a9VhPaPE9kgSb43LV7uEZzhhB5rp9rzU4f9JKtygIAwZI3tNw7DPmr5Ylva8B5FrInAHjxTJSUygk2GY6IJFImizlNFlI3WFRxyuQSEcxcds3L2M1nO4YtmJIxFTPDKAKKu01vvfHeOdJ+iLSllmTjTzaDMOP8OUA0Td21E9HNJAfS1CpRrsm/bAjecN6E4IqF0VJSu4Db19B9AbUQE0nIYaWd4FUF0jHMWWnywRTc9WDOtJLdmpr1tiWLt0+ZJrJkK2lqQNsOFzEL1CI49QnsTEUXlsZCUi6IKgwaB6PSG+8uaOOhDgTKKD+0VpBUfbiDjwoAid7yfMP+F2FnedjhN47FhnXEV9DZRzsCMIDyzhNNmE4cmAcWoWYwjmpv/xeTc4MrfaPflee33H560h56B+BZM0mRPtenZf1tzFenb/w3K/rb/b9Xfrfzbn//eGrjz6JXwSpkyijhptyl6R0SXwSo/CisrbarTrmE+EZjtyBvzClj2XzoRWLtD0TFHvMT0j90ZkIGMKeYtHH9kEwlPt3ZxbscRXbrX6WqiCk6se9tIv/AQ==</diagram></mxfile>
2103.10379/paper_text/intro_method.md ADDED
@@ -0,0 +1,140 @@
1
+ # Introduction
2
+
3
+ <figure id="fig:ICEWS14" data-latex-placement="t!">
4
+ <img src="wideobama.png" style="width:46.0%" />
5
+ <figcaption>A timeline of events extracted from the ICEWS14 knowledge graph. The events are chronologically sorted from top to bottom, demonstrating interactions between President Obama, NATO, Afghanistan, and Russia.</figcaption>
6
+ </figure>
7
+
8
+ Knowledge Graphs (KGs) organize information around entities (people, countries, organizations, movies, etc.) in the form of factual triplets, where each triplet represents how two entities are related to each other, for example (Washington DC, *capitalOf*, USA).
9
+
10
+ There exists an ever-growing number of publicly available KGs, for example DBPedia [@auer2007dbpedia], Freebase [@bollacker2008freebase], Google Knowledge Graph [@blog2012introducing], NELL [@carlson2010toward], OpenIE [@yates2007textrunner; @etzioni2011open], YAGO [@biega2013inside; @hoffart2013yago2; @mahdisoltani2013yago3], and UMLS [@burgun2001comparing]. This structured way of representing knowledge makes it easy for computers to digest and utilize in various applications. For example, KGs are used in recommender systems [@cao2019unifying; @zhang2016collaborative], the medical domain [@sang2018sematyp; @abdelaziz2017large], question-answering [@hao2017end], information retrieval [@xiong2017explicit], and natural language processing [@yang2017leveraging].
11
+
12
+ Though these graphs are continuously growing, they remain particularly incomplete. Knowledge Base Completion (KBC) focuses on finding/predicting missing relations between entities. A wide range of work explores various methods of finding extraction errors or completing KGs [@bordes2013translating; @yang2014embedding; @trouillon2016complex; @sadeghian2019drum; @zhou2019mining; @sun2019rotate; @zhang2019quaternion].
13
+
14
+ Different events and actions cause relations and entities to evolve over time. For example, Figure [1](#fig:ICEWS14){reference-type="ref" reference="fig:ICEWS14"} illustrates a timeline of relations and events happening in 2014 involving entities like Obama (president of the USA at the time), NATO, Afghanistan, and Russia. Here, an agreement between Obama and Afghanistan happens concurrently with NATO increasing armed forces in Afghanistan, after Obama made a visit to NATO. A few months later, after NATO makes some pessimistic comments towards Russia and Obama wants to de-escalate the conflict, it appears that he provides military aid to NATO, and in turn, NATO provides military aid in Afghanistan.
15
+
16
+ Temporally-aware KGs are graphs that add a fourth dimension, namely time *t*, giving each fact a temporal context. Temporal KGs are designed to capture this temporal information and the dynamic nature of real-world facts. While many of the previous works study knowledge graph completion on static KGs, little attention has been given to temporally-aware KGs. Though recent work has begun to solve the temporal link prediction task, these models often utilize a large number of parameters, making them difficult to train [@garcia2018learning; @dasgupta2018hyte; @leblay2018deriving]. Furthermore, many use inadequate datasets such as YAGO2 [@hoffart2013yago2], which are sparse in the time domain, or a time-augmented version of FreeBase [@bollacker2008freebase], where time is appended to some existing facts.
17
+
18
+ One of the most popular approaches to the link prediction task involves embedding the KG, that is, mapping entities and relations in the KG to high dimensional vectors, where the mapping of each entity and relation treats the structure of the graph as constraints [@bordes2013translating; @yang2014embedding; @lin2015learning]. These techniques have proven to be the state-of-the-art in modeling static knowledge graphs and inferring new facts from the KG based on the existing ones. Similarly, temporal knowledge graph embedding methods learn an additional mapping for time. Methods differ in how they map the elements of the knowledge graph and in their scoring functions.
19
+
20
+ In this paper, we propose ChronoR, a novel temporal link prediction model based on $k$-dimensional rotation. We formulate the link prediction problem as learning representations for entities in the knowledge graph and a rotation operator based on each fact's relation and temporal elements. Further, we show that the proposed scoring function is a generalization of previously used scoring functions for the static KG link prediction task. We also provide insights into the regularization used in other similar models and propose a new regularization method inspired by tensor nuclear norms. Empirical experiments also confirm its advantages. Our experiments on available benchmarks show that ChronoR outperforms previous state-of-the-art methods.
21
+
22
+ The rest of the paper is organized as follows: Section [2](#sec:Related Work){reference-type="ref" reference="sec:Related Work"} covers the relevant previous work, Section [3](#sec:Problem definition){reference-type="ref" reference="sec:Problem definition"} formally defines the temporal knowledge graph completion problem, in Section [4](#sec:Model){reference-type="ref" reference="sec:Model"} we go over the proposed model's details and learning procedure, Section [5](#sec:Optimization){reference-type="ref" reference="sec:Optimization"} details the loss and regularization functions, Section [6](#sec:Experiments){reference-type="ref" reference="sec:Experiments"} discusses the experimental setup, and Section [7](#sec:Conclusion){reference-type="ref" reference="sec:Conclusion"} concludes our work.
23
+
24
+ # Method
25
+
26
+ In this section, we formally define the problem of temporal knowledge graph completion and specify the notations used throughout the rest of the paper.
27
+
28
+ We represent scalars with lower case letters $a$, vectors and matrices with bold lower case letters $\bm{a}$, higher order tensors with bold upper case letters $\bm{A}$, and the $i$^th^ element of a vector $\bm{a}$ as $a_i$. We use $\bm{a} \circ \bm{b}$ to denote the element wise product of two vectors and $[\bm{A} | \bm{B}]$ to denote matrix or vector concatenation. We denote the complex norm as $|\cdot|$ and $||\cdot||_p$ denotes the vector p-norm; we drop $p$ when $p=2$.
29
+
30
+ A **Temporal Knowledge Graph** refers to a set of quadruples $\mathcal{K} = \{(h, r, t, \tau) \mid h,t \in \mathcal{E}, r \in \mathcal{R}, \tau \in \mathcal{T}\}$. Each quadruple represents a temporal fact that holds in the world. $\mathcal{E}$ is the set of all entities and $\mathcal{R}$ is the set of all relations in the ontology. The fourth element in each quadruple represents time, which is often discretized. $\mathcal{T}$ represents the set of all possible time stamps.
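+
+ For illustration, a temporal KG is just a set of such quadruples; the entities and relations below follow the Figure 1 timeline, while the dates are toy values in the style of ICEWS:
+
+ ```python
+ # K = {(h, r, t, tau)}: head, relation, tail, discretized timestamp
+ kg = {
+     ("Obama", "makeVisit", "NATO", "2014-03-26"),
+     ("NATO", "provideMilitaryAid", "Afghanistan", "2014-11-04"),
+ }
+ entities = {h for h, _, _, _ in kg} | {t for _, _, t, _ in kg}   # E
+ relations = {r for _, r, _, _ in kg}                             # R
+ timestamps = {tau for _, _, _, tau in kg}                        # T
+ ```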
31
+
32
+ **Temporal Knowledge Graph Completion** (temporal link prediction) refers to the problem of completing a TKG by inferring facts from a given subset of its facts. In this work, we focus on predicting temporal facts within the observed set $\mathcal{T}$, as opposed to the more general problem which also involves forecasting future facts.
33
+
34
+ In this section, we present a framework for temporal knowledge graph representation learning. Given a TKG, we want to learn representations for entities, relations, and timestamps (e.g., $\bm{h}, \bm{r}, \bm{t}, \bm{\tau} \in \mathds{R}^{n \times k}$) and a scoring function $g(\bm{h}, \bm{r}, \bm{t}, \bm{\tau}) \in \mathds{R}$, such that true quadruples receive high scores. Thus, given $g$, the embeddings can be learned by optimizing an appropriate cost function.
35
+
36
+ Many of the previous works use a variety of different objectives and linear/non-linear operators to adapt static KG completion's scoring functions to the scoring function $g(\cdot)$ in the temporal case (see Section [2](#sec:Related Work){reference-type="ref" reference="sec:Related Work"}).
37
+
38
+ One example is RotatE [@sun2019rotate], which learns embeddings $\bm{h}, \bm{r}, \bm{t} \in \mathds{C}^n$ by requiring each triplet's head to fall close to its tail once transformed by an (element-wise) rotation parametrized by the relation vector: $\bm{h} \circ \bm{r} = \bm{t}$, where $|r_i| = 1$. Thus, RotatE defines $g(h, r, t) = \lambda - ||\bm{h} \circ \bm{r} - \bm{t}||$, for some $\lambda \in \mathds{R}$.
39
+
40
+ Our model is inspired by the success of rotation-based models in static KG completion [@sun2019rotate; @zhang2019quaternion]. We represent rotation by an angle $\theta$ in the 2-dimensional space with $R_\theta(.)$. For example, to carry out such a rotation, one can use the well-known Euler's formula $e^{i \theta} = \cos(\theta) + i \sin(\theta)$, and the fact that if $(x,y) \in \mathds{R}^2$ is represented by its complex form $z = x + i y \in \mathds{C}$, then $R_\theta(x,y) \Leftrightarrow e^{i \theta} z$.
41
+
42
+ In this paper, we consider a subset of the group of general linear transformations $\operatorname{GL}(k, \mathds{R})$ over the k-dimensional real space, consisting of rotations and scalings, and parametrize the transformation by both the time and relation elements. Intuitively, we expect that for true facts:
43
+
44
+ $$\begin{equation}
45
+ \operatorname{Q}_{r, \tau}(\bm{h}) = \bm{t}
46
+ \end{equation}$$
47
+
48
+ where $\bm{h}, \bm{t} \in \mathds{R}^{n \times k}$ and $\operatorname{Q}_{r, \tau}$ represents the (row-wise) linear operator in k-dimensional space, parametrized by $\bm{r}$ and $\bm{\tau}$. Note that any orthogonal matrix $\bm{Q} \in \mathds{R}^{n \times n}$ (i.e., $\bm{Q}^\mathsf{T} \bm{Q} = \bm{Q} \bm{Q}^\mathsf{T} = I$) is equivalent to a k-dimensional rotation. However, we relax the unit norm constraint, thus extending rotations to a subset of linear operators which also includes scaling.
49
+
50
+ As previous work has noted, for 2 or 3 dimensional rotations, one can represent $Q$ using complex numbers $\mathds{C}$ and quaternions $\mathds{H}$ ($k = 2 \text{ and } 4$), respectively. Higher dimensional transformations can also be constructed using the Aguilera-Perez Algorithm [@aguilera2004general] followed by a scalar multiplication.
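+
+ As a small illustration of the $k = 2$ case, each row of an embedding can be treated as a complex number, and the relation-time operator then acts by element-wise complex multiplication; relaxing the unit-norm constraint turns pure rotations into rotations with scaling. All names below are illustrative.
+
+ ```python
+ import numpy as np
+
+ n = 4                                              # rows of the embedding
+ rng = np.random.default_rng(0)
+
+ h = rng.normal(size=n) + 1j * rng.normal(size=n)   # head embedding in C^n
+ theta = rng.uniform(0.0, 2.0 * np.pi, size=n)      # per-row rotation angles
+ scale = rng.uniform(0.5, 1.5, size=n)              # relaxed norms (scaling)
+ q = scale * np.exp(1j * theta)                     # rows of Q_{r,tau}
+
+ t = q * h   # for a true fact, Q_{r,tau}(h) should land on the tail t
+ ```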
51
+
52
+ Unlike RotatE, which uses a scoring function based on the Euclidean distance of $\operatorname{Q_r}( \bm{h} )$ and $\bm{t}$, we propose to use the angle between the two vectors.
53
+
54
+ Our motivation comes from observations in a variety of previous works [@aggarwal2001surprising; @zimek2012survey] showing that in higher dimensions, the Euclidean norm suffers from the curse of dimensionality and is not a good measure of the concept of proximity or similarity between vectors. We use the following well-known definition of the inner product to define the angle between $\operatorname{Q}_{r,\tau}(\bm{h})$ and $\bm{t}$.
55
+
56
+ ::: restatable
57
+ **Definition** (matrix inner product). If $\bm{A}$ and $\bm{B}$ are matrices in $\mathds{R}^{n \times k}$ we define:
58
+
59
+ $$\begin{align}
60
+ \left\langle \bm{A}, \bm{B} \right\rangle &:= \operatorname{tr}(\mathbf{AB^{T}}) \\
61
+ \cos(\theta) &:= \frac{\left\langle \bm{A}, \bm{B} \right\rangle}{ \sqrt{ \left\langle \bm{A}, \bm{A} \right\rangle \left\langle \bm{B}, \bm{B} \right\rangle} } \nonumber
62
+ \end{align}$$
63
+ :::
64
+
65
+ Based on the above definition, the cosine of the angle between two matrices is their normalized inner product. Hence, we define our scoring function as: $$\begin{equation}
66
+ \label{eq:scoring_function}
67
+ g(h, r, t, \tau) := \left\langle \operatorname{Q}_{r,\tau}(\bm{h}) \, , \bm{t} \right\rangle
68
+ \end{equation}$$
69
+
70
+ That is, we expect the $\cos$ of the relative angle between $\operatorname{Q}_{r,\tau}(\bm{h})$ and $\bm{t}$ to be higher (i.e., their angle close to $0$) when $(h, r, t, \tau)$ is a true fact in the TKG.
71
+
72
+ Some of the state-of-the-art work in static KG completion, such as quaternion-based rotations in QuatE [@zhang2019quaternion] and complex domain tensor factorization [@trouillon2016complex], and also recent works on temporal KG completion like TNTComplEx [@lacroix2020tensor], use a scoring function similar to
73
+
74
+ $$\begin{align}
75
+ g(h, r, t) &= \operatorname{Re}\{\bm{h} \circ \bm{r} \circ \bm{\bar{t}}\} \\
76
+ \text{for } \bm{h}, \bm{r}, \bm{t} \in \mathds{C} \text{ or } \mathds{H} \nonumber
77
+ \end{align}$$
78
+
79
+ and motivate it by the fact that the optimisation function requires the scores to be purely real [@trouillon2016complex].
80
+
81
+ However, it is interesting to note that this scoring method is in fact a special case of Equation [\[eq:scoring_function\]](#eq:scoring_function){reference-type="ref" reference="eq:scoring_function"}. The following theorem proves the equivalence of the scoring function used in *ComplEx*[^2] to Equation [\[eq:scoring_function\]](#eq:scoring_function){reference-type="ref" reference="eq:scoring_function"} when $k=2$.
82
+
83
+ ::: restatable
84
+ thminnerProductEquivalence []{#thm:innerProductEquivalence label="thm:innerProductEquivalence"} If $\bm{a}, \bm{b} \in \mathds{C}^n$ and $\bm{A}, \bm{B} \in \mathds{R}^{n \times 2}$ are their equivalent matrix forms, then $\operatorname{Re}(\bm{a} \circ \bm{\bar{b}}) = \left\langle \bm{A}, \bm{B} \right\rangle$ (proof in Appendix)
85
+ :::
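+
+ The claimed equivalence for $k=2$ is easy to check numerically. A small sketch of ours, with the real part summed over components:
+
+ ```python
+ import numpy as np
+
+ rng = np.random.default_rng(1)
+ a = rng.normal(size=4) + 1j * rng.normal(size=4)   # a in C^n, here n = 4
+ b = rng.normal(size=4) + 1j * rng.normal(size=4)
+
+ # equivalent n x 2 real matrices: one (real, imag) row per complex component
+ A = np.stack([a.real, a.imag], axis=1)
+ B = np.stack([b.real, b.imag], axis=1)
+
+ lhs = np.sum(np.real(a * np.conj(b)))   # Re(a ∘ b̄), summed over components
+ rhs = np.trace(A @ B.T)                 # <A, B> = tr(A B^T)
+ assert np.isclose(lhs, rhs)
+ ```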
86
+
87
+ To fully define $g$ we also need to specify how the linear operator $\operatorname{Q}$ is parameterized by $r, \tau$. In the rest of the paper and experiments, we simply concatenate the relation and time embeddings to get $\operatorname{Q}_{r,\tau} = [\bm{r} | \bm{\tau}]$, where $\bm{r} \in \mathds{R}^{n_r \times k}$ and $\bm{\tau} \in \mathds{R}^{n_\tau \times k}$ are the representations of the fact's relation and time elements and $n_r + n_\tau = n$. Since many real-world TKGs contain a combination of static and dynamic facts, we also allow an extra rotation operator parametrized only by $r$, i.e., an extra term $\bm{R}_2 \in \mathds{R}^{n \times k}$ in $\operatorname{Q}$, to better represent static facts.
88
+
89
+ To summarize, our scoring function is defined as: $$\begin{equation}
90
+ \label{eq:scoring_function_final}
91
+ g(h, r, t, \tau) := \left\langle \bm{h} \, \circ [\bm{r} | \bm{\tau}] \, \circ \bm{r}_2, \bm{t} \right\rangle
92
+ \end{equation}$$ where $\bm{r} \in \mathds{R}^{n_r \times k}$, $\bm{\tau} \in \mathds{R}^{n_\tau \times k}$ and $\bm{r}_2 \in \mathds{R}^{n \times k}$.
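+
+ For illustration, a sketch of this scoring function for $k=2$, where applying a row of $\operatorname{Q}$ reduces to complex multiplication (the rotation-and-scaling case); all names and shapes below are our assumptions:
+
+ ```python
+ import numpy as np
+
+ def apply_rows(x, q):
+     # row-wise rotation-and-scaling for k = 2, via complex multiplication
+     xc = x[:, 0] + 1j * x[:, 1]
+     qc = q[:, 0] + 1j * q[:, 1]
+     y = xc * qc
+     return np.stack([y.real, y.imag], axis=1)
+
+ def score(h, r, tau, r2, t):
+     # g(h, r, t, tau) = < h ∘ [r | tau] ∘ r2 , t >
+     q = np.concatenate([r, tau], axis=0)     # [r | tau]: n_r + n_tau = n rows
+     return np.trace(apply_rows(apply_rows(h, q), r2) @ t.T)
+
+ n_r, n_tau, k = 2, 2, 2
+ rng = np.random.default_rng(2)
+ h = rng.normal(size=(n_r + n_tau, k))
+ t = rng.normal(size=(n_r + n_tau, k))
+ r, tau = rng.normal(size=(n_r, k)), rng.normal(size=(n_tau, k))
+ r2 = rng.normal(size=(n_r + n_tau, k))
+ print(score(h, r, tau, r2, t))
+ ```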
93
+
94
+
97
+ Having an appropriate scoring function, one can model the likelihood of any $t_i \in \mathcal{E}$ correctly answering the query $(h, r, ?, \tau)$ as:
98
+
99
+ $$\begin{equation}
100
+ \label{eq:likelihood_t_i}
101
+ P(t=t_i\mid h, r, \tau) = \frac{\operatorname{exp}(g(h, r, t_i, \tau))}{\sum_{k=1}^K \operatorname{exp}(g(h, r, t_k, \tau))}
102
+ \end{equation}$$
103
+
104
+ and similarly for $P(h=h_i\mid r, t, \tau)$.
105
+
106
+ To learn appropriate model parameters, one can minimize, for each quadruple in the training set, the negative log-likelihood of correct prediction: $$\begin{align}
107
+ \label{eq:loss_base}
108
+ L(\mathcal{K};\bm{\theta}) = \mathlarger{\sum}_{(h, r, t, \tau) \in \operatorname{\mathcal{K}}} \left( -\log P(t = t\mid h, r, \tau) -\log P(h = h\mid r, t, \tau) \right)
109
+ \end{align}$$
110
+
111
+ where $\bm{\theta}$ represents all the model parameters.
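+
+ In code, the likelihood in Equation (eq:likelihood_t_i) is a softmax over the scores of all candidate entities, and the per-quadruple loss is the usual cross-entropy. A minimal sketch (ours):
+
+ ```python
+ import numpy as np
+
+ def nll(scores, true_idx):
+     # scores[i] = g(h, r, t_i, tau) for every t_i in E; full softmax,
+     # so no negative sampling is needed
+     logits = scores - scores.max()                # for numerical stability
+     log_p = logits - np.log(np.exp(logits).sum())
+     return -log_p[true_idx]
+
+ scores = np.array([2.0, -1.0, 0.5, 3.2])  # toy scores over 4 candidate tails
+ print(nll(scores, true_idx=3))            # -log P(t = t_3 | h, r, tau)
+ ```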
112
+
113
+ Formulating the loss function following Equation [\[eq:loss_base\]](#eq:loss_base){reference-type="ref" reference="eq:loss_base"} requires computing the denominator of Equation [\[eq:likelihood_t_i\]](#eq:likelihood_t_i){reference-type="ref" reference="eq:likelihood_t_i"} for every fact in the training set of the temporal KG; however, it does not require generating negative samples [@armandpour2019robust] and has been shown in practice (when computationally feasible, as it is at the scale of our experiments) to perform better.
114
+
115
+ Various embedding methods use some form of regularization to improve the model's generalizability to unseen facts and prevent overfitting to the training data. TNTComplEx [@lacroix2020tensor] treats the TKG as an order-3 tensor by *unfolding* the temporal and predicate modes together and adopts the regularization derived for static KGs in ComplEx [@lacroix2018canonical]. Other methods, for example TIMEPLEX [@jain2020temporal], use a sample-weighted L2 regularization penalty to prevent overfitting.
116
+
117
+ We also use the tensor nuclear norm due to its connection to the tensor nuclear rank [@friedland2018nuclear]. However, we directly consider the TKG as an order-4 tensor whose rank-one decomposition coefficients are contained in $\bm{E}, \bm{R}, \bm{T}$, the tensors of entity, relation, and time embeddings. Based on this connection, we propose the following regularization:
118
+
119
+ $$\begin{align}
120
+ \label{eq:omega_4}
121
+ \Lambda_4(\bm{\theta}) = \sum_{(h, r, t, \tau) \in \operatorname{\mathcal{K}}} (||\bm{h}||_4^4 + ||\bm{r_2}||_4^4 + ||[\bm{r} | \bm{\tau}]||_4^4 + ||\bm{t}||_4^4)
122
+ \end{align}$$ We empirically compare different regularizations in Section [6](#sec:Experiments){reference-type="ref" reference="sec:Experiments"} and show that $\Lambda_4(\bm{\theta})$ outperforms other methods. We provide the theorems required to derive Equation [\[eq:omega_4\]](#eq:omega_4){reference-type="ref" reference="eq:omega_4"} in the Appendix.
123
+
124
+ In addition, one would like the model to take advantage of the fact that most entities behave smoothly over time. We can capture this smoothness property of real datasets by encouraging the model to learn similar transformations for closer timestamps. Hence, following [@lacroix2020tensor] and [@sarkar2006dynamic], we add a temporal smoothness objective to our loss function:
125
+
126
+ $$\begin{align}
127
+ \Lambda_\Upgamma = \frac{1}{|\mathcal{T}|-1} \sum_{i=1}^{|\mathcal{T}|-1} || \bm{\tau_{i+1}} - \bm{\tau_{i}}||_4^4
128
+ \end{align}$$ Tuning the hyper-parameter $\lambda_2$ depends on the scale of $\Lambda_\Upgamma$ relative to the other components of the loss function, and finding an appropriate $\lambda_2$ can become difficult in practice if each component follows a different scale. Since we are using the 4-norm regularization in $\Lambda_4(\bm{\theta})$, we also use the 4-norm for $\Lambda_\Upgamma$. Similar phenomena have been previously explored in other domains. For example, in [@belloni2014pivotal] the authors propose $\sqrt{\text{Lasso}}$, where they use the square root of the MSE component to match the scale of the 1-norm used in the sparsity regularization component of Lasso [@tibshirani1996regression] and show improvements in handling the unknown scale in badly behaved systems.
129
+
130
+ In practice, using the same norm order makes it easier to tune the hyperparameters $\lambda_1$ and $\lambda_2$ in $\mathcal{L} (\mathcal{K})$.
131
+
132
+ To learn the representations for any TKG $\mathcal{K}$, the final training objective is to minimize:
133
+
134
+ $$\begin{align}
135
+ \mathcal{L}(\mathcal{K}; \theta) = L(\mathcal{K}; \bm{\theta}) + \lambda_1 \Lambda_4(\bm{\theta}) + \lambda_2 \Lambda_\Upgamma
136
+ \end{align}$$
137
+
138
+ where the first and the second terms encourage an accurate estimation of the edges in the TKG and the third term incorporates the temporal smoothness behaviour.
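+
+ A sketch of how the two regularizers and the final objective combine; the embeddings, $\lambda$ values, and the zero placeholder standing in for $L(\mathcal{K};\bm{\theta})$ are toy choices of ours:
+
+ ```python
+ import numpy as np
+
+ def n4(x):
+     # ||x||_4^4, as used in Lambda_4
+     return np.sum(x ** 4)
+
+ rng = np.random.default_rng(3)
+ h, t = rng.normal(size=(4, 2)), rng.normal(size=(4, 2))
+ r, tau = rng.normal(size=(2, 2)), rng.normal(size=(2, 2))
+ r2 = rng.normal(size=(4, 2))
+ taus = rng.normal(size=(10, 2, 2))       # all |T| timestamp embeddings, in order
+
+ lam4 = n4(h) + n4(r2) + n4(np.concatenate([r, tau])) + n4(t)       # one quadruple
+ lam_gamma = np.sum((taus[1:] - taus[:-1]) ** 4) / (len(taus) - 1)  # smoothness
+ lambda1, lambda2 = 1e-2, 1e-2            # hyper-parameters to tune
+ total = 0.0 + lambda1 * lam4 + lambda2 * lam_gamma  # 0.0 stands in for the NLL
+ print(total)
+ ```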
139
+
140
+ In the next section, we provide empirical experiments and compare ChronoR to various other benchmarks.
2106.02940/main_diagram/main_diagram.drawio ADDED
The diff for this file is too large to render. See raw diff
 
2106.02940/paper_text/intro_method.md ADDED
@@ -0,0 +1,29 @@
1
+ # Introduction
2
+
3
+ Reinforcement Learning (RL [\(Sutton, Barto et al. 1998\)](#page-8-0)) considers the problem of an agent taking sequential actions in an environment to maximize some notion of reward. In recent times there has been tremendous success in RL, with impressive results in Games [\(Silver et al. 2016\)](#page-8-1) and Robotics [\(OpenAI et al. 2018\)](#page-8-2), and even real-world settings [\(Bellemare](#page-7-0) [et al. 2020\)](#page-7-0). However, these successes have predominantly focused on learning a *single* task, with agents often brittle to changes in the dynamics or rewards (or even the seed [\(Henderson et al. 2018\)](#page-7-1)).
4
+
5
+ One of the most appealing qualities of RL agents is their ability to continue to learn and thus improve throughout their lifetime. As such, there has recently been an increase in interest in *Continual Reinforcement Learning* (CRL) [\(Ring et al. 1994;](#page-8-3) [Khetarpal et al. 2020\)](#page-7-2),
6
+
9
+ <span id="page-0-0"></span>![](_page_0_Figure_11.jpeg)
10
+
11
+ Figure 1: A and B: a simple four-rooms environment task where the goal (the green square) is located at the bottom right. C and D: a different four-rooms task where the goal is at a different location; the optimal Q-function should give different values for the same action in the same starting state. The symbol ↑ denotes a high Q-value and ↓ a low Q-value. Task-agnostic, single-predictor continual RL methods will be sub-optimal compared to methods which use the task identifier to condition their policies, like OWL.
12
+
13
+ a paradigm where agents train on tasks sequentially, while seeking to maintain performance on previously mastered tasks [\(Kirkpatrick et al. 2016;](#page-8-4) [Schwarz et al. 2018;](#page-8-5) [Rolnick et al. 2019\)](#page-8-6). A key issue when training on sequential tasks is *catastrophic forgetting*, a consequence of gradient based learning, as training on a new task overwrites parameters which were important for previous tasks [\(Robert M. French 1999\)](#page-8-7). While existing methods address this, they typically only consider the setup where each "task" is an entirely different RL environment, for example different games from the Arcade Learning Environment [\(Bellemare et al. 2012\)](#page-7-3) or different simulated robotics environments [\(Ahn et al. 2019\)](#page-7-4).
14
+
15
+ In this paper, we focus on a more challenging problem. Instead of distinct environments as different tasks, we consider learning to solve different tasks with the *same state space*. Since these tasks may not only have different but even opposite optimal actions for the same observation, training on them sequentially causes what we call "interference", which can in turn *induce* forgetting, as the agent directly optimizes for an opposing policy. Interference can also lead to reduced performance on future tasks, as the agent cannot fully learn to solve a new task given its retained knowledge of previous opposing objectives. This problem regularly arises in real-world settings, where there are a multitude of tasks with reward functions which all share the same state space (e.g. a visual observation of the world). Since previous CRL methods used different environments as different tasks, the agents can learn that the different state spaces correspond to different optimal behaviors, and so interference is rarely exhibited.
18
+
19
+ We begin by showing a simple supervised setting where it is impossible to solve interfering tasks continually with a single predictor, despite using strong CL techniques to prevent forgetting (Figure [2](#page-2-0)). This setting can occur in RL when we have different goals but the same observation for different tasks; see Figure [1](#page-0-0). We therefore introduce a new approach to CRL, which we call COntinual RL Without ConfLict, or OWL. OWL makes use of *shared feature extraction layers*, while acting based on *separate independent policy heads*. The use of the shared layers means that the size of the model scales gracefully with the number of tasks. The separate heads act to model multi-modal objectives and not as a means to retain task-specific knowledge, as they are implicitly used in CL. Task-agnostic, single-predictor CRL methods which use experience replay [\(Rolnick et al. 2019\)](#page-8-6) will thus suffer from this interference, but have the advantage of not having to infer the task the agent is in before solving it. Experience replay has been shown to be a strong CL strategy [\(Balaji et al. 2020\)](#page-7-5).
20
+
21
+ In the presence of interference, task inference is now necessary, and at test time OWL adaptively selects the policy using a method inspired by multi-armed bandits. We demonstrate in a series of simple yet challenging RL problems that OWL can successfully deal with interference where existing methods fail. To alleviate forgetting we consider simple weight-space [\(Kirkpatrick et al. 2016\)](#page-8-4) and functional [\(Hinton, Vinyals, and Dean 2015\)](#page-7-6) regularizations, as these have been repeatedly shown to be effective in CRL [\(Nekoei et al. 2021\)](#page-8-8).
22
+
23
+ Our core contribution is to identify a challenging new setting for CRL and propose a simple approach to solving it. As far as we are aware, we are the first to consider a bandit approach for selecting policies for deployment, inferring unknown tasks. The power of this method is further demonstrated in a set of generalization experiments, where OWL is able to solve tasks it has never seen before, up to 6x more successfully than the experience replay baseline [\(Rolnick et al. 2019\)](#page-8-6).
24
+
25
+ # Method
26
+
27
+ **Input:** tasks seen so far $\mathcal{T} = \{\mathcal{T}_1, \dots, \mathcal{T}_{\tau}\}$ , Q-functions $\{\phi_i\}_{i=1}^M$ , step size $\eta$ , maximum number of timesteps T. **Initialize:** $\mathbf{p}_{\phi}^1$ as a uniform distribution, $s_1$ as the initial state of the test task.
28
+
29
+ $$\begin{aligned} &\textbf{for } \mathcal{T}_j \in \mathcal{T} \textbf{ do} \\ &\quad \textbf{for } t = 1, \dots, T-1 \textbf{ do} \\ &\qquad 1.\ \text{Select } i_t \sim \mathbf{p}_\phi^t \text{ and set } \pi_{\text{test}} = \pi_{\phi_{i_t}}. \\ &\qquad 2.\ \text{Take action } a_t \sim \pi_{\text{test}}(s_t) \text{, and receive reward } r_t \text{ and the next state } s_{t+1} \text{ from } \mathcal{T}_j. \\ &\qquad 3.\ \text{Use Equation 2 to update } \mathbf{p}_\phi^t \text{ with } l_{i_t}^t = \hat{G}_{\phi_t}(\theta_{t+1}). \end{aligned}$$
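+
+ Since Equation 2 is not reproduced in this excerpt, the sketch below substitutes an EXP3-style importance-weighted update for step 3; everything else follows the loop above. The head count `M`, the toy loss signal, and the exact update form are our assumptions:
+
+ ```python
+ import numpy as np
+
+ def select_head(p, rng):
+     # step 1: sample a policy-head index i_t from the current distribution p_phi^t
+     return rng.choice(len(p), p=p)
+
+ def exp3_update(p, i, loss, eta):
+     # stand-in for "Equation 2": an EXP3-style multiplicative-weights update
+     # penalizing the sampled head with an importance-weighted loss estimate
+     w = np.log(p)
+     w[i] -= eta * loss / p[i]
+     w -= w.max()                      # avoid overflow before exponentiating
+     p = np.exp(w)
+     return p / p.sum()
+
+ rng = np.random.default_rng(0)
+ M, eta = 3, 0.1
+ p = np.ones(M) / M                    # uniform prior over the M Q-function heads
+ for t in range(50):
+     i = select_head(p, rng)
+     loss = 0.1 if i == 2 else 1.0     # toy feedback: head 2 fits the hidden task
+     p = exp3_update(p, i, loss, eta)
+ print(p)                              # probability mass concentrates on head 2
+ ```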
2108.09079/main_diagram/main_diagram.drawio ADDED
@@ -0,0 +1 @@
 
 
1
+ <mxfile host="app.diagrams.net" modified="2021-03-16T11:13:46.587Z" agent="5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.82 Safari/537.36" etag="PYdsplI71fvd3zZyqGmR" version="14.4.8" type="device"><diagram id="flnbsLNEmtyiVIukDsLW" name="Page-1">7V1bc5s4FP41fkwGXYHHOEm7nd1uu2ln2j4SI9tMsPFinMv++pUA2SDAxra4xMXJxEaGA+j7+M7R0SUjdLt4/Rg6q/nnwGX+CBru6wjdjSAEFoQj8Wu4b0mJadtJwSz03KTI2BV88/5j6ZGydOO5bJ2WJUVREPiRt8oXToLlkk2iXJkThsFLfrdp4Lu5gpUzY7nLEAXfJo7PCrv98NxonpRa0NyV/8G82VyeGdD0/haO3Dk1vJ47bvCSKUL3I3QbBkGUfFq83jJfVF6+Xj5UfLu9sJAtozoH0Id/wBR9f/X/+zN88uDzp1+3L1eUJGaeHX+T3vEIUp8bHK9XzlJcdvSW1gX9dyOudTwNltHV1Fl4Psf0ZiROS53Fin8zgghh/h55C44ZNJbshf8Ng4WwpOwjjov/xubWMfDCGISr1925+KeZeP/24D3IC+O3mFxb8lVau9vLhLyiV+LjZPPI38Yvcy9i31bORJS9cJbysnm08PkW4B8fg83SZe5fj9sCZ/I0C0Xpl03ke0uWlrtO+PSFm/EiQWXj2iD5QhiXij2nnu/fBn4QxteDHINA6KYVlymfxi9R01EYPLHMN9iEN4axvbcsxinszyyM2GumKMX8IwsWLArf+C7pt9hID3mTD1ZK0JcdnYHk6DxDZSstc9InaLY1vSMZ/5Dy7BjOoYFzl805SK2+cQ6XcE7BkC3dG+ExBIy+s157kzxqShXb8YuXs1cv+pnuIz7/ikEh6dadqBNDbrzJjSW/q5/ZjcxRYnN3WLwlj0tgk54I7oOLuTO2F6wMGKQEC1kWMt+JvOe8PywDKD3D18DjV7LlArIULhCYN7EONuGEpUdl/ZdiCAOy31DkhDMWFQzFfNne9hkUgpWyJR6yatmq1hljmijErux7qmJ/xyr2kKrYfmFKzj4IUw1hAggrwkQ7FiZQQqoKCKc+e00FanxIqwq1SCgBJtujYn3XEWJd559/ZCiY1BWSoiWqWGpYScqEZMC8DlLApJow56C3inlZzKsr/vhtYgmghgDYPo0OAFv7DTXMhhrRaPeOu8JBKzSk2KSWyctnoeN6nDh3XsgmkRcsxYWydVSmTAhgRLAen37Vt/ZtWUplAFcLuFvV7q4hOYDbFLjbMKy7YLwGukOWoAnPDonSMDvVs0ML7DfUdGw/uHZ9AkGJnQcTGDKM70wj4ODeNQJs5AGGyOwe4MHF6wOYEFPx8aB7gM36AFfkX5aBwGxv8gXdEIxKAgLqWibFibkPnrjy1GU7YSTNP/rB5OkdeG1iojy6BpboHp2gUU0RUzVV4bl5pTlvmd1WYof1EReNJCV3pEps6g0MrPNjy2JcpzPa7D3bpDKnwGEDnMw2ig+ZajpOtOuLUBDe+763WgvJWbHQ4+dmAnCWlH7dFR30RBWOp9jpcid+igQbxz8ZhyK/SxVRi8vIP5+QWAWHgewiy5BdTaiz/AUy+v7kntpOrEQrIX8N+vZUGqBZ8B71pcE6ZKrproIafYI99RRJJb8f3iAl7YCIfSpvKDC75g2s71KGuHYfLShQ8g7myXGtagqiupHGsXFt4UxGK3EtqtGz+ZsGMrbCIinsnQUyGgbBHeFYWg9KJBV7oiIEKqloopioP3gB7zfUtGOpkfJ8rwFJzziDTKVPzIQnex51EC4x2g5IamRSwyQ3muJ9QPUVyrD4FadI1/OtjSLTCk6Bxi9NnZgw31zFpJjfpBgUGQNkbejX+bLcQjcjVe+vKgeq5i7oNPP1pg0cGuf/wNbjODCtGutfb6Qt50uUp6vjezORaJ9wdsVBjWCVN3H8m/SLhee64vBxyPhFOo+xKaFnaazH7ZLxiIjwxdlEQXIjsWkNxEUo32Ahu+RtLjVfEqHs07WzmIvLUi2dMPc2WD4H/ibuKrlQAmAbKQRAdftmmiNAWe6jj3OD7q9qykbLvN3e1/JxLd68pVpyQJB7Vc28jj134/g9q+P7D6Px7ciy4okY6YZ9qTIBMFTGThM58ybbkJXjItqRiRqprl1kaRyOLKtDxHw4CaozBlUJBh0xJsi3CjAt6jS0zSIAFmkIAFKjSfjuQ3sK87zHsmcq6x5lb3I7oT0ZBqekg1OkyGmaGUaVmWFlj5hUnVyyrjGkzx+lommWELe4lT+YbKfrNOBrm+yr/p4kVSjBqgczUN5I7aQKUQyhdlMqpMYoEi2sqNPNs58VFfnfUTb7i+BITf/GW5megHcxNIWSfP8zMU+cm8j5dW2qFLPapdjB7NGxUfOpiZkPzIk2IRuJxVRWlxpeUwVubJaOcQZtpmHowTRMWxS4+/H9goFXvZJc9CibOm4z+0Krsy8t4/7pknGnKu64+MDLJnY7uJfFmj1MBw0r8pyc6zWUAXI9mGhBy2LZgXUXxDqoDLXvBetqDLwe5ng2MrZB6W/iGJ86tgFLylSbariVZFaHyIN4XYR4ATmMYite5f3jbYqXWRag6xKv30aIgKoep88WAOpkldZHfZtw8Gfd0Agqs9PPoBG0Ds5faZpG1Vm/wZ9dhD9Tm4B9mIptDVHUhbNObQL2gnVNRlGDyzyiCYhPXzBAbQKWmGrYZVplkdcgXhckXmoTEO2WEe1OvIaVRBtoAp6xloTaBGx/LQk5PWzwZ103Ac+gkdoEbJ9GwKiRSujcQRRHfE4s9hi7grSbtmRyoeswazrR5BTs/Lw6bJVNmyibNkMLiOpbudKoEdP2Dzubmsih+7FjwCXM1OXQVezgdrhVdqBn2+DVWGFmAM/sI3Z2Wd5sgK6gmT2EztLQE1ycHdKHQKrvYROA+UeZAHwNTw2/MT1oq+HAyT5CvjtYAuW8qUz61kOBKlIG2SKVbU5bRd4Bq5pi58m39rW2eq8BB5dRkXTuiVhAqMxTP0MskK2u+4haF4sajSyNjLtQD0L1ORCre/+hIaV3CiV2LCjOzLlgQbG1UecMOTl2dTclgDahPFFlPkmZqVY8pJn14GwNychB36CyQPE5LIXWYVtNK5yGf6wykOIK6eNE905P+3rovY+8e0IjYOkLnqBx2Fbj3RZHrED6+6bgoGzq9ioHB4z38P+SOu9zwpTWAU9bpxPfDAMxiGT3oPJ6nn8OXCb2+B8=</diagram></mxfile>
2108.09079/main_diagram/main_diagram.pdf ADDED
Binary file (26.5 kB). View file
 
2108.09079/paper_text/intro_method.md ADDED
@@ -0,0 +1,226 @@
1
+ # Introduction
2
+
3
+ <figure id="fig:head" data-latex-placement="t">
4
+ <embed src="detail_pdf_a.pdf" />
5
+ <figcaption>An example of image deraining results. Obviously, SPDNet can reconstruct high-quality images with clear and accurate structure.</figcaption>
6
+ </figure>
7
+
8
+ Single image deraining (SID) aims to reconstruct a visually pleasing image from its corresponding rain-streaks-degraded image. In the past years, various methods have been proposed for SID. For traditional methods, many researchers [@garg2005does; @barnum2010analysis; @bossu2011rain; @chen2013generalized; @xu2012improved; @li2016rain; @luo2015removing; @GuMZZ17] focused on exploring the physical properties of the rain and background layers. Meanwhile, various priors have been proposed to regularize and separate them, such as layer priors with Gaussian mixture model (GMM) [@li2016rain], discriminative sparse coding (DSC) [@luo2015removing], and joint convolutional analysis and synthesis sparse representation (JCAS) [@GuMZZ17]. Although these methods can effectively remove the rain streaks, they require complex iterative optimization to find the best solution.
9
+
10
+ Recently, convolutional neural networks (CNN) have achieved significant success in many computer vision tasks [@simonyan2014very; @he2016deep; @li2019lightweight; @peng2020cumulative]. Moreover, many CNN-based methods [@FuHDLP17; @fu2017removing; @yang2017deep; @li2018recurrent; @zhang2018density; @hu2019depth; @wang2019spatial; @zhang2019image; @ren2019progressive; @yang2019single; @deng2020detail; @wang2020model; @jiang2020multi] have been proposed for rain removal, such as DDN [@fu2017removing], RESCAN [@li2018recurrent], PReNet [@ren2019progressive], DRDNet [@deng2020detail], and RCDNet [@wang2020model]. Although these methods have brought great performance improvements, they struggle to remove all rain streaks and to recover the structural information of images in complex scenarios (Fig. [1](#fig:head){reference-type="ref" reference="fig:head"}). This is because: (1) most of them directly learn the mapping between the rainy image and the rain streak layers; in other words, most methods predict rain streaks via the built CNN model and then subtract the rain streaks from the rainy image to get the final output. However, the density of rain streaks varies, which leads to excessive or insufficient removal of rain streaks, resulting in incomplete structural information in the reconstructed images. (2) These methods focus on learning the structure of rain streaks, but they pay less attention to learning the structure of objects and ignore the importance of image priors.
11
+
12
+ <figure id="fig:rcp" data-latex-placement="t">
13
+ <div class="minipage">
14
+ <img src="rcp/tree.jpg" />
15
+ <p>A. Rainy Image</p>
16
+ </div>
17
+ <div class="minipage">
18
+ <img src="rcp/treeres.jpg" />
19
+ <p>B. RCP of A</p>
20
+ </div>
21
+ <figcaption>B is the residue channel prior (RCP) extracted from the rainy image. Even though the RCP is extracted from the rainy image, it still contains clear structures.</figcaption>
22
+ </figure>
23
+
24
+ To address the aforementioned issues, we aim to explore an image prior that can protect the structure of the image, and introduce this prior into the model to guide high-quality image reconstruction. According to our investigation, the Total Variation (TV) prior smooths away texture details in the restored images, the sparse prior is usually difficult to model because it requires other domain knowledge, and the edge prior is difficult to obtain from the rainy image since off-the-shelf edge detectors are sensitive to rain streaks. In contrast, the residue channel prior (RCP [@li2018robust; @li2019rainflow; @li2020all]) shows clear structures even when extracted from the rainy image (Fig. [2](#fig:rcp){reference-type="ref" reference="fig:rcp"}). Moreover, compared with the layer priors [@luo2015removing] and  [@hu2019depth], RCP is the residual of the maximum and minimum channel values of the rainy image, calculated without any additional parameters. Hence, we adopt RCP for the image deraining task and propose a Structure-Preserving Deraining Network (SPDNet) with residue channel prior (RCP) guidance. SPDNet pays more attention to learning the background information and directly generates high-quality rain-free images with clear and accurate structures. To achieve this, an RCP guidance network and an iterative guidance strategy are proposed for structure-preserving deraining. Specifically, we design a wavelet-based multi-level module (WMLM) as the backbone of SPDNet to fully learn the background information in the rainy image. Meanwhile, an RCP extraction module and an interactive fusion module (IFM) are designed for RCP extraction and guidance, respectively. This is also the most important step in SPDNet, as it highlights the structure of objects in rainy images and promotes the generation of high-quality rain-free images. In addition, we found that the RCP improves as image quality improves. Therefore, we propose a progressive reconstruction method, in which the model extracts more accurate RCPs at the intermediate reconstruction stages to further guide image reconstruction. Under the iterative guidance of RCP, SPDNet can reconstruct high-quality rain-free images.
25
+
26
+ The main contributions of this paper are as follows:
27
+
28
+ - We explore the importance of residue channel prior (RCP) for rain removal and propose a Structure-Preserving Deraining Network (SPDNet) with RCP guidance. Extensive experimental results show that SPDNet achieves new state-of-the-art results.
29
+
30
+ - We propose an RCP extraction module and an Interactive Fusion Module (IFM) for RCP extraction and guidance, respectively. Meanwhile, an iterative guidance strategy is designed for progressive image reconstruction.
31
+
32
+ - We design a Wavelet-based Multi-Level Module (WMLM) as the backbone of SPDNet to learn the background of the area covered by the rain streak.
33
+
34
+ <figure id="fig:model" data-latex-placement="http">
35
+ <div class="center">
36
+ <embed src="img/model.pdf" />
37
+ </div>
38
+ <figcaption>The overall architecture of the proposed Structure-Preserving Deraining Network (SPDNet).</figcaption>
39
+ </figure>
40
+
41
+ # Method
42
+
43
+ Traditional methods mainly use manually extracted features and priors to describe the characteristics of rain streaks. For example, Kang  [@kang2011automatic] proposed a method that decomposes an image into low- and high-frequency parts using a bilateral filter. Luo  [@luo2015removing] presented discriminative sparse coding for separating rain streaks from the rainy image. Li  [@li2016rain] used Gaussian mixture models (GMM) as the prior to separate the rain streaks. Wang  [@wang2017a] combined image decomposition and dictionary learning to remove rain or snow from images. Zhu  [@zhu2017joint] detected rain-dominant regions, which are then utilized as a guidance image to help separate rain streaks from the background layer.
44
+
45
+ Recently, many CNN-based image deraining networks have been proposed [@yasarla2019uncertainty; @wei2019semi; @wang2019erl; @yang2019scale; @yang2020Single; @yasarla2020syn2real; @jiang2020multi; @qian2018attentive; @wang2020joint; @fu2021rain], and they have greatly promoted the development of this field. For example, Fu  [@FuHDLP17] proposed the first CNN-based model to remove rain streaks. Later, they [@fu2017removing] also proposed a deeper network based on a deep residual network and used image domain knowledge to remove rain streaks. Yang  [@yang2017deep] proposed a recurrent deep network for joint rain detection and removal to progressively remove rain streaks. Li  [@li2018recurrent] proposed a recurrent squeeze-and-excitation context aggregation network to make full use of contextual information. Deng  [@deng2020detail] introduced a detail-recovery image deraining network, which is composed of a rain residual network and a detail repair network. Jiang  [@jiang2020multi] designed a coarse-to-fine progressive fusion network, and Wang  [@wang2020dcsfn] proposed a cross-scale fusion network based on an inner-scale connection block. Moreover, Ren  [@ren2019progressive] proposed a progressive recurrent network (PReNet) by repeatedly unfolding a shallow ResNet. Wang  [@wang2020model] introduced a rain convolutional dictionary network, which continuously optimizes the model to obtain better rain streaks and rain-free images.
46
+
47
+ Although these methods can remove part of the rain streaks, they still cannot effectively remove all the rain streaks in complex situations. Meanwhile, these methods can hardly protect the structural information of the image and thus cannot reconstruct high-quality rain-free images. Therefore, a method that can effectively remove rain streaks and protect the structure of objects is essential.
48
+
49
+ In this paper, we propose a Structure-Preserving Deraining Network (SPDNet). As shown in Fig. [3](#fig:model){reference-type="ref" reference="fig:model"}, SPDNet uses the wavelet-based feature extraction backbone as the main structure and introduces a residue channel prior (RCP) guided mechanism for structure-preserving deraining. In SPDNet, the wavelet-based feature extraction backbone is designed for background information learning, which consists of a series of wavelet-based multi-level modules (WMLM). However, it is difficult to maintain clear structures of the reconstructed image by only using the wavelet-based feature extraction backbone. Hence, RCP is introduced to the model to provide additional auxiliary information, so that the structural information of the reconstructed image is protected. More details will be introduced in the following sections.
50
+
51
+ <figure id="fig:wmlm">
52
+ <div class="center">
53
+ <embed src="img/wmlm_2.pdf" />
54
+ </div>
55
+ <figcaption>The architecture of the proposed Wavelet-based Multi-Level Module (WMLM).</figcaption>
56
+ </figure>
57
+
58
+ Due to the arbitrary size and density of rain streaks, the occluded region and the degree of occlusion of objects in a rainy image are unknown. To solve this problem, many methods [@jiang2020multi; @wang2020dcsfn; @yi2021efficient] adopt a multi-level strategy to fully learn the features at different scales. Following these methods, we also adopt the multi-level strategy in our model. However, directly using downsampling or deconvolution operations causes a lot of information to be lost. Therefore, we propose a wavelet-based multi-level module (WMLM), which adopts the discrete wavelet transform (DWT) and inverse DWT (IWT) in place of the simple downsampling and deconvolution operations. Moreover, DWT can capture both frequency and location information of feature maps [@liu2018multi; @daubechies1990wavelet; @liu2020wavelet], which may be helpful in preserving detailed textures. The architecture of WMLM is shown in Fig. [4](#fig:wmlm){reference-type="ref" reference="fig:wmlm"}.
59
+
60
+ In WMLM, we first use DWT to obtain multiple rainy image features at different scales, and adopt a convolution to resize the feature channels ($\widetilde{\mathcal{F}}_0, \widetilde{\mathcal{F}}_1, \widetilde{\mathcal{F}}_2$) as: $$\begin{equation}
61
+ \left\{\begin{array}{l}
62
+ \widetilde{\mathcal{F}}_i = \widetilde{\mathcal{F}}, \quad \text{if} \,\,\, i=0,\\
63
+ \widetilde{\mathcal{F}}_i = Conv(DWT(\widetilde{\mathcal{F}}_{i-1})), \quad \text{if} \,\,\, i>0,\\
64
+ \end{array}\right.
65
+ \end{equation}$$ where $\widetilde{\mathcal{F}}$ denotes the input of WMLM. Next, we adopt three SE-ResBlock in Residual block (SRiR) [@zhang2018image] to learn the background information of the rainy image. As shown in Fig. [4](#fig:wmlm){reference-type="ref" reference="fig:wmlm"}, SRiR is similar to the ResBlock [@he2016deep], which uses the SE-Resblock [@hu2018squeeze] to replace the convolution in ResBlock. $$\begin{equation}
66
+ \widetilde{\mathcal{F}}_i^s = SRiR(\widetilde{\mathcal{F}_i}), \quad i=0,1,2.
67
+ \end{equation}$$ Lastly, we use a convolution to resize the channels of the smaller-scale features and upsample them by IWT. They are then added to the output of the previous level: $$\begin{equation}
68
+ \widetilde{\mathcal{F}}_{i-1}^s = IWT(Conv(\widetilde{\mathcal{F}}_i^s)) + \widetilde{\mathcal{F}}_{i-1}^s, \quad i = 2,1,
69
+ \end{equation}$$
70
+
71
+ With the help of WMLM, the model can effectively learn the background information of rainy images and reconstruct relatively clear images.
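+
+ As a minimal illustration of why DWT/IWT lose no information, unlike plain downsampling, here is a one-level 2-D Haar transform in NumPy together with its exact inverse; this is a self-contained sketch of ours, not the paper's implementation:
+
+ ```python
+ import numpy as np
+
+ def dwt_haar(x):
+     # one-level 2-D Haar DWT: splits an (H, W) map into 4 half-resolution bands
+     a = (x[0::2, 0::2] + x[0::2, 1::2] + x[1::2, 0::2] + x[1::2, 1::2]) / 2
+     h = (x[0::2, 0::2] - x[0::2, 1::2] + x[1::2, 0::2] - x[1::2, 1::2]) / 2
+     v = (x[0::2, 0::2] + x[0::2, 1::2] - x[1::2, 0::2] - x[1::2, 1::2]) / 2
+     d = (x[0::2, 0::2] - x[0::2, 1::2] - x[1::2, 0::2] + x[1::2, 1::2]) / 2
+     return a, h, v, d
+
+ def iwt_haar(a, h, v, d):
+     # exact inverse: reconstructs the full-resolution map, so no detail is lost
+     x = np.empty((2 * a.shape[0], 2 * a.shape[1]))
+     x[0::2, 0::2] = (a + h + v + d) / 2
+     x[0::2, 1::2] = (a - h + v - d) / 2
+     x[1::2, 0::2] = (a + h - v - d) / 2
+     x[1::2, 1::2] = (a - h - v + d) / 2
+     return x
+
+ x = np.random.default_rng(0).normal(size=(8, 8))
+ assert np.allclose(iwt_haar(*dwt_haar(x)), x)   # perfect reconstruction
+ ```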
72
+
73
+ Although the basic model can reconstruct relatively clear rain-free images, we found that the object structure in the reconstructed image is also damaged. To solve this problem, we introduce an additional image prior, the residue channel prior (RCP), into the model to protect the object structure and achieve structure-preserving deraining. In addition, an Interactive Fusion Module (IFM) and an iterative guidance strategy are proposed to make full use of the RCP information, so that high-quality rain-free images can be reconstructed.
74
+
75
+ As shown in Fig. [2](#fig:rcp){reference-type="ref" reference="fig:rcp"}, RCP is free of rain streaks and contains only a transformed version of the background details, which is the residual result of the maximum channel value and minimum channel value of the rainy image.
76
+
77
+ According to Li  [@li2018robust], the colored-image intensity of a rain streak image can be expressed as: $$\begin{equation}
78
+ \tilde{\mathcal{O}}({x})=t \beta_{r s}({x}) \mathcal{B} \alpha+(T-t) \mathcal{R} \boldsymbol{\pi},
79
+ \label{eq:i}
80
+ \end{equation}$$ where $\tilde{\mathcal{O}}({x})$ is the color vector representing the colored intensity. $\beta_{rs}$ is composed of refraction, specular reflection, and internal reflection coefficients of raindrops [@garg2003photometric]. $\textbf{B}=(B_r, B_g, B_b)^T$ represents light brightness and $\mathcal{B}=B_r+B_g+B_b$. $\textbf{R}=(R_r,R_g,R_b)^T$ represents background reflection and $\mathcal{R}=R_r+R_g+R_b$. $\alpha=\textbf{B}/\mathcal{B}$ and $\pi=\textbf{R}/\mathcal{R}$ represent the chromaticities of $\textbf{B}$ and $\textbf{R}$, respectively. $T$ is the exposure time and $t$ is the time for a raindrop to pass through pixel $x$. In Eq.([\[eq:i\]](#eq:i){reference-type="ref" reference="eq:i"}), the first term is the rain streak term and the second term is the background term. When employing any existing color constancy algorithm to estimate $\alpha$, we can get the following normalization step: $$\begin{equation}
81
+ \mathcal{O}({x})=\frac{\tilde{\mathcal{O}}({x})}{\alpha}={O}_{r s}({x}) \mathbf{i}+{O}_{b g}({x}),
82
+ \label{eq:rs}
83
+ \end{equation}$$ where $\mathbf{i}=(1,1,1)^T$, $O_{rs}=t \beta_{r s}\mathcal{B}$, and $O_{bg}=(T-t)\mathcal{R}/\alpha$. The vector division is the element-wise division. When we normalize the image, the light chromaticity will be cancelled and the color effect of the spectral sensitivities will also be cancelled. Hence, based on Eq.([\[eq:rs\]](#eq:rs){reference-type="ref" reference="eq:rs"}), given a rain image $\mathcal{O}$, the residue channel prior $\mathcal{P}$ of $\mathcal{O}$ can be defined as: $$\begin{equation}
84
+ \mathcal{P}(x)=\max _{c \in r, g, b} \mathcal{O}^{c}(x)-\min _{d \in r, g, b}\mathcal{O}^{d}(x).
85
+ \label{res_p}
86
+ \end{equation}$$ Since the rain streak term in Eq.([\[eq:i\]](#eq:i){reference-type="ref" reference="eq:i"}) is achromatic, its values are canceled when employing color constancy, and the residue channel prior is therefore free from rain streaks, as shown in Fig. [2](#fig:rcp){reference-type="ref" reference="fig:rcp"}. Moreover, according to Li  [@li2018robust], the residue channel prior still works without employing color constancy: owing to the dominant gray atmospheric light generated by a cloudy sky, the appearance of rain streaks is already achromatic in most cases. Based on this observation, RCP can extract a more complete and accurate object structure. Therefore, RCP is introduced to the model to guide image deraining.
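+
+ Since Eq.(res_p) uses no learned parameters, extracting the RCP is essentially a one-liner; a small sketch of ours:
+
+ ```python
+ import numpy as np
+
+ def residue_channel_prior(img):
+     # Eq. (res_p): P(x) = max over RGB channels minus min over RGB channels
+     return img.max(axis=-1) - img.min(axis=-1)
+
+ rainy = np.random.default_rng(0).random((4, 4, 3))  # toy H x W x 3 image in [0, 1]
+ prior = residue_channel_prior(rainy)                # H x W structure map
+ print(prior.shape)
+ ```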
87
+
88
+ In order to extract high-dimensional features of the RCP, we propose an RCP extraction module. As shown in Fig. [3](#fig:model){reference-type="ref" reference="fig:model"}, a convolution is firstly used to obtain the initial feature maps $\mathcal{F}_{p}^{init}$ of $\mathcal{P}$. Furthermore, to reduce the noise in the initial features and enrich the semantic information of the feature, we use SE-ResBlock in Residual block (SRiR) to extract the deeper feature $\mathcal{F}_{p}$ of RCP.
89
+
90
+ Although RCP has clearer structural information than the rainy image, how to make full use of the RCP features to guide the model is still a challenging task. One simple solution is to concatenate the RCP features with the image features. However, this is not effective in guiding the model to derain and may cause feature interference. To solve this problem, we propose an interactive fusion module (IFM) to progressively combine the features. As shown in Fig. [5](#fig:ifm){reference-type="ref" reference="fig:ifm"}, two convolutions with $3 \times 3$ kernels map the rainy image feature $\mathcal{F}_o$ and the RCP feature $\mathcal{F}_{p}$ to $\widehat{\mathcal{F}}_o$ and $\widehat{\mathcal{F}}_p$: $$\begin{equation}
91
+ \widehat{\mathcal{F}}_o = Conv(\mathcal{F}_o),
92
+ \end{equation}$$ $$\begin{equation}
93
+ \widehat{\mathcal{F}}_p = Conv(\mathcal{F}_p),
94
+ \end{equation}$$
95
+
96
+ <figure id="fig:ifm">
97
+ <div class="center">
98
+ <embed src="img/ifm.pdf" />
99
+ </div>
100
+ <figcaption>The architecture of the propose Interactive Fusion Module (IFM).</figcaption>
101
+ </figure>
102
+
103
+ Next, we calculate the similarity map $\mathcal{S}$ between $\widehat{\mathcal{F}}_o$ and $\widehat{\mathcal{F}}_p$ using element-wise multiplication to enhance the background information of the rainy image destroyed by the rain streaks. Moreover, because the background of the RCP is similar to that of the rainy image, the similarity map $\mathcal{S}$ can also highlight the feature information in the prior features, thereby further strengthening the structure of the prior feature. $$\begin{equation}
104
+ \mathcal{S} = Sigmoid(\widehat{\mathcal{F}}_o \otimes \widehat{\mathcal{F}}_p),
105
+ \end{equation}$$ $$\begin{equation}
106
+ \mathcal{F}_{o}^{s} = \mathcal{S} \otimes \mathcal{F}_o,
107
+ \end{equation}$$ $$\begin{equation}
108
+ \mathcal{F}_{p}^{s} = \mathcal{S} \otimes \mathcal{F}_p,
109
+ \end{equation}$$ where $\mathcal{F}_{o}^{s}$ denotes the activated feature of $\mathcal{F}_{o}$ and $\mathcal{F}_{p}^{s}$ denotes the activated feature of $\mathcal{F}_{p}$. Finally, each activated feature is added to its original feature, and the output $\widetilde{\mathcal{F}}$ of IFM is obtained by concatenating the features after addition: $$\begin{equation}
110
+ \widetilde{\mathcal{F}}_{o} = \mathcal{F}_{o}^{s} + \mathcal{F}_o,
111
+ \end{equation}$$ $$\begin{equation}
112
+ \widetilde{\mathcal{F}}_{p} = \mathcal{F}_{p}^{s} + \mathcal{F}_p,
113
+ \end{equation}$$ $$\begin{equation}
114
+ \widetilde{\mathcal{F}} = Concat(\widetilde{\mathcal{F}}_{o},\widetilde{\mathcal{F}}_{p}),
115
+ \end{equation}$$
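+
+ A NumPy sketch of the IFM computation; for simplicity, the two $3 \times 3$ convolutions are replaced with per-pixel linear maps, so the shapes and weights below are illustrative assumptions:
+
+ ```python
+ import numpy as np
+
+ def sigmoid(x):
+     return 1.0 / (1.0 + np.exp(-x))
+
+ def ifm(F_o, F_p, W_o, W_p):
+     Fo_hat = F_o @ W_o                    # stand-ins for the two 3x3 convolutions
+     Fp_hat = F_p @ W_p
+     S = sigmoid(Fo_hat * Fp_hat)          # similarity map via element-wise product
+     Fo_s = S * F_o + F_o                  # activate, then residual addition
+     Fp_s = S * F_p + F_p
+     return np.concatenate([Fo_s, Fp_s], axis=-1)  # channel-wise concat
+
+ C = 8
+ rng = np.random.default_rng(1)
+ F_o = rng.normal(size=(16, 16, C))        # rainy-image features
+ F_p = rng.normal(size=(16, 16, C))        # RCP features
+ out = ifm(F_o, F_p, rng.normal(size=(C, C)), rng.normal(size=(C, C)))
+ print(out.shape)                          # (16, 16, 2 * C)
+ ```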
116
+
117
+ As shown in Fig. [6](#fig:prior_compare){reference-type="ref" reference="fig:prior_compare"}, as image quality improves, the extracted RCP also improves. Based on this observation, we propose an iterative guidance strategy that obtains a clearer RCP to replace the RCP of the rainy image; that is, a rain removal result $\mathcal{B}_n$ is obtained from the ${WMLM}_n$ output feature $\mathcal{F}_n$, and a prior $\mathcal{P}_{n+1}$ with clearer structure is obtained from $\mathcal{B}_n$. $$\begin{equation}
118
+ \left\{\begin{array}{l}
119
+ \mathcal{F}_n = {WMLM}_n (\mathcal{\widehat{F}}_n, \mathcal{P}_{n}; \mathcal{\theta}_n),\\
120
+ \mathcal{B}_{n} = Conv(\mathcal{F}_n),\\
121
+ \mathcal{P}_{n+1} = REM(\mathcal{B}_n), \quad if\,\, n=1,2,\\
122
+ \end{array}\right.
123
+ \vspace{-2pt}
124
+ \end{equation}$$ where $\mathcal{\theta}_n$ represents the weights of ${WMLM}_n$, $Conv$ stands for the output convolution, $REM$ denotes the RCP extraction module, and $n$ indexes the ${WMLM}$ stages. In our proposed model, $n$ is set to 3. $\mathcal{\widehat{F}}_n$ is the input feature map of $WMLM_n$. Moreover, to enable the model to learn richer features and generate better results, we leverage the idea of *Ensemble Learning* and adopt the concat operation to fuse the output features of the previous WMLM with the current WMLM output features. []{#ensemble label="ensemble"}
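+
+ Schematically, the three-stage iterative guidance reads as the following loop; the `wmlm`, `out_conv`, and `rcp_extract` callables are placeholders of ours standing in for $WMLM_n$, the output convolution, and the REM:
+
+ ```python
+ import numpy as np
+
+ def spdnet_forward(rainy, wmlm, out_conv, rcp_extract, n_stages=3):
+     prior = rcp_extract(rainy)          # P_1 extracted from the rainy input
+     feats, outputs = rainy, []
+     for n in range(n_stages):
+         feats = wmlm[n](feats, prior)   # F_n = WMLM_n(F_hat_n, P_n)
+         B_n = out_conv(feats)           # intermediate rain-free estimate B_n
+         outputs.append(B_n)
+         if n < n_stages - 1:
+             prior = rcp_extract(B_n)    # cleaner RCP guides the next stage
+     return outputs                      # B_1, B_2, B_3, all supervised
+
+ # toy stand-ins just to show the data flow
+ outs = spdnet_forward(np.zeros((8, 8)),
+                       wmlm=[lambda f, p: f + p] * 3,
+                       out_conv=lambda f: f,
+                       rcp_extract=lambda x: np.abs(x))
+ print(len(outs))
+ ```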
125
+
126
+ <figure id="fig:prior_compare" data-latex-placement="t">
127
+ <div class="minipage">
128
+ <p><img src="prior_compare/rain2.png" alt="image" /> <img src="prior_compare/rain5.png" alt="image" /></p>
129
+ <p>(a)</p>
130
+ </div>
131
+ <div class="minipage">
132
+ <p><img src="prior_compare/rain_prior2.png" alt="image" /> <img src="prior_compare/rain_prior5.png" alt="image" /></p>
133
+ <p>(b)</p>
134
+ </div>
135
+ <div class="minipage">
136
+ <p><img src="prior_compare/sr2.png" alt="image" /> <img src="prior_compare/sr5.png" alt="image" /></p>
137
+ <p>(c)</p>
138
+ </div>
139
+ <figcaption>Comparison between the RCP of rainy images and the RCP of the output results. (a) rainy images, (b) the RCP of the rainy images, and (c) the RCP of the output results. It can be clearly observed that the structure in the RCP of the output results is more distinct than in that of the rainy images.</figcaption>
140
+ </figure>
141
+
142
+ We employ the $\mathcal{L}_2$ loss as our objective function. As mentioned above, under the iterative update strategy the model outputs three results. Thus, the comprehensive loss function of our proposed model can be formulated as: $$\begin{equation}
143
+ \mathcal{L} = \sum_{i}\left\|\mathcal{B}_{i}-\hat{\mathcal{B}}\right\|^{2}, \quad i=1,2,3.
144
+ \end{equation}$$ where $\hat{\mathcal{B}}$ denotes the rain-free image (GT) and $\mathcal{B}_{i}$ denotes the output result at stage $i$.
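+
+ The multi-stage loss then simply sums the squared errors of the three outputs against the ground truth; a minimal sketch of ours:
+
+ ```python
+ import numpy as np
+
+ def total_loss(outputs, gt):
+     # L = sum_i || B_i - B_hat ||^2 over the three stage outputs
+     return sum(np.sum((B - gt) ** 2) for B in outputs)
+
+ gt = np.zeros((8, 8))
+ print(total_loss([gt + 0.1, gt + 0.05, gt + 0.01], gt))
+ ```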
145
+
146
+ | Method | #Param. | Time ($128\times128$) | PSNR | SSIM | PSNR | SSIM | PSNR | SSIM | PSNR | SSIM | PSNR | SSIM |
+ |---|---|---|---|---|---|---|---|---|---|---|---|---|
+ | GMM [@li2016rain] | ----- | 27.961s | 28.66 | 0.8652 | 14.50 | 0.4164 | 25.71 | 0.8020 | 25.81 | 0.8344 | 34.30 | 0.9428 |
+ | DSC [@luo2015removing] | ----- | 7.947s | 27.16 | 0.8663 | 14.73 | 0.3815 | 22.61 | 0.7530 | 24.24 | 0.8279 | 34.95 | 0.9416 |
+ | DDN [@fu2017removing] | 0.06M | 0.278s | 34.68 | 0.9671 | 26.05 | 0.8056 | 25.87 | 0.8018 | 30.97 | 0.9116 | 36.16 | 0.9463 |
+ | RESCAN [@li2018recurrent] | 0.15M | 0.016s | 36.09 | 0.9697 | 26.75 | 0.8353 | 26.58 | 0.8726 | 33.38 | 0.9417 | 38.11 | 0.9707 |
+ | PReNet [@ren2019progressive] | 0.17M | 0.012s | 37.70 | 0.9842 | 29.04 | 0.8991 | 27.06 | 0.9026 | 33.17 | 0.9481 | 40.16 | 0.9816 |
+ | DCSFN [@wang2020dcsfn] | 6.45M | 0.253s | 39.37 | 0.9854 | 29.25 | 0.9075 | 28.38 | 0.9072 | [34.31]{.underline} | [0.9545]{.underline} | ----- | ----- |
+ | DRDNet [@deng2020detail] | 2.72M | 0.069s | 39.05 | 0.9862 | 29.15 | 0.8921 | 28.21 | 0.9012 | 34.02 | 0.9515 | 40.89 | 0.9784 |
+ | RCDNet [@wang2020model] | 3.17M | 0.068s | [39.87]{.underline} | [0.9875]{.underline} | [30.24]{.underline} | [0.9098]{.underline} | [28.59]{.underline} | [0.9137]{.underline} | 34.08 | 0.9532 | [41.47]{.underline} | [0.9834]{.underline} |
+ | SPDNet (Ours) | 3.04M | 0.055s | **40.59** | **0.9880** | **31.30** | **0.9217** | **30.21** | **0.9152** | **34.57** | **0.9561** | **43.55** | **0.9875** |
151
+
152
+ <figure id="fig:34" data-latex-placement="t">
153
+ <div class="minipage">
154
+ <p><img src="99/rain_hose.jpg" alt="image" /> <img src="99/rain_hose_crop_re.jpg" alt="image" /></p>
155
+ <p>Rainy images</p>
156
+ </div>
157
+ <div class="minipage">
158
+ <p><img src="99/DDN.jpg" alt="image" /> <img src="99/DDN_crop_re.jpg" alt="image" /></p>
159
+ <p>DDN</p>
160
+ </div>
161
+ <div class="minipage">
162
+ <p><img src="99/RESCAN.jpg" alt="image" /> <img src="99/RESCAN_crop_re.jpg" alt="image" /></p>
163
+ <p>RESCAN</p>
164
+ </div>
165
+ <div class="minipage">
166
+ <p><img src="99/PReNet.jpg" alt="image" /> <img src="99/PReNet_crop_re.jpg" alt="image" /></p>
167
+ <p>PReNet</p>
168
+ </div>
169
+ <div class="minipage">
170
+ <p><img src="99/DRD.jpg" alt="image" /> <img src="99/DRD_crop_re.jpg" alt="image" /></p>
171
+ <p>DRDNet</p>
172
+ </div>
173
+ <div class="minipage">
174
+ <p><img src="99/dcsfn.jpg" alt="image" /> <img src="99/dcsfn_crop_re.jpg" alt="image" /></p>
175
+ <p>DCSFN</p>
176
+ </div>
177
+ <div class="minipage">
178
+ <p><img src="99/rcdnet.jpg" alt="image" /> <img src="99/rcdnet_crop_re.jpg" alt="image" /></p>
179
+ <p>RCDNet</p>
180
+ </div>
181
+ <div class="minipage">
182
+ <p><img src="99/our.jpg" alt="image" /> <img src="99/our_crop_re.jpg" alt="image" /></p>
183
+ <p>SPDNet</p>
184
+ </div>
185
+ <div class="minipage">
186
+ <p><img src="99/label.jpg" alt="image" /> <img src="99/label_crop_re.jpg" alt="image" /></p>
187
+ <p>GT</p>
188
+ </div>
189
+ <figcaption>Image deraining results tested in the synthetic datasets. The first row is rainy image, the output of different methods, and GT. The second row is the zoom results of the red window. It is obvious that SPDNet can reconstruct rain-free image with clearer structure.</figcaption>
190
+ </figure>
191
+
192
+ <figure id="fig:37">
193
+ <div class="minipage">
194
+ <p><img src="35/rain_house.jpg" alt="image" /> <img src="35/rain_house_crop_re.jpg" alt="image" /> <img src="78/rain.jpg" alt="image" /> <img src="78/rain_crop_re.jpg" alt="image" /></p>
195
+ <p>Rainy image</p>
196
+ </div>
197
+ <div class="minipage">
198
+ <p><img src="35/DDN.jpg" alt="image" /> <img src="35/DDN_crop_re.jpg" alt="image" /> <img src="78/ddn.jpg" alt="image" /> <img src="78/ddn_crop_re.jpg" alt="image" /></p>
199
+ <p>DDN</p>
200
+ </div>
201
+ <div class="minipage">
202
+ <p><img src="35/RESCAN.jpg" alt="image" /> <img src="35/RESCAN_crop_re.jpg" alt="image" /> <img src="78/rescan.jpg" alt="image" /> <img src="78/rescan_crop_re.jpg" alt="image" /></p>
203
+ <p>RESCAN</p>
204
+ </div>
205
+ <div class="minipage">
206
+ <p><img src="35/PReNet.jpg" alt="image" /> <img src="35/PReNet_crop_re.jpg" alt="image" /> <img src="78/PReNet.jpg" alt="image" /> <img src="78/PReNet_crop_re.jpg" alt="image" /></p>
207
+ <p>PReNet</p>
208
+ </div>
209
+ <div class="minipage">
210
+ <p><img src="35/DRD.jpg" alt="image" /> <img src="35/DRD_crop_re.jpg" alt="image" /> <img src="78/drd.jpg" alt="image" /> <img src="78/drd_crop_re.jpg" alt="image" /></p>
211
+ <p>DRDNet</p>
212
+ </div>
213
+ <div class="minipage">
214
+ <p><img src="35/dcsfn.jpg" alt="image" /> <img src="35/dcsfn_crop_re.jpg" alt="image" /> <img src="78/dcsfn.jpg" alt="image" /> <img src="78/dcsfn_crop_re.jpg" alt="image" /></p>
215
+ <p>DCSFN</p>
216
+ </div>
217
+ <div class="minipage">
218
+ <p><img src="35/rcdnet.jpg" alt="image" /> <img src="35/rcdnet_crop_re.jpg" alt="image" /> <img src="78/rcdnet.jpg" alt="image" /> <img src="78/rcdnet_crop_re.jpg" alt="image" /></p>
219
+ <p>RCDNet</p>
220
+ </div>
221
+ <div class="minipage">
222
+ <p><img src="35/our.jpg" alt="image" /> <img src="35/our_crop_re.jpg" alt="image" /> <img src="78/our.jpg" alt="image" /> <img src="78/our_crop_re.jpg" alt="image" /></p>
223
+ <p>SPDNet</p>
224
+ </div>
225
+ <figcaption>Image deraining results tested in the real-world dataset. The first row is rainy image and the output of different methods. The second row is the zoom results of the red window.</figcaption>
226
+ </figure>
2203.09275/main_diagram/main_diagram.drawio ADDED
The diff for this file is too large to render. See raw diff
 
2203.09275/paper_text/intro_method.md ADDED
@@ -0,0 +1,107 @@
1
+ # Introduction
2
+
3
+ Images captured in weather degradations like rain or fog conditions are of poor quality, leading to a loss of situational awareness and a general decrease in usefulness. Hence, it is very important to compensate for the visual degradation in images caused by these weather degradations. Additionally, such weather-degraded images also reduce the performance of down-stream computer vision tasks such as detection, segmentation and recognition [@li2018benchmarking; @chen2018encoder; @ren2015faster]. The main objective of single image restoration (SIR) of a weather-degraded image is to restore the clean image $y$, given a weather-degraded image $x$, in order to improve the performance of such down-stream tasks. Extensive research has been conducted on methods to remove such weather degradation effects as rain and haze.
4
+
5
+ <figure id="fig:bar_graph" data-latex-placement="h!">
6
+ <img src="bargraph.png" />
7
+ <figcaption>Cross-domain deraining experiment where Rain800 <span class="citation" data-cites="zhang2019image"></span> is used as the synthetic source dataset <span class="math inline">𝒟<sub><em>s</em><em>r</em><em>c</em></sub></span>, and SPA-data <span class="citation" data-cites="wang2019spatial"></span> is used as the real rain target dataset <span class="math inline">𝒟<sub><em>t</em><em>g</em><em>t</em></sub></span>. Here, MPRN <span class="citation" data-cites="zamir2021multi"></span> and MSPFN <span class="citation" data-cites="jiang2020multi"></span> are fully-supervised methods and SIRR <span class="citation" data-cites="wei2019semi"></span>, Syn2Real <span class="citation" data-cites="Yasarla_2020_CVPR"></span> and MOSS <span class="citation" data-cites="Huang_2021_CVPR"></span> are semi-supervised methods. Fully-supervised methods are supervised using the corresponding labeled clean images and semi-supervised images are trained using a labeled source data <span class="math inline">𝒟<sub><em>s</em><em>r</em><em>c</em></sub></span> and an unlabeled target data <span class="math inline">𝒟<sub><em>t</em><em>g</em><em>t</em></sub></span>. (a) Blue and Red bars show the target only and source only performance of MSPFN and MPRN, respectively. We can see a drop in performance of supervised methods when they are trained on <span class="math inline">𝒟<sub><em>s</em><em>r</em><em>c</em></sub></span> and tested on <span class="math inline">𝒟<sub><em>t</em><em>g</em><em>t</em></sub></span>. (b) Semi-supervised methods such as SIRR, Syn2Real and MOSS perform better than the supervised methods by leveraging the information from unlabeled images. However, with the help of ART-SS, we are able improve the performance of semi-supervised restoration(SSR) methods even further by rejecting the unlabeled images that are not helpful in semi-supervision.</figcaption>
8
+ </figure>
9
+
10
+ In recent years, various convolutional neural network-based methods have been proposed for deraining [@fu2017removing; @yang2017deep; @wang2019spatial; @li2018recurrent; @ren2019progressive; @fu2021rain; @zhou2021image; @wang2021rain; @lin2020rain] and dehazing [@dong2020multi; @li2020deep; @wu2021contrastive; @zhang2018density; @zhang2019joint; @zhang2021hierarchical]. These fully-supervised methods require large amounts of clean/weather-degraded image pairs for training. Since collecting real-world weather-degraded and clean image pairs is difficult, most existing supervised methods rely on synthetically generated data to train the network. However, the use of synthetically generated weather-degraded images often results in sub-optimal performance on real-world images due to the domain difference between synthetic and real-world images. For the deraining task, this can be clearly seen in Fig. [1](#fig:bar_graph){reference-type="ref" reference="fig:bar_graph"} (a), where we train two fully-supervised deraining networks, MPRN [@zamir2021multi] and MSPFN [@jiang2020multi], on a synthetic source dataset $\mathcal{D}_{src}$ from Rain800 [@zhang2019image] and test them on a real rain target dataset $\mathcal{D}_{tgt}$ from SPA-data [@wang2019spatial]. From Fig. [1](#fig:bar_graph){reference-type="ref" reference="fig:bar_graph"} (a), we can observe that the performance of fully-supervised methods degrades significantly when trained on Rain800 [@zhang2019image] and tested on SPA-data [@wang2019spatial], compared to the target-only performance, which corresponds to the case where the methods are trained and tested on SPA-data [@wang2019spatial].
11
+
12
+ To address this domain gap between source and target datasets, Wei *et al.* [@wei2019semi] initially attempted the semi-supervised deraining task by leveraging the rain information in an unlabeled target dataset while training the network. In the proposed method, the authors model rain residuals by imposing a likelihood term on Gaussian Mixture Models (GMMs) for both labeled and unlabeled datasets, and minimize the Kullback-Leibler (KL) divergence between the obtained GMMs of labeled and unlabeled images to enforce the consistency that the distribution of labeled rainy data should be close to that of the unlabeled data. Following this approach, Yasarla *et al.* [@Yasarla_2020_CVPR] proposed a non-parametric model for semi-supervised deraining where they project labeled and unlabeled rainy images to a latent space and formulate a joint Gaussian distribution to generate pseudo labels for the unlabeled images. Recently, Huang *et al.* [@Huang_2021_CVPR] proposed a memory-based encoder-decoder network where the memory module learns rain information from synthetic and real rainy images in a self-supervised manner using Exponential Moving Average (EMA) updates. On the other hand, to address semi-supervised dehazing, Li *et al.* [@li2019semi] proposed to leverage haze information from unlabeled images using dark-channel-prior-based gradient updates while training the network. Later, Shao *et al.* [@shao2020domain] proposed a bi-directional translation method that minimizes the gap between synthetic and real hazy domains using adversarial loss and dark channel priors.
13
+
14
+ One major drawback of these semi-supervised restoration (SSR) techniques is that they don't account for the effect of unlabeled images on the overall semi-supervised performance. Not all images in the unlabeled target dataset are useful for improving SSR performance. If the characteristics of an unlabeled image are very different from those of the source data, then there is a good chance that SSR performance will converge to the unsupervised deraining performance instead of converging towards the fully-supervised one. We provide theoretical evidence for this behavior, and also conduct cross-domain experiments to empirically show that unlabeled observations which differ from the labeled source images might not be beneficial in improving SSR performance.
15
+
16
+ In particular, we theoretically study why a few unlabeled observations might have an adverse effect on the performance of an SSR method, and propose a novel technique called [a]{.underline}daptive [r]{.underline}ejection [t]{.underline}echnique for [s]{.underline}emi-[s]{.underline}upervision (ART-SS), which selects unlabeled observations that are useful in improving SSR performance. In Syn2real [@Yasarla_2020_CVPR] and MOSS [@Huang_2021_CVPR], the authors project the unlabeled and labeled images to a latent space and express the latent vector of each image using either latent basis-vector representations or labeled latent-space vectors. Additionally, these works perform supervision at the defined latent-space level in the unlabeled training phase of semi-supervised training. Following these works, we use the latent representation of the labeled or unlabeled images and compute a similarity index ($\psi$) for each image that indicates how similar the given image is to the labeled images. Note that since the unlabeled images can be easy or hard samples, and the network might produce errors in computing the latent representations, we also compute the variance $\sigma$ (aleatoric uncertainty [@kendall2017uncertainties]), which indicates how confident the network is about computing the latent representation. Hence, using the theorem and corollaries from our theoretical study, we derive a novel selection criterion for the ART-SS method based on the $\psi$ and $\sigma$ measures, to decide whether a given unlabeled image is helpful for improving SSR performance. Note that using the variance $\sigma$ (aleatoric uncertainty) makes the ART-SS method robust to errors in the network's latent-space representations. In this way, given an unlabeled image, we compute its $\psi$ and $\sigma$ measures and use the criterion to decide whether the image is similar or dissimilar (*i.e.*, might have an adverse effect on SSR performance) to the source domain, and hence whether it can be used for updating the weights of an SSR method. For example, using the proposed ART-SS, we are able to significantly boost the performance of existing SSR deraining methods [@wei2019semi; @Yasarla_2020_CVPR; @Huang_2021_CVPR] (see Fig. [1](#fig:bar_graph){reference-type="ref" reference="fig:bar_graph"}(b)).
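+
+ The exact criterion is derived later from the theorem and corollaries; as a purely hypothetical illustration, a thresholded rule on $(\psi, \sigma)$ could look like the following, where the cosine-similarity choice, `psi_min`, and `sigma_max` are our placeholder assumptions:
+
+ ```python
+ import numpy as np
+
+ def similarity_index(z_u, Z_src):
+     # psi: cosine similarity of an unlabeled latent to its closest labeled latent
+     z = z_u / np.linalg.norm(z_u)
+     Z = Z_src / np.linalg.norm(Z_src, axis=1, keepdims=True)
+     return float((Z @ z).max())
+
+ def accept(psi, sigma, psi_min=0.8, sigma_max=0.5):
+     # hypothetical rule: keep the unlabeled image only when it is close to the
+     # source domain AND the network is confident about its latent representation
+     return psi >= psi_min and sigma <= sigma_max
+
+ rng = np.random.default_rng(0)
+ Z_src = rng.normal(size=(100, 32))        # labeled latents z_i^l
+ z_u, sigma = rng.normal(size=32), 0.3     # one unlabeled latent and its variance
+ print(accept(similarity_index(z_u, Z_src), sigma))
+ ```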
17
+
18
+ In summary, this paper makes the following contributions:
19
+
20
+ - We theoretically study how unlabeled images can affect the performance of a semi-supervised restoration (SSR) method.
21
+
22
+ - We propose a novel rejection technique, called ART-SS, to select images that are useful in improving the SSR performance.
23
+
24
+ - Extensive cross-domain experiments and ablation study are conducted to show the significance of the proposed method. In particular, our simple rejection technique is shown to boost the performance of the existing deraining [@wei2019semi; @Yasarla_2020_CVPR; @Huang_2021_CVPR], and dehazing[@li2019semi; @shao2020domain] methods.
25
+
26
+ # Method
27
+
28
+ In this section, we define notations and key concepts regarding specified and misspecified models and present a semi-supervised degradation theorem.
29
+
30
+ Given a weather-degraded image $x$, our objective is to obtain a restored image $\hat{y} = f(x)$, where $f(.)$ is a function with parameters $\theta$ that performs the restoration (deraining or dehazing) task. This function can be any deep learning-based model or a GMM-based model. Let $\mathcal{F}$ denote the collection of all possible restoration functions $\{f(.)\}$ in the parametric model, whose parameters are expressed as $\Theta_{\mathcal{F}}$. Let $f_{opt}(.)$ denote the best possible deraining function in $\mathcal{F}$, i.e., $$\begin{equation}
31
+ f_{opt} = \mathop{\mathrm{arg\,min}}_{f \in \mathcal{F}} L(f),
32
+ \end{equation}$$ where $L(.)$ denotes the error of the function $f(.)$ on the restoration task. The Bayes error (the lowest possible error that can be achieved, which is the same as the irreducible error) is expressed as $L^*$ and the corresponding function as $f^*$. Let $\hat{f}$ be the learned restoration function with parameters $\hat{\theta}$. The model bias is measured by $L(f_{opt}) -L^*$ and the estimation error is $L(\hat{f}) - L(f_{opt})$. Now let us define the limits depending on whether we are learning the restoration task in a supervised fashion ($L_{sup}^*,\: f_{sup}^*,\: \theta_{sup}^*$) or an unsupervised fashion ($L_{unsup}^*,\: f_{unsup}^*,\: \theta_{unsup}^*$). We denote the error of the fully-supervised method using labeled data as $L_\ell$, and of the semi-supervised method using labeled and unlabeled data as $L_{\ell+u}$.
33
+
34
+ We denote the labeled source dataset as $\mathcal{D}_{src}$, and the target unlabeled dataset as $\mathcal{D}_{tgt}$. Following Syn2real [@Yasarla_2020_CVPR] and MOSS [@Huang_2021_CVPR], we project the labeled and unlabeled datasets onto a latent space, defined as the output of an encoder. That is, every image $x^l_i \in \mathcal{D}_{src}$ is passed through the encoder network to obtain $z^l_i = g(x^l_i)$. Similarly, $z^u_i = g(x^u_i)$ is obtained for every image $x^u_i \in \mathcal{D}_{tgt}$. Note that the encoder and decoder of a restoration network are represented by the functions $g(.)$ and $h(.)$, with corresponding parameters $\theta^{enc}$ and $\theta^{dec}$, respectively. For the sake of simplicity, let us assume that all the labeled latent vectors $Z_{src} = \{z^l_i\}$ can be spanned by a set of vectors $\{\{s_i\}^{M_l}_{i=1},\{c_j\}_{j=1}^{M_c}\}$, *i.e.* $z^l_i \in span(\{\{s_i\}^{M_l}_{i=1},\{c_j\}_{j=1}^{M_c}\})$ or $z^l_i~=~\sum_{v \in \{\{s_i\}^{M_l}_{i=1},\{c_j\}_{j=1}^{M_c}\}} \alpha_v v$, and we represent this vector space as $\mathcal{V}_{src}$. Similarly, all unlabeled latent vectors $Z_{tgt} = \{z^u_i\}$ can be spanned by a set of vectors $\{\{t_i\}^{M_u}_{i=1},\{c_j\}_{j=1}^{M_c}\}$, *i.e.* $z^u_i \in span(\{\{t_i\}^{M_u}_{i=1},\{c_j\}_{j=1}^{M_c}\})$ or $z^u_i~=~\sum_{v \in \{\{t_i\}^{M_u}_{i=1},\{c_j\}_{j=1}^{M_c}\}} \alpha_v v$, and we represent this vector space as $\mathcal{V}_{tgt}$. Note that we have assumed that the labeled vector space $\mathcal{V}_{src}$ and the unlabeled vector space $\mathcal{V}_{tgt}$ have common basis vectors $V_c = \{c_j\}_{j=1}^{M_c}$ (because of similarities between labeled and unlabeled images). In addition, these vector spaces have distinct basis vectors $V_s = \{s_i\}^{M_l}_{i=1}$ and $V_t = \{t_i\}^{M_u}_{i=1}$, respectively (due to the differences, or domain gap, between the labeled and unlabeled weather-degraded images).
35
+
36
+ If $f^*\in \mathcal{F}$, then the model bias is $0$, *i.e.*, ${L}(f_{opt}) - {L}^* = 0$, and the estimation error is the only contributor to the regression error of the weather-removal task. In this case, $V_s = \{s_i\}^{M_l}_{i=1} = \emptyset$ and $V_t = \{t_i\}^{M_u}_{i=1} = \emptyset$, where $\emptyset$ denotes the empty set. In other words, the model $\mathcal{F}$ is expressive enough to learn a weather-removal function $f(.)$ that minimizes the difference between labeled and unlabeled weather-degraded images. In the parametric setting, if we use the Mean Squared Error (MSE) on the parameter space $\Theta_{\mathcal{F}}$, then we have $$\begin{equation}
37
+ \label{eqn:model_mse}
38
+ MSE(\hat{\theta}) = \mathbf{E}\left[\left(\hat{\theta}-\theta\right)^2\right] = \left(\mathbf{E}[\hat{\theta}]-\theta \right)^2 + Var(\hat{\theta}).
39
+ \end{equation}$$ The term $\left(\mathbf{E}[\hat{\theta}]-\theta \right)^2$ is a form of bias. In a correctly specified parametric model, fully-supervised and semi-supervised deep learning models converge to the same parameter value $\theta^*$. In other words, both the fully-supervised and the semi-supervised error tend to $L^*$, *i.e.* $L_\ell\rightarrow L^*$ and $L_{\ell+u}\rightarrow L^*$, as $N_\ell\rightarrow\infty$ and $\frac{N_\ell}{N_u}\rightarrow 0$, where $N_\ell$ and $N_u$ denote the numbers of labeled and unlabeled images.
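+
+ To make the decomposition in Eq. [\[eqn:model_mse\]](#eqn:model_mse){reference-type="ref" reference="eqn:model_mse"} concrete, the following minimal Monte-Carlo sketch (our own illustration with a deliberately biased toy estimator, not part of any SSR method) checks numerically that the MSE equals the squared bias plus the variance:
+
+ ```python
+ import numpy as np
+
+ rng = np.random.default_rng(0)
+ theta, n, trials = 2.0, 20, 100_000          # true parameter, sample size, repetitions
+
+ # A deliberately biased estimator: the sample mean shrunk towards zero.
+ estimates = np.array([0.9 * rng.normal(theta, 1.0, n).mean() for _ in range(trials)])
+
+ mse     = np.mean((estimates - theta) ** 2)
+ bias_sq = (estimates.mean() - theta) ** 2
+ var     = estimates.var()
+ print(mse, bias_sq + var)                    # the two values agree up to Monte-Carlo noise
+ ```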
40
+
41
+ If $f^*\notin \mathcal{F}$, then ${L}(f_{opt}) - {L}^* > 0$. In this case, we change the training set from $\mathcal{D}_{src}$ (labeled) to $\mathcal{D}_{src} +\mathcal{D}_{tgt}$. However, this only changes the estimation error (in Eq. [\[eqn:model_mse\]](#eqn:model_mse){reference-type="ref" reference="eqn:model_mse"}). Adding unlabeled observations reduces the estimation variance. Nonetheless, fully-supervised and semi-supervised deep learning models may converge to different parameter values. In other words, the model $\mathcal{F}$ is not expressive enough to learn a deraining function $f(.)$ that minimizes the difference between labeled and unlabeled weather-degraded images. A domain gap exists between the labeled and unlabeled latent vectors, so $V_s \neq \emptyset$ and $V_t \neq \emptyset$. Given a fixed number of weather-degraded images in the labeled training set, increasing the number of unlabeled observations may cause a larger estimation bias, *i.e.* $$\begin{equation}
42
+ \left(\mathbf{E}[\hat{\theta}_{l+u}]-\theta \right)^2 > \left(\mathbf{E}[\hat{\theta}_l]-\theta \right)^2,
43
+ \end{equation}$$ where $\hat{\theta}_\ell$ and $\hat{\theta}_{\ell+u}$ are the parameters of the fully-supervised and semi-supervised methods, respectively. In this case, semi-supervised performance is degraded if the increase in estimation bias outweighs the decrease in estimation variance.
44
+
45
+ Before presenting a lemma and a theorem on the degradation in semi-supervised (SS) performance, we state a few required idealizations. Let $L(\hat{f})$ be the regression error of a learned restoration function $\hat{f}$ ($\in\mathcal{F}$), and $KL(f_{\theta^*_{sup}}||\hat{f})$ be the Kullback-Leibler divergence between the fully-supervised limit density and the estimated $\hat{f}$. Here, we assume that $L_{sup}^*,\: f_{sup}^*,\: \theta_{sup}^*$ are the best possible fully-supervised limits that can be attained given the model $\mathcal{F}$. Similarly, $L_{unsup}^*,\: f_{unsup}^*,\: \theta_{unsup}^*$ denote the best possible unsupervised limits given the model $\mathcal{F}$.
46
+
47
+ For any fixed finite $N_\ell$, or as $N_\ell \rightarrow \infty$, when $\frac{N_\ell}{N_u}\rightarrow 0$ the maximizer of the semi-supervised likelihood function converges to the unsupervised limit $\theta_{unsup}^*$. That is, let $\hat{\theta}_{(\ell+u)}$ denote the parameters of a learned SS method when the numbers of labeled and unlabeled images are $N_\ell$ and $N_u$; then, as $\frac{N_\ell}{N_u}\rightarrow 0$, $$\begin{equation}
48
+ \left\{\hat{\theta}_{(\ell+u)}\right\}_{u} \stackrel{p}{\longrightarrow} \theta_{unsup}^{*} \quad \forall N_\ell
49
+ \end{equation}$$ In semi-supervised learning, the samples are drawn from the collection $\mathcal{D}_{src}+\mathcal{D}_{tgt}$, which implies that the probability of a drawn sample being a labeled image is $\lambda=\frac{N_\ell}{N_\ell+N_u}$, and of being an unlabeled image is $1-\lambda=\frac{N_u}{N_\ell+N_u}$. The optimization involved in learning the parameters $\hat{\theta}$ is as follows: $$\begin{equation}
50
+ \label{eqn:expect_opt}
51
+ \mathop{\mathrm{arg\,max}}_{\theta} \left(\lambda \mathbf{E}_{f(x,y)}[\log f(x,y \mid \theta)]+(1-\lambda) \mathbf{E}_{f(x, y)}[\log f(x \mid \theta)]\right),
52
+ \end{equation}$$ which is a convex combination of the fully-supervised and unsupervised expected log-likelihood functions. For an arbitrary finite value of $N_\ell$, as $\frac{N_\ell}{N_u}\rightarrow 0$ we have $\lambda=\frac{N_\ell}{N_\ell+N_u}\rightarrow0$, indicating that the above optimization in $\hat{\theta}$ maximizes $\mathbf{E}_{f(x, y)}[\log f(x \mid \theta)]$, whose maximizer is, by definition, $\theta_{unsup}^{*}$. Thus the learned semi-supervised parameters satisfy $\left\{\hat{\theta}_{(\ell+u)}\right\}_{u} \stackrel{p}{\longrightarrow} \theta_{unsup}^{*} \quad \forall N_\ell$.
53
+
54
+ If $L(f_{\theta^*_{sup}})<L(f_{\theta^*_{unsup}})$, then for fixed $N_\ell$ or $N_\ell \rightarrow \infty$, as $\frac{N_\ell}{N_u}\rightarrow 0$, $$\begin{equation*}
55
+ \mathbb{1}\left\{L\left(f_{\hat{\theta}_{\ell}}\right)<L\left(f_{\hat{\theta}_{(\ell+u)}}\right)\right\}
56
+ -\mathbb{1}\left\{K L\left(f_{\theta_{s u p}^{*}} \| f_{\hat{\theta}_{\ell}}\right)<K L\left(f_{\theta_{s u p}^{*}} \| f_{\hat{\theta}_{\ell+u}}\right)\right\} \stackrel{p}{\longrightarrow} 0
57
+ \end{equation*}$$ and we have $$\begin{equation*}
58
+ \lim _{N_\ell \rightarrow \infty,\:\frac{N_\ell}{N_u}\rightarrow 0} P\left\{L\left(f_{\hat{\theta}_{\ell}}\right)<L\left(f_{\hat{\theta}_{(\ell+u)}}\right)\right\} =\lim _{N_\ell \rightarrow \infty} P\left\{K L\left(f_{\theta_{sup}^{*}} \| f_{\hat{\theta}_{\ell}}\right)<K L\left(f_{\theta_{sup}^{*}} \| f_{\theta_{unsup}^{*}}\right)\right\}.
60
+ \end{equation*}$$ Please refer to the supplementary document for the proof. We use these theoretical results to arrive at the following corollaries.
61
+
62
+ If $L(f_{\theta^*_{sup}}) <L(f_{\theta^*_{unsup}})$, then for the misspecified model, $\exists \ell$, *s.t.* $$\begin{equation*}
63
+ \lim _{N_u \rightarrow \infty} P\left\{L\left(f_{\hat{\theta}_{\ell}}\right)<L\left(f_{\hat{\theta}_{(\ell+u)}}\right)\right\}>0.
64
+ \end{equation*}$$ *i.e.*, the semi-supervised task yields degradation with positive probability as $N_u~\rightarrow~\infty$.
65
+
66
+ Please refer to the supplementary document for the proof.
67
+
68
+ If for a subset of unlabeled images $\mathcal{T}_1\subset \mathcal{D}_{tgt}$, $\exists\:\text{very small}\: \epsilon>0$, *s.t.* $|L(f_{\theta^*_{sup}}) -L(f_{\theta^*_{unsup,\mathcal{T}_1}})|<\epsilon$, then $$\begin{equation}
69
+ \left\{\hat{\theta}_{(\ell+u)}\right\}_{u\in \mathcal{T}_1} \stackrel{p}{\longrightarrow} \theta_{sup}^{*} \quad \forall N_\ell.
70
71
+ \end{equation}$$ In other words, model $\mathcal{F}$ behaves nearly like a specified model on the labeled images in $\mathcal{D}_{src}$, and unlabeled images in $\mathcal{T}_1$, since the unlabeled images from subset $\mathcal{T}_1$ are very similar to the labeled images in $\mathcal{D}_{src}$.
72
+
73
+ From Eq. [\[eqn:expect_opt\]](#eqn:expect_opt){reference-type="ref" reference="eqn:expect_opt"}, the optimization for learning parameters $\hat{\theta}$ is\
74
+ $\mathop{\mathrm{arg\,max}}_{\theta} \left(\lambda \mathbf{E}_{sup} +(1-\lambda) \mathbf{E}_{unsup} \right)$, where $\mathbf{E}_{sup} = \mathbf{E}_{f(x,y)}[\log f(x,y \mid \theta)]$ and $\mathbf{E}_{unsup} = \mathbf{E}_{f(x, y)}[\log f(x \mid \theta)]$. We rewrite $\mathbf{E}_{unsup} = \mathbf{E}_{\mathcal{T}_1} + \mathbf{E}_{\mathcal{D}_{tgt}-\mathcal{T}_1}$, where $\mathbf{E}_{\mathcal{T}_1} = \mathbf{E}_{f(x, y)}[\log f(x \mid \theta, x\in\mathcal{T}_1)]$ and $\mathbf{E}_{\mathcal{D}_{tgt}-\mathcal{T}_1} = \mathbf{E}_{f(x, y)}[\log f(x \mid \theta, x\in \mathcal{D}_{tgt}-\mathcal{T}_1)]$. Thus the optimization for learning the parameters $\hat{\theta}$ is $$\begin{equation*}
75
+ \mathop{\mathrm{arg\,max}}_{\theta} \left(\lambda \mathbf{E}_{sup} +(1-\lambda) (\mathbf{E}_{\mathcal{T}_1} + \mathbf{E}_{\mathcal{D}_{tgt}-\mathcal{T}_1} ) \right)
76
+ \end{equation*}$$ Suppose we learn the parameters $\hat{\theta}$ for the semi-supervision task using only $\mathcal{T}_1$ and $\mathcal{D}_{src}$, that is, we reject the unlabeled observations from $\mathcal{D}_{tgt}-\mathcal{T}_1$ while learning $\hat{\theta}$. The resultant optimization for learning the parameters $\hat{\theta}$ is $$\begin{equation}
77
+ \mathop{\mathrm{arg\,max}}_{\theta} \left(\lambda \mathbf{E}_{sup} +(1-\lambda) \mathbf{E}_{\mathcal{T}_1} \right) \approx \mathop{\mathrm{arg\,max}}_{\theta} \left(\lambda \mathbf{E}_{sup} +(1-\lambda) \mathbf{E}_{sup} \right).
78
+ \end{equation}$$ Since $|L(f_{\theta^*_{sup}}) -L(f_{\theta^*_{unsup,\mathcal{T}_1}})|<\epsilon$, *i.e.*, the unlabeled images from $\mathcal{T}_1$ are similar to the labeled images in $\mathcal{D}_{src}$ and yield a similar error, we can approximate the unsupervised term in the optimization for $\hat{\theta}$ by $\mathbf{E}_{sup} = \mathbf{E}_{f(x,y)}[\log f(x,y \mid \theta)]$. Thus, $\left\{\hat{\theta}_{(\ell+u)}\right\}_{u\in \mathcal{T}_1} \stackrel{p}{\longrightarrow} \theta_{sup}^{*}$.
79
+
80
+ The key takeaway from the above theorem and corollaries is that if the SSR model is misspecified, then increasing the number of unlabeled images might degrade SSR performance. In such cases, to boost SSR performance we can create a subset of unlabeled images ($\mathcal{T}_1$) by rejecting the unlabeled images that adversely affect SSR performance. By doing this, the SSR model acts nearly like a specified model on $\mathcal{T}_1$ and $\mathcal{D}_{src}$, and the semi-supervised performance of the SSR method tends towards the fully-supervised performance.
81
+
82
+ <figure id="fig:t-SNNE" data-latex-placement="h!">
83
+ <img src="tsne_plot.jpg" />
84
+ <figcaption>t-SNE plot of Syn2Real<span class="citation" data-cites="Yasarla_2020_CVPR"></span> for the cross-domain experiment with <span class="math inline">𝒟<sub><em>s</em><em>r</em><em>c</em></sub> = Rain800</span> and <span class="math inline">𝒟<sub><em>t</em><em>g</em><em>t</em></sub> = SPA-data</span>. Here, we can see the SSR method is misspecified since <span class="math inline"><em>V</em><sub><em>s</em></sub> = {<em>s</em><sub><em>i</em></sub>}<sub><em>i</em> = 1</sub><sup><em>M</em><sub><em>l</em></sub></sup> ≠ ∅</span> and <span class="math inline"><em>V</em><sub><em>t</em></sub> = {<em>t</em><sub><em>i</em></sub>}<sub><em>i</em> = 1</sub><sup><em>M</em><sub><em>u</em></sub></sup> ≠ ∅</span>. In order to boost Syn2Real performance we need to create <span class="math inline">𝒯<sub>1</sub></span>, and train using <span class="math inline">𝒟<sub><em>s</em><em>r</em><em>c</em></sub> + 𝒯<sub>1</sub></span>, <em>i.e.</em>  not using unlabeled images from <span class="math inline">𝒟<sub><em>t</em><em>g</em><em>t</em></sub> − 𝒯<sub>1</sub></span>. Rain streaks in <span class="math inline">𝒟<sub><em>t</em><em>g</em><em>t</em></sub> − 𝒯<sub>1</sub></span> are very different; for example, in images 1, 3 and 6 the rain streaks are curved, look like irregular patches, or point in all directions. On the other hand, unlabeled images from <span class="math inline">𝒯<sub>1</sub></span> are similar to <span class="math inline">𝒟<sub><em>s</em><em>r</em><em>c</em></sub></span>.</figcaption>
85
+ </figure>
86
+
87
+ Consider an SSR method $f_{\hat{\theta}} \in \mathcal{F}$ that leverages weather information from unlabeled and labeled images to learn the parameters $\hat{\theta}$. As discussed in the previous section, if the model $\mathcal{F}$ is misspecified, then a domain gap can exist between the unlabeled and labeled images. In other words, the projected latent labeled and unlabeled vectors can have some different basis vectors, implying $V_s = \{s_i\}_{i=1}^{M_l} \neq \emptyset$ and $V_t = \{t_i\}_{i=1}^{M_u} \neq \emptyset$. For example, in the t-SNE plot of Syn2Real [@Yasarla_2020_CVPR] in Fig. [2](#fig:t-SNNE){reference-type="ref" reference="fig:t-SNNE"}, for a cross-domain experiment with $\mathcal{D}_{src}=\text{Rain800}$ and $\mathcal{D}_{tgt}=\text{SPA-data}$, we can see that some unlabeled images are similar or close to the labeled images and others are not. In such cases we can use Corollary 2 and approximate the model $\mathcal{F}$ as a specified model by training on the labeled dataset $\mathcal{D}_{src}$ and on a subset of unlabeled images ${\mathcal{T}_1}\subset \mathcal{D}_{tgt}$. In this way, we make the model $\mathcal{F}$ behave as a nearly specified model on $\mathcal{D}_{src}+{\mathcal{T}_1}$ and can boost the performance of an SSR method, *i.e.*, training the SSR method on $\mathcal{D}_{src}+{\mathcal{T}_1}$ moves its performance towards the fully-supervised performance. To this end, we propose ART-SS, which rejects unlabeled images that are not similar to the labeled images or that adversely affect SSR performance while training the SSR method.
88
+
89
+ <figure id="fig:Method" data-latex-placement="h!">
90
+ <p><img src="Method.jpg" style="width:60.0%" alt="image" /> <img src="reject_module.png" style="width:38.0%" alt="image" /></p>
91
+ <figcaption><strong>Overview of the proposed adaptive rejection technique</strong>. In our rejection, labeled and unlabeled images are projected to the latent space to obtain <span class="math inline"><em>z</em><sup><em>l</em></sup> ∈ <em>Z</em><sub><em>s</em><em>r</em><em>c</em></sub></span> and <span class="math inline"><em>z</em><sup><em>u</em></sup> ∈ <em>Z</em><sub><em>t</em><em>g</em><em>t</em></sub></span>. Given <span class="math inline"><em>z</em><sub><em>u</em></sub>, <em>Z</em><sub><em>t</em><em>g</em><em>t</em></sub>, <em>Z</em><sub><em>s</em><em>r</em><em>c</em></sub></span>, we use a rejection module to decide whether or not to update the network weights <span class="math inline"><em>f</em><sub><em>θ</em></sub></span> using <span class="math inline"><em>x</em><sub><em>u</em></sub></span>. Note that the semi-supervised technique in this figure can be one of <span class="citation" data-cites="wei2019semi Yasarla_2020_CVPR Huang_2021_CVPR"></span>. Here a green “tick" means perform the SSR update using the unlabeled image <span class="math inline"><em>x</em><sub><em>i</em></sub><sup><em>u</em></sup></span>, and a red “x" means do not perform the SSR update using <span class="math inline"><em>x</em><sub><em>i</em></sub><sup><em>u</em></sup></span>.</figcaption>
92
+ </figure>
93
+
94
+ <figure id="fig:psi_graph" data-latex-placement="h!">
95
+ <img src="Psi_graphs.png" style="width:100.0%" />
96
+ <figcaption>Normalized histogram graphs of <span class="math inline"><em>ψ</em></span> for a cross-domain experiment where <span class="math inline">𝒟<sub><em>s</em><em>r</em><em>c</em></sub></span> is Rain800 <span class="citation" data-cites="zhang2019image"></span> and <span class="math inline">𝒟<sub><em>t</em><em>g</em><em>t</em></sub></span> is SPA-data <span class="citation" data-cites="wang2019spatial"></span>. The three graphs correspond to three different SSR methods <span class="citation" data-cites="wei2019semi Yasarla_2020_CVPR Huang_2021_CVPR"></span>.</figcaption>
97
+ </figure>
98
+
99
+ Fig [3](#fig:Method){reference-type="ref" reference="fig:Method"} gives an overview of the proposed method, where we introduce a rejection module in order to carefully reject the unlabeled observations in $\mathcal{D}_{tgt}$ that hurt the performance of SSR methods. In our ART-SS method, we project the labeled and unlabeled images from $\mathcal{D}_{src}$ and $\mathcal{D}_{tgt}$ to obtain the latent vectors $Z_{src}$ and $Z_{tgt}$, respectively. Note that [@Yasarla_2020_CVPR; @Huang_2021_CVPR] express the latent vectors of labeled and unlabeled images using either a fixed number of basis latent vectors [@Huang_2021_CVPR] or the nearest labeled latent vectors [@Yasarla_2020_CVPR]. So, we can define an error function $L$ for every image with the help of the similarity index ($\psi$), *i.e.* $L=-\psi$. Here, the similarity index ($\psi$) for each image is computed as $\psi = \frac{1}{M_{NN}}\sum_{z^{k}\in NN(z)} \frac{\langle z,z^{k}\rangle}{|z||z^{k}|}$, where $NN(z)$ denotes the nearest neighbors of $z$ and $M_{NN}$ the number of nearest neighbors. Fig [4](#fig:psi_graph){reference-type="ref" reference="fig:psi_graph"} shows sample normalized histograms of $\psi$, from which we can observe that there is some domain gap between the labeled and unlabeled images. We can also deduce from Fig [4](#fig:psi_graph){reference-type="ref" reference="fig:psi_graph"} that some unlabeled images are similar to the labeled images and will help improve SSR performance, while a few unlabeled images may hurt SSR performance. According to Corollary 2, to make an SSR method specified we should create a subset $\mathcal{T}_1$ whose unlabeled images satisfy $|L_u-L_l|<\epsilon$, where $\epsilon$ is a small positive number. Hence, one can devise a rejection rule that rejects the unlabeled images that might hurt SSR performance. To this end, we propose a novel rejection technique where we adaptively update the threshold $T$ using the $\psi$ values and aleatoric uncertainty [@kendall2017uncertainties]. Note that we compute the aleatoric-uncertainty [@kendall2017uncertainties] variance $\sigma$, which makes ART-SS robust to the network's errors in the latent representations, since $\sigma$ indicates how confident the network is about the computed latent representation vector $z$. By re-scaling the $\psi$ values with $\sigma$, we give higher importance to highly confident samples, and lower importance to less confident ones, while computing the threshold $T$ and rejecting the unlabeled images.
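+
+ As a minimal illustration (our own sketch with array-based inputs rather than the actual network code, and with $NN(z)$ taken to be the $M_{NN}$ most cosine-similar labeled vectors, an assumption for simplicity), the similarity index $\psi$ of a latent vector $z$ against the labeled latents $Z_{src}$ can be computed as:
+
+ ```python
+ import numpy as np
+
+ def similarity_index(z, Z_src, M_NN=4):
+     """psi: mean cosine similarity of z to its M_NN nearest labeled latent vectors."""
+     sims = (Z_src @ z) / (np.linalg.norm(Z_src, axis=1) * np.linalg.norm(z) + 1e-8)
+     return np.sort(sims)[-M_NN:].mean()   # average over the M_NN nearest neighbors NN(z)
+ ```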
100
+
101
+ In our adaptive rejection technique, we project every labeled image $x^l_i$ and unlabeled image $x^u_i$ to a latent space and obtain $z^l_i,\sigma^l_i$ and $z^u_i,\sigma^u_i$, respectively, using aleatoric uncertainty. For more details on how to compute $\sigma$, please refer to the supplementary document. Thus, we obtain $Z^{src},\{\sigma^l\}^{src}$ and $Z^{tgt},\{\sigma^u\}^{tgt}$. Having obtained these values, we compute $\psi^l_i$ and $\psi^u_i$ for each labeled image $x^l_i$ and unlabeled image $x^u_i$, respectively. We define the threshold $T$ as a weighted mean of the $\psi^l_i$ values of the labeled images, *i.e.*, $$\begin{equation}
102
+ \Psi^{src} = \left\lbrace\psi^l_i: \psi^l_i = \frac{1}{M_{NN}}\sum_{z^{k}\in NN(z^l_i)} \frac{\langle z_i^l,z^{k}\rangle}{|z^l_i||z^{k}|}, z^l_i = g_\theta(x^l_i), \forall x^l_i \in \mathcal{D}_{src} \right\rbrace ,
+ \qquad T = \frac{1}{N_\ell} \sum_{i=1}^{N_\ell} \frac{\psi^l_i}{\sigma^l_i},
105
+ \end{equation}$$ where $g_\theta(.)$ is the encoder of the network $f_\theta$; weighting by $\sigma^l_i$ implies that higher importance is given to highly confident samples in deciding the threshold $T$. During semi-supervised training of the network $f_\theta$, we reject an unlabeled image if $\frac{\psi^u_i}{\sigma^u_i}< T$. Fig [3](#fig:Method){reference-type="ref" reference="fig:Method"} gives an overview of the proposed adaptive rejection technique, and we provide pseudocode for it in the supplementary document.
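+
+ A minimal sketch of the threshold and the rejection test (our own illustration; the function names are ours, and `similarity_index` above would supply the $\psi$ values):
+
+ ```python
+ import numpy as np
+
+ def adaptive_threshold(psi_l, sigma_l):
+     """T: sigma-weighted mean of labeled psi values (equation above).
+     Confident samples (small sigma) contribute more to the threshold."""
+     return np.mean(np.asarray(psi_l) / np.asarray(sigma_l))
+
+ def keep_unlabeled(psi_u, sigma_u, T):
+     """Use the unlabeled image for the SSR update only if psi_u / sigma_u >= T."""
+     return (psi_u / sigma_u) >= T
+ ```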
106
+
107
+ Thus, following [@wei2019semi; @Yasarla_2020_CVPR; @Huang_2021_CVPR; @shao2020domain; @li2019semi], we train a semi-supervised network in two phases: (i) a labeled training phase, and (ii) an unlabeled training phase. In the labeled training phase, we learn the network weights $f_\theta$ using the labeled images $x^l_i \in \mathcal{D}_{src}$ in a fully-supervised fashion. Additionally, we compute $\{\psi^l_i\}$ and $\{\sigma^l_i\}$ for all the labeled images, *i.e.*, we compute $\Psi^{src}$ and set the threshold $T$ as explained earlier. In the unlabeled training phase, given an unlabeled image $x^u_i \in \mathcal{D}_{tgt}$, we compute its $\psi^u_i$ and $\sigma^u_i$ values. For each unlabeled image, we check the criterion $\frac{\psi^u_i}{\sigma^u_i}< T$ and decide whether to use the unlabeled image for updating the network weights $f_{\theta}$ using $\mathcal{L}_{unsup}$. Note that $\mathcal{L}_{unsup}$ can be the unsupervised loss proposed in the corresponding SSR method [@wei2019semi; @Yasarla_2020_CVPR; @Huang_2021_CVPR; @shao2020domain; @li2019semi].
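+
+ Putting the pieces together, a hedged sketch of this two-phase loop (the `model`, `encoder`, loss functions, loaders, and `psi_sigma_of` helper are placeholders standing in for whichever SSR method is wrapped, not actual APIs; `adaptive_threshold` and `keep_unlabeled` are the helpers sketched above):
+
+ ```python
+ # Phase (i): labeled training, plus collecting psi/sigma over D_src.
+ for x_l, y_l in labeled_loader:
+     loss = supervised_loss(model(x_l), y_l)
+     loss.backward(); optimizer.step(); optimizer.zero_grad()
+
+ psi_l, sigma_l = psi_sigma_of(encoder, labeled_images, Z_src)
+ T = adaptive_threshold(psi_l, sigma_l)       # threshold from the labeled set
+
+ # Phase (ii): unlabeled training with the ART-SS rejection criterion.
+ for x_u in unlabeled_loader:
+     psi_u, sigma_u = psi_sigma_of(encoder, x_u, Z_src)
+     if keep_unlabeled(psi_u, sigma_u, T):    # otherwise x_u is rejected
+         loss = unsup_loss(model, x_u)        # L_unsup of the wrapped SSR method
+         loss.backward(); optimizer.step(); optimizer.zero_grad()
+ ```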
2203.13920/main_diagram/main_diagram.drawio ADDED
@@ -0,0 +1 @@
 
 
1
+ <mxfile host="app.diagrams.net" modified="2021-02-02T01:34:52.513Z" agent="5.0 (Macintosh)" version="14.2.9" etag="awrmrURTnHvqIzL4vGL3" type="device"><diagram id="PbjPuX2gPiNRbhzq6PNm">7VxZc9s2EP41mmkfohF48zGxlbYzTsZTt9P0ESZhkS1FqCBkS/n1BUmAJ6TAFq+hJT+YWIIQ8O2B3eVCC/1me/iFwF3wBfsoWmgr/7DQbxeaBnTdZv9SyjGnuAYnbEjo804l4SH8jjhxxan70EdJrSPFOKLhrk70cBwjj9ZokBD8Uu/2hKP6t+7gBrUIDx6M2tS/Qp8GOdUxVyX9VxRuAvHNYMXvbKHozAlJAH38UiHp64V+QzCm+dX2cIOiFDyBS/7c5xN3i4kRFFOVBzQ+DXoUa0M+WypvYkIDvMExjNYl9RPB+9hH6QAr1ir73GG8Y0TAiP8gSo+cb3BPMSMFdBvxu2xu5PiNP581/k4bS1M0bw/Vm7dH3srnmk6wttoE74nHSTqXBkg2iAOgGW1QQAE1k1GEt4h9DetCUARp+FwfHnJh2RT9SjzZBYdUDi+fzjOM9nzQhWZFbBKfHtnFJr3YCUqyL2hA0Nj4FXKFWqE1+FfnzksQUvSwgxk+L0wb65x4CqPoBkeYZM/qvokc30jnQgn+t5BtUHzPMyIUHU7K2glY+QMGl3mu9LqtL3UO0EtFi3ivoKJA+upyVhgzkPQuRZg/eo9DNmLBI6DVmWSYxhLYq+LTGDBXPT5G1cw0hrWtVw2bq29r2IzFxVqVuG6+TQG3Yytg9+pmiPYAumb9GPX9hMzek+Mhz+sHdXc41O23oR7PD3XTGg51Zwb7StWDEj521YUC5ngulDsKvoeQFvCy6wq6rFWCmzaOVaS/VRmkxhMfJkE2V/B2Blnj8UdM55zZ+T4lY2+mfykdx7RCzz/dmCPQdHcG3AUAaME1oMKAtyhMD0ZMEgcCe0QlGSfOnrIVk3BoTAYpROoyKzaa8zSCFRvSqwKGhB/Da5CKIJsSU+OOKMkKIW+Cn+gWHn764+dZCGohhWNst9Yolr3HrdNuy3Mf+SdlgBXC2pnL86CGd5x4dkD3MWGSTT+mb4EYIcYxErTPYYqKsp64bT3RnRH1xP2xnsB3FocZq/E2huIN31WRzitS8Qa3okginzeGIokxXqtIcw4Fmoo05I4kPP9z/FhvH5Hvh/GG9fsCKQkPs4DdAg37ZbdhL97YVXEHRhfAn46J/fC5hqT13z6tWMiw+JBkxukj6wDA7lDebGpE2vnyUUrupp05jZwnSmWjoOXzapGzNdepk4ThutyLl3uJmchLJ4RB4NtiJya4boENe2kq2WDQhSmQpWManECSPXE3Z++yWdQwcEGLSm2DlCdjFTcUPKmrCLtjZZ9huTVkPYRKQYSMVfv3pD6GMyBDFJJKUobM2clvMsSUeJu9McS5+ppX5+s9LneKvmbLNGuOsmdTdLzIHJxOZk5SMK7m4GoO3o85UI89tQ5cA+FenPPVvt79mWb+8oNGM3DFXIXibWniz+kCcIUE+G8xRZky3cHHmWAOgN6I5yVJbgAkoHeRbNUVstxf17/PG3HZa4X+EJeltxsIotgXL7i8CCZJ6C36rMOo1VyoV8NV0DIlYAnahUeDbGEXBLMcsz6E6mEgq1HeoNsNbnZ3/EdXKCNLArhLL5Mw3kSIs3uC2uI0nXKJstgyXTFPc1pZV2S5xi5wJJgymcQxa34wzJHM0MpuhjsDIitLDXaM7HSAddtuY2/A2m1rPl5hnjZQMYWoQKrtJJoy47o/AH06r1f4Lbc43RzuCfb3Hp2FY2M239tL6o5ssBRF510nU/VxzkydEH17INEXPuFURN9QiFvnL/oyl75H0TdksauKUy8Twg4c/eIIztJ17UW1Jg/Y1uJsVR5r3CMSMggQqWrKK6vrNIlWqB8XHCa0aAmN2xAF1dDCbGWqegstjNMB+ySTldcU9TVF3c1yL9mRBqqOkkVwHeWnWbP8eabcbpQ/cqWv/wc=</diagram></mxfile>
2203.13920/main_diagram/main_diagram.pdf ADDED
Binary file (16.1 kB). View file
 
2203.13920/paper_text/intro_method.md ADDED
@@ -0,0 +1,50 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Introduction
2
+
3
+ Natural Language Understanding (NLU) models are used for different tasks such as question answering [\(Hirschman and Gaizauskas,](#page-5-0) [2001\)](#page-5-0), machine translation [\(Macherey et al.,](#page-5-1) [2001\)](#page-5-1) and text summarization [\(Tas and Kiyani,](#page-5-2) [2007\)](#page-5-2). These models are often trained on crowd-sourced data that may contain sensitive information such as phone numbers, contact names and street addresses. [Nasr et al.](#page-5-3) [\(2019\)](#page-5-3), [Shokri et al.](#page-5-4) [\(2017\)](#page-5-4) and [Carlini et al.](#page-5-5) [\(2018\)](#page-5-5) have presented various attacks to demonstrate that neural networks can leak private information. We focus on one such class of attacks, called Model Inversion Attack (ModIvA) [\(Fredrikson et al.,](#page-5-6) [2015\)](#page-5-6), where an adversary aims to reconstruct a subset of the data on which the machine-learning model under attack was trained. We also demonstrate that established ML practices (e.g. dropout) offer strong defense against ModIvA.
4
+
5
+ In this work, we start with inserting potentially sensitive target utterances called 'canaries'[1](#page-0-0) along with their corresponding output labels into the training data. We use this augmented dataset to train an NLU model f<sub>θ</sub>. We perform an open-box attack on this model, i.e., we assume that the adversary has access to all the parameters of the model, including the word vocabulary and the corresponding embedding vectors. The attack takes the form of text completion, where the adversary provides the start of a canary sentence (e.g., 'my pin code is') and tries to reconstruct the remaining, private tokens of an inserted canary (e.g., a sequence of 4 digit tokens). A successful attack on f<sub>θ</sub> reconstructs all the tokens of an inserted canary. We refer to such a ModIvA as a 'Canary Extraction Attack' (CEA). In such an attack, the token reconstruction is cast as an optimization problem where we minimize the loss function of the model f<sub>θ</sub> with respect to its inputs (the canary utterance), keeping the model parameters fixed.
6
+
7
+ Previous ModIvAs were conducted on computer vision tasks where there exists a continuous mapping between input images and their corresponding embeddings. However, in the case of NLU, the discrete mapping of tokens to embeddings makes the token reconstruction from continuous increments in the embedding space challenging. We thus formulate a discrete optimization attack, in which the unknown tokens are eventually represented by a one-hot-like vector of the vocabulary length. The token in the vocabulary with the highest softmax activation is expected to be the unknown token of the canary. We demonstrate that in our attack's best configuration, for canaries of type *"my pin code is* k<sub>1</sub>k<sub>2</sub>k<sub>3</sub>k<sub>4</sub>*"*, k<sub>i</sub> ∈ {0, 1, . . . , 9}, 1 ≤ i ≤ 4, we are able to extract the numeric pin k<sub>1</sub>k<sub>2</sub>k<sub>3</sub>k<sub>4</sub> with an accuracy of 0.5 (a lower bound on this accuracy using a naive random guessing strategy for a combination of four digits equals 10<sup>−4</sup>).
8
+
9
+ <span id="page-0-0"></span><sup>1</sup> Following the terminology in [Carlini et al.](#page-5-5) [\(2018\)](#page-5-5)
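+
+ To make the attack formulation concrete, here is a hedged PyTorch-style sketch of such a continuous relaxation (our own illustration, not the paper's exact code: `model`, `emb`, `known_prefix_ids`, `nlu_loss`, and `canary_label` are hypothetical stand-ins, and we assume the model consumes embedding sequences directly):
+
+ ```python
+ import torch
+
+ n_unknown = 4                                        # e.g., the four pin digits
+ V, d = emb.shape                                     # vocabulary size, embedding dim
+ z = torch.zeros(n_unknown, V, requires_grad=True)    # relaxed scores for unknown tokens
+ opt = torch.optim.Adam([z], lr=0.1)
+
+ prefix = emb[known_prefix_ids]                       # embeddings of "my pin code is"
+ for _ in range(500):
+     soft_tokens = torch.softmax(z, dim=-1) @ emb     # one-hot-like embedding mixtures
+     inputs = torch.cat([prefix, soft_tokens]).unsqueeze(0)
+     loss = nlu_loss(model(inputs), canary_label)     # model parameters stay fixed
+     opt.zero_grad(); loss.backward(); opt.step()
+
+ recovered = z.argmax(dim=-1)                         # tokens with highest softmax activation
+ ```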
10
+
11
+ Since we present a new application of ModIvA to NLU models, defenses against them are an important ethical consideration to prevent harm and are explored in Section 6. We observe that standard training practices commonly used to regularize NLU models successfully thwart this attack.
12
+
13
+ # Method
14
+
15
+ We propose three commonly used modeling techniques as defense mechanisms: Dropout (D), Early Stopping (ES) (Arpit et al., 2017), and including a Character Embeddings layer in the NLU model (CE). D and ES are regularization techniques that reduce memorization and overfitting. CE makes the optimization problem in (3) more difficult, by concatenating the embeddings of each input token with a character-level representation. This character-level representation is obtained using a convolution layer on the input sentence (Ma and Hovy, 2016).
16
+
17
+ For defense using D, we use a dropout of 20% and 10% while training the NLU model. For ES, we stop training the NLU model under attack if the validation loss does not decrease for 20 consecutive epochs, to prevent over-training.
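+
+ A minimal sketch of the ES defense as described (our illustration; `train_one_epoch`, `evaluate`, the loaders, and `max_epochs` are assumed helpers, and the model itself would be built with dropout, e.g. `nn.Dropout(0.2)`):
+
+ ```python
+ best, patience, wait = float("inf"), 20, 0
+ for epoch in range(max_epochs):
+     train_one_epoch(model, train_loader)
+     val_loss = evaluate(model, val_loader)
+     if val_loss < best:
+         best, wait = val_loss, 0          # improvement: reset the counter
+     else:
+         wait += 1
+         if wait >= patience:              # 20 epochs without improvement
+             break                         # stop to prevent over-training
+ ```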
20
+
21
+ In this section we present the performance of the proposed defenses against ModIvA. To do so, we evaluate the attack on NLU models trained with each defense mechanism individually, and in all combinations. The canaries are inserted into the Snips dataset and repeated 10, 500 and 1000 times. The results are summarized in Table [3.](#page-4-0) We observe that the attack accuracy for each defense (used individually and in combination) is nearly zero for all canaries and is thus omitted from the table. We also note that the HDT approaches the random baseline for most defense mechanisms. The attack performance is comparable to a random guess when the three mechanisms are combined. However, when dropout or character embedding is used alone, HDT values are lower than the baseline, indicating the importance of combining multiple defense mechanisms. Additionally, training with the defenses does not have any significant impact on the performance of the NLU model under attack. The defenses thus successfully thwart the proposed attack without impacting the performance of the NLU models.
22
+
23
+ <span id="page-4-0"></span>
24
+
25
+ | R | Defense Mechanism | ↓HDT: Color | ↓HDT: Pin | ↓HDT: Call |
+ |------|---------------------|-------|------|------|
+ | – | Baseline | 0.916 | 0.90 | 0.90 |
+ | 10 | No defense | 0.30 | 0.33 | 0.40 |
+ | 10 | Dropout (D) | 0.85 | 0.80 | 0.76 |
+ | 10 | Early Stopping (ES) | 0.80 | 0.93 | 0.95 |
+ | 10 | Char. Emb. (CE) | 0.65 | 0.75 | 0.90 |
+ | 10 | D + ES | 0.98 | 0.90 | 0.95 |
+ | 10 | ES + CE | 0.90 | 0.83 | 0.90 |
+ | 10 | D + ES + CE | 0.90 | 0.90 | 0.90 |
+ | 500 | No defense | 0.39 | 0.27 | 0.38 |
+ | 500 | Dropout (D) | 0.65 | 0.54 | 0.83 |
+ | 500 | Early Stopping (ES) | 0.85 | 1.00 | 0.75 |
+ | 500 | Char. Emb. (CE) | 0.58 | 0.93 | 0.68 |
+ | 500 | D + ES | 0.85 | 0.93 | 0.98 |
+ | 500 | ES + CE | 0.93 | 0.98 | 0.78 |
+ | 500 | D + ES + CE | 0.95 | 0.88 | 1.00 |
+ | 1000 | No defense | 0.35 | 0.18 | 0.48 |
+ | 1000 | Dropout (D) | 0.35 | 0.78 | 0.58 |
+ | 1000 | Early Stopping (ES) | 0.90 | 0.83 | 0.85 |
+ | 1000 | Char. Emb. (CE) | 0.70 | 0.68 | 0.78 |
+ | 1000 | D + ES | 0.88 | 0.98 | 0.90 |
+ | 1000 | ES + CE | 0.88 | 1.00 | 0.95 |
+ | 1000 | D + ES + CE | 0.95 | 0.93 | 0.95 |
49
+
50
+ Table 3: Attack performance for the canary *color*, *pin* and *call* after incorporating defenses while training the target NLU model, with R ∈ {10, 500, 1000}.
2204.10670/paper_text/intro_method.md ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Introduction
2
+
3
+ Transformer models have been widely used for many tasks such as text classification [@vaswani2017attention], text summarization, promoter region prediction [@zaheer2020big], and image classification [@dosovitskiy2020vit]. The main engine in the Transformer is the self-attention mechanism, which mixes long-range tokens of a long sequence in parallel. This fundamental innovation eliminated the sequential dependency in recurrent neural networks and was used as a building block for many powerful models, such as BERT [@devlin2018bert], GPT [@brown2020gpt] and ERNIE [@sun2019ernie].
4
+
5
+ However, the original self-attention is not scalable because it requires computing and storing all pairwise dot-products, which incurs $O(N^2)$ cost for sequence length $N$. This scalability issue has significantly restricted the application of neural models based on self-attention.
6
+
7
+ Various methods have been introduced to alleviate the quadratic cost of full attention. Some of them attempt to shorten the sequence length [@linformer; @enformer], even though much information is lost. Others try to break up the softmax by a certain kernel factorization. Another family of methods sparsifies the attention matrix with predefined attention patterns [@beltagy2020longformer; @zaheer2020big; @ainslie2020etc; @child2019generating; @tay2019lightweight]. However, most Transformer variants stick to dot-product self-attention, whose expressive power is restricted by the low-rank bottleneck [@mhabottleneck] because the dimensionality of the dot-product space is much smaller than the sequence length. Therefore, they cannot accurately model the transformation if the attention is intrinsically high-rank.
8
+
9
+ This paper proposes a scalable and effective attention building block called Paramixer, which uses neither dot-products nor softmax. Our method directly parameterizes the mixing links in several sparse factors whose product forms the attention matrix, where all factorizing matrices are full-rank. Therefore, Paramixer does not suffer from the low-rank bottleneck. We present two ways to specify the non-zero positions in each sparse factor. Both lead to an economical approximation of the full attention matrix, with a computing cost as low as $O(N\log N)$. As a result, our method can easily model very long sequential data.
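+
+ The following numpy sketch illustrates the core idea under our own simplifying assumptions (fixed random link weights instead of learned, data-dependent parameters, and a chord-style connectivity that we assume here for illustration): multiplying $\log_2 N$ sparse factors, each touching only the diagonal and one offset of $2^k$, lets every token influence every other token at $O(N\log N)$ cost:
+
+ ```python
+ import numpy as np
+
+ N, d = 8, 16
+ rng = np.random.default_rng(0)
+ V = rng.normal(size=(N, d))                  # token values to be mixed
+
+ out = V
+ for k in range(int(np.log2(N))):             # log2(N) sparse factors
+     w_self  = rng.normal(size=(N, 1))        # weight of the diagonal link
+     w_chord = rng.normal(size=(N, 1))        # weight of the offset-2**k link
+     # Row i of this factor has non-zeros only at columns i and (i + 2**k) mod N,
+     # so applying it costs O(N); all log2(N) factors together cost O(N log N).
+     out = w_self * out + w_chord * np.roll(out, -(2 ** k), axis=0)
+ ```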
10
+
11
+ We have tested Paramixer on various sequence data sets and compared it with many popular self-attention neural networks based on dot-products. The experimental results show that Paramixer achieves the best performance on very long sequence tasks, including synthetic data inference, genome classification, and character-level long document classification. Paramixer also achieves state-of-the-art accuracy on the public Long Range Arena benchmark tasks.
12
+
13
+ We organize the rest of the paper as follows. Section 2 investigates dot-product self-attention and its related work. Section 3 introduces the development clue and model architecture of Paramixer. The experimental settings and results are presented in Section 4, and we conclude the paper in Section 5.
2206.07837/main_diagram/main_diagram.drawio ADDED
@@ -0,0 +1 @@
 
 
1
+ <mxfile host="app.diagrams.net" modified="2022-06-13T08:51:44.548Z" agent="5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/102.0.5005.63 Safari/537.36 Edg/102.0.1245.33" version="20.0.1" etag="3zO1JmvBSer9JYCsiATg" type="device"><diagram id="OYYM-maIlwKjtcy2M_M1">3Vpdc5s4FP01ntk+bAYhwOYxcdzdl047k51p8tRRjGK0xcgr5NjeX18JSYAEbl0bHNoZTyxd9MU9R+dK15nA+Xr/F0Ob9ANNcDbxvWQ/gfcT35/OfPFXGg7KEEQzZVgxkigTqA0P5H+sjZ62bkmCC6shpzTjZGMblzTP8ZJbNsQY3dnNXmhmz7pBKz2jVxselijDrWafScJTZZ2FjdZ/Y7JKzczA00/WyDTWQxQpSuiuMRdcTOCcUcpVab2f40z6zvhFDfT+yNNqYQzn/JQOGohXlG31u038QHw+Pv8rHed7f4jSO2XTK+YH4wYxlPC4qNztUsLxwwYt5ZOdwFzYUr7ORA2I4gvNuUYRyLqeFDOO90cXDip3CBphusacHUQT0wFG/k2oemkWxdrJuxoS4OkXTBtwQN0OaRasqsFrR4mC9lW332CH36JMTHBXbFBuuSn6byvRLJ3wZ1F64VY0AGCzL11hnovSSn2Xzg7nz4KWxWEtvibTu8fJ9P7L3AChphKLVLOZjufig4qN2iYvZI+TvgCKwY1vIwSCsAWRMTURMrZLEAqOIiSB6AGhpxYYauDewXghWTanGWXlQHAxXUSLqCeQgtYuuiZGYQdGrtvy5FYqtqgtM1QUZGl7i9Ftnkg33XsdQoP3hD/qlrL8JNuJF1a1+73uVlYOppKLt3hsVhq9ZLXuVtZMP7V0nJjQYRSebtkSW6rBEVthbtH0BMgaeJhA08TD2BjOECev9iK6QNIzfKKk3A4VIwLP5sPUAVq9kO7WDC3uSBG0R6rwNiMpP7RGKllTvfhJRIoul+Pgp+T4Vsqx+BZ2KnZgRnIsaiRPhF1+rqTTx6QhQUVabgqgK58Q55jlpUW8rLNVgp7UJIQud9oheSgtmV6u9+dRQIP+xsFgJIj78HrRYzZk9LAiAWhGgqpyJA6YmGPKp8ScE2JH3I4d0dhiRxi7bHCO2mOMHfHlwuF9VzgWv4A29KEFkedgZi5fV9ACkxm47oXsrcN8wRn9ihtPYARjKHusGEqIAM48y2mOh7vXXVP1ARhc9ocR8NGptbtfwezck37gqnUAh1Jr0JWxenP4u08KvdwZp+24D6o841ioFM4cKsV9XRqBqxk9UqkriTdI+qGmT3WAPCH9ULPnySJPn+mH8VEJRjYBzk4/uAO5lOyRSF25xuNE0rHYOpU1KKWCuvldwb24SaiXW/Za9bwsxdU7x/w2x+KxUezt1UpQAR0azTayQfEzsdqzftcRBTXk2RQeNBX7He6MhxWt64t75TyZFeHUPQ75g0lPV+bzh7j9k5K8VBrEePejIz/eweP58R9Ih6VZlvSJ5b0nWa1+Yk2N+gmSE7QlZ3Rn7WDm3o3PzYy0cqyhM1JfktOaKOpZcroytr/b8WtEDATeTRzZiPrnkrBrsN4O6qJa/wOEal7/FwlcfAM=</diagram></mxfile>
2206.07837/main_diagram/main_diagram.pdf ADDED
Binary file (38 kB). View file
 
2206.07837/paper_text/intro_method.md ADDED
@@ -0,0 +1,173 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Introduction
2
+
3
+ To perform reliably in real world settings, machine learning models must be robust to distribution shifts – where the training distribution differs from the test distribution. Given data from multiple domains that share a common optimal predictor, the *domain generalization (DG)* task (Wang et al., 2021; Zhou et al., 2021) encapsulates this challenge by evaluating accuracy on an unseen domain. Recent empirical studies of DG algorithms (Wiles et al., 2022; Ye et al., 2022) have characterized different kinds of distribution shifts across domains. Using MNIST as an example, a *diversity* shift is when domains are created by adding new values of a spurious attribute like rotation (e.g., Rotated-MNIST dataset (Ghifary et al., 2015; Piratla et al., 2020)), whereas a *correlation* shift is when domains exhibit different values of correlation between the class label and a spurious attribute like color (e.g., Colored-MNIST (Arjovsky et al., 2019)). Partly because advances in representation learning for DG (Ahuja et al., 2021; Krueger et al., 2021; Mahajan et al., 2021; Arjovsky et al., 2019; Li et al., 2018a; Sun & Saenko, 2016) have focused on either one of the shifts, these studies find that the performance of state-of-the-art DG algorithms is not consistent across different shifts: algorithms performing well on datasets with one kind of shift fail on datasets with another kind of shift.
4
+
5
+ In this paper, we pose a harder, more realistic question: What if a dataset exhibits two or more kinds of shifts *simultaneously*? Such shifts over multiple attributes (where an attribute refers to a spurious high-level variable like rotation) are often observed in real data. For example, satellite imagery data demonstrates distribution shifts over time as well as the region captured (Koh et al., 2021). To study this question, we introduce *multi-*attribute distribution shift datasets. For instance, in our Col+Rot-MNIST dataset (see Figure 1), both the color and rotation angle of digits can shift
6
+
7
+ ![](_page_1_Figure_1.jpeg)
8
+
9
+ ![](_page_1_Figure_2.jpeg)
10
+
11
+ | Color | Rotation | Col+Rot |
+ |----------------------------------|----------------------------------|----------------------------------|
+ | $30.9 \pm 1.6$ | $61.9 \pm 0.5$ | $25.2 \pm 1.3$ |
+ | $50.0 \pm 0.1$ | $61.2 \pm 0.3$ | $39.6 \pm 6.7$ |
+ | $29.7 \pm 1.8$ | $62.2 \pm 0.5$ | $24.1 \pm 0.6$ |
+ | $29.4 \pm 0.2$ | $62.3 \pm 0.4$ | $32.2 \pm 7.0$ |
+ | $\textbf{70.4} \pm \textbf{0.5}$ | $\textbf{62.4} \pm \textbf{0.4}$ | $\textbf{54.1} \pm \textbf{1.3}$ |
19
+
20
+ Figure 1: (a) Our *multi-attribute* distribution shift dataset Col+Rot-MNIST. We combine Colored MNIST (Arjovsky et al., 2019) and Rotated MNIST (Ghifary et al., 2015) to introduce distinct shifts over *Color* and *Rotation* attributes. (b) The causal graph representing the data generating process for Col+Rot-MNIST. *Color* has a correlation with *Y* which changes across environments while *Rotation* varies independently. (c) Comparison with DG algorithms optimizing for different constraints shows the superiority of *Causally Adaptive Constraint Minimization (CACM)* (full table in Section 5).
21
+
22
+ across data distributions. We find that existing DG algorithms that are often targeted for a specific shift fail to generalize in such settings: best accuracy falls from 50-62% for individual shift MNIST datasets to <50% (lower than a random guess) for the multi-attribute shift dataset.
23
+
24
+ To explain such failures, we propose a causal framework for generalization under multi-attribute distribution shifts. We use a canonical causal graph to model commonly observed distribution shifts. Under this graph, we characterize a distribution shift by the type of relationship between spurious attributes and the classification label, leading to different *realized* causal DAGs. Using *d*-separation on the realized DAGs, we show that each shift entails distinct constraints over observed variables and prove that no conditional independence constraint is valid across all shifts. As a special case of *multi*-attribute, when datasets exhibit a *single*-attribute shift across domains, this result provides an explanation for the inconsistent performance of DG algorithms reported by Wiles et al. (2022); Ye et al. (2022). It implies that any algorithm based on a single, fixed independence constraint cannot work well across all shifts: there will be a dataset on which it will fail (Section 3.3).
25
+
26
+ We go on to ask if we can develop an algorithm that generalizes to different kinds of individual shifts as well as simultaneous *multi-*attribute shifts. For the common shifts modeled by the canonical graph, we show that identification of the correct regularization constraints requires knowing only the type of relationship between attributes and the label, not the full graph. As we discuss in Section 3.1, the type of shift for an attribute is often available or can be inferred for real-world datasets. Based on this, we propose *Causally Adaptive Constraint Minimization (CACM)*, an algorithm that leverages knowledge about the data-generating process (DGP) to identify and apply the correct independence constraints for regularization. Given a dataset with auxiliary attributes and their relationship with the target label, *CACM* constrains the model's representation to obey the conditional independence constraints satisfied by causal features of the label, generalizing past work on causality-based regularization (Mahajan et al., 2021; Veitch et al., 2021; Makar et al., 2022) to multi-attribute shifts.
27
+
28
+ We evaluate *CACM* on novel multi-attribute shift datasets based on MNIST, small NORB, and Waterbirds images. Across all datasets, applying the incorrect constraint, often through an existing DG algorithm, leads to significantly lower accuracy than the correct constraint. Further, *CACM* achieves substantially better accuracy than existing algorithms on datasets with multi-attribute shifts as well as individual shifts. Our contributions include:
29
+
30
+ - Theoretical result that an algorithm using a fixed independence constraint cannot yield an optimal classifier on all datasets.
31
+ - An algorithm, *Causally Adaptive Constraint Minimization (CACM)*, to adaptively derive the correct regularization constraint(s) based on the causal graph that outperforms existing DG algorithms.
32
+ - Multi-attribute shifts-based benchmarks for domain generalization where existing algorithms fail.
33
+
34
+ We consider the supervised learning setup from (Wiles et al., 2022) where each row of train data $(x_i, a_i, y_i)_{i=1}^n$ contains input features $x_i$ (e.g., X-ray pixels), a set of nuisance or spurious attributes $a_i$ (e.g., vertical shift, hospital) and class label $y_i$ (e.g., disease diagnosis). The attributes represent variables that are often recorded or implicit in data collection procedures. Some attributes represent a property of the input (e.g., vertical shift) while others represent the domain from which the input was collected (e.g., hospital). The attributes affect the observed input features $x_i$ but do not cause the target label, and hence are *spurious* attributes. The final classifier g(x) is expected to use only the input features. However, as new values of attributes are introduced or as the correlation of attributes with the label changes, we obtain different conditional distributions P(y|x). Given a set of data distributions $\mathcal{P}$, we assume that the train data is sampled from a subset of training distributions $P_{\mathcal{E}tr} \subset \mathcal{P}$. Attributes and class labels are assumed to be discrete.
35
+
36
+ The goal is to learn a classifier g(x) using train domains such that it generalizes and achieves a similar, small risk on test data from unseen $P_{Ete}$ as it achieves on the train data. Formally, given a set of distributions $\mathcal{P}$ , we define a risk-invariant predictor (Makar et al., 2022) as,
37
+
38
+ **Definition 2.1.** Optimal Risk Invariant Predictor for $\mathcal{P}$ (from (Makar et al., 2022)) Define the risk of predictor g on distribution $P \in \mathcal{P}$ as $R_P(g) = \mathbb{E}_{x,y \sim P} \ell(g(x),y)$ where $\ell$ is cross-entropy or another classification loss. Then, risk-invariant predictors obtain the same risk across all distributions $P \in \mathcal{P}$, and the set of optimal risk-invariant predictors is defined as the set of risk-invariant predictors that obtain the minimum risk on all distributions.
39
+
40
+ $$g_{rinv} \in \arg\min_{g \in G_{rinv}} R_P(g) \ \forall P \in \mathcal{P} \ \text{where} \ G_{rinv} = \{g : R_P(g) = R_{P'}(g) \forall P, P' \in \mathcal{P}\}$$
41
+ (1)
42
+
43
+ An intuitive way to obtain a risk-invariant predictor is to consider only the parts of the input features (X) that cause the label Y and ignore any variation due to the spurious attributes. Let such latent, unobserved causal features be $X_c$ . Due to independence and stability of causal mechanisms (Peters et al., 2017), we can assume that $P(Y|X_c)$ remains invariant across different distributions. Using the notion of risk invariance, we can now define the multi-attribute generalization problem as,
44
+
45
+ **Definition 2.2.** Generalization under Multi-attribute shifts. Given a target label Y, input features X, attributes A, and latent causal features $X_c$ , consider a set of distributions P such that $P(Y|X_c)$ remains invariant while P(A|Y) changes across individual distributions. Using a training dataset $(x_i, a_i, y_i)_{i=1}^n$ sampled from a subset of distributions $P_{\mathcal{E}tr} \subset P$ , the generalization goal is to learn an optimal risk-invariant predictor over P.
46
+
47
+ Special case of single-attribute shift. When |A| = 1, we obtain the single-attribute shift problem that is widely studied (Wiles et al., 2022; Ye et al., 2022; Gulrajani & Lopez-Paz, 2021).
48
+
49
+ In practice, the causal features $X_c$ are unobserved and a key challenge is to learn $X_c$ using the observed (X, Y, A). We focus on representation learning-based (Wang et al., 2021) DG algorithms, typically characterized by a regularization constraint that is added to a standard ERM loss such as cross-entropy. Table 1 shows three independence constraints that form the basis of many popular DG algorithms, assuming environment/domain E as the attribute and $\ell$ as the main classifier loss. We now provide a general principle for deciding which constraints to choose for learning a risk-invariant predictor for a dataset.
50
+
51
+ Table 1: Statistic optimized by DG algorithms. match matches the statistic across E. h is domain classifier (loss $\ell_d$ ) using shared representation $\phi$ .
52
+
53
+ | Constraint | Statistic | Algo. |
+ |------------|-----------|-------|
+ | $\phi \perp \!\!\! \perp E$ | match $\mathbb{E}[\phi(x)\mid E]$ $\forall$ $E$ | MMD |
+ | | $\max_E \mathbb{E}[\ell_d(h(\phi(x)),E)]$ | DANN |
+ | | match $\text{Cov}[\phi(x)\mid E]$ $\forall$ $E$ | CORAL |
+ | $Y \perp \!\!\! \perp E \mid \phi$ | match $\mathbb{E}[Y\mid \phi(x),E]$ $\forall$ $E$ | IRM |
+ | | match $\text{Var}[\ell(g(x),y)\mid E]$ $\forall$ $E$ | VREx |
+ | $\phi \perp \!\!\! \perp E \mid Y$ | match $\mathbb{E}[\phi(x)\mid E,Y=y]$ $\forall$ $E$ | C-MMD |
+ | | $\max_E \mathbb{E}[\ell_d(h(\phi(x)),E)\mid Y=y]$ | CDANN |
58
+
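+ For concreteness, here is a hedged sketch of the first constraint in Table 1 ($\phi \perp \!\!\! \perp E$) in the spirit of MMD, simplified to mean matching only (the function name and penalty form are ours, not a library API):
+
+ ```python
+ import torch
+
+ def mean_match_penalty(phi, env):
+     """Penalize differences in E[phi(x)|E] across environments.
+     phi: (n, d) representations; env: (n,) integer environment labels."""
+     means = [phi[env == e].mean(dim=0) for e in env.unique()]
+     grand = torch.stack(means).mean(dim=0)
+     return sum((m - grand).pow(2).sum() for m in means)
+
+ # total loss = ERM term (e.g. cross-entropy) + lam * mean_match_penalty(phi, env)
+ ```
+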
59
+ We utilize a strategy from past work (Mahajan et al., 2021; Veitch et al., 2021) that uses the graph structure of the underlying data-generating process (DGP). We assume that the predictor can be represented as $g(\boldsymbol{x}) = g_1(\phi(\boldsymbol{x}))$ where $\phi$ is the representation. To learn a risk-invariant $\phi$, we identify the conditional independence constraints satisfied by the causal features $\boldsymbol{X}_c$ in the causal graph and enforce that the learnt representation $\phi$ follows the same constraints. If $\phi$ satisfies the constraints, then any function $g_1(\phi)$ will also satisfy them. Below we show that the constraints are necessary under simple assumptions on the causal DAG representing the DGP for a dataset. All proofs are in Suppl. B.
60
+
61
+ **Theorem 2.1.** Consider a causal DAG $\mathcal{G}$ over $\langle \mathbf{X}_c, \mathbf{X}, \mathbf{A}, Y \rangle$ and a corresponding generated dataset $(\mathbf{x}_i, \mathbf{a}_i, y_i)_{i=1}^n$, where $\mathbf{X}_c$ is unobserved. Assume that graph $\mathcal{G}$ has the following property: $\mathbf{X}_c$ is defined as the set of all parents of $Y$ ($\mathbf{X}_c \to Y$); and $\mathbf{X}_c$, $\mathbf{A}$ together cause $\mathbf{X}$ ($\mathbf{X}_c \to \mathbf{X}$ and $\mathbf{A} \to \mathbf{X}$). The graph may have any other edges (see, e.g., the DAG in Figure 1(b)). Let $\mathcal{P}_{\mathcal{G}}$ be the set of distributions consistent with graph $\mathcal{G}$, obtained by changing $P(\mathbf{A}|Y)$ but not $P(Y|\mathbf{X}_c)$. Then the conditional independence constraints satisfied by $\mathbf{X}_c$ are necessary for a (cross-entropy) risk-invariant predictor over $\mathcal{P}_{\mathcal{G}}$. That is, if a predictor for $Y$ does not satisfy any of these constraints, then there exists a data distribution $P' \in \mathcal{P}_{\mathcal{G}}$ such that the predictor's risk will be higher than its risk in other distributions.
62
+
63
+ Thus, given a causal DAG, using d-separation on $X_c$ and observed variables, we can derive the correct regularization constraints to be applied on $\phi$ . This yields a general principle to learn a risk-invariant predictor. We use it to theoretically explain the inconsistent results of existing DG algorithms (Sec. 3.3) and to propose an Out-of-Distribution generalization algorithm CACM (Sec. 4). Note that constraints from CACM are necessary but not sufficient as $X_c$ is not identifiable.
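+
+ As an illustration of this recipe (our own sketch, assuming networkx's `d_separated` helper, available in recent versions, and a small realized DAG of the *Causal*-shift flavor under Figure 2a):
+
+ ```python
+ import networkx as nx
+
+ # Realized DAG for a Causal shift: Xc -> Y, Xc -> X, A -> X, Y -> A, E -> A.
+ G = nx.DiGraph([("Xc", "Y"), ("Xc", "X"), ("A", "X"), ("Y", "A"), ("E", "A")])
+
+ print(nx.d_separated(G, {"Xc"}, {"A"}, {"Y"}))   # True:  Xc independent of A given Y
+ print(nx.d_separated(G, {"Xc"}, {"E"}, set()))   # True:  Xc independent of E
+ print(nx.d_separated(G, {"Xc"}, {"A"}, set()))   # False: Xc and A are dependent
+ ```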
64
+
65
+ We consider a *canonical* causal graph (Figure 2) to specify the common data-generating processes that can lead to a multi-attribute shift dataset. Shaded nodes represent observed variables X, Y; and the sets of attributes $A_{ind}$, $A_{\overline{ind}}$, and E such that $A_{ind} \cup A_{\overline{ind}} \cup \{E\} = A$. $A_{\overline{ind}}$ represents the attributes correlated with the label, $A_{ind}$ the attributes that are independent of the label, while E is a special attribute for the domain/environment from which a data point was collected. Not all attributes need to be observed; for example, in some cases only E and a subset of the remaining attributes may be observed. $X_c$ denotes the latent causal features, the only features that cause Y. In the simplest case, we assume no label shift across environments, i.e., the marginal distribution of Y is constant across train domains and test (see Figure 2a). More generally, different domains may have different distributions of causal features (e.g., more women visit one hospital), as shown by the dashed edge between E and $X_c$ in Figure 2b.
66
+
67
+ Under the canonical graph, we characterize different kinds of shifts based on the relationship between spurious attributes A and the classification label Y. Specifically, $A_{ind}$ is independent of the class label, and a change in $P(A_{ind})$ leads to an *Independent* distribution shift. For $A_{\overline{ind}}$, there are
68
+
69
+ ![](_page_3_Figure_8.jpeg)
70
+
71
+ Figure 2: (a) Canonical causal graph for specifying *multi-attribute* distribution shifts; (b) canonical graph with E-$X_c$ correlation. The anti-causal graph is shown in Suppl. G. Shaded nodes denote observed variables; since not all attributes may be observed, we use a dotted boundary. Dashed lines denote correlation: between $X_c$ and E, and between Y and $A_{\overline{ind}}$. The E-$X_c$ correlation can be due to confounding, selection, or a causal relationship; all our results hold for any of these relationships (see Suppl. F). (c) Different mechanisms for the Y-$A_{\overline{ind}}$ relationship that lead to *Causal*, *Confounded* and *Selected* shifts.
72
+
73
+ three mechanisms which can introduce the dashed-line correlation between $\boldsymbol{A}_{\overline{ind}}$ and Y (Figure 2c) – direct-causal (Y causing $A_{\overline{ind}}$), confounding between Y and $A_{\overline{ind}}$ due to a common cause, or selection during the data-generating process. Overall, we define four kinds of shifts based on the causal graph: *Independent*, *Causal*, *Confounded*, and *Selected*<sup>1</sup>. While the canonical graph in Figure 2a is general, resolving each dashed edge into a specific type of shift (causal mechanism) leads to a realized causal DAG for a particular dataset. As we shall see, knowledge of these shift types is sufficient to determine the correct independence constraints between observed variables.
74
+
75
+ Our canonical multi-attribute graph generalizes the DG graph from Mahajan et al. (2021) that considered an *Independent* domain/environment as the only attribute. Under the special case of a single attribute (|A|=1), the canonical graph helps interpret the validity of popular DG methods for a dataset by considering the type of the attribute-label relationship in the data-generating process. For example, let us consider two common constraints in prior work on independence between $\phi$ and a spurious attribute: unconditional ($\phi(x) \perp \!\!\! \perp A$) (Veitch et al., 2021; Albuquerque et al., 2020; Ganin et al., 2016) or *conditional* on the label ($\phi(x) \perp \!\!\! \perp A \mid Y$) (Ghifary et al., 2016; Hu et al., 2019; Li et al., 2018c;d). Under the canonical graph in Figure 2a, the unconditional constraint is true when $A \perp \!\!\! \perp Y$ ($A \in A_{ind}$) but not always for $A_{\overline{ind}}$ (true only under *Confounded* shift). If the relationship is *Causal* or *Selected*, then the conditional constraint is correct. Critically, as Veitch et al. (2021) show for a single-attribute graph, the conditional constraint is not always better; it is an incorrect constraint (not satisfied by $X_c$) under the *Confounded* setting. Further, under the canonical graph [with E-$X_c$ edge] from Figure 2b, none of these constraints are valid due to a correlation path between $X_c$ and E. This shows the importance of considering the generating process for a dataset.
76
+
77
+ Inferring attribute-label relationship type. Whether an attribute belongs to $A_{ind}$ or $A_{\overline{ind}}$ can be learned from data (since $A_{ind} \perp \!\!\! \perp Y$). Under some special conditions with the graph in Figure 2a, assuming all attributes are observed and all attributes in $A_{\overline{ind}}$ are of the same type, we can also identify the type of $A_{\overline{ind}}$ shift: $Y \perp \!\!\! \perp E|A_{\overline{ind}}$ implies *Selected*; if not, then $X \perp \!\!\! \perp E|A_{\overline{ind}}, Y$ implies *Causal*; otherwise it is *Confounded*. In the general case of Figure 2b, however, it is not possible to differentiate between $\boldsymbol{A}_{cause}$, $\boldsymbol{A}_{conf}$ and $\boldsymbol{A}_{sel}$ using observed data, and manual input is needed. Fortunately, unlike the full causal graph, the type of relationship between the label and an attribute is easier to obtain. For example, in text toxicity classification, toxicity labels are found to be spuriously correlated with certain demographics ($A_{\overline{ind}}$) (Dixon et al., 2018; Koh et al., 2021; Park et al., 2018); while in medical applications where data is collected from a small number of hospitals, shifts arise due to different methods of slide staining and image acquisition ($A_{ind}$) (Koh et al., 2021; Komura & Ishikawa, 2018; Tellez et al., 2019). Suppl. A contains additional real-world examples with attributes.
78
+
79
+ We list the independence constraints between $\langle X_c, A, Y \rangle$ under the canonical graphs from Figure 2, which can be used to derive the correct regularization constraints to be applied on $\phi$ (Theorem 2.1).
80
+
81
+ **Proposition 3.1.** Given a causal DAG realized by specifying the target-attributes relationship in Figure 2a, the correct constraint depends on the relationship of label Y with the attributes A. As shown, A can be split into $A_{\overline{ind}}$, $A_{ind}$ and E, where $A_{\overline{ind}}$ can be further split into subsets that have a causal $(A_{cause})$, confounded $(A_{conf})$, or selected $(A_{sel})$ relationship with Y ($A_{\overline{ind}} = A_{cause} \cup A_{conf} \cup A_{sel}$). Then, the (conditional) independence constraints $X_c$ should satisfy are,
82
+
83
+ - 1. Independent: $X_c \perp \!\!\! \perp A_{ind}$; $X_c \perp \!\!\! \perp E$; $X_c \perp \!\!\! \perp A_{ind} | Y$; $X_c \perp \!\!\! \perp A_{ind} | E$; $X_c \perp \!\!\! \perp A_{ind} | Y, E$
+ - 2. Causal: $X_c \perp \!\!\! \perp A_{cause} | Y$; $X_c \perp \!\!\! \perp E$; $X_c \perp \!\!\! \perp A_{cause} | Y, E$
84
+ - 3. Confounded: $X_c \perp \!\!\! \perp A_{conf}$ ; $X_c \perp \!\!\! \perp E$ ; $X_c \perp \!\!\! \perp A_{conf} | E$
85
+ - 4. Selected: $X_c \perp \!\!\! \perp A_{sel} | Y$; $X_c \perp \!\!\! \perp A_{sel} | Y, E$
86
+
87
+ **Corollary 3.1.** All the above derived constraints are valid for Graph 2a. However, in the presence of a correlation between E and $X_c$ (Graph 2b), only the constraints conditioned on E hold true.
88
+
89
+ Corollary 3.1 implies that if we are not sure about the E-$X_c$ correlation, the E-conditioned constraints should be used. By considering independence constraints over attributes that may represent any observed variable, our graph-based characterization unites the single-domain (group-wise) (Sagawa et al., 2020) and multi-domain generalization tasks. Whether attributes represent auxiliary attributes, group indicators, or data sources, Proposition 3.1 provides the correct regularization constraint.
90
+
91
+ <sup>1</sup>Note that for *Selected*, satisfying the assumptions of Theorem 2.1 implies that $X_c$ is fully predictive of Y or that the noise in the Y-$X_c$ relationship is independent of the features driving the selection process.
92
+
93
+ Combining with Theorem 2.1, Proposition 3.1 shows that the necessary constraints for a risk-invariant predictor's representation ϕ(X) are different for different types of attributes. This leads us to our key result: under multi-attribute shifts, a single (conditional) independence constraint cannot be valid for all kinds of shifts. Remarkably, this result is true even for single-attribute shifts: any algorithm with a fixed conditional independence constraint (e.g., as listed in Table 1 (Gretton et al., 2012; Arjovsky et al., 2019; Li et al., 2018b; Sun & Saenko, 2016)) cannot work for all datasets.
94
+
95
+ Theorem 3.1. *Under the canonical causal graph in Figure 2(a,b), there exists no (conditional) independence constraint over ⟨Xc, A, Y⟩ that is valid for all realized DAGs as the type of multi-attribute shifts varies. Hence, for any predictor algorithm for Y that uses a single (conditional) independence constraint over its representation ϕ(X), A and Y, there exists a realized DAG G and a corresponding training dataset such that the learned predictor cannot be a risk-invariant predictor for distributions in P<sub>G</sub>, where P<sub>G</sub> is the set of distributions obtained by changing P(A|Y).*
96
+
97
+ Corollary 3.2. *Even when |A| = 1, an algorithm using a single independence constraint over ⟨ϕ(X), A, Y⟩ cannot yield a risk-invariant predictor for all kinds of single-attribute shift datasets.*
98
+
99
+ Corollary 3.2 adds theoretical evidence for past empirical demonstrations of inconsistent performance of DG algorithms (Wiles et al., 2022; Ye et al., 2022). To demonstrate its significance, we provide OoD generalization results on a simple "slab" setup (Shah et al., 2020) with three datasets (*Causal*, *Confounded*, and *Selected* shifts) in Suppl. E.2. We evaluate two constraints motivated by the DG literature (Mahajan et al., 2021): unconditional Xc ⊥⊥ A | E, and conditional on the label Xc ⊥⊥ A | Y, E. As predicted by Corollary 3.2, neither constraint obtains the best accuracy on all three datasets (Table 6).
100
+
101
+ Motivated by Sec. 3, we present *CACM*, an algorithm that adaptively chooses regularizing constraints for multi-attribute shift datasets (full algorithm for any general DAG in Suppl. C). It has two phases.
102
+
103
+ Phase I. Derive correct independence constraints. If a dataset's DGP satisfies the canonical graph, *CACM* requires a user to specify the relationship type for each attribute and uses the constraints from Proposition 3.1. For other datasets, *CACM* requires a causal graph describing the dataset's DGP and uses the following steps to derive the independence constraints. Let V be the set of observed variables in the graph except Y , and C be the list of constraints.
104
+
105
+ - 1. For each observed variable V ∈ V, check whether (Xc, V) are *d*-separated. If so, add Xc ⊥⊥ V to C.
106
+ - 2. If not, check if (Xc, V) are *d*-separated conditioned on any subset Z of the remaining observed variables in Z = {Y} ∪ V \ {V}. For each subset Z with *d*-separation, add Xc ⊥⊥ V | Z to C (see the sketch below).
107
+
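+ As a minimal illustration of these two steps, the following sketch enumerates such constraints with networkx (assuming a recent version where nx.d_separated is available); the example DAG and node names are ours, chosen to match the *Causal* shift setting, not taken verbatim from the paper.
+
+ ```python
+ from itertools import combinations
+ import networkx as nx
+
+ def derive_constraints(G, xc="Xc", label="Y"):
+     observed = [v for v in G.nodes if v not in (xc, label)]
+     constraints = []
+     for v in observed:
+         if nx.d_separated(G, {xc}, {v}, set()):       # step 1: Xc _||_ V
+             constraints.append((v, ()))
+             continue
+         rest = [label] + [w for w in observed if w != v]
+         for r in range(1, len(rest) + 1):             # step 2: Xc _||_ V | Z
+             for z in combinations(rest, r):
+                 if nx.d_separated(G, {xc}, {v}, set(z)):
+                     constraints.append((v, z))
+     return constraints
+
+ # Causal-shift example: Xc -> Y -> Acause <- E
+ G = nx.DiGraph([("Xc", "Y"), ("Y", "Acause"), ("E", "Acause")])
+ print(derive_constraints(G))
+ # [('Acause', ('Y',)), ('Acause', ('Y', 'E')), ('E', ())]
+ ```
+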
108
+ Phase II. Apply regularization penalty using derived constraints. In Phase II, *CACM* applies those constraints as a regularizer to the standard ERM loss: $g_1, \phi = \arg\min_{g_1, \phi} \ell(g_1(\phi(x)), y) + RegPenalty$, where ℓ is the cross-entropy loss. The regularizer optimizes for valid constraints over all observed variables V ∈ V. Below we provide the regularizer term for datasets following the canonical graphs from Figure 2 (V = A). We choose Maximum Mean Discrepancy (MMD) (Gretton et al., 2012) to apply our penalty (in principle, any metric for conditional independence would work).
109
+
110
+ Since A includes multiple attributes, the regularizer penalty depends on the type of distribution shift for each attribute. For instance, for A ∈ Aind (*Independent*), to enforce ϕ(x) ⊥⊥ A, we aim to minimize the distributional discrepancy between P(ϕ(x)|A = ai) and P(ϕ(x)|A = aj), for all i, j values of A. However, since the same constraint is applicable to E, it is statistically more efficient to apply the constraint on E (if available), as there may be multiple closely related values of A in a domain (e.g., slide stains collected from one hospital may be spread over similar colors, but not exactly the same). Hence, we apply the constraint on the distributions P(ϕ(x)|E = Ei) and P(ϕ(x)|E = Ej) if E is observed (whether or not A is observed); otherwise we apply the constraint over A.
111
+
112
+ $$RegPenalty_{\mathbf{A}_{ind}} = \sum_{i=1}^{|\mathbf{A}_{ind}|} \sum_{j>i} MMD(P(\phi(\mathbf{x})|a_{i,ind}), P(\phi(\mathbf{x})|a_{j,ind}))$$
113
+ (2)
114
+
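+ A minimal PyTorch sketch of this penalty is shown below; for brevity it uses a linear-kernel MMD (squared distance between feature means), whereas any kernel MMD or other discrepancy metric could be substituted. The function and tensor names are ours.
+
+ ```python
+ import torch
+
+ def mmd(x, y):
+     # linear-kernel MMD: squared distance between the two feature means
+     return (x.mean(0) - y.mean(0)).pow(2).sum()
+
+ def reg_penalty_ind(phi_x, a_ind):
+     """phi_x: (N, d) representations; a_ind: (N,) attribute values."""
+     values = a_ind.unique()
+     penalty = phi_x.new_zeros(())
+     for i in range(len(values)):
+         for j in range(i + 1, len(values)):
+             penalty = penalty + mmd(phi_x[a_ind == values[i]],
+                                     phi_x[a_ind == values[j]])
+     return penalty
+ ```
+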
115
+ For A ∈ Acause (*Causal*), following Proposition 3.1, we consider the distributions P(ϕ(x)|A = ai, Y = y) and P(ϕ(x)|A = aj, Y = y). We additionally condition on E as there may be a correlation between E and Xc (Figure 2b), which renders the other constraints incorrect (Corollary 3.1). We similarly obtain regularization terms for *Confounded* and *Selected* (Suppl. C).
116
+
117
+ $$RegPenalty_{\boldsymbol{A}_{cause}} \ = \sum_{e \in E} \sum_{y \in Y} \sum_{i=1}^{|\boldsymbol{A}_{cause}|} \sum_{j>i} \text{MMD}(P(\phi(\boldsymbol{x})|a_{i,cause},y), P(\phi(\boldsymbol{x})|a_{j,cause},y))$$
118
+
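+ A corresponding sketch for this *Causal* penalty (reusing mmd() from the snippet above) matches representations within each (y, e) group, reflecting the additional conditioning on E:
+
+ ```python
+ def reg_penalty_cause(phi_x, a_cause, y, e):
+     penalty = phi_x.new_zeros(())
+     for ev in e.unique():
+         for yv in y.unique():
+             g = (e == ev) & (y == yv)        # condition on Y and E
+             vals = a_cause[g].unique()
+             for i in range(len(vals)):
+                 for j in range(i + 1, len(vals)):
+                     penalty = penalty + mmd(phi_x[g & (a_cause == vals[i])],
+                                             phi_x[g & (a_cause == vals[j])])
+     return penalty
+ ```
+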
119
+ The final $RegPenalty$ is a sum of penalties over all attributes, $RegPenalty = \sum_{A \in \boldsymbol{A}} \lambda_A Penalty_A$, where $\lambda_A$ are the hyperparameters. Unlike prior work (Makar et al., 2022; Veitch et al., 2021), we do not restrict ourselves to binary-valued attributes and classes.
120
+
121
+ *CACM*'s relationship with existing DG algorithms. Table 1 shows common constraints used by popular DG algorithms. *CACM*'s strength lies in *adaptively* selecting constraints based on the causal relationships in the DGP. Thus, depending on the dataset, applying *CACM* for a single-attribute shift may involve applying the same constraint as in the MMD or C-MMD algorithms. For example, in the Rotated-MNIST dataset with E = Aind = rotation, the effective constraint of the MMD, DANN, and CORAL algorithms (ϕ ⊥⊥ E) is the same as *CACM*'s constraint ϕ ⊥⊥ Aind for *Independent* shift.
122
+
123
+ We perform experiments on MNIST, small NORB, and Waterbirds datasets to demonstrate our main claims: existing DG algorithms perform worse on multi-attribute shifts; *CACM* with the correct graph-based constraints significantly outperforms these algorithms; and incorrect constraints cannot match the above accuracy. While we provide constraints for all shifts in Proposition 3.1, our empirical experiments with datasets focus on the commonly occurring *Causal* and *Independent* shifts. All experiments are performed in PyTorch 1.10 with NVIDIA Tesla P40 and P100 GPUs, building on DomainBed (Gulrajani & Lopez-Paz, 2021) and OoD-Bench (Ye et al., 2022). Regularizing on the model's logit scores provides better accuracy than regularizing on ϕ(x); hence we adopt it for all our experiments.
124
+
125
+ # Method
126
+
127
+ We introduce three new datasets for the multi-attribute shift problem. For all datasets, details of environments, architectures, visualizations, and setup generation are in Suppl. D.1.
128
+
129
+ MNIST. Colored (Arjovsky et al., 2019) and Rotated MNIST (Ghifary et al., 2015) present *Causal* (Acause = color) and *Independent* (Aind = rotation) distribution shifts, respectively. We combine these to obtain a multi-attribute dataset with Acause and Aind (col + rot). For comparison, we also evaluate on single-attribute Acause (Colored) and Aind (Rotated) MNIST datasets.
130
+
131
+ small NORB (LeCun et al., 2004). This dataset was used by Wiles et al. (2022) to create a challenging DG task with single-attribute shifts, having multi-valued classes and attributes over realistic 3D objects. We create a multi-attribute shift dataset (light+azi), consisting of a causal connection, Acause =lighting, between lighting and object category Y ; and Aind =azimuth that varies independently across domains. We also evaluate on single-attribute Acause (lighting) and Aind (azimuth) datasets.
132
+
133
+ Waterbirds. We use the original dataset (Sagawa et al., 2020) where bird type (water or land) (Y ) is spuriously correlated with background (Acause ). To create a multi-attribute setup, we add different weather effects (Aind ) to train and test data with probability p = 0.5 and 1.0 respectively.
134
+
135
+ Baseline DG algorithms & implementation. We consider baseline algorithms optimizing for different constraints and statistics to compare to causal adaptive regularization: IRM (Arjovsky et al., 2019), IB-ERM and IB-IRM (Ahuja et al., 2021), VREx (Krueger et al., 2021), MMD (Li et al., 2018b), CORAL (Sun & Saenko, 2016), DANN (Ganin et al., 2016), Conditional-MMD (C-MMD) (Li et al., 2018b), Conditional-DANN (CDANN) (Li et al., 2018d), GroupDRO (Sagawa et al., 2020), Mixup (Yan et al., 2020), MLDG (Li et al., 2018a), SagNet (Nam et al., 2021), and RSC (Huang et al., 2020). Following DomainBed (Gulrajani & Lopez-Paz, 2021), a random search is performed 20 times over the hyperparameter distribution for 3 seeds. The best models obtained across the three seeds are used to compute the mean and standard error. We use a validation set that follows the test domain distribution, consistent with previous work on these datasets (Arjovsky et al., 2019; Sagawa et al., 2020; Wiles et al., 2022; Ye et al., 2022). Further details are in Suppl. D.
136
+
137
+ Table 2: Colored + Rotated MNIST: Accuracy on unseen domain for single-attribute (color, rotation) and multi-attribute (col + rot) distribution shifts; small NORB: Accuracy on unseen domain for single-attribute (lighting, azimuth) and multi-attribute (light+azi) distribution shifts. Waterbirds: Worst-group accuracy on unseen domain for single- and multi-attribute shifts.
138
+
139
+ | Algo | | Colored+Rotated MNIST<br>Accuracy | | | small NORB<br>Accuracy | | | Waterbirds<br>Worst-group accuracy |
140
+ |--------|------------------|-----------------------------------|-----------|-----------|------------------------|-----------|------------|------------------------------------|
141
+ | | color | rotation | col+rot | lighting | azimuth | light+azi | original | multi-attr |
142
+ | ERM | 30.9 ±1.6 | 61.9 ±0.5 | 25.2 ±1.3 | 65.5 ±0.7 | 78.6 ±0.7 | 64.0 ±1.2 | 66.0 ± 3.7 | 37.0 ± 1.1 |
143
+ | IB-ERM | 27.8 ±0.7 | 62.1 ±0.8 | 41.2 ±4.1 | 66.0 ±0.9 | 75.9 ±1.2 | 61.2 ±0.1 | 66.9 ± 4.6 | 40.8 ± 5.6 |
144
+ | IRM | 50.0 ±0.1 | 61.2 ±0.3 | 39.6 ±6.7 | 66.7 ±1.5 | 75.7 ±0.4 | 61.7 ±0.5 | 61.2 ± 5.2 | 37.7 ± 1.7 |
145
+ | IB-IRM | 49.9 ±0.1 | 61.4 ±0.9 | 49.3 ±0.3 | 64.7 ±0.8 | 77.6 ±0.3 | 62.2 ±1.2 | 62.3 ± 7.7 | 46.9 ± 6.5 |
146
+ | VREx | 30.3 ±1.6 | 62.1 ±0.4 | 23.3 ±0.4 | 64.7 ±1.0 | 77.6 ±0.5 | 62.5 ±1.6 | 68.8 ± 2.5 | 38.1 ± 2.3 |
147
+ | MMD | 29.7 ±1.8 | 62.2 ±0.5 | 24.1 ±0.6 | 66.6 ±1.6 | 76.7 ±1.1 | 62.5 ±0.3 | 68.1 ± 4.4 | 45.2 ± 2.4 |
148
+ | CORAL | 28.5 ±0.8 | 62.5 ±0.7 | 23.5 ±1.1 | 64.7 ±0.5 | 77.2 ±0.7 | 62.9 ±0.3 | 73.6 ± 4.8 | 54.1 ± 3.0 |
149
+ | DANN | 20.7 ±0.8 | 61.9 ±0.7 | 32.0 ±7.8 | 64.6 ±1.4 | 78.6 ±0.7 | 60.8 ±0.7 | 78.5 ± 1.8 | 55.5 ± 4.6 |
150
+ | C-MMD | 29.4 ±0.2 | 62.3 ±0.4 | 32.2 ±7.0 | 65.8 ±0.8 | 76.9 ±1.0 | 61.0 ±0.9 | 77.0 ± 1.2 | 52.3 ± 1.9 |
151
+ | CDANN | 30.8 ±8.0 | 61.8 ±0.2 | 32.2 ±7.0 | 64.9 ±0.5 | 77.3 ±0.3 | 60.8 ±0.9 | 69.9 ± 3.3 | 49.7 ± 3.9 |
152
+ | DRO | 33.9 ±0.4 | 60.6 ±0.9 | 25.3 ±0.5 | 65.5 ±0.7 | 77.1 ±1.0 | 62.3 ±0.6 | 70.4 ± 1.3 | 53.1 ± 2.2 |
153
+ | Mixup | 25.1 ±1.2 | 61.4 ±0.6 | 21.1 ±1.6 | 66.2 ±1.3 | 80.4 ±0.5 | 57.1 ±1.5 | 74.2 ± 3.9 | 64.7 ± 2.4 |
154
+ | MLDG | 31.0 ±0.3 | 61.6 ±0.8 | 24.4 ±0.7 | 66.0 ±0.7 | 77.9 ±0.5 | 64.2 ±0.6 | 70.8 ± 1.5 | 34.5 ± 1.7 |
155
+ | SagNet | 28.2 ±0.8 | 60.7 ±0.7 | 23.7 ±0.2 | 65.9 ±1.5 | 76.1 ±0.4 | 62.2 ±0.5 | 69.1 ± 1.0 | 40.6 ± 7.1 |
156
+ | RSC | 29.1 ±1.9 | 62.3 ±0.4 | 22.8 ±0.3 | 62.4 ±0.4 | 75.6 ±0.6 | 61.8 ±1.3 | 64.6 ± 6.5 | 40.9 ± 3.6 |
157
+ | CACM | 70.4 ±0.5 | 62.4 ±0.4 | 54.1 ±1.3 | 85.4 ±0.5 | 80.5 ±0.6 | 69.6 ±1.6 | 84.5 ± 0.6 | 70.5 ± 1.1 |
158
+
159
+ Table 3: small NORB *Causal* shift. Comparing Xc ⊥⊥ Acause | Y, E with incorrect constraints.
160
+
161
+ | Constraint | Accuracy |
162
+ |--------------------------|------------|
163
+ | Xc ⊥⊥ Acause | 72.7 ± 1.1 |
164
+ | Xc ⊥⊥ Acause \| E | 76.2 ± 0.9 |
165
+ | Xc ⊥⊥ Acause \| Y | 79.7 ± 0.9 |
166
+ | Xc ⊥⊥ Acause \| Y, E | 85.4 ± 0.5 |
167
+
168
+ Table 4: Comparing Xc ⊥⊥ Acause | Y, E and Xc ⊥⊥ Acause | Y for *Causal* shift in MNIST and small NORB. The constraint implied by the E-Xc correlation (Fig. 2b, Prop. 3.1) affects accuracy.
169
+
170
+ | Constraint | MNIST | small NORB |
171
+ |--------------------------|------------|------------|
172
+ | Xc ⊥⊥ Acause \| Y | 69.7 ± 0.2 | 79.7 ± 0.9 |
173
+ | Xc ⊥⊥ Acause \| Y, E | 70.4 ± 0.5 | 85.4 ± 0.5 |
2208.08631/main_diagram/main_diagram.drawio ADDED
@@ -0,0 +1 @@
1
+ <mxfile host="app.diagrams.net" modified="2022-03-04T16:29:40.598Z" agent="5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.102 Safari/537.36" version="16.6.4" etag="0Gcdy0dQndvgLRA55Wdj" type="device"><diagram id="Dg4zdj23zdceZcmncePn">7V1bc6u6Ff41nmkf4uF+ecy17UxOz56d3WnPIwHZ5gQjF/BO0l9fCSSMhDDYCIyzcWaSIIMQ0vo+rbW0lljo99uPvyXebvMbDEC00JTgY6E/LDRNVW0H/cEln0WJZahFwToJA3LSoeAl/B8ghQop3YcBSJkTMwijLNyxhT6MY+BnTJmXJPCdPW0FI/auO29N7qgcCl58LwK10/4dBtmGPpdSOf3vIFxvyK0dk3yx9ejJpCDdeAF8rxTpjwv9PoEwK/7bftyDCHce7ZfiuqeGb8uGJSDOulygFRf89KI9eTbSruyTPiyIg1vcZ+jIj7w0Df2FfrfJthEqUNG/CdzHAQjI0QrG2ZO3DSM8tD/CLRomTfkneEe/v8OtF5NTXuA+8fENNlmGBk0z9Vv0CzUT/8InpMs1hOsIeLswXfpwm3/hp/mpT6viBuhfegtTu8tvgv4ytylEB3WCfpdmCXwD9zCCSf5gupJ/ym/oSOKnQL2XfP4HHSj04A98sDTp4cNH9cuHT3IUeOmm7Io/99vdC+nHGMYAn/0RZnm1S0fTyXFRs67T40PV+OCzcvANJOhpM5CQsmKoQLBmxTKlfYsOTZfAw0vWgEiE5tSlRC1lD4EWQHSXBPWv8n6QbirCm4pc07IERF4W/mSb4RGQrcvqyjt8gyG6saZQQjApHAgf2PSY1lE8ErmsKs61mlRn6bpMZZbDVVb0Ra0yJOTeZ+W0HT4hPaXVqtLSOO4C2rLmC8yjF6B/ikbTo8rYHYpyxIvRr8/oP4p+dSj0q7Z9KfSbxtTQb0hDv+6I8SId+rUmKy1I5i9ohb7uDgt94xzo/9iE8TH4l9pMjqs3kPkbcjBJYlC1o8QAotf8YX6CJAuR8icGdV5E6lMb6CR4CiPaYwcSMB2VIQHDGYQEdHNqgLd0d+lajHg7urJUVNL36MNV2pkBcNW2yk3L6pKvr4EHzsCROePo0jgyVHYytQxzEBzZk8ORpiw1Vtl1rHPnTqtBm5U+dwpabbtyJzerHZQHwOHhf9+EGXjZeflYvyfejkXnJFGniZXXFYIIRSGBFukNBD/wwchaB1EuFRj9wKJk1FybSEhF3G176Tp1idc5WaoKN0O+RwbVPmVQ1Ssd1JzKjg8qotbHB/yDyr3EJxfaksbZ5k1Ooz7ImiIgNc2pT7PnDLNTnz8RK9PJAibZBq5h7EWPh1Ju9jyc8wzhjhT+CbLsk/SVt88gKwsVpaw6lRyfRs70ETVPlB0mIYFBp+qdB7nzhNN1sFwBJq0oIzKNvll5pOXWf/fYp3knAlr5JTPutBDXdJPmI4eQp6j27iM/9XCRtcZ/v4Pnf9G7vya9qnoOY+AltLJFgXP6ZT/WYUefEDQH79Vqpfk+A2/VEfCEFLxbLN4d+wDjKq8LEG9IIHWqZFwR3E90CvWCu0pXNyaCd9qeGfBXC3jHvizgBfbxCYBXLjG/HzD+RxX/DYCvmPvU71sarv0JQRcQQncHj3xC6LCEkG68XTG2j1EU7lLcJbuK/Q2K0opJ3oapV89/W+dC8fs+i8IYMFijinMxblIwY3CY0YWY0URqsQzMGDNm+mDGFGCmuzNHPmZEPsMvN4n2rOxic/HT04PxoDFzsWYPpXxzizy2Uze2VRGrSJmJrZlV+rCKLWAV94KsIvKPzawys4pm62OySj//3S/PKqJ4re4iIJ9VpuThm6mgLxVYbkcqkLFgo12Nb2+6S0HC8Dt2IbzON7f4pxvfDMEtDeu+Dr+yxJvG8oIytCm5KV/C9Raill05bQUecFasj3I82rI60pYMbwv1YFWEZwW8bJ+AWiejR8zYnvSicB1jsKJHzT1ZNOjllnyxDYOgIDqARtt7zavCHETiJlC95t3CzBd3EbelpGe7DBIt4kIEpkNuUkSDW69yBevTIt+1lAmt7uwsSHkVAkYtKTAJ99luX4TCzFJzWalxOkiNK5AaU4bUnBVqu2gID5zW2IgjV4RBgGdaWw06zHSC8GzXWNqmW34cTtAstsbO8Xi2ay6Vysdk6+VbKlF5Oiuk9avIK9Wdv6y8uoqxdGQJqauYS7qiO4Jk1uM6QZqFWy9DopZne67CAMT+rKhNYMrlFTVT5HpwhtLU7Dplneh6YMLuO8Srj+SqYFIDpiMxfDZAY85Dm+/iSFZePY2ggWyZFLju6W59mVXnc7w6OjjqNWk2O9vbtrVUtE4cfXI4fq3VRltm2vELegfv0wjgMZ03SD7E7pb7ypyiPBaTDUwu54QxgRMYR50w2kEL6UfgJhcoZpkCAhdFgvMpKGcRuGjdoVUJnfOq5OYnclsUlFsWSM2rMrSpqci6Wk8idOvZDd3TkgX1DZqUSFWoGT1jo6dB3ick24rEnEFdUTipHihnUNBq2TmDOqWm2WT4xU2GE/bHkZDBK8lksKzxTIZaq3XJUNSkWwCBl3k3hdKf+MyVhYBj8X5aNIl2LtilWJdCXYj0nUx74zH2YXBYTLrcKu8QFoXGRQA4Vj2GXrisK8Oi0PWvyu/nuqqvYF7gQmBOjLc7j/514jGqhuDpFwzspe0RsGG6w8TTg25mWuViaooe/RJsa6iXZNvmHJdZaGeh7ex0HFdo+2XEjKQiXN+0fZ451332HsFQM7kMUGuw3ZH4LQxbNxXlYyXVI8ZY+/0cGq7UYjSeY9ZdIEOpeWGnFrh3gax/YPnHI2qH41XKoa2JADI02Avv6WOfZUtpjDF1MJ/ExtQkiVjj8pVk+dW6WFGCRKYTdtA7ja9P37qZd1zbLSxrHb+gv8vrAnlWMzc2GEqduVGCzkn1ilnnnKjOmSIGyzgHVAf+O2FVu/f+2cpIuqlm9VI1+csHVDWN/ot5s8Yx0l77bd7dCvxYUOdfta+9i1A5tG/3zFVu40StxDh+QW+thHbXrJUc10rWuGkVWA6+cDam9WYIst5gnIZpBmIftzKC6ddNcpOiYHITtOkKnJrGUApmffnoH1v8fqt5xI6MmM6lW4m2uh5qwLrkX80mwoRMhMqivcK+0kNR6XH3kFnJmWeM+iNa2h7PUCnhUaLq3LwznTPZXY2rSdqLfji/qWu2ZUfwLTMlq2T1HDgsq/EaP9Ue/xabPs/eK4iGo3eZPE3e2kjut6i8hPEgoQVJNhI4Qp5BldeeUntD9zwmA0p3b6Q1wNUqBb3t1frSCDeq4jlgHtVzuchxRxhVUSbTPJNPeSYfee4dYOfmrhPZZGZMi9cKZM+YXVLJZhT+wigcL0W4Bg7nbA2Yi9twLa41J+O5z0vw6vlm78B7o8rLVSguEW7RXbmxf
NdXaHVSa9wWtcawKCd9UqVTipZjsErOjT6AkmM277X32pZFrdxF0H+ruHNfG325X8U7dU07eHBpPqZgH+FyTU+2F8xsXnooheR39HxeftVt4mPvvl/s0jeL0xTFSec2tRCJkyrahE2KOHV4T0l9KZJ5ofd1vfS8ZTs2/GTfvAxJepw/habUs5t1XtFt0K+mk9xsq6yIlVFbp+++xtWkKnxVEl8hPGCCT+v+JSPnDnTcv2Q6kJLBfNold1Ix68tJ1Y3SrmH1NgArb19GHZwoJ0jA/TBe38EsQ+Ov5zuvk7JnsMIdfVMp+k46v1r2I7fYb1RJ0lAL16ILfmOsBpsdXo1M39eV7rdDv7BrOjg/HsNUkUChXErRj7iVo3LyGuENY2aHlyvPYnERsbAuKRb1BYy/oKZ7uKLQx5Xvdgn0/M1fJz6D/BIGlm1yBpYpmFgGM7DqXvZ9nO4RP/wM03x71mOKxhkD0yg9NTmrvX8Ff5qkpk3JLVQCokdIoX1XW3Jbt4jUQzqWPccNHSYQWwgHWwU94+Y3GGB37OP/AQ==</diagram></mxfile>
2208.08631/main_diagram/main_diagram.pdf ADDED
Binary file (32.9 kB). View file
 
2208.08631/paper_text/intro_method.md ADDED
@@ -0,0 +1,139 @@
1
+ # Introduction
2
+
3
+ Semi-supervised learning has emerged as an attractive solution to mitigate the reliance on large labeled data, which is often laborious to obtain, and to intelligently leverage a large amount of unlabeled data, to the point of being deployed in many computer vision applications, especially image classification [40, 53, 55]. Generally, methods for this task have adopted pseudo-labeling [1,19,30,40,46,51,61] or consistency regularization [17, 24, 29, 36, 48, 53]. Some methods [4, 5, 42, 47, 52, 54, 58] proposed to integrate both approaches in a unified framework, which is often called a
4
+
5
+ <sup>†</sup> Work done in Korea University
6
+
7
+ <sup>∗</sup> Equal contribution
8
+
9
+ holistic approach. As one of the pioneering works, FixMatch [47] first generates a pseudo-label from the model's prediction on the weakly-augmented instance and then encourages the prediction from the strongly-augmented instance to follow the pseudo-label. Its success inspired many variants that use, e.g., curriculum learning [54, 58].
10
+
11
+ On the other hand, concurrent to the race for better semi-supervised learning methods [47,54,58], substantial progress has been made in self-supervised representation learning, especially with contrastive learning [3,6,8,10,20,22], aiming at learning a task-agnostic feature representation, without any supervision, that can transfer well to downstream tasks. Formally, these methods encourage the features extracted from two differently-augmented images to be pulled toward each other, which injects some invariance or robustness into the models. Not surprisingly, semi-supervised learning frameworks can benefit from self-supervised representation learning [25, 33, 34] in that a good representation from the feature encoder yields better performance with semi-supervised learning, and thus some methods [25,33] attempt to combine the aforementioned two paradigms to boost performance by achieving a better feature encoder.
12
+
13
+ Extending the techniques presented in existing self-supervised representation learning [3, 6, 8, 10, 20, 22], which only focus on learning the feature encoder, to further consider the model's prediction itself would be an appealing way to effectively combine the two paradigms, as it allows for boosting not only the feature encoder but also the classifier. However, compared to feature representation learning [3, 6, 8, 10, 20, 22], the consistency between the model's predictions from two different augmentations should be defined by considering which direction is better, so as to achieve not only invariance but also high accuracy in image classification. Without this, simply pulling the model's predictions together as done in [3,6,8,10,20,22] may hinder the classifier output, thereby decreasing accuracy.
14
+
15
+ In this paper, we present a novel framework for semi-supervised learning, dubbed ConMatch, that intelligently leverages confidence-guided consistency regularization between the model's predictions from two strongly-augmented images. Built upon conventional frameworks [47, 58], we consider two strongly-augmented images and one weakly-augmented image, and define the consistency between the model's predictions from the two strongly-augmented images, while still using an unsupervised loss between the model's predictions from one of the strongly-augmented images and the weakly-augmented image, as done in [47,58]. Since defining the direction of consistency regularization between two strongly-augmented images is of prime importance, rather than selecting it in a deterministic manner, we present a probabilistic technique that measures the confidence of the pseudo-label from each strongly-augmented image and weights the consistency loss with this confidence. To measure the confidence of pseudo-labels, we present two techniques, a non-parametric and a parametric approach. With this confidence-guided consistency regularization, our framework dramatically boosts the performance of existing semi-supervised learners [47, 58]. In addition, we also present a stage-wise training scheme to boost the convergence of training. Our framework is a plug-and-play module, and thus various semi-
16
+
17
+ Table 1. Comparison of our ConMatch to other relevant works which have a form of consistency regularization combining pseudo-labeling [5,25,33,34,47,54,58]
18
+
19
+ | | | FixMatch | FlexMatch | | | | LESS | |
20
+ |-----------------------------|-----|----------|-----------|------|------|------|------|--------|
21
+ | | [5] | [47] | [58] | [54] | [25] | [33] | [34] | (Ours) |
22
+ | Using pseudo-labeling | ✗ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ |
23
+ | Using two strong branches | ✗ | ✗ | ✗ | ✗ | ✓ | ✓ | ✗ | ✓ |
24
+ | Learning confidence measure | ✗ | ✗ | ✗ | ✗ | ✗ | ✗ | ✗ | ✓ |
25
+ | Using stage-wise training | ✗ | ✗ | ✗ | ✗ | ✓ | ✗ | ✗ | ✓ |
26
+
27
+ supervised learners [4, 25, 33, 34, 47, 52, 54, 58] can benefit from our framework. We briefly compare our method with other highly relevant works in semi-supervised learning in Table 1. Experimental results and ablation studies show that the proposed framework not only boosts convergence but also achieves state-of-the-art performance on most standard benchmarks [12, 28, 37].
28
+
29
+ # Method
30
+
31
+ Let us define a batch of labeled instances as $\mathcal{X} = \{(x_b, y_b)\}_{b=1}^B$, where $x_b$ is an instance and $y_b$ is a label representing one of Y classes. In addition, let us define a batch of unlabeled instances as $\mathcal{U} = \{u_b\}_{b=1}^{\mu B}$, where $\mu$ is a hyper-parameter that determines the size of $\mathcal{U}$ relative to $\mathcal{X}$. The objective of semi-supervised learning is to use both $\mathcal{X}$ and $\mathcal{U}$ to train a model with parameters $\theta$ taking an instance $r \in \mathcal{X} \cup \mathcal{U}$ as input and outputting a distribution over class labels y, denoted $p_{\text{model}}(y|r;\theta)$. The model generally consists of a feature encoder $f(\cdot)$ and a classifier $g(\cdot)$, and thus, $p_{\text{model}}(y|r;\theta) = g(f(r))$.
32
+
33
+ For semi-supervised learning, most state-of-the-art methods are based on consistency regularization approaches [2,29,44] that rely on the assumption that the model should generate similar predictions when perturbed versions of the same instance are fed, e.g., using data augmentation [36] or model perturbation [29,48]. These methods extract a pseudo-label from one branch, filtered by confidence, and use it as a target for another branch. For instance, FixMatch [47] utilizes two types of augmentations, weak and strong, denoted by $\alpha(\cdot)$ and $\mathcal{A}(\cdot)$, and a pseudo-label from the weakly-augmented version of an image is used as a target for the strongly-augmented version of the same image. This loss function is formally defined such that
34
+
35
+ $$\mathcal{L}_{un} = c(r)\mathcal{H}\left(q(r), p_{\text{model}}\left(y|\mathcal{A}(r); \theta\right)\right),\tag{1}$$
36
+
37
+ where c(r) denotes the confidence of q(r), and q(r) denotes a pseudo-label generated from $p_{\text{model}}(y|\alpha(r);\theta)$, which can be either a one-hot label [42, 47, 54, 58] or a sharpened one [4,5,52], and $\mathcal{H}(\cdot,\cdot)$ is often defined as a cross-entropy loss. In this framework, measuring the confidence c(r) is of prime importance, but conventional methods measure it simply, e.g., by the peak value of the softmax predictions [42, 47, 52, 54, 58].
38
+
39
+ On the other hand, semi-supervised learning frameworks can benefit from existing self-supervised representation learning [25,33,34] in that a good representation from the feature encoder $f(\cdot)$ yields better performance with a semi-supervised learner. In this light, some methods attempted to combine semi-supervised learning and self-supervised representation learning to achieve a better feature encoder [25,33]. Concurrent to the race for better semi-supervised learning methods, substantial progress has been made in self-supervised representation learning, especially with contrastive learning [3,6,8,10,20,22]. The loss function for this task can also be defined as a consistency regularization
40
+
41
+ ![](_page_5_Figure_2.jpeg)
42
+
43
+ Fig. 2. Network configuration of ConMatch. A semi-supervised learning framework built upon consistency loss with an additional strong branch to leverage confidence loss between two strong branches. In the parametric approach, the confidence estimator block takes a concatenated heterogeneous feature as an input and produces the estimated confidence of pseudo label.
44
+
45
+ loss, similar to [42, 47, 52, 54, 58] but at the feature level, such that
46
+
47
+ $$\mathcal{L}_{\text{self}} = \mathcal{D}(F_i(r), F_j(r)), \tag{2}$$
48
+
49
+ where $F_i(r) = f(\mathcal{A}_i(r))$ and $F_j(r) = f(\mathcal{A}_j(r))$ are extracted from two different strongly-augmented images $\mathcal{A}_i(\cdot)$ and $\mathcal{A}_j(\cdot)$, respectively. $\mathcal{D}(\cdot, \cdot)$ can be defined as a contrastive loss [22] or negative cosine similarity [10]. Even though this loss helps to boost learning of the feature encoder $f(\cdot)$, the mechanism that simply pulls the features $F_i(r)$ and $F_j(r)$ together may not be optimal for boosting a semi-supervised learner and may break the latent feature space, since it does not consider a direction representing which branch is better.
50
+
51
+ To combine the semi- and self-supervised learning paradigms in a boosting fashion, unlike [25, 33, 34], we present ConMatch, which effectively exploits a self-supervision between two strong branches tailored for boosting semi-supervised learning. Unlike existing self-supervised representation learning methods, e.g., SimSiam [10], we formulate the consistency regularization loss at the class logit-level<sup>4</sup>, as done in semi-supervised learning methods [47, 58], and estimate the confidences of each pseudo-label from the two strongly-augmented images, $\mathcal{A}_i(r)$ and $\mathcal{A}_j(r)$ for r, and use them to consider the probability of each direction between them. Since measuring such confidences is notoriously challenging, we present
52
+
53
+ <sup>4</sup> In the paper, class logit means the output of the network, i.e., $p_{\text{model}}(y|r; \theta)$ for r.
54
+
55
+ novel confidence estimators that use the output from the weakly-augmented image α(r) as an anchor, in non-parametric and parametric variants.
56
+
57
+ An overview of our ConMatch is illustrated in Fig. 2. Specifically, there exist two branches for strongly-augmented images (called strong branches) and one branch for the weakly-augmented image (called the weak branch). Similar to existing semi-supervised learning methods [47,54,58], we apply the consistency loss between each pair of a strong branch and the weak branch. But, tailored to semi-supervised learning, we present a confidence-guided consistency regularization loss Lccr between the two strong branches such that
58
+
59
+ $$\mathcal{L}_{ccr} = c_i(r)\mathcal{H}(q_i(r), p_{\text{model}}(y|\mathcal{A}_j(r); \theta)) + c_j(r)\mathcal{H}(q_j(r), p_{\text{model}}(y|\mathcal{A}_i(r); \theta)), \tag{3}$$
60
+
61
+ where qi(r) and qj(r) denote the pseudo-labels generated from pmodel(y|Ai(r); θ) and pmodel(y|Aj(r); θ), respectively, and ci(r) and cj(r) denote the estimated confidences of qi(r) and qj(r). Our proposed loss function is different from the conventional self-supervised representation learning loss Lself in that the consistency is applied at the logit-level (not feature-level), similar to [47,58], and adjusted by the estimated confidence. However, unlike [42, 47, 52, 54, 58], we can learn a better feature representation by considering two strongly-augmented views, while improving semi-supervised learning performance at the same time. It should be noted that this simple loss function can be incorporated with any semi-supervised learner, e.g., FixMatch [47] or FlexMatch [58].
62
+
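+ A minimal PyTorch sketch of Eq. (3) is given below, assuming logits_i and logits_j are the two strong branches' outputs and c_i, c_j are per-sample confidences that sum to one (names are ours, not the paper's):
+
+ ```python
+ import torch.nn.functional as F
+
+ def ccr_loss(logits_i, logits_j, c_i, c_j):
+     q_i = logits_i.argmax(dim=1)   # hard pseudo-label of branch i
+     q_j = logits_j.argmax(dim=1)   # hard pseudo-label of branch j
+     # each branch is supervised by the other's pseudo-label, weighted by
+     # the confidence of that pseudo-label (targets carry no gradient)
+     loss_i = F.cross_entropy(logits_j, q_i.detach(), reduction="none")
+     loss_j = F.cross_entropy(logits_i, q_j.detach(), reduction="none")
+     return (c_i * loss_i + c_j * loss_j).mean()
+ ```
+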
63
+ To measure the confidences ci(r) and cj(r), we present two kinds of confidence estimators, based on non-parametric and parametric approaches. In the following, we explain how to measure these confidences in detail.
64
+
65
+ Existing semi-supervised learning methods [30, 44, 47] have selected unlabeled samples with high confidence as training targets (i.e., pseudo-labels) in a straightforward way, which can be viewed as a form of entropy minimization [19]. It is well known that setting an appropriate threshold for such handcrafted confidence estimation is non-trivial, and thus confidence-based strategies commonly suffer from a dilemma between pseudo-label exploration and accuracy depending on the threshold [1, 34].
66
+
67
+ In our framework, estimating the confidence of pseudo-labels from strong branches may suffer from similar limitations if the conventional handcrafted methods [30, 44, 47] are simply used. To overcome this, we present a novel way to measure the confidences, ci(r) and cj(r), based on the similarity between the outputs of strongly-augmented and weakly-augmented images. Based on the hypothesis that the similarity between the logits or probabilities from strongly-augmented and weakly-augmented images can be directly used as a confidence estimator, we propose to measure the confidence of each strong branch
68
+
69
+ ```
+ 1: Notation: strong augmentation \mathcal{A}, weak augmentation \alpha, model p_{\text{model}}(\cdot;\theta)
+    consisting of feature encoder f and classifier g, confidence estimator h(\cdot; \theta_{\text{conf}}),
+    pseudo label q, learnable confidence c
+ 2: Input: \mathcal{X} = \{(x_b, y_b) : b \in (1, ..., B)\}, \ \mathcal{U} = \{u_b : b \in (1, ..., \mu B)\}
+ 3: for b = 1 to B do
+ 4:   F(\alpha(x_b)), L(\alpha(x_b)) = f(\alpha(x_b)), g(f(\alpha(x_b)))
+ 5:   c(\alpha(x_b)) = h(F(\alpha(x_b)), L(\alpha(x_b)); \theta_{conf})
+ 6:   if y_b == \operatorname{argmax}_{y} p_{\text{model}}(y|\alpha(x_b);\theta) then
+ 7:     c_{\rm GT}(\alpha(x_b)) = 1
+ 8:   else
+ 9:     c_{\rm GT}(\alpha(x_b)) = 0
+ 10:  end if
+ 11: end for
+ 12: \mathcal{L}_{\text{sup}} = \sum_{b=1}^{B} \mathcal{H}(y_b, p_{\text{model}}(y|\alpha(x_b); \theta))
+ 13: \mathcal{L}_{\text{conf-sup}} = \mathcal{H}(c_{\text{GT}}(\alpha(x_b)), h(F(\alpha(x_b)), L(\alpha(x_b)); \theta_{\text{conf}}))
+ 14: for b = 1 to \mu B do
+ 15:   (F_i, F_j), (L_i, L_j) = f(\mathcal{A}_i(u_b), \mathcal{A}_j(u_b)), g(f(\mathcal{A}_i(u_b), \mathcal{A}_j(u_b)))
+ 16:   c_i, c_j = h(F_i, L_i; \theta_{conf}), h(F_j, L_j; \theta_{conf})
+ 17:   Generate pseudo labels for differently augmented versions \alpha, \mathcal{A}_i, \mathcal{A}_j
+ 18: end for
+ 19: Calculate \mathcal{L}_{un} using c, q from \alpha(u_b), and p_{\text{model}}(y|\mathcal{A}(u_b);\theta) via Eq. 1
+ 20: Calculate \mathcal{L}_{ccr} using c, q from \mathcal{A}(u_b) and p_{\text{model}}(y|\mathcal{A}(u_b);\theta) via Eq. 3
+ 21: Calculate \mathcal{L}_{conf} using c from \mathcal{A}(u_b) and p_{\text{model}}(y|\alpha(u_b);\theta) via Eq. 6
+ 22: Update \theta by minimizing \mathcal{L}_{\text{sup}}, \mathcal{L}_{\text{un}} and \mathcal{L}_{\text{ccr}}
+ 23: Update \theta_{\text{conf}} by minimizing \mathcal{L}_{\text{conf-sup}} and \mathcal{L}_{\text{conf}}
+ 24: Return: Model parameters \{\theta, \theta_{conf}\}
+ ```
106
+
107
+ by the cross-entropy loss value itself between strongly-augmented and weakly-augmented images. Specifically, we measure such a confidence with the following:
108
+
109
+ $$s_i(r) = \frac{1}{\mathcal{H}(p_{\text{model}}(y|\alpha(r);\theta), p_{\text{model}}(y|\mathcal{A}_i(r);\theta))},$$
110
+ (4)
111
+
112
+ where the smaller $\mathcal{H}(p_{\text{model}}(y|\alpha(r);\theta),p_{\text{model}}(y|\mathcal{A}_i(r);\theta))$ , the higher $s_i(r)$ is. $s_j(r)$ can be similarly defined with $\alpha(r)$ and $\mathcal{A}_j(r)$ . Finally, $c_i(r)$ is computed such that $c_i(r) = s_i(r)/(s_i(r) + s_j(r))$ , and $c_j(r)$ is similarly computed.
113
+
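+ A small sketch of this non-parametric estimator, assuming p_weak, p_strong_i, p_strong_j are (N, C) class-probability outputs for the weak and the two strong views (tensor names are ours):
+
+ ```python
+ import torch
+
+ def batch_cross_entropy(p, q, eps=1e-8):
+     # H(p, q) = -sum_c p_c log q_c, computed per sample
+     return -(p * (q + eps).log()).sum(dim=1)
+
+ def nonparametric_confidence(p_weak, p_strong_i, p_strong_j):
+     s_i = 1.0 / batch_cross_entropy(p_weak, p_strong_i)   # Eq. (4)
+     s_j = 1.0 / batch_cross_entropy(p_weak, p_strong_j)
+     c_i = s_i / (s_i + s_j)
+     return c_i, 1.0 - c_i
+ ```
+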
114
+ In this case, the total loss for the non-parametric approach is as follows:
115
+
116
+ $$\mathcal{L}_{total}^{np} = \lambda_{sup} \mathcal{L}_{sup} + \lambda_{un} \mathcal{L}_{un} + \lambda_{ccr} \mathcal{L}_{ccr},$$
117
+ (5)
118
+
119
+ where $\lambda_{\text{sup}}$ , $\lambda_{\text{un}}$ , and $\lambda_{\text{ccr}}$ are weights for $\mathcal{L}_{\text{sup}}$ , $\mathcal{L}_{\text{un}}$ , and $\mathcal{L}_{\text{ccr}}$ , respectively. Note that for weakly-augmented labeled images $\alpha(x_b)$ with labels $y_b$ , a simple classification loss $\mathcal{L}_{\text{sup}}$ is applied as $\mathcal{H}(y_b, p_{\text{model}}(y|\alpha(x_b); \theta))$ , as done in [47].
120
+
121
+ Even though the above non-parametric confidence estimator yields comparable performance to some extent (which will be discussed in experiments), it depends solely on each image, and thus may be sensitive to outliers or errors, having no module to learn a prior from the dataset. To overcome this, we present an additional parametric approach for confidence estimation. Motivated by stereo confidence estimation [11, 41, 45, 49], which obtains a confidence measure from a network by extracting confidence features from the input and predicting the confidence with a classifier, we also introduce a learnable confidence measure for pseudo-labels. Unlike existing methods that simply use the model output as confidence [42, 47, 52, 54, 58], such a learned confidence can intelligently select a subset of pseudo-labels that are less noisy, which helps the network converge significantly faster and achieve improved performance by utilizing the false negative samples that a high threshold would exclude from training at early training iterations.
122
+
123
+ Specifically, we define an additional network for learnable confidence estimation such that c(r) = h(F(r), L(r); θconf), where h(·) is a confidence estimator with model parameters θconf, F(r) is a feature, and L(r) is a logit from an instance r, as shown in Fig. 2. For the network architecture, the concatenation of the feature F(r) and logit L(r), each transformed by an individual non-linear projection head, is used, based on the intuition that a direct concatenation of their two heterogeneous confidence features does not provide optimal performance [26], followed by a final classifier for confidence estimation. The detailed network architecture is described in the supplementary material.
124
+
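+ A rough sketch of such an estimator h is shown below, assuming feature dimension d_f and num_classes classes; the two inputs get separate projection heads before concatenation, per the intuition above (layer sizes are ours, not the paper's):
+
+ ```python
+ import torch
+ import torch.nn as nn
+
+ class ConfidenceEstimator(nn.Module):
+     def __init__(self, d_f, num_classes, d_h=128):
+         super().__init__()
+         self.proj_feat = nn.Sequential(nn.Linear(d_f, d_h), nn.ReLU())
+         self.proj_logit = nn.Sequential(nn.Linear(num_classes, d_h), nn.ReLU())
+         self.head = nn.Sequential(nn.Linear(2 * d_h, d_h), nn.ReLU(),
+                                   nn.Linear(d_h, 1), nn.Sigmoid())
+
+     def forward(self, feature, logit):
+         z = torch.cat([self.proj_feat(feature), self.proj_logit(logit)], dim=1)
+         return self.head(z).squeeze(1)  # confidence c(r) in (0, 1)
+ ```
+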
125
+ The confidence estimator is learned with the following loss function:
126
+
127
+ $$\mathcal{L}_{\text{conf}} = c_i(r)\mathcal{H}(p_{\text{model}}(y|\alpha(r);\theta_{\text{freeze}}), p_{\text{model}}(y|\mathcal{A}_i(r);\theta_{\text{freeze}})) + \log(1/c_i(r)),$$
128
+ (6)
129
+
130
+ where θfreeze denotes frozen network parameters with a stop gradient. The intuition behind this is that during confidence network training, we just want the network to learn the confidence itself, rather than collapse to a trivial solution by learning the feature encoder simultaneously. In addition, we also use a supervised loss for the confidence estimator, Lconf−sup = H(cGT, h(F(α(xb)), L(α(xb)); θconf)); cGT = 1 if yb is equal to argmaxy pmodel(y|α(xb); θ), and cGT = 0 otherwise.
131
+
132
+ The total loss for the parametric case can be written as
133
+
134
+ $$\mathcal{L}_{\text{total}}^{\text{param}} = \lambda_{\text{sup}} \mathcal{L}_{\text{sup}} + \lambda_{\text{un}} \mathcal{L}_{\text{un}} + \lambda_{\text{conf}} \mathcal{L}_{\text{conf}} + \lambda_{\text{conf}-\text{sup}} \mathcal{L}_{\text{conf}-\text{sup}} + \lambda_{\text{ccr}} \mathcal{L}_{\text{ccr}}$$
135
+ (7)
136
+
137
+ where λconf and λconf−sup are the weights for Lconf and Lconf−sup, respectively. We describe the algorithm for the parametric variant of ConMatch in Alg. 1.
138
+
139
+ Even though our framework can be trained in an end-to-end manner, we further propose a stage-wise training strategy to boost the convergence of training. This stage-wise training consists of three stages: 1) pre-training the feature encoder, 2) pre-training the confidence estimator (for the parametric approach only), and 3) fine-tuning both the feature encoder and the confidence estimator (for the parametric approach only). Specifically, we first warm up the feature encoder by solely using the standard semi-supervised loss functions Lsup and Lun. We then train the confidence estimator based on the outputs of the pre-trained feature encoder in the parametric approach. As mentioned in [27], this kind of simple technique greatly boosts convergence in discriminating between confident and unconfident outputs from the networks. Finally, we fine-tune all the networks with the proposed confidence-guided consistency regularization loss Lccr. We empirically demonstrate the effectiveness of the stage-wise training by achieving state-of-the-art results on standard benchmark datasets [12, 28, 37].
2208.13614/main_diagram/main_diagram.drawio ADDED
@@ -0,0 +1 @@
1
+ <mxfile host="app.diagrams.net" modified="2022-03-14T13:59:17.210Z" agent="5.0 (X11)" version="15.9.6" etag="0WXdSUSQq2DM-hbXScoL" type="google"><diagram id="DIsHiEyZWg3WmBMyMd3v">7Zpbb5swFMc/TV4rsLk+tkm7TeqkatW09dEFB9gIjhzntk8/U2yw41SJGjKvDXmI8N8XzDm/YziGERzPNp8omudfSYrLEXDSzQhORgAEfsj/a2HbCDCEjZDRIm0ktxMeiz9YiI5Ql0WKF1pDRkjJirkuJqSqcMI0DVFK1nqzKSn1s85Rhg3hMUGlqf4oUpY3auQ7nf4ZF1kuz+w6omaGZGMhLHKUkrUiwdsRHFNCWHM024xxWdtO2qXpd/dKbTsxiit2TAcgpsG28tpwyi9VFAllOclIhcrbTr2hZFmluB7A4aWuzT0hcy66XPyFGdsKv6ElI1zK2awUtXhTsJ919ytflJ6UmslGjPxS2MpCxehW6VQXn9S6rttLSfZrrq++KM1CC7KkiZAEewzRDAuj+aYd3dY7nGpMZpifhTehuESsWOmjI8FX1rbrXMAPhBf2e0TMZoXKpRj0y6zGETiw1jfageE93TfrvGD4cY5ernTNQ1H3gzgTpgxvXuXnlesWHUAoQBaRHEmw111cuFLLlZjwnNNN5dmE11XQ7UA+BK+Gbkfy2+H1TXhDe/D6BrxjUq3A5DROp0VZjklJ6EtfmPo4Sj2uLxglv7FSE4FnGARnIjsyyQ72gA17ADsYwJYUq2C7nj2yQ4Psb/j+e69cT6c4SJJ9XKdh/Ow4/XANvcNc+2fiOhq4BvEeriN7XMcG1yMQlHwSN2mx4odZfXi9yh7487Ws4WMqlf2u7QhH070xECQRfp72EwM+tLe2yxzmooPAlVmbGgXAXhTI+ShhcMdPwXB1Gt09sBp4vsZqDP/dei1dophlgqsFtm6U0DlslLMFsJl1XWAAe3sCOLAYwJ5B6rvNPDzH4t3JH+BuQVbhji2yHRhsv9Pcwzsipz7bvSwcyG4zDZVsuTtnBe3o4yzbgc1lOx7gbkHW4LaYVMj5vP91O7C4bree/u/RPgbRM+Aouj6Qgo/Yuixyd1wW+1dO7PMlr/nfGbCJGTHGjnPaSR3nLzNnvIAdpdDijhIYElLQvgzUFn+LCSn4QAkpDO3ttoAhIe1A1uC2+NIAfJyM1OLuKhgy0g5jFW1oMSMFZkZ6AY8vvsX9dDDkrp0htf30493Y/1doZu56CWFg8UEHmq8gzbio0uv681VeSkq0WBSJbjTdwj3jqj18HP8lzoEbqdROTHehz9Nb5efqfpR3eTlgE3pGumsOu5NFx7uefnPezIvdV75N8+5TaXj7Fw==</diagram></mxfile>
2208.13614/main_diagram/main_diagram.pdf ADDED
Binary file (9.34 kB). View file
 
2208.13614/paper_text/intro_method.md ADDED
@@ -0,0 +1,498 @@
1
+ # Method
2
+
3
+ The NTK is a kernel, therefore it can be used in any kernel method, e.g. kernel ridge regression or kernel SVM. However, computing the kernel Gram matrix on a dataset of size $m$ requires $O(m^2)$ time, which is infeasible for large datasets. One can either rely on certain approximations, e.g. the Nyström approximation, see Section 5, or restrict oneself to small datasets.
4
+
5
+ One possible advantage of kernel methods over neural nets is lower variance. Indeed, the only variance of a kernel method is induced by sampling the dataset, while a neural network has several more sources of variance, e.g. initialization randomness and batch sampling. This difference in variances is likely especially important when the dataset is small.
6
+
7
+ The other advantage of kernel methods is having a smaller number of hyperparameters compared to neural nets. This makes kernel methods useful as robust baselines that may outperform large neural nets when there is no budget for careful hyperparameter tuning. As an illustration, [@arora2019harnessing] demonstrated that kernel regression with a 14-layer CNTK consistently outperforms ResNet-34 trained with standard hyperparameters on random subsets of CIFAR-10 with $\leq 640$ samples.
8
+
9
+ There are other setups where computing the Gram matrix on a small dataset is sufficient. For example, [@chen2021neural] proposes the condition number of the NTK Gram matrix as a proxy-measure of a given architecture's performance; this proxy-measure is then used to guide neural architecture search (NAS). In this case, we do not need the Gram matrix itself but only its condition number, which motivates computing the matrix on a small subset of examples. While the condition number of a random-subset Gram matrix provides only a random estimate, possibly noisy and biased, of the true condition number, the way we use it does not require exact estimates. Indeed, a performance measure in NAS algorithms is mainly used to cut off pathological, low-performing models from a population, rather than to find the best one. Therefore any measure that correlates positively with performance suffices.
10
+
11
+ The use of the condition number as a proxy-measure of performance relies on two hypotheses: (1) performance correlates with trainability, and (2) trainability correlates with the NTK condition number. The first hypothesis is mainly motivated by the natural implication "bad trainability implies low performance". To motivate the second hypothesis, let us consider kernel ridge regression trained with the usual discrete-time gradient descent: $$\begin{equation}
12
+ f_{t+1}(\vec x)
13
+ = f_t(\vec x) + \eta \Theta(\vec x, \vec x) (\vec y - f_t(\vec x)),
14
+ \end{equation}$$ where now $t$ is a discrete time-step and $\eta$ is a learning rate.
15
+
16
+ Consider eigenvalue decomposition of the kernel: $\Theta(\vec x, \vec x) = \sum_{k=1}^m \lambda_k \vec v_k \vec v_k^T$, where $\lambda_1 \geq \ldots \geq \lambda_m \geq 0$, and $(\vec v_k)_{k=1}^m$ forms an orthonormal basis. Let us decompose our model's predictions as $f_t(\vec x) = \sum_{k=1}^m u_{t,k} \vec v_k$. Then the dynamics above decomposes as $$\begin{equation}
17
+ u_{t+1,k}
18
+ = u_{t,k} + \eta \lambda_k (\vec y^T \vec v_k - u_{t,k}).
19
+ \end{equation}$$ This gives $$\begin{equation}
20
+ u_{t+1,k} - \vec y^T \vec v_k
21
+ = (1 - \eta \lambda_k) (u_{t,k} - \vec y^T \vec v_k),
22
+ \end{equation}$$ and the solution is therefore $$\begin{equation}
23
+ u_{t,k}
24
+ = \vec y^T \vec v_k + (1 - \eta \lambda_k)^t (u_{0,k} - \vec y^T \vec v_k).
25
+ \end{equation}$$
26
+
27
+ The dynamics above converges as $t \to \infty$ for any $u_{0,k}$ if and only if $\eta < 2 / \lambda_k$. Since this should hold for all $k \in [m]$ and the maximal $\lambda$ is $\lambda_1$, we need to have $\eta < 2 / \lambda_1$. Therefore the $m$-th principal component converges at rate $\eta \lambda_m < 2 \lambda_m / \lambda_1$. $\kappa = \lambda_m / \lambda_1$ is our condition number. We see that small condition number implies low trainability and thus, by the first hypothesis, low performance.
28
+
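+ As a concrete sketch, this proxy can be computed from per-example jacobians on a mini-batch (function and variable names here are ours, not from the cited papers):
+
+ ```python
+ import numpy as np
+
+ def condition_number_proxy(jac):
+     """jac: (m, N) per-example jacobians of the network output w.r.t. weights."""
+     gram = jac @ jac.T              # empirical NTK Gram matrix, (m, m)
+     eig = np.linalg.eigvalsh(gram)  # eigenvalues in ascending order
+     return eig[0] / eig[-1]         # kappa = lambda_m / lambda_1
+ ```
+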
29
+ Using a combination of two proxy-measures, the condition number and the number of linear regions (we do not discuss it here), [@chen2021neural] constructed a NAS method that provided state-of-the-art performance on NAS-Bench-201 [@dong2020bench] while requiring much less time than most of the other methods. [@chen2021neural] tested their method on CIFAR10 and ImageNet as well. In both cases, their method demonstrated competitive performance while using orders of magnitude less time.
30
+
31
+ In some cases, posing the problem as kernel regression allows for certain optimizations. In particular, [@radhakrishnan2021simple] proposed approaching the problem of matrix completion by minimizing the following loss: $$\begin{equation}
32
+ \mathcal{L}(\theta)
33
+ = \sum_{(i,j) \in S} (Y_{ij} - \mathop{\mathrm{tr}}(f(Z;\theta) M^{(ij)}))^2,
34
+ \end{equation}$$ where $S \subset [k] \times [d]$ is a set of coordinates of known entries of the target matrix $Y \in \mathbb{R}^{k \times d}$, $M^{(ij)} \in \mathbb{R}^{k \times d}$ has $1$ at position $(i,j)$ and $0$ elsewhere, $f(\cdot;\theta)$ is a neural network with parameters $\theta$, $n_0$ inputs and $k$ outputs, and $Z \in \mathbb{R}^{n_0 \times d}$ is an a-priori given matrix. The model $f$ is applied to each column of $Z$ separately, therefore $f(Z;\theta)$ is a $k \times d$ matrix.
35
+
36
+ The above setup can be treated as a usual $l_2$ regression problem on a dataset $(Y_{ij}, M^{(ij)})_{(i,j) \in S}$. The corresponding empirical NTK is defined as $\hat K(M^{(ij)}, M^{(i'j')}) = \nabla^T_\theta \mathop{\mathrm{tr}}(f(Z;\theta) M^{(ij)}) \nabla_\theta \mathop{\mathrm{tr}}(f(Z;\theta) M^{(i'j')})$. Naturally, it does not depend on the target matrix entries $Y$, and since there is only a finite set of possible inputs $M^{(ij)}$ (namely, $kd$), the resulting $kd \times kd$ Gram matrix will be the same for all possible matrix completion problems of given target matrix dimensions. In other words, one can precompute the Gram matrix once and use it for all matrix completion problems of given dimensions. In contrast, the original neural network formulation would require training a new network for each dataset $(Y_{ij}, M^{(ij)})_{(i,j) \in S}$.
37
+
38
+ When $f(\cdot;\theta)$ is given by a fully-connected network with $L$ layers, [@radhakrishnan2021simple] provide a closed-form formula for its limit NTK: $K(M^{(ij)}, M^{(i'j')}) = \kappa_L\left(z_{\cdot,j}^T z_{\cdot,j'}\right) 1_{i=i'}$, where $\kappa_L$ is given by a certain recurrent relation. As we see, according to this kernel, elements of different rows of $Y$ are orthogonal (they do not affect each other), while the similarity of elements of the same row is given by the scalar product of the corresponding columns of $Z$. Therefore the columns of $Z$ encode a-priori similarities between the columns of $Y$.
39
+
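+ Given this block structure (rows of $Y$ are mutually orthogonal under the kernel), the full $kd \times kd$ Gram matrix can be assembled from a single $d \times d$ column-similarity matrix, e.g. as in the following sketch (assuming some scalar map kappa_L applied elementwise; its recurrence is omitted here):
+
+ ```python
+ import numpy as np
+
+ def mc_ntk_gram(Z, kappa_L, k):
+     """Z: (n0, d) feature-prior matrix; returns the (k*d, k*d) Gram matrix."""
+     col_gram = kappa_L(Z.T @ Z)          # kappa_L(z_j^T z_j'), elementwise
+     return np.kron(np.eye(k), col_gram)  # the 1_{i=i'} factor across rows
+ ```
+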
40
+ The matrix $Z$ is called a feature-prior matrix. The ideal feature-prior matrix would be the target matrix $Y$ itself. Since one does not have access to it, [@radhakrishnan2021simple] suggest using the output $\hat Y$ of a separate matrix completion method instead. The resulting joint method performs better than the backbone one on popular collaborative filtering and virtual drug screening datasets.
41
+
42
+ Image inpainting can be viewed as a special case of matrix completion. Apart from using the same Gram matrix for all problems of a given size, image inpainting with convolutional networks allows for one more optimization.
43
+
44
+ When $f$ is a convolutional network, we pose the problem a bit differently from above. Suppose $f$ has $n_0$ input channels, $1$ output channel, and it maps an image to an image of the same size. Suppose $Z \in \mathbb{R}^{n_0 \times 2^p \times 2^q}$ and it is treated as a $2^p \times 2^q$ image with $n_0$ channels. This is in contrast to the previous considerations, where $Z$ was a matrix with columns treated as different inputs to a vector-valued model. Similar to the above, $Y \in \mathbb{R}^{2^p \times 2^q}$ is a target image, and $M^{(ij)}$ of the same size has $1$ at $(i,j)$ and zero elsewhere.
45
+
46
+ Note that $f$ applied to the "image" $Z$ has a $2^p \times 2^q$ output and therefore its NTK $\Theta$ is a $2^p \times 2^q \times 2^p \times 2^q$ tensor. Suppose $f$ has no downsampling or upsampling layers. [@radhakrishnan2021simple] provides an exact formula for the corresponding limit NTK in terms of the limit NTK of the model $f$ in this case: $K(M^{(ij)}, M^{(i'j')}) = \Theta(Z,Z)_{i,j,i',j'}$.
47
+
48
+ Now suppose $f$ has $s$ downsampling and $s$ upsampling layers. Computing the Gram matrix for its NTK requires $O(2^{2p+2q})$ memory and $O(L 2^{2p+2q})$ time, where $L$ is the number of convolutions in $f$. This is already prohibitive for moderate-size images, i.e. when $p, q \approx 10$. [@radhakrishnan2021simple] propose a way to reconstruct the $2^p \times 2^q \times 2^p \times 2^q$ Gram matrix from a smaller Gram matrix of size $2^{2s+p+q}$. Moreover, this smaller Gram matrix requires computing the "usual" Gram matrices only for images of size $2^{s+1} \times 2^{s+1}$, which takes only $O(L 2^{4s})$ time.
49
+
50
+ Even in the case when the NTK Gram matrix can be computed and stored, the exact infinite-width solution for the square loss requires inverting the kernel Gram matrix, which costs $O(m^3)$ when performed naively. Fortunately, mixing continuous-time and discrete-time formulations allows one to avoid computing the inverse explicitly.
51
+
52
+ Denote $H_{t,ij} = \hat\Theta_t(x_i,x_j)$, $Z_{t,ik} = \partial_{\theta_i} f(x_k;\theta)$, and $u_{t,k} = f_t(x_k)$. Note that $H_t = Z_t^T Z_t$. Discrete-time weight evolution with learning rate $\eta$ is given by $$\begin{equation}
53
+ \theta_{t+1}
54
+ = \theta_t + \eta Z_t (\vec y - \vec u_t).
55
+ \end{equation}$$ Recall that assuming a stationary kernel $H_t = H_0$ is equivalent to assuming a stationary jacobian $Z_t = Z_0$. With this assumption, the dynamics above is solved as $$\begin{equation}
56
+ \theta_t
57
+ = \theta_0 + \eta Z_0 \sum_{s=0}^{t-1} (\vec y - \vec u_s).
58
+ \end{equation}$$ Recall that integrating continuous-time gradient descent dynamics under assumption $H_t = H_0$ gives $$\begin{equation}
59
+ \vec u_s
60
+ = \vec y + e^{-\eta s H_0} (\vec u_0 - \vec y).
61
+ \end{equation}$$ Combining the two latter equations, we get the weights at any time-step $t$: $$\begin{equation}
62
+ \theta_t
63
+ = \theta_0 + \eta Z_0 \sum_{s=0}^{t-1} e^{-\eta s H_0} (\vec y - \vec u_0).
64
+ \end{equation}$$ The continuous analogue of the above evolution is obtained by replacing the sum with an integral: $$\begin{equation}
65
+ \theta_t
66
+ = \theta_0 + \eta Z_0 \int_0^t e^{-\eta s H_0} (\vec y - \vec u_0) \, ds
67
+ = \theta_0 + Z_0 H_0^{-1} \left(I - e^{-\eta t H_0}\right) (\vec y - \vec u_0).
68
+ \end{equation}$$ Here we get the inverse, as expected.
69
+
70
+ Note that in this approach we do not assume the network to be infinitely wide; we just assume it to be linear in its weights. This allows us to reason in terms of the network weight vector $\theta_t$ instead of reasoning in terms of some abstract feature space associated with the kernel. This aspect gives us one additional advantage: we can integrate the dynamics up to some time $t_1$ and, since we know the weights $\theta_{t_1}$, compute $Z_{t_1}$ and $H_{t_1}$. We can then proceed with integration using these updated matrices. This method lies in between the usual gradient descent training and kernel gradient descent with a constant kernel. The latter never updates the kernel, while the former updates the kernel at each timestep. In contrast, the method we discuss updates the kernel only at given timesteps.
71
+
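+ A sketch of this intermediate scheme is below, assuming a user-supplied jacobian(theta) routine returning $Z$ of shape $N \times m$ (names are ours); within each window the kernel is frozen and no Gram-matrix inverse is ever formed:
+
+ ```python
+ import numpy as np
+
+ def piecewise_ntk_descent(theta, u, y, jacobian, eta, steps_per_refresh, n_refresh):
+     for _ in range(n_refresh):
+         Z = jacobian(theta)      # refresh the jacobian at the current weights
+         H = Z.T @ Z              # empirical NTK Gram matrix for this window
+         for _ in range(steps_per_refresh):
+             residual = y - u
+             theta = theta + eta * (Z @ residual)   # weight update
+             u = u + eta * (H @ residual)           # induced prediction update
+     return theta, u
+ ```
+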
72
+ The approach under discussion requires computing and storing $Z$ of size $N \times m$, which is an obvious disadvantage. As a remedy, [@yue2021neural] propose splitting the job of computing $Z$ between several workers. A server joins the parts together, integrates the dynamics up to some timestep $t$, and sends $\theta_t$ to all of the workers, starting a new iteration. Tuning the timesteps of kernel updates may help balance the load between the server and the workers. The data used to compute $Z$ is never stored on the server, making this approach promising for federated learning. However, since the server may attempt to reconstruct the data from $Z$, one has to ensure each worker's privacy cannot be compromised; see [@yue2021neural] for further details.
73
+
74
+ <figure id="fig:image_regression"><figcaption>Images are borrowed from [@tancik2020fourier].</figcaption></figure>
75
+
76
+ <figure id="fig:low_dim_regression"><figcaption>Images are borrowed from [@tancik2020fourier].</figcaption></figure>
84
+
85
+ While the empirical NTK of a neural network is not the same as its limit NTK, they may have certain properties in common. In particular, certain issues of a finite-width network may be reflected in issues of its limit NTK, and fixing these issues in the limit NTK may result in fixing them in the finite-width net.
86
+
87
+ As an example where this approach is proven to work, consider image regression. In this task, input samples are image coordinates, $x \in [0,1]^d$ for $d=2$, and targets are pixel colors; we assume grey-scale images with $y \in [0,1]$. The task is therefore to regress the full image given a set of pixels.
88
+
89
+ Let us consider applying a fully-connected network for this task. As we have already observed in [4.1](#sec:limit_fc_nets){reference-type="ref+label" reference="sec:limit_fc_nets"}, the limit NTK $\Theta(x,x')$ of a fully-connected network depends only on $x^T x$, $x^{\prime,T} x'$, and $x^T x'$. All of these terms are rotation-invariant, hence the kernel itself is rotation-invariant. However, none of this terms is translation-invariant, hence the kernel cannot be translation-invariant (otherwise, it has to be constant). Therefore it is quite unlikely that the empirical kernel will be invariant to translations.
90
+
91
+ On the other hand, both translation and rotation invariance are desirable for a kernel used for image regression. Indeed, this means that applying these transformations to the train set of pixels results in the same image as without them, up to translation and rotation. In order to achieve this property, one may start working on translationaly invariant embeddings of image coordinates. The simplest non-trivial embedding of this kind is $z(x) = [\cos(2\pi x), \sin(2\pi x)]^T$, where $\cos$ and $\sin$ are applied elementwise. Following [@tancik2020fourier], we shall refer it as \"basic\". Comparing (b) and (c) of Figure [1](#fig:image_regression){reference-type="ref" reference="fig:image_regression"}, this indeed results in better perceived quality.
92
+
93
+ However the regressed image is still blurry: see Figure [1](#fig:image_regression){reference-type="ref" reference="fig:image_regression"} (c). As we shall see shortly, NTK kernel regression learns low-frequency components of the image before its high-frequency ones. If we assume that the same property holds for the corresponding finite-width net then achieving sharp images may be impossible for a given number of gradient steps.
94
+
95
+ Recall the training dynamics of a kernel regression with kernel $\Theta$ trained to minimize square loss on a training dataset $(\vec x, \vec y)$: $$\begin{equation}
+ \dot f_t(\vec x)
+ = \Theta(\vec x, \vec x) (\vec y - f_t(\vec x)).
+ \end{equation}$$ $\Theta$ is a kernel, therefore its Gram matrix is positive-semidefinite. Consider its eigenvalue decomposition: $\Theta(\vec x, \vec x) = \sum_{k=1}^m \lambda_k \vec v_k \vec v_k^T$, where $\lambda_1 \geq \ldots \geq \lambda_m \geq 0$, and $(\vec v_k)_{k=1}^m$ forms an orthonormal basis.
+
+ Let us decompose our model's predictions as $f_t(\vec x) = \sum_{k=1}^m u_{t,k} \vec v_k$. Then the dynamics above decomposes as $$\begin{equation}
+ \dot u_{t,k}
+ = \lambda_k (\vec v_k^T \vec y - u_{t,k}),
+ \end{equation}$$ which solves as $$\begin{equation}
+ u_{t,k}
+ = \vec v_k^T \vec y - e^{-\lambda_k t} (\vec v_k^T \vec y - u_{0,k}).
+ \end{equation}$$
+
+ As one clearly sees, the time required to learn the $k$-th principal component of the target is inversely proportional to its strength $\lambda_k$. In other words, strong components are learned before weak ones.
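+
+ As a quick numerical illustration (ours; a synthetic PSD Gram matrix rather than an actual NTK), one can integrate these dynamics in the eigenbasis and watch the strong components converge first:
+
+ ```python
+ import numpy as np
+
+ rng = np.random.default_rng(0)
+ m = 50
+ Q, _ = np.linalg.qr(rng.standard_normal((m, m)))   # random orthonormal eigenbasis
+ lams = np.logspace(0, -3, m)                       # lambda_1 = 1 >> lambda_m = 1e-3
+ y = rng.standard_normal(m)
+
+ for t in [1.0, 10.0, 100.0]:
+     # residual y - f_t = exp(-H t) y for f_0 = 0, computed in the eigenbasis of H
+     proj = np.exp(-lams * t) * (Q.T @ y)           # residual along each eigenvector
+     print(t, abs(proj[0]), abs(proj[-1]))          # top mode vanishes fast, bottom barely moves
+ ```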
+
+ The question is: what are the eigenvectors of the NTK Gram matrix? It is hard to answer this question in general since a Gram matrix depends on the dataset. However, for a kernel, there is an analogue of eigenvalue decomposition called Mercer's representation.
+
+ Let $X$ be a compact metric space and let $\mu$ be a sigma-additive measure on $X$ with $\mathop{\mathrm{supp}}\mu = X$. Suppose $K: \; X \times X \to \mathbb{R}$ is continuous, symmetric, and satisfies $\int_X \int_X K(x,x') f(x) f(x') \, d\mu(x) \, d\mu(x') < \infty$ $\forall f \in L^2_\mu(X)$. Define the integral operator $T_K: L^2_\mu(X) \to L^2_\mu(X)$ as $T_K[f](x) = \int_X K(x,x') f(x') \, d\mu(x')$. Then this operator admits an eigenvalue decomposition with eigenfunctions $(\psi_k)_{k=1}^\infty$ and corresponding eigenvalues $(\lambda_k)_{k=1}^\infty$, and the set of eigenfunctions forms an orthonormal basis in $L^2_\mu(X)$. Mercer's representation is the corresponding decomposition of the kernel: $$\begin{equation}
+ K(x,x')
+ = \sum_{k=1}^\infty \lambda_k \psi_k(x) \psi_k(x').
+ \end{equation}$$ The series converges uniformly in $X \times X$.
+
+ From the above, we have $\int_X \int_X K(x,x') \psi_k(x) \psi_k(x') \, d\mu(x) \, d\mu(x') = \lambda_k$ $\forall k \geq 1$. Hence if $\vec x = (x_k)_{k=1}^m$ and $\vec x' = (x'_k)_{k=1}^m$ are sampled iid from $\mu$ then $$\begin{multline}
+ \frac{1}{m^2} \psi_k^T(\vec x) K(\vec x, \vec x') \psi_k(\vec x')
+ =\\= \frac{1}{m^2} \sum_{i,j=1}^m K(x_i, x'_j) \psi_k(x_i) \psi_k(x'_j)
+ \to \int_X \int_X K(x,x') \psi_k(x) \psi_k(x') \, d\mu(x) \, d\mu(x')
+ = \lambda_k
+ \end{multline}$$ a.s. as $m \to \infty$ by the Law of Large Numbers (LLN). Note that considering $\psi^T(\vec x) K(\vec x, \vec x) \psi(\vec x)$ instead of $\psi^T(\vec x) K(\vec x, \vec x') \psi(\vec x')$ may result in a different limit because the diagonal of $K$ is now calculated on two dependent arguments. Nevertheless, there are only $m$ elements on the diagonal, which results in an $O(m^{-1})$ error vanishing in the limit. Hence $$\begin{equation}
+ \frac{1}{m^2} \psi_k^T(\vec x) K(\vec x, \vec x) \psi_k(\vec x)
+ \to \lambda_k
+ \end{equation}$$ a.s. as $m \to \infty$. In other words, given $\vec x$ sampled iid from $\mu$, $(\psi_k(\vec x))_{k=1}^m$ are approximately the eigenvectors of $K(\vec x, \vec x)$ with eigenvalues $(m \lambda_k)_{k=1}^m$, since $\|\psi_k(\vec x)\|_2^2 \approx m$.
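+
+ A minimal numerical sanity check (ours): take a hand-built translation-invariant kernel on the circle, $K(\theta,\theta') = \sum_k c_k \cos(k(\theta - \theta'))$, whose Mercer eigenvalues under the uniform probability measure are $\lambda_k = c_k/2$ (each of multiplicity two, from the cosine and sine harmonics), and compare the Gram eigenvalues with $m \lambda_k$:
+
+ ```python
+ import numpy as np
+
+ rng = np.random.default_rng(0)
+ m = 2000
+ theta = rng.uniform(0.0, 2.0 * np.pi, m)
+
+ c = np.array([1.0, 0.5, 0.25, 0.125])              # c_k for k = 1..4
+ diff = theta[:, None] - theta[None, :]
+ K = sum(ck * np.cos((k + 1) * diff) for k, ck in enumerate(c))
+
+ eigs = np.linalg.eigvalsh(K)[::-1]                 # descending Gram eigenvalues
+ print(eigs[:8] / m)   # approx [.5, .5, .25, .25, .125, .125, .0625, .0625] = lambda_k
+ ```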
+
+ Recall that, as was noted above, the limit NTK of a fully-connected net $\Theta(z,z')$ depends only on $z^T z'$, $\|z\|_2$, and $\|z'\|_2$. Recall also that we have decided to embed inputs with $z(x) = [\cos(2\pi x), \sin(2\pi x)]^T$. This embedding maps $[0,1]^d$ onto a $d$-dimensional torus that lies inside a $(2d-1)$-dimensional sphere. In this case, our $\Theta(x,x') = \Theta(z(x),z(x'))$ depends only on $z^T(x) z(x')$.
+
+ Kernels with this property are called zonal. Any zonal kernel $K: S^{p-1} \times S^{p-1} \to \mathbb{R}$ admits the following Mercer's decomposition with respect to the uniform measure on $S^{p-1}$: $$\begin{equation}
+ K(z^T z')
+ = \sum_{k=0}^\infty \lambda_k \sum_{j=1}^{N(p,k)} Y_{k,j}(z) Y_{k,j}(z'),
+ \end{equation}$$ where $N(p,k)$ is the number of spherical harmonics of degree $k$ in dimension $p$, and $Y_{k,j}$ are the spherical harmonics themselves. For $p=2$, this decomposition takes a simpler form: $$\begin{equation}
+ K(z^T z')
+ = \frac{1}{4\pi^2} + \frac{1}{\pi^2} \sum_{k=1}^\infty \lambda_k \cos(k \arccos(z^T z')).
+ \label{eq:mercer_zonal_2d}
+ \end{equation}$$
+
+ As we see, large $k$'s correspond to high-frequency harmonics, while small $k$'s correspond to low-frequency ones. A recent result of [@chen2020deep] states that the NTK of a fully-connected net with inputs lying on $S^{p-1}$ has eigenvalues decaying as a power-law: $\lambda_k \sim k^{-p}$ as $k \to \infty$; see also [@geifman2020similarity] for an earlier result for shallow nets and [@bietti2019inductive] for an even earlier result for bias-free shallow nets. This means that learning the $k$-th harmonic of the input image requires $O(k^p)$ time. Hence for a finite number of training steps, high-frequency components are never learned, which results in blurry images similar to Figure [1](#fig:image_regression){reference-type="ref" reference="fig:image_regression"} (c).
+
+ A possible remedy would be to increase $\lambda_k$ for large $k$. But how can one achieve this? We illustrate the solution proposed in [@tancik2020fourier] in the following.
+
+ Consider the case $d=1$ for simplicity. In this case, the embedding map $z(x) = [\cos(2\pi x), \sin(2\pi x)]^T$ traverses a circle. Consider a modified embedding $\tilde z(x) = [\cos(2\pi b x), \sin(2\pi b x)]^T$ instead, where $b \in \mathbb{N}$ is a tunable parameter. The corresponding kernel is then given as $$\begin{multline}
+ K(\tilde z^T \tilde z')
+ = \frac{1}{4\pi^2} + \frac{1}{\pi^2} \sum_{k=1}^\infty \lambda_k \cos(k \arccos(\tilde z^T \tilde z'))
+ =\\= \frac{1}{4\pi^2} + \frac{1}{\pi^2} \sum_{k=1}^\infty \lambda_k \cos(2\pi k b (x-x'))
+ = \frac{1}{4\pi^2} + \frac{1}{\pi^2} \sum_{k=1}^\infty \lambda_k \cos(k b \arccos(z^T z')),
+ \end{multline}$$ which means that $\lambda_k$ becomes the $kb$-th eigenvalue in the original embedding space. If $\lambda_k$ decreased monotonically, this would mean that each $kb$-th eigenvalue increased from $\lambda_{kb}$ to $\lambda_k$, implying faster convergence to the $kb$-th principal component.
+
+ The obvious downside of the method above is that in the new parameterization some of the eigenvalues become zero --- therefore the corresponding components are never learned. A simple solution is to enlarge the embedding: $\tilde z(x) = [\cos(2\pi \sigma^{j/M} x), \sin(2\pi \sigma^{j/M} x)]^T$, where $M \in \mathbb{N}$ and $\sigma \in \mathbb{R}_+$ are tunable parameters; this is referred to as \"positional encoding\" in [@tancik2020fourier]. Another solution proposed by [@tancik2020fourier] is random Gaussian projections: $\tilde z(x) = [\cos(2\pi B x), \sin(2\pi B x)]^T$, where $B \in \mathbb{R}^{M \times d}$, each element of $B$ is sampled independently from $\mathcal{N}(0,\sigma^2)$, and $M$ and $\sigma$ are tunable parameters. Both solutions perform on par with each other and much better than the original embedding: compare (c), (d), and (e) in Figure [1](#fig:image_regression){reference-type="ref" reference="fig:image_regression"}.
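+
+ A minimal NumPy sketch of the three embeddings (ours; function names and default parameter values are illustrative, not taken from [@tancik2020fourier]'s code):
+
+ ```python
+ import numpy as np
+
+ def basic(x):                                # x: (batch, d), coordinates in [0, 1]^d
+     return np.concatenate([np.cos(2 * np.pi * x), np.sin(2 * np.pi * x)], axis=-1)
+
+ def positional(x, M=64, sigma=10.0):         # log-spaced frequencies sigma^(j/M)
+     freqs = sigma ** (np.arange(M) / M)
+     proj = (2 * np.pi * x[..., None] * freqs).reshape(x.shape[0], -1)
+     return np.concatenate([np.cos(proj), np.sin(proj)], axis=-1)
+
+ def gaussian(x, M=256, sigma=10.0, seed=0):  # random projections, B ~ N(0, sigma^2)
+     B = np.random.default_rng(seed).normal(0.0, sigma, (M, x.shape[-1]))
+     proj = 2 * np.pi * x @ B.T
+     return np.concatenate([np.cos(proj), np.sin(proj)], axis=-1)
+
+ x = np.random.rand(8, 2)
+ print(basic(x).shape, positional(x).shape, gaussian(x).shape)  # (8, 4) (8, 256) (8, 512)
+ ```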
+
+ The same method suits other low-dimensional regression problems as well; [@tancik2020fourier] provide examples of 3D shape regression, MRI reconstruction, and inverse rendering. See Figure [2](#fig:low_dim_regression){reference-type="ref" reference="fig:low_dim_regression"} for a comparison of outputs of a neural net with no encoding of inputs (top row) and the proposed Gaussian encoding (bottom row).
+
+ One more notable example is Solid Isotropic Material Penalisation (SIMP), an instance of topology optimization. The task here is to optimize over material density at $N$ points, $y \in [0,1]^N$, to obtain a shape that can withstand forces applied at certain points.
+
+ Given a density $y$ and a force vector $F$, the SIMP method constructs a stiffness matrix $K(y)$ and derives a displacement vector $U(y)$ by solving a linear system $K(y) U(y) = F$. The resulting construction is stable if the forces do not do any work, i.e. $U^T(y) F = 0$. The density is therefore optimized to minimize the work $C(y) = U^T(y) F \to \min_y$ under a volume constraint $\sum_{i=1}^N y_i = V$; $C$ is usually called compliance.
+
+ We can cast the constrained optimization problem as an unconstrained one by introducing a pre-density $x \in \mathbb{R}^N$ and constructing the density as $y_i = \sigma(x_i + b(x))$, where $b$ is a function that ensures the volume constraint. Denoting this operation as $y = \Sigma(x)$, we get a new unconstrained optimization problem in the space of pre-densities: $C(\Sigma(x)) \to \min_x$.
+
+ While the above problem is not a regression problem, we can still model $x$ as outputs of a neural net at the corresponding grid points. However, lack of translation invariance results in implausible patterns. [@dupuis2021dnn] used an embedding scheme similar to that of [@tancik2020fourier] to control this issue. On the other hand, in contrast to [@tancik2020fourier], [@dupuis2021dnn] used $\sin(\omega x)$ as activation instead of ReLU, and used $\omega$ together with the bias initialization variance to control the sharpness of output shapes, instead of modifying the embedding. Both methods aim to \"widen\" the spectrum of the limit NTK.
+
+ Apart from providing a meaningful kernel for kernel methods, NTK can be used as a concept useful for reasoning about neural nets of large width. Indeed, as stated in [2](#sec:convergence){reference-type="ref+label" reference="sec:convergence"}, NTK, while being random and evolving, converges to a constant deterministic limit as width goes to infinity. One can hope that for large enough width, the NTK stays close to its limit with high probability. Therefore, any result valid for kernel regression with NTK taken as a kernel may also become valid, with high probability, for a wide enough net.
+
+ Let us start with the following result valid for kernel regression with a constant kernel: when the kernel is positive-definite, kernel regression learns the dataset. Indeed, recall the training dynamics of a kernel regression with kernel $\Theta$ trained to minimize square loss on a training dataset $(\vec x, \vec y)$: $$\begin{equation}
+ \dot f_t(\vec x)
+ = \Theta(\vec x, \vec x) (\vec y - f_t(\vec x)).
+ \end{equation}$$ Assuming $\Theta(\vec x, \vec x) \geq \lambda$, $$\begin{equation}
+ \frac{d}{dt}\left(\frac{1}{2} \| \vec y - f_t(\vec x) \|_2^2\right)
+ = -(\vec y - f_t(\vec x))^T \Theta(\vec x, \vec x) (\vec y - f_t(\vec x))
+ \leq -\lambda \| \vec y - f_t(\vec x) \|_2^2,
+ \end{equation}$$ which gives $$\begin{equation}
+ \| \vec y - f_t(\vec x) \|_2^2
+ \leq e^{-2\lambda t} \| \vec y - f_0(\vec x) \|_2^2.
+ \end{equation}$$ Hence $\lambda > 0$ suffices to guarantee that $f_t(\vec x)$ converges to $\vec y$ as $t \to \infty$.
+
+ Suppose now our kernel regression uses a random time-dependent kernel $\hat\Theta_t$ instead of $\Theta$: $$\begin{equation}
+ \dot f_t(\vec x)
+ = \hat\Theta_t(\vec x, \vec x) (\vec y - f_t(\vec x)).
+ \end{equation}$$ If we manage to guarantee that with probability $\geq 1-\delta$ $\forall t \geq 0$ $\hat\Theta_t(\vec x, \vec x) \geq \lambda$, then $\lambda > 0$ suffices to guarantee that $f_t(\vec x)$ converges to $\vec y$ as $t \to \infty$ with probability $\geq 1-\delta$. Indeed, $$\begin{equation}
+ \frac{d}{dt}\left(\frac{1}{2} \| \vec y - f_t(\vec x) \|_2^2\right)
+ = -(\vec y - f_t(\vec x))^T \hat\Theta_t(\vec x, \vec x) (\vec y - f_t(\vec x))
+ \leq -\lambda \| \vec y - f_t(\vec x) \|_2^2
+ \quad
+ \text{w.p. $\geq 1-\delta$},
+ \end{equation}$$ which gives $$\begin{equation}
+ \| \vec y - f_t(\vec x) \|_2^2
+ \leq e^{-2 \lambda t} \| \vec y - f_0(\vec x) \|_2^2
+ \quad
+ \text{w.p. $\geq 1-\delta$}.
+ \end{equation}$$
+
+ One of the first results of this kind concerns ReLU nets with one hidden layer under NTK parameterization: $$\begin{equation}
+ f(x; a_{1:n}, w_{1:n})
+ = \frac{1}{\sqrt{n}} \sum_{i=1}^n a_i [w_i^T x]_+.
+ \label{eq:two_layered_ReLU_net_ntk}
+ \end{equation}$$ We aim to minimize square loss on a dataset $(\vec x, \vec y)$ of size $m$ with gradient descent on the input weights: $$\begin{equation}
+ \dot w_i(t)
+ = \frac{1}{\sqrt{n}} \sum_{k=1}^m (y_k - f(x_k; a_{1:n}, w_{1:n}(t))) a_i [w_i^T(t) x_k > 0] x_k
+ \quad
+ \forall i \in [n].
+ \end{equation}$$ We sample $w_i \sim \mathcal{N}(0,I_{n_0})$ and $a_i \sim U(\{-1,1\})$ $\forall i \in [n]$ independently. The goal of sampling $a_i$ from this particular distribution is merely simplification: in this case $a_i^2 = 1$, which simplifies the NTK Gram matrix a little bit: $$\begin{equation}
+ \hat\Theta_t(x_k, x_l) =
+ \frac{1}{n} \sum_{i=1}^n [w_i^T(t) x_k > 0] [w_i^T(t) x_l > 0] x_k^T x_l.
+ \end{equation}$$ However, it is possible to apply the same technique to any distribution of the output layer not depending on $n$. Note that the Gram matrix depends merely on the activation patterns of the hidden layer computed on the dataset.
+
+ The limit NTK is therefore given as: $$\begin{equation}
+ \Theta(x_k, x_l) =
+ \mathbb{E}\,_{w \sim \mathcal{N}(0, I_{n_0})} [w^T x_k > 0] [w^T x_l > 0] x_k^T x_l.
+ \end{equation}$$ Note that in our two-layered case, $\Theta(x,x') = \lim_{n \to \infty} \hat\Theta_t(x,x') = \mathbb{E}\,\hat\Theta_0(x,x')$. In the sequel, we denote the Gram matrices $\hat\Theta_t(\vec x, \vec x)$ as $H(t)$ and $\Theta(\vec x, \vec x)$ as $H^\infty$. Let $\lambda_0$ be the least eigenvalue of $H^\infty$.
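+
+ For intuition, here is a small Monte-Carlo check of this expectation (ours). For unit-norm inputs, $\mathbb{E}\,_w [w^T x_k > 0] [w^T x_l > 0] = \frac{\pi - \arccos(x_k^T x_l)}{2\pi}$ --- the probability that a random Gaussian hyperplane keeps both inputs on its positive side --- which gives the limit Gram matrix in closed form:
+
+ ```python
+ import numpy as np
+
+ rng = np.random.default_rng(0)
+ n0, n, m = 16, 100000, 5
+ X = rng.standard_normal((m, n0))
+ X /= np.linalg.norm(X, axis=1, keepdims=True)     # unit-norm inputs
+
+ W = rng.standard_normal((n, n0))                  # w_i ~ N(0, I)
+ A = (W @ X.T > 0).astype(float)                   # activation patterns, shape (n, m)
+ H_emp = (A.T @ A) * (X @ X.T) / n                 # empirical NTK Gram matrix H(0)
+
+ cos = np.clip(X @ X.T, -1.0, 1.0)
+ H_lim = (np.pi - np.arccos(cos)) / (2 * np.pi) * (X @ X.T)   # H_infinity
+ print(np.max(np.abs(H_emp - H_lim)))              # shrinks as the width n grows
+ ```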
+
+ ::: {#thm:convergence_2layer .theorem}
+ **Theorem 3** ([@du2018gradient]). *Consider the setting discussed above and further assume $\|x_k\|_2 = 1$ and $|y_k| \leq 1$ $\forall k \in [m]$. Then $\exists C, C_0 > 0$ such that $\forall \delta \in (0,1)$ taking $$\begin{equation}
+ n >
+ \max\left(
+ C \frac{m^6}{\lambda_0^4 \delta^3}, \;
+ C_0 \frac{m^2}{\lambda_0^2} \log\left(\frac{2m}{\delta}\right)
+ \right)
+ \end{equation}$$ guarantees $H(t) \geq \lambda_0/2$ $\forall t \geq 0$ w.p. $\geq 1-\delta$.*
+ :::
+
+ This result implies $\| \vec y - f_t(\vec x) \|_2^2 \leq e^{-\lambda_0 t} \| \vec y - f_0(\vec x) \|_2^2$ w.p. $\geq 1-\delta$, as discussed above.
+
+ For the full proof, see the original paper [@du2018gradient] or lecture notes [@golikov2020notes]. We are going to discuss, very briefly, only the crucial parts of the proof in the sequel.
+
+ The proof is based on four lemmas. The first lemma states that as long as $n = \Omega(m^2 \lambda_0^{-2} \log(m/\delta))$, where $\Omega$ hides a certain constant, $\|H(0) - H^\infty\|_2 \leq \lambda_0/4$, where $\|\cdot\|_2$ denotes the spectral norm, w.p. $\geq 1-\delta$; this implies $H(0) \geq 3\lambda_0/4$ with the same probability. As already noted above, $\mathbb{E}\,H(0) = H^\infty$. This allows one to apply a concentration inequality to each element of $H(0)$. A union bound then gives a bound that holds uniformly for all elements of $H(0)$. This implies a bound on $\|H(0) - H^\infty\|_F$, hence on the spectral norm as well.
+
+ The second lemma states that as long as $\forall i \in [n]$ $\|w_i - w_i(0)\|_2 \leq R$ for certain $R = R(\delta,\lambda_0,m)$, $\| H - H(0) \|_2 \leq \lambda_0/4$ w.p. $\geq 1-\delta$. In other words, as long as the weights are close to initialization, the corresponding Gram matrix is close to the initial one too. The idea is that as long as the weights are not far from their initialization, with certain probability, not many of the hidden neurons can alter their activation patterns on the train dataset. Since, as already noted above, our Gram matrices depend only on activation patterns on the train dataset, this implies a tail bound on $|H_{kl} - H_{kl}(0)|$ $\forall k,l \in [m]$, which gives a tail bound on $\|H - H(0)\|_2$ with the same technique as used in the first lemma.
+
+ The third lemma states that as long as $H(s) \geq \lambda_0/2$ $\forall s \in [0,t]$ (we haven't proven it yet), the weights indeed stay close to their initialization: $\forall i \in [n]$ $\|w_i(t) - w_i(0)\|_2 \leq R'$ for certain $R' = R'(\lambda_0,m,n)$. This can be proven by a very simple estimate: $$\begin{multline}
+ \left\|\frac{dw_i(s)}{ds}\right\|_2 =
+ \left\|\frac{1}{\sqrt{n}} \sum_{k=1}^m (y_k - f_s(x_k)) a_i [w_i^T(s) x_k > 0] x_k\right\|_2 \leq
+ \\\leq
+ \frac{1}{\sqrt{n}} \sum_{k=1}^m |y_k - f_s(x_k)| \leq
+ \sqrt{\frac{m}{n}} \|\vec y - f_s(\vec x)\|_2 \leq
+ \sqrt{\frac{m}{n}} e^{-\lambda_0 s / 2} \|\vec y - f_0(\vec x)\|_2.
+ \end{multline}$$ This gives $\forall i \in [n]$: $$\begin{multline}
+ \| w_i(t) - w_i(0) \|_2 =
+ \left\|\int_0^t \frac{dw_i(s)}{ds} \, ds\right\|_2 \leq
+ \int_0^t \left\|\frac{dw_i(s)}{ds}\right\|_2 \, ds \leq
+ \\\leq
+ \frac{2 \sqrt{m}}{\lambda_0 \sqrt{n}} \left(1 - e^{-\lambda_0 t / 2}\right) \|\vec y - f_0(\vec x)\|_2 \leq
+ \frac{2 \sqrt{m}}{\lambda_0 \sqrt{n}} \|\vec y - f_0(\vec x)\|_2.
+ \end{multline}$$
+
+ Finally, the fourth lemma states that as long as $R' < R$, $\| H(t) - H(0) \|_2 \leq \lambda_0/4$ $\forall t \geq 0$ w.p. $\geq 1-\Omega(\delta)$, where $\Omega$ hides a certain constant. Combined with the first lemma, this implies $H(t) \geq \lambda_0/2$ $\forall t \geq 0$ w.p. $\geq 1-\Omega(\delta)$. The condition $R'(\lambda_0,m,n) < R(\delta,\lambda_0,m)$ gives the second lower bound on $n$ (the first one is given by the first lemma). By changing $\delta$, we get the desired result.
+
+ The fourth lemma is proven as follows. Let $t_0$ be the first moment of time when the second lemma becomes no longer applicable, i.e. $t_0 = \inf\left\{t \geq 0: \; \max_{i \in [n]} \| w_i(t) - w_i(0) \|_2 > R\right\}$. Assume it is finite. Since the weights are continuous functions of time, $\max_{i \in [n]} \| w_i(t_0) - w_i(0) \|_2 = R$. Hence the second lemma holds for $w_{1:n} = w_{1:n}(t)$ $\forall t \in [0,t_0]$ and $\| H(t) - H(0) \|_2 \leq \lambda_0/4$ w.p. $\geq 1-\delta$ $\forall t \in [0,t_0]$, therefore $H(t) \geq \lambda_0/2$ w.p. $\geq 1-\Omega(\delta)$ $\forall t \in [0,t_0]$. But then the third lemma holds as well: $\forall i \in [n]$ $\|w_i(t_0) - w_i(0)\|_2 \leq R' < R$; contradiction. Hence $\forall t \geq 0$ $\max_{i \in [n]} \| w_i(t) - w_i(0) \|_2 \leq R$ and the second lemma gives the desired statement.
+
+ [3](#thm:convergence_2layer){reference-type="ref+label" reference="thm:convergence_2layer"} requires the number of hidden units $n$ to grow as $m^6$ with the size of the train dataset and as $\delta^{-3}$ with the failure probability. This bound is way too loose for practical purposes: indeed, even a very small dataset has $m \geq 100$, which results in a bound of order at least $10^8$. If we want the bound to be valid with at least $90\%$ probability, we pay three orders of magnitude more. Note that modern architectures designed to be trained on large datasets like ImageNet ($m=10^6$) have width barely exceeding $10^4$.
+
+ We state one of the existing improvements of [3](#thm:convergence_2layer){reference-type="ref+label" reference="thm:convergence_2layer"} below:
+
+ ::: {#thm:convergence_2layer_quartic .theorem}
+ **Theorem 4** ([@song2019quadratic]). *Under the same setting as [3](#thm:convergence_2layer){reference-type="ref+label" reference="thm:convergence_2layer"}, $\exists C, C_0 > 0$ such that $\forall \delta \in (0,1)$ taking $$\begin{equation}
+ n >
+ \max\left(
+ C \frac{m^4}{\lambda_0^4} \log^3\left(\frac{m}{\delta}\right), \;
+ C_0 \frac{m^2}{\lambda_0^2} \log\left(\frac{2m}{\delta}\right)
+ \right)
+ \end{equation}$$ guarantees $H(t) \geq \lambda_0/2$ $\forall t \geq 0$ w.p. $\geq 1-\delta$.*
+ :::
+
+ This result decreases the exponent of $m$ from $6$ to $4$ and makes the $\delta$-dependence logarithmic. The proof follows the same path as above. Note however that the previous result aimed for elementwise tail bounds on $H(0) - H^\infty$ or $H - H(0)$, which lead to tail bounds on $\|H(0) - H^\infty\|_2$ and $\|H - H(0)\|_2$ by a union bound, which gives an $m^2$ factor. One of the improvements proposed by [@song2019quadratic] is to replace these elementwise bounds with matrix-Chernoff bounds --- they do not incur this $m^2$ factor, thus leading to better bounds. The other improvement is to replace Markov inequalities, which result in $1/\delta$ factors, with the Bernstein inequality, which results only in $\log(1/\delta)$ ones.
+
+ The $m^4$ width bound is still far from being realistically tight. We are not aware of any further improvements of the results discussed above that apply the idea of NTK stability. Global gradient descent convergence can, however, be proved by first proving guarantees on convergence to local minima and then proving that all minima are global for wide enough nets. See [@lee2016gradient; @panageas2017gradient; @mertikopoulos2020almost] for the first line of works and [@yu1995local; @nguyen2017loss; @nguyen2019connected; @nguyen2021note] for the second. None of the works in either line uses the idea of NTK stability, nor do they rely on NTK parameterization. [@nguyen2019connected] proves that $n = m$ is enough for leaky ReLU nets to have only global \"local valleys\" (a generalization of global minima to certain losses such as cross-entropy), and [@nguyen2021note] demonstrates that this bound cannot be improved for two-layered nets and general data.
+
+ [@du2019gradient] extends [3](#thm:convergence_2layer){reference-type="ref+label" reference="thm:convergence_2layer"} to deep nets. Their proof idea is the same: first show that $H(0)$ is close to $H^\infty$, then show that $H(t)$ stays close to $H(0)$. However, for the multilayer case, $H(0)$ cannot be proven to be close to $H^\infty$ just by concentration of measure. When layers are many, perturbations caused by finite width result in deviations exponential with respect to the number of layers $L$. For this reason, their bound grows exponentially with $L$. See also [@allen2019convergence] for a similar result with a bound depending on $m$ only polynomially, proved using a different technique.
+
+ Stability of NTK has another interesting consequence. Suppose the empirical NTK is constant, i.e. $\hat\Theta_t = \hat\Theta_0$. This is equivalent to saying that the corresponding model is linearized: $$\begin{equation}
+ f(x; \theta)
+ = f(x; \theta_0) + \nabla_\theta^T f(x; \theta_0) (\theta - \theta_0).
+ \end{equation}$$ For brevity, denote $\vec u_t = f_t(\vec x)$ and $Z_{t,ik} = \partial_{\theta_i} f(x_k; \theta_t)$. Hence $Z_t \in \mathbb{R}^{N \times m}$, where $N$ is the total number of parameters, and $\vec u_t = \vec u_0 + Z_0^T (\theta_t - \theta_0)$.
+
+ Note that $H_t = Z_t^T Z_t$. Recall the train set predictions for a constant kernel: $$\begin{equation}
+ \vec u_t
+ = \vec y + e^{-H_0 t} (\vec u_0 - \vec y).
+ \end{equation}$$ In our linearized dynamics, the weights evolve as follows: $$\begin{equation}
+ \dot\theta_t
+ = Z_0 (\vec y - \vec u_t)
+ = Z_0 e^{-H_0 t} (\vec y - \vec u_0).
+ \end{equation}$$ Straightforward integration gives: $$\begin{equation}
+ \theta_t
+ = \theta_0 + Z_0 H_0^{-1} \left(I - e^{-H_0 t}\right) (\vec y - \vec u_0).
+ \end{equation}$$ Recalling $H_0 = Z_0^T Z_0$, at the end of training ($t \to \infty$) we get $$\begin{equation}
+ \|\theta_\infty - \theta_0\|_2^2
+ = (\theta_\infty - \theta_0)^T (\theta_\infty - \theta_0)
+ = (\vec y - \vec u_0)^T H_0^{-1} (\vec y - \vec u_0).
+ \end{equation}$$
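+
+ A quick NumPy check (ours) that gradient flow on a model that is exactly linear in its weights lands at the closed-form endpoint above:
+
+ ```python
+ import numpy as np
+
+ rng = np.random.default_rng(0)
+ N, m = 40, 10
+ Z0 = rng.standard_normal((N, m))                  # constant Jacobian of f = Z0^T theta
+ H0 = Z0.T @ Z0
+ y = rng.standard_normal(m)
+ theta0 = rng.standard_normal(N)
+ u0 = Z0.T @ theta0                                # initial train predictions
+
+ theta, dt = theta0.copy(), 1e-4                   # Euler integration of the flow
+ for _ in range(200000):
+     theta += dt * Z0 @ (y - Z0.T @ theta)
+
+ closed = theta0 + Z0 @ np.linalg.solve(H0, y - u0)   # the t -> infinity limit
+ print(np.max(np.abs(theta - closed)))                # near machine precision
+ ```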
+
+ Define $\mathcal{F}_B^{w_{1:n}(0), a_{1:n}}$ as a set of models of the form ([\[eq:two_layered_ReLU_net_ntk\]](#eq:two_layered_ReLU_net_ntk){reference-type="ref" reference="eq:two_layered_ReLU_net_ntk"}) with output weights $a_{1:n}$ and input weights $w_{1:n}$ such that $\| W - W(0) \|_F \leq B$ for given $w_{1:n}(0)$. Since only the input weights are trained, $\| W - W(0) \|_F = \|\theta - \theta_0\|_2$, and the above considerations state that a trained model always lies in $\mathcal{F}_B^{w_{1:n}(0), a_{1:n}}$ with $B = \sqrt{(\vec y - \vec u_0)^T H_0^{-1} (\vec y - \vec u_0)}$.
+
+ Hence our training procedure outputs models in a certain set rather than arbitrary models of the form ([\[eq:two_layered_ReLU_net_ntk\]](#eq:two_layered_ReLU_net_ntk){reference-type="ref" reference="eq:two_layered_ReLU_net_ntk"}). Upper-bounding the Rademacher complexity of this model set will give us a generalization bound, as we shall see below. Let us upper-bound the Rademacher complexity conditioned on a dataset $(\vec x, \vec y)$ of size $m$: $$\begin{multline}
+ \mathrm{Rad}({\mathcal{F}_B^{w_{1:n}(0), a_{1:n}}} \, | \, {\vec x, \vec y}) =
+ \mathbb{E}\,_{\sigma_{1:m} \sim \{-1,1\}^m} \sup_{f \in \mathcal{F}_B^{w_{1:n}(0), a_{1:n}}} \left(\frac{1}{m} \sum_{k=1}^m \sigma_k u_k\right) =
+ \\=
+ \frac{1}{m} \mathbb{E}\,_{\sigma_{1:m} \sim \{-1,1\}^m} \sup_{\| W - W(0) \|_F \leq B} \left(\sum_{k=1}^m \sigma_k \frac{1}{\sqrt{n}} \sum_{i=1}^n a_i [w_i^T(0) x_k \geq 0] w_i^{T} x_k\right) =
+ \\=
+ \frac{1}{m} \mathbb{E}\,_{\sigma_{1:m} \sim \{-1,1\}^m} \sup_{\| W - W(0) \|_F \leq B} \left( \vec\sigma^T Z^{T}(0) \theta \right) =
+ \\=
+ \frac{1}{m} \mathbb{E}\,_{\sigma_{1:m} \sim \{-1,1\}^m} \sup_{\| W - W(0) \|_F \leq B} \left( \vec\sigma^T Z^{T}(0) (\theta - \theta_0) \right) =
+ \\=
+ \frac{B}{m} \mathbb{E}\,_{\sigma_{1:m} \sim \{-1,1\}^m} \| Z(0) \vec\sigma \|_2 \leq
+ \frac{B}{m} \sqrt{\mathbb{E}\,_{\sigma_{1:m} \sim \{-1,1\}^m} \| Z(0) \vec\sigma \|_2^2} =
+ \frac{B}{m} \| Z(0) \|_F.
+ \end{multline}$$ The transition from $\theta$ to $\theta - \theta_0$ is legitimate since the dropped term $\vec\sigma^T Z^{T}(0) \theta_0$ does not depend on $\theta$ and has zero mean over $\vec\sigma$.
+
+ Note that $$\begin{equation}
+ \| Z(0) \|_F^2 =
+ \frac{1}{n} \sum_{i=1}^n \sum_{k=1}^m [w_i^T(0) x_k \geq 0].
+ \end{equation}$$ It is an average over hidden units of i.i.d. random variables bounded in $[0,m]$, which allows for Hoeffding's inequality: $$\begin{equation}
+ \mathcal{P}\left(\| Z(0) \|_F^2 - \frac{m}{2} \geq \epsilon\right) \leq
+ e^{-2n \epsilon^2 / m^2}.
+ \end{equation}$$ This gives, w.p. $\geq 1-\delta$ over initialization, $$\begin{equation}
+ \| Z(0) \|_F^2 \leq
+ \frac{m}{2} + \sqrt{\frac{m^2}{2n} \log\left(\frac{1}{\delta}\right)}.
+ \end{equation}$$ Finally, we get that w.p. $\geq 1-\delta$ over initialization, $$\begin{equation}
+ \mathrm{Rad}({\mathcal{F}_B^{w_{1:n}(0), a_{1:n}}} \, | \, {(\vec x, \vec y)}) \leq
+ \frac{B}{\sqrt{m}} \sqrt{\frac{1}{2} + \sqrt{\frac{1}{2n} \log\left(\frac{1}{\delta}\right)}}.
+ \end{equation}$$
+
+ Consider the zero-one risk: $r(y,z) = [y z < 0]$; we have $R(f) = \mathbb{E}\,_{x,y \sim \mathcal{D}} r(y,f(x))$ and $\hat R_m(f) = \mathbb{E}\,_{x,y \in S_m} r(y,f(x))$, correspondingly. From generalization theory, we know that for any $B$ and for any initialization $w_{1:n}(0), a_{1:n}$, $\forall f \in \mathcal{F}_B^{w_{1:n}(0), a_{1:n}}$, $$\begin{equation}
+ R(f) \leq
+ \hat R_m(f) + \mathbb{E}\,_{(\vec x, \vec y)} \mathrm{Rad}({\mathcal{F}_B^{w_{1:n}(0), a_{1:n}}} \, | \, {(\vec x, \vec y)}) + \sqrt{\frac{1}{2m} \log \frac{1}{\tilde\delta}}
+ \quad
+ \text{w.p. $\geq 1 - \tilde\delta$ over $(\vec x, \vec y)$.}
+ \end{equation}$$
+
+ We want to take $B = \sqrt{(\vec y - \vec u_0)^T H_0^{-1} (\vec y - \vec u_0)}$, but it depends on the dataset $(\vec x, \vec y)$. Take a sequence $\{B_j\}_{j=1}^\infty$ monotonically increasing to infinity and a sequence $\{\tilde\delta_j\}_{j=1}^\infty$ of deltas $\in (0,1)$ that sum to $\tilde\delta$. This allows us to apply a union bound: w.p. $\geq 1-\tilde\delta$ over the training dataset, for any initialization $w_{1:n}(0), a_{1:n}$, $\forall j \in \mathbb{N}$, $\forall f \in \mathcal{F}_{B_j}^{w_{1:n}(0), a_{1:n}}$, $$\begin{equation}
+ R(f) \leq
+ \hat R_m(f) + \mathbb{E}\,_{(\vec x, \vec y)} \mathrm{Rad}({\mathcal{F}_{B_j}^{w_{1:n}(0), a_{1:n}}} \, | \, {(\vec x, \vec y)}) + \sqrt{\frac{1}{2m} \log \frac{1}{\tilde\delta_j}}.
+ \end{equation}$$ We are free to choose the minimal $j$ such that $B_j \geq \sqrt{(\vec y - \vec u_0)^T H_0^{-1} (\vec y - \vec u_0)}$; denote it by $\hat j$. Let for definiteness $B_j = j$. Then $B_{\hat j} \leq 1 + \sqrt{(\vec y - \vec u_0)^T H_0^{-1} (\vec y - \vec u_0)}$.
+
+ Putting it all together, we have, w.p. $\geq 1-\tilde\delta$ over the training dataset and w.p. $\geq 1-\delta$ over initialization, $$\begin{multline}
+ R(f(\theta_\infty)) \leq
+ \hat R_m(f(\theta_\infty)) +
+ \\+
+ \frac{1 + \sqrt{(\vec y - \vec u_0)^T H_0^{-1} (\vec y - \vec u_0)}}{\sqrt{m}} \sqrt{\frac{1}{2} + \sqrt{\frac{1}{2n} \log\left(\frac{1}{\delta}\right)}} + \sqrt{\frac{1}{2m} \log \frac{1}{\tilde\delta_{\hat j}}}.
+ \label{eq:generalization_bound_for_fixed_acts_model}
+ \end{multline}$$
+
+ Recall that the bound above was obtained under the assumption of a constant NTK. In order to relax this assumption, one has to show that, possibly for large enough width, $H_t^{-1}$ stays close to $H_0^{-1}$. Note that when proving global GD convergence we had to prove that $H_t$ stays close to $H_0$, which is different. The required closeness result is proven in [@arora2019fine]; it leads to the following theorem:
+
+ ::: {#thm:generalization_2layer .theorem}
+ **Theorem 5** ([@arora2019fine]). *Under the same setting as [3](#thm:convergence_2layer){reference-type="ref+label" reference="thm:convergence_2layer"}, $\exists p, C, C_0 > 0$ such that $\forall \delta \in (0,1)$ taking $$\begin{equation}
+ n >
+ \max\left(
+ C \frac{m^7}{\lambda_0^4 \delta^p}, \;
+ C_0 \frac{m^2}{\lambda_0^2} \log\left(\frac{2m}{\delta}\right)
+ \right)
+ \end{equation}$$ guarantees w.p. $\geq 1-\delta$ over the training dataset of size $m$ and w.p. $\geq 1-\delta$ over initialization, $$\begin{multline}
+ R(f(\theta_\infty)) \leq
+ \hat R_m(f(\theta_\infty)) +
+ \\+
+ \frac{1 + \sqrt{(\vec y - \vec u_0)^T \left(H^{\infty}\right)^{-1} (\vec y - \vec u_0)}}{\sqrt{m}} \sqrt{\frac{1}{2} + \sqrt{\frac{1}{2n} \log\left(\frac{1}{\delta}\right)}} + \sqrt{\frac{1}{2m} \log \frac{1}{\delta}}.
+ \end{multline}$$*
+ :::
+
+ ![The figure is borrowed from [@fort2020deep].](images/kernel_velocity.pdf){#fig:kernel_velocity width="90%"}
+
+ As was noted in [2](#sec:convergence){reference-type="ref+label" reference="sec:convergence"}, NTK diverges under standard parameterization. Recall the example of a two-layered net: $$\begin{equation}
+ f(x; a_{1:n}, w_{1:n})
+ = \sum_{i=1}^n a_i \phi(w_i x),
+ \quad
+ a_{1:n} \sim \mathcal{N}(0, n^{-1} I),
+ \quad
+ w_{1:n} \sim \mathcal{N}(0, I);
+ \end{equation}$$ $$\begin{equation}
+ \hat\Theta_t(x,x')
+ = \sum_{i=1}^n \left(\phi(w_i(t) x) \phi(w_i(t) x') + a_i^2(t) \phi'(w_i(t) x) \phi'(w_i(t) x') x x'\right).
+ \end{equation}$$ At $t=0$, since the $w_i$ are independent and of the order of $O(1)$, the sum diverges proportionally to $n$. Since under square loss $\dot f_t(x) = \hat\Theta_t(x,\vec x) (\vec y - f_t(\vec x))$, the model prediction at any point $x$ receives an $O(n)$ increment at the very beginning of training. In other words, model predictions diverge with width, making the model useless for regression.
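+
+ A small sketch of this divergence (ours; we take $\phi = \mathrm{ReLU}$ for concreteness): the empirical NTK at initialization grows linearly with width under standard parameterization, while its normalized version stays $O(1)$:
+
+ ```python
+ import numpy as np
+
+ def empirical_ntk_diag(n, x=1.0, seed=0):
+     """hat-Theta_0(x, x) for the two-layer net above under standard parameterization."""
+     rng = np.random.default_rng(seed)
+     w = rng.standard_normal(n)                    # w_i ~ N(0, 1)
+     a = rng.standard_normal(n) / np.sqrt(n)       # a_i ~ N(0, 1/n)
+     phi = np.maximum(w * x, 0.0)                  # phi = ReLU (our choice)
+     dphi = (w * x > 0).astype(float)
+     return np.sum(phi * phi + a**2 * dphi * dphi * x * x)
+
+ for n in [100, 1000, 10000]:
+     print(n, empirical_ntk_diag(n), empirical_ntk_diag(n) / n)  # raw ~ n; normalized O(1)
+ ```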
+
+ However, if the goal is classification, the magnitude of predictions does not matter; what matters is their signs for binary classification, or the indices of the largest logits when classes are multiple. Therefore in this case, an infinite-width limit under standard parameterization may still make sense despite the divergent NTK; see [@golikov2020dynamically].
+
+ In order to deal with the divergence, consider a normalized empirical NTK $\tilde\Theta_t(x,x') = \hat\Theta_t(x,x') / n$; its infinite-width limit at initialization is $\mathbb{E}\,_{w \sim \mathcal{N}(0,1)} \phi(w x) \phi(w x')$; we shall refer to it as the normalized NTK and denote it as $\tilde\Theta(x,x')$. In contrast to NTK under NTK parameterization, the normalized NTK under standard parameterization evolves with time [@golikov2020dynamically]: $$\begin{multline}
+ \frac{d\tilde\Theta_t(x,x')}{dt}
+ = \frac{1}{n} \sum_{i=1}^n \left(\phi(w_i(t) x) \phi'(w_i(t) x') x' + \phi'(w_i(t) x) \phi(w_i(t) x') x\right) \frac{dw_i(t)}{dt}
+ +\\+ \frac{1}{n} \sum_{i=1}^n a_i^2(t) x x' \left(\phi'(w_i(t) x) \phi''(w_i(t) x') x' + \phi''(w_i(t) x) \phi'(w_i(t) x') x\right) \frac{dw_i(t)}{dt}
+ +\\+ \frac{1}{n} \sum_{i=1}^n 2 a_i(t) \phi'(w_i(t) x) \phi'(w_i(t) x') x x' \frac{da_i(t)}{dt}.
+ \end{multline}$$ Recall the gradient flow dynamics under standard parameterization for square loss: $$\begin{equation}
+ \frac{da_k(t)}{dt}
+ = \sum_{j=1}^m (y_j - f_t(x_j)) \phi(w_k(t) x_j),
+ \quad
+ \frac{dw_k(t)}{dt}
+ = \sum_{j=1}^m (y_j - f_t(x_j)) a_k(t) \phi'(w_k(t) x_j) x_j.
+ \end{equation}$$ At $t=0$, we have $\dot a_k = O(1)$, while $\dot w_k = O(n^{-1/2})$. Since $a_k(0) = O(n^{-1/2})$ and $w_k(0) = O(1)$, it means that for any $t > 0$ independent of $n$, $a_k(t) = O(1)$, $\dot a_k(t) = O(1)$, $w_k(t) = O(1)$, and $\dot w_k(t) = O(1)$. A naive estimate of the sums then gives $\frac{d\tilde\Theta_t(x,x')}{dt} = O(1) + O(1) + O(1) = O(1)$ for any $t > 0$ independent of $n$. Therefore the normalized kernel keeps evolving with time even in the limit of infinite width.
+
+ This can be a reason for the superior performance of neural networks over conventional kernel methods, including NTK regression. A kernel measures similarity between points in a feature space. While for NTK this feature space is fixed, a neural net varies its corresponding kernel feature space, hopefully making it better suited for the task at hand; moreover, under standard parameterization, this kernel evolution does not vanish at large width.
+
+ The way an empirical NTK varies with time can be measured with kernel velocity, defined as the kernel distance between the kernels at two consecutive optimization steps. Kernel distance is in turn defined as one minus the cosine similarity between the Gram matrices $H$ and $H'$ of the corresponding kernels: $$\begin{equation}
+ \rho(H, H') = 1 - \frac{\mathop{\mathrm{tr}}(H H^{\prime,T})}{\sqrt{\mathop{\mathrm{tr}}(H H^T) \mathop{\mathrm{tr}}(H' H^{\prime,T})}}.
+ \end{equation}$$
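+
+ In code, the kernel distance is essentially a one-liner (a NumPy sketch, ours):
+
+ ```python
+ import numpy as np
+
+ def kernel_distance(H1, H2):
+     """1 - cosine similarity between two kernel Gram matrices of shape (m, m)."""
+     num = np.trace(H1 @ H2.T)
+     den = np.sqrt(np.trace(H1 @ H1.T) * np.trace(H2 @ H2.T))
+     return 1.0 - num / den
+ ```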
+
+ After measuring kernel velocity for a realistic net under standard parameterization, [@fort2020deep] distinguished two phases of training: a phase of rapid kernel evolution, and a phase of almost constant NTK; see Figure [3](#fig:kernel_velocity){reference-type="ref" reference="fig:kernel_velocity"}. The first phase is called *chaotic*, while the second one is coined *ordered*. Curiously enough, these two phases can be distinguished not only by kernel velocity. Suppose the network is trained up to time $T$, called the *spawn epoch*. Two independent copies of the same network are then trained further. In other words, we train two networks which remain the same up to time $T$ and may diverge afterwards due to the randomness of the training procedure. We then measure the *test error barrier* between these two networks, i.e. the height of the error \"hill\" on a straight segment between their corresponding weights. A small error barrier would mean that training of the two networks ended up in the same valley of test error, which likely means that they are similar. As one can see in Figure [3](#fig:kernel_velocity){reference-type="ref" reference="fig:kernel_velocity"}, the test error barrier drops dramatically as the spawn epoch grows. Also, the two quantities under discussion, kernel velocity and error barrier, appear to be strongly correlated; see again Figure [3](#fig:kernel_velocity){reference-type="ref" reference="fig:kernel_velocity"}. There are also other quantities that experience a sharp transition on the border of the two phases: kernel distance between child networks as a function of spawn epoch, ReLU activation Hamming distance, and Hamming distance between responses on the test set; see [@fort2020deep] for details.
+
+ While NTK kernel regression has a natural interpretation of training an infinitely wide neural network under a certain parameterization with gradient flow (see [2](#sec:convergence){reference-type="ref+label" reference="sec:convergence"}), NTK is not the only possible kernel that can be constructed using a neural net.
+
+ One of the other notable \"neural kernels\" is the NNGP-kernel [@lee2018deep], defined as $K(x,x') = \mathbb{E}\,_\theta f(x; \theta) f(x'; \theta)$, where $f(\cdot; \theta)$ is a parametric model with weights $\theta$ and scalar output. Suppose $f$ is a neural network with the output layer of the form $f(x) = v^T h(x)$, where $h(x) \in \mathbb{R}^n$ is its last layer representation and $v \sim \mathcal{N}(0, I_n / n)$ independent of $h$. Then $K(x,x') = \frac{1}{n} \mathbb{E}\,h^T(x) h(x')$. As we have seen in [4](#sec:limit){reference-type="ref+label" reference="sec:limit"} on the example of fully-connected and convolutional nets, the last layer representations tend to iid Gaussians as width goes to infinity. In other words, $\forall i \in [n]$ the $h^i$ tend to identical and independent Gaussian processes with covariance $\mathbb{E}\,h^i(x) h^i(x') = \frac{1}{n} \mathbb{E}\,h^T(x) h(x')$, which is exactly $K(x,x')$. This motivates the term \"NNGP\" --- *Neural Network Gaussian Process*.
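+
+ This kernel is straightforward to estimate by averaging over random weight draws (a sketch, ours; `features` is a hypothetical callable returning last-layer representations $h(\cdot)$ for a fresh draw of $\theta$):
+
+ ```python
+ import numpy as np
+
+ def empirical_nngp(features, X1, X2, n_draws=100, seed=0):
+     """Monte-Carlo estimate of K(x, x') = E_theta f(x) f(x') for f = v^T h, v ~ N(0, I/n)."""
+     rng = np.random.default_rng(seed)
+     total = 0.0
+     for _ in range(n_draws):
+         H1, H2 = features(X1, rng), features(X2, rng)   # (m1, n) and (m2, n)
+         total += H1 @ H2.T / H1.shape[1]                # (1/n) h(x)^T h(x') for this draw
+     return total / n_draws
+ ```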
+
+ Note that we have already seen the object $\mathbb{E}\,h^i(x) h^i(x')$ in [4](#sec:limit){reference-type="ref+label" reference="sec:limit"}: when $h = h_l$ --- the $l$-th layer hidden representation of a fully-connected network --- the above object is the hidden layer covariance $q_l(x,x')$. Therefore the NNGP of this fully-connected network is nothing else but $q_L(x,x')$. This can be generalized to the whole class of architectures expressible by tensor programs: see the Master theorem of [@yang2019tensor_i] mentioned in [2](#sec:convergence){reference-type="ref+label" reference="sec:convergence"}. That is, any neuron of any hidden representation of a neural network expressible by a tensor program tends to a Gaussian process.
+
+ Learning a Gaussian process with zero mean and covariance $K(\cdot,\cdot)$ on a training dataset $(\vec x, \vec y)$ means computing its Bayesian posterior, which is again a Gaussian process with mean $\mu(\cdot \,|\, (\vec x, \vec y))$ and covariance $K(\cdot,\cdot \,|\, (\vec x, \vec y))$ given below: $$\begin{equation}
+ \mu(x \,|\, (\vec x, \vec y)) = K(x,\vec x) K^{-1}(\vec x, \vec x) \vec y;
+ \end{equation}$$ $$\begin{equation}
+ K(x,x' \,|\, (\vec x, \vec y)) = K(x,x') - K(x,\vec x) K^{-1}(\vec x, \vec x) K(\vec x,x').
+ \end{equation}$$
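+
+ These two formulas translate directly into code (a NumPy sketch, ours; the jitter term is our addition for numerical stability):
+
+ ```python
+ import numpy as np
+
+ def gp_posterior(K, X_train, y_train, X_test, jitter=1e-8):
+     """Posterior mean and covariance of a zero-mean GP; K(A, B) returns a Gram matrix."""
+     Ktt = K(X_train, X_train) + jitter * np.eye(len(X_train))
+     Kst = K(X_test, X_train)                      # K(x, vec x)
+     mean = Kst @ np.linalg.solve(Ktt, y_train)
+     cov = K(X_test, X_test) - Kst @ np.linalg.solve(Ktt, Kst.T)
+     return mean, cov
+ ```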
+
+ Interestingly, training only the last layer of an infinitely wide network with NNGP $K(\cdot,\cdot)$ results in exactly the same Gaussian process. When only the last layer is trained, the NNGP coincides with the NTK. Indeed, an NTK-parameterized NN of width $n$ with readout weights $v$ can be expressed as $f(x) = \frac{1}{\sqrt{n}} v^T h(x)$ with $v \sim \mathcal{N}(0, I_n)$. The empirical NTK is therefore given by $\hat\Theta_0(x,x') = \frac{1}{n} \nabla^T_v (v^T h(x)) \nabla_v (v^T h(x')) = \frac{1}{n} h^T(x) h(x')$, which converges to $\mathbb{E}\,h^i(x) h^i(x') = K(x,x')$ as $n \to \infty$; note that $h(\cdot)$ also depends on $n$.
+
+ Recall the model prediction dynamics under a constant NTK, which is $K$ in our case: $$\begin{equation}
+ f_t(x) = f_0(x) - K(x,\vec x) K^{-1}(\vec x,\vec x) \left(I - e^{-K(\vec x,\vec x) t}\right) (f_0(\vec x) - \vec y).
+ \end{equation}$$ Since $f_0(\cdot)$ is a Gaussian process as discussed before and $K(\vec x,\vec x)$ is deterministic, $f_t(\cdot)$ is a Gaussian process for any $t \geq 0$. Its mean $\mu_t(\cdot)$ and covariance $K_t(\cdot,\cdot)$ are: $$\begin{equation}
+ \mu_t^{NNGP}(x) = K(x,\vec x) K^{-1}(\vec x,\vec x) \left(I - e^{-K(\vec x,\vec x) t}\right) \vec y;
+ \end{equation}$$ $$\begin{multline}
+ K_t^{NNGP}(x,x')
+ = K(x,x')
+ +\\+ K(x,\vec x) K^{-1}(\vec x,\vec x) \left(I - e^{-K(\vec x,\vec x) t}\right) K(\vec x,\vec x) \left(I - e^{-K(\vec x,\vec x) t}\right) K^{-1}(\vec x,\vec x) K(\vec x,x')
+ -\\- \left[K(x,\vec x) K^{-1}(\vec x,\vec x) \left(I - e^{-K(\vec x,\vec x) t}\right) K(\vec x,x') + K(x',\vec x) K^{-1}(\vec x,\vec x) \left(I - e^{-K(\vec x,\vec x) t}\right) K(\vec x,x)\right].
+ \end{multline}$$ It is easy to see that $\mu_t^{NNGP}(x) \to \mu(x \,|\, (\vec x, \vec y))$ and $K_t^{NNGP}(x,x') \to K(x,x' \,|\, (\vec x, \vec y))$ as $t \to \infty$ $\forall x,x'$.
+
+ If not only the last layer is trained, the NNGP does not generally correspond to the NTK. The corresponding training dynamics is given by $$\begin{equation}
+ f_t(x) = f_0(x) - \Theta(x,\vec x) \Theta^{-1}(\vec x,\vec x) \left(I - e^{-\Theta(\vec x,\vec x) t}\right) (f_0(\vec x) - \vec y).
+ \end{equation}$$ While $f_t(\cdot)$ is again a Gaussian process for any $t \geq 0$, its mean and covariance are different. In particular, as $t \to \infty$, they tend to $$\begin{equation}
+ \mu_\infty^{NTK}(x) = \Theta(x,\vec x) \Theta^{-1}(\vec x,\vec x) \vec y;
+ \end{equation}$$ $$\begin{multline}
+ K_\infty^{NTK}(x,x')
+ = K(x,x')
+ + \Theta(x,\vec x) \Theta^{-1}(\vec x,\vec x) K(\vec x,\vec x) \Theta^{-1}(\vec x,\vec x) \Theta(\vec x,x')
+ -\\- \left[\Theta(x,\vec x) \Theta^{-1}(\vec x,\vec x) K(\vec x,x') + \Theta(x',\vec x) \Theta^{-1}(\vec x,\vec x) K(\vec x,x)\right].
+ \end{multline}$$ As was shown in [@lee2019wide], there does not exist an initial covariance matrix (a \"prior\") such that these mean and covariance correspond to the Bayesian posterior given the training data.
+
+ The \"empirical\" counterpart of NNGPs is $\hat K(x,x') = \frac{1}{n} h^T(x) h(x')$. Compared to empirical NTKs, empirical NNGPs are easier to compute as they do not require a backward pass. The corresponding memory footprint is also lower for empirical NNGPs as they do not require computing Jacobian matrices that scale as $O(N)$, where $N$ is the number of weights. This makes NNGPs more suitable for large models. As an example, [@park2020towards] used the performance of empirical NNGPs as a proxy measure for neural architecture search. They argue that, first, empirical NTKs are too costly to compute, and second, that they provide a worse learning signal for this task.
+
+ The NNGP of a generic neural network can be computed in a recursive manner, as was demonstrated in [4](#sec:limit){reference-type="ref+label" reference="sec:limit"} on the example of fully-connected and convolutional nets: $q_{l+1}(x,x') = \mathbb{E}\,_{[z,z']^T \sim \mathcal{N}(0,\Sigma_l(x,x'))} \phi(z) \phi(z')$, where $\Sigma_l(x,x') = \begin{pmatrix} q_l(x,x) & q_l(x,x') \\ q_l(x',x) & q_l(x',x') \end{pmatrix}$; the Master theorem of [@yang2019tensor_i] gives similar formulas for a generic neural net. In the above example, there is an operation that maps a kernel $q_l(x,x')$ to a subsequent kernel $q_{l+1}(x,x')$. [@shankar2020neural] presents an algebra of operations on kernels. While this algebra consists of operations of only three types, it is enough to express the NNGP of a fully-connected or a convolutional network with any elementwise nonlinearities.
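+
+ For ReLU with He-style scaling, $\phi(z) = \sqrt{2}\,[z]_+$, the expectation above has a closed form (the arc-cosine kernel of order one), so the recursion takes a few lines (a sketch under these assumptions, ours):
+
+ ```python
+ import numpy as np
+
+ def nngp_relu(X1, X2, depth=3):
+     """NNGP recursion for a fully-connected net with phi = sqrt(2) * relu."""
+     q = X1 @ X2.T                            # q_0(x, x') = x^T x'
+     q1 = np.sum(X1 * X1, axis=1)             # q_0(x, x); constant across layers here
+     q2 = np.sum(X2 * X2, axis=1)
+     norm = np.sqrt(np.outer(q1, q2))
+     for _ in range(depth):
+         cos = np.clip(q / norm, -1.0, 1.0)
+         t = np.arccos(cos)
+         q = norm / np.pi * (np.sin(t) + (np.pi - t) * cos)   # E phi(z) phi(z')
+     return q
+ ```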
+
+ One of the major problems of kernel methods is *label agnosticism.* Recall that a kernel evaluated at a pair of points is a scalar product of their mappings to some feature space: $K(x,x') = \langle \Phi(x), \Phi(x') \rangle$. Therefore a kernel measures how similar the two points are, and a kernel method uses this information to derive responses on unseen data: $f(x) = K(x,\vec x) \vec\alpha$. Intuitively, a kernel $K$ should result in a well-generalizing model if $K(x,x')$ is positive when $y=y'$ and negative otherwise. Therefore the \"perfect\" kernel would be $K^*(x,x') = y y'$; the obvious problem is that it cannot be computed on unseen data.
+
+ A kernel that can be computed on unseen data cannot depend on labels. Therefore, if the data has several possible labelings, for a pair of data points $(x,x')$, there could be a labeling with $y=y'$ and a labeling with $y\neq y'$. At the same time, $K(x,x')$ stays the same in both cases; therefore, the corresponding kernel method cannot generalize well on both of the labelings.
+
+ As an example of several possible labelings of a single dataset, consider a dataset of pictures with two objects in each frame, and let the two objects belong to two disjoint sets of classes. Then one of the labelings may consider only the objects from the first set of classes, while the other may consider the objects from the second set.
+
+ [@chen2020label] propose two ways of making a kernel *label-aware.* The first is mixing the kernel at hand with the perfect kernel $K^*(x,x') = y y'$: $K^{HR}(x,x') = (1-\lambda) K(x,x') + \lambda K^*(x,x')$ for $\lambda \in [0,1]$. If the perfect kernel were available, the best choice would be to take $\lambda=1$. Since it is not available, we have to approximate it somehow, which makes the optimal $\lambda$ less than one.
+
+ In order to approximate $K^*(x,x')$, we need a model that maps $(x,x')$ to $y y'$. Since the training dataset for this model consists of $O(m^2)$ samples, and since the model itself has to be evaluated on $O(m)$ samples for each test point $x$, the model has to be relatively simple. [@chen2020label] consider models of the form $Z(x,x') = \vec y^T M(x,x',\vec x) \vec y$, where $M \in \mathbb{R}^{m \times m}$. One of the possible choices of $M$ is $M(x,x',\vec x)_{ij} = \psi(K(x,x'),K(x_i,x_j))$, where $\psi(z_1,z_2)$ measures similarity. As one can see, this choice of $Z$ takes a linear combination of $y_i y_j$ with weights being similarities of $K(x,x')$ and $K(x_i,x_j)$. Intuitively, this reads as \"$y y'$ and $y_i y_j$ are similar if $K(x,x')$ and $K(x_i,x_j)$ are close\".
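+
+ A minimal sketch of the first proposal (ours; only the mixing step, with `Kstar_hat` standing for whatever model, e.g. $Z(x,x')$, approximates $y y'$ on pairs involving test points):
+
+ ```python
+ import numpy as np
+
+ def label_aware_train_gram(K_train, y_train, lam=0.3):
+     """K^HR on train pairs: here K*(x_i, x_j) = y_i y_j is known exactly."""
+     return (1.0 - lam) * K_train + lam * np.outer(y_train, y_train)
+
+ def label_aware_test_block(K_test_train, Kstar_hat, lam=0.3):
+     """K^HR between test and train points: K* must be approximated."""
+     return (1.0 - lam) * K_test_train + lam * Kstar_hat
+ ```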
+
+ While the above proposal can be applied to any kernel $K$, the second label-aware kernel of [@chen2020label] is a specific modification of NTK. Let us recall the construction of $\Theta^{NTH}$, which results from integrating the learning dynamics up to the order $n^{-1}$, taking the limit $t \to \infty$, and taking the expectation (see [3](#sec:finite_width){reference-type="ref+label" reference="sec:finite_width"} and specifically Eq. ([\[eq:lantk_nth\]](#eq:lantk_nth){reference-type="ref" reference="eq:lantk_nth"})): $$\begin{multline}
+ \Theta^{NTH}(x_1,x_2)
+ = O_{2,0}^{(0)}(x_1,x_2) + n^{-1} \mathbb{E}\,O_{2,\infty}^{(1)}(x_1,x_2)
+ =\\= \Theta(x_1,x_2) + n^{-1} \mathbb{E}\,\left[O_{2,0}^{(1)}(x_1,x_2)\right] - n^{-1} \mathbb{E}\,\left[O_{3,0}^{(1)}(x_1, x_2, \vec x) \Theta^{-1}(\vec x,\vec x) f_0^{(0)}(\vec x)\right]
+ +\\+ n^{-1} \vec y^T \Theta^{-1}(\vec x,\vec x) \mathbb{E}\,\left[O_{4,0}^{(1)}(x_1, x_2, \vec x, \vec x)\right] \Theta^{-1}(\vec x,\vec x) \vec y
+ +\\+ n^{-1} \mathbb{E}\,\left[f_0^{(0),T}(\vec x) \Theta^{-1}(\vec x,\vec x) O_{4,0}^{(1)}(x_1, x_2, \vec x, \vec x) \Theta^{-1}(\vec x,\vec x) f_0^{(0)}(\vec x)\right]
+ -\\- n^{-1} \sum_{k,l=1}^m \frac{1}{\lambda_k (\lambda_k+\lambda_l)} \vec y^T \vec v_k \vec v_k^T \mathbb{E}\,\left[O_{4,0}^{(1)}(x_1, x_2, \vec x, \vec x)\right] \vec v_l \vec v_l^T \vec y
+ -\\- n^{-1} \sum_{k,l=1}^m \frac{1}{\lambda_k (\lambda_k+\lambda_l)} \mathbb{E}\,\left[f_0^{(0),T}(\vec x) \vec v_k \vec v_k^T O_{4,0}^{(1)}(x_1, x_2, \vec x, \vec x) \vec v_l \vec v_l^T f_0^{(0)}(\vec x)\right].
+ \end{multline}$$ Since $\hat\Theta_0(x_1,x_2) = O_{2,0}^{(0)}(x_1,x_2) + n^{-1} O_{2,0}^{(1)}(x_1,x_2) + O(n^{-2})$, we have $\Theta(x_1,x_2) + n^{-1} \mathbb{E}\,\left[O_{2,0}^{(1)}(x_1,x_2)\right] = \mathbb{E}\,\hat\Theta_0(x_1,x_2) + O(n^{-2})$ and $\Theta(x_1,x_2) = \mathbb{E}\,\hat\Theta_0(x_1,x_2) + O(n^{-1})$. For the same reason, $\mathbb{E}\,\left[O_{4,0}(x_1, x_2, x_3, x_4)\right] = n^{-1} \mathbb{E}\,\left[O_{4,0}^{(1)}(x_1, x_2, x_3, x_4)\right] + O(n^{-2})$. Suppose $f_0^{(0)}(\vec x) = 0$. Given this approximation, up to order $O(n^{-2})$, $$\begin{multline}
+ \Theta^{NTH}(x_1,x_2)
+ \approx \mathbb{E}\,\hat\Theta_0(x_1,x_2) + \vec y^T \left(\mathbb{E}\,\hat\Theta_0(\vec x,\vec x)\right)^{-1} \mathbb{E}\,\left[O_{4,0}(x_1, x_2, \vec x, \vec x)\right] \left(\mathbb{E}\,\hat\Theta_0(\vec x,\vec x)\right)^{-1} \vec y
+ -\\- \sum_{k,l=1}^m \frac{1}{\lambda_k (\lambda_k+\lambda_l)} \vec y^T \vec v_k \vec v_k^T \mathbb{E}\,\left[O_{4,0}(x_1, x_2, \vec x, \vec x)\right] \vec v_l \vec v_l^T \vec y.
+ \end{multline}$$ As one can see, $\Theta^{NTH}(x_1,x_2)$ depends on the train labels $\vec y$. Roughly speaking, this kernel corresponds to the NTK of a network trained until convergence ($t\to\infty$); obviously, such a kernel should depend on the training data.
+
+ As an interesting observation, $\Theta^{NTH}(x_1,x_2) = \mathbb{E}\,\hat\Theta_0(x_1,x_2) + \vec y^T M(x_1,x_2,\vec x) \vec y$ for a certain matrix $M$ --- recall that $K^{HR}(x_1,x_2)$ considered previously has a similar form.
+
+ Note that computing the Gram matrix $\Theta^{NTH}(\vec x, \vec x)$ requires computing the Gram \"matrix\" of the expected 4-th order empirical kernel $\mathbb{E}\,\left[O_{4,0}(\vec x, \vec x, \vec x, \vec x)\right]$. Instantiating this tensor requires $O(m^4)$ time and $O(m^4)$ memory, which is only possible for very small datasets.
+
+ ![ []{#fig:myrtle label="fig:myrtle"} Myrtle architecture. ](images/experiments/myrtle_avg.png){#fig:myrtle width="90%"}
+
+ <figure id="fig:myrtle_bce_cifar2_time_flops">
+ <p><img src="images/experiments/NTK_vs_ENTK_vs_NNGP_CIFAR2_Myrtle_avg_BCE_upto1e4samples.png" style="width:49.0%" alt="image" /> <img src="images/experiments/myrtle_avg_flops_CIFAR2.png" style="width:49.0%" alt="image" /></p>
+ <figcaption> <span id="fig:myrtle_bce_cifar2_time_flops" data-label="fig:myrtle_bce_cifar2_time_flops"></span> Myrtle network trained on subsets of CIFAR2 of different sizes. Different lines refer to different regimes of training (e.g. NTK, NNGP, etc.) and different stages of training (e.g. constructing the kernel, integrating the dynamics, etc.). We use BCE loss and integrate the dynamics numerically for <span class="math inline"><em>T</em> = 10<sup>4</sup></span> steps. We measure training time and the number of FLOPS. </figcaption>
+ </figure>
+
+ <figure id="fig:myrtle_bce_cifar10_time_accuracy">
+ <p><img src="images/experiments/NTK_vs_ENTK_vs_NNGP_CIFAR10_Myrtle_avg_BCE.png" style="width:49.0%" alt="image" /> <img src="images/experiments/myrtle_avg_BCE_acc_CIFAR10.png" style="width:49.0%" alt="image" /></p>
+ <figcaption> <span id="fig:myrtle_bce_cifar10_time_accuracy" data-label="fig:myrtle_bce_cifar10_time_accuracy"></span> Myrtle network trained on subsets of CIFAR10 of different sizes. Different lines refer to different regimes of training (e.g. NTK, NNGP, etc.) and different stages of training (e.g. constructing the kernel, integrating the dynamics, etc.). We use cross-entropy loss and integrate the dynamics numerically for <span class="math inline"><em>T</em> = 10<sup>4</sup></span> steps. We measure training time and accuracy. </figcaption>
+ </figure>
+
+ <figure id="fig:resnet50_nll_cifar10_time_accuracy">
+ <p><img src="images/experiments/NTK_vs_ENTK_vs_NNGP_CIFAR10_Resnet50_NLL.png" style="width:49.0%" alt="image" /> <img src="images/experiments/Resnet50_NLL_acc_CIFAR10.png" style="width:49.0%" alt="image" /></p>
+ <figcaption> <span id="fig:resnet50_nll_cifar10_time_accuracy" data-label="fig:resnet50_nll_cifar10_time_accuracy"></span> Resnet50 trained on subsets of CIFAR10 of different sizes. Different lines refer to different regimes of training (e.g. NTK, NNGP, etc.) and different stages of training (e.g. constructing the kernel, integrating the dynamics, etc.). We use cross-entropy loss and integrate the dynamics numerically for <span class="math inline"><em>T</em> = 10<sup>4</sup></span> steps. We measure training time and accuracy. </figcaption>
+ </figure>
+
+ <figure id="fig:myrtle_bce_stl2_time_accuracy">
+ <p><img src="images/experiments/ntk_myrtle_avg_time_vs_input_size_STL2_upto96x96.png" style="width:49.0%" alt="image" /> <img src="images/experiments/ntk_myrtle_avg_acc_vs_input_size_STL2_upto96x96.png" style="width:49.0%" alt="image" /></p>
+ <figcaption> <span id="fig:myrtle_bce_stl2_time_accuracy" data-label="fig:myrtle_bce_stl2_time_accuracy"></span> Myrtle network trained on a subset of STL2 of size 500 with images of different resolutions. Different lines refer to different regimes of training (e.g. NTK, NNGP, etc.) and different stages of training (e.g. constructing the kernel, integrating the dynamics, etc.). We use BCE loss and integrate the dynamics numerically for <span class="math inline"><em>T</em> = 10<sup>4</sup></span> steps. We measure training time and accuracy. </figcaption>
+ </figure>
+
+ In this section, we present a small experimental study on the scope of applicability of NTK regression in real-world scenarios. In particular, we would like to investigate, first, what maximal size $m$ of a training dataset of images of a given size we can afford with limited computational resources, and second, what maximal image resolution $d$ we can afford given a fixed dataset size. We restrict ourselves to these two questions since, for practical purposes, the dependence of NTK regression complexity on these two parameters is the most worrying: it is $O(m^2 d^4)$ for constructing the Gram matrix, $O(m^3)$ for integrating the dynamics analytically, and $O(m^2 T)$ for integrating the dynamics numerically for $T$ steps; see [5](#sec:computations){reference-type="ref+label" reference="sec:computations"}.
+
+ We use NeuralTangents [@novak2019neural] and perform all our experiments on a single GTX 1080Ti GPU with 12 GiB of memory. We consider a Myrtle network[^4] with 64 channels in all convolutional layers, see [4](#fig:myrtle){reference-type="ref+label" reference="fig:myrtle"}. We pick this architecture because it is lightweight and uses only those layers for which NTK can be computed analytically.
+
491
+ For the first experiment, we consider two classes of CIFAR10 and refer to this dataset as CIFAR2. We pick a subset of 1000 samples of the original test set of CIFAR2 and vary the size of the training subset. We optimize binary cross-entropy (BCE) and integrate the dynamics numerically for $T = 10^4$ steps. We compute the Gram matrix of a kernel using batch size 4. In [5](#fig:myrtle_bce_cifar2_time_flops){reference-type="ref+label" reference="fig:myrtle_bce_cifar2_time_flops"}, we plot training time and the number of floating-point operations (FLOPs) for different stages (i.e. Gram matrix computation, integrating the dynamics, inference on the test set) and for different regimes of training (analytical NTK, analytical NNGP, and empirical NTK) versus the size of the training dataset. As one can see, already for relatively small datasets ($m=10^4$), the most time-demanding stage is the construction of the Gram matrix $\Theta(\vec x, \vec x)$ (solid line), not the integration, which also takes time quadratic in the size of the dataset (dotted line). Also, the time to compute the NNGP kernel is almost the same as that for NTK, since both are computed analytically; see [4](#sec:limit){reference-type="ref+label" reference="sec:limit"}. We could not obtain the point $m=10^4$ for the empirical NTK (ENTK) due to numerical issues. If we extrapolate the solid line to $m=10^6$, the size of ImageNet, noting the quadratic growth, we get $5 \times 10^9$ seconds, which is around 160 years of computation. While our time measurements are device-dependent, we also measure the number of FLOPs, which, while device-independent, grows the same way as time and is also quite large. This experiment demonstrates that the naive approach to integrating the NTK dynamics indeed falls short on datasets of realistic sizes, thus calling for major optimizations. As mentioned in [5](#sec:computations){reference-type="ref+label" reference="sec:computations"}, a promising approach could be that of [@meanti2020kernel].
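+
+ Continuing the sketch above, the numerical integration of the dynamics for a non-MSE loss can be set up roughly as follows. This is hedged: the `nt.predict.gradient_descent` call follows the NeuralTangents documentation at the time of writing and its signature may differ across versions; labels are assumed to be in {0, 1}:
+
+ ```python
+ import jax
+ import jax.numpy as jnp
+ import neural_tangents as nt
+
+ def bce(fx, y):
+     # Binary cross-entropy on logits fx against {0, 1} labels y.
+     return -jnp.mean(y * jax.nn.log_sigmoid(fx)
+                      + (1 - y) * jax.nn.log_sigmoid(-fx))
+
+ y_train = jnp.ones((8, 1))  # toy labels matching x_train above
+
+ predict_fn = nt.predict.gradient_descent(
+     bce, k_train_train, y_train, learning_rate=1.0)
+
+ # Integrate the dynamics up to time T = 1e4; read out train/test outputs.
+ fx_train_T, fx_test_T = predict_fn(
+     t=1e4, fx_train_0=0.0, fx_test_0=0.0, k_test_train=k_test_train)
+ ```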
492
+
493
+ In [6](#fig:myrtle_bce_cifar10_time_accuracy){reference-type="ref+label" reference="fig:myrtle_bce_cifar10_time_accuracy"}, we present the same experiment but with all 10 classes of CIFAR10. We observe the same quadratic time growth for all three regimes of training (analytical NTK, analytical NNGP, and empirical NTK). We also report accuracy for comparison with previous works on small-data training with kernel methods (e.g. [@arora2019harnessing]).
494
+
495
+ In addition to experiments with a small network, we experimented with a variant of Resnet50 [@he2016deep]. We modify this architecture by removing batch normalization and substituting max pooling with average pooling, so as to make analytical computations possible. Results are shown in [7](#fig:resnet50_nll_cifar10_time_accuracy){reference-type="ref+label" reference="fig:resnet50_nll_cifar10_time_accuracy"}. Doing the same extrapolation to ImageNet size, we get $6.25 \times 10^{11}$ seconds, which is around $20000$ years.
496
+
497
+ Lastly, we consider two classes of STL10 and, similarly to CIFAR2, refer to this dataset as STL2. We pick a subset of 100 samples of the original test set of STL2 and 500 samples of its original train set. While STL10 has fewer labeled examples than CIFAR10, it has larger images: $96 \times 96$ for STL10 versus $32 \times 32$ for CIFAR10. We vary the size of the input image and measure training time and accuracy, similarly to the first experiment. As before, we optimize binary cross-entropy (BCE) and integrate the dynamics numerically for $T = 10^4$ steps. However, we use batch size 1 for computing the Gram matrix, since larger batch sizes do not fit in GPU memory for large image sizes. Results are shown in [8](#fig:myrtle_bce_stl2_time_accuracy){reference-type="ref+label" reference="fig:myrtle_bce_stl2_time_accuracy"}. As before, the most time-demanding part is the kernel Gram matrix computation (blue line): it grows as $O(d^4)$, where $d$ is the image resolution; see [4](#sec:limit){reference-type="ref+label" reference="sec:limit"}. If we extrapolate this line to $d=224$, the resolution at which traditional ImageNet classification models operate, we get around 150 days of computation. This experiment therefore demonstrates that not only dataset size but also image resolution can be a serious bottleneck for applying the NTK approach in practice. Also, while certain optimizations are available for the dataset-size complexity (e.g. [@meanti2020kernel]), we are not aware of any optimizations aimed at decreasing the image-resolution complexity.
2210.06742/main_diagram/main_diagram.drawio ADDED
The diff for this file is too large to render. See raw diff
 
2210.06742/paper_text/intro_method.md ADDED
@@ -0,0 +1,142 @@
1
+ # Introduction
2
+
3
+ In addition to the relatively mature area of horizontal object detection [@liu2020deep], oriented object detection has received extensive attention, especially for complex scenes where fine-grained bounding boxes (e.g. rotated/quadrilateral bounding boxes) are needed, e.g. in aerial images [@ding2018learning; @yang2019scrdet], scene text [@zhou2017east], retail scenes [@pan2020dynamic], etc.
4
+
5
+ Despite the increasing popularity of oriented object detection, many existing datasets are annotated with horizontal boxes (HBox), which may not be compatible (at least on the surface) with training an oriented detector. Hence, labor-intensive re-annotation[^2] has been performed on existing horizontally-annotated datasets. For example, DIOR-R [@cheng2022anchor] and SKU110K-R [@pan2020dynamic] are rotated box (RBox) annotations of the aerial image dataset DIOR (192K instances) [@li2020object] and the retail scene dataset SKU110K (1,733K instances) [@goldman2019precise], respectively.
6
+
7
+ An attractive question arises: can one achieve weakly supervised oriented object detection using only the (more readily available) HBox annotations rather than RBox ones? One potential technique, verified in our experiments, is HBox-supervised instance segmentation, e.g. BoxInst [@tian2021boxinst], BoxLevelSet [@li2022box], etc. Based on the segmentation masks produced by these methods, one can readily obtain the final RBox by finding the minimum circumscribed rectangle of the mask; we term this procedure HBox-Mask-RBox, instantiated as BoxInst-RBox and BoxLevelSet-RBox in this paper. Yet it in fact involves a potentially more challenging task, namely instance segmentation, whose quality can be sensitive to background noise and can heavily influence the subsequent RBox detection step, especially in complex scenes (Fig. [\[fig:boxinst_vis\]](#fig:boxinst_vis){reference-type="ref" reference="fig:boxinst_vis"}) and when objects are crowded (Fig. [\[fig:boxlevelset_vis\]](#fig:boxlevelset_vis){reference-type="ref" reference="fig:boxlevelset_vis"}). Also, involving segmentation is often more computationally costly, and the whole procedure can be time-consuming (see Tab. [\[tab:main_dota\]](#tab:main_dota){reference-type="ref" reference="tab:main_dota"}-[\[tab:main_dior\]](#tab:main_dior){reference-type="ref" reference="tab:main_dior"}).
8
+
9
+ In this paper, we propose a simple yet effective approach, dubbed **HBox-to-RBox (H2RBox)**, which achieves performance close to RBox-annotation-supervised methods, e.g. [@han2021redet; @{yang2023scrdet++}], by only using HBox annotations, and even outperforms them in a considerable number of cases, as shown in our experiments. The core of our method is weakly- and self-supervised learning, which predicts the angle of the object by learning an enforced consistency between two different views. Specifically, in the WS branch (see Fig. [2](#fig:pipeline){reference-type="ref" reference="fig:pipeline"} left), we predict five offsets in the regression sub-network based on FCOS [@tian2019fcos] so that the final decoded outputs are RBoxes. Since we only have horizontal box annotations, we use the horizontal circumscribed rectangle of the predicted RBox when computing the regression loss. Ideally, the predicted RBoxes and the corresponding ground-truth (GT) RBoxes (unlabeled) have highly overlapping horizontal circumscribed rectangles. In the SS branch (see Fig. [2](#fig:pipeline){reference-type="ref" reference="fig:pipeline"} right), we rotate the input image by a random angle and predict the corresponding RBox through a regression sub-network. Then, the consistency of the RBoxes between the two branches, including scale consistency and spatial location consistency, is learned to eliminate undesired cases and ensure the reliability of the WS branch. Our main contributions are as follows:
10
+
11
+ <figure id="fig:vis" data-latex-placement="!tb">
12
+ <p><br />
13
+ </p>
14
+ <figcaption>Visual comparison of three HBox-supervised rotated detectors on aircraft detection <span class="citation" data-cites="wei2020x"></span>, ship detection <span class="citation" data-cites="yang2018automatic"></span>, vehicle detection <span class="citation" data-cites="azimi2021eagle"></span>, etc. The HBox-Mask-RBox style methods, i.e. BoxInst-RBox <span class="citation" data-cites="tian2021boxinst"></span> and BoxLevelSet-RBox <span class="citation" data-cites="li2022box"></span>, do not perform well in complex and object-cluttered scenes.</figcaption>
15
+ </figure>
16
+
17
+ **1)** To the best of our knowledge, we propose the first HBox-annotation-based oriented object detector. Specifically, a weakly- and self-supervised angle learning paradigm is devised that closes the gap between HBox training and RBox testing, and it can serve as a plugin for existing detectors.
18
+
19
+ **2)** We prove through geometric equations that, under our designed pipeline and consistency loss, the predicted RBox is the correct GT RBox, without relying on not-fully-verified/ad-hoc assumptions, e.g. the color-pairwise affinity in BoxInst, or on additional intermediate results whose quality cannot be ensured, e.g. the feature maps used by many weakly supervised methods [@wang2022contrastmask].
20
+
21
+ **3)** Compared with the potential alternatives, e.g. HBox-Mask-RBox with its instance segmentation part fulfilled by the state-of-the-art BoxInst, our H2RBox outperforms by about 14% mAP (67.90% vs. 53.59%) on the DOTA-v1.0 dataset, requires only one third of the computational resources (6.25 GB vs. 19.93 GB), and is around 12$\times$ faster in inference (31.6 FPS vs. 2.7 FPS).
22
+
23
+ **4)** Compared with the fully RBox annotation-supervised rotation detector FCOS, H2RBox is only 0.91% (74.40% vs. 75.31%) and 1.01% (33.15% vs. 34.16%) behind on DOTA-v1.0 and DIOR-R, respectively. Furthermore, we do not add extra computation in the inference stage, thus maintaining a comparable detection speed, about 29.1 FPS vs. 29.5 FPS on DOTA-v1.0.
24
+
25
+ <figure id="fig:pipeline" data-latex-placement="!tb">
26
+ <img src="figure/pipeline.png" style="width:99.0%" />
27
+ <figcaption>Our H2RBox consists of two branches respectively fed with two augmented views (<strong>View 1 and View 2</strong>) of the input image. The left <strong>Weakly-supervised Branch</strong> in general can be any rotated object detector (FCOS here) for RBox prediction, whose circumscribed HBox is used for supervised learning given the GT HBox label, in the sense of weakly-supervised learning. This branch is also used for test-stage inference. The right <strong>Self-supervised Branch</strong> tries to achieve RBox prediction consistency between the two views with self-supervised learning. The image is from the DIOR-R dataset.</figcaption>
28
+ </figure>
29
+
30
+ # Method
31
+
32
+ An overview of H2RBox is shown in Fig. [2](#fig:pipeline){reference-type="ref" reference="fig:pipeline"}. Two augmented views of the input image are generated in a way that avoids angle-information leakage. There are two branches. One branch is used for weakly-supervised (WS) learning, where the supervision is the GT HBox from the training data, and the regression loss is calculated between the circumscribed HBox derived from the RBox predicted by this branch and the GT HBox. The other branch is trained by self-supervised (SS) learning on the two augmented views of the raw input image, which encourages consistent RBox predictions between the two views. The final loss is the weighted sum of the WS loss and the SS loss. Note that test-stage prediction involves only the WS branch.
33
+
34
+ In line with the general idea of self-supervised learning by data augmentation, given the input image, we perform a random rotation to generate View 2 while keeping View 1 identical to the input image, as shown in Fig. [2](#fig:pipeline){reference-type="ref" reference="fig:pipeline"}. However, the rotation transformation geometrically and inevitably introduces an artificial black border area, which leads to the risk of GT angle information leakage. We provide two techniques to resolve this issue:
35
+
36
+ <figure id="fig:assignment_padding" data-latex-placement="!tb">
37
+
38
+ <figcaption>Comparison of different padding methods (Sec. <a href="#sec:view_gen_leakage" data-reference-type="ref" data-reference="sec:view_gen_leakage">3.1</a>) and re-assignment strategies (Sec. <a href="#sec:reassign" data-reference-type="ref" data-reference="sec:reassign">3.4</a>). Green and red RBox represent the target <span class="math inline"><em>r</em><em>b</em><em>o</em><em>x</em><sup><em>w</em><em>s</em>*</sup></span> and <span class="math inline"><em>r</em><em>b</em><em>o</em><em>x</em><sup><em>s</em><em>s</em></sup></span>, respectively.</figcaption>
39
+ </figure>
40
+
41
+ 1\) Center Region Cropping: Crop a $\frac{\sqrt{2}}{2}s \times \frac{\sqrt{2}}{2}s$ area[^3] in the center of the image.
42
+
43
+ 2\) Reflection Padding: Fill the black border area by reflection padding.
44
+
45
+ If Center Region Cropping is used for View 2, View 1 also needs to undergo the same operation, and the corresponding ground truth must be filtered. In contrast, Reflection Padding works better than Center Region Cropping because it preserves as much of the area as possible while maintaining a higher image resolution. Fig. [\[fig:o2o_zeros\]](#fig:o2o_zeros){reference-type="ref" reference="fig:o2o_zeros"} and Fig. [\[fig:o2m_reflection\]](#fig:o2m_reflection){reference-type="ref" reference="fig:o2m_reflection"} compare zero padding and reflection padding. Note that the black border area does not participate in the regression loss calculation in the SS branch, so it does not matter that this region is filled with unlabeled foreground objects by reflection padding.
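+
+ A minimal sketch of View-2 generation with reflection padding, using OpenCV (our own illustration; the paper's actual augmentation code is not shown here, and `make_view2` is a hypothetical name):
+
+ ```python
+ import cv2
+ import numpy as np
+
+ def make_view2(img: np.ndarray, angle_deg: float) -> np.ndarray:
+     """Rotate the image around its center and fill the border by reflection
+     rather than zeros, so the black corners cannot leak the GT angle."""
+     h, w = img.shape[:2]
+     m = cv2.getRotationMatrix2D((w / 2.0, h / 2.0), angle_deg, 1.0)
+     return cv2.warpAffine(img, m, (w, h), borderMode=cv2.BORDER_REFLECT)
+
+ img = np.random.randint(0, 256, (256, 256, 3), dtype=np.uint8)
+ view2 = make_view2(img, angle_deg=30.0)
+ ```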
46
+
47
+ <figure data-latex-placement="!tb">
48
+
49
+ <figcaption>Visual comparison of our methods with and without the SS loss used in the SS branch. It can help learn the scale and spatial location consistency between the two branches.</figcaption>
50
+ </figure>
51
+
52
+ :::: wrapfigure
+ r0.59
+
+ ::: overpic
+ figure/views.png
+ :::
+
+ $$w \cdot |\cos\theta| + h \cdot |\sin\theta| = w_{ws}, \qquad w \cdot |\sin\theta| + h \cdot |\cos\theta| = h_{ws}$$
+ $$w \cdot |\cos\varphi| + h \cdot |\sin\varphi| = w_{ss}, \qquad w \cdot |\sin\varphi| + h \cdot |\cos\varphi| = h_{ss}$$
+ ::::
59
+
60
+ The two generated views (**View 1** and **View 2**) are respectively fed into the two branches, which share the parameters of the backbone and neck, specified as ResNet [@he2016deep] and FPN [@lin2017feature], as shown in Fig. [2](#fig:pipeline){reference-type="ref" reference="fig:pipeline"}. The WS branch is specified by an FCOS-based rotated object detector and is involved in both training and inference. This branch contains regression and classification sub-networks to predict the RBox, category, and center-ness. Recall that we cannot use the predicted RBox to calculate the final regression loss directly, as there are no RBox annotations but only HBoxes. Therefore, we first convert the predicted RBox into the corresponding minimum horizontal circumscribed rectangle and calculate the regression loss between the derived HBox and the GT HBox annotation (we defer the details of the loss formulation to Sec. [3.5](#subsec:loss){reference-type="ref" reference="subsec:loss"}). As the network is trained, an indirect connection (the horizontal-circumscribed-rectangle constraint) emerges between the predicted RBox and the (unlabeled) GT RBox: *no matter how an object is rotated, the corresponding horizontal circumscribed rectangles are always highly overlapping.* However, as shown in Fig. [\[fig:o_Lconsis\]](#fig:o_Lconsis){reference-type="ref" reference="fig:o_Lconsis"}, the WS loss alone can only localize the objects and is not effective enough for accurate rotation estimation.
61
+
62
+ As complementary to the WS loss, we further introduce the SS loss. The SS branch only contains one regression sub-network for predicting RBox in the rotated View 2. Given a (random) rotation transformation $\mathbf{R}$ (with degree $\Delta \theta$) as adopted in View 2, the relationship between location $(x, y)$ of View 1 in the WS branch and location $(x^{*}, y^{*})$ of View 2 with rotation $\mathbf{R}$ in the SS branch is: $$\begin{equation}
63
+ \small
64
+ \begin{aligned}
65
+ (x^{*}, y^{*}) = (x-x_{c}, y-y_{c})\mathbf{R}^{\top}+(x_{c}, y_{c}), \quad
66
+ \mathbf{R} = \left(
67
+ \begin{array}{cc}
68
+ \cos{\Delta \theta} & -\sin{\Delta \theta}\\
69
+ \sin{\Delta \theta} & \cos{\Delta \theta}\\
70
+ \end{array}
71
+ \right)
72
+ \end{aligned}
73
+ \label{eq:location}
74
+ \end{equation}$$ where $(x_{c}, y_{c})$ is the rotation center (i.e. the image center). Recall that the labels of the black border area (Fig. [3](#fig:assignment_padding){reference-type="ref" reference="fig:assignment_padding"}) in the SS branch are set as invalid/negative samples, which do not participate in the losses designed below.
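+
+ For concreteness, the location mapping of Eq. [\[eq:location\]](#eq:location){reference-type="ref" reference="eq:location"} can be implemented in a few lines (our own sketch; `map_to_view2` is a hypothetical name):
+
+ ```python
+ import numpy as np
+
+ def map_to_view2(xy, center, dtheta):
+     """Map a location (x, y) of View 1 to (x*, y*) of View 2, which is
+     rotated by dtheta (radians) about the image center."""
+     r = np.array([[np.cos(dtheta), -np.sin(dtheta)],
+                   [np.sin(dtheta),  np.cos(dtheta)]])
+     c = np.asarray(center, dtype=float)
+     return (np.asarray(xy, dtype=float) - c) @ r.T + c
+
+ # A point 10 px right of the center (50, 50) maps onto the vertical axis
+ # (the direction depends on the image coordinate convention).
+ print(map_to_view2((60.0, 50.0), (50.0, 50.0), np.pi / 2))  # -> [50. 60.]
+ ```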
75
+
76
+ Specifically, a scale loss $L_{wh}$ accounts for the scale consistency to enhance the indirect connection described above: *For augmented objects obtained from the same object through different rotations, a set of RBoxes of the same scale are predicted by the detector, and these predicted RBoxes and the corresponding GT RBoxes (unlabeled) shall have highly overlapping horizontal circumscribed rectangles.* With such an enhanced indirect connection, comprising the horizontal-circumscribed-rectangle constraint and the scale constraint, we can limit the predictions to a small number of feasible cases, explained as follows:
77
+
78
+ Fig. [\[fig:views\]](#fig:views){reference-type="ref" reference="fig:views"} shows two cases based on the above enhanced indirect connection and lists four expressions for the four variables ($w, h, \theta, \varphi$). Due to the periodicity of the angles, there are only two feasible solutions to the four equations within the angle definition range, i.e. the green GT RBox and the orange symmetric RBox. In other words, with such a strengthened indirect connection, the predicted RBox is either coincident with the GT RBox, $B^{c}(w,h,\theta)$, or symmetric about the center of the object, $B^{s}(w,h,\pi-\theta)$. It can be seen from Fig. [\[fig:o_Lconsis\]](#fig:o_Lconsis){reference-type="ref" reference="fig:o_Lconsis"} that there are still many bad cases with extremely inaccurate angles after using $L_{wh}$. Interestingly, if we apply a symmetry transformation about the center point to these bad cases, the results become much better. When generating the views, a geometric prior is available, namely the spatial transformation relationship between the two views, denoted as $\mathbf{R}$ in Eq. [\[eq:location\]](#eq:location){reference-type="ref" reference="eq:location"}. Thus, we can derive the following four transformation relationships, denoted $T\langle B_{ws},B_{ss}\rangle$, between the two branches: $$\begin{equation}
79
+ \small
80
+ \begin{aligned}
81
+ T\langle B_{ws}^{c}, B_{ss}^{c}\rangle &= \{\mathbf{R}\}, \quad T\langle B_{ws}^{c}, B_{ss}^{s}\rangle = \{\mathbf{R}, \mathbf{S}\} = \{\mathbf{S}, \mathbf{R}^{\top}\} \\
82
+ T\langle B_{ws}^{s}, B_{ss}^{s}\rangle &= \{\mathbf{R}^{\top}\}, \quad
83
+ T\langle B_{ws}^{s}, B_{ss}^{c}\rangle = \{\mathbf{R}^{\top}, \mathbf{S}\}=\{\mathbf{S}, \mathbf{R}\}
84
+ \end{aligned}
85
+ \label{eq:relations}
86
+ \end{equation}$$ where $B_{ws}^{c}$ and $B_{ss}^{s}$ represent the coincident bounding box predicted by the WS branch and the symmetric bounding box predicted by the SS branch, respectively. Here $\mathbf{S}$ denotes the symmetric transformation. Taking $T\langle B_{ws}^{c}, B_{ss}^{s}\rangle = \{\mathbf{R}, \mathbf{S}\}$ as an example, it means $B_{ss}^{s}=\mathbf{S}(\mathbf{R} \cdot B_{ws}^{c})$.
87
+
88
+ Therefore, an effective way to eliminate the symmetric case is to let the model know that the relationship between the RBoxes predicted by the two branches can only be $\mathbf{R}$. Inspired by the above analysis, a spatial location loss is used to enforce the spatial transformation relationship $\mathbf{R}$ between the RBoxes predicted by the two branches. Specifically, the RBox predicted by the WS branch is first transformed by $\mathbf{R}$, and then several losses (e.g. the center point loss $L_{xy}$ and the angle loss $L_{\theta}$) are used to measure its location consistency with the RBox predicted by the SS branch. In fact, the spatial location consistency, especially the angle loss, provides a fifth angle constraint equation ($\varphi-\theta=\Delta\theta\ne 0$), so that the system of equations in Fig. [\[fig:views\]](#fig:views){reference-type="ref" reference="fig:views"} has a unique solution (i.e. the predicted RBox is the GT RBox); the proof is non-strict because the system of equations is nonlinear.
89
+
90
+ The final SS learning consists of scale-consistent and spatial-location-consistent learning: $$\begin{equation}
91
+ \small
92
+ \begin{aligned}
93
+ Sim\langle \mathbf{R} \cdot B_{ws}, B_{ss}\rangle = 1
94
+ \end{aligned}
95
+ \label{eq:similarity}
96
+ \end{equation}$$ Fig. [\[fig:w_Lconsis\]](#fig:w_Lconsis){reference-type="ref" reference="fig:w_Lconsis"} shows the visualization when the SS loss is used, with accurate predictions. The appendix shows visualizations of the feasible solutions for different combinations of constraints.
97
+
98
+ Since the consistency of the prediction results of the two branches needs to be calculated, the labels need to be re-assigned in the SS branch. Specifically, the labels at location $(x^{*}, y^{*})$ of the SS branch, including center-ness ($cn^{*}$), target category ($c^{*}$) and target GT HBox ($gtbox^{h*}$), are the same as those at location $(x, y)$ of the WS branch. In addition, we need to assign the $rbox^{ws}(x_{ws},y_{ws},w_{ws},h_{ws},\theta_{ws})$ predicted by the WS branch as the target RBox of the SS branch to calculate the SS loss. We propose two reassignment strategies:
99
+
100
+ **1) One-to-one (O2O) assignment:** With $cn$, $c$ and $gtbox^{h}$, the $rbox^{ws}$ predicted at location $(x, y)$ in the WS branch is used as the target RBox at $(x^{*},y^{*})$ of the SS branch (see Fig. [\[fig:o2o_zeros\]](#fig:o2o_zeros){reference-type="ref" reference="fig:o2o_zeros"}).
101
+
102
+ **2) One-to-many (O2M) assignment:** Use the $rbox^{ws}$ closest to the center point of the $gtbox^{h}_{(x,y)}$ as the target RBox at location $(x^{*},y^{*})$ of SS branch, as shown in Fig. [\[fig:o2m_reflection\]](#fig:o2m_reflection){reference-type="ref" reference="fig:o2m_reflection"}.
103
+
104
+ Fig. [\[fig:reassign\]](#fig:reassign){reference-type="ref" reference="fig:reassign"} visualizes the difference between the two re-assignment strategies. After re-assigning, we need to perform a rotation transformation on the $rbox^{ws}$ to get the $rbox^{ws*}(x^{*}_{ws},y^{*}_{ws},w^{*}_{ws},h^{*}_{ws},\theta^{*}_{ws})$ for calculating the SS loss according to Eq. [\[eq:similarity\]](#eq:similarity){reference-type="ref" reference="eq:similarity"}: $$\begin{equation}
105
+ \small
106
+ \begin{aligned}
107
+ (x_{ws}^{*}, y_{ws}^{*}) = (x_{ws}-x_{c}, y_{ws}-y_{c})\mathbf{R}^{\top}+(x_{c}, y_{c}), \quad (w_{ws}^{*}, h_{ws}^{*}) = (w_{ws}, h_{ws}), \quad
108
+ \theta_{ws}^{*} = \theta_{ws} + \Delta \theta
109
+ \end{aligned}
110
+ \label{eq:transformation}
111
+ \end{equation}$$ The visualized label assignment in Fig. [3](#fig:assignment_padding){reference-type="ref" reference="fig:assignment_padding"} further shows that the SS loss effectively eliminates predictions of the undesired case. The label reassignment for different detectors may require different strategies; the key is to design a suitable matching strategy for the prediction results of the two views, which allows the network to learn the consistency better.
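+
+ The re-assignment transformation above admits a direct implementation (our own sketch; `transform_rbox_ws` is a hypothetical name): the box center is rotated, the size is kept, and the angle is shifted by $\Delta\theta$.
+
+ ```python
+ import numpy as np
+
+ def transform_rbox_ws(rbox, center, dtheta):
+     """Rotate a WS-branch RBox (x, y, w, h, theta) by dtheta (radians)
+     about the image center to obtain rbox^{ws*}."""
+     x, y, w, h, theta = rbox
+     xc, yc = center
+     r = np.array([[np.cos(dtheta), -np.sin(dtheta)],
+                   [np.sin(dtheta),  np.cos(dtheta)]])
+     x_new, y_new = np.array([x - xc, y - yc]) @ r.T + np.array([xc, yc])
+     return (x_new, y_new, w, h, theta + dtheta)
+
+ print(transform_rbox_ws((60.0, 50.0, 20.0, 10.0, 0.1), (50.0, 50.0), np.pi / 6))
+ ```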
112
+
113
+ Since the WS branch is a rotated object detector based on FCOS, the losses in this part mainly include the regression $L_{reg}$, classification $L_{cls}$, and center-ness $L_{cn}$. We define the WS loss in the WS branch as follows: $$\begin{equation}
114
+ \small
115
+ \begin{aligned}
116
+ L_{ws} & = \frac{\mu_1}{N_{pos}}\sum_{(x,y)}L_{cls}(p_{(x,y)}, c_{(x,y)}) + \frac{\mu_2}{N_{pos}}\sum_{(x,y)}L_{cn}(cn_{(x,y)}^{'}, cn_{(x,y)}) \\
117
+ & + \frac{\mu_3}{\sum{cn_{pos}}}\sum_{(x,y)}\mathbbm{1}_{\{c_{(x,y)}>0\}}cn_{(x,y)}L_{reg}\left(r2h(rbox_{(x,y)}^{ws}), gtbox_{(x,y)}^{h}\right)
118
+ \end{aligned}
119
+ \label{eq:Lfcos}
120
+ \end{equation}$$ where $L_{cls}$ is the focal loss [@lin2017focal], $L_{cn}$ is the cross-entropy loss, and $L_{reg}$ is the IoU loss [@yu2016unitbox]. $N_{pos}$ denotes the number of positive samples. $p$ and $c$ denote the class probability distribution computed by the sigmoid function and the target category, respectively. $rbox^{ws}$ and $gtbox^{h}$ represent the RBox predicted in the WS branch and the horizontal GT box, respectively. $cn^{'}$ and $cn$ indicate the predicted and target center-ness. $\mathbbm{1}_{\{c_{(x,y)}>0\}}$ is the indicator function, being 1 if $c_{(x,y)}>0$ and 0 otherwise. The $r2h(\cdot)$ function converts an RBox to its corresponding horizontal circumscribed rectangle. We set the hyperparameters $\mu_1=1$, $\mu_2=1$ and $\mu_3=1$ by default.
121
+
122
+ Then, the SS loss between $rbox^{ws*}(x_{ws}^{*}, y_{ws}^{*}, w_{ws}^{*}, h_{ws}^{*}, \theta_{ws}^{*})$ and $rbox^{ss}(x_{ss}, y_{ss}, w_{ss}, h_{ss}, \theta_{ss})$ predicted by the SS branch is: $$\begin{equation}
123
+ \small
124
+ \begin{aligned}
125
+ L_{ss} = \frac{1}{\sum{cn_{pos}^{*}}}\sum_{(x^{*},y^{*})}\mathbbm{1}_{\{c_{(x^{*},y^{*})}^{*}>0\}}cn_{(x^{*},y^{*})}^{*}L_{reg}(rbox^{ws*}_{(x^{*},y^{*})},rbox^{ss}_{(x^{*},y^{*})})
126
+ \end{aligned}
127
+ \label{eq:Lcon}
128
+ \end{equation}$$ $$\begin{equation}
129
+ \small
130
+ \begin{aligned}
131
+ L_{reg}(rbox^{ws*},rbox^{ss}) =\gamma_{1}L_{xy}+\gamma_{2}L_{wh\theta}, \quad L_{xy}=\sum_{t \in (x,y)}l_{1}(t_{ws}^{*}, t_{ss})\\
132
+ L_{wh\theta}= \min\{L_{iou}(B_{ws}, B_{ss}^{1})+|\sin(\theta_{ws}^{*}-\theta_{ss})|, L_{iou}(B_{ws}, B_{ss}^{2})+|\cos(\theta_{ws}^{*}-\theta_{ss})|\}
133
+ \end{aligned}
134
+ \end{equation}$$ where $B_{ws}(-w_{ws}^{*}, -h_{ws}^{*}, w_{ws}^{*}, h_{ws}^{*})$, $B_{ss}^{1}(-w_{ss}, -h_{ss}, w_{ss}, h_{ss})$ and $B_{ss}^{2}(-h_{ss}, -w_{ss}, h_{ss}, w_{ss})$. We set $\gamma_{1}=0.15$ and $\gamma_{2}=1$ by default. $L_{wh\theta}$ takes into account the loss discontinuity caused by boundary issues [@yang2021rethinking], such as the periodicity of the angle and the exchangeability of edges.
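+
+ The boundary-aware term $L_{wh\theta}$ can be sketched as follows (our own minimal NumPy illustration of the min-over-two-cases construction; the IoU is computed on the axis-aligned boxes $B_{ws}$, $B_{ss}^{1}$, $B_{ss}^{2}$ defined above):
+
+ ```python
+ import numpy as np
+
+ def iou_loss(b1, b2):
+     """1 - IoU for axis-aligned boxes given as (x1, y1, x2, y2)."""
+     iw = max(0.0, min(b1[2], b2[2]) - max(b1[0], b2[0]))
+     ih = max(0.0, min(b1[3], b2[3]) - max(b1[1], b2[1]))
+     inter = iw * ih
+     a1 = (b1[2] - b1[0]) * (b1[3] - b1[1])
+     a2 = (b2[2] - b2[0]) * (b2[3] - b2[1])
+     return 1.0 - inter / (a1 + a2 - inter)
+
+ def l_wh_theta(w_ws, h_ws, th_ws, w_ss, h_ss, th_ss):
+     """Minimum over the aligned case and the edge-swapped case, handling
+     the angle-periodicity and edge-exchangeability boundary issues."""
+     b_ws = (-w_ws, -h_ws, w_ws, h_ws)
+     b_ss1 = (-w_ss, -h_ss, w_ss, h_ss)
+     b_ss2 = (-h_ss, -w_ss, h_ss, w_ss)
+     return min(iou_loss(b_ws, b_ss1) + abs(np.sin(th_ws - th_ss)),
+                iou_loss(b_ws, b_ss2) + abs(np.cos(th_ws - th_ss)))
+ ```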
135
+
136
+ The overall loss is a weighted sum of the WS loss and the SS loss where we set $\lambda=0.4$ by default. $$\begin{equation}
137
+ \small
138
+ % \begin{aligned}
139
+ L_{total}= L_{ws}+\lambda L_{ss}
140
+ % \end{aligned}
141
+ \label{eq:Ltotal}
142
+ \end{equation}$$
2301.00437/main_diagram/main_diagram.drawio ADDED
@@ -0,0 +1 @@
 
 
1
+ <mxfile host="app.diagrams.net" modified="2023-01-25T02:17:18.979Z" agent="5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/109.0.0.0 Safari/537.36" etag="SK19gm3WRqyB5ffy12A3" version="20.8.10" type="google"><diagram name="Page-1" id="XbSKVPj0vYaSjSHGPn3e">7V1bc+LIDv41edwpt6/wOElmd07VTp3dmofZfdpywATvGMwYMyHn1592cBuQDThNW622qUpV8AVjS5/V0ie1+s55WGx/y8LV/Es6jZI725pu75zHO9tmlmfxf8We190ed+Tsdjxn8bQ8ab/ja/y/SHyz3LuJp9H66MQ8TZM8Xh3vnKTLZTTJj/aFWZa+HJ82S5PjX12Fz1Ftx9dJmNT3foun+Xy3dyQeq9j/OYqf5+KXmVUeWYTi5PIS63k4TV92u97OcT7dOQ9Zmua7T4vtQ5QUwhNy2V3o1xNHqxvLomXe5guLHz83f//4x/7zW/DwZer86UXfk1/Kq/wMk035wOXN5q9CAvwqXNh84/5lHufR11U4KY68cH3zffN8kfAtxj+G69VOA7N4G/EfvZ/FSfKQJmn2diFn6kWjqcv3r/Ms/R4dHBnZT47v8yP1ZxI3GGV5tD3YVT7jb1G6iPLslZ9SHh2X8i/xxkbl9stee6WgrPmB4sS+sMTLc3XhvUj5h1Kq75Cw69clOuUQKzfTLJ+nz+kyTD7t995n6WY5LWT4aPGt/Tm/p+mqlPa/UZ6/lu9LuMnTY11E2zj/6+Dz38WlPnjl1uO2vPLbxqvYWPLn/etw4+Bbxeb+a29b4nsnlbZON9kkOiObEmx5mD1H+ZnzxrvzCrmdhUAWJWEe/zx+dZVr1O77O1O9DLreGaf3EvZ1W6W+S9ixNEvYR5RwxLiMgyYJj/3ACRVJ2LbA0OpqFnHQfxHr9l7GvRex9sFORDo9lrGj21IwzDhHk4x1D3gM0y/WZCt0u212Gxkvpx8L4oNvTZJwvY4nx6I9ji1lgz65AFMQPM5VYaPggS6FjaULdjFsPNCm16BNsa91dFn+wh9pzJ+sAhOzAjC2O8eX2D13+a1DIgdeyD/hh4kL7QRTu9Ab4qrHvgKEbcKzGwiPnVQqILSBRbMAdlqDMAAg9JFB2CaCJQvC7mHXljJDgp0DYMdkYTcCsAuQYefdYHcOdsIJpIK7rsydjT3mjm64O4s71k/cVYZSXGiEjLs2BNCQcWfRwh2EizTuIIDHuLjzjB5nW8YYVEHjyIIGstfYoGmTc7mBphvQQEdcFjQ19HUNmjZZJLKgMQ0mjmzgV4MJcuDnGe0IqSK97JZuES3m9VR5y9XkA7IzLig2/Rg8iSDjCAJlvs8lz7trbLTJcJK1T91bJOoEAeSTTCEIfPuGO5MIgq5w58BBtWvcKU9CnsQCFdV1FXExD1l1ylN3w1UdMsPiK6flBqs67ADWV0+OGTHHxGk7UBPDHRyooV9njIOonF8brMlwkAdqcd/qVPfvZrEqiSjfeMpbGZeJ7YAFZLiCkyo3raxOGTTAhbDrmwL14TxqWO7SggUYxGtBsilJsoBMya+8xaDm4HVlMrBL0wL1lbg3k6HeZKA7j3Wq4MOHDzVk5NGWCxBM41mmS374Pkzi52UBGa7TiO8/mgRUnnMAomJeTzwJk4/l1xbxdPrWTaFprtEx4mbpMi89Uds7h6Z3zB0CJc9VCfQBjvwGHEHLrm62bD0QG7ZCAt0Kqdf0Dloh1UCmTSH1Go9hK8TTrBBBVd0UUm6PdSukzhwMWiGO7kF9hNnIBWnSNezgpbt5wKhN0stwGevuMzLqYbscal21Rj3sl0Otr9aohw1zqHXWGte9wt7JWPeYN7Z7J2PnVPW2NhljtjnUI2PtY964f/5xTca6x7xx//xjKGP9Yx6m7zYbTaLJpEnGTyPP9c6mW9rL2CVnj9v0WDBbxvrtMaZ/rEnGuu1x1YK+x0LWbpCZNXAq1DtVqaSLCmVWPWYZtEa059OYVY9whq0R3Qk1ZtXjoWFrRJD72jSC2jYXZ7D2qHn2jGFSLXqErN21ZwyTa9EkZO2+PcMkW/QIWb9vzzCzkbNZ5DcLeRqMnyxFQvZB02hbv5BRKS09QtadxmEMldPSI2Tt3oVdpwLqQjavB8blHtPlYxIpnXcgIwEr3tuWzrtwfh6cmtFx6TwT+CEAqGqGpn0dVJx+QsWBBSjIbZ9YXxZjOAuvi3N4RBnkZRzSmu3jnJqTcK3JqjXY7xyHA12PoT3yPFrIcwHyZPtLuJBRQ24ExcxekkGZBfTa4pCY0wbmO9bWkTFnJO5J92ksHBLzCPszEpNpaH0SRca5aTBcgG3XzBksyawjcwU4iHlSysABW34hN/lgQ13sRUw+Ms53OsXpvxt5cJ4pcguRqjeCSuS18meoaFJVG/JacTi2DXGM5sKVecGtLQoxR+dUUcG7LYoHLoS8PChz7BsO34NDYj4VDOZl7aELh0jkfnrMufHzB1OPTfOwasG8bLt8F64fityFlzlG8/P4OKTFTtV4A+nUNhzgkRvIM8doth4DebQ8whopIcvPu6faCKAhjw4/r6qogpjTpqrvuQdbV2JT6A4ZCh3ZSI3bIo+Ym+Ze8K5aI0/zqkbMIcPPU0UeLccMpq+l2/VqT1876sl/nY7+mBZMTvUNvJrfxeYzhKUlYKCG4UVJ94XXnmt2b6mAg25lpvlUDmDwa/3jjUlruzYVHF5hpIi5PdBISYNDd+bZJcPPXwEOWpSROnDAsA55CQvmkiHNrwAHcfdGGhy6M8wuHT4RNVQn5qdAN5eNxh88SUjV+lbULtU5qPrJPFLByqVl1KTTaDY+UvrJFFJFivR0gFoHEXyk9LOslyhS5PmaWhsUdKR4RjM217s0l53pES3sQYpPugAcrgcyxsceGZaGKvZopTQ6wx46BeQpp4BMG7PkpyzBMQu7UsNTz9Dgvv20eF11LzUMpbHrDD3ldYamvdTqglvsHLOnnFkbrO7wDTIdAqtK4DrXEegBbahIV1nCvCw+h+Up57CMe9GV8UrYWXVPOas0WN2hG2mRKjPWa6aV06xN7pKeI6Hba/bJEIBXjNfEODpV/LAHp7LiZxJ8uwfwIEajKYMHNEIBPjzI1FLhDkfVGh4XwVd1lieCPh9235Otxgkgo+jio4/MzFJ541Q1xaeCD1VEsQ+tE3Yxuk+nWksRL1H19u8dVnSHtj4dDgt5HHNMHcdgGbnsNDxfdzLSJ1PW1d9RTJpdh14OekG6T2KSpgILQwUZsIhctr9PbVEabOZG+Ff67QY2olxTxywIGenYCyZysPO1ARnWkC72iI2IqhjrADpe2H3NAttk7LWMBZUikVjUeKnYpDUS4WRB9BGYzGRBbCvYdskHaiNwzZ2XZSwCMJSjJ3SDG/fZedQonZkJwGCL3lQquHGfxmAFloZgc5/i94+w
4idcXPez9O1O96Dxf2xSceCX9ZteP/ITmL/avmlXHOefnov//1muNrm4Gr+93QV3x2qAzKNtsf94KdlluuSH78Mkfl4WSOXAibK744Voy3MOsFusLRtPwuRj+bVFPJ0WP9O43u0x0ItbLBHLBy8l69cCa8Aa1gj2G5AKgXAIyuuWrx21CN/W83BVfJxssuT1Pgsn34v389KCwXtZFluzJF59Lj8n4VOU/JGu4zxOC5VkuwetVPU7OF6pTKg+iWb5OdVO4yyalF/mL18hWRXKYzBh2rSM9qhBfd2tPjxqEQHd1Lc7Wpv4ItoRH6qv6e3rUH1NRROKDO49V/RTYQ5taxaF+SYrPi3SaZQM3goz4LIHDe+xhWuGm7w0RUB4jKIVP57EyyjM+IdllL+k2ffBowC2n2JWgz1AhkFTisct/ryHRZjPn2Z3wf3nu+DxH/6f8f/lUfI6s8+Gh+115ohCH+gia3Ogxk0OFFTZNwNVxqMsNa9Zbbn1+muGrLImp+mUyr4MUGUe7AigX2UdOkr/3eS30LSeDq+cJH1KJ1MOo6zdMK0KCGX9+GRXHuSqC18PTlsVJ6zP3DDI84h1RPYw211RLS02bvLKrsRhKyqUCEyYqrU9bBiBI5dQVb9PwKL0sBsWxIl0ETDECXZRC3cWqODENJ1Ll1fq13lTVHCdzgfwjksnu2r6Rk522Zb6CWtSRUADsQuqfAbs8grbIlPaY5rOldkGfJ2rL6m5jQXv0Dd6XDC4plPd2Wd03dGZsNeOJSKCgM7IH3xrbRpP2HcE4MduRnc5Nxw1sqO+LGmsDDXixgmgxmgESPN82u0GI8Pz3RCgx3cQ3XsIIGBwI4d0wyGIGuxWeTYzutmV2aiRzjDoRw0ZJvE61HgGoKazVQ7w14WzGZkJfv23Np3hBj+6oTPxr4fcd3c4QY+BlPOng9UdfvRChvk0TefKVurDf1/JcJ3D1Tn2WC5mIOrNR6/5Y+YG+3LK9I8eOdp0eMoB+HIGr/drC2AM15szdukzfue3t9xEnKCvnWWLyYi6a1dNmsXSmf7xvQGj+UDqOFHXyh72eMVmB2yjGUCzrEuPUEOwnnLYCMCuqLXJsIrDsxvSXol+u0GGl7whQI/dEDdOAAHDsxuynJV2u+EYzW3uUWNibKMONei2xu4Haky0NdJ5FP22hg7DekOAFrvhqvdRev/+92eer9uBr2GyJs2d9evSGf+NRoC5PRpco/u3mI0ac7u5uD2Zg2EkalT1BsNHTQcZV6M1qer9R6+xcW850R6gRln9Ld/M0qIb9P70LFzNv7wtk+F8+j8=</diagram></mxfile>
2301.00437/main_diagram/main_diagram.pdf ADDED
Binary file (28.1 kB). View file
 
2301.00437/paper_text/intro_method.md ADDED
@@ -0,0 +1,449 @@
1
+ # Introduction
2
+
3
+ Despite the impressive performance of deep neural networks (DNNs) across areas of machine learning and artificial intelligence [@Krizhevsky12; @Simonyan14; @Good16; @He15; @Huang17; @Brown20], the highly non-convex nature of these systems, as well as their massive number of parameters, ranging from hundreds of millions to hundreds of billions, impose a significant barrier to having a concrete theoretical understanding of how they work. Additionally, a variety of optimization algorithms have been developed for training DNNs, which makes it more challenging to analyze the resulting trained networks and learned features [@Ruder16]. In particular, the modern practice of training DNNs includes training the models far beyond *zero error* to achieve *zero loss* in the terminal phase of training (TPT) [@Ma17; @Belkin18; @Belkin19]. A mathematical understanding of this training paradigm is important for studying the generalization and expressivity properties of DNNs [@Papyan20; @Han21].\
4
+ Recently, [@Papyan20] has empirically discovered an intriguing phenomenon, named Neural Collapse ($\mathcal{NC}$), which reveals a common pattern of the learned deep representations across canonical datasets and architectures in image classification tasks. [@Papyan20] defined Neural Collapse as the existence of the following four properties:
5
+
6
+ - $(\mathcal{NC}1)$ **Variability collapse:** features of the same class converge to a unique vector (i.e., the class-mean), as training progresses.
7
+
8
+ - $(\mathcal{NC}2)$ **Convergence to simplex ETF:** the optimal class-means have the same length and are equally and maximally pairwise seperated, i.e., they form a simplex Equiangular Tight Frame (ETF).
9
+
10
+ - $(\mathcal{NC}3)$ **Convergence to self-duality:** up to rescaling, the class-means and classifiers converge on each other. 
11
+
12
+ - $(\mathcal{NC}4)$ **Simplification to nearest class-center:** given a feature, the classifier converges to choosing whichever class has the nearest class-mean to it.
13
+
14
+ Theoretically, it has been proven that $\mathcal{NC}$ emerges in the last layer of DNNs during TPT when the models belong to the class of "unconstrained features model" (UFM) [@Mixon20] and trained with cross-entropy (CE) loss or mean squared error (MSE) loss. With regard to classification tasks, CE is undoubtedly the most popular loss function to train neural networks. However, MSE has recently been shown to be effective for classification tasks, with comparable or even better generalization performance than CE loss [@Hui20; @Demirkaya20; @Zhou22b].\
15
+ **Contributions:** We provide a thorough analysis of the global solutions to the problem of training deep linear networks with MSE and CE losses under the unconstrained features model defined in Section [2.1](#sec:UFM_setting){reference-type="ref" reference="sec:UFM_setting"}. Moreover, we study the geometric structure of the learned features and classifiers under a more practical setting where the dataset is imbalanced among classes. Our contributions are three-fold:\
16
+ **1. UFM + MSE + balanced + deep linear network:** We provide the *first mathematical analysis of the global solutions for deep linear networks with arbitrary depths and widths under UFM setting*, showing that the global solutions exhibit $\mathcal{NC}$ properties and how adding the bias term can affect the collapsed structure, when training the model with the MSE loss and balanced data.\
17
+ **2. UFM + MSE + imbalanced + plain/deep linear network:** We provide the *first geometric analysis for the plain UFM*, which includes only one layer of weight after the unconstrained features, when training the model with the MSE loss and imbalanced data. Additionally, we also generalize this setting to the deep linear network one.\
18
+ **3. UFM + CE + balanced + deep linear network:** We study deep linear networks trained with CE loss and demonstrate the existence of $\mathcal{NC}$ for any global minimizer in this setting.\
19
+ **Related works:** In recent years, there has been a rapid increase in interest in $\mathcal{NC}$, resulting in a decent amount of works in a short period of time. Under UFM, these works studied different training problems, proving ETF and $\mathcal{NC}$ properties are exhibited by any global solutions of the loss functions. In particular, a line of works use UFM with CE training to analyze theoretical abstractions of $\mathcal{NC}$ [@Zhu21; @Fang21; @Lu20]. Other works study UFM with MSE loss [@Tirer22; @Zhou22; @Ergen20; @Rangamani22]. For MSE loss, recent extensions to account for additional layers with non-linearity are studied in [@Tirer22; @Rangamani22], or with batch normalization [@Ergen20]. The work [@Rangamani22] studies deep homogeneous networks with MSE loss and trained with stochastic gradient descent. Specifically, the critical points of gradient flow satisfying the so-called symmetric quasi-interpolation assumption are proved to exhibit $\mathcal{NC}$ properties, but the other solutions are not investigated. Furthermore, [@Zhu21; @Zhou22; @Zhou22b] have shown the benign optimization landscape for several loss functions under the plain UFM setting, demonstrating that critical points can only be global minima or strict saddle points. Another line of work exploits the ETF structure to improve the network design by initially fixing the last-layer linear classifier as a simplex ETF and not performing any subsequent learning [@Zhu21; @Yang22].\
20
+ Most recent papers study Neural Collapse under a balanced setting, i.e., the number of training samples in every class is identical. This setting is vital for the existence of the simplex ETF structure. To the best of our knowledge, Neural Collapse with imbalanced data is studied in [@Fang21; @Christos22; @Yang22; @Xie22]. In particular, [@Fang21] is the first to observe that in the imbalanced setting, the collapse of features within the same class ($\mathcal{NC}1$) is preserved, but the geometry skews away from the ETF. They also present a phenomenon called "Minority Collapse": for large levels of imbalance, the minority classifiers collapse to the same vector. [@Christos22] theoretically studies the SVM problem, whose global minima follow a more general geometry than the ETF, called "SELI". However, this work also makes clear that the unregularized version of the CE loss only converges to KKT points of the SVM problem, which are not necessarily global minima. [@Yang22] studies the imbalanced setting but with fixed last-layer linear classifiers initialized as a simplex ETF right at the beginning. [@Xie22] proposed a novel loss function for balancing different components of the gradients for imbalanced learning. A comparison of our results with some existing works regarding the study of global optimality conditions is shown in Table [\[table:1\]](#table:1){reference-type="ref" reference="table:1"}.\
21
+ This work also relates to recent advances in studying the optimization landscape in deep neural network training. As pointed out in [@Zhu21], the UFM takes a top-down approach to the analysis of deep neural networks, where last-layer features are treated as free optimization variables, in contrast to the conventional bottom-up approach that studies the problem starting from the input [@Baldi89; @Zhu18; @Kawaguchi16; @Yun17; @Laurent17; @Safran17; @Yun18]. These works study the optimization landscape of two-layer linear networks [@Baldi89; @Zhu18], deep linear networks [@Kawaguchi16; @Yun17; @Laurent17], and non-linear networks [@Safran17; @Yun18]. [@Zhu21] provides an interesting perspective on the differences between the top-down and bottom-up approaches and on how results stemming from the UFM can provide more insight into network design and the generalization of deep learning.\
22
+ **Organization:** We structure this paper as follows: we describe the Neural Collapse ($\mathcal{NC}$) phenomenon and discuss related works in Section [1](#sec:introduction){reference-type="ref" reference="sec:introduction"}. In Section [2](#sec:problemsetup){reference-type="ref" reference="sec:problemsetup"}, we set up the necessary terminology and introduce the "unconstrained features model" (UFM) setting used throughout our theoretical analyses. We provide the $\mathcal{NC}$ characteristics for deep linear networks trained with MSE loss under the balanced setting in Section [3](#sec:mainresults){reference-type="ref" reference="sec:mainresults"}. Next, we study a similar problem but with the imbalanced setting in Section [4](#sec:imbalance){reference-type="ref" reference="sec:imbalance"}. The global optimality conditions for deep linear networks trained with CE loss under the balanced setting are studied in Section [5](#sec:CEloss){reference-type="ref" reference="sec:CEloss"}. We empirically validate our theoretical results with various settings in Section [6](#sec:experimental_results){reference-type="ref" reference="sec:experimental_results"}. The paper ends with concluding remarks in Section [7](#sec:conclusion){reference-type="ref" reference="sec:conclusion"}. Technical proofs, additional experiments, and experimental details are provided in the Appendix.
23
+
24
+ **Notation:** For a weight matrix $\mathbf{W}$, we use $\mathbf{w}_{j}$ to denote its $j$-th row vector. $\| . \|_{F}$ denotes the Frobenius norm of a matrix and $\| . \|_{2}$ denotes the $\text{L}_{2}$-norm of a vector. $\otimes$ denotes the Kronecker product. The symbol "$\propto$" denotes proportionality, i.e., equality up to a positive scalar. Moreover, we denote the best rank-$k$ approximation of a matrix $\mathbf{A}$ as $\mathcal{P}_{k}(\mathbf{A})$. We also use some common matrix notations: $\mathbf{1}_{n}$ is the all-ones vector, and $\operatorname{diag}\{a_{1},\ldots, a_{K}\}$ is a square diagonal matrix of size $K \times K$ with diagonal entries $a_{1},\ldots, a_{K}$.
25
+
26
+ We consider the classification task with $K$ classes. Let $n_{k}$ denote the number of training samples of class $k$, $\forall \: k \in [K]$ and $N:= \sum_{k=1}^{K} n_{k}$. A typical deep neural network $\mathbf{\psi} (\cdot): \mathbb{R}^{D} \to \mathbb{R}^{K}$ can be expressed as follows: $$\begin{align}
27
+ \mathbf{\psi} (\mathbf{x}) = \mathbf{W} \mathbf{\phi}(\mathbf{x}) + \mathbf{b}, \nonumber
28
+ \end{align}$$ where $\phi (\cdot): \mathbb{R}^{D} \to \mathbb{R}^{d}$ is the feature mapping, and $\mathbf{W} \in \mathbb{R}^{K \times d}$ and $\mathbf{b} \in \mathbb{R}^{K}$ are the last-layer linear classifiers and bias, respectively. Formally, the feature mapping $\phi(.)$ consists of a multilayer nonlinear compositional mapping, which can be written as: $$\begin{align}
29
+ \phi_{\theta}(\mathbf{x}) =
30
+ \sigma(\mathbf{W}_{L} \ldots
31
+ \sigma(\mathbf{W}_{1} \mathbf{x} + \mathbf{b}_{1}) + \mathbf{b}_{L} ), \nonumber
32
+ \end{align}$$ where $\mathbf{W}_{l}$ and $\mathbf{b}_{l}$, $l=1,\ldots,L$, are the weight matrix and bias at layer $l$, respectively. Here, $\sigma(\cdot)$ is a nonlinear activation function. Let $\theta:=\{\mathbf{W}_{l}, \mathbf{b}_{l}\}_{l=1}^{L}$ be the set of parameters in the feature mapping and $\Theta := \left\{ \mathbf{W}, \mathbf{b}, \theta \right\}$ be the set of all network's parameters. We solve the following optimization problem to find the optimal values for $\Theta$: $$\begin{align}
33
+ \min_{\Theta}
34
+ \sum_{k=1}^{K}
35
+ \sum_{i=1}^{n_{k}}
36
+ \mathcal{L}(\psi(\mathbf{x}_{k,i}), \mathbf{y}_{k})
37
+ + \frac{\lambda}{2} \| \Theta \|_F^2,
38
+ \label{eq:loss}
39
+ \end{align}$$ where $\mathbf{x}_{k,i}\in \mathbb{R}^{D}$ is the $i$-th training sample in the $k$-th class, and $\mathbf{y}_{k} \in \mathbb{R}^{K}$ denotes its corresponding label, which is a one-hot vector whose $k$-th entry is 1 and all other entries are 0. Also, $\lambda > 0$ is the regularization hyperparameter that controls the impact of the weight decay penalty, and $\mathcal{L}(\psi(\mathbf{x}_{k,i}), \mathbf{y}_{k})$ is the loss function that measures the difference between the output $\psi(\mathbf{x}_{k,i})$ and the target $\mathbf{y}_{k}$.
40
+
41
+ <figure id="fig:DNN" data-latex-placement="t">
42
+ <img src="fig/NC_DNN.png" />
43
+ <figcaption>Illustration of UFM, followed by linear layers.</figcaption>
44
+ </figure>
45
+
46
+ # Method
47
+
48
+ Following recent studies of the $\mathcal{NC}$ phenomenon, we adopt the *unconstrained features model (UFM)* in our setting. UFM treats the last-layer features $\mathbf{h}=\phi(\mathbf{x}) \in \mathbb{R}^{d}$ as free optimization variables. This relaxation can be justified by the well-known result that an overparameterized deep neural network can approximate any continuous function [@Hornik89; @Hornik91; @Zhou18; @Yarotsky18]. Using the UFM, we consider the following slight variant of [\[eq:loss\]](#eq:loss){reference-type="eqref" reference="eq:loss"}: $$\begin{align}
49
+ % \begin{gathered}
50
+ \min_{\mathbf{W}, \mathbf{H}, \mathbf{b}}
51
+ f(\mathbf{W}, \mathbf{H}, \mathbf{b}) &:=
52
+ \frac{1}{2 N}
53
+ \sum_{k=1}^{K}
54
+ \sum_{i=1}^{n_{k}}
55
+ \mathcal{L}(\mathbf{W} \mathbf{h}_{k,i} + \mathbf{b}, \mathbf{y}_{k})
56
+ + \frac{\lambda_{W}}{2} \| \mathbf{W} \|_F^2
57
+ + \frac{\lambda_{H}}{2} \| \mathbf{H} \|_F^2
58
+ + \frac{\lambda_{b}}{2} \| \mathbf{b} \|_2^2,
59
+ % \end{gathered}
60
+ \label{eq:UFM}
61
+ \end{align}$$ where $\mathbf{h}_{k,i}$ is the feature of the $i$-th training sample in the $k$-th class. We let $\mathbf{H}:=[\mathbf{h}_{1,1},\ldots, \mathbf{h}_{1,n_{1}}, \newline \mathbf{h}_{2,1},\ldots, \mathbf{h}_{K,n_{K}}] \in \mathbb{R}^{d \times N}$ be the matrix of unconstrained features. The feature class-means and global-mean are computed as $\mathbf{h}_{k} := n_{k}^{-1} \sum_{i=1}^{n_{k}} \mathbf{h}_{k,i}$ for $k=1,\ldots,K$ and $\mathbf{h_{G}} := N^{-1} \sum_{k=1}^{K} \sum_{i=1}^{n_{k}} \mathbf{h}_{k,i}$, respectively. In this paper, we also denote $\mathbf{H}$ by $\mathbf{H_1}$ and use these notations interchangeably. The weight decay in problem [\[eq:UFM\]](#eq:UFM){reference-type="eqref" reference="eq:UFM"} is applied to the last-layer classifier $\mathbf{W}$ and the last-layer features $\mathbf{H}$ instead of to the network parameters $\Theta$. This simplification has been observed in [@Zhu21] and empirically shown to exhibit similar $\mathcal{NC}$ phenomena and comparable performance to the common regularization in problem [\[eq:loss\]](#eq:loss){reference-type="eqref" reference="eq:loss"}.\
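+
+ As a concrete instance (our own sketch, not from the paper's codebase), specializing the loss $\mathcal{L}$ in the problem above to the squared error gives the following NumPy objective:
+
+ ```python
+ import numpy as np
+
+ def ufm_mse_objective(W, H, b, Y, lam_W, lam_H, lam_b):
+     """Regularized UFM objective with MSE loss.
+     W: (K, d) classifier, H: (d, N) free last-layer features,
+     b: (K,) bias, Y: (K, N) one-hot label matrix."""
+     N = Y.shape[1]
+     fit = np.linalg.norm(W @ H + b[:, None] - Y, 'fro') ** 2 / (2 * N)
+     reg = 0.5 * (lam_W * np.linalg.norm(W, 'fro') ** 2
+                  + lam_H * np.linalg.norm(H, 'fro') ** 2
+                  + lam_b * np.linalg.norm(b) ** 2)
+     return fit + reg
+ ```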
62
+ **Extending UFM to the setting with $M$ linear layers:** $\mathcal{NC}$ phenomenon has been studied extensively for different loss functions under UFM but with only 1 to 2 layers of weights. In this work, we study $\mathcal{NC}$ under UFM in its significantly more general form with $M \ge 2$ linear layers by generalizing [\[eq:UFM\]](#eq:UFM){reference-type="eqref" reference="eq:UFM"} to deep linear networks with arbitrary depths and widths (see Fig. [1](#fig:DNN){reference-type="ref" reference="fig:DNN"} for an illustration). We consider the following generalization of [\[eq:UFM\]](#eq:UFM){reference-type="eqref" reference="eq:UFM"} in the $M$-linear-layer setting: $$\begin{align}
63
+ &\underset{\substack{\mathbf{W}_{M},\ldots, \mathbf{W}_{1}, \mathbf{H}_{1}, \mathbf{b}}}{\text{min}} \,\frac{1}{2N}
64
+ \sum_{k=1}^{K}
65
+ \sum_{i=1}^{n_{k}}
66
+ \mathcal{L}(\mathbf{W}_{M} \mathbf{W}_{M-1} \ldots \mathbf{W}_{1} \mathbf{h}_{k,i}+ \mathbf{b}, \mathbf{y}_{k})
67
+ + \frac{\lambda_{W_{M}}}{2} \| \mathbf{W}_{M} \|^2_F + \frac{\lambda_{W_{M-1}}}{2} \| \mathbf{W}_{M-1} \|^2_F
68
+ \nonumber \\
69
+ &+
70
+ \ldots +
71
+ \frac{\lambda_{W_{1}}}{2} \| \mathbf{W}_{1} \|^2_F
72
+ +
73
+ \frac{\lambda_{H_{1}}}{2} \| \mathbf{H}_{1} \|^2_F + \frac{\lambda_{b}}{2} \| \mathbf{b} \|_{2}^{2},
74
+ \label{eq:UFM_general_linear}
75
+ \end{align}$$ where $M \geq 2$, $\lambda_{W_{M}}, \ldots, \lambda_{W_{1}}, \lambda_{H_{1}}, \lambda_{b} > 0$ are regularization hyperparameters, and $\mathbf{W}_{M} \in \mathbb{R}^{K \times d_{M}}$, $\mathbf{W}_{M-1} \in \mathbb{R}^{d_{M} \times d_{M-1}}, \ldots, \mathbf{W}_{1} \in \mathbb{R}^{d_{2} \times d_{1}}$ with $d_{M}, d_{M-1}, \ldots, d_{1}$ are arbitrary positive integers. In our setting, we do not consider the biases of intermediate hidden layers.\
76
+ **Imbalanced data:** Without loss of generality, we assume $n_{1} \geq n_{2} \geq \ldots \geq n_{K}$. This setting is more general than those in previous works, where only two different class sizes are considered, i.e., the majority classes of $n_{A}$ training samples and the minority classes of $n_{B}$ samples with the imbalance ratio $R:= n_{A} / n_{B} > 1 \label{eq:im_ratio}$ [@Fang21; @Christos22].\
77
+ We now recall the definition of the simplex ETF and define the "General Orthogonal Frame" (GOF), which is the convergence geometry of the class-means and classifiers in imbalanced MSE training problem with no bias (see Section [4](#sec:imbalance){reference-type="ref" reference="sec:imbalance"}).
78
+
79
+ ::: {#def:ETF .definition}
80
+ **Definition 1** (Simplex Equiangular Tight Frame). *A standard simplex ETF is a collection of points in $\mathbb{R}^{K}$ specified by the columns of: $$\begin{align}
81
+ \mathbf{M} = \sqrt{\frac{K}{K-1}}
82
+ (\mathbf{I}_{K} - \frac{1}{K} \mathbf{1}_{K} \mathbf{1}_{K}^{\top}). \nonumber
83
+ \end{align}$$ In other words, we also have: $$\begin{align}
84
+ \mathbf{M}^{\top} \mathbf{M} = \mathbf{M} \mathbf{M}^{\top}
85
+ = \frac{K}{K-1} (\mathbf{I}_{K} - \frac{1}{K} \mathbf{1}_{K} \mathbf{1}_{K}^{\top}). \nonumber
86
+ \end{align}$$ As in [@Zhu21], in this paper we consider general simplex equiangular tight frame (ETF) as a collection of points in $\mathbb{R}^{d} (d \geq K - 1)$ specified by the columns of $\sqrt{\frac{K}{K-1}} \mathbf{P}
87
+ (\mathbf{I}_{K} - \frac{1}{K} \mathbf{1}_{K} \mathbf{1}_{K}^{\top})$, where (i) when $d \geq K$, $\mathbf{P} \in \mathbb{R}^{d \times K}$ has $K$ orthogonal columns, i.e., $\mathbf{P}^{\top} \mathbf{P} = \mathbf{I}_{K}$, and (ii) when $d = K - 1$, $\mathbf{P}$ is chosen such that $\left[ \mathbf{P}^{\top} \quad \frac{1}{\sqrt{K}} \mathbf{1}_{K} \right]$ is an orthonormal matrix.*
88
+ :::
89
+
90
+ ::: {#def:GOF .definition}
91
+ **Definition 2** (General Orthogonal Frame). *A standard general orthogonal frame (GOF) is a collection of points in $\mathbb{R}^{K}$ specified by the columns of: $$\begin{align}
92
+ \mathbf{N} = \frac{1}{\sqrt{\sum_{k=1}^{K} a_{k}^{2}}} \operatorname{diag}(a_{1}, a_{2}, \ldots, a_{K}), \: a_{i} > 0 \: \: \forall \: i \in [K]. \nonumber
93
+ \end{align}$$ We also consider the general version of GOF as a collection of points in $\mathbb{R}^{d} \: (d \geq K)$ specified by the columns of $\mathbf{P} \mathbf{N}$ where $\mathbf{P} \in \mathbb{R}^{d \times K}$ is an orthonormal matrix, i.e. $\mathbf{P}^{\top} \mathbf{P} = \mathbf{I}_{K}$. In the special case where $a_{1} = a_{2} = \ldots = a_{K}$, we have $\mathbf{N}$ follows OF structure in [@Tirer22], i.e., $\mathbf{N}^{\top} \mathbf{N} \propto \mathbf{I}_{K}$. Fig. [2](#fig:geo){reference-type="ref" reference="fig:geo"} shows a visualization for GOF versus OF and ETF.*
94
+ :::
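+
+ Both geometries are easy to instantiate and check numerically (our own sketch; the lengths $a_k$ below are hypothetical values):
+
+ ```python
+ import numpy as np
+
+ K, d = 4, 6
+ P = np.linalg.qr(np.random.randn(d, K))[0]     # orthonormal: P^T P = I_K
+
+ # General simplex ETF (Definition 1), lifted to R^d.
+ M = np.sqrt(K / (K - 1)) * (np.eye(K) - np.ones((K, K)) / K)
+ etf = P @ M
+ gram = K / (K - 1) * (np.eye(K) - np.ones((K, K)) / K)
+ assert np.allclose(etf.T @ etf, gram)          # equal norms, equal angles
+
+ # General Orthogonal Frame (Definition 2): orthogonal directions with
+ # arbitrary positive lengths a_k.
+ a = np.array([3.0, 2.0, 1.5, 1.0])
+ Nmat = np.diag(a) / np.sqrt(np.sum(a ** 2))
+ gof = P @ Nmat
+ assert np.allclose(gof.T @ gof, Nmat.T @ Nmat)  # columns pairwise orthogonal
+ ```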
95
+
96
+ ::: remark
97
+ **Remark 1**. *OF is more structured than the ETF geometry: it can recover the latter when we center these vectors by their mean (see [@Tirer22] for details).*
98
+ :::
99
+
100
+ <figure id="fig:geo" data-latex-placement="!htb">
101
+
102
+ <figcaption>Visualization of geometries of Frobenius-normalized classifiers and features with <span class="math inline"><em>K</em> = 3</span> classes. For imbalanced data, the number of examples for each class is 30, 10, and 5.</figcaption>
103
+ </figure>
104
+
105
+ In this section, we present our study on the global optimality conditions for the $M$-layer deep linear networks ($M \geq 2$), trained with the MSE loss under the balanced setting, i.e., $n_{1} = n_{2} = ... = n_{K} := n$, extending the prior results that consider only one or two hidden layers. We consider the following optimization problem for training the model: $$\begin{align}
106
+ \underset{\substack{\mathbf{W}_{M},\ldots, \mathbf{W}_{1} \\ \mathbf{H}_{1}, \mathbf{b}}}{\text{min}} \,
107
+ \frac{1}{2N} \| \mathbf{W}_{M} \mathbf{W}_{M-1} \ldots \mathbf{W}_{1}
108
+ \mathbf{H}_{1} + \mathbf{b} \mathbf{1}^{\top}_{n} - \mathbf{Y} \|_F^2
109
+ + \frac{\lambda_{W_{M}}}{2} \| \mathbf{W}_{M} \|^2_F +
110
+ \ldots +
111
+ \frac{\lambda_{W_{1}}}{2} \| \mathbf{W}_{1} \|^2_F +
112
+ \frac{\lambda_{H_{1}}}{2} \| \mathbf{H}_{1} \|^2_F, \label{eq:bias_free}
113
+ \end{align}$$ where $\mathbf{Y}=\mathbf{I}_{K} \otimes \mathbf{1}_{n}^{\top} \in \mathbb{R}^{K \times N}$ is the one-hot vectors matrix. Note that [\[eq:bias_free\]](#eq:bias_free){reference-type="eqref" reference="eq:bias_free"} is a special case of [\[eq:UFM_general_linear\]](#eq:UFM_general_linear){reference-type="eqref" reference="eq:UFM_general_linear"} when $\lambda_{b_{M}} = 0$.\
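+
+ For instance, the balanced one-hot label matrix $\mathbf{Y}=\mathbf{I}_{K} \otimes \mathbf{1}_{n}^{\top}$ can be built directly with a Kronecker product (a two-line NumPy illustration):
+
+ ```python
+ import numpy as np
+
+ K, n = 3, 4
+ Y = np.kron(np.eye(K), np.ones((1, n)))  # shape (K, N) with N = K * n
+ ```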
114
+ We further consider two different settings from [\[eq:bias_free\]](#eq:bias_free){reference-type="eqref" reference="eq:bias_free"}: (i) bias-free, i.e., excluding $\mathbf{b}$, and (ii) last-layer unregularized bias, i.e., including $\mathbf{b}$. We now state the characteristics of the global solutions to these problems.
115
+
116
+ ::: {#thm:bias-free .theorem}
117
+ **Theorem 1**. *Let $R : = \min(K, d_{M}, d_{M-1}, \ldots, d_{2}, d_{1})$ and $\left(\mathbf{W}_{M}^*, \mathbf{W}_{M-1}^*, \ldots, \mathbf{W}_1^*, \mathbf{H}_1^*, \mathbf{b}^* \right)$ be any global minimizer of [\[eq:bias_free\]](#eq:bias_free){reference-type="eqref" reference="eq:bias_free"}. Denoting $a:= K \sqrt[M]{K n \lambda_{W_{M}} \lambda_{W_{M-1}} \ldots \lambda_{W_{1}} \lambda_{H_{1}}}$, then the following results hold for both (i) bias-free setting with $\mathbf{b}^*$ excluded and (ii) last-layer unregularized bias setting with $\mathbf{b}^*$ included:*
118
+
119
+ - *If $a < \frac{(M-1)^{\frac{M-1}{M}}}{M^{2}}$, we have:\
120
+ ($\mathcal{NC}1$) $\mathbf{H}_1^*=\overline{\mathbf{H}}^{*} \otimes \mathbf{1}_n^{\top}$, where $\overline{\mathbf{H}}^{*} = [\mathbf{h}_{1}^{*}, \ldots, \mathbf{h}_{K}^{*}] \in \mathbb{R}^{d \times K}$ and $\mathbf{b}^{*} = \frac{1}{K} \mathbf{1}_{K}$.\
121
+ ($\mathcal{NC}2$) $\forall \: j = 1,\ldots,M$ : $$\begin{align}
122
+ \mathbf{W}_{M}^{\ast} \mathbf{W}_{M}^{\ast
123
+ \top} \propto
124
+ \overline{\mathbf{H}}^{\ast \top} \overline{\mathbf{H}}^{\ast}
125
+ \propto
126
+ \mathbf{W}_{M}^{\ast} \mathbf{W}_{M-1}^{\ast}
127
+ \ldots
128
+ \overline{\mathbf{H}}^{*}
129
+ \nonumber \\ \propto
130
+ (\mathbf{W}_{M}^{\ast} \mathbf{W}_{M-1}^{\ast} \ldots \mathbf{W}_{j}^{\ast})(\mathbf{W}_{M}^{\ast} \mathbf{W}_{M-1}^{\ast} \ldots \mathbf{W}_{j}^{\ast})^{\top}
131
+ \nonumber
132
+ \end{align}$$ and align to:*
133
+
134
+ - *OF structure if [\[eq:bias_free\]](#eq:bias_free){reference-type="eqref" reference="eq:bias_free"} is bias-free: $$\begin{align}
135
+ \left\{\begin{matrix}
136
+ \mathbf{I}_{K} &\text{if } R \geq K \\
137
+ \mathcal{P}_{R}(\mathbf{I}_{K}) & \text{if } R < K
138
+ \end{matrix}\right. .
139
+ \nonumber
140
+ \end{align}$$*
141
+
142
+ - *ETF structure if [\[eq:bias_free\]](#eq:bias_free){reference-type="eqref" reference="eq:bias_free"} has last-layer bias $\mathbf{b}$: $$\begin{align}
143
+ \left\{\begin{matrix}
144
+ \mathbf{I}_{K} - \frac{1}{K} \mathbf{1}_{K} \mathbf{1}_{K}^{\top}&\text{if } R \geq K - 1 \\
145
+ \mathcal{P}_{R} \left( \mathbf{I}_{K} - \frac{1}{K} \mathbf{1}_{K} \mathbf{1}_{K}^{\top} \right) & \text{if } R < K - 1
146
+ \end{matrix}\right. .
147
+ \nonumber
148
+ \end{align}$$*
149
+
150
+ *($\mathcal{NC}3$) $\forall \: j = 1,\ldots,M$: $$\begin{align}
151
+ \begin{gathered}
152
+ \mathbf{W}_{M}^{\ast} \mathbf{W}_{M-1}^{\ast} \ldots \mathbf{W}_{1}^{\ast} \propto \overline{\mathbf{H}}^{* \top}, \\
153
+ \mathbf{W}_{M}^{*} \mathbf{W}_{M-1}^{*} \ldots \mathbf{W}_{j}^{*} \propto (\mathbf{W}_{j-1}^{*} \ldots \mathbf{W}_{1}^{*} \overline{\mathbf{H}}^{*})^{\top}. \nonumber
154
+ \end{gathered}
155
+ \end{align}$$*
156
+
157
+ - *If $a > \frac{(M-1)^{\frac{M-1}{M}}}{M^{2}}$, [\[eq:bias_free\]](#eq:bias_free){reference-type="eqref" reference="eq:bias_free"} only has the trivial global minimum $(\mathbf{W}_{M}^*, \mathbf{W}_{M-1}^*, \ldots, \mathbf{W}_1^*, \mathbf{H}_1^{*}, \mathbf{b}^{*} ) = (\mathbf{0}, \mathbf{0},\ldots, \mathbf{0}, \mathbf{0}, \frac{1}{K} \mathbf{1}_{K})$.*
158
+
159
+ - *If $a = \frac{(M-1)^{\frac{M-1}{M}}}{M^{2}}$, [\[eq:bias_free\]](#eq:bias_free){reference-type="eqref" reference="eq:bias_free"} has the trivial global solution $(\mathbf{W}_{M}^*, \ldots, \mathbf{W}_1^*, \mathbf{H}_1^{*}, \mathbf{b}^{*} ) = (\mathbf{0}, .., \mathbf{0}, \mathbf{0}, \frac{1}{K} \mathbf{1}_{K})$ as well as nontrivial global solutions that have the same $(\mathcal{NC}1)$ and $(\mathcal{NC}3)$ properties as the first case above.\
160
+ For $(\mathcal{NC}2)$ property, for $j = 1, \ldots, M$, we have: $$\begin{align}
161
+ \mathbf{W}_{M}^{\ast} \mathbf{W}_{M}^{\ast \top} \propto
162
+ \overline{\mathbf{H}}^{\ast \top} \overline{\mathbf{H}}^{\ast}
163
+ \propto
164
+ \mathbf{W}_{M}^{\ast} \mathbf{W}_{M-1}^{\ast}
165
+ \ldots
166
+ \overline{\mathbf{H}}^{*} \propto
167
+ \nonumber \\
168
+ (\mathbf{W}_{M}^{\ast} \mathbf{W}_{M-1}^{\ast} \ldots \mathbf{W}_{j}^{\ast})(\mathbf{W}_{M}^{\ast} \mathbf{W}_{M-1}^{\ast} \ldots \mathbf{W}_{j}^{\ast})^{\top}
169
+ \nonumber
170
+ \end{align}$$ and align to: $$\begin{align}
171
+ \left\{\begin{matrix}
172
+ \mathcal{P}_{r} \left( \mathbf{I}_{K} \right) &\text{if \eqref{eq:bias_free} is bias-free} \\
173
+ \mathcal{P}_{r} \left( \mathbf{I}_{K} - \frac{1}{K} \mathbf{1}_{K} \mathbf{1}_{K}^{\top} \right) &\text{if \eqref{eq:bias_free} has last-layer bias}
174
+ \end{matrix}\right. , \nonumber
175
+ \end{align}$$ where $r$ is the number of positive singular values of $\overline{\mathbf{H}}^{*}$.*
176
+ :::
177
+
178
+ The detailed proof of Theorem [1](#thm:bias-free){reference-type="ref" reference="thm:bias-free"} is provided in Appendix [9](#sec:proofs_balanced){reference-type="ref" reference="sec:proofs_balanced"}. Our proof first characterizes the critical points of the loss function, showing that the weight matrices of the network share the same set of singular values, up to a factor depending on the weight decay. Then, we apply the singular value decomposition to these weight matrices to transform the loss function into a function of the singular values of $\mathbf{W}_{1}$ and the singular vectors of $\mathbf{W}_{M}$. Because the singular values/vectors separate in this expression, we can optimize each one individually. This method shares some similarities with the proof for the bias-free case in [@Tirer22], which transforms a lower bound of the loss function into a function of singular values. Furthermore, the threshold $(M-1)^{\frac{M-1}{M}}/{M^{2}}$ for the constant $a$ is derived from the minimizer of the function $g(x) = 1/(x^{M} + 1) + bx$ for $x \geq 0$: for instance, if $b > (M-1)^{\frac{M-1}{M}}/M$, then $g(x)$ is minimized at $x = 0$, the optimal singular values are all $0$, and the trivial solution follows (a numerical check of this threshold is sketched after the takeaways below).\
179
+ The main difficulties and novelties of our proofs for deep linear networks are: i) we observe that the product of many matrices can be simplified by using SVD with identical orthonormal bases between consecutive weight matrices (see Lemma [5](#lm:4){reference-type="ref" reference="lm:4"}) and, thus, only the singular values of $\mathbf{W}_{1}$ and left singular vectors of $\mathbf{W}_{M}$ remain in the loss function, ii) optimal singular values are related to the minimizer of the function $g(x) = 1/(x^M + 1) + bx$ (see Appendix [9.2.1](#sec:study_g){reference-type="ref" reference="sec:study_g"}), and iii) we study the properties of optimal singular vectors to derive the geometries of the global solutions.\
180
+ Theorem [1](#thm:bias-free){reference-type="ref" reference="thm:bias-free"} implies the following interesting results:
181
+
182
+ - **Features collapse**: With the class-means matrix $\overline{\mathbf{H}}^{*} = [\mathbf{h}^{*}_{1}, \ldots, \mathbf{h}^{*}_{K}] \in \mathbb{R}^{d \times K}$, we have $\mathbf{H}_{1}^{*} = \overline{\mathbf{H}}^{*} \otimes \mathbf{1}_{n}^{\top}$, i.e., the features within each class collapse to their class-mean.
183
+
184
+ - **Convergence to OF/Simplex ETF:** The class-means matrix, the last-layer linear classifiers, or the product of consecutive weight matrices converge to OF in the case of bias-free and simplex ETF in the case of having last-layer bias. This result is consistent with the two and three-layer cases in [@Tirer22; @Zhou22].
185
+
186
+ - **Convergence to self-duality:** If we split the product $\mathbf{W}_{M}^{*} \ldots \mathbf{W}_{1}^{*} \overline{\mathbf{H}}^{*}$ at any single point into two components, the two components are perfectly aligned with each other up to rescaling. This generalizes the previous results, which demonstrate that the last-layer linear classifiers perfectly match the class-means after rescaling.
187
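+
+ As a quick numerical sanity check of the threshold discussed above (our illustration, not part of the paper), one can minimize $g(x) = 1/(x^{M} + 1) + bx$ on a grid and watch the minimizer jump to $0$ as $b$ crosses $(M-1)^{\frac{M-1}{M}}/M$:
+
+ ```python
+ import numpy as np
+
+ def g(x, M, b):
+     return 1.0 / (x**M + 1.0) + b * x
+
+ x = np.linspace(0.0, 5.0, 500001)
+ for M in (2, 3, 5):
+     b_star = (M - 1) ** ((M - 1) / M) / M   # threshold from the text
+     for b in (0.9 * b_star, 1.1 * b_star):
+         x_min = x[np.argmin(g(x, M, b))]
+         print(f"M={M}, b={b:.4f}: argmin of g is {x_min:.3f}")
+ # Below b_star the minimizer is a nonzero root near (M-1)^(1/M);
+ # above b_star the minimizer is x = 0, matching the trivial solution.
+ ```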
+
188
+ ::: remark
189
+ **Remark 2**. *The convergence of the class-means matrix to OF/Simplex ETF happens when $d_{m} \geq K$ (or $K-1$) $\: \forall \: m \in [M]$, which often holds in practice [@Krizhevsky12; @He15]. Otherwise, they converge to the best rank-$R$ approximation of $\mathbf{I}_{K}$ or $\mathbf{I}_{K} - \frac{1}{K} \mathbf{1}_{K}\mathbf{1}_{K}^{\top}$, in which case the class-means have neither the equal-norm nor the maximal pairwise-separation property. This result is consistent with the two-layer case observed in [@Zhou22].*
190
+ :::
191
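+
+ The remark above contrasts the full OF/ETF with its best rank-$R$ approximation. As a minimal check (our code, not the paper's), the simplex-ETF Gram matrix $\mathbf{I}_{K} - \frac{1}{K} \mathbf{1}_{K} \mathbf{1}_{K}^{\top}$ indeed has rank $K-1$, equal diagonal entries (equal norms), and pairwise cosines $-1/(K-1)$ (maximal separation), exactly the two properties lost in the rank-deficient case:
+
+ ```python
+ import numpy as np
+
+ K = 4
+ G = np.eye(K) - np.ones((K, K)) / K          # simplex-ETF Gram matrix
+ print(np.round(np.linalg.eigvalsh(G), 6))    # [0. 1. 1. 1.] -> rank K-1
+ print(G[0, 1] / G[0, 0], -1.0 / (K - 1))     # pairwise cosine = -1/(K-1)
+ ```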
+
192
+ ::: remark
193
+ **Remark 3**. *From the proofs, we can show that under the condition $d_{m} \geq K,$ $\: \forall \: m \in [M]$, the optimal value of the loss function is strictly smaller than when this condition does not hold. Our result aligns with [@Zhu18], which empirically observes that a larger network (i.e., one with larger width) tends to exhibit more severe $\mathcal{NC}$ and smaller training errors.*
194
+ :::
195
+
196
+ The majority of theoretical results for $\mathcal{NC}$ only consider the balanced data setting, i.e., the same number of training samples for each class. This assumption plays a vital role in the existence of the well-structured ETF geometry. In this section, we instead consider the imbalanced data setting and derive the first geometry analysis under this setting for MSE loss. Furthermore, we extend our study from the plain UFM setting, which includes only one layer of weight after the unconstrained features, to the deep linear network one.
197
+
198
+ The bias-free plain UFM with MSE loss is given by: $$\begin{align}
199
+ \min_{\mathbf{W}, \mathbf{H}} \frac{1}{2N}
200
+ \| \mathbf{W} \mathbf{H} - \mathbf{Y} \|_F^2
201
+ &+ \frac{\lambda_{W}}{2} \| \mathbf{W} \|_F^2
202
+ + \frac{\lambda_{H}}{2} \| \mathbf{H} \|_F^2 ,
203
+ \label{eq:UFM_imbalance}
204
+ \end{align}$$ where $\mathbf{W} \in \mathbb{R}^{K \times d}$, $\mathbf{H} \in \mathbb{R}^{d \times N}$, and $\mathbf{Y} \in \mathbb{R}^{K \times N}$ is the one-hot vectors matrix consisting of $n_{k}$ one-hot vectors for each class $k$, $\forall \: k \in [K]$. We now state the $\mathcal{NC}$ properties of the global solutions of [\[eq:UFM_imbalance\]](#eq:UFM_imbalance){reference-type="eqref" reference="eq:UFM_imbalance"} under the imbalanced data setting when the feature dimension $d$ is at least the number of classes $K$.
205
+
206
+ ::: {#thm:UFM_imbalance .theorem}
207
+ **Theorem 2**. *Let $d \geq K$ and $(\mathbf{W}^{*}, \mathbf{H}^{*})$ be any global minimizer of problem [\[eq:UFM_imbalance\]](#eq:UFM_imbalance){reference-type="eqref" reference="eq:UFM_imbalance"}. Then, we have:\
208
+ $(\mathcal{NC}1) \: \:
209
+ \mathbf{H}^{*} = \overline{\mathbf{H}}^{*} \mathbf{Y}
210
+ \Leftrightarrow \mathbf{h}_{k,i}^{*} = \mathbf{h}_{k}^{*} \: \forall \: k \in [K], i \in [n_{k}], \nonumber$ where $\overline{\mathbf{H}}^{*} = [\mathbf{h}_{1}^{*},\ldots,\mathbf{h}_{K}^{*} ] \in \mathbb{R}^{d \times K}$.\
211
+ $(\mathcal{NC}2)$ Let $a:= N^{2} \lambda_{W} \lambda_{H}$, we have: $$\begin{align}
212
+ \begin{gathered}
213
+ % W W^T
214
+ \mathbf{W}^{*} \mathbf{W}^{* \top}
215
+ = \operatorname{diag}
216
+ \left\{s_{k}^{2} \right\}_{k=1}^{K},
217
+ \\
218
+ % Mean H^T H
219
+ \overline{\mathbf{H}}^{* \top}
220
+ \overline{\mathbf{H}}^{*}
221
+ =
222
+ \operatorname{diag}
223
+ \left\{
224
+ \frac{s_{k}^{2}}{(s_{k}^{2} + N \lambda_{H})^{2}}
225
+ \right\}_{k=1}^{K}, \nonumber
226
+ \end{gathered}
227
+ \end{align}$$ $$\begin{align}
228
+ \begin{gathered}
229
+ % W H
230
+ \mathbf{W}^{*} \mathbf{H}^{*}
231
+ = \operatorname{diag}
232
+ \left\{
233
+ \frac{s_{k}^{2}}{s_{k}^{2} + N \lambda_{H}}
234
+ \right\}_{k=1}^{K}
235
+ \mathbf{Y}
236
+ = \begin{bmatrix}
237
+ \frac{s_{1}^{2}}{s_{1}^{2} + N \lambda_{H}} \mathbf{1}_{n_{1}}^{\top} & \ldots & \mathbf{0} \\
238
+ \vdots & \ddots & \vdots \\
239
+ \mathbf{0} & \ldots & \frac{s_{K}^{2}}{s_{K}^{2} + N \lambda_{H}} \mathbf{1}_{n_{K}}^{\top} \\
240
+ \end{bmatrix}. \nonumber
241
+ \end{gathered}
242
+ \end{align}$$ where:*
243
+
244
+ - *If $\frac{a}{n_{1}} \leq \frac{a}{n_{2}} \leq \ldots \leq \frac{a}{n_{K}} \leq 1$: $$\begin{align}
245
+ \begin{aligned}
246
+ s_{k} =
247
+ \sqrt{ \sqrt{\frac{n_{k} \lambda_{H}}{\lambda_{W}} }
248
+ - N \lambda_{H}} \quad &\forall \: k \in [K] \nonumber
249
+ \end{aligned}
250
+ \end{align}$$*
251
+
252
+ - *If there exists a $j \in [K-1]$ s.t. $\frac{a}{n_{1}} \leq \frac{a}{n_{2}} \leq \ldots \leq \frac{a}{n_{j}} \leq 1 < \frac{a}{n_{j+1}} \leq \ldots \leq \frac{a}{n_{K}}$: $$\begin{align}
253
+ \begin{aligned}
254
+ s_{k} = \left\{\begin{matrix}
255
+ \sqrt{ \sqrt{\frac{n_{k} \lambda_{H}}{\lambda_{W}} }
256
+ - N \lambda_{H}} \quad &\forall \: k \leq j \\ 0 \quad &\forall \: k > j
257
+ \end{matrix}\right. .
258
+ \nonumber
259
+ \end{aligned}
260
+ \end{align}$$*
261
+
262
+ - *If $1 < \frac{a}{n_{1}} \leq \frac{a}{n_{2}} \leq \ldots \leq \frac{a}{n_{K}}$: $$\begin{align}
263
+ (s_{1}, s_{2}, \ldots, s_{K} ) &= (0,0,\ldots,0), \nonumber
264
+ \end{align}$$ and $(\mathbf{W}^{*}, \mathbf{H}^{*}) = (\mathbf{0}, \mathbf{0})$ in this case.*
265
+
266
+ *For any $k$ such that $s_{k} = 0$, we have: $$\begin{align}
267
+ \mathbf{w}_{k}^{*} = \mathbf{h}_{k}^{*} = \mathbf{0}. \nonumber
268
+ \end{align}$$*
269
+
270
+ *$(\mathcal{NC}3) \quad
271
+ \mathbf{w}_{k}^{*} = \sqrt{\frac{n_{k} \lambda_{H}}{\lambda_{W}}} \mathbf{h}_{k}^{*} \quad \forall \: k \in [K]. \nonumber$*
272
+ :::
273
+
274
+ The proof is provided in Appendix [10](#sec:proofs_im){reference-type="ref" reference="sec:proofs_im"}. We use the same approach as in the proof of Theorem [1](#thm:bias-free){reference-type="ref" reference="thm:bias-free"}, with the main challenge arising when lower-bounding the loss function w.r.t. the singular vectors of $\mathbf{W}$. Interestingly, the left singular matrix of $\mathbf{W}^{*}$ consists of multiple orthogonal blocks on its diagonal, with each block corresponding to a group of classes that have the same number of training samples. This property creates the orthogonality of the $(\mathcal{NC}2)$ geometries.\
275
+ Theorem [2](#thm:UFM_imbalance){reference-type="ref" reference="thm:UFM_imbalance"} implies the following interesting results:
276
+
277
+ - **Features collapse:** The features in the same class also converge to their class-mean, as in the balanced case.
278
+
279
+ - **Convergence to GOF:** When the condition $N^{2} \lambda_{W} \lambda_{H} / n_{K} < 1$ holds, the class-means matrix and the last-layer classifiers converge to a GOF (see Definition [2](#def:GOF){reference-type="ref" reference="def:GOF"}). This geometry consists of orthogonal vectors whose lengths depend on the number of training samples in each class. The above condition implies that the imbalance and regularization levels should not be too severe, to avoid trivial solutions that may harm model performance. We discuss this phenomenon further in Section [4.2](#subsec:MC){reference-type="ref" reference="subsec:MC"}.
280
+
281
+ - **Alignment between linear classifiers and last-layer features:** Each last-layer linear classifier is aligned with the class-mean of the same class, but with a ratio that differs across classes. These ratios are proportional to the square root of the number of training samples, and thus differ from the balanced case, where $\mathbf{W}^{*} / \| \mathbf{W}^{*} \|_F = \overline{\mathbf{H}}^{* \top} / \| \overline{\mathbf{H}}^{* \top} \|_F$.
282
+
283
+ ::: remark
284
+ **Remark 4**. *We study the case $d < K$ in Theorem [6](#thm:im_UFM_bottleneck){reference-type="ref" reference="thm:im_UFM_bottleneck"}. In this case, while $(\mathcal{NC}1)$ and $(\mathcal{NC}3)$ remain exactly as in the case $d \geq K$, the $(\mathcal{NC}2)$ geometries differ if $a/n_{d} < 1$ and $n_{d} = n_{d+1}$: a square block on the diagonal is replaced by its low-rank approximation. This square block corresponds to the classes whose number of training samples equals $n_{d}$. Also, we have $\mathbf{w}_{k}^{*} = \mathbf{h}^{*}_{k} = \mathbf{0}$ for any class $k$ with fewer than $n_{d}$ training samples.*
285
+ :::
286
+
287
+ Given the exact closed forms of the singular values of $\mathbf{W}^{*}$ stated in Theorem [2](#thm:UFM_imbalance){reference-type="ref" reference="thm:UFM_imbalance"}, we derive the norm ratios between the classifiers and between features across classes as follows:
288
+
289
+ ::: {#lm:ratio .lemma}
290
+ **Lemma 1**. *Suppose $(\mathbf{W}^{*}, \mathbf{H}^{*})$ is a global minimizer of problem [\[eq:UFM_imbalance\]](#eq:UFM_imbalance){reference-type="eqref" reference="eq:UFM_imbalance"} such that $d \geq K$ and $N^{2} \lambda_{W} \lambda_{H} / n_{K} < 1$, so that all the $s_{k}$'s are positive. The following results hold: $$\begin{align}
291
+ \frac{\| \mathbf{w}_{i}^{*} \|^{2}}{\| \mathbf{w}_{j}^{*} \|^{2}} = \frac{ \sqrt{\frac{n_{i} \lambda_{H}}{\lambda_{W}}} - N \lambda_{H} }{\sqrt{\frac{n_{j} \lambda_{H}}{\lambda_{W}}} - N \lambda_{H}},
292
+ \frac{\| \mathbf{h}_{i}^{*} \|^{2}}{\| \mathbf{h}_{j}^{*} \|^{2}}
293
+ = \frac{n_{j}}{n_{i}}
294
+ \frac{ \sqrt{\frac{n_{j} \lambda_{H}}{\lambda_{W}}} - N \lambda_{H} }{\sqrt{\frac{n_{i} \lambda_{H}}{\lambda_{W}}} - N \lambda_{H}}. \nonumber
295
+ \end{align}$$ If $n_{i} \geq n_{j}$, we have $\| \mathbf{w}_{i}^{*} \| \geq \| \mathbf{w}_{j}^{*} \|$ and $\| \mathbf{h}_{i}^{*} \| \leq \| \mathbf{h}_{j}^{*} \|$.*
296
+ :::
297
+
298
+ It has been empirically observed that the classifiers of the majority classes have greater norms [@Kang19], and our result agrees with this observation. Moreover, it has been shown that class imbalance impairs the model's accuracy on minority classes [@Kang19; @Cao19]. Recently, [@Fang21] discovered the "Minority Collapse" phenomenon: they show that there exists a finite imbalance threshold beyond which all the minority classifiers collapse to a single vector, resulting in the model's poor performance on these classes. *Theorem [2](#thm:UFM_imbalance){reference-type="ref" reference="thm:UFM_imbalance"} is not only aligned with the "Minority Collapse" phenomenon, but also provides the imbalance threshold for the collapse of minority classes to the vector $\mathbf{0}$, i.e., $N^{2} \lambda_{W} \lambda_{H} / n_{K} > 1$.*
299
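+
+ As a toy illustration of Theorem [2](#thm:UFM_imbalance){reference-type="ref" reference="thm:UFM_imbalance"} and this threshold (all numerical values below are assumed for illustration), the closed-form singular values can be evaluated directly; a sufficiently large regularization pushes the smallest class over the threshold and collapses it:
+
+ ```python
+ import numpy as np
+
+ n = np.array([300, 100, 30, 5])      # assumed per-class counts, n_1 >= ... >= n_K
+ N = n.sum()
+ lam_W = lam_H = 6e-3                 # assumed weight decay, chosen so a / n_K > 1
+
+ a = N**2 * lam_W * lam_H
+ s_sq = np.sqrt(n * lam_H / lam_W) - N * lam_H     # s_k^2 whenever a / n_k <= 1
+ s_sq = np.where(a / n <= 1.0, s_sq, 0.0)
+ print("a/n_k    :", np.round(a / n, 3))           # last entry > 1 -> collapse
+ print("||w_k||^2:", np.round(s_sq, 3))            # equals s_k^2
+ print("||h_k||^2:", np.round(s_sq / (s_sq + N * lam_H)**2, 5))
+ ```
+
+ Consistent with Lemma [1](#lm:ratio){reference-type="ref" reference="lm:ratio"}, the classifier norms increase with $n_{k}$ while the feature norms decrease with $n_{k}$, until the minority class collapses to $\mathbf{0}$.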
+
300
+ We now generalize [\[eq:UFM_imbalance\]](#eq:UFM_imbalance){reference-type="eqref" reference="eq:UFM_imbalance"} to bias-free deep linear networks with $M \geq 2$ and arbitrary widths. We study the following optimization problem with imbalanced data: $$\begin{align}
301
+ \begin{aligned}
302
+ &\min _{\mathbf{W}_{M},\ldots, \mathbf{W}_{1}, \mathbf{H}_{1}}
303
+ \frac{1}{2N} \| \mathbf{W}_{M} \mathbf{W}_{M-1} \ldots \mathbf{W}_{1}
304
+ \mathbf{H}_{1} - \mathbf{Y} \|_F^2
305
+ + \frac{\lambda_{W_{M}}}{2} \| \mathbf{W}_{M} \|^2_F \
306
+ + \ldots + \frac{\lambda_{W_{1}}}{2} \| \mathbf{W}_{1} \|^2_F +
307
+ \frac{\lambda_{H_{1}}}{2} \| \mathbf{H}_{1} \|^2_F,
308
+ \label{eq:deep_imbalance}
309
+ \end{aligned}
310
+ \end{align}$$ where the target matrix $\mathbf{Y}$ is the one-hot vectors matrix defined in [\[eq:UFM_imbalance\]](#eq:UFM_imbalance){reference-type="eqref" reference="eq:UFM_imbalance"}. We now state the $\mathcal{NC}$ properties of the global solutions of [\[eq:deep_imbalance\]](#eq:deep_imbalance){reference-type="eqref" reference="eq:deep_imbalance"} when the dimensions of the hidden layers are at least the number of classes $K$.
311
+
312
+ ::: {#thm:deep_imbalance .theorem}
313
+ **Theorem 3**. *Let $d_{m} \geq K, \: \forall \: m \in [M]$, and $(\mathbf{W}_{M}^{*}, \mathbf{W}_{M-1}^{*},\ldots, \mathbf{W}_{1}^{*}, \mathbf{H}_{1}^{*})$ be any global minimizer of problem [\[eq:deep_imbalance\]](#eq:deep_imbalance){reference-type="eqref" reference="eq:deep_imbalance"}. We have the following results:\
314
+ ($\mathcal{NC}1) \quad
315
+ \mathbf{H}_{1}^{*} = \overline{\mathbf{H}}^{*} \mathbf{Y} \nonumber
316
+ \Leftrightarrow \mathbf{h}_{k,i}^{*} = \mathbf{h}_{k}^{*} \: \forall \: k \in [K], i \in [n_{k}],$ where $\overline{\mathbf{H}}^{*} = [\mathbf{h}_{1}^{*},\ldots,\mathbf{h}_{K}^{*}] \in \mathbb{R}^{d_{1} \times K}$.\
317
+ $(\mathcal{NC}2)$ Let $c:= \frac{\lambda_{W_{1}}^{M-1}}
318
+ {\lambda_{W_{M}} \lambda_{W_{M-1}} \ldots \lambda_{W_{2}} }$ and $a:= N \sqrt[M]{N \lambda_{W_{M}} \lambda_{W_{M-1}} \ldots \lambda_{W_{1}} \lambda_{H_{1}}}$, and for each $k \in [K]$ let $x^{*}_{k}$ be the largest positive solution of the equation $\frac{a}{n_{k}} - \frac{ x^{M-1}}{ (x^{M} + 1)^{2}} = 0$. Then we have the following: $$\begin{align}
319
+ \begin{aligned}
320
+ % W W^T
321
+ &\mathbf{W}^{*}_{M} \mathbf{W}^{* \top}_{M}
322
+ = \frac{\lambda_{W_{1}}}{\lambda_{W_{M}}} \operatorname{diag}
323
+ \left\{s_{k}^{2} \right\}_{k=1}^{K},
324
+ \\
325
+ &(\mathbf{W}_{M}^{*} \ldots \mathbf{W}_{1}^{*} )(\mathbf{W}_{M}^{*} \ldots \mathbf{W}_{1}^{*} )^{\top} =
326
+ \operatorname{diag}
327
+ \left\{c s_{k}^{2M} \right\}_{k=1}^{K},
328
+ \\
329
+ % Mean H^T H
330
+ &\overline{\mathbf{H}}^{* \top}
331
+ \overline{\mathbf{H}}^{*}
332
+ =
333
+ \operatorname{diag}
334
+ \left\{
335
+ \frac{c s_{k}^{2M}}{(c s_{k}^{2M} + N \lambda_{H_{1}})^{2}}
336
+ \right\}_{k=1}^{K}, \nonumber
337
+ \\
338
+ % W_M W_M-1 \ldots
339
+ &\mathbf{W}_{M}^{*} \mathbf{W}_{M-1}^{*} \ldots \mathbf{W}_{1}^{*} \mathbf{H}_{1}^{*}
340
+ =
341
+ \operatorname{diag} \left\{
342
+ \frac{c s_{k}^{2M}}{c s_{k}^{2M} + N \lambda_{H_{1}}}
343
+ \right\}_{k=1}^{K} \mathbf{Y},
366
+ \end{aligned}
367
+ \end{align}$$*
368
+
369
+ *($\mathcal{NC}3$) We have, $\forall \: k \in [K]$: $$\begin{align}
370
+ (\mathbf{W}_{M}^{*} \mathbf{W}_{M-1}^{*} \ldots \mathbf{W}_{1}^{*})_{k} = (c s_{k}^{2M} + N \lambda_{H_{1}}) \mathbf{h}_{k}^{*}, \nonumber
371
+ \end{align}$$ where:*
372
+
373
+ - *If $\frac{a}{n_{1}} \leq \frac{a}{n_{2}} \leq \ldots \leq \frac{a}{n_{K}} < \frac{(M-1)^{\frac{M-1}{M}}}{M^{2}}$, we have: $$\begin{align}
374
+ \begin{aligned}
375
+ s_{k} =
376
+ \sqrt[2M]{\frac{N \lambda_{H_{1}} x_{k}^{* M}}{c}} \quad \forall \: k \in [K]
377
+ .
378
+ \nonumber
379
+ \end{aligned}
380
+ \end{align}$$*
381
+
382
+ - *If there exists a $j \in [K-1]$ s.t. $\frac{a}{n_{1}} \leq \frac{a}{n_{2}} \leq \ldots \leq \frac{a}{n_{j}} < \frac{(M-1)^{\frac{M-1}{M}}}{M^{2}} < \frac{a}{n_{j+1}} \leq \ldots \leq \frac{a}{n_{K}}$, we have: $$\begin{align}
383
+ \begin{aligned}
384
+ s_{k} = \left\{\begin{matrix}
385
+ \sqrt[2M]{\frac{N \lambda_{H_{1}} x_{k}^{* M}}{c}} \quad &\forall \: k \leq j \\ 0 \quad &\forall \: k > j
386
+ \end{matrix}\right. .
387
+ \nonumber
388
+ \end{aligned}
389
+ \end{align}$$ For any $k$ such that $s_{k} = 0$, we have: $$\begin{align}
390
+ (\mathbf{W}_{M}^{*})_{k} = \mathbf{h}_{k}^{*} = \mathbf{0}. \nonumber
391
+ \end{align}$$*
392
+
393
+ - *If $\frac{(M-1)^{\frac{M-1}{M}}}{M^{2}} < \frac{a}{n_{1}} \leq \frac{a}{n_{2}} \leq \ldots \leq \frac{a}{n_{K}}$, we have: $$\begin{align}
394
+ (s_{1}, s_{2}, \ldots, s_{K} ) &= (0,0,\ldots,0), \nonumber
395
+ \end{align}$$ and $(\mathbf{W}_{M}^{*}, \ldots, \mathbf{W}_{1}^{*}, \mathbf{H}_{1}^{*}) = (\mathbf{0}, \ldots, \mathbf{0}, \mathbf{0})$ in this case.*
396
+ :::
397
+
398
+ The detailed proofs of Theorem [3](#thm:deep_imbalance){reference-type="ref" reference="thm:deep_imbalance"} and of the remaining case, where some of the $\frac{a}{n_{k}}$'s equal $\frac{(M-1)^{\frac{M-1}{M}}}{M^{2}}$, are provided in Appendix [11](#sec:proofs_im_deep){reference-type="ref" reference="sec:proofs_im_deep"}. Briefly, the extension from the plain UFM setting to the deep linear network setting parallels the deep linear balanced case in Section [3](#sec:mainresults){reference-type="ref" reference="sec:mainresults"}. Analogously to the plain UFM setting, the orthogonality of the $(\mathcal{NC}2)$ geometries follows from the property that the left singular matrix of $\mathbf{W}^{*}_{M}$ and the right singular matrix of $\overline{\mathbf{H}}^{*}$ contain orthogonal blocks on their diagonals.
399
+
400
+ ::: remark
401
+ **Remark 5**. *The equation that determines the optimal singular values, $\frac{a}{n} - \frac{x^{M-1}}{(x^{M}+1)^{2}} = 0$, has exactly two positive solutions when $\frac{a}{n} < (M-1)^{\frac{M-1}{M}}/M^{2}$ (see Section [9.2.1](#sec:study_g){reference-type="ref" reference="sec:study_g"}). Solving it in closed form leads to the cumbersome roots of a high-degree polynomial. Even without an exact closed-form solution, however, the $(\mathcal{NC}2)$ geometries can easily be computed numerically, as sketched below.*
402
+ :::
403
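+
+ A minimal numerical sketch of this computation (all constants below are assumed; equal regularization strengths give $c = 1$): bisection on the decreasing branch of $h(x) = x^{M-1}/(x^{M}+1)^{2}$ recovers the largest positive root $x_{k}^{*}$, from which $s_{k}$ follows by Theorem [3](#thm:deep_imbalance){reference-type="ref" reference="thm:deep_imbalance"}:
+
+ ```python
+ import numpy as np
+
+ def largest_root(r, M, tol=1e-12):
+     """Largest positive solution of r = x^(M-1) / (x^M + 1)^2."""
+     h = lambda x: x**(M - 1) / (x**M + 1.0)**2
+     lo = ((M - 1) / (M + 1.0))**(1.0 / M)   # peak of h; h decreases beyond it
+     if r >= h(lo):
+         return None                         # two positive roots do not exist
+     hi = lo
+     while h(hi) > r:                        # bracket the root from the right
+         hi *= 2.0
+     while hi - lo > tol:                    # bisection on the decreasing branch
+         mid = 0.5 * (lo + hi)
+         lo, hi = (mid, hi) if h(mid) > r else (lo, mid)
+     return 0.5 * (lo + hi)
+
+ M, lam, n = 3, 1e-3, np.array([300, 100, 30, 5])   # assumed setup
+ N, c = n.sum(), 1.0
+ a = N * (N * lam**(M + 1))**(1.0 / M)              # all M+1 lambdas equal lam
+ for nk in n:
+     x = largest_root(a / nk, M)
+     s = (N * lam * x**M / c)**(1.0 / (2 * M)) if x else 0.0
+     print(f"n_k={nk}: x*={x}, s_k={s:.4f}")
+ ```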
+
404
+ ::: remark
405
+ **Remark 6**. *We study the case $R := \min(d_{M}, \ldots, d_{1}, K) < K$ in Theorem [8](#thm:im_deep_bottleneck){reference-type="ref" reference="thm:im_deep_bottleneck"}. In this case, while $(\mathcal{NC}1)$ and $(\mathcal{NC}3)$ remain exactly as in the case $R = K$ in Theorem [3](#thm:deep_imbalance){reference-type="ref" reference="thm:deep_imbalance"}, the $(\mathcal{NC}2)$ geometries differ if $a/n_{R} \leq 1$ and $n_{R} = n_{R+1}$: a square block on the diagonal is replaced by its low-rank approximation. This square block corresponds to the classes whose number of training samples equals $n_{R}$. Also, we have $(\mathbf{W}_{M})_{k}^{*} = \mathbf{h}^{*}_{k} = \mathbf{0}$ for any class $k$ with fewer than $n_{R}$ training samples.*
406
+ :::
407
+
408
+ In this section, we turn to the cross-entropy loss and generalize $\mathcal{NC}$ to deep linear networks with last-layer bias under the balanced setting, requiring only the mild assumption that every hidden-layer dimension is at least $K-1$. We consider the training problem [\[eq:UFM_general_linear\]](#eq:UFM_general_linear){reference-type="eqref" reference="eq:UFM_general_linear"} with the CE loss as follows: $$\begin{align}
409
+ &\min_{\mathbf{W}_{M},…, \mathbf{H}_{1}, \mathbf{b}}
410
+ \frac{1}{N} \sum_{k=1}^{K} \sum_{i=1}^{n}
411
+ \mathcal{L}_{CE} (\mathbf{W}_{M} \ldots \mathbf{W}_{1} \mathbf{h}_{k,i} + \mathbf{b}, \mathbf{y}_{k})
412
+ +
413
+ \frac{\lambda_{W_{M}}}{2} \| \mathbf{W}_{M} \|^2_F + … + \frac{\lambda_{H_{1}}}{2} \| \mathbf{H}_{1} \|^2_F
414
+ + \frac{\lambda_{b}}{2} \| \mathbf{b} \|_2^2,
415
+ \label{eq:CE_loss}
416
+ \end{align}$$ where: $$\begin{align}
417
+ \mathcal{L}_{CE} (\mathbf{z}, \mathbf{y}_{k})
418
+ := -\log \left(
419
+ \frac{ e^{z_{k}} }{ \sum_{i=1}^{K} e^{z_{i}}}
420
+ \right).
421
+ \nonumber
422
+ \end{align}$$
423
+
424
+ ::: {#thm:CE .theorem}
425
+ **Theorem 4**. *Assume $d_{m} \geq K - 1 \: \forall \: m \in [M]$, then any global minimizer $(\mathbf{W}_{M}^{*}, …, \mathbf{W}_{1}^{*}, \mathbf{H}_{1}^{*}, \mathbf{b}^{*})$ of problem [\[eq:CE_loss\]](#eq:CE_loss){reference-type="eqref" reference="eq:CE_loss"} satisfies:*
426
+
427
+ - *$(\mathcal{NC}1) + (\mathcal{NC}3)$: $$\begin{align}
428
+ \mathbf{h}_{k,i}^{*}
429
+ &= \frac{\lambda_{H_{1}}^{M}}{ \lambda_{W_{M}} \lambda_{W_{M-1}} \ldots \lambda_{W_{1}}}
430
+ \frac{\sum_{k=1}^{K-1} s_{k}^{2}}{\sum_{k=1}^{K-1} s_{k}^{2M}} (\mathbf{W}_{M}^{*} \mathbf{W}_{M-1}^{*}
431
+ \ldots\mathbf{W}_{1}^{*})_{k} \quad \forall k \in [K], i \in [n]
432
+ \nonumber \\
433
+ \Rightarrow
434
+ \mathbf{h}_{k,i}^{*} &= \mathbf{h}_{k}^{*} \quad \forall \: i \in [n], k \in [K] ,
435
+ \nonumber
436
+ \end{align}$$ where $\{ s_{k} \}_{k=1}^{K-1}$ are the singular values of $\mathbf{H}^{*}_{1}$.*
437
+
438
+ - *$(\mathcal{NC}2):$ $\mathbf{H}_{1}^{*}$ and $\mathbf{W}_{M}^{*} \mathbf{W}_{M-1}^{*} \cdots \mathbf{W}_{1}^{*}$ form a simplex ETF: $$\begin{align}
439
+ (\mathbf{W}_{M}^{*} \mathbf{W}_{M-1}^{*} \cdots \mathbf{W}_{1}^{*}) (\mathbf{W}_{M}^{*} \mathbf{W}_{M-1}^{*} \cdots \mathbf{W}_{1}^{*})^{\top}
440
+ =
441
+ \frac{ \lambda_{H_{1}}^{M} \sum_{k=1}^{K-1} s_{k}^{2M}}{(K-1) \lambda_{W_{M}} \lambda_{W_{M-1}} \ldots \lambda_{W_{1}}} \left(
442
+ \mathbf{I}_{K} - \frac{1}{K} \mathbf{1}_{K} \mathbf{1}_{K}^{\top}
443
+ \right). \nonumber
444
+ \end{align}$$*
445
+
446
+ - *We have $\mathbf{b}^{*} = b^{*} \mathbf{1}$ where either $b^{*} = 0$ or $\lambda_{b} = 0$.*
447
+ :::
448
+
449
+ The proof is provided in Appendix [12](#sec:CE_proof){reference-type="ref" reference="sec:CE_proof"}, and some of the key techniques are extended from the proof for the plain UFM in [@Zhu21]. Compared with the plain UFM, which has only a single layer of weights, the deep linear case exhibits similar results, with the $(\mathcal{NC}2)$ and $(\mathcal{NC}3)$ properties now holding for the product $\mathbf{W}_{M} \mathbf{W}_{M-1} \ldots \mathbf{W}_{1}$ instead of $\mathbf{W}$.
2302.08956/main_diagram/main_diagram.drawio ADDED
@@ -0,0 +1 @@
 
 
1
+ <mxfile host="Electron" modified="2022-07-07T14:04:57.140Z" agent="5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) draw.io/17.2.4 Chrome/96.0.4664.174 Electron/16.1.0 Safari/537.36" etag="ossXOTnGWUntzWfvjmyS" version="17.2.4" type="device"><diagram id="TPe3OsJ5aV2p6DrZgVPP" name="Page-1">7Vpbc5s6EP41PCYD4mLzmDhpz5zpOZOZTKfNo2wEKJURFSK2++srQOJqOyQDAU/rPAQtq2XZT99KWqSZq+3+M4Nx+B/1ENGA7u01804DwNCBIf5lkkMhcWwpCBj2pFIleMS/kOoppSn2UNJQ5JQSjuOmcEOjCG14QwYZo7ummk9J86kxDFBH8LiBpCv9hj0eFtKlrVfyfxAOQvVkQ5d3tlApS0ESQo/uaiLzXjNXjFJeXG33K0Sy4Km4FP0+nbhbOsZQxPt0ePg/ef7kBgQ63xLr6zaF/65vr0zlHD+oN0aeCIBsUsZDGtAIkvtKestoGnkoM6uLVqXzhdJYCA0hfEacHySaMOVUiEK+JfIu2mP+vXb9lJm6tmXrbi8t542DbPg04tKgIWJ3m3BGf5SYZH2LN8ncPxkhKUpoyjboTFgWcqRBFiB+Rk94ooAUDEB0izg7iI4MEcjxS9MRKIdiUOpVaIkLCdgbwJNevkCSyidpwCFcBquBqvMzperGVZKH8UYoGE68r26KqyD7HwtI0yBFCVL2hHuFyUKhO2YIEYTMxsYuxBw9xjAP707khCbyMIkLlvp4n42gFoxmafwFMY7254Hsxl12sHW76CKTDlCk3FUUdqUorLFXyQZHCjiXQjMRYXb4Xm/UemXNqlvemoSey570dKdk53IkdqI00VamduOuMb1Qfhpzo+diCnp+KGXcvjOafhy5j+GMOxJnnmGUwsy/y6SLOTe+LP9OZwNyU+11XifniXHyMeQ0rXegvknZSw66MeQQqGB/qqP+zo0DIut8Y5aRFIut30ToAnNKdJWbg+fenXB1grzrY0JWlFCWmzZ9O/uTPtfkxa+0ULvj5L9hMjhw5pbB3UvJ4B+biVWJ6TWuLialqjESVeEfT1RzOTOimkY3+H+JKgtvfSbVSfczys3BmepTytcwc+AidzQOmBvN3lOgG2dtOyznRl7b9i0rAGtSGo5VV8DRnGbM0zPjqbl0ACpb6vuWpLJtT8xkMOWE2dyYGmfJW9UmqnLEU/3eu2sTR0jfyFUDZgDQe3c7ae0CjLW7jQk8IHah0/DCmts0bM5mGn5LianGZLBYNrh87eaJ9hyf89YDYljEELGZzOyg70542g8GYKytcDjJt+9Z7YVBKzuYy6mzgz2b7HBJi3Rg9qXypN/LlZuDU3lN2STL9AEoaOrzWlwDaySMtvQX3K7xz/RijxzZLaS6ubI8GliHqhQOjpXlHMHqzemzHkiCg0hcE+RniCQi4jgKvuStO5AvZLyb7MilaK4J3fzoJNxynWS09zuv58hGHj+SMMt1W+9V24Ap9kTilGPjSr82XHfRGB+WBKN3bpXWHyjOeVaepGkOO6BKHcpE4brsVY0ogRM81NTiTCE58xxr2XxO+7BpW79VFXirvrF4RX9xVl9cFG94orfjXru1X+vd2pQsJs5ODDtmy3KvNFQW75Uh6vsJ4lqb2eWIOUZ20azOCBfq1Ulr8/43</diagram></mxfile>
2302.08956/main_diagram/main_diagram.pdf ADDED
Binary file (26.5 kB). View file
 
2302.08956/paper_text/intro_method.md ADDED
@@ -0,0 +1,100 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Introduction
2
+
3
+ Africa has a long and rich linguistic history, experiencing language contact, language expansion, development of trade languages, language shift, and language death, on several occasions. The continent is incredibly linguistically diverse and home to over 2000 languages. This includes 75 languages with at least one million speakers each. Africa has a rich tradition of storytelling, poems, songs, and literature [@storytellingAfrica; @africaStoryTelling] while recent years have seen a proliferation of communication in digital and social media. Code-switching is common in these new forms of communication where speakers alternate between two or more languages in the context of a single conversation [@santy-etal-2021-bertologicomix; @angel-etal-2020-nlp; @codeMixing]. However, despite this linguistic richness, African languages have been comparatively under-represented in natural language processing (NLP) research.
4
+
5
+ ![Countries and languages represented in the AfriSenti data collection (Amharic, Algerian Arabic, Hausa, Igbo, Kinyarwanda, Moroccan Arabic, Mozambican Portuguese, Nigerian Pidgin, Oromo, Swahili, Tigrinya, Twi, Xitsonga, and Yorùbá).](Afrisenti_Language_Family_paper_wise_update_cropped.png){#fig:countries_languages}
6
+
7
+ <figure id="fig:AfriSenti_examples">
8
+ <p><embed src="AfriSenti_Example.pdf" style="width:80.0%" /> []<span>Examples of tweets and their sentiments in the different AfriSenti Languages. Note that the collected tweets in Moroccan Darija (<code>ary</code>) are written in both Arabic and Latin scripts.</span> <span id="fig:AfriSenti_examples" data-label="fig:AfriSenti_examples"></span></p>
9
+ </figure>
10
+
11
+ An influential sub-area of NLP deals with sentiment, valence, emotions, and affect in language [@liu2020sentiment]. Computational analysis of emotion states in language and the creation of systems that predict these states from utterances have applications in literary analysis and culturonomics [@emotionarcs; @hamilton2016diachronic], commerce (e.g., tracking feelings towards products), and research in psychology and social science [@dodds2015human; @hamilton2016diachronic]. Despite a tremendous amount of work in this important space over the last two decades, there is little work on African languages, partially due to a lack of high-quality annotated data.
12
+
13
+ To enable sentiment analysis research in African languages, we present **AfriSenti**, the largest sentiment analysis benchmark for under-represented languages---covering 110,000+ annotated tweets in 14 African languages[^2] (Amharic, Algerian Arabic, Hausa, Igbo, Kinyarwanda, Moroccan Arabic, Mozambican Portuguese, Nigerian Pidgin, Oromo, Swahili, Tigrinya, Twi, Xitsonga, and Yorùbá) from four language families (Afro-Asiatic, English Creole, Indo-European and Niger-Congo). We show the represented countries and languages in Figure [1](#fig:countries_languages){reference-type="ref" reference="fig:countries_languages"} and provide examples of the data in Table [2](#fig:AfriSenti_examples){reference-type="ref" reference="fig:AfriSenti_examples"}. AfriSenti is an extension of NaijaSenti [@muhammad-etal-2022-naijasenti], which is a sentiment corpus in four major Nigerian languages: Hausa, Igbo, Nigerian Pidgin, and Yorùbá.
14
+
15
+ The datasets are used in the first Afrocentric SemEval shared task, *SemEval 2023 Task 12: Sentiment analysis for African languages (AfriSenti-SemEval)*. AfriSenti allows the research community to build sentiment analysis systems for various African languages and enables the study of sentiment and contemporary language use in African languages. We publicly release the corpora, which provide further opportunities to investigate the difficulty of sentiment analysis for African languages.
16
+
17
+ Our contributions are: (1) the creation of the largest Twitter dataset for sentiment analysis in African languages by annotating 10 new datasets and curating four existing ones [@muhammad-etal-2022-naijasenti], (2) the discussion of the data collection and annotation process in 14 low-resource African languages, (3) the release of sentiment lexicons for all languages, and (4) the presentation of classification baseline results using our datasets.
18
+
19
+ # Method
20
+
21
+ AfriSenti covers 14 African languages, each with unique linguistic characteristics and writing systems, which are shown in Table [\[tab:aflang\]](#tab:aflang){reference-type="ref" reference="tab:aflang"}. As shown in [3](#fig:families){reference-type="ref+Label" reference="fig:families"}, the dataset includes six languages of the Afroasiatic family, six languages of the Niger-Congo family, one from the English Creole family, and one from the Indo-European family.
22
+
23
+ <figure id="fig:families" data-latex-placement="t">
24
+ <img src="AfrisentiLanguageFamilyUpdate_Afro-Asiatic_removed.png" />
25
+ <figcaption>Language Family (shown in green) in the AfriSenti datasets.</figcaption>
26
+ </figure>
27
+
28
+ Scripts serve not only as a means of transcribing spoken language, but also as powerful cultural symbols that reflect people's identity [@Sterponi2014]. For instance, the Bamun script is deeply connected to the identity of Bamun speakers in Cameroon, while the Geez/Ethiopic script (for Amharic and Tigrinya) evokes the strength and significance of Ethiopian culture [@Sterponi2014]. Similarly, the Ajami script, a variant of the Arabic script used in various African languages such as Hausa, serves as a reminder of the rich African cultural heritage of the Hausa community [@Gee2005ReviewOS].
29
+
30
+ African languages, with a few exceptions, use the Latin script, written from left to right, or the Arabic script, written from right to left [@Gee2005ReviewOS; @Meshesha2008IndigenousSO], with the Latin script being the most widely used in Africa [@Eberhard2020]. Ten languages out of fourteen in AfriSenti are written in Latin script, two in Arabic script, and two in Ethiopic (or Geez) script. On social media, people may write Moroccan Arabic (Darija) and Algerian Arabic (Darja) in both Latin and Arabic characters due to various reasons including access to technology, i.e., the fact that Arabic keyboards were not easily accessible on commonly used devices for many years, code-switching, and other phenomena. This makes Algerian and Moroccan Arabic digraphic, i.e., their texts can be written in multiple scripts on social media. Similarly, Amharic is digraphic and is written in both Latin and Geez script [@belay2021impacts]. This constitutes an additional challenge to the processing of these languages in NLP.[^3]
31
+
32
+ AfriSenti covers the majority of African sub-regions. Many African languages are spoken in neighbouring countries within the same sub-regions. For instance, variations of Hausa are spoken in Nigeria, Ghana, and Cameroon, while Swahili variants are widely spoken in East African countries, including Kenya, Tanzania, and Uganda. AfriSenti also includes datasets in the top three languages with the highest numbers of speakers in Africa (Swahili, Amharic, and Hausa). We show the geographic distribution of languages in AfriSenti in Figure [1](#fig:countries_languages){reference-type="ref" reference="fig:countries_languages"}.
33
+
34
+ AfriSenti includes existing and newly created datasets as shown in [\[tab:newandexisting\]](#tab:newandexisting){reference-type="ref+Label" reference="tab:newandexisting"}. For the existing datasets whose test sets are public, we created new test sets to further evaluate their performance in the AfriSenti-SemEval shared task.
35
+
36
+ Since many people share their opinions on Twitter, the platform is widely used to study sentiment analysis [@muhammad-etal-2022-naijasenti]. However, the Twitter API's support for African languages is limited, which makes it difficult for researchers to collect data. Specifically, the Twitter language API currently supports only Amharic out of more than 2000 African languages[^4]. This disparity in language coverage highlights the need for further research and development in NLP for low-resource languages.
37
+
38
+ We used the Twitter Academic API to collect tweets. However, as the API does not provide language identification for tweets in African languages, we use location-based and vocabulary-based heuristics to collect the datasets.
39
+
40
+ For all languages except Algerian Arabic and Afaan Oromo, we used a location-based collection approach to filter the results. Hence, tweets were collected based on the names of the countries where the majority of the target language speakers are located. For Afaan Oromo, tweets were collected globally due to the small size of the data collected from Ethiopia.
41
+
42
+ As different languages are spoken within the same region in Africa [@amfo2019multilingualism], the location-based approach did not help in all cases. For instance, searching for tweets from *"Lagos"* (Nigeria) returned tweets in multiple languages, such as *Yorùbá*, *Igbo*, *Hausa*, *Pidgin*, *English*, etc.
43
+
44
+ To address these challenges, we combined the location-based approach with vocabulary-based collection strategies. These included the use of stopwords, sentiment lexicons, and a language detection tool. For languages that used the Geez script, we used the Ethiopic Twitter Dataset for Amharic (ETD-AM), which includes tweets that were collected since 2014 [@yimam2019analysis].
45
+
46
+ Most African languages do not have curated stopword lists [@emezueafrican]. Therefore, we created stopword lists for some AfriSenti languages and used them to collect data. We used corpora from different domains, i.e., news data and religious texts, to rank words based on their frequency [@adelani-etal-2021-masakhaner]. We took the top 100 words, deleted domain-specific words (e.g., the word *God* in religious texts), and created lists based on the top 50 words that appeared across domains; a sketch of this procedure is shown below.
47
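+
+ A rough sketch of this frequency-based procedure (function and corpus names are ours, not the authors' code):
+
+ ```python
+ from collections import Counter
+
+ def stopword_list(domain_corpora, top_k=100, keep=50):
+     """Rank words per domain, keep words frequent across *all* domains."""
+     tops, total = [], Counter()
+     for docs in domain_corpora:          # e.g. [news_docs, religious_docs]
+         freq = Counter(w for doc in docs for w in doc.lower().split())
+         tops.append({w for w, _ in freq.most_common(top_k)})
+         total += freq
+     shared = set.intersection(*tops)     # drops domain-specific words
+     return sorted(shared, key=lambda w: -total[w])[:keep]
+ ```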
+
48
+ We also used a word co-occurrence-based approach to extract stopwords [@LIANG20094901] using text sources from different domains. We lower-cased the text and removed punctuation symbols and numbers, constructed a co-occurrence graph, and filtered out the words that occurred most often. Native speakers verified the generated lists before use. This approach worked best for Xitsonga.
49
+
50
+ As data collection based on stopwords sometimes results in tweets that are inadequate for sentiment analysis (e.g., too many neutral tweets), we used a sentiment lexicon---a dictionary of positive and negative words---for tweet collection. This allows for a balanced collection across sentiment classes (positive/negative/neutral). For Moroccan Darija, we used the emotion word list curated by @outchakoucht2021moroccan.
51
+
52
+ [\[tab:lexicon\]](#tab:lexicon){reference-type="ref+Label" reference="tab:lexicon"} provides details on the sentiment lexicons in AfriSenti and indicates whether they were manually created or translated.
53
+
54
+ Besides stopwords and sentiment lexicons, native speakers provided lists of language-specific terms including generic words. For instance, this strategy helped us collect Algerian Arabic tweets, and the generic terms included equivalents of words such as " ![image](dz.pdf){height="0.5cm"} " *(the crowd)* and names of Algerian cities.
55
+
56
+ <figure id="fig:AfriSenti_class_dist">
57
+
58
+ <figcaption>Label distributions for the different AfriSenti datasets.</figcaption>
59
+ </figure>
60
+
61
+ As we mainly used heuristics for data collection, the result included tweets in a language that is different from the target one. For instance, when collecting tweets using lists of Amharic words, some returned tweets were in Tigrinya, due to Amharic--Tigrinya code-mixing. Similarly, we applied an additional manual filtering step in the case of Tunisian, Moroccan, and Modern Standard Arabic tweets that were returned when searching for Algerian Arabic ones due to overlapping terms.
62
+
63
+ Hence, we used different techniques for language detection as a post-processing step.
64
+
65
+ Few African languages have pre-existing language detection tools [@keet2021natural]. We used Google CLD3[^5] and the Pycld2 library[^6] for the supported AfriSenti languages (Amharic, Oromo and Tigrinya).
66
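+
+ For illustration, a minimal filtering helper built on the Pycld2 bindings (a sketch; the paper's exact pipeline may differ):
+
+ ```python
+ import pycld2 as cld2   # CLD2 bindings; Google's CLD3 offers a similar interface
+
+ def keep_if_language(tweet, wanted_code):
+     is_reliable, _, details = cld2.detect(tweet)
+     lang_name, lang_code, percent, score = details[0]   # best guess
+     return is_reliable and lang_code == wanted_code
+
+ # e.g. keep_if_language(tweet, "am") to keep Amharic tweets
+ ```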
+
67
+ For languages that do not have a pre-existing tool, the detection was conducted by native speakers. For instance, annotators who are native speakers of Twi and Xitsonga manually labeled 2,000 tweets in these languages. In addition, as native speakers collected the Algerian Arabic tweets, they deleted all possible tweets that were expressed in another language or Arabic variation instead.
68
+
69
+ To reduce the effort spent on language detection, we also used a pretrained language model fine-tuned on 2,000 manually annotated tweets [@caswell-etal-2020-language] to identify Twi and Xitsonga.
73
+
74
+ Despite our efforts to detect the right languages, it is worth mentioning that as multilingualism is common in African societies, the final dataset contains many code-mixed tweets.
75
+
76
+ We anonymized the tweets by replacing all *\@mentions* by *\@user* and removed all URLs. For the Nigerian language test sets, we further lower-cased the tweets [@muhammad-etal-2022-naijasenti].
80
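+
+ A minimal sketch of this preprocessing step (the regular expressions are assumptions, not the authors' code):
+
+ ```python
+ import re
+
+ def anonymize(tweet: str) -> str:
+     tweet = re.sub(r"@\w+", "@user", tweet)              # mask @mentions
+     tweet = re.sub(r"https?://\S+|www\.\S+", "", tweet)  # strip URLs
+     return re.sub(r"\s+", " ", tweet).strip()
+
+ print(anonymize("@Ade omo! see https://t.co/xyz"))       # '@user omo! see'
+ ```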
+
81
+ Tweet samples were randomly selected based on the different collection strategies. Then, with the exception of the Ethiopian languages, each tweet was annotated by three native speakers. We followed the sentiment annotation guidelines by @mohammad-2016-practical and used majority voting [@Davani2021DealingWD] to determine the final sentiment label for each tweet [@muhammad-etal-2022-naijasenti]. We discarded the cases where all annotators disagreed. The datasets of the three Ethiopian languages (Amharic, Tigrinya, and Oromo) were annotated by two independent annotators and then curated by a third, more experienced individual who decided on the final gold labels.
82
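+
+ The gold-label rule for the three-annotator languages can be sketched as follows (our illustration):
+
+ ```python
+ from collections import Counter
+
+ def gold_label(votes):
+     """Majority vote over 3 annotator labels; None means the tweet is discarded."""
+     label, count = Counter(votes).most_common(1)[0]
+     return label if count >= 2 else None
+
+ print(gold_label(["positive", "positive", "neutral"]))   # 'positive'
+ print(gold_label(["positive", "negative", "neutral"]))   # None -> discarded
+ ```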
+
83
+ @Prabhakaran2021OnRA showed that the majority vote conceals systematic disagreements between annotators resulting from their sociocultural backgrounds and experiences. Therefore, we release all the individual labels to the research community. We report the free marginal multi-rater kappa scores [@randolph2005free] in [\[tab:afrisenti_iaa\]](#tab:afrisenti_iaa){reference-type="ref+Label" reference="tab:afrisenti_iaa"} since chance-adjusted scores such as Fleiss-$\kappa$ can be low despite high agreement due to the imbalanced label distributions [@randolph2005free; @falotico2015fleiss; @matheson2019we]. We obtained intermediate to good levels of agreement ($0.40-0.75$) across all languages, except for Oromo where we obtained a low agreement score due to the annotation challenges that we discuss in Section [5](#sec:annotation_challenges){reference-type="ref" reference="sec:annotation_challenges"}.
84
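+
+ For reference, Randolph's free-marginal multi-rater kappa is $\kappa_{\text{free}} = (\bar{P}_{o} - 1/q)/(1 - 1/q)$ for $q$ categories; a minimal sketch (our code, with toy counts):
+
+ ```python
+ import numpy as np
+
+ def free_marginal_kappa(counts, q):
+     """counts: (items x q) label-count matrix; each row sums to r raters."""
+     counts = np.asarray(counts, dtype=float)
+     r = counts[0].sum()
+     p_obs = ((counts**2).sum(axis=1) - r) / (r * (r - 1))   # per-item agreement
+     return (p_obs.mean() - 1.0 / q) / (1.0 - 1.0 / q)
+
+ # 3 raters, q = 3 classes; rows are tweets
+ print(free_marginal_kappa([[3, 0, 0], [2, 1, 0], [1, 1, 1]], q=3))  # ~0.17
+ ```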
+
85
+ [\[tab:my_totaltweets\]](#tab:my_totaltweets){reference-type="ref+Label" reference="tab:my_totaltweets"} shows the number of tweets in each of the 14 datasets. The Hausa collection of tweets is the largest AfriSenti dataset and the Xitsonga dataset is the smallest one. [4](#fig:AfriSenti_class_dist){reference-type="ref+Label" reference="fig:AfriSenti_class_dist"} shows the distribution of the labeled classes in the datasets. We observe that the distribution for some languages such as `ha` is fairly equitable while in others such as `pcm`, the proportion of tweets in each class varies widely. Sentiment annotation for African languages presents some challenges [@muhammad-etal-2022-naijasenti] that we highlight in the following.
86
+
87
+ A significant portion of tweets in *Twi* were ambiguous, making it difficult to accurately categorize sentiment. Some tweets contained symbols that are not in the Twi alphabet, which is a frequent occurrence due to the lack of support for certain Twi letters on keyboards [@scannell2011statistical]. For example, "ɔ" is replaced by the English letter "c", and "ɛ" is replaced by "3".
93
+
94
+ Additionally, tweets are more often annotated as negative (cf. [4](#fig:AfriSenti_class_dist){reference-type="ref+Label" reference="fig:AfriSenti_class_dist"}). This is due to some common expressions that can be seen as offensive depending on the context. For instance, "*Tweaa*" was once considered an insult but has become a playful expression through trolling, and "*gyae gyimii*" is commonly used by young people to say "stop" while its literal meaning is "stop fooling".
95
+
96
+ One of the significant challenges for the Mozambican Portuguese and Xitsonga data annotators was the presence of code-mixed and sarcastic tweets. Code-mixing in tweets made it challenging for the annotators to determine the intended meaning of the tweet as it involved multiple languages spoken in Mozambique that some annotators did not understand. Similarly, the presence of two variants of Xitsonga spoken in Mozambique (Changana and Ronga) added to the complexity of the annotation task. Additionally, sarcasm was a source of disagreement among annotators, leading to the exclusion of many tweets from the final dataset.
97
+
98
+ For Oromo and Tigrinya, challenges included finding annotators and the lack of a reliable Internet connection and access to personal computers. Although we trained the Oromo annotators, we observed severe problems in the quality of the annotated data which led to a low agreement score.
99
+
100
+ For Algerian Arabic, the main challenge was the use of sarcasm. When this caused a disagreement among the annotators, the tweet was further labeled by two additional annotators. If all the annotators did not agree on one final label, we discarded it. As Twitter is also commonly used to discuss controversial topics in the region, we removed offensive tweets.
2303.02430/main_diagram/main_diagram.drawio ADDED
@@ -0,0 +1 @@
 
 
1
+ <mxfile host="Electron" modified="2022-11-17T03:56:01.503Z" agent="5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) draw.io/20.3.0 Chrome/104.0.5112.114 Electron/20.1.3 Safari/537.36" version="20.3.0" etag="HZyrChKfU02olNm5GM8E" type="device"><diagram id="vIqjUpc-l0e-6Wd7Lp3D">5VtNl6MoFP01WSYHRPzYVnVV96J7VYuZXnISyjhjJMeQTjK/fjDBD8DqmFPgUdtNyRO0vPf53rtAFuh5d/5akP32B9vQbOGBzXmBviw8DyIIxZ/ScrlZ/DC4GZIi3chOjeEt/Y9KI5DWY7qhB6UjZyzj6V41rlme0zVXbKQo2Ent9s4y9al7ksgngsbwtiYZNbr9lW741rB+o2myVZ+7I1U/ec/DlmzYqWVCLwv0XDDGb2e78zPNStwqSG7jXj+4Wj+9oDnvM8C7DfhFsqN8Lfl/8Uv1nmKAgFQ0nk7blNO3PVmXV06CVWHb8l0mWlCcksP+hvN7eqbi/k8FO+ab8ux6uX5VIBpJRg4Hef6eZtkzy1gh2jnLyycdeMH+rWEVqDzJ/5MWnJ4/fFdYIyi8jrId5cVFdKldLroNkQ7nYUnCqaEvlF22Le4qG5EOk9R3boAVJxLbbpzRfZwbtMB9rD8E00SuDe/CQ6/Xo+7ZdaVgnPCU5cK8DIEl8H2ogo98A/zqo2iDjyyA708UfFvQV4G1gj4w/d7vgD5Gn4cePwI9vA+9hifxABDP6MAzeAXiKEewnLfs79fjdySSYi2TDQYqHxBgW5EoVhiBUcfHEJuMwOoL+QwlwVRCfgt5bAl4FIK7n4KrFBBONApBS9j7vqel32iwDBBNFPulb8vxseb4ERwM/HhOOcAWIcvA1+qhDkJcpYBKQo0mBwhSAJBk2QDX0yoe7BngwqAD3MACtnBk2LqUVEusSSoYD5ZPYQ/tOu+gDoMV0Bzdi1e+SYGryA57yFpepCRPsh7O3tO3DYzryHGXp7pnm43YUtDRvgSETRqQSYNnQWTBhwTu2DOsLdUbaQnWHzLB9tC9U06wMAYrrONrxh1nObaHhp136EcIqejH4QoPV9LDh8Ts2COOPZGlkYLM2QV3IeeP17g+8ld6OTTcFAPsIXNHXwuF1qjQqqEIDVYNeT3E7XRi0zKylLL9KvJUlFTSYYjg5I1NFFuuh1DsrTCKm0OffwBd2dlVeVSBO+VQZKtQDdR5TwQ7vN5VIEKmj28S+iabrOBblrCcZC+N9UlN0U2f74ztJQX/UM4vMlyQI2cqXfSc8r/L4SssWz9bV76c5Z2vjUvVyMWrtQaVzZ/ta82wa+uiEFS+lELPgR2LNVV9kZMioVy19aCxoJnwiF/q/T9FSg+tPI7pOQfLXwHQd0CYadnVdN1jS8Ejqk9trX+F+lwdHm4HRJ9F31GCb00cBKGnoh8PiP5Ul36toY9jDf2OBRln6D8kjMeuB2wptABglREQmow4kwNz0MrIUokaGCVqx7fhqERF89LK1igJe2xWdPVxoJlr5QAH5tpBMKQ+Rj308Ww1QKQvVPpmuHG2C3qq26BtaYCoeuF6XrRjhcBRHYSmug3aWhUaRdomuCqGD4H+rHZC20q0EdR3wXXUPs4S7R8viqMoNFbMhltERj1k8ehVQOmJlrjQ1mc6fp3kTAbMSiIvobV0HYYKJzDs2NTlLDz1EMlT1gGRj00dgBzqANFsflh5vdb6ZSp6+R8=</diagram></mxfile>
2303.02430/paper_text/intro_method.md ADDED
@@ -0,0 +1,159 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Introduction
2
+
3
+ As an emerging technology, generative flow networks (GFlowNets) [@bengio2021flow; @bengio2021gflownet] can make up for the shortcomings of reinforcement learning [@kaelbling1996reinforcement; @sutton2018reinforcement] on exploratory tasks. Specifically, based on the Bellman equation [@sutton2018reinforcement], reinforcement learning is usually trained to maximize the expectation of future rewards; hence the learned policy is more inclined to sample action sequences with higher rewards. In contrast, the training goal of GFlowNets is to define a distribution proportional to the rewards over terminating states, i.e., the parent states of the final states, rather than generating a single high-reward action sequence [@bengio2021flow]. This is more like sampling different candidates in an active learning setting [@bengio2021gflownet], and thus better suited for exploration tasks.
4
+
5
+ GFlowNets organize the state transitions of trajectories into a directed acyclic graph (DAG) structure. Each node in the graph corresponds to a different state, and each action corresponds to a transition between states, i.e., an edge connecting two nodes in the graph. For discrete tasks, the number of nodes in this graph is finite, and each edge corresponds to exactly one discrete action. However, in real environments, the state and action spaces are continuous for many tasks, such as quadrupedal locomotion [@kohl2004policy], autonomous driving [@kiran2021deep; @shalev2016safe; @pan2017virtual], or dexterous in-hand manipulation [@andrychowicz2020learning]. Moreover, the reward distributions in these environments may be multimodal, requiring more diverse exploration. The needs of these environments closely match the strengths of GFlowNets. [@bengio2021gflownet] proposes an idea for adapting GFlowNets to continuous tasks by replacing sums with integrals for continuous variables, and suggests the use of integrable densities and the detailed balance (DB) or trajectory balance (TB) [@malkin2022trajectory] criteria to obtain tractable training objectives, which avoid some integration operations. However, this idea has not been verified experimentally.
6
+
7
+ In this paper, we propose generative Continuous Flow Networks, named CFlowNets for short, for continuous control tasks, to generate policies that can be proportional to continuous reward functions. Applying GFlowNets to continuous control tasks is exceptionally challenging. In generative flow networks, the transition probability is defined as the ratio of action flow and state flow. For discrete state and action spaces, we can form a DAG and compute the state flow by traversing a node's incoming and outgoing flows. Conversely, it is impossible for continuous tasks to traverse all state-action pairs and corresponding rewards. To address this issue, we use importance sampling to approximate the integrals over inflows and outflows in the flow-matching constraint, where a deep neural network predicts the parent nodes of each state in the sampled trajectory. The main contributions of this paper are summarized as follows:
8
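+
+ A rough sketch of this approximation idea (all names are hypothetical; the concrete estimator and parent-prediction network are developed later in the paper):
+
+ ```python
+ import numpy as np
+
+ def approx_inflow(s_next, flow_net, g, sample_actions, k=64):
+     """Monte-Carlo estimate of the inflow integral at state s_next."""
+     actions = sample_actions(k)                  # a_i sampled from the action space
+     parents = [g(s_next, a) for a in actions]    # predicted parent of s_next under a_i
+     flows = [flow_net(s, a) for s, a in zip(parents, actions)]
+     return np.mean(flows)                        # up to the action-space measure
+ ```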
+
9
+ **Main Contributions:** 1) We extend the theoretical formulation and flow matching theorem of previous GFlowNets to continuous scenarios. Based on this, a loss function for training CFlowNets is presented; 2) We propose an efficient way to sample actions with probabilities approximately proportional to the output of the flow network, and propose a flow sampling approach to approximate continuous inflows and outflows, which allows us to construct a continuous flow matching loss; 3) We theoretically analyze the error bound between the sampled flows and the true inflows/outflows, whose tail becomes negligible as the number of flow samples increases; 4) We conduct experiments on continuous control tasks to demonstrate that CFlowNets can outperform current state-of-the-art RL algorithms, especially in terms of exploration capabilities. To the best of our knowledge, our work is the first to empirically demonstrate the effectiveness of flow networks on continuous control tasks. The code is available at [http://gitee.com/mindspore/models/tree/master/research/gflownets/cflownets](http://gitee.com/mindspore/models/tree/master/research/gflownets/cflownets)
10
+
11
+ # Method
12
+
13
+ A stochastic, discrete-time, sequential decision task can be described as a Markov Decision Process (MDP), which is canonically formulated by the tuple: $$\begin{align}
14
+ M= \langle \mathcal{S}, \mathcal{A}, P, R, \gamma \rangle.
15
+ \end{align}$$ In the process, $\mathcal{S}$ represents the state space of the environment. At each time step, the agent receives a state $s \in \mathcal{S}$ and selects an action $a$ from the action space $\mathcal{A}$. This results in a transition to the next state $s'$ according to the state transition function $P(s'|s, a): \mathcal{S} \times \mathcal{A} \times \mathcal{S} \rightarrow \left[0,1\right]$. Then the agent gets the reward $r$ based on the reward function $R(s, a): \mathcal{S} \times \mathcal{A} \rightarrow \mathbb{R}$. A stochastic policy $\pi$ maps each state to a distribution over actions $\pi(\cdot|s)$ and gives the probability $\pi(a|s)$ of choosing action $a$ in state $s$. The agent interacts with the environment by executing the policy $\pi$, obtaining admissible trajectories $\{(s_t,a_t,r_t,s_{t+1})\}_{t = 1}^{n}$, where $n$ is the trajectory length. The goal of the agent is to maximize the discounted return $\mathbb{E}_{s_{0: n}, a_{0: n}}\left[\sum_{t=0}^{\infty} \gamma^{t} r_{t} \mid s_{0}=s, a_{0}=a, \pi\right]$, where $\mathbb{E}$ is the expectation over the distribution of the trajectories and $\gamma \in \left[0,1\right)$ is the discount factor.
16
+
17
+ GFlowNets view the MDP as a flow network. Define the transition $s'=T(s,a)$ and let $F(s)$ denote the total flow going through node $s$. Define an edge/action flow $F(s,a) = F(s \rightarrow s')$ as the flow through an edge $s \rightarrow s'$. The training process of vanilla GFlowNets sums the flows over each node's parents and children, which relies on the state and action spaces being discrete. The framework is optimized by the following flow consistency equations: $$\begin{equation}
18
+ \label{flow_in_out}
19
+ \sum_{s, a: T(s, a)=s^{\prime}} F(s, a)=R\left(s^{\prime}\right)+\sum_{a^{\prime} \in \mathcal{A}\left(s^{\prime}\right)} F\left(s^{\prime}, a^{\prime}\right),
20
+ \end{equation}$$ which means that, for any node $s'$, the incoming flow equals the reward $R(s')$ plus the outgoing flow; this common value is the total flow $F(s')$ of node $s'$.
21
+
22
+ Consider a continuous task with tuple $(\mathcal{S},\mathcal{A})$, where $\mathcal{S}$ denotes the continuous state space and $\mathcal{A}$ denotes the continuous action space. Define a trajectory $\tau = (s_1,...,s_n)$ in this continuous task as a sequence of sampled elements of $\mathcal{S}$ such that every transition $a_t: s_t \rightarrow s_{t+1} \in \mathcal{A}$. Further, we define an acyclic trajectory $\tau = (s_1,...,s_n)$ as a trajectory that satisfies the acyclic constraint: $\forall s_m \in \tau, s_k \in \tau, m \neq k$, we have $s_m \neq s_k$. Denoting $s_0$ and $s_f$ respectively as the initial state and the final state of the continuous task $(\mathcal{S},\mathcal{A})$, we define a complete trajectory as any sampled acyclic trajectory from $(\mathcal{S},\mathcal{A})$ starting in $s_0$ and ending in $s_f$. Correspondingly, a transition $s \rightarrow s_f$ into the final state is defined as a terminating transition, and $F(s\rightarrow s_f)$ is a terminating flow.
23
+
24
+ A trajectory flow $F(\tau): \tau \mapsto \mathbb{R}^+$ is defined as any nonnegative function defined on the set of complete trajectories $\tau$. For each trajectory $\tau$, the associated flow $F(\tau)$ represents the number of [*particles* [@bengio2021gflownet]]{style="color: black"} sharing the same path $\tau$. In addition, the tuple $(\mathcal{S},\mathcal{A},F)$ is called a continuous flow network. Let $T(s,a) = s'$ indicate an action $a$ that makes a transition from state $s$ to state $s'$. We then make the following assumptions.
25
+
26
+ ::: {#assumption0 .assumption}
27
+ **Assumption 1**. *Assume that the continuous task $(\mathcal{S},\mathcal{A})$ is an "acyclic" task, which means that arbitrarily sampled trajectories $\tau$ are acyclic, i.e., $$s_i \neq s_j, \forall s_i, s_j \in \tau = (s_0,...,s_n), i\neq j.$$*
28
+ :::
29
+
30
+ ::: {#assumption1 .assumption}
31
+ **Assumption 2**. *Assume the flow function $F(s,a)$ is Lipschitz continuous, i.e., $$\begin{align}
32
+ |F(s,a)-F(s,a')| &\le L ||a-a'||,~ a, a' \in \mathcal{A}, \\
33
+ |F(s,a)-F(s',a)| &\le L ||s-s'||,~ s, s' \in \mathcal{S},
34
+ \end{align}$$ where $L$ is a constant.*
35
+ :::
36
+
37
+ ::: {#assumption2 .assumption}
38
+ **Assumption 3**. *Assume that for any state pair $(s_t,s_{t+1})$, there is a unique action $a_t$ such that $T(s_t,a_t) = s_{t+1}$, i.e., taking action $a_t$ in $s_t$ is the only way to get to $s_{t+1}$. Hence we can define $s_t := g(s_{t+1},a_t)$, where $g(\cdot)$ is a transition function. Further, assume the actions are translation actions.*
39
+ :::
40
+
41
+ The necessity and rationality of Assumptions [1](#assumption0){reference-type="ref" reference="assumption0"}-[3](#assumption2){reference-type="ref" reference="assumption2"} are analyzed in the appendix. Under Assumption [1](#assumption0){reference-type="ref" reference="assumption0"}, we define the parent set $\mathcal{P}(s_t)$ of a state $s_{t}$ as [the set that contains]{style="color: black"} all of the direct parents of $s_t$ that could make a direct transition to $s_t$, i.e., $\mathcal{P}(s_t) = \{ s \in \mathcal{S} : T(s, a \in \mathcal{A}) = s_t \}$. Similarly, define the child set $\mathcal{C}(s_t)$ of a state $s_{t}$ as the set that contains all of the direct children of $s_t$ that could make a direct transition from $s_t$, i.e., $\mathcal{C}(s_t) = \{ s \in \mathcal{S} : T(s_t, a \in \mathcal{A}) = s \}$. Then, we have the following continuous flow definitions, where Assumptions [2](#assumption1){reference-type="ref" reference="assumption1"}-[3](#assumption2){reference-type="ref" reference="assumption2"} make these integrals integrable and meaningful.
42
+
43
+ ::: definition
44
+ **Definition 1** (Continuous State Flow). *The continuous state flow $F(s): \mathcal{S} \mapsto \mathbb{R}$ is the integral of the complete trajectory flows passing through the state: $$\begin{equation}
45
+ F(s) = \int_{\tau:s\in \tau} F(\tau) \mathrm{d} \tau.
46
+ \end{equation}$$*
47
+ :::
48
+
49
+ ::: {#def-inflows .definition}
50
+ **Definition 2** (Continuous Inflows). *For any state $s_{t}$, its inflows are the integral of flows that can reach state $s_{t}$, i.e., $$\begin{equation}
51
+ \int_{s\in \mathcal{P} (s_t)} F(s\rightarrow s_t) \mathrm{d} s = \int_{s:T(s,a) = s_{t}} F(s,a) \mathrm{d} s = F(s_t) = \int_{a:T(s,a) = s_{t}} F(s,a) \mathrm{d} a,
52
+ \end{equation}$$ where $a:s\rightarrow s_t$ and $s=g(s_t,a)$ since Assumption [3](#assumption2){reference-type="ref" reference="assumption2"} holds.*
53
+ :::
54
+
55
+ ::: {#def-outflows .definition}
56
+ **Definition 3** (Continuous Outflows). *For any state $s_{t}$, the outflows are the integral of flows passing through state $s_{t}$ with all possible actions $a \in \mathcal{A}$, i.e., $$\begin{equation}
57
+ \int_{s\in \mathcal{C} (s_t)} F(s_t \rightarrow s) \mathrm{d} s = F(s_t) = \int_{a \in \mathcal{A}} F(s_t,a) \mathrm{d} a.
58
+ \end{equation}$$*
59
+ :::
60
+
61
+ Based on the above definitions, we can define the transition probability $P(s \rightarrow s'|s)$ of an edge $s\rightarrow s'$ as a special case of the conditional probability introduced in [@bengio2021gflownet]. In particular, the forward transition probability is given by $$\begin{equation}
62
+ \label{PFtransition-1}
63
+ P_F(s_{t+1}|s_t) := P(s_t \rightarrow s_{t+1}|s_t) = \frac{F(s_t \rightarrow s_{t+1})}{F(s_t)}.
64
+ \end{equation}$$ Similarly, the backward transition probability is given by $$\begin{equation}
65
+ \label{PBtransition-2}
66
+ P_B(s_t|s_{t+1}) := P(s_t \rightarrow s_{t+1}|s_{t+1}) = \frac{F(s_t \rightarrow s_{t+1})}{F(s_{t+1})}.
67
+ \end{equation}$$ For any trajectory sampled from a continuous task $(\mathcal{S}, \mathcal{A})$, we have $$\begin{align}
68
+ &\forall \tau = (s_1,...,s_n), P_F(\tau) := \prod_{t=1}^{n-1} P_F(s_{t+1}|s_t) \\
69
+ &\forall \tau = (s_1,...,s_n), P_B(\tau) := \prod_{t=1}^{n-1} P_B(s_t|s_{t+1}),
70
+ \end{align}$$ and we further have $$\begin{align}
71
+ \label{transition-1-1}
72
+ \forall s \in \mathcal{S} \backslash \{s_f\}, ~ \int_{s'\in \mathcal{C}(s)} P_F(s'|s) \mathrm{d} s' = 1 ~\text{and}~
73
+ % \label{transition-1-2}
74
+ \forall s \in \mathcal{S} \backslash \{s_0\}, ~ \int_{s'\in \mathcal{P}(s)} P_B(s'|s) \mathrm{d} s' = 1.
75
+ \end{align}$$
76
+
77
+ [Given any trajectory $\tau = (s_0,...,s_n,s)$ that starts in $s_0$ and ends in $s$,]{style="color: black"} a Markovian flow [@bengio2021gflownet] is defined as the flow that satisfies $$P(s\rightarrow s'|\tau) = P(s\rightarrow s'|s) = P_F(s'|s),$$ and the corresponding flow network $(\mathcal{S},\mathcal{A},F)$ is called a Markovian flow network [@bengio2021gflownet]. [Then, we present Theorem [1](#theorem1){reference-type="ref" reference="theorem1"} proved in the appendix [9.1](#PT1){reference-type="ref" reference="PT1"}, which is an extension of Proposition 19 in [@bengio2021gflownet] to continuous scenarios.]{style="color: black"}
78
+
79
+ ::: {#theorem1 .theorem}
80
+ **Theorem 1** (Continuous Flow Matching Condition). *Consider a non-negative function $\hat F(s,a)$ taking a state $s\in \mathcal{S}$ and an action $a\in \mathcal{A}$ as inputs. Then $\hat F$ corresponds to a flow if and only if the following continuous flow matching conditions are satisfied: $$\begin{equation}
81
+ \begin{split}
82
+ &\forall s' > s_0, ~ \hat F(s') = \int_{s\in \mathcal{P} (s')} \hat F(s\rightarrow s') \mathrm{d} s = \int_{s:T(s,a) = s'} \hat{F}(s,a:s\rightarrow s') \mathrm{d} s \\
83
+ &\forall s' < s_f, ~ \hat F(s') = \int_{s''\in \mathcal{C} (s')} \hat F(s'\rightarrow s'') \mathrm{d} s'' = \int_{a \in \mathcal{A}} \hat F(s',a) \mathrm{d} a.
84
+ \end{split}
85
+ \end{equation}$$ Furthermore, $\hat F$ uniquely defines a Markovian flow $F$ matching $\hat F$ such that $$\begin{equation}
86
+ F(\tau) = \frac{\prod_{t=1}^{n+1} \hat F(s_{t-1} \rightarrow s_t)}{\prod_{t=1}^{n}\hat F(s_t)}.
87
+ \end{equation}$$*
88
+ :::
89
+
90
+ Theorem [1](#theorem1){reference-type="ref" reference="theorem1"} means that whenever a non-negative function satisfies the flow matching conditions, a unique flow is determined. Therefore, for sparse reward environments, i.e., $R(s) = 0,~\forall s \neq s_f$, we can obtain the target flow by training a flow network that satisfies the flow matching conditions. Such learning machines are called CFlowNets, and we have the following continuous loss function: $$\begin{align}
91
+ % \label{continuous-loss}
92
+ {\mathcal{L}} (\tau)
93
+ =\sum\limits_{s_{t} = s_1}^{s_{f}} \Bigg(
94
+ \int_{s_{t-1}\in \mathcal{P} (s_{t})} F(s_{t-1} \rightarrow s_{t}) \mathrm{d} s_{t-1}
95
+ -
96
+ R(s_{t}) -
97
+ \int_{s_{t+1} \in \mathcal{C} (s_{t})} F(s_{t} \rightarrow s_{t+1}) \mathrm{d} s_{t+1}
98
+ \Bigg)^{2}. \nonumber
99
+ \end{align}$$ However, the above continuous loss function obviously cannot be applied directly in practice. Next, we propose a method to approximate it based on sampled trajectories in order to obtain the flow model.
100
+
101
+ [For continuous tasks, it is usually difficult to access all state-action pairs to calculate continuous inflows and outflows.]{style="color: black"} In the following, we propose the CFlowNets training framework to address this problem, which includes an action sampling process and a flow matching approximation process. CFlowNets can then be trained based on an approximate flow matching loss function.
102
+
103
+ The overall framework of CFlowNets is shown in Figure [1](#framework){reference-type="ref" reference="framework"}, including the environment interaction, flow sampling, and training procedures. During the environment interaction phase (Left part of Figure [1](#framework){reference-type="ref" reference="framework"}), we sample an action probability buffer based on the forward propagation of CFlowNets. We name this process the action selection procedure, as detailed in Section [4.2](#section42){reference-type="ref" reference="section42"}. After acquiring the action, the agent interacts with the environment to update the state, and this process repeats until a complete trajectory is sampled. Once a buffer of complete trajectories is available, we randomly sample $K$ actions and compute the child states to approximately calculate the outflows. For the inflows, we use these sampled actions together with the current state as the input to the deep neural network $G$ to estimate the parent states. Based on these, we can approximately determine the inflows. We name this process the flow matching approximation procedure (Middle part of Figure [1](#framework){reference-type="ref" reference="framework"}), as detailed in Section [4.3](#section43){reference-type="ref" reference="section43"}. Finally, based on the approximate inflows and outflows, we can train a CFlowNet with the continuous flow matching loss function (Right part of Figure [1](#framework){reference-type="ref" reference="framework"}), as detailed in Section [4.4](#section44){reference-type="ref" reference="section44"}. The pseudocode is provided in Appendix [10](#Pseudocode){reference-type="ref" reference="Pseudocode"}.
104
+
105
+ <figure id="framework" data-latex-placement="!t">
106
+ <embed src="framework.pdf" style="width:100.0%" />
107
+ <figcaption>Overall framework of CFlowNets. <strong>Left:</strong> During the environment interaction phase, we sample actions to update states with probabilities proportional to the reward according to the CFlowNet. <strong>Middle:</strong> We randomly sample actions to approximately calculate the inflows and outflows, where a DNN is used to estimate the parent states. <strong>Right:</strong> The continuous flow matching loss trains the CFlowNet by matching the inflows to the outflows plus the reward.</figcaption>
108
+ </figure>
109
+
110
+ Starting from an empty set, CFlowNets aim to obtain complete trajectories $\tau = (s_0,s_1,...,s_f) \in \mathcal{T}$ by iteratively sampling $a_t \sim \pi(a_t | s_t) = \frac{F(s_t,a_t)}{F(s_t)}$ with tuples $\{(s_t,a_t,r_t,s_{t+1})\}_{t = 0}^{f}$. However, it is difficult to sample trajectories strictly according to the corresponding probability of $a_t$: since the actions are continuous, we cannot obtain the exact action probability distribution function from the flow network $F(s_t,a_t)$. To solve this problem, at each state $s_t$, we first uniformly sample $M$ actions from $\mathcal{A}$ and generate an action probability buffer $\mathcal{P} = \{ F(s_t,a_i) \}_{i=1}^M$, which is used as an approximation of the action probability distribution. Then we sample an action from $\mathcal{P}$ according to the corresponding probabilities of all actions. Obviously, actions with larger $F(s_t,a_i)$ will be sampled with higher probability. In this way, we approximately sample actions from a continuous distribution according to their corresponding probabilities.
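+
+ As an illustration, here is a minimal sketch of this action selection procedure; `flow_net(state, action)` is a hypothetical callable returning the scalar flow $F(s,a)$, and all names and defaults are our assumptions rather than the paper's code:
+
+ ```python
+ import numpy as np
+
+ def sample_action(flow_net, state, action_low, action_high, M=100):
+     """Approximately sample a ~ pi(a|s) = F(s, a) / F(s) (sketch)."""
+     # Uniformly draw M candidate actions from the (box-shaped) action space.
+     candidates = np.random.uniform(action_low, action_high,
+                                    size=(M, len(action_low)))
+     # Action probability buffer P = {F(s_t, a_i)}_{i=1..M}.
+     flows = np.array([flow_net(state, a) for a in candidates]) + 1e-12
+     probs = flows / flows.sum()          # normalize over the M candidates
+     idx = np.random.choice(M, p=probs)   # larger F(s, a_i) -> sampled more often
+     return candidates[idx]
+ ```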
111
+
112
+ ::: remark
113
+ **Remark 1**. *After the training process, for tasks that require a larger reward, we can sample actions with the maximum flow output in $\mathcal{P}$ during the test process to obtain a relatively higher reward. How the output of the flow model is used is flexible, and we can adjust it for different tasks.*
114
+ :::
115
+
116
+ Once a batch of trajectories $\mathcal{B}$ is available, to satisfy the flow conditions, we require that for any node $s_t$, the inflows [$\int_{a:T(s,a)=s_t}F(s,a)\mathrm{d} a$]{style="color: black"} equal the outflows $\int_{a \in \mathcal{A}}F(s_t,a)\mathrm{d} a$, which is the total flow $F(s_t)$ of node $s_t$. However, we obviously cannot calculate the continuous inflows and outflows directly to enforce the flow matching condition. An intuitive idea is to discretize the inflows and outflows based on a reasonable approximation and match the discretized flows. To do this, we sample $K$ actions independently and uniformly from the continuous action space $\mathcal{A}$ and calculate the corresponding $F(s_t,a_k), k = 1,...,K$ as the outflows, i.e., we use the following approximation: $$\begin{equation}
117
+ \label{outflow-approximation}
118
+ \int_{a \in \mathcal{A}}F(s_t,a)\mathrm{d} a \approx \frac{\mu(\mathcal{A})}{K} \sum_{k=1}^K F(s_t,a_k),
119
+ \end{equation}$$ where $\mu(\mathcal{A})$ denotes the measure of the continuous action space $\mathcal{A}$.
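+
+ A quick numpy sketch of this Monte Carlo estimate, assuming a box-shaped action space so that $\mu(\mathcal{A})$ is simply the product of the side lengths (names are ours):
+
+ ```python
+ import numpy as np
+
+ def estimate_outflow(flow_net, s_t, action_low, action_high, K=100):
+     """Monte Carlo estimate of the outflow integral over the action space."""
+     mu_A = np.prod(np.asarray(action_high) - np.asarray(action_low))
+     actions = np.random.uniform(action_low, action_high,
+                                 size=(K, len(action_low)))
+     return mu_A / K * sum(flow_net(s_t, a) for a in actions)
+ ```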
120
+
121
+ By contrast, approximating the inflows is more difficult since we must find the parent [states]{style="color: black"} first. To solve this problem, we construct [a deep neural network $G$ (named the "retrieval" neural network)]{style="color: black"} parameterized by $\phi$ with $(s_{t+1},a_t)$ as the input and $s_t$ as the output, and train this network on $\mathcal{B}$ with the MSE loss. [That is, we want to use $G$ to fit the function $g(\cdot)$. The network $G$ is usually easy to train since we consider tasks that satisfy Assumption [3](#assumption2){reference-type="ref" reference="assumption2"},]{style="color: black"} and we can obtain a high-precision network $G$ through simple pre-training. As training progresses, we can also occasionally update $G$ based on the sampled trajectories to ensure accuracy. Then, the inflows can be calculated approximately: $$\begin{equation}
122
+ \color{black}
123
+ \label{inflow-approximation}
124
+ \int_{a:T(g(s_t,a),a)=s_t}F(g(s_t,a),a)\mathrm{d} a \approx \frac{\mu(\mathcal{A})}{K} \sum_{k=1}^K F(G_{\phi} (s_t, a_k), a_k).
125
+ \end{equation}$$ Next, by assuming that the flow function $F(s,a)$ is Lipschitz continuous as in Assumption [2](#assumption1){reference-type="ref" reference="assumption1"}, we can provide a non-asymptotic analysis of the error between the sampled inflows/outflows and the true inflows/outflows. Theorem [2](#thm: tail){reference-type="ref" reference="thm: tail"} establishes the error bound between the sampled outflows (resp. inflows) and the actual outflows (resp. inflows) in tail form and shows that the tail decays exponentially. Furthermore, the tail shrinks as $K$ increases, which means the sampled outflows (resp. inflows) are a good estimate of the actual outflows (resp. inflows).
126
+
127
+ ::: {#thm: tail .theorem}
128
+ **Theorem 2**. *Let $\{a_k\}_{k=1}^{K}$ be sampled independently and uniformly from the continuous action space $\mathcal{A}$. Assume $G_{\phi^\star}$ can optimally output the actual state $s_t$ given $(s_{t+1},a_t)$. For any bounded continuous action $a \in \mathcal{A}$ and any state $s_t \in \mathcal{S}$, we have $$\begin{equation}
129
+ \mathbb{P}\left(\Big{|}\frac{\mu(\mathcal{A})}{K} \sum_{k=1}^K F(s_t,a_k)-\int_{a \in \mathcal{A}}F(s_t,a)\mathrm{d} a \Big{|} \ge t \right) \le 2\exp\left(-\frac{Kt^2}{2(L \mu(\mathcal{A})\rm{diam}(\mathcal{A}))^2}\right)
130
+ \end{equation}$$ and $$\begin{multline}
131
+ \mathbb{P}\left( \Big{|}\frac{\mu(\mathcal{A})}{K} \sum_{k=1}^K F(G_{\phi^\star} (s_t, a_k), a_k) - \int_{a:T(s,a)=s_t}F(s,a)\mathrm{d} a \Big{|} \ge t \right) \\ \le 2\exp\left(-\frac{Kt^2}{2\big{(}L \mu(\mathcal{A})(\rm{diam}(\mathcal{A})+\rm{diam}(\mathcal{S}))\big{)}^2}\right),
132
+ \end{multline}$$ where $L$ is the Lipschitz constant, $\rm{diam}(\mathcal{A})$ denotes the diameter of the action space $\mathcal{A}$ and $\rm{diam}(\mathcal{S})$ denotes the diameter of the state space $\mathcal{S}$.*
133
+ :::
134
+
135
+ Based on [\[inflow-approximation\]](#inflow-approximation){reference-type="eqref" reference="inflow-approximation"} and [\[outflow-approximation\]](#outflow-approximation){reference-type="eqref" reference="outflow-approximation"}, the continuous loss function can be approximated by $$\begin{align}
136
+ \color{black}
137
+ \label{approximate-loss}
138
+ {\mathcal{L}}_{\theta} (\tau)
139
+ =\sum\limits_{s_{t} = s_1}^{s_{f}} \left[
140
+ \sum_{k=1}^K F_{\theta}(G_{\phi} (s_t, a_k), a_k)
141
+ -
142
+ \lambda R(s_{t})
143
+ -
144
+ \sum_{k=1}^K F_{\theta}(s_t,a_k)
145
+ \right]^{2},
146
+ \end{align}$$ where $\theta$ is the parameter of the flow network $F(\cdot)$ and $\lambda = K/\mu(\mathcal{A})$. [Note that in many tasks we cannot obtain the exact $\mu(\mathcal{A})$. For such tasks, we can directly set $\lambda$ to 1 and then adjust the reward shaping to ensure the convergence of the algorithm[^2].]{style="color: black"}
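+
+ A PyTorch-style sketch of this approximate loss under our reading of the equation; `F_theta` and `G_phi` are callables for the flow network and the retrieval network, and all names, shapes, and the batching scheme are our assumptions:
+
+ ```python
+ import torch
+
+ def flow_matching_loss(F_theta, G_phi, states, rewards, actions, lam=1.0):
+     """states/rewards: lists over s_1..s_f; actions: (K, action_dim) tensor
+     of uniform samples from the action space."""
+     loss = torch.tensor(0.0)
+     for s_t, r_t in zip(states, rewards):
+         s_rep = s_t.unsqueeze(0).expand(actions.shape[0], -1)
+         parents = G_phi(s_rep, actions)            # estimated parent states
+         inflow = F_theta(parents, actions).sum()   # ~ (K / mu(A)) * integral
+         outflow = F_theta(s_rep, actions).sum()
+         loss = loss + (inflow - lam * r_t - outflow) ** 2
+     return loss
+ ```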
147
+
148
+ It is noteworthy that the magnitudes of the state flows at different locations in a trajectory may not match. For example, the initial node flow is likely to be larger than the ending node flow. To solve this problem, inspired by the log-scale loss introduced in GFlowNets [@bengio2021flow], we can modify [\[approximate-loss\]](#approximate-loss){reference-type="eqref" reference="approximate-loss"} into: $$\begin{align}
149
+ \label{approximate-loss-log}
150
+ {\mathcal{L}}_{\theta} (\tau)
151
+ &=\sum\limits_{s_{t} = s_1}^{s_{f}} \Bigg\{
152
+ \log \left[ \epsilon + \sum_{k=1}^K \exp F_{\theta}^{\log}(G_{\phi} (s_t, a_k), a_k)\right]
153
+ \nonumber \\
154
+ &~~~~~~~~~~~~~~~~~~~~~~~~~ - \log \left[ \epsilon + \lambda R(s_{t})
155
+ +
156
+ \sum_{k=1}^K \exp F_{\theta}^{\log}(s_t,a_k)
157
+ \right]
158
+ \Bigg\}^{2},
159
+ \end{align}$$ where $\epsilon$ is a hyper-parameter that helps to trade off small versus large flows and helps avoid the numerical problem of taking the logarithm of tiny flows. [Note that Theorem [2](#thm: tail){reference-type="ref" reference="thm: tail"} cannot be used to guarantee the unbiasedness of [\[approximate-loss-log\]](#approximate-loss-log){reference-type="eqref" reference="approximate-loss-log"} because $\log \mathbb{E}(x) \neq \mathbb{E}\log(x)$. But experiments show that this approximation works well.]{style="color: black"}
2303.06635/main_diagram/main_diagram.drawio ADDED
The diff for this file is too large to render. See raw diff
 
2303.06635/paper_text/intro_method.md ADDED
@@ -0,0 +1,135 @@
1
+ # Introduction
2
+
3
+ "Now this representation of a general procedure of the imagination for providing a concept with its image is what I call the schema for this concept."
4
+
5
+ - Immanuel Kant
6
+
7
+ Deep neural networks (DNNs) have demonstrated increasingly powerful capabilities in learning visual representations compared to conventional hand-crafted features. Take the visual recognition task as an example. The canonical deep learning (DL) scheme for image recognition is to yield an effective visual representation from a stack of non-linear layers along with a fully-connected (FC) classifier at the end (He et al., 2016; Dosovitskiy et al., 2021; Tolstikhin et al., 2021; Yang et al., 2022a), where specifically the inner-product similarities are computed with each category embedding as the prediction. Despite the great success of DL, existing deep networks are typically required to simultaneously perceive low-level patterns as well as high-level semantics to make predictions (Zeiler & Fergus, 2014; Krizhevsky et al., 2017). As such, both the procedure of computing visual representations and the learned category-specific embeddings are opaque to humans, leading to challenges in security-critical scenarios, such as autonomous driving and healthcare applications.
8
+
9
+ <sup>\*</sup>Equal contribution.
10
+
11
+ <sup>†</sup>Corresponding author.
12
+
13
+ <sup>1</sup>In Critique of Pure Reason (A140/B180).
14
+
15
+ ![](_page_1_Figure_1.jpeg)
16
+
17
+ Figure 1: An example showing how an instance IR-Graph is matched to the class imagination. The vertices of IR-Graphs represent visual semantics, and the edges indicate the vertex interactions. The graph matcher captures the similarity between the local structures of joint vertices (*e.g.*, vertex #198 and vertex #150) by aggregating information from their neighbors as class evidences. The final prediction is defined as the sum of all evidence.
18
+
19
+ Unlike prior works that merely obtain the targets in a black-box manner, here we strive to devise an innovative and generalized DNN inference paradigm by reformulating the traditional one-shot forwarding scheme into an interpretable DNN reasoning framework, resembling what occurs in deductive human reasoning. Towards this end, inspired by the schema in Kant's philosophy that describes human cognition as the procedure of associating an image of abstract concepts with a specific sense impression, we propose to formulate DNN inference as an interactive matching procedure between the local visual semantics of an input instance and the abstract category imagination, termed *schema inference* in this paper, thereby accomplishing interpretable deductive inference based on visual-semantics interactions at the micro-level.
20
+
21
+ To elaborate on how the proposed schema inference is achieved, we take image classification, the most basic task in computer vision, as an example to explain our technical details. At a *high level*, the devised *schema inference* scheme leverages a pre-trained DNN to extract *feature ingredients*, which are, in fact, the semantics represented by a cluster of deep feature vectors from a specific local region in the image domain. Furthermore, the obtained *feature ingredients* are organized into an *ingredient relation graph* (IR-Graph) to model their interactions, which are characterized by similarity at the semantic level as well as adjacency at the spatial level. We then implement the category-specific imagination as an *ingredient relation atlas* (IR-Atlas) for all target categories, induced from observed data samples. As a final step, the *graph similarity* between an instance-level IR-Graph and the category-level IR-Atlas is computed as the measure for yielding the target predictions. As such, instead of relying on deep features, the outputs of schema inference derive only from the relationships of visual words, as shown in Figure 1.
22
+
23
+ More specifically, our dedicated *schema*-based architecture, termed *SchemaNet*, is based on vision Transformers (ViTs) (Dosovitskiy et al., 2021; Touvron et al., 2021), which are nowadays the most prevalent vision backbones. To effectively obtain the feature ingredients, we collect the intermediate features of the backbone from probe data samples and cluster them with the k-means algorithm. IR-Graphs are established through a customized *Feat2Graph* module that transfers the discretized ingredient array to graph vertices and meanwhile builds the connections, which indicate the ingredient interactions based on the self-attention mechanism (Vaswani et al., 2017) and spatial adjacency. Eventually, graph similarities are evaluated via a shallow graph convolutional network (GCN).
24
+
25
+ Our work relates to several existing methods that mine semantic-rich visual words from DNN backbones for self-explanation (Brendel & Bethge, 2019; Chen et al., 2019; Nauta et al., 2021; Xue et al., 2022b; Yang et al., 2022b). Particularly, *BagNet* (Brendel & Bethge, 2019) uses a DNN as visual
26
+
27
+ ![](_page_2_Figure_1.jpeg)
28
+
29
+ Figure 2: The overall pipeline of our proposed SchemaNet. Firstly, intermediate feature X from the backbone is fed into the *Feat2Graph* module. The converted IR-Graph is then matched to a set of imaginations, *i.e.*, category-level IR-Atlas induced from observed data, for making the prediction.
30
+
31
+ word extractor that is analogous to the traditional bag-of-visual-words (BoVW) representation (Yang et al., 2007) with SIFT features (Lowe, 2004). Moreover, Chen et al. (2019) develop the *ProtoPNet* framework, which evaluates the similarity scores between image parts and learned class-specific prototypes for inference. Despite their encouraging performance, all these existing methods lack consideration of the compositional contributions of visual semantics, which have, however, already proved critical for both human and DNN inference (Deng et al., 2022), and are consequently not applicable to schema inference. We discuss more related literature in Appendix A.
32
+
33
+ In sum, our contribution is an innovative *schema inference* paradigm that reformulates the existing black-box network forwarding into a deductive DNN reasoning procedure, allowing for a clear insight into how the class evidences are gathered and deductively lead to the target predictions. This is achieved by building the dedicated IR-Graph and IR-Atlas with the extracted *feature ingredients* and then performing graph matching between IR-Graph and IR-Atlas to derive the desired predictions. We also provide a theoretical analysis to explain the interpretability of our schema inference results. Experimental results on CIFAR-10/100, Caltech-101, and ImageNet demonstrate that the proposed schema inference yields results superior to the state-of-the-art interpretable approaches. Further, we demonstrate that by transferring to unseen tasks without fine-tuning the matcher, the learned knowledge of each class is, in fact, stored in IR-Atlas rather than the matcher, thereby exhibiting the high interpretability of the proposed method.
34
+
35
+ In this section, we introduce our proposed SchemaNet in detail. The overall procedure is illustrated in Figure 2, including a Feat2Graph module that converts deep features to instance IR-Graphs, a learnable IR-Atlas, and a graph matcher for making predictions. The main idea is to model an input image as an instance-level graph in which nodes are local semantics captured by the DNN backbone and edges represent their interactions. Meanwhile, a category-level graph is maintained as the imagination for each class driven by training samples. Finally, by measuring the similarity between the instance and category graphs, we are able to interpret how predictions are made.
36
+
37
+ # Method
38
+
39
+ We first give a brief review of the ViT architecture. The simplest ViT implementation for image classification is proposed by Dosovitskiy et al. (2021), which treats an image as a sequence of independent 16 × 16 patches. The embeddings of all patches are then directly fed to a Transformer-based network (Vaswani et al., 2017), *i.e.*, a stack of multiple encoder layers with the same structure: a multi-head self-attention mechanism (MHSA) followed by a multilayer perceptron (MLP) with residual connections. Additionally, they append a class token (CLS) to the input sequence and use it alone for classification rather than the average-pooled feature. Following this work, Touvron et al. (2021) propose DeiT, which appends another distillation token (DIST) to learn soft decision targets from a teacher
40
+
41
+ model. This paper will mainly focus on DeiT backbones due to their relatively simple yet efficient network architecture.
42
+
43
+ Let $X \in \mathbb{R}^{(n+\zeta) \times d}$ denote the input feature (a sequence of n visual tokens and $\zeta$ auxiliary tokens, e.g., CLS and DIST, each of which is a d-dimensional embedding vector) to a MHSA module. The output can be simplified as $\mathrm{MHSA}(X) = \sum_{h=1}^H \tilde{\Psi}_h X W_h$ , where $\tilde{\Psi}_h \in \mathbb{R}^{(n+\zeta) \times (n+\zeta)}$ denotes the self-attention matrix normalized by row-wise softmax of head $h \in \{1,\ldots,H\}$ , and $W_h \in \mathbb{R}^{d \times d}$ is a learnable projection matrix. As the MLP module processes each token individually, the visual token interactions take place only in the MHSA modules, facilitating the extraction of their relationships from the perspective of ViTs.
44
+
45
+ For convenience, we define some notations related to the self-attention matrix. Let $\bar{\Psi}$ be the average attention matrix of all heads and $\Psi$ be the symmetric attention matrix: $\Psi = (\bar{\Psi} + \bar{\Psi}^{\top})/2$ . We partition $\Psi$ into four submatrices:
46
+
47
+ $$\Psi = \begin{bmatrix} \Psi^* & \Psi^A \\ (\Psi^A)^{\top} & \Psi^V \end{bmatrix}, \tag{1}$$
48
+
49
+ where $\Psi^A \in \mathbb{R}^{\zeta \times n}$ is the attention to the auxiliary tokens, $\Psi^V \in \mathbb{R}^{n \times n}$ represents the relationships of visual tokens, and $\Psi^* \in \mathbb{R}^{\zeta \times \zeta}$ . Finally, let $\psi^{\text{CLS}} \in \mathbb{R}^n$ represent the attention values to the CLS token extracted from $\Psi^A$ .
50
+
51
+ As shown in Figure 2, given a pre-trained backbone and an input image, the intermediate feature X is converted to an IR-Graph G for inference by the Feat2Graph module which includes three steps: (1) discretizing X into a sequence of feature ingredients with specific semantics; (2) mapping the ingredients to weighted vertices of IR-Graph; (3) assigning a weighted edge to each vertex pair indicating their interaction. We start by defining IR-Graph and IR-Atlas mathematically.
52
+
53
+ **Definition 1** (IR-Graph). An IR-Graph is an undirected fully-connected graph G = (E, V), in which the vertex set V is the set of feature ingredients for an instance or a specific category, and the edge set E encodes the interactions of vertex pairs.
54
+
55
+ In particular, to indicate the vertex importance (for instance, "bird head" should contribute to the prediction of bird more than "sky" in human cognition), a non-negative weight $\lambda \in \mathbb{R}_+$ is assigned to each vertex. Let $\Lambda \in \mathbb{R}_+^{|V|} = \{\lambda_i\}_{i=1}^{|V|}$ be the collection of all vertex weights. Besides, the interaction between vertex i and j is quantified by a non-negative weight $e_{i,j} \in \mathbb{R}_+$ . With a slight abuse of notation, we define $E \in \mathbb{R}_+^{|V| \times |V|}$ as the weighted adjacency matrix in the following sections.
56
+
57
+ The IR-Atlas is defined to represent the imagination of all categories:
58
+
59
+ **Definition 2** (IR-Atlas). An IR-Atlas $\mathcal{G}$ of C classes is a set of category-level IR-Graphs, in which element $\hat{G}_c = (\hat{E}_c, \hat{V}_c)$ for class c has learnable vertex weights $\hat{\Lambda}_c$ and edge weights $\hat{E}_c$ .
60
+
61
+ **Discretization.** The main purpose of feature discretization is to mine the most common patterns appearing in the dataset, each of which corresponds to a human-understandable semantic, named a visual word, such as "car wheel" or "bird head". However, local regions with the same semantic may look different because of scaling, rotation, or even distortion. In our approach, we utilize the DNN backbone to extract a relatively uniform feature instead of the traditional SIFT feature utilized in (Yang et al., 2007). To be specific, a visual vocabulary $\Omega = \{\omega_i\}_{i=1}^M \ (\omega_i \in \mathbb{R}^d)$ of size M is constructed by k-means clustering running on the collection $\mathbb{X}$ of visual tokens extracted from the probe dataset<sup>2</sup>. Further, a deep feature X (CLS and DIST are removed) is discretized to a sequence of feature ingredients by replacing each element x with the index of the closest visual word
62
+
63
+ $$\operatorname{Ingredient}(x) = \underset{i \in \{1, \dots, M\}}{\operatorname{arg \, min}} \|x - \omega_i\|_2. \tag{2}$$
64
+
65
+ Strictly, the *ingredient* is referred to as the *index* of visual word $\omega$ . With the entire ingredient set $\mathbb{M} = \{1, \dots, M\}$ , the discretized sequence is denoted as $\tilde{X} = (\tilde{x}_1, \dots, \tilde{x}_n)$ , where $\tilde{x}_i \in \mathbb{M}$ is computed from Equation (2). The detailed settings of M are presented in Appendix E.1.
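+
+ As an illustration, here is a minimal sketch of the vocabulary construction and the ingredient assignment of Equation (2), using scikit-learn's k-means; the vocabulary size and all names are our assumptions:
+
+ ```python
+ import numpy as np
+ from sklearn.cluster import KMeans
+
+ def build_vocabulary(tokens, M=2048):
+     """tokens: (n * D, d) visual tokens collected from the probe dataset."""
+     kmeans = KMeans(n_clusters=M, n_init=10).fit(tokens)
+     return kmeans.cluster_centers_            # Omega: (M, d) visual words
+
+ def discretize(X, vocabulary):
+     """Map each visual token to the index of its nearest visual word (Eq. 2)."""
+     d2 = ((X[:, None, :] - vocabulary[None, :, :]) ** 2).sum(-1)  # (n, M)
+     return d2.argmin(axis=1)                  # ingredient indices, shape (n,)
+ ```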
66
+
67
+ <sup>2</sup>For a probe dataset with D instances, the collection $\mathbb{X}$ has $n \times D$ visual tokens (ignoring CLS and DIST).
68
+
69
+ **Feat2Vertex.** After we have computed the discretized feature $\tilde{X}$ , the vertices of this feature are the unique ingredients: $V = \mathrm{Unique}(\tilde{X})$ . The importance of each vertex is measured by two criteria: its contribution to the DNN's final prediction and its frequency of appearance. Formally, for vertex $v \in \mathbb{M}$ , the importance is defined as
70
+
71
+ $$\lambda_v = \alpha_1 \lambda_v^{\text{CLS}} + \alpha_2 \lambda_v^{\text{bag}} = \alpha_1 \sum_{i \in \Xi(v|\tilde{X})} \psi_i^{\text{CLS}} + \alpha_2 |\Xi(v|\tilde{X})|, \tag{3}$$
72
+
73
+ where $\Xi(v|\tilde{X})$ is the set of all positions where ingredient v appears in $\tilde{X}$ , $\psi_i^{\text{CLS}}$ is the attention between the i-th visual token and the CLS token, and $\alpha_{1,2} \geq 0$ are learnable weights balancing the two terms. When $\alpha_1 = 0$ , $\Lambda$ is equivalent to the BoVW representation.
74
+
75
+ **Feat2Edge.** With the self-attention matrix $\Psi^V$ extracted from Equation (1), the interactions between vertices can be defined and computed efficiently. For any two different vertices $u,v\in V$ , the edge weight jointly considers the similarity from the view of the ViT and the spatial adjacency. The first term is the average attention over all repeated pairs $\Pi[(u,v)|\tilde{X}]$ :
76
+
77
+ $$e_{u,v}^{\text{attn}} = \frac{1}{|\Pi[(u,v)|\tilde{X}]|} \sum_{(i,j) \in \Pi[(u,v)|\tilde{X}]} \Psi_{i,j}^{V}, \tag{4}$$
78
+
79
+ where $\Pi[(u,v)|\tilde{X}]$ is the Cartesian product of $\Xi(u|\tilde{X})$ and $\Xi(v|\tilde{X})$ , and $\Psi^V_{i,j}$ is the attention between the visual tokens at positions i and j. Furthermore, we define the adjacency as
80
+
81
+ $$e_{u,v}^{\text{adj}} = \frac{1}{|\Pi[(u,v)|\tilde{X}]|} \sum_{(i,j)\in\Pi[(u,v)|\tilde{X}]} \frac{1}{\epsilon + \|\text{Pos}(i) - \text{Pos}(j)\|_2}, \tag{5}$$
83
+
84
+ where function $Pos(\cdot)$ returns the original 2D coordinates of the input visual token with regard to the patch array after the patch embedding module of ViTs. Eventually, the interaction between vertices u and v is the weighted sum of the two components with learnable $\beta_{1,2}$ :
85
+
86
+ $$e_{u,v} = \beta_1 e_{u,v}^{\text{attn}} + \beta_2 e_{u,v}^{\text{adj}}. \tag{6}$$
87
+
88
+ Equations (4) and (5) are both invariant under exchanging vertices u and v, so our IR-Graph is equivalent to an undirected graph.
89
+
90
+ It is worth noting that only *semantics* and *their relationships* are preserved in IR-Graphs for further inference rather than deep features.
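+
+ To make Equations (3)-(6) concrete, the following numpy sketch assembles the vertex and edge weights of an instance IR-Graph; all names, shapes, and default coefficients are our assumptions:
+
+ ```python
+ import numpy as np
+
+ def feat2graph(x_tilde, psi_cls, psi_v, pos, alpha=(1.0, 1.0),
+                beta=(1.0, 1.0), eps=1.0):
+     """x_tilde: (n,) ingredient indices; psi_cls: (n,) attention to CLS;
+     psi_v: (n, n) symmetric token attention; pos: (n, 2) patch coordinates."""
+     vertices = np.unique(x_tilde)
+     # Vertex weights (Eq. 3): CLS attention plus bag-of-visual-words count.
+     lam = {v: alpha[0] * psi_cls[x_tilde == v].sum()
+               + alpha[1] * (x_tilde == v).sum() for v in vertices}
+     edges = {}
+     for i, u in enumerate(vertices):
+         for v in vertices[i + 1:]:
+             iu, iv = np.where(x_tilde == u)[0], np.where(x_tilde == v)[0]
+             pairs = [(a, b) for a in iu for b in iv]     # Pi[(u, v) | x_tilde]
+             attn = np.mean([psi_v[a, b] for a, b in pairs])           # Eq. (4)
+             adj = np.mean([1.0 / (eps + np.linalg.norm(pos[a] - pos[b]))
+                            for a, b in pairs])                        # Eq. (5)
+             edges[(u, v)] = beta[0] * attn + beta[1] * adj            # Eq. (6)
+     return vertices, lam, edges
+ ```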
91
+
92
+ After converting an input image to IR-Graph, the matcher finds the most similar category-level graph in IR-Atlas. The overall procedure is shown in Figure 3, which is composed of a GCN module and a similarity computing module (Sim) that generates the final prediction. Detailed settings of the matcher are in Appendix E.3.
93
+
94
+ For feeding IR-Graphs to the GCN module, we assign each ingredient $m \in \mathbb{M}$ (i.e., graph vertices) a trainable embedding vector, each of which is initialized as a $d_G$ -dimensional random vector drawn independently from the multivariate Gaussian distribution $\mathcal{N}(\mathbf{0}, I_{d_G})$ .
95
+
96
+ In the GCN module, we adopt GraphConv (Morris et al., 2019) with slight modifications for weighted edges. Let $F \in \mathbb{R}^{|V| \times d_G}$ be the input feature of all vertices to a GraphConv layer; the output is computed as
97
+
98
+ $$\mathrm{GraphConv}(F) = \mathrm{Norm}\big(\sigma((I_{d_G} + E)FW)\big), \tag{7}$$
101
+
102
+ where $\sigma$ denotes a non-linear activation function, Norm denotes feature normalization, and $W \in \mathbb{R}^{d_G \times d_G}$ is a
103
+
104
+ ![](_page_4_Figure_18.jpeg)
105
+
106
+ Figure 3: Illustration of the matcher.
107
+
108
+ learnable projection matrix. After passing through a total of $L_G$ GraphConv layers, a weighted average pooling (WAP) layer summarizes the vertex embeddings $F^{(L_G)}$ weighted by $\Lambda$ , yielding the graph representation $z = \Lambda F^{(L_G)}$ . Now we have z for G and $\hat{Z} \in \mathbb{R}^{C \times d_G} = (\hat{z}_1, \dots, \hat{z}_C)$ for all graphs in $\hat{\mathcal{G}}$ . By computing the inner-product similarities, the final prediction logits are defined as $y = \hat{Z}z^{\top}$ .
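+
+ A minimal sketch of the matcher under our reading of Equation (7), taking Norm to be L2 row normalization and sigma to be ReLU as plausible choices (names and shapes are ours):
+
+ ```python
+ import torch
+ import torch.nn.functional as tf
+
+ def graph_conv(F, E, W):
+     """One modified GraphConv layer (Eq. 7): Norm(sigma((I + E) F W))."""
+     I = torch.eye(E.shape[0])
+     return tf.normalize(torch.relu((I + E) @ F @ W), dim=-1)
+
+ def match_logits(F0, E, lam, Z_hat, weights):
+     """GraphConv stack + weighted average pooling + inner products.
+     F0: (|V|, d_G) vertex embeddings; lam: (|V|,) vertex weights;
+     Z_hat: (C, d_G) atlas graph embeddings; weights: list of (d_G, d_G)."""
+     F = F0
+     for W in weights:
+         F = graph_conv(F, E, W)
+     z = lam @ F                   # WAP: graph representation z = Lambda F
+     return Z_hat @ z              # y = Z_hat z^T, one logit per class
+ ```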
109
+
110
+ **Prediction interpretation.** Given an instance graph G and a category graph $\hat{G}$ , we now analyze how the prediction is made based on their vertices and edges. We start by analyzing the similarity score $s=\hat{z}z^{\top}$ , where $z=\Lambda F^{(L_G)}$ and $\hat{z}=\hat{\Lambda}\hat{F}^{(L_G)}$ are the weighted sums of the vertex embeddings, respectively. For the sake of further discussion, let $f_v^{(l)}$ denote the embedding vector of vertex v output from the l-th GraphConv layer, and $\lambda_v^{(l)}$ denote its weight. The original vertex embedding of v is defined as $f_v^{(0)}$ , which is identical for the same ingredient in any two graphs. Besides, let $\Phi=V\cap\hat{V}$ be the set of shared vertices in G and $\hat{G}$ . The computation of s can be expanded as
111
+
112
+ $$s = \sum_{(u,v)\in\hat{V}\times V} \hat{\lambda}_u \lambda_v \hat{f}_u^{(L_G)} f_v^{(L_G)^{\top}}. \tag{8}$$
114
+
115
+ Since directly analyzing Equation (8) is hard, we show an approximation and prove it in Appendix B.
116
+
117
+ **Theorem 1.** For a shallow GCN module, Equation (8) can be approximated by
118
+
119
+ $$s = \sum_{\phi \in \Phi} \hat{\lambda}_{\phi} \lambda_{\phi} \hat{f}_{\phi}^{(L_G)} f_{\phi}^{(L_G)}. \tag{9}$$
120
+
121
+ Particularly, if $L_G = 0$ and $\alpha_1 = 0$ , our method is equivalent to BoVW with a linear classifier.
122
+
123
+ Further, as delineated in Corollary 1, the interpretability of the graph matcher with a shallow GCN can be stated as: (1) the final prediction score for a category is the summation of all present class evidence (shared vertices $\phi$ in $\Phi$ ) represented by the vertex weights; (2) the shared neighbors (local structure) connected to the shared vertex $\phi$ also contribute to the final prediction.
124
+
125
+ In addition to training SchemaNet with the cross-entropy loss $\mathcal{L}_{CE}$ between the predictions and the ground truths, we further constrain the complexity of the IR-Atlas. More formally, the complexity is defined over both the edges and the vertex weights of all graphs in the IR-Atlas as
126
+
127
+ $$\mathcal{L}_v = \frac{1}{C} \sum_{c=1}^C \mathcal{H}(\hat{\Lambda}_c), \quad \mathcal{L}_e = \frac{1}{C|\hat{V}|} \sum_{c=1}^C \sum_{u \in \hat{V}} \mathcal{H}(\hat{E}_{c,u}), \tag{10}$$
128
+
129
+ where function $\mathcal{H}(x)$ computes the entropy of the input vector $x \in \mathbb{R}^k_+$ normalized by the sum of its k components, and $\hat{E}_{c,u}$ denotes the weighted edges connected to vertex u in $\hat{G}_c$ of class c. The final optimization goal is
130
+
131
+ $$\mathcal{L} = \mathcal{L}_{CE} + \gamma_v \mathcal{L}_v + \gamma_e \mathcal{L}_e, \tag{11}$$
132
+
133
+ where $\gamma_v$ and $\gamma_e$ are hyperparameters. The overall training procedure is shown in Appendix C.
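+
+ A small sketch of the complexity terms of Equation (10), assuming each category graph stores a vertex-weight vector and a dense edge-weight matrix (names are ours):
+
+ ```python
+ import torch
+
+ def entropy(x, eps=1e-12):
+     """H(x): entropy of a non-negative vector normalized by its sum."""
+     p = x / (x.sum() + eps)
+     return -(p * torch.log(p + eps)).sum()
+
+ def atlas_complexity(atlas_lams, atlas_edges):
+     """L_v and L_e over all category graphs; atlas_lams: list of (|V|,)
+     weight vectors, atlas_edges: list of (|V|, |V|) edge matrices."""
+     L_v = torch.stack([entropy(lam) for lam in atlas_lams]).mean()
+     L_e = torch.stack([entropy(E_c[u]) for E_c in atlas_edges
+                        for u in range(E_c.shape[0])]).mean()
+     return L_v, L_e
+ ```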
134
+
135
+ **Sparsification.** Each graph in the IR-Atlas is initialized as a fully-connected graph with random vertex and edge weights. However, this requires $\mathcal{O}(C|V|^2)$ memory space for storing and training the whole set of edges. To alleviate this issue, we initialize the IR-Atlas by averaging the instance IR-Graphs for each class and remove the edges connected to vertices whose weights are below a given threshold $\delta_t = 0.01$ from the category-specific graph. Such a procedure not only dramatically decreases the number of learnable parameters but also boosts the final performance, as shown in Table 1.
2303.09429/main_diagram/main_diagram.drawio ADDED
The diff for this file is too large to render. See raw diff
 
2303.09429/paper_text/intro_method.md ADDED
@@ -0,0 +1,61 @@
1
+ # Introduction
2
+
3
+ Recent progress in the field of multi-modal learning (Radford et al. 2021; Lu et al. 2019) has been reflected in various downstream tasks, *e.g*., VQA (Antol et al. 2015; Levy, Ben-Ari, and Lischinski 2022), Visual Dialog (Das et al. 2017), Image captioning (Li et al. 2020), Image Retrieval (in its variations) (Baldrati et al. 2022; Li et al. 2021) and Composed Image Retrieval (CoIR) (Vo et al. 2019; Chen, Gong, and Bazzani 2020; Baldrati et al. 2022). Image Retrieval (IR) is a longstanding task that aims to find a desired image in a large corpus, given a user query. While content-based image retrieval uses a single visual modality to convey the user intent (Barz and Denzler 2021; Dubey 2021; Zhong, Chen, and Qian 2020), providing a bi-modal query can mitigate misinterpretations. In CoIR the gist and attributes are more succinctly described visually, and further intent is specified via a lingual modality (Han et al. 2017; Isola, Lim, and Adelson 2015; Vo et al. 2019; Guo et al. 2018; Wu et al. 2021; Liu et al. 2021; Couairon et al. 2022). Some examples of CoIR queries and their results are shown in Figures 1 and 5.
4
+
7
+ Despite the progress in foundation models and new CoIR architectures, curating a dataset for CoIR remains a challenging chore, where the samples are triplets of query-image, accompanying transition-text, and the target image serving as the ground-truth answer. There are several existing datasets for CoIR that differ significantly from each other. Vo et al. (2019) propose a dataset of rendered images of simple 3D scenes. Other existing datasets suffer from a small amount of data, and some are domain-specific (*e.g*., shoes (Guo et al. 2018)), while in others, the lingual modality is limited by transition-text used as a class label (Isola, Lim, and Adelson 2015), or generated based on pairs of image captions that differ by a single word (Han et al. 2017). Another dataset was labeled based on previous vision and language (V&L) models (Couairon et al. 2022). Recently, Wu et al. (2021) introduced *FashionIQ*, another domain-specific dataset for CoIR, which gained popularity (*e.g*. (Goenka et al. 2022; Lee, Kim, and Han 2021; Liu et al. 2021)) and contains human-annotated labels.
8
+
9
+ In addition to their small scale, shortcomings of these datasets include: 1) Not all acceptable target images for a given query are labeled as such, leading to an incorrect count of false-negatives (*e.g*., Fig. 5); 2) Lack of visual complexity (due to restriction to a specific domain); and 3) Modality redundancy, *i.e*., target images may often be retrieved using solely the query text, when it is descriptive enough to make the query image ignorable. We further refer to this issue as (lack of) *compositionality*: the target should be determined by its query constituents combining the lingual and visual cues.
10
+
11
+ To break out from the previous domain-specific datasets to the general and natural image domain, Liu et al. (2021) introduced the new *CIRR* (Composed Image Retrieval on Real-life images) dataset that contains open-domain *natural images*, taken from NLVR2 (Suhr et al. 2019). To the best of our knowledge, *CIRR* is the only existing dataset for CoIR based on natural images with human-annotated open-language texts. Despite attempts to reduce the false-negatives and relying on direct human labeling, *CIRR* still has two major shortcomings: 1) The image corpus is small, making retrieval easier; 2) Modality redundancy still exists (see Sec. 5), as well as false-negatives (according to our observations), reflecting the challenge in creating a "clean" dataset.
12
+
13
+ In this work, we introduce a new large scale dataset for CoIR, dubbed LaSCo (Large Scale Composed Image Re-
14
+
15
+ ![](_page_1_Figure_0.jpeg)
16
+
17
+ Figure 1: An overview of our CASE baseline. The query-image is fed to a ViT encoder. The query-text is handled by our shift encoder, i.e., a BERT encoder with intermediate cross-attention layers, which receives the ViT output and fuses textual and visual information. The resulting [CLS] token is then pooled and projected into a shared 256D space (circled Q). Finally, the K nearest cosine-similarity neighbors of Q are retrieved. For each training query, we also create a Reverse-Query by switching the roles of the query and target images. A learnable special token [REV] in our transformer handles the prediction of the query-image (circled RQ) as the Reverse-Query task.
18
+
19
+ trieval dataset). To construct it with minimal human effort, we employ a simple and effective methodology to rephrase labels from an existing large scale VQA dataset (Goyal et al. 2017) into a form suited for CoIR. LaSCo contains an open and broad domain of natural images and rich text. Compared to *CIRR*, it has ×10 more queries, ×2 more unique tokens and ×17 more corpus images. We then propose a new approach for analyzing CoIR datasets and methods, which detects modality redundancy or necessity, in queries. Our analysis demonstrates that LaSCo shows a significantly smaller bias towards a single modality for retrieval.
20
+
21
+ The SoTA approach (Baldrati et al. 2022) employs CLIP (Radford et al. 2021) to separately encode the textual and the visual query, followed by feature vector concatenation and a learnable projection head. We experiment with an additional end-to-end learnable baseline, which leverages the layers of BLIP's (Li et al. 2022) image-grounded text encoder and enables early interactions between textual tokens and individual areas (patches) in the image. This baseline, dubbed CASE (Cross-Attention driven Shift Encoder), builds upon bi-modal Cross-Attention to create an Encoder that Shifts the query-image towards the target in the embedding space (see Fig. 1). CASE is trained using a novel bi-directional objective, which we refer to as Reverse-Query (RQ), where the query-image is predicted from the target-image and the query-text. Being based on BLIP, CASE uses a lower-dimension latent vector of 256D, reducing retrieval complexity by a factor of $\times 2.5$ over previous SoTA. Furthermore, pre-training our baseline on LaSCo improves performance on the CIRR dataset and even surpasses previous SoTA methods without training on CIRR (at zero-shot).
22
+
23
+ In summary, our key contributions in this paper are:
24
+
25
+ - *LaSCo:* A new large-scale, domain-free CoIR dataset, a few orders of magnitude larger than existing datasets.
26
+ - Data Roaming: A simple methodology for automatically generating CoIR triplets from an existing VQA dataset.
27
+ - Modality Redundancy: A method for analyzing redundancy between modalities in existing CoIR datasets.
28
+
29
+ - *CASE*: A new BLIP-based baseline, featuring early fusion and a novel bi-directional training objective, that achieves SoTA performance with a large gap on *FashionIQ*, *CIRR* and LaSCo benchmarks.
30
+
31
+ # Method
32
+
33
+ The CASE architecture, depicted in Figure 1, consists of two transformer components (Vaswani et al. 2017). The first is a shift-encoder, based on the image-grounded text encoder previously introduced in (Li et al. 2022). It is a BERT (Devlin et al. 2019) encoder with additional intermediate cross-attention layers to model vision-language interactions. The second component is a ViT (Dosovitskiy et al. 2021) encoder. ViT divides an input image into patches and encodes them as a sequence of image tokens. The image tokens are then fed into the cross-attention layers, allowing interaction between the lingual and visual branches. The output, a bi-modality conditioned sequence (text on image and image on text), is then pooled to a single vector and projected to a 256D latent space. CASE allows early fusion between modalities, in contrast to previous late fusion methods (Baldrati et al. 2022; Vo et al. 2019; Kim et al. 2021; Delmas et al. 2022) or methods that take a middle way (Liu et al. 2021; Goenka et al. 2022; Chen, Gong, and Bazzani 2020; Hosseinzadeh and Wang 2020), as discussed in Section 2.
34
+
35
+ **Utilizing Vision-Language Pre-training:** We initialize our model's weights using BLIP (Li et al. 2022) pre-trained weights, as follows: Our shift-encoder's Self-Attention, Cross-Attention and Feed-Forward layers are initialized with the corresponding layers of BLIP's image-grounded encoder. Our final projection layer is initialized with the final
36
+
37
+ projection of BLIP's text-encoder. Finally, we initialize our ViT component with BLIP's image encoder. Our model is end-to-end trainable (see Figure 1).
38
+
39
+ A common training approach for image retrieval tasks uses a contrastive loss objective with the target image as the positive, e.g., (Goenka et al. 2022; Baldrati et al. 2022; Liu et al. 2021; Li et al. 2020). Here, we propose an additional reverse objective, where the goal is to retrieve the query image given the transition-text and the target image. One can view the reverse objective as flipping the shift vector of the original query (in the latent space) to point in the opposite direction, from the target image to the embedding of the query image. Our reverse objective further suggests an additional task and can be viewed as a valid augmentation, effectively enlarging the dataset. We therefore train our model jointly with the reverse objective (see Figure 1). Namely, given a triplet $(Q_i, Q_t, T_i)$ of query-image, query-text and target-image (respectively), our model objective M requires: $M(Q_i, Q_t) = T_i$ (standard CoIR task), while simultaneously enforcing $M(T_i, [REV]; Q_t) = Q_i$ , where [REV] is a special token provided to the model. Although the reverse task is not one-to-one (multiple queries may be suitable), this objective has proven to be beneficial in practice.
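+
+ A hedged sketch of how this joint objective could look with an in-batch contrastive loss; `case`, `case.encode_image`, the `[REV]` handling, and the temperature are our assumptions, not the released implementation:
+
+ ```python
+ import torch
+ import torch.nn.functional as tf
+
+ def bidirectional_loss(case, q_img, q_txt, t_img, tau=0.07):
+     """q_img/t_img: image batches; q_txt: list of transition-texts."""
+     rev_txt = ["[REV] " + t for t in q_txt]           # prepend the special token
+     fwd = tf.normalize(case(q_img, q_txt), dim=-1)    # M(Q_i, Q_t) -> T_i
+     rev = tf.normalize(case(t_img, rev_txt), dim=-1)  # M(T_i, [REV]; Q_t) -> Q_i
+     tgt = tf.normalize(case.encode_image(t_img), dim=-1)
+     src = tf.normalize(case.encode_image(q_img), dim=-1)
+     labels = torch.arange(fwd.shape[0])               # in-batch positives
+     return (tf.cross_entropy(fwd @ tgt.T / tau, labels)
+             + tf.cross_entropy(rev @ src.T / tau, labels))
+ ```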
40
+
41
+ We follow the most common approach for image retrieval: searching for matches in an embedding space shared by queries and targets (see Figure 1). First, corpus images (potential targets) are encoded by a ViT (Dosovitskiy et al. 2021) encoder, resulting in a single feature vector representing each image. Then, a given query (composed of image and text) is projected by CASE to the shared embedding space. Finally, the target candidates are ranked by cosine-similarity distance w.r.t. the shifted query embedding. By using a relatively small embedding space dimension of 256D, compared to 640D in the previous SoTA (Baldrati et al. 2022), the retrieval is sped up by ×2.5.
42
+
43
+ In this section, we first propose a simple analysis of existing CoIR datasets to examine the degree to which their queries require *both* modalities for successful retrieval. Next, a similar analysis is proposed for assessing the bias of CoIR methods towards modality redundancies.
44
+
45
+ An ideal composed query should require both modalities for retrieving the desired target. For example, a transition-text such as "Change the color to be more cream colored" in the top row of Figure 5 will only succeed in finding the proper target in conjunction with the query image, since the type of object cannot be inferred from the text alone. However, in practice, one of the modalities can become redundant, with the degree of redundancy depending on the information conveyed by the other modality. On one extreme, the query-image might be completely redundant, reducing the task to Text-to-Image retrieval; on the other extreme, the query-text might be redundant, with the task becoming Image-to-Image
46
+
47
+ ![](_page_4_Figure_0.jpeg)
48
+
49
+ Figure 3: Modality redundancy level in several datasets (lower is better). Recall@K values for *Text-to-Image* and *Image-to-Image* retrieval using off-the-shelf CLIP (Radford et al. 2021). Lower values indicate higher image and text compositionality. The results for COCO (Lin et al. 2014) are shown as a reference for a purely text-to-image (single modality) retrieval task, presenting an upper bound.
50
+
51
+ retrieval. To quantitatively assess the degree of redundancy in CoIR datasets, we measure the Recall@K performance of naive Text-to-Image and Image-to-Image retrieval using the embeddings produced by an independent off-the-shelf CLIP model (Radford et al. 2021). To create a continuous measure we compute the Recall@K for varying K values. These measurements, computed on several datasets, are plotted in Figure 3. A lower curve indicates that the corresponding dataset is more challenging for a uni-modal query. Note that the LaSCo and FashionIQ curves are much lower than CIRR's, implying that more of the queries in CIRR are modality-redundant. For reference, we also plot the performance of the CLIP-based Text-to-Image retriever using COCO captions as query text (a commonly used benchmark for Text-to-Image retrieval (Li et al. 2022, 2021, 2020; Radford et al. 2021)). While COCO may be viewed as an "upper bound" for this task, note that the CIRR curve is quite close to it.
52
+
53
+ Next, we employ a similar analysis for studying the degree to which CoIR methods (trained on a certain dataset) are affected by the presence of modality redundancies in the dataset. Starting from the full CIRR validation set, denoted as V, we generate a sequence of progressively "purified" subsets $V_n \subset V$ , with each subset containing fewer modality redundancies. Specifically, subset $V_n$ is generated by removing from V all of the queries for which the naive CLIP-based Text-to-Image retriever retrieves the correct target image among its top-n results. In Figure 4 we plot the average of Recall@ $\{1,5,10,50\}$ as a function of n, measured by applying our baseline CASE on each subset.
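+
+ A minimal sketch of this subset construction, assuming the rank of each true target under the text-only CLIP retriever has been precomputed (names are ours):
+
+ ```python
+ def purified_subsets(queries, text_only_rank, ns=(1, 5, 10, 50)):
+     """text_only_rank: dict mapping query id -> rank (1 = best) of the true
+     target when retrieving with the query text alone; V_n keeps only the
+     queries the text-only retriever fails on within its top-n results."""
+     return {n: [q for q in queries if text_only_rank[q] > n] for n in ns}
+ ```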
54
+
55
+ Note that the performance of the CLIP-based retriever (blue line) vanishes at $V_{50}$ , since by construction, $V_{50}$ contains only queries for which CLIP failed to retrieve the target within its top 50 results. A similar trend is observed with CASE-(Text-Only), a variant of our model trained on transition-texts only, ignoring the image. CASE-(Text-Only) exhibits performance degradation with increased n, as it re-
56
+
57
+ ![](_page_4_Figure_5.jpeg)
58
+
59
+ Figure 4: Average retrieval performance on subsets of *CIRR* determined by *image-redundancy* levels. Higher values of Average Recall imply a desirable trend of a higher level of required *compositionality* between text and image (lower modality redundancy). See Section 5.
60
+
61
+ lies solely on query-text. While CASE trained on *CIRR* (orange line) shows better performance, it still suffers some degradation as *n* grows, implying that some bias towards modality redundancies might still exist. However, when CASE is trained or pre-trained on the LaSCo dataset (pink and brown lines, respectively), it achieves the best performance, which is roughly constant regardless of *n*. Thus, LaSCo appears to be effective at removing bias towards redundancies, and CASE pre-trained on it is better suited for datasets with high compositionality.
2304.04395/main_diagram/main_diagram.drawio ADDED
The diff for this file is too large to render. See raw diff
 
2304.04395/paper_text/intro_method.md ADDED
@@ -0,0 +1,112 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Introduction
2
+
3
+ Neural Radiance Field (NeRF) [\[33\]](#page-8-0) has become the mainstream approach to novel view synthesis. Given only multi-view images with camera poses, NeRF encodes the underlying scene in a multi-layer perceptron (MLP) through radiance propagation and generates very impressive results. Thus, subsequent to NeRF's debut in [\[33\]](#page-8-0), many works have made great progress in improving its quality [\[10,](#page-8-1) [46\]](#page-9-0), efficiency [\[34,](#page-8-2) [54,](#page-9-1) [3\]](#page-8-3), and generality [\[56,](#page-9-2) [50\]](#page-9-3).
4
+
5
+ ![](_page_0_Figure_9.jpeg)
6
+
7
+ Figure 1: Pipeline of Instance-NeRF, demonstrated on the 3D-FRONT dataset. Instance-NeRF takes a pre-trained NeRF as input to detect objects within the scene and utilizes existing 2D panoptic segmentation to generate 2D segmentation maps, which are then matched and used to supervise the instance field training.
8
+
9
+ This excellent approach to associating 2D with 3D through a radiance field leads us to rethink the *3D instance segmentation problem*. Unlike its 2D counterpart, which operates on plenty of training images, 3D instance segmentation is limited by both the quantity and quality of the available data. Previously, 3D segmentation relied on RGB-D images [\[22\]](#page-8-4) or point clouds [\[37,](#page-9-4) [38,](#page-9-5) [47,](#page-9-6) [51,](#page-9-7) [42\]](#page-9-8) captured by depth sensors or custom equipment as input, which are inconvenient to obtain and contain various kinds of noise. To mitigate the dependence on explicit 3D geometry, there have been several investigations into embedding NeRF for addressing fundamental problems such as 3D semantic segmentation [\[57\]](#page-9-9) and scene manipulation [\[27\]](#page-8-5). Existing unsupervised methods such as [\[31,](#page-8-6) [55,](#page-9-10) [40\]](#page-9-11) also involve 3D scene decomposition and instance segmentation, but they are hard to apply to complex and large scenes akin to real-world cases.
10
+
11
+ <sup>\*</sup>Equal contribution.
12
+
13
+ <sup>†</sup>This research is supported in part by the Research Grant Council of the Hong Kong SAR under grant no. 16201420.
14
+
15
+ In this paper, inspired by NeRF-RPN [\[23\]](#page-8-7) and Mask-RCNN [\[18\]](#page-8-8), we propose *Instance-NeRF* for 3D instance segmentation in NeRF. Unlike semantic segmentation, where a single object in different views should be labeled as the same class consistently, most instance segmentation methods do not enforce consistency over instance IDs in different views, making direct supervision of instance IDs less applicable. To address this issue, we propose a 2D mask matching procedure that links each 2D instance mask to its 3D instance to resolve inconsistency. More specifically, we incorporate NeRF-RPN with a mask head to predict a 3D coarse segmentation. After projecting the attained 3D masks back to 2D, Instance-NeRF leverages Mask2Former [\[6\]](#page-8-9) and CascadePSP [\[7\]](#page-8-10) to match the same instance in 2D segmentation from different views and refine the resultant masks. The refined 2D segmentation of multi-view images is then used to train an instance field, which encodes 3D instance information in a continuous manner as a neural field.
16
+
17
+ Our major contributions are:
18
+
19
+ - One of the first significant attempts to perform 3D instance segmentation in NeRF without using ground-truth segmentation as input.
20
+ - Propose the architecture and training approach of a *Neural Instance Field*, which can produce multi-view consistent 2D segmentation as well as continuous 3D segmentation using the NeRF representation.
21
+ - Perform experiments and ablation studies on a synthetic indoor NeRF dataset to demonstrate the effectiveness of our method, which surpasses competitive 2D segmentation methods and previous works in NeRF segmentation.
22
+
23
+ # Method
24
+
25
+ Given a pre-trained NeRF, our method, Instance-NeRF, aims to detect all the objects within the underlying 3D scene and produce a bounding box, a continuous 3D mask, and a class label for each detected 3D object.
26
+
27
+ Instance-NeRF extends the given pre-trained NeRF model with an additional instance field, which can produce a view-independent instance label at any 3D position in the NeRF scene. To train the instance field component of Instance-NeRF, we propose a *NeRF-RCNN* for 3D sparse mask prediction and a *2D mask matching and refining* stage to produce multi-view consistent 2D masks based on the 3D masks for instance field supervision.
28
+
29
+ First, the *NeRF-RCNN*, which is extended from NeRF-RPN [\[23\]](#page-8-7), takes the extracted radiance and density field of the pre-trained NeRF and outputs 3D bounding boxes, class labels, and discrete 3D masks for each detected object in the NeRF. Then, given a set of multi-view but possibly *inconsistent* 2D panoptic segmentation maps of the scene produced by existing methods, we project the 3D masks from the same camera poses to match the same instance across different views. The multi-view *consistent* 2D segmentation can then be used to train the instance field component and produce a continuous 3D segmentation using the instance field representation.
30
+
31
+ Instance-NeRF optimizes a neural radiance field that maps a 3D position $\mathbf{x} = (x, y, z)$ and a viewing direction $\mathbf{d} = (\phi, \theta)$ to a volume density $\sigma$, RGB radiance $\mathbf{c} = (r, g, b)$, and an instance label distribution over $L$ labels including the background label. As shown in Figure [3,](#page-3-0) Instance-NeRF is parameterized by three components – density net $\Theta_\sigma$, color branch $\Theta_c$, and instance branch $\Theta_i$. $\Theta_\sigma$ and $\Theta_c$ constitute the original NeRF, and are assumed to be pre-trained with posed RGB images. The pre-trained NeRF can be formulated as
32
+
33
+ $$\mathcal{F}_{\Theta_{\sigma},\Theta_{c}}(\mathbf{x},\mathbf{d}) = (\sigma,\mathbf{c}),$$
34
+ (1)
35
+
36
+ and the Instance Field can be formulated as:
37
+
38
+ $$\mathcal{G}_{\Theta_{\sigma},\Theta_{i}}(\mathbf{x}) = \mathbf{i},\tag{2}$$
39
+
40
+ where $\mathbf{i}$ is an $L$-dimensional vector with its first dimension representing the background.
41
+
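+ A minimal sketch of this three-branch parameterization is shown below; plain MLPs stand in for the hash-grid and spherical-harmonics encodings used in the actual model, and all layer sizes are illustrative assumptions:
+
+ ```python
+ import torch
+ import torch.nn as nn
+
+ class InstanceNeRFSketch(nn.Module):
+     """Three-branch parameterization of Eqs. (1)-(2); L = num_labels."""
+     def __init__(self, num_labels, hidden=64):
+         super().__init__()
+         self.density_net = nn.Sequential(            # Theta_sigma: x -> (sigma, feat)
+             nn.Linear(3, hidden), nn.ReLU(), nn.Linear(hidden, 1 + hidden))
+         self.color_branch = nn.Sequential(           # Theta_c: (feat, d) -> c
+             nn.Linear(hidden + 2, hidden), nn.ReLU(), nn.Linear(hidden, 3))
+         self.instance_branch = nn.Sequential(        # Theta_i: feat -> i
+             nn.Linear(hidden, hidden), nn.ReLU(), nn.Linear(hidden, num_labels))
+
+     def forward(self, x, d):
+         h = self.density_net(x)
+         sigma, feat = h[..., :1], h[..., 1:]
+         c = self.color_branch(torch.cat([feat, d], dim=-1))  # view-dependent color
+         i = self.instance_branch(feat)       # view-independent instance logits
+         return sigma, c, i
+ ```
+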
42
+ We follow the hierarchical stratified sampling method in [\[33\]](#page-8-0) to render the RGB color, depth, and instance label for a single pixel. Specifically, for each pixel, we formulate a ray $\mathbf{r}(t) = \mathbf{o} + t\mathbf{d}$ emitted from the center of the projection camera to that pixel, and select $K$ quadrature points $\{t_k\}_{k=1}^{K}$ on the ray $\mathbf{r}$ within the traceable volume of Instance-NeRF. The numerical quadratures to accumulate the expected color $\hat{\mathbf{C}}(\mathbf{r})$, expected depth $\hat{\mathbf{D}}(\mathbf{r})$, and expected instance logits $\hat{\mathbf{I}}(\mathbf{r})$ for each pixel are respectively given by:
43
+
44
+ $$\hat{\mathbf{C}}(\mathbf{r}) = \sum_{k=1}^{K} \hat{T}(t_k) \alpha(\sigma(t_k) \delta_k) \mathbf{c}(t_k), \qquad (3)$$
45
+
46
+ $$\hat{\mathbf{D}}(\mathbf{r}) = \sum_{k=1}^{K} \hat{T}(t_k) \alpha (\sigma(t_k) \delta_k) t_k,$$
47
+ (4)
48
+
49
+ <span id="page-2-0"></span>
50
+ $$\hat{\mathbf{I}}(\mathbf{r}) = \sum_{k=1}^{K} \hat{T}(t_k) \alpha (\sigma(t_k) \delta_k) \mathbf{i}(t_k), \qquad (5)$$
51
+
52
+ <span id="page-3-3"></span>![](_page_3_Figure_0.jpeg)
53
+
54
+ Figure 2: **Instance Field Training.** When training the instance field, NeRF-RCNN takes the extracted radiance and density field of the pre-trained NeRF and outputs discrete 3D masks for each detected object in the NeRF. Mask2Former generates 2D panoptic segmentation maps of images rendered from NeRF, which are *inconsistent* in terms of instance labels across views. Projecting the 3D masks from the same camera poses to match the same instance across different views yields multi-view *consistent* 2D segmentation maps, which can then be used to train the instance field component and produce a continuous 3D segmentation using the instance field representation. In addition, we use CascadePSP to refine the preliminary instance segmentation results of Instance-NeRF, and use the refined instance masks to refine the Instance Field in turn.
55
+
56
+ <span id="page-3-0"></span>![](_page_3_Figure_2.jpeg)
57
+
58
+ Figure 3: **Instance-NeRF Architecture.** Instance-NeRF consists of density net $\Theta_{\sigma}$ , color branch $\Theta_c$ , and instance branch $\Theta_i$ . Given the input spatial position $\mathbf{x}=(x,y,z)$ and a viewing direction $\mathbf{d}=(\phi,\theta)$ , compared to the traditional NeRF, Instance-NeRF can predict a view-independent instance distribution $\mathbf{i}$ with its additional instance branch. In this paper, we adopt the Multi-resolution Hash Encoding (Hash) in [34] for position encoding and Spherical Harmonics (SH) in [54] for viewing direction encoding. Note that the hash grid $\mathcal{H}_i$ used for mask net is different from the one $\mathcal{H}_{\sigma}$ used for density net.
59
+
60
+ where
61
+
62
+ $$\hat{T}(t_k) = \exp\left(-\sum_{a=1}^{k-1} \sigma(t_a) \,\delta_a\right),$$
63
+
64
+ $$\alpha(x) = 1 - \exp(-x),$$
65
+
66
+ $$\delta_k = t_{k+1} - t_k.$$
67
+ (6)
68
+
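+ The quadrature in Eqs. (3)-(6) reduces to a short weighted sum per ray; the following is a sketch for a single ray, where the sample layout (a `(K+1,)` depth vector so that $\delta_k = t_{k+1} - t_k$) is our assumption:
+
+ ```python
+ import torch
+
+ def render_ray(sigma, c, i, t):
+     """Accumulate color, depth, and instance logits along one ray (Eqs. 3-6).
+
+     sigma: (K,) densities; c: (K, 3) radiance; i: (K, L) instance logits;
+     t: (K+1,) sample depths.
+     """
+     delta = t[1:] - t[:-1]                                 # Eq. (6)
+     alpha = 1.0 - torch.exp(-sigma * delta)                # alpha(sigma_k * delta_k)
+     acc = torch.cumsum(sigma * delta, dim=0)               # inclusive prefix sums
+     T = torch.exp(-torch.cat([torch.zeros(1), acc[:-1]]))  # transmittance T(t_k)
+     w = T * alpha                                          # per-sample weights
+     C = (w[:, None] * c).sum(dim=0)                        # Eq. (3): expected color
+     D = (w * t[:-1]).sum()                                 # Eq. (4): expected depth
+     I = (w[:, None] * i).sum(dim=0)                        # Eq. (5): instance logits
+     return C, D, I
+ ```
+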
69
+ We discuss the NeRF pre-training and Instance Field training in sections 3.1.2 and 3.1.3, respectively.
70
+
71
+ The density and radiance components of Instance-NeRF can be implemented and trained similarly to a regular NeRF, and the implementation is largely orthogonal to the Instance Field component. Posed RGB images are used to train the density net $\Theta_{\sigma}$ and color branch $\Theta_{c}$ with the appearance loss $\mathcal{L}_{p}$ :
72
+
73
+ $$\mathcal{L}_{p} = \frac{1}{\mathcal{R}} \sum_{\mathbf{r} \in \mathcal{R}} \left\| \hat{\mathbf{C}}(\mathbf{r}) - \mathbf{C}(\mathbf{r}) \right\|_{2}^{2}, \tag{7}$$
74
+
75
+ where $\mathcal{R}$ denotes the sampled rays within a training batch, and $\mathbf{C}(\mathbf{r})$ and $\hat{\mathbf{C}}(\mathbf{r})$ are respectively the ground truth and predicted RGB color for ray $\mathbf{r}$ . In this work, we assume a well-trained $\Theta_c$ and $\Theta_\sigma$ are given and focus mainly on the Instance Field training.
76
+
77
+ The instance field is trained with multi-view *consistent* 2D instance segmentation maps, during which $\Theta_{\sigma}$ and $\Theta_{c}$ will be fixed and only $\Theta_{i}$ will be optimized. Given a set of multi-view consistent 2D instance segmentation maps, we train the instance branch with multi-class cross-entropy loss $\mathcal{L}_{i}$ :
78
+
79
+ $$\mathcal{L}_i = -\frac{1}{\mathcal{R}L} \sum_{\mathbf{r} \in \mathcal{R}} \sum_{l=1}^{L} p^l(\mathbf{r}) \log \hat{p}^l(\mathbf{r}), \tag{8}$$
80
+
81
+ where $p^l(\mathbf{r})$ and $\hat{p}^l(\mathbf{r})$ are respectively the ground-truth multi-class instance probability and the volume-rendered prediction for class $l$ and ray $\mathbf{r}$ , the latter given by applying softmax to Eq. 5.
82
+
83
+ As the input 2D segmentation maps are generated by existing 2D segmentation methods, they cannot provide 3D geometry-consistent masks across different views. To compensate for this issue and to encourage smoothness of the instance label prediction, we take advantage of the prior that instance maps are generally smooth over local regions, and add an instance regularization loss $\mathcal{L}_r$ adopted from [\[35\]](#page-8-30):
84
+
85
+ $$\mathcal{L}_{r} = \frac{1}{\mathcal{R}L} \sum_{\mathbf{r}_{i,j} \in \mathcal{R}} \sum_{l=1}^{L} \left( \hat{\mathbf{I}}(\mathbf{r}_{i,j}) - \hat{\mathbf{I}}(\mathbf{r}_{i,j+1}) \right)^{2} w_{i,j} + \left( \hat{\mathbf{I}}(\mathbf{r}_{i,j}) - \hat{\mathbf{I}}(\mathbf{r}_{i+1,j}) \right)^{2} h_{i,j},$$
86
+ (9)
87
+
88
+ $$w_{i,j} = \frac{\delta_w(i,j)}{\sum_{\mathbf{r}_{i',j'} \in \mathcal{R}} \delta_w(i',j')},$$
89
+ (10)
90
+
91
+ $$\delta_w(i,j) = \exp(-(\hat{\mathbf{D}}(\mathbf{r}_{i,j}) - \hat{\mathbf{D}}(\mathbf{r}_{i,j+1}))^2), \tag{11}$$
92
+
93
+ where $w_{i,j}$ is the weight determined by the similarity of NeRF depth $\hat{\mathbf{D}}$ between two sampled rays, and likewise for $h_{i,j}$. $\mathbf{r}_{i,j}$ is the ray passing through pixel $(i, j)$ of the image. Recall that depth information is obtained from the pre-trained density branch. Similar depths of adjacent pixels imply that the pixels might be on the same surface in 3D space, and likely belong to the same object. Thus, this regularization loss encourages the model to predict the same instance label for them and generate smoother segmentation for each object.
94
+
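+ As a sketch, this regularization can be written over a rendered $H \times W$ patch of instance logits and depths (the batch-level normalization of Eq. (10) is omitted for brevity; array shapes are our assumptions):
+
+ ```python
+ import torch
+
+ def depth_weighted_smoothness(I_hat, D_hat):
+     """Depth-weighted smoothness of instance logits, cf. Eqs. (9)-(11).
+
+     I_hat: (H, W, L) rendered instance logits; D_hat: (H, W) rendered depths.
+     """
+     dI_x = (I_hat[:, :-1] - I_hat[:, 1:]) ** 2             # horizontal logit diffs
+     dI_y = (I_hat[:-1, :] - I_hat[1:, :]) ** 2             # vertical logit diffs
+     w = torch.exp(-(D_hat[:, :-1] - D_hat[:, 1:]) ** 2)    # Eq. (11), horizontal
+     h = torch.exp(-(D_hat[:-1, :] - D_hat[1:, :]) ** 2)    # vertical analogue
+     return (dI_x * w[..., None]).mean() + (dI_y * h[..., None]).mean()
+ ```
+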
95
+ The total loss L for training the instance field is:
96
+
97
+ <span id="page-4-1"></span>
98
+ $$\mathcal{L} = \mathcal{L}_i + \lambda \mathcal{L}_r, \tag{12}$$
99
+
100
+ where λ is a hyperparameter.
101
+
102
+ *NeRF-RCNN* takes as input a pre-trained NeRF and outputs 3D Axis-Aligned Bounding Boxes (AABB), class labels, and discrete 3D masks of the detected objects. This network extends NeRF-RPN [\[23\]](#page-8-7) by appending two additional detection heads: a box prediction head (MLP) and a mask prediction head (3D CNN), much in the same spirit as [\[19\]](#page-8-31). As shown in Figure [2,](#page-3-3) we first uniformly sample the appearance and density on a grid that covers the full traceable volume of the pre-trained Instance-NeRF model, following the sampling method in [\[23\]](#page-8-7). The NeRF-RPN takes the sampled grid as input, and outputs proposals and the feature pyramid. We use 3D RoIAlign to obtain the feature within the RoI of each output proposal. The box prediction head takes RoI features as input and outputs class logits and AABB regression offsets, while the mask prediction head takes RoI features as input and outputs a 3D mask for each RoI.
103
+
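+ A structural sketch of the two appended heads follows; the layer shapes, RoI resolution, and the 6-parameter AABB offset encoding are all our assumptions:
+
+ ```python
+ import torch.nn as nn
+
+ class RCNNHeadsSketch(nn.Module):
+     """Box head (MLP) and mask head (3D CNN) on 3D RoIAlign features."""
+     def __init__(self, C=256, r=7, num_classes=20):
+         super().__init__()
+         self.box_head = nn.Sequential(               # class logits + AABB offsets
+             nn.Flatten(), nn.Linear(C * r ** 3, 1024), nn.ReLU(),
+             nn.Linear(1024, num_classes + 6))
+         self.mask_head = nn.Sequential(              # upsampled voxel mask logits
+             nn.ConvTranspose3d(C, 128, 2, stride=2), nn.ReLU(),
+             nn.Conv3d(128, 1, 1))
+
+     def forward(self, roi_feat):                     # roi_feat: (N, C, r, r, r)
+         out = self.box_head(roi_feat)
+         cls_logits, box_offsets = out[:, :-6], out[:, -6:]
+         return cls_logits, box_offsets, self.mask_head(roi_feat)
+ ```
+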
104
+ The *NeRF-RCNN* can be trained in an end-to-end manner with sampled appearance and density grids as input, along with ground truth bounding boxes and 3D instance segmentation masks, by reducing the multi-class crossentropy loss for class prediction, the smooth L1 loss for bounding box regression, and the binary cross-entropy loss for 3D mask prediction. In practice, we adopt the pretrained NeRF-RPN, and optimize the classification head and mask head side by side. In our experiments, we train the *NeRF-RCNN* with a large NeRF segmentation dataset built on 3D-FRONT.
105
+
106
+ It is infeasible to adopt a 2D instance segmentation model to perform segmentation on multi-view images and directly use the results to supervise instance field training. The main reason lies in the fact that 2D instance segmentation models generally do not guarantee any correspondence between the appearances of the same instance in different views, including the assigned instance IDs, predicted class labels, and the masks. The same object is likely to be assigned different instance IDs in different images, making the results difficult to use directly in instance field training. Multi-view consistency within 2D instance masks is thus crucial for the quality of the instance field.
107
+
108
+ To address this consistency issue, we utilize the 3D coarse mask produced by NeRF-RCNN to align masks in different images that correspond to the same object. We further propose an iterative refinement approach by feeding the intermediate results of Instance-NeRF to an existing mask refinement model, and use the refined masks to train the final instance field.
109
+
110
+ To obtain the initial 2D segmentation, we use a Mask2Former [\[6\]](#page-8-9) panoptic segmentation model pretrained on the COCO [\[30\]](#page-8-32) dataset. We create a class mapping from COCO to the dataset we use, based on which we filter out masks that belong to the background. To deal with prediction inconsistency across images, we match each Mask2Former-predicted 2D mask with a 3D instance detected by NeRF-RCNN. To achieve this, we first project the 3D masks from NeRF-RCNN to the corresponding image spaces, and compute the Intersection over Union (IoU) between each pair of predicted 2D mask and projected 2D mask. Each predicted mask is then assigned to the instance with the highest mask IoU. Predicted masks that do not have an IoU greater than 0.05 with any projected mask are treated as unlabeled areas, which do not participate in instance field training. After this process, we have consistently annotated 2D instance masks from multi-view images, which will be used to optimize the instance field.
111
+
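+ The matching step itself is a simple greedy IoU assignment; a minimal sketch (boolean mask layout assumed) follows:
+
+ ```python
+ import numpy as np
+
+ def match_masks(pred_masks, proj_masks, iou_thresh=0.05):
+     """Assign each predicted 2D mask to the 3D instance with the highest IoU.
+
+     pred_masks: list of (H, W) boolean masks from the 2D segmenter
+     proj_masks: list of (H, W) boolean masks projected from 3D instances
+     Returns one instance index per predicted mask, or -1 for unlabeled.
+     """
+     assignments = []
+     for p in pred_masks:
+         ious = [np.logical_and(p, q).sum() / max(np.logical_or(p, q).sum(), 1)
+                 for q in proj_masks]
+         best = int(np.argmax(ious)) if ious else -1
+         assignments.append(best if best >= 0 and ious[best] > iou_thresh else -1)
+     return assignments
+ ```
+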
112
+ Although 2D-to-3D matching frees the model from inconsistency, uniformly sampled images, which frequently contain partially occluded objects, can easily mislead a prediction model pre-trained on general datasets, resulting in missing or erroneous mask predictions in different views. An instance field trained directly on the 2D masks therefore suffers from conflicting 2D masks across views, and usually produces fragmented segmentation results. To alleviate this issue, we adopt a pre-trained 2D mask refinement model, CascadePSP [7], to refine the preliminary instance segmentation results of Instance-NeRF. Instead of directly feeding Mask2Former output into the refinement network, we use Instance-NeRF rendering results, because Instance-NeRF can enhance single-view results by fusing multi-view information, filling in part of the incomplete or missing mask predictions in certain views. We split the instances in each preliminary Instance-NeRF mask and refine them independently; the results are then superimposed to supervise the final instance field training.
2305.00909/main_diagram/main_diagram.drawio ADDED
@@ -0,0 +1,406 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ <mxfile host="app.diagrams.net" modified="2023-04-27T04:51:12.902Z" agent="Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/16.3 Safari/605.1.15" version="21.2.3" etag="nxE0-m2GX7Z_7Kyz6Hao" type="google">
2
+ <diagram name="Page-1" id="QEUwh_9NYOglyHZTE_pA">
3
+ <mxGraphModel dx="2411" dy="1556" grid="0" gridSize="10" guides="1" tooltips="1" connect="1" arrows="1" fold="1" page="1" pageScale="1" pageWidth="1930" pageHeight="700" math="0" shadow="0">
4
+ <root>
5
+ <mxCell id="0" />
6
+ <mxCell id="1" parent="0" />
7
+ <mxCell id="2" value="" style="whiteSpace=wrap;html=1;shape=mxgraph.basic.isocube;isoAngle=15;shadow=1;glass=1;sketch=0;fontSize=16;fillColor=#FFE6CC;opacity=80;strokeColor=#FFB570;rotation=-1;strokeWidth=1;" vertex="1" parent="1">
8
+ <mxGeometry x="1319" y="22" width="295" height="157" as="geometry" />
9
+ </mxCell>
10
+ <mxCell id="3" value="" style="shape=note;whiteSpace=wrap;html=1;backgroundOutline=1;darkOpacity=0.05;rounded=1;shadow=1;glass=0;sketch=0;strokeColor=#9673a6;strokeWidth=1;fontSize=18;fillColor=#E6FFCC;opacity=40;textOpacity=0;" vertex="1" parent="1">
11
+ <mxGeometry x="539" y="402" width="463.16720257234726" height="149.5867768595041" as="geometry" />
12
+ </mxCell>
13
+ <mxCell id="4" value="" style="shape=note;whiteSpace=wrap;html=1;backgroundOutline=1;darkOpacity=0.05;rounded=1;shadow=1;glass=0;sketch=0;strokeColor=#9673a6;strokeWidth=1;fontSize=18;fillColor=#E6FFCC;opacity=40;textOpacity=0;" vertex="1" parent="1">
14
+ <mxGeometry x="552" y="417" width="463.16720257234726" height="149.5867768595041" as="geometry" />
15
+ </mxCell>
16
+ <mxCell id="5" value="" style="rounded=0;whiteSpace=wrap;html=1;shadow=1;fillColor=#FFCE9F;glass=0;sketch=0;opacity=80;" vertex="1" parent="1">
17
+ <mxGeometry x="81" y="116.5" width="120" height="107" as="geometry" />
18
+ </mxCell>
19
+ <mxCell id="6" value="" style="rounded=0;whiteSpace=wrap;html=1;shadow=1;fillColor=#FFDE9F;glass=0;sketch=0;opacity=80;" vertex="1" parent="1">
20
+ <mxGeometry x="91" y="126.5" width="120" height="107" as="geometry" />
21
+ </mxCell>
22
+ <mxCell id="7" value="" style="rounded=0;whiteSpace=wrap;html=1;shadow=1;fillColor=#FFEE9F;glass=0;sketch=0;opacity=80;" vertex="1" parent="1">
23
+ <mxGeometry x="101" y="136.5" width="120" height="107" as="geometry" />
24
+ </mxCell>
25
+ <mxCell id="8" value="" style="rounded=0;whiteSpace=wrap;html=1;shadow=1;fillColor=#FFDE9F;glass=0;sketch=0;opacity=80;" vertex="1" parent="1">
26
+ <mxGeometry x="111" y="146.5" width="120" height="107" as="geometry" />
27
+ </mxCell>
28
+ <mxCell id="9" value="" style="rounded=0;whiteSpace=wrap;html=1;shadow=1;fillColor=#FFCE9F;glass=0;sketch=0;opacity=80;" vertex="1" parent="1">
29
+ <mxGeometry x="121" y="156.5" width="120" height="107" as="geometry" />
30
+ </mxCell>
31
+ <mxCell id="10" value="Input/Output&lt;br style=&quot;font-size: 17px;&quot;&gt;Data" style="text;html=1;strokeColor=none;fillColor=none;align=center;verticalAlign=middle;whiteSpace=wrap;rounded=0;shadow=1;glass=0;sketch=0;opacity=80;fontStyle=1;fontSize=17;fontFamily=Comic Sans MS;" vertex="1" parent="1">
32
+ <mxGeometry x="143" y="195" width="78" height="30" as="geometry" />
33
+ </mxCell>
34
+ <mxCell id="11" value="" style="whiteSpace=wrap;html=1;shape=mxgraph.basic.isocube;isoAngle=15;shadow=1;glass=1;sketch=0;fontSize=16;fillColor=#cce5ff;opacity=80;strokeColor=#36393d;rotation=-1;" vertex="1" parent="1">
35
+ <mxGeometry x="407" y="173" width="295" height="157" as="geometry" />
36
+ </mxCell>
37
+ <mxCell id="12" value="" style="whiteSpace=wrap;html=1;shape=mxgraph.basic.isocube;isoAngle=15;shadow=1;glass=1;sketch=0;fontSize=16;fillColor=#cce5ff;opacity=80;strokeColor=#36393d;rotation=-1;" vertex="1" parent="1">
38
+ <mxGeometry x="407" y="155" width="295" height="157" as="geometry" />
39
+ </mxCell>
40
+ <mxCell id="13" value="" style="whiteSpace=wrap;html=1;shape=mxgraph.basic.isocube;isoAngle=15;shadow=1;glass=1;sketch=0;fontSize=16;fillColor=#b0e3e6;opacity=80;strokeColor=#0e8088;rotation=-1;" vertex="1" parent="1">
41
+ <mxGeometry x="407" y="136" width="295" height="157" as="geometry" />
42
+ </mxCell>
43
+ <mxCell id="14" value="" style="whiteSpace=wrap;html=1;shape=mxgraph.basic.isocube;isoAngle=15;shadow=1;glass=1;sketch=0;fontSize=16;fillColor=#dae8fc;opacity=80;strokeColor=#6c8ebf;rotation=-1;" vertex="1" parent="1">
44
+ <mxGeometry x="407" y="117" width="295" height="157" as="geometry" />
45
+ </mxCell>
46
+ <mxCell id="15" value="" style="whiteSpace=wrap;html=1;shape=mxgraph.basic.isocube;isoAngle=15;shadow=1;glass=1;sketch=0;fontSize=16;fillColor=#cce5ff;opacity=80;strokeColor=#36393d;rotation=-1;" vertex="1" parent="1">
47
+ <mxGeometry x="406" y="99" width="295" height="157" as="geometry" />
48
+ </mxCell>
49
+ <mxCell id="16" value="" style="whiteSpace=wrap;html=1;shape=mxgraph.basic.isocube;isoAngle=15;shadow=1;glass=1;sketch=0;fontSize=16;fillColor=#b1ddf0;opacity=80;strokeColor=#10739e;rotation=-1;" vertex="1" parent="1">
50
+ <mxGeometry x="406" y="80" width="295" height="157" as="geometry" />
51
+ </mxCell>
52
+ <mxCell id="17" value="" style="whiteSpace=wrap;html=1;shape=mxgraph.basic.isocube;isoAngle=15;shadow=1;glass=1;sketch=0;fontSize=16;fillColor=#b0e3e6;opacity=80;strokeColor=#0e8088;rotation=-1;" vertex="1" parent="1">
53
+ <mxGeometry x="406" y="62" width="295" height="157" as="geometry" />
54
+ </mxCell>
55
+ <mxCell id="18" value="" style="endArrow=none;html=1;rounded=0;fontSize=16;shadow=1;sketch=0;opacity=30;" edge="1" parent="1">
56
+ <mxGeometry width="50" height="50" relative="1" as="geometry">
57
+ <mxPoint x="435" y="147" as="sourcePoint" />
58
+ <mxPoint x="580" y="74" as="targetPoint" />
59
+ </mxGeometry>
60
+ </mxCell>
61
+ <mxCell id="19" value="" style="endArrow=none;html=1;rounded=0;fontSize=16;shadow=1;sketch=0;opacity=30;" edge="1" parent="1">
62
+ <mxGeometry width="50" height="50" relative="1" as="geometry">
63
+ <mxPoint x="462" y="159" as="sourcePoint" />
64
+ <mxPoint x="607" y="86" as="targetPoint" />
65
+ </mxGeometry>
66
+ </mxCell>
67
+ <mxCell id="20" value="" style="endArrow=none;html=1;rounded=0;fontSize=16;shadow=1;sketch=0;opacity=30;" edge="1" parent="1">
68
+ <mxGeometry width="50" height="50" relative="1" as="geometry">
69
+ <mxPoint x="495" y="175" as="sourcePoint" />
70
+ <mxPoint x="640" y="102" as="targetPoint" />
71
+ </mxGeometry>
72
+ </mxCell>
73
+ <mxCell id="21" value="" style="endArrow=none;html=1;rounded=0;fontSize=16;shadow=1;sketch=0;opacity=30;" edge="1" parent="1">
74
+ <mxGeometry width="50" height="50" relative="1" as="geometry">
75
+ <mxPoint x="525" y="190" as="sourcePoint" />
76
+ <mxPoint x="670" y="117" as="targetPoint" />
77
+ </mxGeometry>
78
+ </mxCell>
79
+ <mxCell id="22" value="" style="endArrow=none;html=1;rounded=0;fontSize=16;shadow=1;sketch=0;opacity=10;" edge="1" parent="1">
80
+ <mxGeometry width="50" height="50" relative="1" as="geometry">
81
+ <mxPoint x="434" y="275" as="sourcePoint" />
82
+ <mxPoint x="434" y="148" as="targetPoint" />
83
+ </mxGeometry>
84
+ </mxCell>
85
+ <mxCell id="23" value="" style="endArrow=none;html=1;rounded=0;fontSize=16;shadow=1;sketch=0;opacity=10;" edge="1" parent="1">
86
+ <mxGeometry width="50" height="50" relative="1" as="geometry">
87
+ <mxPoint x="462" y="287" as="sourcePoint" />
88
+ <mxPoint x="462" y="160" as="targetPoint" />
89
+ </mxGeometry>
90
+ </mxCell>
91
+ <mxCell id="24" value="" style="endArrow=none;html=1;rounded=0;fontSize=16;shadow=1;sketch=0;opacity=10;" edge="1" parent="1">
92
+ <mxGeometry width="50" height="50" relative="1" as="geometry">
93
+ <mxPoint x="493" y="301" as="sourcePoint" />
94
+ <mxPoint x="493" y="174" as="targetPoint" />
95
+ </mxGeometry>
96
+ </mxCell>
97
+ <mxCell id="25" value="" style="endArrow=none;html=1;rounded=0;fontSize=16;shadow=1;sketch=0;opacity=10;" edge="1" parent="1">
98
+ <mxGeometry width="50" height="50" relative="1" as="geometry">
99
+ <mxPoint x="525" y="315" as="sourcePoint" />
100
+ <mxPoint x="525" y="188" as="targetPoint" />
101
+ </mxGeometry>
102
+ </mxCell>
103
+ <mxCell id="26" value="" style="endArrow=none;html=1;rounded=0;fontSize=16;shadow=1;sketch=0;opacity=30;entryX=0.606;entryY=0.809;entryDx=0;entryDy=0;entryPerimeter=0;" edge="1" parent="1">
104
+ <mxGeometry width="50" height="50" relative="1" as="geometry">
105
+ <mxPoint x="438" y="119" as="sourcePoint" />
106
+ <mxPoint x="584.6119060210324" y="188.4598744858282" as="targetPoint" />
107
+ </mxGeometry>
108
+ </mxCell>
109
+ <mxCell id="27" value="" style="endArrow=none;html=1;rounded=0;fontSize=16;shadow=1;sketch=0;opacity=30;entryX=0.606;entryY=0.809;entryDx=0;entryDy=0;entryPerimeter=0;" edge="1" parent="1">
110
+ <mxGeometry width="50" height="50" relative="1" as="geometry">
111
+ <mxPoint x="478" y="99" as="sourcePoint" />
112
+ <mxPoint x="624.6119060210324" y="168.4598744858282" as="targetPoint" />
113
+ </mxGeometry>
114
+ </mxCell>
115
+ <mxCell id="28" value="" style="endArrow=none;html=1;rounded=0;fontSize=16;shadow=1;sketch=0;opacity=30;entryX=0.606;entryY=0.809;entryDx=0;entryDy=0;entryPerimeter=0;" edge="1" parent="1">
116
+ <mxGeometry width="50" height="50" relative="1" as="geometry">
117
+ <mxPoint x="517" y="80" as="sourcePoint" />
118
+ <mxPoint x="663.6119060210324" y="149.4598744858282" as="targetPoint" />
119
+ </mxGeometry>
120
+ </mxCell>
121
+ <mxCell id="29" value="" style="endArrow=none;html=1;rounded=0;fontSize=16;shadow=1;sketch=0;opacity=10;exitX=0.611;exitY=0.924;exitDx=0;exitDy=0;exitPerimeter=0;" edge="1" source="11" parent="1">
122
+ <mxGeometry width="50" height="50" relative="1" as="geometry">
123
+ <mxPoint x="585" y="315" as="sourcePoint" />
124
+ <mxPoint x="585" y="188" as="targetPoint" />
125
+ </mxGeometry>
126
+ </mxCell>
127
+ <mxCell id="30" value="" style="endArrow=none;html=1;rounded=0;fontSize=16;shadow=1;sketch=0;opacity=10;exitX=0.611;exitY=0.924;exitDx=0;exitDy=0;exitPerimeter=0;" edge="1" parent="1">
128
+ <mxGeometry width="50" height="50" relative="1" as="geometry">
129
+ <mxPoint x="627.401784569613" y="298.2463823223818" as="sourcePoint" />
130
+ <mxPoint x="624" y="168.76" as="targetPoint" />
131
+ </mxGeometry>
132
+ </mxCell>
133
+ <mxCell id="31" value="" style="endArrow=none;html=1;rounded=0;fontSize=16;shadow=1;sketch=0;opacity=10;exitX=0.611;exitY=0.924;exitDx=0;exitDy=0;exitPerimeter=0;" edge="1" parent="1">
134
+ <mxGeometry width="50" height="50" relative="1" as="geometry">
135
+ <mxPoint x="667.401784569613" y="279.2463823223818" as="sourcePoint" />
136
+ <mxPoint x="664" y="149.76" as="targetPoint" />
137
+ </mxGeometry>
138
+ </mxCell>
139
+ <mxCell id="32" value="Instances&lt;br style=&quot;font-size: 18px;&quot;&gt;(Batch)" style="text;html=1;strokeColor=none;fillColor=none;align=center;verticalAlign=middle;whiteSpace=wrap;rounded=0;shadow=1;glass=1;sketch=0;opacity=80;fontStyle=1;fontSize=18;rotation=0;fontFamily=Comic Sans MS;" vertex="1" parent="1">
140
+ <mxGeometry x="379" y="321" width="114" height="39" as="geometry" />
141
+ </mxCell>
142
+ <mxCell id="33" value="Maximum Number of Tokens" style="text;html=1;strokeColor=none;fillColor=none;align=center;verticalAlign=middle;whiteSpace=wrap;rounded=0;shadow=1;glass=1;sketch=0;opacity=80;fontStyle=1;fontSize=18;rotation=0;fontFamily=Comic Sans MS;" vertex="1" parent="1">
143
+ <mxGeometry x="608" y="330" width="119" height="39" as="geometry" />
144
+ </mxCell>
145
+ <mxCell id="34" value="Maximum Number of&lt;br style=&quot;font-size: 18px;&quot;&gt;Samples" style="text;html=1;strokeColor=none;fillColor=none;align=center;verticalAlign=middle;whiteSpace=wrap;rounded=0;shadow=1;glass=1;sketch=0;opacity=80;fontStyle=1;fontSize=18;rotation=0;fontFamily=Comic Sans MS;" vertex="1" parent="1">
146
+ <mxGeometry x="716" y="173.5" width="114" height="39" as="geometry" />
147
+ </mxCell>
148
+ <mxCell id="35" value="" style="shape=flexArrow;endArrow=classic;html=1;rounded=1;shadow=1;sketch=1;strokeColor=#4D4D4D;strokeWidth=2;fontSize=16;sourcePerimeterSpacing=5;targetPerimeterSpacing=5;fillColor=#9AC7BF;endWidth=23;endSize=9.5;width=13;" edge="1" parent="1">
149
+ <mxGeometry width="50" height="50" relative="1" as="geometry">
150
+ <mxPoint x="260" y="213.5" as="sourcePoint" />
151
+ <mxPoint x="384" y="217" as="targetPoint" />
152
+ <Array as="points">
153
+ <mxPoint x="326" y="225" />
154
+ </Array>
155
+ </mxGeometry>
156
+ </mxCell>
157
+ <mxCell id="36" value="AST&lt;br style=&quot;font-size: 19px;&quot;&gt;Based&lt;br style=&quot;font-size: 19px;&quot;&gt;Tokenizer" style="text;html=1;strokeColor=none;fillColor=none;align=center;verticalAlign=middle;whiteSpace=wrap;rounded=0;shadow=0;glass=1;sketch=1;fontSize=19;opacity=80;fontStyle=1;fontFamily=Comic Sans MS;" vertex="1" parent="1">
158
+ <mxGeometry x="288" y="143.5" width="60" height="30" as="geometry" />
159
+ </mxCell>
160
+ <mxCell id="37" value="Sample&lt;br style=&quot;font-size: 19px;&quot;&gt;Embedder" style="rounded=1;whiteSpace=wrap;html=1;shadow=1;glass=0;sketch=1;strokeColor=#d79b00;fontSize=19;fillColor=#ffe6cc;strokeWidth=1;perimeterSpacing=17;arcSize=25;fontStyle=1;fontFamily=Comic Sans MS;" vertex="1" parent="1">
161
+ <mxGeometry x="848" y="37" width="195" height="75" as="geometry" />
162
+ </mxCell>
163
+ <mxCell id="38" value="" style="shape=flexArrow;endArrow=classic;html=1;rounded=1;shadow=1;sketch=1;strokeColor=#4D4D4D;strokeWidth=2;fontSize=16;sourcePerimeterSpacing=5;targetPerimeterSpacing=5;fillColor=#FFCE9F;endWidth=23;endSize=9.5;width=13;" edge="1" source="17" parent="1">
164
+ <mxGeometry width="50" height="50" relative="1" as="geometry">
165
+ <mxPoint x="700" y="81" as="sourcePoint" />
166
+ <mxPoint x="837" y="62" as="targetPoint" />
167
+ <Array as="points">
168
+ <mxPoint x="750" y="61" />
169
+ </Array>
170
+ </mxGeometry>
171
+ </mxCell>
172
+ <mxCell id="39" value="Problem Description Encoder" style="rounded=1;whiteSpace=wrap;html=1;shadow=1;glass=0;sketch=1;strokeColor=#67AB9F;fontSize=19;fillColor=#9AC7BF;strokeWidth=1;perimeterSpacing=17;arcSize=25;fontStyle=1;fontFamily=Comic Sans MS;" vertex="1" parent="1">
173
+ <mxGeometry x="841.5" y="233.5" width="201.5" height="75" as="geometry" />
174
+ </mxCell>
175
+ <mxCell id="40" value="" style="shape=flexArrow;endArrow=classic;html=1;rounded=1;shadow=1;sketch=1;strokeColor=#4D4D4D;strokeWidth=2;fontSize=16;sourcePerimeterSpacing=5;targetPerimeterSpacing=5;fillColor=#9AC7BF;endWidth=23;endSize=9.5;width=13;" edge="1" parent="1">
176
+ <mxGeometry width="50" height="50" relative="1" as="geometry">
177
+ <mxPoint x="758" y="386" as="sourcePoint" />
178
+ <mxPoint x="836" y="310.79" as="targetPoint" />
179
+ <Array as="points">
180
+ <mxPoint x="781" y="340.79" />
181
+ </Array>
182
+ </mxGeometry>
183
+ </mxCell>
184
+ <mxCell id="41" value="" style="shape=flexArrow;endArrow=classic;html=1;rounded=1;shadow=1;sketch=1;strokeColor=#4D4D4D;strokeWidth=2;fontSize=16;sourcePerimeterSpacing=5;targetPerimeterSpacing=5;fillColor=#FFCE9F;endWidth=23;endSize=9.5;width=13;" edge="1" parent="1">
185
+ <mxGeometry width="50" height="50" relative="1" as="geometry">
186
+ <mxPoint x="1056" y="74.5" as="sourcePoint" />
187
+ <mxPoint x="1284" y="72" as="targetPoint" />
188
+ <Array as="points">
189
+ <mxPoint x="1165" y="48" />
190
+ </Array>
191
+ </mxGeometry>
192
+ </mxCell>
193
+ <mxCell id="42" value="" style="shape=flexArrow;endArrow=classic;html=1;rounded=1;shadow=1;sketch=1;strokeColor=#4D4D4D;strokeWidth=2;fontSize=16;sourcePerimeterSpacing=5;targetPerimeterSpacing=5;fillColor=#67AB9F;endWidth=23;endSize=9.5;width=13;" edge="1" parent="1">
194
+ <mxGeometry width="50" height="50" relative="1" as="geometry">
195
+ <mxPoint x="1054" y="276" as="sourcePoint" />
196
+ <mxPoint x="1215" y="232" as="targetPoint" />
197
+ <Array as="points">
198
+ <mxPoint x="1137" y="273" />
199
+ </Array>
200
+ </mxGeometry>
201
+ </mxCell>
202
+ <mxCell id="43" value="Batch" style="text;html=1;strokeColor=none;fillColor=none;align=center;verticalAlign=middle;whiteSpace=wrap;rounded=0;shadow=1;glass=1;sketch=0;opacity=80;fontStyle=1;fontSize=18;rotation=0;fontFamily=Comic Sans MS;" vertex="1" parent="1">
203
+ <mxGeometry x="1391" y="380.79" width="114" height="39" as="geometry" />
204
+ </mxCell>
205
+ <mxCell id="44" value="Natural Language Description" style="text;html=1;strokeColor=none;fillColor=none;align=center;verticalAlign=middle;whiteSpace=wrap;rounded=0;shadow=1;glass=1;sketch=0;strokeWidth=1;fontFamily=Comic Sans MS;fontSize=31;opacity=90;fontStyle=1" vertex="1" parent="1">
206
+ <mxGeometry x="603.92" y="632" width="468" height="30" as="geometry" />
207
+ </mxCell>
208
+ <mxCell id="45" value="&lt;b style=&quot;font-size: 20px;&quot;&gt;Transformer&lt;br&gt;Encoder and&amp;nbsp;&lt;br&gt;Decoder&lt;/b&gt;" style="rounded=0;whiteSpace=wrap;html=1;shadow=1;glass=0;sketch=1;strokeColor=#b85450;strokeWidth=1;fontFamily=Comic Sans MS;fontSize=18;fillColor=#f8cecc;opacity=90;" vertex="1" parent="1">
209
+ <mxGeometry x="1622" y="233.5" width="198" height="94" as="geometry" />
210
+ </mxCell>
211
+ <mxCell id="46" value="" style="shape=flexArrow;endArrow=classic;html=1;rounded=1;shadow=1;sketch=1;strokeColor=#333333;strokeWidth=2;fontFamily=Helvetica;fontSize=20;sourcePerimeterSpacing=5;targetPerimeterSpacing=5;fillColor=#EA6B66;width=15;endSize=6.62;" edge="1" parent="1">
212
+ <mxGeometry width="50" height="50" relative="1" as="geometry">
213
+ <mxPoint x="1601" y="147.25" as="sourcePoint" />
214
+ <mxPoint x="1710" y="216" as="targetPoint" />
215
+ <Array as="points">
216
+ <mxPoint x="1673" y="167" />
217
+ </Array>
218
+ </mxGeometry>
219
+ </mxCell>
220
+ <mxCell id="47" value="" style="whiteSpace=wrap;html=1;shape=mxgraph.basic.isocube;isoAngle=15;shadow=1;glass=1;sketch=0;fontSize=16;fillColor=#f8cecc;opacity=50;strokeColor=#b85450;rotation=-1;" vertex="1" parent="1">
221
+ <mxGeometry x="1191" y="401.79" width="295" height="157" as="geometry" />
222
+ </mxCell>
223
+ <mxCell id="48" value="" style="endArrow=none;html=1;rounded=0;fontSize=16;shadow=1;sketch=0;opacity=30;fillColor=#f8cecc;strokeColor=#b85450;" edge="1" parent="1">
224
+ <mxGeometry width="50" height="50" relative="1" as="geometry">
225
+ <mxPoint x="1220" y="486.79" as="sourcePoint" />
226
+ <mxPoint x="1365" y="413.79" as="targetPoint" />
227
+ </mxGeometry>
228
+ </mxCell>
229
+ <mxCell id="49" value="" style="endArrow=none;html=1;rounded=0;fontSize=16;shadow=1;sketch=0;opacity=30;fillColor=#f8cecc;strokeColor=#b85450;" edge="1" parent="1">
230
+ <mxGeometry width="50" height="50" relative="1" as="geometry">
231
+ <mxPoint x="1247" y="498.79" as="sourcePoint" />
232
+ <mxPoint x="1392" y="425.79" as="targetPoint" />
233
+ </mxGeometry>
234
+ </mxCell>
235
+ <mxCell id="50" value="" style="endArrow=none;html=1;rounded=0;fontSize=16;shadow=1;sketch=0;opacity=30;fillColor=#f8cecc;strokeColor=#b85450;" edge="1" parent="1">
236
+ <mxGeometry width="50" height="50" relative="1" as="geometry">
237
+ <mxPoint x="1280" y="514.79" as="sourcePoint" />
238
+ <mxPoint x="1425" y="441.79" as="targetPoint" />
239
+ </mxGeometry>
240
+ </mxCell>
241
+ <mxCell id="51" value="" style="endArrow=none;html=1;rounded=0;fontSize=16;shadow=1;sketch=0;opacity=30;fillColor=#f8cecc;strokeColor=#b85450;" edge="1" parent="1">
242
+ <mxGeometry width="50" height="50" relative="1" as="geometry">
243
+ <mxPoint x="1310" y="529.79" as="sourcePoint" />
244
+ <mxPoint x="1455" y="456.79" as="targetPoint" />
245
+ </mxGeometry>
246
+ </mxCell>
247
+ <mxCell id="52" value="" style="shape=flexArrow;endArrow=classic;html=1;rounded=1;shadow=1;sketch=1;strokeColor=#4D4D4D;strokeWidth=2;fontSize=16;sourcePerimeterSpacing=5;targetPerimeterSpacing=5;fillColor=#F19C99;endWidth=23;endSize=9.5;width=13;" edge="1" parent="1">
248
+ <mxGeometry width="50" height="50" relative="1" as="geometry">
249
+ <mxPoint x="1628" y="346" as="sourcePoint" />
250
+ <mxPoint x="1511" y="430.79" as="targetPoint" />
251
+ <Array as="points">
252
+ <mxPoint x="1600" y="402" />
253
+ </Array>
254
+ </mxGeometry>
255
+ </mxCell>
256
+ <mxCell id="53" value="Tokenized Program" style="text;html=1;strokeColor=none;fillColor=none;align=center;verticalAlign=middle;whiteSpace=wrap;rounded=0;shadow=1;glass=1;sketch=0;strokeWidth=1;fontFamily=Comic Sans MS;fontSize=30;opacity=90;fontStyle=1" vertex="1" parent="1">
257
+ <mxGeometry x="1194" y="579.79" width="377.5" height="30" as="geometry" />
258
+ </mxCell>
259
+ <mxCell id="54" value="" style="endArrow=none;html=1;rounded=0;fontSize=16;shadow=1;sketch=0;opacity=10;" edge="1" parent="1">
260
+ <mxGeometry width="50" height="50" relative="1" as="geometry">
261
+ <mxPoint x="1220" y="487.79" as="sourcePoint" />
262
+ <mxPoint x="1220" y="504.79" as="targetPoint" />
263
+ </mxGeometry>
264
+ </mxCell>
265
+ <mxCell id="55" value="" style="endArrow=none;html=1;rounded=0;fontSize=16;shadow=1;sketch=0;opacity=10;" edge="1" parent="1">
266
+ <mxGeometry width="50" height="50" relative="1" as="geometry">
267
+ <mxPoint x="1279" y="513.79" as="sourcePoint" />
268
+ <mxPoint x="1279" y="530.79" as="targetPoint" />
269
+ </mxGeometry>
270
+ </mxCell>
271
+ <mxCell id="56" value="" style="endArrow=none;html=1;rounded=0;fontSize=16;shadow=1;sketch=0;opacity=10;" edge="1" parent="1">
272
+ <mxGeometry width="50" height="50" relative="1" as="geometry">
273
+ <mxPoint x="1309" y="527.79" as="sourcePoint" />
274
+ <mxPoint x="1309" y="544.79" as="targetPoint" />
275
+ </mxGeometry>
276
+ </mxCell>
277
+ <mxCell id="57" value="" style="endArrow=baseDash;startArrow=baseDash;html=1;endFill=0;startFill=0;strokeWidth=2;" edge="1" parent="1">
278
+ <mxGeometry width="50" height="50" relative="1" as="geometry">
279
+ <mxPoint x="544" y="348" as="sourcePoint" />
280
+ <mxPoint x="407" y="285" as="targetPoint" />
281
+ </mxGeometry>
282
+ </mxCell>
283
+ <mxCell id="58" value="" style="endArrow=baseDash;startArrow=baseDash;html=1;endFill=0;startFill=0;strokeWidth=2;" edge="1" parent="1">
284
+ <mxGeometry width="50" height="50" relative="1" as="geometry">
285
+ <mxPoint x="723" y="263.5" as="sourcePoint" />
286
+ <mxPoint x="723" y="134.5" as="targetPoint" />
287
+ </mxGeometry>
288
+ </mxCell>
289
+ <mxCell id="59" value="" style="endArrow=baseDash;startArrow=baseDash;html=1;endFill=0;startFill=0;strokeWidth=2;" edge="1" parent="1">
290
+ <mxGeometry width="50" height="50" relative="1" as="geometry">
291
+ <mxPoint x="1491" y="455.79" as="sourcePoint" />
292
+ <mxPoint x="1351" y="389.79" as="targetPoint" />
293
+ </mxGeometry>
294
+ </mxCell>
295
+ <mxCell id="60" value="" style="shape=note;whiteSpace=wrap;html=1;backgroundOutline=1;darkOpacity=0.05;rounded=1;shadow=1;glass=0;sketch=0;strokeColor=#9673a6;strokeWidth=1;fontSize=18;fillColor=#E6FFCC;opacity=40;textOpacity=0;" vertex="1" parent="1">
296
+ <mxGeometry x="567.92" y="433" width="463.16720257234726" height="149.5867768595041" as="geometry" />
297
+ </mxCell>
298
+ <mxCell id="61" value="Given an integer n and a list of numbers, find how many elements are greater than n." style="text;html=1;strokeColor=none;fillColor=none;align=left;verticalAlign=middle;whiteSpace=wrap;rounded=0;shadow=1;glass=0;sketch=0;strokeWidth=1;fontSize=18;opacity=70;fontFamily=Courier New;fontStyle=0" vertex="1" parent="1">
299
+ <mxGeometry x="583.8363987138263" y="485.3553719008264" width="415.41800643086816" height="44.87603305785124" as="geometry" />
300
+ </mxCell>
301
+ <mxCell id="62" value="" style="shape=note;whiteSpace=wrap;html=1;backgroundOutline=1;darkOpacity=0.05;rounded=1;shadow=1;glass=0;sketch=0;strokeColor=#9673a6;strokeWidth=1;fontSize=18;fillColor=#E6FFCA;opacity=40;textOpacity=0;" vertex="1" parent="1">
302
+ <mxGeometry x="583.8363987138263" y="447.9586776859504" width="463.16720257234726" height="149.5867768595041" as="geometry" />
303
+ </mxCell>
304
+ <mxCell id="63" value="Given an integer n and a list of numbers, find how many elements are greater than n." style="text;html=1;strokeColor=none;fillColor=none;align=left;verticalAlign=middle;whiteSpace=wrap;rounded=0;shadow=1;glass=0;sketch=0;strokeWidth=1;fontSize=18;fontFamily=Courier New;fontStyle=0;opacity=20;" vertex="1" parent="1">
305
+ <mxGeometry x="599.7527974276527" y="500.31404958677683" width="415.41800643086816" height="44.87603305785124" as="geometry" />
306
+ </mxCell>
307
+ <mxCell id="64" value="" style="shape=note;whiteSpace=wrap;html=1;backgroundOutline=1;darkOpacity=0.05;rounded=1;shadow=1;glass=0;sketch=0;strokeColor=#9673a6;strokeWidth=1;fontSize=18;fillColor=#E6FFCC;opacity=90;" vertex="1" parent="1">
308
+ <mxGeometry x="599.7527974276527" y="464.41322314049586" width="463.16720257234726" height="149.5867768595041" as="geometry" />
309
+ </mxCell>
310
+ <mxCell id="65" value="Given an integer n and a list of numbers, find how many elements are greater than n." style="text;html=1;strokeColor=none;fillColor=none;align=left;verticalAlign=middle;whiteSpace=wrap;rounded=0;shadow=1;glass=0;sketch=0;strokeWidth=1;fontSize=25;opacity=70;fontFamily=Courier New;fontStyle=1" vertex="1" parent="1">
311
+ <mxGeometry x="615.6691961414791" y="516.7685950413223" width="415.41800643086816" height="44.87603305785124" as="geometry" />
312
+ </mxCell>
313
+ <mxCell id="66" value="" style="endArrow=baseDash;startArrow=baseDash;html=1;endFill=0;startFill=0;strokeWidth=2;" edge="1" parent="1">
314
+ <mxGeometry width="50" height="50" relative="1" as="geometry">
315
+ <mxPoint x="710" y="276" as="sourcePoint" />
316
+ <mxPoint x="561" y="351" as="targetPoint" />
317
+ </mxGeometry>
318
+ </mxCell>
319
+ <mxCell id="67" value="" style="endArrow=none;html=1;rounded=0;fontSize=16;shadow=1;sketch=0;opacity=10;" edge="1" parent="1">
320
+ <mxGeometry width="50" height="50" relative="1" as="geometry">
321
+ <mxPoint x="1247" y="499.79" as="sourcePoint" />
322
+ <mxPoint x="1247" y="516.7900000000002" as="targetPoint" />
323
+ </mxGeometry>
324
+ </mxCell>
325
+ <mxCell id="68" value="" style="whiteSpace=wrap;html=1;shape=mxgraph.basic.isocube;isoAngle=15;shadow=1;glass=1;sketch=0;fontSize=16;fillColor=#b0e3e6;opacity=80;strokeColor=#0e8088;rotation=-1;" vertex="1" parent="1">
326
+ <mxGeometry x="1170" y="99" width="295" height="157" as="geometry" />
327
+ </mxCell>
328
+ <mxCell id="69" value="" style="endArrow=none;html=1;rounded=0;fontSize=16;shadow=1;sketch=0;opacity=10;" edge="1" parent="1">
329
+ <mxGeometry width="50" height="50" relative="1" as="geometry">
330
+ <mxPoint x="1291" y="226.49999999999994" as="sourcePoint" />
331
+ <mxPoint x="1291" y="243.49999999999994" as="targetPoint" />
332
+ </mxGeometry>
333
+ </mxCell>
334
+ <mxCell id="70" value="" style="endArrow=none;html=1;rounded=0;fontSize=16;shadow=1;sketch=0;opacity=10;" edge="1" parent="1">
335
+ <mxGeometry width="50" height="50" relative="1" as="geometry">
336
+ <mxPoint x="1261" y="212.5" as="sourcePoint" />
337
+ <mxPoint x="1261" y="229.5" as="targetPoint" />
338
+ </mxGeometry>
339
+ </mxCell>
340
+ <mxCell id="71" value="" style="endArrow=none;html=1;rounded=0;fontSize=16;shadow=1;sketch=0;opacity=10;" edge="1" parent="1">
341
+ <mxGeometry width="50" height="50" relative="1" as="geometry">
342
+ <mxPoint x="1230" y="199" as="sourcePoint" />
343
+ <mxPoint x="1230" y="216" as="targetPoint" />
344
+ </mxGeometry>
345
+ </mxCell>
346
+ <mxCell id="72" value="" style="endArrow=none;html=1;rounded=0;fontSize=16;shadow=1;sketch=0;opacity=10;" edge="1" parent="1">
347
+ <mxGeometry width="50" height="50" relative="1" as="geometry">
348
+ <mxPoint x="1199" y="184.5" as="sourcePoint" />
349
+ <mxPoint x="1199" y="201.5" as="targetPoint" />
350
+ </mxGeometry>
351
+ </mxCell>
352
+ <mxCell id="73" value="" style="endArrow=none;html=1;rounded=0;fontSize=16;shadow=1;sketch=0;opacity=30;" edge="1" parent="1">
353
+ <mxGeometry width="50" height="50" relative="1" as="geometry">
354
+ <mxPoint x="1200" y="185" as="sourcePoint" />
355
+ <mxPoint x="1345" y="112" as="targetPoint" />
356
+ </mxGeometry>
357
+ </mxCell>
358
+ <mxCell id="74" value="" style="endArrow=none;html=1;rounded=0;fontSize=16;shadow=1;sketch=0;opacity=30;" edge="1" parent="1">
359
+ <mxGeometry width="50" height="50" relative="1" as="geometry">
360
+ <mxPoint x="1227" y="197" as="sourcePoint" />
361
+ <mxPoint x="1372" y="124" as="targetPoint" />
362
+ </mxGeometry>
363
+ </mxCell>
364
+ <mxCell id="75" value="" style="endArrow=none;html=1;rounded=0;fontSize=16;shadow=1;sketch=0;opacity=30;" edge="1" parent="1">
365
+ <mxGeometry width="50" height="50" relative="1" as="geometry">
366
+ <mxPoint x="1260" y="213" as="sourcePoint" />
367
+ <mxPoint x="1405" y="140" as="targetPoint" />
368
+ </mxGeometry>
369
+ </mxCell>
370
+ <mxCell id="76" value="" style="endArrow=none;html=1;rounded=0;fontSize=16;shadow=1;sketch=0;opacity=30;" edge="1" parent="1">
371
+ <mxGeometry width="50" height="50" relative="1" as="geometry">
372
+ <mxPoint x="1290" y="228" as="sourcePoint" />
373
+ <mxPoint x="1435" y="155" as="targetPoint" />
374
+ </mxGeometry>
375
+ </mxCell>
376
+ <mxCell id="77" value="" style="endArrow=none;html=1;rounded=0;fontSize=16;shadow=1;sketch=0;opacity=30;" edge="1" parent="1">
377
+ <mxGeometry width="50" height="50" relative="1" as="geometry">
378
+ <mxPoint x="1349" y="108" as="sourcePoint" />
379
+ <mxPoint x="1494" y="35" as="targetPoint" />
380
+ </mxGeometry>
381
+ </mxCell>
382
+ <mxCell id="78" value="" style="endArrow=none;html=1;rounded=0;fontSize=16;shadow=1;sketch=0;opacity=30;" edge="1" parent="1">
383
+ <mxGeometry width="50" height="50" relative="1" as="geometry">
384
+ <mxPoint x="1376" y="120" as="sourcePoint" />
385
+ <mxPoint x="1521" y="47" as="targetPoint" />
386
+ </mxGeometry>
387
+ </mxCell>
388
+ <mxCell id="79" value="" style="endArrow=none;html=1;rounded=0;fontSize=16;shadow=1;sketch=0;opacity=30;" edge="1" parent="1">
389
+ <mxGeometry width="50" height="50" relative="1" as="geometry">
390
+ <mxPoint x="1409" y="136" as="sourcePoint" />
391
+ <mxPoint x="1554" y="63" as="targetPoint" />
392
+ </mxGeometry>
393
+ </mxCell>
394
+ <mxCell id="80" value="" style="endArrow=none;html=1;rounded=0;fontSize=16;shadow=1;sketch=0;opacity=30;" edge="1" parent="1">
395
+ <mxGeometry width="50" height="50" relative="1" as="geometry">
396
+ <mxPoint x="1439" y="151" as="sourcePoint" />
397
+ <mxPoint x="1584" y="78" as="targetPoint" />
398
+ </mxGeometry>
399
+ </mxCell>
400
+ <mxCell id="81" value="224 x" style="text;html=1;strokeColor=none;fillColor=none;align=center;verticalAlign=middle;whiteSpace=wrap;rounded=0;shadow=1;glass=1;sketch=0;opacity=80;fontStyle=1;fontSize=18;rotation=0;fontFamily=Comic Sans MS;" vertex="1" parent="1">
401
+ <mxGeometry x="1492" y="269.5" width="114" height="39" as="geometry" />
402
+ </mxCell>
403
+ </root>
404
+ </mxGraphModel>
405
+ </diagram>
406
+ </mxfile>
2305.00909/paper_text/intro_method.md ADDED
@@ -0,0 +1,96 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Introduction
2
+
3
+ <figure id="fig:teaser" data-latex-placement="ht">
4
+ <embed src="figure/ChainCoder-teaser.pdf" />
5
+ <figcaption>Our prediction target formulation illustrated with an example. In the <em>Merge Intervals</em> problem (a medium-difficulty problem on LeetCode), the solution requires first tackling extreme cases and initializing variables, then entering the main algorithm loop. A programmer needs to first come up with this outline, then think deeper about how to implement the algorithm loop. They may then think about using a stack to keep track of the biggest end time and to decide whether to close or extend the current interval. Finally, after being clear about these ideas, they may write the formal answer with care for both syntax and variable names. This reasoning procedure happens from coarse to fine. We therefore construct the prediction target into four subsequences. Note that the boxed substrings only roughly indicate the tokens, while our actual tokens take the forms of the grouped nodes in the syntax tree. </figcaption>
6
+ </figure>
7
+
8
+ The goal of automatic program synthesis has been established and extensively researched for decades [@Waldinger1969PROWAS; @backus1957fortran]. Recently, with the rapid advancements of Large Language Models (LLMs), a surge of methods leverage transformers to model programs as sequences, and have demonstrated prospects to automatically analyze, annotate, translate, or synthesize code [@li2022competition; @austin2021program; @fried2022incoder; @chen2021evaluating; @Kanade2020LearningAE; @feng2020codebert; @Clement2020PyMT5MT; @Svyatkovskiy2020IntelliCodeCC; @Wang2021CodeT5IU]. These LLMs are trained on a large corpus of code so as to imbibe syntactic and semantic understanding into model weights, and develop logical reasoning for program synthesis.
9
+
10
+ Though current LLMs have gained success in code understanding and generation, their inherent design, as well as their training/finetuning techniques, is largely borrowed from *natural language* modeling. This limits their potential in code generation modeling for two reasons.
11
+
12
+ First, for a given problem specification, LLMs generate end-to-end code solutions autoregressively in a single pass, regardless of the logical complexity involved. Even from a programmer's perspective, it is difficult to write good code in a single shot without laying out logical and hierarchical thinking. In other words, composing code involves molding an overall "warm-up" followed by lower-level atomic algorithms and variable definitions[^1].
13
+
14
+ Second, LLMs tokenize the code using tokenizers meant for everyday-natural-language strings and disregard syntactical awareness as structural priors [@chen2022codet; @hendrycks2021measuring; @li2022competition; @chen2021evaluating; @austin2021program; @athiwaratkun2022multi; @nijkamp2022codegen; @xu2022ide; @wang2022code4struct; @chensparse; @li2023cancergpt; @mao2022single]. Notably, programming languages contain complex rules and conform to more rigorous layouts compared to their natural language counterparts [@casalnuovo2019studying; @naur1975programming]. They have fewer choices of synonyms, stringent syntax requirements, and diverse control flows (sequential statement execution, selection/branching, and repetition such as for and while loops).
15
+
16
+ In our ChainCoder, we investigate if addressing the aforementioned flaws is indeed promising. As shown in [1](#fig:teaser){reference-type="ref+Label" reference="fig:teaser"}, we aim to remodel code generation, which previous works treat as a no-warm-up, single-shot textual sequence completion task, into a multi-step intermediate reasoning strategy through syntactical decomposition.
17
+
18
+ In a similar context of harnessing multi-step reasoning capability of LLMs (on natural language), recent works discover that chain-of-thought (CoT) prompting [@zhang2022automatic; @wei2022chain; @shi2022language] significantly boosts performance. CoT prompting advances LLMs to naturally develop a series of intermediate reasoning steps before providing the final answer through exemplar demonstrations. This exposes a profound potential of refactoring LLMs to explicitly perform **coherent intermediate step-wise reasoning**, rather than directly concluding final solutions.
19
+
20
+ Inspired by the success of chain-of-thought prompting for LLMs, we design a novel approach that follows a *step-wise*, **outline-then-detail** thought process. We decompose the source code into a predefined hierarchy of code logic, and prepare the LLM generation target at each level ($\bm{S}_{1-4}$ in [1](#fig:teaser){reference-type="ref+Label" reference="fig:teaser"}) so that the model predicts them in multiple separate passes. We refer to our model as ChainCoder, and to such hierarchical prediction as "*coarse-to-fine*" generation, similar to @dong2018coarse. Specifically, ChainCoder first builds a logical skeleton of the code, then moves to step-wise implementation of lower-level atomic details/algorithms. This is inherently different from vanilla CoT prompting, as CoT methods require external prompting and are mainly developed in the context of natural language reasoning, while ChainCoder is the first program synthesis model that progressively generates code by itself.
21
+
22
+ To build such a multi-level hierarchical representation, appropriate disentanglement of the code is required. To this end, we tailor the tokenization step to better expose complex programming language rules. We parse the code into the Abstract Syntax Tree (AST), which naturally exposes extensive domain knowledge and structures expressed in the tree nodes and edges. We then disentangle the syntax tree into two components: the *layout frame component*, which conveys the clause type and other syntactical information, and the *accessory component*, which includes concrete variable and function names, etc. The syntax tree-based tokenizer brings two-fold advantages: its domain knowledge eases the comprehension of source code, and it can possibly boost syntactical correctness. Unlike previous research on applying syntax-aware tokenizers [@jimenez2018impact] or simplification tricks during the tokenization step [@gupta2017deepfix; @chirkova2020simple], ChainCoder is the first approach to leverage syntax knowledge to disentangle the code and build hierarchical prediction targets.
23
+
24
+ In brief, our work aims to reformulate program synthesis from a sequential text completion task into a multi-pass coarse-to-fine refinement strategy. As depicted in [1](#fig:teaser){reference-type="ref+Label" reference="fig:teaser"}, this comprises four progressive steps -- outlining, core algorithm hinting, layout framing, and accessory sub-sequencing, inspired by the "warm-up" thought process of programmers. We then leverage a tailored transformer architecture that better encodes the syntactically aligned I/O data to ease comprehension of problem specifications. To the best of our knowledge, ChainCoder is the first large language model to perform multi-pass *coarse-to-fine* code generation to reflect an intermediate warm-up procedure, in contrast to existing single-shot sequential completion works. In summary, our technical contributions are as follows:
25
+
26
+ - Inspired by the chain-of-thought process, we leverage the unique syntax hierarchy to build the multi-pass coarse-to-fine code generation framework. ChainCoder is the first program synthesis approach that harnesses progressive generation to elicit the step-wise reasoning capability of LLMs and improve accuracy and syntactic coherency.
27
+
28
+ - To enable progressive generation, we propose a syntax-aware tokenizer that neatly disentangles the code and outputs multi-level prediction targets accordingly. We further develop a transformer to better leverage the structure of the syntactically aligned data.
29
+
30
+ - Evaluations on the competition-level datasets show that ChainCoder performs better than other state-of-the-art models even with smaller model sizes. Ablation studies verify the effectiveness of the coarse-to-fine guidance and other design choices.
31
+
32
+ # Method
33
+
34
+ Code is written by humans but executed by computers. Humans therefore write it in a form they can understand, and software then transforms it into a form the computer can use. This transformation is referred to as parsing, which derives a structured template from the source code text. After parsing, the programming language is converted into a better-formatted data structure, usually a tree-type format called the abstract syntax tree.
35
+
36
+ Parsing the code into a tree-type format is common to almost all programming languages. Specifically, in Python, the leaf nodes of the abstract syntax tree are the function/class/variable/operator names and constant values. The branching nodes of the tree represent syntactic relationships between the leaf nodes. For example, consider the line of code `x = 0`. Its syntax tree is roughly a tree with three nodes: the root node is the *assignment*, and the two leaf nodes are `x` and `0`. The root node conveys syntax information ("$\mathrm{=}$"): it tells that this line is an "assignment"-type sequential statement, that there will be two placeholders as its children nodes, and that one child node will assign its value to the other. The precise syntax tree is more complex, though, as we show in [7](#fig:SMALLAST){reference-type="ref+Label" reference="fig:SMALLAST"} and [8](#fig:LARGEAST){reference-type="ref+Label" reference="fig:LARGEAST"} in the Appendix. The abstract syntax tree has a better-formatted structure than the raw text, but it has been overlooked by previous LLM research on program synthesis. Therefore, we seek to ease the learning of the LLM by tokenizing based on the parsed tree rather than the raw code text.
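+
+ As a concrete illustration, Python's built-in `ast` module exposes exactly this structure; the snippet below is a minimal sketch of parsing the example above (exact node names vary slightly across Python versions):
+
+ ```python
+ import ast
+
+ tree = ast.parse("x = 0")
+ print(ast.dump(tree))
+ # Module(body=[Assign(targets=[Name(id='x', ctx=Store())],
+ #                     value=Constant(value=0))], type_ignores=[])
+ ```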
37
+
38
+ The code contains complex structures both syntactically and semantically. Writing algorithmic code is generally difficult, partly because syntactic and semantic information must be handled simultaneously. To ease this difficulty, we propose to disentangle the code into a component that conveys mostly syntactic information and another component that conveys mostly semantic information, and to generate them separately. We refer to these two components as the *layout frame subsequence* and the *accessory subsequence*. The disentanglement procedure is based on the syntax tree representation, as it enables more flexible treatment of the code in the domain of tree nodes and edges.
39
+
40
+ The layout frame subsequence is responsible for indicating the clause type, assigning placeholders in the sequel, and maintaining syntactic correctness. On the other hand, the accessory subsequence contains the concrete variable/class/function/operator names, and constant values. In the above example of `x = 0`, the syntax subtree of this line of code could be unambiguously decoupled into two layout frame tokens and two accessory tokens. First, the two accessory tokens are easily identified as the leaf nodes, `x` and `0`. Then, the first layout frame token could be the root node *assignment (=)* with the corresponding edge between it and the leaf node `x`, while the second layout frame token could be the edge between the root node and the other leaf node `0`. When these tokens are interleaved together, they become the serialized representation of the syntax tree.
41
+
42
+ The training procedure of ChainCoder heavily relies on an *encoding* step, which converts the source code into the decoupled layout frame and accessory subsequences. In the encoding step, the code is first parsed into an AST, which is then partitioned into nodes and edges. These nodes and edges are re-grouped to generate the two subsequences, which are then used for training and inference. We use the term *decoding* for the reverse procedure: given the decoupled subsequences, decoding recovers the original source code. It is noteworthy that the two encoded subsequences are less human-interpretable than the original code, yet they convey precise information to the Python interpreter.
43
+
44
+ As revealed in [1](#fig:teaser){reference-type="ref+Label" reference="fig:teaser"}, the layout frame and the accessory subsequences constitute the fine level part in the coarse-to-fine hierarchy, and are referred to as $\bm{S}_3$ and $\bm{S}_4$. They contain complete information to reconstruct the original code by themselves. There are other parts of coarse level subsequences ($\bm{S}_1$ and $\bm{S}_2$), which we discuss in [3.3](#sec:prediction-target){reference-type="ref+Label" reference="sec:prediction-target"}. Next we discuss the encoding and decoding steps in detail.
45
+
46
+ **Encoding Step.** The encoding step is used to preprocess the training samples to construct the prediction targets. Prior to training, each code sample in the training set is converted to the coarse-to-fine hierarchical subsequences, and ChainCoder is trained to generate these new subsequences.
47
+
48
+ The encoding step first parses the source code into the abstract syntax tree, then disentangles the serialized syntax tree into the layout frame subsequence ($\bm{S}_3$) and the accessory subsequence ($\bm{S}_4$). It constructs the other two coarse-level subsequences at the same time. The encoding algorithm of ChainCoder is displayed in [\[alg:wordy\]](#alg:wordy){reference-type="ref+Label" reference="alg:wordy"}.
49
+
50
+ In general, the encoding step is completed during a depth-first pre-order traversal of the parsed syntax tree. Starting from the root node, it first visits the current node, then visits all children from left to right. Every time it meets a leaf node, the traversal pauses; it groups all branching nodes seen so far into a single token, appends this grouped token to the layout frame subsequence, and then appends the leaf node to the accessory subsequence. By grouping the branching nodes, the algorithm efficiently compresses the token sequence length. The following relationship always holds for the tokenized subsequences: `layout frame subsequence length = accessory subsequence length + 1`, where the "`+1`" comes from the ending brackets in the layout frame subsequence after the last accessory token is appended.
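+
+ The traversal can be sketched with Python's `ast` module as below. This is an illustrative approximation of the paper's tokenizer, under our own simplifications: `Name` and `Constant` nodes act as leaves, and a layout frame token is just the tuple of grouped branching-node class names:
+
+ ```python
+ import ast
+
+ LEAF_TYPES = (ast.Name, ast.Constant)   # identifiers and constants act as leaves
+
+ def preorder(node):
+     """Depth-first pre-order traversal; do not descend below leaves."""
+     yield node
+     if not isinstance(node, LEAF_TYPES):
+         for child in ast.iter_child_nodes(node):
+             yield from preorder(child)
+
+ def encode(source):
+     """Split code into layout frame (S3) and accessory (S4) subsequences."""
+     s3, s4, group = [], [], []
+     for node in preorder(ast.parse(source)):
+         if isinstance(node, LEAF_TYPES):
+             s3.append(tuple(group))          # flush grouped branching nodes
+             group = []
+             s4.append(getattr(node, "id", None) or node.value)
+         else:
+             group.append(type(node).__name__)
+     s3.append(tuple(group))                  # closing token: len(S3) == len(S4) + 1
+     return s3, s4
+
+ print(encode("x = 0"))
+ # ([('Module', 'Assign'), (), ()], ['x', 0])
+ ```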
51
+
52
+ ::::: algorithm
53
+ ::: algorithmic
54
+ :::
55
+
56
+ ::: algorithmic
57
+ Parse the source code into syntax tree $\phi$
+ Initialize $\bm{S}_1, \bm{S}_2, \bm{S}_3, \bm{S}_4$ with empty sequences
+ Branching nodes group $t \gets \mathrm{empty\ string}$
+ Get next node $\bm{\xi}$ using pre-order traversal for $\phi$
+ Group with previous branching nodes: $t \gets t+\bm{\xi}$
58
+
59
+ Append grouped branching nodes: $\bm{S}_3 \gets \bm{S}_3 + t$
+ Append leaf node: $\bm{S}_4 \gets \bm{S}_4 + \bm{\xi}$
+ Append packed branching nodes: $\bm{S}_2 \gets \bm{S}_2 + t$
+ Add branching nodes to outline: $\bm{S}_1 \gets \bm{S}_1 + t$
+ $t \gets \mathrm{empty\ string}$
60
+
61
+ Remove the doc-strings and comments, if any
+ Replace $v_i$ in $\bm{S}_4$ with a name pool candidate
62
+ :::
63
+ :::::
64
+
65
+ The encoding algorithm offers two optional ways to further simplify the generation task: *name replacement* and *doc-string/comment removal*. The name replacement feature detects all user-defined names in the parsed tree and replaces them with candidates from a name pool. In this way, the code logic is maintained, while the extra learning burden caused by differences in naming style is avoided. The doc-string/comment removal also naturally takes advantage of the tree-based representation: it reduces the sequence length without hurting the functionality.
66
+
67
+ By parsing and encoding, the program space is reduced and the learning task is simplified. On one hand, thanks to the normalized variable names, the vocabulary can be shrunk while still conveying clearer meanings. For example, if there are two variables called `student_num` and `student_nums` in one program, ChainCoder might tokenize them into two distinct words `user_defined_var_7` and `user_defined_var_8` drawn from the name pool, rather than using the original names, which might cause confusion. The name pool is also much smaller than a natural language vocabulary that must include all possible sub-words of variable names. On the other hand, the token sequence can be shortened by removing the doc-strings and comments without hurting functionality. Some statements that would be tokenized into long sequences by a natural language tokenizer can now be represented very concisely in the syntax tree domain, such as `for i in range(num)` and `res = a if x is None else b`. Moreover, the names of variables/functions/classes, etc. are never broken into multiple sub-words in the syntax tree representation.
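+
+ A naive sketch of this name replacement with `ast.NodeTransformer` follows; it is our own illustration and, unlike a real implementation, does not spare built-in names:
+
+ ```python
+ import ast
+
+ class NameNormalizer(ast.NodeTransformer):
+     """Map user-defined identifiers to a fixed name pool (naive sketch)."""
+     def __init__(self):
+         self.pool = {}
+
+     def visit_Name(self, node):
+         if node.id not in self.pool:
+             self.pool[node.id] = f"user_defined_var_{len(self.pool)}"
+         node.id = self.pool[node.id]
+         return node
+
+ tree = NameNormalizer().visit(ast.parse("student_num = student_nums + 1"))
+ print(ast.unparse(tree))  # -> user_defined_var_0 = user_defined_var_1 + 1
+ ```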
68
+
69
+ **Decoding Step.** The decoding step is used during the inference time. As ChainCoder generates the hierarchical subsequences rather than the source code, this step is applied to convert the resulting subsequences back to the human-readable source code.
70
+
71
+ Given the design of the encoded subsequences, the decoding algorithm is straightforward. We first interleave the tokens in $\bm{S}_3$ and $\bm{S}_4$ and glue them together, so that the serialized syntax tree is obtained. Then, an abstract syntax tree unparse tool is used to convert the syntax tree into the source code.
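+
+ The unparse half is available out of the box in Python 3.9+ as `ast.unparse`; the sketch below hand-builds the tree for `x = 0` (in the real pipeline the tree would come from interleaving $\bm{S}_3$ and $\bm{S}_4$):
+
+ ```python
+ import ast
+
+ tree = ast.Module(
+     body=[ast.Assign(targets=[ast.Name(id="x", ctx=ast.Store())],
+                      value=ast.Constant(value=0))],
+     type_ignores=[],
+ )
+ ast.fix_missing_locations(tree)  # only needed if the tree is later compiled
+ print(ast.unparse(tree))         # -> x = 0
+ ```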
72
+
73
+ <figure id="fig:model arch all" data-latex-placement="t">
74
+ <embed src="figure/Figure1-new.pdf" style="width:80.0%" />
75
+ <figcaption>Overall Model Architecture of ChainCoder, which consists of the sample embedder, the problem description embedder, the traditional transformer encoder, and decoder.</figcaption>
76
+ </figure>
77
+
78
+ As visualized in [1](#fig:teaser){reference-type="ref+Label" reference="fig:teaser"}, the prediction target sequence is composed of four components: the outline ($\bm{S}_1$), the core algorithm structure hint ($\bm{S}_2$), the layout frame ($\bm{S}_3$) and the accessory ($\bm{S}_4$) subsequences. These four subsequences are concatenated, separated by the system-level `<PAD>` token.
79
+
80
+ In the coarse outline $\bm{S}_1$, each token essentially corresponds to a line of code with the smallest indent. For each such line, one layout frame token is selected: the first token, i.e., the one that contains the root node of this line's subtree. $\bm{S}_1$ is thus a discontinuous subset of the entire layout frame subsequence $\bm{S}_3$.
81
+
82
+ The core algorithm structure hint $\bm{S}_2$ is designed to warm up the model before it generates the final solution ($\bm{S}_3$ and $\bm{S}_4$). Since most algorithms contain loops, and the core functional parts are usually implemented inside loops or user-defined functions, we identify the implementations within loops or user-defined functions as the core algorithms and use their layout frame tokens to construct $\bm{S}_2$. This part is a continuous subset of $\bm{S}_3$.
83
+
84
+ The construction of the last two subsequences $\bm{S}_3$ and $\bm{S}_4$ has been discussed in [3.2](#sec:encoding-alg){reference-type="ref+Label" reference="sec:encoding-alg"}. With these subsequences, ChainCoder breaks the code generation task from a single pass generation into four turns. In each turn, ChainCoder focuses on a simpler subtask, i.e., only generates a specific subsequence.
85
+
86
+ The functionalities of $\bm{S}_1$ and $\bm{S}_2$ are analogous to chain-of-thought prompting [@zhang2022automatic; @wei2022chain; @shi2022language], except that they are generated by the model itself rather than by external prompting. They exist only to guide the model's generation procedure: once the entire solution has been generated, they are removed before the answer is checked for correctness. This design follows a simple philosophy: an outline of the algorithm shall be built before the concrete answer is written down, and a layout frame shall be constructed before the detailed variable names/constants/etc. are filled in.
87
+
88
+ We adopt the same autoregressive generation recipe as other LLMs, which asks ChainCoder to predict the next token given all previously generated tokens. The inference procedure is also the same: ChainCoder completes the target sequence token by token until an `<EOS>` token is generated. As $\bm{S}_1$ and $\bm{S}_2$ need not be complete, we apply token dropout so that the model occasionally generates $\bm{S}_3$ and $\bm{S}_4$ with some information in $\bm{S}_1$ and $\bm{S}_2$ missing. We achieve this by randomly dropping tokens in $\bm{S}_1$ and $\bm{S}_2$ with probabilities 0.05 and 0.2, respectively.
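+
+ A minimal sketch of this hint-token dropout (function and variable names are ours, not the paper's):
+
+ ```python
+ import random
+
+ def drop_tokens(tokens, p):
+     """Randomly drop hint tokens so the model learns to produce S3/S4
+     even when parts of the S1/S2 hints are absent."""
+     return [t for t in tokens if random.random() >= p]
+
+ s1 = [f"outline_tok_{i}" for i in range(10)]  # stand-in S1 tokens
+ print(drop_tokens(s1, p=0.05))                # S1 uses p=0.05; S2 uses p=0.2
+ ```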
89
+
90
+ The inputs of program synthesis comprise a problem description in natural language and a group of I/O data. Thanks to the syntax tree parsing, the I/O data can be well aligned across samples based on the values' syntax roles. After alignment and padding, the tokens of the I/O data are arranged into a regular matrix with both sample and token dimensions. The natural language descriptions can be well handled by LLMs such as BERT. However, the one-dimensional sequence modeling of LLMs fails to fully leverage the structure of the aligned I/O data. We bridge this gap with a new transformer architecture, which has a sample embedder submodule that attends to the regular matrix-shaped I/O data. The overall model architecture consists of four components: a sample embedder, a description embedder, a token encoder, and a program decoder. The architecture is shown in [2](#fig:model arch all){reference-type="ref+Label" reference="fig:model arch all"}.
91
+
92
+ **Sample Embedder.** With the syntax-based encoding, alignment, and padding, the I/O data in each instance is now represented as a matrix $W\in \mathbb{R}^{N_\mathrm{sample}\times N_\mathrm{token}\times E}$, where $N_\mathrm{token}$ is the maximum number of tokens across different I/O data samples, $N_\mathrm{sample}$ is the number of samples for this instance, and $E$ represents the token embedding dimension. The sample embedder processes the matrix-shaped aligned I/O data. To track the locations of tokens in the matrix, the tokens are added with two sets of positional embeddings along the token and sample dimensions. At the output side of the sample embedder, it uses first element pooling to reduce the embedding size of each instance to $N_\mathrm{token}\times E$.
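+
+ A PyTorch sketch of such a sample embedder is given below; the maximum sizes, the single self-attention layer over the flattened matrix, and other internals are our own assumptions for illustration:
+
+ ```python
+ import torch
+ import torch.nn as nn
+
+ class SampleEmbedder(nn.Module):
+     """Sketch: embed matrix-shaped I/O data (sizes here are assumptions)."""
+     def __init__(self, E=512, n_heads=8, max_samples=32, max_tokens=256):
+         super().__init__()
+         self.tok_pos = nn.Parameter(torch.zeros(1, 1, max_tokens, E))   # token axis
+         self.smp_pos = nn.Parameter(torch.zeros(1, max_samples, 1, E))  # sample axis
+         self.layer = nn.TransformerEncoderLayer(E, n_heads, batch_first=True)
+
+     def forward(self, W):  # W: (B, N_sample, N_token, E)
+         B, S, T, E = W.shape
+         W = W + self.tok_pos[:, :, :T] + self.smp_pos[:, :S]  # two positional embeddings
+         W = self.layer(W.reshape(B, S * T, E)).reshape(B, S, T, E)
+         return W[:, 0]  # first-element pooling over samples -> (B, N_token, E)
+ ```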
93
+
94
+ **Description Embedder.** Each instance in the I/O data corresponds to a natural language problem description. We utilize the BERT model [@devlin2018bert] to perform natural language understanding, and distill its outputs into four tokens: the first token, the last token, the minimum-pooling token, and the maximum-pooling token. The output dimensions for one instance are $4 \times E$.
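+
+ This four-token distillation can be sketched as follows, assuming the hidden states for a single description:
+
+ ```python
+ import torch
+
+ def distill_description(h):
+     """h: (L, E) BERT hidden states for one description -> (4, E) summary."""
+     return torch.stack([h[0],                  # first token
+                         h[-1],                 # last token
+                         h.min(dim=0).values,   # element-wise min pooling
+                         h.max(dim=0).values])  # element-wise max pooling
+ ```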
95
+
96
+ **Token Encoder, Program Decoder And Training Loss.** From the sample embedder and the description embedder, we obtain the embeddings for the I/O data and the problem descriptions separately. We concatenate these embeddings into a sequence with dimensions $(N_\mathrm{token}+ 4) \times E$. We then apply standard transformer encoder and decoder layers and follow standard training recipes. At the output of the decoder, we ask the model to predict the coarse-to-fine subsequences described in [3.3](#sec:prediction-target){reference-type="ref+Label" reference="sec:prediction-target"} using a cross-entropy loss.
2305.13286/main_diagram/main_diagram.drawio ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ <mxfile host="app.diagrams.net" modified="2023-04-12T10:40:40.639Z" agent="Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/111.0" version="21.1.7" etag="IGit-X5HGCYWgS-AHAeG" type="google">
2
+ <diagram name="Page-1" id="V8kD_jYecfOUZJv0z6bA">
3
+ <mxGraphModel dx="1434" dy="792" grid="1" gridSize="10" guides="1" tooltips="1" connect="1" arrows="1" fold="1" page="0" pageScale="1" pageWidth="850" pageHeight="1100" math="0" shadow="0">
4
+ <root>
5
+ <mxCell id="0" />
6
+ <mxCell id="1" parent="0" />
7
+ <mxCell id="2" value="" style="shape=image;verticalLabelPosition=bottom;labelBackgroundColor=default;verticalAlign=top;aspect=fixed;imageAspect=0;image=data:image/png,iVBORw0KGgoAAAANSUhEUgAAARYAAAB9CAYAAABnCUxiAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjQuMywgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/MnkTPAAAACXBIWXMAAAsTAAALEwEAmpwYAAAXLUlEQVR4nO3dd3hUVfrA8e+bUEInhACChaYUG/YCKiKIKwooK7jC2hDFwlpA0R+4lnV1LSwK6gIqKpZVERAbqCDSFBAQAReRElroCcVAIJnJ+/vj3oRJJOROcjPJMO/nec7DzJl73zk3J7w599wmqooxxvgprqwbYIw5+lhiMcb4zhKLMcZ3lliMMb6zxGKM8Z0lFmOM7yyxmHJBRFREmpd1O4w/LLFEGRFZJyKZIpIhIttE5C0RqR7y+c3uf9JeIXWPiMiUAnFWFVJ3vfu6m4gsEZG9IrJTRL4VkSbuZ8NE5OsC674oIp+Xxjab6GOJJTpdrarVgTOBs4GhIZ/dBKQDN4bUzQIuFJF4ABE5BqgInFGgrjkwyx05jAMGArWAJsArQNCN9yjQVERucde9wP3e/v5vqolGlliimKqmAlOAUwBE5ATgEuB2oLOINHAX/REnkbRx318EzABWFqhbo6qb3boUVZ2ujt9VdYKqbnC/dz/QD3jB/c6xwMOqusmP7RKRdiKyUUTai0iciAwVkfUisl1ExolILT++x5QeSyxRTESOA64EfnKrbgQWquoEYAXQG0BVs4D5wMXuchcDs4E5Bepmua8XAy1FZLiIXBq6q5VLVWcAHwOLgK3AmAJt2y0i7YqxTVcA/wV6qOp3wM1uuRRoClQHXg43roksSyzR6RMR2Y2TGGYCT7v1NwLvu6/fJ//u0EwOJZGLcBLL7AJ1MwFUdS3QHmgEfATsLDiX45oNJAHva4GLzlS1tqrOCXO7rgNGA39S1QVuXW/g36q6VlUzgEeA60WkQpixTSSpqpUoKsA6oONh6tsCAaCB+/4EIAdo477vAOwA6gCb3bqawDa3Lgg0KeQ7zwHWAs+E1CXhjFSGA1uA2mFswy9AhlsucuvUbcvzBZZdAXQJeZ/gLtuorPvCSuHFsv7R4yZAgCUiUrB+CfADzkRsP2AugKruFZHNbt1mVU05XGBV/VFEJuLO5bheBKaq6v0i0hB4AbjNS0NV9eRCProOeENENqnqS27dZpwkmet4nAS6zct3mbJhu0JHARFJAHriTNq2CSkDgBtEpIKqZgILgQdwdmFyzXHrZoXEayci/USknvu+JdAVmOe+vxLo5K6H+z3dReTSEm7KZuAy4F4RudOt+y9wv4g0cXfFngY+VNVACb/LlCJLLEeH7kAmME5Vt+YWnKM1FYAr3OVmAvVwkkmu2W7drJC63TiJZJmIZABTgUnAcyJSAxgF/E1V0wFUdTvOoekxIlIFwD3P5qJwN0SdI0+XAQ+LyG3uNrzjti8FOICTyEw5Ju5+qzHG+MZGLMYY31liMcb4zhKLMcZ3lliMMb6zxGKM8V25PUGu42PdfT9clbJkta/xEhsl+RoPILlRPV/jTRkyXgrW7TyQnvezrZtQ5w+fR9oDVz7he18vWjXf13jt23TyNR5A3TrJvsYbMLrPH/rywK7sfD/bhMSKEenvcptYTOnJCmaXdRNMhASzc8rkey2xxKCsYFZZN8FEiCUWEzE2YokdllhMxGTlWGKJFTmWWEykBIJ2/V6sCAbK5pIdSywxyEYsscN2hUzE2BxL7MgJWGIxEZJtiSVmBLODRS9UCiyxxCAbscSOgO0KmUixydvYYbtCJmJsVyh2BC2xmEixXaHYUVZzLHZ1cwzKDmbnFa9EJF5Efsp9PrN7c+v5IrJaRD4UkUql1mBTbMHsYL7ihR99bYklBmUHAnklDPfiPOMn17PAcFVtDuwC+vrYROOTQCAnX/GoxH1tiSUGBXICecULETkW6AK87r4XnAegfewu8jbOkwJMOZMTCOYrRfGrry2xxKDQXSERuV1EFoaU2w+zyovAQzhPVgTnKYi7Q57tswnncaymnCk4YvHQ3y/iQ1/b5G0MCt0FUtUxFHigeygRuQrYrqqLRKR9qTfO+KrgvMqR+tvPvrbEEoPCPNzcFujqPv0wAed5zy8Btd0nLAaAY4FU3xtqSizoYfcnhG99bbtCMSicyVtVfURVj1XVxsD1wLeq2huYAfzZXewmYHJptdcUXyA7kK8ciZ99bYklBgWC2XmlBAYDD4jIapz98Dd8aZzxVTAQzFeKKey+tl2hGBQIFu8XTFW/A75zX68FzvWtUaZUBIPFO/O2pH1tiSUGBQJ25m2sCIR3rpJvLLHEoGy7CDFm2G0TTMSU1V8xE3k2YjERk227QjEjUPwJ2xIp8qiQiEz3UmeiRzAYzCvm6Bba15Hs70JHLCKSAFQF6opIIpD7aMaa2OnbUc1u9BQ7yuOu0B3AfUBDYBGHEste4OXSbdbhJdesy+Br7yWxWm0U5YtFXzNp3ucMvW4QxyY5ua56QjUyDuyj/6j7i4x3TN0GvHDf0yTVTkJV+fCrj3nr83epVb0mIx4cxrH1GrJp+2YGPDeQvfv2empj/cRknrhpCHVqJKKqTJr7GR/MmJD3ee/LenJ/j7u57MGu7Nm3p8h4dWskMajrPXnbPOWnaUz+8Uua1m/MgD/1o2KFSgRzgrwy9XV+2+zt2dTRMMdSu25NbhjYneqJ1UGVH6YuZvbk+XTufQnndz6TjD37Afjy7emsWOhtuwc/cx8XXHouu9J2c0uXuwBo3qopDzx5D5UqVyQYyGH446/w69LfPMWrmVSda++5gmq1q4LComnLmPflT1x3/5UkNUwEIKFqZQ7sP8ioB9/zFDO+Qhw9Bl1OfIV4JF5Ys3gD8z9bSs2kanTudxEJ1SqzY0MaX4/9nhwPh5LL3eStqr4EvCQiA1R1ZATbVKhgTpBRX73J6i1rqVIpgf/cMYxFa5bw1PgX8pa5o/Mt7Duwz1O8QDDA02Of55e1K6hWpSqTh33EnJ+/p0eH7ny/dB6jJ7zBHT360r9HX54bN9xjzCDDJ7zCyo2rqFq5Cu88/BrzVywkZet66icmc36rc9iSttX7NmuQ16aPY83WFKpUSmDErc/yU8pS+nbow3uzx7NwzRLOaXYGfTv0YfC7j3trYxQklmAwh8mvf03qmq1UrlKJ+0fczm+L1wAw85N5fDfxh7BjTpk4jYnvfMb/PT8wr67/Q7fy9sj3mT9rIeddcjb9H7qV+/o87CleTlD5atwstqRsp1JCRe54tjdrlq5n/PAv85bpfOPFHNh/0HMbg4EcJg2fRvbBAHFxQo+HOrNu+WbO6NiKJdNWsGrhetrfcC6t2zZj+axVRcYrq772cubtVhGpASAiQ0
Vkooic6SW4iNQSkeEhV1IOE5FaxW1sesYuVm9ZC0Bm1gE27NxE3RpJ+Za55OS2zFg221O8Hbt28sta57YT+zL3s3rTWurXqU/H8y5l4rfOWcsTv51Mp/M7eG5j2t50Vm50Onz/wUzWbV1PvdrJADzQ4x5GTBqF4v0hUrsydrNmawrgbPPGtFSSatRBValaqSoAVStXJe33XZ5jBgKBvFJe/b4rg9Q1TgI+mJnF9g07qFW3ZoliLv1xOb/v+T1fnapStbrzc6xeoxpp29M9x8vYvY8tKdsByDqQzc7UdGrUqZ5vmZMvOIllc1aG1c7sg06/xMXHERcfB6oc27I+qxdvAODXeWtp2uY4T7FC+zqS/e3lqNCjqjpeRNoBHYHngf8A53lYdyywHOjpvv8r8CZwbTHamk/92vVo3qApv6YeGraeekJrdmXsJjV9S9jxGtVryMlNW/Hzb0upWyuJHbt2Ak7yqVsrqYi1D++YOg1ocdyJLF/3Py45rS3b9+xkVeqaYsUCqFcrmWb1m7AydRWjv3mLp/4ylNs6/hWROAa+NcRznPKcUA4nsV4tGjU7hvW/bqJJ6+Nod/W5nH3Z6WxctZlPX/+azIwDxY798j/H8PzYf3DXw30REe7uNahYcWon16RBk2RSVx0ajZ7QqhEZe/aTvnV3WLFEhF5D/kSt5Bosm/kbe3ZkcHB/Nprj/EHK2LWf6rWreopVnkcsuTtpXYAxqvoF4PU2hM1U9TFVXeuWJ4CmhS0ceq+I1EXrCg2aUCmBx3oN5tWpb7D/YGZefYdTL2LGcm+jlVBVE6rw6uDh/OP1Z8nI/ONuVDgjjFxVKlfhudufZNjHIwkEg9zSuQ+jPhsbdpxcCRUTGNpjEKO/eZP9WZl0OetyxnzzFjeOvJMx37zFfVfd6TlWNIxYclVKqMjNQ3ryyZipHMzMYu4XC/ln3xEMu2cUe9Mz6Hrb5SWK3+2GK3n56de47uKbeOXp13jo6XuL1cZeg65i6pszOZiZlVd/arsWLJ/za9jxVJUPnvqSNx+eSP3GSSQ2KP5IraxGLF4SS6qIjAZ6AV+KSGWP6wFkuiMdAESkLZBZ2MKqOkZVz1bVsxud1fiwy8THxfN4r8FMXzqTOSvm5dXHxcXRrtUFfLd8jsemOSrEV+CVh19k8swv+HreNAB27kkjObEuAMmJdUnb4314nNvG5/o9ydQF05ixZDbHJjeiYd1j+O+QN/j0Hx9Qr3Yy7z3yGkk163iON7THQGYsn833KxcA0PHU9sxdOR+A2St+oEXD5p7bFy2JJS4+jpuH9GTxd8tY9r3zHzRj9z40R1GFeVMXcfxJJTtA2fmajsz6ai4AM6bMptXpLcJuY6+BV7F09q+sWHBoEjkuTmh1bnOWf+9tIvhwsjKz2bRyGw2aJVO5akUkzjl+Uj2xKhm793uKEXpTr0g+ncFLgugJfAV0VtXdQB3gQY/xBwCviMg6EVmHczTpi2K0M8+gbvewfscmJvzwab76s5qezoadm9i5Ny2seP8a8CRrNq5l7Kfj8uqmL/iOazt0A+DaDt2YNn9GWDH//tfBpGxdz3vffgTAms1ruXxwd7o+ej1dH72e7bt30PuZfqTt9Zaw7utyJxvTUpm04PO8urSMdE49vjUAbRqfQmq69wlhr5fRl7Ve93Vl+8adzJx06A9IjcRDcxinXtiKreu3l+g70ran0ebcUwE484LT2bQuvNvKdLuzEztS0/nh88X56puedjw7N+9ib3pGWPESqlemUpWKAMRXjOf4Vsewa8seNq3cRvMzjweg5flNSfl5k6d45XaORVX3i8h2oB2wCgi4/3oxBugDrHffd8E5hP1U2C0FTjm+FZ3aXMraresY1d85SjN2+rssWLWI9qdc5HnSNtdZrc7gmku78uu63/hsuHNLz2HvvsSoCa8z8sFh9Ox4Lak7nMPNXp3e7FS6nNeZValreO+R1wF49dPXmPvL/LDaluvkY1vS8bRLSNm2npdvex6At2e8z4gvRnPH5bcQHxdHViCbEV+O9hyzvI9UAJq0Po5zLjudzSnbGDjyDsA5tHxG+1No1LQBqpC+bTfjR35eRKRD/j78Idqcexq1EmsyfvY43nzpXZ4fMoIBQ+8gPj6erKxsXhjq/QDo8S0b0uaS1mxdv4P+z/cGYPr7c1n10zpOadsi7ElbgGq1qtDp5guROEFEWLVoPeuWpZK+ZQ9X3NaO87u1YcfGdH6ZW75PLRDVI88fiMhjwNlAC1U9SUQaAuNVtW2RwUWaAuOBG4CLcSZvr1bVIk/g6PhY9/AnNoqQssRbZ3iV2Kh4k7pHktyonq/xpgwZLwXrQn+205745A+fR9oDVz7he18vWlW8RF6Y9m06+RoPoG6dZF/jDRjd5w99+dh1w/P9bJ8Yf39E+tvLUaFrgDOAxQCqujn38HNRVHWtiPwF+ATYgLM7Vegci4kMO5U/dpRVX3tJLFmqqiKiACJSragVRGQZ5DuUUgeIB+aLCKp6WrFaa3wRLOdzK8Y/ZfU4XS+J5SP3qFBtEekH3Aq8VsQ6V5W4ZabUlNUVrybyyuO1QrmScR5WtBdoAfwd50S5Qqnq+iN9bsqWjVhiR1ndIsNLYumkqoOBb3IrRGQYzg12TRQqwU2VTZQp4Q3Ti+1It024E7gLaCoiS0M+qgHMLe2GmdKTY5O3MSNYRrfIONKI5X1gCvAMEHq55++qGt6pqKZcCZTRpfQm8srdrpB7rske4C+Ra46JBJtjiR1ldVTIHlgWg8J5gJWIHCciM0TkfyLyi4jc69bXEZFvRGSV+29iqTfchC304XRFzbf42deWWGJQTnYwr3gQAAaqamvgfOBuEWmNs3s8XVVPBKaTf3fZlBPZgex8pQi+9bUllhgUzA7mlaKo6hZVzT3r+ndgBc49j7sBb7uLvQ10L53WmpII5+pmP/vaEksMygkG80roPXDccnth64lIY5zLO+YD9VU1945aW4H6pd9yE65gMJCveO3vkva1PVcoBoWOVFR1DM5V6EckItWBCcB9qrpX5NC1bKGXfJjyJTuYle+9l/72o68tscSgcO/cLiIVcX7R3lPViW71NhE5RlW3iMgxQMlujGJKRbhHhfzqa9sVikHhTN6K8+fqDWCFqv475KNPgZvc1zcBk31vqCmx7EBWvnIkfva1jVhiUJgjlrY499FZJiJL3Lr/A/6Fc4FqX5wbefU8/OqmLBXcFSqCb31tiSUGaaDoB13lLas6h0MPqyvoMl8aZEpNIMf7yZB+9rUllhhUVk/HM5EX5ojFN5ZYYpDHE+PMUcASi4kYSyyxwxKLiZicbO9zLCa6WWIxEZMTxuStiW6Bcng/FnOUsl2h2GEjFhMxOVmWWGJFVvBgmXyvJZYYZHMssSM7x0YsJkLURiwxI8t2hUyk2IgldmSX0a4QqhrVBbi9vMeMhjZGS4nFvonGvj4arm4u9MZE5ShmNLQxWsRi30RdXx8NicUYU85YYjHG+O5oSCxF3laxHMSMhjZGi1jsm6jra3Enh4wxxjdHw4jFGFPOWGIxxvguqhOLi
DwuIoPKuh2RIiJ/E5EVIvJeWbcl0qyvo4udeRtd7gI6quqm3AoRqaCq9pT3o09U93XUjVhEZIiI/CYic4AWbl0zEZkqIotEZLaItAwjXh8RWSAiS0RktIjEi0iGiPxTRH4WkXkiEtZT/gqJ+ZaILBeRZSJyf5ibjYiMApoCU0Rkj4i8IyJzgXfCjRUtrK+juK/L+tTfME9tPgtYBlQFagKrgUE4D6o+0V3mPOBbj/FaAZ8BFd33rwI3Agpc7dY9BwwNo42Hi/kY8E3IMrWLuf3rgLrA48AioEpZ94n1tfX14Uq07QpdBExS1f0AIvIpkABcCIwPeRRkZY/xLsP5Bf7RXbcKzlPesoDP3WUWAZ3CaOPhYk4FmorISOAL4Osw4hXmU1XN9CFOeWV9fUjU9XW0JZbDiQN2q2qbYqwrwNuq+ki+SpFB6v7ZAIKE93MqLOYQoDPQH+eBT7cWo72h9pVw/WhkfR0lom2OZRbQXUSqiEgN4GpgP5AiIteB85hIETndY7zpwJ9FpJ67bh0ROaGEbSwsZpyqTgCGAmeW8DtigfV1FIuqEYuqLhaRD4GfcYaxP7of9Qb+IyJDgYrAB+4yRcX7n7vO1yISB2QDd5ewjYeL+QAwyX0P8EihAQxgfR3t7JR+Y4zvom1XyBgTBSyxGGN8Z4nFGOM7SyzGGN9ZYjHG+M4SSwEiUltE7irGel+KSO1SaJIpJdbXpccONxcgIo2Bz1X1lAL1UXNlqfHG+rr0RNUJchHyL6CZiCzBOeHpALALaAmcJCKfAMfhXLfykqqOARCRdcDZQHVgCjAH57qWVKBbtF3rESOsr0tLWV8FWd4K0BhY7r5uj3OdRpOQz+u4/1YBlgNJmv9q1MZAAGjj1n8E9Cnr7bJifR3JYiOWoi1Q1ZSQ938TkWvc18cBJwJpBdZJUdUl7utFOL+ApvyzvvaJJZai5V1ZKiLtgY7ABaq6X0S+wxkmFxT6wNwgzl88U/5ZX/vEjgr90e9AjUI+qwXscn/RWgLnR65ZphRYX5cSG7EUoKppIjJXRJYDmcC2kI+nAv1FZAWwEphXFm00/rC+Lj12uNkY4zvbFTLG+M4SizHGd5ZYjDG+s8RijPGdJRZjjO8ssRhjfGeJxRjju/8HgdSmXHV8hKsAAAAASUVORK5CYII=;" vertex="1" parent="1">
8
+ <mxGeometry x="320" y="400" width="278" height="125" as="geometry" />
9
+ </mxCell>
10
+ <mxCell id="3" value="" style="shape=image;verticalLabelPosition=bottom;labelBackgroundColor=default;verticalAlign=top;aspect=fixed;imageAspect=0;image=data:image/png,iVBORw0KGgoAAAANSUhEUgAAARUAAAB9CAYAAACMPvdhAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjQuMywgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/MnkTPAAAACXBIWXMAAAsTAAALEwEAmpwYAAAVV0lEQVR4nO3dd5xU1fnH8c93l4UF6c2KgAhBbGA0VmyoKKKSxJLEQozdWFD5RY3mZ4uJMYkFIyJqEI3+xIYtiBURUUSaCoIIUpQufWVhd2ae3x/3ss6urswsd3Z2huf9ep3Xzpy595kzs2efPffcJjPDOeeiUpDtBjjn8osnFedcpDypOOci5UnFORcpTyrOuUh5UnHORcqTinMuUp5Ucoyk+ZJKJZVIWibpUUmNk17/rSSTdEZS3fWSXq0S54tq6n4VPj5F0jRJ6yR9I+ltSR3D1/4p6fUq694j6ZVMfGaXWzyp5KaTzKwxsB+wP3Bj0mv9gVXAOUl17wKHSCoEkLQjUAT0qFK3O/CupN2Bx4BrgGZAR+B+IB7G+xOwm6Rzw3UPDt/34ug/qss1nlRymJktAl4F9gKQ1B44ArgQ6C1ph3DRjwiSSPfweU9gDPB5lbq5ZrY4rJtnZm9ZYL2ZPWdmC8P33QBcAPwjfM9/A9eZ2dc1/SySukp6Q9IqSZ9LOj3ptT6SPpO0XtIiSQNr+j4u8zyp5DBJ7YA+wNSw6hxgkpk9B8wEzgQwszLgQ+DwcLnDgXHAe1Xq3g0fTwG6Srpb0lHJm1ebmdkY4FlgMrAUGFqlbWskHZbi59gOeAN4EmgL/AoYLKlbuMgjwEVm1oQggb6dSlyXHZ5UctMLktYQJIWxwF/C+nMI/jAJfyZvAo3luwTSkyCpjKtSNxbAzL4EjgR2Bp4Gvqk6dxMaB7QCnrQqJ5GZWXMzey/Fz9MXmG9mw8wsZmZTgeeA08LXy4Fukpqa2Wozm5JiXJcNZuYlhwowHzjmB+oPBWLADuHz9kAC6B4+PxpYAbQEFod1TYFlYV0c6FjNex4AfAn8NamuFcEI5W5gCdA8jc8wAygJS0/gD0AZsCaplAAPJL3/i8BqgsR3cLZ/D16qL/WiTFAuq/oDAqZJqlo/DfiAYNL1AmA8gJmtk7Q4rFtsZvN+KLCZfSTpecK5m9A9wGgzu0rSTsA/gPNTaaiZ7Zn8XNIuwFgzO7a69wdOkVQEXEYwemqXynu52uebP3lAUjFwOsEEbfekcjnwG0n1zKwUmARcTbDZstl7Yd27SfEOk3SBpLbh867AycCE8Hkf4NhwPcL36SfpqBp+hFeALpLOllQUlgMk7SGpvqQzJTUzs3JgHcEIzNVRnlTyQz+gFHjMzJZuLgR7ZeoBx4fLjSWYCE2e6xgX1r2bVLeGIIl8KqkEGA2MBO6U1AQYAlxhZqsAzGw5we7noZIaAoTH0fRMpfFmth44jmCCdjHBZtXfgAbhImcD8yWtI9htfWYqcV12KNxmdc65SPhIxTkXKU8qzrlIeVJxzkXKk4pzLlKeVJxzkcr4wW9HXNcnI7uXGjXZLhNhad+hfUbiDh84JCNxS5eUqGrdNxtXVXznrYtbfu/1bLi6zy0Z6QfLVy/LRFjattg+I3G77NY1I3Ev/tcZ3/s9b1xdXuk7L25RVCt9wY+ozUNl8fJsN8HVAfHy7Bwj6EklD5XFy7LdBFcHeFJxkfGRigNPKi5CZQlPKg4SnlRcVGLxWLab4OqAeCw7p+B4UslDPlJx4Js/LkI+p+IAEjFPKi4i5Z5UHBAvj295oQzwpJKHfKTiAGK++eOi4hO1Dnzzx0XIN38cQNyTiouKb/44yN6cip+lnIfK4+UVJVWSCiVN3Xw/ZEkdJX0oaY6kEZLqZ6zBLiPi5fFKJRVR9ANPKnmoPBarKGm4kuCuhpv9DbjbzHYnuN/OeRE20dWCWCxRqaRoq/uBJ5U8FEvEKkoqwvvunAg8HD4Xwc3Hng0XGU5wxX6XQxKxeKWyJVH1A08qeSh580fShZImJZULf2CVewjuErj531krYI2Zbc5KXxPcAtXlkKojlRT6wj1E0A98ojYPJW/2mNlQqtw8PZmkvsByM5ss6ciMN87VmqrzKD/WF6LsB55U8lCau5QPBU4O7zpYTHB/5XuB5uGdDWPALsCiyBvqMiqewiZPksj6gW/+5KF0JmrN7Hoz28XMOhDcIfBtMzsTGAOcGi7Wn+AG6S6HxMpjlcqPibIfeFLJQ7F4eUXZCtcCV0uaQ7Bt/UgkjXO1Jh6LVyo1lHY/8M2fPBSL16wDmdk7wDvh4y+Bn0XWKFfr4vGaHVG7tf3Ak0oeisX8iFoHsfSOU4qMJ5U8VO4nFDr80gcuQtn6D+XqFh+puMiU++aPA2I1n5zdKp5U8lC8hhO1Lr9kqx94UslDfpEmB9vg5k+bZq254fRraNG4BYbx8sTRPDc+OK7mF4ecRL+D+pKwBBNmfcSQV/+dUszWTVox8OTLaLFdcwzj1alv8uJHo9ht+w5cfsIFFNWrTzwR5/7RDzN78ZyU21qvoB4Dj72UeoX1KFABUxZ+wiufvs7ZB55G+1btAFi+/huGf/AUm2Lp3R1wyF2DOeHYE1jxzQr2PyrYc/e/f/gTfXufSCKRYMXKFVx45UUsWbY05Zi5NKfSvHVTfnNNPxq3aAxmfDB6CuNe/JDeZx7BQb33o2TtBgBGDX+LmZNS/521bNuCi/+3P81aNsHMGPPieF57egy/vuzn9Dhsb2LlcZYvWsHQPz/OhpLSrLa1sF4Bpww4moJ6hRQUii+nfsWkUTPY8/Dd2eeoLjRr04RHrx3Jxm/T61vb3ERtPBHn/v8+zBeL59KwfkMeunwQk76YQsvGLTh0j4M4797fUx6P0Xy7ZqnHtDgPvfUYc5fOo2H9Ygb97m9MnfcJ5x19Fk+Me4ZJc6dxQKcenHf0WVz7n5tTjhtLxLj7rSFsipVRoAL+57jLmLF4Fs9MfomNsU0AnLrfSRzZ5VBe+2xMWt/D408/wZBhD/LwoIcq6u4efA+33nkbAJeedwnXX309V1x7ZertzaGkEo8nePHh11k0dykNGtbnqkEXMnvKXADGvjCBd57/oEZxE/E4Tw56jvmzv6K4UQNuG3Ydn06cyacTZzHigRdJxBOccWk/TjqnNyMGv5DVtsZjCV4a9A6xshgFBeKUq3ux8LOlLP3yGxZOX8zJVx5do7h1eqQi6UpgGLCe4LToHsB1ZvZ6Td941frVrFq/GoDSslIWrFhIm6at6fuz43ly7DMVu0XXfLs25ZirS9awumRNGHMjX61cRKsmLTEzGtVvBECjBo1YGb5vOjaPQAoLCiksKMCgIqEAFBUWUZNbN42fMJ5dd9m1Ut36kvUVjxs1aoRZepFzKa
msX13C+tUlAGwqLWP5whU0a910q+OuWbmONSvXAbBxwyYWz19KyzbNmT7xu0uFzJ0xjwOO6pH1tgLEyoLfWUFhAQWFBWDGyq/XbF3MupxUgN+Z2b2SegMtgLOBx4EaJ5VkO7RoS+edOvHZV7O4uM/v2KfDnpx/XH/KYmU8MOphZn39Rdox2zZrQ6ftO/L5oi948I1H+fOvb+T8Y85GKuCaR29IO54k/nj8ANo0ac3Y2e8zf+VCAM456Az22qkrS9Yu49kpL6cdtzo3X3cTZ576a9auX8fxp/ZJa91cSirJWrRtxs6ddmTBrK/p2K0dh530M/bvtS9ffbGYlx5+ndKSjTWK23qHlrTv0o65M+ZXqj+87yF8+ObkOtFWSfzy2mNp1qYx09+dw/IFq2rUrmTZ6gepnvuj8OeJwONmNiOp7vsLJ123Ycm0hT8auGH9Ym498wbue3koGzaVUlhQSNNGTbhk8FU8MOoRbv7N9Sk28TvFRcXc+MuBPPjGMDaUlXLiT49j6BuPcs59lzD0jUcZ0PeStGOaGbe/ejfXj7yNDq3asVOzHQB4bMIIrh15K0vXLWf/9t3Tjludm++4hc77d+Wp50dw8bkXpbVuLBarKLmifnERv73hdF4YOppNpWWM/+8kbj9vEP+8bAjrVpVw8vnH1Shug4YNuPKvF/Kfe56ldMN3f+gn9z+eRDzO+Ncm1om2mhnP3vE6j9/4Mm3bt6TFjqlv9lcnuR/UZl9INalMlvQacALwmqQmfHchl+8xs6Fmtr+Z7b9j912rW4zCgkJuPesG3pz2DuNmvA/AirXf8O704PGsr2eTMKPZdqkPMQsLCrnxl9cwZvo43v886DDH7H0k4z//EIBxMz/gJzvtnnK8qkrLN/L5srnsudNPKurMjI8WTKPHrnvXOG51Rjw/gn4nnpLWOrmWVAoKC/jtDacz5Z1P+fT9WQCUrPkWSxhmMGH0ZHbtkv41ogoLC7jyLxfw/msTmTR2WkV9zz4H0ePQvRh807A609bNykrLWTx7Obt226HGMTZLvlhXbd5hIdWkch4wHnjFzDYQbAIN2No3v/bUASxY/hVPvzeyou69zybQo9M+AOzSemeKCuux9tt1KccccOIlfLVyESMnvlJRt7JkFXvv2g2A7h32YtGq1PekADRusB0Ni4oBKCqsxx47dmbpuhW0adyqYpl9d+7GsrXL04pbnU4dO1U87tu7L7PnzE5r/VRPd68rzhhwMsu/+oaxIydU1DVp0bji8d6H7MHSBel/t+ffcDaLFyzl1aferqjb56Bu9D3rWO76wxDKNqX/h5aJthY3bkD9hkUAFBYVskvXHVi9LPU+X51sjVRSnVO5n2BkcjRwFcGE7V3AATV9473bd6P3fr2Yu2QeD19xHwAPvTacUZNe59pTBzBswGBi8Rh/eeaulGPuuUtXjtnnCOYtW8C/zv87AMPHPMmg/z7IRcedS2FBAWWxcgaNejCttjZr2JT+B/+KAgmpgMkLPmb6opkMPO5SiouKAbFo9WKenPhcWnEBhg8eRs9DetK6ZSvmTP6c2/5xO8f36k3nTp1JJBIs/HphWnt+ILfmVDp2a8cBvfZl8bxlXHNfsJk3avhb9DhyL3bebQfMYNWyNTxz3ytbiFRZl3060fOEA1k4ZxG3Dw82oZ8e8hLnXH0a9YqKuO7eywGYM2M+w+78v6y2tVHTYo4++0BUICQxd8pCFk5fwl5HdKb7MV1p1LSY0/54PAtnLGHskx+lHDdb/UCp7FmQNMXM9pM01cx6hHUfm9m+W1r3iOv61GSnyBY1arJdJsLSvkP7jMQdPnBIRuKWLin53tzWMTf1q/jO37zlhWrnvmrT1X1uyUg/WL56WSbC0rbF9hmJ22W3rhmJe/G/zvje7/mm0+6u9J3f8sxVtdIXUh2plEsqhGCvqaQ2/MicissuP0zfQd0/TH8QMBJoK+l2gsvL3ZixVrmtEs+RuRSXWdm6/W1KScXMnpA0GehFsCu5n5nN3MJqLkuydXaqq1vq+sFvmNksYFYG2+Ii4iMVB9m7BIafpZyHtuIixy6PbOWFz2vMk0oeSvhErQPiWboEhieVPBTL0invrm7xzR8XGZ9TcZC9vT9+M7E8lM4NpCS1kzRG0meSZoSXuUBSS0lvSPoi/Nki4w13kUq+qdyW5lei7AeeVPJQojxeUVIQA64xs27AQcDvJXUDrgPeMrPOwFvhc5dDymPllcoWRNYPPKnkoXh5vKJsiZktMbMp4eP1wExgZ+AUYHi42HCgX2Za6zIlnbOUo+wHnlTyUCIeryjJ17YJy4XVrSepA8FV/T4EtjezJeFLS4HMnAzjMiYej1UqqfaFre0HPlGbh5JHKGY2FBi6pXUkNQaeAwaY2Trpu3PPzMwkZeSEQJc55fHKF8pOpS9E0Q88qeShdK+iLqmIoCM9YWbPh9XLJO1oZksk7QhEc7EYV2vS3fsTVT/wzZ88lM5ErYJ/RY8AM80s+eI1LwH9w8f9gRcjb6jLqPJYWaXyY6LsBz5SyUNpjlQOJbiQ+aeSpoV1fwTuAJ6WdB6wADg9yja6zKu6+bMFkfUDTyp5yGKpX+rGzN6j+ouY94qkQS4rYonUD4KMsh94UslD2boznatb0hypRMaTSh5K8aA3l+c8qbjIeFJx4EnFRShR7pcPdp5UXIQSaUzUuvwV8+upuKj45o8DH6m4CCXKPKk4KItvysr7elLJQz6n4gDKEz5ScRExH6k4oMw3f1xUfKTiAMqztPmDmdWZAlzocTMXN1dKrn2vuRY306WunaVc7QWEPO42Jde+11yLm1F1Lak453KcJxXnXKTqWlLZ4mUPPe42Ide+11yLm1EKJ4Sccy4SdW2k4pzLcZ5UnHORqhNJRdLNkgZmux2pkHSFpJmSnogoXgdJ06OIleu8H+RHP/AjatN3KXCMmX29uUJSPTPzu6JvW7wfVCNrIxVJN0iaLek94CdhXSdJoyVNljROUtcaxD1L0kRJ0yQ9KKlQUomk2yV9LGmCpBrdbU/SEGA34FVJayU9Lmk88HhN4v1A/N0kTZV0QNjOTySNzOebo3s/+MH4ud0PsnEYL/BT4FOgEdAUmAMMJLgBdOdwmQOBt9OMuwfwMlAUPh8MnAMYcFJYdydw41a0fT7QGrgZmAw03MrvogMwneAPaiqwL/AJcET4+q3APdk+9Nr7gfeDVEu2Nn96AiPNbAOApJeAYuAQ4JmkWy02SDNuL4KO+lEYoyHBHdXKgFfCZSYDx25N45O8ZGalEcRpQ3CTpl8Ai4DmZjY2fG048EwE71EXeT+oLC/6QV2aUykA1phZ962IIWC4mV1fqVIaaGG6B+JE97m/jSjOWmAhcBgwIqKYucr7QY73g2zNqbwL9JPUUFIT4CRgAzBP0mkQ3IZR0r5pxn0LOFVS2zBGS0nto2x4hpQBPycYop8IrJbUM3ztbGBsdSvmOO8HleVFP8jKSMXMpkgaAXxMMCz9KHzpTOABSTcCRcBT4TKpxv0sXPd1SQVAOfD7SBufIWb2raS+wBsEN8n+u6RGwJfAu
VltXIZ4P/i+fOgHfpi+cy5SdeLgN+dc/vCk4pyLlCcV51ykPKk45yLlScU5F6ltIqlIai7p0hqsN0pS8ww0yWWB94PasU3sUpbUAXjFzPaqUu9nlW5DvB/Ujrp0mH4m3QF0kjSN4ECojcBqoCvQRdILQDuC807uNbOhAJLmA/sDjYFXgfcIzktZBJwS0fkervZ4P6gN2T6jsTYK4Rmg4eMjCc7V6Jj0esvwZ0OCM0VbWeUzUTsAMaB7WP80cFa2P5cX7wd1sWwrI5WqJprZvKTnV0j6efi4HdAZWFllnXlmNi18PJmgg7nc5v0gA7bVpFJxVqmkI4FjgIPNbIOkdwiGv1Ul35g2TvDfzOU27wcZsE3s/QHWA02qea0ZsDrsSF2Bg2qvWa6WeT+oBdvESMXMVkoar+DCwqXAsqSXRwMXS5oJfA5MyEYbXeZ5P6gd28QuZedc7dlWNn+cc7XEk4pzLlKeVJxzkfKk4pyLlCcV51ykPKk45yLlScU5F6n/B9QBIG8REzncAAAAAElFTkSuQmCC;" vertex="1" parent="1">
11
+ <mxGeometry x="588" y="400" width="277" height="125" as="geometry" />
12
+ </mxCell>
13
+ </root>
14
+ </mxGraphModel>
15
+ </diagram>
16
+ </mxfile>
2305.13286/main_diagram/main_diagram.pdf ADDED
Binary file (12.9 kB). View file
 
2305.13286/paper_text/intro_method.md ADDED
@@ -0,0 +1,59 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Introduction
2
+
3
+ Multilingual joint learning is often motivated by the idea that when multilingual large language models (MLLMs) learn information for multiple languages simultaneously, they can detect and leverage common universal patterns across them. Thus, these models can exploit data from one language to learn generalisations useful for another, obtaining impressive performance on zero-shot cross-lingual transfer for many languages [@wu2019beto]. Various studies suggest that representations created by popular MLLMs, such as mBERT and XLM-R [@conneau2020unsupervised], are not fully language-agnostic [@doddapaneni2021primer; @singh2019bert], but instead strike a balance between language-agnosticism and capturing the nuances of different languages through a language-neutral and language-specific component [@libovicky2020language; @gonen2020s; @tanti2021language]. This naturally raises the question of how much models really benefit from multilingual data and cross-lingual sharing, and under which conditions this occurs. Many works have studied the encoding of cross-lingual patterns within MLLMs by either focusing on probing for particular cross-linguistic differences [@ravishankar2019multilingual; @choenni2022investigating], or by analyzing the distributional properties of representational language subspaces [@yang2021simple; @rajaee2022isotropy; @chang2022geometry; @chi2020finding]. Yet, it is not straightforward how to translate these results into model behavior at inference time. We aim to directly study how much influence languages exert cross-lingually on the predictions for individual languages.
4
+
5
+ In this study, we take a step back in the training pipeline to study the extent to which the model exploits its multilingual training data when making predictions for a particular test language. We hypothesise that if a model performs cross-lingual information sharing, then it would at inference time (to some extent) base its predictions on training data from multiple languages. Analyzing the cross-lingual sharing mechanism from the data reliance perspective leads to a set of interesting questions that we explore:
6
+
7
+ 1. Given a test language $A$, does our MLLM tend to base its predictions on data from $A$ itself or does it (also) employ data from a language $B$ that it was exposed to during task fine-tuning?
8
+
9
+ 2. Do MLLMs only employ data cross-lingually out of necessity, e.g., in scenarios where in-language fine-tuning data is unavailable or insufficient?
10
+
11
+ 3. Do languages support each other by adding similar information to what is relied upon from in-language data (i.e., reinforcing the model in what it already learns), or do they (also) provide complementary information?
12
+
13
+ 4. How do cross-lingual sharing dynamics change over the course of fine-tuning?
14
+
15
+ 5. Is the cross-lingual sharing behaviour similar when the test language was seen during fine-tuning compared to when it is used in a zero-shot testing scenario?
16
+
17
+ To study this, we use TracIn [@pruthi2020estimating], a training data attribution (TDA) method to identify a set of training samples that are most informative for a particular test prediction. The influence of a training sample $z_\textit{train}$ on a test sample $z_\textit{test}$ can be formalized as the change in loss that would be observed for $z_\textit{test}$ if $z_\textit{train}$ was omitted during training. Thus, it can be used as a measure of how influential $z_\textit{train}$ is when solving the task for $z_\textit{test}$.
18
+
19
+ To the best of our knowledge, we present the first approach to studying cross-lingual sharing at the data level by extending the use of a TDA method to the multilingual setting. We find that MLLMs rely on data from multiple languages to a large extent, even when the test language was seen (or over-represented) during fine-tuning. This indicates that MLLM representations might be more universal than previous work suggested [@singh2019bert], in part explaining the 'surprising' effectiveness of cross-lingual transfer [@pires2019multilingual; @wu2019beto; @karthikeyan2020cross]. Moreover, we find that cross-lingual sharing increases as fine-tuning progresses, and that languages can support one another by playing both reinforcing as well as complementary roles. Lastly, we find that the model exhibits different cross-lingual behaviour in the zero-shot testing setup compared to when the test language is seen during fine-tuning.
20
+
21
+ # Method
22
+
23
+ TDA methods aim to explain a model's predictions in terms of the data samples that it was exposed to during training. Proposed methods include measuring the similarity between learned model representations from training and test samples [@rajani2020explaining], and influence functions [@koh2017understanding] that aim to compute changes in the model loss through Hessian-based approximations. While these methods compute influence between training samples and the final trained model, discrete prediction-based methods like Simfluence [@guu2023simfluence] base influence on the full training trajectory instead. TracIn [@pruthi2020estimating], used in this paper, is somewhere in between these methods: rather than using a direct loss difference, it tracks the similarity between gradients of training and test samples over model checkpoints. In NLP, TDA methods have so far mostly been used for unveiling data artifacts and explainability purposes [@han2022orca], for instance, to detect outlier data [@han2020explaining], enable instance-specific data filtering [@lam2022analyzing], or to fix erroneous model predictions [@meng2020pair; @guo2021fastif].
24
+
25
+ Many methods have been proposed to investigate the cross-lingual abilities of MLLMs [@doddapaneni2021primer]. @pires2019multilingual and @karthikeyan2020cross were the first to demonstrate that MLLMs share information cross-lingually by showing that they can perform zero-shot cross-lingual transfer between languages with zero lexical overlap. This led to a body of works that attempt to understand *how* and *where* this sharing emerges.
26
+
27
+ One line of study focuses on how MLLMs distribute their parameters across languages by analyzing the distributional properties of the resulting language representations. In particular, these studies aim to understand to what extent MLLMs exploit universal language patterns for producing input representations in individual languages. As such, @singh2019bert find that mBERT representations can be partitioned by language subspaces, suggesting that little cross-lingual sharing emerges. Yet, other works show that mBERT representations can be split into a language-specific component and a language-neutral component that facilitates cross-lingual sharing [@libovicky2020language; @gonen2020s; @muller2021first]. In addition, @chi2020finding show that syntactic information is encoded within a shared syntactic subspace, suggesting that portions of the model are cross-lingually aligned. Similarly, @chang2022geometry more generally show that MLLMs encode information along orthogonal language-sensitive and language-neutral axes.
28
+
29
+ While the previous works studied parameter sharing indirectly through latent model representation, @wang2020negative explicitly test for the existence of language-specific and language-neutral parameters instead. They do so by employing a pruning method [@louizoslearning] to determine the importance of model parameters across languages, and find that some parameters are shared while others remain language-specific. Moreover, @wang2020negative focused on the negative interference effects [@ruder2017overview] of cross-lingual sharing i.e., parameter updates that help the model on one language, but harm its ability to handle another. They show that cross-lingual performance can be improved when parameters are more efficiently shared across languages, leading to a body of works on finding language-specific and language-neutral subnetworks within MLLMs to better understand [@foroutan2022discovering] and guide [@lin2021learning; @choenni2022data] cross-lingual sharing at the parameter level. In contrast to these works, we do not study cross-lingual sharing at the level of model parameters, but instead consider sharing at the data level. To the best of our knowledge, we are the first to explore this direction.
30
+
31
+ We conduct model fine-tuning experiments in three multilingual text classification tasks.
32
+
33
+ The Cross-Lingual Natural Language Inference (XNLI) dataset [@conneau2018xnli] contains premise-hypothesis pairs that are labeled with the relationship that holds between them: 'entailment', 'neutral' or 'contradiction'. The dataset contains parallel data in 15 languages. The original pairs come from English and were translated to the other languages. We use English, French, German, Russian and Spanish for model fine-tuning and testing.
34
+
35
+ The Cross-Lingual Paraphrase Adversaries from Word Scrambling (PAWS-X) dataset [@yang2019paws] and task requires the model to determine whether two sentences are paraphrases of one another. To create this dataset, a subset of the PAWS development and test sets [@zhang2019paws] was translated from English to 6 other languages by professional translators, while the training data was automatically translated. We use English, French, German, Korean and Spanish for model fine-tuning and testing.
36
+
37
+ The Multilingual Amazon Review Corpus (MARC) [@keung2020multilingual] contains Amazon reviews written by users in various languages. Each record in the dataset contains the review text and title, and a star rating. The corpus is balanced across 5 stars, so each star rating constitutes 20% of the reviews in each language. Note that this is a non-parallel dataset. We use Chinese, English, French, German and Spanish for model fine-tuning and testing.
38
+
39
+ For all tasks we add a classification head on top of the pretrained XLM-R base model [@conneau2020unsupervised]. The classifier is an MLP with one hidden layer and uses $\tanh$ activation. We feed the hidden representation corresponding to the beginning-of-sequence token for each input sequence to the classifier for prediction. We use learning rates of 2e-5, 9e-6, and 2e-5 for XNLI, PAWS-X, and MARC respectively, and use AdamW [@loshchilovdecoupled] as optimizer.
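+
+ A minimal PyTorch sketch of such a head, assuming XLM-R base's hidden size of 768 and omitting details such as dropout:
+
+ ```python
+ import torch.nn as nn
+
+ class ClassificationHead(nn.Module):
+     """One-hidden-layer MLP with tanh over the beginning-of-sequence token."""
+     def __init__(self, hidden=768, n_classes=2):
+         super().__init__()
+         self.net = nn.Sequential(nn.Linear(hidden, hidden), nn.Tanh(),
+                                  nn.Linear(hidden, n_classes))
+
+     def forward(self, encoder_states):          # (batch, seq_len, hidden)
+         return self.net(encoder_states[:, 0])   # <s> token representation
+ ```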
40
+
41
+ We fine-tune the full model on the concatenation of 2K samples from 5 different languages, i.e. 10K samples in total, for each task. This allowed us to limit the computational costs of computing influence scores (which increase linearly with the number of training samples), while still obtaining reasonable performance. We also reduced computational requirements by converting each task into a binary classification problem: for XNLI, we followed @han2020explaining by classifying "entailment or not" (i.e., mapping neutral and contradiction instances to a *non-entailment* label); for MARC, we collapsed 1 and 2 stars into a *negative* review category and 4 and 5 stars into a *positive* category. Moreover, we train our models for 10 epochs and use early stopping with a patience of 3. We find that fine-tuning converges at epoch 4 for XNLI, and at epoch 5 for PAWS-X and MARC, obtaining 78%, 83%, and 90% accuracy on their development sets, respectively.
42
+
43
+ Let a training sample $z_i$ from our training set be denoted as $\mathcal{D} = \{z_i : (x_i,y_i)\}^{N}_{i=1}$ for an input sequence $x_i$ and a label $y_i$. @koh2017understanding show that we can compute how 'influential' each training sample $z_i \in \mathcal{D}$ is to the prediction for a test sample $x_\textit{test}: \hat{y}_\textit{test} = f_{\hat{\theta}}(x_\textit{test})$. The influence score for a training sample $z_i$ on a test sample $z_\textit{test}$ is defined as the change in loss on $z_\textit{test}$ that would have been incurred under the parameter estimates $f_{\hat{\theta}}$ if the model was trained on $\mathcal{D}\setminus\{z_i\}$ instead, i.e. $\mathcal{L}(z_\textit{test}, \hat{\theta}_{-z_i}) - \mathcal{L}(z_\textit{test}, \hat{\theta})$. In practice, this is prohibitively expensive to compute as it would require training the model $|\mathcal{D}|+1$ times: once on training set $\mathcal{D}$, and, for each $z_i \in \mathcal{D}$, training on $\mathcal{D}\setminus\{z_i\}$.
44
+
45
+ As a solution, @koh2017understanding show that we can approximate it by measuring the change in loss on $z_\textit{test}$ when the loss associated with the training sample $z_i$ is upweighted by some small $\epsilon$ value, i.e. computing the influence score as $\mathcal{I}(z_i, z_\textit{test})=\frac{d\mathcal{L}(z_\textit{test}, \hat{\theta}_{\epsilon,z_i})}{d\epsilon}$, where $\hat{\theta}_{\epsilon,z_i}$ are the parameters obtained when $z_i$ is upweighted by $\epsilon$ during training, $\hat{\theta}_{\epsilon,z_i} = \textnormal{argmin}_{\theta}\frac{1}{N}\sum^{N}_{k=1}\mathcal{L}(z_k, \theta)+\epsilon \mathcal{L}(z_{i}, \theta)$. This can be computed via a tractable approximation:
+
+ $$\mathcal{I}(z_i, z_\textit{test}) \approx - \nabla_{\theta}\mathcal{L}(z_\textit{test}, \hat{\theta})^{T} \, [\nabla^{2}_{\theta}\mathcal{L}(\mathcal{D}, \hat{\theta})]^{-1} \, \nabla_{\theta}\mathcal{L}(z_i, \hat{\theta})$$
52
+
53
+ where $[\nabla^{2}_{\theta}\mathcal{L}(\mathcal{D}, \hat{\theta})]^{-1}$ is the inverse Hessian of the loss $\mathcal{L}(\mathcal{D}, \hat{\theta})$ with respect to $\theta$, denoted $H^{-1}_{\hat{\theta}}$.
54
+
55
+ However, computing $H^{-1}_{\hat{\theta}}$ is still expensive; the method requires further approximations when the model is non-convex, and these can be less accurate on deep learning models [@basu2021influence]. @pruthi2020estimating find a similar, but first-order, solution that we use in this study: TracIn. It computes influence scores as follows:
+
+ $$\mathcal{I}(z_i, z_\textit{test}) = \sum^{E}_{e=1} \nabla_{\theta} \mathcal{L}(z_\textit{test}, \theta_e) \cdot \nabla_{\theta} \mathcal{L}(z_{i}, \theta_e)$$
+
+ where $\theta_e$ is the model checkpoint at training epoch $e$. The intuition is to approximate the total reduction in the test loss $\mathcal{L}(z_\textit{test}, \theta)$ over the course of training that is attributable to the training sample $z_i$. This gradient product method essentially drops the $H^{-1}_{\hat{\theta}}$ term and reduces the problem to a dot product between the gradients of the training and test point losses.
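+
+ A minimal PyTorch sketch of this checkpoint-summed gradient dot product (the interface, e.g. a per-example `loss_fn`, is our assumption, not the authors' code):
+
+ ```python
+ import torch
+
+ def tracin_score(checkpoints, loss_fn, z_train, z_test):
+     """Sum of train/test gradient dot products over per-epoch checkpoints."""
+     score = 0.0
+     for model in checkpoints:  # one model per saved fine-tuning epoch
+         params = [p for p in model.parameters() if p.requires_grad]
+         g_test = torch.autograd.grad(loss_fn(model, z_test), params)
+         g_train = torch.autograd.grad(loss_fn(model, z_train), params)
+         score += sum((gt * gr).sum() for gt, gr in zip(g_test, g_train)).item()
+     return score
+ ```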
58
+
59
+ A potential problem of using gradient products is that they can be dominated by outlier training samples whose training gradient norms are significantly larger than those of the rest of the training samples [@yu2020gradient]. This would lead the method to identify the same set of outlier training samples as influential for a large number of different test points [@han2020explaining]. Since we are working in the multilingual set-up, we know that dominating gradients are a common problem [@wang2020negative].[^1] @barshan2020relatif propose a simple modification that we adapt: replacing the dot product with cosine similarity, thus normalizing by the norm of the training gradients.
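+
+ The normalized variant changes only the inner product; a sketch building on `tracin_score` above:
+
+ ```python
+ import torch
+ import torch.nn.functional as F
+
+ def flat_grad(loss, params):
+     """Flatten per-parameter gradients into a single vector."""
+     grads = torch.autograd.grad(loss, params)
+     return torch.cat([g.reshape(-1) for g in grads])
+
+ def tracin_cos(checkpoints, loss_fn, z_train, z_test):
+     """TracIn with cosine similarity, damping outlier training gradients."""
+     score = 0.0
+     for model in checkpoints:
+         params = [p for p in model.parameters() if p.requires_grad]
+         g_test = flat_grad(loss_fn(model, z_test), params)
+         g_train = flat_grad(loss_fn(model, z_train), params)
+         score += F.cosine_similarity(g_test, g_train, dim=0).item()
+     return score
+ ```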
2305.15616/main_diagram/main_diagram.drawio ADDED
@@ -0,0 +1,160 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ <mxfile host="app.diagrams.net" modified="2023-05-16T04:18:42.851Z" agent="Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/16.4 Safari/605.1.15" etag="xzJ3zZ-VuW_Z-L2osJLQ" version="21.3.2" type="device">
2
+ <diagram name="Page-1" id="TKXCwm2MADlGmYsyHoiz">
3
+ <mxGraphModel dx="821" dy="536" grid="1" gridSize="10" guides="1" tooltips="1" connect="1" arrows="1" fold="1" page="0" pageScale="1" pageWidth="850" pageHeight="1100" math="1" shadow="0">
4
+ <root>
5
+ <mxCell id="0" />
6
+ <mxCell id="1" parent="0" />
7
+ <mxCell id="QlT_ib3cON1zx22nOIhe-1" value="Metrics&lt;br&gt;$$\mathbf{A_0,A_1}$$" style="fillColor=#ffe6cc;strokeColor=#d79b00;verticalAlign=middle;spacingBottom=-12;align=center;labelBackgroundColor=none;html=1;whiteSpace=wrap;" parent="1" vertex="1">
8
+ <mxGeometry x="300" y="235" width="80" height="50" as="geometry" />
9
+ </mxCell>
10
+ <mxCell id="QlT_ib3cON1zx22nOIhe-2" value="Encoder" style="shape=trapezoid;perimeter=trapezoidPerimeter;whiteSpace=wrap;html=1;fixedSize=1;rotation=90;fillColor=#ffe6cc;strokeColor=#d79b00;textDirection=rtl;verticalAlign=middle;horizontal=0;" parent="1" vertex="1">
11
+ <mxGeometry x="145" y="233.75" width="50" height="52.5" as="geometry" />
12
+ </mxCell>
13
+ <mxCell id="BsAwTsnxPIQPa3fEbhJh-17" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;" edge="1" parent="1">
14
+ <mxGeometry relative="1" as="geometry">
15
+ <mxPoint x="143" y="257.5" as="targetPoint" />
16
+ <mxPoint x="123" y="257.5" as="sourcePoint" />
17
+ </mxGeometry>
18
+ </mxCell>
19
+ <mxCell id="QlT_ib3cON1zx22nOIhe-5" value="$$x(t_k)$$" style="rounded=0;whiteSpace=wrap;html=1;fillColor=#d5e8d4;strokeColor=#82b366;" parent="1" vertex="1">
20
+ <mxGeometry x="80" y="240" width="40" height="40" as="geometry" />
21
+ </mxCell>
22
+ <mxCell id="QlT_ib3cON1zx22nOIhe-6" value="Functions&lt;br&gt;$$E,S$$" style="whiteSpace=wrap;html=1;fillColor=#ffe6cc;strokeColor=#d79b00;verticalAlign=middle;spacingBottom=-12;" parent="1" vertex="1">
23
+ <mxGeometry x="300" y="307.5" width="80" height="50" as="geometry" />
24
+ </mxCell>
25
+ <mxCell id="BsAwTsnxPIQPa3fEbhJh-19" value="" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;" edge="1" parent="1" source="QlT_ib3cON1zx22nOIhe-7" target="QlT_ib3cON1zx22nOIhe-12">
26
+ <mxGeometry relative="1" as="geometry" />
27
+ </mxCell>
28
+ <mxCell id="QlT_ib3cON1zx22nOIhe-7" value="Autodiff&lt;br&gt;$$\nabla E,\nabla S$$" style="whiteSpace=wrap;html=1;fillColor=#dae8fc;strokeColor=#6c8ebf;verticalAlign=middle;spacingBottom=-12;" parent="1" vertex="1">
29
+ <mxGeometry x="415" y="307.5" width="80" height="50" as="geometry" />
30
+ </mxCell>
31
+ <mxCell id="QlT_ib3cON1zx22nOIhe-8" value="$$\mathcal{G}$$" style="rounded=0;whiteSpace=wrap;html=1;fillColor=#d5e8d4;strokeColor=#82b366;" parent="1" vertex="1">
32
+ <mxGeometry x="80" y="167.5" width="40" height="40" as="geometry" />
33
+ </mxCell>
34
+ <mxCell id="QlT_ib3cON1zx22nOIhe-9" value="Graph derivatives&lt;br&gt;$$d_0, d_1$$" style="whiteSpace=wrap;html=1;fillColor=#dae8fc;strokeColor=#6c8ebf;verticalAlign=middle;spacingBottom=-12;" parent="1" vertex="1">
35
+ <mxGeometry x="402.5" y="162.5" width="105" height="50" as="geometry" />
36
+ </mxCell>
37
+ <mxCell id="QlT_ib3cON1zx22nOIhe-10" value="Graph adjoints&lt;br&gt;$$d^*_0, d^*_1$$" style="whiteSpace=wrap;html=1;fillColor=#dae8fc;strokeColor=#6c8ebf;verticalAlign=middle;spacingBottom=-12;" parent="1" vertex="1">
38
+ <mxGeometry x="402.5" y="231.25" width="105" height="50" as="geometry" />
39
+ </mxCell>
40
+ <mxCell id="BsAwTsnxPIQPa3fEbhJh-20" value="" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;" edge="1" parent="1" source="QlT_ib3cON1zx22nOIhe-11" target="QlT_ib3cON1zx22nOIhe-12">
41
+ <mxGeometry relative="1" as="geometry" />
42
+ </mxCell>
43
+ <mxCell id="QlT_ib3cON1zx22nOIhe-11" value="Bracket matrices&lt;br&gt;$$\mathbf{L}, \mathbf{M},\mathbf{G}$$" style="whiteSpace=wrap;html=1;fillColor=#dae8fc;strokeColor=#6c8ebf;verticalAlign=middle;spacingBottom=-12;" parent="1" vertex="1">
44
+ <mxGeometry x="530" y="231.25" width="105" height="50" as="geometry" />
45
+ </mxCell>
46
+ <mxCell id="BsAwTsnxPIQPa3fEbhJh-27" value="" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;" edge="1" parent="1" source="QlT_ib3cON1zx22nOIhe-12" target="BsAwTsnxPIQPa3fEbhJh-26">
47
+ <mxGeometry relative="1" as="geometry" />
48
+ </mxCell>
49
+ <mxCell id="QlT_ib3cON1zx22nOIhe-12" value="$$\dot{\tilde{x}}(t_k)$$" style="rounded=0;whiteSpace=wrap;html=1;fillColor=#d5e8d4;strokeColor=#82b366;" parent="1" vertex="1">
50
+ <mxGeometry x="562.5" y="312.5" width="40" height="40" as="geometry" />
51
+ </mxCell>
52
+ <mxCell id="QlT_ib3cON1zx22nOIhe-16" value="Decoder" style="shape=trapezoid;perimeter=trapezoidPerimeter;whiteSpace=wrap;html=1;fixedSize=1;rotation=90;fillColor=#ffe6cc;strokeColor=#d79b00;textDirection=rtl;verticalAlign=middle;horizontal=0;direction=west;" parent="1" vertex="1">
53
+ <mxGeometry x="733" y="231.25" width="50" height="52.5" as="geometry" />
54
+ </mxCell>
55
+ <mxCell id="QlT_ib3cON1zx22nOIhe-19" value="" style="endArrow=classic;html=1;rounded=0;exitX=0.5;exitY=1;exitDx=0;exitDy=0;entryX=0.5;entryY=0;entryDx=0;entryDy=0;" parent="1" source="QlT_ib3cON1zx22nOIhe-1" edge="1">
56
+ <mxGeometry width="50" height="50" relative="1" as="geometry">
57
+ <mxPoint x="480" y="257.5" as="sourcePoint" />
58
+ <mxPoint x="340" y="307.5" as="targetPoint" />
59
+ </mxGeometry>
60
+ </mxCell>
61
+ <mxCell id="QlT_ib3cON1zx22nOIhe-20" value="" style="endArrow=classic;html=1;rounded=0;exitX=0.5;exitY=1;exitDx=0;exitDy=0;entryX=0.5;entryY=0;entryDx=0;entryDy=0;" parent="1" source="QlT_ib3cON1zx22nOIhe-9" target="QlT_ib3cON1zx22nOIhe-10" edge="1">
62
+ <mxGeometry width="50" height="50" relative="1" as="geometry">
63
+ <mxPoint x="480" y="257.5" as="sourcePoint" />
64
+ <mxPoint x="530" y="207.5" as="targetPoint" />
65
+ </mxGeometry>
66
+ </mxCell>
67
+ <mxCell id="QlT_ib3cON1zx22nOIhe-21" value="" style="endArrow=classic;html=1;rounded=0;exitX=0.5;exitY=0;exitDx=0;exitDy=0;" parent="1" source="BsAwTsnxPIQPa3fEbhJh-1" edge="1">
68
+ <mxGeometry width="50" height="50" relative="1" as="geometry">
69
+ <mxPoint x="410" y="260" as="sourcePoint" />
70
+ <mxPoint x="250" y="258" as="targetPoint" />
71
+ </mxGeometry>
72
+ </mxCell>
73
+ <mxCell id="QlT_ib3cON1zx22nOIhe-22" value="" style="endArrow=classic;html=1;rounded=0;exitX=0.5;exitY=1;exitDx=0;exitDy=0;entryX=0.5;entryY=0;entryDx=0;entryDy=0;" parent="1" source="QlT_ib3cON1zx22nOIhe-10" edge="1">
74
+ <mxGeometry width="50" height="50" relative="1" as="geometry">
75
+ <mxPoint x="350" y="292.5" as="sourcePoint" />
76
+ <mxPoint x="455" y="307.5" as="targetPoint" />
77
+ </mxGeometry>
78
+ </mxCell>
79
+ <mxCell id="QlT_ib3cON1zx22nOIhe-23" value="" style="endArrow=classic;html=1;rounded=0;exitX=1;exitY=0.5;exitDx=0;exitDy=0;entryX=0;entryY=0.5;entryDx=0;entryDy=0;" parent="1" edge="1">
80
+ <mxGeometry width="50" height="50" relative="1" as="geometry">
81
+ <mxPoint x="380" y="332.5" as="sourcePoint" />
82
+ <mxPoint x="415" y="332.5" as="targetPoint" />
83
+ </mxGeometry>
84
+ </mxCell>
85
+ <mxCell id="QlT_ib3cON1zx22nOIhe-24" value="" style="endArrow=classic;html=1;rounded=0;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" parent="1" edge="1">
86
+ <mxGeometry width="50" height="50" relative="1" as="geometry">
87
+ <mxPoint x="382" y="260" as="sourcePoint" />
88
+ <mxPoint x="402" y="260" as="targetPoint" />
89
+ </mxGeometry>
90
+ </mxCell>
91
+ <mxCell id="QlT_ib3cON1zx22nOIhe-25" value="" style="endArrow=classic;html=1;rounded=0;exitX=1;exitY=0.5;exitDx=0;exitDy=0;entryX=0;entryY=0.5;entryDx=0;entryDy=0;" parent="1" source="QlT_ib3cON1zx22nOIhe-10" target="QlT_ib3cON1zx22nOIhe-11" edge="1">
92
+ <mxGeometry width="50" height="50" relative="1" as="geometry">
93
+ <mxPoint x="480" y="257.5" as="sourcePoint" />
94
+ <mxPoint x="530" y="207.5" as="targetPoint" />
95
+ </mxGeometry>
96
+ </mxCell>
97
+ <mxCell id="QlT_ib3cON1zx22nOIhe-30" value="$$x(t_{k+1})$$" style="rounded=0;whiteSpace=wrap;html=1;fillColor=#d5e8d4;strokeColor=#82b366;" parent="1" vertex="1">
98
+ <mxGeometry x="810" y="237.5" width="50" height="40" as="geometry" />
99
+ </mxCell>
100
+ <mxCell id="QlT_ib3cON1zx22nOIhe-32" value="Input" style="rounded=1;whiteSpace=wrap;html=1;fillColor=#d5e8d4;strokeColor=#82b366;" parent="1" vertex="1">
101
+ <mxGeometry x="580" y="167.5" width="40" height="26.1" as="geometry" />
102
+ </mxCell>
103
+ <mxCell id="QlT_ib3cON1zx22nOIhe-33" value="Trainable" style="rounded=1;whiteSpace=wrap;html=1;fillColor=#ffe6cc;strokeColor=#d79b00;" parent="1" vertex="1">
104
+ <mxGeometry x="635" y="167.5" width="60" height="26.1" as="geometry" />
105
+ </mxCell>
106
+ <mxCell id="QlT_ib3cON1zx22nOIhe-34" value="Calculated" style="rounded=1;whiteSpace=wrap;html=1;fillColor=#dae8fc;strokeColor=#6c8ebf;" parent="1" vertex="1">
107
+ <mxGeometry x="710" y="167.5" width="70" height="26.1" as="geometry" />
108
+ </mxCell>
109
+ <mxCell id="QlT_ib3cON1zx22nOIhe-37" value="" style="endArrow=classic;html=1;rounded=0;entryX=0;entryY=0.5;entryDx=0;entryDy=0;exitX=0.5;exitY=1;exitDx=0;exitDy=0;" parent="1" source="QlT_ib3cON1zx22nOIhe-16" target="QlT_ib3cON1zx22nOIhe-30" edge="1">
110
+ <mxGeometry width="50" height="50" relative="1" as="geometry">
111
+ <mxPoint x="480" y="257.5" as="sourcePoint" />
112
+ <mxPoint x="530" y="207.5" as="targetPoint" />
113
+ </mxGeometry>
114
+ </mxCell>
115
+ <mxCell id="BsAwTsnxPIQPa3fEbhJh-2" value="" style="endArrow=classic;html=1;rounded=0;exitX=0.5;exitY=0;exitDx=0;exitDy=0;entryX=0;entryY=0.5;entryDx=0;entryDy=0;" edge="1" parent="1">
116
+ <mxGeometry width="50" height="50" relative="1" as="geometry">
117
+ <mxPoint x="199.25" y="260" as="sourcePoint" />
118
+ <mxPoint x="228" y="260" as="targetPoint" />
119
+ </mxGeometry>
120
+ </mxCell>
121
+ <mxCell id="BsAwTsnxPIQPa3fEbhJh-1" value="$$\tilde{x}&lt;br&gt;(t_k)$$" style="rounded=0;whiteSpace=wrap;html=1;fillColor=#d5e8d4;strokeColor=#82b366;" vertex="1" parent="1">
122
+ <mxGeometry x="230" y="240" width="40" height="40" as="geometry" />
123
+ </mxCell>
124
+ <mxCell id="BsAwTsnxPIQPa3fEbhJh-3" value="" style="endArrow=classic;html=1;rounded=0;exitX=0.5;exitY=0;exitDx=0;exitDy=0;entryX=0;entryY=0.5;entryDx=0;entryDy=0;" edge="1" parent="1">
125
+ <mxGeometry width="50" height="50" relative="1" as="geometry">
126
+ <mxPoint x="272" y="260" as="sourcePoint" />
127
+ <mxPoint x="300" y="260" as="targetPoint" />
128
+ </mxGeometry>
129
+ </mxCell>
130
+ <mxCell id="BsAwTsnxPIQPa3fEbhJh-7" value="" style="endArrow=classic;html=1;rounded=0;exitX=1;exitY=0.5;exitDx=0;exitDy=0;entryX=0;entryY=0.5;entryDx=0;entryDy=0;" edge="1" parent="1">
131
+ <mxGeometry width="50" height="50" relative="1" as="geometry">
132
+ <mxPoint x="637" y="259.5" as="sourcePoint" />
133
+ <mxPoint x="659" y="259.5" as="targetPoint" />
134
+ </mxGeometry>
135
+ </mxCell>
136
+ <mxCell id="BsAwTsnxPIQPa3fEbhJh-12" value="" style="endArrow=classic;html=1;rounded=0;exitX=1;exitY=0.5;exitDx=0;exitDy=0;entryX=0.5;entryY=0;entryDx=0;entryDy=0;" edge="1" parent="1" target="QlT_ib3cON1zx22nOIhe-16">
137
+ <mxGeometry width="50" height="50" relative="1" as="geometry">
138
+ <mxPoint x="702" y="257.5" as="sourcePoint" />
139
+ <mxPoint x="724" y="257.5" as="targetPoint" />
140
+ </mxGeometry>
141
+ </mxCell>
142
+ <mxCell id="BsAwTsnxPIQPa3fEbhJh-15" value="" style="endArrow=classic;html=1;rounded=0;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="QlT_ib3cON1zx22nOIhe-8">
143
+ <mxGeometry width="50" height="50" relative="1" as="geometry">
144
+ <mxPoint x="130" y="187" as="sourcePoint" />
145
+ <mxPoint x="400" y="187" as="targetPoint" />
146
+ </mxGeometry>
147
+ </mxCell>
148
+ <mxCell id="BsAwTsnxPIQPa3fEbhJh-21" value="$$\tilde{x}&lt;br&gt;(t_{k+1})$$" style="rounded=0;whiteSpace=wrap;html=1;fillColor=#d5e8d4;strokeColor=#82b366;" vertex="1" parent="1">
149
+ <mxGeometry x="660" y="240" width="50" height="40" as="geometry" />
150
+ </mxCell>
151
+ <mxCell id="BsAwTsnxPIQPa3fEbhJh-28" value="" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;" edge="1" parent="1" source="BsAwTsnxPIQPa3fEbhJh-26" target="BsAwTsnxPIQPa3fEbhJh-21">
152
+ <mxGeometry relative="1" as="geometry" />
153
+ </mxCell>
154
+ <mxCell id="BsAwTsnxPIQPa3fEbhJh-26" value="Integrator" style="rounded=1;whiteSpace=wrap;html=1;fillColor=#dae8fc;strokeColor=#6c8ebf;" vertex="1" parent="1">
155
+ <mxGeometry x="650" y="319.45" width="70" height="26.1" as="geometry" />
156
+ </mxCell>
157
+ </root>
158
+ </mxGraphModel>
159
+ </diagram>
160
+ </mxfile>
2305.15616/main_diagram/main_diagram.pdf ADDED
Binary file (22.4 kB). View file
 
2305.15616/paper_text/intro_method.md ADDED
@@ -0,0 +1,152 @@
1
+ # Introduction
2
+
3
+ Graph neural networks (GNNs) have emerged as a powerful learning paradigm able to treat unstructured data and extract ``object-relation''/causal relationships while imparting inductive biases which preserve invariances through the underlying graph topology . This framework has proven effective for a wide range of both graph analytics and data-driven physics modeling problems. Despite these successes, GNNs have generally struggled to achieve the improved performance with increasing depth that is typical of other architectures. Well-known pathologies, such as oversmoothing, oversquashing, bottlenecks, and exploding/vanishing gradients, yield deep GNNs which are either unstable or lose performance as the number of layers increases .
4
+
5
+ To combat this, a number of works build architectures which mimic physical processes to impart desirable numerical properties. For example, some works claim that posing message passing as either a diffusion process or reversible flow may promote stability or help retain information, respectively. These present opposite ends of a spectrum between irreversible and reversible processes, which either dissipate or retain information. It is unclear, however, what role (ir)reversibility plays . One could argue that dissipation entropically destroys information and could promote oversmoothing, so should be avoided. Alternatively, in dynamical systems theory, dissipation is crucial to realize a low-dimensional attractor, and thus dissipation may play an important role in realizing dimensionality reduction. Moreover, recent work has shown that dissipative phenomena can actually sharpen information as well as smooth it , although this is not often noticed in practice since typical empirical tricks (batch norm, etc.) lead to a departure from the governing mathematical theory.
6
+
7
+ In physics, Poisson brackets and their metriplectic/port-Hamiltonian generalization to dissipative systems provide an abstract framework for studying conservation and entropy production in dynamical systems. In this work, we construct four novel architectures which span the (ir)reversibility spectrum, using geometric brackets as a means of parameterizing dynamics abstractly without empirically assuming a physical model. This relies on an application of the data-driven exterior calculus (DDEC) , which allows a reinterpretation of the message-passing and aggregation of graph attention networks as the fluxes and conservation balances of physics simulators , providing a simple but powerful framework for mathematical analysis. In this context, we recast graph attention as an inner-product on graph features, inducing graph derivative ``building-blocks'' which may be used to build geometric brackets. In the process, we generalize classical graph attention to higher-order clique cochains (e.g., labels on edges and loops). The four architectures proposed here scale with identical complexity to classical graph attention networks, and possess desirable properties that have proven elusive in current architectures. On the reversible and irreversible end of the spectrum we have Hamiltonian and Gradient networks. In the middle of the spectrum, Double Bracket and Metriplectic architectures combine both reversibility and irreversibility, dissipating energy to either the environment or an entropic variable, respectively, in a manner consistent with the second law of thermodynamics. We summarize these brackets in Table , providing a diagram of their architecture in Figure .
8
+
9
+ \noindent Primary contributions:
10
+
11
+ \noindent Theoretical analysis of GAT in terms of exterior calculus. Using DDEC we establish a unified framework for construction and analysis of message-passing graph attention networks, and provide an extensive introductory primer to the theory in the appendices. In this setting, we show that with our modified attention mechanism, GATs amount to a diffusion process for a special choice of activation and weights.
12
+
13
+ \noindent Generalized attention mechanism. Within this framework, we obtain a natural and flexible extension of graph attention from nodal features to higher order cliques (e.g. edge features). We show attention must have a symmetric numerator to be formally structure-preserving, and introduce a novel and flexible graph attention mechanism parameterized in terms of learnable inner products on nodes and edges.
14
+
15
+ \noindent Novel structure-preserving extensions. We develop four GNN architectures based upon bracket-based dynamical systems. In the metriplectic case, we obtain the first architecture with linear complexity in the size of the graph, whereas previous works scale as $O(N^3)$.
16
+
17
+ \noindent Unified evaluation of dissipation. We use these architectures to systematically evaluate the role of (ir)reversibility in the performance of deep GNNs. We observe best performance for partially dissipative systems, indicating that a combination of both reversibility and irreversibility is important. Pure diffusion is the least performant across all benchmarks. For physics-based problems, including optimal control, there is a distinct improvement. All models provide near state-of-the-art performance and marked improvements over black-box GAT/NODE networks.
18
+
19
+ \begin{table}[t]
20
+ \centering
21
+ \resizebox{\columnwidth}{!}{
22
+ \begin{tabular}{lllll}
23
+ \toprule
24
+ {\bf Formalism} & {\bf Equation} & {\bf Requirements} & {\bf Completeness} & {\bf Character} \\
25
+ Hamiltonian & $\dot\xx = \rb{\xx}{E}$ & $\LL^*=-\LL$, & complete & conservative \\
26
+ & & Jacobi's identity & & \\
27
+ Gradient & $\dot\xx = -\ib{\xx}{E}$ & $\MM^*=\MM$ & incomplete & totally dissipative \\
28
+ Double Bracket & $\dot\xx = \rb{\xx}{E}+\left\{\rb{\xx}{E}\right\}$ & $\LL^*=-\LL$ & incomplete & partially dissipative \\
29
+ Metriplectic & $\dot\xx = \rb{\xx}{E}+\ib{\xx}{S}$ & $\LL^*=-\LL, \MM^*=\MM$, & complete & partially dissipative \\
30
+ & & $\LL\nabla S = \MM\nabla E = \bm{0}$ & & \\
31
+ \bottomrule
32
+ \end{tabular}}
33
+ \vspace{5pt}
34
+ \caption{The abstract bracket formulations employed in this work. Here $\xx$ represents a state variable, while $E=E(\xx),S=S(\xx)$ are energy and entropy functions. ``Conservative'' indicates purely reversible motion, ``totally dissipative'' indicates purely irreversible motion, and ``partially dissipative'' indicates motion which either dissipates $E$ (in the double bracket case) or generates $S$ (in the metriplectic case).}
35
+ \end{table}
36
+ \noindent Neural ODEs: Many works use neural networks to fit dynamics of the form $\dot{x} = f(x,\theta)$ to time series data. Model calibration (e.g., UDE ), dictionary-based learning (e.g., SINDy ), and neural ordinary differential equations (e.g., NODE ) pose a spectrum of inductive biases requiring progressively less domain expertise. Structure-preservation provides a means of obtaining stable training without requiring domain knowledge, ideally achieving the flexibility of NODE with the robustness of UDE/SINDy. The current work learns dynamics on a graph while using a modern NODE library to exploit the improved accuracy of high-order integrators .
37
+
38
+ \noindent Structure-preserving dense networks: For dense networks, it is relatively straightforward to parameterize reversible dynamics, see for example: Hamiltonian neural networks , Hamiltonian generative networks , Hamiltonian with Control (SymODEN) , Deep Lagrangian networks and Lagrangian neural networks . Structure-preserving extensions to dissipative systems are more challenging, particularly for metriplectic dynamics which require a delicate degeneracy condition to preserve discrete notions of the first and second laws of thermodynamics. For dense networks such constructions are intensive, suffering from $O(N^3)$ complexity in the number of features . In the graph setting we avoid this and achieve linear complexity by exploiting exact sequence structure. Alternative dissipative frameworks include Dissipative SymODEN and port-Hamiltonian . We choose to focus on metriplectic parameterizations due to their broad potential impact in data-driven physics modeling, and ability to naturally treat fluctuations in multiscale systems .
39
+
40
+ \noindent Physics-informed vs structure-preserving:
41
+ "Physics-informed" learning imposes physics by penalty, adding a regularizer corresponding to a physics residual. The technique is simple to implement and has been successfully applied to solve a range of PDEs , discover data-driven models to complement first-principles simulators , learn metriplectic dynamics , and perform uncertainty quantification . Penalization, however, poses a multiobjective optimization problem in which the weights balancing competing objectives can induce pathologies during training, often resulting in physics imposed only to a coarse tolerance and qualitatively poor predictions . In contrast, structure-preserving architectures exactly impose physics by construction via carefully designed networks. Several works have shown that penalty-based approaches suffer in comparison, with structure-preservation providing improved long-term stability, extrapolation, and physical realizability.
42
+
43
+ \noindent Structure-preserving graph networks: Several works use discretizations of specific PDEs to combat oversmoothing or exploding/vanishing gradients, e.g. telegraph equations or various reaction-diffusion systems . Several works develop Hamiltonian flows on graphs . For metriplectic dynamics, poses a penalty-based formulation on graphs. We particularly focus on GRAND, which poses graph learning as a diffusive process , using a similar exterior calculus framework and interpreting attention as a diffusion coefficient. We show in Appendix that their analysis fails to account for the asymmetry in the attention mechanism, leading to a departure from the governing theory. To account for this, we introduce a modified attention mechanism which retains an interpretation as part of a diffusion PDE. In this purely irreversible case, it is of interest whether adherence to the theory provides improved results, or whether GRAND's success is driven by something other than structure-preservation.
44
+
45
+ \begin{figure}[tb]
46
+ \centering
47
+ \includegraphics[width=\textwidth]{figs/architecture-diagram.pdf}
48
+ \caption{A diagrammatic illustration of the bracket-based architectures introduced in Section .}
49
+ \end{figure}
50
+ Here we introduce the two essential ingredients to our approach: bracket-based dynamical systems for neural differential equations, and the data-driven exterior calculus which enables their construction. A thorough introduction to this material is provided in Appendices , , and .
51
+
52
+ \noindent Bracket-based dynamics: Originally introduced as an extension of Hamiltonian/Lagrangian dynamics to include dissipation , bracket formulations endow a dynamical system with certain structural properties, e.g., time-reversibility, invariant differential forms, or property preservation. Even without dissipation, bracket formulations may compactly describe dynamics while preserving core mathematical properties, making them ideal for designing neural architectures.
53
+
54
+ Bracket formulations are usually specified via some combination of reversible brackets $\rb{F}{G} = \IP{\nabla F}{\LL\nabla G}$ and irreversible brackets $\ib{F}{G} = \IP{\nabla F}{\MM\nabla G}, \left\{\rb{F}{G}\right\} = \IP{\nabla F}{\LL^2\nabla G}$ for potentially state-dependent operators $\LL^*=-\LL$ and $\MM^*=\MM$. The particular brackets which are used in the present network architectures are summarized in Table . Note that complete systems are the dynamical extensions of isolated thermodynamical systems: they conserve energy and produce entropy, with nothing lost to the ambient environment. Conversely, incomplete systems do not account for any lost energy: they only require that it vanish in a prescribed way. The choice of completeness is an application-dependent modeling assumption.
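+ 
+ As a toy illustration of this algebra (a sketch of our own, not the paper's construction: random operators in the Euclidean inner product), the defining signs of the two bracket types can be checked directly:
+ 
+ ```python
+ import numpy as np
+ 
+ rng = np.random.default_rng(0)
+ Z = rng.standard_normal((4, 4)); L = Z - Z.T   # skew-adjoint: L^* = -L
+ Y = rng.standard_normal((4, 4)); M = Y @ Y.T   # self-adjoint, PSD: M^* = M
+ gradE = rng.standard_normal(4)
+ 
+ rev = gradE @ (L @ gradE)   # {E,E} = <grad E, L grad E>: vanishes, so E is conserved
+ irr = gradE @ (M @ gradE)   # [E,E] = <grad E, M grad E>: nonnegative dissipation
+ assert np.isclose(rev, 0.0) and irr >= 0.0
+ ```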
55
+
56
+ \noindent Exterior calculus: In the combinatorial Hodge theory , an oriented graph $\mathcal{G}=\{\mathcal{V},\mathcal{E}\}$ carries sets of $k$-cliques, denoted $\mathcal{G}_k$, which are collections of ordered subgraphs generated by $(k+1)$ nodes. This induces natural exterior derivative operators $d_k:\Omega_k\to\Omega_{k+1}$, acting on the spaces of functions on $\mathcal{G}_k$, which are the signed incidence matrices between $k$-cliques and $(k+1)$-cliques. An explicit representation of these derivatives is given in Appendix , from which it is easy to check the exact sequence property $d_{k+1}\circ d_{k} = 0$ for any $k$. This yields a discrete de Rham complex on the graph $\mathcal{G}$ (Figure ). Moreover, given a choice of inner product (say, $\ell^2$) on $\Omega_k$, there is an obvious dual de Rham complex which comes directly from adjointness. In particular, one can define dual derivatives $d_k^*:\Omega_{k+1}\to\Omega_k$ via the equality
57
+ \[\IP{d_kf}{g}_{k+1} = \IP{f}{d_k^*g}_k,\]
58
+ from which nontrivial results such as the Hodge decomposition, Poincar\'{e} inequality, and coercivity/invertibility of the Hodge Laplacian $\Delta_k = d_k^*d_k + d_{k-1}d_{k-1}^*$ follow (see e.g. ). Using the derivatives $d_k, d_k^*$, it is possible to build compatible discretizations of PDEs on $\mathcal{G}$ which are guaranteed to preserve exactness properties such as, e.g., $d_1\circ d_0 = \mathrm{curl}\circ\mathrm{grad} = 0$.
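+ 
+ To make these derivatives concrete, the following minimal sketch (the oriented triangle is our own choice of example) writes $d_0$ and $d_1$ as signed incidence matrices and checks the exact sequence property:
+ 
+ ```python
+ import numpy as np
+ 
+ # Oriented triangle: nodes {0,1,2}, edges {(0,1),(0,2),(1,2)}, 2-clique (0,1,2).
+ d0 = np.array([[-1,  1,  0],   # (d0 f)_(i,j) = f_j - f_i   ("grad")
+                [-1,  0,  1],
+                [ 0, -1,  1]])
+ d1 = np.array([[ 1, -1,  1]])  # (d1 g)_(i,j,k) = g_jk - g_ik + g_ij   ("curl")
+ 
+ assert np.all(d1 @ d0 == 0)    # exact sequence: d1 . d0 = curl . grad = 0
+ ```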
59
+
60
+ The choice of inner product $\IP{\cdot}{\cdot}_k$ thus induces a definition of the dual derivatives $d_k^*$. In the graph setting , one typically selects the $\ell^2$ inner product, obtaining the adjoints of the signed incidence matrices as $d_k^*=d_k^\intercal$. By instead working with the modified inner product $\IPk{\bb{v}}{\bb{w}} = \bb{v}^\intercal \bb{A}_k \bb{w}$ for a machine-learnable $\bb{A}_k$, we obtain $d_k^* = \bb{A}_k^{-1}d_k^\intercal\bb{A}_{k+1}$ (see Appendix ). This parameterization inherits the exact sequence property from the graph topology encoded in $d_k$ while allowing for incorporation of geometric information from data. This leads directly to the following result, which holds for any (potentially feature-dependent) symmetric positive definite matrix $\bb{A}_k$.
61
+
62
+ The dual derivatives $d_k^*:\Omega_{k+1}\to\Omega_k$ adjoint to $d_k:\Omega_k\to\Omega_{k+1}$ with respect to the learnable inner products $\bb{A}_k:\Omega_k\to\Omega_k$ satisfy an exact sequence property.
63
+
64
+ \[
65
+ d_{k-1}^* d_{k}^* = \bb{A}_{k-1}^{-1}d_{k-1}^\intercal\bb{A}_{k}\bb{A}_k^{-1}d_k^\intercal\bb{A}_{k+1} = \bb{A}_{k-1}^{-1}\lr{d_{k} d_{k-1}}^\intercal\bb{A}_{k+1} = 0. \qedhere
66
+ \]
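+ 
+ The proof translates directly into a numerical check; the sketch below (continuing the triangle example above, with random SPD matrices standing in for the learnable inner products) verifies that the dual exact sequence holds for arbitrary $\bb{A}_k$:
+ 
+ ```python
+ import numpy as np
+ 
+ d0 = np.array([[-1, 1, 0], [-1, 0, 1], [0, -1, 1]], dtype=float)
+ d1 = np.array([[1, -1, 1]], dtype=float)
+ 
+ rng = np.random.default_rng(0)
+ def spd(n):  # random SPD stand-in for a learnable inner product A_k
+     m = rng.standard_normal((n, n))
+     return m @ m.T + n * np.eye(n)
+ 
+ A0, A1, A2 = spd(3), spd(3), spd(1)
+ d0_star = np.linalg.solve(A0, d0.T @ A1)  # d0* = A0^{-1} d0^T A1
+ d1_star = np.linalg.solve(A1, d1.T @ A2)  # d1* = A1^{-1} d1^T A2
+ assert np.allclose(d0_star @ d1_star, 0)  # d0* . d1* = 0 for any SPD choice
+ ```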
67
+
68
+ As will be shown in Section , by encoding graph attention into the $\bb{A}_k$, we may exploit the exact sequence property to obtain symmetric positive definite diffusion operators, as well as to perform the cancellations needed to enforce the degeneracy conditions required for metriplectic dynamics.
69
+
70
+ For a thorough review of DDEC, we direct readers to Appendix and . For exterior calculus in topological data analysis see , and for an overview in the context of PDEs see .
71
+
72
+ \begin{figure}
73
+ \centering
74
+ \begin{tikzcd}[scale cd=1.5,sep=large]
75
+ \Omega_0 \arrow[shift left]{r}{d_0} & \arrow[shift left]{l}{d^\intercal_0} \Omega_1 \arrow[shift left]{r}{d_1} & \arrow[shift left]{l}{d_1^\intercal} \Omega_2 \arrow[shift left]{r}{d_2} & \arrow[shift left]{l}{d_2^\intercal} \cdots \arrow[shift left]{r}{d_{k-1}} & \arrow[shift left]{l}{d_{k-1}^\intercal} \Omega_k\\
76
+ \Omega_0 \arrow[swap]{u}{\mathbf{A}_0} & \Omega_1 \arrow[swap]{u}{\mathbf{A}_1} \arrow{l}{d_0^*} & \Omega_2 \arrow[swap]{u}{\mathbf{A}_2} \arrow{l}{d_1^*} & \arrow{l}{d_2^*} \cdots & \arrow{l}{d_{k-1}^*} \Omega_k \arrow[swap]{u}{\mathbf{A}_k}
77
+ \end{tikzcd}
78
+ \caption{A commutative diagram illustrating the relationship between the graph derivatives $d_k$, their $\ell^2$ adjoints $d_k^\intercal$, and the learnable adjoints $d_k^*$. These operators form a de Rham complex due to the exact sequence property $d_{i+1}\circ d_i = d^\intercal_i \circ d^\intercal_{i+1}= d^*_i \circ d^*_{i+1} = 0$. We show that the learnable $\mathbf{A}_k$ may encode attention mechanisms without impacting the preservation of exact sequence structure.}
79
+ \end{figure}
80
+
81
+ We next summarize properties of the bracket dynamics introduced in Section and displayed in Table , postponing details and rigorous discussion to Appendices and . Letting $\xx = (\qq,\pp)$ denote node-edge feature pairs, the following operators will be used to generate our brackets.
82
+ \[ \LL = \begin{pmatrix} 0 & -d_0^* \\ d_0 & 0 \end{pmatrix}, \quad \GG = \begin{pmatrix} \Delta_0 & 0 \\ 0 & \Delta_1 \end{pmatrix} = \begin{pmatrix} d_0^*d_0 & 0 \\ 0 & d_1^*d_1 + d_0d_0^* \end{pmatrix}, \quad \MM = \begin{pmatrix} 0 & 0 \\ 0 & \bb{A}_1d_1^*d_1\bb{A}_1 \end{pmatrix}. \]
83
+ As mentioned before, the inner products $\bb{A}_0,\bb{A}_1,\bb{A}_2$ on $\Omega_k$, which induce the dual derivatives $d_0^*,d_1^*$, are chosen in such a way that their combination generalizes a graph attention mechanism. The precise details of this construction are given below, and its relationship to the standard GAT network from is shown in Appendix . Notice that $\LL^* = -\LL$, while $\GG^*=\GG, \MM^*=\MM$ are positive semi-definite with respect to the block-diagonal inner product $\IPk{\cdot}{\cdot}{}$ defined by $\bb{A} = \mathrm{diag}\lr{\bb{A}_0,\bb{A}_1}$ (details are provided in Appendix ). Therefore, $\LL$ generates purely reversible (Hamiltonian) dynamics and $\GG,\MM$ generate irreversible (dissipative) ones. Additionally, note that state-dependence in $\LL,\MM,\GG$ enters only through the adjoint differential operators, meaning that any structural properties induced by the topology of the graph $\mathcal{G}$ (such as the exact sequence property mentioned in Theorem ) are automatically preserved.
84
+
85
+ Strictly speaking, $\LL$ is guaranteed to be a truly Hamiltonian system only when $d_0^*$ is state-independent, since it may otherwise fail to satisfy Jacobi's identity. On the other hand, energy conservation is always guaranteed due to the fact that $\LL$ is skew-adjoint.
86
+
87
+ In addition to the bracket matrices $\LL,\MM,\GG$, it is necessary to have access to energy and entropy functions $E,S$ and their associated functional derivatives with respect to the inner product on $\Omega_0 \oplus \Omega_1$ defined by $\bb{A}$. For the Hamiltonian, gradient, and double brackets, $E$ is chosen simply as the ``total kinetic energy''
88
+ \[
89
+ E(\qq,\pp) = \frac{1}{2}\lr{\nn{\qq}^2 + \nn{\pp}^2} = \frac{1}{2}\sum_{i\in\mathcal{V}} \nn{\qq_i}^2 + \frac{1}{2}\sum_{\alpha\in\mathcal{E}} \nn{\pp_\alpha}^2,
90
+ \]
91
+ whose $\bb{A}$-gradient (computed in Appendix ) is just $\nabla E(\qq,\pp) = \begin{pmatrix} \bb{A}_0^{-1}\qq & \bb{A}_1^{-1}\pp \end{pmatrix}^\intercal$. Since the metriplectic bracket uses parameterizations of $E,S$ which are more involved, discussion of this case is deferred to later in this Section.
92
+
93
+ \noindent Attention as learnable inner product: Before describing the dynamics, it remains to discuss how the matrices $\bb{A}_i$, $0\leq i\leq 2$, are computed in practice, and how they relate to the idea of graph attention. Recall that if $n_V>0$ denotes the nodal feature dimension, a graph attention mechanism takes the form $a(\qq_i,\qq_j) = f\lr{\tilde{a}_{ij}} / \sum_j f\lr{\tilde{a}_{ij}}$ for some differentiable pre-attention function $\tilde{a}:\mathbb{R}^{n_V}\times\mathbb{R}^{n_V}\to\mathbb{R}$ (e.g., scaled dot product ); one typically represents $a(\qq_i,\qq_j)$ as a softmax, so that $f=\exp$. This suggests the decomposition $a(\qq_i,\qq_j) = a_{0,ii}^{-1}a_{1,ij}$, where $\bb{A}_0 = \lr{a_{0,ii}}$ is diagonal on nodes and $\bb{A}_1 = \lr{a_{1,ij}}$ is diagonal on edges,
94
+ \[
95
+ a_{0,ii} = \sum_{j\in\mathcal{N}(i)} f\lr{\tilde{a}\lr{\qq_i,\qq_j}}, \qquad a_{1,ij} = f\lr{\tilde{a}\lr{\qq_i,\qq_j}}.
96
+ \]
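+ 
+ The sketch below (a hypothetical instantiation: dot-product pre-attention on the triangle graph, chosen because it is symmetric in $i,j$) assembles $\bb{A}_0,\bb{A}_1$ from these formulas and checks that $\bb{A}_0^{-1}d_0^\intercal\bb{A}_1$ performs attention-weighted edge aggregation, anticipating the identity displayed next:
+ 
+ ```python
+ import numpy as np
+ 
+ d0 = np.array([[-1, 1, 0], [-1, 0, 1], [0, -1, 1]], dtype=float)
+ edges = [(0, 1), (0, 2), (1, 2)]
+ 
+ rng = np.random.default_rng(0)
+ Q = rng.standard_normal((3, 2))   # nodal features q_i with n_V = 2
+ f = np.exp(Q @ Q.T)               # f(a~(q_i,q_j)) with f = exp, symmetric pre-attention
+ A1 = np.diag([f[i, j] for i, j in edges])               # numerators on edges
+ A0 = np.diag([f[i].sum() - f[i, i] for i in range(3)])  # denominators on nodes
+ 
+ p = np.array([0.3, -1.2, 0.7])               # edge feature (antisymmetric cochain)
+ lhs = np.linalg.solve(A0, d0.T @ (A1 @ p))   # (d0* p)_i with attention inner products
+ rhs = (f[0, 1] * -p[0] + f[0, 2] * -p[1]) / A0[0, 0]  # sum_j a(q_0,q_j) p_(j,0)
+ assert np.isclose(lhs[0], rhs)
+ ```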
97
+
98
+ Treating the numerator and denominator of the standard attention mechanism separately in $\bb{A}_0,\bb{A}_1$ allows for a flexible and theoretically sound incorporation of graph attention directly into the adjoint differential operators on $\mathcal{G}$. In particular, \textit{if $\bb{A}_1$ is symmetric} with respect to edge-orientation and $\pp$ is an edge feature which is antisymmetric, it follows that
99
+ \[\lr{d_0^*\pp}_i = \lr{\bb{A}_0^{-1}d_0^\intercal\bb{A}_1\pp}_i = \sum_{j\in \mathcal{N}(i)} a\lr{\qq_i,\qq_j}\pp_{ji},\]
100
+ which is just graph attention combined with edge aggregation. This makes it possible to give the following informal statement regarding graph attention networks which is explained and proven in Appendix .
101
+
102
+ The GAT layer from is almost the forward Euler discretization of a metric heat equation.
103
+
104
+ The ``almost'' appearing here has to do with the fact that (1) the attentional numerator $f\lr{\tilde{a}(\qq_i,\qq_j)}$ is generally asymmetric in $i,j$, and is therefore symmetrized by the divergence operator $d_0^\intercal$, (2) the activation function between layers is not included, and (3) learnable weight matrices $\bb{W}^k$ in GAT are set to the identity.
105
+
106
+ The interpretation of graph attention as a combination of learnable inner products admits a direct generalization to higher-order cliques, which is discussed in Appendix .
107
+
108
+ \noindent Hamiltonian case: A purely conservative system is generated by solving
109
+ $\dot{\xx} = \LL(\xx)\nabla E(\xx)$, or
110
+ \[
111
+ \begin{pmatrix} \dot{\qq} \\ \dot{\pp} \end{pmatrix} = \begin{pmatrix} 0 & -d_0^* \\ d_0 & 0 \end{pmatrix} \begin{pmatrix} \bb{A}_0^{-1} & 0 \\ 0 & \bb{A}_1^{-1} \end{pmatrix} \begin{pmatrix} \qq \\ \pp \end{pmatrix} = \begin{pmatrix} -d_0^*\bb{A}_1^{-1}\pp \\ d_0\bb{A}_0^{-1}\qq \end{pmatrix}.
112
+ \]
113
+ This is a noncanonical Hamiltonian system which generates a purely reversible flow. In particular, it can be shown that
114
+ \[\dot{E}(\xx) = \IPk{\dot{\xx}}{\nabla E(\xx)}{} = \IPk{\LL(\xx)\nabla E(\xx)}{\nabla E(\xx)}{} = -\IPk{\nabla E(\xx)}{\LL(\xx)\nabla E(\xx)}{} = 0,\]
115
+ so that energy is conserved due to the skew-adjointness of $\LL$.
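+ 
+ For state-independent inner products this flow is linear, so conservation can be verified to machine precision via the matrix exponential. Below is a minimal sketch (our own toy instantiation on the triangle graph; note that $\bb{A}_1$ cancels in this particular flow):
+ 
+ ```python
+ import numpy as np
+ from scipy.linalg import expm
+ 
+ d0 = np.array([[-1, 1, 0], [-1, 0, 1], [0, -1, 1]], dtype=float)
+ rng = np.random.default_rng(0)
+ m = rng.standard_normal((3, 3)); A0 = m @ m.T + 3 * np.eye(3)  # fixed SPD A0
+ 
+ B = np.linalg.solve(A0, d0.T)     # A0^{-1} d0^T, so that qdot = -B p
+ K = np.block([[np.zeros((3, 3)), -B],
+               [B.T,              np.zeros((3, 3))]])
+ assert np.allclose(K, -K.T)       # skew, hence a reversible flow
+ 
+ x0 = rng.standard_normal(6)       # stacked state (q, p)
+ x1 = expm(1.7 * K) @ x0           # exact solution at t = 1.7
+ E = lambda x: 0.5 * np.dot(x, x)  # total kinetic energy
+ assert np.isclose(E(x0), E(x1))   # conserved to machine precision
+ ```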
116
+
117
+ \noindent Gradient case: On the opposite end of the spectrum are generalized gradient flows, which are totally dissipative. Consider solving $\dot{\xx} = -\GG(\xx)\nabla E(\xx)$, or
118
+ \[ \begin{pmatrix} \dot{\qq} \\ \dot{\pp} \end{pmatrix} = -\begin{pmatrix} \Delta_0 & 0 \\ 0 & \Delta_1 \end{pmatrix} \begin{pmatrix} \bb{A}_0^{-1} & 0 \\ 0 & \bb{A}_1^{-1} \end{pmatrix} \begin{pmatrix} \qq \\ \pp \end{pmatrix} = \begin{pmatrix} -\Delta_0\bb{A}_0^{-1}\qq \\ -\Delta_1\bb{A}_1^{-1}\pp \end{pmatrix}. \]
119
+ This system is a metric diffusion process on nodes and edges separately. Moreover, it corresponds to a generalized gradient flow, since
120
+ \[ \dot{E}(\xx) = \IPk{\dot{\xx}}{\nabla E(\xx)}{} = -\IPk{\GG(\xx)\nabla E(\xx)}{\nabla E(\xx)}{} = -\nn{\nabla E(\xx)}_\GG^2 \leq 0,\]
121
+ due to the self-adjoint and positive semi-definite nature of $\GG$.
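+ 
+ An analogous check (again a toy setup with fixed inner products, showing only the nodal block) confirms the monotone decay:
+ 
+ ```python
+ import numpy as np
+ from scipy.linalg import expm
+ 
+ d0 = np.array([[-1, 1, 0], [-1, 0, 1], [0, -1, 1]], dtype=float)
+ rng = np.random.default_rng(0)
+ def spd(n):
+     m = rng.standard_normal((n, n)); return m @ m.T + n * np.eye(n)
+ 
+ A0, A1 = spd(3), spd(3)
+ Delta0 = np.linalg.solve(A0, d0.T @ A1 @ d0)  # Delta_0 = d0* d0
+ C = Delta0 @ np.linalg.inv(A0)                # qdot = -Delta_0 A0^{-1} q
+ 
+ q0 = rng.standard_normal(3)
+ E = [0.5 * np.sum((expm(-t * C) @ q0) ** 2) for t in np.linspace(0, 2, 21)]
+ assert all(a >= b - 1e-12 for a, b in zip(E, E[1:]))  # energy never increases
+ ```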
122
+
123
+ The architecture in GRAND is almost a gradient flow; however, the pre-attention mechanism lacks the requisite symmetry to formally induce a valid inner product.
124
+
125
+ \noindent Double bracket case: Another useful formulation for incomplete systems is the so-called double-bracket formalism. Consider solving $\dot{\xx} = \LL\nabla E + \LL^2\nabla E$, or
126
+ \[ \begin{pmatrix} \dot{\qq} \\ \dot{\pp} \end{pmatrix} = \begin{pmatrix} 0 & -d_0^* \\ d_0 & 0 \end{pmatrix} \begin{pmatrix} \bb{A}_0^{-1}\qq \\ \bb{A}_1^{-1}\pp \end{pmatrix} + \begin{pmatrix} -d_0^*d_0 & 0 \\ 0 & -d_0d_0^* \end{pmatrix} \begin{pmatrix} \bb{A}_0^{-1}\qq \\ \bb{A}_1^{-1}\pp \end{pmatrix} = \begin{pmatrix} -\Delta_0\bb{A}_0^{-1}\qq - d_0^*\bb{A}_1^{-1}\pp \\ d_0\bb{A}_{0}^{-1}\qq - d_0d_0^*\bb{A}_1^{-1}\pp \end{pmatrix}. \]
127
+ This provides a dissipative relationship which preserves the Casimirs of the Poisson bracket generated by $\LL$, since $\LL\nabla C = \bm{0}$ implies $\LL^2\nabla C = \bm{0}$. In particular, it follows that
128
+ \[ \dot{E}(\xx) = \IPk{\dot{\xx}}{\nabla E(\xx)}{} = \IPk{\LL(\xx)\nabla{E}(\xx) + \LL^2(\xx)\nabla E(\xx)}{\nabla E(\xx)}{} = 0 -\nn{\LL(\xx)\nabla E(\xx)}^2 \leq 0, \]
129
+ since $\LL$ is skew-adjoint and therefore $\LL^2$ is self-adjoint.
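+ 
+ With fixed inner products this system is again linear, and the dissipation inequality can be verified directly (same toy triangle setup; $E$ is the total kinetic energy from above):
+ 
+ ```python
+ import numpy as np
+ from scipy.linalg import expm
+ 
+ d0 = np.array([[-1, 1, 0], [-1, 0, 1], [0, -1, 1]], dtype=float)
+ rng = np.random.default_rng(0)
+ def spd(n):
+     m = rng.standard_normal((n, n)); return m @ m.T + n * np.eye(n)
+ 
+ A0, A1 = spd(3), spd(3)
+ d0s = np.linalg.solve(A0, d0.T @ A1)   # d0* = A0^{-1} d0^T A1
+ L = np.block([[np.zeros((3, 3)), -d0s], [d0, np.zeros((3, 3))]])
+ Ainv = np.block([[np.linalg.inv(A0), np.zeros((3, 3))],
+                  [np.zeros((3, 3)), np.linalg.inv(A1)]])
+ F = (L + L @ L) @ Ainv                 # xdot = L grad E + L^2 grad E
+ 
+ x0 = rng.standard_normal(6)
+ E = [0.5 * np.sum((expm(t * F) @ x0) ** 2) for t in np.linspace(0, 2, 21)]
+ assert all(a >= b - 1e-12 for a, b in zip(E, E[1:]))  # E dissipates monotonically
+ ```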
130
+
131
+ It is interesting to note that the matrix $\LL$ is essentially a Dirac operator (square root of the Hodge Laplacian $\Delta = (d+d^*)^2$) restricted to cliques of degree at most 1. However, here $\LL^2 = -\Delta$, so that $\LL$ is in some sense ``pure imaginary''.
132
+
133
+ \noindent Metriplectic case: Metriplectic systems are expressible as $\dot{\xx} = \LL\nabla E + \MM\nabla S$ where $E,S$ are energy resp. entropy functions which satisfy the degeneracy conditions $\LL\nabla S = \MM\nabla E = \bm{0}$. One way of setting this up in the present case is to define the energy and entropy functions
134
+
135
+ \begin{align*}
+ E(\qq,\pp) &= f_E\lr{s(\qq)} + g_E\lr{s\lr{d_0d_0^\intercal\pp}}, \\
136
+ S(\qq,\pp) &= g_S\lr{s\lr{d_1^\intercal d_1\pp}},
+ \end{align*}
137
+
138
+ where $s$ is sum aggregation over nodes resp. edges, $f_E:\mathbb{R}^{n_V}\to\mathbb{R}$ acts on node features, and $g_E,g_S:\mathbb{R}^{n_E}\to\mathbb{R}$ act on edge features. Denoting the ``all ones'' vector (of variable length) by $\mathbf{1}$, it is shown in Appendix that the $\bb{A}$-gradients of energy and entropy can be computed as
139
+ \[ \nabla E(\xx) = \begin{pmatrix} \bb{A}_0^{-1}\mathbf{1}\otimes\nabla f_E\lr{s(\qq)} \\ \bb{A}_1^{-1}d_0d_0^\intercal\mathbf{1}\otimes\nabla g_E\lr{s\lr{d_0d_0^\intercal\pp}} \end{pmatrix}, \qquad \nabla S(\xx) = \begin{pmatrix} 0 \\ \bb{A}_1^{-1}d_1^\intercal d_1\mathbf{1}\otimes\nabla g_S\lr{s\lr{d_1^\intercal d_1\pp}} \end{pmatrix}. \]
140
+ Similarly, it is shown in Appendix that the degeneracy conditions $\LL\nabla S = \MM\nabla E = \bm{0}$ are satisfied by construction. Therefore, the governing dynamical system becomes
141
+
142
+ \[ \begin{pmatrix} \dot{\qq} \\ \dot{\pp} \end{pmatrix} = \LL\nabla E + \MM\nabla S = \begin{pmatrix} -\bb{A}_0^{-1}d_0^\intercal d_0 d_0^\intercal\mathbf{1}\otimes\nabla g_E\lr{s\lr{d_0d_0^\intercal\pp}} \\ d_0\bb{A}_0^{-1}\mathbf{1}\otimes\nabla f_E\lr{s(\qq)} + \bb{A}_1d_1^*d_1d_1^\intercal d_1\mathbf{1}\otimes\nabla g_S\lr{s\lr{d_1^\intercal d_1\pp}} \end{pmatrix}. \]
143
+
144
+
145
+ With this, it follows that the system obeys a version of the first and second laws of thermodynamics,
146
+
147
+ \begin{align*}
+ \dot{E}(\xx) &= \IPk{\dot{\xx}}{\nabla E(\xx)} = \IPk{\LL\nabla E(\xx)}{\nabla E(\xx)} + \IPk{\MM\nabla S(\xx)}{\nabla E(\xx)} = 0 + \IPk{\nabla S(\xx)}{\MM\nabla E(\xx)} = 0, \\
148
+ \dot{S}(\xx) &= \IPk{\dot{\xx}}{\nabla S(\xx)} = \IPk{\LL\nabla E(\xx)}{\nabla S(\xx)} + \IPk{\MM\nabla S(\xx)}{\nabla S(\xx)} = 0 + \nn{\nabla S(\xx)}_{\MM}^2 \geq 0.
+ \end{align*}
149
+
150
+ As seen in the increased complexity of this formulation, enforcing the degeneracy conditions necessary for metriplectic structure is nontrivial. This is accomplished presently via an application of the exact sequence property in Theorem , which we derive in Appendix .
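+ 
+ The degeneracy conditions are also easy to test numerically. The sketch below (toy triangle setup with scalar features, so that the outer products with $\nabla f_E, \nabla g_E, \nabla g_S$ reduce to scalar factors and can be dropped) verifies $\LL\nabla S = \bm{0}$ and $\MM\nabla E = \bm{0}$:
+ 
+ ```python
+ import numpy as np
+ 
+ d0 = np.array([[-1, 1, 0], [-1, 0, 1], [0, -1, 1]], dtype=float)
+ d1 = np.array([[1, -1, 1]], dtype=float)
+ rng = np.random.default_rng(0)
+ def spd(n):
+     m = rng.standard_normal((n, n)); return m @ m.T + n * np.eye(n)
+ 
+ A0, A1, A2 = spd(3), spd(3), spd(1)
+ d0s = np.linalg.solve(A0, d0.T @ A1)   # d0* = A0^{-1} d0^T A1
+ d1s = np.linalg.solve(A1, d1.T @ A2)   # d1* = A1^{-1} d1^T A2
+ ones = np.ones(3)
+ 
+ gradE_p = np.linalg.solve(A1, d0 @ d0.T @ ones)  # edge block of grad E
+ gradS_p = np.linalg.solve(A1, d1.T @ d1 @ ones)  # edge block of grad S (node block is 0)
+ 
+ assert np.allclose(-d0s @ gradS_p, 0)                      # node block of L grad S
+ assert np.allclose(A1 @ (d1s @ (d1 @ (A1 @ gradE_p))), 0)  # edge block of M grad E
+ ```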
151
+
152
+ It is worth mentioning that, similar to the other architectures presented in this Section, the metriplectic network proposed here exhibits linear $O(N)$ scaling in the graph size. This is in notable contrast to previous metriplectic constructions , which scale as $O(N^3)$.
2305.19922/main_diagram/main_diagram.drawio ADDED
@@ -0,0 +1 @@
1
+ <mxfile host="app.diagrams.net" modified="2023-01-15T15:20:09.526Z" agent="5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/108.0.0.0 Safari/537.36" etag="s9VtT-4uFOkKhObwmjwE" version="20.8.3" type="google"><diagram id="NiSyd7cpHhrbWZtZuS4o" name="Page-2">5Vrbbts4EP0aA9uHFhKpWx5rO+0ukAJB8tD2qaAl2uJGFhWaii9fX1IiJdGSbWUjr41GCWJyOByO5gwPL84ITpabrwxl8Tca4WQErGgzgtMRADYAjviQkm0pCYKgFCwYiZRSLXgkO6yElpLmJMIrQ5FTmnCSmcKQpikOuSFDjNG1qTaniTlqhhZqRKsWPIYowS217yTisXoLt6H9NyaLWI9sW6plibSyMrGKUUTXjbHg7QhOGKW8LC03E5zI4Om4lIa+HGitHGM45X06/EPZ7jn4/M2dPOTOOF7gp+enj8rKC0py9cLKWb7VERB+Z7IY0TBfFmON1zHh+DFDoZSvBfBCFvNlImq2KM5onkY4uptpwYoz+oQnNKGssAm94qladGQdIZnTlKs0AL6skyRp9Jy78kf2LHWsT5bnAtuHNza0HOj4gQjsOEKrGEdqdGmxYaF8hLwdQB0NzDjeNEQqoF8xXWLOtkJFt1qOglunt1tW13WueDqz4kaeQE8pIpWfi8p2DaEoKBRfgWgbQByJjFZVynhMFzRFyW0tHYc5e6nCxUr0ZGxFre5wR2mmVP7FnG8VSCjn1IQfbwj/UUDjqtrPRst0oywXla2upOLdG51k9Wezre5W1HS/VmZZxXMqs2y/nRdVz2bGRXiO8kTmPE5mxeyVyUEEPxxLoBXNWYiPgAQVkyG2wPyInl/qSQSPpiPDCeLkxeSswVMLdpCFVwRnlaHUSDrvOZe8VsT4YzlVPwsF2882ReCsomGtZoRs8ovY1z1FaSE/70iKERMKY5RGhK/0kDOmNbREvFPphymWI/XyLVC+7XtQjVgJPOvlr5E7ycgv8ZfHmKMPwkBh3BKSBKULMRawdiMwkWRQilkp9qyGw7OOlygdNsVnDPArArpHLDVT2KcXBXM+egPxr+Oa/CumcYuA7ZsOAg78M00S50r5t6LcE/xrsG9Nxn8W/7o9+de2r4qA3YME3Jvk/L4kd08TEm7b5PCAM4ZXIuLifWl6lDxa5NaXxYBzyEvPmpsE2sX4rXH7edei3mHpDrFQZ/9gu0/POc1+Tgf7aUYcPEO9K2W//7b7tP9M9vN7sp97VeTn9z+qcjEd8Y5Kc+MMMyIcwKwpv6+FpybynGywvpdoT2ywh6wzap1YK2QZVZwJpzed2fOleAbiBhDYBjfALm7oOpra3rnIIehA0JG/xlZayfax7cW3OpgpTfEprEDQnoUVAk0MlbGiq/Kn4/JlAISCngidDaCbwwAV0Px6uiQ4nRR5LnBsew8c59Lg6BvRK1hbdblxRriWmx1w4bVVp83JxRUOvbiqrveUFEcBncguNBMZuPATAFb9QNNk6biyspeylVtvyOKuG2fzDKO/A9Cbf7t5Dug4PBw9zLzhSDShqUi1PJROTXFIVsV5x3rE/IKniEEWHt81UqLisSa3Qff/5DZwYOXZvaclB/gmLC689JLTdeMr0Zi+I1gcGJiweBffCTin9mkjfywAEszpT98PTq0dm1zoLg3V26/sDl6GPeDs4a7vsvPqhaYzAw7h2Dz9Do2rBfe+Yj3f9BPV+vv4csdT/1cDvP0N</diagram></mxfile>