Eric03 committed on
Commit f5d479d · verified · 1 Parent(s): c6d2e5c

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes. See raw diff
Files changed (50)
  1. 2010.00784/main_diagram/main_diagram.drawio +0 -0
  2. 2010.00784/paper_text/intro_method.md +3 -0
  3. 2102.07631/main_diagram/main_diagram.drawio +1 -0
  4. 2102.07631/main_diagram/main_diagram.pdf +0 -0
  5. 2102.07631/paper_text/intro_method.md +27 -0
  6. 2103.14024/main_diagram/main_diagram.drawio +0 -0
  7. 2103.14024/paper_text/intro_method.md +137 -0
  8. 2110.00966/main_diagram/main_diagram.drawio +1 -0
  9. 2110.00966/main_diagram/main_diagram.pdf +0 -0
  10. 2110.00966/paper_text/intro_method.md +92 -0
  11. 2112.05787/main_diagram/main_diagram.drawio +1 -0
  12. 2112.05787/main_diagram/main_diagram.pdf +0 -0
  13. 2112.05787/paper_text/intro_method.md +56 -0
  14. 2204.10356/main_diagram/main_diagram.drawio +1 -0
  15. 2204.10356/main_diagram/main_diagram.pdf +0 -0
  16. 2204.10356/paper_text/intro_method.md +83 -0
  17. 2206.13464/main_diagram/main_diagram.drawio +0 -0
  18. 2206.13464/paper_text/intro_method.md +81 -0
  19. 2207.07697/main_diagram/main_diagram.drawio +0 -0
  20. 2207.07697/paper_text/intro_method.md +105 -0
  21. 2210.02412/main_diagram/main_diagram.drawio +1 -0
  22. 2210.02412/main_diagram/main_diagram.pdf +0 -0
  23. 2210.02412/paper_text/intro_method.md +115 -0
  24. 2210.10664/main_diagram/main_diagram.drawio +1 -0
  25. 2210.10664/main_diagram/main_diagram.pdf +0 -0
  26. 2210.10664/paper_text/intro_method.md +96 -0
  27. 2210.12524/main_diagram/main_diagram.drawio +0 -0
  28. 2210.12524/paper_text/intro_method.md +86 -0
  29. 2211.00164/main_diagram/main_diagram.drawio +0 -0
  30. 2211.00164/paper_text/intro_method.md +73 -0
  31. 2211.01910/main_diagram/main_diagram.drawio +0 -0
  32. 2211.01910/paper_text/intro_method.md +45 -0
  33. 2212.07634/main_diagram/main_diagram.drawio +1 -0
  34. 2212.07634/paper_text/intro_method.md +146 -0
  35. 2301.12217/main_diagram/main_diagram.drawio +1 -0
  36. 2301.12217/paper_text/intro_method.md +191 -0
  37. 2302.03985/main_diagram/main_diagram.drawio +1 -0
  38. 2302.03985/main_diagram/main_diagram.pdf +0 -0
  39. 2302.03985/paper_text/intro_method.md +137 -0
  40. 2304.05939/main_diagram/main_diagram.drawio +0 -0
  41. 2304.05939/paper_text/intro_method.md +100 -0
  42. 2304.06668/main_diagram/main_diagram.drawio +0 -0
  43. 2304.06668/paper_text/intro_method.md +85 -0
  44. 2305.13072/main_diagram/main_diagram.drawio +503 -0
  45. 2305.13072/main_diagram/main_diagram.pdf +0 -0
  46. 2305.13072/paper_text/intro_method.md +69 -0
  47. 2307.14680/main_diagram/main_diagram.drawio +610 -0
  48. 2307.14680/main_diagram/main_diagram.pdf +0 -0
  49. 2307.14680/paper_text/intro_method.md +58 -0
  50. 2308.11272/main_diagram/main_diagram.drawio +118 -0
2010.00784/main_diagram/main_diagram.drawio ADDED
The diff for this file is too large to render. See raw diff
 
2010.00784/paper_text/intro_method.md ADDED
@@ -0,0 +1,3 @@
+ # Method
+
+ Data selection is an important component of both supervised and unsupervised training. In our case, there is an abundance of data with which to build both the Fisher matrix and the replay buffer. To do this efficiently for EWC and ER, we need to severely restrict the number of datapoints we utilize; for example, a mere $1.0\%$ of generic pre-training data makes up over 400k segments. We require this subset to be comprehensively representative of the domain. Therefore, rather than sampling data at random, we can use model-generated features to induce better coverage of previous domains.
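As a concrete illustration of this coverage idea, the sketch below clusters model-generated features and keeps one datapoint per cluster. The k-means mechanism and the `coverage_subset` helper are our own assumptions for illustration; the excerpt does not specify how coverage is induced.

```python
import numpy as np
from sklearn.cluster import KMeans

def coverage_subset(features: np.ndarray, budget: int, seed: int = 0) -> np.ndarray:
    """Illustrative coverage-based selection: cluster the model-generated
    features and keep the datapoint nearest each centroid, so the kept subset
    spans the domain rather than a random slice of it."""
    km = KMeans(n_clusters=budget, random_state=seed).fit(features)
    # Distance from every datapoint to every centroid: shape (n_points, budget).
    dists = np.linalg.norm(features[:, None, :] - km.cluster_centers_[None], axis=-1)
    # One representative index per cluster (duplicates removed).
    return np.unique(dists.argmin(axis=0))
```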
2102.07631/main_diagram/main_diagram.drawio ADDED
The diff for this file is too large to render. See raw diff
2102.07631/main_diagram/main_diagram.pdf ADDED
Binary file (28.6 kB). View file
 
2102.07631/paper_text/intro_method.md ADDED
@@ -0,0 +1,27 @@
+ # Introduction
+
+ Development of vaccines for COVID-19 is a major triumph of modern medicine and humankind's ability to accelerate scientific research. While we all hope to see large-scale positive changes from the rapid mass adoption of the existing vaccines, significant open research questions around COVID-19 remain. The scientific community has a responsibility to do everything possible to block the ongoing transmission of this dangerous virus and to accelerate research that mitigates its consequences. We present the following automated knowledge discovery system in order to propose new tools that could complement the existing arsenal of techniques for accelerating biomedical and drug discovery research during events like COVID-19.
+
+ The COVID-19 pandemic has been one of the most important events in the information space since the end of 2019. The pace of published scientific information is unprecedented and spans all resolutions, from news and pop-science articles to drug design at the molecular level. The pace of scientific research has been a significant problem in science for years [\[29\]](#page-9-0), and under the current circumstances this factor becomes even more pronounced. Several thousand papers are added weekly to CORD-19[1](#page-0-0) (the dataset of publications related to COVID-19), and even more to MEDLINE[2](#page-0-1). As a result, groups working on similar problems may not be immediately aware of each other's findings, which can lead to inefficient investments and production delays.
+
+ Under normal circumstances, the MEDLINE database of biomedical citations receives approximately 950,000 new papers per year, and it currently indexes 31 million total citations. This pace challenges traditional research methods, which often rely on human intuition when searching for relevant information. As a result, the demand for modern AI solutions to help with the automated analysis of scientific information is incredibly high. For instance, the field of drug discovery has explored a range of AI analytical tools to expedite new treatments [\[13\]](#page-9-1). Designing lab experiments and finding candidate chemical compounds is a costly and long-lasting procedure, often taking years. To accelerate scientific discovery, researchers have developed a family of strategies that utilize public knowledge from databases like MEDLINE, available through the National Institutes of Health (NIH), to facilitate automated hypothesis generation (HG), also known as literature-based discovery. Undiscovered public knowledge, i.e., information that is implicitly present within the available literature but not yet explicitly known by an individual who can act on it, is the target of our work.
+
+ <span id="page-0-0"></span><sup>1</sup>https://www.semanticscholar.org/cord19
+
+ <span id="page-0-1"></span><sup>2</sup>https://www.nlm.nih.gov/bsd/stats/cit\_added.html
+
+ ![](_page_1_Figure_1.jpeg)
+
+ Figure 1: Number of new citations per week in the CORD-19 dataset.
+
+ Although there are quite a few hypothesis generation (HG) systems [\[13\]](#page-9-1), including those we have previously proposed [\[35,](#page-9-2) [37\]](#page-9-3), none of them is currently customized for COVID-19 and openly available to massively process related queries. In addition to the traditional requirements for HG systems, such as high-quality hypotheses, interpretability, and availability to the broad scientific community, COVID-19 data analysis specifically requires: (1) customization of the vocabulary and other logical units such as subject-verb-object predicates; (2) customization of the training data, which in the reality of urgent research contains much controversial and incorrect information; (3) multiple models for different information resolutions (e.g., microscopic for drug design, and macroscopic for population-level conclusions); and (4) validation on ongoing domain-specific discoveries.
+
+ Our contribution: In this work we bridge this gap by releasing AGATHA-C and AGATHA-GP, reliable and easy-to-use HG systems that demonstrate state-of-the-art performance, and we validate their inference capabilities on both COVID-19-related and general biomedical data. To align them with the different goals of COVID-19 research, they correspond to the microscopic (AGATHA-C, for COVID-19) and macroscopic (AGATHA-GP, for general purpose) scales of knowledge discovery. Both systems are trained on all existing biomedical literature available through NIH and CORD-19 and can process arbitrary queries connecting biomedical concepts, but AGATHA-C exhibits better results on molecular-scale queries, e.g., those relevant to drug design, while AGATHA-GP works better for general queries, e.g., establishing connections between a certain profession and COVID-19 transmission. As will be explained later, we emphasize that AGATHA is not a traditional information retrieval system that searches for existing information, and thus cannot be compared to such systems. Instead, AGATHA generates novel hypotheses.
+
+ Both systems are the next generation of the AGATHA knowledge network mining transformer model [\[37\]](#page-9-3). They substantially improve on the previous AGATHA by introducing new information layers into the multi-layered semantic knowledge network pipeline and by expanding the information retrieval techniques that facilitate inference. We present the deep learning transformer-based AGATHA-C/GP models trained on up-to-date datasets and provide an easy-to-use interface for the broad scientific community to conduct COVID-19 research. We validate the system via candidate ranking [\[36,](#page-9-4) [37\]](#page-9-3) using very recent scientific publications containing findings absent from the training set. While the original AGATHA demonstrated state-of-the-art performance at the time of its release, AGATHA and other systems were found to perform with notably lower quality on the extremely rapidly changing COVID-19 research. We demonstrate a remarkable improvement on different types of queries, with a very fast query process that allows massive validation. In addition, we demonstrate that the proposed system can identify recently uncovered gene (BST2) and hormone (oxytocin and melatonin) relationships to COVID-19 using only papers published before these connections were discovered.
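As a concrete picture of what candidate-ranking validation can look like, the sketch below scores held-out concept pairs and computes a ROC AUC. Here `score_fn` and the pair lists are placeholders; the excerpt does not give the exact protocol.

```python
from sklearn.metrics import roc_auc_score

def temporal_holdout_auc(score_fn, positive_pairs, negative_pairs):
    """Rank concept pairs that first co-occur in publications released after
    the training cutoff (positives) against sampled unconnected pairs
    (negatives). A higher ROC AUC means the HG system ranks soon-to-be-
    discovered connections, such as BST2 and COVID-19, near the top."""
    scores = [score_fn(a, b) for a, b in positive_pairs + negative_pairs]
    labels = [1] * len(positive_pairs) + [0] * len(negative_pairs)
    return roc_auc_score(labels, scores)
```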
+
+ Reproducibility: All code, details, and pre-trained models are available at [https://github.com/IlyaTyagin/AGATHA-C-GP](https://github.com/IlyaTyagin/AGATHA-C-GP).
2103.14024/main_diagram/main_diagram.drawio ADDED
The diff for this file is too large to render. See raw diff
 
2103.14024/paper_text/intro_method.md ADDED
@@ -0,0 +1,137 @@
+ # Introduction
+
+ Despite the progress of real-time graphics, interactive 3D content with truly photorealistic scenes and objects is still time-consuming and costly to produce, due to the need for optimized 3D assets and dedicated shaders. Instead, many graphics applications opt for image-based solutions. E-commerce websites often use a fixed set of views to showcase their products; VR experiences often rely on 360° video recordings to avoid the costly production of real 3D scenes; and mapping services such as Google Street View stitch images into panoramic views limited to 3 DOF.
+
+ ![](_page_0_Figure_8.jpeg)
+
+ ![](_page_0_Figure_9.jpeg)
+
+ Figure 1: Real-time NeRF with PlenOctrees. Given a set of posed images of a scene, our method creates a 3D volumetric model that can be rendered in real time. We propose PlenOctrees, octrees that can capture view-dependent effects such as specularities. Rendering using our approach is orders of magnitude faster than NeRF.
+
+ Recent advances in neural rendering, such as neural volumes [\[24\]](#page-8-0) and neural radiance fields (NeRFs) [\[30\]](#page-8-1), open a promising new avenue to model arbitrary objects and scenes in 3D from a set of calibrated images. NeRFs in particular can faithfully render detailed scenes and appearances with non-Lambertian effects from any view, while simultaneously offering a high degree of compression in terms of storage. Partly due to these exciting properties, there has of late been an explosion of research based on NeRF.
+
+ Nevertheless, for practical applications, runtime performance remains a critical limitation of NeRFs: due to the extreme sampling requirements and costly neural network queries, rendering a NeRF is agonizingly slow. For illustration, it takes roughly 30 seconds to render an 800x800 image from a NeRF using a high-performance GPU, making it impractical for real-time interactive applications.
+
+ In this work, we propose a method for rendering a NeRF in real time, achieved by distilling the NeRF into a hierarchical 3D volumetric representation. Our approach preserves NeRF's ability to synthesize arbitrarily complex geometry and view-dependent effects from any viewpoint and requires no additional supervision. In fact, our method achieves, and in many cases surpasses, the quality of the original NeRF formulation, while providing significant acceleration. Our model allows us to render an 800x800 image at 167.68 FPS on an NVIDIA V100 GPU and does not rely on a deep neural network at test time. Moreover, our representation is amenable to modern web technologies, allowing interactive rendering in a browser on consumer laptops.
+
+ Naive NeRF rendering is slow because it requires dense sampling of the scene, where every sample requires a neural network inference. Because these queries depend on the viewing direction as well as the spatial position, one cannot naively cache these color values for all viewing directions.
+
+ We overcome these challenges and enable real-time rendering by pre-sampling the NeRF into a tabulated view-dependent volume which we refer to as a PlenOctree, named after the plenoptic functions of Adelson and Bergen [\[1\]](#page-8-2). Specifically, we use a sparse voxel-based octree where every leaf of the tree stores the appearance and density values required to model the radiance at a point in the volume. To account for non-Lambertian materials that exhibit view-dependent effects, we propose to represent the RGB values at a location with spherical harmonics (SH), a standard basis for functions defined on the surface of the sphere. The spherical harmonics can be evaluated at arbitrary query viewing directions to recover the view-dependent color.
+
+ Although one could convert an existing NeRF into such a representation via projection onto the SH basis functions, we show that we can in fact modify a NeRF network to predict appearances explicitly in terms of spherical harmonics. Specifically, we train a network that produces coefficients for the SH functions instead of raw RGB values, so that the predicted values can later be stored directly within the leaves of the PlenOctree. We also introduce a sparsity prior during NeRF training to improve the memory efficiency of our octrees, consequently allowing us to render higher quality images. Furthermore, once the structure is created, the values stored in a PlenOctree can be optimized because the rendering procedure remains differentiable. This enables the PlenOctree to obtain similar or better image quality compared to NeRF. Our pipeline is illustrated in Fig. [2.](#page-2-0)
+
+ Additionally, we demonstrate how our proposed pipeline can be used to accelerate NeRF model training, making our solution more practical to train than the original NeRF approach. Specifically, we can stop training the NeRF model early to convert it into a PlenOctree, which can then be trained significantly faster as it no longer involves any neural networks.
+
+ Our experiments demonstrate that our approach can accelerate NeRF-based rendering by 5 orders of magnitude without loss in image quality. We compare our approach on standard benchmarks with scenes and objects captured from 360° views, and demonstrate state-of-the-art performance for image quality and rendering speed.
+
+ Our interactive viewer can enable operations such as object insertion, visualizing radiance distributions, decomposing the SH components, and slicing the scene. We hope that these real-time operations can be useful to the community for visualizing and debugging NeRF-based representations.
+
+ To summarize, we make the following contributions:
+
+ - The first method that achieves real-time rendering of NeRFs with similar or improved quality.
+ - NeRF-SH: a modified NeRF that is trained to output appearance in terms of spherical basis functions.
+ - PlenOctree, a data structure derived from NeRFs which enables highly efficient view-dependent rendering of complex scenes.
+ - An accelerated NeRF training method using early training termination, followed by direct fine-tuning on PlenOctree values.
+
+ # Method
+
+ We propose a pipeline that enables real-time rendering of NeRFs. Given a trained NeRF, we can convert it into a PlenOctree, an efficient data structure able to represent non-Lambertian effects in a scene. Specifically, it is an octree which stores spherical harmonics (SH) coefficients at its leaves, encoding view-dependent radiance.
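As a rough illustration of this data structure (the concrete layout below is our assumption, not the paper's implementation), each leaf carries a density and per-channel SH coefficients:

```python
from dataclasses import dataclass
from typing import List, Optional
import numpy as np

SH_DIM = 9  # number of SH basis functions for l_max = 2; the actual order may differ

@dataclass
class PlenOctreeNode:
    children: Optional[List["PlenOctreeNode"]] = None  # 8 children, or None for a leaf
    sigma: float = 0.0                                  # density stored at a leaf
    sh: Optional[np.ndarray] = None                     # (SH_DIM, 3) coefficients, one set per RGB channel
```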
+
+ To make the conversion to PlenOctrees more straightforward, we also propose *NeRF-SH*, a variant of the NeRF network which directly outputs the SH coefficients, thus eliminating the need for a view-direction input to the network. With this change, the conversion can be performed by evaluating the network on a uniform grid followed by thresholding. We fine-tune the octree on the training images to further improve image quality. Please see Fig. 2 for a graphical illustration of our pipeline.
+
+ The conversion process leverages the continuous nature of NeRF to dynamically obtain the spatial structure of the octree. We show that even with a partially trained NeRF, our PlenOctree is capable of producing results competitive with the fully trained NeRF.
+
+ SHs have been a popular low-dimensional representation for spherical functions and have been used to model Lambertian surfaces [36, 2] or even glossy surfaces [46]. Here we explore their use in a volumetric context. Specifically, we adapt the NeRF network $f$ to output spherical harmonics coefficients $\mathbf{k}$, rather than RGB values:
+
+ $$f(\mathbf{x}) = (\mathbf{k}, \sigma) \quad \text{where} \quad \mathbf{k} = (k_\ell^m)_{\ell: \, 0 \leq \ell \leq \ell_{\text{max}}}^{m: \, -\ell \leq m \leq \ell} \tag{4}$$
+
+ Each $k_\ell^m \in \mathbb{R}^3$ is a set of 3 coefficients corresponding to the RGB components. In this setup, the view-dependent color $\mathbf{c}$ at a point $\mathbf{x}$ may be determined by querying the SH functions $Y_\ell^m : \mathbb{S}^2 \mapsto \mathbb{R}$ at the desired viewing angle $\mathbf{d}$:
+
+ $$c(\mathbf{d}; \mathbf{k}) = S\left(\sum_{\ell=0}^{\ell_{\text{max}}} \sum_{m=-\ell}^{\ell} k_{\ell}^{m} Y_{\ell}^{m}(\mathbf{d})\right) \tag{5}$$
+
+ where $S: x \mapsto (1+\exp(-x))^{-1}$ is the sigmoid function for normalizing the colors. In other words, we factorize the view-dependent appearance with the SH basis, eliminating the view-direction input to the network and removing the need to sample view directions at conversion time. Please see the appendix for a more technical discussion of SHs. With a single evaluation of the network, we can now efficiently query colors from arbitrary viewing angles at inference time. In Fig. 7, one can see that NeRF-SH training speed is similar to, but slightly faster than, NeRF (by about 10%).
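A minimal sketch of Eq. (5) for $\ell_{\text{max}} = 1$ (four basis functions with the standard real-SH constants; the paper's actual SH order may be higher):

```python
import numpy as np

def sh_basis_l1(d: np.ndarray) -> np.ndarray:
    """Real spherical harmonics Y_l^m up to l = 1, evaluated at direction d."""
    x, y, z = d / np.linalg.norm(d)
    return np.array([0.282095,       # Y_0^0
                     0.488603 * y,   # Y_1^{-1}
                     0.488603 * z,   # Y_1^0
                     0.488603 * x])  # Y_1^1

def sh_color(k: np.ndarray, d: np.ndarray) -> np.ndarray:
    """Eq. (5): c(d; k) = S(sum_lm k_l^m Y_l^m(d)), with k of shape (4, 3),
    one coefficient vector per RGB channel, and S the sigmoid."""
    raw = sh_basis_l1(d) @ k            # (3,) pre-activation RGB
    return 1.0 / (1.0 + np.exp(-raw))   # sigmoid S normalizes the colors
```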
+
+ Note that we can also project a trained NeRF to SHs directly, by sampling NeRF at random directions at each point and multiplying by the SH component values to form Monte Carlo estimates of the inner products. However, this sampling process takes several hours to achieve reasonable quality and imposes a quality loss of about 2 dB.[1](#page-3-0) Nevertheless, this alternative approach offers a pathway to convert existing NeRFs into PlenOctrees.
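Since the real SH basis is orthonormal, each coefficient is the inner product $k_\ell^m = \int_{\mathbb{S}^2} c(\mathbf{d})\, Y_\ell^m(\mathbf{d})\, d\mathbf{d}$, which uniform direction sampling estimates as $(4\pi/N)\sum_i c(\mathbf{d}_i)\, Y_\ell^m(\mathbf{d}_i)$. A sketch of that estimator follows, with `color_fn` (the trained NeRF queried at point `x` and direction `d`) and `sh_basis_fn` as placeholder callables:

```python
import numpy as np

def project_to_sh(color_fn, x, sh_basis_fn, n_samples=10_000, seed=0):
    """Monte Carlo projection of a trained NeRF's view-dependent color at
    point x onto the SH basis: k ≈ (4π / N) · Σ_i c(x, d_i) · Y(d_i)."""
    rng = np.random.default_rng(seed)
    d = rng.normal(size=(n_samples, 3))
    d /= np.linalg.norm(d, axis=1, keepdims=True)   # uniform directions on the sphere
    Y = np.stack([sh_basis_fn(di) for di in d])     # (N, n_sh) basis values
    C = np.stack([color_fn(x, di) for di in d])     # (N, 3) RGB queried from the NeRF
    return 4.0 * np.pi * (Y.T @ C) / n_samples      # (n_sh, 3) estimated coefficients
```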
+
+ Other than SHs, we also experiment with Spherical Gaussians (SG) [8], a learnable spherical basis which has been used to represent all-frequency lighting [54, 46, 21]. We find that SHs perform better in our use case and provide an ablation in the appendix.
+
+ <span id="page-3-0"></span><sup>1</sup>With 10000 view-direction samples per point, taking about 2 hours, the PSNR is 29.21 vs. 31.02 for our main method prior to optimization.
+
+ | Synthetic NeRF Dataset | PSNR ↑ | SSIM ↑ | LPIPS ↓ | FPS ↑ |
+ |------------------------------|--------|--------|---------|--------|
+ | NeRF (original) | 31.01 | 0.947 | 0.081 | 0.023 |
+ | NeRF | 31.69 | 0.953 | 0.068 | 0.045 |
+ | SRN | 22.26 | 0.846 | 0.170 | 0.909 |
+ | Neural Volumes | 26.05 | 0.893 | 0.160 | 3.330 |
+ | NSVF | 31.75 | 0.953 | 0.047 | 0.815 |
+ | AutoInt (8 sections) | 25.55 | 0.911 | 0.170 | 0.380 |
+ | NeRF-SH | 31.57 | 0.952 | 0.063 | 0.051 |
+ | PlenOctree from NeRF-SH | 31.02 | 0.951 | 0.066 | 167.68 |
+ | PlenOctree after fine-tuning | 31.71 | 0.958 | 0.053 | 167.68 |
+
+ Table 1: Quantitative results on the NeRF-synthetic test scenes. Our approach is significantly faster than all existing methods during inference while performing on par with NSVF, the current state-of-the-art method for image quality. We note that NeRF-SH, the modified NeRF model trained to output SH, performs similarly to the baseline NeRF model. Converting NeRF-SH to a PlenOctree without fine-tuning slightly degrades the image quality metrics; this is remedied by the additional fine-tuning step.
+
+ ![](_page_4_Picture_2.jpeg)
+
+ Figure 3: **Sparsity loss and conversion robustness**. When trained without the sparsity loss, NeRF can often converge to a solution where unobserved portions or the background are solid. This degrades the spatial resolution of our octree-based representation.
+
+ **Sparsity prior.** Without any regularization, the model is free to generate arbitrary geometry in unobserved regions. While this does not directly worsen image quality, it would adversely impact our conversion process, as the extra geometry occupies significant voxel space.
+
+ To solve this problem, we introduce an additional sparsity prior during NeRF training. Intuitively, this prior encourages NeRF to choose empty space when both empty space and solid colors are possible solutions. Formally,
+
+ $$\mathcal{L}_{\text{sparsity}} = \frac{1}{K} \sum_{k=1}^{K} \left| 1 - \exp(-\lambda \sigma_k) \right| \tag{6}$$
+
+ Here, $\{\sigma_k\}_{k=1}^K$ are the density values evaluated at $K$ uniformly random points within the bounding box, and $\lambda$ is a hyperparameter. The final training loss is then $\beta_{\text{sparsity}}\mathcal{L}_{\text{sparsity}} + \mathcal{L}_{\text{RGB}}$, where $\beta_{\text{sparsity}}$ is a hyperparameter. Fig. 3 illustrates the effect of the prior.
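Eq. (6) is a one-liner in practice; a minimal PyTorch sketch (variable names are ours):

```python
import torch

def sparsity_loss(sigma: torch.Tensor, lam: float) -> torch.Tensor:
    """Eq. (6): mean |1 - exp(-λ σ_k)| over densities sampled at K uniformly
    random points in the bounding box; small when the scene is mostly empty."""
    return (1.0 - torch.exp(-lam * sigma)).abs().mean()

# Final objective: loss = loss_rgb + beta_sparsity * sparsity_loss(sigma_samples, lam)
```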
+
+ | Tanks and Temples | PSNR ↑ | SSIM ↑ | LPIPS ↓ | FPS ↑ |
+ |------------------------------|--------|--------|---------|-------|
+ | NeRF (original) | 25.78 | 0.864 | 0.198 | 0.007 |
+ | NeRF | 27.94 | 0.904 | 0.168 | 0.013 |
+ | SRN | 24.10 | 0.847 | 0.251 | 0.250 |
+ | Neural Volumes | 23.70 | 0.834 | 0.260 | 1.000 |
+ | NSVF | 28.40 | 0.900 | 0.153 | 0.163 |
+ | NeRF-SH | 27.82 | 0.902 | 0.167 | 42.22 is not applicable; 0.015 |
+ | PlenOctree from NeRF-SH | 27.34 | 0.897 | 0.170 | 42.22 |
+ | PlenOctree after fine-tuning | 27.99 | 0.917 | 0.131 | 42.22 |
+
+ Table 2: Quantitative results on the Tanks and Temples test scenes. We find that our fine-tuned *PlenOctree* model is significantly faster than existing methods while performing on par in terms of image metrics. Note that the images here are $1920 \times 1080$, compared to $800 \times 800$ in the synthetic dataset.
+
+ Once we have trained a NeRF-SH model, we can convert it into a sparse octree representation for real-time rendering. A PlenOctree stores, at each leaf, a density and the SH coefficients modelling view-dependent appearance. We describe the conversion and rendering processes below.
+
+ **Rendering.** To render the PlenOctree, for each ray we first determine the ray-voxel intersections in the octree structure. This produces a sequence of segments between voxel boundaries, each with constant density and color, of lengths $\{\delta_i\}_{i=1}^N$. NeRF's volume rendering model (1) is then applied to assign a color to the ray. This approach allows large voxels to be skipped in a single step while not missing small voxels.
+
+ At test time, we further accelerate this rendering process by applying early stopping once the ray's accumulated transmittance $T_i$ falls below $\gamma = 0.01$.
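Putting these two paragraphs together, a sketch of compositing the piecewise-constant segments with the early-stopping test (variable names are ours):

```python
import numpy as np

def render_ray(sigmas, colors, deltas, gamma=0.01):
    """NeRF's volume rendering model over constant-density segments of
    lengths delta_i, with early stopping once transmittance T < gamma."""
    rgb, T = np.zeros(3), 1.0
    for sigma, c, delta in zip(sigmas, colors, deltas):
        alpha = 1.0 - np.exp(-sigma * delta)  # opacity contributed by this segment
        rgb += T * alpha * c
        T *= 1.0 - alpha                      # transmittance left after the segment
        if T < gamma:                         # test-time early stopping
            break
    return rgb
```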
+
+ **Conversion from NeRF-SH.** The conversion process can be divided into three steps. At a high level, we evaluate the network on a grid, retaining only the density values, then filter the voxels via thresholding. Finally, we sample random points within each remaining voxel and average them to obtain the SH coefficients stored in the octree leaves. More details are given below.
+
+ *Evaluation.* We first evaluate the NeRF-SH model to obtain $\sigma$ values on a uniformly spaced 3D grid. The grid is automatically scaled to tightly fit the scene content.<sup>2</sup>
+
+ *Filtering.* Next, we filter this grid to obtain a sparse set of voxels centered at the grid points sufficient for representing the scene. Specifically, we render alpha maps for all the training views using this voxel grid, keeping track of the maximum ray weight $1 - \exp(-\sigma_i \delta_i)$ at each voxel. We then eliminate the voxels whose weights are lower than a threshold $\tau_m$. The octree is constructed to contain the remaining voxels as leaves at the deepest level, while being empty elsewhere. Compared to naively thresholding by $\sigma$ at each point, this method eliminates non-visible voxels.
+
+ <span id="page-4-1"></span><sup>2</sup>By pre-evaluating $\sigma$ on a larger grid and finding the bounding box of all points with $\sigma \geq \tau_a$.
+
+ ![](_page_5_Picture_0.jpeg)
+
+ Figure 4: **NeRF-synthetic qualitative results.** Randomly sampled qualitative comparisons between a reimplementation of NeRF and our proposed method. We are unable to find any significant image-quality difference between the two methods. Despite this, our method renders these example images more than 3500x faster.
+
+ *Sampling.* Finally, we sample a set of 256 random points in each remaining voxel and set the associated leaf of the octree to the mean of these values, to reduce aliasing. Each leaf now contains the density $\sigma$ and a vector of spherical harmonics coefficients for each of the RGB color channels.
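A sketch of this sampling step, with `nerf_sh` standing in for the trained network of Eq. (4):

```python
import numpy as np

def leaf_payloads(nerf_sh, voxel_centers, half_size, n_pts=256, seed=0):
    """Average 256 random NeRF-SH evaluations per kept voxel to obtain the
    (sigma, SH-coefficient) payload written into each octree leaf."""
    rng = np.random.default_rng(seed)
    payloads = []
    for center in voxel_centers:
        pts = center + rng.uniform(-half_size, half_size, size=(n_pts, 3))
        ks, sigmas = zip(*(nerf_sh(p) for p in pts))  # each call returns (k, sigma)
        payloads.append((float(np.mean(sigmas)), np.mean(ks, axis=0)))
    return payloads
```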
+
+ This full extraction process takes about 15 minutes.
+
+ Since this volume rendering process is fully differentiable with respect to the tree values, we can directly fine-tune the resulting octree on the original training images using the NeRF loss (3) with SGD, in order to improve image quality. Note that the tree structure is fixed to the one obtained from NeRF in this process. PlenOctree optimization operates at about 3 million rays per second, compared to about 9000 for NeRF training, allowing us to optimize for many epochs in a relatively short time. The analytic derivatives for this process are implemented in custom CUDA kernels. We defer technical details to the appendix.
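Schematically, the fine-tuning loop optimizes the leaf values directly; `render_rays`, `loader`, and `n_leaves` below are placeholders for the custom CUDA renderer and the training data:

```python
import torch

# In practice these are initialized from the converted octree's leaves;
# here they are placeholders of matching shape.
sigma = torch.nn.Parameter(torch.rand(n_leaves))             # leaf densities
sh = torch.nn.Parameter(0.01 * torch.randn(n_leaves, 9, 3))  # leaf SH coefficients
opt = torch.optim.SGD([sigma, sh], lr=1e-2)

for rays, target_rgb in loader:               # batches of training rays
    pred = render_rays(rays, sigma, sh)       # differentiable octree renderer
    loss = ((pred - target_rgb) ** 2).mean()  # NeRF RGB loss
    opt.zero_grad()
    loss.backward()
    opt.step()
```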
+
+ The fast octree optimization indirectly allows us to accelerate NeRF training, as seen in Fig. 7, since we can stop the NeRF-SH training at an earlier point for constructing the PlenOctree, with only a slight degradation in quality.
2110.00966/main_diagram/main_diagram.drawio ADDED
The diff for this file is too large to render. See raw diff
kRgpPGEohXH+Xd029jQwU7Xhvb8J9RLAY3Rf1RQ4xvj7aDmHeem2tWn8NaFPmE5khAFKF66SB6+KyfGvi49lk6a8L2uMjYPOIbCCfqRSkKdydFypx5hzrxuehZALk8qQTnm5gGyBl4CHVtTaUW8saiNIKJuMH5hLaDHGz2jCcLdutv9vL4yAoZvlySTndlC6TDfUYTahyAvNDDMO684xqKBStJBoE/e5pETdVrnJwpmIbIzD3QWlePSyUgIg9N0ZiyC1b85vt0hrNHLeyAeamHA7ZFopHxCBuP23k9alBmoTNHbcD6tzBdKtnheJsMawC3gS+Rc3qWVqKhrsFQcES1gHUNYg8+LIOcyuwi5lujdCRhixSAPxzEFMMv81uTB6hvT0i+NwDw0DI6slazglMYyxjUywFH8hRKlOEOg1+6V4AHSrA8AS5MbYW9b5VbWq75GfJfj4M6Rw9YYLU2Ao+CjJgPTxFNrvQzA8AB/vYmDiky3VC1TWwDDoSkSaIYYaVS5ePiwBU9KmcHs6QFOaIgwkPzOBCRFtnA3PyQP+2xgauJe30QBRBSORYIuC+MxYaGENlMLMTp/7CGli6uHTGpgXxnmEdKLCWcGZL0aQVDc6yuDZqHbM1xLY60+3d96JWteRbonmYtkOKVTdbRh2fIhP6UuYoOMF6Owuleh86aHeaWk6ii6ddN08tWSbP+MutIA7qaEO1JqKVK14QK+ttSnvyKd68uLeanAm/TLceCAvLi5TlRbhRYPxUgdCPCACTC4/opFxZUwcCC4C7IBvA5BEsEqGIWvLkjlIzd5KX6NLZ6htlyQeAybkcECqYNDBBnD3TP+heWwR1uEksQQOF79qju9i2E5pZw60EKjLw7fxoup4gb4b8hvBTAaVnJc9YuYgkWDL2q0JVZILCCKYK1c7uCLIaKEVsAA3nuSnvwqxyByxpNPSHEjAfkXjsnFI3Too3gibxKT4+1KOjBkFlWSatY7F0Jm9EbjdhCN950pYzGlCCfcZis80c2nLyEpNruNg2EMvThygGFqNzHqlIn+NnnNrtTrYGozHVsUaLR6xycmhWrTcAR3Cam1wieFmjaxaLrWl3qL1strpIXTPcr46QJgbMrEcDp15s/SVLUlJ0CGbrZAi93nw4XDc2r+6nEsWc6uO63Rgzl5+LlDeXuLXZQTsmQ7EbVA033g6pmPUoOWfQdFcuzCA22pNGDVFtt7Dm8biSDDcSoH1tHDWD/HHcGqczJDeyLcY2dTL0MzdhoB9wE8gvnGWjNDx6Gnac2wUsu2I1mJg1DlxDz8cUTFlpFqywo9UkY7rz7hFN46xkZVRmFCCidoNZB6dq5oYKzz/Rs+gcDE0fwei0QE6j8/angZFMuD3xPv8ISs8HTybuwuiIHh9Owt6th8s1BvegmHzfzJL5YmA0KbfSVpHVJUpMMe4YjVKzjKSBMNGEAa+13BFKbZPCLFyZdQTvZTGhYU49l+brQc1KE1Zzj/s90PLTwcdaf9/YAL+j9bE1E2aI13N5bj+fB2zMGlExSYF7EZsMWH9/mhbNMcQiqenkvY5HwKAsk1kYepPWSOPK/+v8NkAx2+Gnwxwx7bnJnLDTED8Oeef7PoeeDWlSaEIiZryTyTWDaUNoK2RQjwhrUUIxL4XNcIXIrZzTSyoOE7U0DQeEB+lyH0VKST+hm/FRg5IgLpcasQTyctxjmuuJqrsATt+y4P7fmuAdM6+nhSInxrDdmEO2WyEA23TAG/BnrCendUtbQuzRyKGoxxIB61Ry2pFHa3q8CYFRdQvBMDjJN8q08E/rCSVY5IQ7ETmILeB+++wwkY+HUkCdCdwTXY+3ELdSjoV72ELWEEojtJCbr+IjmZxrlG4qnaCnODTiJdvRbo0hkctAPcr51GcORQBLAt2JAPyHFQGTMNWAZ/bzOdDOoxgbGqyikNc2RozkbxV53NJaaJaxBd7SiYG3TJ/q6wJaBKuZ7B5XY5532GC1XArzBYauV4gZBPW4cidBfUbhX3UXNR0HUpQlxYJpgUUKdirEzMBqr4unnX9xtltMgAWUzCNdI3471d0zFob2aGiYGuEXPDSzcIdwJ/0+14jGSiOMKurO6Yt6LKs6LrSXejWYQ9BglIaMRrN44wwBHn3M8QGoPZF0FJMX6RoBijHTzru5wCrLuRe1W329GfkhsLrAscI6NRDZDNJcs4SK8xPbHAut9IX3EEFoj65Yp3HmVCntoi00ikcig3ooh6FBm2jLCOJhjAxV1YD7bd/k/Vw/mGa7YyPGToMvk63Yrjjz9lhlAaDCwGwXU/pTmHkeNYUKAg5EiEmT8kEUB/fo1OLsAU1YmZfBzHdL6gAvjeNyi8GMtrZihE01dzYpsBTjOEJ5WCEl+huN8d2JzEGLwPleuzb9+TBfQh+pAs+pQImcZUrYYcOsLc0jwDPNCRl1z3gjqOekMdqsEKnJZXj7NE2IZcAaG3+hCfEWxsdCvDitTUJfr6emmK5Om+qYZqEE7ONUa3OjmLHOJhgjvLtvs4RlibbZ82GBoPM9TF0UCGmxDU0ZjIgyAlYOBoMU0/QlZyiWZZGwqIKTlok6iGHGWlw7pQts5ahr2+A53on1uWWV3Lf425Sf4rkgPenJOEYU3ZqKcH2zpaPDWi9HcA5ru13pKwJE+LD28bFuUrdawMiFxiVZLDrTowBD0EFFA8GKNWZGzmkDBc8Aa2WYfApkiNypiDgU95D7CJFCW0FI5PQI6+4OC4CDxhKnWnLs0EhRW2jcounr5+p2j/nZ46KlnPNGpUREKBON9K0xBrpYZyNRBKwAWOwQqZNcytL9WhmPRloOQuFmqk1NY8Hu3OP4Uf4mHSWd6yP4xZScprR/GIRtUF/ZpktN2WmoyXa3GbQbfciIGxN4FJnFNhAIMLm8QDeOY9ibCMA3n6JYT32sdoZPumR00dt0m2UynnFqCGRgvgi02qBQHIbyWlmc4qxtjTiGgcpYKbABsKVCux8i60ghckCkyeCEgPgSEGkHiDQAGDp2EisYlVpRD7bcEaBF8XhCjDBNlOdri+Nw3CnOAZaQ1GIuzeYzgWMHw3yeD+Olgzs+D0JNixenJ20soIKDzEvRb6bgGIPVmZzOgy4QKwXjH+ut29lVpWwYdGEkXTI5Lyplc5lwPJJ3VuJtpSxA6Yk0A5HMnUpZQDuyG+ji/UpZgKHVRfhsOS6VsuJaAzTvhsBdT40nuV3osJp8kE1MQZYpx0M550nudIsbjuPqqAIalAPGW7DCHNYwOtXZ0+Sv6simLEBCWTQI0QBEd8GlHt0Dz6wMgFCErmzzBjAyC56JDaPNXJk97KBQALlfgBU4hmlHRJn7dsd6jzI1XYnMEuqWt6E2wP1PORXI92C8OguFgUP03KX5yQARvpEdltBbFkAoCIBGfqQ5ydYbMUDWF9bwOAVCMaAx45QelOtrYf4El4RpsLGXK2sFrxxGg2Tra/Uqa4curIuYe8TIlPnjNm8lnd+5E95ZUu5pVg4fcziPuQ1yx6PiiTtzGTFqURD2IrceOQd9054Wq7RwJMdTdo24IWiWDVGNdO
qVVdEuq4p8x85IBoJI1cWBUQOwupK17gQ3lIiMpU7tnj+ae/Wxug1qmUJSVjW3yKC2ihjbsFiD40c0mc4Hx+PBmVvVwW2osUUe4J6oe8SzrrKKgjBYBf6xawBkgf6sSm415XB/a7Xr0ipOanbiJY7X/cHAV6QJPpQDrdyXCRu3y4iGxZkbfQ0sjbWols7RGmPzp1otbruPJHTIhtijXTnvEXS5WHzTlrNVUQHHeXU1nUlZMSkHYRyU37KbhSerM3pNyEO4FaANpXldQ5RVX6Osx+6KoJtNEdkmHPRKSmG7BIgD+EIJtQWIbrkc/zZvkJdSWU/kAYg6R4r2ejZg74fbVbCuBle9GoEs8XU9m/EywArFda+GlHt5RyDRAISYdnXdq8ErSqB5agXORq57NaQSSMkRQ4iDsnjZq+EEIxhzNYUCAs7jEXZmBM+V+AN9L8aLagMo3jUbceesRtMnG2JAV1RvBobRkXsQ/8INT6frpjCZajgmx8LwLie50DWZQ7uxxmyyOQBDP4RIZTzBwAMDZO4aPIiGyT34riqAytg8a0VrgKRRMaSVoDvMi3ZqdmDu8oSoJ9eqBzDZpE62rly2oWssz3Na59pp1DCnw6VJmGQE5nOamMrJYZjTHhFXRK6MONKRaphDSTKWlIUkyNloxJZZ0bSnnToJRnx5tgcAiYxdGkS0fIuAlXVzvtu+qJt/ohzcUYUBKR7Av4DFAdkKcxPnvWrt5uXx8flntArLoZnZGtpQYA9kFdbw6hPTUmIGfCcaIENwXy4BJB1qQN7nJZQ7WBwktFug/fxpvqBnpNsNxiWM9NS1EuVKtk3Wu6TkFm0ILTcrbvyBDnTb3TxSFFCjm4uGxbXKttXNcr4010sygpxA41pURBDl7BXEOAjciY5nBA97G9IxABCZEDLZxvRF6A5DEE4flhi2SC4UHrRuXh932kHOEUtSTm0ptLmii22Iq+Qu28LYAgr9PBil+7IdWmoYdxmQSPtaIqHdssnjrjaAn1gtHv3EEjqMUxhxlqUOqy4Dft9+8vsyNNbTtS9TZLurlzLgy+IRBwPZALgn1Gempc7LbmJCAc9e6IsNs+BtdQCX6dYCnZ2jPnDZGKAo1bRGs7KbWY+XAcd2BouUTOFDqTFkeTzRZ7NIzuaIDQG0c1RNavSs8QDL8+RiU+dWsE6u9dfT0eMk9g8UfBbnee0gyt0ag4MtboEQjbLHXY6ne2q4UOmjbRoDmTjah8+5iw7LuOqo2WOuAetdVpbuToFlUBdXWk86GrqYHHyzA6Flvn+2ZCCKUgzuQE1sOWBe0hlWItXg8fOYOEyOL+2MOHP4zRosAMhH8hSp8RDLleYX3McH8UDTOXnq6iVJHJUQFiceuqOiT8I5uS9Gyo42uuhZ0tT0sbltWhsv7HueD7fSfLbWUOvabwD7rpYegvm08bv34j3pZp7OSo9BfcVwLzttTzKYH8EDr+xxZRw3l3gfcqoIi8cHTiZxfa2Vn06rWjBaZS/E0WYUlqU4kucWANLmHjOFQuSevdFuw1Xl8HDuvXxEwY+2AdVH2D3PZiz2m9fdcBy0DC0agStU+6qXTYQfxubi3FYHsJ1zrrcHc4ArnJ0BbvzUPQf3cGEhxv5Al5NhbZ6rpGBD7ECIxVWk1YsDz39QwzOKk+/5q3Mk8u2VAvdeSfqPWpvf8e6vvrW5b23uW5v71ubf/y59a3Pf2ty3NvetzX1rc9/a3Lc2963NfWtz39rctzb3rc19a3Pf2ty3Nv9u5/StzX1rc9/a3Lc2963NfWtz39rctzb3rc19a3Pf2ty3NvetzX1rc9/a3Lc2963NfWtz39rctzb3rc19a3Pf2ty3NvetzX1rc9/a3Lc2963NfWtz39rctzb3rc19a3Pf2ty3NvetzX1rc9/a/Lu1NhPYA/OytRmlb1ubcfKBRm9bm8Ew9gHNzUTf3Nw3N/fNzX1z83/hLn1zc9/c3Dc3983NfXNz39zcNzf3zc19c3Pf3Nw3N/fNzX1zc9/c3Dc3/27n9M3NfXNz39zcNzf3zc19c3Pf3Nw3N/fNzX1zc9/c3Dc3983NfXNz39zcNzf3zc19c3Pf3Nw3N/fNzX1zc9/c3Dc3983NfXNz39zcNzf3zc19c3Pf3Nw3N/fNzX1zc9/c3Dc3983NfXNz39z82zU30w+wlPK6uRmjHyjk6ge76XXGqAeaue11BsPEB/Q6k3d6nakY3IMDITflwQ/TdOfCa9S6yuoK3jDZgd9anblFE5Sw0flySXG5xobn7YL93j1TB0NKx47d8vbEywis+X8ee9VtDWgOx/3qANYxRMHHwi2Dk709nwBboO26SsGI+3TYfmqhdsDN3eJOb/Uh2O3gxVyWBkl1JiDJ/UUOwcg+iONLe3aSJvCksirSyH01eO7jhh3Z5wf4CPlAKPKBYl7IB4P/SDxQFrnTCP8Rf+Ob+nEb/A/b3u/3tF8x8tJIf2i9ws78h61dBs5DcW5uf2YF9poX5hMBMDjyyHfs3AoPWHKmNnnDtr8AVRCCZ9mP4RaLUN/+vPpFmfE7byrAH9gXP3c0mf6QtxbQPbveYheFPqC/E7uYnl1v2kKUfCB/J36xN/xCyz/UUbEIc2v6fuio8Dt+CvsAP3V5Pcw1Z8A1xB/LHZK5tXQ/5A7xWdxBf2zn3J3nak9f71IlLSo/9dIEmMIUmrgzc0K3qjrtkVmQb68sX2UX1aAo0uM1/cGYGMBnPU/rJrvLGU5sl8A0Pg4+nYLeN3KiyCA3lhKe67ZBZcGpH0gSf/q+Pn9nAZZ6/D5sL/eGX7qrL3O3CAB1oZhdSQIkzQs5KNO6cJ6G6KcAACzLc594dgkV3iEvhRvbVdC8nP8er58unUMZvzbROPFSyqhX8vL4rE+XvRKZb8/xPinC7uo486fqOHCPGHCPzwqN/jb6jve45idhKP0tkv9fABv6HW87+4MZdheI/o85dpuz+XOhKIXf6BODPFzbPgT/Oih6mzL5o6Hoj9zUjzn1aW6qz5b8gHH0A/JSrzCMeWD+h1bvNmHyx1o9wB4WsOd7eoXh2APxP7KAt2mSGx79m8I9UWRZHL8f7iWANo/xHoJ+G4AB399whLyMPId852/d9bd3Bn0vAjz83dLyDwM8lkAfyJcGm6ReTvKdCO92KiCvJMK8FsjLtAQGCAZk5/JDv7zL4/I/Io5kbnNF/3nxfCmNF/nE2F+Rz0uKA31AiEtKY33+jpGfkuEg7ygA/WUKgAAnSGIsS+A0RrI4/VJqgevDrq0s+Yu6gb2pG+SX6cZ/LFP3PtN9qxm/ZLifc38sg71QDBTBP0MzLtt9LzSD+jLNQN/UDOKDNAN/UzOoL9OM2+zjH6AZCPNaNVD8szAN8nWYBnkpROj7ctZ3JmJA7Pp92aS+SjbfkW/9D8rma9HEfkk0r+AMecEvTzs27KdYbfIensG+SvhRIKZAaJ+lknkltAQ4ilL/1GyjCHaTPEAvcvoJGvCOBPa/SQPe2mF81gASJa5VAPkn4o88EOy1+AN1QD4FzpN3QAv1hRuWCAXgGfs98YeYBmGfxZ/9Z
fEnAHu+exuceZExoz/PNdyr7fzPKwZBU69hy6/hlivtoNkv0Q78jnawX6cdBPaAE1eCeaMdOH61qYz+qnaA27As+Tph/C10QD5NHd5Rz/rfUweSuk39/MMI92nKz45w72El6utyPziEStRrcf9gbYBQ96X4o2DeK++AfZo2vGMD7D+oDdhr54Dg/ygRCqd8le/BPkMZLhHy/2gngEUecOxmk+9bDwvyIXHDD+6Csw/4FapiiU9TjXeUZv9XVONKjF9hHPLnjfq1ouEvw5MHBP0VNXuPZmB3NIP4QtREAKF9GewCjXig6KsQ4JehEgjIQUACWIgzJEK8RGQY8XKXF/88X3FvU/dVk9jSLWdu9TcXp04EXxgwdJPSBddcBvq+r+/u5jMU8vBSgMjbRgeU+Zgde/C1SNPqWhZgacpjjx8u/D8=</diagram></mxfile>
2110.00966/main_diagram/main_diagram.pdf ADDED
Binary file (45.6 kB). View file
 
2110.00966/paper_text/intro_method.md ADDED
@@ -0,0 +1,92 @@
1
+ # Introduction
2
+
3
+ Many tasks in autonomous driving are substantially easier from a top-down, map or bird's-eye view (BEV). As many autonomous agents are restricted to the ground-plane, an overhead map is a convenient low-dimensional representation, ideal for navigation, that captures relevant obstacles and hazards. For scenarios such as autonomous driving, semantically segmented BEV maps must be generated on the fly as an instantaneous estimate, to cope with freely moving objects and scenes that are visited only once.
4
+
5
+ Inferring BEV maps from images requires determining the correspondence between image elements and their location in the world. Multiple works guide their transformation with dense depth and image segmentation maps [@sengupta2012automatic; @pan2020cross; @liu2020understanding; @wang2019parametric; @schulter2018learning], while others [@lu2019monocular; @mani2020monolayout; @Roddick_2020_CVPR; @philion2020lift; @saha2021enabling] have developed approaches which resolve depth and semantics implicitly. Although some exploit the camera's geometric priors [@Roddick_2020_CVPR; @philion2020lift; @saha2021enabling], they do not explicitly learn the interaction between image elements and the BEV-plane.
6
+
7
+ Unlike previous approaches, we treat the transformation to BEV as an image-to-world translation problem, where the objective is to learn an alignment between vertical scanlines in the image and polar rays in BEV. The projective geometry therefore becomes implicit to the network. For our alignment model, we adopt transformers [@vaswani2017attention], an attention-based architecture for sequence prediction. With their attention mechanisms, we explicitly model pairwise interactions between vertical scanlines in the image and their polar BEV projections. Transformers are well-suited to the image-to-BEV transformation problem, as they can reason about interdependence between objects, depths and the lighting of the scene to achieve a globally consistent representation.
8
+
9
+ We embed our transformer-based alignment model within an end-to-end learning formulation which takes as input a monocular image with its intrinsic matrix, and predicts semantic BEV maps for static and dynamic classes.
10
+
11
+ The contributions of our paper are (1) We formulate generating a BEV map from an image as a set of 1D sequence-to-sequence translations. (2) By physically grounding our formulation we construct a restricted data-efficient transformer network that is convolutional with respect to the horizontal x-axis, yet spatially-aware. (3) By combining our formulation with *monotonic attention* from the language domain, we show that knowledge of what is below a point in an image is more important than knowledge of what is above it for accurate mapping, although using both leads to the best performance. (4) We show how axial attention improves performance by providing temporal awareness and demonstrate state-of-the-art results across three large-scale datasets.
12
+
13
+ # Method
14
+
15
+ Our goal is to learn a model $\mathbf{\Phi}$ that takes a monocular image $\mathbf{I}$ and produces a semantically segmented bird's-eye-view map of the scene $\mathbf{Y}$. Formally, given an input image $\mathbf{I} \in \mathbb{R}^{3 \times H \times W}$ and its intrinsic matrix $\mathbf{C} \in \mathbb{R}^{3 \times 3}$, our model predicts a set of binary variables $\mathbf{Y}^k \in \mathbb{R}^{X \times Z}$ for each class $k \in K$: $$\begin{equation}
16
+ p(\mathbf{Y}^k|\mathbf{I}, \mathbf{C}) = \mathbf{\Phi}(\mathbf{I}, \mathbf{C}),
17
+ \end{equation}$$ where $\mathbf{\Phi}$ is a neural network trained to resolve both semantic and positional uncertainties.
18
+
19
+ The design of our network rests on our novel transformation between the image-plane $\mathbb{P}^I$ and BEV-plane $\mathbb{P}^{BEV}$. Our end-to-end approach, as shown in Fig. [1](#fig:model_arch){reference-type="ref" reference="fig:model_arch"}a, is composed of the following subtasks: (1) constructing representations in the image-plane which encode semantics and some knowledge of depth, (2) transforming the image-plane representation to BEV and (3) semantically segmenting the BEV-representation.
20
+
21
+ Transforming from image to BEV requires a mapping which determines the image pixel correspondence to BEV polar ray. As camera geometry dictates a 1-1 correspondence between each vertical scanline and its associated ray, we treat the mapping as a set of sequence-to-sequence translations. With reference to Fig. [1](#fig:model_arch){reference-type="ref" reference="fig:model_arch"}b, we want to find the discretized radial depths of elements in the vertical scan line of an image, up to $r$ metres from the camera: we have an image column $S^I \in \mathbb{R}^H$, and we want to find its BEV ray $S^{\phi(BEV)} \in \mathbb{R}^r$, where $H$ is the height of the column and $r$ represents the radial distance from the camera. This mapping can be viewed as an assignment of semantic objects from the image-plane to their positional slots along a ray in the BEV-plane.
22
+
23
+ We propose learning the alignment between input scanlines and output polar rays through an attention mechanism [@bahdanau2015neural]. We employ attention in two ways: (1) *inter-plane attention* as shown in Fig.[1](#fig:model_arch){reference-type="ref" reference="fig:model_arch"}b, which initially assigns features from a scanline to a ray and (2) *polar ray self-attention* that globally reasons about its positional assignments across the ray. We motivate both uses below, starting with inter-plane attention.
24
+
25
+ **Inter-plane attention:** Consider a semantically segmented image column and its corresponding polar BEV ground truth. Here, alignment between the column and the ground truth ray is 'hard', *i.e.* each pixel in the polar ray corresponds to a single semantic category from the image column. Thus, the only uncertainty that must be resolved to make this a hard assignment is the depth of each pixel. However, when making this assignment, we need to assign features that aid in resolving semantics and depth. Hence, a hard assignment would be detrimental. Instead, we want a soft-alignment, where every pixel in the polar ray is assigned a combination of elements in the image column, *i.e.* a *context* vector. Concretely, when generating each radial element $S^{\phi(BEV)}_i$, we want to give it a *context* $c_i$ based on a convex combination of elements in the image column $S^{I}$ and the radial position $r_i$ of the element $S^{\phi(BEV)}_i$ along the polar ray. This need for context assignment motivates our use of soft-attention between the image column and its polar ray, as illustrated in Fig. [1](#fig:model_arch){reference-type="ref" reference="fig:model_arch"}.
26
+
27
+ Formally, let $\mathbf{h} \in \mathbb{R}^{H \times C}$ represent the encoded "memory" of an image column of height $H$, and let $\mathbf{y} \in \mathbb{R}^{r \times C}$ represent a *positional query* which encodes relative position along a polar ray of length $r$. We generate a context $\mathbf{c}$ based on the input sequence $\mathbf{h}$ and the query $\mathbf{y}$ through alignment $\boldsymbol{\alpha}$ between elements in the input sequence and their radial position. First, the input sequence $\mathbf{h}$ and positional query $\mathbf{y}$ are projected by matrices $W_Q \in \mathbb{R}^{C \times D}$ and $W_K \in \mathbb{R}^{C \times D}$ to the corresponding representations $Q$ and $K$: $$\begin{equation}
28
+ \label{eq:project}
29
+ \begin{split}
30
+ Q(\mathbf{y}_i) = \mathbf{y}_i W_Q \\
31
+ K(\mathbf{h}_i) = \mathbf{h}_i W_K . \\
32
+ \end{split}
33
+ \end{equation}$$ Following common terminology, we refer to $Q$ and $K$ as 'queries' and 'keys' respectively. After projection, an unnormalized alignment score $e_{i,j}$ is produced between each memory-query combination using the scaled-dot product [@vaswani2017attention]: $$\begin{equation}
34
+ \label{eq:energy}
35
+ e_{i,j} = \frac{\langle Q(\mathbf{y}_i), K(\mathbf{h}_j) \rangle}{\sqrt{D}}.
36
+ \end{equation}$$ The energy scalars are then normalized using a $\mathrm{softmax}$ to produce a probability distribution over the memory: $$\begin{equation}
37
+ \label{eq:alpha}
38
+ \alpha_{i,j} = \frac{\mathrm{exp}(e_{i,j})}{\sum_{k=1}^{H}\mathrm{exp}(e_{i,k})}.
39
+ \end{equation}$$ Finally, the context vector is computed as a weighted sum of $K$: $$\begin{equation}
40
+ \label{eq:context}
41
+ c_i = \sum_{j=1}^H \alpha_{i,j}K(\mathbf{h}_j).
42
+ \end{equation}$$ Generating the context this way allows each radial slot $r_i$ to independently gather relevant information from the image column; and represents an initial assignment of components from the image to their BEV locations. Such an initial assignment is analogous to lifting a pixel based on its depth. However, it is lifted to a distribution of depths and thus should be able to overcome common pitfalls of sparsity and elongated object frustums. This means that the image-context available to each radial slot is decoupled from its distance to the camera. Finally, to generate BEV feature $S^{\phi(BEV)}_i$ at radial position $r_i$, we globally operate on the assigned contexts for *all* radial positions $\mathbf{c}=\{c_1, ..., c_r\}$: $$\begin{equation}
43
+ S^{\phi(BEV)}_i = g(\mathbf{c}),
44
+ \end{equation}$$ where $g(.)$ is a nonlinear function reasoning across the *entire* polar ray. We describe its role below.
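+
+ For concreteness, a minimal sketch of this inter-plane attention is given below (our illustration in PyTorch, not the released implementation). It assumes unbatched 2-D tensors and pre-built projection matrices `W_Q` and `W_K`, and, as in Eq. [\[eq:context\]](#eq:context){reference-type="eqref" reference="eq:context"}, takes the weighted sum over the keys rather than a separate value projection.
+
+ ```python
+ import torch
+
+ def inter_plane_attention(h, y, W_Q, W_K):
+     """Soft-align an encoded image column with a polar ray.
+
+     h:   (H, C) image-column "memory"
+     y:   (r, C) positional queries along the ray
+     W_Q, W_K: (C, D) learned projection matrices
+     Returns the (r, D) context vectors, one per radial slot.
+     """
+     Q = y @ W_Q                        # queries, (r, D)
+     K = h @ W_K                        # keys, (H, D)
+     D = Q.shape[-1]
+     e = Q @ K.t() / D ** 0.5           # scaled dot-product scores, (r, H)
+     alpha = torch.softmax(e, dim=-1)   # distribution over the column
+     return alpha @ K                   # contexts as weighted sums of keys
+ ```
+
+ Each row of `alpha` is the distribution one radial slot places over the image column, so every slot gathers its context independently of its distance from the camera.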
45
+
46
+ **Polar ray self-attention:** The need for the non-linear function $g(.)$ as a global operator arises out of the limitations brought about by generating each context vector $c_i$ independently. Given the absence of global reasoning for each context $c_i$, the spatial distribution of features across the ray is unlikely to be congruent with object shape, locally or globally. Rather, this distribution may only represent scattered suggestions of object-part positions. Therefore, we need to operate globally across the ray to allow the assigned scanline features to reason about their placement within the context of the entire ray, and thus aggregate information in a manner that generates coherent object shapes.
47
+
48
+ Global reasoning across the polar ray is computed much like the soft-attention outlined in Eqs. [\[eq:project\]](#eq:project){reference-type="eqref" reference="eq:project"} - [\[eq:context\]](#eq:context){reference-type="eqref" reference="eq:context"}, except that the attention is applied over the ray only: Eq. [\[eq:project\]](#eq:project){reference-type="eqref" reference="eq:project"} is recalculated with a new set of weight matrices, with the inputs to both projections replaced by the context vectors $c_i$.
49
+
50
+ **Extension to transformers:** Our inter-plane attention can be extended to attention between the encoder-decoder of transformers by replacing the key $K(\mathbf{h}_j)$ in Eq. [\[eq:context\]](#eq:context){reference-type="eqref" reference="eq:context"} with another projection of the memory $\mathbf{h}$, the 'value'. Similarly, polar-ray self-attention can be placed within a transformer-decoder by replacing the key in Eq. [\[eq:context\]](#eq:context){reference-type="eqref" reference="eq:context"} with a projection of the context $c_i$ to represent the value.
51
+
52
+ Although soft-attention is sufficient for learning an alignment between any arbitrary pair of source-target sequences, our sequences exist in the physical world where the alignment exhibits physical properties based on their spatial ordering. Typically, in urban environments, depth monotonically increases with height, *i.e.,* as you move up the image, you move further away from the camera. We enforce this through monotonic attention with infinite lookback [@arivazhagan2019monotonic]. This constrains radial depth intervals to observe elements of the image column that are monotonically increasing in height but also allows context from the bottom of the column (or equivalently, previous memory entries).
53
+
54
+ Monotonic attention (MA) was originally proposed for computing alignments for simultaneous machine translation [@raffel2017online]. However, the 'hard' assignment between source and target sequence means important context is neglected. This led to the development of MA with infinite lookback (MAIL) [@chiu2018monotonic; @arivazhagan2019monotonic; @ma2019monotonic], which combined hard MA with soft-attention that extends from the hard assignment to the beginning of the source sequence. We adopt MAIL as a way of constraining our attention mechanism to potentially prevent overfitting by ignoring the redundant context in the vertical scan line of an image. The primary objective of our adoption of MAIL is to understand whether context below a point in an image is more helpful than what is above.
55
+
56
+ We employ MAIL by first calculating a hard-alignment using monotonic attention. This makes a hard assignment of context $c_i$ to an element of the memory $\mathbf{h}_j$, after which a soft-attention mechanism over previous memory entries $\mathbf{h}_1, ..., \mathbf{h}_{j-1}$ is applied. Formally, for each radial position $\mathbf{y}_i \in \mathbf{y}$ along the polar ray, the decoder begins scanning memory entries from index $j=t_{i-1}$, where $t_i$ is the index of the memory entry chosen for position $\mathbf{y}_i$. For each memory entry, it produces a selection probability $p_{i,j}$, which corresponds to the probability of either stopping and setting $t_i=j$ and $c_i = \mathbf{h}_{t_i}$, or moving onto the next memory entry $j+1$. As hard assignment is not differentiable, training is instead carried out with respect to the expected value of $c_i$, with the monotonic alignments $\alpha_{i,j}$ calculated as follows: $$\begin{equation}
57
+ \label{eq:ma_prob}
58
+ p_{i,j} = \mathrm{sigmoid}(\mathrm{Energy}(\mathbf{y}_i, \mathbf{h}_j)),
59
+ \end{equation}$$ $$\begin{equation}
60
+ \label{eq:ma_alpha}
61
+ \alpha_{i,j} = p_{i,j} \left((1-p_{i,j-1})\frac{\alpha_{i,j-1}}{p_{i,j-1}} + \alpha_{i-1,j} \right),
62
+ \end{equation}$$ where the $\mathrm{Energy}$ function is calculated in the same manner as Eq. [\[eq:energy\]](#eq:energy){reference-type="eqref" reference="eq:energy"}. Assuming monotonic attention stops at $t_i$, the infinite lookback strategy first computes energies $e_{i,k}$ using Eq. [\[eq:energy\]](#eq:energy){reference-type="ref" reference="eq:energy"} for $k \in 1, 2, ..., t_i$. The attention distribution over the allowed states is calculated as follows: $$\begin{equation}
63
+ \beta_{i,j} = \sum_{k=j}^{H} \left(\frac{\alpha_{i,k}\,\mathrm{exp}(e_{i,j})}{\sum_{l=1}^k\mathrm{exp}(e_{i,l})}\right).
64
+ \end{equation}$$ This effectively represents a distribution over image-elements which lie below a point in the image; to calculate a distribution over only what lies above a point in the image, the image column can be flipped. The context vector is calculated similar to inter-plane attention, where $c_i = \sum_{j=1}^H \beta_{i,j}K(\mathbf{h}_j)$.
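+
+ A rough, unvectorised sketch of the expected MAIL computation follows (our illustration; a practical implementation would parallelise the recurrence). It uses the standard substitution $q_{i,j} = \alpha_{i,j}/p_{i,j}$ to avoid the division in Eq. [\[eq:ma_alpha\]](#eq:ma_alpha){reference-type="eqref" reference="eq:ma_alpha"}.
+
+ ```python
+ import torch
+
+ def mail_attention(e):
+     """Expected monotonic attention with infinite lookback.
+
+     e: (r, H) energies between ray positions and memory entries.
+     Returns beta: (r, H) infinite-lookback attention weights.
+     """
+     r, H = e.shape
+     p = torch.sigmoid(e)            # selection probabilities
+     alpha = torch.zeros(r, H)
+     prev = torch.zeros(H)
+     prev[0] = 1.0                   # alignment starts at the first entry
+     for i in range(r):
+         q = prev[0]                 # j = 0: arrive from the previous row
+         for j in range(H):
+             if j > 0:               # keep scanning past j-1, or arrive here
+                 q = (1.0 - p[i, j - 1]) * q + prev[j]
+             alpha[i, j] = p[i, j] * q
+         prev = alpha[i]
+     # beta_{i,j} = exp(e_ij) * sum_{k >= j} alpha_ik / sum_{l <= k} exp(e_il)
+     exp_e = torch.exp(e - e.max(dim=1, keepdim=True).values)  # row-stabilised
+     cum = torch.cumsum(exp_e, dim=1)
+     suffix = torch.flip(torch.cumsum(torch.flip(alpha / cum, [1]), dim=1), [1])
+     return exp_e * suffix
+ ```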
65
+
66
+ We build an architecture that facilitates our goal of predicting a semantic BEV map from a monocular image around this alignment model. As shown in Fig. [1](#fig:model_arch){reference-type="ref" reference="fig:model_arch"}, it contains three main components: a standard CNN backbone which extracts spatial features in the image-plane, encoder-decoder transformers to translate features from the image-plane to BEV and finally a segmentation network which decodes BEV features into semantic maps.
67
+
68
+ **2D Multi-scale feature learning in $\mathbb{P}^I$:** Reconstructing an image in BEV requires representations which can detect scene elements at varying depths and scale. Like prior object detection methods [@tian2019fcos; @Roddick_2020_CVPR; @saha2021enabling], we handle this scale variance using a CNN backbone with a feature pyramid to produce feature maps $\mathbf{f}_{t,u}^I \in \mathbb{R}^{C \times h_u \times w_u}$ at multiple scales $u \in U$.
69
+
70
+ **1D Transformer encoders in $\mathbb{P}^I$:** This component encodes long-range vertical dependencies across the input features through self-attention, using an encoder for each scale $u$ of features (second left block of Fig. [1](#fig:model_arch){reference-type="ref" reference="fig:model_arch"}a). Each scale of features $\mathbf{f}_{t, u}^I$ is first reshaped into its individual columns, creating $w_u$ sequences of length $h_u$ and dimension $C$. Each encoder layer has a standard architecture consisting of multi-head attention and a feed forward network. Given the permutation invariance of the transformer, we add fixed 1D sinusoid positional encodings [@vaswani2017attention] to the input of each attention layer. The $U$ encoders each produce a memory $\mathbf{h}_{t,u}^I \in \mathbb{R}^{w_u \times h_u \times C}$.
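+
+ The fixed encodings are the standard sinusoids of [@vaswani2017attention]; as a reference sketch (assuming an even feature dimension):
+
+ ```python
+ import torch
+
+ def sinusoid_encoding(length, dim):
+     """Fixed 1-D sinusoidal positional encodings (Vaswani et al., 2017)."""
+     assert dim % 2 == 0, "sketch assumes an even feature dimension"
+     pos = torch.arange(length, dtype=torch.float32).unsqueeze(1)  # (length, 1)
+     i = torch.arange(0, dim, 2, dtype=torch.float32)              # even indices
+     angles = pos / torch.pow(10000.0, i / dim)                    # (length, dim/2)
+     pe = torch.zeros(length, dim)
+     pe[:, 0::2] = torch.sin(angles)
+     pe[:, 1::2] = torch.cos(angles)
+     return pe    # added to the input of each attention layer
+ ```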
71
+
72
+ **1D Transformer decoders in $\mathbb{P}^{BEV}$:** This component generates independent sequences of BEV features along a polar ray through multi-head attention across the encoder memory. As shown in the second left block of Fig. [1](#fig:model_arch){reference-type="ref" reference="fig:model_arch"}, there is one transformer decoder for each transformer encoder. Every encoded image column $\mathbf{h}^I \in \mathbb{R}^{h_u \times C}$ is transformed to a BEV polar ray $\mathbf{f}^{\phi(BEV)} \in \mathbb{R}^{r_u \times C}$, where $r_u$ is the radial distance along the ray. Given the desired output sequence of length $r_u$, the decoder takes in $r_u$ positional embeddings, which we refer to as *positional queries*. These are $r_u$ unique embeddings with fixed sinusoid positional information added to them, just like our encoder above. When replacing the encoder-decoder multi-head soft-attention with monotonic attention, each head in the decoder is replaced with a monotonic attention head from Eq. [\[eq:ma_alpha\]](#eq:ma_alpha){reference-type="eqref" reference="eq:ma_alpha"}. The $U$ decoders each output $w_u$ BEV sequences of length $r_u$ along the polar ray, producing a polar encoding $\mathbf{f}^{\phi(BEV)} \in \mathbb{R}^{w_u \times r_u \times C}$. Similar to prior work which builds stixel representations from an image [@badino2009stixel; @pfeiffer2011modeling], each image column in our model corresponds to an angular coordinate in the polar map. Finally we concatenate along the ray to obtain a single 2D polar feature map and convert to a rectilinear grid, to create our BEV representation $\mathbf{f}^{{BEV}}_{t} \in \mathbb{R}^{C \times Z \times X}$.
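+
+ The concluding polar-to-rectilinear conversion can be implemented as a differentiable resampling. Below is a sketch using `grid_sample`; the field of view and grid extents are illustrative assumptions, not values from the paper.
+
+ ```python
+ import torch
+ import torch.nn.functional as F
+
+ def polar_to_cartesian(f_polar, Z, X, fov_deg=90.0):
+     """Resample polar BEV features (B, C, r, w) -- radius x angle -- onto a
+     rectilinear (B, C, Z, X) grid. Cells outside the assumed field of view
+     sample zeros (grid_sample's default padding)."""
+     B = f_polar.shape[0]
+     z = torch.linspace(1e-3, 1.0, Z).view(Z, 1).expand(Z, X)  # depth ahead
+     x = torch.linspace(-1.0, 1.0, X).view(1, X).expand(Z, X)  # lateral offset
+     theta = torch.atan2(x, z)                                 # ray angle per cell
+     radius = torch.sqrt(x ** 2 + z ** 2)
+     half_fov = torch.deg2rad(torch.tensor(fov_deg / 2.0))
+     grid_x = theta / half_fov                                 # angle -> [-1, 1]
+     grid_y = radius / radius.max() * 2.0 - 1.0                # radius -> [-1, 1]
+     grid = torch.stack([grid_x, grid_y], dim=-1).expand(B, Z, X, 2)
+     return F.grid_sample(f_polar, grid, align_corners=False)
+ ```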
73
+
74
+ Our transformer encoder and decoder use the same set of projection matrices for every sequence-to-sequence translation, giving it a structure that is convolutional along the $x$-axis and allowing us to make efficient use of data when training. We constrain our translations to 1D sequences as opposed to using the entire image to make learning easier, a decision we analyze in section [4.1](#sec:ablations){reference-type="ref" reference="sec:ablations"}.
75
+
76
+ **Polar-adaptive context assignment:** The positional encodings applied to the transformer so far have all been 1D. While this allows our convolutional transformer to leverage spatial relationships between height in the image and depth, it remains agnostic to polar angle. However, the angular domain plays an important role in urban environments. For instance, images display a broadly structured distribution of object classes across their width (e.g. pedestrians are typically only seen on sidewalks, which lie towards the edges of the image). Furthermore, object appearance is also structured along the width of the image as they are typically orientated along orthogonal axes and viewing angle changes its appearance. To account for such variations in appearance and distribution across the image, we add additional positional information by encoding polar angle in our 1D scanline-to-ray translations.
77
+
78
+ **Dynamics with axial attention in $\mathbb{P}^{BEV}$:** This component incorporates temporal information from past estimates to build a spatiotemporal BEV representation of the present. As the representations built by the previous components are entirely spatial, we add a simple component based on axial attention to make the model temporally aware. The placement of this optional module can be seen in Fig. [1](#fig:model_arch){reference-type="ref" reference="fig:model_arch"}a. We obtain BEV features for multiple timesteps, creating a representation $\mathbf{f}_{1:t}^{BEV} \in \mathbb{R}^{T \times C \times Z \times X}$. We apply axial-attention across the spatial and temporal axes, giving every pixel at every timestep axial context from the other timesteps. Our temporal aggregation means the features of any timestep now contain dynamics across the sequence, and the module can use any of these features in its forward pass. This module is optional as it builds a spatiotemporal representation. It can be omitted when constructing a purely spatial model.
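+
+ As an illustration of the temporal half of this module, the sketch below attends along the time axis only (our simplification; the module also attends along the spatial axes):
+
+ ```python
+ import torch.nn as nn
+
+ def axial_time_attention(f, mha):
+     """Attend along the temporal axis of stacked BEV features.
+
+     f:   (T, C, Z, X) features for T timesteps
+     mha: nn.MultiheadAttention(embed_dim=C, num_heads=..., batch_first=True)
+     Every spatial cell attends over its own history, one axis at a time.
+     """
+     T, C, Z, X = f.shape
+     seq = f.permute(2, 3, 0, 1).reshape(Z * X, T, C)  # (cells, T, C)
+     out, _ = mha(seq, seq, seq)                       # temporal self-attention
+     return out.reshape(Z, X, T, C).permute(2, 3, 0, 1)
+ ```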
79
+
80
+ **Segmentation in $\mathbb{P}^{BEV}$:** To decode our BEV features into semantic occupancy grids, we adopt a convolutional encoder-decoder structure used in prior segmentation networks [@yu2018deep; @saha2021enabling]. The aggregated module structure (right block of Fig. [1](#fig:model_arch){reference-type="ref" reference="fig:model_arch"}a), takes BEV features $\mathbf{f}^{{BEV}}_{t} \in \mathbb{R}^{C \times Z \times X}$ and outputs occupancy grids $\mathbf{m}^{BEV}_{t,u} \in \mathbb{R}^{classes \times x_u \times z_u}$ for scales $u \in U$. Moving from the 1D attention mechanisms of our transformer to the two-dimensional locality of convolutions provides contextual reasoning across the horizontal $x$-axis which helps stitch together potential discontinuities between adjacent polar rays and their subsequent rectilinear resampling.
81
+
82
+ **Loss in $\mathbb{P}^{BEV}$:** As the training signal provided to the predicted occupancy grids must resolve both semantic and positional uncertainties, we use the same multi-scale Dice loss as [@saha2021enabling]. At each scale $u$, the mean Dice Loss across classes $K$ is: $$\begin{equation}
83
+ \mathcal{L}^u = 1 - \frac{1}{|K|} \sum_{k=1}^K \frac{2 \sum_i^N \hat{y}_i^k y_i^k}{\sum_i^N \hat{y}_i^k + y_i^k + \epsilon},
84
+ \end{equation}$$ where $y_i^k$ is the ground-truth binary variable at grid cell $i$, $\hat{y}_i^k$ is the predicted sigmoid output of the network, and $\epsilon$ is a small constant that prevents division by zero.
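+
+ A compact sketch of this loss (our illustration; summing the per-scale terms is an assumption, as the combination across scales is not specified here):
+
+ ```python
+ import torch
+
+ def multi_scale_dice_loss(preds, targets, eps=1e-5):
+     """Mean Dice loss over K classes at each scale.
+
+     preds:   list of (B, K, Z_u, X_u) sigmoid outputs, one entry per scale u
+     targets: list of (B, K, Z_u, X_u) binary ground-truth occupancy grids
+     """
+     total = 0.0
+     for y_hat, y in zip(preds, targets):
+         inter = (y_hat * y).sum(dim=(0, 2, 3))        # per-class overlap
+         denom = (y_hat + y).sum(dim=(0, 2, 3)) + eps  # per-class mass
+         total = total + (1.0 - (2.0 * inter / denom).mean())
+     return total
+ ```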
85
+
88
+
89
+ <figure id="fig:soa_comp" data-latex-placement="t">
90
+ <embed src="icra2022_qual_comparison.pdf" style="width:87.0%" />
91
+ <figcaption>Qualitative results on the nuScenes validation set of <span class="citation" data-cites="Roddick_2020_CVPR">[@Roddick_2020_CVPR]</span>. We compare against baseline results of prior work reported in <span class="citation" data-cites="Roddick_2020_CVPR">[@Roddick_2020_CVPR]</span> and follow their colour scheme. For fair comparison, we apply the ground truth visibility mask (black) to the predicted images as was done in <span class="citation" data-cites="Roddick_2020_CVPR">[@Roddick_2020_CVPR]</span>.</figcaption>
92
+ </figure>
2112.05787/main_diagram/main_diagram.drawio ADDED
@@ -0,0 +1 @@
1
+ <mxfile host="app.diagrams.net" modified="2022-01-14T05:00:32.671Z" agent="5.0 (X11)" version="15.9.1" etag="dLLx6_GhqDCtAFnsVrjl" type="google"><diagram id="rjo55-pKI-u2ZruySYxh">7Vpbj5s4FP41SN2HjsBASB53MunuSlO1mlmp7dPIAQfcIRgZZ5LMr18bbG6GJN2S0LSVIgUfX7DP+b5zAQx7vt79RWEavScBig1gBjvDvjMAsBwADPEzg30h8bxJIQgpDuSgSvCIX5EUmlK6wQHKGgMZITHDaVPokyRBPmvIIKVk2xy2InHzrikM5R3NSvDowxhpwz7hgEWFdOrWRv+NcBipO1um7FlDNVgukUUwINvaveyFYc8pIay4Wu/mKBbKU3opFnrX01tujKKEnTJBGuIFxht5tjlJGNoxuT22V2emZJMESEwzDfuWUBaRkCQwvick5UKLC78ixvbSWnDDCBdFbB3LXrTD7HPt+otY6ga4snm3k0vnjb1qJIzuP9cbxTRXNatpeUvNW/FTyJ1YYmwMlyi+hf5zmJ9jTmJCeV9CEj7ktjgsCsKmfTOyob4UORJmkIZIanaiK9sqTcixj8ga8T3xIRL4b80b27HlNAl9fv68SVEMGX5pbgBKnIblUuXqHwnmN62GkNUqQ6xua35R20QlyhHQjQZbQ8MDylKSZEiDg1DWo2xKLZ4JId71ImT6PxDiTF2rgZC3tvMDQcTRIGIImXNX/B1wG8Kq2wgz9JjCXGdbHhyaAGjb5AVRhrnb/TPGYcKFaxwEcWUO0Y12Rp/nO6zq0idLNU8taa1t5dEnckhUc+ZK1qX4hl4PKNEdkU6H2XSYBW37nMCKic4Kyz3ZVidD/lTNT3rgO782+Ja5kEqh3MvB1/t14DvtgK83HnynPfB9uHb4Tr3LwXd2EL4VUheVdBw0D5Ha2KfhXJmjDvSq+Lk80NV+NKQvnvyBsQ7MM2PdGdFVW1aHHi/gvAd3xBboAKgzIkD1ylkBlF47QC/pjC295rxSgLodqcKYHlQH4AXDnIplX+pxTcANeFXQE3CbqN6PiGJ+SES14HfjlgGviH+mCY5EwLzVXjGAWVTSL4tgKo4Q4+T5Oww8Yi6o9qN5oPDJcOdphHvckDq5v1mi435oWSDiflkKSs582DCuPSTlAaTPH/gymO0LI7lNIcilXXlmVlyLyxWOY0VFA9irFZr4vhjCKHkun7WCYdyeDY7HZfdcbq+rEL1Kt+d1sAKMyArvNysGZUVXMnA2Vuj17b8UJtmK0DV348BcJD4JEM008+WvLBomKpTTBnhNk1IEZULlc/2IUNGfaXVhoknIbv58X6E8azkpZZ6aOSyrwx7OEPY4XCqP+aSnpzY+khcM7v5AV9084vNN0Fc3v/K62b67wurZbROgI0qXz/KHdkilya+FANbFCdBVl49JgL66/PXpgTc/XWF57nrHI7IFzsWAn6U+73o/q84yClD1N6n3PG+EHJ4TuBZwS5ZZmp/tW9umZiGV3TIOZPRKxB5u01p5XMprNfNRCuAdUt/mtBNYO4cA4xojgg0z89JZrDNtcWZma5yZdVDGHoIyemH+/h+RvGYMryHjChjK9eR6vqhePaelV6D7Iu9M2Wjvy9s3vLJjEWLwjx7X/mNUB21CDBEaTLdhjpnK2GvmcDrMAb7dHLxZfZZWfDhSfdxnL/4D</diagram></mxfile>
2112.05787/main_diagram/main_diagram.pdf ADDED
Binary file (21.3 kB). View file
 
2112.05787/paper_text/intro_method.md ADDED
@@ -0,0 +1,56 @@
1
+ # Introduction
2
+
3
+ Representation learning has transformed how we can apply machine learning to solve real-world problems. However, despite a vast body of research on pretrained language representations, there have been relatively fewer attempts to train representations specifically for dialog understanding. Prior works mostly relied on finetuned representations based on generic models like BERT [@devlin2018bert] or GPT-2 [@radford2019language]. In our experiments, we demonstrate that such representations do not perform uniformly across various dialog understanding tasks such as dialog-act classification, intent detection or dialog evaluation.
4
+
5
+ On the other hand, prior works on pretraining large-scale dialog models focused mainly on open-domain generation. These works evaluated their models only on dialog generation [@zhang2019dialogpt; @roller2020recipes; @adiwardana2020towards] or tasks related directly to the pretraining objective [@Henderson2020ConveRTEA; @gao2020dialogrpt]. Their effectiveness on other dialog understanding tasks like act classification or intent detection remains unexplored. So we ask the following research question: *Can we learn enriched representations directly at the pretraining phase that are specifically helpful for dialog understanding?*
6
+
7
+ Existing language modeling (causal or masked) pretraining objectives unfortunately are not the best to model dialogs, for these reasons: (1) The model is not directly trained to learn the content discourse structure (e.g., context-response in dialogs). (2) Such models are trained to generate the response word-by-word rather than predicting a larger unit. (3) The inherent one-to-many nature of dialog generation implies that the encoding model should be able to capture uncertainty in the response prediction task, which such models ignore.
8
+
9
+ Hence, in this paper, we propose pretraining objectives for improved dialog modeling that turn the discourse-level organizational structure of texts from natural sources (e.g., documents, dialogs, or monologues) into a learnable objective. We call this objective the Discourse Mutual Information (DMI). The key insight towards the design of our pretraining objective is to capture representations that can account for a meaningful conversation out of a specific ordered sequence of utterances. We hope that a discourse-level pretraining objective with conversational data would guide the model to learn complex context-level features. For example, in Fig. [1](#fig:main-idea){reference-type="ref" reference="fig:main-idea"}, we illustrate the differences between standard language modeling (causal or masked) based pretraining objectives and a discourse-level reasoning task.
10
+
11
+ The second research question that we ask is *whether discourse-level features learned using self-supervised pretraining outperform word-level pretraining objectives for downstream dialog understanding tasks.* Experimentally, we show that representations learned using the proposed objective function are highly effective compared to both existing discriminative as well as generative dialog models. In terms of various dialog understanding tasks, our models achieve state-of-the-art performances in several tasks (absolute improvements up to 8.5% and 3.5% in task accuracies in probing and finetuning setups, resp.) and perform consistently well across a variety of dialog understanding tasks, whereas baseline models usually have a rather imbalanced performance across tasks.
12
+
13
+ Overall, our main contributions are as follows.
14
+
15
+ - We propose DMI, a novel information-theoretic objective function for pretraining dialog representation.
16
+
17
+ - We release pretrained dialog-representation models in three different sizes (small, medium and base) based on our proposed self-supervised learning objectives[^1].
18
+
19
+ - We extensively evaluate our DMI based representations on multiple open-domain downstream tasks like intent detection, dialog-act classification, response retrieval, dialog reasoning, and response-generation evaluation, and beat state-of-the-art across nine tasks in both probe as well as finetune setups.
20
+
21
+ # Method
22
+
23
+ Let $E_c$ and $E_r$ be the representations[^3] for $C$ and $R$ based on some encoder. Using the data processing inequality from information theory [@cover1999elements], we have $$\begin{align}
24
+ I(C;R) \geq I(E_c; E_r) \label{eqn:data-ineq}
25
+ \end{align}$$ This tells us that the MI between any encoded versions of $C$ and $R$ will always be less than or equal to the true mutual information. The equality will hold if $E_c$ and $E_r$ are both fully-invertible encoding processes (as opposed to lossy or compressive representations, for which inversion is not possible). However, neural networks generally embed the data points in a low dimensional manifold by learning robust features that can represent the data points efficiently. Because of this, neural representations are usually not invertible.[^4] Now, the exact computation of MI is not possible for continuous-valued random variables. In recent years, various variational lower bounds have been proposed for estimating MI between continuous-valued random variables. Including the MI estimator ($\hat I_\theta$), the overall relation becomes $$\begin{align}
26
+ I(C;R) \geq I(E_c; E_r) \geq \hat I_\theta (E_c; E_r)
27
+ \end{align}$$
28
+
29
+ This leads us to the proposed learning objective DMI: $$\begin{align}
30
+ \max_{\theta,\phi} \hat I_\theta(E_c^{(\phi)}; E_r^{(\phi)})
31
+ \end{align}$$ where $\hat I_\theta (E_c; E_r)$ is a variational lower bound estimate of $I(E_c; E_r)$ (Equation [\[eqn:data-ineq\]](#eqn:data-ineq){reference-type="ref" reference="eqn:data-ineq"}) parametrized by $\theta$, and $\phi$ denotes the parameters of the encoder used for encoding $C$ (or $R$) to $E_c$ (or $E_r$).
32
+
33
+ For training our models, we minimize a loss function depending on the estimator being used. $$\begin{align*}
35
+ \min_{\theta,\phi} \left[ L_{\theta,\phi}(C,R) = -\hat I_{\theta,estimator}(E_c^{(\phi)}, E_r^{(\phi)}) \right]
36
+ \end{align*}$$ We experimented with various MI estimators from the literature, namely, MINE [@belghazi2018mine], InfoNCE [@oord2018infonce], JSD [@hjelm2018dim] and SMILE [@song2019limitations]. These MI estimators generally compute samples of $E_c$ and $E_r$ using $C$ and $R$ drawn from the joint distribution $P_{CR}$. Based on our preliminary experiments, we found that the InfoNCE estimator produces better representations. The InfoNCE MI-estimate is computed as $$\begin{align}
37
+ I(C;R) &\geq \log N - L_N \label{eqn:infonce}\\
38
+ L_N &= -\mathbb{E}_{P_{CR}} \left[ \log \frac{e^{f(c,r)}}{\sum_{r'\in R}e^{f(c, r')}} \right] \nonumber
39
+ \end{align}$$
40
+
41
+ where $N$ denotes the batch size, and $f(c,r)$ is a scoring function for the $\langle c,r\rangle$ pair.
42
+
43
+ **InfoNCE-S:** The original InfoNCE formulation only considers negative samples for one of the random variables, but does not pose any constraint on which of the variables should be considered for negative sampling. As identifying the true response from a pool of negative samples requires different reasoning than identifying the true context from such a pool, we consider both cases and create a symmetric version of the InfoNCE loss function. The final expression of this loss is given in Equation [\[eqn:infonce-symm\]](#eqn:infonce-symm){reference-type="ref" reference="eqn:infonce-symm"} and we refer to it as InfoNCE-S. This considerably improves the speed of training and convergence, and also boosts downstream task performance.
44
+
45
+ $$\begin{align}
46
+ L_N = -\frac{1}{2}&\mathbb{E}_{P_{CR}} \left[ \log \frac{e^{f(c,r)}}{\sum_{r'\in R}e^{f(c, r')}} \right] \nonumber \\
47
+ &\ \ - \frac{1}{2}\mathbb{E}_{P_{CR}} \left[ \log \frac{e^{f(c,r)}}{\sum_{c'\in C}e^{f(c', r)}} \right] \label{eqn:infonce-symm}
48
+ \end{align}$$ For other loss functions, more detailed discussion can be found in the Appendix.
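+
+ With in-batch negatives, InfoNCE-S reduces to two cross-entropy terms over a single score matrix. The sketch below is ours rather than the released code, and the bilinear form of $f(c,r)$ is an assumption:
+
+ ```python
+ import torch
+ import torch.nn.functional as F
+
+ def infonce_s_loss(E_c, E_r, W):
+     """Symmetric InfoNCE over a batch of N (context, response) pairs.
+
+     E_c, E_r: (N, d) encoded contexts / responses
+     W:        (d, d) parameters of an assumed bilinear score f(c, r) = c^T W r
+     Diagonal entries of `scores` are the true pairs; every other in-batch
+     item serves as a negative for both directions.
+     """
+     scores = E_c @ W @ E_r.t()                    # (N, N) matrix of f(c_i, r_j)
+     labels = torch.arange(scores.size(0), device=scores.device)
+     loss_r = F.cross_entropy(scores, labels)      # identify r given c
+     loss_c = F.cross_entropy(scores.t(), labels)  # identify c given r
+     return 0.5 * (loss_r + loss_c)
+ ```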
49
+
50
+ **Comparison with ConveRT [@Henderson2020ConveRTEA]**: There are a couple of differences between ConveRT's contrastive loss and our DMI objective. ConveRT models the problem as a response selection task and focuses on modeling cosine similarity between the context and the response. On the other hand, we propose a generic similarity computation function $f(c,r)$ in Eqn. [\[eqn:infonce\]](#eqn:infonce){reference-type="ref" reference="eqn:infonce"} and [\[eqn:infonce-symm\]](#eqn:infonce-symm){reference-type="ref" reference="eqn:infonce-symm"}. Another difference is in encoding the input. ConveRT splits the context into previous turns and current query, and encodes them independently. Our model encodes the entire context jointly and hence is capable of better learning the correlations between previous turns and current query.
51
+
52
+ In this work, we focus on utilizing DMI for pretraining dialog representations incorporating strong discourse-level features. *But why should the DMI objective learn better discourse-level features than models trained on conversational data using MLM or LM objectives?* We can find the answer by looking at various LM-based objectives through the lens of InfoMax, as shown by @Kong2020AMI. They connected various pretraining objectives for natural language representations, including the ones used for training Skipgram, BERT and XLNet, to the InfoMax learning principle.
53
+
54
+ If we consider an input text $T$ and a masking function $M$ that returns a masked text $\tilde{T}$ and the masked word $w$, the MLM objective is equivalent to $\mathcal{L}_{MLM} = -\hat{I}_\theta(E^{(\phi)}(\tilde{T}), e_w)$, where $E^{(\phi)}$ is the language encoder (e.g., a Transformer encoder) and $e_w$ is the embedding of the token $w$. Similarly, in the case of auto-regressive LMs like GPT-2, the InfoMax objective equivalent to the loss is $\mathcal{L}_{autoLM} = -\hat{I}_\theta(E^{(\phi)}(T_{1:t-1}), e_{T_{t}})$, where $T_{1:t-1}$ is the input sequence up to the $(t{-}1)^{th}$ token and $T_t$ is the $t^{th}$ token.
55
+
56
+ Compared to these LM objectives, DMI focuses on optimizing $I(E_c, E_r)$, where $c$ and $r$ are two structural components from the discourse with designated roles. This enables DMI to discover more important features at the discourse level.
2204.10356/main_diagram/main_diagram.drawio ADDED
@@ -0,0 +1 @@
1
+ <mxfile host="app.diagrams.net" modified="2022-02-06T08:52:04.085Z" agent="5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/97.0.4692.99 Safari/537.36" version="16.5.3" etag="zRgBQ0d1CGqkkzq6yf8A" type="device"><diagram id="fu4KbK7LNyiao5gVUZ0s">5Vtdk6I4FP01Vs08SBEQ0cfWHndna6eqa52qmX7aihA11UjYELt1f/0mkCAk6GAP2Exv94Pkks+Tc29ubsLAne8Ov1GYbL+QEEUDxw4PA/d+4DgATH3+IyTHXOJP3FywoTiUmU6CJf4XSaEtpXscorSSkRESMZxUhQGJYxSwigxSSl6q2dYkqraawA0yBMsARqb0Gw7ZNpdOHP8k/x3hzVa1DMbT/M0OqsxyJOkWhuSlJHI/Ddw5JYTlT7vDHEUCPIVLXm5x5m3RMYpi1qSAkxd4htFejk32ix3VYFEc3gnMeCqIYJriYODOtmwXcQHgj5Ts4xCJ+myeWpOYzUlEaFbYtbM/LkcHzL7LEuL5UeS3PJm6P8jiWeKoEjGjx+/lRKmUSJ6KZSlVLh8CCrVJZJBukETFM4ECBfyct4jsEK+SZ3k5TbAnJ21bmlsloyiCDD9XW4SSZ5uiuqKFB4J5w44tdWI0kT06KqYrrqs6UrKnAZLFyjOq1eS5P6opx8GoiT+UBn4SZYypZ49rsGceYZT15sMf8BkuA4oT9tGgFEMHViVRyih5Qoo4MYmR4BKOIk0EI7yJBRN5K4jLZ8+IMsw1806+2OEwFM3MXraYoWUCA9HmC7dDBlVl53kF6DA4pz5nWCELOK5rgWkF76kr4S4RBzhjawJM8rj2eZ5UpuXCHIyMOVgiygcl5uDhyLYkfsf4e5ra1IJve90g79XYznEkgE0TGFcQH/+zFxY9s47DNdzhiHf4TmSBcTpMEcVrnsgwsbM8abbmiRwAWGMfTJNDkUHM3lBOhMgRoTUrXq5g8LTJcB4G+dSJLHSz+uB4nsjl8HHZ2vPHUw/500b8Lripf+Jv7x4+q1GtqHqpJByhfKhKrPHsNN/gDCNKFIQ0kCs9cApGquUVSPCWsvK2+DPW+OPXKW+N2Z+0wJ/xe+bPPULJMEKQxjje8FzSCXy3TJr4b8gk32DS551wYbn6UgqPF9C0f4zmOY+uBdB8RwfNt7xmsCnF/RnYJjUK2Bvnd1DvxOZuYGUBKvu1fuMpuI1fqxtYUOx8rvVrx0CrCfid+bXTGp9qs+OIciiIsMxfssWxp1qlmyJgTxvaonELtkjtNm6nVSdNeizp2I+2lKdd5GP53XVbyrI2jk1tnPZMG8eero2jV2qjb+vaOO1MGwHos51uOUhRZpRvMmrcN0apaS94AHxL9fJqUjkNKmuRV2bwa75PGdmZbuo9ZJDnXDKK4K63hl93p25s+M1o0P/D8NeoqaJWf/RU98OAPuWNldTww7qLLwIzuNUVpRQ9QOtWfPoL0GMy0i2H51jaDqspQYy6gN0dQV4Rg7sY/mglwNJWDGUZiGA6XnNKO7bc0Z8Jl5wWrNuOWO/yO0J/8fnrkr9c4Og87D11BFyvUVxl1FFcBdRGNuFOABCvUvHzFzfbfFpFaFD3tbAgzkwc0opDjLMI9zQO6OgbI3tS44OBGuTbCAQCMxL4cxDekKT1SNVxtBWkGgT/eqPOvr4vsk1trnPr2zjlAmYwrLdAjew3BErV+0sA5b4lUA3iOb0BavyWQJkBCgOo0vaHJCj+uuXL5w33P8VW/KqIWcidqmjQeAcF6rZQk8ZTeaMbPHrU3YiINt1AjfSFscMdds0Vnt5R7Fq6KHWs0KVvofgR0A2LbjEa08VY9PSaWqTLdQGZt6fLa+xTE4rVXClUtOsPxUatUcxwFzqkmBnSUZcZHigJUJrmF0oecIIiHCODf/243HbLjVMRy61snDq69VZ7bem8CWgjzH+Vb9FZHFcFbSvLSu/uWxjB11ffI57q23P9xKBFnW8QtnifnKo7O+rbGa+v30037EhjThn3DzpcRxoEeNItTMQjzhaYCp10089IUpL+CVcoeiApzu4DufcrwsS5sTuLxItZEYEuUXCd/Yn4pmjsLk3yD1MEUaBKrPFBMHgm+3O/ZUx80XIncHAWQRgDCwckXmMRPLUCcVK9CLOj6YWQp/wX2PZwBbl2DNeEDvcpokMsVrO1WKuchSswHY2Gqzy+OgTOxEr4eurOQphui6hgh6uXHiCtu7LtdrWhbhDLKtkZ6RZUkDlrcc5oe2+02HM8yy79OZomvvZe3uV6p90puKr5ooI/IRZs5QyRPRMO47z4KKxmoeD/C9HcbENhKE7fLniNZcWodTEL4thNTEe9xTF8Ui3uVT7g0I8zDLuSkMwWfHrmlaWyEmUDd4eN+FjPgi/pyGJi9KKDMPo7zT8naem+vW+pb+EufbLhW247n8vw5Oljupxkp08S3U//AQ==</diagram></mxfile>
2204.10356/main_diagram/main_diagram.pdf ADDED
Binary file (18.8 kB). View file
 
2204.10356/paper_text/intro_method.md ADDED
@@ -0,0 +1,83 @@
1
+ # Introduction
2
+
3
+ Semantic segmentation is not only a common computer vision task, but also a decades-old problem in astronomy. For astrophysicists whose research relies on observing the universe with optical telescopes and charge-coupled device (CCD) imagers, identifying cosmic rays (CRs) in their observations has been a challenging task [@Windhorst_1994; @van_Dokkum_2001; @groom2004nonsense; @curtis_mccully_2018_1482019]. Telescope images can range from a few megapixels up to 3,200 megapixels [@10.1117/12.857920]; in contrast, CR artifacts are often just a few pixels wide. Because these bright pixels can be mistaken for real astronomical sources, it is necessary to reject them before further scientific interpretation of the data (see CR detection examples in Fig. [1](#fig:three_interfaces){reference-type="ref" reference="fig:three_interfaces"}).
4
+
5
+ <figure id="fig:three_interfaces" data-latex-placement="ht!">
6
+ <embed src="Images/cosmic-conn-three-interface.pdf" style="width:100.0%" />
7
+ <figcaption>Our segmentation framework provides three user interfaces for different application scenarios: (A) the visualization toolkit has a graphical user interface (GUI) for model inference, interactive visualization, and mask editing, (B) clean Python library APIs for integration with the user’s data pipeline, and (C) a command-line interface for batch processing. GUI components: File input and output; Whole-image preview and navigation; Thumbnail shortcuts to detected objects ranked from large to small; Image window with various mapping (scale) algorithms and manual controls to visualize 16-bit floating point images; Segmentation mask window with a field of view synchronized with the image. The highlighted pixels are detected CRs. The user can adjust the visualization of the HDR data on the left while interactively editing the segmentation mask on the right.</figcaption>
8
+ </figure>
9
+
10
+ Identifying tiny CRs in multi-megapixel images is only the first step. Astronomy telescope imagers are often cooled to operate below freezing temperature to minimize detector dark current and other noise sources [@Brown_2013], and these highly sensitive CCD sensors produce 16-bit floating point high dynamic range (HDR) images that require special software for visualization. Without a scientific visualization tool that supports native integration with popular deep-learning frameworks, the detection and mask verification are divided into separate steps that involve exporting and reading files between different tools.
11
+
12
+ Given existing tools, the workflow of segmentation, image visualization, human inspection, and possible editing of the mask is a cumbersome process involving switching between multiple tools or software, making it worthwhile to develop a dedicated tool to streamline this workflow. In our [video demonstration](https://github.com/cy-xu/cosmic-conn), we show an interactive process that involves continuous adjustments to both the science image and the segmentation mask to acquire the accurate coverage of a CR that might affect the analysis of an adjacent stellar object. This level of seamless interaction was previously impossible if one were switching between different tools after each adjustment.
13
+
14
+ Computer vision researchers can integrate this visualization toolkit with other segmentation models to provide end users, especially domain experts who are not machine learning researchers, an interactive graphical user interface (GUI) (Fig. [1](#fig:three_interfaces){reference-type="ref" reference="fig:three_interfaces"}) in production. The streamlined workflow enables the user to do real-time segmentation, HDR image visualization, and interactive mask inspection and editing without switching tools. The GUI toolkit allows any user to benefit from deep-learning-powered tools without having to know programming. The browser-based tool can be readily hosted on a graphics processing unit (GPU)-ready server so that users on a private or public network can enjoy GPU acceleration from any device (Section [3.2](#sec:multi-user){reference-type="ref" reference="sec:multi-user"}).
15
+
16
+ In addition, future tiny-object or high-precision segmentation tasks can adopt the interactive interface as an annotation tool for pixel-level labeling in multi-megapixel images, especially for HDR data. The Python backend allows native integration with popular deep-learning frameworks, with the potential to be an interface for Active Machine Learning and Interactive Machine learning (Sec. [5](#sec:discussion){reference-type="ref" reference="sec:discussion"}).
17
+
18
+ The discussion of detection algorithms is not the focus of this paper, so we briefly introduce Cosmic-CoNN [@Xu2021], our deep-learning segmentation framework deployed at Las Cumbres Observatory (LCO) for identifying cosmic rays in astronomical images. We curated a large, diverse dataset [@xu_chengyuan_2021_5034763] of over 4,500 scientific observations from LCO's 23 globally distributed telescopes[^1] [@Brown_2013]. In this dataset we discovered an extreme 1 to 10,000 class imbalance between CR and non-CR pixels that presented a challenge for previous machine-learning models. We proposed a novel loss function, and other improvements, to address this issue and increase model generalization. Our model achieved a $99.91\%$ true-positive rate at a fixed false-positive rate of $0.01\%$ on LCO instruments and maintained a true-positive rate above $96.40\%$ on data from another observatory, acquired with instruments that were never used for training (see [@Xu2021] for details). Our CR detector has become part of LCO's BANZAI data reduction pipeline that processes hundreds of astronomical observations every day [@curtis_mccully_2018_1257560].
19
+
20
+ <figure id="fig:tools" data-latex-placement="ht!">
21
+ <img src="Images/cosmic-conn-tools.png" style="width:100.0%" />
22
+ <figcaption>When the model or the default threshold does not produce ideal results (left), the user can adjust the HDR image mapping for better visualization, at the same time edit the mask interactively (right). The probability threshold and morphological dilation allow for global mask manipulation, while the pencil tool allows pixel-level mask editing. Pixels that are manually added/deleted by the user are marked in green or red, which will override the global manipulations.</figcaption>
23
+ </figure>
24
+
25
+ Here we summarize the main contributions of our interactive visualization toolkit:
26
+
27
+ - We provide a streamlined workflow for CR detection, improving quality by enabling human-in-the-loop segmentation, and reducing the overall time cost of astronomical image analysis and interpretation.
28
+
29
+ - We address a use case that is common in scientific imaging, but not well-supported by existing tools: interactive segmentation in large, multi-megapixel HDR images with tiny objects.
30
+
31
+ - We release our software as an open-source package that can be deployed off-the-shelf with diverse image types and segmentation models, and can facilitate imaging research across many scientific disciplines.
32
+
33
+ In addition, we found the tool useful during the development of our tiny-object segmentation model. The interactive visualization provides timely feedback for changes to the image processing pipeline, making it a useful research-support tool in computer vision as well.
34
+
35
+ The visualization toolkit shown in Fig. [1](#fig:three_interfaces){reference-type="ref" reference="fig:three_interfaces"} A is the key component, unifying model inference, image visualization, and segmentation mask inspection and editing in a single interface. It can visualize 2-dimensional NumPy arrays [@harris2020array] and directly read FITS[^2] files. It takes only 3 seconds to detect and render a 4-megapixel (2,000 by 2,000 pixels) 16-bit floating-point image on a consumer laptop with a low-power NVIDIA RTX 3060 GPU.
36
+
37
+ The image window and segmentation mask window are always synchronized to an identical field of view (Fig. [1](#fig:three_interfaces){reference-type="ref" reference="fig:three_interfaces"}). This design provides a useful reference for close inspection of tiny objects in large images. The user can navigate and zoom in/out with mouse controls in any of the image windows, including the overview image. Thumbnail shortcuts allow the user to quickly jump to and inspect detected objects, a design especially useful for locating tiny objects in very large images.
38
+
39
+ The image window (Fig. [2](#fig:tools){reference-type="ref" reference="fig:tools"}) provides multiple mapping algorithms to map (clip/normalize) 16-bit floating-point data to 8-bit unsigned integers, including linear, logarithmic, and square-root scaling, as well as [IRAF's zscale](https://docs.astropy.org/en/stable/api/astropy.visualization.ZScaleInterval.html#astropy.visualization.ZScaleInterval), an algorithm preferred by astronomers. The modular design of the image processing pipeline (Sec. [3.1](#pipeline){reference-type="ref" reference="pipeline"}) allows new mapping algorithms to be added easily. In addition, a user can manually set the minimum and maximum range to read from the raw data, providing the versatility especially needed for HDR images. The bottom-left corner of each window shows the raw-data value at the mouse cursor's pixel location and the predicted mask's confidence.
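+
+ To make the mapping step concrete, the following is a minimal sketch of the clip-and-scale logic, assuming a 2-D NumPy array as input; the function name `to_uint8` and its signature are illustrative, not the toolkit's actual API:
+
+ ```python
+ import numpy as np
+ from astropy.visualization import ZScaleInterval
+
+ def to_uint8(data, mode="zscale", vmin=None, vmax=None):
+     """Map floating-point image data to 8-bit for browser display."""
+     if mode == "zscale":
+         vmin, vmax = ZScaleInterval().get_limits(data)
+     elif vmin is None or vmax is None:
+         vmin, vmax = float(data.min()), float(data.max())  # min-max fallback
+     scaled = (np.clip(data, vmin, vmax) - vmin) / max(vmax - vmin, 1e-12)
+     if mode == "log":
+         scaled = np.log1p(1000.0 * scaled) / np.log(1001.0)
+     elif mode == "sqrt":
+         scaled = np.sqrt(scaled)
+     return (scaled * 255.0).astype(np.uint8)
+ ```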
40
+
41
+ In the segmentation mask window (Fig. [2](#fig:tools){reference-type="ref" reference="fig:tools"}), a user can raise or lower the default $0.5$ threshold to acquire a binary mask from the deep-learning model's predicted probability map $\in [0, 1]$. The user can then apply morphological operations like dilation to manipulate the mask globally, or use the pencil tool to manually edit the mask at the pixel level.
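+
+ A minimal sketch of these mask operations, assuming `prob` is the model's probability map in $[0, 1]$ and `user_edits` records pencil strokes ($+1$ add, $-1$ delete, $0$ untouched); the names are illustrative:
+
+ ```python
+ import numpy as np
+ from scipy.ndimage import binary_dilation
+
+ def make_mask(prob, threshold=0.5, dilation_iters=0, user_edits=None):
+     mask = prob > threshold                     # global threshold
+     if dilation_iters > 0:                      # global morphology
+         mask = binary_dilation(mask, iterations=dilation_iters)
+     if user_edits is not None:                  # pixel-level edits override
+         mask = np.where(user_edits > 0, True, mask)
+         mask = np.where(user_edits < 0, False, mask)
+     return mask
+ ```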
42
+
43
+ In the context of CR detection, the Download button will append the edited segmentation mask to the FITS file. This behavior can be changed based on the application. We can also change the communication mechanism with the deep-learning framework so the user can initiate the iterative labeling and training process in an active learning setting.
44
+
45
+ <figure id="fig:data_pipeline" data-latex-placement="ht!">
46
+ <img src="Images/Pipeline.png" style="width:90.0%" />
47
+ <figcaption>The interaction visualization toolkit’s data flow architecture between the client and the server.</figcaption>
48
+ </figure>
49
+
50
+ The visualization toolkit is powered by a Flask backend and a JavaScript frontend (Fig. [3](#fig:data_pipeline){reference-type="ref" reference="fig:data_pipeline"}). The Python-based backend allows seamless integration with popular deep-learning frameworks. The server instance can run locally or be hosted on a cloud server for remote user access. The server end only handles model inference and user-instance management, while the image processing pipeline runs entirely in the browser on the client end. This design avoids overloading the server when hosted for multi-user access. Communication between the client and the server happens only at file upload and download, using a custom data stream to reduce network delay.
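+
+ A minimal sketch of the server end, assuming a `detect(image)` wrapper around the segmentation model; the route name and payload format are illustrative, not the toolkit's actual API:
+
+ ```python
+ import io
+
+ import numpy as np
+ from flask import Flask, request, send_file
+
+ app = Flask(__name__)
+
+ @app.route("/detect", methods=["POST"])
+ def detect_endpoint():
+     # Model inference is the only heavy work done server-side.
+     image = np.load(io.BytesIO(request.files["image"].read()))
+     prob = detect(image)  # assumed model wrapper returning a probability map
+     buf = io.BytesIO()
+     np.save(buf, prob)    # return raw probabilities; thresholding is client-side
+     buf.seek(0)
+     return send_file(buf, mimetype="application/octet-stream")
+ ```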
51
+
52
+ We adopt a modular design in the image processing pipeline to maximize the flexibility of adding or removing image operations. The science image and the segmentation mask go through an ordered sequence of operations, and the modular design reduces computation and shortens the response time, as the image is buffered after each operation -- an adjustment in the middle of the pipeline only triggers later stages to reprocess the image.
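+
+ A minimal sketch of this buffering, assuming each stage is a pure function of the previous stage's output; the class is illustrative:
+
+ ```python
+ class BufferedPipeline:
+     """Ordered image operations with per-stage output buffering."""
+
+     def __init__(self, stages):
+         self.stages = list(stages)  # ordered list of callables
+         self.buffer = {}            # stage index -> cached output
+
+     def run(self, image, changed_from=0):
+         # Re-run only the stages at or after the first changed one;
+         # earlier outputs are served from the buffer.
+         out = image if changed_from == 0 else self.buffer[changed_from - 1]
+         for i in range(changed_from, len(self.stages)):
+             out = self.stages[i](out)
+             self.buffer[i] = out
+         return out
+ ```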
53
+
54
+ In the context of astronomical data, the pipeline first applies the user's manual min-max clipping to the raw data, then applies three-sigma clipping to remove outliers (oversaturated and dead pixels). By default, the previously mentioned zscale algorithm then maps the 16-bit image to 8-bit before rendering in the browser.
55
+
56
+ The segmentation mask's pipeline is simpler: a single scalar threshold is applied to the probability mask to acquire the binary mask. We use a separate mask to track the user's manual edits and combine it with the binary mask before rendering in the browser.
57
+
58
+ <figure id="fig:multiuser" data-latex-placement="ht!">
59
+ <img src="Images/Multiuser.png" style="width:95.0%" />
60
+ <figcaption>Frontend and backend architecture for multi-user interaction.</figcaption>
61
+ </figure>
62
+
63
+ Unlike many machine learning researchers, most real-world end users of a model do not have access to GPUs. With this in mind, we designed the GUI toolkit as a web-based application to support GPU acceleration and multi-user access from any device. In a large-scale deployment, additional cloud GPU resources could be recruited as necessary to support a larger number of users.
64
+
65
+ Fig. [4](#fig:multiuser){reference-type="ref" reference="fig:multiuser"} illustrates our system architecture, which supports multi-user interaction via secure user sessions. In addition to the user's IP address, a universally unique identifier (UUID) is sent to the server with each upload or download request. A unique request key is constructed from the two, and the server maintains a map from this key to the temporary path of the uploaded file with the segmentation mask appended. When the user finishes editing the image and requests to download the combined results, the key is used to retrieve the correct file. In this way, we avoid race conditions when multiple users interact with the same deployed application.
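+
+ A minimal sketch of this bookkeeping; the key format and helper names are illustrative:
+
+ ```python
+ sessions = {}  # request key -> temporary path of the file with mask appended
+
+ def request_key(ip, client_uuid):
+     return f"{ip}:{client_uuid}"
+
+ def register_upload(ip, client_uuid, tmp_path):
+     sessions[request_key(ip, client_uuid)] = tmp_path
+
+ def fetch_for_download(ip, client_uuid):
+     # Each client retrieves only its own file, so concurrent users
+     # never race on each other's results.
+     return sessions.get(request_key(ip, client_uuid))
+ ```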
66
+
67
+ Our interactive segmentation and visualization toolkit has the following key features:
68
+
69
+ - A synchronized dual-window design (Fig. [1](#fig:three_interfaces){reference-type="ref" reference="fig:three_interfaces"} A) and thumbnail-based image navigation (Fig. [1](#fig:three_interfaces){reference-type="ref" reference="fig:three_interfaces"}) enable inspecting and editing tiny objects in large images;
70
+
71
+ - Computer vision researchers can inspect the results interactively via the GUI toolkit to better understand the model's behavior and assist their research. They can also deploy a GUI segmentation tool for end users in a production environment with little effort;
72
+
73
+ - The browser-based application can be hosted on the cloud or internal GPU server to support multi-user access and GPU acceleration from any device;
74
+
75
+ - The Python and Flask backend allows seamless integration with popular deep-learning frameworks. Researchers can adapt this GUI for high-precision annotation or Active/Interactive Machine Learning.
76
+
77
+ SAOImageDS9 [@joye2005ds9] is a powerful FITS image visualization tool widely used in the astronomical community. DS9 inspired us to develop the GUI components in our toolkit. It supports various multi-frame layouts like tiling, blinking, and coordinate alignment. Despite active development, it remains primarily focused on visualization, and we do not see an easy way to integrate this standalone software with popular deep-learning frameworks.
78
+
79
+ ImageJ [@Schneider2012] is an image analysis program extensively used in the biological sciences. ImageJ2 [@Rueden2017] is a rewrite for multidimensional image data. It provides powerful image processing functionality but requires a third-party plug-in to synchronize two image windows, and we have not found a way to make searching for tiny objects in large images as easy as the thumbnail shortcuts provided in our toolkit. ImageJ is versatile and general-purpose, but not optimized for the deep-learning segmentation workflow.
80
+
81
+ DeepImageJ [@deepimageJ_2021] is a plugin that supports the use of pre-trained deep-learning models in ImageJ. It provides access to various models in a biomedical model repository (the BioImage Model Zoo) and allows basic deep-learning model inference. However, it carries over the ImageJ limitations discussed above and is hard to integrate with popular deep-learning frameworks, especially for researchers who need interactive data analysis during the research stage.
82
+
83
+ ITK-SNAP [@py06nimg] is well known for 3D medical image segmentation, providing powerful functionality through community contributions. However, it lacks support for deep-learning methods, and the standalone software is hard to integrate with other frameworks.
2206.13464/main_diagram/main_diagram.drawio ADDED
The diff for this file is too large to render. See raw diff
 
2206.13464/paper_text/intro_method.md ADDED
@@ -0,0 +1,81 @@
1
+ # Introduction
2
+
3
+ In recent years, criticism of reinforcement learning (RL) has continued to mount over its poor real-world applicability. Although RL has demonstrated superhuman performance in solving complex tasks such as playing games [@mnih2013playing; @silver2017mastering], its success is heavily dependent on the availability of an unbiased interactive environment, either the real system or a high-fidelity simulator, as well as millions of unrestricted trials and errors. However, constructing high-fidelity simulators can be extremely expensive or even impossible due to complex system dynamics and unobservable information in the physical world. Less accurate, simplified simulators, on the other hand, are easy to build but lead to severe visual and dynamics sim-to-real gaps when learning RL policies [@rao2020rl; @peng2018sim]. Among the different sources of sim-to-real gaps, visual gaps are relatively well addressed by a number of methods, e.g., through domain randomization [@tobin2017domain], model setups that are less impacted by visual gaps [@lee2021beyond], or robust and transferable state encoders [@bewley2019learning; @rao2020rl; @wang2022a]. Dynamics gaps, on the other hand, cause a systematic bias that is not easy to address through modeling. These dynamics gaps exist in almost every imperfect physical simulator with simplified dynamics, posing great challenges for RL when solving real-world tasks.
4
+
5
+ Recently, offline RL has provided another possibility: directly learning policies from real-world data [@levine2020offline; @fujimoto2018addressing; @kumar2019stabilizing], which has already been used to solve a number of practical problems, such as industrial control [@zhan2021deepthermal], robotics [@lee2021beyond] and interactive recommendation [@xiao2021general]. However, the policies learned through existing offline RL algorithms are often over-conservative due to the use of data-related policy constraints [@fujimoto2019off; @kumar2019stabilizing; @fujimoto2021minimalist], value regularizations [@kumar2020conservative; @kostrikov2021offline; @xu2021constraints], or in-sample learning [@kostrikov2022offline; @por] to combat the distributional shift [@kumar2019stabilizing]. This hurts the performance of offline RL policies and makes their performance strongly dependent on the size and state-action space coverage of the offline dataset. In real-world scenarios, data collection can be expensive or restrictive. The state-action space coverage of actual offline datasets can be quite narrow, which directly limits the potential performance of offline RL policies.
6
+
7
+ <figure id="fig:intro" data-latex-placement="t">
8
+ <embed src="figure/H2O_final.pdf" style="width:90.0%" />
9
+ <figcaption>Conceptual illustration of the dynamics-aware hybrid offline-and-online RL framework</figcaption>
10
+ </figure>
11
+
12
+ Despite their respective drawbacks, online RL with an imperfect simulator and offline RL with a real-world dataset hold complementary potential when used jointly to learn high-performance policies and overcome sim-to-real issues. Within a simulator, online RL agents can perform unrestricted exploration and have access to a great quantity and diversity of state-action data, while offline datasets may have poor coverage. On the other hand, real-world datasets contain perfect dynamics information, which can be readily used to guide and correct erroneous information in online training with simulation data. This intuition has motivated researchers to explore how to incorporate offline real-world data with simulation-based online training; however, existing studies are restricted to purely online [@eysenbach2020off] or offline [@liu2021dara] learning settings. None of the prior works has successfully combined offline and online learning organically while simultaneously handling the dynamics gaps between these two regimes.
13
+
14
+ In this paper, we present the first Hybrid Offline-and-Online Reinforcement Learning framework (H2O) as conceptually illustrated in Figure [1](#fig:intro){reference-type="ref" reference="fig:intro"}, which enables simultaneous policy learning with offline real-world datasets and simulation rollouts. The core of H2O is the introduction of dynamics-aware policy evaluation, which adaptively pushes down or pulls up Q-values on simulated data according to the dynamics gap evaluated against real data. We provide analyses and insights on why such dynamics-aware learning can potentially bridge the sim-to-real gap. Finally, we analyze the impact of each design component in H2O and demonstrate its superior performance against other cross-domain RL algorithms through extensive simulation and real-world experimental validations.
15
+
16
+ # Method
17
+
18
+ We consider the standard Markov Decision Process (MDP) setting, which is specified by a tuple $\mathcal{M}:=\left(\mathcal{S}, \mathcal{A}, r, P_{\mathcal{M}}, \rho, \gamma\right)$, where $\mathcal{S}$ and $\mathcal{A}$ are the state and action spaces, $r(\mathbf{s}, \mathbf{a})$ is the reward function, $P_{\mathcal{M}}\left(\mathbf{s}^{\prime} | \mathbf{s}, \mathbf{a}\right)$ refers to the transition dynamics under $\mathcal{M}$, $\rho(\mathbf{s})$ depicts the initial state distribution, and $\gamma\in (0,1)$ is the discount factor. The goal of RL is to learn a policy $\pi(\mathbf{a}|\mathbf{s})$ that maps a state $\mathbf{s}$ to an action $\mathbf{a}$ so as to maximize the expected cumulative discounted reward starting from $\rho$, i.e., $J(\mathcal{M},\pi)=\mathbb{E}_{\mathbf{s}_0\sim\rho,\mathbf{a}_t\sim\pi(\cdot|\mathbf{s}_t), \mathbf{s}_{t+1}\sim P_{\mathcal{M}}(\cdot|\mathbf{s}, \mathbf{a})}\left[\sum_{t=0}^{\infty} \gamma^{t} r\left(\mathbf{s}_{t}, \mathbf{a}_{t}\right)\right]$. In the conventional actor-critic formalism [@barto1983neuronlike; @sutton1998introduction], one learns an approximate Q-function $\hat{Q}$ by minimizing the squared Bellman error (referred to as *policy evaluation*), and optimizes the policy $\pi$ by maximizing the Q-function (referred to as *policy improvement*) as follows: $$\begin{align}
19
+ \hat{Q} &\leftarrow \arg \min _{Q} \mathbb{E}_{\mathbf{s}, \mathbf{a}, \mathbf{s}^{\prime}\sim \mathcal{U}}\left[\left(Q(\mathbf{s}, \mathbf{a})-\hat{\mathcal{B}}^{\pi} \hat{Q}(\mathbf{s}, \mathbf{a})\right)^{2}\right] \quad\quad\text{(Policy Evaluation)}\label{1} \\
20
+ \hat{\pi} &\leftarrow \arg \max _{\pi} \mathbb{E}_{\mathbf{s}, \mathbf{a}\sim \mathcal{U}}\left[\hat{Q}(\mathbf{s}, \mathbf{a})\right] \quad\quad\quad\quad\quad\quad\quad\quad\quad\quad\text{(Policy Improvement) }\label{2}
21
+ \end{align}$$ where $\mathcal{U}$ can either be the replay buffer $B$ generated by a previous version of policy $\hat{\pi}$ through online environment interactions, or a fixed dataset $\mathcal{D}=\left\{\left(\mathbf{s}_{t}^{i}, \mathbf{a}_{t}^{i}, \mathbf{s}_{t+1}^{i}, r_{t}^{i}\right)\right\}_{i=1}^n$ as in the offline RL setting. $\hat{\mathcal{B}}^{\pi}$ is the Bellman operator, here taken as the Bellman evaluation operator $\hat{\mathcal{B}}^{\pi} \hat{Q}(\mathbf{s}, \mathbf{a})=r(\mathbf{s}, \mathbf{a})+\gamma \mathbb{E}_{\mathbf{a}^{\prime} \sim \hat{\pi}\left(\mathbf{a}^{\prime} | \mathbf{s}^{\prime}\right)}\left[\hat{Q}\left(\mathbf{s}^{\prime}, \mathbf{a}^{\prime}\right)\right]$.
22
+
23
+ In the offline setting, performing the standard Bellman update in Eq. ([\[1\]](#1){reference-type="ref" reference="1"}) can result in serious overestimation of Q-values due to distributional shift. An effective approach to alleviating this problem is to regularize the value function on OOD actions, an approach introduced in CQL [@kumar2020conservative]. CQL learns a policy $\hat{\pi}$ upon a lower-bounded Q-function that additionally pushes down the Q-values on actions induced by $\mu(\mathbf{a} | \mathbf{s})$ and pulls up Q-values on trustworthy offline data: $$\begin{equation}
24
+ \min _{Q}\max _{\mu} \alpha\left(\mathbb{E}_{\mathbf{s} \sim \mathcal{D}, \mathbf{a} \sim \mu(\cdot | \mathbf{s})}[Q(\mathbf{s}, \mathbf{a})]-\mathbb{E}_{\mathbf{s},\mathbf{a} \sim \mathcal{D}}[Q(\mathbf{s}, \mathbf{a})]\right)+\mathcal{E}\left(Q, \hat{\mathcal{B}}^{\pi} \hat{Q}\right)\label{4}
25
+ \end{equation}$$ where the dataset $\mathcal{D}$ is collected by some behavioral policy $\pi_\mathcal{D}(\cdot| \mathbf{s})$, and $\mu(\cdot|\mathbf{s})$ is some sampling distribution, which is often taken as $\hat{\pi}(\cdot|\mathbf{s})$ in prior works [@yu2021combo; @li2022dealing]. $\mathcal{E}\left(Q, \hat{\mathcal{B}}^{\pi} \hat{Q}\right)$ denotes the Bellman error in Eq. ([\[1\]](#1){reference-type="ref" reference="1"}). It is worth noting that the above value regularization framework has the potential to handle data from two sources with some preferences, i.e., unreliable pushed-down data and reliable pulled-up data. This motivates us to devise the H2O framework to perform simultaneous offline and online policy learning using a similar value regularization recipe.
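+
+ A minimal PyTorch-style sketch of this regularizer, taking $\mu$ as the current policy as noted above; `q` and `pi.sample` are assumed interfaces, not a reference implementation:
+
+ ```python
+ def cql_penalty(q, pi, s, a_data, alpha=1.0):
+     a_mu = pi.sample(s)               # actions from mu(.|s) = pi_hat(.|s)
+     push_down = q(s, a_mu).mean()     # push down Q on sampled (possibly OOD) actions
+     pull_up = q(s, a_data).mean()     # pull up Q on trustworthy offline data
+     return alpha * (push_down - pull_up)  # added to the Bellman error term
+ ```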
26
+
27
+ Naïvely combining offline and online data from different distributions may cause a mismatch between data distributions, giving rise to issues similar to the distributional shift problem in offline RL settings [@kumar2019stabilizing]. First, caution needs to be taken when dealing with trustworthy offline data from the real world and the potentially problematic simulated online rollouts (described in Section [4.1](#o&o){reference-type="ref" reference="o&o"}). Secondly, the dynamics model underpinning the simulator is only roughly aligned with its real-world counterpart, and the sim-to-real dynamics gaps can be heterogeneous across different state-action pairs. Hence, uniform value regularization upon all simulated samples may not be appropriate to fully leverage the potential of an imperfect simulator; a well-founded dynamics gap measurement for adaptive value regularization is needed (Section [4.2](#dg){reference-type="ref" reference="dg"}). Lastly, it is worth noting that the biased simulated dynamics could induce erroneous next-state predictions, which may lead to problematic Bellman updates. Such errors also need to be properly considered in the algorithm design (Section [4.3](#td){reference-type="ref" reference="td"}). We approach all the aforementioned issues with what we call Dynamics-Aware Hybrid Offline-and-Online (H2O) RL, described in the following subsections.
28
+
29
+ Existing online RL frameworks are not suited to handling the potential mismatch between training data distributions from different sources; offline RL approaches such as value regularization, however, provide a viable foundation. We modify the value regularization scheme from offline RL and propose dynamics-aware policy evaluation, which has the following general form: $$\begin{equation}
30
+ \min_{Q}{\color{Red}\max_{d^\phi}}\; \beta\left[\mathbb{E}_{\mathbf{s}, \mathbf{a}\sim {\color{Red} d^\phi(\mathbf{s}, \mathbf{a})} }[Q(\mathbf{s}, \mathbf{a})]-\mathbb{E}_{\mathbf{s},\mathbf{a} \sim \mathcal{D}}[Q(\mathbf{s}, \mathbf{a})] + {\color{Red}\mathcal{R}(d^\phi)}\right]+{\color{Blue}\widetilde{\mathcal{E}} \left(Q, \hat{\mathcal{B}}^{\pi} \hat{Q}\right)}\label{d^phi}
31
+ \end{equation}$$ where $\beta$ is a positive scaling parameter. $d^\phi(\mathbf{s}, \mathbf{a})$ is a particular state-action sampling distribution concentrated on high dynamics-gap samples, and $\mathcal{R}(d^\phi)$ is a regularization term for $d^\phi$ to enforce this designed behavior. Ideally, we would want to penalize Q-values at the simulated samples with high dynamics gaps, rather than at all of the samples. $\widetilde{\mathcal{E}}(Q, \hat{\mathcal{B}}^{\pi} \hat{Q})$ denotes the modified Bellman error of the mixed data from the offline dataset $\mathcal{D}$ and the simulation rollout samples in the online replay buffer $B$, which are generated by the real MDP $\mathcal{M}$ and the simulated MDP $\widehat{\mathcal{M}}$, respectively. In particular, the Bellman error on the simulated data needs to be corrected. The terms marked in red and blue are the key design elements that account for the dynamics gaps in offline-and-online RL policy learning, which will be discussed in the following sub-sections.
32
+
33
+ To achieve adaptive value regularization, we can use the minimization term $\mathbb{E}_{\mathbf{s}, \mathbf{a}\sim d^\phi(\mathbf{s}, \mathbf{a})}[Q(\mathbf{s}, \mathbf{a})]$ to penalize the high-dynamics-gap simulation samples, and use the maximization term $\mathbb{E}_{\mathbf{s},\mathbf{a} \sim \mathcal{D}}[Q(\mathbf{s}, \mathbf{a})]$ to cancel the penalization on real offline data. The key question is how to properly design $d^\phi(\mathbf{s}, \mathbf{a})$ to assign high probabilities to high-dynamics-gap samples. In our study, we leverage the regularization term $\mathcal{R}(d^\phi)$ to control the behavior of $d^\phi(\mathbf{s}, \mathbf{a})$. We choose $\mathcal{R}(d^\phi)=-D_{KL}(d^\phi(\mathbf{s}, \mathbf{a})\|\omega(\mathbf{s}, \mathbf{a}))$, where $D_{KL}(\cdot || \cdot)$ is the Kullback-Leibler (KL) divergence and $\omega(\mathbf{s}, \mathbf{a})$ is a distribution that characterizes the dynamics gaps for samples in the state-action space. Hence maximizing $\mathcal{R}(d^\phi)$ draws $d^\phi$ closer to $\omega$, achieving our desired behavior. Note that the inner maximization problem over $d^\phi$ in Eq. ([\[d\^phi\]](#d^phi){reference-type="ref" reference="d^phi"}) under this design corresponds to the following optimization problem: $$\begin{equation}
34
+ \max _{d^\phi} \mathbb{E}_{\mathbf{s}, \mathbf{a} \sim d^\phi(\mathbf{s}, \mathbf{a})}[Q(\mathbf{s}, \mathbf{a})]-D_{KL}(d^\phi(\mathbf{s}, \mathbf{a})\|\omega(\mathbf{s}, \mathbf{a})) \quad \text{s.t. } \sum_{\mathbf{s}, \mathbf{a}} d^\phi(\mathbf{s}, \mathbf{a})=1, d^\phi(\mathbf{s}, \mathbf{a}) \geq 0\label{closed-form}
35
+ \end{equation}$$ The above optimization problem admits a closed-form solution $d^{\phi}(\mathbf{s}, \mathbf{a}) \propto \omega(\mathbf{s}, \mathbf{a}) \exp \left(Q(\mathbf{s}, \mathbf{a})\right)$ (see the derivation in Appendix [8.1](#app:closed-form_derivation){reference-type="ref" reference="app:closed-form_derivation"}). Plugging this back into Eq. ([\[d\^phi\]](#d^phi){reference-type="ref" reference="d^phi"}), the first term now corresponds to a weighted soft-maximum of Q-values at any state-action pair, and the original problem transforms into the following form: $$\begin{equation}
36
+ \min_{Q} \beta\left({\color{red}\log \sum_{\mathbf{s},\mathbf{a}} {\omega(\mathbf{s},\mathbf{a})} \exp \left(Q(\mathbf{s},\mathbf{a})\right)}-\mathbb{E}_{\mathbf{s},\mathbf{a}\sim \mathcal{D}}\left[Q(\mathbf{s},\mathbf{a})\right]\right)+{\color{blue}\widetilde{\mathcal{E}}\left(Q, \hat{\mathcal{B}}^{\pi} \hat{Q}\right)}\label{logsumexp}
37
+ \end{equation}$$ This result is intuitively reasonable, as we penalize Q-values more heavily where $\omega(\mathbf{s}, \mathbf{a})$ is larger, i.e., at high dynamics-gap simulation samples. The next question is how to practically evaluate $\omega(\mathbf{s}, \mathbf{a})$ given the offline dataset and the online simulation rollouts. Specifically, we can measure the dynamics gap between real and simulated dynamics on a state-action pair $(\mathbf{s},\mathbf{a})$ as $u(\mathbf{s},\mathbf{a}) :=D_{KL}(P_{\widehat{\mathcal{M}}}(\mathbf{s}' | \mathbf{s}, \mathbf{a})\|P_{\mathcal{M}}(\mathbf{s}' | \mathbf{s}, \mathbf{a}))=\mathbb{E}_{s'\sim P_{\widehat{\mathcal{M}}}}\log(P_{\widehat{\mathcal{M}}}(\mathbf{s}' | \mathbf{s}, \mathbf{a})/P_{\mathcal{M}}(\mathbf{s}' | \mathbf{s}, \mathbf{a}))$. $\omega(\mathbf{s},\mathbf{a})$ can thus be represented as a normalized distribution of $u$, i.e., $\omega(\mathbf{s,a})=u(\mathbf{s,a})/\sum_{\mathbf{\tilde{s},\tilde{a}}}u(\mathbf{\tilde{s},\tilde{a}})$. Now, the challenge is how to evaluate the dynamics ratio $P_{\widehat{\mathcal{M}}}/P_{\mathcal{M}}$. Note that according to Bayes' rule: $$\begin{align}
38
+ % \small
39
+ % \scalebox{0.95}{$
40
+ \frac{P_{\widehat{\mathcal{M}}}\left(\mathbf{s}' | \mathbf{s}, \mathbf{a}\right)}{P_{\mathcal{M}}\left(\mathbf{s}' | \mathbf{s}, \mathbf{a}\right)}
41
+ &=\frac{p\left(\mathbf{s}' | \mathbf{s}, \mathbf{a}, \text{sim}\right)}{ p\left(\mathbf{s}' | \mathbf{s}, \mathbf{a},\text{real}\right)}=\frac{p\left(\text {sim} | \mathbf{s}, \mathbf{a}, \mathbf{s}'\right)}{p\left(\text {sim} | \mathbf{s}, \mathbf{a}\right)}/ \frac{p\left(\text {real} | \mathbf{s}, \mathbf{a},\mathbf{s}'\right)}{p\left(\text {real} | \mathbf{s}, \mathbf{a}\right)} \notag \\
42
+ &=\frac{p\left(\text {sim} | \mathbf{s}, \mathbf{a}, \mathbf{s}'\right)}{p\left(\text {real} | \mathbf{s}, \mathbf{a},\mathbf{s}'\right)}/ \frac{p\left(\text {sim} | \mathbf{s}, \mathbf{a}\right)}{p\left(\text {real} | \mathbf{s}, \mathbf{a}\right)} = \frac{1 - p\left(\text {real} | \mathbf{s}, \mathbf{a}, \mathbf{s}'\right)}{p\left(\text {real} | \mathbf{s}, \mathbf{a},\mathbf{s}'\right)}/ \frac{1 - p\left(\text {real} | \mathbf{s}, \mathbf{a}\right)}{p\left(\text {real} | \mathbf{s}, \mathbf{a}\right)}
43
+ % $}
44
+ \label{bayes}
45
+ \end{align}$$ Hence, we can approximate $p\left(\text {real} | \mathbf{s}, \mathbf{a},\mathbf{s}'\right)$ and $p\left(\text {real} | \mathbf{s}, \mathbf{a}\right)$ with a pair of discriminators $D_{\Phi_{sas}}(\cdot|\mathbf{s}, \mathbf{a}, \mathbf{s}')$ and $D_{\Phi_{sa}}(\cdot|\mathbf{s}, \mathbf{a})$ respectively that are optimized with standard cross-entropy loss between real offline data and the simulated samples as in DARC [@eysenbach2020off].
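+
+ A minimal sketch of this estimate, assuming trained discriminators `d_sas(s, a, s_next)` and `d_sa(s, a)` that output $p(\text{real}|\cdot)$ as tensors in $(0, 1)$:
+
+ ```python
+ def dynamics_ratio(d_sas, d_sa, s, a, s_next, eps=1e-6):
+     """Estimate P_sim(s'|s,a) / P_real(s'|s,a) via the Bayes'-rule identity."""
+     p_sas = d_sas(s, a, s_next).clamp(eps, 1.0 - eps)
+     p_sa = d_sa(s, a).clamp(eps, 1.0 - eps)
+     return ((1.0 - p_sas) / p_sas) / ((1.0 - p_sa) / p_sa)
+ ```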
46
+
47
+ Due to the existence of dynamics gaps in the simulator, directly computing the Bellman error for simulated samples in the online replay buffer $B$ (i.e., $\mathbb{E}_{\mathbf{s}, \mathbf{a}, \mathbf{s}'\sim B}[(Q-\hat{\mathcal{B}}^{\pi} \hat{Q})(\mathbf{s},\mathbf{a})]^{2}$) is problematic. This is because the next state $\mathbf{s}'$ comes from the potentially biased simulated dynamics $P_{\widehat{\mathcal{M}}}$, which can result in the miscalculation of target Q-values in Bellman updates. Ideally, we would like the next state $\mathbf{s}'$ to come from the real dynamics $P_{\mathcal{M}}$; however, such an $\mathbf{s}'$ given an arbitrary state-action pair $(\mathbf{s}, \mathbf{a})\sim B$ is not obtainable during training. To fix this issue, we reuse the previously evaluated dynamics ratio in Eq. ([\[bayes\]](#bayes){reference-type="ref" reference="bayes"}) as an importance sampling weight, and introduce the following modified Bellman error formulation on both the offline dataset $\mathcal{D}$ and the simulated data $B$: $$\begin{equation}
48
+ \small
49
+ \begin{aligned}
50
+ {\color{blue}{\widetilde{\mathcal{E}}\left(Q, \hat{\mathcal{B}}^{\pi} \hat{Q}\right)}}=\frac{1}{2}\mathbb{E}_{\mathbf{s}, \mathbf{a},\mathbf{s}'\sim \mathcal{D}}\left[\left(Q-\hat{\mathcal{B}}^{\pi} \hat{Q}\right)(\mathbf{s},\mathbf{a})\right]^{2}+\frac{1}{2}\mathbb{E}_{\mathbf{s}, \mathbf{a}\sim B}\mathbb{E}_{ \mathbf{s}'\sim p_{\mathcal{M}}}\left[\left(Q-\hat{\mathcal{B}}^{\pi} \hat{Q}\right)(\mathbf{s},\mathbf{a})\right]^{2}\\
51
+ =\frac{1}{2}\mathbb{E}_{\mathbf{s}, \mathbf{a},\mathbf{s}'\sim \mathcal{D}}\left[\left(Q-\hat{\mathcal{B}}^{\pi} \hat{Q}\right)(\mathbf{s},\mathbf{a})\right]^{2}+\frac{1}{2}\mathbb{E}_{\mathbf{s}, \mathbf{a},\mathbf{s}'\sim B}\left[{\color{blue}\frac{P_{\mathcal{M}}(\mathbf{s}' | \mathbf{s}, \mathbf{a})}{P_{\widehat{\mathcal{M}}}(\mathbf{s}' | \mathbf{s}, \mathbf{a})}}\left(Q-\hat{\mathcal{B}}^{\pi} \hat{Q}\right)(\mathbf{s},\mathbf{a})\right]^{2}\label{is}
52
+ \end{aligned}
53
+ \end{equation}$$
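+
+ A minimal sketch of this modified Bellman error, reusing `dynamics_ratio` from above (the importance weight is its reciprocal, $P_{\mathcal{M}}/P_{\widehat{\mathcal{M}}}$); the batch layout and fixed `gamma` are illustrative:
+
+ ```python
+ def h2o_bellman_loss(q, q_target, real_batch, sim_batch, ratio_fn, gamma=0.99):
+     # Real offline data: standard TD error.
+     s, a, r, s2, a2 = real_batch                # a2 ~ pi(.|s2)
+     td_real = q(s, a) - (r + gamma * q_target(s2, a2))
+     # Simulated data: TD error reweighted by P_real / P_sim.
+     s, a, r, s2, a2 = sim_batch
+     w = (1.0 / ratio_fn(s, a, s2)).detach()     # stop gradient through the weight
+     td_sim = w * (q(s, a) - (r + gamma * q_target(s2, a2)))
+     return 0.5 * (td_real ** 2).mean() + 0.5 * (td_sim ** 2).mean()
+ ```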
54
+
55
+ Combining Eq. ([\[logsumexp\]](#logsumexp){reference-type="ref" reference="logsumexp"}) and ([\[is\]](#is){reference-type="ref" reference="is"}), we obtain the final dynamics-aware policy evaluation procedure for H2O, which enables adaptive value regularization on high dynamics-gap samples as well as more reliable Bellman updates. H2O can be instantiated upon common actor-critic algorithms (e.g., Soft Actor-Critic (SAC) [@haarnoja2018soft]). The pseudocode of H2O built upon SAC is presented in Algorithm [\[algorithm\]](#algorithm){reference-type="ref" reference="algorithm"}, which uses the policy improvement objective from SAC, with $\lambda$ a temperature parameter auto-tuned during training. Other policy improvement objectives, or simply maximizing the expected Q-values as in Eq. ([\[2\]](#2){reference-type="ref" reference="2"}), are also compatible with the H2O framework.
56
+
57
+ For practical considerations, we introduce two relaxations in our implementation. First, in Eq. ([\[logsumexp\]](#logsumexp){reference-type="ref" reference="logsumexp"}), we need to sample from the whole state-action space to compute the weighted average of exponentiated Q-values, which can be impractical and unnecessary since not all state-action pairs are of interest for policy learning. Instead, we approximate this value using the state-action samples in the mini-batch of the simulated replay buffer $B$. Second, the evaluation of the KL divergence in $u(\mathbf{s}, \mathbf{a})=\mathbb{E}_{s'\sim P_{\widehat{\mathcal{M}}}}\log(P_{\widehat{\mathcal{M}}}(\mathbf{s}' | \mathbf{s}, \mathbf{a})/P_{\mathcal{M}}(\mathbf{s}' | \mathbf{s}, \mathbf{a}))$ involves sampling next states $\mathbf{s}'$ from $P_{\widehat{\mathcal{M}}}(\cdot|\mathbf{s}, \mathbf{a})$, which can be infeasible given a black-box simulator. We take a simplified approach that approximates the expected value by averaging over $N$ random samples from a Gaussian distribution $\mathcal{N}(\mathbf{s}', \hat{\Sigma}_\mathcal{D})$, where $\hat{\Sigma}_\mathcal{D}$ is the covariance matrix of states computed from the real offline dataset. Although this treatment is simple, we find it highly effective, producing good performance in empirical experiments. Please refer to Appendix [9.1](#app:imp_details){reference-type="ref" reference="app:imp_details"} for more implementation details.
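+
+ A minimal sketch of this approximation (using a diagonal covariance for simplicity), reusing `dynamics_ratio` from above; `sigma_d` denotes per-dimension state standard deviations estimated from the offline dataset:
+
+ ```python
+ import torch
+
+ def dynamics_gap(d_sas, d_sa, s, a, s_next, sigma_d, n_samples=10):
+     """Monte-Carlo estimate of u(s, a) = KL(P_sim || P_real)."""
+     log_ratios = []
+     for _ in range(n_samples):
+         s_tilde = s_next + torch.randn_like(s_next) * sigma_d
+         ratio = dynamics_ratio(d_sas, d_sa, s, a, s_tilde)
+         log_ratios.append(torch.log(ratio))
+     # Clamping at zero is a practical choice here: the KL itself is non-negative.
+     return torch.stack(log_ratios).mean(dim=0).clamp_min(0.0)
+ ```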
58
+
59
+ ::: algorithm
60
+ **Initialize:** critic network $Q_\theta$, target network $Q_{\bar{\theta}}$, actor network $\pi_\phi$, replay buffer $B=\varnothing$ for simulated transitions, discriminators $D_{\Phi_{sas}}(\cdot|\mathbf{s}, \mathbf{a}, \mathbf{s}')$ and $D_{\Phi_{sa}}(\cdot|\mathbf{s}, \mathbf{a})$
61
+ :::
62
+
63
+ In this section, we analyze H2O and provide intuition on how it works when combining both offline and online learning. Interestingly, we find that the final form of dynamics-aware policy evaluation as given in Eq. ([\[logsumexp\]](#logsumexp){reference-type="ref" reference="logsumexp"}) and ([\[is\]](#is){reference-type="ref" reference="is"}) actually leads to an equivalent adaptive reward adjustment term on the Q-function, which depends on the dynamics gap distribution $\omega(\mathbf{s}, \mathbf{a})$ and the state-action distribution of the dataset $d_\mathcal{M}^{\pi_\mathcal{D}}(\mathbf{s}, \mathbf{a})$. To show this, note that the weighted log-sum-exp term $\log \sum_{\mathbf{s},\mathbf{a}} {\omega(\mathbf{s},\mathbf{a})} \exp \left(Q(\mathbf{s},\mathbf{a})\right)=\log\mathbb{E}_{\mathbf{s}, \mathbf{a}\sim { \omega(\mathbf{s}, \mathbf{a})} }\exp(Q(\mathbf{s}, \mathbf{a}))$ in Eq. ([\[logsumexp\]](#logsumexp){reference-type="ref" reference="logsumexp"}) can be bounded as follows (see Appendix [8.2](#app:reward_adjustment){reference-type="ref" reference="app:reward_adjustment"} for detailed proof): $$\begin{equation}
64
+ \small
65
+ % \scalebox{1}{$
66
+ \mathbb{E}_{\mathbf{s}, \mathbf{a}\sim { \omega(\mathbf{s}, \mathbf{a})} }[Q(\mathbf{s}, \mathbf{a})] \leq \log \mathbb{E}_{\mathbf{s}, \mathbf{a}\sim { \omega(\mathbf{s}, \mathbf{a})} }\exp(Q(\mathbf{s}, \mathbf{a})) \leq \mathbb{E}_{\mathbf{s}, \mathbf{a}\sim { \omega(\mathbf{s}, \mathbf{a})} }[Q(\mathbf{s}, \mathbf{a})] + \frac{\text{Var}_{\omega}[\exp(Q(\mathbf{s}, \mathbf{a}))]}{2\exp(2Q_{min})}
67
+ % $}
68
+ \end{equation}$$ where we denote the range of learned Q-values as $[Q_{min}, Q_{max}]$, and $\text{Var}_{\omega}[\exp(Q(\mathbf{s}, \mathbf{a}))]$ denotes the variance of $\exp(Q(\mathbf{s}, \mathbf{a}))$ under samples drawn from distribution $\omega(\mathbf{s}, \mathbf{a})$. The LHS inequality is a result of Jensen's inequality, and the RHS inequality is a direct result from [@liao2018sharpening]. The above inequalities suggest that $\mathbb{E}_{\mathbf{s}, \mathbf{a}\sim { \omega(\mathbf{s}, \mathbf{a})} }[Q(\mathbf{s}, \mathbf{a})]$ can be a reasonable approximation of our weighted log-sum-exp term if the Q-function takes large values and the gap between $\exp(Q_{min})$ and $\sqrt{\text{Var}_{\omega}[\exp(Q(\mathbf{s}, \mathbf{a}))]}$ is not too large. This can be practically satisfied if we let $\gamma\rightarrow 1$ and design the range of the reward function as well as the episode termination conditions properly.
69
+
70
+ With this approximation, we can consider the following approximated policy evaluation objective based on Eq. ([\[logsumexp\]](#logsumexp){reference-type="ref" reference="logsumexp"}) and ([\[is\]](#is){reference-type="ref" reference="is"}), which offers a much cleaner form for analysis: $$\begin{equation}
71
+ \min _{Q} {\beta\left({{\color{Red}\mathbb{E}_{\mathbf{s}, \mathbf{a}\sim { \omega(\mathbf{s}, \mathbf{a})} }[Q(\mathbf{s}, \mathbf{a})]}} -\mathbb{E}_{\mathbf{s},\mathbf{a}\sim \mathcal{D}}\left[Q(\mathbf{s},\mathbf{a})\right]\right)}+{\widetilde{\mathcal{E}}\left(Q, \hat{\mathcal{B}}^{\pi} \hat{Q}^{k}\right)}\label{new_obj}
72
+ \end{equation}$$ The ablation study in Appendix [10.1](#app:extra_compara_results){reference-type="ref" reference="app:extra_compara_results"} shows that this approximated policy evaluation objective yields only mild performance drops relative to the original version in most cases, making it a reasonable substitution for theoretical analysis. Assuming that the Q-function is tabular, we can find the Q-value corresponding to the new objective by following an approximate dynamic programming approach and differentiating Eq. ([\[new_obj\]](#new_obj){reference-type="ref" reference="new_obj"}) with respect to $Q^k$ in iteration $k$ (see Appendix [8.2](#app:reward_adjustment){reference-type="ref" reference="app:reward_adjustment"} for details): $$\begin{equation}
73
+ \hat{Q}^{k+1}(\mathbf{s}, \mathbf{a})=(\hat{\mathcal{B}}^{\pi}\hat{Q}^k)(\mathbf{s}, \mathbf{a})-\beta\left[\frac{\omega(\mathbf{s}, \mathbf{a})-d_\mathcal{M}^{\pi_\mathcal{D}}(\mathbf{s}, \mathbf{a})}{d_\mathcal{M}^{\pi_\mathcal{D}}(\mathbf{s}, \mathbf{a})+ d^{\pi}_{\widehat{\mathcal{M}}}(\mathbf{s}, \mathbf{a})}\right]
74
+ \label{nu}
75
+ \end{equation}$$ where $d_\mathcal{M}^{\pi_\mathcal{D}}(\mathbf{s}, \mathbf{a})$ and $d^{\pi}_{\widehat{\mathcal{M}}}(\mathbf{s}, \mathbf{a})$ are state-action marginal distributions under the behavioral policy $\pi_\mathcal{D}$ and the learned policy $\pi$, respectively. If we denote the second term on the RHS of the above equation as $\nu(\mathbf{s}, \mathbf{a})$, we can see that $\nu(\mathbf{s}, \mathbf{a})$ can be perceived as an adaptive reward adjustment term, which penalizes or boosts the reward at a state-action pair $(\mathbf{s}, \mathbf{a})$ according to the relative difference between the dynamics gap distribution $\omega(\mathbf{s}, \mathbf{a})$ and the marginal state-action distribution of the offline dataset $d_\mathcal{M}^{\pi_\mathcal{D}}(\mathbf{s}, \mathbf{a})$. Specifically, we can make the following interesting observations:
76
+
77
+ - If $\omega(\mathbf{s},\mathbf{a})>d_\mathcal{M}^{\pi_\mathcal{D}}(\mathbf{s}, \mathbf{a})$, then $\nu(\mathbf{s},\mathbf{a})$ serves as a reward penalty. This corresponds to the case where the state-action pair $(\mathbf{s},\mathbf{a})$ either has a large dynamics gap, or belongs to OOD or relatively low-data-density areas ($d_\mathcal{M}^{\pi_\mathcal{D}}(\mathbf{s}, \mathbf{a})=0$ or $< \omega(\mathbf{s},\mathbf{a})$). In such cases, we push down the Q-values and adopt conservatism similar to the typical treatment in offline RL settings [@kumar2020conservative]. The higher the dynamics gap (larger $\omega$), the more we penalize the Q-function.
78
+
79
+ - If $\omega(\mathbf{s},\mathbf{a})<d_\mathcal{M}^{\pi_\mathcal{D}}(\mathbf{s}, \mathbf{a})$, then we are evaluating in good data areas with a low dynamics gap, or we have enough information from offline data to control the potential dynamics gap in the simulated online samples. In such cases, $\nu(\mathbf{s},\mathbf{a})$ serves as a reward boost term and adopts optimism. Although it overestimates the Q-values, we argue that this can be beneficial, as it can promote exploration in these "good" data regions and potentially help reduce variance during training.
80
+
81
+ With the adaptive reward adjustment term $\nu$ defined in Eq. ([\[nu\]](#nu){reference-type="ref" reference="nu"}), we can further show that the value function $V(\mathbf{s})$ is underestimated at high dynamics gap areas, which allows for safe policy learning in our offline-and-online setting. Detailed theoretical analysis can be found in Appendix [8.3](#app:value){reference-type="ref" reference="app:value"}.
2207.07697/main_diagram/main_diagram.drawio ADDED
The diff for this file is too large to render. See raw diff
 
2207.07697/paper_text/intro_method.md ADDED
@@ -0,0 +1,105 @@
1
+ # Introduction
2
+
3
+ Deep learning models are widely deployed for inference on edge devices like smartphones and embedded platforms. In contrast, training is still predominantly done on large cloud servers with high-throughput accelerators such as GPUs. The centralized cloud training model requires transmitting sensitive data, such as photos and keystrokes, from edge devices to the cloud, thereby sacrificing user privacy and incurring additional data movement costs.
4
+
5
+ To enable users to personalize their models without relinquishing privacy, on-device training methods such as federated learning [@federated-smith] perform local training updates without the need to consolidate data to the cloud. These methods have been widely deployed to personalize keyboard suggestions in Google Gboard [@google-keyboard-federated] and to improve Automatic Speech Recognition (ASR) on iPhones [@apple-ml].
6
+
7
+ <figure id="fig:poet-overview" data-latex-placement="t">
8
+ <embed src="figures/figures/system_diagram.pdf" />
9
+ <figcaption>POET optimizes state-of-the-art ML models for training on edge devices. Operators of the ML model are profiled on the target edge device to obtain fine-grained profiles. POET adopts integrated rematerialization and paging to produce an energy-optimal training schedule.</figcaption>
10
+ </figure>
11
+
12
+ At the same time, current on-device training methods cannot support training modern architectures and large models. For example, Google Gboard fine-tunes a simple logistic regression model. Training larger models on edge devices is infeasible primarily due to limited device memory, which cannot store the activations needed for backpropagation. A single training iteration for ResNet-50 [@resnet] requires 200$\times$ more memory than inference.
13
+
14
+ Prior work has proposed strategies including paging to auxiliary memory [@capuchin] and rematerialization [@chen2016b; @checkmate; @dtr] to reduce the memory footprint of training in the cloud. However, these methods result in a significant increase in total energy consumption. The data transfers associated with paging methods often require more energy than recomputing the data. Alternatively, rematerialization increases energy consumption at a rate of $O(n^2)$ as the memory budget shrinks.
15
+
16
+ In this work, we show that *paging and rematerialization are highly complementary*. By carefully rematerializing cheap operations while paging the results of expensive operations to auxiliary memory such as flash or an SD card, we can scale effective memory capacity with minimal energy overhead. By combining these two methods, we demonstrate it is possible to train models like BERT on mobile-class edge devices. By framing edge training as an optimization problem, we discover optimal schedules with provably minimal energy consumption at a given memory budget. While the focus of this paper is edge deployments, the energy objective is increasingly relevant even for cloud deployments [@patterson2022carbon].
17
+
18
+ We present POET (Private Optimal Energy Training), an algorithm for energy-optimal training of modern neural networks on memory-constrained edge devices (Fig. [1](#fig:poet-overview){reference-type="ref" reference="fig:poet-overview"}). Given that it is prohibitively expensive to cache all activation tensors for backpropagation, POET optimally pages and rematerializes activations, thereby reducing memory consumption by up to 2x. We reformulate the edge training problem as an integer linear program (ILP) and find it can be solved to optimality in under ten minutes by commodity solvers.
19
+
20
+ For models deployed on real-world edge devices, training happens when the edge device is relatively idle and spare compute cycles are available. For example, Google Gboard schedules model updates when the phone is put on charge. Hence POET also incorporates a hard training-time constraint: given a memory constraint and the number of training epochs, POET generates solutions that also satisfy the given training deadline. POET transparently develops a comprehensive cost model by profiling the target hardware with the target network's operators. Finally, POET is mathematically value-preserving (*i.e.,* it makes no approximations), and it works for existing architectures out-of-the-box.
21
+
22
+ The novel contributions of this work include:
23
+
24
+ 1. A formulation of an integer linear program to find the **energy-optimal** schedule to train modern deep neural networks **with a) memory and b) runtime as hard constraints**.
25
+
26
+ 2. A unified algorithm for hybrid activation recomputation and paging.
27
+
28
+ 3. The first demonstration of how to train ResNet-18 and BERT on tiny Cortex-M-class devices with memory and timing constraints.
29
+
30
+ # Method
31
+
32
+ <figure id="fig:illustrated_example" data-latex-placement="t">
33
+ <embed src="figures/figures/illustrated_example.pdf" style="width:70.0%" />
34
+ <figcaption>Rematerialization and paging are complementary. This plot visualizes the execution schedule for an eight-layer neural network. Logical timesteps increase along the y-axis, while layers are laid out along the x-axis. Layers 2 and 3 are cheap-to-compute operators and can therefore be rematerialized at low cost. However, layers 5 and 6 are compute-intensive, so it is more energy-efficient to page them to secondary flash storage.</figcaption>
35
+ </figure>
36
+
37
+ A growing demand exists for edge machine learning applications with greater autonomy. In response, the community has developed systems to enable machine learning on edge devices. EdgeML [@edgeml], CoreML [@coreml], and Google's TensorFlow Lite [@tf-lite] are all efforts to meet this demand.
38
+
39
+ However, each of these efforts is application-specific and proposes new algorithms or optimizations to address the computational and memory requirements for machine learning inference on the edge [@deep-x]. While inference is already commonly deployed on edge devices, training remains ad-hoc and infeasible for large models.
40
+
41
+ Training on the edge is critical for privacy, cost, and connectivity. First, due to privacy concerns, many edge applications cannot transmit data to the cloud. Second, the energy consumed by bulk data transmission can significantly reduce battery life [@trickle]. Third, applications such as ocean sensing and communication [@fadel-underwater], and those deployed in farms [@farmbeats] are designed for offline operations -- with no access to the internet.
42
+
43
+ Our objective of optimizing for energy is a non-trivial contribution. On edge devices, the energy objective can often conflict with the runtime objective: on a given platform, rematerializing might consume less energy, while paging might be quicker. This is because, on edge devices, it is common practice to turn off or duty-cycle components that are not utilized (e.g., the SD card, DMA, etc.). The energy profile may vary depending on the size of the tensor and on whether the PCIe/DMA/SPI/I2C memory bus needs to be activated. Exploiting this enables POET to find the most energy-efficient schedule, which would not have been possible had we not optimized for energy.
44
+
45
+ Definitions differ on which devices are included in "the edge" (e.g., mobile phones, routers, gateways, or even self-driving cars). In the context of this paper, the edge refers to mobile phones and microcontrollers (Table [\[table:device-spec\]](#table:device-spec){reference-type="ref" reference="table:device-spec"}). These devices are characterized by limited memory (ranging from KBs to a few GBs) and are commonly battery-powered ($\sim$ a few hundred mAh) for real-world deployment. Further, in our research we found that it is quite common for these edge devices to be augmented with off-chip secondary storage such as flash or an SD card, as seen in [@farmbeats; @fadel-underwater; @gesturepod]. This presents us with an opportunity to exploit the off-chip memory for paging.
46
+
47
+ Rematerialization and paging are two techniques to lower the memory consumption of large, state-of-the-art ML models. In rematerialization, an activation tensor is deleted as soon as it is no longer needed, most often during the forward pass. This frees up precious memory that can be used to store the activations of the following layers. When the deleted tensor is needed again, for example to compute gradients during backpropagation, it is recomputed from the other dependent activations as dictated by the lineage. Paging, also known as offloading, is a complementary technique to reduce memory. In paging, an activation tensor that is not immediately needed is paged out from primary memory to a secondary memory such as flash or an SD card. When the tensor is needed again, it is paged back in.
48
+
49
+ This is best understood with the representative neural-network training timeline in Figure [2](#fig:illustrated_example){reference-type="ref" reference="fig:illustrated_example"}. Along the x-axis, each cell corresponds to a single layer of an eight-layer linear neural network. The y-axis represents the logical timesteps over one epoch. An occupied cell indicates that an operation (forward/backward-pass computation, rematerialization, or paging) is executed at the corresponding timestep. For example, we can see that the activation for Layer 1 (L1) is computed at the first timestep (T1). At timesteps T2 and T3, the activations of L2 and L3 are computed respectively. Suppose layers L2 and L3 happen to be memory-intensive but cheap-to-compute operators, such as non-linearities (tanh, ReLU, etc.); then rematerialization becomes the optimal choice. We can delete the activations ({T3, L2}, {T4, L3}) to free up memory, and when these activations are needed during backward propagation we can rematerialize them ({T14, L3}, {T16, L2}).
50
+
51
+ Suppose layers L5 and L6 are compute-intensive operators such as convolutions, dense matrix-multiplication, etc. For such operations, rematerializing the activations would lead to an increase in run-time and energy and is sub-optimal. For these layers, it is optimal to page-out the activation tensor to secondary storage ({T6,L5}, {T7, L6}), and page-in when they are needed ({T10,L6}, {T11, L5}).
52
+
53
+ One major advantage of paging is that, depending on how occupied the memory bus is, it can be pipelined to hide latency. This is because modern systems have DMA (Direct Memory Access) engines which can move the activation tensor from secondary storage to primary memory while the compute engine runs in parallel. For example, at timestep T7, we are both paging L6 out and computing L7. However, rematerialization is compute-intensive and cannot be parallelized, which leads to an increase in run-time. For example, we have to dedicate timestep T14 to recomputing L3, thereby delaying the rest of the backward-pass execution.
54
+
55
+ We introduce POET, a graph-level compiler for deep neural networks that rewrites training DAGs for large models to fit within the memory constraints of edge devices while remaining energy-efficient. POET is hardware-aware and first traces the execution of the forward and backward pass with the associated memory allocation requests, runtime, and per-operation memory and energy consumption. This fine-grained profiling for each workload happens only once for a given hardware platform, is automated and cheap, and provides the most accurate cost model for POET. POET then generates a mixed integer linear program (MILP) which can be solved efficiently. The optimizer searches for an efficient rematerialization and paging schedule that minimizes end-to-end energy consumption subject to memory constraints. The resulting schedule is then used to generate a new DAG to execute on the edge device. While the MILP is solved on commodity hardware, the generated schedule shipped to the edge device is only a few hundred bytes, making it highly memory-efficient.
56
+
57
+ Rematerialization is most efficient for operations that are cheap to compute yet memory-intensive; these operations can be recalculated with low energy overhead. Paging, however, is best suited to compute-intensive operations where rematerialization would otherwise incur significant energy overhead. POET jointly considers both rematerialization and paging in an integrated search space.
58
+
59
+ Without a minimum training throughput limit, it is possible that the energy-optimal strategy is far too slow to train in practical applications. In reality, training needs to run while the device is idle and spare compute cycles are available. For example, Google Android schedules ML model updates when the phone is charging. To maintain high training throughput, the optimizer can enforce a minimum training throughput to ensure that training completes during downtime.
60
+
61
+ Given a memory budget $\mu_{RAM}$ and a training time budget $\mu_{deadline}$, POET finds an energy-optimal schedule by choosing either to a) rematerialize or b) page tensors to/from secondary storage such as an SD card. Our method scales to complex, realistic architectures and is *hardware-aware* through the use of microcontroller-specific, profile-based cost models. We build upon the formulation proposed by Checkmate [@checkmate] and adapt it to jointly consider integrated rematerialization and paging, to optimize for an energy objective rather than the runtime, and to implement a minimum throughput constraint.
62
+
63
+ **Assumptions:**  We assume operations execute sequentially on edge devices without inter-operator parallelism. Moreover, we assume parameters and gradients are stored in a contiguous memory region without paging. Unlike prior work in rematerialization [@chen2016b; @dtr], we do not limit rematerialization to occurring only once. We assume auxiliary storage (e.g., flash or an SD card) is available. If auxiliary storage is not available, the optimizer falls back to performing only rematerialization.
64
+
65
+ Following the design of Checkmate [@checkmate], we introduce the formulation of the rematerialization problem. Given a directed acyclic dataflow graph $G = (V, E)$ with $n$ nodes, a topological ordering $\{v_1, \ldots, v_n\}$ is computed which constrains execution to that order of instructions. Two key decision variables are introduced: (1) $R \in \{0,1\}^{n \times n}$ where $r_{t,i}$ represents the decision to (re)materialize an operation $v_i$ at timestep $t$ and (2) $S \in \{0,1\}^{n \times n}$ where $s_{t,i}$ represents whether the result of an operation $v_i$ is resident in memory at timestep $t$.
66
+
67
+ From the rematerialization matrix $R$ and the storage matrix $S$, we define a series of constraints to maintain graph dependencies. All arguments for an operation $j$ must be resident in memory prior to running that operation, yielding the constraint $R_{t,i} + S_{t,i} \geq R_{t,j}~~\forall (i,j) \in E~~\forall t \in \{1,\ldots,n\}$. Similarly, the result of an operation is resident in memory only in one of two cases: a) it was already resident in memory at the previous timestep, or b) it was (re)materialized then ($S_{t,i} \leq S_{t-1,i} + R_{t-1,i}~~\forall i \in V~~\forall t \in \{1,\ldots,n\}$).
68
+
69
+ To adhere to the strict constraints on the peak memory used during training, an intermediate variable $U \in \mathbb{R}^{n \times n}$ is defined. $U_{t,i}$ is the total memory used by the system during training at timestep $t$ when evaluating operation $i$. By bounding the maximum value of $U_{t,i}~~\forall i \in V~~\forall t \in \{1,\ldots,n\}$ to the user-specified memory limit $\mu_{RAM}$, we limit the total memory consumption during training.
70
+
71
+ While rematerialization can provide significant memory savings, it introduces significant energy overheads from duplicate recomputation. Similarly, paging, if done naively, results in wasteful shuffling of data between memories. Here, we formalize a joint search space over rematerialization and paging to enable the discovery of the energy-optimal hybrid schedule.
72
+
73
+ Like rematerialization, the discovery of the optimal paging schedule is a challenging combinatorial search problem. However, we find that independently solving for paging first and then solving for rematerialization will not produce globally optimal solutions. As an example, consider a graph where the output depends on the results of two operations $v_1$ and $v_2$, where both nodes have equivalent memory costs but $v_2$ is cheaper to evaluate. A paging strategy may evict $v_2$, which would force rematerialization to recompute the more expensive $v_1$ rather than the cheaper $v_2$.
74
+
75
+ We represent a schedule as a series of nodes that are either saved in RAM ($S^{RAM}$), (re)computed ($R$), or resident in secondary storage ($S^{AUX}$). To model when a node is copied from secondary storage to RAM, we introduce a variable $M^{in} \in \{0, 1\}^{n \times n}$ where $M^{in}_{t,i}$ represents paging a tensor from secondary storage to RAM between timesteps $t-1$ and $t$. Similarly, we model page-out with $M^{out}$.
76
+
77
+ We now present the intuition behind adding the following constraints to the optimization problem in order to search over optimal schedules for paging and rematerialization:
78
+
79
+ - For $S_{t,i}^{RAM}$ to be in memory at timestep $t$, either compute $R_{t,i}$ at timestep $t$, or retain $S_{t-1,i}^{RAM}$ in memory from the previous timestep $t-1$, or page it in ($M_{t-1,i}^{in}$) if it was resident on flash ($S_{t-1,i}^{AUX}$) at $t-1$.
80
+
81
+ - Each node $i$ can reside on flash ($S_{t,i}^{AUX}$) either if it already resided on flash at timestep $t-1$ ($S_{t-1,i}^{AUX}$), or if it was paged out at timestep $t-1$ ($M_{t-1,i}^{out}$).
82
+
83
+ - To page-in $M_{t,i}^{in}$ at timestep $t$, the node has to be resident on flash ($S_{t,i}^{AUX}$) at timestep $t$.
84
+
85
+ - Each node $i$ can be paged out ($M_{t,i}^{out}$) at timestep $t$ only if it is resident in memory ($S_{t,i}^{RAM}$) at that timestep.
86
+
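+ In solver form, the four conditions above translate roughly as follows (continuing the illustrative `cvxpy` setup from the earlier sketch; the variable names `S_ram`, `S_aux`, `M_in`, and `M_out` are hypothetical):
+
+ ```python
+ S_ram = cp.Variable((n, n), boolean=True)   # resident in RAM at step t
+ S_aux = cp.Variable((n, n), boolean=True)   # resident on flash/SD at step t
+ M_in  = cp.Variable((n, n), boolean=True)   # page-in between steps t-1 and t
+ M_out = cp.Variable((n, n), boolean=True)   # page-out between steps t-1 and t
+
+ for t in range(1, n):
+     for i in range(n):
+         # (1) in RAM: kept from t-1, recomputed at t-1, or paged in from flash
+         cons.append(S_ram[t, i] <= S_ram[t-1, i] + R[t-1, i] + M_in[t-1, i])
+         # (2) on flash: already on flash at t-1, or paged out at t-1
+         cons.append(S_aux[t, i] <= S_aux[t-1, i] + M_out[t-1, i])
+ for t in range(n):
+     for i in range(n):
+         cons.append(M_in[t, i] <= S_aux[t, i])    # (3) page in only from flash
+         cons.append(M_out[t, i] <= S_ram[t, i])   # (4) page out only from RAM
+ ```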
87
+ Algorithm [\[algo:full_poet_formulation\]](#algo:full_poet_formulation){reference-type="ref" reference="algo:full_poet_formulation"} defines the complete optimization problem.
88
+
89
+ If we only consider rematerialization, then minimizing runtime will generally correlate with decreased energy usage. However, this is no longer true when considering paging; paging can be more energy-efficient than rematerializing a compute-intensive operation. To address this, we introduce a new objective function to the optimization problem that minimizes the combined energy consumption due to computation, page-in, and page-out.
90
+
91
+ When paging occurs on an edge device, the vast majority of the energy consumed goes to powering the flash/SD block device. As this power is in addition to any power the CPU is consuming, the total power consumption is a linear combination of paging and CPU power. We precompute each of these values, generally as the integral of the power drawn by the active components of the edge device over the runtime of the operation. $\Phi_{compute}$, $\Phi_{pagein}$ and $\Phi_{pageout}$ represent the per-node energy consumed for computing, paging in, and paging out respectively.
92
+
93
+ Therefore, the new objective function combining paging and rematerialization energy usage is: $$\begin{equation}
94
+ \sum_T \left[R \Phi_{compute} + M^{in} \Phi_{pagein} + M^{out} \Phi_{pageout}\right]_T
95
+ \end{equation}$$
96
+
97
+ If we attempt to find the minimum energy schedule subject to only a memory constraint, the solver may select solutions with poor end-to-end training throughput. Ideally, training should occur in the downtime between interactive workloads on an edge device. To ensure this, we introduce a new constraint to the optimization problem that ensures schedules meet a minimum training throughput threshold. This constraint effectively trades off between energy consumption and training throughput.
98
+
99
+ To enforce a particular throughput, we compute a latency target. Via profiling, we capture $\Psi_{compute}$ denoting the runtime of each operation. We then constrain total runtime with the constraint: $$\begin{equation}
100
+ \sum_T [R \Psi_{compute}]_T \leq \mu_{deadline}
101
+ \end{equation}$$
102
+
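+ On the same illustrative variables, the energy objective and the throughput constraint might be written as follows (the `phi_*` and `psi_compute` vectors stand in for profiled per-node constants and are assumed values):
+
+ ```python
+ phi_compute = np.array([2.0, 6.0, 6.0, 2.0])   # mJ per (re)computation, assumed
+ phi_pagein  = np.array([1.0, 2.0, 2.0, 1.0])   # mJ per page-in, assumed
+ phi_pageout = np.array([1.0, 2.0, 2.0, 1.0])   # mJ per page-out, assumed
+ psi_compute = np.array([1.0, 3.0, 3.0, 1.0])   # ms per (re)computation, assumed
+ mu_deadline = 12.0                             # latency target from throughput
+
+ energy = (cp.sum(R @ phi_compute) + cp.sum(M_in @ phi_pagein)
+           + cp.sum(M_out @ phi_pageout))
+ cons.append(cp.sum(R @ psi_compute) <= mu_deadline)
+ prob = cp.Problem(cp.Minimize(energy), cons)
+ ```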
103
+ POET outputs the DAG schedule in terms of which nodes of the graph ($k$) to rematerialize, and which to page-in ($M^{in}_{t,k}$) or page-out ($M^{out}_{t,k}$) at each timestep ($t$). Our Algorithm [\[alg:poet\]](#alg:poet){reference-type="ref" reference="alg:poet"} takes the ILP solution and generates the strategy that determines which tensors are resident in memory ($S^{aux}_{t,k}$) at a fine-grained (operator) level.
104
+
105
+ We factor in the latency introduced by paging. As described in Section [6.1](#sec:experiments){reference-type="ref" reference="sec:experiments"}, POET is hardware-aware: it profiles, per platform, the latency of paging activations to secondary storage. Fine-grained profiling helps fine-tune when to start paging, so that activation tensors arrive just-in-time. We then modify the page-in ($M^{in}_{t,k}$) and page-out ($M^{out}_{t,k}$) schedule to ensure there is no contention for the memory bus as tensors are paged in just-in-time. For example, if ($M^{in}_{t,i}$) can contend with ($M^{in}_{t,j}$), then we schedule one of them to page-in at an earlier time ($M^{in}_{t-1,i}$) and update the in-memory schedule ($S^{aux}_{t-1,i}$) to account for the earlier page-in. While this ensures the activations are paged in just-in-time, in parallel, ($R_{t',j}$) informs the PyTorch DAG scheduler to deallocate the tensors that we have chosen to rematerialize ($S^{aux}_{t,k}$) at a future timestep ($t'$).
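+ A rough sketch of this contention-avoiding post-processing step, assuming the solver output is available as 0/1 `numpy` matrices (the function and parameter names are hypothetical):
+
+ ```python
+ import numpy as np
+
+ def stagger_pageins(M_in, S_aux, max_per_step=1):
+     """Shift conflicting page-ins one step earlier so tensors still arrive
+     just-in-time without contending for the memory bus (illustrative only)."""
+     n = M_in.shape[0]
+     for t in range(n - 1, 0, -1):
+         pending = np.flatnonzero(M_in[t])
+         for i in pending[max_per_step:]:     # keep at most max_per_step per step
+             M_in[t, i], M_in[t - 1, i] = 0, 1
+             S_aux[t - 1, i] = 1              # tensor must be on flash at t-1 too
+     return M_in, S_aux
+ ```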
2210.02412/main_diagram/main_diagram.drawio ADDED
@@ -0,0 +1 @@
 
 
1
+ <mxfile host="app.diagrams.net" modified="2022-05-19T12:23:17.513Z" agent="5.0 (Macintosh)" version="17.4.6" etag="y05DSxz8DL0-3RhQABSh" type="google"><diagram id="uOghx9WvQcI34nt2u9wW">7V1Zk9u4tf41XTV5CAr78mi3Z24ektRUPFXJfZqSW2q3atotRy2P2/fXX1DiBhAkIQogQVFKytOiKEo65+PZlzty/+Xtf/arr0//2K03z3cYrt/uyIc7jBlj+t/swI/TAYSwOh35vN+uT8dgdeDj9v82+YnF0W/b9eY1P3Y6dNjtng/br+bBh93Ly+bhYBxb7fe77+Zpj7vntXHg6+rzxvga2YGPD6vnTeO0f2/Xh6fiZ8Da6X/bbD8/5R8tWf7Cl1V58unA69Nqvfte+yzy8x253+92h9NfX97uN88Z8Uy6/NLyavnF9puXg88b8OkNf66ev+W/7Q7zZ/3W9+vtn/rPz9mfxaHHnb5oRtXn3f54Mv/vt93pBPLL8VE/VLyX6v+//Y5OfxSX0t/odDXzE/Rh43OPJDr8KOiuv7tmsX7y/vvT9rD5+HX1kL3yXaNMH3s6fHnWz5D+c/X69cT3x+3bRv/U96+H/e6PzX35zQmESml2Fa8UbMze/Lh9fq6dyY+P8uv8udkfNm+t5EYlEzX6N7svm8P+hz6leAMiHMDaI6d/fiMQRkB+b3yvcFWA8KmGqOLYKkfy5/KjKmbrP3J+u3lPxuI9vvHeg/dyTN7TsXhPbrzv5z1FY/KeOXgfjOAhiIVxF7Gwkg5iEQexSABi8XkTi9AxiSXmTSwKXbdhLGLJmRPLaavEIpbyINbL+l1m2+tnL7uXjUmg/e7byzqjzAe37N+8bQ//qf39v9mZ+vednn14y994fPKjePKif8R/6k9q78qeVm87Pived/rmm3XhYxSuwO7b/mFjqOfDav95czDg4sG/GjuYgx3Fsf3meXXY/ml+CReP8k/4dbc9mgGlSvOFR3HN0w/ML1P3SewrY9F5ZSLtK5/o1LjyEVQlYbxwVniYce7KC6wR9C77X6A7myTkhSB0o/i4tj/qcfprlC+s+sxm/+vrMQ7zTp+gf/qby+Q33IXBV8n8hZff78T73+7wPboTH0K4EJp3BxMrJjRylVHHQH5o9bz9/KKfPmheb/Tx9xkStg+r53f5C1+26/VzGzhNxRNELUPaJR2FbABJsiaQeAgg9UQQkgISvgHJlklQdgEJCyCaQikallwRiYWpgXHDAMgnDnBFNnX+6+o2NfNm4Fg2Ne+2fC+wqbuRJ2La1D4xlOsGGp8X0IiIBTSCG1cOCDSf+NN1A00kB7ROX77m+JwPNNp55aasDAg0n9jddQMtvXCU7ISDugBorDvCHhNoC4t7kibQ0rPRlGcO4XygdSvlZkQ1HNCwT9zzuoGWnI2GoaezGDjATnhMoPmEe68baMnZaNpD6wwEXQC0bqXcdDMCAg0vHmjJ2Wg9GeVLwhudStnhZgQEmisKfsVAc+Smk7PReqJdTZPdG2ikUyk73IyAQPMJkV830NKz0TqDEJcUQZBupdx0MwICbWGZAQfQ0rPROoMQtGmy+wOtWyk33YyAQFtYZkA2gVbkPRNCWncO6oLUQE/2M2ZqwKs09cqR5p+uHgtp3XV+F+QGSLdajpkb8KrrvXKklW1wySCtMw5BL0gOkG69HDM5sLSiaDEL7dkZiGAXZAdIt16OmR0gC8sOuJCWnPbsjkSwC9IDpFsvx0wPkOjpgYGoGYLQgUhLTnvSaDUcpFsvx8wPELwsmcbngLTuWAS7IEHQ04ARVaYtLEHgQlp62rMTD+yCDAHtTtvHzBCQhWUIXEhLziPojkWwC1IEtDNF4PA1AiJtYSkCNgvt2RmLYBfkCGi3Xo6ZIyALyxG4kJae9uyMRbALcgS0Wy/HzBGQheUIXEhLT3t2+p7sAt+TduvlmDkC4soRnNfzSqyeV/i4ejDf8HdN95dmW+xvJ3Zj+M/N4ftu/0drN6yF/Ovpe0XEZDViDPA8lm70XdImrEO0uhKfuP3gVlerfXUNHzYbTY33O32J7SH7yRI2mKdPZJwhsQlE4pQmONGoIz1mQO5xZ0DRqPM8ZkDucadIUbx0co86h4q2j7zwnlZB26dVPJ6mVYgPP92x+0+75/Xrjy/6P/rgmz74l575FUlq7Owr5hN8Uahpit0jUMr2zBoeeCw8eETcPmuCfG392fmM4tWn4nT/sELlMxGgUEUOYpo3CjluECoYoLX3IIexQxQOQKHpx02emNROP4aBQIQgyCTU0BLcoJ+QQAnFFJKEC0JFg5TaJ+WKSCEYQ9n7UZOSLadcRNfpJ1NeRldNCJARtCRMKoSdforlhYTV+pIkSVmP0jhTefRQd716fTqea8/tyfWYGYLBuTaq2SmPx4fLgiECwvv7kSSMAopzWfLM4YtSkM1b1GdKrs15VPQc1RkmEIAScYI5I5CXp1zEsKie6hi3AhOgEt1c0UTuBBbVJx2DsBKBHGf6SlI1La6JCBvV+xyBsBgyUJPchWk7PWE9/Mwlym4PCTON7GY+VSdJ3wmEAweIp78Tok4qHIOwHIIKaqWDNj1hPTzFJYoYjxthIhEzdxcUSwpqNwJNxz6M6oM6pqFmj8Yd0XLbhGAM1vYjQUQpggTV1KemUkUQMIEolsW/qfAl6tKG9PlCKMBJmv1RHdX0+ZKom8s93NxUNHo5EToN8TaNRudR3ef07yOpQI7+E2VTkW/cw/u+Yr5oxQOO+qbMw6TCl6hOdvp8SdRF5x4u+hL1jod4m0jvRE0Sp38fcQlqVrWWcqncSFE9/PT5ojDANcWTjF297PAAQQxUzg6RyVhpM8pvj6h2fKTbRGpn2WEDggCr30eO1WHT3Ehi7C2MafGFMVCFBo7/psKXZYcHEq0EFB7BgVTUjlKjqR0f6TaN2hFjRw3i7Lrr4Q6FSltplAsmuNCMQAZ3EIOgbhIo0uCOtvJEPapd9E3VudN2zkXsiZrgD8Geqv1ChLqXerglCBD10EFC3Bo7dDADbikBjGhp036YjFuugEJtZaxzAexDSdSqa4b8cny0NdK8/Y5CLHudBDj8+AilI1tUXCmGKYC1iCBPCCquIEcUqOAbVHygIgQQFU4IbNq/k0GlvZ8+MFTIDSoeUMFQG+Z1V4mnA5WwQ3OHjPRouk+Bh3w0GWkO/kDItZWKuDk+zpwPwhUQmCuOKROEFFzqrm85d+RHj9WEJKgX3sNo4z9k2HG6JQa1JjdxiMRIQMwu++tmv9VEyPqFLwFnkbtNCJtaDknBKOY8W5wMzTADIggoSKoQwUBsogziWY6WaqFOqIXNlpa+CNj0CfENHYJ0KcRaUE5JJ8xDgtMx7X5icEIGKmTCIrdSKGLBAa3HkodKzlTQ6RHqvAydJcoQIZYslWejrA3ruA/sdijh0wod52447D/+87tfzgO0AV48JXiz5eASM40axjlj2NT6CgJKapHXYdDNVj8oCvVtCsl
RiBufwSHQd41diBkBuDFnABvAvRi052NvMMwHSuGiFzEhIEsKck9I441LSwwrCQiuG7ADoazvCMWF9sDV0ftSpiXCtP0qeBX3s2R9QDD7BMgjgJnxawSzYwxj0mDWMgUwqCp9P9CmwFIBwhAjTN8VSF/G/BhOQD15J5hlWAeEc/RZxiWkAOTChFUO6i7DN3vWsCzCraPoA6hwTNk+TfKYCqBUgkwJMKbykIHVkt3SpXQuQKWWp9k+H6W1t6aChU9FgainxOJJ2+gDkAt4IhOavh4ZjI9BBF0RKz4lCAkigHKq7VN5AqM9IMNda30uCAkjgDOkBWDexmp+SksWPQIKow9Hnn4xT68kdG7rYVOikHFQD8OXuqzAB8UA1Xv9LQfHe1gyx9rD51WpDTZlocgGGJVelG3fBkRh9OWKc0XhpLKwD4WcAaOecWCMFGkPhxAt5SClhEtL4AoKGFaVxI0Xvg+bQponCJ1rPye1CpcFQhV9JeNcQaimBGGPa6IBAlC9/BcPBaECiGQ1GIgLbSNS0yzETHtAClaVytHMQuVKF9UqLzzGIVuLDJy1GoOvkhVrvGRDlT/eYf1jILwT+iLweCn439/hOkQ9RxqDlnNGXDYvh8CscKMamWPAimRjl0XX4CiuZWMenDwJ4uaNRgWo4R9K3H6L+dZ0FGOKz5OFRpW7Vf3+6+qg+fFyPIIhcQrI7jHXl4rPY6TXEKFyQPhzqGDFTcE6dQ6+09sWDMB6h9lAN6cnzdljQQSUqoOyRWkDegR7oBg3kxBsMQIMIgE5lUf4GoCSGKh6ct7S0t6oxQwojXqubZ8sIoRsm9Rdux8BtpfmhZqZoMiYBBBhA5eI0bGEbIJohVZ0seDf2Ygk2lCoMu7F9JoyNOqcHRwBj4MSO3MSo7IsUokP1/Tq8igDPFt+QamAqsRRgTLIAXcMbzgby0xLBYiJVFBIqqRVFN/SahMBzIPSQCOBOd3MkXAkjvikcSrCtfiTiqIMNJk0tEr23DNHzoYt5yBLqgua1z6Zn3Jy5cYwZee0VDNCXZ5xAzbvOjQc2dRVi4qnRDYSGMgyJKWK7REF5NxTW84uGxEC4LqTZt49ggLZHBQXAdfRM1FLxXWC1X1KW1qqij0UMu1H9TLTr12aZNWoBkS01Zsg7eURCcdw46Lnt9y2bF98azxwu8zmHsDjawK8xgCAXCnOpIDHJIMpZLN9c6LWCzrQRiHZjuV8MLuQiHErcMElkEi2JkvCIR7B6Nm0xUpzR/XgxOBGAkBEqqIqE3QtDfFnY7s78qeRzXDrDRQS2a4EHTUnJNhAn7Q9uZqWwYNltWa54LHc/XRj3ex2SJb1qjfezW9NJSo6fB3MwzfmnV5OdP8fgq6Y/413pomT5rYQBF0h7hvvDNcr0fWQCLrivNScmbN03iW6dw9BVyzzxjsz5JHmpou83PXGuzlu+UPoVsTtbC/N9dpUEaK+/lKugKwVGw5tJOjpqZLaSCOViR2tSgahmCN/5oJCZ1NVi9hJo5+lZRzj2TAUEFBIqnoEO8LCgWRohKkSqLgBFo1DR1KfTdphyrXyPNZCF53wJjwkAxy2R7L9USgBpSiPlRNpfQqiQGi9XaIQxwNh1NnsllG0hg+bY2amMopkBd3aiYwzJDaBzKWZLgVBxRd19Bp5twnR9jahx1ObkPjw0x27/7R7Xr/++KL/ow++6YN/6WkUsiCSRkuQXa8XAj3aLoGwKitr2NqAqpppBJtObpt1FR0+rshgwq1qyGxVQ8fXftOvnV64ta3VJBpV2hZS1cMsBc4KhZs4lAIUyqwOvRBDhhFyRTLDwWR9E0Y54wGss73oRi2DNtnisQbjucNYIjAE13sm1qcmYXBdwpTiBd/Ei4UyIgmACpYPZFVsH5u6x5Uvow28X8oaDdSJAKR9DwBrD9M/IbjJfuyQMjiIlHFFfG97MSbjPRuR99gVML4tupiM93JM3vuEaQcTPACxMCRdxCr3qtWIRWKZYgVt5kosgsckVtSY1wjEEs3bMB6xXFGoGRGLOmyVeMSKPlo3YF19XxrAKJI//YxGfN8YWlqIfSOpVODHg6FjdTf54eXsdiZY9K4WVyoCuBHC9tgVcEnBGjlvpWrPnY09tEQ8+8Onk/f6SMzGJLFPU+n1kViOSeJpmhsT0UjINTHxDBaOpZFEp0aCgzVSpzdHI7ZhEZfjPlfYDUcaSQ5p0iu2cz7SmGn7EBuzIbEVvX5rFthKz65WndgavAUIcgtbKCK28NKwpVzYwolhC0PkFZe82GcjdroqJLZi7kQbii3DpBodaEVPSEJAw51AU8OBZhYOZTnWaLPTUFG1mhTUxhdjqanIvlDlYEMfWUt6G4ZcSGxdU4zTB1vusGZqTmSfs2cbTf7Ysgtm4627QWS03V/TlcQb2GJObMnUsIUs944OXayMy5XMbRo1JJrmNIuxD4GvmjCHrmmnPnCjTjWZHtwsdUbFYOElrCvZ2Z2QcJvTiMQQitGNJpEcmiwFRgd7i8jO8MX0Fq8pcO+FJmeUiyeHJmsCKxtutNuqMCKa6DXF489Xe05opaf2LCuLDS5FwLhHgYaE1jWF48+HlquROkEdaNlBjA6GliW1aEQDvpiAehXQGo6m9ASVZQexwXlDbIu8iHlDegWtzvOpSC7ExGKpPWpJM/UJyl41tcesiaZRqy9nQO1Ri6pp1EJMi9qCcyhXHtSmBH9g2cbKrE+odvzx+AjEBWTZ8eNK8KjVmbOiO8Hc0VEfj/I+sZ+FUH5Muc58oiTLoDvFAogRIc/CRhFqwxcwbJIu7wad2iU8P9rg2O5W6sd0/ENoZ12KfM75MVJbHESsWS720iwKgl6ooy7UJReVaNSJUgQQqk1LGY7BmimMzvuUkPgMWz84a3wmjDpFAI+Puu5PCYm6sKWEN9RFQR2lY6Cu51NCoi5skeE8UBfIHExOMTeqUwfnNTUgu4RixOIeFrYycR6AHI5BlRwG7SpWu9jinJKyLgxGLOJgYesZ54HBMEKxgFFCgLQ928EZeSS7/ZOIgAxb8XitgHS6z8lJSFhEWss6fxnGpCSqC57dnxISrGELKucB1sH4TFBg2i3mg4vCMeoUmBFr5bhPgmGRGGTzwKBdAD64+BfTTkc7YhkUv2VafADpWtOUXqYF270Ng90a3OnWxOx04NgByPMmgBNrAjh8XD2Yb/i7JvNLc/zrz/868kqzCsOfNSD++uvu67evxcd/2jsGwM5g9HxOz8smwXY2wiPHnHniGv/NQySHefTJCYm1hTrVYXrSxwzHlUvFCpmRrSm0ZhwMLzNHEOB4AxMKcRdpgcVPq+Xs0+mRKxihDrnCHBOmo+2v4O0bcoIw/dON6aWK58kwPe6qmp8ebkzPRTgseDgJm9t308RfnnZaN2O/8m/9SvE2/drDWl/XOuVjfoo2S5e0gq0XSAooVO1gs1awIQFUfQdb0zKdagUbb19bE92z+XiyJzH85+bwfbf/Y14wCqF0KOtSOqXbWZ/CywgopkvWsUEFCLLGiLtC0Lc1acHNDYRIV/MKba74DKSH9NP9LuNM5Y
fo3/z0j916k53x/w==</diagram></mxfile>
2210.02412/main_diagram/main_diagram.pdf ADDED
Binary file (97.7 kB). View file
 
2210.02412/paper_text/intro_method.md ADDED
@@ -0,0 +1,115 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Introduction
2
+
3
+ The impressive breakthroughs achieved by deep learning have largely been attributed to the extensive overparametrization of deep neural networks, as it seems to have multiple benefits for their representational power and optimization (Belkin et al., 2019). The resulting trend towards ever larger models and datasets, however, imposes increasing computational and energy costs that are difficult to meet. This raises the question: Is this high degree of overparameterization truly necessary?
8
+
9
+ Training general small-scale or sparse deep neural network architectures from scratch remains a challenge for standard initialization schemes (Li et al., 2016; Han et al., 2015). However, Frankle & Carbin (2019) have recently demonstrated that there exist sparse architectures that can be trained to solve standard benchmark problems competitively. According to their Lottery Ticket Hypothesis (LTH), dense randomly initialized networks contain subnetworks that can be trained in isolation to a test accuracy comparable with that of the original dense network. Such subnetworks, the lottery tickets (LTs), have since been obtained by pruning algorithms that require computationally expensive pruning-retraining iterations (Frankle & Carbin, 2019; Tanaka et al., 2020) or mask learning procedures (Savarese et al., 2020; Sreenivasan et al., 2022b). While these can lead to computational gains at training and inference time and reduce memory requirements (Hassibi et al., 1993; Han et al., 2015), the real goal remains to identify sparse trainable architectures before training, as this could lead to significant computational savings. Yet, contemporary pruning-at-initialization approaches (Lee et al., 2018; Wang et al., 2020; Tanaka et al., 2020; Fischer & Burkholz, 2022; Frankle et al., 2021) achieve less competitive performance. It is therefore all the more remarkable that even iterative state-of-the-art approaches struggle to outperform a simple, computationally cheap, and data-independent alternative: random pruning at initialization (Su et al., 2020). Liu et al. (2021) have provided systematic experimental evidence for its 'unreasonable' effectiveness in multiple settings, including complex, large-scale architectures and data.
10
+
11
+ We explain theoretically why they can be effective by proving that a randomly masked network can approximate an arbitrary target network if it is wider by a factor of $1/\log(1/\text{sparsity})$, i.e., logarithmic in the inverse sparsity. By deriving a lower bound on the required width of a random one-hidden-layer network, we further show that this degree of overparameterization is necessary in general. This implies that sparse random networks have the universal function approximation property like dense networks and are at least as expressive as potential target networks. However, it also highlights the limitations of random pruning in case of extremely high sparsities, as the width requirement then scales approximately as $1/\log(1/\text{sparsity}) \approx 1/(1-\text{sparsity})$ (see also Fig. 2 for an example). In practice, we observe a similar degradation in performance at high sparsity levels.
16
+
17
+ Even for moderate to high sparsities, the randomness of the connections results in a considerable number of excess weights that are not needed for the representation of a target network. This insight suggests that, on the one hand, additional pruning could further enhance the sparsity of the resulting neural network structure, as random masks are likely not optimally sparse. On the other hand, any dense-to-sparse training approach would not need to start from a dense network but could also start training from a sparser random network and thus be turned into a sparse-to-sparse learning method. The main idea is visualized in Fig. 1 and verified in extensive experiments with different lottery ticket pruning and continuous sparsification approaches.
18
+
19
+ ![](_page_1_Figure_3.jpeg)
20
+
21
+ Figure 1. *Sparse training with randomly masked (ER) networks:* A visual representation of the main implication of our theory: sparse-to-sparse training can be effective by starting from a randomly masked (ER) network.
22
+
23
+ Our main results could also be interpreted as theoretical justification for Dynamic Sparse Training (DST) (Evci et al., 2020; Liu et al., 2021; Bellec et al., 2018), which prunes random networks of moderate sparsity. However, DST further relies on edge rewiring steps that sometimes require the computation of gradients of the corresponding dense network (Evci et al., 2020). Our derived limitations of random pruning indicate that this rewiring might be necessary at extreme sparsities but likely not for moderately sparse random starting points, as we also highlight in additional experiments.
24
+
25
+ As a special case of the main idea to prune random networks, we also consider strong lottery tickets (SLTs) (Zhou et al., 2019; Ramanujan et al., 2020). These are subnetworks of large, randomly initialized source networks, which do not require any further training after pruning. Theoretical (Malach et al., 2020; Pensia et al., 2020; Fischer et al., 2021; da Cunha et al., 2022; Burkholz, 2022a;b; Burkholz et al., 2022) as well as empirical (Ramanujan et al., 2020; Zhou et al., 2019; Diffenderfer & Kailkhura, 2021; Sreenivasan et al., 2022a) existence proofs have so far focused solely on pruning dense source networks. We highlight the potential for computational resource savings in the search for SLTs by proving their existence within sparse random networks instead. The main component of our results is Lemma 2.4, which extends subset sum approximations to the sparse random graph setting. This enables the direct transfer of most SLT existence results for different architectures and activation functions to sparse source networks. Furthermore, we modify the algorithm edge-popup (EP) (Ramanujan et al., 2020) to find SLTs accordingly, which leads, to our knowledge, to the first sparse-to-sparse pruning approach for SLTs. We demonstrate in experiments that starting even at sparsities as high as 0.8 does not hamper the overall performance of EP.
26
+
27
+ Note that our general theory applies to arbitrary layerwise sparsity ratios of the random source network, and we validate this fact in various experiments on standard benchmark image data and commonly used neural network architectures, complementing results by Liu et al. (2021) for additional choices of sparsity ratios. Our two proposals, balanced and pyramidal sparsity ratios, seem to perform competitively across multiple settings, especially at higher sparsity regimes.
28
+
29
+ - 1. We prove that randomly pruned random networks are sufficiently expressive and can approximate an arbitrary target network if they are wider by a factor of $1/\log(1/\text{sparsity})$. This overparametrization factor is necessary in general, as our lower bound for univariate target networks indicates.
30
+ - 2. Inspired by our proofs, we empirically demonstrate that, without significant loss in performance, any dense-to-sparse training scheme can be translated into a sparse-to-sparse one by starting from a random source network instead of a dense one.
31
+ - 3. As a special case, we also prove the existence of Strong Lottery Tickets (SLTs) within sparse random source networks, if the source network is wider than a target by a factor of $1/\log(1/\text{sparsity})$. Our modification of the edge-popup (EP) algorithm (Ramanujan et al., 2020) leads to the first sparse-to-sparse SLT pruning method, which validates our theory and highlights potential for computational savings.
32
+ - 4. To demonstrate that our theory applies to various choices of sparsity ratios, we introduce two additional proposals that outperform state-of-the-art ones on multiple benchmarks and are thus promising candidates for starting points of sparse-to-sparse learning schemes.
35
+
36
+ # Method
37
+
38
+ Our theoretical investigations in the next section aim to explain why the effectiveness of random networks is reasonable given their high expressive power. We show that we can approximate any target network with the help of a random network, provided that it is wider by a logarithmic factor in the inverse sparsity. First, the only constraint that we face in our explicit construction of a representative subnetwork is that edges are randomly available or unavailable. But we can choose the remaining network parameters, i.e., the weights and biases, in such a way that we can optimally represent a target network. As common in results on expressiveness and representational power, we make statements about the existence of such parameters, not necessarily about whether they can be found algorithmically. In practice, the parameters would usually be identified by standard neural network training or prune-train iterations. Our experiments validate that this is actually feasible, in addition to plenty of other experimental evidence (Su et al., 2020; Ma et al., 2021; Liu et al., 2021). Second, we prove the existence of strong lottery tickets (SLTs), which assumes that we have to approximate the target parameters by pruning the sparse random source network. To our knowledge, we are the first to provide experimental and theoretical evidence for the feasibility of this case.
39
+
40
+ **Background, Notation, and Proof-Setup** Let $\boldsymbol{x} = (x_1, x_2, ..., x_d) \in [a_1, b_1]^d$ be a bounded d-dimensional input vector, where $a_1, b_1 \in \mathbb{R}$ with $a_1 < b_1$. $f: [a_1, b_1]^d \rightarrow \mathbb{R}^{n_L}$ is a fully-connected feed forward neural network with architecture $(n_0, n_1, ..., n_L)$, i.e., depth $L$ and $n_l$ neurons in layer $l$. Every layer $l \in \{1, 2, ..., L\}$ computes neuron states $\boldsymbol{x}^{(l)} = \phi(\boldsymbol{h}^{(l)}), \boldsymbol{h}^{(l)} = \boldsymbol{W}^{(l-1)}\boldsymbol{x}^{(l-1)} + \boldsymbol{b}^{(l-1)}$. $\boldsymbol{h}^{(l)}$ is called the pre-activation, $\boldsymbol{W}^{(l)} \in \mathbb{R}^{n_l \times n_{l-1}}$ is the weight matrix and $\boldsymbol{b}^{(l)}$ is the bias vector. We also write $f(\boldsymbol{x};\boldsymbol{\theta})$ to emphasize the dependence of the neural network on its parameters $\boldsymbol{\theta} = (\boldsymbol{W}^{(l)}, \boldsymbol{b}^{(l)})_{l=1}^L$. For simplicity, we restrict ourselves to the common ReLU $\phi(x) = \max\{x, 0\}$ activation function, but most of our results can be easily extended to more general activation functions as in (Burkholz, 2022b;a). In addition to fully-connected layers, we also consider convolutional layers. For a convenient notation, without loss of generality, we flatten the weight tensors so that $W_T^{(l)} \in \mathbb{R}^{c_l \times c_{l-1} \times k_l}$ where $c_l, c_{l-1}, k_l$ are the output channels, input channels and filter dimension respectively. For instance, a 2-dimensional convolution on image data would result in $k_l = k'_{1,l}k'_{2,l}$, where $k'_{1,l}, k'_{2,l}$ define the filter size.
41
+
42
+ We distinguish three kinds of neural networks, a target network $f_T$ , a source network $f_S$ , and a subnetwork $f_P$ of $f_S$ . $f_T$ is approximated or exactly represented by $f_P$ , which is obtained by masking the parameters of the source $f_S$ . $f_S$ is said to contain a SLT if this subnetwork does not require further training after obtaining the mask (by pruning). We assume that $f_T$ has depth L and parameters $\left( \mathbf{W}_T^{(l)}, \mathbf{b}_T^{(l)}, n_{T,l}, m_{T,l} \right)$ are the weight, bias, number of neurons and number of nonzero parameters of the weight matrix in Layer $l \in \{1, 2, .., L\}$ . Note that this implies $m_l \leq n_l n_{l-1}$ . Similarly, $f_S$ has depth L+1 with parameters $\left( \mathbf{W}_S^{(l)}, \mathbf{b}_S^{(l)}, n_{S,l}, m_{S,l} \right)_{l=0}^L$ . Note that l ranges from 0 to L for the source network, while it only ranges from 1 to L for the target network. The extra source network layer l=0 accounts for an extra layer that we need in our construction to prove existence.
43
+
44
+ **ER Networks** Even though common, the terminology 'random network' is imprecise with respect to the random distribution from which a graph is drawn. In line with general graph theory, we therefore use the term Erdös-Rényi (ER) (Erdos et al., 1960) network in the following. An ER neural network $f_{\rm ER} \in {\rm ER}(\mathbf{p})$ is characterized by layerwise sparsity ratios $p_l$. An ER source $f_{\rm ER}$ is defined as a subnetwork of a complete source network using a binary mask $\boldsymbol{S}_{\text{ER}}^{(l)} \in \{0,1\}^{n_l \times n_{l-1}}$ or $\boldsymbol{S}_{\text{ER}}^{(l)} \in \{0,1\}^{n_l \times n_{l-1} \times k_l}$ for every layer. The mask entries are drawn from independent Bernoulli distributions with layerwise success probability $p_l > 0$, i.e., $s_{ij, \text{ER}}^{(l)} \sim \text{Ber}(p_l)$. The random pruning is performed initially with negligible computational overhead, and the mask stays fixed during training. Note that $p_l$ is also the expected density of that layer. The overall expected density of the network is given as $p = \frac{\sum_{l} m_{l} p_{l}}{\sum_{k} m_{k}} = 1 - \text{sparsity}$. In case of uniform sparsity, $p_l = p$, we also write $\mathrm{ER}(p)$ instead of $\mathrm{ER}(\mathbf{p})$. An ER network is defined as $f_{ER} = f_S(\boldsymbol{x}; \boldsymbol{W} \cdot \boldsymbol{S}_{ER})$. Different from conventional SLT existence proofs (Ramanujan et al., 2020), we refer to $f_{\rm ER} \in {\rm ER}(\mathbf{p})$ as the source network, and show that the SLT is contained within this ER network. The SLT is then defined by the mask $S_P$, which is a subnetwork of $S_{ER}$, i.e., a zero entry $s_{ij,ER}=0$ implies also a zero entry $s_{ij,P}=0$, but the converse is not true. We skip the subscripts if the nature of the mask is clear from the context. In the following analysis of expressiveness in ER networks, we continue to use $S_{ER}$ and $S_P$ to denote a random ER source network and a sparse subnetwork within the ER network, respectively.
47
+
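+ A minimal sketch of drawing such a fixed ER mask at initialization (numpy; all shapes below are illustrative):
+
+ ```python
+ import numpy as np
+
+ rng = np.random.default_rng(0)
+
+ def er_mask(shape, p):
+     """Bernoulli(p) mask, sampled once before training and then kept fixed."""
+     return rng.binomial(1, p, size=shape).astype(np.float32)
+
+ # e.g., a linear layer with n_l = 256 outputs, n_{l-1} = 512 inputs, density 0.2
+ S_er = er_mask((256, 512), p=0.2)
+ W = rng.uniform(-1.0, 1.0, size=(256, 512))
+ W_masked = W * S_er        # f_ER uses the masked weights W * S_ER
+ print(S_er.mean())         # empirical density, close to p = 0.2
+ ```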
48
+ **Sparsity Ratios** There are plenty of reasonable choices for the layerwise sparsity ratios and thus ER probabilities $p_l$ . Our theory applies to all of them. The optimal choice for a given source network architecture depends on the target network and thus the solution to a learning problem, which is usually unknown a-priori in practice. To demonstrate that our theory holds for different approaches, we investigate the following layerwise sparsity ratios in experiments. The simplest baseline is a globally *uniform* choice $p_l = p$ . Liu et al. (2021) have compared this choice in extensive experiments with their main proposal, ERK, which assigns $p_l \propto \frac{n_{in}+n_{out}}{n_{in}n_{out}}$ to a linear and $p_l \propto \frac{c_l+c_{l-1}+k_l}{c_lc_{l-1}k_l}$ (Mocanu et al., 2017) to a convolutional layer. In addition, we propose a *pyramidal* and *balanced* approach, which are visualized in Appendix A.15.
49
+
50
+ *Pyramidal*: This method emulates a property of pruned networks that are obtained by IMP (Frankle & Carbin, 2019), i.e., the layer densities decay with increasing depth of the network. For a network of depth L, we use $p_l = (p_1)^l$, $p_l \in (0,1)$ so that $\frac{\sum_{l=1}^{L} p_l m_l}{\sum_{l=1}^{L} m_l} = p$. Given the architecture, we use a polynomial equation solver (Harris et al., 2020) to obtain $p_1$ for the first layer such that $p_1 \in (0,1)$.
51
+
52
+ *Balanced*: The second layerwise sparsity method aims to maintain the same number of parameters in every layer for a given network sparsity p and source network architecture. Each neuron has a similar in- and out-degree on average. Every layer has $x = \frac{p}{L} \sum_{l=1}^{L} m_l$ nonzero parameters. Such an ER network can be realized with $p_l = x/m_l$. In case that $x \ge m_l$, we set $p_l = 1$.
53
+
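+ A small numerical sketch of both proposals for a toy fully-connected architecture (the bisection below is a simple stand-in for the polynomial solver mentioned above):
+
+ ```python
+ import numpy as np
+
+ m = np.array([784 * 300, 300 * 100, 100 * 10])  # per-layer parameter counts, L = 3
+ p = 0.1                                         # desired overall density
+
+ def balanced(m, p):
+     x = p * m.sum() / len(m)                    # same nonzero count per layer
+     return np.minimum(1.0, x / m)
+
+ def pyramidal(m, p, iters=100):
+     # find p1 in (0, 1) with sum_l p1^l * m_l = p * sum_l m_l
+     lo, hi, target = 1e-9, 1.0 - 1e-9, p * m.sum()
+     for _ in range(iters):
+         mid = (lo + hi) / 2
+         val = sum(mid ** l * m_l for l, m_l in enumerate(m, start=1))
+         lo, hi = (mid, hi) if val < target else (lo, mid)
+     p1 = (lo + hi) / 2
+     return np.array([p1 ** l for l in range(1, len(m) + 1)])
+
+ for name, pl in [("balanced", balanced(m, p)), ("pyramidal", pyramidal(m, p))]:
+     print(name, pl.round(3), "overall density:", (pl * m).sum() / m.sum())
+ ```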
54
+ Our main goal in this section is to derive probabilistic statements about the existence of edges in an ER source network that enable us to approximate a given target network. As every connection in the source network only exists with a probability $p_l$ , for each target weight, we need to create multiple candidate edges, of which at least one is nonzero with high enough probability. This can be achieved by ensuring that each target edge has multiple potential starting points in the ER source network. Our construction realizes this idea with multiple copies of each neuron in a layer. The required number of neuron copies depends on the sparsity of the ER source network and introduces an overparametrization factor pertaining to the width of the network. To create multiple copies of input neurons as well, our construction relies on one additional layer in the source network in comparison with a target network, as visualized in Fig. 4 in the Appendix. We first explain the construction for a single target layer and extend it afterwards to deeper architectures.
55
+
56
+ **Single Hidden Layer Targets** We start by representing a single-hidden-layer fully-connected target network with a subnetwork of a random ER source network that consists of one more layer. Our proof strategy is visually explained by Fig. 4 in the Appendix. The following theorem states the precise width requirement that our construction requires.
57
+
58
+ **Theorem 2.1** (Single Hidden Layer Target Construction). Assume that a single hidden-layer fully-connected target network $f_T(\mathbf{x}) = \mathbf{W}_T^{(2)} \phi(\mathbf{W}_T^{(1)} \mathbf{x} + \mathbf{b}_T^{(1)}) + \mathbf{b}_T^{(2)}$ , an allowed failure probability $\delta \in (0,1)$ , source densities $\mathbf{p}$ and a 2-layer ER source network $f_S \in ER(\mathbf{p})$ with widths $n_{S,0} = q_0 d, n_{S,1} = q_1 n_{T,1}, n_{S,2} = q_2 n_{T,2}$ are given. If
59
+
60
+ $$\begin{split} q_0 &\geq \frac{1}{\log(1/(1-p_1))}\log\left(\frac{2m_{T,1}q_1}{\delta}\right),\\ q_1 &\geq \frac{1}{\log(1/(1-p_2))}\log\left(\frac{2m_{T,2}}{\delta}\right) \text{ and } q_2 = 1 \end{split}$$
61
+
62
+ then with probability $1 - \delta$ , the random source network $f_S$ contains a subnetwork $S_P$ such that $f_S(x, W \cdot S_P) = f_T$ .
63
+
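+ As a quick numerical illustration of these bounds (a sketch only; it reproduces the kind of scaling shown later in Fig. 2):
+
+ ```python
+ import numpy as np
+
+ def required_widths(m_T1, m_T2, p1, p2, delta=1e-3):
+     """Width multipliers q0, q1 from Theorem 2.1 (q2 = 1)."""
+     q1 = np.log(2 * m_T2 / delta) / np.log(1 / (1 - p2))
+     q0 = np.log(2 * m_T1 * q1 / delta) / np.log(1 / (1 - p1))
+     return q0, q1
+
+ # dense single hidden-layer target: d = 784 inputs, 128 hidden, 10 outputs
+ m_T1, m_T2 = 784 * 128, 128 * 10
+ for p in [0.5, 0.1, 0.05]:             # uniform source density p = 1 - sparsity
+     q0, q1 = required_widths(m_T1, m_T2, p, p)
+     print(f"p = {p}: q0 ~ {q0:.1f}, q1 ~ {q1:.1f}")
+ ```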
64
+ The key idea is to create multiple copies (blocks in Fig. 4 (b) in the Appendix) in the source network for each target neuron such that every target link is realized by pointing to at least one of these copies in the ER source. To create multiple candidates of input neurons, we create a univariate first layer in the source network as explained in Fig. 4. In the appendix, we derive the corresponding weight and bias parameters of the source network so that it can represent the target network exactly. Naturally, many of the available links will receive zero weights if they are not needed in the specific construction, but they are required for a high enough probability that at least one weight can be set to nonzero. Our main task in the proof is to estimate the probability that we can find representatives of all target links in the ER source network, i.e., every neuron in Layer l=1 has at least one edge to every block in l=0 of size $q_0$, as shown in Fig. 4 (b). This probability is given by $(1-(1-p_1)^{q_0})^{m_{T,1}q_1}$. For the second layer, we repeat a similar argument to bound the probability $(1-(1-p_2)^{q_1})^{m_{T,2}}$ with $q_2=1$, since we do not require multiple copies of the output neurons. Bounding this probability by $1-\delta$ completes the proof, as detailed in Appendix A.3.
67
+
68
+ **Deep Target Networks** Theorem 2.1 shows that $q_0$ and $q_1$ depend on $1/\log(1/\operatorname{sparsity})$ . We now generalize the idea to create multiple copies of target neurons in every layer to a fully connected network of depth L (proofs are in Appendix A.4) and convolutional networks of depth L as stated in Appendix A.5, which yields a similar result as above. The additional challenge of the extension is to handle the dependencies of layers, as the construction of every layer needs to be feasible.
69
+
70
+ **Theorem 2.2** (ER networks can represent L-layer target networks.). Given a fully-connected target network $f_T$ of depth L, $\delta \in (0,1)$ , source densities $\mathbf{p}$ and a L+1-layer ER source network $f_S \in ER(\mathbf{p})$ with widths $n_{S,0} = q_0d$ and $n_{S,l} = q_l n_{T,l}, l \in \{1,2,..,L\}$ , where
71
+
72
+ $$q_l \ge \frac{1}{\log(1/(1-p_{l+1}))} \log\left(\frac{Lm_{T,l+1}q_{l+1}}{\delta}\right)$$
73
+
74
+ $$\text{for } l \in \{0, 1, \ldots, L-1\} \text{ and } q_L = 1,$$
75
+
76
+ then with probability $1 - \delta$ the random source network $f_S$ contains a subnetwork $S_P$ such that $f_S(x, W \cdot S_P) = f_T$ .
77
+
78
+ **Lower Bound on Overparameterization** While our existence results prove that ER networks have the universal function approximation property like dense neural networks, our construction requires a considerable amount of overparametrization in comparison with a dense target network to achieve it. In particular, extremely sparse ER networks seem to face a natural limitation, since for sparsities $1-p \geq 0.9$, the overparameterization factor scales approximately as $1/\log(1/(1-p)) \approx 1/p$. Fig. 2 visualizes how this scaling becomes problematic for increasing sparsity. The next theorem establishes that, unfortunately, we cannot expect to get around this $1/\log(1/(1-p_l))$ limitation.
79
+
80
+ **Theorem 2.3** (Lower bound on Overparametrization in ER Networks). There exist univariate target networks $f_T(\mathbf{x}) = \phi(\mathbf{w}_T^T \mathbf{x} + b_T)$ that cannot be represented by a random 1-hidden-layer ER source network $f_S \in ER(p)$ with probability at least $1 - \delta$ , if its width is $n_{S,1} < \frac{1}{\log(1/(1-p))} \log\left(\frac{1}{1-(1-\delta)^{1/d}}\right)$ .
81
+
82
+ See Fig. 6 and App. A.6 for the complete proof.
83
+
84
+ **Theoretical Insights** We have shown that ER networks provably contain subnetworks that can represent general target networks if they are wider by a factor $1/\log(1/(1-p_l))$. This overparameterization factor is necessary and limits the utility of random masks alone to obtain extremely sparse neural network architectures. However, their high expressiveness makes them promising and computationally cheap starting points for further pruning and more general sparsification approaches.
85
+
86
+ Inspired by this insight, in the next section, we explore the idea to start pruning from ER source networks in the context of SLTs. The first question that we ask is: How much wider do random source networks need to be in order to contain SLTs?
87
+
88
+ ![](_page_5_Figure_4.jpeg)
89
+
90
+ Figure 2. Overparametrization in ER Networks For a single hidden-layer target network with width 128 in the hidden layer and 10 in the output layer, the figure shows the required width of the first layer (l=1) of the source ER network as per Theorem 2.1 with a confidence of $1-\delta=0.999$. The required width increases moderately up to sparsity 0.9 and drastically beyond 0.95.
91
+
92
+ Most SLT existence proofs that derive a logarithmic lower bound on the overparametrization factor of the source network (Pensia et al., 2020; Burkholz et al., 2022; Burkholz, 2022a; da Cunha et al., 2022; Burkholz, 2022b; Ferbach et al., 2022) solve multiple subset sum approximation problems (Lueker, 1998). For every target parameter z, they identify some random parameters of the source network $X_1, ..., X_n$, a subset of which can approximate z. In case of an ER source network, a fraction $1-p$ of the connections is missing in comparison with a dense source network. These missing connections also reduce the number of available source parameters $X_1, ..., X_n$. To take this into account, we modify the corresponding subset sum approximations according to the following lemma.
93
+
94
+ **Lemma 2.4** (Subset sum approximation in ER Networks). Let $X_1, ..., X_n$ be independent, uniformly distributed random variables so that $X_i \sim U([-1,1])$ and $M_1, ..., M_n$ be independent, Bernoulli distributed random variables so that $M_i \sim Ber(p)$ for a p > 0. Let $\epsilon, \delta \in (0,1)$ be given. Then for any $z \in [-1,1]$ there exists a subset $I \subset [n]$ so that with probability at least $1-\delta$ we have $|z-\sum_{i\in I} M_i X_i| \le \epsilon$ if
97
+
98
+ $$n \ge C \frac{1}{\log(1/(1-p))} \log\left(\frac{1}{\min(\delta, \epsilon)}\right). \tag{1}$$
100
+
101
+ The proof is given in App. A.2 and utilizes the original subset sum approximation result for random subsets of the base set $X_1, ..., X_n$. In addition, it solves the challenge of combining the involved constants while respecting the probability distribution of the random subsets. For simplicity, we have formulated it for uniform random variables and target parameters $z \in [-1,1]$, but it could be easily extended to random variables that contain a uniform distribution (like normal distributions) and generally bounded targets as in Corollary 7 in (Burkholz et al., 2022).
102
+
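+ A brute-force sanity check of the lemma for modest n (a sketch only; it enumerates all subsets of the surviving weights, so keep n small):
+
+ ```python
+ import itertools
+ import numpy as np
+
+ rng = np.random.default_rng(0)
+
+ def best_subset_error(z, n, p):
+     """Smallest |z - sum_{i in I} M_i X_i| over all subsets I of [n]."""
+     X = rng.uniform(-1.0, 1.0, size=n)
+     M = rng.binomial(1, p, size=n)
+     avail = X[M == 1]                  # only surviving ER weights are usable
+     best = abs(z)                      # I = empty set
+     for r in range(1, len(avail) + 1):
+         for idx in itertools.combinations(range(len(avail)), r):
+             best = min(best, abs(z - avail[list(idx)].sum()))
+     return best
+
+ errs = [best_subset_error(z, n=12, p=0.5) for z in rng.uniform(-1, 1, size=20)]
+ print(max(errs))                       # worst-case approximation error
+ ```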
103
+ In comparison with the original subset sum approximation result, we need a base set that is larger by a factor $1/\log(1/(1-p))$ . This is exactly the factor by which we can modify contemporary SLT existence results to transfer to ER source networks and it is also the same factor that we derived in the previous section on expressiveness results. However, we require in general a higher overparameterization to accommodate subset sum approximations.
104
+
105
+ The advantage of the formulation of the above lemma is that it allows us to transfer general SLT existence results to the ER source setting in a straightforward way. By replacing the subset sum approximation construction with Lemma 2.4, we can thus show SLT existence for fully-connected (Pensia et al., 2020; Burkholz, 2022b), convolutional (Burkholz et al., 2022; Burkholz, 2022a; da Cunha et al., 2022), and residual ER networks (Burkholz, 2022a), or random GNNs (Ferbach et al., 2022). To give an example for the effective use of this lemma and discuss the general transfer strategy, we explicitly extend the SLT existence results by Burkholz (2022b) for fully-connected networks to ER source networks. We thus show that pruning a random source network of depth L+1 with widths larger than a logarithmic factor can approximate any target network of depth L with a given probability $1 - \delta$.
106
+
107
+ **Theorem 2.5** (Existence of SLTs in ER Networks). Let $\epsilon, \delta \in (0,1)$ , a target network $f_T$ of depth L, an $ER(\mathbf{p})$ source network $f_S$ of depth L+1 with edge probabilities $p_l$ in each layer l and iid initial parameters $\boldsymbol{\theta}$ with $w_{ij}^{(l)} \sim U([-1,1]), b_i^{(l)} \sim U([-1,1])$ be given. Then with probability at least $1-\delta$ , there exists a mask $S_P$ so that each target output component i is approximated as $\max_{\boldsymbol{x} \in \mathcal{D}} \|f_{T,i}(\boldsymbol{x}) - f_{S,i}(\boldsymbol{x}; \boldsymbol{W}_S \cdot S_P)\| \le \epsilon$ if
108
+
109
+ $$n_{S,l} \ge C \frac{n_{T,l}}{\log\left(1/(1-p_{l+1})\right)} \log\left(\frac{1}{\min\{\epsilon_l, \delta/\rho\}}\right)$$
110
+
111
+ for $\rho = \frac{CN_1^{1+\gamma}}{\log(1/(1-\min_l p_l))^{1+\gamma}}\log(1/\min\{\min_l \epsilon_l, \delta\}), \ l \geq 1$ for any $\gamma \geq 0$, and where $\epsilon_l = g(\epsilon, f_T)$ is defined in App. A.2. We also require $n_{S,0} \geq C d \frac{1}{\log(1/(1-p_1))}\log\left(\frac{1}{\min\{\epsilon_1, \delta/\rho\}}\right)$, where C>0 denotes a generic constant that is independent of $n_{T,l}$, L, $p_l$, $\delta$, and $\epsilon$.
112
+
113
+ *Proof Outline*: The main LT construction idea is visualized in Fig. 4 (c) in the appendix. For every target neuron, multiple approximating copies are created in the respective layer of the LT to serve as basis for modified subset sum approximations (see Lemma 2.4) of the parameters that lead to the next layer. In line with this approach, the first layer of the LT consists of univariate blocks that create multiple copies of the input neurons. In addition to Lemma 2.4, the total number of subset sum approximation problems $\rho$ that have to be solved also needs to be re-assessed for ER source networks, as this influences the probability of LT existence. This modification is driven by the same factor $1/\log(1/(1-p))$. The full proof is given in Appendix A.2.
114
+
115
+ With our SLT existence results, we have provided our first example of how to generally turn dense-to-sparse deep learning methods into sparse-to-sparse schemes. Next, we also validate the idea of starting to prune from a random ER mask in experiments.
2210.10664/main_diagram/main_diagram.drawio ADDED
@@ -0,0 +1 @@
 
 
1
+ <mxfile host="app.diagrams.net" modified="2022-01-28T18:34:55.777Z" agent="5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.72 Safari/537.36" etag="OVM78iwTIYVVrD7Ger78" version="16.2.6" type="device"><diagram id="0HKRIM7HBSjzTMGTWpjs" name="Page-1">7V1Zl5u4Ev41fc7cB/ugjeWx17n3drZJZ5nkJYc2tM0EGzfG6XZ+/YCNbBAyCFsCvHQeYhYLrO9TValUVbpA1+PXP0N7OnobOK5/ATXn9QLdXEAIENHi/5Izi9UZU9NXJ4ah56Q3bU48eL/d9GT6veHcc9xZ7sYoCPzIm+ZPDoLJxB1EuXN2GAYv+dueAj//1Kk9TJ+obU48DGzfLdz21XOiUforSObu/7recESfDLT0ytimN6dNzEa2E7xknoVuL9B1GATR6tP49dr1k86j/bJq6G7L1fWLhe4kEvnCwvjx/fLLNfpskND87jy864UPPWiumvll+/P0F6dvGy1oF4TBfOK4SSvaBbp6GXmR+zC1B8nVlxj0+NwoGvvxEYg/PgWTKEUR6Mmx5/vXgR+Ey7bQ09MTHAzi87MoDH66mSuO/qiT5BvF35X+1F9uGLmvmVPp7/zTDcZuFC7iW9KrOMUg5ZwB0er4ZYOgQfEeZdDDRkqclDTDdcubfo0/pF3L7+aPIf79/+/PwV+3n24GzkcPDHr/64E6vQyqe9mxZ6P1vauupOyEnE4nyb8UnMz51R8PDH35JwcMgBg0TK2ABjE5aAAK0T5wcFnPg0P3o7SHcrjoz/OAXujNlsS+jG8AePq6uRh/Gqb/2+MEqsnjLPnvdvzoOo43GcbfeGMv3JA+Jn7r1ZPo9xgyxF0b5RHPQzQJJi6Dc3rK9r3hJD4cxIDFz0NXCVBeLM8u0wtjz3GSx3Aplh/qEsAnRjX4AHPAR6qwh4qwfxMEP3vzKUsBUcTrCdn8+HZs13ziClV9YLqPT5LGsY5yUJqQAyVPqiqDEm2Fcja1J5lB2QK869P5Vzk41JFGOoY6PqOuHHVsdW2sE0Vi+85z4xlBfPXkVDOw9DzGRhFjnQOxMqtMVwwxPDmIETK6BbGhGOJ3JwcxgahbEFtbIX7cWTebPMz7/X61Hn48fgZgyFpopF0GUNddmbfDnTiXiXcu6Ubfns28QR6LfC9lnUpmfBx3Vrj4O7nYJ/TwW/rN5cHNa/rN1dGiqrcjOxy6UclvSn+B61Bv4RZMsj4NTp/Tc6Hr25H3K+9j5AGRPuFD4C2HSgo53DboaROzYB4O3PRbWZ8g2xAzPTdZUqw6ptDQkhfrn70HVQQcYx2jSkcogHQohwKFhkjDFOA5ZM4UEKAAIZKkQKEhdRS4Hn22Zu/ffLm/IuPF8/jbk+9aawpkEY+7+CE9DMJoFAyDie3fbs4ymG/uiafs0xTcf9woWqRMsOdRkCfNrrTIkDE1COIzd17yk5fXi+74Wdx/EfOdreRagVbWW3pRZ328++X+/vMVLt58frv49Df8dmlPesAQpGs9HsY/xF5kbpgmtJiVKCtk5dgFdK2UjRiW3h9/WL2BXCnE8yVuddsIrNCwQohdkrGejKSpohsH6chCjqQpPoT9vHEIzLSRjIiCaH1XbjFMK2GCqIHIZzAo9O1Rjff4evoWpUto2ZHOJyUWHelmJ0a6Xj5yOzLSef7jgx/pRDNzXbmOn8iOcwyKozxW9vuPcn5H8/x5cg27Vy/KDOj46Bsds/HnzXBODiqNuurRKDrIGpoDorxkN1ijTXgOyERL6E1PAHhOQSU8AcIsKYxVbfmnYjIhzLtKHwXGnSIoAnle7UxQXWMaatpJIRAjNRvZ05Xtcuv73nSWmAlTN/TiZycuwht3dfbD5lSVWnm0Bz+HS16/n0e+N3G3qBsJygOC/PzPwBzl0aQHkTYsKBJSs0yp3mh+3EPBYd8xvYTZ0couCQvrJcbJabDuDcXDnnJun8Uqnbdw8SGYeZGXzF3iW24ng2AVIXZIC1cKhBAbs4mKE1Wr0eCieq7p2jLoSJ2SSGeiClglIe6Xzjdk0PCzhoY/4g3/I9NBHeEMITDPGWD1yW6sYZsydMg2pZo3Av6Fg7YWEWEGZtvWIhJwNBx0jxPMkJoet9bjvCCPA3btoG7NnCFiLF/CqL5Dce3gevO4hnjSoHOncvJGSKeYh/S8G3kt22v7bCDDPLNh5qkPLOk286plHmcZ+SAoajDmx67CUUftUlTnMFL9KmiWxviikBJ4tfzXnpDU4PDLlf/Weh5+6iH09eGLOXjuic5W9la7OE8Iwalr/YAIxH2O0mVPajI2lejbTJ4K0fiIZUQR0jmxDcq8RtRJ0XBoww66qIOCoFJnUVFfHRDRjMSIVQgTX0NMJoVc3GDKqzWMdSFtVDtGg/q11i+s13ov5n5FwqpWjMbBCCuk9w2WLnpBYAHT4IVjWapEFu5knEZWuGCYEy99TTcqRMzyKOPMacpWplq2K7ayaTHyycCyXAl6wxM6Calj3NW462AysKPtqUTMSDiV9Tdi4YJkanT9DfOiLs6AywPcoHKejmitqIuaRXx76uC+ZVliLmP3gtyC1cfDwj4fYAzkoG+xM6ci+I0mDRKeq1gu+PAQwZcANVu4pX2s1dVfoli/O1Gs2XIt7WPN8zud88EVUqDoXmybAtvrNJ31umy9rmug4JKyWsZ/e8Wms2rfF23WAanT5LrW0FZVqems3Dlok5ZrfZDtRZvO6r0pEqDiqlezJKiXwbVzqDToa9ra87t0Ifd1uPYNS3EFd8RxSxApgMw0Iuq4NWDclA4LlOlbZr5BxQ5cIpBFdSbJniQhO4ZN80lCcOMkqRdYeibJLiTRLJkk0cymSaJLcB9xVwYKlstH97sbBvH9N+/e8a2MQ7JAFawhAFQsMFKstb8uRtBMCI4CkzTupmM1ScMgspNsxPiwZ8lyQdHSMSkpECxOSmmwnWwLlRtBSM3h7odl7RpUVRHOlQnaKq9msxX9bJBEWcxVdWyWJqgghTWf8NYcJa8tLdSo6NVjC8LYjuWQwmiPr9gujgWqnCHI5gwhwygMQWDw7A5z/47my2UJK3xctf127kdeb+TazkUiQFJRH9LLD67/1LuMorgvEzF36iqbWPkUD8RxDwPqRMwJZxmb4/BGIKzixWA9RDbQU4lWuQfLnesmvLgLwhc7dETh3zm6kCN7mVBayQ5/tngj0ooGGKAxfjk8LUV4Sq3nA4r9yQtANuSnIuwVWNzM5E5n6rtjwgQNi07tMFMEFuuKoo+ZesP0hWVFE3Mxq+d4ksNHeObjPnxk0wcxYYSVLD5q/BfeWl7AJGX3781f7uSFWmTnyYvCyYsmOOCg4IBraPIikGBNSx/M5mPVtQ/URDQAxmClkcG5nImiTAOq7Bs1RetLFQrIjqbM4JKvT0Qn5w0pFDYrytgxvcqgNcYpiVQZ
OCbgvvB2n7qx3/2oXGGx9yOr5v2p8lFqoIlUAJXrg3GIazqY54Mx4SOStfMpAkzyTVFwrWfV+crbqiTX9pyK1YZ1u+dUjB+9ycqPHL/3JJlxfwhdxxswPpfSffGO1ueCIBuUx10l4XjEZayS8O0YUOj+g6g+sBUN1bm+GJpsLh3jKRWebDNlz7Gy5VM+8pX+th0jtNKoPHI9sqML42pxYdycaKAesliugKLoVxWixZ1DGm2M9j3LEsBy01jCVLDVaiNMBhbRGOwlWaRsOgDJVxupfi/J1Um4fX5KHo52aV1ZbEdXQn/VrAUGaJ61lXFItRau7ua+n3TRdTCZuIMoWbU6trUqoOctH4I5i9KQ48tRphcl2EJbQslSW8j2pyP7R2wOPVwu7SEtPhd5Y3fGrlOeqKlEcN5QQlZxZtSopVSZrCY4qCUxR8tRRrv7w/+h/aceVw5HPsTo95n4UszZzxhoiFchh0aUyY8e6wQl/IQLx4n7Oj6BdfNWeJmVIU7d4F0wSOmV6jJueolhubvhyKl91IzhuB+CEpKOxYLEi4pc2/nUOuA8G9Mmr3n2VCYqTrvyg8HPc5C7yN56nIT69WY1OfGkitz0Bdpz3paJmp3nsMpdLuxqouBGYtUNCZbJljXxhAL1EZP1+ak4+xMjZjnvtB9pC1qFzsZ5w52zDQSwdJ6hphe295An9iurhjVrvoOi+Q4OxXwvp554pSkmPnWdj9aiRU9jQ1o36WsWLjlYLpSB36hlT6eXh7H3tCyVu8vMoiYJqh3QnH1MSwdnyy5owHimzIp1k7K71fifRbbhadoMgLS+dNY45pWRVGgEcLedadEIuC8aAfcHbQSsiSdsBNC9NUqNAMC1FpWpAu4eMW0YAfcnwoXOGAG4KCebdO9lVPBGIddbb96YCmoNh5okqNTtuKGpPrMcLFbxQpZexmcbU66NSelVaWNSzd8WD4/PxpRRl5mU+dE9euL9PJrOo4wS9NhbD88DXW9FRpKtZeRD+HtQbEVdRqgxf6ypq+vOC6GobU+dabO6yji1e5wd2HGjvFFXVnCnldKO5ERBmJ9xI9ottbd0YRsCTEPyAtH5+KqrCHhM+K5DIffFtxAzpRhfGWX+xUwHb3LilkP9SCsmz49Qp2tbRkLJPgFqjISawVSnxxCmVAfhFBZv1BygJSC6Mauu44Dh+N32D5ZovlIBX8uwKdqQ3UZZnrPlevTZmr1/8+X+iowXz+NvT75ridRva7UCQra+IkBE0uBkMot5qy7cmggSirtxUWjXoVorgUdVgg0Vhuq3qc4LZoiZJmQljpmQ+5yt5uaW95LlnuLyrh1P61E48psKugN5/YCwmDOek+LFrCEahF1Bljeh4ZJNYKPcY1Q1LH6cOL8mVc3BpDJ3d0iaiE2z4QwmYTcD9RNuG+DS9BHiPqfueynVR+0mMh+DPmopQZmjt5hKS3jH8cGmJmMDNKy3BMLEjlFv8Stltaa3upRTdrCKCxfzQwujSVxx5Sc4hREuTXFh7nPqvpdSxUUdCtJLg61jIVfhj+T60Y3sgu/1AOqCSZBJmMmFAII7t8nwqfJR357aLBP1P0AvBb4Y9noayCOm0HD7yIssrjfhXi9XNXljAbaqfJDVN6zMH1vhi2lRWA3hvF8D6Pv68/awH9//uCE/xy+3+uR+9tfXxeJ58o9WXedmLyHh/Jgc+lKcpERj3WB2bibFqnG8VAoZQoILPFQK/EFu46sAdsxMEHW6YXZbsKsrnZDgXTPl8WhhpxEXFHXOEp6q9XUu6ryccqmDPcmBGjhB0tRZ5q8NQ0b7E4vjXtd4aVGqeCBh//YSHnyom/J8tMgDkhf7BkcANCr2K8tc7Qk8PAO/MvgZL4CBiulwjQLPmwrKBP5s6FFDz2gK+PgwDBIwNvPA+EeO3gaOm9zxLw==</diagram></mxfile>
2210.10664/main_diagram/main_diagram.pdf ADDED
Binary file (45.8 kB). View file
 
2210.10664/paper_text/intro_method.md ADDED
@@ -0,0 +1,96 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Introduction
2
+
3
+ On a daily basis, millions of users interact with different websites to search for various products, which is reflected in the ever-increasing importance of online advertisements and recommender systems. Consequently, accurately predicting click-through rates (CTR) plays an essential role in building effective personalized recommender systems that can positively impact online businesses' revenue. As a result, several machine learning approaches have been proposed over the last decade that focus on improving the performance of CTR prediction in various recommendation settings.
4
+
5
+ One of the main aspects of CTR prediction is effectively learning rich latent representations of the different input fields. This aspect has been tackled by applying various techniques, beginning with wide and deep networks [@cheng2016wide] and factorization machines-based models [@he2017neural; @guo2017deepfm; @lian2018xdeepfm; @yu2019input; @lu2020dual], which rely on inner products between the latent representations of the input fields and hence capture low-order feature interactions. On the other hand, other approaches rely on deep neural networks (DNNs) and cross networks [@wang2021dcn; @wang2017deep] that can learn feature embeddings in a very effective manner. However, both DNNs and cross networks fail to capture the multiplicative relations across the input fields, limiting the expressiveness of the resulting latent representations and leading to suboptimal predictive performance. Recently, transformer-based models have received much attention by showing remarkable performance, as they successfully capture multiplicative (high-order) relations by applying the multi-head self-attention mechanism [@song2019autoint; @lu2020dual]. Nevertheless, there is still massive untapped potential in combining the latent representations and strengths of the different approaches to achieve superior prediction performance.
6
+
7
+ In this paper, we propose DeepMR, a novel mixture-of-experts model that captures effective feature interactions by combining the benefits of DNN-based and attention-based models via a parallel architecture.
8
+
9
+ The success of cross networks in various recent works highlights the importance of integrating residual connections within deep learning models. Moreover, recent residual techniques such as **ReZero** [@bachlechner2021rezero] have shown remarkable performance by applying learnable weights to the residual connections, which leads to better signal propagation in the model, faster convergence, and thus more expressive representations.
10
+
11
+ To leverage these benefits, our proposed DeepMR model integrates ReZero connections in its DNN and multi-head self-attention blocks, allowing it to achieve superior prediction performance compared to other state-of-the-art models. The main contributions can be summarized as follows:
12
+
13
+ - We propose DeepMR, a novel mixture of experts model for CTR prediction that utilizes a DNN and multi-head self-attention component with ReZero connections.
14
+
15
+ - We evaluate the proposed model on three real-world publicly available datasets for CTR prediction. Results show that the proposed DeepMR model significantly outperforms all state-of-the-art models in CTR prediction in Area Under the ROC Curve (AUC) and Logloss.
16
+
17
+ - We conduct a comprehensive ablation study to illustrate the importance of each component in the model.
18
+
19
+ # Method
20
+
21
+ We formulate the click-through rate prediction problem as follows: given a dataset of $M$ user-item interaction instances (records) $(\mathcal{X}, \mathcal{Y})$, each instance $x_i$ in $\mathcal{X}$ has $N$ fields representing the features of the target user, the target item, and the interaction's context. Moreover, each instance has a corresponding target $y_i \in \{0,1\}$, which represents whether the item has been clicked by a user or not. Our main aim in the CTR task is to predict the probability of an item being clicked by a user given the input feature vector $x_i$.
22
+
23
+ The main goal of our proposed approach is to benefit from combining different field representations of each input instance. As previously seen in the literature, stacking fully-connected layers [@wang2021dcn; @guo2017deepfm; @huang2019fibinet] to obtain instance representations has achieved superior performance with low model complexity. Furthermore, many models now use multi-head attention to capture the relations and similarities between different fields in each instance [@lu2020dual; @song2019autoint]. These models were able to attain remarkable performance by capturing these attention-based (multiplicative) relations. Therefore, in this work, we propose a multi-representation parallel structured model (DeepMR) for CTR prediction. The model consists of three main building blocks: field embeddings, a ReZero DNN, and a ReZero multi-head self-attention block, as shown in Figure [1](#fig:1){reference-type="ref" reference="fig:1"}. Combining these components allows the model to benefit from each component's strength and reach remarkable performance. We explain the details of each block in the forthcoming sections.
24
+
25
+ To obtain the field embedding for each input instance, we use a look-up embedding table to encode the input instance fields $(x_{i}^{(1)}, x_{i}^{(2)},...x_{i}^{(N)})$. In other words, we pass all the fields through a fully connected layer to learn the latent embedding for each input field. Subsequently, we add a positional encoding to all the input fields to distinguish between the different input fields of an instance. The field embedding can then be defined as: $$\begin{equation}
26
+ e_{i}^{(n)}= x_{i}^{(n)}W^{(n)}+P_{i}^{(n)} , \; e_{i}^{(n)} \in \mathbb{R}^{d_n}
27
+ \end{equation}$$ where $W^{(n)} \in \mathbb{R}^{Z_n \times d_n}$ is the weight matrix, $Z_n$ is the vocabulary size of the input field $x_{i}^{(n)}$ and $d_n$ is the field embedding size, where we use the same $d_n$ for all fields. $x_{i}^{(n)}$ is the binary input vector of the $n$th field of instance $i$ and $P_{i}^{(n)}$ is the corresponding positional encoding of size $d_n$ for each field.
28
+
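+ A minimal PyTorch sketch of this embedding step, with `nn.Embedding` as the look-up equivalent of multiplying the one-hot $x_{i}^{(n)}$ by $W^{(n)}$ (all class names and sizes below are illustrative):
+
+ ```python
+ import torch
+ import torch.nn as nn
+
+ class FieldEmbedding(nn.Module):
+     def __init__(self, vocab_sizes, d_n):
+         super().__init__()
+         # one look-up table per field: W^(n) in R^{Z_n x d_n}
+         self.tables = nn.ModuleList(nn.Embedding(z, d_n) for z in vocab_sizes)
+         # learnable positional encoding P^(n): one d_n-vector per field
+         self.pos = nn.Parameter(torch.zeros(len(vocab_sizes), d_n))
+
+     def forward(self, x):                      # x: (batch, N) integer field ids
+         e = torch.stack([t(x[:, n]) for n, t in enumerate(self.tables)], dim=1)
+         return e + self.pos                    # (batch, N, d_n)
+
+ emb = FieldEmbedding(vocab_sizes=[1000, 50, 7], d_n=16)
+ print(emb(torch.randint(0, 7, (4, 3))).shape)  # torch.Size([4, 3, 16])
+ ```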
29
+ Despite their simplicity, fully-connected layers have shown a high capability to achieve beneficial, meaningful representations that help models accomplish outstanding performance. In the ReZero-DNN component, we use a series of fully-connected layers with ReZero residual connections to learn a refined representation for each input instance. Firstly, the field embeddings are concatenated column-wise to form one input vector $E_{DNN_{i}} \in \mathbb{R}^{ 1 \times ( N \cdot d_n )}$.
30
+
31
+ $$\begin{equation}
32
+ E_{DNN_{i}}= \text{concat}_{col} \left( e_{i}^{(n)} \right)_{n=1:N}
33
+ \label{eq:1}
34
+ \end{equation}$$
35
+
36
+ Finally, this input vector is fed through the ReZero-DNN layers to get the final representation $O_{DNN_{i}} \in \mathbb{R}^{ d_o }$. $$\begin{equation}
37
+ O_{DNN_{i}}= \text{ReZero-DNN}(E_{DNN_{i}})
38
+ \label{eq:2}
39
+ \end{equation}$$
40
+
41
+ **ReZero Connections** Residual connections are compelling in deep learning; they were initially used in image processing with deep ResNets [@he2016deep]. They provide performance improvements, as they allow signal propagation through the network, overcome exploding weight updates, and achieve faster convergence on the logloss. In this work, we use the novel technique residual with zero initialization [@bachlechner2021rezero], which applies a simple change to deep residual networks: a learnable residual weight $\alpha$ rescales the contribution of the current layer with respect to the previous one. Hence, the output of the current ReZero layer $l_{j+1}$ is: $$\begin{equation}
42
+ l_{j+1} = l_j + \alpha_j F_j(l_j)
43
+ \label{eq:3}
44
+ \end{equation}$$ where $\alpha_j$ is the learnable residual weight, $F_j$ is the transformation applied by layer $j$ (here, a fully-connected layer), and $l_j$ is the output of the previous layer. The empirical interpretation is further discussed in section 4.3.
45
+
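+ A sketch of one such ReZero layer in PyTorch, with $\alpha$ initialized to zero as in the original formulation (class name and sizes are illustrative):
+
+ ```python
+ import torch
+ import torch.nn as nn
+
+ class ReZeroLinear(nn.Module):
+     """One fully-connected ReZero layer: l_{j+1} = l_j + alpha_j * F_j(l_j)."""
+     def __init__(self, dim):
+         super().__init__()
+         self.fc = nn.Linear(dim, dim)
+         self.alpha = nn.Parameter(torch.zeros(1))  # residual weight, starts at 0
+
+     def forward(self, x):
+         return x + self.alpha * torch.relu(self.fc(x))
+
+ block = nn.Sequential(*[ReZeroLinear(64) for _ in range(4)])
+ print(block(torch.randn(8, 64)).shape)         # torch.Size([8, 64])
+ ```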
46
+ Multi-head self-attention is currently used in many areas, e.g., time series forecasting, language processing [@vaswani2017attention], and recommender systems [@kang2018self]. DeepMR also utilizes a multi-head self-attention component to enrich the extracted field representation by capturing the fields' multiplicative relations in each instance, allowing the model to learn superior representations. In this case, we concatenate the input field embeddings row-wise, as shown in Figure [1](#fig:1){reference-type="ref" reference="fig:1"}, to obtain the input matrix $E_{SA_{i}} \in \mathbb{R}^{ N \times d_n }$ for the multi-head attention block.
47
+
48
+ $$\begin{equation}
49
+ E_{SA_{i}}= \text{concat}_{row} \left( e_{i}^{(n)} \right)_{n=1:N}
50
+ \label{eq:4}
51
+ \end{equation}$$
52
+
53
+ Afterward, we project the input into Query $(Q)$, Key $(K)$ and Value $(V)$ matrices, split among the specified number of heads, to apply the multi-head attention layer as follows: $$\begin{equation}
54
+ \textrm{Att}(\textbf{Q}, \textbf{K}, \textbf{V}) = \textrm{softmax}\left( \frac{\textbf{Q} \textbf{K}^{T}}{\sqrt{\frac{d_o}{H}}}\right) \textbf{V}
55
+ \end{equation}$$
56
+
57
+ $$\begin{equation}
58
+ \begin{split}
59
+ &\textrm{SA}_i(E_{SA_{i}})= \\&
60
+ \text{concat}_{col} \left( \textrm{Att}(E_{SA_{i}}\textbf{W}^{Q}_{h}, E_{SA_{i}}\textbf{W}^{K}_{h}, E_{SA_{i}}\textbf{W}^{V}_{h}) \right)_{h=1:H}
61
+ \end{split}
62
+ \label{eq:5}
63
+ \end{equation}$$ where $\textbf{W}^{Q}_{h}$, $\textbf{W}^{K}_{h}$, $\textbf{W}^{V}_{h} \in \mathbb{R}^{d_n \times \frac{d_o}{H}}$ represent the linear projection matrices of the head at index $h$, and $H$ is the number of heads. $\textrm{SA}_i(E_{SA_{i}})$ represents the column-wise concatenation of the attention heads. Finally, we have the feed-forward layers to obtain the component's final output representation $O_{SA_{i}} \in \mathbb{R}^{ d_o }$ as follows: $$\begin{equation}
64
+ \begin{split}
65
+ &O_{SA_{i}} = \textrm{FFN}(SA_i) = \\&
66
+ \text{concat}_{row} \left( \phi (SA^{(n)}_i \textbf{W}^{(1)} + b^{(1)})\textbf{W}^{(2)} + b^{(2)} \right)_{n=1:N}
67
+ \end{split}
68
+ \label{eq:6}
69
+ \end{equation}$$ where $\textbf{W}^{(1)}$, $\textbf{W}^{(2)} \in \mathbb{R}^{d_o \times d_o}$ are the weight matrices of the two feed-forward layers, and $b^{(1)}$, $b^{(2)} \in \mathbb{R}^{d_o}$ are their bias vectors. $\phi$ represents the Leaky ReLU non-linear activation, and $\text{concat}_{row}$ concatenates vectors row-wise.
70
+
71
+ In contrast to the additive residual connection of the original multi-head self-attention, we apply a multiplicative ReZero connection with a learnable parameter $\alpha_{SA}$ within the multi-head self-attention block, enabling the network to learn better representations.
72
+
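+ The sketch below illustrates this self-attention branch (Eqs. 4-6) in PyTorch; the projection from $d_n$ to $d_o$, the mean pooling over fields, and the exact form of the multiplicative ReZero gate are our simplifying assumptions rather than the paper's specification:
+
+ ```python
+ import torch
+ import torch.nn as nn
+
+ class FieldSelfAttention(nn.Module):
+     """Multi-head self-attention over the N field embeddings of an instance."""
+     def __init__(self, d_n: int, d_o: int, heads: int):
+         super().__init__()
+         self.proj = nn.Linear(d_n, d_o)  # lift field embeddings to d_o
+         self.attn = nn.MultiheadAttention(d_o, heads, batch_first=True)
+         self.ffn = nn.Sequential(nn.Linear(d_o, d_o), nn.LeakyReLU(),
+                                  nn.Linear(d_o, d_o))
+         self.alpha_sa = nn.Parameter(torch.zeros(1))  # ReZero weight
+
+     def forward(self, e: torch.Tensor) -> torch.Tensor:
+         # e: (batch, N, d_n), the row-wise concatenation of Eq. 4
+         x = self.proj(e)
+         a, _ = self.attn(x, x, x)
+         x = x * (1 + self.alpha_sa * a)  # multiplicative ReZero (our reading)
+         return self.ffn(x).mean(dim=1)   # pool fields into O_SA in R^{d_o}
+ ```
+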
73
+ To form the final input representation, we combine the output representations from the ReZero-DNN and ReZero multi-head attention components using a weighted point-wise sum as follows: $$\begin{equation}
74
+ O_{{comb}_i}= \beta O_{SA_{i}} + (1-\beta)O_{DNN_{i}},\: O_{{comb}_i} \in \mathbb{R}^{d_o}
75
+ \label{eq:7}
76
+ \end{equation}$$
77
+
78
+ where $O_{DNN_{i}}$ is the output of the fully-connected ReZero-DNN layers, $O_{SA_{i}}$ is the output of the multi-head self-attention component, and $\beta$ is the weighting factor which decides the contribution of each branch to the final output representation.
79
+
80
+ Finally, the model's predicted probability of item $i$ being clicked by user $u$ is calculated by summing the elements of the $O_{{comb}_i}$ vector and then applying a sigmoid activation as follows:
81
+
82
+ $$\begin{equation}
83
+ \hat{y}_i= \sigma(\sum_{r=1}^{d_o} O_{{comb}_i}^{(r)})
84
+ \label{eq:8}
85
+ \end{equation}$$
86
+
87
+ Hence, our objective function for CTR binary classification can be defined as follows: $$\begin{equation}
88
+ \mathcal{L}= -\frac{1}{M} \sum_{i=1}^{M} \left[ y_{i} \log{(\hat{y}_{i})} + (1-y_{i}) \log{(1-\hat{y}_{i})} \right]
89
+ \label{eq:9}
90
+ \end{equation}$$ where $y_i$ are the actual labels, $\hat{y}_i$ are the predicted probabilities, and $M$ is the number of input instances.
91
+
92
+ The pseudo-code of DeepMR is described in Algorithm 1.
93
+
94
+ ::: algorithm
95
+ Initialize the model parameters
96
+ :::
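+
+ Since the listing above is truncated, the following hedged Python sketch shows the training step implied by Eqs. 7-9; the `dnn` and `sa` branch handles and the logits-based loss computation are our own choices:
+
+ ```python
+ import torch
+ import torch.nn.functional as F
+
+ def training_step(dnn, sa, beta, e_dnn, e_sa, y):
+     """One DeepMR-style training step, illustrative only.
+
+     dnn:  ReZero-DNN branch, (batch, N*d_n) -> (batch, d_o)
+     sa:   self-attention branch, (batch, N, d_n) -> (batch, d_o)
+     beta: scalar weighting factor between the two branches
+     """
+     o_comb = beta * sa(e_sa) + (1.0 - beta) * dnn(e_dnn)  # Eq. 7
+     logits = o_comb.sum(dim=-1)                           # Eq. 8, pre-sigmoid
+     # binary cross-entropy of Eq. 9, computed stably from the logits
+     loss = F.binary_cross_entropy_with_logits(logits, y.float())
+     loss.backward()
+     return loss
+ ```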
2210.12524/main_diagram/main_diagram.drawio ADDED
The diff for this file is too large to render. See raw diff
 
2210.12524/paper_text/intro_method.md ADDED
@@ -0,0 +1,86 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Introduction
2
+
3
+ Synthesizing hair images and transferring hairstyles in images have many potential applications, such as portraying missing or wanted individuals with different hairstyles, previsualizing one's appearance before a haircut or dye, and enhancing appearance for media and editing applications. This technology can even improve visual identity anonymization applications that protect the privacy of individuals.
4
+
5
+ Unlike most other parts of the human face, hair's specific geometry, material, and high-frequency details make it extremely difficult to represent and synthesize. This complexity comes from the nature of hair and the diverse visual perception factors that the user may intend to edit or preserve. A few key challenges can be identified that are fundamental to visual quality: 1) complicated hair area boundaries that lead to inadequate high-fidelity hair shape control and seamless background blending, 2) the entanglement between material properties and the orientation of hair strands, 3) the correlation between the color palette and strand variations. In addition to this hair complexity, synthesizing high-resolution images and stabilizing the network training process are other challenging problems [@shaham2021spatially; @li2020gan; @chai2022any].
6
+
7
+ ![Low-resolution (128x128) hairstyle transfer results by our approach. (Top) Input images, (Bottom) Reference image and mask, (Middle) Synthesis by our method.](images/low_res_transfer.png){#fig:intro width="\\textwidth"}
8
+
9
+ We identified the two most notable relevant works as MichiGAN [@tan2020michigan] and LOHO [@saha2021loho]. LOHO decomposes hair into perceptual structure, appearance, and style attributes and performs latent-space optimization using the StyleGAN2 [@karras2020analyzing] generator, while MichiGAN can control shape, background, structure, and appearance. LOHO offers less control over hair synthesis but performs better than MichiGAN on challenging samples. Both methods show promising initial results, but they are not feasible for real-time applications due to their high computational cost. Moreover, these approaches fail to recover the global structure of the target hairstyle and to synthesize photorealistic hair.
10
+
11
+ Our approach aims to improve style transfer and photorealism for hair synthesis compared to these methods while reducing computation time for real-time applications. To achieve this goal, we design a customized low-resolution generator network in which the activations are masked with the target hair mask so that synthesis focuses only on the hair region. We also design a hair blending block (HBB) for seamless blending of the background and the synthesized hair, followed by a super-resolution network. Low-resolution hairstyle transfer reduces computation time, while the customized generator with HBB yields better hair synthesis and style transfer.
12
+
13
+ In this paper, we propose efficient hairstyle transfer with a generative adversarial network, called EHGAN. Our method addresses the aforementioned challenges of efficient high-resolution hair synthesis thanks to our novel framework, which performs hairstyle transfer on low-resolution images, blends seamlessly via the HBB module, and increases the resolution of the output using a pre-trained super-resolution network. This strategy improves the stability of GAN training and yields better photorealism, style transfer, and runtime compared to the other methods.
14
+
15
+ In particular, our main contributions are:
16
+
17
+ - We propose a low-resolution style transfer generator followed by a super-resolution network. In between, the novel Hair Blending Block (HBB) fuses the generated hair with the original image for more photo-realistic boundaries.
18
+
19
+ - Qualitative and quantitative experiments show that our method has better hair synthesis results compared to the state-of-the-art.
20
+
21
+ - To the best of our knowledge, our method is the first to provide real-time hair synthesis and transfer, running at 11.6 frames per second.
22
+
23
+ # Method
24
+
25
+ Given an input portrait image $I_{inp}$ and a reference portrait image $I_{ref}$, our EHGAN aims at performing conditional hairstyle transfer to the hair region according to the reference input, while keeping the background $I_{bg}$ unchanged. The output is denoted as $I_{out}$.
26
+
27
+ ![ The hairstyle encoder $E$ estimates the reference hairstyle vector $s$ from the reference hair $H_{ref}$, which is obtained by multiplying the reference image $I_{ref}$ with the reference hair mask $M_{ref}$. The hairstyle vector $s$, target hair mask $M_{tar}$, input background $I_{bg}$, and noise vector ${z}$ are fed to the generator $G$, which generates a fake hair image $H_{fake}$. Finally, the resolution of the low-resolution hair-transferred image is increased by a pre-trained super-resolution network to obtain the final image $I_{out}$. ](images/main_figure.png){#fig:EHGAN width="\\textwidth"}
28
+
29
+ As illustrated in Figure [2](#fig:EHGAN){reference-type="ref" reference="fig:EHGAN"}, the proposed pipeline consists of generation, hairstyle encoding, and super-resolution parts. Super-resolution is used only at the inference stage. In the training stage, the hairstyle encoding part is responsible for estimating the hairstyle representation of the reference image, and the generation part synthesizes the hairstyle-transferred image. In our method, we estimate the reference hairstyle representation $s$ with the style encoder network $E$, which takes the reference hair region $H_{ref}$ as input. The generator $G$ takes this style representation, randomly sampled noise $z$, the input background $I_{bg}$, and the target hair shape mask $M_{t}$. Finally, we increase the resolution of the synthesized image with the pre-trained super-resolution network $SR$. We can formulate our hair synthesis process as:
30
+
31
+ $$\begin{equation}
32
+ I_{out} = SR(G(z,M_{t},I_{bg},E(H_{ref})))
33
+ \end{equation}$$
34
+
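+ A hedged PyTorch-style sketch of this inference pipeline follows; the module interfaces are assumptions read off Eq. 1, not the authors' actual implementation:
+
+ ```python
+ import torch
+ import torch.nn.functional as F
+
+ @torch.no_grad()
+ def transfer_hairstyle(E, G, SR, I_ref, M_ref, M_tar, I_bg, res=128):
+     """Hairstyle transfer following Eq. 1 (illustrative interface).
+
+     E: style encoder, G: generator, SR: pre-trained super-resolution net.
+     """
+     # extract the reference hair region and bring it to working resolution
+     H_ref = F.interpolate(I_ref * M_ref, size=(res, res))
+     s = E(H_ref)                         # hairstyle vector
+     z = torch.randn(I_ref.size(0), 512)  # sampled noise
+     I_low = G(z, M_tar, I_bg, s)         # low-resolution transferred image
+     return SR(I_low)                     # final high-resolution output
+ ```
+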
35
+ Hair appearance describes the globally-consistent color style that is invariant to a specific shape or structure. It includes multiple factors such as the intrinsic albedo color, environment-related shading variations, and even the granularity of wisp styles. Due to its global consistency, its appearance can be compactly represented. Given the complexity of natural hairstyles, instead of generating the appearance features from scratch, we address hair appearance as a style transfer problem, which extracts style from the reference image and transfers it to the target.
36
+
37
+ First, we multiply the reference image with its mask to extract the reference hair region $H_{ref}$. Then, we resize $H_{ref}$ to the working resolution of the style encoding network. The style encoding network consists of residual blocks and estimates the hairstyle representation $s = E(H_{ref})$, where $E(\cdot)$ denotes the style encoding and $s \in \mathbb{R}^{N \times 512}$ is the style representation vector of $H_{ref}$.
38
+
39
+ To provide an efficient hairstyle transfer framework, we transfer hairstyles on low-resolution images, using low-resolution hairstyle encoder, generator, and discriminator networks. EHGAN contains two main modules in training: a hair synthesis generator $G$ and a hairstyle encoder $E$. The hair synthesis generator has two types of blocks: the Adaptive Instance Normalization ResNet Block (AdaINResBlk) and the Hair Blending Block (HBB).
40
+
41
+ Before synthesizing the hairstyle-transferred image, we resize the input image background $I_{bg}$ to our working resolution. Our generator $G$ synthesizes the transferred image corresponding to the reference hairstyle $s_{ref}$, which is provided by the style encoding network $E$. The generator is a decoder architecture modified for the hair synthesis task. It consists of residual blocks and a hair blending block (HBB). We believe that inserting style information only at the beginning of a network is not a good architectural choice. Recent architectures [@karras2019style; @choi2020stargan] have demonstrated that higher-quality results can be obtained if style information is injected as normalization parameters in multiple layers of the network, e.g., using adaptive instance normalization (AdaIN) [@huang2017arbitrary]. Therefore, we use AdaIN in each residual block to inject $s_{ref}$ into $G$. We call this combination of a residual block and AdaIN AdaINResBlk [@choi2020stargan; @huang2018multimodal].
42
+
43
+ <figure id="fig:HBB">
44
+ <div class="center">
45
+ <img src="images/HBB.png" style="width:50.0%" />
46
+ </div>
47
+ <figcaption>Hair blending block (HBB). It combines synthesized hair area and background seamlessly.</figcaption>
48
+ </figure>
49
+
50
+ **AdaINResBlk** requires three inputs: the layer activation $X_{L}$, a target hair mask $M_{t}$ to focus generation on the hair area, and a reference hairstyle $s_{ref}$ to transfer the reference hairstyle information. For the first layer, $X_{L}$ is a Gaussian noise vector $z \in \mathbb{R}^{N \times 512}$. We mask the activation of each AdaINResBlk by multiplying it with the target hair mask, so the generator focuses better on the target hair shape.
51
+
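+ To make the masking concrete, here is a minimal sketch of a masked AdaIN residual block; this is our simplification of AdaINResBlk (the real block also includes upsampling and further convolutions):
+
+ ```python
+ import torch
+ import torch.nn as nn
+
+ class MaskedAdaINBlock(nn.Module):
+     """Residual block whose activations are masked by the target hair mask."""
+     def __init__(self, channels: int, style_dim: int = 512):
+         super().__init__()
+         self.conv = nn.Conv2d(channels, channels, 3, padding=1)
+         self.norm = nn.InstanceNorm2d(channels)
+         self.style = nn.Linear(style_dim, 2 * channels)  # AdaIN scale/shift
+
+     def forward(self, x, s, mask):
+         gamma, beta = self.style(s).chunk(2, dim=1)
+         h = self.norm(self.conv(x))
+         h = gamma[..., None, None] * h + beta[..., None, None]
+         # mask the activations so synthesis focuses on the hair region
+         return (x + torch.relu(h)) * mask
+ ```
+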
52
+ **Hair Blending Block (HBB)** is designed to perform seamless hair blending. The output of the last AdaINResBlk contains only the synthesized hair area, which must be blended with the input background region. Therefore, HBB requires the background of the input image $bg_{inp}$, the hairstyle vector $s$ to transfer the reference hairstyle, and the synthesized hair area $F_{hair}$. This block contains an AdaINResBlk and extra operations. We feed the input background and target hair mask into HBB to obtain better seamless blending performance, and we add learnable convolutional layers that edit the background and hair mask images. HBB performs hair blending differentiably in only one layer, and it is compatible with generators that use all of their capacity to generate the hair area of the image. Finally, we extend the HBB with a final 3x3 convolution layer, with a stride of 1 and 1-pixel padding, to refine the final output and reduce noise. HBB is shown in Figure [3](#fig:HBB){reference-type="ref" reference="fig:HBB"}.
53
+
54
+ In our efficient hairstyle transfer setting, we use low-resolution style encoder and generator networks. For this reason, we need super-resolution layers to generate high-quality hair synthesis results. Instead of training layers that perform the super-resolution task, we use the state-of-the-art pre-trained GFPGAN [@wang2021gfpgan]. Other methods [@tan2020michigan; @saha2021loho] synthesize results at 512x512 resolution, whereas our trained generator and encoder models take 128x128 inputs. We therefore increase the resolution of our results by 4x to make them comparable with those of the other methods.
55
+
56
+ **Adversarial loss:** A two-scale PatchGAN [@isola2017image; @wang2018high] discriminator $D$ is used to match the distributions of synthesized and real hair, enforcing natural visual quality and boosting local details. The loss can be formulated as:
57
+
58
+ $$\begin{equation}
59
+ L_{adv}= \mathbb{E}\left[ \log (D(I)) \right] + \mathbb{E}\left[ \log(1-D(G(z,s,I_{bg},M_{inp}))) \right]
60
+ \end{equation}$$
61
+
62
+ **Hair loss:** We also penalize both photometric and feature differences between the reference and predicted hair regions of the image with a hair loss. We utilize a pre-trained VGG19 model [@simonyan2014very] for feature extraction; VGG19 is widely used in previous image generation problems [@wang2018esrgan; @saha2021loho].
63
+
64
+ $$\begin{equation}
65
+ L_{hair}= \lambda_{\text{hair-perceptual}} \left\| \text{Vgg}(H_{ref})-\text{Vgg}(H_{fake}) \right\|_{1} + \lambda_{\text{hair-pixel}} \left\| H_{ref} - H_{fake} \right\|_{1}
66
+ \end{equation}$$
67
+
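+ A hedged sketch of this loss; `vgg_features` stands for a frozen VGG19 feature extractor, and the default weights are placeholders:
+
+ ```python
+ import torch
+
+ def hair_loss(vgg_features, H_ref, H_fake, lam_perc=1.0, lam_pix=1.0):
+     """L1 photometric + VGG-feature loss on the hair region (illustrative)."""
+     perceptual = (vgg_features(H_ref) - vgg_features(H_fake)).abs().mean()
+     pixel = (H_ref - H_fake).abs().mean()
+     return lam_perc * perceptual + lam_pix * pixel
+ ```
+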
68
+ **Pixel Loss:** Colors and lighting conditions are optimized directly based on pixel-value differences. While this cost function is relatively primitive, it is sufficient to optimize lighting and colors. Furthermore, the pixel loss supports natural visual quality and high-level features. It is formulated at the pixel level as:
69
+
70
+ $$\begin{equation}
71
+ L_{pix}=\left\| I_{inp} - I_{fake} \right\|_{1}
72
+ \end{equation}$$
73
+
74
+ **Style reconstruction:** In order to encourage the generator $G$ to utilize the style code $s$ when generating the hair $I_{out}$, we employ a style reconstruction loss:
75
+
76
+ $$\begin{equation}
77
+ L_{style}= \left\| E(H_{ref}) - E(H_{fake}) \right\|_{1}
78
+ \end{equation}$$
79
+
80
+ Thanks to this loss, the encoder learns a mapping from a hair image to its hairstyle vector, and the generator learns to generate the same hair images for the same hairstyles. Thus, at test time, our learned encoder $E$ allows $G$ to transform an input hair region to reflect the style of a reference hair.
81
+
82
+ In summary, the overall training objective can be formulated as: $$\begin{equation}
83
+ \min_{G} \max_{D} \left(\lambda_{pix}L_{pix} + \lambda_{hair}L_{hair} + \lambda_{adv}L_{adv} +\lambda_{style}L_{style} \right)
84
+ \end{equation}$$
85
+
86
+ Considering that we do not have ground-truth images when the conditional inputs come from different sources, training the proposed EHGAN is essentially an unsupervised learning problem. However, most existing unsupervised learning algorithms [@liu2019few; @zhu2017unpaired; @huang2018multimodal] usually perform worse than supervised learning regarding the visual quality of the generated results. Motivated by [@he2018deep; @tan2020michigan], which face a similar problem in image colorization and hair synthesis, we propose to train our EHGAN in a pseudo-supervised way. Specifically, we feed conditional inputs extracted from the same source image into EHGAN and enforce it to recover the original image with explicit supervision during training, and we generalize to arbitrary reference hairstyles at inference.
2211.00164/main_diagram/main_diagram.drawio ADDED
The diff for this file is too large to render. See raw diff
 
2211.00164/paper_text/intro_method.md ADDED
@@ -0,0 +1,73 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Introduction
2
+
3
+ Effective real-world applications of reinforcement learning or sequential decision-making must cope with exogenous information in sensory data. For example, visual datasets of a robot or car navigating busy city streets might contain information such as advertisement billboards, birds in the sky, or other people crossing the road. Parts of the observation (such as birds in the sky) are irrelevant for controlling the agent, while other parts (such as people crossing along the navigation route) are extremely relevant. How can we effectively learn a representation of the world which extracts just the information relevant for controlling the agent while ignoring irrelevant information?
4
+
5
+ Real-world tasks are often more easily solved with fixed offline datasets, since operating from offline data enables thorough testing before deployment, which can ensure safety, reliability, and quality in the deployed policy [@Lange2012BatchRL; @offlineRobotics; @KumarFSTL19; @offlineDialogue; @OfflineTutorial]. The Offline-RL setting also eliminates the need to address exploration and planning, which come into play during data collection.[^2] Although approaches from representation learning have been studied in the online-RL case, yielding improvements, exogenous information has proved to be empirically challenging. A benchmark for learning from offline pixel-based data [@vd4rl] formalizes this challenge empirically. Combining these challenges, is it possible to learn distraction-invariant representations from rich observations in offline RL?
6
+
7
+ Approaches for discovering small tabular MDPs (${\leq}500$ discrete latent states) or linear control problems invariant to exogenous information have been introduced before [@dietterich2018discovering; @efroni2021provably; @efroni2022sparsity; @exoRL; @lamb2022guaranteed]. However, the planning and exploration techniques in these algorithms are difficult to scale. A key insight that @efroni2021provably [@lamb2022guaranteed] uncovered is the usefulness of multi-step action prediction for learning exogenous-invariant representations.
8
+
9
+ <figure id="fig:motivation" data-latex-placement="t!">
10
+ <p><embed src="figures/visualizations/mainfig_iter4.pdf" style="width:45.0%" /> <embed src="figures/visualizations/mainfig_iter4_cut.pdf" style="width:52.0%" /><br />
11
+ </p>
12
+ <figcaption><strong>Left: Representation Learning for Visual Offline RL in Presence of Exogenous Information</strong>. We propose <span class="math inline">$\textsc{ACRO}$</span>, that recovers the controller latent representations from visual data which includes uncontrollable irrelevant information, such as observations of other agents acting in the same environment. <strong>Right: Results Summary</strong>. <span class="math inline">$\textsc{ACRO}$</span> learns to ignore the observations of task irrelevant agents, while baselines tend to capture such exogenous information. We use different offline datasets with varying levels of exogenous information () and find that baseline methods consistently under-perform w.r.t. <span class="math inline">$\textsc{ACRO}$</span>, as is supported by our theoretical analysis.</figcaption>
13
+ </figure>
14
+
15
+ Following these, we propose to learn *Agent-Controller Representations for Offline-RL (ACRO)* using multi-step inverse models, which predict actions given current and future observations as in . [ACRO]{.smallcaps} avoids the problem of learning distractors, because they are not predictive of the agent's actions. This property even holds for temporally-correlated exogenous information. At the same time, multi-step inverse models capture all the information that is sufficient for controlling the agent [@efroni2021provably; @lamb2022guaranteed], which we refer to as the agent-controller representation. [ACRO]{.smallcaps} is learned in an entirely reward-free fashion. Our first contribution is to show that [ACRO]{.smallcaps} outperforms all current baselines on datasets from policies of varying quality and stochasticity. gives an illustration of [ACRO]{.smallcaps}, with a summary of our experimental findings.
16
+
17
+ A second core contribution of this work is to develop and release several new benchmarks for offline-RL designed to have especially challenging exogenous information. In particular, we focus on *diverse temporally-correlated* exogenous information, with datasets where (1) every episode has a different video playing in the background, (2) the same STL-10 image is placed to the side or corner of the observation throughout the episode, and (3) the observation consists of views of nine independent agents but the actions only control one of them (see Fig. [1](#fig:motivation){reference-type="ref" reference="fig:motivation"}). Task (3) is particularly challenging because which agent is controllable must be learned from data.
18
+
19
+ Finally, we also introduce a new theoretical analysis () which explores the connection between exogenous noise in the learned representation and the success of Offline-RL. In particular, we show that Bellman completeness is achieved by the agent-controller representation of [ACRO]{.smallcaps}, while representations which include exogenous noise may not satisfy it. Bellman completeness has previously been shown to be a sufficient condition for the convergence of offline RL methods based on Bellman error minimization [@munos2003error; @munos2008finite; @antos2008learning].
20
+
21
+ # Method
22
+
23
+ We consider a Markov Decision Process (MDP) setting for modeling systems with both relevant and irrelevant components (also referred to as the exogenous block MDP in @efroni2021provably). This MDP consists of a set of observations, $\mathcal{X}$; a set of latent states, $\mathcal{Z}$; a set of actions, $\mathcal{A}$; a transition distribution, $T(z' \mid z, a)$; an emission distribution $q(x \mid z)$; a reward function $R: \Xcal \times \Acal \rightarrow \mathbb{R}$; and a start state distribution $\mu_0(z)$. We also assume that the supports of the emission distributions of any two latent states are disjoint. The latent state is decoupled into two parts $z=(s,e)$, where $s\in \mathcal{S}$ is the agent-controller state and $e\in \mathcal{E}$ is the exogenous state. For $z,z'\in \mathcal{Z},a\in \Acal$ the transition function is decoupled as $T(z' \mid z, a) = T(s' \mid s, a) T_{e}(e' \mid e),$ and the reward only depends on $(s,a)$. These definitions imply that there exist mappings $\phi_\star:\Xcal\rightarrow \mathcal{S}$ and $\phi_{\star,e} :\Xcal\rightarrow \mathcal{E}$ from observations to the corresponding controller and exogenous latent states. The agent interacts with the environment, generating a latent state, observation, and action sequence, $(z_1, x_1, a_1, z_2, x_2, a_2, \cdots)$, where $z_1 \sim \mu_0(\cdot)$ and $x_t \sim q(\cdot \mid z_t)$. The agent does not observe the latent states $\rbr{z_1,z_2, \cdots}$, instead receiving only the observations $\rbr{x_1,x_2,\cdots}$. The agent chooses actions using a policy distribution $\pi(a \mid x)$. A policy is an *exo-free policy* if it is not a function of the exogenous noise. Formally, for any $x_1$ and $x_2$, if $\phi_\star(x_1) = \phi_\star(x_2)$, then $\pi(\cdot \mid x_1) = \pi(\cdot \mid x_2)$.
24
+
25
+ ::: wrapfigure
26
+ r0.4 ![image](figures/visualizations/acstate_new.pdf){width="80%"}
27
+
28
+ []{#fig:ACRO_Illustration label="fig:ACRO_Illustration"}
29
+ :::
30
+
31
+ We consider learning representations from an offline dataset $\mathcal{D} = (\mathcal{X}, \mathcal{A})$ consisting of sequences of $N$ observations $\mathcal{X} = (x_1, x_2, x_3, ..., x_N)$ and the corresponding actions $\mathcal{A} = (a_1, a_2, a_3, ..., a_N)$. We are in the rich-observation setting, *i.e.*, observation $x_t \in \mathbb{R}^m$ is sufficient to decode $z_t$. Our focus is on pre-training an encoder $\phi: \mathbb{R}^m \rightarrow \mathbb{R}^d$ on $\mathcal{D}$ such that the frozen representation $s_t = \phi(x_t)$ is suitable for offline policy optimization.
32
+
33
+ To learn representations that discard exogenous information, we leverage prior work from the theoretical RL community and train a multi-step inverse action prediction model, which captures long-range dependencies thanks to its conditioning on distant future observations. This leads to the [ACRO]{.smallcaps} objective, namely to predict the action conditioned on $\phi(x_t)$ and $\phi(x_{t+k})$. Note that even though we condition on a future observation, we only predict the first action instead of the sequence of actions up to the $k$-th timestep, as the former is easier to learn.
34
+
35
+ Our proposed method, which we call *Agent-Controller Representations for Offline-RL* ($\textsc{ACRO}$), optimizes the following objective based on a multi-step inverse model: $$\begin{align}
36
+ \phi_\star \in \argmax_{\phi \in \Phi} \E_{t \sim U(0,N)} \E_{k \sim U(0,K)} \text{log}\left(\PP(a_t \mid \phi(x_t), \phi(x_{t+k}))\right). \label{eqn:acro}
37
+ \end{align}$$
38
+
39
+ This approach is motivated by two desiderata: (i) ignoring exogenous information and (ii) capturing the latent state that is necessary for control. The following invariance lemma (, see  for proof) states that optimal action predictor models can be obtained without dependence on exogenous noise, when the data-collection policy is assumed not to depend on it either.
40
+
41
+ ::: {#lemma:f .lemma}
42
+ **Lemma 1** (Invariance Lemma: Multi-step inverse model is invariant to exogenous information, @lamb2022guaranteed). *For any exo-free policy $\pi: \Xcal \rightarrow \Acal$, for all $a_t \in \Acal$, and $(x_t, x_{t+k}) \in {\tt supp}~\PP_\pi(X_t, X_{t+k})$: $$\begin{equation}
43
+ \PP_\pi(a_t \mid x_t, x_{t+k}) = \PP_\pi(a_t \mid \phi_\star(x_t), \phi_\star(x_{t+k}))
44
+ \end{equation}$$*
45
+ :::
46
+
47
+ At the same time, prior works have shown that single step inverse models of action prediction can fail to capture the full controller latent states [@efroni2021provably; @lamb2022guaranteed; @hutter2022uniqueness]. One type of counter-example for single-step inverse models stems from a failure to capture long-range dependencies. For example, in an empty gridworld, a pair of positions which are two or more spaces apart can be mapped to the same representation without increasing the loss of a one-step inverse model. Another simple counter-example involves a problem where the last action the agent took is recorded in the observation, in which case the encoder can simply retrieve that action directly while ignoring all other information. The use of multi-step inverse models resolves both of these counter-examples and is able to learn the full agent-controller state [@lamb2022guaranteed].
48
+
49
+ We emphasize here that even though inverse models of action prediction have appeared in past literature (as discussed in related works), they are often proposed for the purposes of exploration and reward bonus. In contrast, we propose to learn the multi-step inverse model to explicitly uncover a representation that contains only the controller, endogenous part of the state. Recently, @lamb2022guaranteed proposed a multi-step inverse model where the learnt representation $\phi(\cdot)$ is regularized, so that $\phi(\cdot)$ discards irrelevant details from observations $x$. This was accomplished by using vector-quantization on the encoder's output, forcing discrete latent states to be learnt for constructing a tabular MDP for latent recovery. In contrast, [ACRO]{.smallcaps} learns the continuous endogenous latent state, without a bottleneck, and the learnt pre-trained representation $\phi(\cdot)$ is later used for policy optimization in offline RL. More details of our algorithm are discussed in .
50
+
51
+ Due to its importance to practical applications, the offline RL setting has been extensively studied by the theoretical community. The majority of provable value-based offline RL algorithms follow a Bellman error minimization approach [@munos2003error; @munos2008finite; @antos2008learning], in line with the techniques used in practice. The common representational assumptions needed to derive these results are: (A1) the function class contains the optimal Q function (realizability), (A2) the data distribution is sufficiently diverse (concentrability), and (A3) Bellman completeness [@munos2008finite]. This last condition is the most subtle one; it states that the function class can properly represent the Bellman backup of any function it contains.
52
+
53
+ ::: definition
54
+ **Definition 2** (Bellman Completeness). *We say that $\Fcal$ is Bellman complete if it is closed under the Bellman operator. For any $f\in \Fcal$, it holds that $\Tcal f\in \Fcal$, where $(\Tcal f)(x,a)\equiv R(x,a) + \EE_{x'\sim T(x'\mid x,a)}[ \max_{a'} f(x',a')]$ for all $(x,a)\in \Xcal\times \Acal$.*
55
+ :::
56
+
57
+ @chen2019information conjectured that (A1) and (A2) alone are not sufficient for sample efficient offline RL, and, recently, @foster2021offline established a lower bound proving this claim. Thus, the representational requirements needed for offline RL are more intricate than in supervised learning.
58
+
59
+ With these observations in mind, we highlight a key advantage of the agent-controller representation $\phi_\star$ relative to other representations in the offline RL setting. Namely, one can construct a Bellman complete function class on top of $\phi_\star$, while some representations that include exogenous information provably violate the Bellman completeness property. To formalize these claims, we denote by $\mathcal{Q}_{\Scal} = \{ (s,a) \mapsto [0,1]: (s,a)\in \mathcal{S} \times \mathcal{A} \}$ the set of Q-functions defined over $\mathcal{S}$, and for a given representation $\phi$, we let $\Fcal(\phi) = \{ (s,a) \mapsto Q(\phi(s),a) : Q \in \mathcal{Q}_{\Scal}, (s,a)\in \mathcal{S} \times \mathcal{A} \}$ denote the set of Q-functions defined on top of $\phi$. The following proposition states that the Agent-Controller representation leads to a Bellman complete function class (all proofs in / ).
60
+
61
+ ::: {#prop:controllable_rep_advantage .proposition}
62
+ **Proposition 3** ([ACRO]{.smallcaps} Representation is Bellman Complete). *$\Fcal(\phi_\star)$ is Bellman complete.*
63
+ :::
64
+
65
+ Next, we show that there exists a representation strictly more expressive than [ACRO]{.smallcaps} (*i.e.*, one that includes exogenous information) which, surprisingly, violates the Bellman completeness property.
66
+
67
+ ::: {#prop:exo_rep_disadvantage .proposition}
68
+ **Proposition 4** (Exogenous Information May Violate Bellman Completeness). *There exists $\phi$ which is a refinement[^3] of $\phi_\star$ such that $\Fcal(\phi)$ is not Bellman complete.*
69
+ :::
70
+
71
+ This proposition implies that including exogenous information in the representation may break the Bellman completeness assumption, which is a requirement for establishing the convergence of offline RL algorithms based on Bellman error minimization. From this perspective, additional information in the representation may deteriorate the performance of offline RL. Conversely, a coarser representation may trivially violate the realizability assumption A1: such a representation may merge states on which the optimal Q-function differs, preventing it from being realized.
72
+
73
+ Together, these observations motivate the experimental pipeline used in this work: learn the agent-controller representation by optimizing , then perform offline RL on top of it. In doing so, we obtain a representation that is sufficient for optimal performance and yet filters out the exogenous information, which can (i) be impossible to exactly model, and (ii) hurt offline RL performance.
2211.01910/main_diagram/main_diagram.drawio ADDED
The diff for this file is too large to render. See raw diff
 
2211.01910/paper_text/intro_method.md ADDED
@@ -0,0 +1,45 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Introduction
2
+
3
+ The combination of scale and attention-based architectures has resulted in language models possessing an unprecedented level of generality . These so-called ``large language models'' (LLMs) have shown remarkable, often superhuman, capabilities across a diverse range of tasks, including both zero-shot and few-shot setups . With generality, however, there comes a question of control: how can we make LLMs do what we want them to do?
4
+
5
+ To answer this question and steer LLMs toward desired behaviors, recent work has considered fine-tuning , in-context learning , and several forms of prompt generation , including both differentiable tuning of soft prompts and natural language prompt engineering . The latter is of particular interest, as it provides a natural interface for humans to communicate with machines and may be of great relevance not only to LLMs but to other generalist models such as prompted image synthesizers , for which public interest in prompt design and generation has also emerged (see Appendix for examples).
6
+
7
+ Behind this interest is the fact that plain language prompts do not always produce the desired results, even when those results are possible to produce with alternative instructions. Thus, human users must experiment with a wide range of prompts to elicit desired behaviors, as they have little knowledge of how compatible instructions are with a particular model.
8
+ We can understand this by viewing LLMs as black-box computers that execute programs specified by natural language instructions: while they can execute a broad range of natural language programs, the way these programs are processed may not be intuitive for humans, and the quality of instruction can only be measured when executing these instructions on a downstream task .
9
+
10
+ \workshopexclude{
11
+ To reduce the human effort involved in creating and validating effective instructions, we propose a novel algorithm using LLMs to generate and select instructions automatically. We call this problem natural language program synthesis and propose to address it as a black-box optimization problem using LLMs to generate and search over heuristically viable candidate solutions.
12
+ In doing so, we leverage the generalist capabilities of LLMs in three ways. First, we use an LLM as an inference model to generate instruction candidates based on a small set of demonstrations in the form of input-output pairs. Next, we guide the search process by computing a score for each instruction under the LLM we seek to control. Finally, we propose an iterative Monte Carlo search method where LLMs improve the best candidates by proposing semantically similar instruction variants. Intuitively, our algorithm asks LLMs to generate a set of instruction candidates based on demonstrations and then asks them to assess which instructions are more promising. We call our algorithm Automatic Prompt Engineer (\algname). Our main contributions are:
13
+
14
+ - We frame instruction generation as natural language program synthesis, formulate it as a
15
+ black-box optimization problem guided by LLMs, and propose both a naive and an iterative Monte Carlo search method to approximate the solution.
16
+ - Our proposed method, APE, achieves human-level performance on zero-shot learning with model-generated instructions on 19/24 NLP tasks.
17
+ - We provide extensive qualitative and quantitative analyses exploring various facets of APE, and demonstrate applications of APE for improving few-shot learning and steering LLMs toward desired behaviors such as truthfulness and/or informativeness.
18
+
19
+ }
20
+
21
+ \workshoponly{
22
+ To reduce the human effort involved in creating and validating effective instructions, we propose a novel algorithm using LLMs to generate and select instructions automatically. We call this problem natural language program synthesis and propose to address it as a black-box optimization problem using LLMs to generate and search over heuristically viable candidate solutions. In doing so, we leverage the generalist capabilities of LLMs in two ways. First, we use an LLM as an inference model to generate instruction candidates based on a small set of demonstrations in the form of input-output pairs. Second, we guide the search process by computing a score for each instruction under the LLM we seek to control. Intuitively, our algorithm asks LLMs to generate a set of instruction candidates based on demonstrations and then asks them to assess which instructions are more promising. We call our algorithm Automatic Prompt Engineer (\algname). Our main contributions are:
23
+
24
+ - We frame instruction generation as natural language program synthesis, formulate it as a
25
+ black-box optimization problem guided by LLMs, and propose a Monte Carlo search method to approximate the solution.
26
+ - Our proposed method, APE, achieves human-level performance on zero-shot learning with model-generated instructions on 19/24 NLP tasks, and we demonstrate applications of APE for steering LLMs toward desired behaviors such as truthfulness and/or informativeness.
27
+
28
+ }
29
+
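+ To make the procedure concrete, here is a hedged Python sketch of the propose-score-select loop; `propose`, `score`, and `resample` are placeholder callables wrapping LLM queries, not an actual API:
+
+ ```python
+ def ape_search(demos, propose, score, resample,
+                n_candidates=50, n_rounds=3, keep=10):
+     """Monte Carlo search over instructions (illustrative sketch).
+
+     demos:    list of (input, output) demonstration pairs
+     propose:  callable(demos, n) -> n candidate instructions (LLM call)
+     score:    callable(instruction, demos) -> float, e.g. execution accuracy
+     resample: callable(instruction) -> semantically similar variants (LLM call)
+     """
+     candidates = propose(demos, n_candidates)
+     for _ in range(n_rounds):
+         ranked = sorted(candidates, key=lambda p: score(p, demos),
+                         reverse=True)
+         best = ranked[:keep]
+         # iterative Monte Carlo step: explore variants of the best prompts
+         candidates = best + [v for p in best for v in resample(p)]
+     return max(candidates, key=lambda p: score(p, demos))
+ ```
+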
30
+ <figure>
+ <embed src="figures/illustration/APE_bayesian_inference.pdf" />
+ <embed src="figures/illustration/APE_template.pdf" />
+ <embed src="figures/main/model_size.pdf" />
+ <figcaption>(a) Natural language program synthesis finds an appropriate instruction (the program) that generates the observed demonstrations when executed by the model. We frame this as a black-box optimization problem guided by an inference procedure. (b) LLMs as inference models: we use LLMs to fill in the blank; our algorithm involves a search over candidates proposed by the inference models. (c) Interquartile mean across 24 tasks: as measured by the interquartile mean across the 24 NLP tasks introduced by , APE is able to surpass human performance when using the InstructGPT model .</figcaption>
+ </figure>
2212.07634/main_diagram/main_diagram.drawio ADDED
@@ -0,0 +1 @@
 
 
1
+ <mxfile host="Electron" modified="2023-01-18T08:53:10.776Z" agent="5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) draw.io/20.7.4 Chrome/106.0.5249.199 Electron/21.3.3 Safari/537.36" version="20.7.4" etag="57gFx2BU8HPPUROb0qCg" type="device"><diagram id="uGvCt6CMSnxvB9MV96XD">7X1dl6M4kvZv2QufM32RHCQhCS7rK7fnTO9MzdZM9ztXdUgbZ3rLaXIwWZU1v/6VAGFAAstYfNhW9u6UDVhAxKPQo1BEaIE+PL/9dxK+PP1PvIq2C+iu3hbo4wJCiIDH/uFHfuZHABRHHpPNKj/mHg582fwnKi4UR183q2hfHMsPpXG8TTcv9YPLeLeLlmntWJgk8Y/6Zet4u6odeAkfo9pj8ANfluE2ki77Y7NKn4qjgASHE79Gm8cncWuCixd8Dsur8wP7p3AV/6jcDH1aoA9JHKf5p+e3D9GWi68umPuWs+WTJdEu1fkBzH/wPdy+Fi9XPFf6U7xtEr/uVhG/3l2g9z+eNmn05SVc8rM/mILZsaf0ecu+AfZxnybxt1Iq/Mg63qWFCpma2ffNdvsh3sYJO7CK1uHrNuW/E3LgPymeKUrS6K31vUApLQa0KH6O0uQnu6T4QeAVqiswRrxCAz8OCoNucc1TRVeQkgIoBUgey7YPYmQfCkmqpYpuRKoe8fWkynrA+VL1JKn+eZcm4V2Ypuz5N/GOnfycvO42u0f26Z+7TbqXxM7ePa3LNtxuHnfs85K1ETHxvecS2rDO/q448bxZrfjP3yfRfvOf8CFriivtJd7s0ux98PsF/sjbek3jfaGVUm1CLbt4FzU0JQ41lGlEU8TBR3sACKCiBwTnqwqb7gBVINfFuoDIzf64huI0zICAPt4FrtRxXCYSI9IFLkaOD2ryxZBI8g0UXYG454uXKMTLtOv98bX4cBm4N6IJImmCuEjSBFVoAhnQBFVoguS29yXc1XRA/v3KR/X3y1wI79jJ5PEh/BN7CnYbt/HPL5l8XG4a7tbh82b7M//Jc7yL93k/Kc/noudn3Ze3/DjX912h4nfZY4RJmp0Sj8E+Peb/4iZXw+xd+dGMhpTfxLvj7O3ZkY/8M+cXmL81ZiI7di0orxUq7tUMPDSTC748cziRy7088cjMzQv/erAR/By3EvxgpcEcjapHLuhk1hMUzy29UvnT8m0O+MUcweWV2bgqmvlZedHK4RzM5Sm/cqoA9eHGbuUkQ/dBCpX7N2WdfS0F3q0AdLoChLnnB3KZV0w+P5gZfX48s1H8CJD1lf/yYEpE82gVRv56mZ2tWp/yPFn60cNaX9dNXZYvdlSXd0CtS9Cuy5q+msr0B9alNyddCuo6Az1ifLoaR9BidrBumIvrJAuej0NsBMuHImHuz+FhFYKV8avqoL4oe6GCohV6qzC5QelZIJECz5fpGRiInvmWFKjwbEmBJQWWFFhSYEmBJQUzIQXBeKQgsKRAhWdLCiwpsKTAkgJLCiwpmAcpwO54pEC0a1mBZQWWFVhWYFnBebq0rMCygoFYARiRFQDLClSAtqzAsgLLCiwrsKzAsoKZsAJF0OdgrABaVqACtGUFlhVYVmBZgWUFt8MKKilO8+MEaEROoMo0s5zAcgLLCSwnsJzAcoIb4gQz9xR4I7ICOVPasgLLCiwrsKygqUvLCiwrsKxgOlaAR2QFqqIclhVYVmBZgWUFlhVYVmBZwVxYARmRFahqSTXEHu1W73jZxEVZuWkV7p8yPYCFolCdqjSXSnzVumf8muhtk/6/8iz/9q9C+Pzzx7fql5/iy469cOVH/Ou/iqfJvhx+ln0Tv1u+Jt/LN8jfOFqJso+iOGP8miyLQ6KkGeMmj5HQLNTWdkWTWKFJcSyJtgyz3+uPoVJvcYfPvBTXAUw4wI2qbwAiWm8lf6nihwecSG0RT2oL+I22cmFIbTGwhD8rlxUFw1ofG7he/U6ejxoYzps8ILoUrR7IVWW6GiDff4vS5dOiUuIsSj59j/JKZ6BpDcoO4HZ1AEXpM9YnsMv/Y8e34UO0/RzvN4WZkkqx/da44CFO0/h5IddqS2Nu/OLXdLvZsbuLsqf8JlKdN/YeL/ydn98euRV24vV6s4wcxhiX0Uu6d1gT31o6bLW+HvBdUxYQYofAugV0FcUKPQdDRdcxYANVBVvyenrgxurpBcRzEKrpYsxyekCVJX8OCdAsHDnQ4B5gSZpYFAIYoUykKF9sZ/x2xm9n/HbGb2f85+nSzvgvZsbfc8I/KQ0YaoovHtjSAEsDLA2wNMDSgPN0aWmApQGGfCuj0gBoaYAKwZYGWBpgaYClAZYGWBowCg2gnAa45R+cjhHY7EHLCCwj0FCAZQSWEVhGcL4WF5YRqBiBPxtGYDMHLSOwjEBDAZYRWEZgGcH5WlxYRqBiBMFsGIHNGrSMwDICDQVYRmAZgWUE52txMVdGoFV0cKgsQXc2hECVMGgJgSUElhBYQmAJgSUEt0MIJnURABcwSgCDw990lECVXm0pgaUElhJYSmApgaUElhKMRAngtBkGqjoalgdYHmB5gOUBlgdYHmB5wEg8AE3LA0xXcWpIdr1ew6Us2aIKVsya2aT8tQlv+nEb7vfFbQzIljYq1RFfEquPFXIFonbZOYIVJbYqgo0MSzYiM5EsVWyYoZasCcgiVY2McwQbJsuitKVc4u09p7X1/8cfOdMtzjh8ja31JO44R73Wk0B9xoG4/WHyk22/RNkt3a7Tbee87p96HT/tetyupyXdtyQdP6XdP6VdP+16XNr+vKD1Z6ALIaADIfk52v5D6UmyWoQGejYi1PH9eu8O5OHIU9XBy9jg2b0bntK7wSm92+vRu7t6cNe5LqtA2384TM8fu+/Oqvd19bDO3td+Ut37avUrTXRFChxU74rYl+t7qruib6IrqnJOzXXFJp/B/L9FXmq6cjz/U3Lz7M+MsD3oS8IOZGEDVTVVYKKcKlKl81yrsIEkbFXl2gGFrYqUNiNsNIAlAK6WJYDeYJZAo/L8WXOZSeEIAJHgqJqCi/h685xHo+b51fR9V+77YNS+r1z5CJ+5CHcP+5d8GeLL7x+z9Qdz80pRdHwbrVMzovSR05yIK6i6sgC4GdiqfEdZLfYF/vT3r5uWeuznGIoIrHBEVRgNCEWhMTKAJIwSucq9ytyaQKhoVynXv1yXXKli94DB5AokmfF9RL4UX+MkfYof4124/XQ4+r4u1cM1v8V8H4dMlv8XpenPoqfz7QDqkj7serKo7HlS2QKlZdeTPjus5O/XuTmKVwwt1c1RhPtOQ5On7Xpy6u4iWDE4BF5Vx1o/oUEDFudtSSJEVumPX+J1+hy+SXiS95qod6hDXoJiTwj9bSlUnbsOVGlHpcr+PdBQb/Zkyftyb/YVvRma6M3oknqzTs/0Lqxnoul7pmrSL
EbK3wcZKdk46a881Ujpwwdkzh0hMxAgc7vBRko8Td8q9y5b8vWqzTI/eL/Zikt0uhGRu5FY+TPZjbRlqZo7C4z+7YIximWXGZslio3IjqDUxF5QnmrazIX56WsygFTX4SpYYZVUw8gDyNCoCgLJ1+OLxf8jMjXh6fHa99e6XJlCLDl4VDJFQ8m0bZ78+wXLFLmyTD09nJpgf7htjrzAH8KXlyR+u7Hd4ACSGIOPZFusWiwywRiwKiij0Ee6eY72N6YOlclRqEM1OTKiDnnKKolef6vYujUSjglQcUuIz0c2fi1nUP+qnuk/ZcKFhKtcD+lzvXF2eiVsQKeV3L+g7rP1xfBw6q6vUF4VIn6jrZZdX3tMtbDGYvC5iHJPR5RrESUjivZEFIASouiAiGqbvB+GDJd/ZCNE8ryg73evzw9sQIBuvGb/sw1/Rsl+QT9e0sAygBcOA3ltGCvWhuFQI428lP4lfV1FBTxmr5IuR6lnypnTXKcjZLx9ejGR7fTwvhwNU4uG8m3KIyU3mP6hIg+oK6NpLg2aONlDUW4RDt1PzJ6tVpvdo0ZPmWgZYQCDBWBD/GIZubr6DxShFkY6g+ze+CNOVmcpozqxbpuKX5pO/DF1onKPyOk+sxw9TMieoGbENWH2Sp4rqtxTJuQvcjJmMUDUeHcw2gARnDRAtPDpHgMEuaiQhBadTb44iQKkVFjbPKd5PQUdy5IDgUVqNl/ya2tWPOIAJIUoJvKz6P9idffi8dVYKR8FX32AMG1EBTjNGPVfLb54PNFigt92PWk6cerXT2Pf2EN12Tc4nH3TyKyQPZftzsq+eC3j+VBG20tHp+MCv9vZyb58jpINe2dOc8+J9DuBUo3jyvTkZSvq9h1BfTloBg8HK40cklFh5Wf+qgqsXDIWrMQq73xgRZHTKLxMge9UfeZ9QYalqBfGzRwRxjwAzibx3/WFY9U5oTlKDxToTBQrOXQujLJ0AGsygOb1xxgALaKmtRhAH1DSKUDZAa7CvB6QdQ5K9K2Z8WhBIjso361WC756w9OSfuOrXn+Nk2dJ/CdFDLVneB2LGz81l6x4m7NWuHxFnKEcZ16mNBn3jwVXgnWB6xrW4UwsIj0aLd7jJ2fbOdoWZvbHVzBE4O7xOlQGOhQhEwfu0om9ngcW86/qOXOURqczKmj0EPREWyewFenwYpFOFYUGxkX6JC61eq061WiiA9ChuLJkx32/ER0x3II8lX1Bt06vKCJOczaioldDLf/S0/woB5Pb6kqRDXZp+JFPaqbfcUFw1M/Mvmn6SXrmHI3jE8HSKjN1GiZO25cLFAnytIxyGqDfahQgOQ8kLRqdj/aCpmeT9NQdRN0NGdRaW4KWVoTnUxSubIAnxCLNraz8o6jxMVR0J/Wn4C9tzkd6zPt4nPioQs1Fw+d4GwVlqY4Avv4IYJ7Oy0FVn5NotVkWsYe/ss51HsnpojL8+zZ8iLZltKOiV/7WuKDsnUf7ccpxZKSCDpaqXZbk81gJAwOzA1/lXRhjVKuhdG5LgggMsv6M5OQG3Ey9MTf0+ap8uKFY7UnuDA1IBPoO8EK4d67jElIfqO7EoDQObHzQFV/QGza+Ir+C6MHm0Ja4MF6v99HZ0FIgaVJ/9BleMFHFsGaQ6HTDpi9nuf0jCXf7dZw8c2baOmCeVJTPFDVUVYhV1NPCqpURI+OXzMovF4lYgUQ0IRJlt0glRSH3VBnD44B1ShV4VI4CRvA4baiEmWjqGiipApQTzip8RT7TuLOKMacCKuwONhVoq7uywB+ew/SJvfKCvv9tQT9+Zf9WnSYfPnEfyQluEsk7DZHn4ZXsfX4Il/4KneRY6c5ealpvqFC4CVVS4ojYcaHKQJVR48FS5cb12Vbz5ag+f2WyjHYXrVMDOoTYl7qjYpcZ4CpWJUzUlwnaFv6t/vRWlYhU/2Rc/enMvyvcYB89PrNXqxKD/tPzLLx7+5D9VKhF7Z3U2iyLnflI4D1/9XpBDf7lc5gy/e+yI9CtxlNW67Nk+2D0KKhR/OxMFqOKsdP3Oh1cDF5zVj+uhyHwGuvUTaDq+hQCVG+IgkZDLQ6FU2OqArc+CpJiSaD1ubqvb8sbMerdCGC74bVVQc6oCuJRxUxsqHWjQPajVGev9+EyjRMmzXzecKNKApBKqQQiDKs6RoraarUxkhhQ0mmJUQ/bePntFA91I2gld8s2x7B6FGvVqe3h+lDkgmOubWWohjSWls/RMnRVhylwglZHymwRLEogRsTunJ7IEkgV6lxfaxzqY9ZPi/IZBmsyAWugr8xEobDGnrqpU788KjQ3ZFHoNqdcvkv7gYs2CopIKDWILDk06J8vqzCN2LFiU2H3S5q8LtPXRN6BtX2sKfZpmX6kaYO2gRGIwkDa7VE1AnmqWZqJEUhj46MTcih1jUKfjLM9Q2wqzwezw9VktrbhzPeqoxnPzjye4K4feFibbqlCUfDMjA3SWFbVXtlXtAUGy8cMZE/tfyfhasPkevcQ7vnu8u7n5HWXM90fG77Ruiv2NOeIZF+XJSjJv1/jNNMrWhG4huvqoUo3EAd5A3e5UXnH5/vw5S279PAjsVt6fUsr8YjFRKm531W543r+gC07rt+2sfQwkPbbUhjLMsfeuLFUuZTbjaWGzyoL0XofLr89ZscrcrvP/hTybWfTdXLV4gLr5ZtqsXmzsWXMAHVEngR9vUSQntSsOQtXZlJWkHYfJz/ChNu2+y1T6oVZBq/4fh8+b7ZcgL9G2+8RfyAzpgHSrtijwFU5vpHCSgTt0NO1EkDcfF5monRgn2QmKqahDG1rNRMarvG5WxKvM4attyXJa1ZMYklkb26DiVhDUuMYpKsIHDMkjutKZ6vhCXQoq3Ja+feRrAozK969tlWpGYi5mwICiAOQcVNAsOcQ/WZNmgKVt5kv0ny62D1wAMRT7tVULp7La2X88+UsgJlgYADK+61ARcwPGWjFC7jGUxS7s5kUs92SZFWKggU1AsWGj9NrzbVlGhxcVv1DAxTx3ydkH4yVWSCtlEj+Je10AkUWCmoM0CaNro6D9bwwmOVr8r2y7Wy9pt2qQKsvXKW173qj+SnAry8tlr2AeKDRC45sL9NvSWd+qVRQjsIiPZELA9KVXxN4WiA+uVQqJMr7mCrwA1xV4O2t9JAWTM8Gv+XeWCV4eya9cxMOgFT8sQSV6zTrQRrCb/MNSNAdguWDzusN4F32In/c7FNGytTBOCex8Jid2aT80UmO9Xpst5oPmt9w5w6UzE8AB3qlIax64aCvjABHgWMiCBwA2ZEqGxf99c25d1eCHc879DGRqHhuuCSp69Ijg0WpACA7T8u1PFPT00LLys5wpAMN0lfq5UB8cKjjUusph8OD9BTZUXjdPaUm9MDFvUvxNFYkPdJsyWT3MO4FPCvSXRHPVVL+A8UvSnwMQfgRnBmukLyZUO9y6jCgXQX7m82ahNlpgakaMFOEAerRagGoOxlRBNwKpup1HaSp3imA8s+eQfYClOwovcVSf3mgQl3qilJ/Q+0RDoDKRZqHHrHGQnZm+RQm+8IRKEKaXtP1
nX9iWJTXHhaV3W7/Eu6UDRWRWbyR5PHhT+yt2Wu54p9f8vu33a04kTfKz+wYvrKlJ3Hue5hsQvbvdvMY8njU/ZHrluFL2yU/Cu3wk14R4+RuIz5G3u05jWScVfplnLw8hbuiyXwVxOVLAnfFKgA/XC4EiHObXbada3bSFa+anUlFbQxxp8Kdz9EU/6jf5kecrOoPVrbF3uXh24Y1x9vMe9RdgcfadQ/lauFdQ0/Z/vNcRdUPv1SedBUt4yTrj3fp02b5bRfti8fb7DbpRsineW1Fl53XVR6ndt16G4dpUzirzf5ly+fw+eXbzY7HKv/X5vklTtJwl7Ygt1c2eBnYlyO+O7Dv2CSmbeF7gGkJ9qQKk75IDTuyJ6JnxFSpHOfWVFlTZU2VKVOlSpS/VHNFUdAsouwHciSAiqWbMVftQemLS4qzGktffIfdjjAYRcDmYJFVQHbIGzT960Kg/NJ6rJo0NLh2aLBDw1hDQ34r+v7g8WODwNTkVq6cep6RoZ6U301G5LBQY/Vp/xS+8I/rbfRW+NPOLHzStvAs3jhwUFHHWSx9MymhAPtI/OWZcqvSDQcdL0A+pIC42Qd/IXtP1hFRV05Z0eChK9t7Nm427MIO75iPsQN7Znpj5J7YskHXmxBEBYQfwu3ydZtn6P65MBdL/uULs0SnpOjOpBxEJ9k0YEcQIytNp74IV6jVifAUFMXE4pywGxUd/nnHRs27d2yc3hW1/9rWSS9GfaqktooT19CggKnv+K2RKEq9woFq1AEoL+fd3//V6lK/yJkUdKxS4FA1IwE8bZ3s2KK5bm6n7uTav4BRl2Z1BlvHxmaD2vFmXtCxdio1a3K8bcsJ+GOBP/3962aAZIsIrHBEVQAKCEUhUZBEE4ZU4XKBVOp8KnZtZDELqhazhKD/cuWC1pzGmBF02y5CXNC/DyJoJmZ/5akE7cMHRAYSNPGo04zGGhXRbTWIuaD/duWCHhXRbcWBJ9tVdogKsHha+4xU3g9vyg1NRxLymEhG8uxekuoULiZc/6u5m9Dp7qUo+1MpFxEUoNUFEF1MvY58Yp+4jhtIs9KTPU3BGTcxSIJFDF0XLLkL/qW1Cy7j3S5apsV8dVEWSj2hCj1POZnS/mnE6vY2eKtoHb7ma1Rn7t+cq6qzV8DACWp/ZCiRDTyf9t/x/6T5dJ5Wq53b27dSd0tRle6ydzpmTfS2ahRrCb9aNdX84GRB946PKfBg+b+1znkHekbgs6l+rVVQj8i/4xuL1O47XJwrOq3K6rwArI/f24QvX02s4IjWE2PvkO/UUNZIktLOJ6n3EukuwHEVfWgILJ9WT2FeWD66U22l1PBtotkH3TgbBc0QOaoRYQg0m610Oyaaj1ZdV9V07MRxrZZurQbMlWA7qFlqfxJsAzKapdYpoTAfbFeYcm94myhZeiloDhq8eRI0N25Ch1tAQyrn7IzRbADMFst67FZ794Nupj4elj2VD3zGWKYGwEwtmvXYrX7tnJlYZk9jsWHgJZziAXr6NAuRQax2GJ3o8vQcqNL+WR5QDx4X8dCOcxxoOc4D4IiVLuOOYG9I37my/ks1hkMPZF4LyMZYW/Cso7zPuCJ6V21c8VTjijexoxzRap3pxr6XuKwEdMGucs+6yq8XwDfmKvesq/ya0XxjrnLPusrPd5VfDLZvy1XuWVf5NVvqG3OVe9ZVfrtYvjJXObau8htG87W5yvHArnKNTLTiAXp6MQuRgaDu5JaTa1U+TuA23ExeO1y0fZ4YHhfp0K5x6pJJc2rwRDHlZTKYHqhwC6iOiAgGjgcAJR51se9DhE3kaGPrKu8zrojeVhtXsGpcwdOOK05zJGGSz8ADgwxMfcu6Sw3TBjoby30mBw/rGr9ewALXqyVh+Y0sCAydoIqznrW+A0aoOu7iQedgZnlPGQ7M1jd+1XAGXifQxoEzgk5p8PmNhstsxNY5fr5z/GLADbFTT6EdCc8QOm6NxQy3Do+tR/yq7TPCTSI7DoQBcGiNYsDhIGzd4DcF4CPkVRfAx5j4iAgm1vl9Yxg+wli1MXyEfo+J4aGjw4+XqioeoKd3Uri8G5rxNX3etGGUUDtKtJ2XIsB6buHgY5Y4IkP6vA3FgpMWSI0iH+vg7jN2CAjXxg6iGjuILZoyaCQ4se7u64XvjUWCE+vtvmY031gkOLHO7vOd3ReD7duKBCfW733NlvrGIsGJdYHfLpavLBKcWmf4DaP52iLB6cBucWOR4G0+zPlFglN4XKRTRIKP6RWnlxEJTltANUUkOLWO8j7jiuhttXGFqsYVaiPBTQ8e1jV+vYC9tUhwan3jVw3nG4sEp9Y5fr5z/GLAfQOR4NR6xK/aPt9AJDi1bvCbAvD1RYL71vl9Yxi+vkhw/1Iiwdu8kzOMBPfhcZlOsaPmmD5v/zJ21PRbUDXFjpq+9Xn3GU5Eb6sNJ75qOPFtcPigweG+9YBfL3xvLDjctw7wa0bzjQWH+9b/fb7/+2KwfVvB4b51hV+zpb6x4HDfesVvF8tXFhweWP/4DaP52oLDg0vZUbPNp1mIbMY7agbwuIj3T+EL/7jeRm+F5XhfMSLLbbjfb5ZddkTXMgiBBA6u/+V3LJpA+TdhfAB0vAD5kALiZh98WdNR9qfSNCIoQCtNOzFl3yesWwJ02PWwHm/jE9cBoIKfngOZe8ZNTPZ8jbUMC8s5wJIG3hHEVEMHesLSB2fcxCQsB14vyt9OolegwOZ9+LzZ8pf+Ndp+j9LNMqyD2L8AvGAUOMhvVWWzQW3DhegpzZrEhGoJxuP/hz9so3XK/+H8YUGZbtw/sa/LTOH8qzAxlP2KZKurD0kNTeTfr3FaKPlun2n5HbsAgJe3TNfiPPv0mP3LWsUfmLLTTcifkn15DtOnJftC3//G7vM1v4KxmTR5Zp8/fOI3px//9PZL9hT3zRZ+5Od/+ZG3lmSAYP9mr5S/5+mUrA78optUrTGQrXH+17ChxU/5w7DesH233Tzu2OHnzWq1zbpenGz+w9oJRQes9MU9e6bN7vEf8UvxpI98xBBd8ylcZX34MIU4RgLVvar4AfKpvDUKkWggI/hY7nqALzgC5LnQJcDHuBnl2o/5aSy3nD/E6pq4wwxSx8gJibFpN6KIja4BxYGLCKgNyNBBAaQUuL5XXFYfoZlGYICxhwh0IfHAeeazULTrEJc2o5RdSdEDWlgP+Qwu1f2x67Dzeg7CHgoaw67eQHtoSFwYr9f76HzDK6+wfEmT12X6mkTs8P9Gj6/bkHV/Jst4J0GbW8A6cMPCeCyZpqOky6okETPFRaQQh/ILf7/s3TAznh95W69pvC9sWau9k+2YygQq+4Nku4oDBg0WBWxArSucunySSgKX8rqKgWy+AIIq62XCWg285HADzAtRQ1wLu353QybZ1WnO+T4jUjnNW4X7p+wyUHz5HKbMEuyyI4x0KdExd637PmbEo40YU7f3jCzomJFJzZpDBHRVLu6cPPMOqUWfvRb6fH//V3b6c/K647YUuv/cbZhdL1pnT5bfoKDaFzuiqMxeEqf5OGlsAAkodDxPmpUXCPF
EgbHa6IHkbgANjB5QwLOCmX9E4fKJKUVfi9lkbgodNnRTVaknvhcv4BpQHMVHNSUOmVcUPG7uT/L4l/O4VoGr5iEGpAhcQp2gPuMjlDqy658ojH9gILoZukMGhOvV1hlVuL6mcIER4Wq4As8R7n32Nxvh+uMKVyOs+XqQS/G4wtVw/FyPcEkwrnA1wj6vR7jUHVe4Gk6A67G5FIwrXI2Z9hDpY5OIFo0qWqCasl6vUYDjClcVJMTXpf5Y4E9/b1mimuVcfSAdBQQyFbW6aohKW75CWyYyLiFQTfCEtv5itcX6BJZ71Mg6Us0buWKevoJL0lAzvMdEXwKBU1lZa/Ql9RSUDqYn1RQ01xO0eurQk5q2DqentgiRp6/I6kmDmA2nGbMpp7UFpNMjH4Rs++Zt9M2zzl9ZWsKqBsNDEatSDYYvfWCzWewCLkIOCA79vunF8upR1vX29WvJZHdpA+4Aa17AbDLpFeNUmIU6Tv0Lw2kQGMGpG8g4ddGQODUbpXFdOK1DMpgfJOvTxMYUxJNwcxYM0aAw1HBl7b9F6fKpSpGi5NP3KGdKtZXDGhLdOl6PUieGU+zy/9jxbfgQbT8z5lUs5kpU7rfGBQ9xmsbPCq6X8riv9/Frut3s2N2zylXFw0k8UcRwPr89JuHLkxOv15tl5Czj3TJ6SfcOa+LbkNNZDzp+JRoRNkZk1Yoor9UGFUg3wPighivOQmMO0KBgbGhoZBtaaMwCGmhsaKi8lv15TVkLqEpGOqmIHHdYkpPyS9nKmRRaLGHW+QqdHV8hvN5JW5Q7CXj6XaXeZ0/uAiB0SCA1PRx3gdcetNPOMEddNoIq5+k5cp4s0iygqGN9Z+SYM3i1kTvHxDwuegeO4ZkuEuKImMeNQ4NXG83TLeaRI9LgwHE9cxXzyLFpUMMtco1iHjlKDWm4GK7RNo8cr4aGLA40YbzaESGPG7km8jlvzmSMG8OGrnbSd0TM4875kMacTzi+lq/J9uf7JFx+4z6QY/I+KAcoXHaNzLGmw66M3BBeuLzIRkeIh+wOKsI5DAYNIsnvoZziAIWyjMRmII2Z5IUqy4h+eO0LDZoznH5OqB9i9dM6dA+nH7MxI7PzWc/GE90c4lp75ekL5+jElg36oZHZUA4Lny74NEZaRTDZSZg53pxJoJxWocMC5YwVr871kzNAA+CJLRvEj2faXTLThRhffB9jGcYb0jnS6YMq9xictZCNTCfFHpETbuQ4jZDBmEIe2DUyHyHDupD9MYU8WTGLiYVMxxTykEvjHQ7riUUMxxTxwMviszUWaEwhD7woPltj4Y0p5IGXxGeLZDKmkAdeEJ+tkPGIQsaq+Z3I+P/9kvJfTeilUYuhoZdA1stgWf4CA0q9/O3G9AJcTBzqTagNjbnk5WTO5Y/eHWYuRtNamDnR19xoTjcpec13aX/3bBa13uAV3oDuNawxf74cYA2ZOuyp8h7IDPMeZEDiMwDJM/AQxUT8X71lAodDptnNdK4ZmZ4KmfMrvqBAZmDWVNJgSFOpE3FiEwBHTABsDLvyNioD5vxhnfgWi4bJ0IACVTTNgHjQcHtZPEyHBzI2HmytE136glX0BV8CfTmHWCvoCx50pmernszaQCl2gRvQPBGV27WBhssIq1HVGB41mEY4pa5vwesc0RpZHBDuhetbgVGKdszAGXK1gTNK0Y4ZLkOuNlxGKdoxg2TIlQbJKAU7ZmgMudrQGKVoxwyIIVcbEKMU7ZhhMORqw2CUoh0z+IVoTBevSLRjhrxQjbmXTYkx47PpSlwpRwHDCTFSuwa9OIJsWexMjB0yEHaa7ZrEjsZs2mLHAHa6U377Q6c74XdQ6JgNE7LQ6Qmd3iPWEegMOWJdb1WVxflkVFEIQuVhHawMBLVlVE7UjsJ1MJx2bBGVE7WjmCIPpx2VY8duhJeppnODNZWFG2wTL6ryEdlt8DS0pLB0w2lJ5W6ym+AVWjpq4wzphX1N4jitkj0eHvE/8SriV/x/</diagram></mxfile>
2212.07634/paper_text/intro_method.md ADDED
@@ -0,0 +1,146 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Introduction
2
+
3
+ Transformer-based [@DBLP:conf/nips/VaswaniSPUJGKP17] pre-trained language models (PLMs) have achieved great success and become the backbones of various natural language processing tasks. However, PLMs are computationally expensive and slow in inference due to their large sizes, which limits their applications in real-world scenarios. Hence, a growing interest has been in developing compression and acceleration methodologies for PLMs.
4
+
5
+ A common approach to model compression is structured pruning, which compresses the model by removing groups of consecutive parameters, namely the pruning units. In applying structured pruning on PLMs, recent works have investigated removing units such as hidden dimensions in feed-forward layers, attention heads in the multi-head attention [@DBLP:conf/nips/MichelLN19; @li-etal-2022-probing], and coarse-grained units such as multi-head attention layers and feed-forward layers [@xia-etal-2022-structured].
6
+
7
+ <figure id="figure:mnli" data-latex-placement="t!">
8
+ <embed src="pics/mnli-m.pdf" />
9
+ <figcaption>A comparison of GRAIN and other distillation and pruning methods on MNLI-m development set at different model sizes. More details are in Section <a href="#section:Experiments" data-reference-type="ref" data-reference="section:Experiments">5</a>.</figcaption>
10
+ </figure>
11
+
12
+ However, these pruning units only span a small space of model structures and limit the exploration for better structures. For example, in the pruning of BERT$_{\textrm{base}}$ [@devlin-etal-2019-bert], which contains $144$ attention heads, the possible choices of attention heads for the pruned model are limited. Block Pruning [@lagunas-etal-2021-block] extends the pruning units by considering blocks in the weight matrices, but it is not a fully structured pruning method and cannot achieve large speedups.
13
+
14
+ In this work, we propose **GRAIN** (**Gra**dient-based **In**tra-attention pruning), a structured pruning method that prunes PLMs with finer pruning units. In the following, we present the method from three aspects: pruning units, pruning algorithm, and training objectives.
15
+
16
+ **Pruning Units**Unlike attention heads pruning where the pruning unit is a single head, we propose intra-attention pruning, which inspects and prunes the structures inside attention heads. Intra-attention pruning greatly expands the search space of model structures, making the resulting models more likely to find better structures. However, directly applying intra-attention pruning yields fragmented models, i.e., models with many small heads. The fragmented models have relatively large latencies on devices like GPUs. To overcome the shortcoming, we introduce structure regularization, which encourages prioritizing specific units for pruning. Structure regularization helps generate more regular structures and achieve lower latencies.
17
+
18
+ **Pruning Algorithm.** Pruning algorithms decide which units are removed. We adapt the gradient-based pruning algorithm [@DBLP:conf/nips/MichelLN19] for intra-attention pruning. Gradient-based pruning is a lightweight method that estimates the importance of the pruning units with gradient-based scores and then prunes the least important ones. In addition, we conduct the pruning in an iterative manner [@DBLP:conf/iclr/ZhuG18], i.e., the model is gradually pruned during fine-tuning, as sketched below. The iterative approach has been employed in combination with pruning algorithms such as Movement Pruning [@DBLP:conf/nips/Sanh0R20] and Magnitude Pruning [@DBLP:conf/iclr/ZhuG18], but few works have combined it with gradient-based pruning. We find that iterative gradient-based pruning is especially effective despite its simplicity.
19
+
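+ For illustration, below is a hedged sketch of such an iterative pruning loop; the cubic sparsity schedule follows @DBLP:conf/iclr/ZhuG18, while the helper names and the Hugging Face-style loss access are our assumptions:
+
+ ```python
+ def cubic_sparsity(step, total_steps, final_sparsity):
+     """Cubic schedule of Zhu & Gupta (2018): prune fast early, then slow down."""
+     frac = min(step / total_steps, 1.0)
+     return final_sparsity * (1.0 - (1.0 - frac) ** 3)
+
+ def iterative_prune(model, optimizer, batches, total_steps, final_sparsity,
+                     score_units, prune_least_important):
+     """Gradually prune during fine-tuning (illustrative loop).
+
+     score_units:           callable(model) -> importance score per unit
+     prune_least_important: callable(model, scores, sparsity) -> None
+     """
+     for step, batch in enumerate(batches):
+         loss = model(**batch).loss  # assumes a HF-style model output
+         loss.backward()
+         target = cubic_sparsity(step, total_steps, final_sparsity)
+         prune_least_important(model, score_units(model), target)
+         optimizer.step()
+         optimizer.zero_grad()
+ ```
+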
20
+ **Training Objectives.** As another common approach to model compression, knowledge distillation offers highly effective training objectives [@jiao-etal-2020-tinybert]. Pruning with a distillation objective shows improved performance [@DBLP:conf/nips/Sanh0R20; @xia-etal-2022-structured]. However, in gradient-based pruning, the distillation objectives may disturb the estimation of importance scores. We propose a gradient separation strategy that uses different gradients for model optimization and importance score estimation. We show that this method leads to better performance.
21
+
22
+ GRAIN performs task-specific pruning without additional pre-training or data augmentation. In the experiments, we compare GRAIN with strong pruning and distillation baselines on GLUE, SQuAD, and CoNLL 2003. GRAIN notably outperforms the comparable methods in the high-sparsity regime. A demonstration of the results on MNLI is shown in Figure [1](#figure:mnli){reference-type="ref" reference="figure:mnli"}. While keeping only 5% of the transformer parameters, GRAIN maintains $93\%\sim 99\%$ of BERT$_{\textrm{base}}$'s performance with $6\sim 7\times$ speedups across different tasks. Furthermore, GRAIN still achieves competitive results even under extreme compression, where only $3\%$ of the transformer weights remain.
23
+
24
+ # Method
25
+
26
+ A Transformer block [@DBLP:conf/nips/VaswaniSPUJGKP17] is mainly composed of a multi-head attention (MHA) layer and a feed-forward network (FFN) layer.
27
+
28
+ Let $\bm{X}\in \mathbb{R}^{n\times d}$ be the input sequence, where $n$ is the length, and $d$ is the hidden size. An attention head is parameterized by the matrices $\bm{W}^Q_i,\bm{W}^K_i,\bm{W}^V_i, \bm{W}^O_i \in \mathbb{R}^{d_h\times d}$. Its output is[^2] $$\begin{equation}
29
+ \label{eq:att}
30
+ \mathrm{Att}_{i}(\bm{X}) = \mathrm{softmax}\left({\bm{Q}_i\bm{K}_i^\mathsf{T}}/{\sqrt{d}}\right)\bm{V}_i\bm{W}^O_i,
31
+ \end{equation}$$ $$\begin{equation}
32
+ \bm{Q}_i=\bm{X}(\bm{W}^Q_i)^\mathsf{T}, \bm{K}_i=\bm{X}(\bm{W}^K_i)^\mathsf{T}, \bm{V}_i=\bm{X}(\bm{W}^V_i)^\mathsf{T} \nonumber,
33
+ \end{equation}$$ where $d_h$ is head size, and $i$ is the head index. An MHA layer contains $N_h=d/d_h$ attention heads $$\begin{equation}
34
+ \label{eq:mha}
35
+ \mathrm{MHA}(\bm{X}) = \sum\nolimits_i^{N_h} \mathrm{Att}_{i}(\bm{X}) .
36
+ %\sum_i^{N_h} \mathrm{Att}_{\bm{W}^Q_i,\bm{W}^K_i,\bm{W}^V_i, \bm{W}^O_i}(\bm{X}).
37
+ \end{equation}$$ Following the MHA layer is the feed-forward network layer. It consists of two linear layers and a GeLU activation [@hendrycks2016gelu] $$\begin{equation}
38
+ \textrm{FFN}(\bm{X}) = \textrm{GeLU}(\bm{X}\cdot \bm{W}_1)\cdot\bm{W_2},
39
+ \label{eq:ffn}
40
+ \end{equation}$$ where $\bm{W}_1 \in \mathbb{R}^{d\times d_{f}}$, $\bm{W}_2 \in \mathbb{R}^{d_{f}\times d}$, and $d_{f}$ is the intermediate hidden size. Typically $d_{f} > d$.
41
+
42
+ A transformer block contains other components, such as LayerNorm and residual connections, but they account for only a small fraction of the parameters.
43
+
44
+ <figure id="figure:GRAIN" data-latex-placement="th">
45
+ <embed src="pics/GRAIN.pdf" />
46
+ <figcaption>An overview of GRAIN. Left: The distillation and pruning methodology of the student model. Right: The pruning units in intra-attention pruning. We show three heads for illustration. Each head can be pruned to a different size. The pruned units are in white. The dashed lines indicate that the connected rows or columns should be removed together.</figcaption>
47
+ </figure>
48
+
49
+ Gradient-based pruning [@DBLP:conf/nips/MichelLN19] defines the importance score of a pruning unit $w$ as the variation of the loss with respect to the unit: $$\begin{equation}
50
+ \label{ISeq}
51
+ \mathrm{IS}(w) = \mathbb{E}_{x\sim X}\left\lvert \frac{\partial \mathcal{L}(x)}{\partial w}w \right\rvert,
52
+ \end{equation}$$ where $X$ is the data distribution. The term in the absolute value is the first-order Taylor approximation of the loss $\mathcal{L}$ around $w=0$. To apply [\[ISeq\]](#ISeq){reference-type="eqref" reference="ISeq"} in PLM pruning, $w$ should be set accordingly. For example, by setting $w$ to $\bm{W}^O_i$, Equation [\[ISeq\]](#ISeq){reference-type="eqref" reference="ISeq"} gives the importance score of the head $h_i$; by setting $w$ to the $i$-th row of $\bm{W}_2$, Equation [\[ISeq\]](#ISeq){reference-type="eqref" reference="ISeq"} gives the importance score of the $i$-th FFN hidden dimension. A lower importance score implies that the loss is less sensitive to the unit. The pruning units are sorted and then pruned in the order of increasing scores.
53
+
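+ As a concrete illustration of Equation [\[ISeq\]](#ISeq){reference-type="eqref" reference="ISeq"} for row-shaped units, here is a minimal PyTorch sketch; the function name and the per-row aggregation are illustrative, not the released GRAIN code.
+
+ ```python
+ import torch
+
+ def row_importance(weight: torch.Tensor) -> torch.Tensor:
+     """|dL/dw * w| aggregated per row, after loss.backward() has run.
+
+     Each row of `weight` is one pruning unit; a unit that spans rows of
+     several matrices is scored by summing the corresponding calls.
+     """
+     assert weight.grad is not None, "run a backward pass first"
+     return (weight.grad * weight).sum(dim=1).abs()
+ ```
+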
54
+ GRAIN performs task-specific intra-attention pruning together with knowledge distillation. The overview of GRAIN is depicted in Figure [2](#figure:GRAIN){reference-type="ref" reference="figure:GRAIN"}. Following previous works, we only include the encoder in counting the model size unless otherwise specified. We refer to the size of the pruned model relative to the unpruned model as *model density*: $$\textrm{model density} = \frac{\texttt{SizeOf(} \textrm{pruned model}\texttt{)}}{\texttt{SizeOf(} \textrm{original model}\texttt{)}}.$$ *Sparsity* is equal to one minus model density.
55
+
56
+ FFN hidden dimensions and attention heads are common pruning units in PLM pruning studies. These pruning units have been treated as atomic in structured pruning. However, attention heads include finer pruning units and are not really atomic. Equation [\[eq:mha\]](#eq:mha){reference-type="eqref" reference="eq:mha"} shows that the output of an MHA layer is the sum of the individual heads, so different heads can be pruned independently. To be specific, we can remove rows of the matrices $\bm{W}^Q_i,\bm{W}^K_i,\bm{W}^V_i, \bm{W}^O_i$ to reduce the head size. Further, from Equation [\[eq:att\]](#eq:att){reference-type="eqref" reference="eq:att"}, we see that the output dimensions of $\bm{W}^Q_i,\bm{W}^K_i$ and the input dimensions of $\bm{W}^V_i, \bm{W}^O_i$ can be different. This gives additional freedom in setting the dimensions of attention heads.
57
+
58
+ Based on the above observation, we introduce two kinds of intra-attention pruning units: query units, namely the rows of $\bm{W}^Q_i,\bm{W}^K_i$; and value units, namely the rows of $\bm{W}^V_i, \bm{W}^O_i$. We keep FFN hidden dimensions but discard attention heads as the pruning units since the intra-attention pruning units are more structurally fundamental. Each pruning unit takes $2d$ parameters. The new set of pruning units greatly expands the structure space.
59
+
60
+ In the actual implementation [@wolf-etal-2020-transformers], the parameters of all heads in an MHA layer are gathered and stored in four large matrices $\bm{W}^Q,\bm{W}^K,\bm{W}^V, \bm{W}^O \in \mathbb{R}^{d \times d}$. The parameters of the $i$-th head are stored in rows $i \cdot d_h$ through $(i+1)\cdot d_h$. We prune query and value units from the large matrices by removing the corresponding rows. The pruning units are illustrated in the right part of Figure [2](#figure:GRAIN){reference-type="ref" reference="figure:GRAIN"}.
61
+
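+ As an illustration of this layout, the sketch below drops output rows from a projection implemented as an `nn.Linear` (a simplified sketch, not the authors' implementation); for the output projection $\bm{W}^O$, the same indices would be dropped from the input dimension instead, so that the layer output keeps size $d$.
+
+ ```python
+ import torch
+ from torch import nn
+
+ def keep_out_rows(layer: nn.Linear, keep: list[int]) -> nn.Linear:
+     """Shrink a projection by keeping only the output rows in `keep`."""
+     idx = torch.as_tensor(keep)
+     new = nn.Linear(layer.in_features, len(keep), bias=layer.bias is not None)
+     with torch.no_grad():
+         new.weight.copy_(layer.weight[idx])   # weight rows = output dims
+         if layer.bias is not None:
+             new.bias.copy_(layer.bias[idx])
+     return new
+ ```
+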
62
+ Since intra-attention pruning removes the units inside attention heads, it tends to generate models with many small heads of different sizes, but the total number of heads can still be large. We refer to this kind of structure as fragmented (see the upper panel in Figure [6](#figure:structure){reference-type="ref" reference="figure:structure"} for an example). The fragmented structure has low efficiency on devices like GPUs since there are still many attention modules left in the model, and these heads are hard to parallelize.
63
+
64
+ To remedy this, we introduce **Structure Regularization** (**StructReg** for short) to encourage less fragmented structures. Intuitively, to avoid small heads, the pruning process should first prune the units in the small heads until the heads are empty, at which point they can be safely removed.
65
+
66
+ To be general, we define $D(\mathcal{M},\mathcal{W})$ as the density of a set of pruning units $\mathcal{W}$ in module $\mathcal{M}$, i.e., the ratio of the remaining units in $\mathcal{M}$. The regularized importance score of a unit $w\in\mathcal{W}$ is: $$\begin{equation}
67
+ \label{eq:sr}
68
+ \textrm{IS}^r(w) = \textrm{IS}(w)\cdot \tanh(D(\mathcal{M},\mathcal{W})/\alpha) ,
69
+ \end{equation}$$ where $\alpha$ is the regularization strength. The lower the density of the units in $\mathcal{M}$, the lower the regularized scores of the units. Hence, the units in low-density modules will be pruned with priority until all the units in $\mathcal{M}$ have been pruned, leaving fewer low-density modules in the pruned model.
70
+
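+ A small sketch of Equation [\[eq:sr\]](#eq:sr){reference-type="eqref" reference="eq:sr"}, assuming the density of each unit's module is tracked in a tensor aligned with the scores (the names and the value of $\alpha$ are illustrative):
+
+ ```python
+ import torch
+
+ def regularized_scores(scores: torch.Tensor, density: torch.Tensor,
+                        alpha: float = 0.1) -> torch.Tensor:
+     """IS^r(w) = IS(w) * tanh(D(M, W) / alpha).
+
+     Units in low-density modules get uniformly smaller scores, so they are
+     pruned first and the module is eventually emptied and removed.
+     """
+     return scores * torch.tanh(density / alpha)
+ ```
+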
71
+ StructReg can be applied on different levels by choosing different $\mathcal{M}$s and $\mathcal{W}$s. We apply it to intra-attention structures. We set $\mathcal{M}$ to each attention head and $\mathcal{W}$ to the value units in $\mathcal{M}$. Heads with fewer value units will be pruned with priority until empty, resulting in fewer small heads.
72
+
73
+ **Distillation Objectives** Knowledge distillation provides effective objectives for transferring knowledge from a large model to a small model. The simplest distillation objective is a cross-entropy loss between the student's and the teacher's prediction probabilities $$\begin{align}
74
+ \mathcal{L}_{\textrm{CE}} = -\bm{p}^{(T)}_{\tau}\cdot \log \bm{p}^{(S)}_{\tau},
75
+ \label{eq:ce}
76
+ \end{align}$$ where $T$ and $S$ denote *teacher* and *student* respectively, and $\bm{p}_{\tau}=\textrm{softmax}(\bm{z}/\tau)$ is the scaled probability with temperature $\tau$ and logits $\bm{z}$. By integrating logits distillation with hidden layer representation distillation [@jiao-etal-2020-tinybert; @DBLP:conf/acl/SunYSLYZ20], the performance of knowledge distillation can be further improved:
77
+
78
+ $$\begin{align}
79
+ \mathcal{L}_{\textrm{Hidden}} = \sum_{(i,j)\in \mathcal{I}} \textrm{MSE}(\bm{H}^{(S)}_i \bm{W}_i, \bm{H}_j^{(T)}),
80
+ \label{eq:hidden}
81
+ \end{align}$$ where $\mathcal{I}$ is the set of layer index pairs, $\bm{H}_{i} (i>0)$ is the hidden states from the $i$-th transformer block ($\bm{H}_0$ is the output from the embedding layer), and $\bm{W}_i$ is a trainable linear mapping. We employ the sum of $\mathcal{L}_{\textrm{CE}}$ and $\mathcal{L}_{\textrm{Hidden}}$ as the total loss.
82
+
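+ For concreteness, a sketch of $\mathcal{L}_{\textrm{Hidden}}$ with one trainable mapping per matched student layer (the container names are assumptions):
+
+ ```python
+ import torch
+ from torch import nn
+
+ def hidden_distillation_loss(student_h, teacher_h, mappings, pairs):
+     """Sum of MSE(H_i^S W_i, H_j^T) over the layer-index pairs in `pairs`.
+
+     `mappings[i]` is the trainable linear map W_i for student layer i;
+     `student_h` / `teacher_h` are lists of hidden-state tensors.
+     """
+     return sum(nn.functional.mse_loss(mappings[i](student_h[i]), teacher_h[j])
+                for i, j in pairs)
+ ```
+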
83
+ **Gradient Separation** When applying distillation together with gradient-based pruning, the hidden-layer matching loss $\mathcal{L}_{\textrm{Hidden}}$ must be treated carefully. In gradient-based pruning, units are pruned based on how significantly they affect the model predictions. Thus, the importance score should be calculated solely from the cross-entropy loss, and we should prevent gradients from other losses, such as $\mathcal{L}_{\textrm{Hidden}}$, from affecting the estimation of the importance scores. Therefore, we propose to use the gradient from $\mathcal{L}_{\textrm{CE}}$ for both model optimization and importance score computation, while using the gradient from $\mathcal{L}_{\textrm{Hidden}}$ for model optimization only. We call this strategy **gradient separation** (GS). The gradient flows of the different losses are illustrated in Figure [2](#figure:GRAIN){reference-type="ref" reference="figure:GRAIN"}.
84
+
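+ One way to realise gradient separation in PyTorch is two backward passes per step: read the importance scores right after back-propagating $\mathcal{L}_{\textrm{CE}}$, then accumulate the $\mathcal{L}_{\textrm{Hidden}}$ gradients before the optimizer update. The sketch below illustrates this under those assumptions; it is not the released training loop.
+
+ ```python
+ def separated_step(optimizer, loss_ce, loss_hidden, pruned_weights):
+     """Importance scores from L_CE gradients only; update uses both losses."""
+     optimizer.zero_grad()
+     loss_ce.backward(retain_graph=True)      # gradients of L_CE alone
+     batch_scores = [(w.grad * w).sum(dim=1).abs().detach()
+                     for w in pruned_weights]
+     loss_hidden.backward()                   # adds L_Hidden grads on top
+     optimizer.step()
+     return batch_scores
+ ```
+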
85
+ **Iterative Pruning** Similar to @DBLP:conf/nips/Sanh0R20, we take an iterative approach to pruning, i.e., the model size is gradually reduced during fine-tuning. We denote the total number of training steps as $N$ and the current step as $i$. The model is pruned to density $s(t)$ at every step, where $s(t)$ is the density scheduler as a function of the training percentage $t = i/N \in [0,1]$. We give the exact form of $s(t)$ shortly. Notice that in standard gradient-based pruning, the importance score is estimated from all the examples in the dataset $X$ (see Equation [\[ISeq\]](#ISeq){reference-type="eqref" reference="ISeq"}). It would be impractical to estimate this score at every step. Therefore, we define an exponentially smoothed importance score $\overline{\textrm{IS}_i}(w)$ which can be computed efficiently during training and used for pruning at step $i$: $$\begin{equation}
86
+ \label{smooth}
87
+ \overline{\textrm{IS}_i}(w) = \beta\cdot\overline{\textrm{IS}_{i-1}}(w) + (1-\beta)\cdot\textrm{IS}_i(w),
88
+ \end{equation}$$ where $\textrm{IS}_i(w)$ is the importance score of the pruning unit $w$ calculated with a single batch at step $i$, and $\beta$ is the smoothing factor. The smoothed score avoids large variance and leads to more stable pruning. Equation [\[smooth\]](#smooth){reference-type="eqref" reference="smooth"} can also be applied to the regularized score simply by replacing $\textrm{IS}(w)$ with $\textrm{IS}^r(w)$.
89
+
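+ The smoothed score is a standard exponential moving average; a one-line sketch (the value of $\beta$ is illustrative):
+
+ ```python
+ def ema_update(prev_score, batch_score, beta=0.98):
+     # Equation (smooth): blend the running score with the current batch score.
+     return beta * prev_score + (1.0 - beta) * batch_score
+ ```
+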
90
+ **Scheduling** Following @DBLP:conf/iclr/ZhuG18, we use a cubic density scheduler $s(t)$ $$\begin{equation}
+ \label{eq:cubic}
+ s(t) =
+ \begin{cases}
+ 1 & 0\le t<p_s \\
+ s_f + (1-s_f)\left(1-\frac{t-p_s}{p_e-p_s}\right)^3 & p_s\le t\le p_e\\
+ s_f & p_e < t \le 1 \nonumber \\
+ \end{cases}.
97
+ \end{equation}$$ The complete process can be divided into three stages, as depicted in Figure [3](#figure:procedure){reference-type="ref" reference="figure:procedure"}. The first stage is the warm-up stage. We train the student model for $N p_s$ steps with the distillation objective, where $0<p_s<1$ is a hyperparameter. In the second stage, we gradually prune the model with distillation for $N (p_e - p_s)$ steps. The model density $s$ decreases from the initial density ($100\%$) to the target density $s_f$ following the schedule. In the last stage, the model structure is fixed, and we continue training the model with distillation to recover performance [@DBLP:conf/nips/Sanh0R20; @DBLP:conf/iclr/ZhuG18]. The three stages take place consecutively, and the whole process is completed in a single run of fine-tuning.
98
+
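+ The schedule is straightforward to reproduce; a sketch of $s(t)$ with the three stages marked (hyperparameter values are left to the user):
+
+ ```python
+ def cubic_density(t: float, s_f: float, p_s: float, p_e: float) -> float:
+     """Cubic density scheduler s(t) for training fraction t in [0, 1]."""
+     if t < p_s:                                  # stage 1: warm-up
+         return 1.0
+     if t <= p_e:                                 # stage 2: gradual pruning
+         return s_f + (1.0 - s_f) * (1.0 - (t - p_s) / (p_e - p_s)) ** 3
+     return s_f                                   # stage 3: recovery
+ ```
+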
99
+ <figure id="figure:procedure" data-latex-placement="t">
100
+ <embed src="pics/scheduler.pdf" />
101
+ <figcaption>The training stages of GRAIN.</figcaption>
102
+ </figure>
103
+
104
+ The pruning described above reduces the parameters in the transformer layers, while another large fraction of the parameters, stored in the word embedding matrix, is untouched. We apply singular value decomposition (SVD) to reduce the embedding size. SVD decomposes the word embedding matrix $\bm{E}\in\mathbb{R}^{q\times d}$ as $\bm{E} = \bm{U}\bm{\Sigma}\bm{V}$, where $q$ is the vocabulary size, $d$ is the hidden size, $\bm{U}\in \mathbb{R}^{q \times d}$, $\bm{V}\in \mathbb{R}^{d \times d}$, and $\bm{\Sigma}$ is a diagonal matrix composed of singular values. $\bm{E}$ can be approximated as $\bm{E}_r$ by selecting the top $r$ singular values together with the corresponding $r$ columns of $\bm{U}$ and $r$ rows of $\bm{V}$ $$\begin{equation}
105
+ \bm{E}\approx\bm{E}_r = \bm{U}_r \Sigma_r \bm{V}_r = \bm{W}_r \bm{V}_r,
106
+ \end{equation}$$ where $\bm{W}_r\in\mathbb{R}^{q \times r}$ and $\bm{V}_r\in \mathbb{R}^{r \times d}$. The original embedding $\bm{E}$ is now replaced by $\bm{W}_r$ and $\bm{V}_r$. The embedding size is reduced from $qd$ to $(q+d)r$.
107
+
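+ The factorization itself is a few lines of PyTorch; a sketch that folds $\bm{\Sigma}_r$ into $\bm{W}_r$ as in the text:
+
+ ```python
+ import torch
+
+ def factorize_embedding(E: torch.Tensor, r: int):
+     """E (q x d) ~= W_r (q x r) @ V_r (r x d) from the top-r singular values."""
+     U, S, Vh = torch.linalg.svd(E, full_matrices=False)
+     W_r = U[:, :r] * S[:r]    # U_r Sigma_r: scales column j by sigma_j
+     V_r = Vh[:r]
+     return W_r, V_r
+ ```
+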
108
+ Embedding factorization has little effect on latency but significantly reduces model size. Some works [@xia-etal-2022-structured; @lagunas-etal-2021-block] do not prune embeddings. We also conduct experiments without embedding factorization for comparison and name this setting **GRAIN w/o EF**.
109
+
110
+ <figure id="figure:main_results" data-latex-placement="th">
111
+ <embed src="pics/merge_cofi_paper.pdf" />
112
+ <figcaption>Performance of GRAIN and baseline methods at different model densities. The results of CoFi, AutoTinyBERT, and MobileBERT are taken from the original papers and the CoFi repository. Since the precise sizes of CoFi models around density 10%, 15%, and 20% are unknown, we use 10%, 15%, and 20% as estimates instead. </figcaption>
113
+ </figure>
114
+
115
+ ::: table*
+ | **Model** | **QNLI** (Acc) | **MNLI** (m/mm Acc) | **QQP** (Acc) | **SST-2** (Acc) | **SQuAD** (F1 / EM) | **CoNLL-03** (F1) | **Model Size** | **Total Size** |
+ |:---|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|
+ | BERT$_{\textrm{base}}$ (teacher) | 91.9 | 84.7 / 85.0 | 91.2 | 92.9 | 88.6 / 81.1 | 91.2 | 85.1M | 108.9M |
+ | *5% Model Density* | | | | | | | | |
+ | TinyBERT$_4$$^\dagger$ | 87.4 | 80.9 / 81.9 | 89.9 | 90.9 | 81.6 / 71.9 | 84.9 | 4.7M (5.5%) | 14.6M |
+ | AutoTinyBERT$^{\mathsection}$ | 88.0 | 79.4 / - | 87.7 | 88.8 | **84.6** / - | - | 4.3M (5.0%) | 14.5M |
+ | Block Pruning$^\dagger$ | 83.0 | 78.9 / 78.6 | 89.2 | 86.1 | 80.7 / 71.0 | 84.0 | 4.6M (5.4%) | 28.8M |
+ | CoFi (reimpl.)$^\dagger$ | 85.3 | 79.8 / 79.6 | 89.8 | 89.8 | 79.0 / 69.2 | 85.0 | 4.2M (4.9%) | 28.2M |
+ | CoFi$^{\mathsection}$ | 86.1 | 80.6 / 80.7 | 90.1 | 90.6 | 82.6 / - | - | 4.7M (5.5%)$^\ddagger$ | 29.0M$^\ddagger$ |
+ | GRAIN | 89.0 | 82.2 / **82.5** | 90.4 | 91.4 | 83.6 / 73.7 | **88.3** | 4.3M (5.0%) | 10.7M |
+ | GRAIN w/o EF | **89.1** | **82.4** / 82.2 | **90.5** | **91.6** | 83.4 / 73.2 | **88.3** | 4.3M (5.0%) | 28.1M |
+ | *3% Model Density* | | | | | | | | |
+ | GRAIN | **87.8** | 80.7 / 81.1 | 90.0 | 90.4 | **79.5 / 68.4** | 86.8 | 2.6M (3.0%) | 9.0M |
+ | GRAIN w/o EF | 87.6 | **81.0** / **81.2** | **90.2** | **91.0** | 79.0 / 67.3 | **87.2** | 2.6M (3.0%) | 26.4M |
+ :::
2301.12217/main_diagram/main_diagram.drawio ADDED
@@ -0,0 +1 @@
 
 
1
+ <mxfile host="app.diagrams.net" modified="2022-06-20T17:24:58.118Z" agent="5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/102.0.0.0 Safari/537.36" version="20.0.1" etag="G6zeBM6vHlNNNG3xM57k" type="github"><diagram id="ADzLoAfZzuHZpFxTO40R">5Vttc6M4Ev41rrr7EAq9IOCj49izVzU3k92kanY+XQkQNjsY+UBO7Pv1JwHCvNnBMY53dlMpIzVS66W7H7VaYoJm692nlG5W/+YBiyfQDHYT9DCB0LFt+asI+4KAbbMgLNMoKEjgQHiK/sdKoi62jQKWNQoKzmMRbZpEnycJ80WDRtOUvzaLhTxutrqhS9YhPPk07lK/RYFYlcOC9oH+C4uWK90yIG7xZk114XIk2YoG/LVGQvMJmqWciyK13s1YrOZOz0tRb3HkbdWxlCViSAVYVHih8bYc2wSSWFa9D7nkoKYw5mn+hvx3q3p1P4HIzP/qJDMT+3J2NFExuMty2U1lAUA2u7zooRJZque3Fc/bWW94xgKZ/MS5esz4ZoIWMjGxZ+ofmo90q/o+T5ZxlK28ff3dNFF1VnIupYg9vlV9/5ow+ftA9zmfYlhyKoqRFY2XUqg6DwXbKfpKrGNJADKZiZT/YLNyHhIumcrBRXHcItE4WiYyG7NQcXhhqYikykxL8joKAtXI/esqEuxpQ33V4qu0D0lL+TYJWKBb/MGEvyozqrezSggIE+xaD1XPVStsd1T4oFIpaYqMr5lI1azpChgXVUozdDWL14NSWxgVtFVNoVGpvbS0o2XF+qBqMlFqW7/moc7Us0AaWZnlqVjxJU9oPD9Qa9NkytyhzGeudCWfrj+YEPsSMehW8KYs2S4Sv6vqhlXmvpfMVPphV8/sdSaRI6tVUtnvmp/KHKrlOV2vJbjSZrTg1GAbYsv4NvVLUikWQdMlK6ecDBZuymIqopcm90skhd/CiPfY/lBgGYoR1v3T/HEiLaMPJghdKytLvEw9amWbL97GENXO7Pn3ou4oeCIHbTvOdIquDSqql6VdACzzAc1WXcQxqw7qhe0UBm1YGkl1YqlqOUqWmoFe1FRmGdMsK9NjYBaCDcxytOtQxyzkdDELj2AJ1k0xq45YFX69iVnwbNDSWgIvAzHSBTFwQxQjl6MYPuLBPKc0yUKerlma444v/d10KEA0rbTPjuvYUbfTOlxI2fg+s8KwD2AQQS4KxrI/1LQ/NND+xpChfRP7+5j1v2En5vh2UlZ95FGu7aU4oUn6xalZFBZd1mpJqurGIOE5t3X47Pd4fHV518R/ffDUGtDQCud26On+Hbz1C2R37VXumPVCw2rYr+22mByx315WsMVLw7jmVQyyw2uapnRfK7ZRBbLjvQY26e3zsa51yhOrpbpFD94LTNrWruEaPLDbOAOBxZwA9zkDDvQQIddxxivtqzkDAFvXcQYA6Ezfh2KSA/HNYgiK+aPefE3qm6vTyIQ/Cpks2zFa0aWhrsW5cGIR0NvOaPAAb6pnjV0fHLzts2+17QM9wauPWxExaW5PbGLrFfL8FRF1VkQdCxt5Rez02jHf6tqp8per/E8TnD1DIT8A9AgwoGPZAJe/TSFhc5DyvEdex0O02YYmF3kw80REQk3Og1xrfBFxxY6qSGkzeCqXo/VW0KJA5e0UzV/b2wnDEPp+n7cTEI9YI3k7GLf8ULvr7VRHI6N7O6djjyz2+Ovl1pizkVkdZx51P0bM5ppkmG9tqPNc288ZulY1dSFgId3GBzU8DRk9sUsIrwQj5y4WCLsNNSTk9PZJefvN8uR0eYhP8b98cbliJPbT9Fm+/sLEK09/fPimK3R81g9DnmNha6wTEH13QMMQsDswBPTmaHQYOh2C/dPC0JVA4yMc2qa0CRnmRbzNyG5Fd8aL9OpA5SUWDo9Y+NP883ymjHyCFjv5+PbL/Le5ytrqUPk1KKrPvzz/6/l7ThAF5bf550Ml45BsFdAMnr8/FlwfRjzorUIvx4I145z2njyCbeDXCGgEW7vvSq1qaITNHqdojDsk4KZhaVBHnzcOZG+GHwDbTflYh13tuTtiQNx2uFmvB+PvaWBfVLYlbKnlG5WM1vmFvcpgPlOPxY88i/LdCHrwuBB8LQvE6sU99X8scy3o4nzb5ITSiXuabYprhGG0U5pznzc41VRTU2R6JYS6hDhVQ4eLzf4lyoyUUWkLLOB+ZkRckpncEy2kWFkmZ2nxn7yu9PgWCRPGRl2jGMMyCTasZpiAgK5tutjQGlK3Tttsh03eY5+609dYCL6ot+qndblH6c0Q0sBityeNytyQf5fO3AASuP20XX2MF/cstwMv1Qr9dUDVvjL3d78CC6tdVT2pBC0f72XaJknODnJc7BSt9KXb4xnRcaqiOMfiPn8Bx8k2+xwn9zqOk15Lx73TOdhD/xUi6UyAyv1+dMgJ5/wRgaqiZULHtYpSXaUdz1u/9jXv4ap2+ijmskgCafsH3btc2O2JJIyigqijgl8TIQe63F9BWr6cEhVBvERe5x2SXSQZpxWCs92OYPR2fnS59J0l/Gx+d+FRG76UkJzxTMiWC8/7RWrdQloaUxHWhRcLFgaB6VnIhjikUAo/p1oBMENEXRCErh2o2PuCyGl1qe842PUDDGGIgOU5KFCevNxA3sltWiaHcKdHcpcymcyYkb0sr7B8ENBSERMapKMkFukqCSEGHkFNrLfVhCXBVH1tdTDHmgVfYlvv2UfXZ6XHdDTt0nAddppiIS0WQ/faRN9rr46eUZPRSEfPxG7FB8jpo+dOefvM8u555fW4xzp96L0H/rPBmwod7OAmWUqEW6swQ7JUcBY6zA08ZEHgADtwHAliHgoDTABwLFPa/WixBey2vhyzeo5CYdfKADHaEaN3gY/91xWibTkmsHy5ukDToZ4ZMkwxgYGFXdcLsDmeEFHrIEkK0TB7zpJ6LvMrOY4gxuPnBIOvLejwkOlVkrsrPy1T79Ol9w9oybVKfSNaT/yzd+8yLxbxjNFULtYDrzH8+bxTNJaP0To5RnYVOx7fE5XZw3fPBbIfPh5H8/8D</diagram></mxfile>
2301.12217/paper_text/intro_method.md ADDED
@@ -0,0 +1,191 @@
 
1
+ # Introduction
2
+
3
+ Conversational information seeking is the process of acquiring information through conversations [@Zamani:ea:2022]. Recent years have seen an increasing number of applications aiming to build conversational interfaces based on information retrieval [@Radlinski:Craswell:2017] and user recommendation [@Jannach:ea:2021]. The popularity of intelligent voice assistants such as Amazon's Alexa or Apple's Siri has further stimulated research on question answering over general-purpose knowledge graphs (e.g., Wikidata). Key to question answering in this context is the ability to ground natural language onto concepts, entities, and relations in order to produce an *executable* query (e.g., [Sparql]{.smallcaps}) which will retrieve an answer or *denotation* from the knowledge graph (KG).
4
+
5
+ This grounding process, known as *semantic parsing*, has been studied in the context of one or a few domain-specific databases [@yu-etal-2019-cosql; @10.1162/tacl_a_00422; @suhr-etal-2018-learning] or without taking the conversational nature of the task into account [@reddy-etal-2014-large; @yih-etal-2016-value; @LC-QuAD2.0; @Gu2021BeyondIT]. However, due to the complexities of the semantic parsing task, there are no large-scale datasets consisting of information-seeking conversations with executable queries against a KG. Conversational semantic parsing over KGs requires handling very large vocabularies covering thousands of concept names and relations and millions of entities, rather than specialized vocabularies consisting of hundreds of table and column names. Moreover, information-seeking conversations are by nature incremental, involving interrelated rather than isolated questions.
6
+
7
+ ::: table*
+ [Table \[tab:convers:conve:example\]: an example conversation from $\mathbb{SPICE}$, with user utterances on the left and their [Sparql]{.smallcaps} parses (SP) on the right; the table body did not survive extraction.]
+ :::
16
+
17
+ In this work, we create $\mathbb{SPICE}$, a **S**emantic **P**ars**I**ng dataset for **C**onversational qu**E**stion answering over Wikidata. $\mathbb{SPICE}$ consists of user-assistant interactions where natural language questions are paired with [Sparql]{.smallcaps} parses and answers provided by the system correspond to [Sparql]{.smallcaps} execution results. We derive this dataset from CSQA [@csqa], an existing benchmark originally proposed for retrieval-based conversational question answering [@irqa-survey]. Although CSQA does not have executable queries, it contains a large number of natural language questions and their corresponding answers, highlighting a range of conversational phenomena such as coreference, ellipsis, and topic change as well as different types of questions exemplifying varying intents.
18
+
19
+ Table [\[tab:convers:conve:example\]](#tab:convers:conve:example){reference-type="ref" reference="tab:convers:conve:example"} shows a conversation from $\mathbb{SPICE}$ illustrating how questions (utterances on the left) are annotated with [Sparql]{.smallcaps} queries (SP on the right blue box). To create a large-scale dataset (197k conversations), we develop [Sparql]{.smallcaps} templates for different question intents; entity, relation, and class symbols are initially under-specified and subsequently filled automatically to generate full [Sparql]{.smallcaps} queries. CSQA questions have been previously associated with logical forms generated with custom-made grammars [@d2a2018; @kacupaj-etal-2021-conversational; @marion-etal-2021-structured]. As a result, semantic parsers based on them operate with different sets of grammar rules and are not strictly comparable, since the grammars may have different coverage and semantics (e.g., terminal symbols may encapsulate different degrees of execution complexity). In $\mathbb{SPICE}$, questions are represented with [Sparql]{.smallcaps}, a standard query language for retrieving and manipulating RDF data.[^2] This allows us to compare parsers developed on the dataset on an equal footing and facilitates further extensions (e.g., new question intents), without the need to redefine the grammar and its execution engine. In an attempt to build semantic parsers which generalize to new entities and concepts, we further create different data splits where new intents appear only at test time [@finegan-dollak-etal-2018-improving].
20
+
21
+ For our semantic parsing task, we establish two strong baseline models which tackle the large-vocabulary problem and the prediction of logical forms in different ways. The first approach [@Gu2021BeyondIT] uses *dynamic vocabularies* derived from KG subgraphs for each question and a simple sequence-to-sequence architecture to predict *complete* [Sparql]{.smallcaps} queries. The other approach [@kacupaj-etal-2021-conversational] predicts [Sparql]{.smallcaps} *query templates* and then fills in entity, relation, and type slots by means of an entity and ontology classifier. Our experiments reveal several shortcomings in both approaches, such as the inability to encode large sets of KG elements or to generate the same entity several times. Both approaches struggle with ellipsis, cannot resolve coreference when the referent appears in the conversation context beyond the previous turn, have reduced performance on questions with multiple entities, and have difficulty with unseen question intents. We discuss these challenges and outline research directions for conversational semantic parsing.
22
+
23
+ The CSQA dataset [@csqa] aims to facilitate the development of QA systems that handle complex and inter-related questions over a knowledge graph (KG). In contrast to simple factual questions that can be answered with a single KG triple (i.e., {subject, relation, object}), complex questions require manipulating sets of triples and reasoning over them. In Table [\[tab:convers:conve:example\]](#tab:convers:conve:example){reference-type="ref" reference="tab:convers:conve:example"}, a question like *How many sports teams participated in that tournament?* requires numerical reasoning, and answering the question in turn $\mathcal{T}_2$ relies on correctly interpreting $\mathcal{T}_1$.
24
+
25
+ Questions and answers in this dataset were elicited from human experts playing user and system roles as well as from crowd-workers. In a second stage, templates derived from the human-authored QA pairs were used to automatically augment the dataset. Human experts also suggested complex reasoning questions and derived templates from them. Conversations were built as sequences of QA pairs exploring paths in the KG. By construction, the QA pairs in a conversation are connected through one or several entities in the KG. Questions fall into two coarse categories, *simple* and *reasoning*-based, and the way QA pairs are organised in a sequence introduces various *conversational phenomena* which we summarize below.
26
+
27
+ **Simple questions** are factoid questions, seeking information related to an entity (e.g., *Which tournament did Detroit Tigers participate in?* in Table [\[tab:convers:conve:example\]](#tab:convers:conve:example){reference-type="ref" reference="tab:convers:conve:example"}) or a set of entities (e.g., *What are the countries of those sports teams?*).
28
+
29
+ **Reasoning questions** are complex questions which require the application of numerical and logical operators over sets of entities. For instance, answering the question *How many sports teams participated in that tournament?* requires finding the set of sports teams that participated in a given tournament (e.g., *1909 World Series*) and taking its count. Questions in this category also involve General Entities (GE) such as *tournament*, in addition to Named Entities (NE), and may involve multiple entities (both NE and GE) in a single question (e.g., *Which tournaments have less number of participating sports teams than 1909 World Series?*). Some question types also combine multiple reasoning operators.
30
+
31
+ **Conversations** contain sequences of mixed-initiative interactions where the system requests clarification on ambiguous questions. Conversations also include discourse phenomena such as coreference (e.g., *Which sports team was the champion of that tournament?* in Table [\[tab:convers:conve:example\]](#tab:convers:conve:example){reference-type="ref" reference="tab:convers:conve:example"}) and ellipsis (e.g., *And what about 1910 World Series?* as a follow-up question for *How many sports teams participated in that tournament?*).
32
+
33
+ There are 10 question types and 47 question sub-types. In Table [2](#tab:convers-stats){reference-type="ref" reference="tab:convers-stats"}, we only list question types but provide all sub-types in Table [\[tab:question:tupe:subtype\]](#tab:question:tupe:subtype){reference-type="ref" reference="tab:question:tupe:subtype"} in Appendix [8](#app:dataset){reference-type="ref" reference="app:dataset"}.
34
+
35
+ @csqa envisaged CSQA as a benchmark for retrieval-based conversational question answering [@BordesUCW15; @dong-etal-2015-question; @jain-2016-question; @irqa-survey]. These methods embed natural language questions and KG triples into high-dimensional spaces and rely on neural reasoning modules to match questions to candidate answers. Hence, questions do not have associated logical forms; only gold answers are available.
36
+
37
+ Our success in creating semantic parse annotations is partly due to the fact that CSQA provides useful KG information. Each interaction (i.e., user and system turn) comes with annotations about KG entities, types, and relation symbols, as well as some information about the triple patterns involved in the question (illustrated in Table [\[tab:convers:conve:example\]](#tab:convers:conve:example){reference-type="ref" reference="tab:convers:conve:example"} with the corresponding annotation fields). It also provides information pertaining to question types and sub-types (see the corresponding fields in Table [\[tab:convers:conve:example\]](#tab:convers:conve:example){reference-type="ref" reference="tab:convers:conve:example"}).
38
+
39
+ Taking advantage of these annotations, follow-on work [@d2a2018] defined a semantic parsing task over CSQA, modeling the meaning of questions as a sequence of actions. The set of actions encompasses find (or find_rev when the entity is in object position) to retrieve sets of entities in a subject (object) position, as well as actions operating on sets of entities (e.g., filter_type). For instance, the question in turn $\mathcal{T}_1$ in Table [\[tab:convers:conve:example\]](#tab:convers:conve:example){reference-type="ref" reference="tab:convers:conve:example"} would be parsed to \[filter_type, find_rev, Q650855, P1923, Q500834\], meaning "find the set of entities that are in relationship *participating team* with *Detroit Tigers* and then filter those that are of type *tournament*". A breadth-first search algorithm generates action-grammar annotations for each question, and a sequence of grammar actions is considered correct if, upon execution, it returns the gold answer. Subsequent work [@shen-etal-2019-multi; @kacupaj-etal-2021-conversational; @marion-etal-2021-structured] expanded this action grammar, greatly improving its coverage (i.e., the number of successfully annotated questions).
40
+
41
+ In this work, we take a step further and map CSQA natural language questions into vanilla [Sparql]{.smallcaps} queries. We first analysed how intent is expressed in question types and sub-types and then manually defined [Sparql]{.smallcaps} templates for each question sub-type. A [Sparql]{.smallcaps} template is a query with unspecified triple patterns in the WHERE clause. For instance, the template for the question in turn $\mathcal{T}_1$ is SELECT ?x WHERE {?x RELATION ENTITY. ?x wdt:P31 TYPE.}. We finally modified the tool provided in @kacupaj-etal-2021-conversational to automatically instantiate the [Sparql]{.smallcaps} templates, providing annotations for the entire dataset (e.g., by filling the missing slots to obtain SELECT ?x WHERE {?x wdt:P1923 wd:Q650855. ?x wdt:P31 wd:Q500834.}).
42
+
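+ A minimal sketch of the instantiation step (the helper function is hypothetical; the slot names come from the template above):
+
+ ```python
+ TEMPLATE = "SELECT ?x WHERE {{?x {RELATION} {ENTITY}. ?x wdt:P31 {TYPE}.}}"
+
+ def instantiate(relation: str, entity: str, type_: str) -> str:
+     """Fill the under-specified slots of a SPARQL template."""
+     return TEMPLATE.format(RELATION=relation, ENTITY=entity, TYPE=type_)
+
+ # Parse for turn T_1, "Which tournament did Detroit Tigers participate in?"
+ query = instantiate("wdt:P1923", "wd:Q650855", "wd:Q500834")
+ ```
+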
43
+ ::: {#tab:convers-stats}
+   -------------------------------- ---------------
+   Nb. instances                    197K
+   Nb. entities                     12.8M
+   Nb. relations                    2738
+   Nb. types                        3064
+   Avg. turn length                 9.5
+   Avg. entities per conversation   7.6
+   Avg. types per conversation      6.5
+   Avg. neighbourhood per turn      181.4 triples
+   -------------------------------- ---------------
+
+   General question types: Logical Reasoning, Quantitative Reasoning, Comparative Reasoning, Quantitative Reasoning Count, Comparative Reasoning Count, Verification, Simple Question.
+
+   Linguistic phenomena: Clarification, Coreference, Ellipsis.
+
+   : Statistics of the $\mathbb{SPICE}$ dataset (top); general question types (middle); linguistic phenomena (bottom).
+ :::
68
+
69
+ We imported the Wikidata snapshot provided by @csqa into a KG in a [Sparql]{.smallcaps} server (see Appendix [9](#app:knowledgeGraphServer){reference-type="ref" reference="app:knowledgeGraphServer"} for more details) and assessed the correctness of [Sparql]{.smallcaps} queries by executing them and comparing results to gold answers. For some questions the annotation procedure did not produce a [Sparql]{.smallcaps} parse that recovered the gold answer. In these rare cases, we redefined the answer if doing so did not affect the conversation flow, or truncated the conversation at that point.
70
+
71
+ Table [2](#tab:convers-stats){reference-type="ref" reference="tab:convers-stats"} shows various statistics for $\mathbb{SPICE}$, while Table [3](#tab:comparison){reference-type="ref" reference="tab:comparison"} compares it to related conversational datasets such as ATIS [@suhr-etal-2018-learning], SParC [@yu-etal-2019-sparc], and CoSQL [@yu-etal-2019-cosql]. As can be seen, $\mathbb{SPICE}$ contains a sizeable number of training instances, its conversations are longer, and its semantic parsing task is at real-world scale.
72
+
73
+ <figure id="fig:BertSP:LasagneSP:input:format" data-latex-placement="t">
74
+ <table>
75
+ <tbody>
76
+ <tr>
77
+ <td style="text-align: center;"><img src="model1.png" alt="image" /></td>
78
+ <td style="text-align: center;"><img src="las_model2.png" alt="image" /></td>
79
+ </tr>
80
+ <tr>
81
+ <td style="text-align: center;">(a) <span>Encoder-decoder model with dynamic vocabularies.</span></td>
82
+ <td style="text-align: center;">(b) <span>Semantic parser based on Lasagne multi-task model.</span></td>
83
+ </tr>
84
+ </tbody>
85
+ </table>
86
+ <figcaption>Two modeling approaches to conversational semantic parsing.</figcaption>
87
+ </figure>
88
+
89
+ We consider the semantic parsing task over a sequence of dialogue turns $d=(d_1, d_2, \cdots, d_{|d|})$, where turn $d_t$ corresponds to a user-system interaction with user question $x_t$ and system answer $a_t$. Each turn has a conversation context $c_t$ made of interactions $d_i$ such that $i<t$. Given interaction $d_t$ with context $c_t$ and user question $x_t=(x_{t1}, x_{t2}, \cdots,
90
+ x_{t|x_t|})$, our goal is to predict a [Sparql]{.smallcaps} query $y_t=(y_{t1}, y_{t2}, \cdots, y_{t|y_t|})$ that represents the intent of $x_t$ and, upon execution over knowledge-graph ${\cal K}$, yields denotation $a_t$. $y_t$ is a sequence over a target vocabulary ${\cal V} = {\cal V}_f \cup {\cal V}_{{\cal K}}$ where ${\cal V}_f$ is fixed and contains [Sparql]{.smallcaps} keywords (e.g., SELECT) and special tokens (e.g., beginning of sequence token, BOS), and ${\cal
91
+ V}_{{\cal K}}$ contains all knowledge-graph symbols (e.g., entity IDs such as Q76 for *Barack Obama*).
92
+
93
+ We propose two approaches for this semantic parsing task which establish strong baseline performance and highlight various challenges. These differ in the way they handle large KG vocabularies and how they generate logical forms. Figure [1](#fig:BertSP:LasagneSP:input:format){reference-type="ref" reference="fig:BertSP:LasagneSP:input:format"} provides a sketch of the two models discussed below.
94
+
95
+ ::: {#tab:comparison}
96
+ ATIS SParC CoSQL $\mathbb{SPICE}$
97
+ ------------------ -------- ------- ------- ----------------------
98
+ Nb. Instances 1,658 4,298 3,007 197K
99
+ Avg. turn length 7.0 3.0 5.2 9.5
100
+ Domain Single Multi Multi Wikidata
101
+ Logical form SQL SQL SQL [Sparql]{.smallcaps}
102
+ Database type Rel Rel Rel KG
103
+
104
+ : Datasets for conversational semantic parsing (Rel: relational database; KG: knowledge graph).
105
+ :::
106
+
107
+ Our first model is parameterised by an encoder-decoder Transformer neural network [@transformers] and is an adaptation of the semantic parsing architecture proposed in @Gu2021BeyondIT.
108
+
109
+ Since the KG vocabulary ${\cal
110
+ V}_{{\cal K}}$ can be extremely large, we parse question $x_t$ with a smaller vocabulary ${\cal V}_{t} \subseteq {\cal V}_{{\cal K}}$ which only contains KG symbols related to $x_t$. Following previous work [@Gu2021BeyondIT; @marion-etal-2021-structured], we assume the symbols related to $x_t$ are those appearing in sub-graph ${\cal G}_t$ of knowledge-graph ${\cal K}$, ${\cal G}_t \subseteq {\cal K}$. Given question $x_t$ and its context $c_t$, we identify KG entities ${\cal E}_t = \{e_{t1}, e_{t2}, \cdots, e_{t|{\cal E}_t|}\}$ which correspond to mentions in $x_t$ and $c_t$. We then obtain ${\cal G}_t$ by taking the one-hop neighbourhood for each entity $e_{ti} \in {\cal E}_t$. In other words, we include all KG triples ($s$, $r$, $o$) where the entity appears in subject ($s =
111
+ e_{ti}$) or object position ($o = e_{ti}$). When $e_{ti}$ is a subject, we include triple ($e_{ti}$, $r$, $\tau_o$) where $\tau_o$ is the type of entity $o$; analogously, when $e_{ti}$ appears in an object position, we add ($\tau_s$, $r$, $e_{ti}$). For entities $e_{ti}$ we include their types $\tau_{e_{ti}}$. When $e_{ti}$ is a general entity (e.g., a type such as *tournament*) we add relations from ${\cal K}$ that have instances of type $e_{ti}$ as their subject (object). The final vocabulary ${\cal V}_{t}$ contains all entities in ${\cal E}_t$, all relations $r$ and types ($\tau_o$, $\tau_s$, and $\tau_{e_{ti}}$) found in the set of triples in ${\cal G}_t$.
112
+
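+ A simplified sketch of assembling ${\cal V}_t$ from the one-hop triples (it glosses over the type-replacement details described above; all names are illustrative):
+
+ ```python
+ def dynamic_vocab(linked_entities, one_hop_triples):
+     """Gather the KG symbols of V_t from (s, r, o) one-hop triples."""
+     vocab = set(linked_entities)
+     for s, r, o in one_hop_triples:
+         vocab.update((s, r, o))   # entities, relations, and type symbols
+     return vocab
+ ```
+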
113
+ Note that context $c_t$ is defined as a window over the conversation so far. Following previous work [@marion-etal-2021-structured; @kacupaj-etal-2021-conversational], we set the conversation context to the previous user-system interaction $c_t = \{d_{t-1}\}$.
114
+
115
+ Our encoder is a BERT [@devlin-etal-2019-bert] model fine-tuned on our semantic parsing task. The decoder is a randomly initialised Transformer network [@transformers]. To account for the difference in initialisation between the encoder and decoder networks, we follow the training scheme proposed in @liu-lapata-2019-text. We provide details in Appendix [10](#app:model){reference-type="ref" reference="app:model"}.
116
+
117
+ The input to our semantic parser is a tuple ($x_t, c_t, {\cal G}_t$) consisting of natural language question $x_t$, its context $c_t$, and subgraph ${\cal G}_t$ which we adapt to BERT's input format as follows [@Gu2021BeyondIT]. We concatenate the sequence of natural language questions and answers appearing in $c_t$ and $x_t$, using the special token [CTX] as a delimiter and prepending the [CLS] token at the beginning of the sequence. The special token [SEP] denotes the end of the sequence, followed by the linearised KG sub-graph ${\cal G}_t$. The linearisation procedure goes over entities in ${\cal G}_t$, enumerating their types and relations. Importantly, we denote entities by their label rather than their KG identifiers. The order of entities in ${\cal G}_t$ is random. Figure [1](#fig:BertSP:LasagneSP:input:format){reference-type="ref" reference="fig:BertSP:LasagneSP:input:format"}(a) shows an example of the input to our BERT-based encoder.
118
+
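+ The input construction amounts to string concatenation; a sketch under the conventions just described (the entity record format is our assumption):
+
+ ```python
+ def build_encoder_input(context_utterances, question, subgraph_entities):
+     """[CLS] text [SEP] linearised subgraph [SEP], with [CTX] as delimiter."""
+     text = " [CTX] ".join(list(context_utterances) + [question])
+     graph = " ".join(
+         f"{e['label']} {' '.join(e['types'])} {' '.join(e['relations'])}"
+         for e in subgraph_entities)   # entity labels, not KG identifiers
+     return f"[CLS] {text} [SEP] {graph} [SEP]"
+ ```
+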
119
+ More formally, the encoder takes token sequences $x^\prime_t = \text{[CLS]}\,x^\prime_{t_\text{text}}\,\text{[SEP]}\,x^\prime_{t_\text{graph}}\,\text{[SEP]}$ as input where $x^\prime_{t_\text{text}}$ is the natural language subsequence and $x^\prime_{t_\text{graph}} = (g^t_1, \cdots, g^t_{|{\cal G}_t|})$ is the sequence of knowledge-graph symbols from the linearised graph ${\cal G}_t$. Note that these knowledge-graph symbols constitute the target dynamic vocabulary ${\cal V}_t$ and $|{\cal G}_t|$ represents the number of KG symbols which is equal to the size of the target vocabulary $|{\cal V}_t|$. The encoder maps input sequences $x^\prime_t$ into sequences of continuous representations $\mathbf{z_t} = (\mathbf{z}_{t1}, ..., \mathbf{z}_{t|x_t|})$, and the decoder then generates the target [Sparql]{.smallcaps} parse $y_t =
120
+ (y_{t1}, ..., y_{t|y_{t}|})$ token-by-token autoregressively, hence modelling the conditional probability: $p(y_{t1}, ...,
121
+ y_{t|y_{t}|}\,|\,x^\prime_t)$.
122
+
123
+ The linearised graph ${\cal G}_t$ can exceed BERT's maximum number of input positions (which is 512). To avoid throwing away useful information, we adopt a solution similar to @Gu2021BeyondIT. For question $x_t$ with ${\cal G}_t$ containing $k$ entities, we create $k$ input sequences $x^{\prime1}_t,
124
+ \cdots, x^{\prime k}_t$. These $k$ sequences share the natural language subsequence but have different KG symbol subsequences. Given the input sequences $x^{\prime1}_t, \cdots, x^{\prime k}_t$, we obtain contextualised representations as $\mathbf{z}^1_t, \cdots, \mathbf{z}^k_t = \text{BERT}(x^{\prime1}_t, \cdots, x^{\prime k}_t)$.
125
+
126
+ The model further splits the sequence of continuous representations $\mathbf{z}^j_t$ into textual representations $\mathbf{z}^j_{t_\text{text}}$ and knowledge-graph symbols $\mathbf{z}^j_{t_\text{graph}}$ both of which are contextualised. We then average representations $\mathbf{z}_{t_\text{text}} =
127
+ \text{AVG}(\mathbf{z}^j_{t_\text{text}})$ and feed them as input to the decoder (see Figure [1](#fig:BertSP:LasagneSP:input:format){reference-type="ref" reference="fig:BertSP:LasagneSP:input:format"}(a)). From representations $\mathbf{z}^j_{t_\text{graph}}$, we derive the embeddings for the elements in the target dynamic vocabulary ${\cal
128
+ V}_t$. Recall that the decoder parses input questions $x_t$ using target vocabulary ${\cal V} = {\cal V}_f \cup {\cal V}_t$ which consists of a set of fixed (${\cal V}_f$) and dynamic (${\cal V}_t$) target tokens. The decoder then predicts the probability of each [Sparql]{.smallcaps} token $y_{ti}$ as $p(y_{t_i} | y_{t_{<i}}, x^{\prime1}_t, \cdots, x^{\prime k}_t ) =
129
+ \operatorname{softmax}(\mathbf{W}_o \, \mathbf{h}^L_i)$ where $\mathbf{h}^L_i$ is the decoder top layer hidden representation at time step $i$. $\mathbf{W}_o \in \mathbb{R}^{|{\cal V}_f \cup
130
+ {\cal V}_t |}$ is the output embedding matrix with $\mathbf{W}_o =
131
+ [\mathbf{W}_f\,;\,\mathbf{W}_t]$, where $[;]$ denotes matrix concatenation, and $\mathbf{W}_t$ is derived from the encoder representations $\mathbf{z}^j_{t_\text{graph}}$.
132
+
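+ The per-question output layer is then just a concatenation of the two embedding matrices; a sketch (the shapes reflect our reading of the text):
+
+ ```python
+ import torch
+
+ def decoder_probs(h: torch.Tensor, W_f: torch.Tensor, W_t: torch.Tensor):
+     """softmax(W_o h) with W_o = [W_f ; W_t] over the vocabulary V_f and V_t."""
+     W_o = torch.cat([W_f, W_t], dim=0)    # (|V_f| + |V_t|, hidden)
+     return (h @ W_o.T).softmax(dim=-1)
+ ```
+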
133
+ Our second model is an adaptation of the Lasagne architecture proposed in @kacupaj-etal-2021-conversational. Lasagne generates logical forms following a multi-stage approach where a backbone sketch is first predicted and then fleshed out. Their sketch is a sequence of actions from a custom grammar which we modify to be a sketch of [Sparql]{.smallcaps} queries.
134
+
135
+ Lasagne employs an encoder-decoder model based on Transformers [@transformers] to convert a user question $x_t$ in a conversation into a logical form template. The input to the encoder is the conversation context $c_t =
136
+ \{d_{t-1}\}$ and user question $x_t$. Utterances are separated via [SEP] tokens, while the special context token [CTX] denotes the end of the sequence (see Figure [1](#fig:BertSP:LasagneSP:input:format){reference-type="ref" reference="fig:BertSP:LasagneSP:input:format"}b). The input sequence is encoded via multi-head attention [@transformers] to output contextualized representations, which are then fed to the decoder, which in Lasagne predicts a sequence of actions (without grounding to KG elements) token-by-token. Our decoder instead predicts [Sparql]{.smallcaps} queries with placeholders for KG symbols. For instance, for the WHERE clause of turn $\mathcal{T}_1$ in Table [\[tab:convers:conve:example\]](#tab:convers:conve:example){reference-type="ref" reference="tab:convers:conve:example"}, it predicts {ENTITY RELATION ?x. ?x wdt:P31 TYPE.} instead of {wd:Q5582479 wdt:P161 ?x. ?x wdt:P31 wd:Q502895.}.
137
+
138
+ An entity recognition module detects entities in the input and links them to the KG [@shen-etal-2019-multi; @kacupaj-etal-2021-conversational]. Initially, entity spans are identified using an LSTM which performs BIO sequence labelling.[^3] Entity spans are subsequently linked to KG entities via an inverted index (created using Elasticsearch[^4]) which maps entity labels to entity IDs. Once identified, the entities are further filtered and reordered so that they match their order of appearance in the [Sparql]{.smallcaps} query (see Figure [1](#fig:BertSP:LasagneSP:input:format){reference-type="ref" reference="fig:BertSP:LasagneSP:input:format"}(b)).
139
+
140
+ Finally, an ontology graph with types and relations appearing in $\mathbb{SPICE}$'s KG is constructed.[^5] The graph is encoded with a Graph Attention Network (GAT; @DBLP:conf/iclr/VelickovicCCRLB18) and the prediction of type and relation fillers for the [Sparql]{.smallcaps} template is modeled as a classification task over graph nodes, given the conversational context and the decoder hidden state.
141
+
142
+ All modules outlined above are trained in a multi-task manner, optimizing the weighted average of the following individual losses $L = \lambda_{1}L^{\rm F} + \lambda_{2}L^{\rm G} +
143
+ \lambda_{3}L^{\rm R} + \lambda_{4}L^{\rm O}$ where $L^{\rm F}$ is the loss of the [Sparql]{.smallcaps} template decoder, $L^{\rm G}$ is the type and relation prediction loss using the GAT network, $L^{\rm R}$ is the entity recognition loss, and $L^{\rm O}$ the entity reordering loss (and weights $\lambda_{1:4}$ are learned during training). We refer the interested reader to @kacupaj-etal-2021-conversational for more details.
144
+
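+ A sketch of the weighted objective; the text states only that the weights $\lambda_{1:4}$ are learned, so the softmax parameterisation below is one plausible choice, not the authors' exact formulation.
+
+ ```python
+ import torch
+
+ def multitask_loss(l_f, l_g, l_r, l_o, log_weights):
+     """L = sum_k lambda_k * L_k with learnable weights (lambdas via softmax).
+
+     `log_weights` is a learnable tensor of shape (4,); the four losses are
+     scalar tensors from the template decoder, GAT, NER, and reordering heads.
+     """
+     lambdas = torch.softmax(log_weights, dim=0)
+     return (lambdas * torch.stack([l_f, l_g, l_r, l_o])).sum()
+ ```
+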
145
+ ::: table*
+ Results by question type. For the first group of rows we report F1 and EM; for the last three question types (Verification and the two Count types) the first metric is answer accuracy (AC) instead of F1.
+
+ | Question Type | BertSP$_{\rm G}$ | BertSP$_{\rm S}$ | BertSP$_{\rm A}$ | LasagneSP |
+ |:---|:---:|:---:|:---:|:---:|
+ | Clarification | 84.89 / 82.53 | 80.21 / **77.69** | 83.91 / 76.58 | 86.29 / 73.41 |
+ | Logical Reasoning (All) | 90.61 / 82.90 | 85.55 / **66.89** | 22.74 / 28.61 | 88.80 / 57.41 |
+ | Quantitative Reasoning (All) | 94.42 / 88.55 | 82.95 / 66.40 | 76.20 / 59.01 | 94.90 / **91.47** |
+ | Comparative Reasoning (All) | 96.23 / 87.39 | 90.44 / 73.80 | 69.56 / 39.37 | 94.20 / **85.05** |
+ | Simple Question (Coreferenced) | 88.96 / 86.53 | 83.19 / **69.87** | 76.51 / 58.83 | 84.73 / 60.90 |
+ | Simple Question (Direct) | 91.81 / 91.59 | 87.13 / **80.69** | 71.43 / 58.71 | 87.21 / 66.88 |
+ | Simple Question (Ellipsis) | 79.51 / 89.71 | 72.50 / **71.67** | 58.14 / 50.90 | 74.35 / 61.53 |
+ | Verification (Boolean) | 90.10 / 77.24 | 79.72 / **62.62** | 37.16 / 24.90 | 34.89 / 26.72 |
+ | Quantitative Reasoning (Count) | 87.91 / 84.97 | 76.88 / **73.20** | 50.86 / 48.44 | 60.51 / 56.15 |
+ | Comparative Reasoning (Count) | 90.05 / 85.99 | 73.18 / 66.79 | 43.48 / 40.67 | 89.09 / **83.69** |
+ | Overall | 81.50 / 85.74 | 81.18 / **70.96** | 59.00 / 48.60 | 79.50 / 66.32 |
+
+ Performance on conversational phenomena:
+
+ | Phenomena | BertSP$_{\rm G}$ | BertSP$_{\rm S}$ | BertSP$_{\rm A}$ | LasagneSP |
+ |:---|:---:|:---:|:---:|:---:|
+ | Coreference$_{=-1}$ | 81.40 | 70.65 | 49.39 | 43.65 |
+ | Coreference$_{<-1}$ | 67.82 | 0 | 0 | 0 |
+ | Ellipsis | 75.93 | 54.33 | 26.39 | 46.54 |
+ | Multiple Entities | 83.37 | 65.40 | 41.64 | 66.52 |
+
+ Generalisation to unseen question-type combinations (EM):
+
+ | Unseen Combinations | Instances (train / valid / test) | BertSP$_{\rm S}$ | LasagneSP |
+ |:---|:---:|:---:|:---:|
+ | [CountLogic]{.smallcaps} | 153,562 / 14,262 / 29,177 | 0.94 | 0 |
+ | [UnionMulti]{.smallcaps} | 157,331 / 14,426 / 25,244 | 19.74 | 16.89 |
+ | [Verify3]{.smallcaps} | 154,027 / 13,869 / 29,105 | 0 | 0 |
+ :::
2302.03985/main_diagram/main_diagram.drawio ADDED
@@ -0,0 +1 @@
 
 
1
+ <mxfile host="app.diagrams.net" modified="2021-12-17T07:56:38.209Z" agent="5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.110 Safari/537.36" etag="JtjGx8Gg-RFAMt_Oby7r" version="15.9.6" type="google" pages="3"><diagram id="e9B5mOuagZ1m9Xockfqr" name="rla_cnn">7Vxtc5s4EP41nrn7EEYgiZePaZJLZ9rr5Nq5a/PphoBsc8GWD+PEuV9/kpHASDLgGJw0sTvTWEIIsfto99Gu5BG8mK2vs3Ax/Z3GJB05IF6P4OXIcWyIbPaH1zwVNa4Pi4pJlsSiUVXxLfmPiEogaldJTJa1hjmlaZ4s6pURnc9JlNfqwiyjj/VmY5rWn7oIJ0Sr+BaFqV77PYnzaVHrABt71ZWPJJlM5bNtCP3i0iyU7UUvy2kY08eiavN+8GoELzJK8+LbbH1BUi4/KZpCCL/tuFqOLSPzvMsNn+aLm/X4yg++3H+5Db/8sbiN/jxDuOjmIUxX4qXFaPMnKYWMruYx4b3YI/jhcZrk5NsijPjVR6Z3VjfNZ6m4PE7S9IKmNNvcC8fjsRNFrD4Ol9Oyj2We0Xuy1QxsPuzKJA2XXGn8e5hFAhK2x4qUPTPJOZIwv6q/vpDIA8lyst6qEuK4JnRG8uyJNRFXHU/gTMATQvHyj5WyUSAkNN1SM5QADQXAJmXflQLYF6GDffRhD6wPzP+p+hjTeb7VqPiY9ORuPv1IH4G69G0INOmzRrr0bW8o6TuBQfpumgsR1dTg/rui8sLZcoPTc9aACWVdXWTfJpu/Lvgxwlcj70N+Zo+8S1aWHbOBFn2Llqq2mTTzukrrWpnTOVH0LKrCNJnMWTFiOiKs/gPXTcKs27m4MEvimD/GiKEKZXy2lcZrU7gneTQVBT54OU17mpfIderIAPq8dA3AcIbChURm06wkMXMdokizfEondB6mV1WtItGqzWdKF0K1/5A8fxLSDFc5VRUfZlLWfBKSebxVKmAh/ZRt4U0L9vo/+BOr4q0YwKZwua6VnkRppxLZCCYkbzJgqGjIxdFF1xggC8G6uh1gBd5uVYq+bmiymZKyIzvQOnL1jooXEPdWsDjPsvBpq9mCN1g2Pq3+KFtxvmp7BzS1Z1+KEXR/NxRo77akqywi2rttIF9K/oBZ0IErvMpZsBPN+4AU18Rf0cy9AKp04iqd9AdObbgt8Gy7owNAlfuxYoyHBqfbDk7p/z6HdyS9ocskTyj3g3c0z+nM4CBzqrAp5gYXvLPZesJXPNY4pY/RlOHRWq5ms2Q++Xu8mkdFtwYqzD79eEhsAwVJBu50VA/pt4s/WmUPJeVks/acr9A4P+GcP4k0SVeEQ+UYdQJSF3OMiR+jxkXGIDaiXCiUpAVpKsEGlajzpIMhUX2J9LktU80wa1UUDeMskWce8K5xIWVctt9svZAiELlq2NneN7+30db1ZqBMa4ufwXv2ziEP8LpyjnWfLAjYtS7gwYSwM8eDWHHvA7tAyRC3EHad0ruQ33jOfAsPdjnghtKUOSoNe/tFFSqD7IC9ba1qsUPij6OeQjqBogOJmG3HCA1mGDYsOA6L6CBNK1/JMolXXC9vUgfIVcy3p5MT28ROmlzhYTrwDLa357jOKarTBguFBbgGzmoK9w3GWdHP6pGH8Kz+/utZzd0pihpuNdvCB1vv2Hs1qwX8B3bluEMa4I1C09FJH7J8ZYXTjk4X+0o/CLiWHwwCUf1hbDLIh+1CaeMIOwL1hRkndjSYwjVjoeDyO/vvgs4f3ijFwb6vaA6X2NpmOSZ/hlzLbjC2B7k0rC8B3ondgE49Po4g3NdklKl72YWndNGTtdCeI9LOuwxFS/sWG6HdLZNrx7IR7zZrgLB7KCSRV49eDAVJ7TktkGxp3wJJ7e5jQ1LPFdjre+Gx7PiN+qyS4ldYMvos2xQdGdJn6Yvzd6ANZHuvUxsdEjmv0lyTdZJvRalZ6Vb0xb9XMWpeaA1RFwaogwnpTk+A5cGg+tSZP3aewVZwY4/uUOTFVUa+HxlpHrRcUBzJEcgI1PsGe3c+A3vGMHJ9y0NHwS1y6xt2MGphOM3t2xiOeveR40fuzxo/eg6wtxKUPnS3U5RnwAI2aslTbko3JEuY0HlA/VDPICTdulNOkt6u089FjqUSZx9pIa1OoTFl4ipZ1f5Soi72yl1qJdGxtTEPPRtMUZGe0j9/nNI/3QgvVBLxWHbxUukfV0/M9oaKTydUdFwGQfTKUGEKV/WEir9OqOiGClfZAIZcZFocHzdb3GGD6WunVFzmFamyAAiaiRUr9EeM9uU7GNRNQ7lr7/kpalftYrjNXy48MuM3wXM/u2WjHXbr4whf5AwFy+/yy8XJfjVDt86wy+0DbbnB4dza4TugTvDoDR7q/mbD+TZbnkU9DjxMUd8TPF6I/UBlxWxmP7bpNMFwCDn8aOxOhNgSGPYJId3W0spmB9OqyZg5GgwenimSfoLHC8EDK8snaN6ddFyEmELSJ4S81An79mDckeHhnBjIq4GHtpg1bNU3nuAYjn7o8Lj++vlcU9YLbg6R5057kL96iqlMim4Hv4x7SxuSQYdNT1OwXI1+tR3p3YXaIkOnx7G6aqKKcAF3VAW4gBXIq93jW8oh5TKzGAT+aCuzCCzsB6M+E4v95Qs7HjjOSBrmyQOpjWKv2JoFtj51h2JLfOx7JtlTTwkBxaT0tWUfAuOAd43LV9fsuPkMiq/8uhEKmtt7yplkHNTGM8yZZO8ZyY67MLqfbMzsWVTMT+57s8ndLw7f6svego0FKN9/HTkF+IHqs32Tz/5K2DTczJDNLkD+2278JxVODnuXw/BktKg8M9PRYatR6g4OgxWrH60rkFj9+h+8+h8=</diagram><diagram id="bVVV3ylMSo0j04XizaoU" 
name="dense_la">7Rxrc9o48Ncwc/cBxvITfyQkaZtLm6a5adp+ySi2DCbGcmQToL/+JFvYlt8EUsgNTGawduWVtO9dm/SU8Xz1gcBg+hnbyOvJkr3qKec9WQaKCugXg6wTiD5UEsCEuDaflAHu3N+IAyUOXbg2CoWJEcZe5AYi0MK+j6xIgEFC8FKc5mBPXDWAE1QC3FnQK0PvXTuaJlBZApqRYT4idzLdrA0UZZig5nAzn1MJp9DGywQUn0+56CljgnGUXM1XY+Qx/m1YM/70dGff45X1XSNuMLke39489RPOXG5zS3oKgvzo1aQNZ+k8XV7+vP+59q7ur6+jsQ/7QE5ov0BvwXnWk3WPrnLmYLoYPXW05tzUnxd4g+iHsaxHdAIYBqsMSa8m8XeMG+P5o+vDyMU+HWGHyRDBaEGoUtBLgudMuVzHQfHRZMmDa0TCzRboYZJdcJqJGNINyRFaMfg0mnsUAOhlGBH8hMbYw4RCfOwjtmHX8wog6LkTnw4tuiyi8LMXRCKXKs6II+aubbNlzpZTN0J3AbTYmktqJhRG8MK3EeOrlG6LEUCrgtq1CAykWkQtEOE5isia3sepyLKZ0OHGpwFufMtMlVWZW9o0p8QKh0FuPZOUdKYe9IJrSLW2jPThj9tft7ObufFwezn7vfo4c7iCicpSEAmyqQHyISbRFE+wD72LDFpgXjbnGuOAS3GGomjNvQlcRLgoY0gijtXpGPl2bpRowMbawUBLJdRNHLLEjpCTY618TG2gKlL2UQVpqUAeqEa9IDjZr9iNFX+jObokUpFkkQQ9+wRF/K5MnCNC4Do3LWATwoZ1gDSQ8h+lsCxjnOBNSgTAVgToRbLHGnKaNFBrKKSHCvGCWKh09liVU/F00m7JX51f3tybimM9rG8/SNLvx+dK7U78UBhAv9IVWolXYa6OTB7/ohToH11dyl39zS6ZDkqx43Tg3PXWyT2UEJwHMVJRVKblyHtBzA+VMCKRzPtStV4VcMkuGdLHZA49Eb3kjoLh1WSfMdKjNodInx7Vcv1J5f3M3fa552Ro7jwFtEtt2+fk81uLkRGBfuhQohvy1BdvJiwxscXV87c/QutpEnuOfoHnsjpMeS2rZnat5Thvu2HgQc511/fc3MKOh2GU31AplOnSj5520TPOpJ5x3mMWmoanRDOaw1NVBMm5s+2cUynM1LonAERXomhSKXxobxQ9gkfZ6s+uVh/Xk/4q0r+ZuqlW2Nc3FLr2Itawaxb4WTZR4mIWL0A7M5mK82AQy1fMB6gxJc6qJk7kEwU614Zo6FhxyIqSHEY575vSW8pM1kWZGUZJZqAq5Gt7EJrx9Pmf+WhBnoNPz8qDefnly83nvtoe8pldBjtyJC0B4OOGrNTIKareIqdUucQp0ygzKuXe3tUbtHPqKJMjKjCy/sFWzIY/+QbiwflKGK2FtDcv8SYFSvKWhnnSdslXH6gDyvbsYwjqYEgD0yhnJ90TMUXMvfWC1mydhpUTqRrK+0tymtic09Lr0bG5XQ0NbbWb2230WZ3dbtHran/O6T6gl18fvn9/ufp3dj+7uLK0LxdPfeWUiZ4y0cZMFLxdJtrSyKi3uPpCWbCu4R/LQiu3qh1pQqNsNnKIhKaSU+WEpiJfL3PvfeTrjbqxRb5+sMBReYCqwHHKQTdy3VduCXbMLVWRng5EAnvMLQuU3zi31I/UtapAjECHd63GyU4bFKi1Vhzu155PtWIjm4u14lGF/F1rxY3P6l4rDg8W8itrxQ75LKXiBiFqFw0Mg+QpvOOumDiL3NbtoaGrXIY5uBN/qmSpjDRVATWyrPXv3aTW0BsURKSWK44qCe3F2Vc9Yz89VzpV883VvHxc1XyzdRUa7wcu580jzTk15YDPJ6rL+U7P35RjCu671vNmjTTfTT0PTg+VmgR7Kuj3VwBUZS5yB+3z7RF7UZOOLA+GoWuJqmMtyEvqPDrYPNKAKlV6jjOZe45K2b/+bbuWyLWBEeRRH/QirtldRdIImL7e1K2WK+uaViAEdi03azasFtYxpeZ9FQ8ozhff/9qXgnbod+aif+eovvOrm3qxSvyj0b+KUx3e4/gfBpJGJ9GkUUILqmqetlPAOXWcGrha7jgdVVLa0nFqK7hqle4YO06Vu+3Q5n/fHadXiOi4Ok5VjxdOHadTxynrOCmH6zhtb12H6zhV7bX8VKTs/rYpiuoynBRXk+GI/k2ui2umSTE2DKfpgmzwFTIb8WMIe/tvx6qqQ8JU01Q8VV97qb4UU5yvgeb5mrrdfF1u3M/bVHcdersnSzsyS2s1kK6WphsFQsVsaU+WphZ+cadJLZagNc5/G0vo1Ad+zzn3Xn4ee+CsG3TpsbT4q1ypSo+zbala9bvkd9YnNYqB77WR2hi2EKrxH+0mS4fZj/6T6dl/T1Au/gM=</diagram><diagram id="7aFq72bVeMXSZdW8sZ-V" 
name="groupwise">7V3ZduI4EP0aHsmxJK+PgZCkOz3TSybkJC99HCzAjbFpIzpkvn5kvICMABm8wTgv4LKR4datkqpK5bRQd7q8883Z+C/Pwk4LStayhW5aEAIkA/oSSD5CiaqjUDDybSu6aC14tP/FkVCKpAvbwnPmQuJ5DrFnrHDguS4eEEZm+r73zl429Bz2rjNzhLcEjwPT2ZY+2xYZh1IoAUVbn7nH9mgc3xsgpIenpmZ8fTTKfGxa3nsoWv0+1Guhru95JHw3XXaxE+AXQ/Pj0/Pdr8m99ah++jH+PL/RX+BjO0TmNstHkl/hY5ccPfTf35/80YcKP95fXWwv+rfXT5PoI9If01lEkEW/lXzEGPrewrVwMAhooc772Cb4cWYOgrPvlDVUNiZTJzo99FwS0QDKwbHtOF3P8fzVWGg4xOpgQOWWOR8nY86J703wxmWWZrxJEj0zw749xQT7wR1td0RPB2IvOCIBJ9XgUBCeCMY/2Cd4uaHZCK477NFb+R/0kugs1OTwIxH7EYiO39dcQpIaysYbLNIjmRnRd5QMvdYPfROpKAsT9BL15XouTquqKKhlSWehhtIW1AAo21DL0ulQP6h+l/QfOvq3Z+9B+tl/eu29lG0ZljTAGB60DEVVgIYrtwzFgActAyp6mZahXahlJJDttQxYjGVwf4JxGOkRhXp2IiLJqsB8i4eV9iNlSCmklG13HaOyiZSqnI7Ur95w0v3a/ufX89RzNEV/+Q4Q14eoDonYxUCm/l548Yn2fMW7a3oBxW+5PknfjcJXam7y75bSIz9bWge0tJtQFA9Pv214h/j6lH4o0oSlO+tpIoJzOG869silhwOqVEzlnUBvNl1xXUcnprZlBbfh2tfaAgPvlCyoVgcTTAbj6GDD+EAGP2bsoNJOyiCWMfI2YXh8gUVZFlBPJ4zMJUwPtnSpda3uJEgons9MN9+7hCOeBw2DLzByzPn8ECVhgZRMGFcCCW9uZv32R8+1lp+H3zqvyu/+F9KGcr7+fRcC4v59c6aTEAeKHPw396vDw0jQUWgwu4toG8w257Mwwh3ay4B66QXg7a1K/7jE2q+mw8yKvNu2c4NlOjcBWp0DmPyZo2JsefPG+WIbJ5nqgS0v5C5nTr7o2TInrshqxUs2gTxBJdGQDFLJK44dAVTmbArAYahy81LGdVfr3PJSKEhFBrIyROzSDpXUdaKNkwGXi3Mt52CA/iew12t6jpPCzfxcQjSbnSwlzs/cFBzPFxaQgoNNCu7Yyo/MenGklZf/eBr2/SUxJosn9/ploimO3sEFJ22/U8ZQvpCGL0dXCjWGL0nWv6p8WbF8eWj4chpfNAnUiy8i68RKIkZVgqwn5lQaDbCNVB6FRn424DBQJZd09yv0MCerQhLxnJQY57L95OxF2yvZYP4YCmpX29GFztkFQocpLGshUO4mvm26o50uUXwXSJJb9T1iEtsLvG3b4LvH/YoW9o/t/RoQi+5ADujziXbZ4IOE4PWAm1ejaWLpYnLd2dmCEMcdlrp24eVaLsgc1ZqZI8i5FJ/bUjG9qK58qcjNAtZxrShe4KoMSoFicm5GnuSxjzXyDIBGXAWQ40XLNWuBvapnifCSBbM+gDc1/PLWNdn5gtgdzrygr9wMTZkhXwX2qdXNPiEvPdHsez0TL5BPnlaX0rv3K/cDkJd9yC+1P2n2759KGjW11CizeMinDCyHMk29+eh6UO0oI1APyrcHdzAQ6MFN9hEd7DSUi1SXrqbUJW13GgKevoCcQ6shP0nKs/GUwijPZ8HbweJNYIn4Fqr3y1siMAeT0UrpXxfEsV0cyS3Tn3xNgJeuJIUVwpV0pc+QAGhlWo49u2f0HDfDg7RJiu/1zkm9GruZDMWVpU1r5BR8gFSUOcoC5ngh2k1Cj6K0a6QrytVrt7oKRxNp1CTSMOQUK6vuuJNz2MK6Z9HYb7YQnUgYwK5BaKxaMWFy6BPeQ5g/TWB6+rKV9TEyp0BWLmV4NYcCKNMEpkdTJvX4isopoxSb/qJH5jRQxGrE1MMsuqmd9dGlq4+z7gk1dDuKbkhPeShZr5huAiX8ajopU/USZGwjhRReCqK4MEbgOWAXEqRmbTdPaJShjZDd9AM4frfcIPX4Jyxlg0TcFBInsc38uFq6iQ0CV3m0EfPzpcf7if1on+4oVE7FqrAnlvDBqfWTIMRNs/TeUv4XFtiocw5oVt0vzf/+AnPYGYFbWVc0//vzppBmx1NBTy3JTJYSu6L5W6MF+iqqmUOBxu4sXi8kSmkB4qMFD6NVo23vmUPBercAIYGC3DmjX7ceINQ0AZXYBJQ9b1N5ExA6ry6g7JvK6maQ3GrPRu60pobD7C9Kq/ng9qFs64zd3R689YPKUR8qzFy4lZeNfHajvj2tJLLAcrBcdfLC1m6jwz0LnNgFV6WyODfPqGxVM1K6xJ7ieVxBanS40w5T2zCLtEN6uP6nOqtzG/+dCPX+Aw==</diagram></mxfile>
2302.03985/main_diagram/main_diagram.pdf ADDED
Binary file (35.6 kB). View file
 
2302.03985/paper_text/intro_method.md ADDED
@@ -0,0 +1,137 @@
1
+ # Introduction
2
+
3
+ Growing evidence indicates that strengthening layer interactions can encourage the information flow of a deep neural network (He et al., 2016; Huang et al., 2017; Zhao et al., 2021). For example, in vision networks, the receptive fields are usually enlarged as layers are stacked. These hierarchical receptive fields play different roles in extracting features: local texture features are captured by small receptive fields, while global semantic features are captured by large receptive fields. Hence encouraging layer interactions can enhance the representation power of networks by combining different levels of features. Previous empirical studies also support the necessity of building interdependencies across layers. ResNet (He et al., 2016) proposed to add a skip connection between two consecutive layers. DenseNet (Huang et al., 2017) further reinforced layer interactions by making layers accessible to all subsequent layers within a stage. Recently, GLOM (Hinton, 2021) adopted an intensely interacted architecture that includes bottom-up, top-down, and same-level interactions, attempting to represent part-whole hierarchies in a neural network.
4
+
5
+ In the meantime, the attention mechanism has proven itself in learning interdependencies by retrieving query-activated information in deep neural networks. Current works about attention lay much emphasis on amplifying interactions within a layer (Hu et al., 2018; Woo et al., 2018; Dosovitskiy et al., 2021). They implement attention on channels, spatial locations, and patches; however, none of them consider attention on layers, which are actually the higher-level features of a network.
6
+
7
+ It is then natural to ask: "Can attention replicate its success in strengthening layer interactions?" This paper gives a positive answer. Specifically, starting from the vanilla attention, we first give a formal
8
+
9
+ <sup>\*</sup> Authors contributed equally. † Correspondence to gdli@hku.hk.
10
+
11
+ ![](_page_1_Figure_1.jpeg)
12
+
13
+ Figure 1: (a) Visualization of the layer attention scores from a randomly chosen head of MRLA in each stage of the ResNet-50+MRLA model; (b) schematic diagram of two consecutive layers with RLA.
14
+
15
+ definition of layer attention. Under this definition, a query representation of the current layer is sent to all previous layers to retrieve related information from hierarchical receptive fields. The resulting attention scores concretely depict the cross-layer dependencies, which also quantify the importance of hierarchical information to the query layer. Furthermore, utilizing the sequential structure of networks, we suggest a way to perform layer attention recurrently in Section 3.3 and call it recurrent layer attention (RLA). A multi-head design is naturally introduced to diversify representation subspaces, and hence comes multi-head RLA (MRLA). Figure 1(a) visualizes the layer attention scores yielded by MRLA at Eq. (6). Interestingly, most layers pay more attention to the first layer within the stage, verifying our motivation for retrospectively retrieving information.
16
+
17
+ Inheriting from the vanilla attention, MRLA has a quadratic complexity of O(T 2 ), where T is the depth of a network. When applied to very deep networks, this will incur a high computation cost and possibly the out-of-memory problem. To mitigate the issues, this paper makes an attempt to devise a light-weighted version of MRLA with linear complexity of O(T). After imposing a linearized approximation, MRLA becomes more efficient and has a broader sphere of applications.
18
+
19
+ To the best of our knowledge, our work is the first attempt to systematically study cross-layer dependencies via attention. It is different from the information aggregation in DenseNet because the latter aggregates all previous layers' features in a channel-wise way, regardless of which layer a feature comes from. OmniNet (Tay et al., 2021) and ACLA (Wang et al., 2022b) follow the same spirit as DenseNet. They allow each token from each layer to attend to all tokens from all previous layers. Essentially, both of them neglect the layer identity of each token. By contrast, we build upon layer attention to retrospectively retrieve query-related features from previous layers. Besides, to bypass the high computation cost, OmniNet divides the network into several partitions and inserts the omnidirectional attention block only after the last layer of each partition, and ACLA samples tokens with gates from each layer. Instead, our light-weighted version of MRLA can be easily applied to each layer.
20
+
21
+ The two versions of MRLA can improve many state-of-the-art (SOTA) vision networks, such as convolutional neural networks (CNNs) and vision transformers. We have conducted extensive experiments across various tasks, including image classification, object detection and instance segmentation. The experiment results show that our MRLA performs favorably against its counterparts. Especially in dense prediction tasks, it can outperform other SOTA networks by a large margin. The visualizations (see Appendix B.5) show that our MRLA can retrieve local texture features with positional information from previous layers, which may account for its remarkable success in dense prediction.
22
+
23
+ The main contributions of this paper are summarized below: (1) A novel layer attention, MRLA, is proposed to strengthen cross-layer interactions by retrieving query-related information from previous layers. (2) A light-weighted version of MRLA with linear complexity is further devised to make cross-layer attention feasible to more deep networks. (3) We show that MRLA is compatible with many networks, and validate its effectiveness across a broad range of tasks on benchmark datasets. (4) We investigate the important design elements of our MRLA block through an ablation study and provide guidelines for its applications on convolutional and transformer-based vision models.
24
+
25
+ # Method
26
+
27
+ ```
+ 1: Input: Output of the t-th CNN block \boldsymbol{X}^t \in \mathbb{R}^{1 \times H \times W \times C}; the (t-1)-th MRLA-base block's key and value \boldsymbol{K}^{t-1} \in \mathbb{R}^{1 \times (t-1) \times C} and \boldsymbol{V}^{t-1} \in \mathbb{R}^{1 \times (t-1) \times H \times W \times C}; number of heads H
+ 2: Output: \boldsymbol{O}^t \in \mathbb{R}^{1 \times H \times W \times C}, \boldsymbol{K}^t \in \mathbb{R}^{1 \times t \times C}, \boldsymbol{V}^t \in \mathbb{R}^{1 \times t \times H \times W \times C}
+ 3: // Summarize \boldsymbol{X}^t's spatial information
+ 4: \boldsymbol{Y}^t \leftarrow \text{GAP}(\boldsymbol{X}^t) \in \mathbb{R}^{1 \times 1 \times C}
+ 5: // Derive the current layer's query, key and value via convolutions
+ 6: \boldsymbol{Q}^t \leftarrow \text{Conv1D}(\boldsymbol{Y}^t) \in \mathbb{R}^{1 \times 1 \times C}
+ 7: \boldsymbol{K}_{t,:}^{t} \leftarrow \text{Conv1D}(\boldsymbol{Y}^{t}) \in \mathbb{R}^{1 \times 1 \times C}
+ 8: \boldsymbol{V}_{t,:}^{t} \leftarrow \text{DWConv2D}(\boldsymbol{X}^{t}) \in \mathbb{R}^{1 \times 1 \times H \times W \times C}
+ 9: if t = 1 then
+ 10:   // First MRLA-base block
+ 11:   \boldsymbol{K}^t \leftarrow \boldsymbol{K}_{t,:}^t
+ 12:   \boldsymbol{V}^t \leftarrow \boldsymbol{V}_{t,:}^t
+ 13: else
+ 14:   // Concatenate with the previous key and value
+ 15:   \boldsymbol{K}^t \leftarrow \text{Concat}[\boldsymbol{K}^{t-1}, \boldsymbol{K}_{t,:}^t] \in \mathbb{R}^{1 \times t \times C}
+ 16:   \boldsymbol{V}^t \leftarrow \text{Concat}[\boldsymbol{V}^{t-1}, \boldsymbol{V}_{t,:}^t] \in \mathbb{R}^{1 \times t \times H \times W \times C}
+ 17: end if
+ 18: \boldsymbol{O}^t \leftarrow \text{Multi-head Attention}(\boldsymbol{Q}^t, \boldsymbol{K}^t, \boldsymbol{V}^t) where the number of heads is H
+ ```
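+
+ As an illustration of the recurrence above, the following is a minimal PyTorch sketch of one MRLA-base step for CNN features; the module and variable names are ours, not the authors' released code, and the head count is assumed to divide the channel dimension:
+
+ ```python
+ # Minimal sketch of one MRLA-base step (CNN version of the pseudocode above).
+ import torch
+ import torch.nn as nn
+ import torch.nn.functional as F
+
+ class MRLABaseStep(nn.Module):
+     def __init__(self, channels: int, heads: int = 8, k_size: int = 3):
+         super().__init__()
+         self.heads, self.d = heads, channels // heads
+         # Lines 6-7: 1D convolutions along the channel axis give Q^t and K^t_{t,:}.
+         self.q_conv = nn.Conv1d(1, 1, k_size, padding=k_size // 2)
+         self.k_conv = nn.Conv1d(1, 1, k_size, padding=k_size // 2)
+         # Line 8: a depth-wise 2D convolution gives V^t_{t,:}.
+         self.v_conv = nn.Conv2d(channels, channels, 3, padding=1, groups=channels)
+
+     def forward(self, x, k_prev=None, v_prev=None):
+         # x: (B, C, H, W); k_prev: (B, t-1, C); v_prev: (B, t-1, C, H, W)
+         b, c, hh, ww = x.shape
+         y = F.adaptive_avg_pool2d(x, 1).view(b, 1, c)      # line 4: GAP
+         q, k_t = self.q_conv(y), self.k_conv(y)            # (B, 1, C) each
+         v_t = self.v_conv(x).unsqueeze(1)                  # (B, 1, C, H, W)
+         # Lines 9-17: grow the per-layer key/value caches along the layer axis.
+         k = k_t if k_prev is None else torch.cat([k_prev, k_t], dim=1)
+         v = v_t if v_prev is None else torch.cat([v_prev, v_t], dim=1)
+         # Line 18: the current layer's query attends over all t layers so far.
+         qh = q.view(b, 1, self.heads, self.d)
+         kh = k.view(b, -1, self.heads, self.d)
+         attn = torch.einsum('bqhd,bthd->bhqt', qh, kh) / self.d ** 0.5
+         attn = attn.softmax(dim=-1)                        # (B, heads, 1, t)
+         vh = v.view(b, -1, self.heads, self.d, hh, ww)
+         o = torch.einsum('bhqt,bthdxy->bqhdxy', attn, vh)
+         return o.reshape(b, c, hh, ww), k, v
+ ```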
49
+
50
+ Due to the limited space in the main paper, we provide more experimental settings and discussions in this section which are organized as follows. We first provide the implementation details of ImageNet classification and more comparisons with other SOTA networks in Appendix B.1. We also compare the model complexity and memory cost of MLA and MRLAs in Appendix B.2. Next, some implementation details and results of object detection and instance segmentation on COCO are included in Appendix B.3. Then the ablation study of a vision transformer (DeiT) is given in
51
+
53
+
54
+ ```
+ 1: Input: Output of the t-th transformer block \boldsymbol{X}^t \in \mathbb{R}^{1 \times (N+1) \times C}; the (t-1)-th MRLA-base block's key and value \boldsymbol{K}^{t-1} \in \mathbb{R}^{1 \times (t-1) \times C} and \boldsymbol{V}^{t-1} \in \mathbb{R}^{1 \times (t-1) \times \sqrt{N} \times \sqrt{N} \times C}; number of heads H
+ 2: Output: \boldsymbol{O}^t \in \mathbb{R}^{1 \times (N+1) \times C}, \boldsymbol{K}^t \in \mathbb{R}^{1 \times t \times C}, \boldsymbol{V}^t \in \mathbb{R}^{1 \times t \times \sqrt{N} \times \sqrt{N} \times C}
+ 3: // Split into the class token and patch tokens
+ 4: \boldsymbol{X}_{c}^{t} \in \mathbb{R}^{1 \times 1 \times C}, \boldsymbol{X}_{p}^{t} \in \mathbb{R}^{1 \times N \times C} \leftarrow \text{Split}(\boldsymbol{X}^{t})
+ 5: // Reshape
+ 6: \boldsymbol{X}_p^t \leftarrow \text{Reshape}(\boldsymbol{X}_p^t) \in \mathbb{R}^{1 \times \sqrt{N} \times \sqrt{N} \times C}
+ 7: // Summarize \boldsymbol{X}_p^t's spatial information
+ 8: \boldsymbol{Y}_{p}^{t} \leftarrow \text{GAP}(\boldsymbol{X}_{p}^{t}) \in \mathbb{R}^{1 \times 1 \times C}
+ 9: // Derive the current layer's query, key and value via convolutions
+ 10: \boldsymbol{Q}^t \leftarrow \text{Conv1D}(\boldsymbol{Y}_p^t) \in \mathbb{R}^{1 \times 1 \times C}
+ 11: \boldsymbol{K}_{t,:}^t \leftarrow \text{Conv1D}(\boldsymbol{Y}_p^t) \in \mathbb{R}^{1 \times 1 \times C}
+ 12: \boldsymbol{V}_{t,:}^t \leftarrow \text{DWConv2D}(\boldsymbol{X}_p^t) \in \mathbb{R}^{1 \times 1 \times \sqrt{N} \times \sqrt{N} \times C}
+ 13: if t = 1 then
+ 14:   // First MRLA-base block
+ 15:   \boldsymbol{K}^t \leftarrow \boldsymbol{K}_{t,:}^t
+ 16:   \boldsymbol{V}^t \leftarrow \boldsymbol{V}_{t,:}^t
+ 17: else
+ 18:   // Concatenate with the previous key and value
+ 19:   \boldsymbol{K}^t \leftarrow \text{Concat}[\boldsymbol{K}^{t-1}, \boldsymbol{K}_{t,:}^t] \in \mathbb{R}^{1 \times t \times C}
+ 20:   \boldsymbol{V}^t \leftarrow \text{Concat}[\boldsymbol{V}^{t-1}, \boldsymbol{V}_{t,:}^t] \in \mathbb{R}^{1 \times t \times \sqrt{N} \times \sqrt{N} \times C}
+ 21: end if
+ 22: \boldsymbol{O}_p^t \leftarrow \text{Multi-head Attention}(\boldsymbol{Q}^t, \boldsymbol{K}^t, \boldsymbol{V}^t) where the number of heads is H
+ 23: // Reshape to the original dimension
+ 24: \boldsymbol{O}_p^t \leftarrow \text{Reshape}(\boldsymbol{O}_p^t) \in \mathbb{R}^{1 \times N \times C}
+ 25: \boldsymbol{O}^t \leftarrow \text{Concat}[\boldsymbol{X}_c^t, \boldsymbol{O}_p^t]
+ ```
84
+
85
+ ```
86
+ 1: Input: Output of the t-th CNN block \boldsymbol{X}^t \in \mathbb{R}^{1 \times H \times W \times C}, t-1-th MRLA-light block's output \boldsymbol{O}^{t-1} \in \mathbb{R}^{1 \times H \times W \times C}, Number of heads H, Learnable parameter \boldsymbol{\lambda}_o^t \in \mathbb{R}^{1 \times C}
87
+ 2: Output: \boldsymbol{O}^t \in \mathbb{R}^{1 \times H \times W \times C}
88
+ 3: // Summarize \boldsymbol{X}^t's spatial information
89
+ 4: \boldsymbol{Y}^t \leftarrow \operatorname{GAP}(\boldsymbol{X}^t) \in \mathbb{R}^{1 \times 1 \times C}
90
+ 5: // Derive the current layer's query, key and value via convolutions
91
+ 6: \boldsymbol{Q}^t \leftarrow \operatorname{Conv} 1D(\boldsymbol{Y}^t) \in \mathbb{R}^{1 \times 1 \times C}
92
+ 7: \boldsymbol{K}_{t,:}^t \leftarrow \operatorname{Conv} 1D(\boldsymbol{Y}^t) \in \mathbb{R}^{1 \times 1 \times C}
93
+ 8: \boldsymbol{V}_{t,:}^t \leftarrow \operatorname{DWConv} 2D(\boldsymbol{X}^t) \in \mathbb{R}^{1 \times 1 \times H \times W \times C}
94
+ 9: \tilde{\boldsymbol{O}}^t \leftarrow \operatorname{Multi-head} \operatorname{Attention}(\boldsymbol{Q}^t, \boldsymbol{K}_{t,:}^t, \boldsymbol{V}_{t,:}^t) where the number of heads is H
95
+ 10: \boldsymbol{O}^t \leftarrow \operatorname{Expand}(\boldsymbol{\lambda}_o^t) \odot \boldsymbol{O}^{t-1} + \tilde{\boldsymbol{O}}^t
96
+ ```
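+
+ For contrast with the base version, a minimal PyTorch sketch of one MRLA-light step for CNN features follows; it keeps only the previous layer's output, so the cost is linear in depth. The names are ours, and the softmax-free per-head weighting is an assumption implied by the linearized recurrence in line 10:
+
+ ```python
+ # Minimal sketch of one MRLA-light step (CNN version of the pseudocode above).
+ import torch
+ import torch.nn as nn
+ import torch.nn.functional as F
+
+ class MRLALightStep(nn.Module):
+     def __init__(self, channels: int, heads: int = 8, k_size: int = 3):
+         super().__init__()
+         self.heads, self.d = heads, channels // heads
+         self.q_conv = nn.Conv1d(1, 1, k_size, padding=k_size // 2)
+         self.k_conv = nn.Conv1d(1, 1, k_size, padding=k_size // 2)
+         self.v_conv = nn.Conv2d(channels, channels, 3, padding=1, groups=channels)
+         self.lam = nn.Parameter(torch.ones(channels))   # lambda_o^t, one scale per channel
+
+     def forward(self, x, o_prev):
+         # x, o_prev: (B, C, H, W); o_prev is the (t-1)-th block's output.
+         b, c, hh, ww = x.shape
+         y = F.adaptive_avg_pool2d(x, 1).view(b, 1, c)   # line 4: GAP
+         q = self.q_conv(y).view(b, self.heads, self.d)
+         k = self.k_conv(y).view(b, self.heads, self.d)
+         v = self.v_conv(x)
+         score = (q * k).sum(-1, keepdim=True) / self.d ** 0.5   # one scalar per head
+         o_cur = v.view(b, self.heads, self.d, hh, ww) * score[..., None, None]
+         # Line 10: rescale the previous output instead of attending to all layers.
+         return self.lam.view(1, c, 1, 1) * o_prev + o_cur.view(b, c, hh, ww)
+ ```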
97
+
98
+ ```
+ 1: Input: Output of the t-th transformer block \boldsymbol{X}^t \in \mathbb{R}^{1 \times (N+1) \times C}; the (t-1)-th MRLA-light block's output \boldsymbol{O}^{t-1} \in \mathbb{R}^{1 \times (N+1) \times C}; number of heads H; learnable parameter \boldsymbol{\lambda}_o^t \in \mathbb{R}^{1 \times C}
+ 2: Output: \boldsymbol{O}^t \in \mathbb{R}^{1 \times (N+1) \times C}
+ 3: // Split into the class token and patch tokens
+ 4: \boldsymbol{X}_{c}^{t} \in \mathbb{R}^{1 \times 1 \times C}, \boldsymbol{X}_{p}^{t} \in \mathbb{R}^{1 \times N \times C} \leftarrow \text{Split}(\boldsymbol{X}^{t})
+ 5: \boldsymbol{O}_{c}^{t-1} \in \mathbb{R}^{1 \times 1 \times C}, \boldsymbol{O}_{p}^{t-1} \in \mathbb{R}^{1 \times N \times C} \leftarrow \text{Split}(\boldsymbol{O}^{t-1})
+ 6: // Reshape
+ 7: \boldsymbol{X}_p^t \leftarrow \text{Reshape}(\boldsymbol{X}_p^t) \in \mathbb{R}^{1 \times \sqrt{N} \times \sqrt{N} \times C}
+ 8: // Summarize \boldsymbol{X}_p^t's spatial information
+ 9: \boldsymbol{Y}_{p}^{t} \leftarrow \text{GAP}(\boldsymbol{X}_{p}^{t}) \in \mathbb{R}^{1 \times 1 \times C}
+ 10: // Derive the current layer's query, key and value via convolutions
+ 11: \boldsymbol{Q}^t \leftarrow \text{Conv1D}(\boldsymbol{Y}_p^t) \in \mathbb{R}^{1 \times 1 \times C}
+ 12: \boldsymbol{K}_{t,:}^t \leftarrow \text{Conv1D}(\boldsymbol{Y}_p^t) \in \mathbb{R}^{1 \times 1 \times C}
+ 13: \boldsymbol{V}_{t,:}^t \leftarrow \text{GELU}(\text{DWConv2D}(\boldsymbol{X}_p^t)) \in \mathbb{R}^{1 \times 1 \times \sqrt{N} \times \sqrt{N} \times C}
+ 14: \tilde{\boldsymbol{O}}_p^t \leftarrow \text{Multi-head Attention}(\boldsymbol{Q}^t, \boldsymbol{K}_{t,:}^t, \boldsymbol{V}_{t,:}^t) where the number of heads is H
+ 15: // Reshape to the original dimension
+ 16: \tilde{\boldsymbol{O}}_p^t \leftarrow \text{Reshape}(\tilde{\boldsymbol{O}}_p^t) \in \mathbb{R}^{1 \times N \times C}
+ 17: \boldsymbol{O}_p^t \leftarrow \text{Expand}(\boldsymbol{\lambda}_o^t) \odot \boldsymbol{O}_p^{t-1} + \tilde{\boldsymbol{O}}_p^t
+ 18: \boldsymbol{O}^t \leftarrow \text{Concat}[\boldsymbol{X}_c^t, \boldsymbol{O}_p^t]
+ ```
118
+
119
+ Appendix B.4. Finally, visualizations of the feature maps/attention maps in ResNet50/DeiT and our MRLA counterparts are shown in Appendix B.5. All experiments are implemented on four Tesla V100 GPUs (32GB).
120
+
121
+ **ResNet** For training ResNets with our MRLA, we follow exactly the same data augmentation and hyper-parameter settings as the original ResNet. Specifically, the input images are randomly cropped to $224 \times 224$ with random horizontal flipping. The networks are trained from scratch using SGD with momentum of 0.9, weight decay of 1e-4, and a mini-batch size of 256. The models are trained for 100 epochs with an initial learning rate of 0.1, which is decreased by a factor of 10 every 30 epochs. Since the data augmentation and training settings used for ResNet are outdated and not as powerful as those used by other networks, strengthening layer interactions can lead to overfitting on ResNet. Pretraining on a larger dataset and using extra training settings could be an option; however, as most of our baseline models and the above attention models are not pretrained on larger datasets, these measures would result in an unfair comparison. Hence we use a more efficient strategy: applying stochastic depth (Huang et al., 2016) with survival probability of 0.8 only on our MRLA, the effects of which are discussed in the ablation study later.
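+
+ As a concrete reading of this schedule, a minimal PyTorch sketch follows (hyper-parameters exactly as stated above; `train_loader` is a placeholder for an ImageNet DataLoader, and a plain ResNet-50 stands in for the MRLA-augmented model):
+
+ ```python
+ # Minimal sketch of the stated schedule: SGD (momentum 0.9, weight decay 1e-4),
+ # lr 0.1 decayed 10x every 30 epochs, 100 epochs in total.
+ import torch
+ import torchvision
+
+ model = torchvision.models.resnet50()
+ optimizer = torch.optim.SGD(model.parameters(), lr=0.1,
+                             momentum=0.9, weight_decay=1e-4)
+ scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=30, gamma=0.1)
+ criterion = torch.nn.CrossEntropyLoss()
+
+ for epoch in range(100):
+     for images, targets in train_loader:   # assumed: standard ImageNet loader
+         optimizer.zero_grad()
+         loss = criterion(model(images), targets)
+         loss.backward()
+         optimizer.step()
+     scheduler.step()
+ ```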
122
+
123
+ **EfficientNet** For training EfficientNet with our MRLA, we follow the settings in EfficientNet. Specifically, networks are trained for 350 epochs using the RMSProp optimizer with momentum of 0.9, decay of 0.9, batch norm momentum of 0.99, weight decay of 4e-5 and a mini-batch size of 4096. The initial learning rate is set to 0.256, and is decayed by 0.97 every 2.4 epochs. Since our computational resources cannot support the original batch size, we linearly scale the initial learning rate and the batch size to 0.048 and 768, respectively. We also use AutoAugment (Cubuk et al., 2019), stochastic depth (Huang et al., 2016) with survival probability 0.8 and dropout (Srivastava et al., 2014) with ratio 0.2 for EfficientNet-B0 and EfficientNet-B1. Our MRLA shares the same stochastic depth with the building layer of EfficientNet, since it is natural to drop the MRLA block if the corresponding layer is dropped. We implement EfficientNets with these training tricks using the pytorch-image-models (timm) toolkit (Wightman, 2019) on 2x V100 GPUs. <sup>1</sup>
124
+
125
+ <sup>1</sup> License: Apache License 2.0
126
+
127
+ **DeiT, CeiT and PVTv2** We adopt the same training and augmentation strategy as in DeiT. All models are trained for 300 epochs using the AdamW optimizer with weight decay of 0.05. We use the cosine learning rate schedule and set the initial learning rate to 0.001 with a batch size of 1024. Five epochs are used to gradually warm up the learning rate at the beginning of training. We apply RandAugment (Cubuk et al., 2020), repeated augmentation (Hoffer et al., 2020), label smoothing (Szegedy et al., 2016) with $\epsilon = 0.1$, Mixup (Zhang et al., 2017) with 0.8 probability, Cutmix (Yun et al., 2019) with 1.0 probability and random erasing (Zhong et al., 2020) with 0.25 probability. Similarly, our MRLA shares the same probability of the stochastic depth with the MHSA and FFN layers of DeiT/CeiT/PVTv2. Note that since PVTv2 is a multi-stage architecture and the size of feature maps differs across stages, we perform layer attention within each stage. DeiT and CeiT have 12 layers in total; we partition them into three stages, each with four layers, and apply the MRLA within each stage.
128
+
129
+ Figure 5 visualizes the FLOPs induced by MRLA-light with respect to the input resolution. We compute the FLOPs of the baseline CeiT-T and of our MRLA-light counterpart and then derive their differences under various input resolutions. It can be observed that the complexity of MRLA-light is linear in the input resolution.
130
+
131
+ ![](_page_19_Figure_4.jpeg)
132
+
133
+ Figure 5: The FLOPs induced by MRLA-light with respect to input resolution.
134
+
135
+ **Layer-interaction-related Networks** We first compare our MRLAs with DenseNet (Huang et al., 2017), DIANet (Huang et al., 2020) and RLAgNet (Zhao et al., 2021) empirically. Their comparisons on the ImageNet-1K validation set are given in Table 5. Our MRLAs outperform DIANet and RLAgNet, all of which beat the DenseNet of similar model size.
136
+
137
+ **Other Relevant Networks** TDAM (Jaiswal et al., 2022) and BA-Net (Zhao et al., 2022) adopted implementation settings different from ours when training on ImageNet. For the baseline model, we used the results from the torchvision toolkit, while TDAM utilized those from pytorch-image-models (timm). Note that the latter implementation includes advanced design settings (e.g., three 3x3 Convs instead of a 7x7 Conv) and training tricks (e.g., a cosine learning rate schedule and label smoothing) to improve the performance of ResNets. BA-Net also applied a cosine learning rate schedule and label smoothing in its training process. Therefore, it would be unfair to directly compare the performances of TDAM, BA-Net and MRLAs using the current results. We reproduced the performance of BA-Net with our training settings and trained our model with the settings used in BA-Net. The results are given in Table 6, and we also compare performances on the object detection and instance segmentation tasks, even though we do not train our model with the better pre-trained weights.
2304.05939/main_diagram/main_diagram.drawio ADDED
The diff for this file is too large to render. See raw diff
 
2304.05939/paper_text/intro_method.md ADDED
@@ -0,0 +1,100 @@
1
+ # Introduction
2
+
3
+ Generative modelling aims to learn a data distribution $P_{D}$ from samples, such that new samples can be generated from the learned distribution, $x \sim P_{D}$. The learned distribution can be used for a variety of tasks, ranging from out-of-distribution detection (@asim2020blind) to serving as a prior for reconstruction tasks ([@tezcan2019]). One generative modelling approach of particular interest is the Variational Autoencoder (VAE), as introduced by @kingma2013auto. This approach is particularly interesting because it yields a lower dimensional latent model, which allows generating lower dimensional representations of samples, and its model parameters are determined by directly maximizing the ELBO of the training samples. The combination of these two points makes VAEs unique compared to other generative modelling approaches (@goodfellow2014generative, @rezende2015variational, @ho2020denoising, @brehmerneurips2020, @caterini_neurips2021).
4
+
5
+ One major drawback of VAEs is that they often produce blurry generative samples even though the images in the training distribution were sharp. This is due to the formulation of the optimization. Variational autoencoders are optimized by maximizing the evidence lower bound (ELBO) $\mathop{\mathbb{E}}_{z \sim q_{\phi}(z|x)} \log[p_{\theta}(x|z)] - D_{KL}[q_{\phi}(z|x)||p(z)]$ with respect to the network parameters $\theta$ and $\phi$. The first term is often referred to as the reconstruction loss and effectively ensures that an observed sample can be mapped to the latent space through the posterior model and reconstructed back from its possible latent representation. The second term matches the learned approximate posterior distribution, $q_{\phi}(z|x)$, with a prior distribution $p(z)$. The ELBO can also be formulated as minimizing the Kullback-Leibler divergence in the augmented space, where the data distribution is expanded with the auxiliary variable $z$, $D_{KL}[q_{D,\phi}(x,z)||p_{\theta}(x,z)]$, as described by @kingma2019introduction. It is clear that the model is penalized heavily if samples are likely under $q_{D,\phi}(x,z)$ but not under $p_{\theta}(x,z)$; however, not the other way around, due to the asymmetry of the Kullback-Leibler divergence. Therefore, $p_{\theta}(x,z)$ and thus $p_{\theta}(x)$ will have a larger variance than the original data distribution $q_{D,\phi}(x,z)$, leading to generated samples that are more diverse than the original data, in practice also including blurry images. To alleviate this issue, both $q_{\phi}(z|x)$ and $p_{\theta}(x|z)$ should be flexible enough, as has been the objective of other works (@berg2018sylvester, @kingma2016improved, @vahdat2020nvae).
6
+
7
+ In addition to the line of work on increasing the flexibility of the posterior $q_{\phi}(z|x)$, which often comes at the cost of being more difficult to optimize and computationally expensive, there is a line of work focusing on improving the reconstruction loss formulation. To keep the mathematical link to maximizing $p_{\theta}(x)$ for the observed samples through the ELBO formulation, a distribution $p_{\theta}(x|z)$ has to be assumed for the reconstruction loss. Popular choices for this distribution are Gaussian or Bernoulli, as introduced by @kingma2013auto. The Gaussian distribution, $p_{\theta}(x|z) = \mathcal{N}(\mu(z)_\theta,\,\Sigma)$, has seen widespread adoption (@castrejon2019improved, @lee2020stochastic) and leads to a simplification of the reconstruction loss to the mean squared error (MSE) if $\Sigma$ is assumed to be identity. Since the latent space $z$ has a lower dimensionality than $x$, a perfect reconstruction will not be possible, and the reconstruction loss determines which features of $x$ are weighted the most. The MSE places no extra weight on specific features; all features will be reconstructed with a similar weighting. Since most power in natural images is located in the lower frequencies of their spectrum [@van1996modelling], low frequency features, in other words blurry features, will dominate the reconstruction loss. Consequently, reconstruction fidelity in higher frequency features will be less important.
8
+
9
+ Several previous works have recognized the potential of augmenting the reconstruction loss to address the blur, but they approached the problem implicitly, aiming to retrieve more visually pleasing generations. One initial approach by @hou2017deep was to replace the pixel-wise loss with a feature based loss that is calculated with a pre-trained convolutional neural network (CNN). However, this loss is limited to the domain of images on which the CNN was pre-trained. It has also been suggested in the literature to combine generative adversarial networks with VAEs by replacing the reconstruction loss of the VAE with an adversarial loss (@larsen2016autoencoding). This approach comes at the expense of architecture changes and optimization challenges. Another approach by @barron2019general optimizes the shape of the loss function during training to increase robustness. While their loss formulation has shown improved results, it was not specifically focused on sharpening VAE generations. By learning the parameters of a Watson perceptual model and using it as a reconstruction loss, @czolbe2020loss have shown that generative examples of VAEs can be improved. Since human perception also values sharp examples, the proposed loss improved the sharpness of samples, but optimization with this loss is less stable than with other methods. Recently, @jiang2021focal tackled the blur problem directly in the frequency domain. They introduced the focal frequency loss, which applies a per-frequency weighting for the error term in the frequency domain, where the weighting depends on the magnitude of the error at that frequency. The above mentioned methods do not explicitly focus on reducing blur errors, and, except for the work by @barron2019general, the proposed reconstruction terms lose their mathematical link to maximizing the ELBO, since the distribution $p_{\theta}(x|z)$ is not defined.
10
+
11
+ In this paper we aim to explicitly minimize the blur error of VAEs through the reconstruction term, while still performing ELBO maximization for the observed samples. We derive a loss function that explicitly weights errors that are due to blur more than other errors. In our experiments, we show that the new loss function produces sharper images than other state-of-the-art reconstruction loss functions for VAEs on three different datasets.
12
+
13
+ In this section we review some background on blurring and image sharpening. The blurring degradation of a sharp image $x$ can be modeled by convolving $x$ with a blur kernel $k$: $\hat{x} = x*k$, where $\hat{x}$ denotes the degraded image. Assuming no additive noise and that the blur kernel does not suppress any frequencies, the blurring operation can be inverted in the frequency domain using the convolution theorem as $\mathcal{F}(\hat{x})/\mathcal{F}(k)= \mathcal{F}(x)$, where $\mathcal{F}$ is the Fourier transform. The sharp $x$ can then be obtained by applying the inverse Fourier transform to the ratio.
14
+
15
+ The assumptions that the blur kernel $k$ is known, that no noise was present during the blur generation, and that convolving with $k$ does not suppress any frequencies are strong. In the case that $k$ is not well known and $\mathcal{F}(k)$ has several terms close to zero, division by $\mathcal{F}(k)$ could produce extreme values. To provide a more stable inverse operation, one can use the Wiener deconvolution (@wiener1949extrapolation),
16
+
17
+ $$\begin{equation}
18
+ \label{eq:wiener}
19
+ \frac{\mathcal{F}^*(k)}{|\mathcal{F}(k)|^2+C}\mathcal{F}(\hat{x}) \approx \mathcal{F}(x),
20
+ \end{equation}$$
21
+
22
+ where $\mathcal{F}^*$ refers to the complex conjugate and $C$ is a constant that offsets values of $\mathcal{F}(k)$ that are close to zero and accounts for a low signal-to-noise ratio (SNR). In the presence of noise, $C$ is chosen as $1/$ SNR [@gonzalez2001digital; @wiener1949extrapolation], or as a constant.
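+
+ A minimal NumPy sketch of the equation above, assuming a known kernel $k$ and our own function names:
+
+ ```python
+ # Wiener deconvolution via the FFT, following the equation above.
+ import numpy as np
+
+ def wiener_deconvolve(x_blur: np.ndarray, k: np.ndarray, C: float = 1e-2) -> np.ndarray:
+     """Estimate the sharp image x from the blurred image, given the blur kernel k."""
+     H, W = x_blur.shape
+     kh, kw = k.shape
+     k_pad = np.zeros((H, W))
+     k_pad[:kh, :kw] = k
+     # Shift the kernel's center to the origin so the filter has zero phase.
+     k_pad = np.roll(k_pad, (-(kh // 2), -(kw // 2)), axis=(0, 1))
+     K = np.fft.fft2(k_pad)                                 # F(k)
+     W_f = np.conj(K) / (np.abs(K) ** 2 + C)                # the Wiener filter
+     return np.real(np.fft.ifft2(W_f * np.fft.fft2(x_blur)))
+ ```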
23
+
24
+ # Method
25
+
26
+ The motivation of our work is to reduce the blur in the generated outputs of variational autoencoders (VAEs). We propose to do this by weighting the reconstruction term to focus on errors induced by the blur. In VAEs the evidence lower bound (ELBO) is maximized for the observed samples, that are coming from the real data distribution $p(x)$. Assuming the constraint on the latent space is a zero-centered unit Gaussian, this leads to the following optimization problem, [@rezende2015variational], $$\begin{equation}
27
+ \begin{split}
28
+ &\max_{\theta, \phi} \ \mathop{\mathbb{E}}_{z \sim q_{\phi}(z|x)} \log[p_{\theta}(x|z)] - D_{KL}[q_{\phi}(z|x)||\mathcal{N}(0,\,\mathbb{I})],
29
+ \end{split}
30
+ \label{eq:ELBO}
31
+ \end{equation}$$ where $x$ and $z$ correspond to the input image and latent variable, respectively, and $\theta$ and $\phi$ denote the network parameters. By assuming that likelihood, $p_{\theta}(x|z)$ is a multi-variate Gaussian, the ELBO can be written as: $$\begin{equation}
32
+ \begin{split}
33
+ &\min_{\theta,\phi} \ \mathop{\mathbb{E}}_{z \sim q_{\phi}(z|x)} \log(|\Sigma_{\theta}(z)|^{\frac{1}{2}}) + \frac{1}{2}(x-\hat{x}_\theta(z))^{T}\Sigma_{\theta}(z)^{-1}(x-\hat{x}_\theta(z)) + D_{KL}[q_{\phi}(z|x)||\mathcal{N}(0,\mathbb{I})],
34
+ \end{split}
35
+ \label{eq:gaussian_reconstruction_general}
36
+ \end{equation}$$ where $\hat{x}_\theta(z)$ is the mean of the multi-variate Gaussian and $\Sigma_\theta(z)$ is its covariance. Generally, the covariance matrix $\Sigma_\theta(z)^{-1}$ is assumed to be the identity times a constant scalar, hence without dependence on $\theta$ or $z$, which has the advantage that the calculation of the determinant of $\Sigma_\theta(z)$ is not necessary. The covariance matrix can also be seen as a weighting term of the reconstruction loss relative to the KL-divergence. In the next sections we derive the proposed form for the covariance matrix.
37
+
38
+ To design a weighting map that targets errors that are caused by the blur, those errors need to be formalized. For the sake of notational simplicity, we will drop $z$ dependence of $\hat{x}_\theta$ and $\Sigma_\theta$ for now. Let us assume that the output of the VAE is a blurred version of the input image, i.e., $\hat{x}_\theta = x*k$. Therefore, the squared reconstruction error can be viewed as the squared distance between the input and the blurred version of the input. Using Parseval's identity [@parseval1806memoire], in Fourier space this corresponds to $$\begin{equation}
39
+ \begin{split}
40
+ & \|\mathcal{F}(e)\|^2 = \|\mathcal{F}(x) - \mathcal{F}(x)\cdot\mathcal{F}(k)\|^2 = \|\mathcal{F}(x)(1-\mathcal{F}(k))\|^2= \|\mathcal{F}(x)|1 - \mathcal{F}(k)|\|^2,
41
+ \end{split}
42
+ \label{eq:blur_input_error}
43
+ \end{equation}$$ where $e$ is the error term. We see that the magnitude of the blur error at any frequency is proportional to $|\mathcal{F}(x)||1 - \mathcal{F}(k)|$. The implication of this can best be understood if we imagine $k$ as a centered Gaussian blur with unit sum. Then, the Fourier transform of $k$ will also be a Gaussian with its largest value at the center of the Fourier space, i.e., $(u,v)=(0,0)$. Moreover, because $k$ sums up to 1, the largest value of $\mathcal{F}(k)$ will be smaller than 1. Therefore, for low frequencies $(u,v)\approx(0,0)$ the value of $\mathcal{F}(k)$ would be positive and high, yielding low $|1 - \mathcal{F}(k)|$. For higher frequencies the value of $\mathcal{F}(k)$ would be positive and low, yielding high $|1 - \mathcal{F}(k)|$. This suggests that the major contribution to the blur error comes from the higher frequencies, so minimizing the squared error should directly focus on removing the blur. However, it is crucial to note that analyses of the power spectra of natural images showed that the power of natural images drops with increasing frequency following the power law $1/w^\alpha$ with $\alpha\approx 2$ [@van1996modelling], where $w$ denotes an arbitrary convex combination of $u$ and $v$. Thus, even though the multiplicative factor $|1 - \mathcal{F}(k)|$ is larger for higher frequencies, because the image's power at higher frequencies is lower than at lower frequencies, their contributions to $|\mathcal{F}(e)|^2$ will still be smaller compared to lower frequencies. Hence, the reconstruction term will be dominated by the lower frequency errors. As a result, models obtained by minimizing $\|\mathcal{F}(e)\|$ will likely tolerate blurry reconstructions and generations.
44
+
45
+ Given the distribution of the blur induced errors, the next question is how to best weight the error term to emphasize blur errors due to the convolution with $k$. If $k$ is known, one way of weighting the errors would be with the inverse value of $\mathcal{F}(k)$. For small blur errors with $\mathcal{F}(k)\approx1$ the weighting would be $1$ and would not change the error term. However, as $|\mathcal{F}(k)|$ gets smaller, the error weighting increases. With any weight matrix in the frequency domain, one can compute a weighted reconstruction error as $$\begin{equation}
46
+ \begin{split}
47
+ &\| \mathcal{W}\cdot (\mathcal{F}(x)-\mathcal{F}(\hat{x}_\theta)) \|^2,
48
+ \end{split}
49
+ \label{eq:gaussian_recon_fourier}
50
+ \end{equation}$$ where $\mathcal{W}$ is a chosen weight matrix. To specifically weight the blur induced errors higher, we would like to choose $\mathcal{W}=\frac{1}{\mathcal{F}(k)}$, such that $$\begin{equation}
51
+ \begin{split}
52
+ & \| \frac{1}{\mathcal{F}(k)}\cdot (\mathcal{F}(x)-\mathcal{F}(\hat{x}_\theta)) \|^2.
53
+ \end{split}
54
+ \label{eq:naive_deconvolution}
55
+ \end{equation}$$ As mentioned before, this naive deconvolution of the reconstruction term would lead to instabilities for frequencies where $\mathcal{F}(k)\approx0$. To stabilize the chosen weighting method, we follow the Wiener deconvolution and add a constant $C$ as $$\begin{equation}
56
+ \begin{split}
57
+ &\| \frac{\mathcal{F}^*(k)}{|\mathcal{F}(k)|^2+C}\cdot (\mathcal{F}(x)-\mathcal{F}(\hat{x}_\theta)) \|^2.
58
+ \end{split}
59
+ \label{eq:wiener_deconvolution}
60
+ \end{equation}$$ Linking this to the VAE, a reconstruction loss in the Fourier domain corresponds to a Gaussian likelihood $p_\theta(x|z)$ with $\Sigma^{-1} = W^TW$, where $W$ corresponds to the matrix that encodes the convolution operation defined by the inverse Fourier transform of $\mathcal{W}$, i.e., $\mathcal{F}^{-1}(\mathcal{W})$. When $\mathcal{W}$ is defined in terms of the kernel $k$ as in Equation [\[eq:wiener_deconvolution\]](#eq:wiener_deconvolution){reference-type="ref" reference="eq:wiener_deconvolution"}, then we denote this dependence with a subscript, i.e., $\Sigma_k^{-1} = W_k^TW_k$ with $W_k$ being the matrix corresponding to convolution with $\mathcal{F}^{-1}(\frac{\mathcal{F}^*(k)}{|\mathcal{F}(k)|^2+C})$. As a result, using Parseval's identity, the $\log p_\theta(x|z)$ term needed for the computation and optimization of the ELBO can be written as $$\begin{equation}
61
+ \label{eq:new_likelihood}
62
+ \log p_\theta(x|z) = -\frac{D}{2}\log (2\pi) -\frac{1}{2}\log |\Sigma_k| -\frac{1}{2}\left\|\frac{\mathcal{F}^*(k)}{|\mathcal{F}(k)|^2+C}(\mathcal{F}(x) - \mathcal{F}(\hat{x}_\theta))\right\|^2.
63
+ \end{equation}$$
64
+
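+ A minimal NumPy sketch of the resulting reconstruction term (dropping the constant and log-determinant parts, and assuming `K` is the FFT of the padded, origin-centered kernel as in the sketch above):
+
+ ```python
+ # Wiener-weighted reconstruction error in the Fourier domain (illustrative).
+ import numpy as np
+
+ def weighted_recon_error(x: np.ndarray, x_hat: np.ndarray, K: np.ndarray, C: float = 1e-2) -> float:
+     W = np.conj(K) / (np.abs(K) ** 2 + C)     # per-frequency Wiener weighting
+     E = np.fft.fft2(x) - np.fft.fft2(x_hat)   # reconstruction error in Fourier space
+     # Division by the pixel count matches the spatial-domain norm (Parseval).
+     return float(0.5 * np.sum(np.abs(W * E) ** 2) / x.size)
+ ```
+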
65
+ To determine the weight matrix at a given iteration during optimization, we need a blur kernel $k$. To determine $k$ we utilize the assumption of image degradation via a convolution with a blur kernel, as in Section [2](#sec:background){reference-type="ref" reference="sec:background"}. Specifically, we find the kernel $k$ that minimizes the mean squared error between the input $x$ blurred with kernel $k$ and the output of the VAE, $\hat{x}_\theta$, $$\begin{equation}
66
+ \begin{split}
67
+ &\min_{k} \|x*k-\hat{x}_\theta\|^2.
68
+ \end{split}
69
+ \label{eq:K_blur_optimization}
70
+ \end{equation}$$ It is reasonable to assume that some images might be blurred more than others by the VAE, and that the amount of blur changes with the parameters of the networks; we confirm this in Section [\[sec:ablation\]](#sec:ablation){reference-type="ref" reference="sec:ablation"}. Therefore, $k$ has to be determined through this optimization after every iteration and for every image. To this end, we utilize a neural network, $G$, that takes as input a sample $z$ from $q(z|x)$ and outputs a kernel $k$. The optimization can consequently be written as $$\begin{equation}
71
+ \begin{split}
72
+ &\min_{\gamma} \|x*G_{\gamma}(z)-\hat{x}_\theta\|^2,
73
+ \end{split}
74
+ \label{eq:G_blur_optimization}
75
+ \end{equation}$$ where $k=G_\gamma(z)$ and $z\sim\mathcal{N}(0, I)$.
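+
+ A minimal PyTorch sketch of this kernel-estimation step; `KernelNet` is a hypothetical small network standing in for $G_\gamma$, and a single-channel image with batch size 1 is assumed for brevity:
+
+ ```python
+ # Fitting the blur kernel k = G_gamma(z) by minimizing ||x * k - x_hat||^2 (illustrative).
+ import torch
+ import torch.nn as nn
+ import torch.nn.functional as F
+
+ class KernelNet(nn.Module):
+     def __init__(self, z_dim: int, k_size: int = 11):
+         super().__init__()
+         self.k_size = k_size
+         self.net = nn.Sequential(nn.Linear(z_dim, 128), nn.ReLU(),
+                                  nn.Linear(128, k_size * k_size))
+
+     def forward(self, z):                      # z: (1, z_dim)
+         k = self.net(z).softmax(dim=-1)        # kernel entries are positive and sum to 1
+         return k.view(1, 1, self.k_size, self.k_size)
+
+ def kernel_loss(G, z, x, x_hat):
+     # x, x_hat: (1, 1, H, W); only G's parameters gamma receive gradients.
+     blurred = F.conv2d(x, G(z), padding=G.k_size // 2)
+     return F.mse_loss(blurred, x_hat.detach())
+ ```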
76
+
77
+ The non-trivial choice of $\Sigma$ for this method implies that the determinant $|\Sigma|$ needs to be accounted for in the likelihood calculation, Equation [\[eq:new_likelihood\]](#eq:new_likelihood){reference-type="ref" reference="eq:new_likelihood"}. Full computations of determinants are costly; nevertheless, matrices defined by convolution operations provide special opportunities. The convolution operation can be implemented as a matrix multiplication with an appropriate circulant matrix $K$ as $\textrm{vec}(k*x) = K\cdot \textrm{vec}(x)$, where $\textrm{vec}(\cdot)$ is the vectorization operation. If the kernel $k$ is two dimensional, this results in a block-circulant matrix with each block being a circulant matrix as well. Circulant and block-circulant matrices have the property that their determinant can be calculated analytically, as shown by @daviscirculant. In addition, the circulant matrices will be sparse if the size of $k$ is much smaller than that of $x$, which enables an efficient computation of the determinant.
78
+
79
+ In our case $\Sigma^{-1}$ is equal to $W_{k}^{T}W_{k}$, where $W_{k}$ represents the block-circulant matrix that corresponds to the Wiener deconvolution constructed with the kernel $k$. If we assume the constant $C$ is zero and $k$ does not suppress any frequencies for any $x$, then the deconvolution would be the inverse operation of the convolution, i.e., $\mathcal{W}_k = 1 / \mathcal{F}(k)$, and we can write $\mathcal{F}^{-1}(\mathcal{W}_k\mathcal{F}(k)\mathcal{F}(x)) = x,\ \forall x$. This implies $W_k\cdot\textrm{vec}(k*x) = \textrm{vec}(x), \forall x$ and $W_k\cdot K\cdot\textrm{vec}(x) = \textrm{vec}(x), \forall x$ with $K$ being the block-circulant matrix that corresponds to the convolution with $k$. Therefore, $W_k = K^{-1}$ and $\Sigma_k = (W_k^TW_k)^{-1} = KK^T$. Hence, the determinant of the covariance can be computed in terms of the determinant of $K$ as $|\Sigma_k| = |K|^2$. Using the properties of block-circulant matrices, this determinant can be easily computed. However, any blurring $k$ will likely suppress some frequencies, and therefore for optimization purposes $C$ has to be set to a small positive constant to avoid division by zero. The inverse of $W_k$ is then the block-circulant matrix that corresponds to $\frac{|\mathcal{F}(k)|^2+C}{\mathcal{F}^*(k)} = \mathcal{F}(k) + \frac{C}{\mathcal{F}^*(k)}$. $C/\mathcal{F}^*(k)$ cannot be explicitly computed. Instead, we approximate the inverse of $W_k$ as $K(\epsilon) = K + \epsilon \mathbb{I}$, with $K$ still being the block-circulant matrix corresponding to $k$. For $k$ that suppresses a large portion of the Fourier space, this approximation behaves similarly to the Wiener deconvolution in the sense that it does not allow frequencies to be suppressed completely, since the Fourier transform of the kernel that corresponds to $K(\epsilon)$ will never be zero.
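+
+ Since the eigenvalues of a (block-)circulant convolution matrix are the DFT coefficients of its padded kernel, the log-determinant of $K(\epsilon) = K + \epsilon\mathbb{I}$ reduces to one FFT; a minimal NumPy sketch (our own function name) follows:
+
+ ```python
+ # log|K + eps*I| for a block-circulant convolution matrix, via the FFT.
+ import numpy as np
+
+ def logdet_circulant(k_padded: np.ndarray, eps: float = 1e-3) -> float:
+     # F(delta) = 1, so adding eps*I shifts every eigenvalue F(k)_uv by eps.
+     eig = np.fft.fft2(k_padded) + eps
+     return float(np.sum(np.log(np.abs(eig))))
+ ```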
80
+
81
+ <figure id="fig:architecture" data-latex-placement="ht">
82
+ <embed src="Figures/architecture_deblurvae_image_v06.pdf" />
83
+ <figcaption>Sketch of the proposed approach for minimizing the blur error in VAEs. We illustrate the two losses that we minimize in the alternating optimization and the corresponding network parameters that are updated in different colors.</figcaption>
84
+ </figure>
85
+
86
+ In the previous section the dependency of $\Sigma$ on $z$ was omitted for notational simplicity. We showed that the inverse of $W_k$ can be approximated with $K + \epsilon \mathbb{I}$, which allows an estimate of the determinant of $\Sigma$. From Equation [\[eq:G_blur_optimization\]](#eq:G_blur_optimization){reference-type="ref" reference="eq:G_blur_optimization"}, we also saw that the kernel $k$ that constructs $K$ is dependent on $z$ and is modeled with a neural network $G_{\gamma}(z)$ with parameters $\gamma$. Thus, the determinant is influenced by both $z$ and $\epsilon$. If we assume $\epsilon$ to be small, then the determinant will be mainly dependent on $z$. However, if we assume $\epsilon$ to be large, then the identity will dominate the determinant calculation and the influence of $z$ on the determinant will be negligible. Applying the reconstruction term derived in Section [\[sec:weinerdeconvolution\]](#sec:weinerdeconvolution){reference-type="ref" reference="sec:weinerdeconvolution"} and estimating the determinant of $\Sigma$ through $K(G_{\gamma}(z), \epsilon)$ leads to the following optimization equations:
87
+
88
+ $$\begin{equation}
89
+ \begin{split}
90
+ &\min_{\theta, \phi} \ \log|K(G_{\gamma}(z), \epsilon)| + \frac{1}{2}\| \frac{\mathcal{F}^*[G_{\gamma}(z)]}{|\mathcal{F}[G_{\gamma}(z)]|^2+C}\cdot (\mathcal{F}(x)-\mathcal{F}(\hat{x}_\theta)) \|^2 + D_{KL}[q_{\phi}(z|x)||\mathcal{N}(0,\,\mathbb{I})],
91
+ \end{split}
92
+ \label{eq:ELBO_weighted}
93
+ \end{equation}$$ $$\begin{equation}
94
+ \begin{split}
95
+ &\min_{\gamma} \|x*G_{\gamma}(z)-\hat{x}_\theta\|^2,
96
+ \end{split}
97
+ \label{eq:K_optimization_ELBO}
98
+ \end{equation}$$
99
+
100
+ where alternating optimization is used. We note that after each iteration of Equation [\[eq:K_optimization_ELBO\]](#eq:K_optimization_ELBO){reference-type="ref" reference="eq:K_optimization_ELBO"}, the model for which we optimize the ELBO in Equation [\[eq:ELBO_weighted\]](#eq:ELBO_weighted){reference-type="ref" reference="eq:ELBO_weighted"} changes. Due to the alternating optimization, however, the update of the model is incremental, and in practice no convergence issues were observed. We illustrate a sketch of the proposed approach in Figure [1](#fig:architecture){reference-type="ref" reference="fig:architecture"}.
2304.06668/main_diagram/main_diagram.drawio ADDED
The diff for this file is too large to render. See raw diff
 
2304.06668/paper_text/intro_method.md ADDED
@@ -0,0 +1,85 @@
1
+ # Introduction
2
+
3
+ Interactive segmentation algorithms enable a user to annotate the objects of interest within a given image with the help of user interactions such as scribbles and clicks. Such algorithms have several advantages compared to fully automatic segmentation methods, since they enable a user to select and iteratively refine the objects of interest. Existing interactive segmentation methods [\[8,](#page-8-0) [28,](#page-9-0) [31,](#page-9-1) [41,](#page-9-2) [42\]](#page-9-3) formulate this task as a binary instance segmentation problem, where a single object of interest can be segmented and corrected using user clicks.
4
+
5
+ Most of these approaches use deep neural networks to generate the image features that are conditioned on the user clicks and previous predictions, and they require the image level features to be re-computed for every user interaction. While such a design has been proven to be effective, the runtime for processing each interaction is proportional to the size of the feature extractor used, since a forward pass through the network is needed per interaction [\[8,](#page-8-0)[28,](#page-9-0)[31,](#page-9-1)[41,](#page-9-2) [42\]](#page-9-3). Hence, these methods often have to limit their network sizes in order to achieve a good runtime performance and are thus not scalable in this respect.
6
+
7
+ In addition, the design decision to model interactive segmentation as a binary segmentation problem forces existing methods to approach multi-instance segmentation tasks as a sequence of single-instance problems, operating on separate (sometimes cropped and rescaled [\[8\]](#page-8-0)) image regions. Consequently, such methods need additional clicks if there are multiple similar foreground instances in an image, since each of those instances has to be processed separately with a disjoint set of user interactions, specifying the foreground and background. This is inefficient, since it is often the case that one object instance has to be considered as background for a different nearby instance, such that a refinement with a negative click becomes necessary for the current object of focus.
8
+
9
+ In this work, we improve on both of the above issues by proposing a Dynamic Multi-object Interactive segmentation Transformer (DynaMITe), a novel multi-instance approach for interactive segmentation that only requires a single forward pass through the feature extractor and that processes all relevant objects together, while learning a common background representation. Our approach is based on a novel Transformer-based iterative refinement architecture which determines instance level descriptors directly from the spatio-temporal click sequence. DynaMITe dynamically generates queries to the Transformer that are conditioned on the backbone features at the click locations. These queries are updated during the refinement process whenever the network receives a new click, prompting the Transformer to output a new multi-instance segmentation. Thus, DynaMITe removes the need to re-compute image-
10
+
11
+ <sup>\*</sup>Equal contribution.
12
+
13
+ <span id="page-1-0"></span>![](_page_1_Figure_0.jpeg)
14
+
15
+ Figure 1: DynaMITe processes multiple instances at once and models the background jointly. In this example, the false positive region on the camel in the second image is corrected automatically when the user chooses to segment it as foreground. DynaMITe is also able to correctly segment tiny structures, such as the camel's leash in the final segmentation mask.
16
+
17
+ level features for each user interaction, while making more effective use of user clicks by handling multiple object instances together.
18
+
19
+ The attention-based formulation of learning object representations from user interactions allows multiple objects to interact with each other and with the common background representations, thus enabling the network to estimate a better context from the input image. Fig. [1](#page-1-0) shows a typical example, highlighting DynaMITe's capability to segment all the relevant objects in the input image using few clicks. An advantage of such a network formulation can be directly seen in the third refinement iteration, where a positive click on the unsegmented camel instance automatically removes the false positive region that spilled over after segmenting a different camel instance nearby in the previous iteration. Existing approaches that perform sequential single-instance segmentation would have to first add a negative click to remove the false positive as part of the individual object refinement in the third iteration, thereby requiring additional annotation effort.
20
+
21
+ In order to enable quantitative evaluation, we also propose a novel multi-instance interactive segmentation task (MIST) and a corresponding evaluation strategy. Compared to single-instance segmentation, MIST has the added complexity of requiring decisions which object to click on next, which is significantly harder than just deciding where to click next in a given single-instance error region. In particular, different users may apply different next-object selection strategies, and it is important that an interactive segmentation method is robust to this and always performs well. Hence, we propose to evaluate against a set of several different (but still basic) click sampling heuristics that are intended to span the expected variability of user types.
22
+
23
+ In summary, we propose DynaMITe, a novel Transformer-based interactive segmentation method which uses a query bootstrapping mechanism to learn object representations from image-level features that are conditioned on the user interactions. We also model the iterative refinement process as temporal update steps for the queries to our Transformer module, which removes the need to re-compute image-level features. We evaluate DynaMITe on the standard interactive segmentation benchmarks and show that it performs competitively in the single-instance setting, while outperforming existing state-of-the-art methods on multi-instance tasks.
24
+
25
+ # Method
26
+
27
+ In a typical interactive segmentation process, the model first receives an input image along with the associated foreground (positive) clicks representing the objects that the user intends to segment. Based on this set of inputs, an interactive segmentation model predicts an initial set of segmentation masks corresponding to the clicks. These initial predictions are presented to the user, so that they can provide a corrective click (which can be positive or negative) that is used to refine the previous network predictions. This process is repeated until the user receives a set of non-overlapping segmentation masks of satisfactory quality.
28
+
29
+ Current state-of-the-art interactive segmentation models [\[8,](#page-8-0) [24,](#page-9-10) [31,](#page-9-1) [41,](#page-9-2) [42\]](#page-9-3) perform this task sequentially, as their networks can handle only one foreground object at a time. These methods mostly use click maps, which are updated every time a user provides a new click and are then used to obtain a localized feature map from the feature extractor. DynaMITe, on the other hand, can process multiple objects at once, and translates clicks to spatio-temporal data that is processed by an interactive transformer. This makes our model more efficient for mainly three reasons: (i) DynaMITe just needs a single forward pass through the feature extractor to segment all the relevant foreground instances; (ii) the background is modeled jointly, and hence it reduces redundancy in negative clicks; and (iii) by annotating multiple objects jointly, these do not need to be repeatedly modeled as background for other foreground objects.
30
+
31
+ Following the state-of-the-art Transformer-based segmentation methods [\[2,](#page-8-12) [9,](#page-8-7) [10,](#page-8-5) [49\]](#page-9-13), we use three basic components in our architecture: (i) a backbone network, (ii) a feature decoder, and (iii) a Transformer structure that processes the multi-scale image features from the feature decoder (Fig. [2\)](#page-2-0). Additionally, we also include a feature fusion module that fuses the multi-scale features to generate a fused feature map at the largest scale. Our main contribution lies in the Transformer structure, which learns localized object descriptors directly from the user interactions without the need to pass them through the entire feature extractor. This results in an interactive segmentation network that is not only efficient in processing the user interactions, but also more practical since it can process multiple object instances at once. Since DynaMITe can encode relationships between multiple objects in a given scene, it is naturally capable of segmenting multiple instances at once, which is a paradigm shift for interactive segmentation networks.
32
+
33
+ Fig. 2 shows the overall architecture of our network. It takes as input an RGB image $\mathcal{I} \in \mathbb{R}^{H \times W \times 3}$, and the corresponding set of user interactions $\mathcal{S}^t = \{c_1, c_2, ..., c_t\}$ at timestep $t \in \{1, \dots, \mathcal{T}\}$, where $\mathcal{T}$ is the maximum number of refinement iterations for $\mathcal{I}$. Following the classic formulation of interactive segmentation that is used by existing works, we model the user interactions as positive and negative clicks, where positive clicks are placed on the foreground objects and the negative clicks on the background. Hence $\mathcal{S}^t = \{\mathcal{S}_+^t, \mathcal{S}_-^t\}$, where $\mathcal{S}_+^t = \{P_1^t, P_2^t, ..., P_n^t\}$ denotes the positive clicks, and $\mathcal{S}_{-}^{t}=\{b_{1}^{t},b_{2}^{t},...,b_{m}^{t}\}$ denotes the set of negative clicks at time $t$. Here, $P_i^t \in \mathcal{S}_+^t$ is the set of positive clicks that belong to object $o_i \in \mathcal{O}$. For existing interactive segmentation methods, $|\mathcal{O}|$ is always 1 since they can process only one object at a time, which need not be the case for DynaMITe as it is capable of handling multiple objects concurrently. All $P_i^t \in \mathcal{S}_+^t$, as well as $\mathcal{S}_-^t$, are initialised as empty sets, and then updated when the user inputs a new click. The backbone processes $\mathcal{I}$ and extracts low-level features, which are then up-sampled by the feature decoder to produce feature maps $\mathcal{F} = \{f_{32}, f_{16}, f_8\}$ at multiple scales. These feature maps, along with the associated user interactions up to time $t$, are then processed by the interactive Transformer.
34
+
35
+ The goal of our interactive Transformer is to generate segmentation masks $\mathcal{M}^t = \{M_1^t, M_2^t, ..., M_n^t\}$ for all the relevant foreground objects at a given refinement timestep t, given the inputs $\mathcal{F}$ and the corresponding clicks $\mathcal{S}^t$ . These masks should be disjoint, i.e. $M_i^t \cap M_j^t = \emptyset$ for all $i \neq j$ .
+
+ **Dynamic Query Bootstrapping.** The queries used by the Transformer are dynamically generated using the input features $\mathcal{F}$ and the user clicks $\mathcal{S}^t$ . To do this, we first sample the point features at every spatial location represented by each user click in $\mathcal{S}^t$ from all the feature scales in $\mathcal{F}$ . Hence, if $Q^t$ denotes the set of queries at time t, then $q_j \in Q^t$ for click $c_j$ in $\mathcal{S}^t$ is generated as:
+
+ $$q_j = \frac{1}{|\mathcal{F}|} \sum_{f \in \mathcal{F}} f_{c_j}. \tag{1}$$
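+
+ The following sketch illustrates Eq. (1) for a single image; the tensor layout and the nearest-neighbor mapping of pixel clicks onto each feature grid are our assumptions for illustration:
+
+ ```python
+ import torch
+
+ def bootstrap_queries(features, clicks, image_size):
+     """features: list of (C, H_s, W_s) maps (e.g. strides 32/16/8); clicks: (N, 2) (x, y) pixels."""
+     H, W = image_size
+     queries = []
+     for x, y in clicks.tolist():
+         per_scale = []
+         for f in features:
+             _, h_s, w_s = f.shape
+             # map the pixel click onto this scale's grid
+             cx = min(int(x * w_s / W), w_s - 1)
+             cy = min(int(y * h_s / H), h_s - 1)
+             per_scale.append(f[:, cy, cx])
+         # Eq. (1): average the sampled point feature over all scales in F
+         queries.append(torch.stack(per_scale).mean(dim=0))
+     return torch.stack(queries)  # one (C,)-dim query per click -> (N, C)
+ ```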
+
+ During the refinement process, the network receives a new interaction $c_{t+1}$ at time step t+1, which is used to obtain an updated set of user clicks $\mathcal{S}^{t+1}$: if $c_{t+1}$ is a positive click, then it is added to the corresponding object-specific click set $P_j$ to obtain a new set of foreground clicks $\mathcal{S}_+^{t+1}$; otherwise it is added to $\mathcal{S}_-$ to obtain $\mathcal{S}_-^{t+1}$. $\mathcal{S}^{t+1}$ is then used to obtain the updated queries $Q^{t+1}$, using the same process as explained above. These queries are thus dynamically updated throughout the iterative process without the need to recompute $\mathcal{F}$, and the entire interactive segmentation process can work with multiple instances at once.
+
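+ As a small illustration of this bookkeeping (the container types and function name here are purely our own, not part of the method):
+
+ ```python
+ # Hypothetical click-state update; S_pos maps object ids to their click lists.
+ def add_click(S_pos, S_neg, click, obj_id=None):
+     """click: an (x, y, t) tuple; obj_id=None marks a background (negative) click."""
+     if obj_id is None:
+         S_neg.append(click)                          # extends S_- to S_-^{t+1}
+     else:
+         S_pos.setdefault(obj_id, []).append(click)   # extends P_j, giving S_+^{t+1}
+     return S_pos, S_neg   # queries are re-bootstrapped from the clicks; F is untouched
+ ```
+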
+ In addition to the dynamic queries, we include a set of K=9 learnable queries for modeling the background without the use of any user guidance. These static background queries learn generic background representations and reduce the number of background interactions a user has to perform. We also add a 3D positional encoding to $q_j$, where the first two dimensions represent the spatial location of the corresponding click in the image features and the third dimension represents the refinement timestep t.
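+
+ One simple way to realize such an encoding (this concrete sinusoidal formulation is our assumption for illustration; the text above only fixes the three encoded dimensions) is:
+
+ ```python
+ import math
+ import torch
+
+ def sinusoidal(pos, dim):
+     """Standard 1D sinusoidal encoding of a scalar position; dim must be even."""
+     freqs = torch.exp(torch.arange(0, dim, 2) * (-math.log(10000.0) / dim))
+     ang = pos * freqs
+     return torch.cat([torch.sin(ang), torch.cos(ang)])
+
+ def click_pos_encoding(x, y, t, dim):
+     """3D positional encoding over (x, y, t); assumes dim is divisible by 6."""
+     d = dim // 3
+     return torch.cat([sinusoidal(x, d), sinusoidal(y, d), sinusoidal(t, d)])
+ ```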
+
+ **Instance Encoder.** The DynaMITe instance encoder takes as input the queries $Q^t$ and the multi-scale feature maps $\mathcal{F}$. The encoder follows the structure of the masked attention Transformer decoder presented in [\[9\]](#page-8-7) and leverages its capability of processing multi-scale features, which is important for dense pixel prediction tasks. Its main purpose is to enhance the initial click-based queries, such that they become more discriminative. We use $L_e=9$ layers, which are grouped into 3 blocks, each of which processes successive feature scales from $\mathcal{F}$. Every Transformer layer in the encoder consists of a masked attention module, a self-attention module and a feedforward network. Hence, our encoder block performs the following operations:
+
+ $$Q_l \leftarrow \text{MaskedCrossAttn}(Q_l, \mathcal{F}, \mathcal{M}_{l-1}) + Q_{l-1}, \tag{2}$$
+
+ $$Q_l \leftarrow \text{SelfAttn}(Q_l) + Q_l, \tag{3}$$
+
+ $$Q_l \leftarrow \text{FFN}(Q_l) + Q_l. \tag{4}$$
+
+ Here, $Q_l$ represents the queries at the $l^{\text{th}}$ layer; $\mathcal{M}_{l-1}$ is the attention mask produced from the binarized mask predictions of the $(l-1)^{\text{th}}$ layer; SelfAttn is the multi-head self-attention module introduced in [45]; and FFN denotes a feedforward network. MaskedCrossAttn is a variant of cross-attention, where the attention operation is restricted to the foreground region represented by each query in the previous layer. Hence, each masked attention module performs the following operation:
+
+ $$Q_l = \operatorname{softmax}(\mathcal{M}_{l-1} + Q_l K_l^T) V_l + Q_{l-1}, \tag{5}$$
+
+ where $K_l$ and $V_l$ are the keys and values derived from $\mathcal{F}$ at the corresponding feature scale.
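+
+ A minimal sketch of one such encoder layer, combining Eqs. (2)-(4) with the masked attention of Eq. (5); the module granularity, shapes, and omitted projections/norms are our assumptions:
+
+ ```python
+ import torch.nn as nn
+
+ class InstanceEncoderLayer(nn.Module):
+     """One layer: masked cross-attention (Eqs. 2/5), self-attention (Eq. 3), FFN (Eq. 4)."""
+     def __init__(self, dim, heads=8):
+         super().__init__()
+         self.cross_attn = nn.MultiheadAttention(dim, heads, batch_first=True)
+         self.self_attn = nn.MultiheadAttention(dim, heads, batch_first=True)
+         self.ffn = nn.Sequential(nn.Linear(dim, 4 * dim), nn.ReLU(), nn.Linear(4 * dim, dim))
+
+     def forward(self, q, feats, attn_mask):
+         """q: (B, N, C) queries; feats: (B, HW, C); attn_mask: (B*heads, N, HW), True = masked out."""
+         q = q + self.cross_attn(q, feats, feats, attn_mask=attn_mask)[0]  # Eqs. (2)/(5)
+         q = q + self.self_attn(q, q, q)[0]                                # Eq. (3)
+         return q + self.ffn(q)                                            # Eq. (4)
+ ```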
+
+ **Decoder.** While the goal of the encoder is to update the instance-specific queries $Q$, the decoder updates the fused features $\mathcal{F}^{\mathcal{M}}$. The fused features are obtained from the feature fusion module, which takes the multi-scale decoder features $\mathcal{F}$ as input and generates a fused feature map at the largest scale of $\mathcal{F}$. The feature fusion module consists of a convolutional layer, followed by an up-sampling layer with skip connections to upsample low-resolution features from $\mathcal{F}$, which are then concatenated to generate $\mathcal{F}^{\mathcal{M}}$.
+
+ The decoder processes $\mathcal{F}^{\mathcal{M}}$ using a set of $L_d = 5$ Transformer layers. Each Transformer layer in our decoder consists of a cross-attention layer, followed by a feedforward network. Hence, each of the DynaMITe decoder layers performs the following operations:
+
+ $$F_l^M = \operatorname{softmax}(F_l^M K^T) V + F_{l-1}^M, \tag{6}$$
+
+ $$F_l^M = \text{FFN}(F_l^M) + F_l^M. \tag{7}$$
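+
+ One decoder layer of Eqs. (6)-(7) could be sketched as follows, with $K$ and $V$ projected from the encoder output (the projections and shapes are our assumptions):
+
+ ```python
+ import torch
+ import torch.nn as nn
+
+ class DecoderLayer(nn.Module):
+     """One layer of Eqs. (6)-(7): the fused pixel features attend to the queries."""
+     def __init__(self, dim):
+         super().__init__()
+         self.k_proj = nn.Linear(dim, dim)
+         self.v_proj = nn.Linear(dim, dim)
+         self.ffn = nn.Sequential(nn.Linear(dim, 4 * dim), nn.ReLU(), nn.Linear(4 * dim, dim))
+
+     def forward(self, fm, q_out):
+         """fm: (B, HW, C) fused features; q_out: (B, N, C) instance-encoder output."""
+         k, v = self.k_proj(q_out), self.v_proj(q_out)
+         attn = torch.softmax(fm @ k.transpose(1, 2), dim=-1)  # softmax(F_l^M K^T), Eq. (6)
+         fm = attn @ v + fm                                    # residual over F_{l-1}^M
+         return self.ffn(fm) + fm                              # Eq. (7)
+ ```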
+
+ Here $K$ and $V$ are again keys and values; however, they are obtained from the instance encoder's output $Q_{out}$ and not from $F^M$. To get the final output masks, we take a dot product of the updated mask features $F^M_{out}$ with the click-specific queries $Q_{out}$, as done in [\[9\]](#page-8-7), and obtain a set of output mask probabilities. Since we have more than one query representing both the objects and the background, we use a per-pixel max operation over the corresponding set of queries to obtain instance-specific masks for all the objects and the background. In the end, the discretized output masks are obtained by taking an argmax per pixel across the different instance predictions.
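+
+ The mask read-out described above can be sketched as follows (the query-to-instance grouping and tensor shapes are our assumptions):
+
+ ```python
+ import torch
+
+ def assemble_masks(mask_feats, queries, groups):
+     """mask_feats: (C, H, W); queries: (N, C); groups: one list of query indices
+     per instance, with the last group holding the K background queries."""
+     logits = torch.einsum("nc,chw->nhw", queries, mask_feats)  # dot product per query
+     # per-pixel max over each instance's (or the background's) set of queries
+     per_inst = torch.stack([logits[g].max(dim=0).values for g in groups])
+     return per_inst.argmax(dim=0)  # (H, W) id map of disjoint, discretized masks
+ ```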
+
+ Existing interactive segmentation approaches address multi-instance segmentation as a sequence of single-instance tasks. *I.e.*, they pick one instance at a time, and then refine it either until the mask has a satisfactory quality, or until they have exhausted a specific click budget $\tau$ for that object. If there are multiple foreground objects in a single image, these methods generate overlapping object masks which have to be merged in an additional post-processing step in order to obtain the final masks. Also, since these objects are processed individually with disjoint click sets, some clicks can be redundant at an image level. Hence, in this work we propose a novel multi-instance interactive segmentation task (MIST), where the goal of a user is to jointly annotate multiple object instances in the same input image.
+
+ Given an input image and a common set of user clicks, the MIST expects a corresponding method to generate non-overlapping instance masks for all relevant foreground objects. A major difference in this setting is that the background, and the corresponding negative clicks, are now common for all object instances. The MIST is a more challenging problem compared to the classical single-instance setting, since every refinement step can now lead to a positive click on any of the relevant objects or to a negative (background) click. Thus, extending an existing single-instance interactive segmentation method to the MIST is not trivial.
+
+ **Automatic Evaluation.** It is also important to note that the user click patterns for the MIST may differ considerably between users. As a result, simulating the MIST for automatic evaluation is a challenge of its own. In contrast to single-instance interactive segmentation benchmarks that have converged onto a deterministic next-click simulation strategy [\[8,](#page-8-0)[24,](#page-9-10)[31,](#page-9-1)[41,](#page-9-2)[42\]](#page-9-3), the refinement focus in the MIST may jump from one object to another in an arbitrary sequence, unless users are instructed to process the objects in an image according to a specific order. Since it is hard to predict what next-object/next-click selection strategies users will end up using in an actual interactive segmentation application, and since that choice will in turn depend on their impression of which strategies work best with the given segmentation method, it is not practical to assume a single, deterministic next-click simulation strategy. Instead, we postulate that a method that performs the MIST should ideally be robust against varying click patterns and next-object selection strategies. Hence, we propose a multi-fold evaluation based on three different next-object simulation patterns during refinement.
+
+ All of these click simulation strategies start by adding a single positive click to each of the foreground objects in that image to get an initial prediction. Based on this initial prediction, we choose an object $o_i$ according to one of the following strategies: (i) *best*: choose the object that has the best IoU, compared to the ground truth mask; (ii) *worst*: choose the object that has the worst IoU; and (iii) *random*: choose a random object. In each of these strategies, only the objects that have not yet achieved the required segmentation quality will be sampled. Next, we place a simulated click $c_t$ on the largest error region of $o_i$. $c_t$ can now be (i) a positive click on $o_i$; (ii) a negative click on the background; or (iii) a positive click on another object $o_j$. This process is repeated either until all the relevant objects are segmented, or until the image-level click budget $\tau$ is fully spent. We want to emphasize that we make no claim that these strategies (*best*, *worst*, *random*) are close to optimal (in fact, we discuss several more effective strategies in the supplementary material). Instead, we intend for them to span the variability of possible next-object selection strategies to ensure that evaluated approaches generalize well to different users.
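+
+ The simulation loop can be summarized by the sketch below; `center_click`, `iou`, and `next_click_on_largest_error` are assumed helper names that we introduce only for illustration:
+
+ ```python
+ import random
+
+ # center_click, iou, and next_click_on_largest_error are hypothetical helpers.
+ def simulate(image, gt_masks, model, strategy="random", tau=10, iou_thr=0.85):
+     """Multi-instance click simulation; returns the number of clicks spent."""
+     clicks = [center_click(m) for m in gt_masks]  # one positive seed click per object
+     budget = tau * len(gt_masks)                  # image-level click budget
+     while len(clicks) < budget:
+         preds = model(image, clicks)
+         ious = [iou(p, g) for p, g in zip(preds, gt_masks)]
+         todo = [i for i, v in enumerate(ious) if v < iou_thr]  # objects still open
+         if not todo:
+             break
+         if strategy == "best":
+             i = max(todo, key=lambda j: ious[j])
+         elif strategy == "worst":
+             i = min(todo, key=lambda j: ious[j])
+         else:  # "random"
+             i = random.choice(todo)
+         # the click targets the largest error region of o_i; it may turn out positive
+         # on o_i, negative on the background, or positive on another object o_j
+         clicks.append(next_click_on_largest_error(preds[i], gt_masks[i]))
+     return len(clicks)
+ ```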
+
+ **Evaluation Metric.** The standard metric used by existing interactive segmentation benchmarks [\[18,](#page-8-13) [32,](#page-9-15) [37,](#page-9-16) [40\]](#page-9-17) is the average number of clicks per object (NoC). Since the MIST is quite different from annotating instances individually, the per-object NoC metric would not serve as a good evaluation metric. Hence, we propose a new evaluation metric called Normalized Clicks per Image (NCI) for the multi-instance interactive segmentation task. NCI is an adaptation of NoC, where the number of clicks is now computed per image, instead of per object, and is then normalized by the number of foreground objects in the image. For NCI, we cap the number of clicks for an image based on the number of foreground objects. If an image has $|\mathcal{O}|$ foreground objects, then the total click budget for that image is $\tau \cdot |\mathcal{O}|$. Unlike the NoC metric, this cap is at an image level, and all of these clicks can be spent on a subset of objects if the corresponding algorithm so desires. Similar to the single-instance case, all objects that cannot be segmented to the desired quality level within this budget are marked as failure cases (counted as NFO), and the number of clicks for that image is set to the image-level click budget. In addition, we also mark an image as a failed image (counted as NFI) if there is at least one object within that image that could not be segmented.
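+
+ As a compact reading of the NCI/NFO/NFI definitions, a dataset-level aggregation could look like this (our own illustrative code, not from the paper):
+
+ ```python
+ def nci_metrics(per_image, tau=10):
+     """per_image: list of (clicks_used, num_objects, num_failed_objects) triples."""
+     nci, nfo, nfi = [], 0, 0
+     for clicks, n_obj, n_failed in per_image:
+         if n_failed > 0:            # at least one object missed the target quality
+             clicks = tau * n_obj    # failed images are charged the full budget
+             nfi += 1
+         nfo += n_failed
+         nci.append(clicks / n_obj)  # normalize clicks by the number of objects
+     return sum(nci) / len(nci), nfo, nfi
+ ```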
2305.13072/main_diagram/main_diagram.drawio ADDED
@@ -0,0 +1,503 @@
1
+ <mxfile host="app.diagrams.net" modified="2024-04-04T13:31:28.350Z" agent="Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/122.0.0.0 Safari/537.36" etag="wfV3gDNXV0KWF4r1vjlX" version="24.2.1" type="device">
2
+ <diagram name="Page-1" id="WkF3RqNHofpvs5QdU6xa">
3
+ <mxGraphModel dx="993" dy="543" grid="1" gridSize="10" guides="1" tooltips="1" connect="1" arrows="1" fold="1" page="0" pageScale="1" pageWidth="850" pageHeight="1100" math="1" shadow="0">
4
+ <root>
5
+ <mxCell id="0" />
6
+ <mxCell id="1" parent="0" />
7
+ <mxCell id="ygKiDZCdb5-kb39eUwfT-4" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" parent="1" source="ygKiDZCdb5-kb39eUwfT-1" target="ygKiDZCdb5-kb39eUwfT-3" edge="1">
8
+ <mxGeometry relative="1" as="geometry" />
9
+ </mxCell>
10
+ <mxCell id="ygKiDZCdb5-kb39eUwfT-1" value="$$\text{ResNet MLP with } \theta$$" style="rounded=0;whiteSpace=wrap;html=1;" parent="1" vertex="1">
11
+ <mxGeometry x="300" y="960" width="150" height="80" as="geometry" />
12
+ </mxCell>
13
+ <mxCell id="ygKiDZCdb5-kb39eUwfT-5" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;entryX=0;entryY=0.5;entryDx=0;entryDy=0;" parent="1" source="ygKiDZCdb5-kb39eUwfT-2" target="ygKiDZCdb5-kb39eUwfT-1" edge="1">
14
+ <mxGeometry relative="1" as="geometry" />
15
+ </mxCell>
16
+ <mxCell id="ygKiDZCdb5-kb39eUwfT-2" value="$$x$$" style="text;html=1;align=center;verticalAlign=middle;whiteSpace=wrap;rounded=0;" parent="1" vertex="1">
17
+ <mxGeometry x="250" y="985" width="40" height="30" as="geometry" />
18
+ </mxCell>
19
+ <mxCell id="g3-YzACpqpJ8vhrkj714-2" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" parent="1" source="ygKiDZCdb5-kb39eUwfT-3" target="g3-YzACpqpJ8vhrkj714-1" edge="1">
20
+ <mxGeometry relative="1" as="geometry" />
21
+ </mxCell>
22
+ <mxCell id="ygKiDZCdb5-kb39eUwfT-3" value="$$w(x; \theta)$$" style="text;html=1;align=center;verticalAlign=middle;whiteSpace=wrap;rounded=0;" parent="1" vertex="1">
23
+ <mxGeometry x="480" y="985" width="45" height="30" as="geometry" />
24
+ </mxCell>
25
+ <mxCell id="ygKiDZCdb5-kb39eUwfT-6" value="" style="endArrow=classic;html=1;rounded=0;exitX=0.5;exitY=1;exitDx=0;exitDy=0;entryX=0.5;entryY=1;entryDx=0;entryDy=0;" parent="1" source="ygKiDZCdb5-kb39eUwfT-2" target="g3-YzACpqpJ8vhrkj714-1" edge="1">
26
+ <mxGeometry width="50" height="50" relative="1" as="geometry">
27
+ <mxPoint x="430" y="1160" as="sourcePoint" />
28
+ <mxPoint x="580" y="1010" as="targetPoint" />
29
+ <Array as="points">
30
+ <mxPoint x="270" y="1060" />
31
+ <mxPoint x="430" y="1060" />
32
+ <mxPoint x="600" y="1060" />
33
+ </Array>
34
+ </mxGeometry>
35
+ </mxCell>
36
+ <mxCell id="g3-YzACpqpJ8vhrkj714-1" value="$$\hat y = x^T w(x; \theta)$$" style="text;html=1;align=center;verticalAlign=middle;whiteSpace=wrap;rounded=0;" parent="1" vertex="1">
37
+ <mxGeometry x="555" y="985" width="90" height="30" as="geometry" />
38
+ </mxCell>
39
+ <mxCell id="g3-YzACpqpJ8vhrkj714-3" value="" style="endArrow=none;dashed=1;html=1;dashPattern=1 3;strokeWidth=2;rounded=0;fillColor=#e1d5e7;strokeColor=#9673a6;" parent="1" edge="1">
40
+ <mxGeometry width="50" height="50" relative="1" as="geometry">
41
+ <mxPoint x="298" y="956" as="sourcePoint" />
42
+ <mxPoint x="270" y="900" as="targetPoint" />
43
+ </mxGeometry>
44
+ </mxCell>
45
+ <mxCell id="g3-YzACpqpJ8vhrkj714-4" value="" style="endArrow=none;dashed=1;html=1;dashPattern=1 3;strokeWidth=2;rounded=0;fillColor=#e1d5e7;strokeColor=#9673a6;" parent="1" edge="1">
46
+ <mxGeometry width="50" height="50" relative="1" as="geometry">
47
+ <mxPoint x="451" y="957" as="sourcePoint" />
48
+ <mxPoint x="480" y="900" as="targetPoint" />
49
+ </mxGeometry>
50
+ </mxCell>
51
+ <mxCell id="fbtBBihSDkiLwD9h4qN6-183" style="rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;entryX=0;entryY=0.5;entryDx=0;entryDy=0;" edge="1" parent="1" source="fbtBBihSDkiLwD9h4qN6-188" target="fbtBBihSDkiLwD9h4qN6-208">
52
+ <mxGeometry relative="1" as="geometry" />
53
+ </mxCell>
54
+ <mxCell id="fbtBBihSDkiLwD9h4qN6-184" style="rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;entryX=0;entryY=1;entryDx=0;entryDy=0;" edge="1" parent="1" source="fbtBBihSDkiLwD9h4qN6-188" target="fbtBBihSDkiLwD9h4qN6-201">
55
+ <mxGeometry relative="1" as="geometry" />
56
+ </mxCell>
57
+ <mxCell id="fbtBBihSDkiLwD9h4qN6-185" style="rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;entryX=0;entryY=0.5;entryDx=0;entryDy=0;" edge="1" parent="1" source="fbtBBihSDkiLwD9h4qN6-188" target="fbtBBihSDkiLwD9h4qN6-222">
58
+ <mxGeometry relative="1" as="geometry" />
59
+ </mxCell>
60
+ <mxCell id="fbtBBihSDkiLwD9h4qN6-186" style="rounded=0;orthogonalLoop=1;jettySize=auto;html=1;entryX=0;entryY=0.5;entryDx=0;entryDy=0;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="fbtBBihSDkiLwD9h4qN6-188" target="fbtBBihSDkiLwD9h4qN6-215">
61
+ <mxGeometry relative="1" as="geometry" />
62
+ </mxCell>
63
+ <mxCell id="fbtBBihSDkiLwD9h4qN6-187" style="rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;entryX=0;entryY=0.5;entryDx=0;entryDy=0;" edge="1" parent="1" source="fbtBBihSDkiLwD9h4qN6-188" target="fbtBBihSDkiLwD9h4qN6-229">
64
+ <mxGeometry relative="1" as="geometry" />
65
+ </mxCell>
66
+ <mxCell id="fbtBBihSDkiLwD9h4qN6-188" value="" style="ellipse;whiteSpace=wrap;html=1;aspect=fixed;fillColor=#FFFF66;" vertex="1" parent="1">
67
+ <mxGeometry x="241.66666666666666" y="732" width="26.66666666666666" height="26.66666666666666" as="geometry" />
68
+ </mxCell>
69
+ <mxCell id="fbtBBihSDkiLwD9h4qN6-189" style="rounded=0;orthogonalLoop=1;jettySize=auto;html=1;entryX=0;entryY=1;entryDx=0;entryDy=0;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="fbtBBihSDkiLwD9h4qN6-193" target="fbtBBihSDkiLwD9h4qN6-201">
70
+ <mxGeometry relative="1" as="geometry" />
71
+ </mxCell>
72
+ <mxCell id="fbtBBihSDkiLwD9h4qN6-190" style="rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;entryX=0;entryY=0.5;entryDx=0;entryDy=0;" edge="1" parent="1" source="fbtBBihSDkiLwD9h4qN6-193" target="fbtBBihSDkiLwD9h4qN6-222">
73
+ <mxGeometry relative="1" as="geometry" />
74
+ </mxCell>
75
+ <mxCell id="fbtBBihSDkiLwD9h4qN6-191" style="rounded=0;orthogonalLoop=1;jettySize=auto;html=1;entryX=0;entryY=0.5;entryDx=0;entryDy=0;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="fbtBBihSDkiLwD9h4qN6-193" target="fbtBBihSDkiLwD9h4qN6-215">
76
+ <mxGeometry relative="1" as="geometry" />
77
+ </mxCell>
78
+ <mxCell id="fbtBBihSDkiLwD9h4qN6-192" style="rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;entryX=0;entryY=0.5;entryDx=0;entryDy=0;" edge="1" parent="1" source="fbtBBihSDkiLwD9h4qN6-193" target="fbtBBihSDkiLwD9h4qN6-229">
79
+ <mxGeometry relative="1" as="geometry" />
80
+ </mxCell>
81
+ <mxCell id="fbtBBihSDkiLwD9h4qN6-193" value="" style="ellipse;whiteSpace=wrap;html=1;aspect=fixed;fillColor=#FFFF66;" vertex="1" parent="1">
82
+ <mxGeometry x="240.95666666666665" y="789.33" width="26.66666666666666" height="26.66666666666666" as="geometry" />
83
+ </mxCell>
84
+ <mxCell id="fbtBBihSDkiLwD9h4qN6-194" style="rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;entryX=0;entryY=0.5;entryDx=0;entryDy=0;" edge="1" parent="1" source="fbtBBihSDkiLwD9h4qN6-201" target="fbtBBihSDkiLwD9h4qN6-235">
85
+ <mxGeometry relative="1" as="geometry" />
86
+ </mxCell>
87
+ <mxCell id="fbtBBihSDkiLwD9h4qN6-195" style="rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;entryX=0;entryY=0.5;entryDx=0;entryDy=0;" edge="1" parent="1" source="fbtBBihSDkiLwD9h4qN6-201" target="fbtBBihSDkiLwD9h4qN6-241">
88
+ <mxGeometry relative="1" as="geometry" />
89
+ </mxCell>
90
+ <mxCell id="fbtBBihSDkiLwD9h4qN6-196" style="rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;entryX=0;entryY=0.5;entryDx=0;entryDy=0;" edge="1" parent="1" source="fbtBBihSDkiLwD9h4qN6-201" target="fbtBBihSDkiLwD9h4qN6-253">
91
+ <mxGeometry relative="1" as="geometry" />
92
+ </mxCell>
93
+ <mxCell id="fbtBBihSDkiLwD9h4qN6-197" style="rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;entryX=0;entryY=0.5;entryDx=0;entryDy=0;" edge="1" parent="1" source="fbtBBihSDkiLwD9h4qN6-201" target="fbtBBihSDkiLwD9h4qN6-247">
94
+ <mxGeometry relative="1" as="geometry" />
95
+ </mxCell>
96
+ <mxCell id="fbtBBihSDkiLwD9h4qN6-198" style="rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;entryX=0;entryY=0.5;entryDx=0;entryDy=0;" edge="1" parent="1" source="fbtBBihSDkiLwD9h4qN6-201" target="fbtBBihSDkiLwD9h4qN6-259">
97
+ <mxGeometry relative="1" as="geometry" />
98
+ </mxCell>
99
+ <mxCell id="fbtBBihSDkiLwD9h4qN6-199" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="fbtBBihSDkiLwD9h4qN6-201">
100
+ <mxGeometry relative="1" as="geometry">
101
+ <mxPoint x="328.1645569620253" y="693.5" as="targetPoint" />
102
+ </mxGeometry>
103
+ </mxCell>
104
+ <mxCell id="fbtBBihSDkiLwD9h4qN6-200" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=0.5;exitY=0;exitDx=0;exitDy=0;entryX=0.5;entryY=0;entryDx=0;entryDy=0;" edge="1" parent="1" source="fbtBBihSDkiLwD9h4qN6-201" target="fbtBBihSDkiLwD9h4qN6-266">
105
+ <mxGeometry relative="1" as="geometry">
106
+ <Array as="points">
107
+ <mxPoint x="310.86497890295357" y="676" />
108
+ <mxPoint x="418.9873417721519" y="676" />
109
+ </Array>
110
+ </mxGeometry>
111
+ </mxCell>
112
+ <mxCell id="fbtBBihSDkiLwD9h4qN6-201" value="" style="ellipse;whiteSpace=wrap;html=1;aspect=fixed;fillColor=#FF9933;" vertex="1" parent="1">
113
+ <mxGeometry x="293.56540084388183" y="679.5" width="26.66666666666666" height="26.66666666666666" as="geometry" />
114
+ </mxCell>
115
+ <mxCell id="fbtBBihSDkiLwD9h4qN6-202" style="rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;entryX=0;entryY=0.5;entryDx=0;entryDy=0;" edge="1" parent="1" source="fbtBBihSDkiLwD9h4qN6-208" target="fbtBBihSDkiLwD9h4qN6-235">
116
+ <mxGeometry relative="1" as="geometry" />
117
+ </mxCell>
118
+ <mxCell id="fbtBBihSDkiLwD9h4qN6-203" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;entryX=0;entryY=0.5;entryDx=0;entryDy=0;" edge="1" parent="1" source="fbtBBihSDkiLwD9h4qN6-208" target="fbtBBihSDkiLwD9h4qN6-241">
119
+ <mxGeometry relative="1" as="geometry" />
120
+ </mxCell>
121
+ <mxCell id="fbtBBihSDkiLwD9h4qN6-204" style="rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;entryX=0;entryY=0.5;entryDx=0;entryDy=0;" edge="1" parent="1" source="fbtBBihSDkiLwD9h4qN6-208" target="fbtBBihSDkiLwD9h4qN6-253">
122
+ <mxGeometry relative="1" as="geometry" />
123
+ </mxCell>
124
+ <mxCell id="fbtBBihSDkiLwD9h4qN6-205" style="rounded=0;orthogonalLoop=1;jettySize=auto;html=1;entryX=0;entryY=0.5;entryDx=0;entryDy=0;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="fbtBBihSDkiLwD9h4qN6-208" target="fbtBBihSDkiLwD9h4qN6-247">
125
+ <mxGeometry relative="1" as="geometry" />
126
+ </mxCell>
127
+ <mxCell id="fbtBBihSDkiLwD9h4qN6-206" style="rounded=0;orthogonalLoop=1;jettySize=auto;html=1;entryX=0;entryY=0.5;entryDx=0;entryDy=0;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="fbtBBihSDkiLwD9h4qN6-208" target="fbtBBihSDkiLwD9h4qN6-259">
128
+ <mxGeometry relative="1" as="geometry" />
129
+ </mxCell>
130
+ <mxCell id="fbtBBihSDkiLwD9h4qN6-207" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=0.5;exitY=0;exitDx=0;exitDy=0;entryX=0.5;entryY=0;entryDx=0;entryDy=0;" edge="1" parent="1" source="fbtBBihSDkiLwD9h4qN6-208" target="fbtBBihSDkiLwD9h4qN6-272">
131
+ <mxGeometry relative="1" as="geometry">
132
+ <Array as="points">
133
+ <mxPoint x="310.86497890295357" y="711" />
134
+ <mxPoint x="418.9873417721519" y="711" />
135
+ </Array>
136
+ </mxGeometry>
137
+ </mxCell>
138
+ <mxCell id="fbtBBihSDkiLwD9h4qN6-208" value="" style="ellipse;whiteSpace=wrap;html=1;aspect=fixed;fillColor=#FF9933;" vertex="1" parent="1">
139
+ <mxGeometry x="293.56540084388183" y="714.5" width="26.66666666666666" height="26.66666666666666" as="geometry" />
140
+ </mxCell>
141
+ <mxCell id="fbtBBihSDkiLwD9h4qN6-209" style="rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;entryX=0;entryY=0.5;entryDx=0;entryDy=0;" edge="1" parent="1" source="fbtBBihSDkiLwD9h4qN6-215" target="fbtBBihSDkiLwD9h4qN6-235">
142
+ <mxGeometry relative="1" as="geometry" />
143
+ </mxCell>
144
+ <mxCell id="fbtBBihSDkiLwD9h4qN6-210" style="rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;entryX=0;entryY=0.5;entryDx=0;entryDy=0;" edge="1" parent="1" source="fbtBBihSDkiLwD9h4qN6-215" target="fbtBBihSDkiLwD9h4qN6-241">
145
+ <mxGeometry relative="1" as="geometry" />
146
+ </mxCell>
147
+ <mxCell id="fbtBBihSDkiLwD9h4qN6-211" style="rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;entryX=0;entryY=0.5;entryDx=0;entryDy=0;" edge="1" parent="1" source="fbtBBihSDkiLwD9h4qN6-215" target="fbtBBihSDkiLwD9h4qN6-253">
148
+ <mxGeometry relative="1" as="geometry" />
149
+ </mxCell>
150
+ <mxCell id="fbtBBihSDkiLwD9h4qN6-212" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;entryX=0;entryY=0.5;entryDx=0;entryDy=0;" edge="1" parent="1" source="fbtBBihSDkiLwD9h4qN6-215" target="fbtBBihSDkiLwD9h4qN6-247">
151
+ <mxGeometry relative="1" as="geometry" />
152
+ </mxCell>
153
+ <mxCell id="fbtBBihSDkiLwD9h4qN6-213" style="rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;entryX=0;entryY=0.5;entryDx=0;entryDy=0;" edge="1" parent="1" source="fbtBBihSDkiLwD9h4qN6-215" target="fbtBBihSDkiLwD9h4qN6-259">
154
+ <mxGeometry relative="1" as="geometry" />
155
+ </mxCell>
156
+ <mxCell id="fbtBBihSDkiLwD9h4qN6-214" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=0.5;exitY=0;exitDx=0;exitDy=0;entryX=0.5;entryY=0;entryDx=0;entryDy=0;" edge="1" parent="1" source="fbtBBihSDkiLwD9h4qN6-215" target="fbtBBihSDkiLwD9h4qN6-278">
157
+ <mxGeometry relative="1" as="geometry">
158
+ <Array as="points">
159
+ <mxPoint x="310.86497890295357" y="781" />
160
+ <mxPoint x="418.9873417721519" y="781" />
161
+ </Array>
162
+ </mxGeometry>
163
+ </mxCell>
164
+ <mxCell id="fbtBBihSDkiLwD9h4qN6-215" value="" style="ellipse;whiteSpace=wrap;html=1;aspect=fixed;fillColor=#FF9933;" vertex="1" parent="1">
165
+ <mxGeometry x="293.56540084388183" y="782.75" width="26.66666666666666" height="26.66666666666666" as="geometry" />
166
+ </mxCell>
167
+ <mxCell id="fbtBBihSDkiLwD9h4qN6-216" style="rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;entryX=0;entryY=0.5;entryDx=0;entryDy=0;" edge="1" parent="1" source="fbtBBihSDkiLwD9h4qN6-222" target="fbtBBihSDkiLwD9h4qN6-235">
168
+ <mxGeometry relative="1" as="geometry" />
169
+ </mxCell>
170
+ <mxCell id="fbtBBihSDkiLwD9h4qN6-217" style="rounded=0;orthogonalLoop=1;jettySize=auto;html=1;entryX=0;entryY=0.5;entryDx=0;entryDy=0;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="fbtBBihSDkiLwD9h4qN6-222" target="fbtBBihSDkiLwD9h4qN6-241">
171
+ <mxGeometry relative="1" as="geometry" />
172
+ </mxCell>
173
+ <mxCell id="fbtBBihSDkiLwD9h4qN6-218" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;entryX=0;entryY=0.5;entryDx=0;entryDy=0;" edge="1" parent="1" source="fbtBBihSDkiLwD9h4qN6-222" target="fbtBBihSDkiLwD9h4qN6-253">
174
+ <mxGeometry relative="1" as="geometry" />
175
+ </mxCell>
176
+ <mxCell id="fbtBBihSDkiLwD9h4qN6-219" style="rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;entryX=0;entryY=0.5;entryDx=0;entryDy=0;" edge="1" parent="1" source="fbtBBihSDkiLwD9h4qN6-222" target="fbtBBihSDkiLwD9h4qN6-247">
177
+ <mxGeometry relative="1" as="geometry" />
178
+ </mxCell>
179
+ <mxCell id="fbtBBihSDkiLwD9h4qN6-220" style="rounded=0;orthogonalLoop=1;jettySize=auto;html=1;entryX=0;entryY=0.5;entryDx=0;entryDy=0;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="fbtBBihSDkiLwD9h4qN6-222" target="fbtBBihSDkiLwD9h4qN6-259">
180
+ <mxGeometry relative="1" as="geometry" />
181
+ </mxCell>
182
+ <mxCell id="fbtBBihSDkiLwD9h4qN6-221" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=0.5;exitY=0;exitDx=0;exitDy=0;entryX=0.5;entryY=0;entryDx=0;entryDy=0;" edge="1" parent="1" source="fbtBBihSDkiLwD9h4qN6-222" target="fbtBBihSDkiLwD9h4qN6-285">
183
+ <mxGeometry relative="1" as="geometry">
184
+ <Array as="points">
185
+ <mxPoint x="310.86497890295357" y="746" />
186
+ <mxPoint x="418.9873417721519" y="746" />
187
+ </Array>
188
+ </mxGeometry>
189
+ </mxCell>
190
+ <mxCell id="fbtBBihSDkiLwD9h4qN6-222" value="" style="ellipse;whiteSpace=wrap;html=1;aspect=fixed;fillColor=#FF9933;" vertex="1" parent="1">
191
+ <mxGeometry x="293.56540084388183" y="749.5" width="26.66666666666666" height="26.66666666666666" as="geometry" />
192
+ </mxCell>
193
+ <mxCell id="fbtBBihSDkiLwD9h4qN6-223" style="rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;entryX=0;entryY=0.5;entryDx=0;entryDy=0;" edge="1" parent="1" source="fbtBBihSDkiLwD9h4qN6-229" target="fbtBBihSDkiLwD9h4qN6-235">
194
+ <mxGeometry relative="1" as="geometry" />
195
+ </mxCell>
196
+ <mxCell id="fbtBBihSDkiLwD9h4qN6-224" style="rounded=0;orthogonalLoop=1;jettySize=auto;html=1;entryX=0;entryY=0.5;entryDx=0;entryDy=0;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="fbtBBihSDkiLwD9h4qN6-229" target="fbtBBihSDkiLwD9h4qN6-241">
197
+ <mxGeometry relative="1" as="geometry" />
198
+ </mxCell>
199
+ <mxCell id="fbtBBihSDkiLwD9h4qN6-225" style="rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;entryX=0;entryY=0.5;entryDx=0;entryDy=0;" edge="1" parent="1" source="fbtBBihSDkiLwD9h4qN6-229" target="fbtBBihSDkiLwD9h4qN6-253">
200
+ <mxGeometry relative="1" as="geometry" />
201
+ </mxCell>
202
+ <mxCell id="fbtBBihSDkiLwD9h4qN6-226" style="rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;entryX=0;entryY=0.5;entryDx=0;entryDy=0;" edge="1" parent="1" source="fbtBBihSDkiLwD9h4qN6-229" target="fbtBBihSDkiLwD9h4qN6-247">
203
+ <mxGeometry relative="1" as="geometry" />
204
+ </mxCell>
205
+ <mxCell id="fbtBBihSDkiLwD9h4qN6-227" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;entryX=0;entryY=0.5;entryDx=0;entryDy=0;" edge="1" parent="1" source="fbtBBihSDkiLwD9h4qN6-229" target="fbtBBihSDkiLwD9h4qN6-259">
206
+ <mxGeometry relative="1" as="geometry" />
207
+ </mxCell>
208
+ <mxCell id="fbtBBihSDkiLwD9h4qN6-228" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=0.5;exitY=0;exitDx=0;exitDy=0;entryX=0.5;entryY=0;entryDx=0;entryDy=0;" edge="1" parent="1" source="fbtBBihSDkiLwD9h4qN6-229" target="fbtBBihSDkiLwD9h4qN6-291">
209
+ <mxGeometry relative="1" as="geometry">
210
+ <Array as="points">
211
+ <mxPoint x="310.86497890295357" y="812.5" />
212
+ <mxPoint x="418.9873417721519" y="812.5" />
213
+ </Array>
214
+ </mxGeometry>
215
+ </mxCell>
216
+ <mxCell id="fbtBBihSDkiLwD9h4qN6-229" value="" style="ellipse;whiteSpace=wrap;html=1;aspect=fixed;fillColor=#FF9933;" vertex="1" parent="1">
217
+ <mxGeometry x="293.56540084388183" y="816" width="26.66666666666666" height="26.66666666666666" as="geometry" />
218
+ </mxCell>
219
+ <mxCell id="fbtBBihSDkiLwD9h4qN6-230" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;entryX=0;entryY=0.5;entryDx=0;entryDy=0;" edge="1" parent="1" source="fbtBBihSDkiLwD9h4qN6-235" target="fbtBBihSDkiLwD9h4qN6-266">
220
+ <mxGeometry relative="1" as="geometry" />
221
+ </mxCell>
222
+ <mxCell id="fbtBBihSDkiLwD9h4qN6-231" style="rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;entryX=0;entryY=0.5;entryDx=0;entryDy=0;" edge="1" parent="1" source="fbtBBihSDkiLwD9h4qN6-235" target="fbtBBihSDkiLwD9h4qN6-272">
223
+ <mxGeometry relative="1" as="geometry" />
224
+ </mxCell>
225
+ <mxCell id="fbtBBihSDkiLwD9h4qN6-232" style="rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;entryX=0;entryY=0.5;entryDx=0;entryDy=0;" edge="1" parent="1" source="fbtBBihSDkiLwD9h4qN6-235" target="fbtBBihSDkiLwD9h4qN6-285">
226
+ <mxGeometry relative="1" as="geometry" />
227
+ </mxCell>
228
+ <mxCell id="fbtBBihSDkiLwD9h4qN6-233" style="rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;entryX=0;entryY=0.5;entryDx=0;entryDy=0;" edge="1" parent="1" source="fbtBBihSDkiLwD9h4qN6-235" target="fbtBBihSDkiLwD9h4qN6-278">
229
+ <mxGeometry relative="1" as="geometry" />
230
+ </mxCell>
231
+ <mxCell id="fbtBBihSDkiLwD9h4qN6-234" style="rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;entryX=0;entryY=0.5;entryDx=0;entryDy=0;" edge="1" parent="1" source="fbtBBihSDkiLwD9h4qN6-235" target="fbtBBihSDkiLwD9h4qN6-291">
232
+ <mxGeometry relative="1" as="geometry" />
233
+ </mxCell>
234
+ <mxCell id="fbtBBihSDkiLwD9h4qN6-235" value="" style="ellipse;whiteSpace=wrap;html=1;aspect=fixed;fillColor=#66CC00;" vertex="1" parent="1">
235
+ <mxGeometry x="349.789029535865" y="679.5" width="26.66666666666666" height="26.66666666666666" as="geometry" />
236
+ </mxCell>
237
+ <mxCell id="fbtBBihSDkiLwD9h4qN6-236" style="rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;entryX=0;entryY=0.5;entryDx=0;entryDy=0;" edge="1" parent="1" source="fbtBBihSDkiLwD9h4qN6-241" target="fbtBBihSDkiLwD9h4qN6-266">
238
+ <mxGeometry relative="1" as="geometry" />
239
+ </mxCell>
240
+ <mxCell id="fbtBBihSDkiLwD9h4qN6-237" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;entryX=0;entryY=0.5;entryDx=0;entryDy=0;" edge="1" parent="1" source="fbtBBihSDkiLwD9h4qN6-241" target="fbtBBihSDkiLwD9h4qN6-272">
241
+ <mxGeometry relative="1" as="geometry" />
242
+ </mxCell>
243
+ <mxCell id="fbtBBihSDkiLwD9h4qN6-238" style="rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;entryX=0;entryY=0.5;entryDx=0;entryDy=0;" edge="1" parent="1" source="fbtBBihSDkiLwD9h4qN6-241" target="fbtBBihSDkiLwD9h4qN6-285">
244
+ <mxGeometry relative="1" as="geometry" />
245
+ </mxCell>
246
+ <mxCell id="fbtBBihSDkiLwD9h4qN6-239" style="rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;entryX=0;entryY=0.5;entryDx=0;entryDy=0;" edge="1" parent="1" source="fbtBBihSDkiLwD9h4qN6-241" target="fbtBBihSDkiLwD9h4qN6-278">
247
+ <mxGeometry relative="1" as="geometry" />
248
+ </mxCell>
249
+ <mxCell id="fbtBBihSDkiLwD9h4qN6-240" style="rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;entryX=0;entryY=0.5;entryDx=0;entryDy=0;" edge="1" parent="1" source="fbtBBihSDkiLwD9h4qN6-241" target="fbtBBihSDkiLwD9h4qN6-291">
250
+ <mxGeometry relative="1" as="geometry" />
251
+ </mxCell>
252
+ <mxCell id="fbtBBihSDkiLwD9h4qN6-326" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0;exitDx=0;exitDy=0;" edge="1" parent="1" source="fbtBBihSDkiLwD9h4qN6-241">
253
+ <mxGeometry relative="1" as="geometry">
254
+ <mxPoint x="380" y="690" as="targetPoint" />
255
+ </mxGeometry>
256
+ </mxCell>
257
+ <mxCell id="fbtBBihSDkiLwD9h4qN6-241" value="" style="ellipse;whiteSpace=wrap;html=1;aspect=fixed;fillColor=#66CC00;" vertex="1" parent="1">
258
+ <mxGeometry x="349.789029535865" y="714.5" width="26.66666666666666" height="26.66666666666666" as="geometry" />
259
+ </mxCell>
260
+ <mxCell id="fbtBBihSDkiLwD9h4qN6-242" style="rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;entryX=0;entryY=0.5;entryDx=0;entryDy=0;" edge="1" parent="1" source="fbtBBihSDkiLwD9h4qN6-247" target="fbtBBihSDkiLwD9h4qN6-266">
261
+ <mxGeometry relative="1" as="geometry" />
262
+ </mxCell>
263
+ <mxCell id="fbtBBihSDkiLwD9h4qN6-243" style="rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;entryX=0;entryY=0.5;entryDx=0;entryDy=0;" edge="1" parent="1" source="fbtBBihSDkiLwD9h4qN6-247" target="fbtBBihSDkiLwD9h4qN6-272">
264
+ <mxGeometry relative="1" as="geometry" />
265
+ </mxCell>
266
+ <mxCell id="fbtBBihSDkiLwD9h4qN6-244" style="rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;entryX=0;entryY=0.5;entryDx=0;entryDy=0;" edge="1" parent="1" source="fbtBBihSDkiLwD9h4qN6-247" target="fbtBBihSDkiLwD9h4qN6-285">
267
+ <mxGeometry relative="1" as="geometry" />
268
+ </mxCell>
269
+ <mxCell id="fbtBBihSDkiLwD9h4qN6-245" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;entryX=0;entryY=0.5;entryDx=0;entryDy=0;" edge="1" parent="1" source="fbtBBihSDkiLwD9h4qN6-247" target="fbtBBihSDkiLwD9h4qN6-278">
270
+ <mxGeometry relative="1" as="geometry" />
271
+ </mxCell>
272
+ <mxCell id="fbtBBihSDkiLwD9h4qN6-246" style="rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;entryX=0;entryY=0.5;entryDx=0;entryDy=0;" edge="1" parent="1" source="fbtBBihSDkiLwD9h4qN6-247" target="fbtBBihSDkiLwD9h4qN6-291">
273
+ <mxGeometry relative="1" as="geometry" />
274
+ </mxCell>
275
+ <mxCell id="fbtBBihSDkiLwD9h4qN6-247" value="" style="ellipse;whiteSpace=wrap;html=1;aspect=fixed;fillColor=#66CC00;" vertex="1" parent="1">
276
+ <mxGeometry x="349.789029535865" y="782.75" width="26.66666666666666" height="26.66666666666666" as="geometry" />
277
+ </mxCell>
278
+ <mxCell id="fbtBBihSDkiLwD9h4qN6-248" style="rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;entryX=0;entryY=0.5;entryDx=0;entryDy=0;" edge="1" parent="1" source="fbtBBihSDkiLwD9h4qN6-253" target="fbtBBihSDkiLwD9h4qN6-266">
279
+ <mxGeometry relative="1" as="geometry" />
280
+ </mxCell>
281
+ <mxCell id="fbtBBihSDkiLwD9h4qN6-249" style="rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;entryX=0;entryY=0.5;entryDx=0;entryDy=0;" edge="1" parent="1" source="fbtBBihSDkiLwD9h4qN6-253" target="fbtBBihSDkiLwD9h4qN6-272">
282
+ <mxGeometry relative="1" as="geometry" />
283
+ </mxCell>
284
+ <mxCell id="fbtBBihSDkiLwD9h4qN6-250" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="fbtBBihSDkiLwD9h4qN6-253" target="fbtBBihSDkiLwD9h4qN6-285">
285
+ <mxGeometry relative="1" as="geometry" />
286
+ </mxCell>
287
+ <mxCell id="fbtBBihSDkiLwD9h4qN6-251" style="rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;entryX=0;entryY=0.5;entryDx=0;entryDy=0;" edge="1" parent="1" source="fbtBBihSDkiLwD9h4qN6-253" target="fbtBBihSDkiLwD9h4qN6-278">
288
+ <mxGeometry relative="1" as="geometry" />
289
+ </mxCell>
290
+ <mxCell id="fbtBBihSDkiLwD9h4qN6-252" style="rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;entryX=0;entryY=0.5;entryDx=0;entryDy=0;" edge="1" parent="1" source="fbtBBihSDkiLwD9h4qN6-253" target="fbtBBihSDkiLwD9h4qN6-291">
291
+ <mxGeometry relative="1" as="geometry" />
292
+ </mxCell>
293
+ <mxCell id="fbtBBihSDkiLwD9h4qN6-253" value="" style="ellipse;whiteSpace=wrap;html=1;aspect=fixed;fillColor=#66CC00;" vertex="1" parent="1">
294
+ <mxGeometry x="349.789029535865" y="749.5" width="26.66666666666666" height="26.66666666666666" as="geometry" />
295
+ </mxCell>
296
+ <mxCell id="fbtBBihSDkiLwD9h4qN6-254" style="rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;entryX=0;entryY=0.5;entryDx=0;entryDy=0;" edge="1" parent="1" source="fbtBBihSDkiLwD9h4qN6-259" target="fbtBBihSDkiLwD9h4qN6-266">
297
+ <mxGeometry relative="1" as="geometry" />
298
+ </mxCell>
299
+ <mxCell id="fbtBBihSDkiLwD9h4qN6-255" style="rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;entryX=0;entryY=0.5;entryDx=0;entryDy=0;" edge="1" parent="1" source="fbtBBihSDkiLwD9h4qN6-259" target="fbtBBihSDkiLwD9h4qN6-272">
300
+ <mxGeometry relative="1" as="geometry" />
301
+ </mxCell>
302
+ <mxCell id="fbtBBihSDkiLwD9h4qN6-256" style="rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;entryX=0;entryY=0.5;entryDx=0;entryDy=0;" edge="1" parent="1" source="fbtBBihSDkiLwD9h4qN6-259" target="fbtBBihSDkiLwD9h4qN6-285">
303
+ <mxGeometry relative="1" as="geometry" />
304
+ </mxCell>
305
+ <mxCell id="fbtBBihSDkiLwD9h4qN6-257" style="rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;entryX=0;entryY=0.5;entryDx=0;entryDy=0;" edge="1" parent="1" source="fbtBBihSDkiLwD9h4qN6-259" target="fbtBBihSDkiLwD9h4qN6-278">
306
+ <mxGeometry relative="1" as="geometry" />
307
+ </mxCell>
308
+ <mxCell id="fbtBBihSDkiLwD9h4qN6-258" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;entryX=0;entryY=0.5;entryDx=0;entryDy=0;" edge="1" parent="1" source="fbtBBihSDkiLwD9h4qN6-259" target="fbtBBihSDkiLwD9h4qN6-291">
309
+ <mxGeometry relative="1" as="geometry" />
310
+ </mxCell>
311
+ <mxCell id="fbtBBihSDkiLwD9h4qN6-259" value="" style="ellipse;whiteSpace=wrap;html=1;aspect=fixed;fillColor=#66CC00;" vertex="1" parent="1">
312
+ <mxGeometry x="349.789029535865" y="816" width="26.66666666666666" height="26.66666666666666" as="geometry" />
313
+ </mxCell>
314
+ <mxCell id="fbtBBihSDkiLwD9h4qN6-260" style="rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;entryX=0;entryY=0.5;entryDx=0;entryDy=0;" edge="1" parent="1" source="fbtBBihSDkiLwD9h4qN6-266" target="fbtBBihSDkiLwD9h4qN6-292">
315
+ <mxGeometry relative="1" as="geometry" />
316
+ </mxCell>
317
+ <mxCell id="fbtBBihSDkiLwD9h4qN6-262" style="rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;entryX=0;entryY=0.5;entryDx=0;entryDy=0;" edge="1" parent="1" source="fbtBBihSDkiLwD9h4qN6-266" target="fbtBBihSDkiLwD9h4qN6-294">
318
+ <mxGeometry relative="1" as="geometry" />
319
+ </mxCell>
320
+ <mxCell id="fbtBBihSDkiLwD9h4qN6-263" style="rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;entryX=0;entryY=0.5;entryDx=0;entryDy=0;" edge="1" parent="1" source="fbtBBihSDkiLwD9h4qN6-266" target="fbtBBihSDkiLwD9h4qN6-298">
321
+ <mxGeometry relative="1" as="geometry" />
322
+ </mxCell>
323
+ <mxCell id="fbtBBihSDkiLwD9h4qN6-265" style="rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;entryX=0;entryY=0.5;entryDx=0;entryDy=0;" edge="1" parent="1" source="fbtBBihSDkiLwD9h4qN6-266" target="fbtBBihSDkiLwD9h4qN6-300">
324
+ <mxGeometry relative="1" as="geometry" />
325
+ </mxCell>
326
+ <mxCell id="fbtBBihSDkiLwD9h4qN6-266" value="" style="ellipse;whiteSpace=wrap;html=1;aspect=fixed;fillColor=#66CC00;" vertex="1" parent="1">
327
+ <mxGeometry x="401.68776371308013" y="679.5" width="26.66666666666666" height="26.66666666666666" as="geometry" />
328
+ </mxCell>
329
+ <mxCell id="fbtBBihSDkiLwD9h4qN6-267" style="rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;entryX=0;entryY=0.5;entryDx=0;entryDy=0;" edge="1" parent="1" source="fbtBBihSDkiLwD9h4qN6-272" target="fbtBBihSDkiLwD9h4qN6-292">
330
+ <mxGeometry relative="1" as="geometry" />
331
+ </mxCell>
332
+ <mxCell id="fbtBBihSDkiLwD9h4qN6-268" style="rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;entryX=0;entryY=0.5;entryDx=0;entryDy=0;" edge="1" parent="1" source="fbtBBihSDkiLwD9h4qN6-272" target="fbtBBihSDkiLwD9h4qN6-294">
333
+ <mxGeometry relative="1" as="geometry" />
334
+ </mxCell>
335
+ <mxCell id="fbtBBihSDkiLwD9h4qN6-269" style="rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;entryX=0;entryY=0.5;entryDx=0;entryDy=0;" edge="1" parent="1" source="fbtBBihSDkiLwD9h4qN6-272" target="fbtBBihSDkiLwD9h4qN6-298">
336
+ <mxGeometry relative="1" as="geometry" />
337
+ </mxCell>
338
+ <mxCell id="fbtBBihSDkiLwD9h4qN6-271" style="rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;entryX=0;entryY=0.5;entryDx=0;entryDy=0;" edge="1" parent="1" source="fbtBBihSDkiLwD9h4qN6-272" target="fbtBBihSDkiLwD9h4qN6-300">
339
+ <mxGeometry relative="1" as="geometry" />
340
+ </mxCell>
341
+ <mxCell id="fbtBBihSDkiLwD9h4qN6-272" value="" style="ellipse;whiteSpace=wrap;html=1;aspect=fixed;fillColor=#66CC00;" vertex="1" parent="1">
342
+ <mxGeometry x="401.68776371308013" y="714.5" width="26.66666666666666" height="26.66666666666666" as="geometry" />
343
+ </mxCell>
344
+ <mxCell id="fbtBBihSDkiLwD9h4qN6-273" style="rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;entryX=0;entryY=0.5;entryDx=0;entryDy=0;" edge="1" parent="1" source="fbtBBihSDkiLwD9h4qN6-278" target="fbtBBihSDkiLwD9h4qN6-292">
345
+ <mxGeometry relative="1" as="geometry" />
346
+ </mxCell>
347
+ <mxCell id="fbtBBihSDkiLwD9h4qN6-275" style="rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;entryX=0;entryY=0.5;entryDx=0;entryDy=0;" edge="1" parent="1" source="fbtBBihSDkiLwD9h4qN6-278" target="fbtBBihSDkiLwD9h4qN6-294">
348
+ <mxGeometry relative="1" as="geometry" />
349
+ </mxCell>
350
+ <mxCell id="fbtBBihSDkiLwD9h4qN6-277" style="rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;entryX=0;entryY=0.5;entryDx=0;entryDy=0;" edge="1" parent="1" source="fbtBBihSDkiLwD9h4qN6-278" target="fbtBBihSDkiLwD9h4qN6-300">
351
+ <mxGeometry relative="1" as="geometry" />
352
+ </mxCell>
353
+ <mxCell id="fbtBBihSDkiLwD9h4qN6-278" value="" style="ellipse;whiteSpace=wrap;html=1;aspect=fixed;fillColor=#66CC00;" vertex="1" parent="1">
354
+ <mxGeometry x="401.68776371308013" y="782.75" width="26.66666666666666" height="26.66666666666666" as="geometry" />
355
+ </mxCell>
356
+ <mxCell id="fbtBBihSDkiLwD9h4qN6-279" style="rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;entryX=0;entryY=0.5;entryDx=0;entryDy=0;" edge="1" parent="1" source="fbtBBihSDkiLwD9h4qN6-285" target="fbtBBihSDkiLwD9h4qN6-292">
357
+ <mxGeometry relative="1" as="geometry" />
358
+ </mxCell>
359
+ <mxCell id="fbtBBihSDkiLwD9h4qN6-281" style="rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;entryX=0;entryY=0.5;entryDx=0;entryDy=0;" edge="1" parent="1" source="fbtBBihSDkiLwD9h4qN6-285" target="fbtBBihSDkiLwD9h4qN6-294">
360
+ <mxGeometry relative="1" as="geometry" />
361
+ </mxCell>
362
+ <mxCell id="fbtBBihSDkiLwD9h4qN6-282" style="rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;entryX=0;entryY=0.5;entryDx=0;entryDy=0;" edge="1" parent="1" source="fbtBBihSDkiLwD9h4qN6-285" target="fbtBBihSDkiLwD9h4qN6-298">
363
+ <mxGeometry relative="1" as="geometry" />
364
+ </mxCell>
365
+ <mxCell id="fbtBBihSDkiLwD9h4qN6-284" style="rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;entryX=0;entryY=0.5;entryDx=0;entryDy=0;" edge="1" parent="1" source="fbtBBihSDkiLwD9h4qN6-285" target="fbtBBihSDkiLwD9h4qN6-300">
366
+ <mxGeometry relative="1" as="geometry" />
367
+ </mxCell>
368
+ <mxCell id="fbtBBihSDkiLwD9h4qN6-285" value="" style="ellipse;whiteSpace=wrap;html=1;aspect=fixed;fillColor=#66CC00;" vertex="1" parent="1">
369
+ <mxGeometry x="401.68776371308013" y="749.5" width="26.66666666666666" height="26.66666666666666" as="geometry" />
370
+ </mxCell>
371
+ <mxCell id="fbtBBihSDkiLwD9h4qN6-286" style="rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;entryX=0;entryY=0.5;entryDx=0;entryDy=0;" edge="1" parent="1" source="fbtBBihSDkiLwD9h4qN6-291" target="fbtBBihSDkiLwD9h4qN6-292">
372
+ <mxGeometry relative="1" as="geometry" />
373
+ </mxCell>
374
+ <mxCell id="fbtBBihSDkiLwD9h4qN6-288" style="rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;entryX=0;entryY=0.5;entryDx=0;entryDy=0;" edge="1" parent="1" source="fbtBBihSDkiLwD9h4qN6-291" target="fbtBBihSDkiLwD9h4qN6-294">
375
+ <mxGeometry relative="1" as="geometry" />
376
+ </mxCell>
377
+ <mxCell id="fbtBBihSDkiLwD9h4qN6-290" style="rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;entryX=0;entryY=0.5;entryDx=0;entryDy=0;" edge="1" parent="1" source="fbtBBihSDkiLwD9h4qN6-291" target="fbtBBihSDkiLwD9h4qN6-300">
378
+ <mxGeometry relative="1" as="geometry" />
379
+ </mxCell>
380
+ <mxCell id="fbtBBihSDkiLwD9h4qN6-291" value="" style="ellipse;whiteSpace=wrap;html=1;aspect=fixed;fillColor=#66CC00;" vertex="1" parent="1">
381
+ <mxGeometry x="401.68776371308013" y="816" width="26.66666666666666" height="26.66666666666666" as="geometry" />
382
+ </mxCell>
383
+ <mxCell id="fbtBBihSDkiLwD9h4qN6-292" value="" style="ellipse;whiteSpace=wrap;html=1;aspect=fixed;fillColor=#66B2FF;" vertex="1" parent="1">
384
+ <mxGeometry x="453.58649789029533" y="635.33" width="26.66666666666666" height="26.66666666666666" as="geometry" />
385
+ </mxCell>
386
+ <mxCell id="fbtBBihSDkiLwD9h4qN6-294" value="" style="ellipse;whiteSpace=wrap;html=1;aspect=fixed;fillColor=#66B2FF;" vertex="1" parent="1">
387
+ <mxGeometry x="453.58649789029533" y="697" width="26.66666666666666" height="26.66666666666666" as="geometry" />
388
+ </mxCell>
389
+ <mxCell id="fbtBBihSDkiLwD9h4qN6-298" value="" style="ellipse;whiteSpace=wrap;html=1;aspect=fixed;fillColor=#66B2FF;" vertex="1" parent="1">
390
+ <mxGeometry x="453.58649789029533" y="798.5" width="26.66666666666666" height="26.66666666666666" as="geometry" />
391
+ </mxCell>
392
+ <mxCell id="fbtBBihSDkiLwD9h4qN6-300" value="" style="ellipse;whiteSpace=wrap;html=1;aspect=fixed;fillColor=#66B2FF;" vertex="1" parent="1">
393
+ <mxGeometry x="453.33649789029533" y="853.41" width="26.66666666666666" height="26.66666666666666" as="geometry" />
394
+ </mxCell>
395
+ <mxCell id="fbtBBihSDkiLwD9h4qN6-301" value="&lt;font style=&quot;font-size: 12px;&quot;&gt;Input&amp;nbsp;Features&lt;/font&gt;" style="text;html=1;align=center;verticalAlign=middle;whiteSpace=wrap;rounded=0;" vertex="1" parent="1">
396
+ <mxGeometry x="231.50156118143462" y="704" width="25.949367088607588" height="10.5" as="geometry" />
397
+ </mxCell>
398
+ <mxCell id="fbtBBihSDkiLwD9h4qN6-302" style="rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;entryX=-0.017;entryY=0.593;entryDx=0;entryDy=0;entryPerimeter=0;" edge="1" parent="1" source="fbtBBihSDkiLwD9h4qN6-193" target="fbtBBihSDkiLwD9h4qN6-208">
399
+ <mxGeometry relative="1" as="geometry" />
400
+ </mxCell>
401
+ <mxCell id="fbtBBihSDkiLwD9h4qN6-303" value="&lt;font style=&quot;font-size: 12px;&quot;&gt;Input&amp;nbsp;Layer&lt;/font&gt;" style="text;html=1;align=center;verticalAlign=middle;whiteSpace=wrap;rounded=0;" vertex="1" parent="1">
402
+ <mxGeometry x="293.5636286919831" y="651.5" width="25.949367088607588" height="10.5" as="geometry" />
403
+ </mxCell>
404
+ <mxCell id="fbtBBihSDkiLwD9h4qN6-305" value="" style="endArrow=classic;startArrow=classic;html=1;rounded=0;" edge="1" parent="1">
405
+ <mxGeometry width="50" height="50" relative="1" as="geometry">
406
+ <mxPoint x="234.01" y="723.67" as="sourcePoint" />
407
+ <mxPoint x="275.2758227848101" y="723.67" as="targetPoint" />
408
+ </mxGeometry>
409
+ </mxCell>
410
+ <mxCell id="fbtBBihSDkiLwD9h4qN6-306" value="" style="endArrow=classic;startArrow=classic;html=1;rounded=0;" edge="1" parent="1">
411
+ <mxGeometry width="50" height="50" relative="1" as="geometry">
412
+ <mxPoint x="289.5987341772152" y="670.75" as="sourcePoint" />
413
+ <mxPoint x="324.19789029535866" y="670.75" as="targetPoint" />
414
+ </mxGeometry>
415
+ </mxCell>
416
+ <mxCell id="fbtBBihSDkiLwD9h4qN6-307" value="" style="endArrow=classic;startArrow=classic;html=1;rounded=0;" edge="1" parent="1">
417
+ <mxGeometry width="50" height="50" relative="1" as="geometry">
418
+ <mxPoint x="347.62481012658225" y="670.75" as="sourcePoint" />
419
+ <mxPoint x="425.4729113924051" y="670.75" as="targetPoint" />
420
+ </mxGeometry>
421
+ </mxCell>
422
+ <mxCell id="fbtBBihSDkiLwD9h4qN6-309" value="&lt;font style=&quot;font-size: 14px;&quot;&gt;TabResNet Backbone&lt;/font&gt;" style="text;html=1;align=center;verticalAlign=middle;whiteSpace=wrap;rounded=0;" vertex="1" parent="1">
423
+ <mxGeometry x="284" y="613" width="140.7" height="10.5" as="geometry" />
424
+ </mxCell>
425
+ <mxCell id="fbtBBihSDkiLwD9h4qN6-311" style="rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;entryX=0.033;entryY=0.378;entryDx=0;entryDy=0;entryPerimeter=0;" edge="1" parent="1" source="fbtBBihSDkiLwD9h4qN6-278" target="fbtBBihSDkiLwD9h4qN6-298">
426
+ <mxGeometry relative="1" as="geometry" />
427
+ </mxCell>
428
+ <mxCell id="fbtBBihSDkiLwD9h4qN6-312" value="" style="endArrow=classic;startArrow=classic;html=1;rounded=0;" edge="1" parent="1">
429
+ <mxGeometry width="50" height="50" relative="1" as="geometry">
430
+ <mxPoint x="449.6173839662447" y="620" as="sourcePoint" />
431
+ <mxPoint x="484.21654008438816" y="620" as="targetPoint" />
432
+ </mxGeometry>
433
+ </mxCell>
434
+ <mxCell id="fbtBBihSDkiLwD9h4qN6-313" value="&lt;p style=&quot;margin-bottom: 0in; line-height: 1px; background: transparent; font-size: 12px; text-align: start;&quot;&gt;$$w_{1, 0}$$&lt;/p&gt;" style="text;html=1;align=center;verticalAlign=middle;whiteSpace=wrap;rounded=0;fontSize=12;" vertex="1" parent="1">
435
+ <mxGeometry x="489.0506329113924" y="643.41" width="25.949367088607588" height="10.5" as="geometry" />
436
+ </mxCell>
437
+ <mxCell id="fbtBBihSDkiLwD9h4qN6-314" value="&lt;p style=&quot;margin-bottom: 0in; line-height: 1px; background: transparent; font-size: 12px; text-align: start;&quot;&gt;$$w_{1, M }$$&lt;/p&gt;" style="text;html=1;align=center;verticalAlign=middle;whiteSpace=wrap;rounded=0;fontSize=12;" vertex="1" parent="1">
438
+ <mxGeometry x="489.0506329113924" y="705.08" width="25.949367088607588" height="10.5" as="geometry" />
439
+ </mxCell>
440
+ <mxCell id="fbtBBihSDkiLwD9h4qN6-316" value="&lt;p style=&quot;margin-bottom: 0in; line-height: 1px; background: transparent; font-size: 12px; text-align: start;&quot;&gt;$$w_{C, 0}$$&lt;/p&gt;" style="text;html=1;align=center;verticalAlign=middle;whiteSpace=wrap;rounded=0;fontSize=12;" vertex="1" parent="1">
441
+ <mxGeometry x="489.0506329113924" y="806.58" width="25.949367088607588" height="10.5" as="geometry" />
442
+ </mxCell>
443
+ <mxCell id="fbtBBihSDkiLwD9h4qN6-319" style="rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;entryX=0;entryY=0.5;entryDx=0;entryDy=0;" edge="1" parent="1" source="fbtBBihSDkiLwD9h4qN6-291" target="fbtBBihSDkiLwD9h4qN6-298">
444
+ <mxGeometry relative="1" as="geometry">
445
+ <mxPoint x="440.6118143459915" y="833.5" as="sourcePoint" />
446
+ <mxPoint x="457.9113924050632" y="854.5" as="targetPoint" />
447
+ </mxGeometry>
448
+ </mxCell>
449
+ <mxCell id="fbtBBihSDkiLwD9h4qN6-333" value="" style="endArrow=classic;startArrow=classic;html=1;rounded=0;" edge="1" parent="1">
450
+ <mxGeometry width="50" height="50" relative="1" as="geometry">
451
+ <mxPoint x="282.60540084388185" y="636.33" as="sourcePoint" />
452
+ <mxPoint x="432.61" y="636.33" as="targetPoint" />
453
+ </mxGeometry>
454
+ </mxCell>
455
+ <mxCell id="fbtBBihSDkiLwD9h4qN6-304" value="&lt;font style=&quot;font-size: 12px;&quot;&gt;TabResNet Block&lt;/font&gt;" style="text;html=1;align=center;verticalAlign=middle;whiteSpace=wrap;rounded=0;" vertex="1" parent="1">
456
+ <mxGeometry x="340" y="650.17" width="100.21" height="10.5" as="geometry" />
457
+ </mxCell>
458
+ <mxCell id="fbtBBihSDkiLwD9h4qN6-336" value="" style="group" vertex="1" connectable="0" parent="1">
459
+ <mxGeometry x="251.84999999999997" y="763.5" width="5.6" height="19.25" as="geometry" />
460
+ </mxCell>
461
+ <mxCell id="fbtBBihSDkiLwD9h4qN6-295" value="" style="ellipse;whiteSpace=wrap;html=1;aspect=fixed;fillColor=#000000;fontSize=7;" vertex="1" parent="fbtBBihSDkiLwD9h4qN6-336">
462
+ <mxGeometry width="3.685470085470085" height="3.685470085470085" as="geometry" />
463
+ </mxCell>
464
+ <mxCell id="fbtBBihSDkiLwD9h4qN6-296" value="" style="ellipse;whiteSpace=wrap;html=1;aspect=fixed;fillColor=#000000;fontSize=7;" vertex="1" parent="fbtBBihSDkiLwD9h4qN6-336">
465
+ <mxGeometry y="15.280927835051546" width="3.685470085470085" height="3.685470085470085" as="geometry" />
466
+ </mxCell>
467
+ <mxCell id="fbtBBihSDkiLwD9h4qN6-297" value="" style="ellipse;whiteSpace=wrap;html=1;aspect=fixed;fillColor=#000000;fontSize=7;" vertex="1" parent="fbtBBihSDkiLwD9h4qN6-336">
468
+ <mxGeometry y="7.640463917525773" width="3.685470085470085" height="3.685470085470085" as="geometry" />
469
+ </mxCell>
470
+ <mxCell id="fbtBBihSDkiLwD9h4qN6-338" value="" style="group" vertex="1" connectable="0" parent="1">
471
+ <mxGeometry x="464.4" y="670" width="5.6" height="19.25" as="geometry" />
472
+ </mxCell>
473
+ <mxCell id="fbtBBihSDkiLwD9h4qN6-339" value="" style="ellipse;whiteSpace=wrap;html=1;aspect=fixed;fillColor=#000000;fontSize=7;" vertex="1" parent="fbtBBihSDkiLwD9h4qN6-338">
474
+ <mxGeometry width="3.685470085470085" height="3.685470085470085" as="geometry" />
475
+ </mxCell>
476
+ <mxCell id="fbtBBihSDkiLwD9h4qN6-340" value="" style="ellipse;whiteSpace=wrap;html=1;aspect=fixed;fillColor=#000000;fontSize=7;" vertex="1" parent="fbtBBihSDkiLwD9h4qN6-338">
477
+ <mxGeometry y="15.280927835051546" width="3.685470085470085" height="3.685470085470085" as="geometry" />
478
+ </mxCell>
479
+ <mxCell id="fbtBBihSDkiLwD9h4qN6-341" value="" style="ellipse;whiteSpace=wrap;html=1;aspect=fixed;fillColor=#000000;fontSize=7;" vertex="1" parent="fbtBBihSDkiLwD9h4qN6-338">
480
+ <mxGeometry y="7.640463917525773" width="3.685470085470085" height="3.685470085470085" as="geometry" />
481
+ </mxCell>
482
+ <mxCell id="fbtBBihSDkiLwD9h4qN6-342" value="&lt;p style=&quot;margin-bottom: 0in; line-height: 1px; background: transparent; font-size: 12px; text-align: start;&quot;&gt;$$w_{C, M}$$&lt;/p&gt;" style="text;html=1;align=center;verticalAlign=middle;whiteSpace=wrap;rounded=0;fontSize=12;" vertex="1" parent="1">
483
+ <mxGeometry x="489.52063291139245" y="861.49" width="25.949367088607588" height="10.5" as="geometry" />
484
+ </mxCell>
485
+ <mxCell id="fbtBBihSDkiLwD9h4qN6-350" value="" style="group" vertex="1" connectable="0" parent="1">
486
+ <mxGeometry x="464.4" y="830" width="5.6" height="19.25" as="geometry" />
487
+ </mxCell>
488
+ <mxCell id="fbtBBihSDkiLwD9h4qN6-351" value="" style="ellipse;whiteSpace=wrap;html=1;aspect=fixed;fillColor=#000000;fontSize=7;" vertex="1" parent="fbtBBihSDkiLwD9h4qN6-350">
489
+ <mxGeometry width="3.685470085470085" height="3.685470085470085" as="geometry" />
490
+ </mxCell>
491
+ <mxCell id="fbtBBihSDkiLwD9h4qN6-352" value="" style="ellipse;whiteSpace=wrap;html=1;aspect=fixed;fillColor=#000000;fontSize=7;" vertex="1" parent="fbtBBihSDkiLwD9h4qN6-350">
492
+ <mxGeometry y="15.280927835051546" width="3.685470085470085" height="3.685470085470085" as="geometry" />
493
+ </mxCell>
494
+ <mxCell id="fbtBBihSDkiLwD9h4qN6-353" value="" style="ellipse;whiteSpace=wrap;html=1;aspect=fixed;fillColor=#000000;fontSize=7;" vertex="1" parent="fbtBBihSDkiLwD9h4qN6-350">
495
+ <mxGeometry y="7.640463917525773" width="3.685470085470085" height="3.685470085470085" as="geometry" />
496
+ </mxCell>
497
+ <mxCell id="fbtBBihSDkiLwD9h4qN6-355" value="&lt;font style=&quot;font-size: 14px;&quot;&gt;Interpretable Weights W&lt;/font&gt;" style="text;html=1;align=center;verticalAlign=middle;whiteSpace=wrap;rounded=0;" vertex="1" parent="1">
498
+ <mxGeometry x="401.69" y="600" width="158.31" height="10.5" as="geometry" />
499
+ </mxCell>
500
+ </root>
501
+ </mxGraphModel>
502
+ </diagram>
503
+ </mxfile>
2305.13072/main_diagram/main_diagram.pdf ADDED
Binary file (38.5 kB). View file
 
2305.13072/paper_text/intro_method.md ADDED
@@ -0,0 +1,69 @@
1
+ # Introduction
2
+
3
+ Tabular data are arguably the most widespread traditional data modality, arising in a plethora of real-world application domains (Bischl et al., 2021; Borisov et al., 2022). There is a recent trend of deploying neural networks for predictive tasks on tabular data (Kadra et al., 2021; Gorishniy et al., 2021; Somepalli et al., 2022; Hollmann et al., 2023). In a series of such application realms, it is important to be able to explain the predictions of deep learning models to humans (Ras et al., 2022), especially when interacting with human decision-makers, such as in healthcare (Gulum et al., 2021; Tjoa & Guan, 2021) or the financial sector (Sadhwani et al., 2020). Heavily parametrized models such as deep neural networks can fit complex interactions in tabular datasets and achieve high predictive accuracy; however, they are not explainable. In that context, achieving both high predictive accuracy and explainability remains an open research question for the Machine Learning community.
4
+
5
+ In this work, we introduce *mesomorphic* neural architectures<sup>1</sup>, a new class of models that are deep and locally linear at the same time, therefore offering interpretability by design. In a nutshell, we propose an architecture that is simultaneously (i) *deep and accurate*, as well as (ii) *linear and explainable* on a per-instance basis. Technically speaking, we learn *deep* hypernetworks that generate *linear* models that are accurate with respect to the data point we are interested in explaining.
6
+
7
+ Our interpretable mesomorphic networks for tabular data (dubbed IMN) are classification or regression models that identify the relevant tabular features by design. It is important to highlight that this work tackles explaining predictions for a single data point (Lundberg & Lee, 2017), instead of explaining a model globally for the whole dataset (Ras et al., 2022). Similarly to existing prior works (Alvarez-Melis & Jaakkola, 2018; Chen et al., 2018), we train deep models that generate explainable local models for a data sample of interest. In contrast to those works, we train hypernetworks that generate linear models in the original feature space through purely supervised end-to-end optimization.
8
+
9
+ <sup>1</sup>The etymology of the term *mesomorphic* is inspired by Chemistry as "pertaining to an intermediate phase of matter". For instance, a liquid crystal qualifies as both solid and liquid.
10
+
11
+
12
+
13
+ We empirically show that the proposed explainable deep models are as accurate as existing black-box classifiers for tabular datasets and achieve better performance than prior end-to-end explainable methods, while being as interpretable as explainer techniques. Throughout this work, we categorize explainers into two groups: *i*) interpretable surrogate models that are trained to approximate black-box models (Lundberg & Lee, 2017), and *ii*) methods that are explainable end-to-end by design. Concretely, we show that our method achieves accuracy comparable to competitive black-box classifiers and outperforms the current state-of-the-art end-to-end explainable methods on the tabular datasets of the popular AutoML benchmark (Gijsbers et al., 2019). In addition, we compare our technique against state-of-the-art predictive explainers on the recent XAI explainability benchmark for tabular data (Liu et al., 2021) and empirically demonstrate that our method offers competitive interpretability. As a result, our method represents a significant step toward making deep learning explainable by design for tabular datasets. Overall, this work offers the following contributions:
14
+
15
+ - We present a technique that makes deep learning explainable by design via training hypernetworks to generate instance-specific linear models.
16
+ - We offer ample empirical evidence that our method is as accurate as black-box classifiers, with the benefit of being as interpretable as state-of-the-art prediction explainers.
17
+
18
+ # Method
19
+
20
+ Let us denote a tabular dataset consisting of N instances of M-dimensional features as $X \in \mathbb{R}^{N \times M}$ and the categorical target variable with C classes as $Y \in \{1,\ldots,C\}^N$. A model with parameters $w \in \mathcal{W}$ estimates the target variable as $f: \mathbb{R}^M \times \mathcal{W} \to \mathbb{R}^C$ and is optimized by minimizing the empirical risk $\arg\min_{w \in \mathcal{W}} \sum_{n=1}^N \mathcal{L}\left(y_n, f(x_n; w)\right)$, where $\mathcal{L}: \{1,\ldots,C\} \times \mathbb{R}^C \to \mathbb{R}_+$ is a loss function. An explainable model f is one whose predictions $\hat{y}_n = f(x_n; w)$ for a data point $x_n$ are interpretable by humans. For instance, linear models and decision trees are commonly accepted to be interpretable by Machine Learning practitioners (Ribeiro et al., 2016; Lundberg & Lee, 2017).
21
+
22
+ In this work, we rethink shallow interpretable models $f(x_n;w)$ by defining their parameters $w \in \mathcal{W}$ to be the output of deep non-interpretable hypernetworks $w(x_n;\theta):\mathbb{R}^M \times \Theta \to \mathcal{W}$, where $\theta \in \Theta$ are the parameters of the hypernetwork. We remind the reader that a hypernetwork (a.k.a. metanetwork, or "network of networks") is a neural network that generates the parameters of another network (Ha et al., 2017). Through this mechanism, we train deep non-interpretable hypernetworks to generate interpretable models f in an end-to-end manner as $\arg\min_{\theta \in \Theta} \sum_{n=1}^N \mathcal{L}(y_n, f(x_n; w(x_n; \theta)))$.
23
+
24
+ Our method trains deep Multi-Layer Perceptron (MLP) hypernetworks that generate the parameters of linear models. For the case of multi-class classification, we consider linear models with parameters $w \in \mathbb{R}^{C \times (M+1)}$, denoting one set of weights and a bias term per class, as $f(x_n; w)_c = e^{z(x_n; w)_c} / \sum_{k=1}^C e^{z(x_n; w)_k}$, with $z(x_n; w)_c = \sum_{m=1}^M w_{c,m} x_{n,m} + w_{c,0}$ representing the logit prediction for the c-th class. For the case of regression, the linear model is simply $f(x_n; w) = \sum_{m=1}^M w_m x_{n,m} + w_0$ with $w \in \mathbb{R}^{M+1}$.
25
+
26
+ Let us present our method IMN by starting with the case of multi-class classification following the hypernetwork mechanism explained in Section 2.1. The hypernetwork $w(x_n;\theta):\mathbb{R}^M\times\Theta\to\mathbb{R}^{C\times(M+1)}$ with parameters $\theta\in\Theta$ is a function that given a data point $x_n\in\mathbb{R}^M$ generates the predictions as:
27
+
28
+ $$f(x_n; w(x_n; \theta))_c = \frac{e^{z(x_n; w(x_n; \theta))_c}}{\sum_{k=1}^C e^{z(x_n; w(x_n; \theta))_k}}, \quad z(x_n; w(x_n; \theta))_c = \sum_{m=1}^M w(x_n; \theta)_{c,m} x_{n,m} + w(x_n; \theta)_{c,0}$$
29
+ (1)
30
+
31
+ Instead of training fixed weights w as in a standard linear classifier, we use the output of an MLP as the per-instance linear weights $w(x_n;\theta)$. We illustrate the architecture of our mesomorphic network in Figure 1. In the case of regression, our linear model with hypernetworks is $f(x_n;w(x_n;\theta)) = \sum_{m=1}^M w\left(x_n;\theta\right)_m x_{n,m} + w\left(x_n;\theta\right)_0$. We highlight that our experimental protocol (Section 4) includes both classification and regression datasets.
32
+
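+ To make this forward pass concrete, the following is a minimal PyTorch sketch of our own (not the authors' released code); the hidden width, depth, and activation are illustrative assumptions.
+
+ ```python
+ import torch
+ import torch.nn as nn
+
+ class IMNSketch(nn.Module):
+     """Sketch: an MLP hypernetwork emitting per-instance linear weights
+     w(x; theta) of shape (C, M + 1), i.e. one weight row and bias per class."""
+     def __init__(self, num_features: int, num_classes: int, hidden: int = 128):
+         super().__init__()
+         self.num_classes, self.num_features = num_classes, num_features
+         self.hypernet = nn.Sequential(      # depth/width are illustrative
+             nn.Linear(num_features, hidden), nn.ReLU(),
+             nn.Linear(hidden, num_classes * (num_features + 1)),
+         )
+
+     def forward(self, x):                   # x: (B, M)
+         w = self.hypernet(x).view(-1, self.num_classes, self.num_features + 1)
+         bias, weights = w[..., 0], w[..., 1:]
+         logits = torch.einsum("bm,bcm->bc", x, weights) + bias  # z in Eq. (1)
+         return logits, w                    # softmax is applied inside the loss
+ ```
+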
33
+ Ultimately, we train the parameters of the hypernetwork to minimize the following loss in an end-to-end manner, where the L1 penalty on the generated weights encourages sparse, and hence more readable, local models: $\underset{\theta \in \Theta}{\arg\min} \sum_{n=1}^{N} \left[ \mathcal{L}\left(y_n, f\left(x_n; w(x_n; \theta)\right)\right) + \lambda \left\|w\left(x_n; \theta\right)\right\|_1 \right].$
34
+
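+ Continuing the sketch above, the per-batch objective could be computed as follows; the L1 strength `lam` is an illustrative choice, not a value from the paper.
+
+ ```python
+ import torch.nn.functional as F
+
+ def imn_loss(model, x, y, lam=1e-4):
+     """Cross-entropy on the generated linear model plus an L1 penalty
+     on the generated weights, averaged over the batch."""
+     logits, w = model(x)
+     return F.cross_entropy(logits, y) + lam * w.abs().sum(dim=(1, 2)).mean()
+ ```
+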
35
+ Our hypernetworks generate interpretable models that are accurate for a data point of interest (e.g. "Explain why patient $x_n$ is estimated to have cancer $f(x_n; w(x_n; \theta)) > 0.5$ by analyzing the impact of features using the generated linear weights."). We stress that our novel method IMN does not simply train one linear model per data point, contrary to prior work (Ribeiro et al., 2016). Instead, the hypernetwork learns to generate accurate linear models through a single network shared across all data points. As a result, generating the linear weights demands a single forward pass through the hypernetwork, rather than a separate optimization procedure. Furthermore, our method intrinsically learns to generate similar linear hyperplanes for neighboring data instances. The generated linear models are accurate not only in correctly classifying the data point $x_n$, but also for the majority of training instances in its neighborhood (see the proof-of-concept experiment below). The outcome is a linear model with parameters $w(x_n; \theta)$ that both interprets the prediction and serves as an accurate local model for the neighborhood of points.
36
+
37
+ ![](_page_2_Figure_4.jpeg)
38
+
39
+ Figure 1: The IMN architecture.
40
+
41
+
42
+
43
+ The generated linear models $w(x_n;\theta)$ can be used to explain predictions through feature attribution (i.e. feature importance) (Liu et al., 2021). It is important to re-emphasize that our method offers interpretable predictions for the estimated target $f(x_n; w(x_n;\theta))$ of a particular data point $x_n$. Concretely, we can analyze the linear coefficients $\{w(x_n;\theta)_1,\ldots,w(x_n;\theta)_M\}$ to distill the importances of $\{x_{n,1},\ldots,x_{n,M}\}$ by measuring their residual impact on the target. The impact of the m-th feature $x_{n,m}$ in estimating the target variable is proportional to the change in the estimated target if we remove the feature (Hooker et al., 2019). Considering our linear models, the impact of the m-th feature is proportional to the change of the predicted target if we set the m-th feature to zero. In terms of notation, we multiply the feature vector element-wise with a masking vector $\delta^m$ whose entries are $\delta_i^m = \mathbb{1}_{m \neq i}$:
44
+
45
+ $$f(x_n; w(x_n; \theta)) - f(x_n \odot \delta^m; w(x_n; \theta)) \propto w(x_n; \theta)_m x_{n,m}$$
46
+ (2)
47
+
48
+ As a result, our feature attribution strategy is that the m-th feature impacts the prediction of the target variable by a signed magnitude of $w(x_n;\theta)_m\,x_{n,m}$. In our experiments, all the features are normalized to the same mean and variance; therefore, the magnitude $w(x_n;\theta)_m\,x_{n,m}$ can be directly used to explain the impact of the m-th feature. In cases where an unsigned importance is required, a practitioner can use the absolute impact $|w(x_n;\theta)_m\,x_{n,m}|$ as the attribution. Furthermore, to measure the global importance of the m-th feature for the whole dataset, we can compute $\frac{1}{N}\sum_{n=1}^N |w(x_n;\theta)_m\,x_{n,m}|$.
49
+
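+ Continuing the sketch above, both the signed local attributions and the global importance reduce to a few tensor operations; selecting the predicted class's weight row for classification is our own simplification.
+
+ ```python
+ with torch.no_grad():
+     logits, w = model(X)                        # X: (N, M) float tensor
+ pred = logits.argmax(dim=1)                     # predicted class per instance
+ w_pred = w[torch.arange(len(X)), pred, 1:]      # (N, M) weights, bias dropped
+ local_attr = w_pred * X                         # signed impact w(x)_m * x_{n,m}
+ global_importance = local_attr.abs().mean(dim=0)  # (M,), as defined above
+ ```
+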
50
+ ![](_page_3_Figure_0.jpeg)
51
+
52
+ Figure 2: Investigating the accuracy and interpretability of IMN. Left: The global decision boundary of our method, which separates the classes correctly. Right: The local hyperplane pertaining to an example x', which correctly classifies the local example and retains a good classification for the neighboring points.
53
+
54
+ As a proof of concept, we run our method on the half-moon toy task that consists of a 2-dimensional tabular dataset in the form of two half-moons that are not linearly separable.
55
+
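+ The half-moon data can be generated with scikit-learn; the sample size and noise level below are our own choices.
+
+ ```python
+ from sklearn.datasets import make_moons
+
+ X_np, y_np = make_moons(n_samples=1000, noise=0.1, random_state=0)
+ X, y = torch.tensor(X_np, dtype=torch.float32), torch.tensor(y_np)
+ model = IMNSketch(num_features=2, num_classes=2)  # reusing the sketch above
+ ```
+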
56
+ Initially, we investigate the global accuracy of our method. As shown in Figure 2 (left), our method correctly classifies all the examples. Furthermore, it learns a non-linear decision boundary that separates the classes (plotted in green). To determine the decision boundary, we perform a fine-grid prediction on all combinations of $x_1$ and $x_2$ and subsequently identify the grid points whose predicted probability is closest to 0.5. Lastly, in Figure 2 (right) we investigate the local interpretability of our method by taking a point x' and calculating the corresponding weights $(w(x'), w(x')_0)$ generated by our hypernetwork, where we omit the dependence on $\theta$ for simplicity. The black line shows all the points that reside on the hyperplane, $\{x \mid w(x')^\top x + w(x')_0 = 0\}$. It is important to highlight that the local hyperplane correctly classifies not only the point x' but also the neighboring points, yielding an accurate linear classifier for the neighborhood.
57
+
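+ A minimal sketch of the fine-grid boundary extraction described above (grid range, resolution, and the 0.5-band tolerance are assumptions):
+
+ ```python
+ import numpy as np
+
+ xs, ys = np.meshgrid(np.linspace(-1.5, 2.5, 500), np.linspace(-1.0, 1.5, 500))
+ grid = torch.tensor(np.stack([xs.ravel(), ys.ravel()], axis=1), dtype=torch.float32)
+ with torch.no_grad():
+     p = torch.softmax(model(grid)[0], dim=1)[:, 1]  # P(class 1) on the grid
+ boundary = grid[(p - 0.5).abs() < 1e-2]             # points closest to p = 0.5
+ ```
+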
58
+ To validate our claim that the per-example (local) hyperplane correctly classifies neighboring points, we conduct the following analysis: for every data point $x_n$ we take a fixed number of nearest-neighbor examples from every class, and we evaluate the classification accuracy of the hyperplane generated for $x_n$ on the set of all neighbors. We repeat this procedure with varying neighborhood sizes and present the results in Table 1. The results indicate that the mesomorphic neural network generates hyperplanes that are accurate in the neighborhood of the point whose prediction we are interested in explaining.
59
+
60
+ Table 1: Accuracy of local hyperplanes for neighboring points.
61
+
62
+ | Number of Neighbors | Accuracy |
63
+ |---------------------|----------|
65
+ | 10 | 0.84 |
66
+ | 25 | 0.82 |
67
+ | 50 | 0.78 |
68
+ | 100 | 0.77 |
69
+ | 200 | 0.77 |
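+
+ A sketch of the neighborhood analysis behind Table 1 (for brevity we draw the k nearest neighbors overall rather than per class, a simplification on our part):
+
+ ```python
+ from sklearn.neighbors import NearestNeighbors
+
+ def neighborhood_accuracy(model, X_np, y_np, k=10):
+     """Mean accuracy of each instance's generated hyperplane
+     on its k nearest neighbors (excluding the instance itself)."""
+     idx = NearestNeighbors(n_neighbors=k + 1).fit(X_np).kneighbors(X_np)[1][:, 1:]
+     with torch.no_grad():
+         _, w = model(torch.tensor(X_np, dtype=torch.float32))
+     w = w.numpy()
+     accs = [
+         ((X_np[idx[i]] @ w[i, :, 1:].T + w[i, :, 0]).argmax(1) == y_np[idx[i]]).mean()
+         for i in range(len(X_np))
+     ]
+     return float(np.mean(accs))
+ ```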
2307.14680/main_diagram/main_diagram.drawio ADDED
@@ -0,0 +1,610 @@
1
+ <mxfile host="app.diagrams.net" modified="2023-10-02T11:58:25.951Z" agent="Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36" etag="QuuNNP3dlxwvyS4BAxmY" version="21.7.5" type="google">
2
+ <diagram name="Copy of Page-1" id="oX5fRk73v3f934Nls3cN">
3
+ <mxGraphModel dx="961" dy="2289" grid="1" gridSize="10" guides="1" tooltips="1" connect="1" arrows="1" fold="1" page="0" pageScale="1" pageWidth="1169" pageHeight="1654" math="1" shadow="0">
4
+ <root>
5
+ <mxCell id="uGYOu0AKZNpe5l68Mrnq-0" />
6
+ <mxCell id="uGYOu0AKZNpe5l68Mrnq-1" parent="uGYOu0AKZNpe5l68Mrnq-0" />
7
+ <mxCell id="uGYOu0AKZNpe5l68Mrnq-2" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" parent="uGYOu0AKZNpe5l68Mrnq-1" source="uGYOu0AKZNpe5l68Mrnq-3" target="uGYOu0AKZNpe5l68Mrnq-70" edge="1">
8
+ <mxGeometry relative="1" as="geometry" />
9
+ </mxCell>
10
+ <mxCell id="uGYOu0AKZNpe5l68Mrnq-3" value="" style="rounded=1;whiteSpace=wrap;html=1;direction=south;" parent="uGYOu0AKZNpe5l68Mrnq-1" vertex="1">
11
+ <mxGeometry x="310" y="50" width="230" height="230" as="geometry" />
12
+ </mxCell>
13
+ <mxCell id="uGYOu0AKZNpe5l68Mrnq-4" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;entryX=0;entryY=0.5;entryDx=0;entryDy=0;dashed=1;" parent="uGYOu0AKZNpe5l68Mrnq-1" source="uGYOu0AKZNpe5l68Mrnq-5" target="uGYOu0AKZNpe5l68Mrnq-50" edge="1">
14
+ <mxGeometry relative="1" as="geometry">
15
+ <Array as="points">
16
+ <mxPoint x="131" y="213" />
17
+ <mxPoint x="130" y="213" />
18
+ <mxPoint x="130" y="268" />
19
+ </Array>
20
+ </mxGeometry>
21
+ </mxCell>
22
+ <mxCell id="uGYOu0AKZNpe5l68Mrnq-5" value="" style="rounded=1;whiteSpace=wrap;html=1;direction=south;dashed=1;" parent="uGYOu0AKZNpe5l68Mrnq-1" vertex="1">
23
+ <mxGeometry x="88" y="86" width="86" height="91" as="geometry" />
24
+ </mxCell>
25
+ <mxCell id="uGYOu0AKZNpe5l68Mrnq-6" value="" style="endArrow=classic;html=1;rounded=0;" parent="uGYOu0AKZNpe5l68Mrnq-1" edge="1">
26
+ <mxGeometry width="50" height="50" relative="1" as="geometry">
27
+ <mxPoint x="69" y="80" as="sourcePoint" />
28
+ <mxPoint x="287" y="80" as="targetPoint" />
29
+ </mxGeometry>
30
+ </mxCell>
31
+ <mxCell id="uGYOu0AKZNpe5l68Mrnq-8" value="" style="group" parent="uGYOu0AKZNpe5l68Mrnq-1" connectable="0" vertex="1">
32
+ <mxGeometry x="15" y="95" width="280" height="20" as="geometry" />
33
+ </mxCell>
34
+ <mxCell id="uGYOu0AKZNpe5l68Mrnq-9" value="" style="ellipse;whiteSpace=wrap;html=1;aspect=fixed;fillColor=#ffe6cc;strokeColor=#d79b00;" parent="uGYOu0AKZNpe5l68Mrnq-8" vertex="1">
35
+ <mxGeometry x="139.89655172413794" width="10" height="10" as="geometry" />
36
+ </mxCell>
37
+ <mxCell id="uGYOu0AKZNpe5l68Mrnq-10" value="" style="ellipse;whiteSpace=wrap;html=1;aspect=fixed;fillColor=#ffe6cc;strokeColor=#d79b00;" parent="uGYOu0AKZNpe5l68Mrnq-8" vertex="1">
38
+ <mxGeometry x="168.86206896551727" width="10" height="10" as="geometry" />
39
+ </mxCell>
40
+ <mxCell id="uGYOu0AKZNpe5l68Mrnq-11" value="" style="ellipse;whiteSpace=wrap;html=1;aspect=fixed;fillColor=#ffe6cc;strokeColor=#d79b00;" parent="uGYOu0AKZNpe5l68Mrnq-8" vertex="1">
41
+ <mxGeometry x="197.82758620689657" width="10" height="10" as="geometry" />
42
+ </mxCell>
43
+ <mxCell id="uGYOu0AKZNpe5l68Mrnq-12" value="" style="ellipse;whiteSpace=wrap;html=1;aspect=fixed;fillColor=#ffe6cc;strokeColor=#d79b00;" parent="uGYOu0AKZNpe5l68Mrnq-8" vertex="1">
44
+ <mxGeometry x="226.79310344827587" width="10" height="10" as="geometry" />
45
+ </mxCell>
46
+ <mxCell id="uGYOu0AKZNpe5l68Mrnq-13" value="" style="ellipse;whiteSpace=wrap;html=1;aspect=fixed;fillColor=#ffe6cc;strokeColor=#d79b00;" parent="uGYOu0AKZNpe5l68Mrnq-8" vertex="1">
47
+ <mxGeometry x="255.75862068965517" width="10" height="10" as="geometry" />
48
+ </mxCell>
49
+ <mxCell id="uGYOu0AKZNpe5l68Mrnq-14" value="" style="ellipse;whiteSpace=wrap;html=1;aspect=fixed;fillColor=#ffe6cc;strokeColor=#d79b00;" parent="uGYOu0AKZNpe5l68Mrnq-8" vertex="1">
50
+ <mxGeometry x="110.93103448275863" width="10" height="10" as="geometry" />
51
+ </mxCell>
52
+ <mxCell id="uGYOu0AKZNpe5l68Mrnq-15" value="" style="ellipse;whiteSpace=wrap;html=1;aspect=fixed;fillColor=#ffe6cc;strokeColor=#d79b00;" parent="uGYOu0AKZNpe5l68Mrnq-8" vertex="1">
53
+ <mxGeometry x="81.96551724137932" width="10" height="10" as="geometry" />
54
+ </mxCell>
55
+ <mxCell id="uGYOu0AKZNpe5l68Mrnq-18" value="" style="ellipse;whiteSpace=wrap;html=1;aspect=fixed;fillColor=#ffe6cc;strokeColor=#d79b00;" parent="uGYOu0AKZNpe5l68Mrnq-8" vertex="1">
56
+ <mxGeometry x="53" width="10" height="10" as="geometry" />
57
+ </mxCell>
58
+ <mxCell id="46MY76A0PzHGgOhmMeRs-25" value="&lt;font style=&quot;font-size: 20px;&quot;&gt;...&lt;/font&gt;" style="text;html=1;strokeColor=none;fillColor=none;align=center;verticalAlign=middle;whiteSpace=wrap;rounded=0;" parent="uGYOu0AKZNpe5l68Mrnq-8" vertex="1">
59
+ <mxGeometry x="120.93" y="-10" width="21" height="30" as="geometry" />
60
+ </mxCell>
61
+ <mxCell id="uGYOu0AKZNpe5l68Mrnq-19" value="" style="ellipse;whiteSpace=wrap;html=1;aspect=fixed;fillColor=#f8cecc;strokeColor=#b85450;" parent="uGYOu0AKZNpe5l68Mrnq-1" vertex="1">
62
+ <mxGeometry x="154.89655172413794" y="116" width="10" height="10" as="geometry" />
63
+ </mxCell>
64
+ <mxCell id="uGYOu0AKZNpe5l68Mrnq-20" value="" style="ellipse;whiteSpace=wrap;html=1;aspect=fixed;fillColor=#f8cecc;strokeColor=#b85450;" parent="uGYOu0AKZNpe5l68Mrnq-1" vertex="1">
65
+ <mxGeometry x="183.86206896551727" y="116" width="10" height="10" as="geometry" />
66
+ </mxCell>
67
+ <mxCell id="uGYOu0AKZNpe5l68Mrnq-21" value="" style="ellipse;whiteSpace=wrap;html=1;aspect=fixed;fillColor=#f8cecc;strokeColor=#b85450;" parent="uGYOu0AKZNpe5l68Mrnq-1" vertex="1">
68
+ <mxGeometry x="212.82758620689657" y="116" width="10" height="10" as="geometry" />
69
+ </mxCell>
70
+ <mxCell id="uGYOu0AKZNpe5l68Mrnq-22" value="" style="ellipse;whiteSpace=wrap;html=1;aspect=fixed;fillColor=#f8cecc;strokeColor=#b85450;" parent="uGYOu0AKZNpe5l68Mrnq-1" vertex="1">
71
+ <mxGeometry x="241.79310344827587" y="116" width="10" height="10" as="geometry" />
72
+ </mxCell>
73
+ <mxCell id="uGYOu0AKZNpe5l68Mrnq-23" value="" style="ellipse;whiteSpace=wrap;html=1;aspect=fixed;fillColor=#f8cecc;strokeColor=#b85450;" parent="uGYOu0AKZNpe5l68Mrnq-1" vertex="1">
74
+ <mxGeometry x="270.7586206896552" y="116" width="10" height="10" as="geometry" />
75
+ </mxCell>
76
+ <mxCell id="uGYOu0AKZNpe5l68Mrnq-24" value="" style="ellipse;whiteSpace=wrap;html=1;aspect=fixed;fillColor=#f8cecc;strokeColor=#b85450;" parent="uGYOu0AKZNpe5l68Mrnq-1" vertex="1">
77
+ <mxGeometry x="125.93103448275863" y="116" width="10" height="10" as="geometry" />
78
+ </mxCell>
79
+ <mxCell id="uGYOu0AKZNpe5l68Mrnq-25" value="" style="ellipse;whiteSpace=wrap;html=1;aspect=fixed;fillColor=#f8cecc;strokeColor=#b85450;" parent="uGYOu0AKZNpe5l68Mrnq-1" vertex="1">
80
+ <mxGeometry x="96.96551724137932" y="116" width="10" height="10" as="geometry" />
81
+ </mxCell>
82
+ <mxCell id="uGYOu0AKZNpe5l68Mrnq-28" value="" style="ellipse;whiteSpace=wrap;html=1;aspect=fixed;fillColor=#f8cecc;strokeColor=#b85450;" parent="uGYOu0AKZNpe5l68Mrnq-1" vertex="1">
83
+ <mxGeometry x="68" y="116" width="10" height="10" as="geometry" />
84
+ </mxCell>
85
+ <mxCell id="uGYOu0AKZNpe5l68Mrnq-29" value="" style="ellipse;whiteSpace=wrap;html=1;aspect=fixed;fillColor=#dae8fc;strokeColor=#6c8ebf;" parent="uGYOu0AKZNpe5l68Mrnq-1" vertex="1">
86
+ <mxGeometry x="154.89655172413794" y="137" width="10" height="10" as="geometry" />
87
+ </mxCell>
88
+ <mxCell id="uGYOu0AKZNpe5l68Mrnq-30" value="" style="ellipse;whiteSpace=wrap;html=1;aspect=fixed;fillColor=#dae8fc;strokeColor=#6c8ebf;" parent="uGYOu0AKZNpe5l68Mrnq-1" vertex="1">
89
+ <mxGeometry x="183.86206896551727" y="137" width="10" height="10" as="geometry" />
90
+ </mxCell>
91
+ <mxCell id="uGYOu0AKZNpe5l68Mrnq-31" value="" style="ellipse;whiteSpace=wrap;html=1;aspect=fixed;fillColor=#dae8fc;strokeColor=#6c8ebf;" parent="uGYOu0AKZNpe5l68Mrnq-1" vertex="1">
92
+ <mxGeometry x="212.82758620689657" y="137" width="10" height="10" as="geometry" />
93
+ </mxCell>
94
+ <mxCell id="uGYOu0AKZNpe5l68Mrnq-32" value="" style="ellipse;whiteSpace=wrap;html=1;aspect=fixed;fillColor=#dae8fc;strokeColor=#6c8ebf;" parent="uGYOu0AKZNpe5l68Mrnq-1" vertex="1">
95
+ <mxGeometry x="241.79310344827587" y="137" width="10" height="10" as="geometry" />
96
+ </mxCell>
97
+ <mxCell id="uGYOu0AKZNpe5l68Mrnq-33" value="" style="ellipse;whiteSpace=wrap;html=1;aspect=fixed;fillColor=#dae8fc;strokeColor=#6c8ebf;" parent="uGYOu0AKZNpe5l68Mrnq-1" vertex="1">
98
+ <mxGeometry x="270.7586206896552" y="137" width="10" height="10" as="geometry" />
99
+ </mxCell>
100
+ <mxCell id="uGYOu0AKZNpe5l68Mrnq-34" value="" style="ellipse;whiteSpace=wrap;html=1;aspect=fixed;fillColor=#dae8fc;strokeColor=#6c8ebf;" parent="uGYOu0AKZNpe5l68Mrnq-1" vertex="1">
101
+ <mxGeometry x="125.93103448275863" y="137" width="10" height="10" as="geometry" />
102
+ </mxCell>
103
+ <mxCell id="uGYOu0AKZNpe5l68Mrnq-35" value="" style="ellipse;whiteSpace=wrap;html=1;aspect=fixed;fillColor=#dae8fc;strokeColor=#6c8ebf;" parent="uGYOu0AKZNpe5l68Mrnq-1" vertex="1">
104
+ <mxGeometry x="96.96551724137932" y="137" width="10" height="10" as="geometry" />
105
+ </mxCell>
106
+ <mxCell id="uGYOu0AKZNpe5l68Mrnq-38" value="" style="ellipse;whiteSpace=wrap;html=1;aspect=fixed;fillColor=#dae8fc;strokeColor=#6c8ebf;" parent="uGYOu0AKZNpe5l68Mrnq-1" vertex="1">
107
+ <mxGeometry x="68" y="137" width="10" height="10" as="geometry" />
108
+ </mxCell>
109
+ <mxCell id="uGYOu0AKZNpe5l68Mrnq-39" value="" style="ellipse;whiteSpace=wrap;html=1;aspect=fixed;fillColor=#fff2cc;strokeColor=#d6b656;" parent="uGYOu0AKZNpe5l68Mrnq-1" vertex="1">
110
+ <mxGeometry x="154.89655172413794" y="156" width="10" height="10" as="geometry" />
111
+ </mxCell>
112
+ <mxCell id="uGYOu0AKZNpe5l68Mrnq-40" value="" style="ellipse;whiteSpace=wrap;html=1;aspect=fixed;fillColor=#fff2cc;strokeColor=#d6b656;" parent="uGYOu0AKZNpe5l68Mrnq-1" vertex="1">
113
+ <mxGeometry x="183.86206896551727" y="156" width="10" height="10" as="geometry" />
114
+ </mxCell>
115
+ <mxCell id="uGYOu0AKZNpe5l68Mrnq-41" value="" style="ellipse;whiteSpace=wrap;html=1;aspect=fixed;fillColor=#fff2cc;strokeColor=#d6b656;" parent="uGYOu0AKZNpe5l68Mrnq-1" vertex="1">
116
+ <mxGeometry x="212.82758620689657" y="156" width="10" height="10" as="geometry" />
117
+ </mxCell>
118
+ <mxCell id="uGYOu0AKZNpe5l68Mrnq-42" value="" style="ellipse;whiteSpace=wrap;html=1;aspect=fixed;fillColor=#fff2cc;strokeColor=#d6b656;" parent="uGYOu0AKZNpe5l68Mrnq-1" vertex="1">
119
+ <mxGeometry x="241.79310344827587" y="156" width="10" height="10" as="geometry" />
120
+ </mxCell>
121
+ <mxCell id="uGYOu0AKZNpe5l68Mrnq-43" value="" style="ellipse;whiteSpace=wrap;html=1;aspect=fixed;fillColor=#fff2cc;strokeColor=#d6b656;" parent="uGYOu0AKZNpe5l68Mrnq-1" vertex="1">
122
+ <mxGeometry x="270.7586206896552" y="156" width="10" height="10" as="geometry" />
123
+ </mxCell>
124
+ <mxCell id="uGYOu0AKZNpe5l68Mrnq-44" value="" style="ellipse;whiteSpace=wrap;html=1;aspect=fixed;fillColor=#fff2cc;strokeColor=#d6b656;" parent="uGYOu0AKZNpe5l68Mrnq-1" vertex="1">
125
+ <mxGeometry x="125.93103448275863" y="156" width="10" height="10" as="geometry" />
126
+ </mxCell>
127
+ <mxCell id="uGYOu0AKZNpe5l68Mrnq-45" value="" style="ellipse;whiteSpace=wrap;html=1;aspect=fixed;fillColor=#fff2cc;strokeColor=#d6b656;" parent="uGYOu0AKZNpe5l68Mrnq-1" vertex="1">
128
+ <mxGeometry x="96.96551724137932" y="156" width="10" height="10" as="geometry" />
129
+ </mxCell>
130
+ <mxCell id="uGYOu0AKZNpe5l68Mrnq-48" value="" style="ellipse;whiteSpace=wrap;html=1;aspect=fixed;fillColor=#fff2cc;strokeColor=#d6b656;" parent="uGYOu0AKZNpe5l68Mrnq-1" vertex="1">
131
+ <mxGeometry x="68" y="156" width="10" height="10" as="geometry" />
132
+ </mxCell>
133
+ <mxCell id="uGYOu0AKZNpe5l68Mrnq-49" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;entryX=0;entryY=0.5;entryDx=0;entryDy=0;dashed=1;" parent="uGYOu0AKZNpe5l68Mrnq-1" source="uGYOu0AKZNpe5l68Mrnq-50" target="uGYOu0AKZNpe5l68Mrnq-53" edge="1">
134
+ <mxGeometry relative="1" as="geometry">
135
+ <Array as="points">
136
+ <mxPoint x="300" y="268" />
137
+ <mxPoint x="300" y="90" />
138
+ </Array>
139
+ </mxGeometry>
140
+ </mxCell>
141
+ <mxCell id="uGYOu0AKZNpe5l68Mrnq-50" value="&lt;font style=&quot;font-size: 14px;&quot;&gt;&lt;i style=&quot;&quot;&gt;&lt;b&gt;&amp;nbsp; - Sliding Windows&lt;/b&gt;&lt;/i&gt;&lt;/font&gt;" style="rounded=0;whiteSpace=wrap;html=1;fillColor=#f5f5f5;fontColor=#333333;strokeColor=#666666;" parent="uGYOu0AKZNpe5l68Mrnq-1" vertex="1">
142
+ <mxGeometry x="145" y="248" width="139" height="40" as="geometry" />
143
+ </mxCell>
144
+ <mxCell id="uGYOu0AKZNpe5l68Mrnq-51" value="&lt;font style=&quot;font-size: 14px;&quot;&gt;$$ k $$&lt;/font&gt;" style="text;html=1;strokeColor=none;fillColor=none;align=center;verticalAlign=middle;whiteSpace=wrap;rounded=0;" parent="uGYOu0AKZNpe5l68Mrnq-1" vertex="1">
145
+ <mxGeometry x="121.89999999999998" y="253" width="60" height="30" as="geometry" />
146
+ </mxCell>
147
+ <mxCell id="uGYOu0AKZNpe5l68Mrnq-52" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=0.5;exitY=1;exitDx=0;exitDy=0;entryX=0.5;entryY=0;entryDx=0;entryDy=0;" parent="uGYOu0AKZNpe5l68Mrnq-1" source="uGYOu0AKZNpe5l68Mrnq-53" target="uGYOu0AKZNpe5l68Mrnq-57" edge="1">
148
+ <mxGeometry relative="1" as="geometry" />
149
+ </mxCell>
150
+ <mxCell id="uGYOu0AKZNpe5l68Mrnq-53" value="&lt;font style=&quot;font-size: 14px;&quot;&gt;Convolutional Feature Extraction&lt;/font&gt;" style="rounded=1;whiteSpace=wrap;html=1;fillColor=#e1d5e7;strokeColor=#9673a6;" parent="uGYOu0AKZNpe5l68Mrnq-1" vertex="1">
151
+ <mxGeometry x="338" y="68" width="173" height="44" as="geometry" />
152
+ </mxCell>
153
+ <mxCell id="uGYOu0AKZNpe5l68Mrnq-54" value="&lt;font style=&quot;font-size: 14px;&quot;&gt;$$ m $$&lt;/font&gt;" style="text;html=1;strokeColor=none;fillColor=none;align=center;verticalAlign=middle;whiteSpace=wrap;rounded=0;" parent="uGYOu0AKZNpe5l68Mrnq-1" vertex="1">
154
+ <mxGeometry x="6.999999999999972" y="113" width="60" height="30" as="geometry" />
155
+ </mxCell>
156
+ <mxCell id="uGYOu0AKZNpe5l68Mrnq-55" value="" style="endArrow=classic;html=1;strokeWidth=1;endSize=3;startArrow=none;startFill=0;curved=1;strokeColor=#c2c1c1;" parent="uGYOu0AKZNpe5l68Mrnq-1" edge="1">
157
+ <mxGeometry width="50" height="50" relative="1" as="geometry">
158
+ <mxPoint x="47" y="130" as="sourcePoint" />
159
+ <mxPoint x="67" y="90" as="targetPoint" />
160
+ <Array as="points">
161
+ <mxPoint x="67" y="130" />
162
+ <mxPoint x="47" y="90" />
163
+ </Array>
164
+ </mxGeometry>
165
+ </mxCell>
166
+ <mxCell id="uGYOu0AKZNpe5l68Mrnq-56" value="" style="curved=1;endArrow=classic;html=1;rounded=0;strokeWidth=1;endSize=3;startSize=5;strokeColor=#BDBDBD;" parent="uGYOu0AKZNpe5l68Mrnq-1" edge="1">
167
+ <mxGeometry width="50" height="50" relative="1" as="geometry">
168
+ <mxPoint x="48" y="130" as="sourcePoint" />
169
+ <mxPoint x="68" y="170" as="targetPoint" />
170
+ <Array as="points">
171
+ <mxPoint x="68" y="130" />
172
+ <mxPoint x="48" y="170" />
173
+ </Array>
174
+ </mxGeometry>
175
+ </mxCell>
176
+ <mxCell id="uGYOu0AKZNpe5l68Mrnq-57" value="&lt;font style=&quot;font-size: 14px;&quot;&gt;Edge Learning&lt;/font&gt;" style="rounded=1;whiteSpace=wrap;html=1;fillColor=#e1d5e7;strokeColor=#9673a6;" parent="uGYOu0AKZNpe5l68Mrnq-1" vertex="1">
177
+ <mxGeometry x="359.5" y="137" width="130" height="44" as="geometry" />
178
+ </mxCell>
179
+ <mxCell id="uGYOu0AKZNpe5l68Mrnq-58" value="&lt;font style=&quot;font-size: 12px;&quot;&gt;$$ \{ t - \tau, ..., t - 1 \} $$&lt;/font&gt;" style="text;html=1;strokeColor=none;fillColor=none;align=center;verticalAlign=middle;whiteSpace=wrap;rounded=0;" parent="uGYOu0AKZNpe5l68Mrnq-1" vertex="1">
180
+ <mxGeometry x="60" y="181" width="154" height="30" as="geometry" />
181
+ </mxCell>
182
+ <mxCell id="uGYOu0AKZNpe5l68Mrnq-59" value="&lt;span style=&quot;font-size: 14px;&quot;&gt;- Graphs&lt;/span&gt;" style="text;html=1;strokeColor=none;fillColor=none;align=center;verticalAlign=middle;whiteSpace=wrap;rounded=0;" parent="uGYOu0AKZNpe5l68Mrnq-1" vertex="1">
183
+ <mxGeometry x="403.5" y="200" width="60" height="30" as="geometry" />
184
+ </mxCell>
185
+ <mxCell id="uGYOu0AKZNpe5l68Mrnq-60" value="" style="group" parent="uGYOu0AKZNpe5l68Mrnq-1" connectable="0" vertex="1">
186
+ <mxGeometry x="376.9996551724138" y="230" width="95.00034482758622" height="40" as="geometry" />
187
+ </mxCell>
188
+ <mxCell id="uGYOu0AKZNpe5l68Mrnq-61" value="" style="ellipse;whiteSpace=wrap;html=1;aspect=fixed;fillColor=#d5e8d4;strokeColor=#82b366;" parent="uGYOu0AKZNpe5l68Mrnq-60" vertex="1">
189
+ <mxGeometry y="20" width="10" height="10" as="geometry" />
190
+ </mxCell>
191
+ <mxCell id="uGYOu0AKZNpe5l68Mrnq-62" value="" style="ellipse;whiteSpace=wrap;html=1;aspect=fixed;fillColor=#d5e8d4;strokeColor=#82b366;" parent="uGYOu0AKZNpe5l68Mrnq-60" vertex="1">
192
+ <mxGeometry x="36" y="20" width="10" height="10" as="geometry" />
193
+ </mxCell>
194
+ <mxCell id="uGYOu0AKZNpe5l68Mrnq-63" value="" style="ellipse;whiteSpace=wrap;html=1;aspect=fixed;fillColor=#d5e8d4;strokeColor=#82b366;" parent="uGYOu0AKZNpe5l68Mrnq-60" vertex="1">
195
+ <mxGeometry x="95" y="20" width="10" height="10" as="geometry" />
196
+ </mxCell>
197
+ <mxCell id="uGYOu0AKZNpe5l68Mrnq-64" value="" style="endArrow=classic;html=1;rounded=0;endSize=2;entryX=0;entryY=0.5;entryDx=0;entryDy=0;" parent="uGYOu0AKZNpe5l68Mrnq-60" target="uGYOu0AKZNpe5l68Mrnq-62" edge="1">
198
+ <mxGeometry width="50" height="50" relative="1" as="geometry">
199
+ <mxPoint x="10.000344827586218" y="25" as="sourcePoint" />
200
+ <mxPoint x="-251.99965517241378" y="164" as="targetPoint" />
201
+ </mxGeometry>
202
+ </mxCell>
203
+ <mxCell id="uGYOu0AKZNpe5l68Mrnq-65" value="" style="curved=1;endArrow=classic;html=1;rounded=0;exitX=1;exitY=0;exitDx=0;exitDy=0;entryX=0;entryY=0;entryDx=0;entryDy=0;endSize=2;" parent="uGYOu0AKZNpe5l68Mrnq-60" source="uGYOu0AKZNpe5l68Mrnq-61" target="uGYOu0AKZNpe5l68Mrnq-63" edge="1">
204
+ <mxGeometry width="50" height="50" relative="1" as="geometry">
205
+ <mxPoint x="70.00034482758622" y="70" as="sourcePoint" />
206
+ <mxPoint x="120.00034482758622" y="20" as="targetPoint" />
207
+ <Array as="points">
208
+ <mxPoint x="20.00034482758622" y="10" />
209
+ <mxPoint x="65.00034482758622" />
210
+ </Array>
211
+ </mxGeometry>
212
+ </mxCell>
213
+ <mxCell id="uGYOu0AKZNpe5l68Mrnq-66" value="" style="endArrow=classic;html=1;rounded=0;endSize=2;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" parent="uGYOu0AKZNpe5l68Mrnq-60" source="uGYOu0AKZNpe5l68Mrnq-62" edge="1">
214
+ <mxGeometry width="50" height="50" relative="1" as="geometry">
215
+ <mxPoint x="20.00034482758622" y="35" as="sourcePoint" />
216
+ <mxPoint x="70.00034482758622" y="25" as="targetPoint" />
217
+ </mxGeometry>
218
+ </mxCell>
219
+ <mxCell id="46MY76A0PzHGgOhmMeRs-30" value="&lt;font style=&quot;font-size: 20px;&quot;&gt;...&lt;/font&gt;" style="text;html=1;strokeColor=none;fillColor=none;align=center;verticalAlign=middle;whiteSpace=wrap;rounded=0;" parent="uGYOu0AKZNpe5l68Mrnq-60" vertex="1">
220
+ <mxGeometry x="72.00034482758622" y="5" width="21" height="30" as="geometry" />
221
+ </mxCell>
222
+ <mxCell id="uGYOu0AKZNpe5l68Mrnq-67" value="&lt;font style=&quot;font-size: 14px;&quot;&gt;$$ k $$&lt;/font&gt;" style="text;html=1;strokeColor=none;fillColor=none;align=center;verticalAlign=middle;whiteSpace=wrap;rounded=0;" parent="uGYOu0AKZNpe5l68Mrnq-1" vertex="1">
223
+ <mxGeometry x="370" y="200" width="60" height="30" as="geometry" />
224
+ </mxCell>
225
+ <mxCell id="uGYOu0AKZNpe5l68Mrnq-68" value="" style="group" parent="uGYOu0AKZNpe5l68Mrnq-1" connectable="0" vertex="1">
226
+ <mxGeometry x="392" y="268" width="310" height="98" as="geometry" />
227
+ </mxCell>
228
+ <mxCell id="uGYOu0AKZNpe5l68Mrnq-69" value="" style="rounded=1;whiteSpace=wrap;html=1;dashed=1;" parent="uGYOu0AKZNpe5l68Mrnq-68" vertex="1">
229
+ <mxGeometry x="138" y="40" width="146" height="60" as="geometry" />
230
+ </mxCell>
231
+ <mxCell id="uGYOu0AKZNpe5l68Mrnq-70" value="&lt;font style=&quot;font-size: 14px;&quot;&gt;GNN&lt;/font&gt;" style="rounded=1;whiteSpace=wrap;html=1;fillColor=#e1d5e7;strokeColor=#9673a6;" parent="uGYOu0AKZNpe5l68Mrnq-68" vertex="1">
232
+ <mxGeometry x="3" y="46" width="60" height="44" as="geometry" />
233
+ </mxCell>
234
+ <mxCell id="uGYOu0AKZNpe5l68Mrnq-71" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" parent="uGYOu0AKZNpe5l68Mrnq-68" source="uGYOu0AKZNpe5l68Mrnq-72" edge="1">
235
+ <mxGeometry relative="1" as="geometry">
236
+ <mxPoint x="138" y="68" as="targetPoint" />
237
+ </mxGeometry>
238
+ </mxCell>
239
+ <mxCell id="uGYOu0AKZNpe5l68Mrnq-72" value="&lt;font style=&quot;font-size: 14px;&quot;&gt;FC&lt;/font&gt;" style="rounded=1;whiteSpace=wrap;html=1;fillColor=#d5e8d4;strokeColor=#82b366;" parent="uGYOu0AKZNpe5l68Mrnq-68" vertex="1">
240
+ <mxGeometry x="84" y="38" width="30" height="60" as="geometry" />
241
+ </mxCell>
242
+ <mxCell id="uGYOu0AKZNpe5l68Mrnq-73" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;entryX=0;entryY=0.5;entryDx=0;entryDy=0;" parent="uGYOu0AKZNpe5l68Mrnq-68" source="uGYOu0AKZNpe5l68Mrnq-70" target="uGYOu0AKZNpe5l68Mrnq-72" edge="1">
243
+ <mxGeometry relative="1" as="geometry" />
244
+ </mxCell>
245
+ <mxCell id="uGYOu0AKZNpe5l68Mrnq-74" value="&lt;font style=&quot;font-size: 16px;&quot;&gt;Forecast&lt;/font&gt;" style="text;html=1;strokeColor=none;fillColor=none;align=center;verticalAlign=middle;whiteSpace=wrap;rounded=0;" parent="uGYOu0AKZNpe5l68Mrnq-68" vertex="1">
246
+ <mxGeometry x="181" y="10" width="60" height="30" as="geometry" />
247
+ </mxCell>
248
+ <mxCell id="uGYOu0AKZNpe5l68Mrnq-75" value="&lt;font style=&quot;font-size: 14px;&quot;&gt;$$ \hat{X}_{t}, ..., \hat{X}_{t + h- 1}&amp;nbsp; $$&lt;/font&gt;" style="text;html=1;strokeColor=none;fillColor=none;align=center;verticalAlign=middle;whiteSpace=wrap;rounded=0;" parent="uGYOu0AKZNpe5l68Mrnq-68" vertex="1">
249
+ <mxGeometry x="134" y="52" width="154" height="30" as="geometry" />
250
+ </mxCell>
251
+ <mxCell id="uGYOu0AKZNpe5l68Mrnq-76" value="" style="endArrow=classic;html=1;rounded=0;" parent="uGYOu0AKZNpe5l68Mrnq-1" edge="1">
252
+ <mxGeometry width="50" height="50" relative="1" as="geometry">
253
+ <mxPoint x="424" y="181" as="sourcePoint" />
254
+ <mxPoint x="424" y="200" as="targetPoint" />
255
+ </mxGeometry>
256
+ </mxCell>
257
+ <mxCell id="46MY76A0PzHGgOhmMeRs-1" value="" style="endArrow=none;dashed=1;html=1;rounded=0;exitX=1;exitY=0.5;exitDx=0;exitDy=0;entryX=0.25;entryY=1;entryDx=0;entryDy=0;" parent="uGYOu0AKZNpe5l68Mrnq-1" source="uGYOu0AKZNpe5l68Mrnq-53" target="46MY76A0PzHGgOhmMeRs-3" edge="1">
258
+ <mxGeometry width="50" height="50" relative="1" as="geometry">
259
+ <mxPoint x="521" y="96" as="sourcePoint" />
260
+ <mxPoint x="600" y="70" as="targetPoint" />
261
+ </mxGeometry>
262
+ </mxCell>
263
+ <mxCell id="46MY76A0PzHGgOhmMeRs-2" value="" style="endArrow=none;dashed=1;html=1;rounded=0;exitX=1;exitY=0.5;exitDx=0;exitDy=0;entryX=0.75;entryY=1;entryDx=0;entryDy=0;" parent="uGYOu0AKZNpe5l68Mrnq-1" source="uGYOu0AKZNpe5l68Mrnq-53" target="46MY76A0PzHGgOhmMeRs-3" edge="1">
264
+ <mxGeometry width="50" height="50" relative="1" as="geometry">
265
+ <mxPoint x="531" y="106" as="sourcePoint" />
266
+ <mxPoint x="600" y="50" as="targetPoint" />
267
+ </mxGeometry>
268
+ </mxCell>
269
+ <mxCell id="46MY76A0PzHGgOhmMeRs-3" value="" style="rounded=1;whiteSpace=wrap;html=1;direction=south;dashed=1;" parent="uGYOu0AKZNpe5l68Mrnq-1" vertex="1">
270
+ <mxGeometry x="572" y="18" width="86" height="91" as="geometry" />
271
+ </mxCell>
272
+ <mxCell id="46MY76A0PzHGgOhmMeRs-4" value="" style="ellipse;whiteSpace=wrap;html=1;aspect=fixed;fillColor=#ffe6cc;strokeColor=#d79b00;" parent="uGYOu0AKZNpe5l68Mrnq-1" vertex="1">
273
+ <mxGeometry x="638.8965517241379" y="27" width="10" height="10" as="geometry" />
274
+ </mxCell>
275
+ <mxCell id="46MY76A0PzHGgOhmMeRs-16" value="" style="rounded=0;whiteSpace=wrap;html=1;direction=south;fillColor=#d5e8d4;strokeColor=#82b366;" parent="uGYOu0AKZNpe5l68Mrnq-1" vertex="1">
276
+ <mxGeometry x="580" y="24" width="41" height="79" as="geometry" />
277
+ </mxCell>
278
+ <mxCell id="46MY76A0PzHGgOhmMeRs-5" value="" style="ellipse;whiteSpace=wrap;html=1;aspect=fixed;fillColor=#ffe6cc;strokeColor=#d79b00;" parent="uGYOu0AKZNpe5l68Mrnq-1" vertex="1">
279
+ <mxGeometry x="609.9310344827586" y="27" width="10" height="10" as="geometry" />
280
+ </mxCell>
281
+ <mxCell id="46MY76A0PzHGgOhmMeRs-6" value="" style="ellipse;whiteSpace=wrap;html=1;aspect=fixed;fillColor=#ffe6cc;strokeColor=#d79b00;" parent="uGYOu0AKZNpe5l68Mrnq-1" vertex="1">
282
+ <mxGeometry x="580.9655172413793" y="27" width="10" height="10" as="geometry" />
283
+ </mxCell>
284
+ <mxCell id="46MY76A0PzHGgOhmMeRs-7" value="" style="ellipse;whiteSpace=wrap;html=1;aspect=fixed;fillColor=#f8cecc;strokeColor=#b85450;" parent="uGYOu0AKZNpe5l68Mrnq-1" vertex="1">
285
+ <mxGeometry x="638.8965517241379" y="48" width="10" height="10" as="geometry" />
286
+ </mxCell>
287
+ <mxCell id="46MY76A0PzHGgOhmMeRs-8" value="" style="ellipse;whiteSpace=wrap;html=1;aspect=fixed;fillColor=#f8cecc;strokeColor=#b85450;" parent="uGYOu0AKZNpe5l68Mrnq-1" vertex="1">
288
+ <mxGeometry x="609.9310344827586" y="48" width="10" height="10" as="geometry" />
289
+ </mxCell>
290
+ <mxCell id="46MY76A0PzHGgOhmMeRs-9" value="" style="ellipse;whiteSpace=wrap;html=1;aspect=fixed;fillColor=#f8cecc;strokeColor=#b85450;" parent="uGYOu0AKZNpe5l68Mrnq-1" vertex="1">
291
+ <mxGeometry x="580.9655172413793" y="48" width="10" height="10" as="geometry" />
292
+ </mxCell>
293
+ <mxCell id="46MY76A0PzHGgOhmMeRs-10" value="" style="ellipse;whiteSpace=wrap;html=1;aspect=fixed;fillColor=#dae8fc;strokeColor=#6c8ebf;" parent="uGYOu0AKZNpe5l68Mrnq-1" vertex="1">
294
+ <mxGeometry x="638.8965517241379" y="69" width="10" height="10" as="geometry" />
295
+ </mxCell>
296
+ <mxCell id="46MY76A0PzHGgOhmMeRs-11" value="" style="ellipse;whiteSpace=wrap;html=1;aspect=fixed;fillColor=#dae8fc;strokeColor=#6c8ebf;" parent="uGYOu0AKZNpe5l68Mrnq-1" vertex="1">
297
+ <mxGeometry x="609.9310344827586" y="69" width="10" height="10" as="geometry" />
298
+ </mxCell>
299
+ <mxCell id="46MY76A0PzHGgOhmMeRs-12" value="" style="ellipse;whiteSpace=wrap;html=1;aspect=fixed;fillColor=#dae8fc;strokeColor=#6c8ebf;" parent="uGYOu0AKZNpe5l68Mrnq-1" vertex="1">
300
+ <mxGeometry x="580.9655172413793" y="69" width="10" height="10" as="geometry" />
301
+ </mxCell>
302
+ <mxCell id="46MY76A0PzHGgOhmMeRs-13" value="" style="ellipse;whiteSpace=wrap;html=1;aspect=fixed;fillColor=#fff2cc;strokeColor=#d6b656;" parent="uGYOu0AKZNpe5l68Mrnq-1" vertex="1">
303
+ <mxGeometry x="638.8965517241379" y="88" width="10" height="10" as="geometry" />
304
+ </mxCell>
305
+ <mxCell id="46MY76A0PzHGgOhmMeRs-14" value="" style="ellipse;whiteSpace=wrap;html=1;aspect=fixed;fillColor=#fff2cc;strokeColor=#d6b656;" parent="uGYOu0AKZNpe5l68Mrnq-1" vertex="1">
306
+ <mxGeometry x="609.9310344827586" y="88" width="10" height="10" as="geometry" />
307
+ </mxCell>
308
+ <mxCell id="46MY76A0PzHGgOhmMeRs-15" value="" style="ellipse;whiteSpace=wrap;html=1;aspect=fixed;fillColor=#fff2cc;strokeColor=#d6b656;" parent="uGYOu0AKZNpe5l68Mrnq-1" vertex="1">
309
+ <mxGeometry x="580.9655172413793" y="88" width="10" height="10" as="geometry" />
310
+ </mxCell>
311
+ <mxCell id="46MY76A0PzHGgOhmMeRs-18" value="&lt;span style=&quot;font-size: 14px;&quot;&gt;&lt;font color=&quot;#739e5a&quot;&gt;&lt;i&gt;conv kernels&lt;/i&gt;&lt;/font&gt;&lt;/span&gt;" style="text;html=1;strokeColor=none;fillColor=none;align=center;verticalAlign=middle;whiteSpace=wrap;rounded=0;fontStyle=1" parent="uGYOu0AKZNpe5l68Mrnq-1" vertex="1">
312
+ <mxGeometry x="570.5000000000001" y="-13" width="60" height="30" as="geometry" />
313
+ </mxCell>
314
+ <mxCell id="46MY76A0PzHGgOhmMeRs-20" value="" style="curved=1;endArrow=classic;html=1;rounded=0;dashed=1;endSize=2;" parent="uGYOu0AKZNpe5l68Mrnq-1" edge="1">
315
+ <mxGeometry width="50" height="50" relative="1" as="geometry">
316
+ <mxPoint x="661" y="59" as="sourcePoint" />
317
+ <mxPoint x="701" y="57" as="targetPoint" />
318
+ <Array as="points">
319
+ <mxPoint x="671" y="49" />
320
+ <mxPoint x="681" y="69" />
321
+ </Array>
322
+ </mxGeometry>
323
+ </mxCell>
324
+ <mxCell id="46MY76A0PzHGgOhmMeRs-22" value="&lt;font style=&quot;font-size: 20px;&quot;&gt;...&lt;/font&gt;" style="text;html=1;strokeColor=none;fillColor=none;align=center;verticalAlign=middle;whiteSpace=wrap;rounded=0;" parent="uGYOu0AKZNpe5l68Mrnq-1" vertex="1">
325
+ <mxGeometry x="135.9" y="144" width="21" height="30" as="geometry" />
326
+ </mxCell>
327
+ <mxCell id="46MY76A0PzHGgOhmMeRs-23" value="&lt;font style=&quot;font-size: 20px;&quot;&gt;...&lt;/font&gt;" style="text;html=1;strokeColor=none;fillColor=none;align=center;verticalAlign=middle;whiteSpace=wrap;rounded=0;" parent="uGYOu0AKZNpe5l68Mrnq-1" vertex="1">
328
+ <mxGeometry x="135.9" y="126" width="21" height="30" as="geometry" />
329
+ </mxCell>
330
+ <mxCell id="46MY76A0PzHGgOhmMeRs-24" value="&lt;font style=&quot;font-size: 20px;&quot;&gt;...&lt;/font&gt;" style="text;html=1;strokeColor=none;fillColor=none;align=center;verticalAlign=middle;whiteSpace=wrap;rounded=0;" parent="uGYOu0AKZNpe5l68Mrnq-1" vertex="1">
331
+ <mxGeometry x="135.9" y="105" width="21" height="30" as="geometry" />
332
+ </mxCell>
333
+ <mxCell id="46MY76A0PzHGgOhmMeRs-26" value="&lt;font style=&quot;font-size: 20px;&quot;&gt;...&lt;/font&gt;" style="text;html=1;strokeColor=none;fillColor=none;align=center;verticalAlign=middle;whiteSpace=wrap;rounded=0;" parent="uGYOu0AKZNpe5l68Mrnq-1" vertex="1">
334
+ <mxGeometry x="619.93" y="18" width="21" height="30" as="geometry" />
335
+ </mxCell>
336
+ <mxCell id="46MY76A0PzHGgOhmMeRs-27" value="&lt;font style=&quot;font-size: 20px;&quot;&gt;...&lt;/font&gt;" style="text;html=1;strokeColor=none;fillColor=none;align=center;verticalAlign=middle;whiteSpace=wrap;rounded=0;" parent="uGYOu0AKZNpe5l68Mrnq-1" vertex="1">
337
+ <mxGeometry x="619.93" y="37" width="21" height="30" as="geometry" />
338
+ </mxCell>
339
+ <mxCell id="46MY76A0PzHGgOhmMeRs-28" value="&lt;font style=&quot;font-size: 20px;&quot;&gt;...&lt;/font&gt;" style="text;html=1;strokeColor=none;fillColor=none;align=center;verticalAlign=middle;whiteSpace=wrap;rounded=0;" parent="uGYOu0AKZNpe5l68Mrnq-1" vertex="1">
340
+ <mxGeometry x="619.93" y="60" width="21" height="30" as="geometry" />
341
+ </mxCell>
342
+ <mxCell id="46MY76A0PzHGgOhmMeRs-29" value="&lt;font style=&quot;font-size: 20px;&quot;&gt;...&lt;/font&gt;" style="text;html=1;strokeColor=none;fillColor=none;align=center;verticalAlign=middle;whiteSpace=wrap;rounded=0;" parent="uGYOu0AKZNpe5l68Mrnq-1" vertex="1">
343
+ <mxGeometry x="619.93" y="79" width="21" height="30" as="geometry" />
344
+ </mxCell>
345
+ <mxCell id="46MY76A0PzHGgOhmMeRs-34" value="" style="ellipse;whiteSpace=wrap;html=1;aspect=fixed;fillColor=#d5e8d4;strokeColor=#82b366;" parent="uGYOu0AKZNpe5l68Mrnq-1" vertex="1">
346
+ <mxGeometry x="701.9996551724138" y="49" width="10" height="10" as="geometry" />
347
+ </mxCell>
348
+ <mxCell id="46MY76A0PzHGgOhmMeRs-35" value="" style="ellipse;whiteSpace=wrap;html=1;aspect=fixed;fillColor=#d5e8d4;strokeColor=#82b366;" parent="uGYOu0AKZNpe5l68Mrnq-1" vertex="1">
349
+ <mxGeometry x="717.9996551724138" y="49" width="10" height="10" as="geometry" />
350
+ </mxCell>
351
+ <mxCell id="46MY76A0PzHGgOhmMeRs-36" value="" style="ellipse;whiteSpace=wrap;html=1;aspect=fixed;fillColor=#d5e8d4;strokeColor=#82b366;" parent="uGYOu0AKZNpe5l68Mrnq-1" vertex="1">
352
+ <mxGeometry x="742.9996551724138" y="49" width="10" height="10" as="geometry" />
353
+ </mxCell>
354
+ <mxCell id="46MY76A0PzHGgOhmMeRs-40" value="&lt;font style=&quot;font-size: 20px;&quot;&gt;...&lt;/font&gt;" style="text;html=1;strokeColor=none;fillColor=none;align=center;verticalAlign=middle;whiteSpace=wrap;rounded=0;" parent="uGYOu0AKZNpe5l68Mrnq-1" vertex="1">
355
+ <mxGeometry x="726" y="39" width="21" height="30" as="geometry" />
356
+ </mxCell>
357
+ <mxCell id="46MY76A0PzHGgOhmMeRs-46" value="&lt;font style=&quot;font-size: 14px;&quot;&gt;$$ X_t$$&lt;/font&gt;" style="text;html=1;strokeColor=none;fillColor=none;align=center;verticalAlign=middle;whiteSpace=wrap;rounded=0;" parent="uGYOu0AKZNpe5l68Mrnq-1" vertex="1">
358
+ <mxGeometry x="158.85999999999996" y="50" width="60" height="30" as="geometry" />
359
+ </mxCell>
360
+ <mxCell id="46MY76A0PzHGgOhmMeRs-47" value="&lt;font style=&quot;font-size: 14px;&quot;&gt;$$ z^k_t $$&lt;/font&gt;" style="text;html=1;strokeColor=none;fillColor=none;align=center;verticalAlign=middle;whiteSpace=wrap;rounded=0;" parent="uGYOu0AKZNpe5l68Mrnq-1" vertex="1">
361
+ <mxGeometry x="733" y="13" width="60" height="30" as="geometry" />
362
+ </mxCell>
363
+ <mxCell id="46MY76A0PzHGgOhmMeRs-53" value="" style="group" parent="uGYOu0AKZNpe5l68Mrnq-1" connectable="0" vertex="1">
364
+ <mxGeometry x="692" y="8" width="10" height="40" as="geometry" />
365
+ </mxCell>
366
+ <mxCell id="46MY76A0PzHGgOhmMeRs-48" value="" style="rounded=0;whiteSpace=wrap;html=1;direction=south;fillColor=#d5e8d4;strokeColor=#82b366;" parent="46MY76A0PzHGgOhmMeRs-53" vertex="1">
367
+ <mxGeometry width="10" height="40" as="geometry" />
368
+ </mxCell>
369
+ <mxCell id="46MY76A0PzHGgOhmMeRs-49" value="" style="endArrow=none;html=1;rounded=0;fillColor=#d5e8d4;strokeColor=#82b366;" parent="46MY76A0PzHGgOhmMeRs-53" edge="1">
370
+ <mxGeometry width="50" height="50" relative="1" as="geometry">
371
+ <mxPoint y="30" as="sourcePoint" />
372
+ <mxPoint x="10" y="30" as="targetPoint" />
373
+ </mxGeometry>
374
+ </mxCell>
375
+ <mxCell id="46MY76A0PzHGgOhmMeRs-51" value="" style="endArrow=none;html=1;rounded=0;fillColor=#d5e8d4;strokeColor=#82b366;" parent="46MY76A0PzHGgOhmMeRs-53" edge="1">
376
+ <mxGeometry width="50" height="50" relative="1" as="geometry">
377
+ <mxPoint y="20" as="sourcePoint" />
378
+ <mxPoint x="10" y="20" as="targetPoint" />
379
+ </mxGeometry>
380
+ </mxCell>
381
+ <mxCell id="46MY76A0PzHGgOhmMeRs-52" value="" style="endArrow=none;html=1;rounded=0;fillColor=#d5e8d4;strokeColor=#82b366;" parent="46MY76A0PzHGgOhmMeRs-53" edge="1">
382
+ <mxGeometry width="50" height="50" relative="1" as="geometry">
383
+ <mxPoint y="10" as="sourcePoint" />
384
+ <mxPoint x="10" y="10" as="targetPoint" />
385
+ </mxGeometry>
386
+ </mxCell>
387
+ <mxCell id="46MY76A0PzHGgOhmMeRs-61" value="" style="group" parent="uGYOu0AKZNpe5l68Mrnq-1" connectable="0" vertex="1">
388
+ <mxGeometry x="710" y="8" width="10" height="40" as="geometry" />
389
+ </mxCell>
390
+ <mxCell id="46MY76A0PzHGgOhmMeRs-62" value="" style="rounded=0;whiteSpace=wrap;html=1;direction=south;fillColor=#d5e8d4;strokeColor=#82b366;" parent="46MY76A0PzHGgOhmMeRs-61" vertex="1">
391
+ <mxGeometry width="10" height="40" as="geometry" />
392
+ </mxCell>
393
+ <mxCell id="46MY76A0PzHGgOhmMeRs-63" value="" style="endArrow=none;html=1;rounded=0;fillColor=#d5e8d4;strokeColor=#82b366;" parent="46MY76A0PzHGgOhmMeRs-61" edge="1">
394
+ <mxGeometry width="50" height="50" relative="1" as="geometry">
395
+ <mxPoint y="30" as="sourcePoint" />
396
+ <mxPoint x="10" y="30" as="targetPoint" />
397
+ </mxGeometry>
398
+ </mxCell>
399
+ <mxCell id="46MY76A0PzHGgOhmMeRs-64" value="" style="endArrow=none;html=1;rounded=0;fillColor=#d5e8d4;strokeColor=#82b366;" parent="46MY76A0PzHGgOhmMeRs-61" edge="1">
400
+ <mxGeometry width="50" height="50" relative="1" as="geometry">
401
+ <mxPoint y="20" as="sourcePoint" />
402
+ <mxPoint x="10" y="20" as="targetPoint" />
403
+ </mxGeometry>
404
+ </mxCell>
405
+ <mxCell id="46MY76A0PzHGgOhmMeRs-65" value="" style="endArrow=none;html=1;rounded=0;fillColor=#d5e8d4;strokeColor=#82b366;" parent="46MY76A0PzHGgOhmMeRs-61" edge="1">
406
+ <mxGeometry width="50" height="50" relative="1" as="geometry">
407
+ <mxPoint y="10" as="sourcePoint" />
408
+ <mxPoint x="10" y="10" as="targetPoint" />
409
+ </mxGeometry>
410
+ </mxCell>
411
+ <mxCell id="46MY76A0PzHGgOhmMeRs-66" value="" style="group" parent="uGYOu0AKZNpe5l68Mrnq-1" connectable="0" vertex="1">
412
+ <mxGeometry x="735" y="8" width="10" height="40" as="geometry" />
413
+ </mxCell>
414
+ <mxCell id="46MY76A0PzHGgOhmMeRs-67" value="" style="rounded=0;whiteSpace=wrap;html=1;direction=south;fillColor=#d5e8d4;strokeColor=#82b366;" parent="46MY76A0PzHGgOhmMeRs-66" vertex="1">
415
+ <mxGeometry width="10" height="40" as="geometry" />
416
+ </mxCell>
417
+ <mxCell id="46MY76A0PzHGgOhmMeRs-68" value="" style="endArrow=none;html=1;rounded=0;fillColor=#d5e8d4;strokeColor=#82b366;" parent="46MY76A0PzHGgOhmMeRs-66" edge="1">
418
+ <mxGeometry width="50" height="50" relative="1" as="geometry">
419
+ <mxPoint y="30" as="sourcePoint" />
420
+ <mxPoint x="10" y="30" as="targetPoint" />
421
+ </mxGeometry>
422
+ </mxCell>
423
+ <mxCell id="46MY76A0PzHGgOhmMeRs-69" value="" style="endArrow=none;html=1;rounded=0;fillColor=#d5e8d4;strokeColor=#82b366;" parent="46MY76A0PzHGgOhmMeRs-66" edge="1">
424
+ <mxGeometry width="50" height="50" relative="1" as="geometry">
425
+ <mxPoint y="20" as="sourcePoint" />
426
+ <mxPoint x="10" y="20" as="targetPoint" />
427
+ </mxGeometry>
428
+ </mxCell>
429
+ <mxCell id="46MY76A0PzHGgOhmMeRs-70" value="" style="endArrow=none;html=1;rounded=0;fillColor=#d5e8d4;strokeColor=#82b366;" parent="46MY76A0PzHGgOhmMeRs-66" edge="1">
430
+ <mxGeometry width="50" height="50" relative="1" as="geometry">
431
+ <mxPoint y="10" as="sourcePoint" />
432
+ <mxPoint x="10" y="10" as="targetPoint" />
433
+ </mxGeometry>
434
+ </mxCell>
435
+ <mxCell id="46MY76A0PzHGgOhmMeRs-71" value="" style="rounded=1;whiteSpace=wrap;html=1;direction=south;dashed=1;" parent="uGYOu0AKZNpe5l68Mrnq-1" vertex="1">
436
+ <mxGeometry x="571" y="117" width="189" height="130" as="geometry" />
437
+ </mxCell>
438
+ <mxCell id="46MY76A0PzHGgOhmMeRs-72" value="" style="endArrow=none;dashed=1;html=1;rounded=0;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" parent="uGYOu0AKZNpe5l68Mrnq-1" source="uGYOu0AKZNpe5l68Mrnq-57" edge="1">
439
+ <mxGeometry width="50" height="50" relative="1" as="geometry">
440
+ <mxPoint x="521" y="169" as="sourcePoint" />
441
+ <mxPoint x="570" y="140" as="targetPoint" />
442
+ </mxGeometry>
443
+ </mxCell>
444
+ <mxCell id="46MY76A0PzHGgOhmMeRs-73" value="" style="endArrow=none;dashed=1;html=1;rounded=0;exitX=1;exitY=0.5;exitDx=0;exitDy=0;entryX=0.75;entryY=1;entryDx=0;entryDy=0;" parent="uGYOu0AKZNpe5l68Mrnq-1" source="uGYOu0AKZNpe5l68Mrnq-57" target="46MY76A0PzHGgOhmMeRs-71" edge="1">
445
+ <mxGeometry width="50" height="50" relative="1" as="geometry">
446
+ <mxPoint x="531" y="110" as="sourcePoint" />
447
+ <mxPoint x="570" y="190" as="targetPoint" />
448
+ </mxGeometry>
449
+ </mxCell>
450
+ <mxCell id="46MY76A0PzHGgOhmMeRs-92" value="" style="group" parent="uGYOu0AKZNpe5l68Mrnq-1" connectable="0" vertex="1">
451
+ <mxGeometry x="648" y="199" width="40" height="40" as="geometry" />
452
+ </mxCell>
453
+ <mxCell id="46MY76A0PzHGgOhmMeRs-74" value="" style="rounded=0;whiteSpace=wrap;html=1;" parent="46MY76A0PzHGgOhmMeRs-92" vertex="1">
454
+ <mxGeometry width="40" height="40" as="geometry" />
455
+ </mxCell>
456
+ <mxCell id="46MY76A0PzHGgOhmMeRs-75" value="" style="rounded=0;whiteSpace=wrap;html=1;" parent="46MY76A0PzHGgOhmMeRs-92" vertex="1">
457
+ <mxGeometry width="10" height="10" as="geometry" />
458
+ </mxCell>
459
+ <mxCell id="46MY76A0PzHGgOhmMeRs-76" value="" style="rounded=0;whiteSpace=wrap;html=1;fillColor=#DEDEDE;" parent="46MY76A0PzHGgOhmMeRs-92" vertex="1">
460
+ <mxGeometry x="10" width="10" height="10" as="geometry" />
461
+ </mxCell>
462
+ <mxCell id="46MY76A0PzHGgOhmMeRs-77" value="" style="rounded=0;whiteSpace=wrap;html=1;fillColor=#c2c2c2;fontColor=#333333;strokeColor=#666666;" parent="46MY76A0PzHGgOhmMeRs-92" vertex="1">
463
+ <mxGeometry x="20" width="10" height="10" as="geometry" />
464
+ </mxCell>
465
+ <mxCell id="46MY76A0PzHGgOhmMeRs-78" value="" style="rounded=0;whiteSpace=wrap;html=1;fillColor=#525252;" parent="46MY76A0PzHGgOhmMeRs-92" vertex="1">
466
+ <mxGeometry x="30" width="10" height="10" as="geometry" />
467
+ </mxCell>
468
+ <mxCell id="46MY76A0PzHGgOhmMeRs-79" value="" style="rounded=0;whiteSpace=wrap;html=1;" parent="46MY76A0PzHGgOhmMeRs-92" vertex="1">
469
+ <mxGeometry y="10" width="10" height="10" as="geometry" />
470
+ </mxCell>
471
+ <mxCell id="46MY76A0PzHGgOhmMeRs-80" value="" style="rounded=0;whiteSpace=wrap;html=1;" parent="46MY76A0PzHGgOhmMeRs-92" vertex="1">
472
+ <mxGeometry y="20" width="10" height="10" as="geometry" />
473
+ </mxCell>
474
+ <mxCell id="46MY76A0PzHGgOhmMeRs-82" value="" style="rounded=0;whiteSpace=wrap;html=1;" parent="46MY76A0PzHGgOhmMeRs-92" vertex="1">
475
+ <mxGeometry y="30" width="10" height="10" as="geometry" />
476
+ </mxCell>
477
+ <mxCell id="46MY76A0PzHGgOhmMeRs-83" value="" style="rounded=0;whiteSpace=wrap;html=1;" parent="46MY76A0PzHGgOhmMeRs-92" vertex="1">
478
+ <mxGeometry x="10" y="10" width="10" height="10" as="geometry" />
479
+ </mxCell>
480
+ <mxCell id="46MY76A0PzHGgOhmMeRs-84" value="" style="rounded=0;whiteSpace=wrap;html=1;fillColor=#919191;" parent="46MY76A0PzHGgOhmMeRs-92" vertex="1">
481
+ <mxGeometry x="20" y="10" width="10" height="10" as="geometry" />
482
+ </mxCell>
483
+ <mxCell id="46MY76A0PzHGgOhmMeRs-85" value="" style="rounded=0;whiteSpace=wrap;html=1;fillColor=#f5f5f5;fontColor=#333333;strokeColor=#666666;" parent="46MY76A0PzHGgOhmMeRs-92" vertex="1">
484
+ <mxGeometry x="30" y="10" width="10" height="10" as="geometry" />
485
+ </mxCell>
486
+ <mxCell id="46MY76A0PzHGgOhmMeRs-86" value="" style="rounded=0;whiteSpace=wrap;html=1;" parent="46MY76A0PzHGgOhmMeRs-92" vertex="1">
487
+ <mxGeometry x="10" y="20" width="10" height="10" as="geometry" />
488
+ </mxCell>
489
+ <mxCell id="46MY76A0PzHGgOhmMeRs-87" value="" style="rounded=0;whiteSpace=wrap;html=1;" parent="46MY76A0PzHGgOhmMeRs-92" vertex="1">
490
+ <mxGeometry x="20" y="20" width="10" height="10" as="geometry" />
491
+ </mxCell>
492
+ <mxCell id="46MY76A0PzHGgOhmMeRs-88" value="" style="rounded=0;whiteSpace=wrap;html=1;fillColor=#050505;" parent="46MY76A0PzHGgOhmMeRs-92" vertex="1">
493
+ <mxGeometry x="30" y="20" width="10" height="10" as="geometry" />
494
+ </mxCell>
495
+ <mxCell id="46MY76A0PzHGgOhmMeRs-89" value="" style="rounded=0;whiteSpace=wrap;html=1;" parent="46MY76A0PzHGgOhmMeRs-92" vertex="1">
496
+ <mxGeometry x="10" y="30" width="10" height="10" as="geometry" />
497
+ </mxCell>
498
+ <mxCell id="46MY76A0PzHGgOhmMeRs-90" value="" style="rounded=0;whiteSpace=wrap;html=1;" parent="46MY76A0PzHGgOhmMeRs-92" vertex="1">
499
+ <mxGeometry x="20" y="30" width="10" height="10" as="geometry" />
500
+ </mxCell>
501
+ <mxCell id="46MY76A0PzHGgOhmMeRs-91" value="" style="rounded=0;whiteSpace=wrap;html=1;" parent="46MY76A0PzHGgOhmMeRs-92" vertex="1">
502
+ <mxGeometry x="30" y="30" width="10" height="10" as="geometry" />
503
+ </mxCell>
504
+ <mxCell id="46MY76A0PzHGgOhmMeRs-94" value="" style="group" parent="uGYOu0AKZNpe5l68Mrnq-1" connectable="0" vertex="1">
505
+ <mxGeometry x="586" y="122" width="10" height="40" as="geometry" />
506
+ </mxCell>
507
+ <mxCell id="46MY76A0PzHGgOhmMeRs-95" value="" style="rounded=0;whiteSpace=wrap;html=1;direction=south;fillColor=#d5e8d4;strokeColor=#82b366;" parent="46MY76A0PzHGgOhmMeRs-94" vertex="1">
508
+ <mxGeometry width="10" height="40" as="geometry" />
509
+ </mxCell>
510
+ <mxCell id="46MY76A0PzHGgOhmMeRs-96" value="" style="endArrow=none;html=1;rounded=0;fillColor=#d5e8d4;strokeColor=#82b366;" parent="46MY76A0PzHGgOhmMeRs-94" edge="1">
511
+ <mxGeometry width="50" height="50" relative="1" as="geometry">
512
+ <mxPoint y="30" as="sourcePoint" />
513
+ <mxPoint x="10" y="30" as="targetPoint" />
514
+ </mxGeometry>
515
+ </mxCell>
516
+ <mxCell id="46MY76A0PzHGgOhmMeRs-97" value="" style="endArrow=none;html=1;rounded=0;fillColor=#d5e8d4;strokeColor=#82b366;" parent="46MY76A0PzHGgOhmMeRs-94" edge="1">
517
+ <mxGeometry width="50" height="50" relative="1" as="geometry">
518
+ <mxPoint y="20" as="sourcePoint" />
519
+ <mxPoint x="10" y="20" as="targetPoint" />
520
+ </mxGeometry>
521
+ </mxCell>
522
+ <mxCell id="46MY76A0PzHGgOhmMeRs-98" value="" style="endArrow=none;html=1;rounded=0;fillColor=#d5e8d4;strokeColor=#82b366;" parent="46MY76A0PzHGgOhmMeRs-94" edge="1">
523
+ <mxGeometry width="50" height="50" relative="1" as="geometry">
524
+ <mxPoint y="10" as="sourcePoint" />
525
+ <mxPoint x="10" y="10" as="targetPoint" />
526
+ </mxGeometry>
527
+ </mxCell>
528
+ <mxCell id="46MY76A0PzHGgOhmMeRs-99" value="" style="group" parent="uGYOu0AKZNpe5l68Mrnq-1" connectable="0" vertex="1">
529
+ <mxGeometry x="600" y="122" width="10" height="40" as="geometry" />
530
+ </mxCell>
531
+ <mxCell id="46MY76A0PzHGgOhmMeRs-100" value="" style="rounded=0;whiteSpace=wrap;html=1;direction=south;fillColor=#d5e8d4;strokeColor=#82b366;" parent="46MY76A0PzHGgOhmMeRs-99" vertex="1">
532
+ <mxGeometry width="10" height="40" as="geometry" />
533
+ </mxCell>
534
+ <mxCell id="46MY76A0PzHGgOhmMeRs-101" value="" style="endArrow=none;html=1;rounded=0;fillColor=#d5e8d4;strokeColor=#82b366;" parent="46MY76A0PzHGgOhmMeRs-99" edge="1">
535
+ <mxGeometry width="50" height="50" relative="1" as="geometry">
536
+ <mxPoint y="30" as="sourcePoint" />
537
+ <mxPoint x="10" y="30" as="targetPoint" />
538
+ </mxGeometry>
539
+ </mxCell>
540
+ <mxCell id="46MY76A0PzHGgOhmMeRs-102" value="" style="endArrow=none;html=1;rounded=0;fillColor=#d5e8d4;strokeColor=#82b366;" parent="46MY76A0PzHGgOhmMeRs-99" edge="1">
541
+ <mxGeometry width="50" height="50" relative="1" as="geometry">
542
+ <mxPoint y="20" as="sourcePoint" />
543
+ <mxPoint x="10" y="20" as="targetPoint" />
544
+ </mxGeometry>
545
+ </mxCell>
546
+ <mxCell id="46MY76A0PzHGgOhmMeRs-103" value="" style="endArrow=none;html=1;rounded=0;fillColor=#d5e8d4;strokeColor=#82b366;" parent="46MY76A0PzHGgOhmMeRs-99" edge="1">
547
+ <mxGeometry width="50" height="50" relative="1" as="geometry">
548
+ <mxPoint y="10" as="sourcePoint" />
549
+ <mxPoint x="10" y="10" as="targetPoint" />
550
+ </mxGeometry>
551
+ </mxCell>
552
+ <mxCell id="46MY76A0PzHGgOhmMeRs-104" value="&lt;font style=&quot;font-size: 20px;&quot;&gt;...&lt;/font&gt;" style="text;html=1;strokeColor=none;fillColor=none;align=center;verticalAlign=middle;whiteSpace=wrap;rounded=0;" parent="uGYOu0AKZNpe5l68Mrnq-1" vertex="1">
553
+ <mxGeometry x="610" y="125" width="21" height="30" as="geometry" />
554
+ </mxCell>
555
+ <mxCell id="46MY76A0PzHGgOhmMeRs-108" value="" style="group" parent="uGYOu0AKZNpe5l68Mrnq-1" connectable="0" vertex="1">
556
+ <mxGeometry x="630" y="122" width="10" height="40" as="geometry" />
557
+ </mxCell>
558
+ <mxCell id="46MY76A0PzHGgOhmMeRs-109" value="" style="rounded=0;whiteSpace=wrap;html=1;direction=south;fillColor=#d5e8d4;strokeColor=#82b366;" parent="46MY76A0PzHGgOhmMeRs-108" vertex="1">
559
+ <mxGeometry width="10" height="40" as="geometry" />
560
+ </mxCell>
561
+ <mxCell id="46MY76A0PzHGgOhmMeRs-110" value="" style="endArrow=none;html=1;rounded=0;fillColor=#d5e8d4;strokeColor=#82b366;" parent="46MY76A0PzHGgOhmMeRs-108" edge="1">
562
+ <mxGeometry width="50" height="50" relative="1" as="geometry">
563
+ <mxPoint y="30" as="sourcePoint" />
564
+ <mxPoint x="10" y="30" as="targetPoint" />
565
+ </mxGeometry>
566
+ </mxCell>
567
+ <mxCell id="46MY76A0PzHGgOhmMeRs-111" value="" style="endArrow=none;html=1;rounded=0;fillColor=#d5e8d4;strokeColor=#82b366;" parent="46MY76A0PzHGgOhmMeRs-108" edge="1">
568
+ <mxGeometry width="50" height="50" relative="1" as="geometry">
569
+ <mxPoint y="20" as="sourcePoint" />
570
+ <mxPoint x="10" y="20" as="targetPoint" />
571
+ </mxGeometry>
572
+ </mxCell>
573
+ <mxCell id="46MY76A0PzHGgOhmMeRs-112" value="" style="endArrow=none;html=1;rounded=0;fillColor=#d5e8d4;strokeColor=#82b366;" parent="46MY76A0PzHGgOhmMeRs-108" edge="1">
574
+ <mxGeometry width="50" height="50" relative="1" as="geometry">
575
+ <mxPoint y="10" as="sourcePoint" />
576
+ <mxPoint x="10" y="10" as="targetPoint" />
577
+ </mxGeometry>
578
+ </mxCell>
579
+ <mxCell id="46MY76A0PzHGgOhmMeRs-113" value="&lt;span style=&quot;&quot;&gt;&lt;font style=&quot;font-size: 12px;&quot;&gt;&lt;b&gt;Gumbel softmax&lt;/b&gt;&lt;/font&gt;&lt;/span&gt;" style="text;html=1;align=center;verticalAlign=middle;whiteSpace=wrap;rounded=0;fontStyle=0" parent="uGYOu0AKZNpe5l68Mrnq-1" vertex="1">
580
+ <mxGeometry x="610" y="173" width="113" height="30" as="geometry" />
581
+ </mxCell>
582
+ <mxCell id="46MY76A0PzHGgOhmMeRs-114" value="&lt;font style=&quot;font-size: 14px;&quot;&gt;$$A^k$$&lt;/font&gt;" style="text;html=1;strokeColor=none;fillColor=none;align=center;verticalAlign=middle;whiteSpace=wrap;rounded=0;" parent="uGYOu0AKZNpe5l68Mrnq-1" vertex="1">
583
+ <mxGeometry x="673" y="204" width="60" height="30" as="geometry" />
584
+ </mxCell>
585
+ <mxCell id="46MY76A0PzHGgOhmMeRs-121" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=0.5;exitY=1;exitDx=0;exitDy=0;entryX=1;entryY=0.5;entryDx=0;entryDy=0;endSize=2;" parent="uGYOu0AKZNpe5l68Mrnq-1" source="46MY76A0PzHGgOhmMeRs-116" target="46MY76A0PzHGgOhmMeRs-113" edge="1">
586
+ <mxGeometry relative="1" as="geometry">
587
+ <Array as="points">
588
+ <mxPoint x="738" y="188" />
589
+ </Array>
590
+ </mxGeometry>
591
+ </mxCell>
592
+ <mxCell id="46MY76A0PzHGgOhmMeRs-116" value="&lt;font style=&quot;font-size: 12px;&quot;&gt;FC&lt;/font&gt;" style="rounded=1;whiteSpace=wrap;html=1;fillColor=#f8cecc;strokeColor=#b85450;" parent="uGYOu0AKZNpe5l68Mrnq-1" vertex="1">
593
+ <mxGeometry x="725" y="121" width="25" height="43" as="geometry" />
594
+ </mxCell>
595
+ <mxCell id="46MY76A0PzHGgOhmMeRs-117" value="" style="endArrow=classic;html=1;rounded=0;entryX=0;entryY=0.5;entryDx=0;entryDy=0;endSize=2;" parent="uGYOu0AKZNpe5l68Mrnq-1" target="46MY76A0PzHGgOhmMeRs-116" edge="1">
596
+ <mxGeometry width="50" height="50" relative="1" as="geometry">
597
+ <mxPoint x="640" y="143" as="sourcePoint" />
598
+ <mxPoint x="530" y="80" as="targetPoint" />
599
+ </mxGeometry>
600
+ </mxCell>
601
+ <mxCell id="46MY76A0PzHGgOhmMeRs-118" value="&lt;span style=&quot;&quot;&gt;&lt;font size=&quot;1&quot; style=&quot;&quot;&gt;&lt;span style=&quot;font-weight: normal;&quot;&gt;Pairwise&lt;br&gt;similarity&lt;/span&gt;&lt;/font&gt;&lt;/span&gt;" style="text;html=1;align=center;verticalAlign=middle;whiteSpace=wrap;rounded=0;fontStyle=1" parent="uGYOu0AKZNpe5l68Mrnq-1" vertex="1">
602
+ <mxGeometry x="636" y="126" width="90" height="30" as="geometry" />
603
+ </mxCell>
604
+ <mxCell id="o85H6cR-YdHolG_h0Mx_-0" value="&lt;font style=&quot;font-size: 12px;&quot;&gt;$$ \{ t - \tau, ..., t - 1 \} $$&lt;/font&gt;" style="text;html=1;strokeColor=none;fillColor=none;align=center;verticalAlign=middle;whiteSpace=wrap;rounded=0;" parent="uGYOu0AKZNpe5l68Mrnq-1" vertex="1">
605
+ <mxGeometry x="646" y="65" width="154" height="30" as="geometry" />
606
+ </mxCell>
607
+ </root>
608
+ </mxGraphModel>
609
+ </diagram>
610
+ </mxfile>
2307.14680/main_diagram/main_diagram.pdf ADDED
Binary file (69.7 kB).
 
2307.14680/paper_text/intro_method.md ADDED
@@ -0,0 +1,58 @@
1
+ # Introduction
2
+
3
+ From financial investment and market analysis [@ding2015deep] to traffic [@li2017diffusion], electricity management, healthcare [@chauhan2015anomaly], and climate science, accurately predicting the future values of a series from its available historical records has long been a coveted task in many scientific and industrial fields. A wide variety of methods is employed for time series forecasting, ranging from statistical [@box2015time] to recent deep learning approaches [@lim2021time]. However, several major challenges remain. Real-world time series data are often subject to noisy and irregular observations, missing values, repeated patterns of variable periodicities and very long-term dependencies. Moreover, while time series are meant to represent continuous phenomena, the data are usually collected by sensors, so the observations are determined by a sampling rate, with potential information loss. On the other hand, standard sequential neural networks, such as recurrent neural networks (RNNs) [@rumelhart1986learning] and convolutional neural networks (CNNs) [@lecun1998gradient], are discrete and assume regular spacing between observations. Several continuous analogues of such architectures that implicitly handle the time information have been proposed to address irregularly sampled and missing data [@rubanova2019latent]. The variable periodicities and long-term dependencies present in the data also make models prone to shape and temporal distortions, overfitting and poor local minima when trained with standard loss functions (e.g., MSE). Variants of DTW and MSE have been proposed to mitigate these phenomena and can increase the forecasting quality of deep neural networks [@le2022deep; @kosma2022time].
4
+
5
+ A novel perspective for boosting the robustness of neural networks on complex time series is to extract representative embeddings for patterns after transforming them into another representation domain, such as the spectral one. Spectral approaches have seen much use in the text domain. Graph-based text mining (i.e., Graph-of-Words) [@rousseau2013graph] can be used to capture the relationships between terms and to build document-level representations. It is natural, then, that such approaches might be suitable for more general sequence modeling. Capitalizing on the recent success of graph neural networks (GNNs) on graph-structured data, a new family of algorithms jointly learns a correlation graph between interrelated time series while simultaneously performing forecasting [@wu2020connecting; @cao2020spectral; @shang2021discrete]. The nodes in the learnable graph structure represent the individual time series, and the links between them express their temporal similarities. However, since such methods rely on series-to-series correlations, they do not explicitly represent the evolution of the inter-series temporal dynamics. Some preliminary studies have proposed simple computational methods for mapping time series to temporal graphs in which each node corresponds to a time step, such as the visibility graph [@lacasa2008time] and the recurrence network [@donner2010recurrence].
6
+
7
+ In this paper, we propose a novel neural network, *TimeGNN*, that extends these previous approaches by jointly learning dynamic temporal graphs for time series forecasting on raw data. TimeGNN (i) extracts temporal embeddings from sliding windows of the input series using dilated convolutions with different receptive field sizes, (ii) constructs a learnable graph structure, which is forward and directed, based on the similarity of the embedding vectors in each window in a differentiable way, and (iii) applies standard GNN architectures to learn embeddings for each node, producing forecasts based on the representation vector of the last time step. We evaluate the proposed architecture on various real-world datasets and compare it against several deep learning benchmarks, including graph-based approaches. Our results indicate that TimeGNN is significantly less costly in both inference and training while achieving comparable forecasting performance. The code implementation for this paper is available at <https://github.com/xun468/Time-GNN>.
8
+
9
+ # Method
10
+
11
+ Let $\{\mathbf{X}_{i,1:T}\}_{i=1}^m$ be a multivariate time series that consists of $m$ channels and has length $T$. Then, $\mathbf{X}_t \in \mathbb{R}^{m}$ represents the observed values at time step $t$. Let also $\mathcal{G}$ denote the set of temporal dynamic graph structures that we want to infer.
12
+
13
+ Given the observed values of the $\tau$ previous time steps of the time series, i.e., $\mathbf{X}_{t-\tau}, \ldots, \mathbf{X}_{t-1}$, the goal is to forecast the next $h$ time steps (e.g., $h=1$ for 1-step forecasting), i.e., $\hat{\mathbf{X}}_{t}, \hat{\mathbf{X}}_{t+1}, \ldots,$ $\hat{\mathbf{X}}_{t+h-1}$. These values are obtained by the forecasting model $\mathcal{F}$ with parameters $\Phi$ and the graphs $\mathcal{G}$ as follows:
14
+
15
+ $$\begin{equation}
16
+ \hat{\mathbf{X}}_{t}, \hat{\mathbf{X}}_{t+1}, \ldots, \hat{\mathbf{X}}_{t+h-1} = \mathcal{F}(\mathbf{X}_{t-\tau}, \ldots, \mathbf{X}_{t-1} ; \mathcal{G} ; \Phi)
17
+ \label{eq:1}
18
+ \end{equation}$$
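+
+ As an illustrative sketch (the function and array layout below are our own, not part of the paper's released code), the sliding windows and forecast targets consumed by $\mathcal{F}$ can be built as follows:
+
+ ```python
+ import numpy as np
+
+ def make_windows(series, tau, horizon):
+     """Slice a (T, m) multivariate series into (window, target) pairs.
+
+     Each window holds tau consecutive observations X_{t-tau}, ..., X_{t-1};
+     each target holds the next `horizon` observations X_t, ..., X_{t+h-1}.
+     """
+     T = series.shape[0]
+     windows, targets = [], []
+     for t in range(tau, T - horizon + 1):
+         windows.append(series[t - tau:t])      # shape (tau, m)
+         targets.append(series[t:t + horizon])  # shape (horizon, m)
+     return np.stack(windows), np.stack(targets)
+
+ # Example: T=100 steps, m=3 channels, tau=12, 1-step-ahead forecasting
+ X = np.random.randn(100, 3)
+ S, Y = make_windows(X, tau=12, horizon=1)
+ print(S.shape, Y.shape)  # (88, 12, 3) (88, 1, 3)
+ ```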
19
+
20
+ <figure id="fig:model" data-latex-placement="t">
21
+ <embed src="figures/series2graph-1.pdf" style="width:75.0%" />
22
+ <figcaption>The proposed TimeGNN framework for graph learning from raw time series and forecasting based on embeddings learned over the parameterized graph structures.</figcaption>
23
+ </figure>
24
+
25
+ Unlike previous methods, which extract one feature vector per variable in the multivariate input, our method extracts one feature vector per time step in each window $k$ of length $\tau$. Temporal sub-patterns are learned using stacked dilated convolutions, similar to the main blocks of the inception architecture [@lin2013network].
26
+
27
+ Given the sliding windows $\mathbf{S} = \{\mathbf{X}_{t-\tau+k-K}, \ldots, \mathbf{X}_{t+k-K-1}\}_{k=1}^K$, we perform the following convolutional operations to extract three feature maps $\mathbf{f}_0^k$, $\mathbf{f}_1^k$, $\mathbf{f}_2^k$ per window $\mathbf{S}^k$, where $\mathbf{f}_i^k \in \mathbb{R}^{\tau \times d}$ and $d$ is the hidden dimension of the convolutional kernels, such that: $$\begin{equation}
28
+ \begin{split}
29
+ &\mathbf{f}_0^k = \mathbf{S}^k \ast \mathbf{C}_0^{1,1} +\mathbf{b}_{01} \\
30
+ &\mathbf{f}_1^k = (\mathbf{S}^k \ast \mathbf{C}_1^{1,1} + \mathbf{b}_{11}) \ast \mathbf{C}_1^{3,3} + \mathbf{b}_{13} \\
31
+ &\mathbf{f}_2^k = (\mathbf{S}^k \ast \mathbf{C}_2^{1,1} + \mathbf{b}_{21}) \ast \mathbf{C}_2^{5,5} + \mathbf{b}_{25}
32
+ \end{split}
33
+ \end{equation}$$ where $\ast$ denotes the convolution operator, $\mathbf{C}_0^{1,1}$, $\mathbf{C}_1^{1,1}$, $\mathbf{C}_2^{1,1}$ are convolutional kernels of size 1 and dilation rate 1, $\mathbf{C}_1^{3,3}$ is a convolutional kernel of size 3 and dilation rate 3, $\mathbf{C}_2^{5,5}$ is a convolutional kernel of size 5 and dilation rate 5, and $\mathbf{b}_{01}, \mathbf{b}_{11}, \mathbf{b}_{21}, \mathbf{b}_{13}, \mathbf{b}_{25}$ are the corresponding bias terms.
34
+
35
+ The final representations per window $k$ are obtained using a fully connected layer on the concatenated features $\mathbf{f}_0^k, \mathbf{f}_1^k, \mathbf{f}_2^k$, i.e., $\mathbf{z}^k = \text{FC}(\mathbf{f}_0^k \|\mathbf{f}_1^k\|\mathbf{f}_2^k)$, such that $\mathbf{z}^k \in \mathbb{R}^{\tau \times d}$. In the next sections, we refer to each time step of the hidden representation of the feature extraction module in window $k$ as $\mathbf{z}_i^k, \forall~i \in \{1, \ldots, \tau\}$.
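+
+ A minimal PyTorch sketch of this feature extraction module is given below; the `'same'` padding (keeping all three branches aligned to the $\tau$ time steps) and the exact module layout are our assumptions, since the text only fixes the kernel sizes and dilation rates:
+
+ ```python
+ import torch
+ import torch.nn as nn
+
+ class FeatureExtractor(nn.Module):
+     """Inception-style dilated convolutions: one d-dim vector per time step.
+
+     Input:  window S^k of shape (batch, tau, m)
+     Output: features z^k of shape (batch, tau, d)
+     """
+     def __init__(self, m, d):
+         super().__init__()
+         self.branch0 = nn.Conv1d(m, d, kernel_size=1)               # f_0^k
+         self.branch1 = nn.Sequential(                               # f_1^k
+             nn.Conv1d(m, d, kernel_size=1),
+             nn.Conv1d(d, d, kernel_size=3, dilation=3, padding="same"))
+         self.branch2 = nn.Sequential(                               # f_2^k
+             nn.Conv1d(m, d, kernel_size=1),
+             nn.Conv1d(d, d, kernel_size=5, dilation=5, padding="same"))
+         self.fc = nn.Linear(3 * d, d)            # z^k = FC(f_0 || f_1 || f_2)
+
+     def forward(self, s):
+         s = s.transpose(1, 2)                    # (batch, m, tau) for Conv1d
+         f = torch.cat([self.branch0(s), self.branch1(s), self.branch2(s)], dim=1)
+         return self.fc(f.transpose(1, 2))        # (batch, tau, d)
+ ```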
36
+
37
+ The set $\mathcal{G} = \{\mathcal{G}^k\}_{k=1}^{K}$ describes the collection of graph structures that are parameterized for each individual sliding window of length $\tau$ of the series, where $K$ denotes the total number of windows. The goal of the graph learning module is to learn an adjacency matrix $\mathbf{A}^k \in \{0,1\}^{\tau \times \tau}$ for each temporal window of observations $\mathbf{S}^k$. Following the works of [@kipf2018neural; @shang2021discrete], we use the Gumbel softmax trick to sample a discrete adjacency matrix, as described below.
38
+
39
+ For the Gumbel softmax trick, let $\mathbf{A}^k$ refer to a random variable of the matrix Bernoulli distribution parameterized by $\boldsymbol{\theta}^k \in [0,1]^{\tau \times \tau}$, so that $A_{ij}^k \sim \text{Ber}(\theta_{ij}^k)$ independently for each pair $(i,j)$. By applying the Gumbel reparameterization trick [@jang2016categorical] to enable differentiable sampling, we obtain the following: $$\begin{equation}
40
+ \begin{gathered}
41
+ A_{ij}^k = \sigma((\log(\theta_{ij}^k/(1-\theta_{ij}^k)) + (\mathbf{g}_{i,j}^1 - \mathbf{g}_{i,j}^2))/s),\\
42
+ \mathbf{g}_{i,j}^1,\mathbf{g}_{i,j}^2 \sim \text{Gumbel}(0,1), \forall~i,j
43
+ \end{gathered}
44
+ \end{equation}$$ where $\mathbf{g}_{i,j}^1,\mathbf{g}_{i,j}^2$ are vectors of i.i.d samples drawn from Gumbel distribution, $\sigma$ is the sigmoid activation and $s$ is a parameter that controls the smoothness of samples, so that the distribution converges to categorical values as $s \xrightarrow{}0$.
45
+
46
+ The link predictor takes each pair of extracted features $(\mathbf{z}_i^k,\mathbf{z}_j^k)$ of window $k$ and maps their similarity to a probability $\theta_{ij}^k \in [0,1]$ by applying fully connected layers followed by a sigmoid activation; the Gumbel reparameterization trick above then samples discrete edges from these probabilities while retaining differentiability: $$\begin{equation}
47
+ \theta_{ij}^k = \sigma\Big( \text{FC}\big(\text{FC}(\mathbf{z}_i^k\|\mathbf{z}_j^k)\big)\Big)
48
+ \end{equation}$$ In order to obtain directed and forward graph structures $\mathcal{G}$ (i.e., with no look-back edges to previous time steps in the history), we only learn the upper triangular part of the adjacency matrices.
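+
+ One possible realization of this link predictor is sketched below; the hidden width and the ReLU between the two fully connected layers are assumptions, as the text only specifies the composition $\sigma(\text{FC}(\text{FC}(\cdot)))$ and the upper-triangular constraint:
+
+ ```python
+ import torch
+ import torch.nn as nn
+
+ class LinkPredictor(nn.Module):
+     """Maps feature pairs (z_i, z_j) to edge probabilities theta_ij in [0, 1].
+
+     Only the strict upper triangle (j > i) is kept, so the sampled graph
+     is directed and forward in time (no look-back edges).
+     """
+     def __init__(self, d, hidden=64):
+         super().__init__()
+         self.fc = nn.Sequential(nn.Linear(2 * d, hidden), nn.ReLU(),
+                                 nn.Linear(hidden, 1))
+
+     def forward(self, z):                          # z: (tau, d), one window
+         tau = z.size(0)
+         zi = z.unsqueeze(1).expand(tau, tau, -1)   # z_i along rows
+         zj = z.unsqueeze(0).expand(tau, tau, -1)   # z_j along columns
+         theta = torch.sigmoid(self.fc(torch.cat([zi, zj], dim=-1))).squeeze(-1)
+         return theta * torch.triu(torch.ones(tau, tau, device=z.device), diagonal=1)
+ ```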
49
+
50
+ Once the collection $\mathcal{G}$ of learnable graph structures per sliding window $k$ is sampled, standard GNN architectures can be applied to capture the node-to-node relations, i.e., the temporal graph dynamics. GraphSAGE [@hamilton2017inductive] was chosen as the basic GNN building block of the node embedding learning architecture, as it can effectively generalize across different graphs with the same attributes. GraphSAGE is an inductive framework that exploits node feature information and generates node embeddings (i.e., $\mathbf{h}_u$ for node $u$) via a learnable function, by sampling and aggregating features from a node's local neighborhood (i.e., $\mathcal{N}(u)$).
51
+
52
+ Let $(\mathcal{V}^k, \mathcal{E}^k)$ correspond to the sets of nodes and edges of the learnable graph structure for each $\mathcal{G}^k$. The node embedding update process, for each of the $p \in \{1, \ldots, P\}$ aggregation steps, employs the mean-based (convolutional) aggregator, calculating the element-wise mean of the vectors in $\{\mathbf{h}_v^{p-1}, \forall v \in \mathcal{N}(u)\}$, such that: $$\begin{equation}
53
+ \mathbf{h}_u^{p} \xleftarrow{} \sigma(\mathbf{W} \cdot \text{MEAN}(\{\mathbf{h}_u^{p-1}\} \cup \{\mathbf{h}_v^{p-1}, \forall v \in \mathcal{N}(u)\}))
54
+ \end{equation}$$ where $\mathbf{W}$ denotes trainable weights. The final normalized representation (i.e., $\mathbf{\tilde{h}}_u^{P}$) of the last node (i.e., time step) in each forward and directed graph, denoted as $\mathbf{z}_{u_T} = \mathbf{\tilde{h}}_{u_T}^P$, is passed to the output module. The output module consists of two fully connected layers that reduce the vector to the final output dimension, corresponding to the forecasts $\hat{\mathbf{X}}_{t}, \hat{\mathbf{X}}_{t+1}, \ldots, \hat{\mathbf{X}}_{t+h-1}$. Figure [1](#fig:model){reference-type="ref" reference="fig:model"} demonstrates the feature extraction, graph learning, GNN and output modules of the proposed TimeGNN architecture.
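+
+ With a dense adjacency matrix, one mean-aggregation step can be sketched as follows (reading the in-neighbors under $\mathbf{A}^k$, i.e., earlier time steps, as $\mathcal{N}(u)$, and assuming ReLU for $\sigma$; both are our reading rather than choices stated verbatim in the text):
+
+ ```python
+ import torch
+ import torch.nn as nn
+
+ def sage_mean_step(h, A, W):
+     """One mean-aggregator GraphSAGE step on a dense graph.
+
+     h: node embeddings of shape (tau, d); A: (tau, tau) adjacency with
+     A[v, u] = 1 if there is a (forward) edge v -> u; W: an nn.Linear(d, d).
+     """
+     deg = A.sum(dim=0)                               # |N(u)| for each node u
+     mean = (h + A.t() @ h) / (deg + 1).unsqueeze(1)  # mean over {u} and N(u)
+     return torch.relu(W(mean))                       # sigma(W . MEAN(...))
+
+ # Example: tau=12 nodes, d=16, fully forward-connected graph
+ W = nn.Linear(16, 16, bias=False)
+ A = torch.triu(torch.ones(12, 12), diagonal=1)
+ h_next = sage_mean_step(torch.randn(12, 16), A, W)   # shape (12, 16)
+ ```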
55
+
56
+ To train the parameters of Equation [\[eq:1\]](#eq:1){reference-type="eqref" reference="eq:1"} for the time series point forecasting task, we use the mean absolute error (MAE) loss. Let $\mathbf{\hat{X}}^{(i)}, i \in \{1,...,K\}$ denote the predicted vector values for the $K$ samples; then the MAE loss is defined as: $$\mathcal{L} = \frac{1}{K}\sum_{i=1}^{K}\|\mathbf{\hat{X}}^{(i)}-\mathbf{X}^{(i)}\|_1$$
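+
+ In PyTorch this criterion is essentially `torch.nn.L1Loss`, which averages over all elements rather than only over the $K$ samples and hence matches $\mathcal{L}$ up to a constant factor; an exact sketch is:
+
+ ```python
+ import torch
+
+ def mae_loss(pred, target):
+     # (1/K) * sum_i ||X_hat^(i) - X^(i)||_1, with the L1 norm taken
+     # over all entries of each sample
+     return (pred - target).abs().flatten(start_dim=1).sum(dim=1).mean()
+ ```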
57
+
58
+ The optimized weights for the feature extraction, graph structure learning, GNN and output modules are selected based on the minimum validation loss during training, which is evaluated as described in the experimental setup (Section [4.3](#sec:4.3){reference-type="ref" reference="sec:4.3"}).
2308.11272/main_diagram/main_diagram.drawio ADDED
@@ -0,0 +1,118 @@
1
+ <mxfile host="app.diagrams.net" modified="2023-07-31T22:15:09.748Z" agent="Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36" etag="Zqs71SPzbC7BgI-ZITWw" version="21.6.5" type="google">
2
+ <diagram name="Page-1" id="9R9n-Ug1W-eM6LYzJ4XK">
3
+ <mxGraphModel dx="1434" dy="806" grid="1" gridSize="10" guides="1" tooltips="1" connect="1" arrows="1" fold="1" page="1" pageScale="1" pageWidth="827" pageHeight="1169" math="0" shadow="0">
4
+ <root>
5
+ <mxCell id="0" />
6
+ <mxCell id="1" parent="0" />
7
+ <mxCell id="suSSk-VfTl0UGtPIKBeb-1" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#d5e8d4;strokeColor=#82b366;shadow=1;" parent="1" vertex="1">
8
+ <mxGeometry x="410" y="430" width="80" height="80" as="geometry" />
9
+ </mxCell>
10
+ <mxCell id="suSSk-VfTl0UGtPIKBeb-2" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#d5e8d4;strokeColor=#82b366;shadow=1;" parent="1" vertex="1">
11
+ <mxGeometry x="420" y="440" width="80" height="80" as="geometry" />
12
+ </mxCell>
13
+ <mxCell id="suSSk-VfTl0UGtPIKBeb-3" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#d5e8d4;strokeColor=#82b366;shadow=1;" parent="1" vertex="1">
14
+ <mxGeometry x="430" y="450" width="80" height="80" as="geometry" />
15
+ </mxCell>
16
+ <mxCell id="suSSk-VfTl0UGtPIKBeb-4" value="Q^I_i" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#d5e8d4;strokeColor=#82b366;shadow=1;" parent="1" vertex="1">
17
+ <mxGeometry x="440" y="460" width="80" height="80" as="geometry" />
18
+ </mxCell>
19
+ <mxCell id="suSSk-VfTl0UGtPIKBeb-5" value="Q^S" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#dae8fc;strokeColor=#6c8ebf;shadow=1;" parent="1" vertex="1">
20
+ <mxGeometry x="570" y="440" width="80" height="80" as="geometry" />
21
+ </mxCell>
22
+ <mxCell id="suSSk-VfTl0UGtPIKBeb-6" value="Q^F" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#dae8fc;strokeColor=#6c8ebf;shadow=1;" parent="1" vertex="1">
23
+ <mxGeometry x="700" y="440" width="80" height="80" as="geometry" />
24
+ </mxCell>
25
+ <mxCell id="suSSk-VfTl0UGtPIKBeb-7" value="AwareNet" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#e1d5e7;strokeColor=#9673a6;shadow=1;" parent="1" vertex="1">
26
+ <mxGeometry x="700" y="610" width="80" height="80" as="geometry" />
27
+ </mxCell>
28
+ <mxCell id="suSSk-VfTl0UGtPIKBeb-8" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#d5e8d4;strokeColor=#82b366;shadow=1;" parent="1" vertex="1">
29
+ <mxGeometry x="510" y="580" width="80" height="80" as="geometry" />
30
+ </mxCell>
31
+ <mxCell id="suSSk-VfTl0UGtPIKBeb-9" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#d5e8d4;strokeColor=#82b366;shadow=1;" parent="1" vertex="1">
32
+ <mxGeometry x="520" y="590" width="80" height="80" as="geometry" />
33
+ </mxCell>
34
+ <mxCell id="suSSk-VfTl0UGtPIKBeb-10" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#d5e8d4;strokeColor=#82b366;shadow=1;" parent="1" vertex="1">
35
+ <mxGeometry x="530" y="600" width="80" height="80" as="geometry" />
36
+ </mxCell>
37
+ <mxCell id="suSSk-VfTl0UGtPIKBeb-11" value="Tau_i" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#d5e8d4;strokeColor=#82b366;shadow=1;" parent="1" vertex="1">
38
+ <mxGeometry x="540" y="610" width="80" height="80" as="geometry" />
39
+ </mxCell>
40
+ <mxCell id="suSSk-VfTl0UGtPIKBeb-16" value="" style="endArrow=classic;html=1;rounded=0;exitX=0.5;exitY=0;exitDx=0;exitDy=0;shadow=1;" parent="1" source="suSSk-VfTl0UGtPIKBeb-8" edge="1">
41
+ <mxGeometry width="50" height="50" relative="1" as="geometry">
42
+ <mxPoint x="550" y="590" as="sourcePoint" />
43
+ <mxPoint x="480" y="540" as="targetPoint" />
44
+ </mxGeometry>
45
+ </mxCell>
46
+ <mxCell id="suSSk-VfTl0UGtPIKBeb-17" value="" style="endArrow=classic;html=1;rounded=0;entryX=0.5;entryY=1;entryDx=0;entryDy=0;shadow=1;" parent="1" target="suSSk-VfTl0UGtPIKBeb-5" edge="1">
47
+ <mxGeometry width="50" height="50" relative="1" as="geometry">
48
+ <mxPoint x="550" y="580" as="sourcePoint" />
49
+ <mxPoint x="600" y="530" as="targetPoint" />
50
+ </mxGeometry>
51
+ </mxCell>
52
+ <mxCell id="suSSk-VfTl0UGtPIKBeb-18" value="" style="endArrow=classic;html=1;rounded=0;entryX=0.5;entryY=1;entryDx=0;entryDy=0;shadow=1;" parent="1" target="suSSk-VfTl0UGtPIKBeb-6" edge="1">
53
+ <mxGeometry width="50" height="50" relative="1" as="geometry">
54
+ <mxPoint x="739.5" y="610" as="sourcePoint" />
55
+ <mxPoint x="739.5" y="530" as="targetPoint" />
56
+ </mxGeometry>
57
+ </mxCell>
58
+ <mxCell id="suSSk-VfTl0UGtPIKBeb-20" value="" style="endArrow=classic;html=1;rounded=0;entryX=0.5;entryY=1;entryDx=0;entryDy=0;shadow=1;" parent="1" target="suSSk-VfTl0UGtPIKBeb-26" edge="1">
59
+ <mxGeometry width="50" height="50" relative="1" as="geometry">
60
+ <mxPoint x="450" y="430" as="sourcePoint" />
61
+ <mxPoint x="500" y="380" as="targetPoint" />
62
+ </mxGeometry>
63
+ </mxCell>
64
+ <mxCell id="suSSk-VfTl0UGtPIKBeb-21" value="" style="endArrow=classic;html=1;rounded=0;entryX=0.5;entryY=1;entryDx=0;entryDy=0;shadow=1;" parent="1" target="suSSk-VfTl0UGtPIKBeb-26" edge="1">
65
+ <mxGeometry width="50" height="50" relative="1" as="geometry">
66
+ <mxPoint x="740" y="440" as="sourcePoint" />
67
+ <mxPoint x="790" y="390" as="targetPoint" />
68
+ </mxGeometry>
69
+ </mxCell>
70
+ <mxCell id="suSSk-VfTl0UGtPIKBeb-22" value="" style="endArrow=classic;html=1;rounded=0;entryX=0.5;entryY=1;entryDx=0;entryDy=0;shadow=1;" parent="1" target="suSSk-VfTl0UGtPIKBeb-26" edge="1">
71
+ <mxGeometry width="50" height="50" relative="1" as="geometry">
72
+ <mxPoint x="600" y="440" as="sourcePoint" />
73
+ <mxPoint x="650" y="390" as="targetPoint" />
74
+ </mxGeometry>
75
+ </mxCell>
76
+ <mxCell id="suSSk-VfTl0UGtPIKBeb-23" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#d5e8d4;strokeColor=#82b366;shadow=1;" parent="1" vertex="1">
77
+ <mxGeometry x="530" y="270" width="80" height="80" as="geometry" />
78
+ </mxCell>
79
+ <mxCell id="suSSk-VfTl0UGtPIKBeb-24" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#d5e8d4;strokeColor=#82b366;shadow=1;" parent="1" vertex="1">
80
+ <mxGeometry x="540" y="280" width="80" height="80" as="geometry" />
81
+ </mxCell>
82
+ <mxCell id="suSSk-VfTl0UGtPIKBeb-25" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#d5e8d4;strokeColor=#82b366;shadow=1;" parent="1" vertex="1">
83
+ <mxGeometry x="550" y="290" width="80" height="80" as="geometry" />
84
+ </mxCell>
85
+ <mxCell id="suSSk-VfTl0UGtPIKBeb-26" value="Q^i" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#d5e8d4;strokeColor=#82b366;shadow=1;" parent="1" vertex="1">
86
+ <mxGeometry x="560" y="300" width="80" height="80" as="geometry" />
87
+ </mxCell>
88
+ <mxCell id="suSSk-VfTl0UGtPIKBeb-27" value="" style="endArrow=classic;html=1;rounded=0;exitX=0;exitY=0.5;exitDx=0;exitDy=0;shadow=1;" parent="1" source="suSSk-VfTl0UGtPIKBeb-23" edge="1">
89
+ <mxGeometry width="50" height="50" relative="1" as="geometry">
90
+ <mxPoint x="390" y="430" as="sourcePoint" />
91
+ <mxPoint x="400" y="310" as="targetPoint" />
92
+ </mxGeometry>
93
+ </mxCell>
94
+ <mxCell id="tMZanws5naMaW_EdSXE4-4" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=0;exitY=0.5;exitDx=0;exitDy=0;shadow=1;" edge="1" parent="1" source="suSSk-VfTl0UGtPIKBeb-28" target="tMZanws5naMaW_EdSXE4-1">
95
+ <mxGeometry relative="1" as="geometry" />
96
+ </mxCell>
97
+ <mxCell id="suSSk-VfTl0UGtPIKBeb-28" value="Mixing Network" style="rounded=0;whiteSpace=wrap;html=1;fillColor=#f8cecc;strokeColor=#b85450;shadow=1;" parent="1" vertex="1">
98
+ <mxGeometry x="280" y="280" width="120" height="60" as="geometry" />
99
+ </mxCell>
100
+ <mxCell id="tMZanws5naMaW_EdSXE4-1" value="Q_tot" style="whiteSpace=wrap;html=1;aspect=fixed;shadow=1;fillColor=#dae8fc;strokeColor=#6c8ebf;" vertex="1" parent="1">
101
+ <mxGeometry x="100" y="270" width="80" height="80" as="geometry" />
102
+ </mxCell>
103
+ <mxCell id="tMZanws5naMaW_EdSXE4-5" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=0.5;exitY=1;exitDx=0;exitDy=0;entryX=0.5;entryY=0;entryDx=0;entryDy=0;shadow=1;" edge="1" parent="1" source="tMZanws5naMaW_EdSXE4-2" target="tMZanws5naMaW_EdSXE4-1">
104
+ <mxGeometry relative="1" as="geometry" />
105
+ </mxCell>
106
+ <mxCell id="tMZanws5naMaW_EdSXE4-2" value="TD Loss" style="rounded=0;whiteSpace=wrap;html=1;shadow=1;fillColor=#dae8fc;strokeColor=#6c8ebf;" vertex="1" parent="1">
107
+ <mxGeometry x="80" y="120" width="120" height="60" as="geometry" />
108
+ </mxCell>
109
+ <mxCell id="tMZanws5naMaW_EdSXE4-9" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=0;exitY=0.5;exitDx=0;exitDy=0;entryX=1;entryY=0.5;entryDx=0;entryDy=0;shadow=1;" edge="1" parent="1" source="tMZanws5naMaW_EdSXE4-6" target="tMZanws5naMaW_EdSXE4-2">
110
+ <mxGeometry relative="1" as="geometry" />
111
+ </mxCell>
112
+ <mxCell id="tMZanws5naMaW_EdSXE4-6" value="r = r^e + beta_1 r^exp + beta_2 r^div" style="rounded=0;whiteSpace=wrap;html=1;shadow=1;fillColor=#dae8fc;strokeColor=#6c8ebf;" vertex="1" parent="1">
113
+ <mxGeometry x="250" y="120" width="120" height="60" as="geometry" />
114
+ </mxCell>
115
+ </root>
116
+ </mxGraphModel>
117
+ </diagram>
118
+ </mxfile>