Add files using upload-large-folder tool
This view is limited to 50 files because it contains too many changes. See raw diff
- 2002.01365/main_diagram/main_diagram.drawio +1 -0
- 2002.01365/main_diagram/main_diagram.pdf +0 -0
- 2002.01365/paper_text/intro_method.md +31 -0
- 2002.07375/main_diagram/main_diagram.drawio +1 -0
- 2002.07375/main_diagram/main_diagram.pdf +0 -0
- 2002.07375/paper_text/intro_method.md +95 -0
- 2010.03255/main_diagram/main_diagram.drawio +0 -0
- 2010.03255/paper_text/intro_method.md +115 -0
- 2106.13213/main_diagram/main_diagram.drawio +1 -0
- 2106.13213/main_diagram/main_diagram.pdf +0 -0
- 2106.13213/paper_text/intro_method.md +30 -0
- 2107.09584/main_diagram/main_diagram.drawio +0 -0
- 2107.09584/paper_text/intro_method.md +33 -0
- 2108.01887/main_diagram/main_diagram.drawio +1 -0
- 2108.01887/main_diagram/main_diagram.pdf +0 -0
- 2108.01887/paper_text/intro_method.md +25 -0
- 2108.02708/main_diagram/main_diagram.drawio +1 -0
- 2108.02708/main_diagram/main_diagram.pdf +0 -0
- 2108.02708/paper_text/intro_method.md +23 -0
- 2108.10274/main_diagram/main_diagram.drawio +1 -0
- 2108.10274/main_diagram/main_diagram.pdf +0 -0
- 2108.10274/paper_text/intro_method.md +362 -0
- 2110.09779/main_diagram/main_diagram.drawio +0 -0
- 2204.00833/main_diagram/main_diagram.drawio +1 -0
- 2204.00833/main_diagram/main_diagram.pdf +0 -0
- 2204.00833/paper_text/intro_method.md +28 -0
- 2205.14772/main_diagram/main_diagram.drawio +1 -0
- 2205.14772/main_diagram/main_diagram.pdf +0 -0
- 2205.14772/paper_text/intro_method.md +191 -0
- 2208.12294/main_diagram/main_diagram.drawio +1 -0
- 2208.12294/main_diagram/main_diagram.pdf +0 -0
- 2208.12294/paper_text/intro_method.md +108 -0
- 2208.13753/main_diagram/main_diagram.drawio +0 -0
- 2208.13753/paper_text/intro_method.md +139 -0
- 2210.00364/main_diagram/main_diagram.drawio +1 -0
- 2210.00364/main_diagram/main_diagram.pdf +0 -0
- 2210.00364/paper_text/intro_method.md +150 -0
- 2210.00828/main_diagram/main_diagram.drawio +1 -0
- 2210.00828/main_diagram/main_diagram.pdf +0 -0
- 2210.00828/paper_text/intro_method.md +63 -0
- 2210.01769/main_diagram/main_diagram.drawio +0 -0
- 2210.01769/paper_text/intro_method.md +104 -0
- 2211.01452/main_diagram/main_diagram.drawio +1 -0
- 2211.01452/main_diagram/main_diagram.pdf +0 -0
- 2211.01452/paper_text/intro_method.md +105 -0
- 2211.07521/main_diagram/main_diagram.drawio +1 -0
- 2211.07521/main_diagram/main_diagram.pdf +0 -0
- 2211.07521/paper_text/intro_method.md +115 -0
- 2211.07710/main_diagram/main_diagram.drawio +1 -0
- 2211.07710/main_diagram/main_diagram.pdf +0 -0
2002.01365/main_diagram/main_diagram.drawio
ADDED
@@ -0,0 +1 @@
Compressed draw.io diagram source (single-line `<mxfile>`, draw.io 11.2.5, 9 pages named "system figure", "Explicit Iterated Learning", "Neural_IL", and "Page-6" through "Page-11"). The deflated diagram payload is not human-readable and is omitted here.
2002.01365/main_diagram/main_diagram.pdf
ADDED
Binary file (32.8 kB).
2002.01365/paper_text/intro_method.md
ADDED
@@ -0,0 +1,31 @@
# Introduction

<figure id="fig:sys_archi" data-latex-placement="t!">
<img src="C3_system_model.png" style="width:90.0%" />
<figcaption>Referential communication game and architectures of the agents.</figcaption>
</figure>

We analyze a typical and straightforward object selection game, in which a speaking agent (Alice, or speaker) and a listening agent (Bob, or listener) must cooperate to accomplish a task. In each round of the game, we show Alice a target object $x$ selected from an object space $\mathcal{X}$ and let her send a discrete-sequence message $\mathbf{m}$ to Bob. We then show Bob $c$ different objects ($x$ must be one of them) and use $c_1,...,c_c\in\mathcal{X}$ to represent these candidates. Bob must use the message received from Alice to select, among the $c$ candidates, the object that Alice refers to. If Bob's selection $\bar{c}$ is correct, both Alice and Bob are rewarded. The objects are shuffled and the candidates are randomly selected in each round, to prevent the agents from recognizing the objects by their order of presentation.

In our game, each object in $\mathcal{X}$ has $N_a$ attributes (color and shape are often used in the literature), and each attribute has $N_v$ possible values. To represent objects, similarly to the settings chosen in [@kottur2017natural], we encode each attribute as a one-hot vector and concatenate the $N_a$ one-hot vectors to represent one object. The message delivered by Alice is a fixed-length discrete sequence $\mathbf{m}=(m_1,...,m_{N_L})$, in which each $m_i$ is drawn from a fixed-size vocabulary $V$ of meaningless symbols.

Neural agents usually have separate modules for speaking and listening, which we name Alice and Bob. Their architectures, shown in Figure [1](#fig:sys_archi){reference-type="ref" reference="fig:sys_archi"}, are similar to those studied in [@ivan2017nips] and [@lazaridou2018iclr]. Alice first applies a multi-layer perceptron (MLP) to encode $x$ into an embedding, then feeds it to an encoding LSTM [@lstm]. Its output goes through a softmax layer, which we use to generate the message $m_1, m_2, \cdots$. Bob uses a decoding LSTM to read the message and uses an MLP to encode $c_1,...,c_c$ into embeddings. Bob then takes the dot product between the hidden states of the decoding LSTM and the embeddings to generate a score $s_c$ for each object. These scores are then used to calculate the cross-entropy loss when training Bob. When Alice and Bob are trained using reinforcement learning, we can use $p_A(\mathbf{m}|x;\theta_A)$ and $p_B(\bar{c}|\mathbf{m}, c_1,...,c_c;\theta_B)$ to represent their respective policies, where $\theta_A$ and $\theta_B$ contain the parameters of each of the neural agents. When the agents are trained to play the game together, we use the REINFORCE algorithm [@REINFORCE] to maximize the expected reward under their policies, and add an entropy regularization term to encourage exploration during training, as explained in [@mnih2016asynchronous]. The gradients of the objective function $J(\theta_A,\theta_B)$ are: $$\begin{align}
\nabla_{\theta_A}J &= \mathbb{E}\left[R(\bar{c},x)\nabla\log p_A(\mathbf{m}|x)\right]+\lambda_A\nabla H[p_A(\mathbf{m}|x)] \\
\nabla_{\theta_B}J &= \mathbb{E}\left[R(\bar{c},x)\nabla\log p_B(\bar{c}|\mathbf{m},c_1,...,c_c)\right]+\lambda_B\nabla H[p_B(\bar{c}|\mathbf{m},c_1,...,c_c)],
\end{align}$$ where $R(\bar{c},x)=\mathbbm{1}(\bar{c},x)$ is the reward function, $H$ is the standard entropy function, and $\lambda_A, \lambda_B>0$ are hyperparameters. A formal definition of the agents can be found in Appendix C.

Compositionality is a crucial feature of natural languages, allowing us to use small building blocks (e.g., words, phrases) to generate more complex structures (e.g., sentences), with the meaning of the larger structure being determined by the meaning of its parts [@using_language]. However, there is no consensus on how to quantitatively assess it. Besides a subjective human evaluation, *topological similarity* has been proposed as a possible quantitative measure [@comp_measure01].

To define topological similarity, we first define the language studied in this work as $\mathcal{L}(\cdot):\mathcal{X}\mapsto\mathcal{M}$. Then we need to measure the distances between pairs of objects: $\Delta_{\mathcal{X}}^{ij} = d_\mathcal{X} (x_i, x_j)$, where $d_\mathcal{X}(\cdot)$ is a distance in $\mathcal{X}$. Similarly, we compute the corresponding quantity for the associated messages $m_i = \mathcal{L}(x_i)$ in the message space $\mathcal{M}$ with $\Delta_{\mathcal{M}}^{ij} = d_\mathcal{M} \left(m_i, m_j\right)$, where $d_\mathcal{M}(\cdot)$ is a distance in $\mathcal{M}$. Then the topological similarity (i.e., $\rho$) is defined as the correlation between these quantities across $\mathcal{X}$. Following the setup of [@lazaridou2018iclr] and [@li2019ease], we use the negative cosine similarity in the object space and Levenshtein distances [@levenshtein1966binary] in the message space. We provide an example in Appendix B to give a better intuition about this metric.

The idea of iterated learning requires that the agent in the current generation be partially exposed to the language used in the previous generation. Even though this idea has proven effective in experiments with human participants, directly applying it to games played by neural agents is not trivial: for example, it is not clear where a preference for high-$\rho$ languages would come from in neural agents. Besides, we must carefully design an algorithm that simulates the "partially exposed" procedure, which is essential for the success of iterated learning.

As mentioned before, a preference for high-$\rho$ languages by the learning agents is essential for the success of iterated learning. In language evolution, highly compositional languages are favored because they are structurally simple and hence easier to learn [@carr2017cultural]. We believe that a similar phenomenon applies to communication between neural agents:

**Hypothesis 1:** *High topological similarity improves the learning speed of the speaking neural agent.*

We speculate that high-$\rho$ languages are easier for a neural agent to emulate than low-$\rho$ languages. Concretely, this means that Alice, when pre-trained with object-message pairs describing a high-$\rho$ language at a given generation, will be faster to output the right message for each object. Intuitively, this is because the structured mapping described by a language with high $\rho$ is smoother and hence has a lower sample complexity, which makes its examples easier to learn for the speaker agent [@sample_complexity].

**Hypothesis 2:** *High topological similarity allows the listening agent to successfully recognize more concepts using fewer samples.*

We speculate that high-$\rho$ languages are easier for a neural agent to recognize. That means that Bob, when pre-trained with message-object pairs corresponding to a high-$\rho$ language, will be faster to choose the right object. Intuitively, the lower the topological similarity, the more difficult it is to infer unseen object-message pairs from seen examples. The more complex mapping of a low-$\rho$ language implies that more object-message pairs are needed to describe it. This translates into an inability for the listening agent to generalize what it learned from one object-message pair of a low-$\rho$ language to other examples. Thus, the general performance of Bob improves much faster when he is trained with pairs from a high-$\rho$ language than from a low-$\rho$ language. We provide experimental results in section [4.1](#sec:exp_speed_adv){reference-type="ref" reference="sec:exp_speed_adv"} to verify our hypotheses. We also provide a detailed example in Appendix D to illustrate our reasoning.

2002.07375/main_diagram/main_diagram.drawio
ADDED
@@ -0,0 +1 @@
Compressed draw.io diagram source (single-line `<mxfile>`, draw.io 12.6.6, one page named "Page-1"). The deflated diagram payload is not human-readable and is omitted here.
2002.07375/main_diagram/main_diagram.pdf
ADDED
Binary file (48.6 kB).
2002.07375/paper_text/intro_method.md
ADDED
@@ -0,0 +1,95 @@
# Introduction

A Relational Markov Decision Process (RMDP) (Boutilier et al., 2001) is a first-order, predicate calculus-based representation for expressing instances of a probabilistic planning domain with a possibly unbounded number of objects. An RMDP *domain* has object types, relational state predicate and action symbols that are applied over objects, first-order transition templates that specify the probabilistic effects associated with action symbols, and a first-order reward structure. A domain *instance* additionally specifies a set of objects and a start state, thus defining a ground MDP with a known start state (Kolobov et al., 2012). *Relational* planners aim to produce a single *generalized* policy that can yield a ground policy for *all* instances of the domain, with little instance-specific computation. *Domain-independent* planners are representation-specific but domain-agnostic, making them applicable to all domains expressible in the language. In this paper, we design a domain-independent relational planner.

RMDP planners, in principle, are expected to scale to very large problem sizes by exploiting the first-order structure of a domain, thereby reducing the curse of dimensionality. Traditional RMDP planners attempted to find a generalized *first-order* value function or policy using symbolic dynamic programming (Boutilier et al., 2001), or by approximating them via a function over first-order basis functions (e.g., Guestrin et al., 2003; Sanner & Boutilier, 2009). Unfortunately, these methods met with rather limited success; e.g., no relational planner participated in the International Probabilistic Planning Competition (IPPC)<sup>1</sup> after 2006, even though all competition domains were relational. We believe that this lack of success may be due to inherent limitations in the representational power of a basis function-based representation. Through this work, we wish to revive the research thread on RMDPs and explore whether neural models could be effective in representing these first-order functions.

We present Symbolic NetWork (SYMNET), the first domain-independent neural relational planner that computes generalized policies for RMDPs expressed in the symbolic representation language of RDDL (Sanner, 2010). SYMNET outputs its generalized policy via a neural model all of whose parameters are specific to a domain, but tied among all instances of that domain.

<sup>1</sup> [http://www.icaps-conference.org/index.php/Main/Competitions](http://www.icaps-conference.org/index.php/Main/Competitions)

So, on a new test instance, the policy can be applied out of the box using pre-trained parameters, i.e., without any retraining on the test instance. SYMNET is domain-independent because it converts an RDDL domain file (and instance files) into neural architectures completely automatically, without any human intervention.

The SYMNET architecture uses two key ideas. First, it views each state of each domain instance as a graph, where nodes represent the *object tuples* that are valid arguments to some relational predicate. An edge between two nodes indicates that an action causes predicates over these two nodes to interact in the instance. The values of predicates in a state act as features for the corresponding nodes. SYMNET then learns node (and state) embeddings for these graphs using graph neural networks. Second, SYMNET learns a neural network to represent the policy and value function over this graph-structured state. To learn these in an instance-independent way, we observe that most ground actions are a first-order action symbol applied over some object tuple. SYMNET scores such ground actions as a function of the action symbol and the embeddings of the relevant object tuples. After training all model parameters using reinforcement learning over training instances of a domain, the SYMNET architecture can be applied to any new (possibly larger) test problem without further retraining.

We perform experiments on nine RDDL domains from IPPC 2014 (Grzes et al., 2014). Since no existing planner can run without instance-specific computation, we compare SYMNET to random policies (a lower bound) and to policies trained from scratch on the test instance. We find that SYMNET obtains far better rewards than random, and is quite close to the policies trained from scratch; it even outperforms them on 28% of the instances. Overall, we believe that our work is a step forward for the difficult problem of domain-independent RMDP planning. We release the code of SYMNET for future research.<sup>2</sup>

# Method

We now present SYMNET's architecture for training a generalized policy for a given RMDP domain. Following existing research, we hypothesize that for any instance of a domain, we can learn a representation of the current state in a latent space and then output a policy in latent space, which is decoded into a ground action. To achieve this, SYMNET uses three modules: (1) problem representation, which constructs an instance graph for every problem instance, (2) representation learning, which learns embeddings for every node in the instance graph and for the state, and (3) policy decoding, which computes a value for every ground action, outputting a mixed policy for a given state. All parameters of the representation learning and policy decoding modules are shared across all instances of a domain. SYMNET's full architecture is shown in Figure 2.

We follow TRAPSNET in that we retain the general idea of converting an instance into an instance graph and then learning a graph encoder to handle different-sized domains. However, the main challenge for a general RMDP, one that does not satisfy the restricted assumptions of TRAPSNET, is in defining a coherent graph structure for an instance. The first key question is what should be a node in the instance graph. TRAPSNET's approach was to use single objects as nodes, since all fluents (and actions) in its domains took single objects as arguments. This may not work for a general RMDP, since its fluents and actions may take several objects as arguments. The second question is how edges should be defined. Edges represent the interaction between nodes. TRAPSNET defined them based on the one binary non-fluent in its domain. A general RMDP may not have any non-fluent symbol, or may have many (possibly higher-order) non-fluents.


|
| 26 |
+
|
| 27 |
+
Figure 1. DBN for a modified wildfire problem.

Last but not least, real domain-independence for SYMNET can be achieved only if it parses an RDDL domain file without any human intervention. This leads to a novel challenge of reconciling the multiple different ways RDDL offers to express the same domain. In our running example, the connectivity structure between cells may be defined using non-fluents y-neighbour(y, y′) and x-neighbour(x, x′), or using a quaternary non-fluent neighbour(x, y, x′, y′). Since both representations describe the same problem, an ideal desideratum is that the graph construction algorithm lead to the same instance graph in both cases. But this is a challenge, since the corresponding RDDL domains may look very different. While, in general, this problem seems too hard to solve, since it amounts to judging the logical equivalence of two domains, SYMNET attempts to produce the same instance graphs when the equivalence lies within the non-fluents.

To solve these problems, we observe that the dynamics of an RDDL instance ultimately compile to a ground DBN whose nodes are state variables (fluent symbols applied to object tuples) and actions (action symbols applied to object tuples).<sup>3</sup> The DBN exposes a connectivity structure that determines which state variables and actions directly affect another state variable. It additionally has conditional probability tables (CPTs) for each transition. Figure 1 shows an example of a DBN for our running example instance. Here, the left column is for the current time step, and the right for the next one. The edges represent which state and action variables affect each next-state variable. We note that the ground DBN does not expose non-fluents, since their values are fixed and their dependence can be compiled directly into the CPTs.

SYMNET converts a ground DBN to an instance graph. It constructs a node for every unique *object tuple* that appears as an argument in any state variable in the DBN. Moreover, two nodes are connected if the state variables associated with the two nodes influence each other in the DBN through some action.

<sup>3</sup> done automatically using code from [https://github.com/ssanner/rddlsim](https://github.com/ssanner/rddlsim)

Figure 2. Policy network for SYMNET demonstrated on a 2 × 1 wildfire domain. A fully connected network is used in the action decoder.

This satisfies all our challenges. First, it goes beyond single objects as nodes, but only defines those nodes that are likely important in the instance. Second, it defines a clear semantics of edges, while maintaining the intuition of "directly influences." Finally, it can handle some variety of non-fluent representations for the same domain: since the DBN does not even expose non-fluent state variables but compiles them away, the same instance encoded with different non-fluent representations often yields the same ground DBN, and thus the same instance graph.

Construction of Instance Graph: We now formally describe the conversion of a DBN into a directed instance graph $G=(V,E)$, where $V$ is the set of vertices and $E$ is the set of edges. $G$ is composed of $\mathcal{K}=|\mathcal{A}|+1$ disjoint subgraphs $G_j=(V_j,E_j)$. Intuitively, each graph $G_j$ carries information about the influence of an individual action symbol $\mathbf{a}_j \in \mathcal{A}$, while $G_{\mathcal{K}}$ represents the influence of the full set $\mathcal{A}$, and also the natural dynamics. In our example $\mathcal{K}=4$, since we have three action symbols: put-out, cut-out, and finisher.

To describe the formal process, we define three analogous sets: $O_f$, $O_{nf}$, and $O_a$. $O_f$ is the set of all object tuples that act as a valid argument for some fluent symbol; $O_{nf}$ and $O_a$ are the analogous sets for non-fluent and action symbols. In our running example, $O_f = \{(x1,y1),(x2,y1)\}$, $O_{nf} = \{(x1,y1),(x2,y1),(x1,y1,x2,y1),(x2,y1)\}$, and $O_a = \{(x1,y1),(x2,y1)\}$. Each node in the instance graph is associated with an object tuple; we use $o_v$ to denote the object tuple associated with node $v$. SYMNET converts a DBN into an instance graph as follows (a minimal code sketch of this construction is given after the list):

1. The distinct object tuples in fluents form the nodes of the graph, i.e., $V_j = \{v \mid o_v \in O_f\}, \forall j$. For the example, each $V_j$ is a separate copy of $\{(x1,y1),(x2,y1)\}$.

2. We add an edge between two nodes in $G_j$ if some state variables corresponding to them are connected in the DBN through $\mathbf{a}_j$. Formally, $E_j(u,v)=1$ if $\exists f,g\in \mathcal{F}, \exists o_a\in O_a, j\in \{1,\dots,|\mathcal{A}|\}$ s.t. the transition dynamics $(T^f)$ for state variable $g'(o_v)$ and action $\mathbf{a}_j(o_a)$ depend on state variable $f(o_u)$ or $f'(o_u)$. For the running example, there is no edge between $(x1,y1)$ and $(x2,y1)$, since the effects of cut-out, put-out, and finisher on one cell do not depend on any other cell.
3. We add an edge between two nodes in $G_{\mathcal{K}}$ if some state variables corresponding to them are connected in the DBN (possibly through natural dynamics), i.e., $E_{\mathcal{K}}(u,v)=1$ if $\exists f,g\in \mathcal{F}$ s.t. there is an edge from $f(o_u)$ (or $f'(o_u)$) to $g'(o_v)$ in the DBN. For the example, $E_4((x1,y1),(x2,y1))=1$, as there is an edge between burning(x1,y1) and burning'(x2,y1), since fire propagates to neighboring cells through natural dynamics. Similarly, $E_4((x2,y1),(x1,y1))=1$.
4. As every node influences itself, self-loops are added on each node: $E(v, v) = 1, \forall v \in V$.

For each node $v \in V$ , we additionally construct a feature vector (h(v)) which consists of fluent feature vector $(h^f(v))$ and non-fluent feature vector $(h^{nf}(v))$ , such that $h = concat(h^f, h^{nf})$ . The feature vector for all nodes for the same object tuple is the same. The feature vector is constructed as follows:
|
| 54 |
+
|
| 55 |
+
1. The fluent features for each node are obtained from the state of the problem instance. The values of the state variables corresponding to a node are added as features of that node; whenever a fluent symbol cannot take a node as an argument, we add zero as the feature for it. Formally, $h^f(v)_i = g_i(o_v)$ if $g_i \in \mathcal{F}$, $v \in V$, and $o_v$ is an argument of $g_i$; otherwise $h^f(v)_i = 0$, $\forall i = 1 \dots |\mathcal{F}|$. For the running example, we have two state-fluents; hence $h^f((x1, y1)) = [\text{burning}(x1, y1), \text{out-of-fuel}(x1, y1)]$.
2. The non-fluent feature vector for each node is obtained from the RDDL file. The values of non-fluents defined on the node, and additionally any unary non-fluents whose argument intersects the node, are added as features of the node. The default value is obtained from the domain file, while the specific value (if available) is obtained from the instance file. Formally, $h^{nf}(v)_i = g_i(o_{nf})$ if $g_i \in \mathcal{NF}$, $v \in V$, $o_{nf} \in O_{nf}$, and $(o_v = o_{nf}) \vee (|o_{nf}| = 1 \wedge o_{nf} \subset o_v)$; otherwise $h^{nf}(v)_i = 0$, $\forall i = 1 \dots |\mathcal{NF}|$. In our example, $h^{nf}((x1, y1)) = [\text{target}(x1, y1)]$.

We note that the size of the feature vector on each node depends on the domain but is independent of the number of objects in the instance: there is a constant number of feature values per state predicate symbol. This allows variable-sized instances of the same domain to use the same representation.

SYMNET runs a GAT on the instance graph to obtain a node embedding $\mathbf{v}$ for each node $v \in V$. It then constructs a tuple embedding for each object tuple by concatenating the node embeddings of all associated nodes. Formally, let $O_V = \{o_v \mid v \in V\}$. For $o \in O_V$, the tuple embedding $\mathbf{o} = \text{concat}(\mathbf{v})$ over all $v$ s.t. $o_v = o$. SYMNET also computes a state embedding $\mathbf{s}$ by taking a dimension-wise max over all tuple embeddings, i.e., $\mathbf{s} = \text{MaxPool}_{o \in O_V}(\mathbf{o})$.

SYMNET maps the latent representations $\mathbf{o}$ and $\mathbf{s}$ into a state value $V(s)$ (long-term expected discounted reward starting in state $s$) and a mixed policy $\pi(s)$ (probability distribution over all ground actions). This is done using a value decoder and a policy decoder, respectively.
|
| 65 |
+
|
| 66 |
+
There are several challenges in designing a (generalized) policy decoder. First, the action symbols may take multiple objects as arguments. Second, and more importantly, action symbols may even take object tuples as arguments that do not correspond to any node in the instance graph. This will happen if an object tuple (in $O_a$) is not an argument to any fluent symbol, i.e., $\exists o_a$ s.t. $o_a \in O_a \wedge o_a \notin O_f$. We note that adding these object tuples as nodes in the instance graph may not work, since we will not have any natural features for those nodes.
|
| 67 |
+
|
| 68 |
+
In response, we design a novel framework for policy and value decoders. The decoders consist of fully connected layers whose inputs are a subset of the tuple embeddings $\mathbf{o}$. SYMNET uses the following rules to construct decoders (a policy-decoder sketch follows the list):
|
| 69 |
+
|
| 70 |
+
- 1. The number of decoders is constant for a given domain and is equal to the number of distinct action symbols ($|\mathcal{A}|$). For the running example, three decoders each for policy and value decoding are constructed, namely for cut-out, put-out and finisher.
|
| 71 |
+
- 2. The input to a decoder is the state embedding $\mathbf{s}$ concatenated with the embeddings of the object tuples corresponding to the state variables affected by the action in the DBN. In the running example, the put-out(x1, y1) action takes only the tuple embedding of (x1, y1) as input. However, the number of state variables affected by a ground action might vary across instances of the same domain. For example, the finisher action affects all cells. To alleviate this, we use size-independent max-pool aggregation over all affected tuple embeddings to create a fixed-size input.
|
| 72 |
+
- 3. Decoder parameters are specific to action symbols and not to ground actions. In the running example, put-out(x1, y1) will be scored using the embedding of (x1, y1); similarly for (x2, y1). But both scorings will use a single parameter set specific to put-out.
|
| 73 |
+
- 4. The policy decoder computes scores of all ground actions, which are normalized using softmax to output the final policy in a state. At test time, the highest-probability action is selected as the final action.
|
| 74 |
+
- 5. All value outputs are summed to give the final value for that state. This modeling choice reflects the additive reward aspect of many RDDL domains.
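As promised above, a minimal policy-decoder sketch. Random tensors stand in for real tuple embeddings, and the single linear layer per symbol is an illustrative simplification, not the paper's exact architecture:

```python
import torch
import torch.nn as nn

class SymbolDecoder(nn.Module):
    """One decoder per action symbol; its parameters are shared by all
    ground actions of that symbol (rule 3)."""
    def __init__(self, dim):
        super().__init__()
        self.score = nn.Linear(2 * dim, 1)    # [state ; pooled tuples] -> score

    def forward(self, state_emb, affected):   # affected: (k, dim) tuple embeddings
        pooled = affected.max(dim=0).values   # size-independent max pool (rule 2)
        return self.score(torch.cat([state_emb, pooled]))

dim = 16
state_emb = torch.randn(dim)
decoders = {"put-out": SymbolDecoder(dim), "cut-out": SymbolDecoder(dim)}  # rule 1
scores = torch.stack([
    decoders["put-out"](state_emb, torch.randn(1, dim)),   # put-out(x1, y1)
    decoders["put-out"](state_emb, torch.randn(1, dim)),   # put-out(x2, y1)
    decoders["cut-out"](state_emb, torch.randn(2, dim)),   # cut-out over 2 cells
]).squeeze(-1)
policy = torch.softmax(scores, dim=0)                      # rule 4
```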
|
| 75 |
+
|
| 76 |
+
While the construction of the SYMNET architecture is heavily dependent on the RDDL domain and instance files, actual training is done via the model-free reinforcement learning approach of A3C [\(Mnih et al., 2016\)](#page-9-0). RL learns from interactions with the environment – SYMNET simulates the environment using RDDL-specified dynamics. The use of model-based planning algorithms for this purpose is left as future work. We formulate the training of SYMNET as a multi-task learning problem (see Section [3](#page-3-0)), so that it generalizes well and does not overfit on any one problem instance. The parameters for the state encoder, policy decoder, and value decoder are learned using updates similar to those in A3C. SYMNET's loss function for the policy and value network is the same as that in the A3C paper (summed over the multi-task problem instances).
|
| 77 |
+
|
| 78 |
+
As constructed, SYMNET's number of parameters is independent of the size of the problem instance. Hence, the same network can be used for problem instances of any size. After the learning is completed, the network represents a generalized policy (or value), since it can be directly used on a new problem instance to compute the policy in a single forward pass.
|
| 79 |
+
|
| 80 |
+
<span id="page-6-0"></span>
|
| 81 |
+
|
| 82 |
+
| Domain \ Instance | 5 | 6 | 7 | 8 | 9 | 10 |
|-------------------|---|---|---|---|---|----|
| AA | $\textbf{0.93} \pm \textbf{0.01}$ | $\textbf{0.94} \pm \textbf{0.01}$ | $\textbf{0.94} \pm \textbf{0.01}$ | $\textbf{0.92} \pm \textbf{0.02}$ | $\textbf{0.95} \pm \textbf{0.03}$ | $\textbf{0.91} \pm \textbf{0.05}$ |
| CT | $0.87 \pm 0.16$ | $0.78 \pm 0.14$ | $\textbf{1.00} \pm \textbf{0.07}$ | $\textbf{0.98} \pm \textbf{0.13}$ | $\textbf{0.99} \pm \textbf{0.04}$ | $\textbf{1.00} \pm \textbf{0.05}$ |
| GOL | $\textbf{0.96} \pm \textbf{0.06}$ | $\textbf{1.00} \pm \textbf{0.05}$ | $0.65 \pm 0.05$ | $0.83 \pm 0.03$ | $\textbf{0.95} \pm \textbf{0.04}$ | $0.64 \pm 0.08$ |
| Nav | $\textbf{0.99} \pm \textbf{0.01}$ | $\textbf{1.00} \pm \textbf{0.01}$ | $\textbf{0.99} \pm \textbf{0.01}$ | $\textbf{1.00} \pm \textbf{0.01}$ | $\textbf{1.00} \pm \textbf{0.02}$ | $\textbf{1.00} \pm \textbf{0.02}$ |
| ST | $\textbf{0.91} \pm \textbf{0.05}$ | $0.84 \pm 0.02$ | $0.86 \pm 0.05$ | $0.85 \pm 0.05$ | $0.81 \pm 0.02$ | $0.89 \pm 0.03$ |
| Sys | $\textbf{0.96} \pm \textbf{0.03}$ | $\textbf{0.98} \pm \textbf{0.02}$ | $\textbf{0.98} \pm \textbf{0.02}$ | $\textbf{0.97} \pm \textbf{0.02}$ | $\textbf{0.99} \pm \textbf{0.01}$ | $\textbf{0.96} \pm \textbf{0.03}$ |
| Tam | $\textbf{0.92} \pm \textbf{0.07}$ | $\textbf{1.00} \pm \textbf{0.12}$ | $\textbf{0.98} \pm \textbf{0.06}$ | $\textbf{1.00} \pm \textbf{0.12}$ | $\textbf{1.00} \pm \textbf{0.12}$ | $\textbf{0.95} \pm \textbf{0.06}$ |
| Tra | $0.85 \pm 0.18$ | $\textbf{0.93} \pm \textbf{0.06}$ | $0.88 \pm 0.21$ | $0.74 \pm 0.17$ | $\textbf{0.94} \pm \textbf{0.12}$ | $0.87 \pm 0.13$ |
| Wild | $\textbf{0.99} \pm \textbf{0.01}$ | $\textbf{1.00} \pm \textbf{0.00}$ | $\textbf{1.00} \pm \textbf{0.00}$ | $\textbf{1.00} \pm \textbf{0.00}$ | $\textbf{1.00} \pm \textbf{0.01}$ | $\textbf{1.00} \pm \textbf{0.01}$ |
|
| 94 |
+
|
| 95 |
+
Table 1. $\alpha_{symnet}(0)$ values of SYMNET. Bold values denote scores above 90% of the maximum performance.
|
2010.03255/main_diagram/main_diagram.drawio
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2010.03255/paper_text/intro_method.md
ADDED
|
@@ -0,0 +1,115 @@
| 1 |
+
# Introduction
|
| 2 |
+
|
| 3 |
+
Fine-grained visual data are hard to collect and costly to annotate [@cub; @nab; @dog]. Fine-grained visual datasets often become quite long-tailed and lead to classifiers overfitting to the abundant classes when trained in vanilla settings. Fine-grained few-shot learning (FSL) methods alleviate this problem since they learn discriminative class features, among visually similar classes, using as few as 5 or 1 training instances.
|
| 4 |
+
|
| 5 |
+
![**Nearest "real sample" neighbors of the augmented examples.** We train data augmentation methods using the base classes and search for the nearest-neighbors of the generated samples in the novel classes of the CUB dataset. The input images are shown in the first column. Each row shows the nearest neighbors of some augmented features computed from: $\Delta$-encoder [@delta-encoder] (1st and 2nd rows) and our method (3rd and 4th rows). Green borders indicate that the images have the same class as the input image and red borders indicate otherwise. ](ICCV2021/figs/knn.pdf){#fig:teaser width="\\columnwidth" height="5cm"}
|
| 6 |
+
|
| 7 |
+
Augmenting the few-shot classes by generating additional data is a straightforward way to mitigate issues of overfitting in FSL. Nevertheless, generating diverse data reliably remains an open question [@Le-etal-ICCV19; @appleShrivastavaPTSW16]. The generated samples should contain the class-discriminative features while exhibiting high intra-class diversity. A typical data synthesis approach is generating new samples based on adversarial frameworks [@metair_gan; @adversarial2020kai; @gao2018adversarial; @metagan; @dagan; @Le_2020_ECCV; @le2020physicsbased; @m_Le-etal-ECCV18]. However, these methods suffer from a lack of diversity in the generated samples as adversarial training often mode-collapses. Another approach is feature transfer, which transfers the intra-class variance from the base classes, which have many training samples, to augment features for the novel classes, in which only a few samples are available [@delta-encoder; @feature_transfer; @hallucinate_features]. These methods are based on a common assumption that intra-class variations induced by poses, backgrounds, or illumination conditions are shared across categories. The intra-class variations are either modelled as low-level statistics [@feature_transfer] or pairwise variations [@delta-encoder; @hallucinate_features] and are applied directly to the novel samples. In this paper, we discuss two potential issues with these approaches. First, these transformations can introduce certain class-discriminative features that could alter the class-identity of the transformed features. For example, only $8.7\%$ of the augmented features using the $\Delta$-encoder[@delta-encoder] have their nearest "real sample" neighbors belonging to the same classes as the original samples (see Fig. [1](#fig:teaser){reference-type="ref" reference="fig:teaser"}). Second, the extracted variations might not be relevant to a specific novel sample, i.e., some bird species would never appear in sea backgrounds. Applying irrelevant variations would result in noisy or meaningless samples and degrade classification results (see Sec. [6.1](#sec:generated Intra-class Variance){reference-type="ref" reference="sec:generated Intra-class Variance"}). These two issues are more pronounced for fine-grained classification since a small change in feature space might change the category of the feature due to the small inter-class distances.
|
| 8 |
+
|
| 9 |
+
We address these issues in this paper via a novel data augmentation framework. First, we disentangle each feature into two components: one that captures the intra-class variance, which we refer to as intra-class variance features, and the other that encodes the class-discriminative features. Second, we model intra-class variance via a common distribution from which we can easily sample the new intra-class variations that are relevant for diversifying a specific instance. We show that both the feature disentanglement and the distribution of intra-class variability can be approximated using data from the base classes, and that they generalize well to the novel classes. The two key supervision signals that drive the training of our framework are: 1) a classification loss that ensures that the class-discriminative features contain class-specific information, and 2) a Variational Auto-Encoder (VAE) [@vae] system that explicitly models intra-class variance via an isotropic Gaussian distribution.
|
| 10 |
+
|
| 11 |
+
Our method works especially well for fine-grained datasets where the intra-class variations are similar across classes, achieving state-of-the-art few-shot classification performance on the CUB[@cub], NAB[@nab], and Stanford Dogs[@dog] datasets, outperforming previous methods [@delta-encoder; @meta-opt] by a large margin. We show in our analyses that the data generated by our method lies close to the real-and-unseen features of the same class and can closely approximate the distribution of the real data.
|
| 12 |
+
|
| 13 |
+
To sum up, our contributions are:
|
| 14 |
+
|
| 15 |
+
1. We are the first to propose a VAE-based feature disentanglement method for fine-grained FSL.
|
| 16 |
+
|
| 17 |
+
2. We show that we can train such a system using sufficient data from the base classes. We can sample from the learnt distribution to obtain relevant variations to diversify novel training instances in a reliable manner.
|
| 18 |
+
|
| 19 |
+
3. Our method outperforms state-of-the-art FSL methods in multiple fine-grained datasets by a large margin.
|
| 20 |
+
|
| 21 |
+
<figure id="fig:pipeline" data-latex-placement="!t">
|
| 22 |
+
<p><embed src="ICCV2021/figs/pipeline.pdf" style="width:160mm" /><br />
|
| 23 |
+
</p>
|
| 24 |
+
<figcaption><strong>The pipeline of our proposed method</strong>. The input image is mapped into the image feature maps <strong><em>X</em></strong>. We input <strong><em>X</em></strong> into an Encoder to obtain the mean and variance of the intra-class variability distribution that are used to sample the intra-class variance feature <span class="math inline"><em>z</em><sub><em>V</em></sub></span>. The class-specific feature <span class="math inline"><em>z</em><sub><em>I</em></sub></span> is obtained by max-pooling <strong><em>X</em></strong>. <span class="math inline"><em>z</em><sub><em>V</em></sub></span> is forced to follow an isotropic multivariate Gaussian distribution. Both <span class="math inline"><em>z</em><sub><em>I</em></sub></span> and the combined features are used to train a classifier. We sample from the learned distribution repeatedly to get multiple <span class="math inline"><em>z</em><sub><em>V</em></sub></span> and add them to the class-specific feature <span class="math inline"><em>z</em><sub><em>I</em></sub></span> to get the augmented features. These augmented features are used together with the original ones to train a more robust classifier. </figcaption>
|
| 25 |
+
</figure>
|
| 26 |
+
|
| 27 |
+
# Method
|
| 28 |
+
|
| 29 |
+
In FSL, abundant labeled images of base classes and a small number of labeled images of novel classes are given. Our goal is to train a classifier that can correctly classify novel class images with the few given examples. The standard FSL procedure includes a training stage and a fine-tuning stage. During the training stage, we use base class images to train a feature extractor and the classifier. Then in the fine-tuning stage, we freeze the parameters of the pre-trained feature extractor and train a new classifier head using the few labeled examples in the novel classes. In the testing stage, the learned classifier predicts labels on a set of unseen novel class images.
|
| 30 |
+
|
| 31 |
+
Since the available samples during the fine-tuning stage are scarce and lack diversity, the learned classifier tends to overfit to the few samples and thus performs poorly on the test images. To address this, we augment the training samples with our proposed data augmentation method, which significantly improves the performance of the baseline.
|
| 32 |
+
|
| 33 |
+
Our goal is to generate additional features of the few novel class images which contain larger intra-class variance. Fig. [2](#fig:pipeline){reference-type="ref" reference="fig:pipeline"} illustrates the pipeline of our proposed method. We decompose the feature representation of an input image into two components, the class-specific feature $z_I$ and the intra-class variance feature $z_V$. $z_V$ is constrained to follow a prior distribution. Then we repeatedly sample new intra-class variance features $\tilde{z}_V$ from the distribution and add them to the class-specific feature $z_I$ to get augmented features. The augmented features are used together with the original features to train the final classifier. In the following sections, we will describe how we model the distribution of intra-class variability via variational inference and how we use it to diversify samples from the novel set.
|
| 34 |
+
|
| 35 |
+
Given an input image (*i*), we first use a feature extractor to map it into a feature map $X^{(i)}$. We then compute the intra-class variance feature $z_V^{(i)}$ and the class-specific feature $z_I^{(i)}$ from $X^{(i)}$ such that the embedding of the input image, $z^{(i)}$, can be expressed as: $$\begin{equation}
|
| 36 |
+
z^{(i)} = z_I^{(i)} + z_V^{(i)}.
|
| 37 |
+
\end{equation}$$ Here we assume that the intra-class variance feature is generated from some prior distribution $p(z_V)$ and the feature map $X^{(i)}$ is generated from some conditional distribution $p(X|z)$.
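A minimal sketch of this decomposition in code; shapes are illustrative and the encoder below is a stand-in architecture, not the one used in the paper:

```python
import torch
import torch.nn as nn

# The class-specific feature z_I is obtained by max-pooling the feature map X,
# while the encoder predicts the posterior parameters (mu, log_var) for z_V.
C, H, W = 64, 7, 7
X = torch.randn(1, C, H, W)                 # feature map of one input image
z_I = X.amax(dim=(2, 3))                    # (1, C): class-specific feature
encoder = nn.Sequential(nn.Flatten(), nn.Linear(C * H * W, 2 * C))
mu, log_var = encoder(X).chunk(2, dim=1)    # posterior parameters for z_V
```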
|
| 38 |
+
|
| 39 |
+
The class-specific feature $z_I^{(i)}$ can be learned by minimizing the cross-entropy loss given the class label $y^{(i)}$: $$\begin{equation}
|
| 40 |
+
L_{cls}(X^{(i)}) = L_{cross-entropy}\left(W(z_I^{(i)}), y^{(i)}\right)
|
| 41 |
+
\end{equation}$$ where $W$ is a classifier with a single fully connected layer.
|
| 42 |
+
|
| 43 |
+
We use variational inference to model the posterior distribution of the variable $z_V$. Specifically, we approximate the true posterior distribution $p(z_{V}|X)$ with another distribution $q(z_{V}|X)$. The Kullback-Leibler divergence between the true distribution and the approximation is:
|
| 44 |
+
|
| 45 |
+
$$\begin{equation}
|
| 46 |
+
\label{KL Divergence}
|
| 47 |
+
KL[q(z_{V}|X)||p(z_{V}|X)] = \int_{z_V} q(z_V|X) \log \frac{q(z_V|X)}{p(z_V|X)} \, dz_V.
|
| 49 |
+
\end{equation}$$
|
| 50 |
+
|
| 51 |
+
Since the Kullback-Leibler divergence is always greater than or equal to zero, maximizing the marginal likelihood $p(X^{(i)})$ is equivalent to maximizing the evidence lower bound (ELBO) defined as follows: $$\begin{equation}
|
| 52 |
+
\label{ELBO}
|
| 53 |
+
\begin{split}
|
| 54 |
+
ELBO^{(i)} = E_{q(z_{V}^{(i)}|X^{(i)})}&[\textnormal{log }p(X^{(i)}|z_V^{(i)})] \\
|
| 55 |
+
&- KL\left(q(z_V^{(i)}|X^{(i)})||p(z_V)\right).
|
| 56 |
+
\end{split}
|
| 57 |
+
\end{equation}$$ Prior work [@metric_learning; @feature_transfer; @variational_fewshot] has shown that the distribution of intra-class variability can be modelled with a Gaussian distribution. Here we set the prior distribution of $z_V$ to be a centered isotropic multivariate Gaussian: $p(z_V) = \mathcal{N}(0, I)$. For the posterior distribution, we set it to be a multivariate Gaussian with diagonal covariance: $$\begin{equation}
|
| 58 |
+
q(z_V^{(i)}|X^{(i)}) = \mathcal{N}(\mu^{(i)}, \sigma^{(i)}),
|
| 59 |
+
\end{equation}$$ where $\mu^{(i)}$ and $\sigma^{(i)}$ are computed by a probabilistic encoder. With the reparameterization trick, we obtain $z_V^{(i)}$ as follows: $$\begin{equation}
|
| 60 |
+
\label{reparameterization}
|
| 61 |
+
z_{V}^{(i)} = \mu^{(i)} + \sigma^{(i)} * \epsilon, \epsilon \sim \mathcal{N}(0, I).
|
| 62 |
+
\end{equation}$$
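The reparameterized sampling in code (a sketch; parameterizing $\sigma$ through `log_var` is a common numerical-stability convention that the text does not spell out):

```python
import torch

def reparameterize(mu, log_var):
    """z_V = mu + sigma * eps with eps ~ N(0, I); the sampling stays
    differentiable with respect to mu and log_var."""
    eps = torch.randn_like(mu)
    return mu + torch.exp(0.5 * log_var) * eps

mu, log_var = torch.zeros(1, 64), torch.zeros(1, 64)  # e.g., encoder outputs
z_V = reparameterize(mu, log_var)
```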
|
| 63 |
+
|
| 64 |
+
Since $z_I^{(i)}$ is deterministic given $X^{(i)}$, we have $p(X^{(i)}|z_V^{(i)}) = p(X^{(i)}|z_V^{(i)}, z_I^{(i)}) = p(X^{(i)}| z^{(i)})$. To maximize the likelihood $p(X^{(i)}| z^{(i)})$, we use a decoder to reconstruct the original feature map from $z^{(i)}$ and minimize the $L2$ distance between the original feature map and the reconstructed one.
|
| 65 |
+
|
| 66 |
+
From Eq. [\[ELBO\]](#ELBO){reference-type="ref" reference="ELBO"}, we now derive the loss function for the modeling of intra-class variance: $$\begin{equation}
|
| 67 |
+
\label{intra-class-2}
|
| 68 |
+
\begin{aligned}
|
| 69 |
+
L_{intra}(X^{(i)}) &= \| X^{(i)} - \hat{X}^{(i)} \|^2 + KL\left(q(z_V^{(i)}|X^{(i)})||p(z_V)\right),
|
| 70 |
+
\end{aligned}
|
| 71 |
+
\end{equation}$$ where $\hat{X}^{(i)}$ is the reconstructed feature map synthesized from the sum of class-specific feature $z_I^{(i)}$ and intra-class variance feature $z_V^{(i)}$ sampled from the distribution $\mathcal{N}(\mu^{(i)}, \sigma^{(i)})$.
|
| 72 |
+
|
| 73 |
+
The $L_{intra}$ loss includes two terms. The first term is the reconstruction term, which ensures that the encoder extracts meaningful information from the inputs. The second term is a regularization term, which forces the latent code, $z_V^{(i)}$, to follow a standard normal distribution. Here, instead of minimizing the Kullback-Leibler divergence directly, we decompose it into three terms as in [@beta-tv-vae]:
|
| 74 |
+
|
| 75 |
+
$$\begin{equation}
|
| 76 |
+
\begin{split} \label{kl-decompose}
|
| 77 |
+
KL[q(&z_{V}|X)||p(z_{V})] = KL\left( q(z_{V},X)||q(z_{V})p(X) \right) + \\ &KL( q(z_{V})||\prod_{j}q(z_{V_j})) + \sum_{j}KL(q(z_{V_j})||p(z_{V_j})),
|
| 78 |
+
\end{split}
|
| 79 |
+
\end{equation}$$
|
| 80 |
+
|
| 81 |
+
where $z_{V_j}$ denotes the $j$-th dimension of the latent variable.
|
| 82 |
+
|
| 83 |
+
The three terms in Eq. [\[kl-decompose\]](#kl-decompose){reference-type="ref" reference="kl-decompose"} are referred to as the *index-code mutual information*, *total correlation*, and *dimension-wise* KL respectively. Prior work [@beta-tv-vae; @Achille2018; @Burgess2018UnderstandingDI] has shown that penalizing the index-code mutual information and total correlation terms leads to a more disentangled representation while the dimension-wise KL term ensures that the latent variables do not deviate too far from the prior. Similar to [@beta-tv-vae], we penalize the total correlation with a weight $\alpha$ and rewrite $L_{intra}$ as follows:
|
| 84 |
+
|
| 85 |
+
$$\begin{equation}
|
| 86 |
+
\begin{split} \label{intra-class-3}
|
| 87 |
+
L_{intra}&(X^{(i)})
|
| 88 |
+
= \| X^{(i)} - \hat{X}^{(i)} \|^2 + KL\left( q(z_V^{(i)},X^{(i)})||q(z_V^{(i)})p(X) \right) + \\ \alpha*& KL\left( q(z_V^{(i)})||\prod_{j}q(z_{V_j}^{(i)}) \right) + \sum_{j}KL\left(q(z_{V_j}^{(i)})||p(z_{V_j})\right).
|
| 89 |
+
\end{split}
|
| 90 |
+
\end{equation}$$ The combination of $L_{cls}$ and $L_{intra}$ drives the model to extract discriminative class-specific features $z_I^{(i)}$ and model the distribution of intra-class variability simultaneously.
|
| 91 |
+
|
| 92 |
+
Given the distribution of intra-class variability, we can generate additional samples for the base classes during the training stage. For input image (*i*) with extracted class-specific feature $z_I^{(i)}$ and intra-class variability mean and variance $\mu^{(i)}$ and $\sigma^{(i)}$ respectively, we sample new intra-class variance features, $\tilde{z}_V^{(i)}$, for this image from the distribution $\mathcal{N}(\mu^{(i)}, \sigma^{(i)})$ and add them to $z_I^{(i)}$ to obtain the augmented features $\tilde{z}^{(i)} = z_{I}^{(i)} + \tilde{z}_V^{(i)}$. We use these features to train our system using the following cross-entropy loss:
|
| 93 |
+
|
| 94 |
+
$$\begin{equation}
|
| 95 |
+
L_{aug}(X^{(i)}) = L_{cross-entropy}\left(W(\tilde{z}^{(i)}), y^{(i)}\right)
|
| 96 |
+
\end{equation}$$
|
| 97 |
+
|
| 98 |
+
The overall loss function in the training stage is a weighted combination of the aforementioned terms: $$\begin{equation}
|
| 99 |
+
\label{loss_function}
|
| 100 |
+
L = L_{cls} + L_{intra} + \beta * L_{aug}
|
| 101 |
+
\end{equation}$$
|
| 102 |
+
|
| 103 |
+
where $\beta$ is the coefficient of $L_{aug}$.
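Putting the three terms together, a hypothetical training-step sketch; the loss inputs are placeholders, and the KL regularizer is assumed to have been computed via the decomposition above:

```python
import torch
import torch.nn.functional as F

def total_loss(logits_zI, logits_aug, y, X, X_hat, kl_term, beta=1.0):
    """L = L_cls + L_intra + beta * L_aug.

    logits_zI / logits_aug: classifier outputs for the original and augmented
    features; kl_term: the (decomposed) KL regularizer computed elsewhere.
    """
    L_cls = F.cross_entropy(logits_zI, y)
    L_intra = F.mse_loss(X_hat, X, reduction="sum") + kl_term
    L_aug = F.cross_entropy(logits_aug, y)
    return L_cls + L_intra + beta * L_aug

# Toy call showing the signature only.
y = torch.tensor([3])
loss = total_loss(torch.randn(1, 10), torch.randn(1, 10), y,
                  torch.randn(1, 64), torch.randn(1, 64),
                  kl_term=torch.tensor(0.5), beta=0.5)
```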
|
| 104 |
+
|
| 105 |
+
In this section, we discuss how to use our model to diversify samples for few-shot classes. Our intra-class variance is modelled by an isotropic Gaussian distribution. Sampling from this distribution would result in an arbitrary intra-class variance feature. However, we conjecture that such an arbitrary feature may not be relevant for all instances, i.e., some birds never appear with a background of the sea. Note that all intra-class variations are mapped into a common continuous embedding space via variational inference, so closely related or similar intra-class variations likely form local neighborhoods in that space. Thus, instead of sampling from the zero-mean and unit-variance distribution, we only sample from the mean and variance estimated directly from the conditional sample, to obtain the intra-class variations that are likely relevant to this sample.
|
| 106 |
+
|
| 107 |
+
Specifically, given an image of novel class (*i*)\* with class label $y^{(i)*}$, we first extract the feature map $X^{(i)*}$, the class-specific feature $z_I^{(i)*}$, and the mean and variance of the intra-class variability distribution, $\mu^{(i)*}$ and $\sigma^{(i)*}$, for this instance. We then generate additional features by adding to the class-specific feature $z_I^{(i)*}$ a bias term sampled from the distribution of intra-class variability. $$\begin{equation}
|
| 108 |
+
\tilde{z}^{(i)*} = z_I^{(i)*} + \tilde{z}_V^{(i)*} , \tilde{z}_V^{(i)*} \sim N(\mu^{(i)*}, \sigma^{(i)*}),
|
| 109 |
+
\end{equation}$$ where $\tilde{z}^{(i)*}$ is the augmented feature and $\tilde{z}_V^{(i)*}$ is sampled from the posterior distribution $N\left(\mu^{(i)*}, \sigma^{(i)*}\right)$. By sampling from $N\left(\mu^{(i)*}, \sigma^{(i)*}\right)$ multiple times, we get multiple augmented features $\tilde{z}^{(i)*}$ that can be used to train the classifier. In Sec. [6.1](#sec:generated Intra-class Variance){reference-type="ref" reference="sec:generated Intra-class Variance"}, we verify the effectiveness of this sampling scheme.
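A sketch of this augmentation step; note the draws come from the instance-conditioned posterior $N(\mu^{(i)*}, \sigma^{(i)*})$, not the $N(0, I)$ prior:

```python
import torch

def augment(z_I, mu, log_var, num_samples=10):
    """Draw num_samples intra-class variations from the instance-conditioned
    posterior N(mu, sigma) -- not the N(0, I) prior -- and add them to the
    class-specific feature z_I of a novel-class example."""
    sigma = torch.exp(0.5 * log_var)
    eps = torch.randn(num_samples, mu.shape[-1])
    return z_I + mu + sigma * eps            # (num_samples, d)

aug = augment(torch.zeros(64), torch.zeros(64), torch.zeros(64))
print(aug.shape)                             # torch.Size([10, 64])
```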
|
| 110 |
+
|
| 111 |
+
|
2106.13213/main_diagram/main_diagram.drawio
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
<mxfile host="app.diagrams.net" modified="2020-10-16T04:27:48.096Z" agent="5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.121 Safari/537.36" version="13.8.0" etag="YmOSlYrCw7bEczDln94S" type="google"><diagram id="iS-IiEgjtBLhj_bRvWw5">7V3rc6O2Fv9rPLP9kB3e4I9xdve2M+ncTNNO2093ZJBtdTHigpzE/esrgcRT2DhBxq7ZZjroiTjndx46OuCZ+bB9+08C4s3POIDhzNCCt5n5ZWYYumUYM/anBfu8Zm6becU6QQHvVFY8o78hr9R47Q4FMK11JBiHBMX1Sh9HEfRJrQ4kCX6td1vhsH7XGKxhq+LZB2G79ncUkI14Lk0rG36EaL3ht/Zs3rAFRee8It2AAL/mVVkf8+vMfEgwJvnV9u0Bhox4gi75RN86WouFJTAifQZwRryAcMefja+L7MXDJngXBZD112bm4nWDCHyOgc9aXyl7ad2GbENa0unlCoXhf2krIoy1NhuRkgR/hw84xEk2oTl3XBM4vHOlHuqBDV1az9cEEwLfOp9LL6hFYQbxFpJkT7vwAZ7m5kM4wvQ558BryS/T5HWbCqt0gTHAMbIu5i7JSC84JeVUNUegqmcsTUdCVUpTL7CGoaqu2XWyFiSskFX3JGQ1zAHIao1AVsf34HIlISuA3sofCKyjUtX+l4LVdsakqvMvxaphHKeqaaiiqtsiIgyoZeZFnJANXuMIhF/L2kWdzGWfR4xjTty/ICF77maAHcF10sM3RP5gwz/bvPQnn4xdf3mrFvaiENEnqwxixT/FfKxQDstK5bjgnjkotLgMsf89r/qGQrGYnOnC6zALpjI61D0LvEt8XuXlVQQka1jYud6cT2AICHqpT/8RLnoS2XBCuohFgF7o5Zpkj5VXpTGIaix3/r9j3tFihSNyl2ZMu6cddCd+KxvLWSz2Zz9QxJOZu/gpineE9v4GAdkl1Id06YwabWd/DOR5f35v+iD57etLotW1hXbKtX5crkGI1hG99ikXYNKW3ZXnQ9+XSfvSs61MDwwg1aZbl2rLs9u60pFItTOAUM+vR6gL4YxwBGuyeUjoSzGXC30A0k2BF1Z4AoSCIcpqDM1oib3RT+yFjFfl3tB6Y6O33POhTxhFpMSUM29YiiZW8mXxUSVcKInBvtItZh3S7vtYTe9Ja2x5mus63J9e5CsosVvQpBecxQZiJDxX0Vxg+4iR0mtoLcF7ViMlQ6tujGelxHrOZ6b+/h83VMmWmqoVNVGZebpJi2Q1/Uy37WeqMkgCdRLOD+uNPCUwQD5BOGr4IfT/G8b9xf44ApTy2mb/Sfca2T/+4JX6/N9Aew3zOAZ0TRUIzMktUeSWWOdR9HLzbzZB1dyXDuSW6Ce6JYan2C2RRc9UKLVHsIRhQ5/tb1aH6dqYOsy+Hh12SfES3WlrKBUbp958lEUTzxQx+S2lQmVoPwX0qVnwsREwqbmsdL0X6LAW50rdJ1EDCLo9ZghFf09g9EOibbxLtl2vttXUP2u6cVTCaekJJogShXE3q6zrqNP5XXNteigEIfxVhWA5ilyWUz0NxzJqyDOsIwEQYRbk/T/uaXjjWp33QdPRjEGgWYPWezXR7YDRdj21YBw5uvw+MBqudZoXNAHxw0AUcVtFQBTEGiuceMu+mTNiMNHoDiMzXtKWFfDrPP4VbPAWVNmqpSIHTtTpMq4Pg518lqWo+KT/QDs/JfTBAWITP21ACitgWbaGJqLmZxShLbuhoT0kOE3vmKbC8f44AnPadEAw25ocxhnDC/JBeM8btigIct0OKQXAMpuKqUmuR+i89mJmf2FzUXXO6a0Pgz+9qYtsSRDAlQDQGgKA3dHsKwGgwQD4DEOYh8rfi8BH48ZgZzVCilLYWapgJ0uCPIIPP1f/DBvJevlJY5EGg4UZKlcMCzkkMzStwBaF+3wMnQhs46zRNJkDtYHhC2T8aLXUJ+ErYXNEONmCsN78yknD2q18LVljCFk8/C5laVnRWjqeQeYORUHGMdauMchXG6lWjdIVHSXGR7Do8IqToD59dfgS+N/XmTW+axCOcr4gmGHNy2u7Qr4ApXEIOOlQFKLKjVchBqS6oC4h5RrkuKhbB2JN9H5tV+XTZ9aB1m4gAe3mH1quTF10+wWauUSf6sQM4ZI0sxNFfL4im7LkRHcI0bTG3RNda1hYpMrWwsLu8Pud3nyUpe5OiXRjpi14zohRYJF0PI5c61Wp7pt5JAIkIwu22xZs0xpRsN1zC/aUelTK8FwfMVowcuz8ak3zXCLBI6a4i/VMJ7aXGxXUtTGNtUg7uqI0Ye2zewGiLsT6Uoy12R3gnQL7qkVYH9FWm92B1SnCVWmcIlyzZoSLO9jdMS7e4dqjXM0M7uKQ9QxRLnPcBO73GNiLeA9H2NKafR0xyiXWM/pmmGJykXvSME5RmJ1E0QIOMFMQ12CUVyvoyPfPgTtfakPtnxtutSF78V6ZUe6OiU5GeTLKt37spLdeSznjuZPZncY+yeYkm5PD3D4/Oqt4dp86TK+9nvGVMU/v4T+pemXMvKIDiOt67dWUHVEo2FfJ05ubh1rFB/EGfu3Vbez4xRe2utKumwqv0f/Daddm90HM9NqrOh3mNFMPz6nDRPri9Wa8mswnuw8C9KGE11tOuXab+k4GQFUp11b3kdC00Zk2OvWNzlpsagIYEnDtGxhds+QG/Rw7GPGaxYcUv1LNrh9S4jeimvt8QlT6BdEhENL9VsKFIMSYENLr073KENJ93HQhCDEnhPT6vrsyhPT4Yna6ATG7JDldqoSVGeWU7r4Jj45omTMYEUCdkoSPoT5OCOIUlUT2NygMHsEe74i4jSgtYvEm+LNwn/ik4W4bPdJp0+EYYYmfbhAbPU3GCE8RJ3p8ZVtwgj4SQSD8BfoEROs+TGkTPaAbqV9F+KaAOky+vsAc8ayuug/ngSmShcFYYwhXYuwSE4K3vJBwwrTlZ8H8VxbCopJEHVmbuRCinAlXjBO6i4/S7PVcNgcEKXmFKenL4BzPbQ6fhYOymPOAHMx+/6Oim7qCJKdxCFOK0m0CCz1uqBaE0UxZmCWH+EHm6Kp4I/tO98Sb47x5q/OllvagiFXtr5xMnOnmjNk+VBPhoqE5I165Pg9niqxbBZw5LW/3CAdsvc0BWxEH9Ek2TuCM45xNa9mycM3Eqt6s8ow2qxT5AnY7mXbizKEoqOS0QRVr2m/zTxaGZbpJ3C9VLGh/Z3eSjgOsMSU2RhVr2u/ET9LBnC2JC6yKBe2P007ScYA17hnNejvba2LNAdZIoszKWNOduHQh5xCfDHZazZMLomUaz2rnDbd6NGE1PyQq8QVtRUcTInvqEkHz3kQJJL5jwHMgYTBjCRxbGKUIR+m1I66hr+7n99r9l4GQ2JFbefwXUE+HIi2Wv1qd52mWv/1tfv0H</diagr
am></mxfile>
|
2106.13213/main_diagram/main_diagram.pdf
ADDED
|
Binary file (62.3 kB). View file
|
|
|
2106.13213/paper_text/intro_method.md
ADDED
|
@@ -0,0 +1,30 @@
| 1 |
+
# Method
|
| 2 |
+
|
| 3 |
+
In this paper, we focus on studying approaches for learning privacy-preserving representations from mobile data for mood prediction. Our processed data comes in the form of $\{(x_{t,i}, x_{k,i}, x_{a,i}, y_i)\}_{i=1}^n$ with $x_t \in \mathbb{N}^{|V_t|=2000}$ denoting the bag-of-words features, $x_k \in \mathbb{N}^{|V_k|=100}$ denoting the bag-of-timings features, and $x_a \in \mathbb{N}^{|V_a|=274}$ denoting the bag-of-apps features. $y$ denotes the label which takes on one of our $3$ mood categories: negative, neutral, and positive. In parallel, we also have data representing the corresponding (one-hot) user identity $x_{\textrm{id}}$ which will be useful when learning privacy-preserving representations that do not encode information about user identity $x_{\textrm{id}}$ and evaluating privacy performance.
|
| 4 |
+
|
| 5 |
+
We considered two unimodal baselines:
|
| 6 |
+
|
| 7 |
+
1\. Support Vector Machines ([SVMs]{.smallcaps}) project training examples into a chosen kernel space and find the optimal hyperplane that maximally separates each class of instances. We apply an SVM classifier on input data $x_{\textrm{uni}} \in \{ x_t, x_k, x_a \}$ and use supervised learning to predict daily mood labels $y$.
|
| 8 |
+
|
| 9 |
+
2\. Multilayer Perceptrons ([MLPs]{.smallcaps}) have seen widespread success in supervised prediction tasks due to their ability to model complex nonlinear relationships. Because of the small size of our dataset, we choose a simple multilayer perceptron with two hidden layers. Similarly, we apply an [MLP]{.smallcaps} classifier on input data $x_{\textrm{uni}} \in \{ x_t, x_k, x_a \}$ to predict daily mood labels $y$.
|
| 10 |
+
|
| 11 |
+
We extend both [SVM]{.smallcaps} and [MLP]{.smallcaps} classifiers using early fusion [@baltruvsaitis2018multimodal] of text, keystroke timings, and app usage to model multimodal interactions. Specifically, we align the input by concatenating the bag-of-words, bag-of-timings, and bag-of-apps features for each day, resulting in an input vector $x_{\textrm{multi}} = x_t \oplus x_k \oplus x_a$, before using an [SVM]{.smallcaps}/[MLP]{.smallcaps} classifier for prediction.
|
| 12 |
+
|
| 13 |
+
While classifiers trained with traditional supervised learning can learn useful representations for mood prediction, they carry the risk of *memorizing* the identity of the user along with their sensitive mobile usage and baseline mood scores, and possibly *revealing* these identities to adversarial third-parties [@abadi2016deep]. Therefore, it is crucial to perform mood prediction while also protecting the privacy of personal identities.
|
| 14 |
+
|
| 15 |
+
We adapt the Selective-Additive Learning (SAL) framework [@wang2017select] for the purpose of privacy-preserving learning. While SAL was originally developed with a very different goal in mind, improving model generalization, we extend it to an important problem in healthcare: preserving privacy. We adapt SAL to learn *disentangled* representations separated into *identity-dependent* private information and *identity-independent* population-level information using three phases:
|
| 16 |
+
|
| 17 |
+
\(1\) *Pretrain phase:* The input is a set of (multimodal) features $x$ that are likely to contain both identity-dependent and independent information. The intermediate representation $z_\textrm{feat} = f_\textrm{feat}(x; \theta_\textrm{feat}^*)$ is obtained from an [MLP]{.smallcaps} classifier pretrained for mood prediction. $f_\textrm{feat}$ denotes the classifier with pretrained parameters $\theta_\textrm{feat}^*$.
|
| 18 |
+
|
| 19 |
+
\(2\) *Selection phase:* Our goal is to now disentangle the identity-dependent and independent information within $z_\textrm{feat}$. We hypothesize that dependent and independent information are encoded in separate subspaces of the feature vector $z_\textrm{feat}$. This allows us to disentangle them by training a separate classifier to predict $z_\textrm{feat}$ *as much as possible* given only the user identity: $$\begin{equation}
|
| 20 |
+
\label{eq:SAL}
|
| 21 |
+
\theta_\textrm{id}^* = \mathop{\mathrm{arg\,min}}_{\theta_\textrm{id}} \left( z_\textrm{feat} - f_\textrm{id} (x_\textrm{id}; \theta_\textrm{id}) \right)^2 + \lambda ||z_{\textrm{id}}||_1,
|
| 22 |
+
\end{equation}$$ where $x_\textrm{id}$ denotes a one-hot encoding of user identity as input, $f_\textrm{id}$ denotes the identity encoder with parameters $\theta_\textrm{id}$, and $\lambda$ denotes a hyperparameter that controls the weight of the $\ell_1$ regularizer. $f_\textrm{id}$ projects the user identity encodings to the feature space learned by $f_\textrm{feat}$. By minimizing the objective in equation [\[eq:SAL\]](#eq:SAL){reference-type="eqref" reference="eq:SAL"} for each $(x, x_\textrm{id})$ pair, $f_\textrm{id}$ learns to encode user identity into a sparse vector $z_\textrm{id} = f_\textrm{id} (x_\textrm{id}; \theta_\textrm{id}^*)$ representing identity-dependent features: the nonzero values of $z_\textrm{id}$ represent dimensions of the identity-dependent subspace in $z_\textrm{feat}$, while the remaining dimensions belong to the identity-independent subspace.
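A minimal sketch of this selection phase, with toy dimensions and mean-reduced losses in place of the exact sum/L1-norm formulation; the real $f_\textrm{id}$ and optimization details may differ:

```python
import torch
import torch.nn as nn

num_users, d = 17, 128
f_id = nn.Linear(num_users, d, bias=False)   # identity encoder f_id
opt = torch.optim.Adam(f_id.parameters(), lr=1e-3)
lam = 1e-3                                   # weight of the L1 regularizer

x_id = torch.eye(num_users)[torch.randint(num_users, (32,))]  # one-hot ids
z_feat = torch.randn(32, d)                  # from the pretrained MLP

for _ in range(100):
    z_id = f_id(x_id)
    # Regress toward z_feat plus an L1 penalty that keeps z_id sparse, so
    # its nonzero dimensions mark the identity-dependent subspace.
    loss = ((z_feat - z_id) ** 2).mean() + lam * z_id.abs().mean()
    opt.zero_grad(); loss.backward(); opt.step()
```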
|
| 23 |
+
|
| 24 |
+
\(3\) *Addition phase:* Given two factors $z_\textrm{feat}$ and $z_\textrm{id}$, to ensure that our prediction model does not capture identity-related information $z_\textrm{id}$, we add multiplicative Gaussian noise to remove information from the identity-related subspace $z_\textrm{id}$ while repeatedly optimizing for mood prediction with a final [MLP]{.smallcaps} classification layer $g(z_\textrm{feat}, z_\textrm{id} ; \delta)$. This resulting model should only retain identity-independent features for mood prediction: $$\begin{equation}
|
| 25 |
+
\hat{y} = g \left( z_\textrm{feat} + \epsilon \odot z_\textrm{id} \right)
|
| 26 |
+
\end{equation}$$ where $\epsilon \sim N(0, \sigma^2)$ is repeatedly sampled across batches and training epochs. We call this approach [Noisy Identity MLP]{.smallcaps}, or [NI-MLP]{.smallcaps} for short, and summarize the final algorithm in Figure [\[fig:sal\]](#fig:sal){reference-type="ref" reference="fig:sal"}.
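The addition phase then reduces to a few lines (a sketch; `g` here is a single linear layer standing in for the final classification layer):

```python
import torch

def ni_forward(g, z_feat, z_id, sigma=1.0):
    """Addition phase: corrupt the identity-dependent component with
    multiplicative Gaussian noise (resampled on every call, i.e. every
    batch) before the final classification layer g."""
    eps = sigma * torch.randn_like(z_id)     # eps ~ N(0, sigma^2)
    return g(z_feat + eps * z_id)

g = torch.nn.Linear(128, 3)                  # 3 mood classes
y_hat = ni_forward(g, torch.randn(4, 128), torch.randn(4, 128), sigma=2.0)
```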
|
| 27 |
+
|
| 28 |
+
**Controlling the tradeoff between performance and privacy:** There is often a tradeoff between privacy and prediction performance. To control this tradeoff, we vary the parameter $\sigma$, which is the variance of noise added to the identity-dependent subspace across batches and training epochs. $\sigma=0$ recovers a standard [MLP]{.smallcaps} with good performance but reveals user identities, while large $\sigma$ effectively protects user identities but at the possible expense of mood prediction performance. In practice, the optimal tradeoff between privacy and performance varies depending on the problem. For our purposes, we automatically perform model selection using this performance-privacy ratio $R$ computed on the validation set, where $$\begin{equation}
|
| 29 |
+
R = \frac{s_{\textsc{MLP}} - s_{\textsc{NI-MLP}}}{t_{\textsc{MLP}} - t_{\textsc{NI-MLP}}}
|
| 30 |
+
\end{equation}$$ is defined as the improvement in privacy per unit of performance lost. Here, $s$ is defined as the accuracy in user prediction and $t$ is defined as the F1 score on mood prediction.
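For concreteness, the model-selection ratio as a small helper; the numbers in the example are made up:

```python
def perf_privacy_ratio(s_mlp, s_nimlp, t_mlp, t_nimlp):
    """R: improvement in privacy (drop in user-identification accuracy s)
    per unit of mood-prediction performance (F1 score t) lost."""
    return (s_mlp - s_nimlp) / (t_mlp - t_nimlp)

# Made-up numbers: identity accuracy 0.90 -> 0.30, mood F1 0.80 -> 0.75.
print(perf_privacy_ratio(0.90, 0.30, 0.80, 0.75))  # 12.0
```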
|
2107.09584/main_diagram/main_diagram.drawio
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2107.09584/paper_text/intro_method.md
ADDED
|
@@ -0,0 +1,33 @@
| 1 |
+
# Introduction
|
| 2 |
+
|
| 3 |
+
3D shape understanding is an active area of research, whose goal is to build 3D models of objects and environments from limited sensory data. It is commonly tackled by leveraging partial observations such as a single view RGB image [63, 71, 23, 46], multiple view RGB images [16, 25, 30, 32], depth maps [65, 78] or tactile readings [7, 51, 62, 47, 40]. Most of this research focuses on building shape reconstruction models from a *fixed set* of partial observations. However, this constraint is relaxed in the *active sensing* scenario, where additional observations can be acquired to improve the quality of the 3D reconstructions. In *active vision* [4], for instance, the objective can be to iteratively select the camera perspectives of an object that result in the highest improvement in quality of the reconstruction [80], and only very recently has the research community started to leverage large scale datasets to learn exploration strategies that generalize to unseen objects [81, 5, 43, 53, 29, 82, 52, 6].
|
| 4 |
+
|
| 5 |
+
Human haptic exploration of objects, both with and without the presence of vision, has classically been analysed from a psychological perspective, where the tactile exploration strategies developed for object understanding were shown to be not only ubiquitous but also highly tailored to specific tasks [39, 35]. In spite of this, deep learning-based data-driven approaches to *active touch* for shape understanding are practically non-existent. Previous haptic exploration works consider objects
|
| 6 |
+
|
| 7 |
+
<sup>∗</sup>Correspondence to: ejsmith@fb.com and edward.smith@mail.mcgill.ca
|
| 8 |
+
|
| 9 |
+
<sup>†</sup>Equal Contribution
|
| 10 |
+
|
| 11 |
+

|
| 12 |
+
|
| 13 |
+
Figure 1: An overview of our active touch exploration framework. Given a 3D model, the simulator extracts touch and vision signals that are fed to the reconstruction model. The reconstruction model predicts a 3D shape that is used as an input to a policy model that decides where to touch next. The policies are trained to select grasps which minimize the Chamfer Distance.
|
| 14 |
+
|
| 15 |
+
independently, and build uncertainty estimates over point clouds produced by densely touching the object's surface with point-based touch sensors [7, 77, 28, 18, 48]. These methods do not make use of learned object priors, and a large number of touches sampled on an object's surface (over 100) is necessary not only to produce a prediction of the surface but also to drive exploration. However, accurate estimates of object shape have also been successfully produced with orders of magnitude fewer touch signals, by making use of high resolution tactile sensors such as [79], large datasets of *static* 3D shape data, and deep learning – see *e.g.* [58, 72]. Note that no prior work exists to learn touch exploration by leveraging large scale datasets. Moreover, no prior work explores active touch in the presence of visual inputs either (*e.g.* an RGB camera).
|
| 16 |
+
|
| 17 |
+
Combining the recent emergence of both data-driven reconstruction models from vision and touch systems [58, 72], and data-driven active vision approaches, we present a novel formulation for active touch exploration. Our formulation is designed to easily enable the use of vision signals to guide the touch exploration. First, we define a new problem setting over active touch for 3D shape reconstruction where touch exploration strategies can be learned over shape predictions from a learned reconstruction model with strong object priors. Second, we develop a simulator which allows for fast and realistic grasping of objects, and for extracting both vision and touch signals using a robotic hand augmented with high-resolution tactile sensors [79]. Third, we present a 3D reconstruction model from vision and touch which produces mesh-based predictions and achieves impressive performance in the single view image setting, both with and without the presence of touch signals. Fourth, we combine the simulator and reconstruction model to produce a tactile active sensing environment for training and evaluating touch exploration policies. The outline for this environment can be viewed in Figure 1.
|
| 18 |
+
|
| 19 |
+
Over the provided environment, we present a series of data-driven touch exploration models that take as input a mesh-
|
| 20 |
+
|
| 21 |
+

|
| 22 |
+
|
| 23 |
+
(a) Touch only exploration.
|
| 24 |
+
|
| 25 |
+

|
| 26 |
+
|
| 27 |
+
(b) Touch exploration with visual prior.
|
| 28 |
+
|
| 29 |
+
Figure 2: Target objects (top rows) and predicted 3D shapes (bottom rows) after 5 grasps have been selected.
|
| 30 |
+
|
| 31 |
+
based shape reconstruction and decide the position of the next touch. By leveraging a large-scale dataset of over 25k CAD models from the ABC dataset [36] together with our environment, the data-driven touch exploration models outperform baseline policies, which fail to leverage learned patterns relating object shape, or the distribution of object shapes, to optimal actions. We demonstrate that our proposed data-driven solutions perform up to 18% better than random baselines and lead to impressive object reconstructions relative to their input modality, such as those demonstrated in Figure 2. Our framework, training and evaluation setup, and trained models are publicly available on a GitHub repository to ensure and encourage reproducible experimental comparison<sup>3</sup>.
|
| 32 |
+
|
| 33 |
+
<sup>3</sup> https://github.com/facebookresearch/Active-3D-Vision-and-Touch
|
2108.01887/main_diagram/main_diagram.drawio
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
<mxfile host="app.diagrams.net" modified="2021-04-25T23:25:04.231Z" agent="5.0 (Macintosh; Intel Mac OS X 10_15_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.85 Safari/537.36" version="14.6.6" etag="bU9M8ZH0ds6HWkhhZiOk" type="google"><diagram id="1esp_VP6LG9pBasqhdZl">7VxRk6I4EP41PO4UJEDCo4qz+7BbtVXzsHePOc0qN0jcEEfdX39BggjC6Gkgo+tM1Qx0Qif09yXdabu04Gix+czJcv6NTWlsAXu6sWBoAeA4GMt/mWSbS3xo54IZj6aqUyl4iX5TJSy6raIpTSsdBWOxiJZV4YQlCZ2IioxwztbVbj9ZXB11SWZqRLsUvExITI+6/YimYp5LsXfQ+wuNZvNiZMdWLQtSdFaCdE6mbH0ggmMLjjhjIr9abEY0zoxX2CV/7rmldT8xThNxzgMgf+CNxCv1bmpeYlu8LGerZEqz/o4Fh+t5JOjLkkyy1rWEV8rmYhGr5lRw9kpHLGZcShKWyG7Dn1EcFyILwHGY/Uq5GptyQTet83f2VpF0omxBBd/KLuoBr7CsYhIM1P26xMUt2DU/wKTAiigqzPaqS2vJC2WwZuPBGzeeA04bD/odGc9tMJ4fi+yNmZz+oRX9XytWNHxKd7vBQHZw/OWmbJRXs+z/t22hR84gV5U3vIONrQeb58HI9saasIFVbFz7GJsmaKAGaLyOoLHGrjUcWoG3uxhYeNgJVDVUBmg0dFEbhBqgArY5qPzTWxCdSt+lbhkXczZjCYnHpXRYtW7Z5ytjS2XTf6kQW+WIyUqwqsXzMbOBKiZM2YpPlAgpN034jKr3x2cbmtOYiOitqv0as6GrGe66TQwfJxMZ7PD/T2tN3kFuQSj0u/EO4FzvEGjgNW4FKF2S5HKAQloHKNfXAtAJSKr4nQOQF4zCwO4m9mkCqDH20QFQ0JGPoKkw5r/3nkKHU0DmnEJxTGoAZ8lpaU8tPh34ZJHZeqdrQdLXvaQyyhGYe3FlSrcUokHXIMROK8TT6O1iiBv3TCvE1iC0wqGFR9n10NlJsgd2F8gKAwv7VjiwBs8H3cBp7CtzvaXl7ToGsTcS40nD8O1f2fNPjlfc/33YGG6U9vxue3j3nfJIvijlSnhGxOhA/fGhevQ7i3YLQoHp18CEKHgKDn+qCvMYVumowbaf1HlINmUQHofgMsLBBleZa3iVQdDZMuthSWEdS6pJ7ZMNXRcD5GEf2KiZHoXafDvRslKbciIf5qDdPZ7Ie8/wmtB9f5Au0fUNr3Uf9eNSUV98wfXUcm8u9frUjuM1uVTyT8rilaDxfbhWhL2TrhV15Vqx4eWGb9iz4qATzyrVVvbeFnZ0sPd2lUlaM/56F0vV98xFwaA9lXQdOlF6F9ggYBCb4xwQWZDfUTLr0YYZdGrLdTR98IB9gzb9M3IroLfciucaCgQbqzOuT4Kj00nw9I/JgHsGs6CNBSStxvuA1TdHH9/BY+N1VrrUWY2H2ajr2UMIdJN7BG6P3G4q69ByiC3igzsACAUGAdKQZfhwYbFGbOpHll6xOa4dOczd3JIZ66eLXs3Yfi7vuIggK8r52CGURojrh50+IS503NXHjzpDAMcgNs6Rre7lINr/oRP4WnKkUu2TDbCHHHf3t/r51FFZnb4cKbzfrET/1R2ayCDVGiIDfJBBW12CLjJgU2QwXaVyy2RA9VOKHjLsyhiMkMF7kEFbxYImMki1hshguqblpslQzy1pIkNQJYPXGxnQgwwXk8GpHdE1VVxItRUyuL2Vu0HT9Te3zAYAO2GDVGuKDcGDDRezAda/f0APG6RaQ2woMicPNlzCBtQNG5AxNjyykJezwdVS+tKkth82yNvyi2jy7uXX+cDxfw==</diagram></mxfile>
|
2108.01887/main_diagram/main_diagram.pdf
ADDED
|
Binary file (22.7 kB). View file
|
|
|
2108.01887/paper_text/intro_method.md
ADDED
|
@@ -0,0 +1,25 @@
| 1 |
+
# Method
|
| 2 |
+
|
| 3 |
+
As illustrated in Figure [3](#fig:new_techniques){reference-type="ref" reference="fig:new_techniques"}, we propose two methods for introducing parallel information at both the word-level and the sentence-level: dictionary denoising and bitext denoising.
|
| 4 |
+
|
| 5 |
+
Our first method encourages learning similar representations at the word-level by introducing anchor words through multilingual dictionaries [@conneau2020emerging]. Let $D_l(w)$ denote the translation of word $w$ into language $l \in L$ according to the dictionary $D$. Given the source sentence $x = \left( x_1, x_2, \dots, x_n \right)$, we define its noised version $g_\psi \left( x \right) = \left( \tilde{x}_1, \tilde{x}_2, \dots, \tilde{x}_n \right)$, where $\tilde{x}_i = D_l(x_i)$ with probability $\frac{p_r}{|L|}$ and $\tilde{x}_i = x_i$ otherwise (i.e. we replace each word with its translation into a random language with probability $p_r$). We set $p_r=0.4$. Given the dictionary-noised sentence, we train our model using the denoising auto-encoding objective in Eq. [\[eq:bart\]](#eq:bart){reference-type="ref" reference="eq:bart"}: $$\begin{equation}
|
| 6 |
+
\label{eq:dict}
|
| 7 |
+
\ell_{\text{dict}}(x) = -\log P\big(x | g_\phi(g_\psi(x))\big)
|
| 8 |
+
\end{equation}$$
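A sketch of the dictionary-noising function $g_\psi$ with toy dictionaries; replacing a word with probability $p_r$ and then picking a language uniformly yields the stated per-language probability $p_r/|L|$:

```python
import random

def dictionary_noise(words, dictionaries, p_r=0.4):
    """Replace each word with its translation into a random language with
    total probability p_r (i.e. p_r / |L| per language); words missing from
    the chosen dictionary are kept as-is."""
    langs = list(dictionaries)
    out = []
    for w in words:
        if random.random() < p_r:
            l = random.choice(langs)         # uniform over the |L| languages
            out.append(dictionaries[l].get(w, w))
        else:
            out.append(w)
    return out

toy = {"fr": {"cat": "chat"}, "es": {"cat": "gato", "house": "casa"}}
print(dictionary_noise("the cat is in the house".split(), toy))
```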
|
| 9 |
+
|
| 10 |
+
Our second approach encourages learning from both monolingual and parallel data sources, by including translation data in the pretraining process. Given a source-target bitext pair $(x,y)$ in the parallel corpus, assumed to be semantically equivalent, we model the following: $$\begin{equation}
|
| 11 |
+
\label{eq:bitext}
|
| 12 |
+
\ell_{\text{bitext}}(x,y) = - \log P\big(y | g_\phi(x)\big)
|
| 13 |
+
\end{equation}$$ in which we optimize the likelihood of generating the target sentence $y$ conditioned on the noised version of the source sentence, $g_\phi(x)$.[^2]
|
| 14 |
+
|
| 15 |
+
Our final objective combines $\ell_{\text{mono}}$, $\ell_{\text{dict}}$ and $\ell_{\text{bitext}}$.[^3] Given that our corpus contains languages with varying data sizes, we sample sentences using the exponential sampling technique from @conneau2019cross. We use $\alpha_\text{mono}=0.5$ to sample from the monolingual corpus, and $\alpha_\text{bitext} = 0.3$ to sample from the parallel corpus. To prevent over-exposure to English on the decoder side when sampling from the parallel corpus, we halve the probability of to-English directions and renormalize the probabilities. In addition, given that we have less parallel data (used for $\ell_{\text{bitext}}$) than monolingual data (used for $\ell_{\text{mono}}$ and $\ell_{\text{dict}}$), we sample between tasks using $\alpha_\text{task} = 0.3$.
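The exponential sampling can be sketched as follows (corpus sizes are made up):

```python
import numpy as np

def sampling_probs(sizes, alpha):
    """Exponential (temperature) sampling: a language with n_i sentences is
    drawn with probability proportional to (n_i / sum_j n_j) ** alpha, which
    up-samples low-resource languages whenever alpha < 1."""
    p = np.asarray(sizes, dtype=float)
    p = (p / p.sum()) ** alpha
    return p / p.sum()

sizes = [1e9, 1e7, 1e5]                  # high-, mid-, low-resource corpora
print(sampling_probs(sizes, alpha=0.5))  # the low-resource share grows
```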
|
| 16 |
+
|
| 17 |
+
We pretrain our models on 20 languages (English, French, Spanish, German, Greek, Bulgarian, Russian, Turkish, Arabic, Vietnamese, Thai, Chinese, Hindi, Swahili, Urdu, Japanese, Basque, Romanian, Sinhala and Nepalese), and evaluate them on machine translation and cross-lingual classification.
|
| 18 |
+
|
| 19 |
+
We use Wikipedia as our monolingual corpus, and complement it with OSCAR [@ortiz-suarez-etal-2020-monolingual], and CC100 [@conneau-etal-2020-unsupervised] for low-resource languages. For a fair comparison with monolingually pretrained baselines, we use the same parallel data as in our downstream machine translation experiments (detailed in §[3.2](#subsec:downstream){reference-type="ref" reference="subsec:downstream"}). In addition, we train a separate variant (detailed below) using additional parallel data from ParaCrawl [@espla-etal-2019-paracrawl], UNPC [@ziemski-etal-2016-united], CCAligned [@elkishky2020ccaligned], and OpenSubtitles [@lison-tiedemann-2016-opensubtitles2016].[^4] We tokenize all data using SentencePiece [@kudo-richardson-2018-sentencepiece] with a joint vocabulary of 125K subwords. We use bilingual dictionaries from FLoRes[^5] [@guzmanFLoResEvaluationDatasets2019] for Nepalese and Sinhala, and MUSE[^6] [@conneau2017word] for the rest of languages. Refer to Appendix [\[app:data\]](#app:data){reference-type="ref" reference="app:data"} for more details.
|
| 20 |
+
|
| 21 |
+
We use the same architecture as BART-base [@lewis2020bart], totaling $\sim$`<!-- -->`{=html}196M parameters, and train for 100k steps with a batch size of $\sim$`<!-- -->`{=html}520K tokens. This takes around a day on 32 NVIDIA V100 16GB GPUs. As discussed before, we train two variants of our full model: **[paradise]{.smallcaps}**, which uses the same parallel data as the machine translation experiments, and **[paradise]{.smallcaps}++**, which uses additional parallel data. To better understand the contribution of each objective, we train two additional models without dictionary denoising, which we name **[paradise]{.smallcaps} (w/o dict.)** and **[paradise]{.smallcaps}++ (w/o dict.)**. Finally, we train a baseline system using the monolingual objective alone, which we refer to as **mBART (ours)**. This follows the original mBART work [@liu2020mbart], but is directly comparable to the rest of our models in terms of data and hyperparameters.
|
| 22 |
+
|
| 23 |
+
Following @liu2020mbart, we evaluate our models on sentence-level machine translation from and to English using the following datasets: IWSLT [@Cettolo2015TheI2; @Cettolo2017OverviewOT] for Vietnamese, Japanese and Arabic, WMT [@bojar-etal-2013-findings; @bojar-etal-2014-findings; @bojar-etal-2016-findings; @bojar-etal-2017-findings] for Spanish, French, Romanian and Turkish, FLoRes [@guzmanFLoResEvaluationDatasets2019] for Sinhala and Nepalese, and IITB [@kunchukuttan-etal-2018-iit] for Hindi. We report performance in BLEU [@papineni-etal-2002-bleu] as detailed in Appendix [\[app:bleu\]](#app:bleu){reference-type="ref" reference="app:bleu"}. We finetune our models using the same setup as mBART, warming up the learning rate to $3 \times 10^{-5}$ over 2500 iterations and then decaying with a polynomial schedule. We use $0.3$ dropout and label smoothing $\epsilon=0.2$.
|
| 24 |
+
|
| 25 |
+
We evaluate our models on zero-shot cross-lingual transfer, where we finetune on English data and test performance on other languages. To that end, we use the XNLI natural language inference dataset [@conneau-etal-2018-xnli] and the PAWS-X adversarial paraphrase identification dataset [@pawsx2019emnlp]. Following @huExplicitAlignmentObjectives2021, we use all 15 languages in XNLI, and English, German, Spanish, French and Chinese for PAWS-X. We develop a new approach for applying sequence-to-sequence models for classification: feeding the sequence into both the encoder and decoder, and taking the concatenation of the encoder's `<s>` representation and the decoder's `</s>` representation as the input of the classification head. We provide an empirical rationale for this in Table [\[tb:ft_techniques\]](#tb:ft_techniques){reference-type="ref" reference="tb:ft_techniques"}. We finetune all models with a batch size of 64 and a learning rate of $2\times10^{-5}$ for a maximum of 100k iterations, performing early stopping on the validation set.
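A sketch of this classification head; plain tensors stand in for the actual encoder/decoder hidden states, and this is not the underlying toolkit's API:

```python
import torch

def classify(enc_out, dec_out, head, s_pos=0, eos_pos=-1):
    """Sequence classification with a seq2seq model: the same sequence is fed
    to both encoder and decoder, and the encoder's <s> representation is
    concatenated with the decoder's </s> representation before the head.
    enc_out / dec_out: hidden states of shape (seq_len, d)."""
    feat = torch.cat([enc_out[s_pos], dec_out[eos_pos]])
    return head(feat)

d, num_classes = 768, 3
head = torch.nn.Linear(2 * d, num_classes)
logits = classify(torch.randn(12, d), torch.randn(12, d), head)
```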
|
2108.02708/main_diagram/main_diagram.drawio
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
<mxfile host="app.diagrams.net" modified="2021-11-15T11:10:06.787Z" agent="5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/95.0.4638.69 Safari/537.36" version="15.4.3" etag="ZDMnmBF1K9sOd2mIP4vi" type="google"><diagram id="90IXHttpAA5mF9CGfyz_">7LzHluw4ki36NTXNRS2GlE466RROzRm11ppf/4g4p6oyW9/Vvfq+wY3MjHDCQQEzw7a9DWD+DeW68zVHY/kZ0qz9GwKl599Q/m8IQlPw8xs0XL8aYJwifrUUc5X+aoP+2WBVd/a7499btyrNlt9tv5rWYWjXavxrYzL0fZasf2mL5nk4/totH9r0Lw1jVGR/eQzQYCVRm/2rbl6VruWvVgr/U28pq4ry73eGod/fdNHfO/++xFJG6XD86V6o8DeUm4dh/fWpO7msBcb7q13Ef+fbfzzYnPXrf+UE5NcJe9Ruv8f2+7nW6++DLeZhG393y+Y1O/8tE0dx+y8t9s9HgP8xsCcisqHL1vl6uvy+EEH+gaC/TvodDzj6+ymOfxqXxOk/MIL+xw/1q0f5JzM/cfQH8ftK0W8PF/+43T+N8Hz4bYd/2ybof26TxyR9moH+0N9Q9iirNbPGKAHfHk/IP23l2j3X5+Hn47LOQ5NxQzvMP2ejxM/P801ete2f2nMc/PO0F220LL8vng/9+qc+v36e9nlYo7Ua+qeZ/ns/MeqqFhjRzeY06qPfzdbvB4f+8TB/j1roD3C7f9e5f3Yi8h86EYeoP3DsL16EUewPgvhXjqT+ILF/7TsK+oMm//uuw/6HXfcvHJTiGZVi/5ZLKSRGf7n0T+5Kszza2vX/h84in7lEkQhEYTABEfBf/Eajf6AEjBAoBkE0TVL/yoMw/AeC0RBJYAhGIRQK4f+GP+E/UJokSAylIAr9R5f/jm/xf+XKLH1Q+ffhMK/lUAx91Ar/bGWTbd5/fA3/OOHPnv/nCeowjL+71Nm6Xr/zTbStw1+jITur1f/T5+DHKST++5A/f1/65+D6+0H/jNP/80HwD1/+HP7ztJ+jv5/3H4XIrwek/8MIAcb5j+PjseWwzcnvXvTvPBrNRfa7F/3vAPectU8073+9/L/l2p9TmXmOrj91GIeqX5c/XdkADX+KTvKv8YiRf81g/0l3mEb/RVT9eoB/xtg/RvJfCjvi/4Xd/3bYQf9Xwo6C/o/C7l90f7Ld/2jYkf8v7P6Xw+6XBvlvhN1/NZFR/8+1/9uuJf/vIAr+f4Yof+1O/s/mMfo/p8aPGB3Bx6r70b8sIJnVo3rVKM5aY1iq3xQ2HtZ16J4OLfiCjZKm+AnJP0uZn58/XYNpqwKcu4IQZaNl/KXL8+oEgcz+3JL5eyv095bncxqtT7gwvw4RceyLvyFc5bL694CUVzEwz49mOaXgFM8nQ3h+8SfHBKA9umDh/fwtGKEVTPeLMRmSliTVfsMefURkwJqtInCmXI0Fr1SD/G6+DZ9tZNm7fu6i/qZvf0PYudoUdai4yuLfzEuJttDYbp7BPzbf8MLHK8tL6izhQ/sPhLBpRu7olntzNm3Gngs66jyt/fPsAwWnfuqmsHs/R6mTK/Pz9x7d/ApeFgPpKYNSKCzQuTSTjflpWPdiNqtlr+BT3oPAUDjjBhI4qTHlF0YI/LuwJKMw1bNhn2mSXxZz+NVQfFoWCyj2mAUTJhg8F1W+tOTqIByJrS+JK0webdh2SXbdZQq1Hs2k4+WEYBlDN0WPfcYh4gLHbnrJXIFQ3pjAUSHjB1JPNcVHxCaZf5vZy2AS5RS0rND90zowp5IXvWWPgCqP53FpnAljSb1bU+aw59bv4pL059b3M7Ai2x+rfJkirgfz07Fy8hyyzP4yRYR9JoL4lnk2k8OZ+aDGLkG8RJBUNyuFqZxJJ78uJkONzNgQUtV6v4/zFFvz5xLc+cpZD//6UzKxjP9OeRMEJAs1WZboebokXaqZ4V74hcxNCF/DRUF2bEfXyrSaFE2WcQ4mHcKW2Gytx2wdcGLR6ijRoxjU5DOlxDxUPJrO542MniPNe6aWyCHhtZWrydfxmVbQ2kH4t0XP5zrRiiR0oG5nDnzwNOA5ulrNc86CIiSIL25j6RtPkfPqybr57llogYl/kWOGouMiJrAuW+bWG9iHSzzoyyemyGYKq8jRuyRrCrKS1K7YN9NLQ2HMMUzfG+pn+4YYiuE75gMkIgq5I+U9H7yuDRDvOf2dsGvryzp+Eynr5Lsa4vS9k/MBRjMqBVe8Nr0m13amcEmCu+dBr0+KfqJ+ba1utJ+g42OUXdP8uYaNJBdNIy9pwjTfFF22u2fUp/OMXDXTG4bZJQOeio14BeOGZnReJptsSZnw4yeSv4j7tx+3P9Yi11nSQK+Iev6wO6pXzx8QHlCKbg73RBMs7sSZ2hk2wGlGYHiYh/BejDR2wGht+6+YrhoUdbUQmDvlbmkf0dwEnx0Ulzqf5EfxU8SleuY3xz2YzmJxQu8pDO7y/LuajvNSH6ARM7vLzoN3p5ad6ILWtiOHV7RIlJ+OIEzk8aQJsnUhEL+IhtdPszOTtQzZBRKM1fs5jtF+QlE4hRhzFnxXWSzVFLGm6mw+HaAolFDjJqUR42ZkGDkIQMXzH20TLxrxn2u5OowY/HNPsQbhos1zn+++FLnpGT6gyIYrMxsVsJkueP0dwGlRnCTmkZB8Fg+9AFcs1DNVneHnoUWMS3w8+0Z0EzBj65P12FgW5M6tbTJT1Rp3QWxbkpoKbM0BvzggggxQzGHRVHBwvPsaJDmS24nGzHCu9o5velSeKBRhX1rWDE0dPr/mswCJ3Ev4gFkTFKM6o6ixZUNKwoms+ZuyM2N5k877k+MzCRxkPFZnPWA0A1iXAkgQKsN7ZaTPhjL962kMykgGXxjKjLGcHrmQT8IXhhY3DVMIqsvYubWUtUXMRTCOoOKjFW3GC9OCIN8/G22vK8G3qlZqsh5G3RaD2VZNMmnvww7AGgOmJM8UooGTteK+IfR9wuDRHgy4xVh7kEX0PcYun+/7++ehDSX6+rrZXa7X469BeJoIysNRZ2U+sN/MNQzfe74f3baDMIMjOGoEVUAxNkLWqoz8xsOfaM4+gjouHoNwjejI8Tg26hLAcb4rn13RycG/oMzN+/jMqy+O4/CU+k503GRrsbqoNPOLVWZPua42kZavGV0B49b7+kw79i08cSPuGmcoslMQ4vSMXwkRvCeYt4ngGmI1J4raRVhRmA8b+9ZvVkZvgBeLCnraFQFwOTA+naXAyvK5o8HUr2QDoDKqJjEPkrbXF/CMzbWxgBXrxiTL9jlSTZ1Z2QWIf10BHBze1mYwZxvn7qFzmwnIlhlBbj1nbu4TomwHdZD66SmXdGeLYCQXIIFHZ3m482BWXt4OubFuN6aDBLM03gO6llc3S8K3xZcScwILh5/2LThOt0HJmdD6XLmfJB
HiToUvwJGd0hS9wgijmH/2JyY0OwzeMWfOcAttfiapMw6GIs0bdLuw9uRQHL6r5CFabLam8WpIW7oD22zS4q3r+9V4Zn4m3tLveJ7xcCMt+TumVt1Ha3NEIV32zPbVWTA9aDPlbI4D5U74Im3edM5cTWOptd/hc/W8V+M+vJwCm6eLAQ0bpNU1Nt7VgOYT+zatq2CMu6XgUadRZmMt28R2MtuUwFQQqInix2uZp+M1uceG0U5v3/8CSFX37kkUL5TCjW7XVnKGa5Qqmcyg9pE/1wtSwxWpLG/NyHQBuS1H9vVAh0v8hYtz3++hrxf7PuEotwqtj++o2p69pA5fvqIJgjq31/JZcsoUZWkxHcVAGQ6PyVtFPbJWznvD4ASbPFHbTV5L08s+y/yuASa9+OMXovpSQbz8jXS2ndoV8xuhB6IuY8wPZoOlnyB8gZBruOUMI1HLfGPMOnZOaEz6tp1AHSFLCkvqKmJ3TDtTp3C9imk3o54VdPOb4WDmfplkPV1x+15fmBzrR4iVvgFSLgDTBGsh9xLPG9LLVurJrPTWejY5Wic9JM+STpBoiMwd/UvT6QOSMG41Nsga1oUws6qZ/olvWUbjz5VyH9FftzKCRBG1BhMyIKSGscko2rxpLydF4gxqRFD5/njy2oIx97r5fuYEIjOygxbz1retl6J/YKx78joEZah8ew7fWcV27kGKJSavfi7BPXs/+d71Xsgna/OVvnkTraPoJMn124SPpUblYu28+rOmkF/UQRrQO70/2WtDVtu0eWrOpYdxHTQ5PnNvCz2fJqMHyN+7fq09fwJ2JV0eMq3l8iZJXlBG1/242ys4RsFyofZJOTAxS72yRl30enOD6mfZey48d8dkUnpt+9ELWL7O2bDCmG6nJ/1MSAWZKD39TLYnIu/Af+I7yTSqJofnFKRs1CSkSYdZ8gyY2AcQ8hqrsEsfmJONB9VzQ5IZTO1eH8/8lBeAKEujIYyTMLc2nW8KKHbgYy3PFk5Nwzza+braJJs00b63+/6F51TE36FqUG+UCyAJzPrW9/V+yTU7PrHsyIOt3LlZUp4Jmiy23M8fPFLZtEpZARVas2IzVfbPinReDH7cuP3xan4RLzm2QGTni6FhdMjQkmY/FzSh6Mmp3hwlwoNSGN+NTFb4n9wRQV7QOOIFn+T+8bcU6VH9HSOwoyXiWCeVmIWL+ok/TCs2GIfwKoDF0PCC3TWtU51bficlUajjJdLeDPxJoJCUuLN/C26dII3S03bFU1BYIoJQv2UZ8RqQThduKppEKXvJtl5dFs7To1bYmIEVer5Yw6priARMKiRUel1iNCWqMwAMKCe3j7sOVz0Jwoc9F/v99aTSuGuJjvbvSju6oUMRyD02ApHuQ1oMUpNSP37DXUpy09kkhcR279kTuNdHYNwLrWLPBjZTF3OKP40ZLOIyRoR7yx7mP+0srK9QcCoNhAxu8FzZRGWc/xDm+/Mwe913115IuQS1346EWuhWZW3oupWwENiLNuGKFBIWcHgkD8pnCrWFSKWtGmN4xEdOEc6mM0IPKzBJwZk5GidIdLV5zQKjL0WLeC+TIai3rKqwqhirwXvxl/OLcsLdV8rqb/5sIvw5DUVnmHCSRWhuWvefu3n99GTNnpFL+OvAmKlgeGKR3iaU23tXElMjxW4KtODl7a1GEMQdC7gkaszNLxVKvptgw05WS7IBthDdRfrwfUofCxHI5C4fkO6HmvYAFaQQaBtu6koHCm8BW1zVBmBOetb1m+O6N7nnQJXgEvWFBGoxZ5EQcv3m8d7L2oU+3oBh6k9U3aK8Hm3rF+jAUUlF84gbsmkTbV9Evjwr8vebXGCGrcxFVfzoYzaHj42NjPl2rpjhaY+nJrPxx3q7OM9XImRd+7J9J2aWqu9ntHhzw/wdJ5haiHt9acMYrooAxZNLeh7hUMZ5z7zPoZmAgi2LEGdSFV5fp210wh/VO/0yt5ha7/Ob4NZ81T6lVwI8IC1DtwMqa9f9fSjAm6ZweZYCrB2OaQbhDnOqwrU3CXFblJjE5a2hYJqB9OrRJ0GV3qAE2zM/zVK5s9e0vD7meUwJAbflySnqcwhRPsezmqeGBHzkNz2nbpZPof+ixUhJLdVQeKMaPFfUvRL2ouMRcCHcKp3e5CeOIIaOXfUt6AOXZPb+vb2ztPNHXT2BqMblhb6Gke1VWVmGR0i9ZFeVZ65g/Xsd7C96Io6enmkIJFHAchdD2ie5+Ps56/72jTXPtgfU91mkt1H/vNNBULb9fjElHr0vZ29K0cEyROIp+Rm6DTh07A1baR1+FTnscoQxrZrLPonuKa6Rk9+DiaC9TiSqWeVr/ukXiLMKE7m/SD+jMvAUBUV01yTSrn2MIQTsLMOfHL7pR2ldX1vQO6p9EjpEq8y63lNIALlcdqs+1qwkHK/hfgGBTgYPTz4G3/uQjQldT0BasziICAoRlwpodhxmuOcrnfH6jCMz8+2kZq9Gbvoxsi7K5G+Ejqmp4lBHk93GMI0j/fK7sSoXrLXfh6/M0Z7qx+rNqaRkWZSd3Xh57eo9859mUF3qUuXo7O5HANjP3FQXFf2ckrAEw4iPrR742fxQI5zJGk/ak6nxR4LEsdnoyBM/cSmiwxSfYNhA6Zl3NhKAM9Kt14xmWA2KCu5YNuhNxCfAH2C63G6vdOfzRZkEO5mEH31RSMxLVvKTK3qX52CP6D1nq5onajISM0AW7uWehISN6Yet/nBqULnrSN7A+zXIj6PyurqL5StEWs5O6O0XscwKUH5WzwcHjuE6EDy4GPcfwF/Z3GU+ZWGGVzaV4kMSisk9RiqDYDJvIughvS3qYgnx5GZXOqfRN6d9HrGV50TFmh8qx/hnwXW03KUUulqew4LyFr8op6gsvpf3GpGxoHzQtX5V4KttGECNqCFH5VZEOvo2OghJOUmm5pWLYnVoYsouBYQT7aL3prV38gq427kdiMKirZgkC+O9A5LWVoYXC1niR95LeEPPm53acoal5xJSXicSLldPPRX33WMWUdcg0XlfkNRHY8fEdhp8OijsphAaWf5TlU8yEWGYkyx2ExCZyHWBLVqXTf1N94aGHCCNLQTLuan1dMlWe/Nu3E6ETXpl3WmWoKbTp5t1mj1v0sCRY5G34gPZBEL1ZEPmx63qKF/F5usqz2JSyCBRzvcT6QCbXcBRMjRRR7p3uMirV3hHu2FVAKKD58ottPeR/nUcEYujWe+HsJSuRTzbCHcz+yNqX/0viv5udcW22E60RDosjml6e6xXO3cG12qyUB/B9xPg/QyG0Ll0lJg4lY57Yffh9vSpLDa+OjxJ4gKg2A1yG48XwZ4eFgW5BOpMPcc7HY/wFkOoJT03fgrV/hHb2HMkPQ+SVH1h3nVIc4qQFQfKkr35uXXAH4qBkqNYdCrftT417SN73BYYb99CzZCW+yIvGxnUtwbqvA8JY8+WXLzbnSf/JlEZgITaoGu+pd64QfvVcg55r7e3ewcs6s+3zaj6jigK6Zy+py+xBrnJZTGmdBuraXHKUI+sJEABKguwkd6KEM8fdonnuXiTt
4HQ5Zkec6rBg2PlELd4RQdg1tOyRtZCvffP/aqj2kSpT1BswHFgDuM4JIFwt4EwVGwvRSGWu3kqeQ9h0OV8qd6qX5fYtBDJnOIEDcs/tb1a7O/J4tpTXYDyoKkeDchrdIm1tW73duuI/v4UxCx2aoguBp9nNlUd44DW7yZF9pTQr0wZo/ABoEq/z25WPH2rWBt+ridUU690fa5R2d6VM89poSnfYgtLWWUibH0+KMshQfOCJCC7Vfym6PhlMNSDePd9wsPouZ/r01JmQ7UaW7UV6+cHYTRZMz8MWuRmTgN6bUiqD/JkK2XCKehoDDkOTyWHnNSIo/dOJMY0W3H53un4ybO9qyPdUAoaxVKg0t29h0f7PgNzk9cj4HxDqGipfsFBj1DluiLkSy2QmemFXO4egeI8euu+60xyeR/hOx3+LqyJG6r00N/y9NiSuQNZv6JZlHLLJpQHTdQK/zSnf5JxHuFbqYndGCdKN4HizNxcJdoPCihutfeS0LZopOg9TS93XHF420V99wEjR1BDr5SfGlbCU9SVuYiN3/lPrV/f5X6lQRpEyQl9ONFtZWkV2gNMTn0x1As0Ey4/ZTGk40968D3PK/vUtr2ZuLhfRURNAIsDeZazugiFDlXCdYqSKu1dLSUl0Xl4vKFlXa+TdnTdKvAu7vTbaAFJP1no4OnCrL4jnY8+ue0h4rJBNH0oKIwfj+zGaoo+9QXuoEWJ6zUOGJ5AAr8OLyitHHfaOgKxo6jyA5qK9kQdZAE1gl0ybvoKjCTfwdRCYC+L4Q5z+y0Ndv0lZZJq9xp+IyZ59/2xC9jR5d4nZSteNnRFOC7WFz+gqtsjQ5olDIInHC/kEWWcUlRuPzJYSazDdjppfVCx7xk8zuaAzBcwwo1O1ofpp3v2ljDMmXRJb7k6clRDDSLgvRx94yDPbDxCvfDKZLwCwARN4i++E1QMaQlDfW1Zr2jDjCMTHghDC+hJUijblT4KwU6RPXAxbD8FpB3kDypuRK0fnCn5eBhJc7GU4sruW1735MqJITk+RiNXdLYx8RQZ72HvW5edIbJwS+rmqflkORH2s/tFn2+asy/JM2jrjB4NYYeik2DW5wpeAeQ4SkFL4sKzYA1ELFCy02aQuD+DyzoHNcIqiC0VAWFBxT7Nk/e+pnMuD34RWhFPbn37uqejDz61FTjiAX+f/q0PE4gaz/OS8KogID0Ictc4CPQgnRWRtwnjo2h5rgnutcoTvN04fntWnVtiWGohM1YeHQG/ulIcN6AUQ8q4lZOOV1tX25gwRwqamn8jEw7SyPDqxA9FrBAX3P9st5SPdYcqQzl+lpxZgyl4mIS37BdjB7CS0LU31ZbW+VPldkgVcJ1M5sYbvdXUA9VT8rAtt3MMd6rWFEL7MNrfulZun9r8AoC0NcBgqgusfsX76U/UYWcOjwPB3/u1T0Q3vds8gXq1ac9hB5jNowxzdSQP4g0MCujuBMrzo4cI31tHzw0fRnv1BbvSRD+PYDAh55s3p+NjqqrcywJP8mOIQ+CmZXCK+IB/NEwMPj8e2hwaluAaZ36l3uHMNudRQkBAJeYOvLf0xkNCfAwUlJswB4wwWsjkI+0DtRAm5C44WcDbgsmg+A28qFwQqHA21qDZ3o4KJLUNKeQ9XAA8gk0Lx7fCUyQWfxXyk92Py1dGFmyWYSsMU0hfYfeSNpwhSO5IDhuEAxATVduyoL2NkEzB7UGUAxOqJWhm8Aof9i/kPcrhYQR1jnkVa6hq2TGyqpOMLYg8yaxfu+Z/2KyC0QQGaEa+wZEOuEtt0U1qRp/IuwLXMZlj29x4t3scB9WMg70myGeiSvdsVbnxdI7wPht6HUo+JNtsBzup5cf/SW4VrqLDZ7B3kd8QgVkgBeDqTb7sB5QdkCPzFqwCye97fHhKO4BUjg9c/ik20uYDQJayLUND0qcJHtQGis7+KlEMakZFDAgMcrUOqvgXq5vsGSm0Gw6b7ZwCq1DQIqpBa3EvRfSi9lclEi8voYzAyGUef9KThfJLiPXlvXZkGBRV7abMlFM4vcRWmF4peIIlGg2m5x90RwB5eAAJJpEAzWVCoVOPfdIioj5fTHui33QXPXImDMBiYMKga6U1Vt1jt90KwifYyZSHaaKqV4oAw6eul7csTDcXdJ19Tz3pBuULMcbrSamMLdF7j4LnBizscjdH5nyjcEsFxsfN2+B5R1EQkknvt/Nm4NrSKjj2RrIJTK7yBiQGspFuX/yV3oOKV+oLS0DtJuxyZ1tep/CzZKuIe9Bq3TNLDuXTOMLe6rovZCG1NPhEk28cloBaC+LlpDYd3YIgBnFNPgSF8r1+++FMpLLoiUzHQBSh+YlBblxdC5dYhPRIpexJPqixev3Pyt/2ZDeygx9YMhxtmlzW69JbdgMOmrmxCmBRXspInnq4otXA/7RRJlq+yU7fFVMPtjRMHUPJO9nWkAgj+AN/Ughk9cWVVAAGb5DevP0tiXdOHECHr9xEgj9k/j1Ve1yE9GEin5Vr4fbxOrUzCvIrccsG2apJht0E6L+NFIDzpQYGRQVMcXQ3ZuQTY+X99hWnRCvDk4grxjD+swmDJIyjaCdicqLC/UErZ89nmzDcFYFWx7P9hgvIEqpL/8c1Ne6OxLdZzs/swxgNg4CwNyyr7tvcNxlQZvnHzjUPS2f5MLKghWcM5nACh61mENbNVM6L9y705OYcWVotCcnfcb6n10hd9iRvAT0TIEmOUrWsJx8meRE2zuvtvQIPds367HsDKA2+9RSwQMBYSf8WBHrl1w0GM0xMNGU3Hq6XXDjgJ8IsQsHJz36ENFx+RaWXE+mV7x+eLZGqc4ELfq1hOsYSjgFMfRuuJcQP4r9bVpt52uzPpQLyORrzHS0r0Q2nzyN4Wuy1soUWRzseTK4ba0rqCvCKAeA/GFsF4CAgwCUXnXX05yObN1DYuz6S+9gkzinHbpGd+xcorNVuYMHkbqgozC7edMb9+ODblg3fP3s32gXPCbxOYM0y1A0jHvJEjBaVZTfmBbn+TS8nz7q7Hvj5nAkL6N2d2vGpnYIMH5/5eVOmy3f0jJ4RhgUFPh89x+g0XVYN82hpvlHn3fErfhEoX5ztDlODYkree2XED+ftfZs/6UNyUZDwJF029/V7Olj1LvGAvZYj5yQYEw2YVlAs6D0dZmQQ5pJchvZHVmY8A+54tZQ+rADBWa0kbbW33htO3D/gLgnpqR6q7d70y5pzUcwfnHB7LvkmKs3Cs2c+31tWCixLH2c0zO/8YQ/A6WUsK9haXNZ1H9zyrTFt+ixKmK9TBHJeDOzemJ7CbTa2gkJ+Ji62g9XVzH24N7A/blmlZ+tf3h71gOjGam3S3CMq2fG9Zoqr5mcJG+QJKOtsd3Te7IM+p/iwjwiLHMldlXfDAyFtZOMC30JF1eQc7GAyrnwlNfm4vTAQbanjKxKYPt5zPLrSztOHAFPyx/vopUQfOCg4vigIxUPCnfytldrkEhFG0SRsozA6zEg0RfFAZ/Z80G71rXI/9lOGGq5DZCr2WtZee/X6fD0lAxcLV+ftL+LyObkWqw/e
g99yt2zE2E2MakYnNnPWq8lUYJjzOstYyFGOvEWywRHIeNkP0EVQ0QwnG2s6lDdPxo/QF5S5xZYNZLSTCWmgWOthgd87n/N99AbsvHRf8ZHgOgB34XgXdomDbB5mrJUMs7Kziist7S0/+z7OE2xDOZY9lf15CJTgfvmiOOlyavpfBatWppbEwtQsM7/0t02lPvUeBwcbo4oU6J6sRlAg/9HtYK1YuNFkmgXBGvDlOXNkbrCe7xkHDsVpGy1gtdzF4fyGv2ifkDbuMtHV6SAbwO4vLrUEn7JEQKHslW+X/pP9w2UZd3NgERk5vZ/lzG/Elbx7capY9LuHlvSdZD/FNXIT1xsNPoT8enUKgDhjar8sci85TxVt+aYJYgZVKCLAKNqEVP81lZFnKmnjBDvWRFJ0YbIAwcxUZE6r3f69FuauSwDzYFbAP3AjAkH4zJfiSQ4gMS7psUWQuLpHv62wb9Uw//qJMPRqs+Y7q3ERlhjQzHu2YaAI0IO5QxYQGLj+IktRUD9i+/q4qmGDYay+n20ocfTsmiRjxXh36+V5DhYTAV1MumfQm08ML8BbdWOOd44kkI8rV0v3LcpaxoozeaHBILCL6LK/LNuLl9PW72wo47YepTkvvynzUmWZBKZva4Thnbmf7nJ/MYrsBUe15Q3iKLPMQ/vDzoJ+/OSZOJ7rGyAtbH73+bv7mjMCe34tuw8CrF3i8oIiijIRgsVvghlMcU3mnH3J7JxCB3KIeMBxnY9KATR0XWWxnSysBzF/4vkLUWODaQQ/N7KgLWKQgKjiayLjypvrlA1yLQfucECjImQp2trfPeEbheg2BxG2z45FEMrZ2z8KBoP01xvq9miCRJHST3XyVrmzvRKZ39+DL+M7/9IptZJRjiCI7JPacerCkb8ZlmXBrsoY9tVkahR/RNXSr8uxyRMp3NBsTfRdnXr15O4+AHgfER+fRxxQHRrBE3/p4g1E2FIx+XsoVgjsExG3jIzJZgFOXxByvSgcBlEFNZuslNwUMqi05AvDvQXQvKRFRaYlvMRMJ0gg5EDkKM2JyEa9fKiJHz1/sDVsGkn9nREkXroG+pVU4A0RQfMsAKAHlzoh3F4bC7dulhlp7UpcHYFHeId/Dg1ibG/HMCpAlfXqTXKzhMjFmOt9lHCkWPCdR1Y3NCR69E6grA+oEcQuZZ0U7JZL/L3FvVDnKTCp60J2hOg7mYsobCDf/qfEa3TZLrDp4gXHvU1JAc3jmhuEhHOkpy0mbwY+WCnQHxlkdvLWhCdzyBhbJNbLE3iPUj7J3k0y3MbY4UI3IIUgEfxiYfsZ8G/uaiZuHXrA+tlR+uDL+jGR22RNBzM9sZE+eXJRH7twX4cOQ+8FuxzDs0LlJUlBAjtmAjY/bAd5KRSWvo8awXysDnYj7W9gcMt7RNVr61dkmIZurZeYgrVvvvsKsAM9ui9g5ALOtbRro8swkoe/Q6QVvx+GaNyLJRKfKdGDQlSde0yID+gfJYfjOFM9WaxHL6YT4kxZ1+juJD/KRs+aV3IoshV98qreZyPbm6qTQbZMjtO0E1uDD/Ra/J4H+COpUs9/Pk54llmaHk4wevDsS/AXlHv5WaXSSvHVWSOt4O2IcwjO2Rth6d4P2I3xCkS2v7eDEpRfzCmqU4/1MM5PX3BKPNxoLlnl4Y5ZPg5/4TyqkORgz/0Jduz8JsFjba+YRuM2CR99B3HLJpZmUQDtzl4nnK9wnuIQ3R/LSSIIybkBqgG9m1wWPM82uTfP9R935lf+9euW7KTYW1F86seybbhrkIKq+7apravvGkNKN+9UdYGq0W5B6MZ31k0p3/jjDhkRid3xBLXEtZM7WFsRRUStjvHVwXXDexp6I7mmIFgMcM/9mAJG3BgHwHXUPK3TwDJVi7QI0POp853i8UMlH1kr/a5GkGCl2nZ71DK3gDa4yPGPD3bvvMxlEjuuSfHm/T2VcZQWsu70bXFI0xzVeSbzNltNQHrYxCL3uztQB2Z59kxumMzovn9onla5HTX65jpfJcVrp/tK8OEbOYwYfY4v4mpRo8VvphfEkx2/X1VmRn0ibckxrUMQFgumjskatPICY9DabHj4fnmI1RNCZwQPplLTXPVuJ6y93R7PN9JzNekdqqgvKIhYYWVYblKOpnrPAwVovs/3YnqR0YSYpnYYJwm8UTCyqmwHhxRpml79KmZY57iiz3wEyuK8UGE/73BeSv19bGOzXkIHXNTuDZ1GAo1HLIpcMfS+5wlPApJ7BbHyUu6V/MpxCcoByM3Sn2SKcYvXlE5gPxJla+1xyYIoTLwFQf7bwgW4XBiGYxjL+bKuYU16CHEOtQB9xKY1FiJC9/DJLIjUNniyrBSBfSRiNJBgVqgGY3vSskx804kOx2SmXH4N5hmkBpkKesRtsZyazZ6ffZhxK8lkLtRKS3ymhh/XxXC+rpf5JV94BC9I+uXjCAe1FvpNrSsHGOtAx59cPSm0/1l+g9/IdpsHTS5dvTpZ+TB22PxZGXPCu1Y8xhkY6FRykqA7zBDvY5C2r5zYSG/cSX2vCJ1ZOFFCGjZ8nlmPZCQ1xcraCMHi88cYOJts26ypFWyD4etuANp90QWfO24+vSwy/ym1OVuesSMdjCOHoetie1rPw0G3BFGYx2Hk1lfvwvNpiU2TPIJxdYPrMuyQTuljpGEIlJ7YbkXuWc59STx2I3ZI231FjP2BEw9Qte9QvRIh1500zC8FUOCqnRznW54EWrJVWNfBWEdPpiy5WiDDNdWXAOGdrh0i4Ho3Batw2XswydtEWvp4K1dMrFbJkt3QK+5oV0qPe1H6cWC53tmSDTiTkUVVkPguNW2/cs8P9L7g1DmSk8QE+yEu/UO061DfI+cHqWW/rUAiee3FWG58npvHqtyWO8lz9PrwQJKKfCevlvCusiSLojjuhOQQsSEwwS4CARMDtnlfPwqOk7Pat6qWZ1PtlBITOm/pHohAngPfepQ8AiFbqKaghNeiyispmQJvqJ2d+Ot7y68lwUPLvRsdkaK4KWN4GpajP2SQWjbPa8YHpqFibRieojfvUST6rgsbeYdrnoSQMu+9FJTgZQ9xkBZa3bzIHN/cuo5LTqGBjLkMlnqEC6RGW+XxQ1IpMfGLdY1iC1TUjv7UpR8ybgIBa4XdAvhlB2KWkFT1LK6Spm2w9TYyp8v1zrAjYnEKXyaaQx9+CSHe7kBo8RkFn7Iiv8MtByV+eybBIH5Wpc+9m4PXIQ580AYh8ALWPfQOaFFZYx45eJxryGTTE4lde3e9PV7kO5Qw/tx7p76leXuX7Cy/+Y3dE7RM64wm8DYkZ74d75IeJzKWzrRIvXQaHw1zuSFG9RDEKB/jiM7CIG8DbAEncJOi8BVZbQQdOW3hQfp5sQOeAgNiRlRshX8yjA0PEVMk65esAlO40tcPo8u7Ly1uHjQXrIjem3iZEcF1bwit5JXeTbP6dJEyqSYOVFeGggImByKMwlTy2Czkg8XdhBEzacxzl9M3C2xspMZuABiomo5t0pH2+vVRkBjytUCRXdD6MGHjh4VtLu2RAY5qhmGkVEDKL6G
TKNIYkZMmbzgGZPOsseMWPKqc848hLF9rnieR50qEX8zPmxGtiOAHWmgZLyhWTbzUgfAKHOvf4sdOJtJ/0gVJfYJ61Zwp/n5rZ5CHnq2l/bS9U0sHKj69z5KnX0/HaQkmYRNLyGTxw7xcI7ghbsfFPnD7DfST2fed/u7EAzL0hvJA0gi0hOYLoTwdea19m6O8Bt2tN+c34necshipseTR5cpWcMzVfVhUEbObHLKLxBT3R+GMwz0zFpUUNUr6pjGV+0T8s/RYVMT29L1piWRIZPaCas253CM47vIi+93GagpE2kLskoh2DzV3BLDweCWy4TvhbHg8W17eF9Zyuzf7e8XwPaUdeomLc80ELY86lZm9LRo5TmesteZ0/pUk0GF6h2v3cPSWYC2anIBVuuNdMjb7TARQtqsPh9LsiFRR0RMUm/BaigvMzwELu3x/CJQkf9YvAH9jyzpJbMBsae6nhujvPtpdGmGdbS+6uSez1QGqP5HhwvmUROr7cOrmBCT4k5ESakOVvdPE1Qv768aOPW7nmHqfjBqb+3VG1o0IQnUqURQUw7cMQurJMTQsKYF2frh2cx/C3wxIERaYmtiCYi3dQyfmcRo8kyOFR/aRhIHCfD3rjS0EHU2i9wReKUn33QAjAHuC4OnHy7k+2J/CrZ6HCW2oa6fIPeTPdxcxcoJdhzYeobVf3ZKotTeNYGvbIAYTgjWKWc5n8K3ATlzHyZjSumsU6Rqci3UdzmfEoAfHE8SHsR8Y1iTcnBGHc/LhJy/ibnun9agp+oN00PYBDgdM2pn9cvVwhPKxNtOWJYEz5ILbNowi4adGiVXrvczGZ9M2+9pDZH5MqC/51QyvxeeA6Koam6jYZzKuhYvMcB08SbGT24HsHW4Ou4Z5IIz6Em7Te4G2sQDpJ0QToTcGQ3DWobesR3tRl4jhsmXw4RPVXxcGUMtscrBpnx9eaDSdoKzUZWbs1Hk40Qcj+pmeNv2xO3NOQG7fa8y++goe0NxGpSci7jRvCC6b5/OaK/Pl5aE07DSosNQcBBamskcgF3Lp+EXS1uZw8yynCl4v+nxFAyJ/6Ch0vX60JjLjcx9T4/mxpW8Pg/fHGJo9cAdINyshkO0KxACPeqxJZdWK0IKqB3gkXFsfE/VjagnBbYRywy9yZkwEupn5dAHpuxAM5jO2ELOw5/TYFI4SvB6CskfhLP7A4z/76PFVSq2+ii6RQkfY0Btoi1NcOS+wnoAlFdus7wunF0eiaR9nbynrvWGdPEDZJT4Fq1LYeaVHMLzPR0dmS+LLysGywjIhtpXVX5jPY19iMyVaUy3oN9OxGFJQzKVFLgvihlfLWxhcXt3hVPyYv/0ltBaGICQzqorXEuFvw8/2J3LXYw1lfaH3i8iVnvjc2xJ7p2Sv1x5Q994ZKnjhQpx++HfUyRcUEeHaY9eLSreOG5jLmmTPBfsyqRApJ/NDKUHVvh+gcM19Lw+34gjvA6LI/CYp5lpMhcKUT+2BErjlA/vSM/OrkEKTPHNdXCYcT/8qeYAGRtuafrWkOKXeidhwfaXjWYaHjiG//KysPGD41kKKn8iXErukv0e+76YDMhbvQI+cWTKuMaieJPhSSou12mqfqrJaFe7Tj+RpME7qRXFlP5uBehGF7u8WyI71y/1qXfa+5Q8MbbKWj8NO+QIj0MEk+hLpsJ9X++7saGG+DCbQ45hpU7puu2wwyxfyXLUVIDnZwHuyrL7R9U277Gz4DxWU5eNJyC9EHhVR9T5aDSENWIUR20uunIUPdiC9OFLiJgeClpeVg4qjvU8sM8f4N1fChl9i2KfS8ImbdgqOCPFUwzi0+IwQari95jScwiyfcJ+55g078ljxXU7BSA3quEv27fPSm+XXeBhppjNPPrzj40KhqFOig5pCTZOjiFLUxDnf4+QUmw7elroZsfZvM9L92qF/VivvvtWksVvFUScZlyKWI5a0qNC/sliUFDGC4nry/jUzIUOsnTWwLd+xA49fzdGrSKEactehLK37qakTN4ynxBU81ryHL1WCuOuWXO7fD1uy43Em+Uvshi87856iGo0ik1889sZbuJaPAzFg3yf/aeppxXhAH0xk619RwWFcTBMP8wnQGZ07D6z5KAEi/5TcxIQppA8OMpUYo9/b+sW5u61RS5j0uw+7POzaHSaYLfX+LlTBZgUWyGxa88cnz8sbYzfSZyijjXVvMjbe8/HWUzVbPK512ftVwoQIamW15WT8zKgnAxXt7oIqMyjJPekpWzuegKh9ybTx+7k6t31iOWGv08r7bS/T6nVRFK0zP4tZCuvR5VrRYDHoZ+csFiCnpj7UwVaCsobBeiz95L22n+gwU4KdOY6bNDaw8MMLqRU4cJlXmgg2ppS1xS2M+U0tFuiASdxD1W6O6yL5seldgxfgeAWhGdlJuQ5DhTtvsFfek00H2oeutClUpKQlVQovmWC6n7IJpszgXClKfoMdLYYxvLJH1GGFK558Hd432zY6eI/hgnzgCBIBy5S0tCMHygLg5pLHx4jlEspQuia0+a6l1flQx+gn2iS7L/jDF16gwFPD4tcL2wfbGiFxfjZP7PsXTP1VH3iNNG9N02tXFZSuK2d12AwhsVvhldjEr6LPuO8IHNJwT5JNhyGOr/8kfvzqZqrJhXMJg48n8LHaRXpTCeBVOjZAq4QjXxGwM3gRaIpssIJqqCnh8iaj+2CKP+g2haHmfnk7ZJ7M3MLsixp+9v/j4SRJfcrvVsf4tdwioRmDDLf97M8HFXXkPIJkKvjet7GyXq2XamZXYU+1qS+E+HpxD7eilLEVoZwulODhcHmnTPNVYu9oKdz6VKz73VVTeaMySPf0ho2V5K390it2Iy4YDqoGYq77Wwq/2rkAdz4JIg+RwBjHQft5Y7WGoemK+eGlSc62P6wLmLfK8twiemyEr9vh3ZP/ap6SKh/I9Wz5+ExbtzRZQhRM8g2qRhF2vhn1y0q/rL2gn32d4QRQqqUxn3ioi68pcXyAk2AZ/fWxiJsKgW5xnQcJpodolFg4EjlST0L3ntIErD7hZk+hshtG58/WDQQINas8BKHR5i/xraDALUjhtdU71wlhYUOSp3ojMmnTAn0emgU5RbE+7DNMbyOaVBD1GY0UcPK1mvdX0KP2W8MFEjimwfBflXE41WW+otWGrKSuEmW9ca1hyOjzAvao9XXJ1jqwGxMQcNJO8fKCu2RGyUGk/A0UUKiLBptHmjCfX9I+EIMRR4DLKJULowU9Wq1WB6fY0K7/81IqWH5737F2oHs7u9SKRR217y9xjGMRhAftftnY050eIxARIllW5TyYoqyP+dLYlv+gqlNFZnlGSSeJqV5zHxS9O/Qb4/SnyycrfjJLJUNk5lI/Cz3g97cPVGzT/DwhK1S1+3ywd0tGvm5P19MUk9KXLQ8CPj7YbmyI/j3QvIZnLuysk4iufv7qa+MEpcetW80y+nt20ZtVcVJ2TJysTf9dbjkRoGiFSmsdpfoOCyCnKT6u4I2y2FRZaGDjvPhWjhEbrBbg62J/qt7e70mugrwH3+5tNuTJrr8Iwl
GpbgArpKmZafce8aPSsdCTQcfkszACSOGEGU+fi2BscCoGVxm68QsCKBwEXrpjtZMuTvCisb2kW6TAJx2Q0cDrZQhk/mfA4JkMQykcKm5QkrFdPiP3dOs62UF6VXorrMy8vo8enrcgRudzWNeTme+WsVhEgOisi8EWhYXwst7nRQ91tUzB08rIYgNZrfhn9xoI6XLhZrH4FurVAv9jcI7utZ8ZJh2uNkWV4nJO3vpplrAp8hMVtDe8Unez1CxbKeZl4eUBcIv+mpZdZOJQw6Rw0ZBhv8ovY5+DIvev4MzCD7/kivUk/XY46xE6vhJR91AsuhblflMBx2fwVhiyjVKb4odvL+aE9PTiOyipFfM6MxHksD7Hv4Bxj7MiAsNcWxZYsJrk0T69a53E2SVeT3aeCTVwTcOAJ1UyNrpE51xGwZrhi+XqzkQF5hXlY0LsOFGR90jsBV5843pvx/pb6Y2q2vYnX3UT8t4D/3FhPmW91E2c5sGFJl5zpprpXYvFTOf8taqgl7TUx3s3RNsSO/cdRqXV6HauH9j3ltL0k7lLf3g4zyWo/6h2jO/CYuvmCewF2Qu4tL95QK+sskpgRwnzna0lKOrBOWu/Z4plpwxeooE+jGOoNG1MJeNcTrAxXkTs4QbQbfEMPyR2cLjfh9coHHu3E/yzFWQ7WfulKJNvMsV3iqad+eRU8UBcP4bhxoc1HRhHOl1cV24GgagZ2PpyCCcHo4sn84piubU+yooDodQ4v9bBZo9JW5L7eKeyQjD6ayR0SXn7uPpemjNWuCdCnUDp2qGmXY8HL/3gFqNLEmwdgZCwQ/2k59FAV2ZB+zyLUWQudT0xCDh7xHzOSa2mx2B5tdtyAJBh6de4q71eITxQpPGl7WBo8lpLbUZcZSLHXpicmCXy+J5Ogxb/TUzUZAE6Jk1fUHS+PbiW6brwg34x8hVF8QQQmBnMNPujw5RM1vHDXNkOl6EV7XzYaINMTYp3S4pzfOmd9a7IqZ7hl2rR4dvFXtPLJEeHXksrpnf5p0S3DKDUOU0NP48vVgiybYP97hFAGEzfb45/chSY+BUWFJwQ2AtxikXfeAmGR4gL6kSZuZ2HHlKgvtQX76tI63/ukDJx1rF1DM0CqGrkxBal78ho2QTVpQbW1XToqxZZe2M2P2eCJW5QdW1PbOGx5d9Y+2Q1/HyN59MhMSpSI1lVaEcbUMPg23fcO7QJFsojpDUUk6/vYbc18R1ZP6yO1nsZwpKbNGtTPn7+XxDPeeSDgX1mgnyN928AbfaO3zfn+RBUnKuiZlqXfiM2mtJGHzTXAUX0BF5evm/PYDNLkQir9FqSR7SLIyhjJoNNnom36vQOWwNTsbO9/1pr1wCxwkm6pzVn3muQwvAIkNiguc5HXHeCtv5/7X1bd5u81vWv2WN838X2QCAQXBobjM9gYzDcYTAYjAFzMubXv5KTtDn1nLbJLu3zpAkQwFpLc861tCSh01YVmkVGfgWxlNpy+TpHqbqvr9cJuy3YrRlqFNFu3nrc3JRkiPV+fwwu1mg3DQcOaFd+3sKRFZu6RPKJNNj34Szn7cWmXJo4sgTS+JRN6CxR5OVh7PVlGFkHOx/vM352LvsjSU6XgSs7mVWzbjg6LTEYzuTLyKrmE7OPAamt5MF0NFOo5mhPELvUzm0SVZuVZY2hl5HqVnG5meydFabTsebyNIozbt0OFjuKypRNPl4SHTcxhJGtSsqWsgpp5A3ykbnNl4OpvHGSsQXF9GoI62k1dqyDKqDN4swFF202ZK6igGG2nzvW+VoPFVq/jRJoQSOpojvTqrbN6saQpj4mFfsQtUQQ8VLBX33Pqlx9sI1WuWesVCvPdXE0CxprPMmuxwy4lwgTx3i3JYNcFeM6RFWdR8o4Ncwm51b0hD3mxG8KbbsewLAsiqiPMRC//YyjsmsiZ+5BO2YTMk5S+zqrHZdD/zibq+WEKw+tPSxWJHdSCIkxxLA5r4IriSmsI0m7Q+IPtg8XhMIEmRTBtXQJJiQDOb6Iw6sDDldauMbFhTYyW5qWI4yTjjA+MDcl5VDJaCk2bLgK/AkY7BZj7RyDRtyJ2vS4ISEIF5IqgAvxWokdnEF+Og6K/WVg29EgqpWG0KlSb/0iE/zlreqeZMjrZThCWTlr2XadcJqa67BMjrgfD9r+mG0ASWrk4nXcvw34jg6nI23r4UhlajNpDnsUUF5cEaQ6lhSli8l4bLmUOh8SiUAEaH5OKYvtR1hXBzaJz6BjK/UkCT2JMsQGhX3Kra80RqtVf9Q3p4iWlqaqJ6OBRGFsY4PUOGYk+h1Ocks0eQQmBZpPrcBxzZhWro46Dm7lCeeatbb5Po31nXKrXZ24HFiI5zTI8lr0/aw9Wq14VhZGUZEB6jibXjVXVAxiI9XkdyvDUY1MkDMxn/ZBQ5B3IIQWqT6dD5bAcQflztM3/cu80VdLWvXkkz2VDDLyR2LpoYxiMM+FPD2WzVCJqfNAvU2EO0J5o6UwgGKxWvfpqz62OW588QVDY9aNDfODHG25Og7nAdssT6Su0lWimviL47OXPRPKKrsxvWKDQwKkUBtQEVRJ8RuxAof7vcYchWOKGCzk4cKcm2JkbahAxbEbcbV2obeoXJyAkFXg5JV8YJ9ReaGrim/Uc2FdxuOdA2YKdxLZ4KzNc9u5WJ4VnrbxOrimguzkx7gU47UzT4+iPN4Z2qrZ57QqakxWtFN0USZk1RJ6pl6lIsuwEkpijaX5QQrpnMu5yGDbcxST8bcNJ6icvD4vXUCX+6AQD/J8BuRhw6yZYTVlhRYV7oThBM0IfZc/xYE0PUPV6hfVUbXY2GWz0+jojDd6WgpRFDe3Qatbobg+pI/DtbEcnzwu9OFp3W8sn19gsE/J6HCk9GfbgtQkTcx8JdWkn1fTlp+YLOmQuKPke1qh620UKUg3GG6l6aDRJepIHZfFpVIOguU5wO+PZmq2HpzKE2BMIT1DpZgWW2a1THg97JtUaa44EtaPyReR3ZEVdWVIJYBUAPSnmXIpqnM5M49H+tjvH+JhobstHM+mO2cxRloiJtNKu2bCviV+sEwPYMtGahi687zNzTly20Ldnqmt6nGN57GqY5e8WM/CkTAwN0vsd4Ltao7I637lW8u6PfIjfkVuJXh6qG7Uxt1ddgBw2XbftIN+IdIzEPnH0wYE2tRYSKvYboqNpE83vLGenU/rI5uczh4OEMkHykhyVmBzWZ4OR+OzKTe14p3rXdgfhLK5H4QTomuOlDJeeO2Cxr1/N4qk4RzkVVbLU/fsLxtesQI2PoIsnHoDfysruTub7zYtlgnimUYkLyVHqE8Ufz4phiuvSmRXnusbLFUyU97giD48Ltcpg0FDOk43QD8qa98e8Acjy63FdFmcJGm9KnnTWib54DQV9ux2fjAOjTk+TCLa1+aCMWkMmUl0BvvD1JjR81kQa9yluRMN4IjyZLQi80Q8bxLJShVRw2DfiAZJPouH4Xg0syeWE/rtfDs4zAXdOZaOvT1MwVEqxN14bV3WG1cMDvF5tlNZMTIyJjjxk7WvVtV0iz0Fq7jJC
V0jNbpwbXzNJvDAgsxLmf1k4fgGlu28cd5R4oQON4GYev3rtt9K3KnORq4lwWLfkCSsSAZ05YgZaqd0MNdNQ4wv6V4uUHptrpuzxjmb0blqLZ6BBPylxXZ+iiv6GkN57nntjFNTIU+QqMveuEhPNloqGxJdn6Pa893JEXmqEkeE0+B8vpXmGxCDo5On2C3E/Vhq5Iy7bjSeugyuK+lorAIkLYbGSp/Ti3WGUbCfDd0DvXTWg7whdZkyNVBM/jLu762hvuetAERmliXTemzaTphKtxzvkGKUzTVb7xtXxhFuy+poHCNLEuY38nDMdL6VpcvcquydYFARqAanQQRld99wM2MSVI4ajERnLOwas0jW692F0Y0565VlXIbqEaMvkWkXenvZTXmWvFW64OXT+rSbW4ftdHZZpTMN4jjakrdiQMnYMtgZU+e8ym8ZlXLp2oIr0IChJ6SIV+5zCzBMm8FocdDXQxICbVo7HWmpsLhN+ZI9hj4ySlUIHl1ezWrlLxytLDl5n0/613pZQaoZXHF8fnSJlVbT03o2Dc4pCY4irmFlNZYuqnk4Swx1Xl5VUhI2qi2mIDlmi++PLls0LYMNGk8a+ZqvfH5wkVDr9EkVVEA1xngZuRHmlimh8LVyPaia2c6OJ4vmV4OTi6O5pr7M3Xk8yI9iAScmwzT9mTSobXsz7Zvatr+97BvDHl62KSybmdJuzq2lFBBJxVqZRe1Q2bWB0hec2QQT5n2uh5sFZj3sT6K4uj0WrL2Tqgv9OXcwFnbfSMJiaLaboUnn12y6KvVcDQchjiSH5L113WXlQR/7qa9tR6XNHxsPkmxwTNbNAFgMRlEEtmTe+axlkoRIHyhD0mYn0oqbaXCXp1/b9VR3Z7m6aXFnUHmtDE1dabCgJH6Q4s+ewlkj0rcJvYvcL1taiau+N5+ChCH5ed+9Tc3b7eejYTthHGowIRXig7Rw2XbDL28DVqQzbn3+iAIPSPrJ06KtvpmY4do+cv5pCImYMCOfttNwnnGBv7CDdAeIr0Tn8Wnjk+98yxMHWXs5g5PLNMpJ1CXoZoNoIB69zX6xM6VKmrnifGfWzqlvACJ6imSFJLCcHejTcaNI3mLmmNPr6jqmbXNYLTiARDLhdxInTSOnYnGahPHCOtsFSZRNxUG7XjEqefg2sa8KyRWCelK7+sFutiM+4qRTX5b5epSSlKh4bsU6W1ncHOBotpnJURxfrmnCrKPtHiXEHTkSaW1O1RWhumrXt85lq3xtxfIoy6L5TAXTmYqyrLE0eZgu4vNKqZdpyEZsmiwK3g6E4Lyo/WCTWiQHbjlWvVTIRIDJ8to2TAsoDG+CjPqt0jZKM4q3V2oJcpJxKbVNHCxrLHunc51o/HQqWIu9osFbHum2TkFoMxeDUvcLrwxTd2qdsl1cmPFF3hzGs2JSrLQolnM6Y+V0yyx8f5FbydLJ/PgUGu7Vqi8opIcUNd4b3FBXfOc88/f7Ic+jS7NatuzmJC/GA2sphgv7DCk/mKt83qYn/wJt1asEyx+xqKC2SUukGVjMzpF6YSUSPQp8nCuT5FjUYBWPuWS6noPzdbVR1WpnqvUaepC/+xygVXBkmYbD0eAyiNsTf4uPBpe4CPvixGGruXSdModVaITsihOkurEnk1wbbJLhwE3CUuELf1iAlbdmW4rxt/N0fQxZyFp+RoJ00kx1WsVDU3OufMHzg2iu68NiTTq1wAlIWTGX8UzeERHNki/z0WKtSP5osMii6dAU1lAYu/JOup7IijBGxpIhvuuO4Wf2Qw27dJ/fJQg1T5vjoplVqi4CMe8vuM1gYPbjc6qdTJrMXO9fTpfBaCaPrUAeNxY/X0UUmdXhmxJsYKY4m9NIGhTuVSez7bHm6O9IzjYeTxHH3/RIo1qCioZXWyVxl7zeowWg+dtU/tHBkzNhLgyU8ZwejooBjv5uCg7HteJOIWq/Xam7xUBpgeiaxXDGRTSjpHRxFC8gPF9bM6eti29qWe2yu4PnAU0atYx9u8n5qOksNU2JAssykiBYHw+61I6UfHQbvWtv1VmhQJ12WljDchjMRwwRUYlXUadmqYs0u9/DhvGtYtzvF8eNdFVXRiQM3SVGQRgZ9YibMfmI5DYCp6lPZbQVEGdhBN8MajZK2fnVbPy9EQRydF3vhluksx5/bQuSzUioXD9fpUWqHYkTzi5Kyk5w7JvSxmnXKkpRxQesNw5AvQWPtzQqowa0sIdQIvUVSbVfmluT1JLLJGPtLMxJdBb2oKR3AYnCyAqhyK+C5XY4PAta5A2yWtcGrQf6vhKBpokMXrocx1piLwscyYmzyDGIw6zrrOn7hECd0W0tU4sbG/r84IV9l74uzSxVtl4UnuPlZkvAiSTiSNYasEotgfmOClrZOebT7XW/044tPxSdTCyc6WDjLa6b/pqASr1ux6Yl3NbdU5RTvJ8Y9VKfSkeNqejR2dwLJLpbHgzlNp/qVr8199DZaGe1TXPldIexq6xP2DNdlU40hqKlcM+3ozESqmu7H3H8vrCbSWrZl2hvH/Up06zCnBCcVVrKgj9dxVpBpGftucvE9KBwG+nZG84mJWsz5VmVr+PSWFaKONAWK7FVmHmFxsoinjFr155LeqL0g7MoTW1P1JUtFV7Vi7OhSLjhra3teXQ+75g9S6bU2cOrMGYdxiiWws491syWrNQ3RRF3JRRtTcbGhnapGCvIi3oSyGCLTDHgcLkC0rDaUq32e7oW+otd6Mu0vhwLe/54ddYwzsdZn2vnBzs8lYoDyVJZ/yFrGzJNwJC6l2rDhSN1qmt0MrEvh2YKVrynCtRxIW6uhi7N/crSBtOyz9uqW9Sp19zl/uskahsZtZE4d63dwXBdVloSbL9mvlqWeqYlpLMoVhhfoVvXTK4wE05p6VlAL1lpPHWDSbrMXNNopIPdJ5MIFtvbBKVSyAieXhGgKeJdBAsQs+tvTtMArTDgWXxzW7gZh86tmQmNtpft6/nEGhtZn5A1WJWD5Y9HrcOLLZNtkcCmaJ6heun7R01qWBhPuWDJXqLg3Pc0/+DhmHx18ldbw1iuxxTUubSqmEgr2dyqEyx00cUlxbO3Gsk8L/LVdsbP+v1lIvPC+OxXA4F88PEOt4bpO9wq2BTERbTD9dRq6dk0a/LjMb80plY147PuCbuNZGERzkyPVOU7FiCUyqD7NSXugp2EpJlFcLwmBzEU882SDMnZLeJsXUgHMz9Q4DZRt7fp9POwYay0sOODsTOMCGvjInKcLTNaKHu9Xh43WQ6YTDjmjMu1l9h2Za3fH3qTyUqSNwYTMdxtRGo9yMzzrH9YY2Y/V2oGihVw3NKZoi3M+iwx6HkmEf4XD1IzJMimBpVU+H1dRTu3scMWy4myuHDQ3CejQara8ZC+JU6yhJ6Gh02w5UeLiOiC6YGTZSuIpNuIFi3l3EU/jDK9XLkSEy5HERVd2EQEgyItTdGeklWIZY8eLbHkHo+2AEc3llZc0sV+n4vZmBtuFRiJl3aqJBdOMiqGObUVQHswz1Er17U5Ky/8fLywLb6sgqlAH9Z9O/dp2YQD
e72vbCcdrvAjFtVp0F8G5ug82KjCpAa78ZCFFz+p0Aaj0r5/USy2dNuMxERlZBCXDXjf3fJHhxuGFzaVDMWm1+o6wKFis9A382NSGD5ZVl7uq5ZnrjOaVqtx7jN1f1tx4tao6NlInvnbBf4Mw0laD4tlHqWSBS8k0TziDSBeEeQ57eQu8708X6No3TTVxJ9vS0vfy8fLQV7McqrPj8UhU9W8ezld1W3b5LEnCJu2vGTZ2uFHI3k0EKM+KHxzcbryDBPBZJkEA3rsuCNtAcWc55itT9hwaY3WHrrs8yG8FBUOqYphXBLAmIhBFJ4itZ6TFFqxpZEaMMexQF0HNIKEbTjR4Od7siiZXDJMwgiHLScbldav9chsJn1L85frfWYb1oxOgvORMwOHmQdrEHuDyEhElgqnRn89bwN3JHjY/eZnGA9D1ayEmkHYszzVUbVKHigepnN6OhnZZYr7VOs45/6sWSyl0wwSb9nLB/KuzditqiLY5bTaV9KJ5unXW6VMthH4i3qZ+ZO8Nutdm8/W+erssf7RuS1pSJA3agcme8nXy0M8IKtTXdB0sjGvqAXOrTL+jMWsWCk1oA9g0ehUvqmXnhyVUB4H3uWWdDnfcjPqaitvbbeYVqODEakAMsyuSZPSoQq61TVH21+yQZGdtGVzsLOTgKotqeUEt5XvjRp3UUzvy5XkzJbihSnCM+2qO3t9uhVw62Pdmkay4MRNChZSGfbJ+iUHg2gDse80mhOqe2su+OXc9KjxKHOzNE+NU7IZ6XeYRoZCd8fBXFxhEhtUvse7J9KE/v6AmFydpfp85RANh0Srmc+stc6bJqZQtYgFy10yyqhKZ9uQ+Dh2tkkN+aLP5eZZda/bfR4GZcqx+Q0FL9W2rY5CiSNbBwcnulpTJ71dxpZO0hmXdYguqrioN4dAbPORA2KroNX9pPLrSYYaRAxTXKTQUy/YOspYslbGbLGdutdL4layIfiG5jbrhlImfd4SjJENyGzJRarGJIEyH48tTjQPxgk6TKvDxWGB+tqUmVsDU78qK+/klkaz3c6dil6aAElVdT3vNUOxOB0ps1Cd+VMwaddxYLVTaaSULipPoGWWEPoF46xsvY6OGglZ7aGcb6nheq4IU5SpM5kqL6y7Z9mT0NC7Ep0Tujoow7aV7HFwNdiJENH6ocR2ajZHLFuacCZr2D2rOtfTZWy6J0VfYHAugpmba5yiIS+PvKtY2Yk8GS0mRzYrt5uhFTZ6MfZG/IUstiIrV1Not3mZp3XSyrnHCk5NLzZ3mXG5X3rTAZ3MdWHBcVhF6Gd3LdTucuKuByM3PtnRCgFVWx1FOzQ3SrYVVTs6TUcVrCiJ2teIlo3RhonJMmHyuZYN7aZD14bpuBcB8jN1eXbP9DncyctLfOznZHx4jL2HCAjV2JOOUU9a3z0SXUlCqD0JCcGOuY0E8QvART4lyVXA6gJPkwXaxOAizcuYRA85vdmZ8bZPln2U5aFpKLtVfzyPb0WtR86hOX8SYWFkRIaRWG5zDjbZcbhY2UqlUlus34viMJwpeTBlBufxDIuw4rpR0aHcMq21LAs2Gy9rG9nUkPAyFIX+KpTyVjivmUoUfJ/1mGSWl8TnJaO0z1t+eg43YGzHhV/alrsQ9KESnI5jbxp6AX2ULmcQXc/Ntu/tFYHdhcaEJVaYkD642VHgOFDk6QLo1eHaulCtZbofYe/RzrPpdSKa3lQ4TZzzmtdoSvNva9KxgjCgbQH4KDleptRCF51VueOKqSb282GW+9P+sEoXCubWibgiOZi5H0eeJoZTcyCShWOYpJlcon4kUemOrftnumzHSysSF+OVEzO8v7wW61gIWWZ8qOvRJBNoHF1f+JA2T1dipPaUR+exflAmdzFwLbTtcWuHq3428MTRMQQc8Ep/r0iDqtr0x/u2gDu3bKjxtPHCGdZsA9Nfu+AinpfWIMjv1KUYuqRaTyZrJ8gTu8UEaIZeJKmLMluEewNYOMSk41G9RkBgDjuPLB52k2e7KS8vMQKEUVH4vjdVp6Wfea2vDi+tVNie6y74SCWTpGXeHu8UF5qrM3E6mR5vglFdToZI0e3ar1b9GYhFe60JigIkAHbihEkZsfaHHppJwSRseQ1s9kimmiQcjafrhZtHrBX2jXY6JCkXrNiXWRvIPBnZr2oCp83ZNmgVuWuybB0OX/oU1Si5yAIBXjymFYY2Q4zklJFdDmVlkE6WUrNuG73U660S4XhWd2+QfYhM81ZD7MT2gCQHQLgvCpvT+8xQKHh5ltMBSX2ZC5jm/SyMyYhiREZ6NFavvck89XfNsNG0pd2fTQPb11HMJrWLbrwnYmmA73nZzeu2No6z2Y5R481cdPdNbM5mI/W6JB1dL4xNeMAKbsEGQVVHKzBLzel6choUIzM4ZLETnu1rziyWciV7C9c/g3mYB4wfDaERasA9uuGI3tPtbHgTvvkCkQnbzFacGYfEcQvcbKLqr8d5ttP9weGAu8zI5lnrvBie3YF4mjrXOLQOExWWiaj753LsbdkzjicPxXKq1ImxzFfUeQ+W48A63fhM1ng0n7YW4r18TE/AbXWk+ZVf0IleU1RZB0l9tpbWDBqafY6vA1Ph2+mGWRqNV14rXkfbanffRvXV9UUSMWZoqw/sppWuQTMarI5lrc5W5JOQIVrvmNpcMcsvC4N1Arcwd+t4uoXm9e4ut0ScSQ2zDfTsy2xuW2zhnT0jGAzXE7o4bo6zmMlOi62XxfrwrvpTzOQRb3tcBjeNeq42HuXugmx0KgZzbeqNUldFwpg+6iEZQDv2Eb0usF+O5vZ+YUQ6OrYLASXKbS0XDAXtNhuOJgur0EfLIzyRsXe6ZJXhmslP9uqaYYqZ6adixyEnqxvKqSRS9moUs2ILfPIEX40Ub8vxq4E+hPtiPfUipPBDU95Eq7O+Jbltkjk0pq3HNXQ/Y6jQWe94b1BNNhfX3KxGsintt9tKT23t3KJcaxEYKJKhGwEaC8HyMNSNhR1sB21lyMGyUR2P3813h2J25iOZxjaTAoUpIVvqDQmHKKgZ4Qau58Hd8lzidUKy0AnA4mhLtf66OGfHxiD58DU7WpnzRiuE+ZHVA1k3DCjBwA4n41imQDtaGVoA9rFko9nBPvCRSBLa5Lb6aS9cJwuhjRODps2JySxXYiR7WnYCdDQJ1GS2Gm7l+YnfNwI9XRwngKQQ7N3QSeIBpVjBbm/O61nJ1sP9WLQBDtWlCbLcFSPlw2bW7qXrjKtPs4QPF8rJ8MNLeXA2QRwbpC/L6OA6l4k+CYtDvVacip/hAPSSBz4fnCWv72WardWSZcFjlJ1yu+CP2wHSjFW/DeZq0Lpuno+8YmYBHz+ZgxM3KKKxPLcWHnTSbH8X5ovSaLrz460zdjXfTICXGaZ2WJHPP7gt6CHTWXSl0/H1qnhTGNoX2tDOB9/vb7h0RfhhOOB1fTaeGMMWVe7Sd+tT7F6cVYrvWGymEXfKUHVEmtb41/7ELby+lkjaDCmMVI8ylqXyI2tqq2ZGomIywMLp8/TgXr294KO1lpvbwSlPR0eTsa5svRm
6B2fGCybD7M2q1AF7Tckn6R93YzrW5cvmpPDWfgiPWJnJI0fL+1iTtXALc7eovGkibS2xThKws2CMoeoUnzLAyywl3Ko3ChRXJ38CNM3gRidTrMa1rXvzncMBVNJNu0UpXztpIW7dqSnWS01W5nWf03LXNA/6Ns7h0lpK8YyAjnJwL8smmGrYhvOtDOBWqar9ZjjbRtf6YoRlFuwCzkfZFWsLUl2OSG3B/oycS0iyXeV6YC6TbX4I5KUADhOJDU4kb5ljsA00WCy3On2tpPF4u0SWdqCG5xPhiRzu1mGUHKiojnf5eDbNwXEpj8Mda3jbPGEvdKtWPsch6loxO8saKEL/GhzoYRbiz7WT5NuuKqOYGwtHI+BsaHjXzG2x5gXRkd5hVXSlB7vkELIFOnhH4xAcg1kjjUaCWl8uDqPuR1U58UlFNHeqd8ZiDkR7NjhR/Xm70AtwNk0chpHBotFlwOMYJ0kMHHoSX6SI6BP7tULxaJjPd+IhCK+0VQ+lS5UGbDKPDEedlIUl6TFutFWkHsdTa2lciyDZSUvvWk2h19ZezFQHty7Oy9h1k6HOHHDgh+8TV+nhIEY4oIiAoRFujYZz7dC2xaU/tIUlLcnCBVwWQ6fZULtZtnGqyWGKlDEvIBp/iLrYbkc4pJMl/FQsjq1N4Wp03YfiCRbRQk5MjfYwSQt6bItH1y00g5kG2qSJ2ymdCHLlb7l7+qU8gSnz2a6eTrU11sDBaDe4ziXTcJeAr7gRdllnTQ14rT3rq5w5b6Z0LbpDOdgZ2WKz9Jgh2VtMaAeteN1kTpXmSsKsTboRWCFFaYKV5pVogQXZaSvxAKOiwQCq11G1YZsEv5TcbC57v9+c9/R+VJRYANy92G66Y4wT7lz7Mjhzu8vWgXNum5I3ydYaeXsRbcBcoDh7Hinbdi6VhnXb8+x6/mB7nk3D/nWjHKKLMigwkKfDmN8UpKfMhviVR+rYneJX3msjs7ny7GbQr5Y/v0PcnXqVFywlFbojsyuyiR5ZTcFYrqbswBqPyX5+b7LrNERcT8A9iBZ4HkFeAE83fQdUj4IsD2nEsQDwDxu8P9p3GvI9HlIUR7EcKzAULbyyj/iXrvmV/ToB9WKnxN+0A+ff2oAT/Pxumw/7Iz/ZwPerfvBfhu7RggA5bByGwmEU9z1+8EsGBN/e+5LsYJl90cvdNEn2bunsHi6nvtv7P38s/FkAgpCBFM2Dh93uH/k3izsIYlkIeShwiH/YJ/axe9MU3QMU4GmW+DcCb9E4L9vifWxH+3s6wxe2LPX2vlPF5X23eHn0bnval8f3idfP8/SCDyZpsr87IoektW+P+8ltbR889nGvusPLX93E9sW2swLd4yCiaZ6mBIQYIDzpjQzTowHEJxCkEIZsln16/zsouL/lM6f7sf1pH+jga530qaddDmG5X2fODYouuZM99aogdoriAQKxRR7vTsuSvy/Mis9wtz/P3AAfv/vzVYN+gxzvTPpFVBRwQ0MAaIoTeFpADP906+AexwsYHyhG4DAsQo59iR49geEFTIA8i2/Eo5fgwaIexVI8RQCIZmmGfwtuhB16fBs9vg8lvoQ6b4Me7G9BD8Bi+GAohhNwxIwAR6EnfsuhHoQsxFTFY1ehn9HVG4IH+8bg8RVg+AKU/GnA+EbLM3xPoCmK5SkeCwYsFt4LYHAdYHwMwEC/BTCw3uxRWPMDHKYxgIHoqd7AvtrjECtAspEvFHAU8NsgA/1zkPGNtkdMD7AUCymA4YPjKe69QAbfQcbHgIwvBP6/ChkI9CDF8xzCYS/LMpB9ChkMdkuKg9jZOBw6s4j+bZAh/HuQgageTwJAiDhE4SjlebIG9RgsMAivMwgI8L3IjIcEzT9kKoYWepBBFA85CEh7fwhwp78jN/dDhnpmDo/d8x58zYA8vWNehPrfg4u/2KNwpE82l8B/cZwvIO4HzQRAj0eIJzkZCvIMeiUJ/lvs9F7ThPTXWfjX2dQleaTQfUKoLxHh8a++zvv0t4j/F5n4cVb+Qdw+Zmf6C375i+zMYAqgIEYMjsUMQUH0jJ2/Iejv3vAt2Jl+6wTiB4B8HvU4QUC4s3MUwKD/dEjtveYAaPiPYT7D4sCL42gKsJDnuIce+91m+luYz35MzP9pBP7zZPGWmM++gvnwFzH/u12F++fAF0KuRzZ3RCwtYH7jHkoN3vuQDf3Wia/3jr4QwyeDH06zAg8htgn7o4b6W/j7XhNfHf6+hr/MK/j7q0n073aV11JT3N1HzJzkiRNx5yp9aMz/+veFQX18ySlN0uKum+NGoG7ni5tnkLNU1tyOP/w6/i64+5e9fTqMC4d56u1jckC6Hc1Tskfvw08PL83eXhsfGZLvSaaGJW/Lkrrfb1wLPl37YI6fug39+TZ3DfbpzOcTd+316cRDTyIHyKOeoCI5eMNFcvzWe8gRcPvxMzqSYw+eR8489snn5z47+vMzj179DhW/p3E+nfrULp+dkSVA+elK/nM7UtdHN3h8/A4ZP52Dj07dQ+Ln36MenSTLtz6cCB49/7nVbj9+Mt3XTcn8SVN+0ShfNeUXHeA3mxIwP25L8EdseTv4FBzur3uBIncYRibB32DsAXLeUq84e953X5WcLr/f+X9cr/BYrnAMjSiBRjzA334MXcm8lnLvKKijoI6COgrqKOhDURBie5CGDOQoiLkCoA+S2mBeG0zsKKijoI6COgrqKOhDURAn9CDPQnwZixgyZ/CDUBDdUVBHQR0FdRT06MqOgj4kBfGox1A0EFgeW5BlWfqDUBDzTocN31W9/F+Z0fvm1XdkFhjFcBAhisOe+mz27k9Nv3vbOX5vV9/HvFY11imrTll1yqpTVp2y+lDKCkCqh1iaBZg0KBwzM09Lst5r4TLz2jTzjoI6CuooqKOgjoI+FgUxsIfjelaAvAAFgYLwY1DQa4X2HQV1FNRRUEdBHQV9LAqi+R6PIJk2Swlkm52PMX2TQT+RX/4rKeUfy/x+M5H8JEf+s+nq3zzj5E4fvH2e+euu+iwHjJ6tEvmFJHA/z53ro8syckHxlZcg6xcxPOJoimcQ7hZPawIYhkxEZxA+L7AUuHfTL2a1v34zjkzuQwh3LoEM6rPgWQ+5e/efzmXznYrrVFyn4joV9+jKTsV9TBUHuR4nAAghx7EQQfL7H0HFCT+h4roqgQ9ZJUAmsJNCfprFfsoB7IxPXPQnF91867U9365UAHZT0X7gNp286uRVJ69es2Unr/6+vMJQ2wMQII7nGJpjAfV0nOa9rmsJu6loP3CbjoI6Cuoo6DVbdhT09ykICLBHUzQPGYTje0qgPwYD0R0DdQzUMVDHQI+u7BjoYzIQz/dojhHwvxwNOPQwy/i9U9DHnIn25rniv18p8MrawHf64O0rBb7uqs+TwM+3MH2rUgEEeqxA8QKHPZ7sqfjkJb46uP+D9yIfCPc4GkKW54HAwLetFHgoS+1UXKfiOhX3u0zZqbhOxf2JVDbXwwqLExgWIKzGnq0Y/25V3HvdBuJdVQr8lf333r5SAFE9QcBiB0DsOwg8257qJ/fae+
vHvGGlQLcd7S9FNh+qCgY7HaRYHJiQCV8vNqr6n3Pt1/b56CKHLnLoIocucugih48VObBCD9CAZgDHoedrYb7bTXhhN8vlB27TMVDHQB0DvWbLjoHeAwMxPTJkQ3GIpxHk6A9CQUJHQR0FdRTUUdCjKzsK+pgUBFGPhgzmGZ5seQ5p7mNwEEt9yBzz/2AVzL3Bn1TBCL8n14x9lWNxuI4QgCzPPi8a+bks8I9WwdAM6AHIUPg9eI5h+acLN3+9cuVHb4Y/EYJkfjNkKdzFWPisi/xaGQzbTaf5gdt0Oq7TcZ2Oe82WnY57BzqOY3sURwGWY1mGpoXnqwS8VxlHv0JBz+xEpFL2o03npkmyd0tn93AX6oeblKHYHscDhmexE9M8eLo617cLi2imh1iWEwR8kcChh8T9b2/Sj1kf/q4ri3C/xhZkHO4lEuyBx+7Rz2vqux7w9lr5dee7d16a77E08V1EAUBzD0HfD5dl/MpD3q4o414Wdx7/AzHkY4/+Vl/4MB4POCxTEIUJDDK0gP6XXb7bNOEHbtOFbl3o1oVur9nyHwzdntHXa4T400Hal6jt0/K4PR4hHgoIUZBnHnaA//tRWLf/wQ/cpmOTjk06NnnNlh2b/Ek2eZ0r/j6ZdFMkfuA2HZl0ZNKRyWu27Mjkj5DJ3+eLbkLDD9ym44uOLzq+eM2WHV/80VQW6vGkEA9yLNkCgQPvJfp4rztwoO8cDPy0DZr1+IffNvjtxk5RhO6TMT/wwqPeaGWNZ5Wj96Z6XDnK/p6d1hiG6yEGUZASBIZslP68zpLq0RRgIINdlaOphyX23n7cjqNeETvP/PWv1btwTE+AZDEHBpCtd4Qf3TnxLxW8PGDPu+vy72r4/90UvNx1ga7g5VdQ5CVodB7/fgtefp/H/zsFLw97o3ZZgi5L0GUJfpcpuyxBlyX4oSzBl6jtnRe8cLBjk45NOjbp2OTRlR2bvHM2eacFL1xXi/8Dt+nIpCOTjkxes2VHJn+ETP4+X3TV9j9wm44vOr7o+OI1W3Z88UdTWe+04IVD73QssCt4oZ8VvDzUZDwueLnLRL59wQtP93geQYrnKB5id31ayyFgX0X4HIA8Q3MCRX/XsN2PrpTGPN+mA3DPPPvX1i/jXqscftYX/lYxDaSZHg04CrGIhQIS2KdNAXscL7CIozBwcSyADzmEd1BN814L6N5VbcH7qabhfwt+/EvVNIjqPP4jVdP8No//d6ppULfy5w/cpktBdCmILgXxmi27FMTbpiC+QG3vvJrmYaHtjk06NunY5HeZsmOTjk3ekk3eaTUN6gr9f+A2HZl0ZNKRyWu27Mjkj5DJ3+eLrpT/B27T8UXHFx1fvGbLji/+aCrrnVbTIPadjgV+bzXNw5X/RkHNfaz4uKDmLhn55sODEAKy3w+FII8fwT9bG4UDPQQpBgn4MgGQmo/vGrn70YIayMAnj+XpZ779a/U06LXC5F/Bm4C4xn++UBHxeaOsL2yt9f0baJErH7rowwI7P7eb1qdKJaHH8diYtIDtzXDMy+wIAj3iFCzFCRSLBJp9CVBkNJdhAMfxAtm98aFc6Zfw6eXiuuQLNUiTuodJwjkREyS7gvwzc677/IUFcbOUT8301DT3Y/iPrXJ/yInDICE9HjcjvjEjkkYOXSfu3584hZ53Q8TX/OKp5zx2jdcNTY7K9xp9aOxzz0mcF7b+ij/c4SugvpexvlHQhWjcxTHTIBYisjcYeNITASX0KIaiOVJVx+MvLwlNAD2eQ5DFnCdABgD40mEw6d1qAyiKQ9jUb8Fn31Ei93Zd+ptFTn+zS0O2R7MQ92UKsCxg+PfSo4XXe7SeO0mRpQWR63dScJd/OZj9HKyCL0Wrr0IE/oUOJd4SJV73oU/rzGHZy2Exy0KBxygA6feBEvxrq9Jxn9r3u93uhc/dPLnvOdlNeWGaOjhJQiIcqu/iQ1iQpcmjSOfuYV+IdP7HPPKx94E38j6G5XoCZLE8BQJFM+iZ90GhB6hH3vcSAgGge1CgEAdx4EXzDMX8Eff7cjXar7kfhjzXKfcJ/v9nUPTF/eS9U1b5vvPYt/NYAfZYSuCwsuIhS6Fn63ICihRl4vM0KwCI9cULj+WEHsMJDAU5IGBnZP8MXtKvc/YoD5/42Z1fSKfd3vPCJHinDvI189+bm34jc0OyoT3WyNjuiOSInu2py8Iex3Isw7CARozwCj5BqifwPEYnFv8HIPoz+PQxd9T9Drt+Z+7lIWZ4lHoR2K+rbapHQRYbCnEsALzwFaH8eqqDY4mQ4iBDc5CiIHia+IBMj8csJvACvj3PPs/8vVW6BfYgAIgDLMSuxAjs83dALM/jqzAesPz9NgZvlYt5+EiPIGZy/7rP8UXN052zC+OwvP4DCPOF9N6D23A8BhiMKwzu+wA97HN+b7L/gh52Fxy+MSyNMYJj0StRIEYoLJI4jqT5aCjQX0AY7NiswCOG4z+lIX8JY14uIiNiQ3Tm/gVzs1gbYHszDAcRxFr2Zb3s3zL2Wydav7PFX42Cn6ZqfN7du+5ryZ0dRlz2u+Pkb9gNsr2HSaeP+ubLcBiQl3xpE/rTtb9khtd2KvvnzPBMgpE+871mQD2OfQMz/MQGQO5dg5BQLQ92/4/6D6mCwE+iHn33/8m3n+s5Htd7fEqD4QbFsHBDu5uxnh9/eov79yB3SNL85MRPT1/u24ach3dvcjsZY2m3z/9LCkxIFPDa7xPg/u89BpPT9zD85HSIHTEpP5ekPDlZknShj2/6cPtk/+mCS5p7T5/++Nd3nwb//vusVWmSIbhvTZpg68P37KO29cIii537dg2TOHz0YD9OnfLxC72soHmLMP9FUpOjsv+Q5Ry+EKZ/38j6PZd+R5f/4STiN3olhrdngTADezRDPfrzkspo0HstY/0wdf2XOujLFPXqVsLQSZOvWBFRX5EmgKZ6kPqsTeDL8aK/pE2E13LB/zgp3qz1/aTIvIE2EV7Lif7rZngYK/u2Ed5gYE542eB/POvzsat6fqqS6T/fmY568wofDgo9xAhAuMExSz8FbIwAgsCzLMMjVoC/q8CHY6knT4VfSSq9/GWGex5UQf6pbngGTF9YteAn0lXCT8za6TR8p+H/dzU8eK7hAQd61Ms6Mgws9GujGHQPvEFsLXwhjfxf5+KQgUxqnBSlk5Dyb+pfGqf6Je3OY5OBR9r9leLAv6TdX1uJ/Jkti4OTkW/Dk0Mo9pNBZkQyqGkR3moymOEuLcv09GUtQYTg7c8rRi2JvhGdItu75NP6YUOMKN4e2X84Sj0cwd8fyjIrbvAg4//CXhYm4SnoufgFaBk3MgYM2cHNLBNEkH3+9iNH+Ty9Y3nI8A5yWMS7HGLd/Y6FaM+DXpQF3+sNXx+0ZJ+W+rEQq5tXZsvSPfSKEuWwqBDewLIvM8bkC2WkDSmn+aHSBvoLBWLjJKvKf6Dvf2OIGvUEHvddiABLMc8rrjkAe1hQIUTRPCs8BHqP3UDokbFnBLGWQywlwD9SkCC8dSb7q2Wen8K8rweGv1zQ+Q2M/sSb97ZB2DZYt7MMg
lBgX6mt+zsFnkKX3u6k8f+KNE5JM5dPY/Vf68bC04iXYz7r4T+T3RbewYKnn/M8/3ia5/FMrofh2e9wpl9M/SDA9QTuU+rn+dwu9DT18/Tub5T5QeDpUpAcZJ858ddSP4hies9qzUn+9HlM8Xb5ntuM2L/ca96J9/9k3/2ODvAAjX+gAzBPYxxEgT/i5Yh6kuD8dq+4n6nwvb2CfvVpv6U//MSKqp3K61Teu1R5v0HXIRb8YV0HHnp/R1G/uKLAm/Pa93vTLws79MwLnzHA7+I17is7XXyPesM3+K3q7eVwnVzFMfnIg7u9L8iEqW5q6BsCIpm6wrGPsk/Pk1dU71bYJQgMTebGvIDKPzEz9D/3i9M8cqzPy9Iw0v8B</diagram></mxfile>
2108.02708/main_diagram/main_diagram.pdf ADDED
Binary file (78.5 kB).

2108.02708/paper_text/intro_method.md ADDED
@@ -0,0 +1,23 @@
+# Introduction
+
+Presented with a single image of a generic object, say an airplane or a chair, our goal is to restore its 3D shape together with an embedded skeleton. With the rigged 3D model, we can manipulate the object and generate plausible articulations and possibly fun motions, such as an airplane flapping its wings or a chair walking as a quadruped, as illustrated in Fig. [1.](#page-1-0) The new problem considered in this paper essentially entails the extraction and manipulation of objects from images, which could have many downstream applications in virtual reality or
+
+Project webpage: <https://kulbear.github.io/object-wakeup/>
+
+<sup>\*</sup> equal contribution
+
+augmented reality scenarios. It is worth noting that there have been research efforts [\[15\]](#page-14-0) on 3D manipulation from a single input image, where the main focus is on rigid transformations. To create non-rigid deformations, practitioners have had to rely on professional software and intensive user interaction. Instead, we aim to automate the entire pipeline of object reconstruction, rigging, and animation. The objects considered here are articulated, i.e., capable of being controlled by a set of joints. In a sense, our problem can be viewed as a generalization of image-based 3D human shape and pose reconstruction to the generic objects encountered in daily life, as long as they can be endowed with a skeleton.
+
+![](_page_1_Figure_4.jpeg)
+
+Fig. 1. Two exemplar results. Given an input image of an airplane or a chair, our approach reconstructs its 3D shape with an embedded skeleton and animates plausible articulated motions.
+
+<span id="page-1-0"></span>Compared with the more established topic of human shape and pose estimation [\[40\]](#page-16-0), there are nevertheless new challenges to tackle. To name one, there is no pre-existing parametric shape model for general objects. Besides, the human template naturally comes with a skeletal configuration for 3D motion control, along with precise skinning weights designed by professionals. For generic objects, which usually have complex and diverse structures, the skeletal joints are yet to be specified, not to mention the skinning weights.
+
+To address these problems and restore a rigged 3D model from an input image, building upon prior achievements in 3D object modeling, we propose a stage-wise framework consisting of two major steps. Step one performs 3D shape reconstruction from a single image. We improve the baseline method by incorporating a transformer-based [\[34\]](#page-15-0) encoder as the feature extractor, as well as an auxiliary voxel prediction module with an improved loss function [\[21\]](#page-15-1). Given the reconstructed 3D model, this paper focuses more on the second step of predicting both the skeletal joints and bones. We propose a novel skeleton prediction method, reformulating it as the estimation of a multi-head probability field, inspired by the deep implicit functions of [\[21\]](#page-15-1). Specifically, in contrast to previous skeleton prediction methods with voxel-based [\[44\]](#page-16-1) or mesh-based [\[43\]](#page-16-2) representations, we predict the existence probability of joints and bones at any point in continuous 3D space. To further improve performance, a joint-aware instance segmentation, which considers regional features of neighboring points, is incorporated as an auxiliary task.
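To make the multi-head probability field concrete, here is a minimal sketch, assuming a PyTorch-style implicit decoder conditioned on a latent shape code. The class name, head layout, and all dimensions are illustrative assumptions, not the paper's released SkelNet code.

```python
# Illustrative sketch only -- not the authors' implementation. A shared
# backbone maps (shape code, 3D query point) to features; separate heads
# predict joint probability, bone probability, and auxiliary joint-aware
# instance-segmentation logits at that point.
import torch
import torch.nn as nn

class MultiHeadSkeletonField(nn.Module):
    def __init__(self, latent_dim: int = 256, hidden: int = 256, num_parts: int = 16):
        super().__init__()
        self.backbone = nn.Sequential(
            nn.Linear(latent_dim + 3, hidden), nn.ReLU(),
            nn.Linear(hidden, hidden), nn.ReLU(),
        )
        self.joint_head = nn.Linear(hidden, 1)        # P(a joint exists at p)
        self.bone_head = nn.Linear(hidden, 1)         # P(a bone passes through p)
        self.seg_head = nn.Linear(hidden, num_parts)  # auxiliary segmentation logits

    def forward(self, z: torch.Tensor, pts: torch.Tensor):
        # z: (B, latent_dim) shape code; pts: (B, N, 3) continuous query points
        z = z.unsqueeze(1).expand(-1, pts.shape[1], -1)
        h = self.backbone(torch.cat([z, pts], dim=-1))
        return (torch.sigmoid(self.joint_head(h)),
                torch.sigmoid(self.bone_head(h)),
                self.seg_head(h))
```

Sharing one backbone across the three heads is what would let the auxiliary joint-aware segmentation task shape the features used by the joint and bone heads.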
+
+Our major contributions are as follows: 1) a new object wake-up problem is considered, for which an automated pipeline is proposed to restore 3D objects with embedded skeletons from single images; to our knowledge, this is the first attempt to deform and articulate generic objects from images; 2) a novel and effective skeleton prediction approach with a multi-head structure is developed by utilizing deep implicit functions; 3) in-house datasets (SSkel & ShapeRR) of general 3D objects are constructed, containing annotated 3D skeletal joints and photo-realistic re-rendered images, respectively. Empirically, our entire pipeline achieves satisfactory results, and further evaluations on public benchmarks and on our in-house datasets demonstrate the superior performance of our approach on the related tasks of image-based shape reconstruction and skeleton prediction.
+
+![](_page_3_Figure_1.jpeg)
+
+Fig. 2. Our overall pipeline. It starts with the proposed transformer-based model for 3D reconstruction, consisting of a DeiT image encoder, an auxiliary 3D CNN voxel prediction branch, and the occupancy decoder. The proposed SkelNet takes a high-resolution voxelization of the reconstructed 3D mesh as input and predicts the articulated skeleton with a multi-head architecture.
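A usage sketch under the same assumptions: because the field is defined over continuous space, joint candidates can be read off by querying a regular grid and thresholding the predicted joint probability. The grid resolution and threshold below are arbitrary illustration choices, not values from the paper, and `MultiHeadSkeletonField` is the hypothetical module sketched above.

```python
# Usage sketch: sample the (hypothetical) field on a regular grid and keep
# points whose predicted joint probability clears a threshold.
import torch

field = MultiHeadSkeletonField()
z = torch.randn(1, 256)                       # stand-in latent shape code
axis = torch.linspace(-1.0, 1.0, steps=32)
grid = torch.stack(torch.meshgrid(axis, axis, axis, indexing="ij"), dim=-1)
pts = grid.reshape(1, -1, 3)                  # (1, 32**3, 3) query points
p_joint, p_bone, _ = field(z, pts)
joint_candidates = pts[0][p_joint[0, :, 0] > 0.5]  # (K, 3) candidate joints
```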
2108.10274/main_diagram/main_diagram.drawio ADDED
@@ -0,0 +1 @@
<mxfile host="www.draw.io" modified="2019-12-05T08:08:05.990Z" agent="Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.97 Safari/537.36" etag="Xht7qL39BzMyQLEOSI1V" version="12.3.6" type="google" pages="1"><diagram id="ohCJ068hNRihqZjMbE14" name="Page-1">7V3fc5s4EP5r/BgPQogfj42ba28m6XTq3ty9dYjBNlcMPowb5/76EwY5RitPnStYK5y+1BZEhu9bab9drWBEJ6vdhyJcLx/yKE5HthXtRvT9yOb/GOX/VS3PdYvnWXXDokiiuom8NEyTf+OmUZy2TaJ40zqxzPO0TNbtxlmeZfGsbLWFRZE/tU+b52n7V9fhIgYN01mYwtY/k6hc1q2+7b20f4yTxVL8MnGD+sgqFCc3d7JZhlH+dNRE70Z0UuR5WX9a7SZxWoEncKn/7rcTRw8XVsRZec4fTL/8EZQfd39t1xv/+7fd33/Sb8831Km7+RGm2+aOm6stnwUERb7NorjqxRrR26dlUsbTdTirjj5x0nnbslyl/BvhH+d5VjYskoB/D9NkkfEvM36VccEbmt+LizLenbwTcsCHG1acr+KyeOanNH/gNog2JkVoA/nTC0Ge1ZyzPCLHdprGsDGKxaHrF9z4hwa6V8BILPIaHMnrcPRUOM6TNJ3kaV7sO6fz+dyezXj7pizy7/HRkch9dJnbDfKehHxgAeRdX4E8sXtDHuD+e7be8p99N7LdcFWhmj1u1vvbt2ZpmKxG1bVU34ptmmSL/dyxWnEANoAxDlTZpqWNbpZnsURF0yToSuN51UMFesInlXdN8yqJoupHlFbQHm+yIXTA4g2TaGQM0HhgrDWAWE8s2pDGr/l33vAJcMKn0XX1Mc3z9X2ySsoOxtKmOWh1gy9xiIQvHCZMAS/ta5CIyeh4dmK307vPI/a+a4CVk37XAAcUGcBUBfDkfmoowLbDcAFMXQDwHZ/g0wq7zTrMWgi7/2wrUbVH7aZGhnsDi3jr3R4ecZx/WjT/1x1tH3+pHyI64re476vdfdW6v1TR3JXOUs5pHViB33YTthA52ozAM8AIjibWQRmDPOfqtwbfDGsQXmBQ1iA7CP3WECisYWKPbh10NjE0N2EHHi5bEGF+Kxj/iRHscbzZ1EBW9GV5sQrTOm48l1nJ4gCTl7iGrqz0YWBWSlmAzEphzIvSfw1SzTiEILMGGKKjtIZBqhnHRaZtHZhPEL7lTdNcwiIYYWNkCteBSzW4beJ+aDbhYdO5zAyfMUgF4VJsCqKLFKnbkTWc7EezgnD7sgbfwWUNHsyO1StqBIBpwIqalInWvhzhwWzTeDwGyP76+vEB0I5WkJ1u+CDEbRPiQkIcBSG9lQH4Z9Rf1NiLKh5bIqYZBKvdoipkGs/T/Gm2DItyHGZZXoZlkmff7BFY2U/Dxzj9nG+S6gTeXNQ3e7vOk4q5ux/7wgL1qCmafvn3oKuVUdJWrT5c2VfxEvTGCwxrJ3URBvqh0lGxxaGQ4rkNtbaRAkPL2jE8mOgY5Kyeds/gKwrqTC61kBNl+gGGQY/RpRZy7kk/wDCOMFg5yotT+uGFRQxXJR0plexdu3SEkRI09CuQjpQik44wxvoiKnan/J7GyikJ37jpSEfaUoZBoSOJaiLrbdyICwB+4t5EP8E8T0r4E8cfwyFwUV8RKKJak8WkSy2EIMMQ1WhB6foUIcinok4jRSVcHSROoB1iGHlelbBkjMikMBUpFxWXgSJavUZxyZjdokaImZ+JS9oXLzDIBeISMoVv7HQkLh0pStYvLk0o9f80sCVt4sjyTPsyZgCD86tyah6R89u+XodGxLR4xMj7ZFMm6S2/TwOI6WjGpPJ2mJexo48ahchGN2kOrVLQ8+U509OcYSYWjATw2cFQ68LAPjnxQBB95gBjEJTmgK+0XH7Ghj+L1c/YePSZw3rae43AgBQrgkhrjofmXcDeOv3GACOzn1pBJxvbvr5trkNrpmBznX4NpKh/RTdbDVUDgd112s1BPE5O4cLeHJmmDVXaXZmw0s6Mou/dE7p2VPW1awIuuiMwCpjfenMcl9tUBTLQ+g3ChKzaW/h8uvwBgQmZkJAb3moWkc1Ad4nGYXfOtS5nHUbCITDQvZxFYG7rqhiRq34PWy31MWJqgXxHK4uwjgkBJ6Yuw3fEiWdLvkSEjvoYufIdyHJhhP4xYsMU01UxIu8Jt0UkrY8RmN8xg5GualQo8CTa5y0bplfM4KSjUaLw7vo5URbnx1VYerd6jKMoyRZX+3IE2zqvVNl2eqNHVUNeB+XV/Z8d9IOIf8JP4iRuw1Ri+hDx1/2fiPixGQDryQC8Mw2gv/EJtR4gQ+wT2HAG0/hd9eYjDkeUFPGsKeXf5Nvq8l+TmmGQKz0lMS8bOQQpAczpiX5bqRyvL1IoFBfXRYorarMbSqgFFfiFKYHaAlDyv1/CpIC9AxA9CUMBzvELlTxbMdn0tTGGKN60oc2uuwDYas/mVPG4gYtaqa14Tp/JADuyCesHWFWhZi7A0PkhgPgMRZIme60HdzeqUL4ooocC+cOsC1chiErh2X3hyWDuyCQ8pUe6IMDzDHGGGE8XHZ6K95sZhGdgYcPzjNeu4sXTlnLvCPA8Y5s9YjwdbHiKkMNQPD2GDU+j50+KTi95Rs+flKLD0+j5k7oEG55nxPiI8USnl7wzQnq8eDo2OjyNjt8dx0OGp290/O54LjY8jY7fmYUOT6P1PEOXX1I8RN8kPF0bG55G63kWoMPTaD3vEnlVVDueRut5F59eMlrPux46PM94hj1ePD10+SXFs+dNwhOdXlI8m/hjEkX7EtP78DkuALrYigt7qi6lDlzKv2xxIT0jdB10HZtcwGIHcLRcto7NgrMPvu3CbzvOT749vL/95vxrkVc0HI59qJ4K/pBHcXXGfw==</diagram></mxfile>
2108.10274/main_diagram/main_diagram.pdf ADDED
Binary file (14.9 kB).

2108.10274/paper_text/intro_method.md ADDED
@@ -0,0 +1,362 @@
+# Method
+
+Isabelle Augenstein augenstein@di.ku.dk
+
+January 2021
+
+![](_page_8_Figure_0.jpeg)
+
+This document is a *doktordisputats* - a thesis within the Danish academic system required to obtain the degree of *Doctor Scientiarum*, in form and function equivalent to the French and German Habilitation and the Higher Doctorate of the Commonwealth.
+
+This thesis documents and discusses my research in the field of content-based automatic fact checking, conducted in the period from 2016 to 2020. The first part of the thesis offers an executive summary, which provides a brief introduction to the field of fact checking for a broader computer science audience; a summary of this thesis' contributions to science; a positioning of these contributions in the broader research landscape of content-based automatic fact checking; and finally, perspectives for future work.
+
+As this thesis is a *cumulative* one as opposed to a monograph, the remainder of this document contains in total 10 technical sections organised in 3 technical chapters, which are reformatted versions of previously published papers. While the chapters are all self-contained, they are arranged to follow the logical steps of a fact checking pipeline. Moreover, the methodology presented in Section 8 builds on Section 6, which in turn builds on Section 4. It is therefore advisable to read the chapters in the order in which they occur.
+
+This doktordisputats documents research within the area of Applied Computer Science, more specifically within Natural Language Processing. As such, the road from an idea, including a detailed plan for the experimental setup, to results that can be interpreted and written up in a paper is long and winding, involving writing and debugging code as well as starting and monitoring experiments on the computing cluster. I could therefore not have produced this thesis on my own while also fulfilling my duties as a faculty member at the University of Copenhagen. I am immensely grateful to all the early-career researchers for choosing to work with me and being inspired by my research vision.
+
+In a similar vein, this research would not have been possible without external research funding. I should therefore like to give my sincere thanks to the funding programmes of the European Commission, which have funded most of the research in this thesis. The very first project I worked on related to fact checking was funded by the Commission, namely the PHEME FP7 project (grant No. 611233). Later on, two of my PhD students were supported by the Marie Skłodowska-Curie grant agreement No. 801199. Moreover, two of my collaborators for research documented in this thesis were supported by EU grants, namely ERC Starting Grant Number 313695 and QUARTZ (721321, EU H2020 MSCA-ITN).
+
+Next, I would like to thank my external collaborators and mentors over the years who supported and guided me. Working out how to succeed in academia is anything but easy, and it would be even harder without positive role models and fruitful collaborations.
+
+Last but not least, I would like to thank my partner, Barry, for his unwavering support, having read and provided useful comments on more of my academic writing than anyone else, including this thesis.
+
+The past decade has seen a substantial rise in the amount of mis- and disinformation online, from targeted disinformation campaigns to influence politics, to the unintentional spreading of misinformation about public health. This development has spurred research in the area of automatic fact checking, from approaches to detect check-worthy claims and to determine the stance of tweets towards claims, to methods to determine the veracity of claims given evidence documents.
+
+These automatic methods are often content-based, using natural language processing methods, which in turn utilise deep neural networks to learn higher-order features from text in order to make predictions. As deep neural networks are black-box models, their inner workings cannot be easily explained. At the same time, it is desirable to explain how they arrive at certain decisions, especially if they are to be used for decision making. While this has been known for some time, the issues it raises have been exacerbated by models increasing in size, by EU legislation requiring models used for decision making to provide explanations, and, very recently, by legislation requiring online platforms operating in the EU to provide transparent reporting on their services. Despite this, current solutions for explainability are still lacking in the area of fact checking.
+
+A further general requirement of such deep learning based methods is that they need large amounts of in-domain training data to produce reliable explanations. As automatic fact checking is a very recently introduced research area, there are few sufficiently large datasets. As such, research is needed on how to learn from limited amounts of training data, for instance on how to adapt to unseen domains.
+
+This thesis presents my research on automatic fact checking, including claim check-worthiness detection, stance detection and veracity prediction. Its contributions go beyond fact checking, with the thesis proposing more general machine learning solutions for natural language processing in the area of learning with limited labelled data. Finally, the thesis presents some first solutions for explainable fact checking.
+
+Even so, the contributions presented here are only a start on the journey towards what is possible and needed. Future research should focus on more holistic explanations, by combining instance- and model-based approaches, by developing large datasets for training models to generate explanations, and by pursuing collective intelligence and active learning approaches for using explainable fact checking models to support decision making.
+
+In the past decade, there has been a substantial rise in the amount of mis- and disinformation online, from targeted disinformation campaigns to influence politics to the unintentional spreading of misinformation about public health. This development has spurred research in automatic fact checking, from approaches for detecting check-worthy claims and determining the stance of tweets towards claims, to methods for determining the veracity of claims given evidence documents.
+
+These automatic methods are often content-based, using natural language processing methods that employ deep neural networks to learn high-level abstractions in the data for classification. As deep neural networks are black-box models, there is no direct insight into why the models arrive at their predictions. At the same time, it is desirable to explain how they arrive at particular predictions, especially if they are to be used for decision making. Although this has been known for some time, the problems it raises have been exacerbated by models growing in size, by EU legislation requiring models used for decision making to provide explanations, and, recently, by legislation requiring online platforms operating in the EU to provide transparent reporting on their services. Despite this, current solutions for explainability within fact checking are still lacking.
+
+A further general requirement of such deep learning based methods is that they need large amounts of in-domain training data to produce reliable explanations. As automatic fact checking is a very recently introduced research area, there are few sufficiently large datasets. Research is therefore needed on how to learn from limited amounts of training data, such as how to adapt to unseen domains.
+
+This doktordisputats presents my research on automatic fact checking, including claim check-worthiness detection, stance detection and veracity prediction. Its contributions go beyond fact checking, with the thesis proposing more general machine learning solutions for natural language processing in the area of learning with limited labelled data. Finally, this doktordisputats presents some first solutions for explainable fact checking.
+
+Even so, the contributions presented here are only a start on the journey towards what is possible and needed. Future research should focus on more holistic explanations by combining instance- and model-based approaches, by developing large datasets for training models to generate explanations, and by collective intelligence and active learning approaches for using explainable fact checking models to support decision making.
+
+The following published papers are included in the text of this dissertation, listed in the order of their appearance:
+
+- 1. Dustin Wright and Isabelle Augenstein (Nov. 2020a). "Claim Check-Worthiness Detection as Positive Unlabelled Learning". In: *Findings of the Association for Computational Linguistics: EMNLP 2020*. Online: Association for Computational Linguistics, pp. 476–488. doi: [10.18653/v1/2020.findings-emnlp.43](https://doi.org/10.18653/v1/2020.findings-emnlp.43). url: <https://www.aclweb.org/anthology/2020.findings-emnlp.43>
+- 2. Dustin Wright and Isabelle Augenstein (Nov. 2020b). "Transformer Based Multi-Source Domain Adaptation". In: *Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)*. Online: Association for Computational Linguistics, pp. 7963–7974. doi: [10.18653/v1/2020.emnlp-main.639](https://doi.org/10.18653/v1/2020.emnlp-main.639). url: <https://www.aclweb.org/anthology/2020.emnlp-main.639>
+- 3. Isabelle Augenstein, Tim Rocktäschel, Andreas Vlachos, and Kalina Bontcheva (Nov. 2016a). "Stance Detection with Bidirectional Conditional Encoding". In: *Proceedings of the 2016 Conference on Empirical Methods in Natural Language Processing*. Austin, Texas: Association for Computational Linguistics, pp. 876–885. doi: [10.18653/v1/D16-1084](https://doi.org/10.18653/v1/D16-1084). url: <https://www.aclweb.org/anthology/D16-1084>
+
+An earlier version of this work appeared as:
+
+- Isabelle Augenstein, Andreas Vlachos, and Kalina Bontcheva (June 2016b). "USFD at SemEval-2016 Task 6: Any-Target Stance Detection on Twitter with Autoencoders". In: *Proceedings of the 10th International Workshop on Semantic Evaluation (SemEval-2016)*. San Diego, California: Association for Computational Linguistics, pp. 389–393. doi: [10.18653/v1/S16-1063](https://doi.org/10.18653/v1/S16-1063). url: <https://www.aclweb.org/anthology/S16-1063>
+- 4. Arkaitz Zubiaga, Elena Kochkina, Maria Liakata, Rob Procter, Michal Lukasik, Kalina Bontcheva, Trevor Cohn, and Isabelle Augenstein (2018). "Discourse-aware rumour stance classification in social media using sequential classifiers". In: *Information Processing & Management* 54.2, pp. 273–290. issn: 0306-4573. doi: [10.1016/j.ipm.2017.11.009](https://doi.org/10.1016/j.ipm.2017.11.009). url: <http://www.sciencedirect.com/science/article/pii/S0306457317303746>
+
+An earlier version of this work appeared as:
+
+- Elena Kochkina, Maria Liakata, and Isabelle Augenstein (Aug. 2017). "Turing at SemEval-2017 Task 8: Sequential Approach to Rumour Stance Classification with Branch-LSTM". In: *Proceedings of the 11th International Workshop on Semantic Evaluation (SemEval-2017)*. Vancouver, Canada: Association for Computational Linguistics, pp. 475–480. doi: [10.18653/v1/S17-2083](https://doi.org/10.18653/v1/S17-2083). url: <https://www.aclweb.org/anthology/S17-2083>
+- 5. Isabelle Augenstein, Sebastian Ruder, and Anders Søgaard (June 2018b). "Multi-Task Learning of Pairwise Sequence Classification Tasks over Disparate Label Spaces". In: *Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long Papers)*. New Orleans, Louisiana: Association for Computational Linguistics, pp. 1896–1906. doi: [10.18653/v1/N18-1172](https://doi.org/10.18653/v1/N18-1172). url: <https://www.aclweb.org/anthology/N18-1172>
+- 6. Johannes Bjerva, Wouter Kouw, and Isabelle Augenstein (Apr. 2020b). "Back to the Future – Temporal Adaptation of Text Representations". In: *Proceedings of the AAAI Conference on Artificial Intelligence* 34.05, pp. 7440–7447. doi: [10.1609/aaai.v34i05.6240](https://doi.org/10.1609/aaai.v34i05.6240). url: <https://ojs.aaai.org/index.php/AAAI/article/view/6240>
+- 7. Pepa Atanasova, Jakob Grue Simonsen, Christina Lioma, and Isabelle Augenstein (Nov. 2020a). "A Diagnostic Study of Explainability Techniques for Text Classification". In: *Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)*. Online: Association for Computational Linguistics, pp. 3256–3274. doi: [10.18653/v1/2020.emnlp-main.263](https://doi.org/10.18653/v1/2020.emnlp-main.263). url: <https://www.aclweb.org/anthology/2020.emnlp-main.263>
+- 8. Isabelle Augenstein, Christina Lioma, Dongsheng Wang, Lucas Chaves Lima, Casper Hansen, Christian Hansen, and Jakob Grue Simonsen (Nov. 2019b). "MultiFC: A Real-World Multi-Domain Dataset for Evidence-Based Fact Checking of Claims". In: *Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)*. Hong Kong, China: Association for Computational Linguistics, pp. 4685–4697. doi: [10.18653/v1/D19-1475](https://doi.org/10.18653/v1/D19-1475). url: <https://www.aclweb.org/anthology/D19-1475>
+- 9. Pepa Atanasova, Dustin Wright, and Isabelle Augenstein (Nov. 2020c). "Generating Label Cohesive and Well-Formed Adversarial Claims". In: *Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)*. Online: Association for Computational Linguistics, pp. 3168–3177. doi: [10.18653/v1/2020.emnlp-main.256](https://doi.org/10.18653/v1/2020.emnlp-main.256). url: <https://www.aclweb.org/anthology/2020.emnlp-main.256>
+- 10. Pepa Atanasova, Jakob Grue Simonsen, Christina Lioma, and Isabelle Augenstein (July 2020b). "Generating Fact Checking Explanations". In: *Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics*. Online: Association for Computational Linguistics, pp. 7352–7364. doi: [10.18653/v1/2020.acl-main.656](https://doi.org/10.18653/v1/2020.acl-main.656). url: <https://www.aclweb.org/anthology/2020.acl-main.656>
| 65 |
+
|
| 66 |
+
The papers listed below were published concurrently with my research on content-based automatic fact checking, and are unrelated or marginally related to the topic of this dissertation. Therefore, they are not included in it. They are listed here to indicate the broadness of my research and to contextualise the research presented in this dissertation. In chronological order, those papers are:
|
| 67 |
+
|
| 68 |
+
- 1. Leon Derczynski, Isabelle Augenstein, and Kalina Bontcheva (July 2015a). "USFD: Twitter NER with Drift Compensation and Linked Data". In: *Proceedings of the Workshop on Noisy User-generated Text*. Beijing, China: Association for Computational Linguistics, pp. 48–53. doi: [10.18653/v1/W15-4306](https://doi.org/10.18653/v1/W15-4306). url: <https://www.aclweb.org/anthology/W15-4306>

- 2. Piroska Lendvai, Isabelle Augenstein, Kalina Bontcheva, and Thierry Declerck (May 2016). "Monolingual Social Media Datasets for Detecting Contradiction and Entailment". In: *Proceedings of the Tenth International Conference on Language Resources and Evaluation (LREC'16)*. Portorož, Slovenia: European Language Resources Association (ELRA), pp. 4602–4605. url: <https://www.aclweb.org/anthology/L16-1729>

- 3. Georgios Spithourakis, Isabelle Augenstein, and Sebastian Riedel (Nov. 2016). "Numerically Grounded Language Models for Semantic Error Correction". In: *Proceedings of the 2016 Conference on Empirical Methods in Natural Language Processing*. Austin, Texas: Association for Computational Linguistics, pp. 987–992. doi: [10.18653/v1/D16-1101](https://doi.org/10.18653/v1/D16-1101). url: <https://www.aclweb.org/anthology/D16-1101>

- 4. Ben Eisner, Tim Rocktäschel, Isabelle Augenstein, Matko Bošnjak, and Sebastian Riedel (Nov. 2016). "emoji2vec: Learning Emoji Representations from their Description". In: *Proceedings of The Fourth International Workshop on Natural Language Processing for Social Media*. Austin, TX, USA: Association for Computational Linguistics, pp. 48–54. doi: [10.18653/v1/W16-6208](https://doi.org/10.18653/v1/W16-6208). url: <https://www.aclweb.org/anthology/W16-6208>

- 5. Diana Maynard, Kalina Bontcheva, and Isabelle Augenstein (Dec. 2016). *Natural Language Processing for the Semantic Web*. Synthesis Lectures on the Semantic Web: Theory and Technology. Morgan & Claypool Publishers. url: <https://www.morganclaypool.com/doi/abs/10.2200/S00741ED1V01Y201611WBE015>

- 6. Isabelle Augenstein and Anders Søgaard (July 2017). "Multi-Task Learning of Keyphrase Boundary Classification". In: *Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 2: Short Papers)*. Vancouver, Canada: Association for Computational Linguistics, pp. 341–346. doi: [10.18653/v1/P17-2054](https://doi.org/10.18653/v1/P17-2054). url: <https://www.aclweb.org/anthology/P17-2054>

- 7. Ed Collins, Isabelle Augenstein, and Sebastian Riedel (Aug. 2017). "A Supervised Approach to Extractive Summarisation of Scientific Papers". In: *Proceedings of the 21st Conference on Computational Natural Language Learning (CoNLL 2017)*. Vancouver, Canada: Association for Computational Linguistics, pp. 195–205. doi: [10.18653/v1/K17-1021](https://doi.org/10.18653/v1/K17-1021). url: <https://www.aclweb.org/anthology/K17-1021>

- 8. Isabelle Augenstein, Mrinal Das, Sebastian Riedel, Lakshmi Vikraman, and Andrew McCallum (Aug. 2017). "SemEval 2017 Task 10: ScienceIE - Extracting Keyphrases and Relations from Scientific Publications". In: *Proceedings of the 11th International Workshop on Semantic Evaluation (SemEval-2017)*. Vancouver, Canada: Association for Computational Linguistics, pp. 546–555. doi: [10.18653/v1/S17-2091](https://doi.org/10.18653/v1/S17-2091). url: <https://www.aclweb.org/anthology/S17-2091>

- 9. Ziqi Zhang, Anna Lisa Gentile, Eva Blomqvist, Isabelle Augenstein, and Fabio Ciravegna (2017). "An Unsupervised Data-driven Method to Discover Equivalent Relations in Large Linked Datasets". In: *Semantic Web* 8.2, pp. 197–223. url: <http://www.semantic-web-journal.net/content/unsupervised-data-driven-method-discover-equivalent-relations-large-linked-datasets>

- 10. Johannes Bjerva and Isabelle Augenstein (Jan. 2018b). "Tracking Typological Traits of Uralic Languages in Distributed Language Representations". In: *Proceedings of the Fourth International Workshop on Computational Linguistics of Uralic Languages*. Helsinki, Finland: Association for Computational Linguistics, pp. 76–86. doi: [10.18653/v1/W18-0207](https://doi.org/10.18653/v1/W18-0207). url: <https://www.aclweb.org/anthology/W18-0207>

- 11. Johannes Bjerva and Isabelle Augenstein (June 2018a). "From Phonology to Syntax: Unsupervised Linguistic Typology at Different Levels with Language Embeddings". In: *Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long Papers)*. New Orleans, Louisiana: Association for Computational Linguistics, pp. 907–916. doi: [10.18653/v1/N18-1083](https://doi.org/10.18653/v1/N18-1083). url: <https://www.aclweb.org/anthology/N18-1083>

- 12. Dirk Weissenborn, Pasquale Minervini, Isabelle Augenstein, Johannes Welbl, Tim Rocktäschel, Matko Bošnjak, Jeff Mitchell, Thomas Demeester, Tim Dettmers, Pontus Stenetorp, and Sebastian Riedel (July 2018). "Jack the Reader – A Machine Reading Framework". In: *Proceedings of ACL 2018, System Demonstrations*. Melbourne, Australia: Association for Computational Linguistics, pp. 25–30. doi: [10.18653/v1/P18-4005](https://doi.org/10.18653/v1/P18-4005). url: <https://www.aclweb.org/anthology/P18-4005>

- 13. Katharina Kann, Johannes Bjerva, Isabelle Augenstein, Barbara Plank, and Anders Søgaard (July 2018). "Character-level Supervision for Low-resource POS Tagging". In: *Proceedings of the Workshop on Deep Learning Approaches for Low-Resource NLP*. Melbourne: Association for Computational Linguistics, pp. 1–11. doi: [10.18653/v1/W18-3401](https://doi.org/10.18653/v1/W18-3401). url: <https://www.aclweb.org/anthology/W18-3401>

- 14. Thomas Nyegaard-Signori, Casper Veistrup Helms, Johannes Bjerva, and Isabelle Augenstein (June 2018). "KU-MTL at SemEval-2018 Task 1: Multi-task Identification of Affect in Tweets". In: *Proceedings of The 12th International Workshop on Semantic Evaluation*. New Orleans, Louisiana: Association for Computational Linguistics, pp. 385–389. doi: [10.18653/v1/S18-1058](https://doi.org/10.18653/v1/S18-1058). url: <https://www.aclweb.org/anthology/S18-1058>

- 15. Isabelle Augenstein, Kris Cao, He He, Felix Hill, Spandana Gella, Jamie Kiros, Hongyuan Mei, and Dipendra Misra, eds. (July 2018a). *Proceedings of The Third Workshop on Representation Learning for NLP*. Melbourne, Australia: Association for Computational Linguistics. url: <https://www.aclweb.org/anthology/W18-3000>
- 16. Yova Kementchedjhieva, Johannes Bjerva, and Isabelle Augenstein (Oct. 2018). "Copenhagen at CoNLL–SIGMORPHON 2018: Multilingual Inflection in Context with Explicit Morphosyntactic Decoding". In: *Proceedings of the CoNLL–SIGMORPHON 2018 Shared Task: Universal Morphological Reinflection*. Brussels: Association for Computational Linguistics, pp. 93–98. doi: [10.18653/v1/K18-3011](https://doi.org/10.18653/v1/K18-3011). url: <https://www.aclweb.org/anthology/K18-3011>

- 17. Miryam de Lhoneux, Johannes Bjerva, Isabelle Augenstein, and Anders Søgaard (Oct. 2018). "Parameter sharing between dependency parsers for related languages". In: *Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing*. Brussels, Belgium: Association for Computational Linguistics, pp. 4992–4997. doi: [10.18653/v1/D18-1543](https://doi.org/10.18653/v1/D18-1543). url: <https://www.aclweb.org/anthology/D18-1543>

- 18. Ana Gonzalez, Isabelle Augenstein, and Anders Søgaard (Oct. 2018). "A strong baseline for question relevancy ranking". In: *Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing*. Brussels, Belgium: Association for Computational Linguistics, pp. 4810–4815. doi: [10.18653/v1/D18-1515](https://doi.org/10.18653/v1/D18-1515). url: <https://www.aclweb.org/anthology/D18-1515>

- 19. Anders Søgaard, Miryam de Lhoneux, and Isabelle Augenstein (Nov. 2018). "Nightmare at test time: How punctuation prevents parsers from generalizing". In: *Proceedings of the 2018 EMNLP Workshop BlackboxNLP: Analyzing and Interpreting Neural Networks for NLP*. Brussels, Belgium: Association for Computational Linguistics, pp. 25–29. doi: [10.18653/v1/W18-5404](https://doi.org/10.18653/v1/W18-5404). url: <https://www.aclweb.org/anthology/W18-5404>

- 20. Johannes Bjerva, Yova Kementchedjhieva, Ryan Cotterell, and Isabelle Augenstein (June 2019b). "A Probabilistic Generative Model of Linguistic Typology". In: *Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers)*. Minneapolis, Minnesota: Association for Computational Linguistics, pp. 1529–1540. doi: [10.18653/v1/N19-1156](https://doi.org/10.18653/v1/N19-1156). url: <https://www.aclweb.org/anthology/N19-1156>

- 21. Mareike Hartmann, Tallulah Jansen, Isabelle Augenstein, and Anders Søgaard (June 2019b). "Issue Framing in Online Discussion Fora". In: *Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers)*. Minneapolis, Minnesota: Association for Computational Linguistics, pp. 1401–1407. doi: [10.18653/v1/N19-1142](https://doi.org/10.18653/v1/N19-1142). url: <https://www.aclweb.org/anthology/N19-1142>

- 22. Alexander Miserlis Hoyle, Lawrence Wolf-Sonkin, Hanna Wallach, Ryan Cotterell, and Isabelle Augenstein (June 2019b). "Combining Sentiment Lexica with a Multi-View Variational Autoencoder". In: *Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers)*. Minneapolis, Minnesota: Association for Computational Linguistics, pp. 635–640. doi: [10.18653/v1/N19-1065](https://doi.org/10.18653/v1/N19-1065). url: <https://www.aclweb.org/anthology/N19-1065>

- 23. Johannes Bjerva, Robert Östling, Maria Han Veiga, Jörg Tiedemann, and Isabelle Augenstein (June 2019d). "What Do Language Representations Really Represent?" In: *Computational Linguistics* 45.2, pp. 381–389. doi: [10.1162/coli_a_00351](https://doi.org/10.1162/coli_a_00351). url: <https://www.aclweb.org/anthology/J19-2006>

- 24. Sebastian Ruder, Joachim Bingel, Isabelle Augenstein, and Anders Søgaard (2019). "Latent Multi-Task Architecture Learning." In: *AAAI*. AAAI Press, pp. 4822–4829. isbn: 978-1-57735-809-1. url: <http://dblp.uni-trier.de/db/conf/aaai/aaai2019.html#RuderBAS19>

- 25. Johannes Bjerva, Yova Kementchedjhieva, Ryan Cotterell, and Isabelle Augenstein (July 2019c). "Uncovering Probabilistic Implications in Typological Knowledge Bases". In: *Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics*. Florence, Italy: Association for Computational Linguistics, pp. 3924–3930. doi: [10.18653/v1/P19-1382](https://doi.org/10.18653/v1/P19-1382). url: <https://www.aclweb.org/anthology/P19-1382>

- 26. Alexander Miserlis Hoyle, Lawrence Wolf-Sonkin, Hanna Wallach, Isabelle Augenstein, and Ryan Cotterell (July 2019a). "Unsupervised Discovery of Gendered Language through Latent-Variable Modeling". In: *Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics*. Florence, Italy: Association for Computational Linguistics, pp. 1706–1716. doi: [10.18653/v1/P19-1167](https://doi.org/10.18653/v1/P19-1167). url: <https://www.aclweb.org/anthology/P19-1167>

- 27. Isabelle Augenstein, Spandana Gella, Sebastian Ruder, Katharina Kann, Burcu Can, Johannes Welbl, Alexis Conneau, Xiang Ren, and Marek Rei, eds. (Aug. 2019a). *Proceedings of the 4th Workshop on Representation Learning for NLP (RepL4NLP-2019)*. Florence, Italy: Association for Computational Linguistics. url: <https://www.aclweb.org/anthology/W19-4300>

- 28. Ana Valeria Gonzalez, Isabelle Augenstein, and Anders Søgaard (Dec. 2019). "Retrieval-based Goal-Oriented Dialogue Generation". In: *The 3rd NeurIPS Workshop on Conversational AI*. Vancouver, Canada. url: <http://alborz-geramifard.com/workshops/neurips19-Conversational-AI/Papers/34.pdf>

- 29. Joachim Bingel, Victor Petrén Bach Hansen, Ana Valeria Gonzalez, Pavel Budzianowski, Isabelle Augenstein, and Anders Søgaard (Dec. 2019). "Domain Transfer in Dialogue Systems without Turn-Level Supervision". In: *The 3rd NeurIPS Workshop on Conversational AI*. Vancouver, Canada. url: <http://alborz-geramifard.com/workshops/neurips19-Conversational-AI/Papers/6.pdf>

- 30. Mostafa Abdou, Cezar Sas, Rahul Aralikatte, Isabelle Augenstein, and Anders Søgaard (Nov. 2019). "X-WikiRE: A Large, Multilingual Resource for Relation Extraction as Machine Comprehension". In: *Proceedings of the 2nd Workshop on Deep Learning Approaches for Low-Resource NLP (DeepLo 2019)*. Hong Kong, China: Association for Computational Linguistics, pp. 265–274. doi: [10.18653/v1/D19-6130](https://doi.org/10.18653/v1/D19-6130). url: <https://www.aclweb.org/anthology/D19-6130>
- 31. Johannes Bjerva, Katharina Kann, and Isabelle Augenstein (Nov. 2019a). "Transductive Auxiliary Task Self-Training for Neural Multi-Task Models". In: *Proceedings of the 2nd Workshop on Deep Learning Approaches for Low-Resource NLP (DeepLo 2019)*. Hong Kong, China: Association for Computational Linguistics, pp. 253–258. doi: [10.18653/v1/D19-6128](https://doi.org/10.18653/v1/D19-6128). url: <https://www.aclweb.org/anthology/D19-6128>

- 32. Mareike Hartmann, Yevgeniy Golovchenko, and Isabelle Augenstein (Nov. 2019a). "Mapping (Dis-)Information Flow about the MH17 Plane Crash". In: *Proceedings of the Second Workshop on Natural Language Processing for Internet Freedom: Censorship, Disinformation, and Propaganda*. Hong Kong, China: Association for Computational Linguistics, pp. 45–55. doi: [10.18653/v1/D19-5006](https://doi.org/10.18653/v1/D19-5006). url: <https://www.aclweb.org/anthology/D19-5006>

- 33. Nils Rethmeier, Vageesh Kumar Saxena, and Isabelle Augenstein (Mar. 2020). "TX-Ray: Quantifying and Explaining Model-Knowledge Transfer in (Un-)Supervised NLP". In: *Proceedings of the 36th Conference on Uncertainty in Artificial Intelligence (UAI)*. Ed. by Jonas Peters and David Sontag. Vol. 124. Proceedings of Machine Learning Research. Virtual: PMLR, pp. 440–449. url: <http://proceedings.mlr.press/v124/rethmeier20a.html>

- 34. Alok Debnath, Nikhil Pinnaparaju, Manish Shrivastava, Vasudeva Varma, and Isabelle Augenstein (2020). "Semantic Textual Similarity of Sentences with Emojis". In: *Companion Proceedings of the Web Conference 2020*. WWW '20. Taipei, Taiwan: Association for Computing Machinery, pp. 426–430. isbn: 9781450370240. doi: [10.1145/3366424.3383758](https://doi.org/10.1145/3366424.3383758). url: <https://doi.org/10.1145/3366424.3383758>

- 35. Pranav A and Isabelle Augenstein (July 2020). "2kenize: Tying Subword Sequences for Chinese Script Conversion". In: *Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics*. Online: Association for Computational Linguistics, pp. 7257–7272. doi: [10.18653/v1/2020.acl-main.648](https://doi.org/10.18653/v1/2020.acl-main.648). url: <https://www.aclweb.org/anthology/2020.acl-main.648>

- 36. Johannes Bjerva, Elizabeth Salesky, Sabrina J. Mielke, Aditi Chaudhary, Celano Giuseppe, Edoardo Maria Ponti, Ekaterina Vylomova, Ryan Cotterell, and Isabelle Augenstein (Nov. 2020c). "SIGTYP 2020 Shared Task: Prediction of Typological Features". In: *Proceedings of the Second Workshop on Computational Research in Linguistic Typology*. Online: Association for Computational Linguistics, pp. 1–11. url: <https://www.aclweb.org/anthology/2020.sigtyp-1.1>

- 37. Lukas Muttenthaler, Isabelle Augenstein, and Johannes Bjerva (Nov. 2020). "Unsupervised Evaluation for Question Answering with Transformers". In: *Proceedings of the Third BlackboxNLP Workshop on Analyzing and Interpreting Neural Networks for NLP*. Online: Association for Computational Linguistics, pp. 83–90. url: <https://www.aclweb.org/anthology/2020.blackboxnlp-1.8>

- 38. Anna Rogers and Isabelle Augenstein (Nov. 2020). "What Can We Do to Improve Peer Review in NLP?". In: *Findings of the Association for Computational Linguistics: EMNLP 2020*. Online: Association for Computational Linguistics, pp. 1256–1262. url: <https://www.aclweb.org/anthology/2020.findings-emnlp.112>

- 39. Johannes Bjerva, Nikita Bhutani, Behzad Golshan, Wang-Chiew Tan, and Isabelle Augenstein (Nov. 2020a). "SubjQA: A Dataset for Subjectivity and Review Comprehension". In: *Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)*. Online: Association for Computational Linguistics, pp. 5480–5494. url: <https://www.aclweb.org/anthology/2020.emnlp-main.442>

- 40. Farhad Nooralahzadeh, Giannis Bekoulis, Johannes Bjerva, and Isabelle Augenstein (Nov. 2020). "Zero-Shot Cross-Lingual Transfer with Meta Learning". In: *Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)*. Online: Association for Computational Linguistics, pp. 4547–4562. url: <https://www.aclweb.org/anthology/2020.emnlp-main.368>

Finally, the following are, at the time of writing, unpublished manuscripts available on pre-print servers, which have been written concurrently with the research papers included in this dissertation. Some of them are strongly related to the topic of this dissertation, whereas others are not. As they have not yet been accepted for publication, they do not fulfil the formal criteria for inclusion in a doktordisputats at the Faculty of Science, University of Copenhagen. They are listed here to demonstrate my continued research efforts in the area of content-based automatic fact checking and related fields.
- 1. Benjamin Riedel, Isabelle Augenstein, Georgios P. Spithourakis, and Sebastian Riedel (2017). "A simple but tough-to-beat baseline for the Fake News Challenge stance detection task." In: *CoRR* abs/1707.03264. url: <http://dblp.uni-trier.de/db/journals/corr/corr1707.html#RiedelASR17>

- 2. Luna De Bruyne, Pepa Atanasova, and Isabelle Augenstein (2019). "Joint Emotion Label Space Modelling for Affect Lexica." In: *CoRR* abs/1911.08782. url: <http://dblp.uni-trier.de/db/journals/corr/corr1911.html#abs-1911-08782>

- 3. Zeerak Waseem, Smarika Lulz, Joachim Bingel, and Isabelle Augenstein (June 2020). "Disembodied Machine Learning: On the Illusion of Objectivity in NLP". In: *OpenReview Preprint*. url: <https://openreview.net/forum?id=fkAxTMzy3fs>

- 4. Nils Rethmeier and Isabelle Augenstein (2020). "Long-Tail Zero and Few-Shot Learning via Contrastive Pretraining on and for Small Data." In: *CoRR* abs/2010.01061. url: <http://dblp.uni-trier.de/db/journals/corr/corr2010.html#abs-2010-01061>

- 5. Wojciech Ostrowski, Arnav Arora, Pepa Atanasova, and Isabelle Augenstein (2020b). "Multi-Hop Fact Checking of Political Claims." In: *CoRR* abs/2009.06401. url: <http://dblp.uni-trier.de/db/journals/corr/corr2009.html#abs-2009-06401>

- 6. Liesbeth Allein, Isabelle Augenstein, and Marie-Francine Moens (2020). "Time-Aware Evidence Ranking for Fact-Checking." In: *CoRR* abs/2009.06402. url: <http://dblp.uni-trier.de/db/journals/corr/corr2009.html#abs-2009-06402>

- 7. Wei Zhao, Steffen Eger, Johannes Bjerva, and Isabelle Augenstein (2020). "Inducing Language-Agnostic Multilingual Representations." In: *CoRR* abs/2008.09112. url: <http://dblp.uni-trier.de/db/journals/corr/corr2008.html#abs-2008-09112>

- 8. Andreas Nugaard Holm, Barbara Plank, Dustin Wright, and Isabelle Augenstein (2020). "Longitudinal Citation Prediction using Temporal Graph Neural Networks." In: *CoRR* abs/2012.05742

- 9. Andrea Lekkas, Peter Schneider-Kamp, and Isabelle Augenstein (2020). "Multi-Sense Language Modelling". In: *CoRR* abs/2012.05776
- i executive summary
  - 1 executive summary
    - 1.1 Introduction
      - 1.1.1 Automatic Fact Checking
      - 1.1.2 Learning with Limited and Heterogeneous Labelled Data
      - 1.1.3 Explainable Natural Language Understanding
    - 1.2 Scientific Contributions
      - 1.2.1 Detecting Check-Worthy Claims
      - 1.2.2 Stance Detection
      - 1.2.3 Veracity Prediction
    - 1.3 Research Landscape of Content-Based Automatic Fact Checking
      - 1.3.1 Research Trends over Time
      - 1.3.2 Contextualisation of Contributions
    - 1.4 Conclusions and Future Work
      - 1.4.1 Dataset Development
      - 1.4.2 Synergies between Explainable Fact Checking Methods
      - 1.4.3 Explanations to Support Human Decision Making
- ii detecting check-worthy claims
  - 2 claim check-worthiness detection as positive unlabelled learning
    - 2.1 Introduction
    - 2.2 Related Work
      - 2.2.1 Claim Check-Worthiness Detection
      - 2.2.2 Positive Unlabelled Learning
    - 2.3 Methods
      - 2.3.1 Task Definition
      - 2.3.2 PU Learning
      - 2.3.3 Positive Unlabelled Conversion
      - 2.3.4 Implementation
    - 2.4 Experimental Results
      - 2.4.1 Datasets
      - 2.4.2 Is PU Learning Beneficial for Citation Needed Detection?
      - 2.4.3 Does PU Citation Needed Detection Transfer to Rumour Detection?
      - 2.4.4 Does PU Citation Needed Detection Transfer to Political Speeches?
      - 2.4.5 Dataset Analysis
    - 2.5 Discussion and Conclusion
    - 2.6 Appendix
      - 2.6.1 Examples of PUC Improvements for Rumour Detection
      - 2.6.2 Reproducibility
  - 3 transformer based multi-source domain adaptation
    - 3.1 Introduction
    - 3.2 Related Work
      - 3.2.1 Domain Adaptation
      - 3.2.2 Transformer Based Domain Adaptation
    - 3.3 Methods
      - 3.3.1 Mixture of Experts Techniques
      - 3.3.2 Domain Adversarial Training
      - 3.3.3 Training
    - 3.4 Experiments and Results
      - 3.4.1 Baselines
      - 3.4.2 Model Variants
      - 3.4.3 Results
    - 3.5 Discussion
      - 3.5.1 What is the Effect of Domain Adversarial Training?
      - 3.5.2 Is Mixture of Experts Useful with LPX Models?
    - 3.6 Conclusion
    - 3.7 Appendix
      - 3.7.1 BERT Domain Adversarial Training Results
      - 3.7.2 Reproducibility
- iii stance detection
  - 4 stance detection with bidirectional conditional encoding
    - 4.1 Introduction
    - 4.2 Task Setup
    - 4.3 Methods
      - 4.3.1 Independent Encoding
      - 4.3.2 Conditional Encoding
      - 4.3.3 Bidirectional Conditional Encoding
      - 4.3.4 Unsupervised Pretraining
    - 4.4 Experiments
      - 4.4.1 Methods
    - 4.5 Unseen Target Stance Detection
      - 4.5.1 Results and Discussion
    - 4.6 Weakly Supervised Stance Detection
      - 4.6.1 Results and Discussion
    - 4.7 Related Work
    - 4.8 Conclusions and Future Work
  - 5 discourse-aware rumour stance classification
    - 5.1 Introduction
    - 5.2 Related Work
    - 5.3 Research Objectives
    - 5.4 Rumour Stance Classification
      - 5.4.1 Task Definition
      - 5.4.2 Dataset
    - 5.5 Classifiers
      - 5.5.1 Hawkes Processes
      - 5.5.2 Conditional Random Fields (CRF): Linear CRF and Tree CRF
      - 5.5.3 Branch LSTM
      - 5.5.4 Summary of Sequential Classifiers
      - 5.5.5 Baseline Classifiers
      - 5.5.6 Experiment Settings and Evaluation Measures
    - 5.6 Features
    - 5.7 Experimental Results
      - 5.7.1 Evaluating Sequential Classifiers (RO 1)
      - 5.7.2 Exploring Contextual Features (RO 2)
      - 5.7.3 Analysis of the Best-Performing Classifiers
      - 5.7.4 Feature Analysis (RO 6)
    - 5.8 Conclusions and Future Work
    - 5.9 Appendix
      - 5.9.1 Local Features
      - 5.9.2 Contextual Features
      - 5.9.3 Hawkes Features
  - 6 multi-task learning over disparate label spaces
    - 6.1 Introduction
    - 6.2 Related work
    - 6.3 Multi-task learning with disparate label spaces
      - 6.3.1 Problem definition
      - 6.3.2 Label Embedding Layer
      - 6.3.3 Label Transfer Network
      - 6.3.4 Semi-supervised MTL
      - 6.3.5 Data-specific features
      - 6.3.6 Other multi-task improvements
    - 6.4 Experiments
      - 6.4.1 Tasks and datasets
      - 6.4.2 Base model
      - 6.4.3 Training settings
    - 6.5 Results
    - 6.6 Analysis
      - 6.6.1 Label Embeddings
      - 6.6.2 Auxiliary Tasks
      - 6.6.3 Ablation analysis
      - 6.6.4 Label transfer network
    - 6.7 Conclusion
  - 7 sequential alignment of text representations
    - 7.1 Introduction
    - 7.2 Subspace Alignment
      - 7.2.1 Unsupervised Subspace Alignment
      - 7.2.2 Semi-Supervised Subspace Alignment
      - 7.2.3 Extending SSA to Unbounded Time
      - 7.2.4 Considering Sample Similarities between Classes
    - 7.3 Experimental Setup
    - 7.4 Paper Acceptance Prediction
      - 7.4.1 Model
      - 7.4.2 Experiments & Results
    - 7.5 Named Entity Recognition
      - 7.5.1 Model
      - 7.5.2 Experiments & Results
    - 7.6 SDQC Stance Classification
      - 7.6.1 Model
      - 7.6.2 Experiments & Results
    - 7.7 Analysis and Discussion
      - 7.7.1 Example of Aligning Tweets
      - 7.7.2 Limitations
      - 7.7.3 Related Work
    - 7.8 Conclusions
  - 8 a diagnostic study of explainability techniques
    - 8.1 Introduction
    - 8.2 Related Work
    - 8.3 Evaluating Attribution Maps
    - 8.4 Experiments
      - 8.4.1 Datasets
      - 8.4.2 Models
      - 8.4.3 Explainability Techniques
    - 8.5 Results and Discussion
      - 8.5.1 Results
    - 8.6 Conclusion
    - 8.7 Appendix
      - 8.7.1 Experimental Setup
      - 8.7.2 Spider Figures for the IMDB Dataset
      - 8.7.3 Detailed Evaluation Results for the Explainability Techniques
- iv veracity prediction
  - 9 multi-domain evidence-based fact checking of claims
    - 9.1 Introduction
    - 9.2 Related Work
      - 9.2.1 Datasets
      - 9.2.2 Methods
    - 9.3 Dataset Construction
      - 9.3.1 Selection of sources
      - 9.3.2 Retrieving Evidence Pages
      - 9.3.3 Entity Detection and Linking
    - 9.4 Claim Veracity Prediction
      - 9.4.1 Multi-Domain Claim Veracity Prediction with Disparate Label Spaces
      - 9.4.2 Joint Evidence Ranking and Claim Veracity Prediction
      - 9.4.3 Metadata
    - 9.5 Experiments
      - 9.5.1 Experimental Setup
      - 9.5.2 Results
    - 9.6 Analysis and Discussion
    - 9.7 Conclusions
    - 9.8 Appendix
  - 10 generating label cohesive and well-formed adversarial claims
    - 10.1 Introduction
    - 10.2 Related Work
      - 10.2.1 Adversarial Examples
      - 10.2.2 Fact Checking
    - 10.3 Methods
      - 10.3.1 Models
      - 10.3.2 Universal Adversarial Triggers Method
      - 10.3.3 Claim Generation
    - 10.4 Results
      - 10.4.1 Adversarial Triggers
      - 10.4.2 Generation
    - 10.5 Conclusion
    - 10.6 Appendix
      - 10.6.1 Implementation Details
      - 10.6.2 Top Adversarial Triggers
      - 10.6.3 Computing Infrastructure
      - 10.6.4 Evaluation Metrics
      - 10.6.5 Manual Evaluation
  - 11 generating fact checking explanations
    - 11.1 Introduction
    - 11.2 Dataset
    - 11.3 Method
      - 11.3.1 Generating Explanations
      - 11.3.2 Veracity Prediction
      - 11.3.3 Joint Training
    - 11.4 Automatic Evaluation
      - 11.4.1 Experiments
      - 11.4.2 Experimental Setup
      - 11.4.3 Results and Discussion
      - 11.4.4 A Case Study
    - 11.5 Manual Evaluation
      - 11.5.1 Experiments
      - 11.5.2 Results and Discussion
    - 11.6 Related Work
    - 11.7 Conclusions
    - 11.8 Acknowledgments
    - 11.9 Appendix
      - 11.9.1 Comparison of Different Sources of Evidence
      - 11.9.2 Extractive Gold Oracle Examples
      - 11.9.3 Manual 6-Way Veracity Prediction from Explanations
- Bibliography
2110.09779/main_diagram/main_diagram.drawio
ADDED
The diff for this file is too large to render.
See raw diff
2204.00833/main_diagram/main_diagram.drawio
ADDED
@@ -0,0 +1 @@
<mxfile host="Electron" modified="2022-03-07T16:00:34.872Z" agent="5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) draw.io/16.5.1 Chrome/96.0.4664.110 Electron/16.0.7 Safari/537.36" version="16.5.1" etag="QXj8kfVZIcpmRMV05UpP" type="device"><diagram id="qI-PGLXI6e2J1wNI7XJn">7V1bc9s2Fv41mn0qBsDB9TG2k+4+dLfTdGbbpx3GomxOZdEj0bHTX78HEkGRAGTTEmlHipRJYoIySeE71w8HRxO4vHv6eZnd3/5STvP5hNPp0wSuJpxzUBL/cyPfNiNMGboZuVkW081Ya+Bz8Xdev9GPPhTTfFWPbYaqspxXxX138LpcLPLrqjOWLZflY/dts3I+7QzcZzd5NPD5OpvHo/8tptWt/2Cgtif+mRc3t/7WSorNmS/Z9V83y/JhUd9wUS7yzZm7zF+nvvrqNpuWj63ZgI8TuFyWZbX56e7pMp+7ie1O2acdZ5tnXuaLqs8v8M0vfM3mD/XHrp+r+ubnYf1Bcvd+NoGLx9uiyj/fZ9fu7CNCj2O31d28Pj0rF1UNJRg8XlXL8q/8spyXSxxaT4Qf9JPK3a8V87l/04TDTLo/9eVa45sXjtdPnS+r/GnnJ2fNfKKQ5uVdXi2/4Vv8L1BZf9ZaQBFYou1m6HELuOWUiPqdty24BTW1rNVydtPcYjvf+EM95enph5enP19MPzhpxqPrebZaFdfdGd/CQ+sJ+5TdFXP3oX4v7lCBOP13/oj//lbeZYsXIcJZputXH5ys/XB5gR/1Aud8+e0P9wxE+sM/60daH1w9dY6+NUde8dkW1nwaKGaVLW/yetrB9ga6BaKkMYB+bJnPs6r42r1lCtT6Dr+WBd64kSMLoiNGEnj3EqvyYXmd17/VVsXgQowyIBofy7+C61JBKLfNS8vubTaTFN1mLYHNpPQSSnEqQvlUVH/UD+R+/nMrn3i0lUh3sEMg95XrFaJR+Rmqrd4O4d6IRz1kYoEX/S3b2wg845ITThWnRhmBXl0FcsqJpUKAslSjtQ7EtLc2cKOIEY0yJO6iqDVMcLyJtHosZZCRMnyItOEFn5it7jfxyax4cjoxgnao9etVrrXE5y0qd0/tn2IsT6slEGk7+GktIk8rWUJmnxHPvk5WxeYLlfBzfVhrZxeTFnreimwtR2MAdlqRvSCNLcZ67FPhPpg3TrHVxcH6LWwwZ97YQN7PbomU3YLecnKoQZLCEoGmCN2jZIwFcZ2gEs9abbWhDG1WP1ORuIvEQNB73UaMoZd528Pw6IQXRp0Rj5v/IplG/awCuzMvbhZOVnD+cwT5wmlxgQnOh/rEXTGdztfCn6+Kv7Mv60s5Wbl3z7/+RPJiIq/ctR6qclULxa6wvi1PfiiQ8h6KEcvpECYIqEYTpKxUEu0R2K6IoB8joDEjAInSYjSPbBNjsNuhHmKczBnmAWEW6GkoBa5Qzx3WEMAM7wWzPcM8IMyaUoRZgNDGQa27NpkLd3INMMPsLA40RkPZU1cRzB//V0zkx4m+mE301TFDPg6ezDAC28SXsq4D1xjr2+1ZKiNEOR0LUfYSov84IxojarQOIASMwSKAOwg+k7EehGAPivM46IyAmkjmJT5jSeclkx6hvO6N9RvRDVYYoqSWRgEHDd0AXFIgjKJ4UYP+HQO8XuF4dBOO3qJ7XQgedDhGgaVI361JeTgbk9A9WN4FXcmYpmfGjGQ/UnToGiF98S+0/Wc/8Cx0rKtXQilC2y8VewKW4oGGQFLGhv81RFDDBU/aTPCWGN7BBZ8UGbQPG9bD69TIdPgj2VvYDvUwGiyhLf6ou/AC0hLV4o+CZb/e/JEWtHNdrUbjjpjqEfPEov/MOm4PaQvik1jE9lWgHfKz5yLJocICKuCOOOwpEGADgRiPTGQpNvEUguA91+d6iJR3XN9NIBwuNIOivcQlsbSmuvEUo0HYvEOEUTyyb6231bHIM7oSeH//yFtB3lxyb7FOsaeBWK9us3v3Y7n8OJ8X9ytn6O7zJUrsOvK6yjejv26HXqpx2Zb3/OehmhcLH3m9HFElwrAhgixHbgfLo9zl23FolZBOPkRklaI3d5uX2N18H7Zlj4x6pz0axMV1EnN6BDaKcUqJ0buYvPWqvdrWsJh9iwOAv1QcIE2LTxS9zNseBojvJnwxJSx+Yi4pPOf0KYKwG/tAitQVI6X0PEXqnkA0tMOKfD/WwfhCyl0sW3++TkkSsEKaM9IyCbQfgffqmCb8DLqeh6FiGi+LzwnnJuhICEpLQNsxjotYvqB2t4Oc2F6Um2jmsqmnpgmxHSeEsbwb2xohiC/96Szc6QQ3xCjxS7kHWYUexbivroU+1ChMtf2SMAqQRmKarW6bp1uWFSprueiEGIcFmg0p42HydRRt+6ETVptLS4Yo5Ur40Od5jH1qQMfgPnoEeCkSDEbgNXrP9bn08XCNYdANTTHkJXGk85zPPQjCXqWPLxY7Pp8CvZe69F+oPDRsUaLrnRgzzpxtQ43A+wyYX8S8Hbrnr8/4Jfo2fql/sDqbzfj19Tj+yO2J4F0FY1TFBT9cG2JtQsfYMF7JvIGa7Ut1vpd62tibjaGyvTGKKaqHhdsTWCxujkmdcjaVuR5LnSBUp4ai6WiTJUaltOlwmICersfywVwnwBuhSr73XMc0yC/l9JiUYUzfAkFKak2ikBSAiFTl4QDpqN9GOGqqc0hNw3toELyNU9kRB653krS2YHYXtxjFKMMY0ZzuR27vERVCzFRc5WfVbakuEKkDcBJh4ajq24up2FlsdFpFQ+Ot0vuw7m2yQMIsY1pj2gE2TOzBRLlE70oQJbobVUzPpf19bMdhRXCnJZdN7Chsp5yNONv+rN/EgxaZ3ncNd9s6ZPwyJUsJjSmKMJjZp2iJBDXYVnDiP9kIAhvXsm0oEPq4NtmrSJ5/3FVVxjVr1kI8Oh76dgYHI62sQo86syNdQ/nJDpZmc0YC7yGYTtDCoCzx6HVWU0CSUIH3QqtH+dQPjxYgNBCAZRsT2FYqI4lNLE9iJkckDIBWzGAdIX81zXIzux4JLGs2+0JdOYkU3HQzeEkZkVRb9GEgOPU1w22FY4xQy7S0yhi33BmjqVw3KCsYs1Jay6Q8HFgRFxGdgQ2bdVm3QxRza4VWS/gdIq3WSKDAStBoXIVMFD6CIqi3AIpbDbJZ6w+QZVwxZY2U3KK6qQGgTVJsp7CEMxy0XBkipKKWK8GlNMFGPApEcaO5toZvWbFOmRgnTqk1gNDSpMoPpCWaoc6DUlxp4beEHoQsj5A9mfWEAZ0n5syos5oJQbUGCOs0BRpk63beaGMoJBbuMKumDjqlhNGWJ9yrlERLgTZZatrsrTwI2ZhdO+ts2GeDYayDwEjFjHO7Ia5AJHP7NrkFzEF9y52O0qK1BQbSAGWo1TKltYqgpRcS
9ZWh/pshtDbezHnW2jhBEc5bagyhqHJsc+htKZFWo5/Ev06zE2EU5icYRhllnFOmCa1VHMMoibfQ6G2t4HoIZxsXLJ0VN4BWcdQ7ZSjTQgjt871WgKzQPyorGUZZOpGRYjKE4ZfbC4nYS8++drTWEEAXi1G4wUhMDbFQKfpsZzyKeu0hd5gMwqV3dpikduF+d+0gtGRu6wfOPwYESqruOg9aG4KiJ5kA4Jr13GKbqPIGhs7HYippjcXIM3Bw65Tecre1BNCHBZ5pwFasMZ/2e/nbzxfHZNA+ffqoLi/HMmgaI0RGjaUSrZLSNoxEGNGKuhzDEShih0WjzqIpAcIy30l0bJMWU2/n4tpXxymMRZtAdNxQYqy+oiIm5M4YvjpDVCJcT3rL3rBy1wa+c2O+vcrdFbh8v2mz2SXOMawklL5L/0V5JuJeTuo1xjy8yQ1YEFm5Tlt2mxwk6mneJzvw1TpnM3xIXkgFMe9ohiFC7NyaaZg+3W9RGcUt0cZYtP7aSr/RwvfHsZRoLQWnCvWehT1u+tdIQdgtZ7T6ShmzhOeg4BA60X6X3XrlTr6QTLjK7pyPWHxZ3TfTcEyIy36It33KNJ9lD/NqLDmwuruy1/SIbWOtEum3GIAblikK8UdQ6SGAE0GhO/i2J2+ipYmSMvjQgOW+/22aHx184hXJQ6yfQ1hlyoImYOC7OLVRTdJhAyyyyiT/RTfIZrOZa3DBabXMFqtZubzbJDunCvIQcHLdQ0kFS23bGwLOXd9vcDJt0oewozSAiCeindH6oKsE07XWtntMed0bq3yxwhk4NnS+A0vKFbF257awpr6vjXKqtHAIRVSp1lYn9Q0UgxhLqondvZGPi1STjtG+Y8I3lI000xWm5NOzig6wysAjiCFVcA8pynIItdzV8//EmsgPgZUK+mRvu069RZN4leB+1sp4ky/yZVat9bG4c98dfWwIvb8aahmpYaIIP1UvtocS4uH2e7Q3LOD2m8rh4/8B</diagram></mxfile>
|
2204.00833/main_diagram/main_diagram.pdf
ADDED
|
Binary file (37.3 kB). View file
|
|
|
2204.00833/paper_text/intro_method.md
ADDED
@@ -0,0 +1,28 @@
# Introduction

As an important task of computer vision, image generation has made remarkable progress in recent years, supported by a flurry of generative adversarial networks [@brock2018large; @chen2016infogan; @denton2015deep; @goodfellow2014generative; @hudson2021generative; @karras2018progressive; @karras2019style; @karras2020analyzing; @lee2021vitgan; @radford2015unsupervised]. One of the milestone works is the StyleGAN series [@karras2019style; @karras2020analyzing], which borrows the principle of style transfer [@huang2017arbitrary] to build an effective generator architecture. Due to its superior image quality, this style-driven modeling has become the mainstream paradigm of image generation [@karras2019style; @karras2020analyzing], and it has also greatly influenced and promoted the development of other generative tasks, such as image manipulation [@dolhansky2018eye; @kim2021exploiting; @wang2021hififace; @yu2018generative; @zhu2020sean], image-to-image translation [@choi2018stargan; @isola2017image; @ji2022knowing; @li2021image; @ma2022knowing; @zhu2017unpaired] and text-to-image generation [@li2019controllable; @park2020swapping; @peng2021knowledge; @xu2018attngan].

In addition to the StyleGAN series, pixel synthesis [@anokhin2021image; @skorokhodov2021adversarial] is another paradigm with great potential for image generation. Recently, Anokhin *et al.* [@anokhin2021image] propose a novel Conditionally-Independent Pixel Synthesis (CIPS) network for adversarial image generation, which directly computes each pixel value from a random latent vector and positional embeddings. This end-to-end pixel regression strategy can effectively exploit pixel-wise prior knowledge to facilitate the generation of high-quality images. Meanwhile, it also simplifies the design of the generator architecture, *e.g.*, by only using $1\times 1$ convolutions, and offers stronger generation ability on non-trivial topologies [@anokhin2021image]. On multiple benchmarks [@karras2019style; @radford2015unsupervised], this method exhibits performance comparable to the StyleGAN series, showing great potential for image generation. In this paper, we also follow the principle of pixel synthesis to build an effective image generation network.

Despite the aforementioned merits, CIPS still has obvious shortcomings in model efficiency. Firstly, although CIPS is built with a simple network structure, it still incurs an excessive memory footprint and computation during inference. This is mainly attributed to its high-resolution pixel tensors for end-to-end pixel regression, *e.g.*, $256\times 256\times 512$, which result in a large computational overhead and memory footprint, as shown in Fig. [\[subfig:strc_cips\]](#subfig:strc_cips){reference-type="ref" reference="subfig:strc_cips"}. Meanwhile, the learnable coordinate embeddings also account for a large number of parameters, making CIPS take about $30\%$ more parameters than StyleGAN2 [@karras2020analyzing]. These issues greatly limit the application of CIPS to high-resolution image synthesis.
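
To make the scale of this overhead concrete, a rough back-of-the-envelope estimate (ours, not a figure from the paper) of the memory held by a single such pixel tensor:

```python
# Memory of one 256x256x512 activation tensor, assuming float32 and batch size 1.
H, W, C = 256, 256, 512
print(H * W * C * 4 / 2**20)  # -> 128.0 MiB, kept alive at every synthesis layer
```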

To address these issues, we propose a novel progressive pixel synthesis network for efficient image generation, termed *PixelFolder*, whose structure is illustrated in Fig. [\[subfig:pxf\]](#subfig:pxf){reference-type="ref" reference="subfig:pxf"}. Firstly, we cast pixel synthesis as a progressive problem and compute pixel values via a multi-stage structure. In this way, the generator can process pixel tensors of varying scales instead of fixed high-resolution ones, thereby greatly reducing memory footprint and computation. Secondly, we introduce novel *pixel folding* operations to further improve model efficiency. In PixelFolder, the large pixel tensors of different stages are folded into smaller ones, and then gradually unfolded (expanded) during feature transformation. These pixel folding (and unfolding) operations preserve the independence of each pixel while saving model expenditure. These innovative designs help PixelFolder achieve high-quality image generation with superior model efficiency, and they also prove effective against the *local imaging incongruity* found in CIPS [@anokhin2021image], as shown in Fig. [1](#fig:local_incongruities){reference-type="ref" reference="fig:local_incongruities"}.
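
As a rough illustration of the folding idea, the sketch below realises pixel folding as a space-to-depth reshaping via PyTorch's `pixel_unshuffle`/`pixel_shuffle`; this is our reading of the operation, not necessarily the paper's exact implementation:

```python
import torch
import torch.nn.functional as F

x = torch.randn(1, 16, 64, 64)         # pixel tensor: (batch, channels, H, W)
folded = F.pixel_unshuffle(x, 2)       # -> (1, 64, 32, 32): 2x2 neighbourhoods moved into channels
# ...feature transformations run here on the cheaper low-resolution tensor...
unfolded = F.pixel_shuffle(folded, 2)  # -> (1, 16, 64, 64): pixels expanded back
assert torch.equal(unfolded, x)        # folding/unfolding is a lossless permutation
```

Because folding only permutes values, each pixel's information stays separate throughout, which is what allows the model to retain the end-to-end per-pixel regression property while operating on smaller tensors.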

To validate the proposed PixelFolder, we conduct extensive experiments on two benchmark datasets for image generation, *i.e.*, FFHQ [@karras2019style] and LSUN Church [@radford2015unsupervised]. The experimental results show that PixelFolder not only outperforms CIPS in image quality on both benchmarks, but also reduces parameters and computation by $53\%$ and $89\%$, respectively. Compared to the state-of-the-art model, *i.e.*, StyleGAN2 [@karras2020analyzing], PixelFolder is also very competitive and obtains new SOTA performance on FFHQ and LSUN Church, *i.e.*, $3.77$ FID and $2.45$ FID, respectively. Meanwhile, PixelFolder remains more efficient, with $31\%$ fewer parameters and $72\%$ less computation than StyleGAN2.

To sum up, our contribution is two-fold:

1. We propose a progressive pixel synthesis network for efficient image generation, termed *PixelFolder*. With the multi-stage structure and innovative pixel folding operations, PixelFolder greatly reduces the computational and memory overhead while keeping the property of end-to-end pixel synthesis.

2. While retaining much higher efficiency, the proposed PixelFolder not only performs better than the latest pixel synthesis method CIPS, but also achieves new SOTA performance on FFHQ and LSUN Church.

# Method

Conditionally-Independent Pixel Synthesis (CIPS) is a novel generative adversarial network proposed by Anokhin *et al.* [@anokhin2021image]. Its main principle is to synthesize each pixel conditioned on a random vector $z\in Z$ and the pixel coordinates $(x,y)$, which can be defined by $$\begin{equation}
\label{eq:cips}
I = \left\{ G(x,y;\mathbf{z}) \mid (x,y) \in mgrid(H,W) \right\},
\end{equation}$$ where $mgrid(H,W)=\left\{ (x,y) \mid 0\leq x \leq W,\, 0 \leq y \leq H \right\}$ is the set of integer pixel coordinates, and $G(\cdot)$ is the generator. Similar to StyleGAN2 [@karras2020analyzing], $z$ is turned into a style vector $w$ via a mapping network and then shared by all pixels. Afterwards, $w$ is injected into the generation process via ModFC layers [@anokhin2021image].
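
As a minimal sketch of this conditionally-independent formulation, the toy generator below regresses every pixel from its coordinates and a shared style vector using only per-pixel (*i.e.*, $1\times 1$) layers; for simplicity it concatenates the style with the positional features rather than injecting it through ModFC, so it illustrates the principle rather than the exact CIPS architecture:

```python
import torch
import torch.nn as nn

class ToyCIPS(nn.Module):
    """Toy conditionally-independent pixel synthesis: each pixel is regressed
    independently from (x, y) and a style vector shared by all pixels."""
    def __init__(self, dim: int = 64):
        super().__init__()
        self.mapping = nn.Sequential(nn.Linear(dim, dim), nn.ReLU(), nn.Linear(dim, dim))
        self.pos_embed = nn.Linear(2, dim)   # stand-in for Fourier/coordinate embeddings
        self.synthesis = nn.Sequential(nn.Linear(2 * dim, dim), nn.ReLU(), nn.Linear(dim, 3))

    def forward(self, z: torch.Tensor, H: int, W: int) -> torch.Tensor:
        w = self.mapping(z)                                        # style vector w
        ys, xs = torch.meshgrid(torch.arange(H), torch.arange(W), indexing="ij")
        coords = torch.stack([xs, ys], -1).reshape(-1, 2).float()  # mgrid(H, W)
        e = self.pos_embed(coords)                                 # (H*W, dim) per-pixel features
        w = w.expand(coords.shape[0], -1)                          # broadcast style to every pixel
        return self.synthesis(torch.cat([e, w], -1)).view(H, W, 3)

rgb = ToyCIPS()(torch.randn(1, 64), 16, 16)                        # -> (16, 16, 3) image
```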

An important design in CIPS is the positional embedding of synthesized pixels, which consists of Fourier features and coordinate embeddings. The Fourier feature of each pixel, $e_{fo}(x,y)\in\mathbb{R}^d$, is computed from the coordinate $(x,y)$, transformed by a learnable weight matrix $B_{fo}\in \mathbb{R}^{2\times d}$ followed by a $\sin$ activation. To improve model capacity, Anokhin *et al.* also adopt the coordinate embedding $e_{co}(x,y)\in\mathbb{R}^d$, which comprises $H\times W$ learnable vectors in total. Afterwards, the final pixel vector $e(x,y)$ is initialized by concatenating these two types of embeddings and then fed to the generator.
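
The positional embedding described above can be sketched as follows (our minimal reading; the exact coordinate normalisation and dimensionalities used in CIPS may differ):

```python
import torch

H, W, d = 4, 4, 8
B_fo = torch.randn(2, d, requires_grad=True)      # learnable weight matrix B_fo
e_co = torch.randn(H * W, d, requires_grad=True)  # one learnable embedding per pixel
ys, xs = torch.meshgrid(torch.arange(H), torch.arange(W), indexing="ij")
coords = torch.stack([xs, ys], -1).reshape(-1, 2).float()
e_fo = torch.sin(coords @ B_fo)                   # Fourier features: sin of a linear map of (x, y)
e = torch.cat([e_fo, e_co], dim=-1)               # concatenated pixel vectors fed to the generator
print(e.shape)                                    # torch.Size([16, 16]), i.e. (H*W, 2d)
```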

Although CIPS has a simple structure and can be processed in parallel [@anokhin2021image], its computational cost and memory footprint are still considerable, mainly due to the high-resolution pixel tensor used for end-to-end generation. In this paper, we follow the principle of CIPS defined in Eq. [\[eq:cips\]](#eq:cips){reference-type="ref" reference="eq:cips"} to build our model, and address the issue of model efficiency via a progressive regression paradigm.
2205.14772/main_diagram/main_diagram.drawio
ADDED
@@ -0,0 +1 @@
<mxfile host="app.diagrams.net" modified="2022-05-15T07:27:04.052Z" agent="5.0 (X11)" etag="pMS5-vEShK4AIjBNiPR0" version="17.4.6" type="google"><diagram id="ZKJysynBOhW-cL5Jl7wl" name="for paper">7V3ZcttIsv0aR8w8jAOrLD1SBGzDoQINC7ANvdygQBoiQUkOiTKWr795Thao1Vt3+07PXPcsbYBYqrJyOXkyC37mT8+7V1fzz2fmcrHcPPOcRffMj5553p4byv/jRK8nXO/gQM/UV6uFPXd74ng1LO1Jx569WS2W1/cu3F5ebrarz/dPVpcXF8tqe+/c/Orqsr1/2afLzf23fp7X9o3O7Ynjar5ZPrrsw2qxPdOz++Gdq18vV/XZ+GbXsb+cz8eL7SOuz+aLy/bOu/z4mT+9urzc6p/Ou+lyA+GNctEHvfzKr7uBXS0vtj9yw9n715/ms2599nF7/uZi0ux/9I//FehTvsw3N3bCz7y9jTzvcLH6In+s8cdne84n+d/4i7zi7o86uW0/Skzm+Rl/rG5O5V+H7dlquzz+PK9wrhUlkXNn2/ONHLnyx9PLm4vFcnF0ujsxr5r6CmdnN9vN6mJpzy/mV81MHrPaQpWc5054/6THs7jy0+XF9uX8fLXBhe+XV4v5xdyetvrlBvZ4erm5vOKw/Zf8R85fb68um+VTv3xabTZ3zjv8B+c3q8+v7TCtPJdX22X31YVyd8svdrO8PF9ur3q5xN4QHFgVsjYT7Fkjau8qoJ46u6N7+/a2uVX5evfkW62QP1jF+Aklwf2PtOTBqmPJPn918tY656ebh8b2o0IJR09iheL7+4+E4j8hlIPgefirxOJ8XyyXqsPTnXdyqLXXZ8uFPYC0VuJvjuany83by+vVdnV5Ib+dXm63l+d3LphsVjV+2F7ChOb2qBL5La/u29RogeddDaf8fN5e+88vP31aVcv/Ob1ZbRari/ob6iw3LVbLW+O4uIQR/ohZeT9jVrv36S+jd/V+1Ii+oaqPteg7WvLi4NdpiftISyZTEz/SlCt1hFYrvuM1f2AtnB9eC7u+j5fhawry1y+PNeoXO2P9znp5v8zV+d+36eXFYgJkcSu6nUG795cJ59/Ot2KgFzzjOf6Tgr6/9n9ycR+s2sv9aTydPmHW8ttpuNyPgju/RasrcVPqgJbz6+1TdrvHf3ZqsFyMAOrHA92d1R2B1N3VHc9dLTfz7erL/cc/teT2DW8vV/LiXcjYexBGwwdKc315c1Ut7U13gdOD54T733nQdn5VL7ePHkQF3M36T+jki+/r5OjyV+fEs9+PKhv8cLjDWuP6Lpaf5jdEeV8JO9efNY59WnVQ2EO+cDKedcYzeNR8K2o70UPv5fWX+pl32IlteNO3r1PvpD8MTj90N9XgrOav3zlVdPnlyF/4iz70TR9+qc6rL2Y9ac30YFicV6vk9eLzyet3l2+PEz/NJ6vk1dlm/mFxuYicVToUbbI69OYf3vvZ+UEg17RJNKl5PqpcM03k3W9WaVDm1/vVq5fOfHo4LF5vrk9yZ3Vyvrk+jS7XJ+f6n1mP3958mXvF3un5wc1Jfi1vyw6SxpGnHRoznfhpdBaZVeCaIfZnPE5uzFDWcs6Xf/fmeCL/Lm5MPnGSyNzISAYTNTKiJMyjuD1aTwITJXW6btpMnnG0jh25Ro4z+bMZTD/p02nQmaHqkqhs5c9emje1vO/GrDNvdjxx0/WbSK6U99a9XO+mUSPvqwN5n3O0TpwUz8+Ne7TOhnQ1GWbToDVDI+MpeuPIf/ogMOtCri/wfPwm19c3MoZWrndmUSl/ruX6yk+izJ8VJjBTzM/gOJwdBzK2uj9al126juXeODxaV72MLUiPg34WxZ482zGxCc2xyCqf9HIcpH0wpOuqnuFdUR2K/DwzZPIuMyQimxTvyGuRRd2LbFq5HvP1TYRrJh2ukXcHIhvIU+RVBPKMzuQZV1zk28v4RUsOI5MnMv86FPnIuKsbE1W1nBtE3l0q62bWWJs6lGfKmgV9Osg8hriT93YzyD/CmtQyr8kgayrvwJpWrqyJJ+Nq9XmxyCAJzGripdF7eedExlR5KeaVTyBPH2OGbsxyyLjyZI1dGZPM/Y6cGhOK3KAncn3RyRwHXD/LZY3zEjrhzHJZExmryD3EmqSxkTEl8o6Gx7I+AfTM5EbeUXXp8cRJMcdoAtl6oruOvP9mFtWOzBHr5KeDqVN5n1zvi9x6mc+NvBPPc2SOvehcLXKSMYoeNaZXXYfeZC7GD70RPRKdK50Uzx+aG9EjGWPiiY7JHGSd8onINPHkfaK3JY4hA1lH0fO16MFQDtTLKW1KngcZTTB+V7yAPC8b5H7oXw+Zp3kiNlTIOiQBdDfNC+gfnhngmTNeE8s6157obT/LM9hiR13vsQ6J2NJE5jyhLZloInMu5XeOSeR8IrYV43faFnVvyGDLHdd9yPB8B/eLXgyiE7RLa1s8ljGKDItBxheaqIbtip7UrdpqLTKkb+hkTqInsJ9SfIG8nzZW41j0rgnTfqK+I69Czmc6Hjd6/TH0Dn7A6PN6mRP0ZJ259hi2YMS6PHrDWK5cFzLfWNawgszbGceXyHyMA3uciS+Rc7ImRuZvfcOa44OdwJ6szZYer1nBl8CfTWSMSUiZrMVXcZzQNdNz7WCjeSVyTXq9BvbXYO2hS6KrGcYh6xhj3iInrAOOG+vjRPfWnKfYuOgu7JM+zvhif4PIVOQQw8cNHPf6DD4Sv/eqS6KbA3TZQLfFv/J+0VXao+gefCp12RVdlveJvUexj99lfOGM4y98jEeeN8gaQI6ck8xR7CsIU8ghr12sHdYK18h60o+L7FrRd5ljEVr9lrXHHPWZBmsJfRf/gHfCjxmMYSj98VjXsgzHtZQ1EF2Db62gawN8Fn6HriE2ie+QtUxwv+g65iD2N8AeDfyFxAmurehm46cYL2NMI7pRIgaFGncq/A5dd2fRQsZbiW2VA2SK6+01LdaecoM9RBijgZ+Ua2DzWQ8Zybo5KWUtOhhVDq+hXUOOCfTHs+dEdpB3MvA+8Q2zyFi7Ll3KVtZbdNWxMUz8NfSnGhCf0sIMM7Fr8SWIO5B9yDgI21sbnpvhHfDhQwPb7RB7eJ/EJvERek70hGsaTXrr113qttijvEdiJWQ0+iTxibF4APhs1Wv6cfhhrIOMzeNYV+Mz40D8mK8+6GUk+i16IHEBPiiC/SAmit+Fn0aMjcqO2EHicypzlf/JOmaOxpECxzLGAnpHPy+2AL+NWC/PL+Hn4VN8vV98VIT/cO17+m7x/WKPoqsZ1lrsEbFgcud4YTAGzituHY6JMTeD7lA3U13TwMSt+Hojsq3FbxQSCyB7+MkCftEz0IOcshdbiQfa+5oYBroPfBGkuYwvrxyrm47Iw9pbPcCmMQ9DnWharIVxWo++WeJdNpSOoT00tca/EnJy7D3w96IDpdi9Xb9oAv31JA7TBkX+0F/EZC/FM8X/0aaP7T
wGzKOk/58JLlS0yXkDodby7wG2RRslbkCMS2Ar9H2IyxpHxaeJ7sxot1VLvCj6a6i/+DP0SHQnl/hO39H4lNdqgtgBH0U9lHFBVz2xAbGzDH/uJYZILMU7Y1fk68Dvi+4ybuIZtDmJEda2oWetvMeT++ErHMWsBvYyUKeGCbBFZ/GTPa6hg7Br8bfAQ2VAzNlDX0THhom8T94vsp9RB0usXQs8lUZvRP/g+2rEqc7Al6leY81Fj+JO9ASxBOMEloXswlnRylwQ07N6Rh1OgMEEs00Crrm8X2TCmMO1HUpgDA+4lv6QPrAQPyv3FLBX8bkyDvhQGZvE05o6g2emeOYqoJ6J7EXWNeKT4PGNWCf8fCb20ATwfeI3rK+kjRKb6boWnsWYElsRk2Dnpcc5A5euoYsJ8KAL7CsybvnunvgIx8CsvswKchwYr3Pg/CzgNRFiRim6FA/A5ZCXrDtwrSPrIr71XSTnaPtYCx0HdAMYR3CZjIPYn9gygX8aoPNYS3lny1xnDVxZyXyLgTiR/glYOA6Yn0gsRcYmx6FItJtB56MEv/sz2iGeBz8rOJQ4txqPHcGVHdZkRhxVwE6A8V3MHdhmBnsAVhScZ4hfqz4VryZ4AfqCnEBkLj4I71jDXwBzA2OJf+4n6htlzjIHTzAz8aDIEHhPxli7HLvgMfh3rJusn8Rp5D0GMRqxsiVeELke5cC/iNuYb+NQHlPMW7DoaiK+MIFfkn+Lbby+XB0NwXnin53Nhq4tP767TF6nzvJDt3l7/OZ8/qG7nq3EFqKT5iRqXuC36vzg6iT/fHPqhZtZn9QnkuHmklvBvmfiD0XGMr6acoff5zrpOQ9jnREjwQfLuvWIC1w36E6reRl0B/hA8AjXEXideZWnOW0NvXARh2i3fIbo+hqxueggn5Q5bQbf5SC+MfclDhO8QZ1t7uos9NpL9RpiBGLYNdZ6gvUfKNscsRuYPgae9OEbkP8C59v8zsZcE2ieVIu+NPSPeB5zpYg5nsMYDF87ZMiVgBcCjQPIvRM/BSZm7IevwVwbeaacO2b+HsJ/zuALB+s7RFfUfyJPQG4DLF469DHI+UW/KHeJIXw35hYBZ9bAdZSP+lnY3iRkvk2Mx9ju0F7zSYtYnjIfSGxeWWvcJZ4HxkNuizVHvELcLhzaLrFD6XNuU9gy5g7Zih8HxsOaIc/imMgfiJ7gnop4AnqDecqYO80jcT1wKzAFYlIMu7H+vkAc7IghgKfzQv0O5gTOIYL/jz3Gp/V7jMuhr5N5G4yd9p25GleZ99zgHDAP8PtM4/+Acc1y3A+MJBhG7N8cU088xnrJLeljoTsRYpqBL6RfFV8YAjPO8A7YqWAJwTHE1HgGbQA6KLmvGf1BDtwzQS7rHjGvEXnlkgfgGup+7DPWx/BVwJDwH/D1gkNwTd4MzAc5b/IK8LUh4iJjM3IR8Xej36e8B2DTOFQcCh2oNY+mrTd8Zp4nHecewfYhi4mvx/BdiP2J4nSRveinA98gOiBj4rEr9mdzodKO0QDrMY/nPTk4nFjjCGyQeWbRI2+W+HijvE3sA1vQrtfQGXILMgcc1y19PPJsyGyIeT3zcPhqxrMKMR62C26pA7aV+QnOYP4rWLS0c080j6Otw79gXMCBiL3I7+sQ8p3l8A3UC30ertfnw27JpclcAxvbBmAgGbdLWdN3IIcBN1QxZh2RB5NVxFggN95rmC8YxkE+q5U5yjFzdz5LxhbOsG4DcDd8E59PjkLO+fTBwH8RY1Zo9SGgfYv/hN/R+UBfEL9FP6dBoOv/UkYEvA/cAFkXVqcM1iKkXuZVQIwKex3OREOw/rGNucA6GeJbaNevI69ArNRoPBEdkLgLvNcjN0ypI/BZBfJth/xiRFk6lmdz6OvFF8FXiV45Gpcam4dlsK+eMsmBU8lfOKnqssQC2n2oz6zbI+bymcscS/EGfQtwGnIRXVPBB4LRIEOTlzau1ANx/ACeZaL8HXhC8GsRYgztqkvpi4H3uPYt4xD8DLC3+E7ycr3GRLG/cIcv6csmAePAVG0HnIqsK3MnzeUwNto8sYvcL39ugDfg52TNC8i1nanPkvuBlyAz5B+YW9Lz/cyxiesDO57ADO+ALR3O0/KOIkPwMfJ+xCXIFxiycZRjGONv5SBvIOZfx6HFeiEx5Bq4deLwedBh8hqJq/Gcvp6+gvkssRl4EPhQcIIN8KVPjAA/ACxMjBAzNkv+A9+EeD+oXiE/SoBBOmI95gXI8zJwEuAYoUPi28jbiPyAv4E1DfMxs86sr2qAJ5g/ydoCm3Yp4y3sp2SuDLzBvGFNDgYxPAB+B08EbLbLO/MEuFKPc/4+YH3IXQEHkQedwM59jlF0X/wdYlXHdwyQ6YS2onlWbeNCQt6TMT9H/CRe9hS7xvR7as/wUQl1SuboM/eVvBWxjdiCeNUwZpip5lKWt9XjCP6A+gb/3pGjyxFLJecU3KI2V5NPgA+1fgzP92axo7mgxDjwNhLDlMeNMsafMccVmQTQOcQUQ/4Ztp4h9gUqk4mVAfQGMbWkHKH3kKHlHDqVM7gmxCDmNgNwt3JLiV0b8l89cVMOzi4ZlB+GbjT6DODAAXnWBLqtcVBwjHLw5GJ9A9/CuAcsBW40thhC5IbfGTfhi6ogtbomeTXiJuw0oO1RLhPOQfFxA74aeUunMbFh7o+8R7DKDfN6yroOGUvvydaxPjJ2wFrD383suMV/A/P1wGTgxUW2iL2+5qWwqQw2EpgG+BV+C3ls4pDrJBZDrCK26hkzWc9JkD8N6stpI8B7WH/E0I7HqL+ozdAGiAkH6FMBPKFy3dVgaIOW94d+kwsVGwS3ipxI8AZjGuYDHi1Bnq3cIXWYa9kBozMfGzLX8gE98+HcwM4d5Br0M2tg8qJX/pb+XP3G1Pot8tr0Iy3z2AH+XXCE+hGrw+B3A4c4jDktYgJ5aZdzY3yF7REfhDb39Zjnkb9HHlG6di7KYxBfTOzapcAb0EFew9oQfBg5ffLd9A/i41lDUFxQsG7C/JA1CPoouz6oWdAnM14rHoMsgLXld+a2xtYUEkexGfIQ+OBE6zLMh4FXwZXU4Dt7+PVU/Ry4L1/naXrOA/JS/ttV24JczpDXISca1C+Dk50E5ABGP0y/3jBuqL3HotUxbQE+T97BfJ21Qc4562xeQoxKXQXHrdjUVxtivVTfQ+xqLK5MPOUWYOPIrySfYTx6H+m6lrwfvJisqW/XayA3s66Vf+V6QXbgOhLgxzYl/wG8Bx4ZuNRy1jxXOHoO+Sh9V88az5qcQE/8TD8w1nDiYWcvUTXWPzrNmzPopqu4UeyLaxOjJoUcUGRP+wEP6StnniFmKh4nVuM55DCsl5F3IjfBmoTWhflecpOuYouCejRy5iaiv0RdtlWOPLtRfpu63zEvILYolIckfkWOLDlwAa6yZr2NcQXcPdaceXgW6PMKrXUPtfL0K8RCxibYR894jRrGQO4aeuQo3kVelSC/7IgB+e7Kxu1MbZU2Bf2piQHV10MmiDFlp5gPfGalNg8/OTDuDpwfaoHMjbEOxNPAX
5S1nBsY2xkjkPtmjuZ5SUi+i9wkzwWMS/R3sAXWQTvWpoihYuQBjvpDcGysS/qso5DLLANj45r4gEC5c/CQ0A/mQZ7YN2pdiFPQN594gfgDuTJ9DHCW6Bw5f/VT1JEF8mniVGvjPev8EZ4bt2qTyHVQ+8vIxZiIOTjyCOZkhn0CDeelcQh2AL02AWVuY5fkneCMZS3fGbEFn/k1/JJy9iOPQr8kMd7VPAT8CfwY40qvvAzX2KE+IyZwTjtutUcNZ6crrCMyx/SIu6DPA+N3i7yWHMF6ohiCmKD2iafXxELUCUMeAf48Yd0RfAnuY75LfGVQJ3MNMXsxxn3awe64wVtZQ/VsTtNq/CwGcq5T1l/gY3qtDeI4Bt/o2LoacykZOTicwNpSaHiOdVaVV29ziahQLAqOUnkq+gXOFzFugPyZS/T6DPxulNMnPxp75PwxL+WukQO1lA1qURHtOVTuaLyfOjWAU0zJn7JPQmsOa9TuiHut/yAvpMfr99bWmcuzBiexKrA4NND6CGqS8aA+Ce9ImG8h7qs/j20do+qVC0Ruy+tDxVzQMcbwkJzPGv4b+Aqc5jhGcFkJ8L4cZ2PNEzUAq6Ol6HCp+dF6YwRXag20aN2Z/h5mQxEynyK3jzkXAcZnf0d+jlpXoNfE9h0it0LWHLVF9cPgmVrtt4EdZioX8V0iB9Qmfa2N1LZGWrbitVzKlfUxcEjUY56bkRNnjj/W/V2tK+EecOoTra+NPoP1Ma1/K/9ZjO/0ye8MwKmSRzW4H3aCvFN7csh/EsdS12SdJqil6e8xamkJbT0bykG5Ua5bi1oa+Tc+H+uSwe97sBmsK8aHenk6vIkELxOz6rohj0PebNcFOVaU3B6vN+md+qHDuhvri+jisvXF2zqbx1yB9cWStfcZYuFQ0wej/wZxCPz+ro6Xo0eh6C3f6xjWh8setUL5fazzuspLSLxFLjmMeVipnEiUGpGZcpCOyjTVfhnkfSF7s4C9mffwnG/Yn1KSV1E5j3GPdVA/JdZoWLuVdRzrph5zgePd8VhnRb0xVf5CbBI8HfEMYiJjq8XBmdwvPpw+EzlRNdhcAGPV+klUMO8GD00eFfzPGj6wsbVk0ynOhK6yDuwwd8TzIvCCrPv56IsAdiX3yto19LRBHEMu7Wr9qR65Rxc5mvZdEJ+5XJcI/mDiaezlMeO16G1PfinnOAP2Dwz0D+RC2ZvGfjDiM49xDxIgV58ElgP3U2ILcCZaC9X4i+eYlr1M4E3IqaLPJ2PdTHkE4MomMJY3Ev+JmO0y589fpuKb+/H+lHUl1h494lr6S9bJ+vvjKHfjhA1IToQYDv+ltQzm5pDP3fmyxo2ctk/ZpwJODjEc/Urst3Ct//S03wI1oUJ5Mtr5xLF8VKB1X+hXozEVfDbzBvaQeax7ME41AWNn/m7sTWg11gFDUPYD+RPyisS+fWoxmtgWfwd3h75FuYc9ZOAqDPWLNWjFHKg1rPl7T76c8kh84kLwc8QM6NtrenKQlCn6l1jj9ZSTY9z0ya8AX66N5fTKQGUA3Kr3pNNRX9g317PWA56AOLhWPmdADwh02GDdyBkpPkgc4gXqaBEuV8mXt6tyvXwVv3j7+vBs8aquK/9dePqqOEjOTz6fvmr3Es92l66v8Vt/6m03s/Pwy+l58SJZHTZvj9+k6F/DesNPG/afZNR3w54d9IMl2oPD3kxyUYhXPWt6HBv51FDs2iXXEKFOlTGOzLS3Bn6RPRPas8PeJ/jujmuMNYiQI9veKcWj4BiRz7jazwpOm3rMeCb3Uye0NwScIuoNhaMcb2zvr9UnsvcD9TbGK4zd0T6XN+JP2cPAuWvvEGtEeE9g2FuDGFJofyG40DXwSAEbHXScYFoK1s4Uxxnf9rS4rFWura6O/Y46bvZHqq5WOu9jrVMrbgNHgXlP0JMCWYI3cSFz2hj5Ob4X+sTeNdEH19bkXdaptVblqX6V9e0aO67285g+Ry8MbewkStmDmHmsAXCc4MMNe1/Q52VYY8D1QUBbFRwCPmqm8kId3iGuZS0FtYIs0FyWPTIDezzxrgHcEvjpstX8mWsVah2kCNXm4RPYm6rHklfKM7QvdAruD31jrJW7avNm7OMMycHSho3WxxzgJfhw8qnIp9HjGmiNNRl7O4A/eZ3oEWMNMA9qSiJHx+YrytMqr6l+nHLmuIE5PO1zQk2DvcbksbTO2fjav7brqyK3oM9DX0Osvo48Tu0jNt/2fjF2e+xRGiZYZ/A+rsb6CfNGud43Yx4QsQfRZ7/fMPauxC775WAnxEjZrkdR1hk8kea8wxgb0TMV9FY+vfW9t3ojuqHXCx5hHZ3247EXdfQdjB1YJ/LUjFmziLwF+hEHYjT6A+U+RB9c+ued7ZdYW4+1mJw5pdanV+j/hNypD4Out8QIvod1M0ftBpi7BCbx1EfB37BPVGOB43ijnuTsxbL+QLGTzAcYD/PPfGI89q0Zy4OV7C/m+8kllmOPltZoyN+zDww8ifYB8f3kiT36kQg1Csawllhba9OD2KdHnykxTe4P1D4NeBFgVfXJ0SijomXPrcZUyDBgb4Ht4VM7YU2tU/nzHPorwYHY+Taq74XEGmAt9gux5u0TKylv4rCuBHkNk+F2ng31KZPcAtjP9sz4Vp/YN6+8cDawzxL6xPol5tnKuCeUv4wb9kA+PCX2KMFL9Ga3bqghAlM3YxyF3DzETfQBax8U36H90XrOjhG5ENa89Fh/zUe8kKBf0ktZWzF2zkHI9QEXvC7sdY2VO9b/cC9Z7f/Ajg7xIt7Jqyz8xo4Oa/Hvohm7ZGtXO1NLy5iBUUF3NDI7VIBLR7u6WG3uqYEFOvDB7PE4IHvMigGriRh1qN4fFc+E7GEi1ocuAqLJHAw+smB0WKHywKp0oDtIIOkyTG53NBjDLNl2cUMT2IFklMWmBqOCQwbLZo/1YLvE2V2ZsvMVqN4yAexmjlutuoNJQKYjkZTPZ8Qfo6V2nueM8uzWx7wUQce0Au2wZmXes5lzx85TVOCRodiuW2jIbBxfxC4AssfKgDUhsx50bZG1t/fAEyrLy90R2L8CTyayd9mlC81Hhk60zu477hxAFdOyyK1mYWC7Y9QfekU8YC4K7YInE1ZqBGdHLuYfP9zJ0NouPI8d74IckNVpRSazWs6KtsOs7c7OCfWI9phdDWBuibIdemFWhZFZJorIyHLHLth2drIo4gjtroOOmSc7tTOX2RurwmVr3y8e7mWkO0HQ9aI7QYx2IXTMOlcj6sMzxh0lDjvbgNIy7GSCJyPjhKy8BDrv1PNVY5e8d9u1k/mKsJD1Uw+9sQvUEOGxI9ciAzBprFR5QJbIRmzWRbY+ZdUiY0RI6TG560bZflbCjGcrYZ4yaMz0HGXQ4JGwOyPpNVuBXoNRLHVHjO6+8HfH+UuwQ66d/93OambFWt1n57SyBuuxc0fRuTLHsIGq005udLWiWlQqco7oH7SSjEjFXUDUQyJno6yIa/WwA/eo3eYJO+6tHve77I7ZHGws7tjRTsTHKg26Wdnhrlk2PKhli8BurHSnS6pd
dL7uKAKbh8wdTBYiGbI/7iBwWNnCPfQLiMisMOj62658VnmJssG0kCXWqMEdOIVndXtQVMXOcZ/VQ+5cw9gKVq/THRM2gfdXFMgKju144JwR5YzPNVZUA4SilQSgMLLORrskuItlE2lHdaU73NbVjsGjf9Usa1AGD/4W1UXurBi4S01ZQXQd2V1+WMfYsYyfxyimXTw+GWNmuewiRhePM2MXEOyk5vtSRcCusnujP6f/d839bnpd9/zEdttnHjtgdAeAVrmYEaDCALvI+D71D3c7/Etb9aQ/sMwvqyCuMtjM+n2LLDp20lv2wxSOqywAusELVuZn9AvIxIzqORhMRVDwh2C9ECsGZUNaVysTBVhWR3em0e4d7hjk/WTpByL740B9rn1nnouHnlr/JP41Qx1ghR0gmc1UEF+wUxJxk1nlcKRVioG+jt1SScgKAHSa7AB8ddmPu5NmjL1qw8rO0iZ7RWXwKdCfeNzN57CDZ+BOmp7v5e4hZkBjBZpdqdjdZisPntrBhFV0fS92XQCdA53hGUmtFe2qVVuDTlacL3cmEHskQK22SsGYNO6W81VHs8B2xrnaqYOdk6ziBzNigDgYK19kqLjzq+i0eqTd6XbXqfiH3W4kf2TqNGbQ1gPuuIqasZOV7Jb4tpGZa5UFa2xli7L17DsGu9OF2cZMd8a6XGOy3sANqHxmZC2UFa1thbTSOLCu7a66+7qR55Nx52dnd6FgFybZrZS74YDX7I5T7Z7xWCmnf2FVIbBZs7wNbLh2z6Cz1TJaARmjNXHPoD4yqZlJyropwynZMdh1+njoSkNWQTuFje4sjfDsyqHtYz4D7qFOOapzYyUFPqfEDhHcA0aqZ8ZF5gf6k7hg8nCesSdHNlAH1A/uVGDVQavY1K+EfkB3ZomciQNwLWInO8Fc7aA3arPsSgArNtH90z/UxZ7a/dPlN7rYC1e7gprd7kmwVcwuI7sD69iyfTm7jphha8c/d8U6qbUR1fHCztdYGyFGdTE/iYF2Fy2uZ7yEH7LV4ITHjHdkAhk/fc6fdl5bPIEKOdh0sJ/o3J2ojaxZEeu5y6y3uwpZbbbHyMh1vMgPHMWN7ILWndXAK3nF329xHXAvKthj9aAAI9VqJ2FlqzyVxRPVjY4ZmKzoWCEY3pmU2JLxVX0rdwkUvrJgHLP+Ph07r7OxK8plPKbfKtDZpWwnd2rqLtyUu4K5ow27eJVZoxzRqcB7WpUzmBFlchDvUuZJ9P/sqNDfy3bcbZmyyzBTG9KueWW40L1i11JsuxvPqX9lZw5yFFd3S+qONN0Z04w5DdkW7o5m1fcp3XNclUMS5GRrMKYzk7LaWQyzuGXF697v67O/LvvE6LjnUSTI2i25S+3jVAnBmxP5Fg41cc39kA4tgTwvPA730yGCe+xj5f2Np9GUe5xczoaj34iWIKNBf7czsJZMz7nbWxop90AuQft57N5U7jMG797bPa30GhLJHfX4tkbWEmmynlWSW07J6SEzqnzbs8IaP/Y3I0oh81ULKdgbr1GqsPUAcrqd1ncL9b5NOxAl5EZRwdTWE3LlyJj5kkPTebPOyf4T7l8JFLkYy3UzSiuC0H3EHu9nP2ntWbm62peTuLZvetyXjF4A5QRz1noDIgOOt7DWU9m9Lru90C73Qu/2S3M/SniLSJDRG+4/YA8RPbUJLdMwWM/jkDuCvvRj7Rz7ZriX29E9sONeb8dVBJKBZ251jyXql5lalNNanSN/Oeg3DczoGdpRVjPuG6GVd1o3gCx1P7D25mJ/EXnxQXWwYK1TvSO/WeByjzf3nXGvrGP7D1zqDnsD6G17IrwcKLakN4QNYN8D7ic/RYuMfSsjVxEN5EobajV6AKWW1juiZwLPzNgfrRmayuSvq92AT5XxYc+r7mlxc7JBkPd7yS6xR+Lu74WDaIgaoepKzT1d5PPZV5RpZgT+mfLI2EsPhMf9y8xmC0f5cuhewWhIveL8yY8PjBbQPd0Tr7qv+2cZDcFSqO3Vod0jbW2rZp+SfguBqIm8Pmtt3G+JaFgNHA88LNkBRD9EAdyP/pAiUCRUWT6/CLFnmO9Df5FGS49RQGu/yJLsdwUYEcEfepantnWeZuwr8nTPI1GA1jCQHWm/nepNxH1bODco2sU9qY2SJb6AolGSWUDpaMTK+E0S298Q6Dn2OPo8x8i4iDSKJT15SmbUmUaY42D3PRNmSzlr6fAZHvoB9b6JQwRK5FxapNOEPMcM0YxRdNBzvI8IL6W84dPrwUbEVtkE9A6UjrJ8kAszBK39YA+V7qtwtP5h2HswfnOBLCL3YtThXXuymXmQ5i9H/Q5Tx+rveiIRsRq4XtEuYt7+Lpn6D9nWID7g/F0zy79ZF6XuSWbhch+EMlHI6IMRyVE2vX5vRffEoCaHY8ydPczd+LuyJdwb4Oj4d/NF7V3kyxhp32n9p/hp6z89ojv6z3LY+U/2xBKNdkQg7C22Wbx+o6Ane2b3kKr/o//02EOL2omyw6oHrBEl/chM2hqOy9r0iICJ8tGTIjoHxCfrhkwAMVn3blM2Yy+up3uBjH5/gMc/yqP/wJeRrFYkWgXjrMA90Gv4/PIHf2cVod91yBBjo6t1tCrm8IF+XaQYMS48TTh6BlkpVtZmu+MmZKXv2HoIRs+JekZyzrRcfqmCmFZ+t55Z7h89s6zM6JllDrd5SsWKELpMiHGPuXsR5xxyKCuLcYmoSnfsGhbtQDXP18oZvEfSkQNhJb60PFUZjByIctr8ooXLOcvvOfEs7kd1EPyB/dILcflktGbPdoXT++6q8uuSX1jBTlTNNTCv0rHoxnpgdpEGt8fVYD2yoL7DSPOXirujtKLU6Bd58EURdv6Ttwm14mr41SRbzVHOgF2sVUfOgBVW5OMlv0ajHidu+btYkO6ypQU5ymM1tXYDspOA/AGOuev0WL+eI+/veP+x7TKjx2S3i687mqud99UxkwOmZ+W8kDugK5GVWnCIuuNdkQNzB48dfgORkqud2KhqgZspLLdS2ipXqZGC3Wb2qxDcJdXYCj7n7aU212bX19R+iYW6RB6XnQ+6A4NVWMdGgsHuokOlVO5fWG8s6GfnjSt/9MbpsLDe+O7v5V/pjVn3Gr2xIeotfe2eu/cVME/5jHjnjWfRifXG7DRFxfRGK73kFzuitDUr8aF2nPDLNCFsgV/XQncgu45E3kWrnll8TM7sCGu888yB5sDwQVqx5S4A6gQ6ye34WNVm9mQ7/AtbT2vctLFdNHp/yC6sY5ujcrd8qbpv+QDd+cFzId/NnbNVYOt/3AGnnL/yT1pvjJWjGLtH9N3BmA3Srtj5nFluMLZIA74D3A65H5XHx7+Mo6H/lohkVM8kE4xtlXiYOKPvvP1dZHOPH2q/VP7JxdsaXybEf/+KbyK/uP8JRn/8BOPdTyJ7z5/6KLL3677suv/4u4x7Tv1s/O7kE59nlAnPN5vl5rK+mp8/+Gjvo6+Cfl5erWSk+Lrv/Rvf3v7w5Idibz8nigfPr6q73+e91j87zz3/z3/U99EXYvcWi4NvfkP7L9CF0Ht+4N3
/IueLx5+Ndd2953svnvjU76/6dKz/+EO/3/p0bLWZX1+vqvtK8NMfgv2pLy7vvtz63U/9/tmPxv7dPw3rPvyk637wXCTzhz4P++g7s0886xd/IdZ/6gPtAf4bTvH3EFTzzbMXhx+fvYiehbH86R/1P/FnveShjophbp9yTQ8/B35HY+ypR98kf/gN2fPVYrH52l8K8Ffo/oNvE/8pN+M+/Az/EzFn7wkV9X+Zg/m5b1P/djD/RgfjP1QeOgVn98+DB/6orwle/NRjf7XbeepvD3na7XjT367oT/yNIM7fzRU98UnyPefs/zH09fYPDtzwV0Pfh3nQ3wL4Bn+Dvx6GeZ57TzZ+8NhIDtwdNLv3l8T8uiRxHMR/3cf7vZOPb4b5h4Obt8dovIhXyasNCKKo/Hh49nGQ49dn29NX4SDnLhev37Wz1f6Xhb/wjy6q4ej8oD/p93t8Lu/Ix/OSevnKvT69MHvLV93n0/Prg+T8zFm8nuwd9QdyV3WzGMzNqf/m4gjNsHnzxV4n76kPkvVhu5zKM3JnZV4fBsnq0J9/eOfMI2c1Y2ktcavX9b2/NODe+dcnn08+LqanPp41wafhQm3uKerxOnnmLTH+ITwrz7vN0Yc3Z6W3vajOD9zT82zv9Py9z0/IPXH/zM5xdpG25YcUpExbnb8fRI7eyfHdv6DgZKRS2ubrzQXYVJXH7kjLzUg5J8ORfrwTrb/rIxRDeqeX/3WzqeOYlcM2PJHelu0g+aQr8Zteh4/pBEe5IeVqonirLezFFq3wR+tajlGgX8hzUbSutno9ihKJ/TMbD67NtFwv4/jFm5UJZ325nuVbuQdUbHzNjzWsZEwozk1bfohqduwM/DhK33aksz2zBX02O257bZHluF2ZW3fnODzixxrk+INcvwpk7jHaIDu9rr5O1ywih/Ic3ax/3Ppsczl+PD5sQpX7rllgmsp79SNp1/zLC7ToLs+t9ePVeXyNzXko0B2xcSLw0+M25ObntbnGRy3Z7vfxPi12ckvPXeyo2OnB7Z9fVXe04MirntKJ38/5ief8MSryG1Hum/nE16KL+4gP+OvCy2PSKVp+Wl5cL38ATP84SL5aCnay4Rlh4jNSCs4lPHwWQrDzm+2lxVc/g9OfQltfAWV/GFL91HKOjwmf73svwl1itffgb5/be/4k9noKhP+yv6sq+M0H/MfwAQ8JR3/Pee44f5oPeMg9fuexv5gPCJ7iA34r5N9SIR8SVP6e88dU8CEl9ehBv1rpwp9SOht3/gs17loEvX1sWzz9crUZpyuysEfOf4Ka7t+SGSPn8TCg/rCm7uFZd5yj/+3n/mrF3futuP+9ivuowvhHtdZ7OqT/u5T2MfGbziElz8luller5fUjHf5N4n8FAnqP+cn9JxTyl5H4weP+lfTy4l+ThUzzen61muMJ75bXN5vt71X96qo+hD8vJDV8gpR3/k9X9uDRypIVgOB+G+r3ltS/7Rn7hqkePNFs9ssWNHxcRXgtr/3X8XbeYCWdaFmtriWq/l7Vn1jV/X/3qj5VOEMh/NM/HlXL/4kSuuc88cvdOvo/fxfSv6kEDwvpofNYBf5PC+nhz1Emv5OA/6gkQLPXr2H3cGzo+kOJ7N7B7T97337uL84JwqcYmEdtQLFtA5JYBR9WLS632hWEP/32W9/rRfSf+w/oioP9587+v9l7/aYw/ou9lySs95gH5+CBm3F3Cvhn6xZPPesPey05vLoU73LncrHqM3O5WOKK/wU=</diagram></mxfile>
|
2205.14772/main_diagram/main_diagram.pdf
ADDED
|
Binary file (29.5 kB). View file
|
|
|
2205.14772/paper_text/intro_method.md
ADDED
|
@@ -0,0 +1,191 @@
|
| 1 |
+
# Introduction
|
| 2 |
+
|
| 3 |
+
As a result of the many recent advancements in artificial intelligence (AI), a significant interest in the technology has developed from high-stakes decision-makers in industries such as medicine, finance, and the legal system (Lipton 2018; Miller and Brown 2018; Rudin 2019). However, many modern AI systems are black boxes, obscuring undesirable biases and hiding their deficiencies (Szegedy et al. 2014; Hendrycks et al. 2021; Lipton 2018). This has resulted in unexpected consequences when these systems are deployed in the real world (O'Neil 2016; Buolamwini and Gebru 2018; McGregor 2021). Accordingly, regulatory and legal ordinances have been proposed and implemented (EU and Parliament 2016; U.S.-EU TTC 2022; European Commission 2021).
|
| 4 |
+
|
| 5 |
+
Copyright © 2023, Association for the Advancement of Artificial Intelligence (www.aaai.org). All rights reserved.
|
| 6 |
+
|
| 7 |
+
<span id="page-0-0"></span><sup>1</sup>Code for this work is available at [https://github.com/](https://github.com/craymichael/unfooling) [craymichael/unfooling](https://github.com/craymichael/unfooling)
|
| 8 |
+
|
| 9 |
+
All of this naturally leads to the question: how can we audit opaque algorithms? Post hoc explanation methods offer a way of understanding black box decision-making processes by estimating the influence of each variable on the decision value. Unfortunately, there is a medley of incentives that may motivate an organization to withhold this information, whether financial, political, personal, or otherwise. Indeed, these explainers are demonstrably deceivable (Slack et al. 2020; Baniecki 2022): if an organization is aware that its algorithms are under scrutiny, it is capable of falsifying how its algorithms operate. This begs the question: how do we know that the auditee is complying faithfully?
|
| 10 |
+
|
| 11 |
+
In this work, we explore the problem of employing perturbation-based post hoc explainers to audit black box algorithms. To the best of our knowledge, this is the first paper to address the aforementioned questions and provide a solution. We explore a multi-faceted problem setting in which the auditor needs to ascertain that the algorithms of an organization: 1) do not violate regulations or laws in their decision-making processes and 2) do not adversarially mask said processes. Our contributions are as follows:
|
| 12 |
+
|
| 13 |
+
- We formalize real-world adversarial attack and defense models for the auditing of black box algorithms with perturbation-based post hoc explainers. We then formalize the general defense problem against adversarial attacks on explainers, as well as against the pragmatic scaffolding-based adversarial attack on explainers (Slack et al. 2020).
|
| 14 |
+
- We propose a novel unsupervised conditional anomaly detection algorithm based on *k*-nearest neighbors: KNN-CAD.
|
| 15 |
+
- We propose adversarial detection and defense algorithms for perturbation-based explainers based on any conditional anomaly detector: CAD-Detect and CAD-Defend, respectively.
|
| 16 |
+
- Our approach is evaluated on several high-stakes real-world data sets for the popular perturbation-based explainers, LIME (Ribeiro, Singh, and Guestrin 2016) and SHAP (Lundberg and Lee 2017). We demonstrate that the detection and defense approaches using KNN-CAD are capable of detecting whether black box models are adversarially concealing the features used in their decision-making processes. Furthermore, we show that our method exposes the features that the adversaries attempted to mask, mitigating the scaffolding-based attack.
|
| 17 |
+
|
| 18 |
+
- We introduce several new metrics to evaluate the fidelity of attacks and defenses with respect to explanations and the black box model.
|
| 19 |
+
- We conduct analyses of the explanation fidelity, hyperparameters, and sample-efficiency of our approach.
|
| 20 |
+
|
| 21 |
+
<span id="page-1-2"></span>Local Black Box Post Hoc Explainers Of particular relevance to auditing an algorithm is in understanding individual decisions. Explainable AI (XAI) approaches afford transparency of otherwise uninterpretable algorithms (Barredo Arrieta et al. 2020) and are conducive to auditing (Akpinar et al. 2022; Zhang, Cho, and Vasarhelyi 2022). Local post hoc explainers, notably LIME and SHAP, do so by estimating the contribution of each feature to a decision value. These particular explainers are classified as model agnostic and black box, which satisfies the conditions of an audit — the auditor has no a priori knowledge of the model class and has only query-level access. Both approaches produce explanations by fitting linear models to a dataset generated by perturbing the neighborhood about a sample. Let $\mathcal{D} = (\mathcal{X} \times \mathcal{Y}) = \{(\mathbf{x}_1, y_1), (\mathbf{x}_2, y_2), \dots, (\mathbf{x}_N, y_N)\}$ be the data set where each data sample $\mathbf{x}_i \in \mathbb{R}^F$ has F features and each label $y_i \in \mathbb{N}_0$ represents one of C classes encoded as an integer in the range [0..C). We denote the black box classifier as $f: \mathcal{X} \to \mathcal{Y}$ and the explainer as g. The general problem these explainers solve in order to explain an instance $x_i$ is given by Eq. (1)
|
| 22 |
+
|
| 23 |
+
<span id="page-1-0"></span>
|
| 24 |
+
$$\underset{g_{\mathbf{x}_i} \in \mathcal{G}}{\operatorname{argmin}} \sum_{\mathbf{x}_j^{(g)} \in \mathcal{X}_i^{(g)}} \left( f(\mathbf{x}_j^{(g)}) - g_{\mathbf{x}_i}(\mathbf{x}_j^{(g)}) \right)^2 \pi_{\mathbf{x}_i}(\mathbf{x}_j^{(g)}) + \Omega(g_{\mathbf{x}_i}) \quad (1)$$
|
| 25 |
+
|
| 26 |
+
The minimization objective is a function of the linear model $g_{\mathbf{x}_i}$ from the set of all linear models $\mathcal{G}$ , the neighborhood function $\pi_{\mathbf{x}_i}$ , and the regularization function $\Omega$ . With a slight abuse of notation, $\pi_{\mathbf{x}_i}$ both generates the neighborhood of $\mathbf{x}_i$ ( $\mathcal{X}_i^{(g)}$ ) and gives the proximity of each $\mathbf{x}_j^{(g)}$ to $\mathbf{x}_i$ . The latter two functions are defined using game-theoretic means for SHAP and empirical means for LIME. Both explainers produce explanations as a set of feature contributions $\mathcal{E}_i = \{a_{ij}\}_{j=1}^F$ that describes the contribution, or importance, of each feature to the decision value $y_i$ . In our notation, $a_{ij}$ indicates the explained contribution of the $j^{\text{th}}$ feature to $y_i$ .
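To ground Eq. (1), below is a minimal Python sketch of a LIME-style weighted surrogate fit. It assumes continuous features, Gaussian perturbations, and an exponential kernel for $\pi_{\mathbf{x}_i}$; the function and its defaults are illustrative, not LIME's or SHAP's actual APIs.

```python
import numpy as np
from sklearn.linear_model import Ridge

def explain_instance(f, x, n_perturb=500, kernel_width=0.75, seed=0):
    """Fit a local linear surrogate g around x in the spirit of Eq. (1)."""
    rng = np.random.default_rng(seed)
    # Generate the neighborhood X_i^(g) by perturbing x (Gaussian here;
    # the real explainers sample according to feature type).
    X_g = x + rng.normal(size=(n_perturb, x.shape[0]))
    y_g = f(X_g)                             # query the black box f
    dist = np.linalg.norm(X_g - x, axis=1)
    pi = np.exp(-dist**2 / kernel_width**2)  # proximity weights pi_{x_i}
    # Weighted least squares; the L2 penalty plays the role of Omega(g).
    g = Ridge(alpha=1.0).fit(X_g, y_g, sample_weight=pi)
    return g.coef_                           # feature contributions {a_ij}
```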
|
| 27 |
+
|
| 28 |
+
Adversarial Attacks on Explainers Szegedy et al. were the first to discover the phenomenon of adversarial examples (Szegedy et al. 2014): human-indistinguishable manipulations of the input space that result in misclassification. It is worth noting that naturally-occurring adversarial examples have been shown to exist (Hendrycks et al. 2021), though the focus of this work concerns human adversaries. In contrast to the literature on neural network adversarial attacks (Ren et al. 2020), attacks on explainers are comparatively less explored, especially those that are perturbation-based (Baniecki 2022). Slack et al. were the first to propose an adversarial attack on black box post hoc explainers (Slack et al. 2020). The motivation for the attack arose from the observation that explainer perturbations are often off of the data
|
| 29 |
+
|
| 30 |
+
manifold. In the attack, the adversary aims to detect whether some $\mathbf{x}_i$ originates from $\mathcal{X}$ or from the explainer-generated samples, $\mathcal{X}^{(g)}$ , using a scaffolding classifier. $\mathcal{X}^{(g)}$ is generated by perturbing each $\mathbf{x}_i \in \mathcal{X}$ , creating a set of new instances $\mathcal{X}_i^{(g)}$ . The union of each $\mathcal{X}_i^{(g)}$ yields the complete set, i.e., $\mathcal{X}^{(g)} = \bigcup_i \mathcal{X}_i^{(g)}$ . If the adversary determines that the query originates from an explainer, then it can elect to predict using an *unbiased* model. In effect, the adversary lies to the explainer about how it uses the features. Formally, given a biased predictor $f_{\text{biased}}$ and an unbiased predictor $f_{\text{unbiased}}$ , the adversary can toggle between predictors using an out-of-distribution (ood) detector d according to Eq. (2).
|
| 31 |
+
|
| 32 |
+
<span id="page-1-1"></span>
|
| 33 |
+
$$f(\mathbf{x}_i) = \begin{cases} f_{\text{biased}}(\mathbf{x}_i) & \text{if } d(\mathbf{x}_i) = 1\\ f_{\text{unbiased}}(\mathbf{x}_i) & \text{otherwise} \end{cases}$$
|
| 34 |
+
(2)
|
| 35 |
+
|
| 36 |
+
The detector d is trained on the samples $\mathcal{X} \cup \mathcal{X}^{(g)}$ with a corresponding label of '0' if the sample is ood (from $\mathcal{X}^{(g)}$ ) and '1' otherwise (from $\mathcal{X}$ ). Put simply, the detector d determines if the input sample originated from real data or is an explainer-generated perturbation. If the sample origin is predicted to be an explainer, the unbiased predictor $f_{\text{unbiased}}$ makes predictions in an innocuous manner, e.g., independent of sensitive features such as race. The authors demonstrate that the attack obscures the biased use of features, such as making predictions based on race, from LIME and SHAP while maintaining near-perfect prediction fidelity on in-distribution samples.
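The toggling behavior in Eq. (2) amounts to a few lines. The sketch below assumes `d`, `f_biased`, and `f_unbiased` are already-trained callables; it illustrates the attack's control flow and is not Slack et al.'s implementation.

```python
def adversarial_f(x, d, f_biased, f_unbiased):
    # Eq. (2): answer with the biased model on suspected real data and
    # with the innocuous model on suspected explainer perturbations.
    return f_biased(x) if d(x) == 1 else f_unbiased(x)
```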
|
| 37 |
+
|
| 38 |
+
In related but tangential works, attacks have been developed on the model and explainer simultaneously. Abdukhamidov et al. introduce a gradient-free black box attack on XAI systems that manipulates model predictions without significantly altering the explanations by post hoc explainers (Abdukhamidov et al. 2022). Closely related, Zhan et al. develop a joint attack with the same implications (Zhan et al. 2022). Both works consider explainers that require gradient-level access to the model and are unsuitable for auditing. Noppel et al. propose a trigger-based neural backdoor attack on XAI systems that simultaneously manipulates the prediction and explanation of gradient-based explainers (Noppel, Peter, and Wressnegger 2022). Again, the attack scenario in our work deviates from this model.
|
| 39 |
+
|
| 40 |
+
Adversarial Defense for Explainers In a similar fashion, defense against adversarial attacks is well explored in the literature (Ren et al. 2020). However, there is relatively scarce work on defending against adversarial attacks on explainers. Ghalebikesabi et al. address the problems with the locality of samples generated by perturbation-based post hoc explainers (Ghalebikesabi et al. 2021a). They propose two variants of SHAP, the most relevant being Neighborhood SHAP, which considers local reference populations to improve on-manifold sampling. Their approach is able to mitigate the scaffolding attack (Slack et al. 2020). While this is a notable achievement, it is unclear how the approach compares to baseline SHAP with respect to quantitative and qualitative measures of explanation quality, whether it still upholds the properties of SHAP explanations, and other concerns (Ghalebikesabi et al. 2021b). Also related is a constraint-driven variant of LIME, CLIME (Shrotri et al. 2022). CLIME has been demonstrated to mitigate the scaffolding attack, but it requires hand-crafted constraints based on domain expert knowledge and requires data to be discrete.
|
| 41 |
+
|
| 42 |
+
In a step forward for explainer defense, Schneider et al. propose two approaches to detect manipulated Grad-CAM explanations (Schneider, Meske, and Vlachos 2022). The first is a supervised approach that determines whether there is (in)consistency between the explanations of an ensemble of models and explanations that are labeled as manipulated or not. The second is an unsupervised approach that determines whether the explanations of f are as sufficient to reproduce its predictions as those from an ensemble of models. They conclude that detection without domain knowledge is difficult. Aside from the differing attack model in their work, we do not require that deceptive explanations be labeled (an expensive and error-prone process) and we require that only a single model be learned (the authors use 35 CNNs as the ensemble in their experiments).
|
| 43 |
+
|
| 44 |
+
Recently, a perturbation-based explainer coined EMAP was introduced that also helps to mitigate the scaffolding attack (Vu, Mai, and Thai 2022). Similar to Neighborhood SHAP, it improves the perturbation strategy to create more in-distribution samples. This is accomplished by perturbing along orthogonal directions of the input manifold, which is demonstrated to maintain the data topology more faithfully.
|
| 45 |
+
|
| 46 |
+
Conditional Anomaly Detection Vanilla anomaly detection aims to discover observations that deviate from a notion of normality, typically in an unsupervised paradigm (Ruff et al. 2021). Of interest in this work is conditional, also referred to as contextual, anomaly detection. This type of anomaly is an observation that is abnormal in a particular context, e.g., in time or space. Formally, a set of conditional anomalies $\mathcal{A}$ is given by Eq. (3)
|
| 47 |
+
|
| 48 |
+
<span id="page-2-0"></span>
|
| 49 |
+
$$\mathcal{A} = \{ (\mathbf{x}_i, \mathbf{y}_i) \in (\mathcal{X} \times \mathcal{Y}) \mid \mathbb{P}(\mathbf{y}_i \mid \mathbf{x}_i) < \tau \}, \ \tau > 0 \quad (3)$$
|
| 50 |
+
|
| 51 |
+
where $\mathbb{P}$ is the probability measure of some probability density function (pdf) that characterizes normality and $\tau$ is a low-probability threshold separating normal and abnormal observations. Following the terminology in (Song et al. 2007), $\mathcal{X}$ is the set of environmental variables that the set of observed variables $\mathcal{Y}$ is conditioned on.
|
| 52 |
+
|
| 53 |
+
Song et al. proposed the first conditional anomaly detector using Gaussian mixture models: two sets of Gaussians model the environmental and observed variables, respectively, while a learned probability mapping function determines how the Gaussians in each set map to one another (Song et al. 2007). Since then, several approaches have been proposed based on classical and deep learning techniques; we point to this comprehensive survey for further reading (Ruff et al. 2021). In this work, we propose a new conditional anomaly detection method because 1) deep learning techniques are data-hungry and 2) most techniques do not consider or fare well with categorical data, which is plentiful in real-world high-stakes data: credit scoring (FICO 2018), recidivism risk scoring (Angwin et al. 2016), etc.
|
| 54 |
+
|
| 55 |
+
<span id="page-2-1"></span>
|
| 56 |
+
|
| 57 |
+
Figure 1: An overview of the adversarial attack and defense scenario. The top panel mirrors the formalism of the bottom panel at a higher level. Our approach defends auditors against potential adversarial behavior by the auditee, ACME. Given the explainer g-generated samples $\mathcal{X}^{(g)}$ and reference samples $\mathcal{X}$, the defense h queries the black box f. With the results, h gives the auditor the defended explanations $\mathcal{E}$ and a measure of adversarial behavior $D(\cdot,\cdot)$.
|
| 58 |
+
|
| 59 |
+
<span id="page-2-3"></span>The Attack and Defense Models In the attack scenario, an organization algorithmically makes decisions on behalf of users (customers, patients, employees, etc.) using a black box predictor f. Note that f can be a black box due to it being either uninterpretable or proprietary (or both) (Rudin 2019). We will refer to the organization as the ACME (Corporation) for the remainder of the paper. We recommend referring back to Figure 1 as a guide on how elements of the scenario relate. The attacker only provides query-level access to f, e.g., via an API, a web form, or even a social media app facial filter. In the case of f being a classifier, only the final decision value is available, not the class-wise probabilities. Moreover, the class of model that f belongs to is assumed to be unknown. Thus, to ensure that ACME meets legal or regulatory compliance, an auditor uses a local post hoc explainer, g, to determine how features are used to make individual decisions. Due to a variety of incentives (financial, political, personal, etc.), an adversary within ACME desires to conceal the behavior of f. The adversary does not know what explainer is employed but may exploit weaknesses that are shared among common black box post hoc explanation algorithms. Furthermore, the adversary is unaware of when the audit occurs and is only aware of the information contained within the queries made to f. The general attack objective is to minimize the multi-objective problem given by Eq. (4)
|
| 60 |
+
|
| 61 |
+
<span id="page-2-2"></span>
|
| 62 |
+
$$\min_{\mathbf{x}_{i} \in \boldsymbol{\mathcal{X}}^{(g)} \cup \boldsymbol{\mathcal{X}}} \Big( L_{f}\big(f(\mathbf{x}_{i}), y_{i}\big),\ \big|\{j \mid a_{ij} \in \mathcal{E}_{i}^{\text{sensitive}},\ r(\mathcal{E}_{i}^{-}, a_{ij}) < r(\mathcal{E}_{i}^{-}, a_{ik})\ \forall a_{ik} \in \mathcal{E}_{i}^{\text{harmless}}\}\big|,\ \max(\mathcal{E}_{i}^{\text{sensitive}}) \Big) \quad (4)$$
|
| 63 |
+
|
| 64 |
+
where $\mathcal{E}_i^- = \mathcal{E}_i^{\mathrm{sensitive}} \cup \mathcal{E}_i^{\mathrm{harmless}} = g(\mathbf{x}_i; f)$ is an ordered set of feature contributions for the $i^{\mathrm{th}}$ sample, $\mathcal{E}_i^{\mathrm{sensitive}}$ and $\mathcal{E}_i^{\mathrm{harmless}}$ are the contributions for the sensitive and harmless features, respectively, $L_f$ is some metric that measures the error between its two scalar arguments, and r gives the index of its
|
| 65 |
+
|
| 66 |
+
second argument within its first argument. $\mathcal{E}_i^-$ is ordered by decreasing value of the contribution magnitude to the negative (adverse) outcome. Put simply, the first objective quantifies the error between the predictor and the ground truth, the second objective is the number of feature contributions for sensitive features that are greater than those of harmless features, and the third objective quantifies the magnitude of the feature contributions for sensitive features. The advantage of this formulation is that it applies to any type of attack that intends to manipulate the attribution of features during an audit; that is, it is not tied to just the scaffolding attack.
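As a concrete reading of the second objective in Eq. (4), the hypothetical helper below counts the sensitive features ranked above every harmless feature. It assumes contributions are signed toward the adverse outcome and that both feature sets are non-empty; the function name and parameters are ours.

```python
import numpy as np

def n_sensitive_ranked_first(contribs, sensitive_idx):
    """Second objective of Eq. (4): sensitive features beating all harmless ones."""
    order = np.argsort(-np.abs(np.asarray(contribs)))  # most influential first
    rank = {int(j): r for r, j in enumerate(order)}    # feature -> r(E_i^-, a_ij)
    best_harmless = min(r for j, r in rank.items() if j not in sensitive_idx)
    return sum(rank[j] < best_harmless for j in sensitive_idx)
```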
|
| 67 |
+
|
| 68 |
+
In this work, the scaffolding attack is employed by ACME as described in Section 2. This attack is multi-objective and aims to minimize Eq. (4) by finding some $d \in \mathcal{D}_{ood}$ that minimizes Eq. (5)
|
| 69 |
+
|
| 70 |
+
$$\min_{\substack{(\mathbf{x}_{i},t_{i}) \in (\boldsymbol{\mathcal{X}}^{(g)} \cup \boldsymbol{\mathcal{X}}, \boldsymbol{\mathcal{T}})}} \left( \llbracket t_{i} = 0 \rrbracket L_{f}(f_{\text{biased}}(\mathbf{x}_{i}), f(\mathbf{x}_{i})), \\ \llbracket t_{i} = 1 \rrbracket L_{a}(g(\mathbf{x}_{i}; f_{\text{unbiased}}), g(\mathbf{x}_{i}; f)) \right)$$
|
| 71 |
+
(5)
|
| 72 |
+
|
| 73 |
+
where $\llbracket \cdot \rrbracket$ are Iverson brackets, $\mathcal{D}_{ood}$ is the set of all out-of-distribution detectors, $\mathcal{T}$ is a set of flags indicating whether $\mathbf{x}_i \in \mathcal{X}^{(g)}$ , and $L_a$ is some metric that measures the error between its two scalar arguments. Naturally, there is a trade-off between prediction fidelity (objective one) and unbiased explanation to an auditor (objective two). Section 4 describes how $f_{\text{biased}}$ and $f_{\text{unbiased}}$ are selected.
|
| 74 |
+
|
| 75 |
+
We now formulate the detection and defense objectives — here, the goal of *detection* is to determine whether f is using the scaffolding attack, whereas the goal of *defense* is to ascertain whether the scaffolding attack is used for individual samples. As we will detail in the next section, the latter can be used to "unfool" the explainers. Formally, the general sample-wise defense objective is given by Eq. (6)
|
| 76 |
+
|
| 77 |
+
<span id="page-3-1"></span>
|
| 78 |
+
$$\underset{h \in \mathcal{H}}{\operatorname{argmin}} L_h\left(h(\mathbf{x}_i; f), \mathbb{P}\left(f(\mathbf{x}_i) \mid \mathbf{x}_i\right)\right) \text{ s.t. } \mathbf{x}_i \in \mathcal{X}^{(g)} \cup \mathcal{X}$$
|
| 79 |
+
(6)
|
| 80 |
+
|
| 81 |
+
where $\mathcal{H}$ is the class of conditional distribution estimators, h is the detector that estimates the conditional pmf p with support $\{0,1\}$, and $\mathbb{P}$ is the probability measure of $p$.<sup>2</sup> The pmf p is the distribution of normality for f. Here, $L_h$ is some metric that measures the error between its two scalar arguments. The role of h is to determine the likelihood of a prediction by f given a sample. The threshold-based detection is in turn used to classify anomalies for individual samples as in Eq. (3).
|
| 82 |
+
|
| 83 |
+
To globally detect an attack, the same objective should be minimized as for defense. However, an attack is instead discriminated by having h estimate Eq. (7)
|
| 84 |
+
|
| 85 |
+
<span id="page-3-3"></span>
|
| 86 |
+
$$\left\llbracket D\left( p(f(\mathbf{x}_i) \mid \mathbf{x}_i), p(f(\mathbf{x}_i^{(g)}) \mid \mathbf{x}_i^{(g)}) \right) \ge \tau_{\text{global}} \right\rrbracket$$
|
| 87 |
+
(7)
|
| 88 |
+
|
| 89 |
+
where $\tau_{\text{global}} \geq 0$ is a threshold, $\mathbf{x}_i^{(g)} \in \mathcal{X}^{(g)}$ , $\mathbf{x}_i \in \mathcal{X}$ , and D is an asymmetric measure of statistical distance between the two distributions that is permitted to take on negative values and maintains the identity of indiscernibles (i.e.,
|
| 90 |
+
|
| 91 |
+
D(x,y)=0 iff x=y). With a properly calibrated h and sufficient samples to represent the distribution of normality, $D(\cdot,\cdot)>0$ if $d(\mathbf{x}_i)=1$ (or if f is not adversarial) and $D(\cdot,\cdot)\leq 0$ otherwise.
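One concrete choice of D satisfying these requirements, matching the ECDF-area comparison used later in CAD-Detect (Algorithm 3), is sketched below; the helper names and the trapezoidal integration are our assumptions.

```python
import numpy as np

def ecdf_auc(scores):
    """Area under the empirical CDF of the scores (trapezoidal rule)."""
    s = np.sort(np.asarray(scores))
    ecdf = np.arange(1, len(s) + 1) / len(s)
    return np.trapz(ecdf, s)

def detection_statistic(s_real, s_generated):
    # Lower scores are more abnormal; abnormal generated queries shift their
    # ECDF left, enlarging its area relative to that of the real queries.
    return ecdf_auc(s_generated) - ecdf_auc(s_real)  # compare to tau_global
```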
|
| 92 |
+
|
| 93 |
+
<span id="page-3-0"></span>Detection, Defense and KNN-CAD In this section, we describe a non-parametric approach to detect conditional anomalies based on k-nearest neighbors: k-nearest neighbors conditional anomaly detector (KNN-CAD). We then describe general algorithms for the detection (CAD-Detect) and defense (CAD-Defend) of adversarial attacks on q for any given h. For simplicity, we treat f as a classifier in describing KNN-CAD — in the case that f is a regressor, we cast it to a classifier by binning its output. Since f does not return class-wise probabilities in the problem setting, we exploit the fact that we have a single discrete observed variable $y_i$ . On a high level, the main idea of KNN-CAD is to compare the labels of the neighbors of some $x_i$ to $f(x_i)$ — the disagreement of the labels of its neighbors determines the degree of abnormality. Algorithms 1 (KNN-CAD.fit) and 2 (KNN-CAD.score\_samples) formalize this process. With samples representing normality $\mathcal{X}$ , the standard k-nearest neighbors algorithm is fit to the data in KNN-CAD.fit. These samples are collected by the auditor and are very unlikely to overlap with data that f (and d, if applicable) were trained on. After the fit is made, each $\mathbf{x}_i \in \mathcal{X}$ is scored by KNN-CAD.score\_samples, which gives the scored samples $\mathcal{S}$ (lower is more abnormal). Subsequently, the threshold $\tau$ is set to the percentile $\epsilon$ of S. The rounding operator used in computing the percentile is denoted as round( $\cdot$ ).
|
| 94 |
+
|
| 95 |
+
In KNN-CAD.score\_samples (Algorithm 2), the pmf $p(f(\mathbf{x}_i) \mid \mathbf{x}_i)$ is estimated. To do so, the k-nearest neighbors of, and distances from, each $\mathbf{x}_i$ are computed. For each $\mathbf{x}_i$, the labels of its neighbors are retrieved. For the neighbors belonging to each class, the corresponding distances are gathered (denoted by, e.g., $\mathbf{d}_0$ for each $y_i = 0$) and then aggregated by the function $\phi$. The aggregator $\phi$ estimates the statistical distance between $\mathbf{x}_i$ and its neighbors by computing, e.g., the median, mean, or maximum value. If the vector argument of $\phi$ is empty, then the output of the function is $\infty$. In the final step, we are interested in measuring the dynamic range between the aggregate distances corresponding to the label of the queried sample, $d_{y_j}$, and to the alternative label(s), $d_{\neg y_j}$. The dynamic range is defined as the ratio between two values on the logarithmic scale as in Eq. (8).
|
| 96 |
+
|
| 97 |
+
<span id="page-3-5"></span><span id="page-3-4"></span>
|
| 98 |
+
$$dynamic\_range(a,b) = \log\left(\frac{a}{b}\right)$$
|
| 99 |
+
(8)
|
| 100 |
+
|
| 101 |
+
The values given by the dynamic range of such distances are treated as logits, so we apply the standard logistic function $\sigma$ to map the values to probabilities as in Eq. (9)
|
| 102 |
+
|
| 103 |
+
$$\begin{split} \zeta(d_{\neg y_j}, d_{y_j}) &= \sigma(\text{dynamic\_range}(d_{\neg y_j}, d_{y_j})) \\ &= \frac{1}{1 + \exp(-\log\left(\frac{d_{\neg y_j}}{d_{y_j}}\right))} \\ &= \frac{1}{1 + \frac{d_{y_j}}{d_{\neg y_j}}} = \frac{d_{\neg y_j}}{d_{\neg y_j} + d_{y_j}}. \end{split} \tag{9}$$
|
| 104 |
+
|
| 105 |
+
<span id="page-3-2"></span> $<sup>^2</sup>$ To conserve space, we omit random variables of arguments to the pmf p (and $\mathbb{P}$ ) and use realizations instead.
|
| 106 |
+
|
| 107 |
+
# Method
|
| 108 |
+
|
| 109 |
+
```
Algorithm 2: KNN-CAD.score_samples(h, f, \mathcal{X}, k, \phi)
Input: h, f, \mathcal{X}, k, \phi
Output: \mathcal{S}, the scored samples
1  \mathcal{Y} \leftarrow f(\mathcal{X});
   // Get the k-nearest neighbor distances and indices
2  \mathcal{D}, \mathcal{I} \leftarrow h.neighbors(\mathcal{X}, k);
3  \mathcal{S} \leftarrow new array;
4  for (\mathbf{d}, \mathbf{i}, y_j) \in (\mathcal{D}, \mathcal{I}, \mathcal{Y}) do
5      \mathbf{y}_i \leftarrow h.\mathcal{Y}_{\text{train}}[\mathbf{i}];    // Gather neighbor labels
6      \mathbf{d}_0 \leftarrow \mathbf{d}[\llbracket \mathbf{y}_i = 0 \rrbracket]; \mathbf{d}_1 \leftarrow \mathbf{d}[\llbracket \mathbf{y}_i = 1 \rrbracket];
7      d_0 \leftarrow \phi(\mathbf{d}_0); d_1 \leftarrow \phi(\mathbf{d}_1);
8      if y_j = 1 then
9          d_{y_j} \leftarrow d_1; d_{\neg y_j} \leftarrow d_0;
10     else
11         d_{y_j} \leftarrow d_0; d_{\neg y_j} \leftarrow d_1;
12     s \leftarrow \zeta(d_{\neg y_j}, d_{y_j});
13     \mathcal{S}.append(s);
14 return \mathcal{S}
```
|
| 132 |
+
|
| 133 |
+
<span id="page-4-1"></span>We denote this probability mapping function as $\zeta$ . The algorithm KNN-CAD.score\_samples is written for the case of f being a binary classifier, but it is easily generalizable to multi-class by taking the expected value of the comparison of $d_{y_j}$ to every other $d_{\neg y_j}$ , i.e., $\mathbb{E}_{d_{\neg y_j} \in \mathbf{d}_{\neg y_j}}[\zeta(d_{\neg y_j}, d_{y_j})]$ .
|
| 134 |
+
|
| 135 |
+
In Algorithm 3, the procedure for adversarial attack detection, CAD-Detect, is detailed. First, the samples $\mathcal{X}$ representing normality are partitioned into $\mathcal{X}_{\text{train}}$ ($n_{\text{train}}$ samples) and $\mathcal{X}_{\text{test}}$ ($|\mathcal{X}| - n_{\text{train}}$ samples). Thereafter, h is fit using $\mathcal{X}_{\text{train}}$ and the (hyper-)parameters pertinent to h, $\Theta_h$. After $\mathcal{X}_{\text{test}}^{(g)}$ is collected, h estimates $p(f(\mathbf{x}_i) \mid \mathbf{x}_i)$ and $p(f(\mathbf{x}_i^{(g)}) \mid \mathbf{x}_i^{(g)})$ for $\mathcal{X}_{\text{test}}$ and $\mathcal{X}_{\text{test}}^{(g)}$, respectively. The statistical distance D between these two conditional distributions is estimated by comparing the areas under the empirical cumulative distributions of $S_{\text{test}}$ and $S_{\text{test}}^{(g)}$. We denote these areas as $a_{\text{test}}$ and $a_{\text{test}}^{(g)}$, respectively. In the algorithm, we denote the empirical cumulative distribution function as ECDF and the function that computes the area under a curve using the trapezoidal rule as AUC. Intuitively, a greater difference between $a_{\text{test}}^{(g)}$ and $a_{\text{test}}$ indicates a concentrated region of low-probability samples (likely adversary behavior of f), whereas a smaller difference indicates that an attack was likely not carried out. In practice, the number of perturbed samples generated around each sample, $n_p$, is relatively large; fewer samples are necessary to accurately estimate p.
|
| 136 |
+
|
| 137 |
+
```
Algorithm 3: CAD-Detect(f, g, h, \Theta_h, \mathcal{X}, n_{\text{train}}, \tau_{\text{global}})
Input: f, g, h, \Theta_h, \mathcal{X}, n_{\text{train}}, \tau_{\text{global}}
Output: Whether f is behaving adversarially toward g
1  \mathcal{X}_{\text{train}}, \mathcal{X}_{\text{test}} \leftarrow \mathcal{X}[:n_{\text{train}}], \mathcal{X}[n_{\text{train}}:];
2  h.fit(f, \mathcal{X}_{\text{train}}, \Theta_h);
3  \mathcal{S}_{\text{test}} \leftarrow h.score_samples(f, \mathcal{X}_{\text{test}}, \Theta_h);
4  \mathcal{X}_{\text{test}}^{(g)} \leftarrow {};
5  for \mathbf{x}_i \in \mathcal{X}_{\text{test}} do
6      \mathcal{X}_{\text{test}}^{(g)} \leftarrow \mathcal{X}_{\text{test}}^{(g)} \cup \pi_{\mathbf{x}_i}^{(g)}(\mathbf{x}_i);
7  \mathcal{S}_{\text{test}}^{(g)} \leftarrow h.score_samples(f, \mathcal{X}_{\text{test}}^{(g)}, \Theta_h);
8  a_{\text{test}} \leftarrow AUC(ECDF(\mathcal{S}_{\text{test}}));
9  a_{\text{test}}^{(g)} \leftarrow AUC(ECDF(\mathcal{S}_{\text{test}}^{(g)}));
10 return [[(a_{\text{test}}^{(g)} - a_{\text{test}}) \geq \tau_{\text{global}}]]
```
|
| 165 |
+
|
| 166 |
+
```
|
| 167 |
+
Algorithm 4: CAD-Defend(f, g, h, \mathbf{x}_i, \Theta_h, n_p)
Input: f, g, h, \mathbf{x}_i, \Theta_h, n_p
Output: \mathcal{X}_i^{(g)}, (more) in-distribution perturbations
1 \mathcal{X}_i^{(g)} \leftarrow \pi_{\mathbf{x}_i}^{(g)}(\mathbf{x}_i);
2 \mathcal{S}_i^{(g)} \leftarrow h.score_samples(f, \mathcal{X}_i^{(g)}, \Theta_h);
  // Remove abnormal samples
3 \mathcal{X}_i^{(g)} \leftarrow \mathcal{X}_i^{(g)}[\llbracket \mathcal{S}_i^{(g)} > h.\tau \rrbracket];
4 n_p' \leftarrow n_p - |\mathcal{X}_i^{(g)}|;
5 if n_p' \neq 0 then
6     \mathcal{X}_i'^{(g)} \leftarrow CAD-Defend(f, g, h, \mathbf{x}_i, \Theta_h, n_p');
7     \mathcal{X}_i^{(g)} \leftarrow \mathcal{X}_i^{(g)} \cup \mathcal{X}_i'^{(g)};
8 return \mathcal{X}_i^{(g)}
|
| 168 |
+
```
|
| 169 |
+
|
| 170 |
+
<span id="page-4-3"></span>timate p. We explore the sample-efficiency of h in Section 4.
|
| 171 |
+
|
| 172 |
+
|
| 173 |
+
|
| 174 |
+
The algorithm for defending against adversarial attacks, CAD-Defend, is detailed in Algorithm 4. The approach is a fairly straightforward modification to the neighborhood generation function $\pi_{\mathbf{x}_i}^{(g)}$ . For each sample $\mathbf{x}_i$ to be explained by g, the perturbed samples $\mathcal{X}_i^{(g)}$ generated by $\pi_{\mathbf{x}_i}^{(g)}$ are scored by h. The samples with scores below the threshold $h.\tau$ are discarded and CAD-Defend recursively builds the remaining samples until $|\mathcal{X}_i^{(g)}| = n_p$ . In practice, the recursive depth can be limited with either an explicit limit or by reducing $h.\tau$ . However, an auditor will prioritize faithful scrutiny of ACME's algorithms over the speed of the explainer.
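A minimal iterative rendering of this loop, with an explicit round cap standing in for the recursion-depth limit mentioned above; `perturb` and `score` are assumed callables wrapping $\pi_{\mathbf{x}_i}^{(g)}$ and h.

```python
import numpy as np

def cad_defend(perturb, score, tau, n_p, max_rounds=20):
    """Keep only perturbations scored as in-distribution; resample up to n_p."""
    kept = []
    for _ in range(max_rounds):
        X_g = perturb(n_p - len(kept))  # draw replacement perturbations
        s = score(X_g)
        kept.extend(X_g[s > tau])       # drop conditionally abnormal samples
        if len(kept) >= n_p:
            break
    return np.asarray(kept[:n_p])
```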
|
| 175 |
+
|
| 176 |
+
On a final note, neither CAD-Detect nor CAD-Defend is tied to KNN-CAD — rather, they are compatible with any $h \in \mathcal{H}$ (any conditional anomaly detector).
|
| 177 |
+
|
| 178 |
+
Time and Space Complexity The time and space complexity of every introduced algorithm are listed in Table 1 and derived in detail in Appendix I. We assume that KNN-CAD is used as h in Algorithms 3 and 4. Note that KNN-CAD uses the ball tree algorithm in computing nearest neighbors. We let $T_f(\cdot)$ and $S_f(\cdot)$ be the functions that give the time and space complexity of f, respectively, and R be the number of recursions in Algorithm 4. In practice, the time and space complexity of each algorithm are dominated by that of the model to audit, f. Hence, reducing the number of queries is desirable.
|
| 179 |
+
|
| 180 |
+
<span id="page-5-1"></span>
|
| 181 |
+
|
| 182 |
+
| Alg. | Type | Complexity | Practical<br>Complexity |
|
| 183 |
+
|------|---------------|------------------------------------------------------------------------------|--------------------------------------------------------|
|
| 184 |
+
| 1 | Time<br>Space | $\mathcal{O}(N((F+k)\log N + T_f(F)))$<br>$\mathcal{O}(N(F+k+S_f(F)))$ | $\mathcal{O}(NT_f(F))$<br>$\mathcal{O}(NS_f(F))$ |
|
| 185 |
+
| 2 | Time<br>Space | $\mathcal{O}(N(k \log N + T_f(F)))$<br>$\mathcal{O}(N(k + S_f(F)))$ | $\mathcal{O}(NT_f(F))$<br>$\mathcal{O}(NS_f(F))$ |
|
| 186 |
+
| 3 | Time<br>Space | $\mathcal{O}(Nn_p(k\log(Nn_p) + T_f(F)))$<br>$\mathcal{O}(Nn_p(k + S_f(F)))$ | $\mathcal{O}(Nn_pT_f(F))$<br>$\mathcal{O}(Nn_pS_f(F))$ |
|
| 187 |
+
| 4 | Time<br>Space | $\mathcal{O}(Rn_p(k\log n_p + T_f(F)))$<br>$\mathcal{O}(n_p(k + S_f(F)))$ | $\mathcal{O}(Rn_pT_f(F))$<br>$\mathcal{O}(n_pS_f(F))$ |
|
| 188 |
+
|
| 189 |
+
Table 1: The time and space complexity of each algorithm introduced in this paper: Algorithms 1 (KNN-CAD.fit), 2 (KNN-CAD.score\_samples), 3 (CAD-Detect), and 4 (CAD-Defend). With practical complexity, it is assumed that $T_f(F) \gg (k+F)\log N$ and $S_f(F) \gg k+F$ , e.g., as is the case with DNNs and most decision trees.
|
| 190 |
+
|
| 191 |
+
We explore the sample-efficiency of our approach in Section 4. If the queries are pre-computed, then our algorithms using KNN-CAD take linearithmic time and linear space.
|
2208.12294/main_diagram/main_diagram.drawio
ADDED
|
@@ -0,0 +1 @@
|
|
| 1 |
+
<mxfile host="app.diagrams.net" modified="2022-07-22T23:18:13.540Z" agent="5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/103.0.0.0 Safari/537.36" etag="AwkF60y24YVQARTHNkJX" version="20.2.0" type="google"><diagram id="6a731a19-8d31-9384-78a2-239565b7b9f0" name="Page-1">7Vxfc9o4EP8s98Bc+0DG/4HHQJL27qa9tOnddfqSUWwBGoxFZRGSfvqTbNnYkmwMsclxpTMdiCSvV7u/3dWuJHr2ZPn0joDV/AMOYNizjOCpZ1/1LMvyTJN98JbntKU/tK20ZUZQkLaZ24Y79AOKRkO0rlEA49JAinFI0arc6OMogj4ttQFC8KY8bIrD8ltXYCbeaGwb7nwQQmXYPyigc9FqeqNtx3uIZnPx6qE1SDsegL+YEbyOxPsiHMG0ZwlyMmlDPAcB3hS4sK979oRgTNNvy6cJDLlcM4mlrN5U9OYsExjRJg+Mf4zibyvwcWD6w6/WnTOFI9o3XUdo6hGEayGNnuWFjOR4ihllxjh9FnLyvq9x1tGPEy1esgG2tXradrJvM/5pXrCunjt5w/5T+ER7g/EkRJzZwdX9gjW+ZSPBcsWGRg8x//DxcrWmDAdsWgQGyKcIR5wBH5OkNWPsgWQv4fTjnnu94L2cRWP65uv94m1KngGBcAlQ/nCImb45SAEFgrOvOR9hyvlNOmVBO1VbPnsrmYU9ntNlyBpM9hWEaBax7z6bFiSs4RESith7LkXHEgUBf3zMJoB+gIeEFON0vMIooglqXTaJK05rTXEq04R0TAlewAkO2RTsqwRXTPAoDKUmFQYCGZwV+FRoErB4B/ESUvLMhmS9ows3feY5a3CEnWy21uCNRNu8YAiuaAPCAGc58S0S2RcBxv2A6XUETIsD8yZBBgT+nEMC+ihOsUbnTFNz7j+24J3DDDEoSj+/JE0JdCaVuGyM+xo7mOI1KWA3poCimCEs1tCQWRAt8QpE1cKagiUKn1NxzWH4CDl806clsZWn9OVWTOc6l1AmjtyY0jeXualX3R7cVMxfseFOhfCxUgiS57upFpcBWPCono1OiK3NRn5tmeeK2QmvunW9ZRfaMcMVKFdf3AweP4mL79uDsou3DEt18abjaXy8OejQyQ87cvLtUHGUUNE4JtxBwtTDpTabETgDiT9v6rNVL6Z9sHPXPhhnc/2VtTXz8MdxTgXXW8WkJDLZI9cTeBV/nESC6shxMJ8UaxcpMxhBwoD5CvA6AXC9EFqnAaxKLuu4Uv1VAw/2kwRazzDKcdYc/ifi7KijOOu2ECEL+Y4KKEbn2P7ps2IYuV285mJdst4aLptb73mRXLDdkSvZrq0pgwxcnena3ZmuqyvQSRqBUXDJy6BbcQQgnsNAyLCgKd5+CyjTUpS0MLvOhZyVPi2uJ17TTAgYAqqiamsO64QNg6zQWiHqgiR1gszaCAwBRY/l8qxOuOINtxxGBS88LGuSuVZJQTFeEx+Kx4pV0x2ULNeTKFFAZpAqlBJt5xN/EQDsMwD2BYBpOG0hQCH1GhDQ1UKrIeCHII6RX1Z8vULLAHAUx9uz7MnEMCaTE1D+aDSUC9rD0YHaH5i7aXWv/sHLV2+OtkpSX6bO6piL0jpubKY9f/B4ferbJ40d2x7bKZYjQyZfNBQMwBlqLKDLdURXpbbJmiTSsoyl2BnmmJmePDRagMLAkn2HZkVpHRkInvlyIGi9SZbQnbDOO3EHtrwU0XgD0zoyCLraXT3XpruvTe9K79vbaOtKYAdUugvbjjvLS7USOjsm4V3kckd+HKvolwxb45jkFXCbjkm3TDl2qsMTnZNIddT0xLJN97BUR02bVFqdpzpZDbydYsc+QDBPQd2eclLLbKusoZLqXtm6lehZ2UJDcuUxP7Hx8gqWQqp7Vbdaxf6/qXqgeF5TrjE2VrZn7ybWvbqds7qbW3a+z7S/smXLVkh1r2r3rOoaVctnq9Xdg8OVffyNiMF+GxE/mbIVN24OD47ZqhtXiXWv7nMyts+mo+nYF5Z8zvbgfUfX8C6UU7syte4hoDs5dGwInMzWI1OanKDJV2Qa6990dpHqXPnZQqX1KvEHGMf8Tp5lTAle9nZeFDOSI6zJWTAjPm8z6LYZDFtO8uxBFkGK17h06O9wo2FYvdtUWVz2U2lx+JDZwxuD4yPZci5846BIa+dKKTovNjPPYXN/sq1Nyz1lIoITTiPCZJlcuSp0b4TEeL+T8pJ0hpAfp+mz6fgommmf5yjsC+DxboG9UjdiPjIS5A1uNMVOSkAUTxnRjDy/ZyoGbDAJym8vPr69otqX5Grx2xdCnhY/IZp9dwvSDVC8CoGQLIpCVHjxNMSAFhnq5haG4izKfiD1Dbtu3NXdDlM2CvQ+ZDNHFN4xOfPmDQGrsl/pwqhN6TixOcwKpMV4NtCYtJwaHGLSs9+t2Lv8a/K3d393e+ncLTaTZb9Bza4c75uLzbbE34XQf5P807vRVtzmSJawZnfWdTQS7k7EuuLJDkNqy2UKcqrHzG3y7DDrHWYjl2Y3uBxvgCDgZ+KvbhP+UQyFr3vpafg9lC3zLc74K/eRG+27120Fs3VsZ+fqXzDfHRv9B59GePXTA9W3AI6yNd/iFaKWwksLsWToKPm67vYPy6qydXnpDoFcuT0knGy+f4rwu3jsfvrtfvVtfT0A/gdtxN57BS5iiQgk+ijyvnQGROsJ62LHtu8REATYJ/P/gK75j3/Uj/PBqmqIHIN2BaCkH5PVnIWPtMOqjUjV4ag+FqWBCAXJb9cUXqOPTUkXm8vDAjFynGaaavYFrkrjdgSwPHrloWvLaQB9TAD/3ZU+nSN/EbGld7b0RxRl8pHHFnRZO67ATmmcnEjUZB6/oOUKEwoivcP2eGDV3SxLc4ZDY0pzezjj/Yz3I+JdSgymaRjcjiuudeRLwQTyMM1TNZIbyOc/uZT8NTOgNBRlHVMUJUs+lqvsm03XHkw8R5qz5R3P8vY+53qG5xmepxoYmmXTB2VVFamRJoGqqbx55cqb5egqb7pfSmjj+LE2U7J21za7Pt0gbWQ2kuix7tTK+jr82JKi+taOLbE/t7+pmQ7f/mipff0v</diagram></mxfile>
|
2208.12294/main_diagram/main_diagram.pdf
ADDED
|
Binary file (84.2 kB). View file
|
|
|
2208.12294/paper_text/intro_method.md
ADDED
|
@@ -0,0 +1,108 @@
|
| 1 |
+
# Introduction
|
| 2 |
+
|
| 3 |
+
With increasing concerns over data privacy in machine learning, regulations like CCPA<sup>1</sup>, HIPAA<sup>2</sup>, and GDPR<sup>3</sup> have been introduced to regulate how data can be transmitted and used. To address privacy concerns, federated learning (FL) (McMahan et al. 2017; Hanzely et al. 2020; Yuan and Ma 2020; Ghosh et al. 2020) has become an increasingly popular tool to enhance privacy by allowing models to be trained without directly sharing data. Depending on how data is split across parties, FL can be mainly classified into two categories (Yang et al. 2019a): Horizontal Federated Learning (Geiping et al. 2020; Hamer, Mohri, and Suresh 2020; Karimireddy et al. 2020; Li et al. 2020) and Vertical Federated Learning (Vepakomma et al. 2018a; Gupta and Raskar 2018; Abuadbba et al. 2020; Ceballos et al. 2020). In Horizontal FL (hFL), data is split by entity (e.g. a person), and the data entities owned by each party are disjoint from those of other parties. In Vertical FL (vFL), a data entity is split into different attributes (e.g. features and labels of the same person), and each party might own the same data entities but different attributes of them. In this paper, we focus on the setting of hFL, which enables devices (e.g. mobile phones) to collaboratively
|
| 4 |
+
|
| 5 |
+
learn a machine learning model (*i.e.* a binary classifier) while keeping all the training and testing data on the device.
|
| 6 |
+
|
| 7 |
+
Although raw data is not shared in federated learning, sensitive information may still be leaked when gradients and/or model parameters are communicated between parties. In hFL, (Zhu, Liu, and Han 2019) showed that an honest-but-curious server can uncover the raw features and labels of a device by knowing the model architecture, parameters, and communicated gradient of the loss on the device's data. Based on their techniques, (Zhao, Mopuri, and Bilen 2020) showed that the ground truth label of an example can be extracted by exploiting the directions of the gradients of the weights connected to the logits of different classes. Researchers have shown that vFL can still leak data information indirectly. For example, (Li et al. 2022) demonstrated that the gradient norms and directions can leak label information in the two-party vFL setting. However, the prior work on FL privacy mostly focuses on model training and there can also be privacy leaks from model evaluation. Specifically, the private label information owned by clients/devices can be leaked to the server when computing evaluation metrics in FL.
|
| 8 |
+
|
| 9 |
+
Previous work (Matthews and Harel 2013a) has shown that releasing the actual ROC curves on a private test dataset can allow an attacker with some prior knowledge of the test dataset to recover sensitive information about the dataset. Some recent works (Chen et al. 2016; Stoddard, Chen, and Machanavajjhala 2014) proposed to provide differential privacy (DP) for plotting and releasing ROC curves. However, they face several challenges, such as how to privately compute the true positive rate (TPR) and false positive rate (FPR) values and how many and which thresholds to pick (Stoddard, Chen, and Machanavajjhala 2014). Moreover, they are not designed for, and are not applicable to, evaluating FL models. As ground-truth labels often contain highly sensitive information, e.g., whether a user has made a purchase (in online advertising) or whether a user has a disease (in disease prediction) (Vepakomma et al. 2018b; Li et al. 2022), they cannot be directly shared between clients and the server, nor among clients. Hence, preventing label leakage from the AUC computation in the general setting of FL is challenging.
|
| 10 |
+
|
| 11 |
+
To address the challenge, we consider the area under the Receiver Operating Characteristic (ROC) curve as the target AUC to evaluate the accuracy of a binary classifier.
|
| 12 |
+
|
| 13 |
+
<sup>\*</sup>Work was done when the author was working at ByteDance Inc.
|
| 14 |
+
|
| 15 |
+
Copyright © 2023, Association for the Advancement of Artificial Intelligence (www.aaai.org). All rights reserved.
|
| 16 |
+
|
| 17 |
+
<sup>&</sup>lt;sup>1</sup>California Consumer Privacy Act
|
| 18 |
+
|
| 19 |
+
<sup>&</sup>lt;sup>2</sup>Health Insurance Portability and Accountability Act
|
| 20 |
+
|
| 21 |
+
<sup>&</sup>lt;sup>3</sup>General Data Protection Regulation, European Union
|
| 22 |
+
|
| 23 |
+
Since the class label is often the most sensitive information in a prediction task, our goal in this paper is to achieve *label differential privacy* (Ghazi et al. 2021) while plotting the ROC curve. To this end, we propose to adopt the Laplace mechanism<sup>4</sup> to add noise to the intermediate information shared between the server and clients when plotting the ROC curve and computing the area under it as the AUC. We conduct extensive experiments to demonstrate the effectiveness of our proposed approach.
|
| 24 |
+
|
| 25 |
+
# Method
|
| 26 |
+
|
| 27 |
+
We focus on the setting of hFL, which contains one server and multiple clients (devices). The labels are distributed across the clients, and our proposed approach computes the evaluation metric AUC with label differential privacy. We start by introducing some background knowledge for our work.
|
| 28 |
+
|
| 29 |
+
Differential privacy (DP) (Dwork et al. 2006; Dwork and Roth 2014a) is a quantifiable and rigorous privacy framework. We adopt the following definition:
|
| 30 |
+
|
| 31 |
+
**Definition 0.1** (Differential Privacy). Let $\epsilon, \delta \in \mathbb{R}_{\geq 0}$. A randomized mechanism $\mathcal{M}$ is $(\epsilon, \delta)$-differentially private (i.e. $(\epsilon, \delta)$-DP) if, for any two neighboring training datasets D, D' and any subset S of the possible outputs of $\mathcal{M}$, we have
|
| 32 |
+
|
| 33 |
+
$$\Pr[\mathcal{M}(D) \in S] \le e^{\epsilon} \cdot \Pr[\mathcal{M}(D') \in S] + \delta.$$
|
| 34 |
+
|
| 35 |
+
If $\delta = 0$ , then M is $\epsilon$ -differentially private (i.e. $\epsilon$ -DP).
|
| 36 |
+
|
| 37 |
+
In our work, we focus on protecting the privacy of label information. Following (Ghazi et al. 2021), we define label differential privacy as the following:
|
| 38 |
+
|
| 39 |
+
**Definition 0.2** (Label Differential Privacy). Let $\epsilon, \delta \in \mathbb{R}_{\geq 0}$. A randomized mechanism $\mathcal{M}$ is $(\epsilon, \delta)$-label differentially private (i.e. $(\epsilon, \delta)$-LabelDP) if, for any two neighboring training datasets D, D' that differ in the label of a single example and any subset S of the possible outputs of $\mathcal{M}$, we have
|
| 40 |
+
|
| 41 |
+
$$\Pr[\mathcal{M}(D) \in S] \le e^{\epsilon} \cdot \Pr[\mathcal{M}(D') \in S] + \delta.$$
|
| 42 |
+
|
| 43 |
+
If $\delta=0$ , then M is $\epsilon$ -label differentially private (i.e. $\epsilon$ -LabelDP).
|
| 44 |
+
|
| 45 |
+
Our proposed approach also shares the same setting as local DP (Duchi, Jordan, and Wainwright 2013; Erlingsson, Pihur, and Korolova 2014; Kasiviswanathan et al. 2008; Bebensee 2019), which assumes that the data collector (the server in our paper) is untrusted. Following the same setting as local DP, in our proposed approach each client locally perturbs their sensitive information with a DP mechanism and transfers the perturbed version to the server. After receiving all clients' perturbed data, the server calculates the statistics and publishes the resulting AUC. We define local DP as the following:
|
| 46 |
+
|
| 47 |
+
**Definition 0.3** (Local Differential Privacy). Let $\epsilon>0$ and $1>\delta\geq 0$. A randomized mechanism $\mathcal{M}$ is $(\epsilon,\delta)$-local differentially private (i.e. $(\epsilon,\delta)$-LocalDP) if and only if, for any pair of input values v and v' in domain D and any subset S of the possible outputs of $\mathcal{M}$, we have

$$\Pr[\mathcal{M}(v) \in S] \le e^{\epsilon} \cdot \Pr[\mathcal{M}(v') \in S] + \delta.$$

If $\delta=0$, then $\mathcal{M}$ is $\epsilon$-local differentially private (i.e. $\epsilon$-LocalDP).

**Definition 0.4** (Sensitivity). Let $d$ be a positive integer, $\mathcal{D}$ be a collection of datasets, and $f: \mathcal{D} \to \mathbb{R}^d$ be a function. The sensitivity of $f$, denoted $\Delta f$, is defined by $\Delta f = \max ||f(D) - f(D')||_p$, where the maximum is over all pairs of datasets $D$ and $D'$ in $\mathcal{D}$ differing in at most one element and $||\cdot||_p$ denotes the $l_p$ norm.

**Definition 0.5** (Laplace Mechanism). The Laplace mechanism defined by (Dwork and Roth 2014b) preserves $(\epsilon,0)$-differential privacy if the random noise is drawn from the Laplace distribution with parameter $\Delta/\epsilon$, where $\Delta$ is the $l_1$ sensitivity and $\epsilon$ is the corresponding privacy budget.
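
To make the mechanism concrete, here is a minimal sketch (ours, not the paper's code) of releasing a single count with Laplace noise; the function name and example values are illustrative:

```python
import numpy as np

def laplace_mechanism(true_value: float, sensitivity: float, epsilon: float) -> float:
    """Release true_value with epsilon-DP by adding Laplace(sensitivity / epsilon) noise."""
    return true_value + np.random.laplace(loc=0.0, scale=sensitivity / epsilon)

# A count query has l1 sensitivity 1: changing one record moves the count by at most 1.
noisy_count = laplace_mechanism(true_value=42.0, sensitivity=1.0, epsilon=0.5)
```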

In this paper, we leverage two DP properties (Dwork and Roth 2014b; McSherry and Talwar 2007) to help us build a complex workflow that still has a DP guarantee: sequential composition and post-processing. Let $M_1(\cdot)$ and $M_2(\cdot)$ be $\epsilon_1$- and $\epsilon_2$-differentially private algorithms; sequential composition guarantees that releasing the outputs of $M_1(D)$ and $M_2(D)$ satisfies $(\epsilon_1+\epsilon_2)$-DP. Post-processing an output of a DP algorithm does not incur any additional loss of privacy. For example, releasing $M_1(D)$ and $M_2(M_1(D))$ still satisfies $\epsilon_1$-DP.

In a binary classification problem, given a threshold $\theta$, a sample with predicted score $s_i$ is predicted to be 1 if $s_i \geq \theta$. Given the ground-truth labels and the predicted labels (at a given threshold $\theta$), we can quantify the accuracy of the classifier on the dataset with true positives (TP($\theta$)), false positives (FP($\theta$)), false negatives (FN($\theta$)), and true negatives (TN($\theta$)); a short sketch after the list below illustrates these counts.

- True positives, $TP(\theta)$, are the data points in the test set whose true label and predicted label both equal 1, *i.e.* $y_i=1$ and $s_i \geq \theta$.
- False positives, $FP(\theta)$, are the data points in the test set whose true label is 0 but whose predicted label is 1, *i.e.* $y_i = 0$ and $s_i \geq \theta$.
- False negatives, $FN(\theta)$, are the data points in the test set whose true label is 1 but whose predicted label is 0, *i.e.* $y_i = 1$ and $s_i < \theta$.
- True negatives, $TN(\theta)$, are the data points in the test set whose true label is 0 and whose predicted label is 0, *i.e.* $y_i = 0$ and $s_i < \theta$.
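
The sketch referenced above (ours; `y_true` and `scores` are assumed arrays of ground-truth labels and prediction scores) computes these four counts at one threshold:

```python
import numpy as np

def confusion_counts(y_true: np.ndarray, scores: np.ndarray, theta: float):
    """Return (TP, FP, FN, TN) at decision threshold theta."""
    y_pred = (scores >= theta).astype(int)
    tp = int(np.sum((y_true == 1) & (y_pred == 1)))
    fp = int(np.sum((y_true == 0) & (y_pred == 1)))
    fn = int(np.sum((y_true == 1) & (y_pred == 0)))
    tn = int(np.sum((y_true == 0) & (y_pred == 0)))
    return tp, fp, fn, tn
```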

The receiver operating characteristic (ROC) curve plots TPR (y-axis) vs. FPR (x-axis) over all possible thresholds $\theta$, and the AUC is the area under the ROC curve. The true positive rate (TPR) (i.e. recall) is defined as $TPR(\theta) = \frac{TP(\theta)}{TP(\theta) + FN(\theta)}$ and the false positive rate (FPR) is defined as $FPR(\theta) = \frac{FP(\theta)}{FP(\theta) + TN(\theta)}$. If the classifier is good, the ROC curve will be close to the left and upper boundary and the AUC will be close to 1.0 (a perfect classifier). On the other hand, if the classifier is poor, the ROC curve will be close to the diagonal from (0,0) to (1,1), with AUC around 0.5 (random prediction).

<sup>4</sup>Other mechanisms such as the Gaussian mechanism are applicable too.

Researchers have shown that AUC computation can cause privacy leakage. Matthews and Harel (Matthews and Harel 2013b) demonstrate that, by using a subset of the ground-truth data and the computed ROC curve, the data underlying the ROC curve can be reproduced accurately. Stoddard et al. (Stoddard, Chen, and Machanavajjhala 2014) show that an attacker can determine an unknown label by simply enumerating over all labels, guessing the labels, and then checking which guesses lead to the given ROC curve. They propose a differentially private ROC curve computation algorithm: they first privately choose a set of thresholds (with privacy budget $\epsilon_1$); by modeling TP and FP values as one-sided range queries, they then compute noisy TPR and FPR values (using privacy budget $\epsilon_2$); and they leverage a post-processing step to enforce the monotonicity of the TPRs and FPRs. However, this method is not designed for FL settings. In this paper, we aim to provide a differentially private way to compute AUC in the FL setting.

In our hFL setting, there are multiple label parties (i.e. clients/devices) that own private labels (i.e. Y) and there is a central non-label party (i.e. server) that is responsible for computing global AUC from all clients. The model is trained using the normal hFL protocol.

Our work focuses on the evaluation stage, where the goal of the server is to compute the global AUC without letting clients directly share their private test data. In other words, clients cannot directly send their test data (i.e. private labels and prediction scores) to the server for it to compute the AUC. Specifically, we are interested in protecting label information, and it is therefore required that the information exchanged between client and server excludes the ground-truth test labels (Y) and the corresponding prediction scores.

In this section, we introduce how to compute the AUC with label differential privacy by leveraging the Laplace mechanism. We use the Laplace mechanism as an example; the setting for other DP mechanisms, such as the Gaussian mechanism, is the same.

The workflow of this method is shown in Figure 1. The algorithm has six steps (a minimal end-to-end sketch follows the list):

- 1. **Clients Execute.** Each client $C_k$ computes the prediction scores $s_k = f(X_k)$ for all the data points it owns.
- 2. **Clients Execute.** For each decision threshold $\theta \in \Theta$, client $C_k$ computes four local statistics $\mathrm{TP}_k^{\theta}, \mathrm{TN}_k^{\theta}, \mathrm{FP}_k^{\theta}$, and $\mathrm{FN}_k^{\theta}$ given the prediction scores $s_k$.
- 3. **Clients Execute.** Client $C_k$ adds DP-guaranteed perturbation to each local statistic $\mathrm{TP}_k^\theta, \mathrm{TN}_k^\theta, \mathrm{FP}_k^\theta$, and $\mathrm{FN}_k^\theta$, obtaining the corresponding noisy statistics $\mathrm{TP}_k^{\theta'}, \mathrm{TN}_k^{\theta'}, \mathrm{FP}_k^{\theta'}$, and $\mathrm{FN}_k^{\theta'}$ for each $\theta \in \Theta$. Client $C_k$ sends all noisy statistics to the server.
- 4. **Server Executes.** For each $\theta \in \Theta$, the server aggregates the noisy statistics from all the clients: $\mathrm{TP}^{\theta} = \sum_{k=1}^{K} \mathrm{TP}_{k}^{\theta'}$, $\mathrm{TN}^{\theta} = \sum_{k=1}^{K} \mathrm{TN}_{k}^{\theta'}$, $\mathrm{FP}^{\theta} = \sum_{k=1}^{K} \mathrm{FP}_{k}^{\theta'}$, and $\mathrm{FN}^{\theta} = \sum_{k=1}^{K} \mathrm{FN}_{k}^{\theta'}$.
- 5. **Server Executes.** For each $\theta \in \Theta$, the server computes the corresponding $TPR^{\theta} = \frac{TP^{\theta}}{TP^{\theta} + FN^{\theta}}$ and $FPR^{\theta} = \frac{FP^{\theta}}{FP^{\theta} + TN^{\theta}}$.
- 6. **Server Executes.** The server plots TPR (y-axis) vs. FPR (x-axis) over all possible thresholds $\theta$ and computes the area under the corresponding curve as the AUC.
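
The minimal end-to-end sketch promised above (our illustration, not the paper's implementation; the helper names and the per-statistic budget are assumptions):

```python
import numpy as np

def client_noisy_stats(y_true, scores, thresholds, eps_prime):
    """Steps 1-3 on one client: per-threshold counts, each perturbed with Lap(1/eps')."""
    stats = []
    for theta in thresholds:
        y_pred = (scores >= theta).astype(int)
        tp = np.sum((y_true == 1) & (y_pred == 1))
        tn = np.sum((y_true == 0) & (y_pred == 0))
        fp = np.sum((y_true == 0) & (y_pred == 1))
        fn = np.sum((y_true == 1) & (y_pred == 0))
        stats.append([c + np.random.laplace(scale=1.0 / eps_prime)
                      for c in (tp, tn, fp, fn)])
    return np.array(stats)  # shape (|Theta|, 4), sent to the server

def server_auc(per_client_stats):
    """Steps 4-6 on the server: aggregate noisy counts, form ROC points, integrate."""
    agg = np.sum(per_client_stats, axis=0)  # sum noisy statistics over the K clients
    tp, tn, fp, fn = agg[:, 0], agg[:, 1], agg[:, 2], agg[:, 3]
    tpr = tp / (tp + fn)
    fpr = fp / (fp + tn)
    order = np.argsort(fpr)  # a real system would also enforce monotonicity here
    return np.trapz(tpr[order], fpr[order])  # trapezoidal area under the ROC curve
```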

In this section, we explain in detail how to perturb TP, TN, FP, and FN for each $\theta \in \Theta$ on each client. It is worth mentioning that both the Gaussian and Laplace mechanisms can be leveraged to generate the corresponding DP noise; without loss of generality, we use Laplace as an example. The Laplace mechanism preserves $(\epsilon,0)$-differential privacy if the random noise is drawn from the Laplace distribution $\operatorname{Lap}(\frac{\Delta}{\epsilon})$ with parameter $\Delta/\epsilon$, where $\Delta$ is the $l_1$ sensitivity (Dwork and Roth 2014b) and $\epsilon$ is the corresponding privacy budget. We name this method DPAUC<sub>Lap</sub>. The noise is added as follows:

- 1. Adding noise to TP: Each client $C_k$ sets the corresponding sensitivity $\Delta_{\mathrm{TP}_k^\theta} = 1$ for each $\theta \in \Theta$. Given a privacy budget $\epsilon_{\mathrm{TP}}$, client $C_k$ draws the random noise from $\mathrm{Lap}(1/\epsilon_{\mathrm{TP}})$ and adds it to $\mathrm{TP}_k^\theta$ to obtain $\mathrm{TP}_k^{\theta'}$.
- 2. Adding noise to FP: Each client $C_k$ sets the corresponding sensitivity $\Delta_{\mathrm{FP}_k^\theta} = 1$ for each $\theta \in \Theta$. Given a privacy budget $\epsilon_{\mathrm{FP}}$, client $C_k$ draws the random noise from $\mathrm{Lap}(1/\epsilon_{\mathrm{FP}})$ and adds it to $\mathrm{FP}_k^\theta$ to obtain $\mathrm{FP}_k^{\theta'}$.
- 3. Adding noise to TN: Each client $C_k$ sets the corresponding sensitivity $\Delta_{\mathrm{TN}_k^\theta} = 1$ for each $\theta \in \Theta$. Given a privacy budget $\epsilon_{\mathrm{TN}}$, client $C_k$ draws the random noise from $\mathrm{Lap}(1/\epsilon_{\mathrm{TN}})$ and adds it to $\mathrm{TN}_k^\theta$ to obtain $\mathrm{TN}_k^{\theta'}$.
- 4. Adding noise to FN: Each client $C_k$ sets the corresponding sensitivity $\Delta_{\mathrm{FN}_k^\theta} = 1$ for each $\theta \in \Theta$. Given a privacy budget $\epsilon_{\mathrm{FN}}$, client $C_k$ draws the random noise from $\mathrm{Lap}(1/\epsilon_{\mathrm{FN}})$ and adds it to $\mathrm{FN}_k^\theta$ to obtain $\mathrm{FN}_k^{\theta'}$.

**Privacy Analysis.** Based on the composition theorem of DP (Dwork and Roth 2014b), the privacy budget for each decision threshold $\theta$ is $(\epsilon_{TP} + \epsilon_{TN} + \epsilon_{FP} + \epsilon_{FN})$. Since we have $|\Theta|$ decision thresholds, the total DP privacy budget is $\epsilon = |\Theta| \cdot (\epsilon_{TP} + \epsilon_{TN} + \epsilon_{FP} + \epsilon_{FN})$. Without loss of generality, we set $\epsilon_{TP} = \epsilon_{TN} = \epsilon_{FP} = \epsilon_{FN} = \epsilon'$ in our paper, so the total DP budget is $4|\Theta|\epsilon'$.
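
As a worked example of this accounting (our numbers, purely illustrative): with $|\Theta| = 20$ thresholds and $\epsilon' = 0.05$ per statistic, the total budget is $\epsilon = 4 \cdot 20 \cdot 0.05 = 4$.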

| $\operatorname{Client}_k,\ k\in[1,K]$ | Messages exchanged | Server |
|---|---|---|
| 1. $\operatorname{Client}_k$ computes prediction scores $s^k = f(X_k)$ for its local data $X_k$ | Current model $f$ (from the server to $\operatorname{Client}_k$) | |
| 2. For each decision threshold $\theta \in \Theta$, $\operatorname{Client}_k$ computes four local statistics $\mathrm{TP}_k^{\theta}, \mathrm{TN}_k^{\theta}, \mathrm{FP}_k^{\theta}$, and $\mathrm{FN}_k^{\theta}$ for $s^k$ | | |
| 3. $\operatorname{Client}_k$ adds DP noise to each local statistic, obtaining $\mathrm{TP}_k^{\theta'}, \mathrm{TN}_k^{\theta'}, \mathrm{FN}_k^{\theta'}$, and $\mathrm{FP}_k^{\theta'}$ for $\theta \in \Theta$ | $\mathrm{TP}_k^{\theta'}, \mathrm{TN}_k^{\theta'}, \mathrm{FN}_k^{\theta'}$, and $\mathrm{FP}_k^{\theta'}$ (from $\operatorname{Client}_k$ to the server) | 4. For each $\theta \in \Theta$, the server aggregates $\mathrm{TP}_k^{\theta'}, \mathrm{TN}_k^{\theta'}, \mathrm{FN}_k^{\theta'}$, and $\mathrm{FP}_k^{\theta'}$ to generate $\mathrm{TP}^{\theta'}, \mathrm{TN}^{\theta'}, \mathrm{FN}^{\theta'}$, and $\mathrm{FP}^{\theta'}$ |
| | | 5. For each $\theta \in \Theta$, the server computes $\mathrm{TPR}^{\theta}$ and $\mathrm{FPR}^{\theta}$ |
| | | 6. The server computes the area under the ROC curve as the final AUC |

Figure 1: Illustration of our proposed DPAUC, with the Laplace mechanism as an example.

2208.13753/main_diagram/main_diagram.drawio
ADDED

The diff for this file is too large to render.
See raw diff

2208.13753/paper_text/intro_method.md
ADDED

@@ -0,0 +1,139 @@
# Introduction

Generating photo-realistic images is a critical task in computer vision research. In this task, a generative model is designed to learn the underlying data distribution of a given set of images and to be capable of synthesizing new samples from the learned distribution. To this end, a series of methods has been proposed, including VAEs (Kingma and Welling 2014; Van Den Oord, Vinyals et al. 2017), GANs (Goodfellow et al. 2014; Radford, Metz, and Chintala 2015), flow-based methods (Dinh, Krueger, and Bengio 2014; Kingma and Dhariwal 2018), and the trending diffusion models (DMs) (Sohl-Dickstein et al. 2015; Ho, Jain, and Abbeel 2020). The quality of the generated images has improved rapidly with the contribution of these lines of work. Moreover, the task itself has also evolved from object-centric image synthesis without conditions to complex scene image generation, sometimes based on multi-modal conditions (e.g., texts, layouts, labels, and scene graphs).

![](_page_0_Picture_9.jpeg)

Figure 1: Illustration of *Frido*. Given a cross-modal condition, *Frido* generates images (1) in a coarse-to-fine manner, from structure to object details, producing outputs with (2) high semantic correctness and quality. Note that existing models such as the LDMs are not designed to distinguish between high- and low-level visual information.

Recently, diffusion models (Ho, Jain, and Abbeel 2020; Nichol and Dhariwal 2021; Ho et al. 2022; Rombach et al. 2022; Ramesh et al. 2022) have demonstrated a remarkable capability for high-quality image synthesis and outperform other classes of generative approaches on multiple tasks, including but not limited to unconditional image generation, text-to-image generation, and image super-resolution. Despite the encouraging progress, diffusion models may fall short when the target images are more complex and the conditioning inputs are highly abstractive. The composition of objects and parts, along with high-level semantic relations, is prevalent in those tasks; these aspects are less present in earlier object-centric benchmarks and may be essential to higher-quality generation.

In particular, we point out two major challenges in existing DM works. *First*, most existing DMs deal with feature maps or image pixels at a single scale/resolution, which might not be able to capture image semantics or compositions in real-world complex scenes. Taking the first row of Figure 1 as an example, it can be seen that while LDM (Rombach et al. 2022) generates images containing a "person" given the text condition, the semantic structures of "riding a motorcycle" and "mountain in the background" are not sufficiently produced. *Second*, expensive computational resources are typically required for DMs during training and testing due

<sup>\*</sup>Work done during a research internship at Microsoft. <sup>†</sup>Project lead.

<sup>1</sup>Code is available at [https://github.com/davidhalladay/Frido.](https://github.com/davidhalladay/Frido)

![](_page_1_Picture_0.jpeg)

Figure 2: Generated examples of *Frido* on various tasks. From left to right, we show examples of text-to-image (T2I) on COCO 2014, scene-graph-to-image (SG2I) on Visual Genome, layout-to-image (Layout2I) on COCO-stuff, and unconditional image generation on Landscape, CelebA, and LSUN-bed. All images are of 256x256 resolution. Note that, for conditional generation, we adopt classifier-free guidance with s=1.5 at test time. Please refer to the supplementary material for the full-scale version.

to the iterative denoising processes, especially for producing high-resolution outputs. This not only limits accessibility but also results in massive carbon emissions. Therefore, a computationally efficient diffusion model that leverages coarse, high-level synthesized outputs to introduce multi-scale visual information would be desirable.

To address these limitations, we propose *Frido*, a Feature Pyramid Diffusion model for complex scene image generation.<sup>2</sup> *Frido* is a novel multi-scale coarse-to-fine diffusion and denoising framework, which allows synthesizing images with enhanced global structural fidelity and realistic object details. Specifically, we introduce a novel *feature pyramid U-Net* (PyU-Net) with a *coarse-to-fine modulation* design, enabling our model to denoise visual features at multiple spatial scales in a top-down fashion. These multi-scale features are produced by our MS-VQGAN, a newly designed multi-scale variant of VQGAN (Esser, Rombach, and Ommer 2021) that encodes images into multi-scale visual features (discrete latent codes). As can be seen in Figure 1, as the features are gradually denoised, the images are reconstructed in a coarse-to-fine manner (decoded by our MS-VQGAN decoder), from global structures to fine-grained details. In contrast, a recent competitive diffusion model (Rombach et al. 2022) reconstructs images uniformly across spatial scales.

*Frido* is a generic diffusion framework that can synthesize images from diverse, multi-modal inputs, including texts, box layouts, scene graphs, and labels. Moreover, our model introduces minimal extra parameters while allowing us to speed up the notoriously slow inference of conventional DMs. Extensive experiments demonstrate the effectiveness of the new designs. Our contributions are summarized as follows. (i) We propose *Frido*, a novel diffusion model to generate photo-realistic images from multi-modal inputs, with a *coarse-to-fine* prior that is under-explored in the DM paradigm. (ii) Empirically, we achieve *5 new state-of-the-art* results, including layout-to-image on COCO and OpenImages, scene-graph-to-image on COCO and Visual Genome, and label-to-image on COCO, all complex scenes with highly abstractive conditions. (iii) In practice, *Frido* runs fast at inference, shown by a head-to-head comparison with an already fast diffusion model, the LDM.

# Method

Multiple lines of work for generating photo-realistic images have been proposed, including VAEs, GANs, and invertible flows, achieving impressive results for object-centric images. However, VAEs suffer from blurry outputs, GANs are notoriously hard to train and lack diversity, and flow-based models suffer from shape distortions due to imperfect inverse transforms. Our work belongs to the paradigm of diffusion models (DMs), which have been shown to synthesize the highest-quality images among all deep generative methods. For completeness, we summarize the fundamentals of DMs and a recent improvement, Latent Diffusion Models (LDMs) (Rombach et al. 2022).

**Diffusion Models for Image Generation** A diffusion model (DM) contains two stages: a forward (diffusion) and a backward (denoising) process. In the forward process, the given data $\mathbf{x}_0 \sim q(\mathbf{x}_0)$ is gradually destroyed into an approximately standard normal distribution $\mathbf{x}_T \sim p(\mathbf{x}_T)$ over $T$ steps, where $q$ and $p$ denote the given data manifold and the standard Gaussian distribution, respectively, and $\mathbf{x}$ denotes a data point from $q$. The diffusion process, formulated by Ho, Jain, and Abbeel (2020), is as follows:

$$q(\mathbf{x}_{1:T}|\mathbf{x}_0) = \prod_{t=1}^{T} q(\mathbf{x}_t|\mathbf{x}_{t-1}), \text{ and}$$

$$q(\mathbf{x}_t|\mathbf{x}_{t-1}) = \mathcal{N}(\sqrt{1-\beta_t}\mathbf{x}_{t-1}, \beta_t \mathbf{I}). \tag{1}$$

where $\beta_t$ denotes the noise schedule, which can be fixed or learned. By reversing the forward process, Ho, Jain, and Abbeel (2020) obtained the backward process:

$$p_{\theta}(\mathbf{x}_{0:T}) = p(\mathbf{x}_T) \prod_{t=1}^{T} p_{\theta}(\mathbf{x}_{t-1}|\mathbf{x}_t), \text{ and}$$

$$p_{\theta}(\mathbf{x}_{t-1}|\mathbf{x}_t) = \mathcal{N}(\mathbf{x}_{t-1}; \mu_{\theta}(\mathbf{x}_t, t), \sigma_{\theta}(\mathbf{x}_t, t)). \tag{2}$$

where $\theta$ denotes the learnable parameters, a U-Net (Ronneberger, Fischer, and Brox 2015) in Ho, Jain, and Abbeel (2020). This is implemented by a neural network predicting each of the denoising steps, and it can be viewed as a Markov chain with a learned Gaussian transition distribution (Dhariwal and Nichol 2021; Pandey et al. 2022).

<sup>2</sup>*Frido* is pronounced as "free-dow".

In practice, we randomly sample a timestep $t$ in $[0,T]$, and then compute $\mathbf{x}_t$ by interpolating $\mathbf{x}_0$ and $\epsilon$ with the weight schedule $\beta_t$, where $\epsilon$ is sampled Gaussian noise. The denoising network $\epsilon_\theta$ is trained by the following loss:

$$\mathcal{L}_{DM} = \mathbb{E}_{\mathbf{x}_0, \epsilon, t} \left[ \| \epsilon - \epsilon_{\theta}(\mathbf{x}_t) \|^2 \right]. \tag{3}$$

At a higher level, this loss trains the network to predict the step noise $\epsilon$ applied on $\mathbf{x}_{t-1}$ given $\mathbf{x}_t$. To synthesize an image, one can run this denoising network for $T$ steps to gradually denoise a random noise image.
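
A minimal sketch of one such training step (ours, in PyTorch-style Python; `model` and the precomputed `alphas_cumprod` schedule are assumptions, not the paper's code):

```python
import torch
import torch.nn.functional as F

def ddpm_training_step(model, x0, alphas_cumprod, T=1000):
    """One DM training step: corrupt x0 to x_t, then regress the added noise (Eq. 3)."""
    t = torch.randint(0, T, (x0.shape[0],), device=x0.device)
    eps = torch.randn_like(x0)                     # the sampled Gaussian noise
    a_bar = alphas_cumprod[t].view(-1, 1, 1, 1)    # cumulative product of (1 - beta_t)
    x_t = a_bar.sqrt() * x0 + (1.0 - a_bar).sqrt() * eps  # interpolate x0 and eps
    return F.mse_loss(model(x_t, t), eps)          # network predicts the step noise
```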

**Latent Diffusion Models** Most DMs (Nichol et al. 2022; Dhariwal and Nichol 2021) operate on the original image pixels, yielding a high-dimensional data manifold with input $\mathbf{x}_0 \in \mathcal{R}^{3 \times H \times W}$. Such high-dimensional inputs incur huge computation costs for the diffusion and denoising processes at both training and inference. Very recently, Latent Diffusion Models (LDMs) (Rombach et al. 2022) were proposed, which adopt DMs to learn low-dimensional latent codes encoded by a VQGAN (Esser, Rombach, and Ommer 2021) or a KL-autoencoder (Rombach et al. 2022). Given an image $\mathbf{x}_0$ and the pre-trained autoencoder, consisting of an encoder $\mathcal{E}$ and a decoder $\mathcal{D}$, the corresponding latent codes $\mathbf{z}_0 = \mathcal{E}(\mathbf{x}_0)$ can be produced, where $\mathbf{z}_0 \in \mathcal{R}^{c \times h \times w}$, $c$ is usually set to 4, and $h, w$ are downsampled 8-16 times from $H, W$. By replacing the image data point $\mathbf{x}$ in Eq. (1) and Eq. (2) with the encoded latent $\mathbf{z}$, the diffusion and denoising processes of an LDM can be derived:

$$q(\mathbf{z}_{1:T}|\mathbf{z}_0) = \prod_{t=1}^{T} q(\mathbf{z}_t|\mathbf{z}_{t-1}), \text{ and}$$

$$p_{\theta}(\mathbf{z}_{0:T}) = p(\mathbf{z}_T) \prod_{t=1}^{T} p_{\theta}(\mathbf{z}_{t-1}|\mathbf{z}_t). \tag{4}$$

At inference, the final output image can be reconstructed from the denoised latent $\tilde{\mathbf{x}}_0 = \mathcal{D}(\tilde{\mathbf{z}}_0)$, where $\tilde{\mathbf{z}}_0$ is sampled and denoised using Eq. (4). Since T is typically set to 500-1000 in practice, and the autoencoding is a one-time operation per image, the overall computation is greatly reduced due to the much lower resolution of $\mathbf{z}_0$.
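
Schematically, LDM inference reduces to the following sketch (ours; `decoder` and `denoise_loop` are assumed stand-ins for the pre-trained decoder $\mathcal{D}$ and the $T$-step sampler):

```python
import torch

def ldm_generate(decoder, denoise_loop, latent_shape=(1, 4, 32, 32)):
    """LDM inference: sample z_T in the latent space, denoise for T steps, decode once."""
    z_T = torch.randn(latent_shape)  # Gaussian noise at the (much smaller) latent size
    z_0 = denoise_loop(z_T)          # T iterative denoising steps, per Eq. (4)
    return decoder(z_0)              # one-time decoding: x0_tilde = D(z0_tilde)
```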

Although existing DMs generate high-resolution images of a single object with outstanding quality, most of them only deal with feature maps or image pixels at a single resolution. Since they treat high- and low-level visual concepts equally, it is not easy for such DMs to describe the corresponding image semantics or composition. This might limit their use for synthesizing complex scene images.

To enhance DMs with global structural modeling, we propose to model the latent features in a coarse-to-fine fashion via feature pyramids. We first introduce the Multi-Scale Vector Quantization model (MS-VQGAN), which encodes the image into latent codes at several spatial levels. Next, we propose the feature pyramid diffusion model (*Frido*), extending diffusion and denoising to a multi-scale, coarse-to-fine scheme. To achieve this, we design a new feature Pyramid U-Net (PyU-Net), equipped with a special modulation mechanism to allow coarse-to-fine learning. In this section, we introduce each component in detail.

Before we model an image in a coarse-to-fine fashion, we first encode it into latent codes at several spatial resolutions. Extending VQGAN (Esser, Rombach, and Ommer 2021), we train a multi-scale autoencoder, named MS-VQGAN, with a feature pyramid encoder $\mathcal{E}$ and decoder $\mathcal{D}$. As shown in Figure 3a, given an image $\mathbf{x}_0$, the encoder $\mathcal{E}$ first produces a latent feature map set of $N$ scales, $\mathcal{Z} = \mathcal{E}(\mathbf{x}_0) = \{\mathbf{z}^{1:N}\}$, where $\mathbf{z}^n \in \mathcal{R}^{c \times \frac{s}{2^{n-1}} \times \frac{s}{2^{n-1}}}$. Note that $N$ and $c$ denote the number of feature maps (stages) and the channel size of the features, respectively, and $s$ represents the size of the largest feature map. With this design, we encourage $\mathbf{z}^1$ to preserve lower-level visual details and $\mathbf{z}^N$ to represent higher-level shapes and structures. Second, after quantizing and fusing, we upsample these features to the same shape, concatenate them, and feed them into the decoder $\mathcal{D}$ to reconstruct the image $\mathcal{D}(\mathcal{Z}) = \tilde{\mathbf{x}}_0$. The objective for this autoencoder module is the weighted sum of the $l_2$ loss between $\mathbf{x}_0$ and $\tilde{\mathbf{x}}_0$ and the other perceptual losses<sup>3</sup> in VQGAN.

We highlight that, with this design, MS-VQGAN can not only encode the input image into multi-scale codes of different semantic levels but also preserve more structure and detail, as analyzed later in the model ablation of Section 4.3.

After the MS-VQGAN is trained, we can use it to encode an image into multi-level feature maps $\mathcal{Z}$. Next, we introduce the feature Pyramid Diffusion Model (*Frido*) to model the underlying feature distribution and then generate images from noise. Similar to other DMs, *Frido* contains two parts: the *diffusion process* and the *denoising process*.

**Diffusion Process of** *Frido* Instead of naively adding noise simultaneously to all $N$ feature scales $\mathcal{Z} = \{\mathbf{z}^1,...,\mathbf{z}^N\}$ at each of the $T$ steps, we conduct the diffusion process sequentially from the low level $(\mathbf{z}^1)$ to the high level $(\mathbf{z}^N)$, with each level taking $T$ diffusion steps (a total of $N \times T$ timesteps). See the top half of Figure 3b for an illustration.
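
One way to realize this sequential schedule is sketched below (ours, not the paper's code; `N` and `T` follow the notation above, and the generator yields the reverse, denoising order, which runs from coarse to fine):

```python
def coarse_to_fine_order(N: int, T: int):
    """Yield (level, t) pairs in the denoising order: level N first, T steps each."""
    for level in range(N, 0, -1):   # z^N (coarse) down to z^1 (fine)
        for t in range(T, 0, -1):   # the usual T-step reverse process within a level
            yield level, t

# N=2, T=3 -> (2,3) (2,2) (2,1) (1,3) (1,2) (1,1), i.e. N*T timesteps in total
steps = list(coarse_to_fine_order(2, 3))
```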

Different from the classical diffusion process, which corrupts pixels into noise in an unbiased way, we observe that *Frido*'s diffusion process starts by corrupting the object details, then the object shapes, and finally the structure of the entire image. This allows *Frido* to capture information at different semantic levels. See Figure 1 for qualitative examples.

**Denoising Process of** *Frido* In the denoising phase, a sequence of neural function estimators $\epsilon_{\theta,t,n}$ is trained, where $t=1,2,\ldots,T$ and $n=N,N-1,\ldots,1$. In order to denoise scale by scale, we introduce a novel feature pyramid U-Net (PyU-Net) as the neural approximator. PyU-Net can denoise the multi-scale features from the high-level $\mathbf{z}^N$ to

<sup>3</sup>Patch discriminator loss and perceptual reconstruction loss.

![](_page_2_Figure_0.jpeg)

![](_page_2_Figure_1.jpeg)

(a) Architecture of MS-VQGAN.

(b) Details of the diffusion and denoising processes.

Figure 3: Overview of *Frido* (best viewed in color). How MS-VQGAN encodes an image into multi-scale feature maps $\mathbf{z}_0^1, \mathbf{z}_0^2$ is illustrated in (a). The quantization enables VQ-VAE learning, and the fusion merges all representations from the high to the low level for the decoder to reconstruct an image. The upper half of (b) demonstrates the coarse-to-fine process, where denoising is completed for the high level first and then the lower one. The lower half of (b) details each denoising step. A U-Net is shared not only across timesteps $t$ but also across scale levels $s$. Coarse-to-fine gating is explained in Figure 4.

the low-level $\mathbf{z}^1$ sequentially, achieving a coarse-to-fine generation. We highlight that, unlike the LDMs, our PyU-Net is more suitable for coarse-to-fine diffusion thanks to two novel features: (1) a *shared U-Net* with *lightweight level-specific layers* that project features of different levels into a shared space, so that the heavier U-Net can be reused across all levels, reducing the number of trainable parameters; and (2) *coarse-to-fine modulation* to condition the denoising of low-level features on the high-level ones that have already been generated.

**Feature Pyramid U-Net** The proposed PyU-Net learns the denoising process in a coarse-to-fine fashion. Taking $N=2$ ($\mathcal{Z}=\{\mathbf{z}_0^1, \mathbf{z}_0^2\}$) as an example (shown in Figure 3b), PyU-Net takes 4 inputs: (1) stage $s$ and timestep $t$ embeddings, (2) the high-level feature condition $\mathbf{z}_0^2$, (3) the target feature map $\mathbf{z}_t^1$, and (4) other cross-modal conditions $\mathbf{c}$. By jointly observing these inputs, PyU-Net predicts the noise $\epsilon$ applied on the target feature $\mathbf{z}_t^1$, as shown in Figure 3b.

Instead of using a separate U-Net for each stage n, we opt for a single shared U-Net to reduce the parameter count. The input denoising target $\mathbf{z}_t^1$ is first projected by level-specific layers $\Phi_e^1$ into a shared space so that a shared U-Net can be applied. Finally, another level-specific projection $\Phi_d^1$ decodes the U-Net output to predict the noise $\epsilon$ added on $\mathbf{z}_t^1$, with the following objective similar to Eq. (3):

$$\mathcal{L}_{Frido} = \mathbb{E}_{z_0^n, \epsilon, t} \left[ \| \epsilon - \epsilon_{\theta}(\mathbf{z}_t^n, \mathbf{z}_0^{n+1:N}, t) \|^2 \right]. \tag{5}$$

We note that PyU-Net not only reduces the trainable parameters but also improves the results compared to vanilla per-stage U-Nets. For analysis, please refer to the experiments. Also, for training efficiency, we adopt the teacher forcing trick similar to sequence-to-sequence language models (Brown et al. 2020), where ground truth feature conditions are used while denoising the low-level map.
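
A minimal sketch of this shared-backbone design (ours; the module and projection names are illustrative assumptions, and the shared U-Net's exact conditioning interface is simplified):

```python
import torch.nn as nn

class PyUNetSketch(nn.Module):
    """One shared U-Net wrapped by lightweight level-specific projections."""
    def __init__(self, shared_unet: nn.Module, level_channels, hidden: int):
        super().__init__()
        # Phi_e^n: project level n's feature into the shared space (cheap 1x1 convs)
        self.enc = nn.ModuleList([nn.Conv2d(c, hidden, 1) for c in level_channels])
        # Phi_d^n: decode the shared output back to level n's channels
        self.dec = nn.ModuleList([nn.Conv2d(hidden, c, 1) for c in level_channels])
        self.unet = shared_unet  # reused across all levels and timesteps

    def forward(self, z_t, n, t_emb, cond):
        h = self.enc[n](z_t)           # level-specific encoding (n is 0-indexed here)
        h = self.unet(h, t_emb, cond)  # shared backbone, conditioned on time and cond
        return self.dec[n](h)          # predicted noise for level n
```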

**Coarse-to-Fine Modulation** *Frido* produces the latent codes sequentially from high-level to low-level feature maps. For example, while generating $\mathbf{z}_t^1$ (low-level), the model is conditioned on $\mathbf{z}_0^2$ (high-level). We, therefore, introduce a coarse-to-fine modulation as shown in Figure 4.

![](_page_3_Figure_0.jpeg)

Figure 4: Framework of coarse-to-fine modulation in PyU-Net. Note that we omit some intermediate convolution layers and SiLU layers for simplicity.

Our coarse-to-fine modulation (CFM) is designed to incorporate (1) 2D high-level features, and (2) 1D stage and time embedding into residual blocks, allowing *Frido* to have the high-level feature as well as stage-temporal awareness. Therefore, in our proposed CFM, there are two types of modulation conducted upon normalized features sequentially, between which an extra convolution (conv) and SiLU layer (Elfwing, Uchibe, and Doya 2018) are inserted.

Specifically, given the high-level ground truth $\mathbf{z}_0^2$, we apply noise augmentation by $\mathcal{M}(\mathbf{z}_0^2) = f_z$, where $\mathcal{M}(\mathbf{z}_0^n) = \alpha \cdot \mathbf{z}_0^n + (1-\alpha) \cdot \epsilon$. We note that $\epsilon \sim \mathcal{N}(\mathbf{0}, \mathbf{I})$, and the scalar $\alpha$ is a hyper-parameter. After that, let the input of CFM be $f_i$. In the first modulation, we modulate the normalized feature $\operatorname{norm}(f_i)$ with 2D scaling and shifting parameters obtained from the high-level feature $f_z$ with two convs, respectively, producing the intermediate representation $h$ as follows:

$$h = \operatorname{conv} \circ \operatorname{SiLU}(\operatorname{conv}(f_z) \times \operatorname{norm}(f_i) + \operatorname{conv}(f_z)). \tag{6}$$

In the second modulation, to equip U-Net with stage-temporal awareness, we further modulate h with 1D stage+time embedding and produce output $f_o$, similar to Eq. 6. Note that we use linear layers to transform s+t and also add a conv and SiLU after the AdaIN (Huang and Belongie 2017).
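
The first modulation in Eq. (6) can be sketched as follows (our reading of Figure 4; layer shapes and the normalization choice are assumptions):

```python
import torch.nn as nn

class CFMFirstModulation(nn.Module):
    """Eq. (6): scale/shift norm(f_i) with 2D maps predicted from the high-level f_z."""
    def __init__(self, ch: int, groups: int = 8):  # assumes ch divisible by groups
        super().__init__()
        self.norm = nn.GroupNorm(groups, ch)
        self.to_scale = nn.Conv2d(ch, ch, 3, padding=1)  # conv(f_z) -> scaling map
        self.to_shift = nn.Conv2d(ch, ch, 3, padding=1)  # conv(f_z) -> shifting map
        self.out = nn.Sequential(nn.SiLU(), nn.Conv2d(ch, ch, 3, padding=1))

    def forward(self, f_i, f_z):
        h = self.to_scale(f_z) * self.norm(f_i) + self.to_shift(f_z)
        return self.out(h)  # conv(SiLU(...)), producing the intermediate h of Eq. (6)
```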

| Methods | FID↓ | IS↑ | CLIP↑ |
|---|---|---|---|
| *Methods under standard T2I setting* | | | |
| AttnGAN (Xu et al. 2018) | 33.10 | 23.61 | - |
| Obj-GAN (Li et al. 2019) | 36.52 | 24.09 | - |
| DM-GAN (Zhu et al. 2019) | 27.34 | 32.32 | - |
| DF-GAN (Tao et al. 2022) | 21.42 | - | - |
| LDM-8† (Rombach et al. 2022) | 17.61 | 19.34 | 0.6500 |
| VQ-diffusion‡ (Gu et al. 2022) | 14.06 | 21.85 | 0.6770 |
| LDM-8-G† | 12.27 | 27.86 | 0.6927 |
| Frido-f16f8 | 15.38 | 19.32 | 0.6607 |
| Frido-f16f8-G | 11.24 | 26.82 | 0.7046 |
| *Methods with external pre-trained CLIP* | | | |
| LAFITE-CLIP‡ (Zhou et al. 2022) | 8.12 | 32.24 | 0.7915 |
| Frido-f16f8-G-CLIPr | 8.97 | 27.43 | 0.7991 |

Table 1: Text-to-image generation on COCO. For LDM scores, T = 250; for *Frido*, T = 200. †: reproduced with official code and configs. ‡: obtained from official model checkpoints. G: classifier-free guidance with scale = 2.0. Note that LAFITE used CLIP at training, while *Frido* uses it at inference only (CLIPr).

To summarize, we highlight that our PyU-Net framework equips DMs with the ability to learn in a coarse-to-fine fashion with a moderate increase in parameters compared to the classical hierarchical learning strategy (Razavi, Van den Oord, and Vinyals 2019). *Frido* inherits from three generative paradigms, VAE, GAN, and DM, and is further embedded with a coarse-to-fine prior. Moreover, the diffusion operates on lower-resolution maps first, resulting in a speedup at inference. Later, we show that SOTA results can be achieved under a compute budget similar to that of a strong, fast DM.

2210.00364/main_diagram/main_diagram.drawio
ADDED

@@ -0,0 +1 @@
<mxfile host="app.diagrams.net" modified="2022-11-17T20:01:23.373Z" agent="5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/107.0.0.0 Safari/537.36" version="20.4.0" etag="C7KmhJlysE-dO5W_LyQr" type="device"><diagram id="GGr3wXQna_ddXh4w55PL">7X1bd5vItu6vyRj7PJw9EJdeyaMiYYe0gcYG2+jNxm4sJMdeiRRB/fozL4VUVaAI2U4ne5811spoGZWKus75zfs7Z/JYn369eX4In+7ul+9s665+50zf2bb93rbf4f+tu4afeB/kg/Lr/I4fjXYPLubiXj605NP1/O7+m9Zw9fS0XM2f9YfF05cv98VKe3bz9evTRm/299NSf+vzTSnfaO0eXBQ3y/tOs6v53eqBn773lNaf7uflQ/vmkSW/ebxpG8suvj3c3D1tlHc5/jtn8vXpacWfHuvJ/RIXr10X7uhkz7fbgX29/7Ia8gO57t9vlms5NzmuVdNOFob4jB/nj7QqH7/ff13NYS3Obm7vl389fZuv5k9f4Pvbp9Xq6REaLPGLjzfFovz6tP5yN3laPn2F7+/u/75ZL1dKD+PlvMRfrp6e4enNt2feq7/n9T0M7yO9cNw+tdon2NXN6uadM+Y/7ZNv38t39sf6ESY2+etTZM+aj+7tVb0uhDW/+XRuFdOn72fOnXPXeE7YeN+Lx+J7WI034eSDuHss5sGnh9XtqSfiLw/fbq68r39dfH66+3S+iefvv8OvnLMvhTh7/NDMmvd1nC68M4fbBfOP9uz6s7i5+rD+6yKozyof+po9z67vJrdO+SGoxmU4GTfxtCzDqnSh/dPsavnl5lMC3wVu/OmjBc+cm6tz62ZqzbFN8SmZ/zXPq/tT/1/BtF7Nrs8fZqcnVp66j4Hz8BA34/LzPHLz9FsZTMbbZ8bnJpwGzVnq1/DPuZwGm7hKnLjK7LMqsEMRemH7jgm0P12u8qu75ZmDa7eCtdvs61d9N/z7GOXX55vb0w/W7ekS5p6Lsypbh/NNSev+KfzjrPmwUtvIddzbZ9/c2md/TT70rcf72enlYyF26wNnwLq/qpfBp3OvOM0+BF/Ol/efku+5E4mgcv9970RW8fjh6+xihPu4vHn88HxbPZXwbHT7mPwr+KR/n9sncBY+zu+uz62/0833wjn3bk8zOHPKyBazq5lQdu4LrOoCTtLjzdXdqHi8/Du8CJRfKC02sD7Vzcn7Gsa2W5dpjc+fAjkX7qn+Fs/H1eyR/wdnB38fBdMxn69JXoXp+F+peubMZ7iuwt/E1gY/b9rv7+fB9792Z+L9mV30j/bh/vRSXAt/rrQu/zotypsrONVfPj7cnUZP1yLQvv/hbK5OlvlV3uxmU4zOqqQJpoEIL1w7misjruAmVgv7x9+FQpm1E124Vjgfi7CRbZ2n+ZlQV3rzfab9vZ2rcXJP3JvryNxHbfYP1s1V9NVcHVxPoEnPM7jj0NN6ln5e9LSBc4urV5byfH0IHmfPt6ebP4JT7/vtY7Y7l+lTGdjyFFTfsH1za6+WZ1f18+3jZZVfj/8IPn3+fnflLWBVK3jn5uya+oYxXja3k7IqptkoT/M6ShdwlxbPu9X3m3DiOnHqb6Iqg1UNga7ltrKiNdDOdSQWdTjNR7Ta8/fGinTWVFslvI3nPeto0hraiU/nTX7liRnQjdsU+Nanzw+3X6JvsDPLP6cwxom7gbG60TSHsWYN0J9RJPJRNF8KuLffZhflBujyKJhY9N8/L4Lu+k7CJkK6WOWdPYEx6De7fQ57WgAdupl8hD05WRf2DPiKBbOLqhxO+J/T8Tqc+iPol96Je59eIF0OrWCaNFGKdC2ENVydB9huWpTRNBiFuL5iXMO6ArXS2myiFOYoAi+8gDbTjG9BVXbbpKEdX7T9lC585pM/8uG7zIqasRVOF7BeYxFN+POuDxgD7Hs0HXvwmcYAdMNr9z+7gJs09eHdvogq6APeAbewgXG53TZhA3uDN9OBeYlIFBulDaxBMGrXYjuWKlNokjxzu3Vz9HUL3DBdwFgLXDdYCxjFNIHzqc4ntHFtQ5FtYB1gzInTXTfZJvWVfrJGW7eqrOHvFc+HxkqflbWneZylC1xzAWsy0seRNECFalj/jfxsrhnMPWjH6YYih3n5+hhSOh9WJLCPYBOKpJ37lrJJyuXPrp4fLr9cpnh3ztJw03NHNbqs37LCjh7ghjwBJdjAiOFmZAqFyF2YPVCIpJ1dSKdhCqMRC0ujFCKxkDvt2tFvN0obOzZ+B5yI6cTiHDjzydXdJYzlS7gKHzOdbhtUU1KGx1vn8wrGqdx+OKFp3kQXC1gd7Ncq5cmz4a4qYwvlyc91viIy+C3sjlg46tziaVjrc/NrwBc6h60K7MuCG6mui3yPj6do27ZnTH/AmhPXOD+9/Hd+ORtlNlCaxhJRo3Hr11AkG09xd84+UhAr0im/CNPcoxsmtL1vkBKY84HTin0ABYAbsTvlNVAWpAxwekuiRsCXgVrkehu4bdgGz0aY0s2DNqWlUQ9A8Ns2YjxCKgQcy1apUJT6cJODBm877GMN74L37/Zx16awIpzfFM8rcIO5evMzwBDy5qcF3lzz5lvYP998H6hPCJ8TdT5EpXA98c7AM09ZU3oW4T3ZtQeqC+eD+9vwZ1+dF6x/gWfFQk4AFNqTn5UxS0oxpfHbMH4Hz5xKOWP8Lh1vaI+muN4+jKXoabPAOwvUDbl9AGusUs5AUu9sRGtLYwlGURpq60NcSbYB6glrHBjvWuB3DdAQGEeOHA6pY6PRCfOc7qd2o35q10HpOyo4KuysvD9F/PTtj5tPn5ezypoHlkGBxLiPAr2/u45A1kB5D+WfHEYOK98Eh/vrp2hKfwtEwjXd2vmA/kRxoD9/FF8ARQcsDPwU5V6SY+PHh2Z2lX8I5tGwlTSlg37E19Oqi5SDAUi50+Z3QcpOOEf+CGcSznIfUgZ+gDQXaHrmvAIpz98AKY9ADgL6XVhEd//HIGWg3WmuIWWi91WAqBPoYIBI1EMaqaFgRKZpgfwIpRQP8Umo0yRqE0+JxgLiA7SlIE/iQWmJiJPoo0SfGx0RJm6LnoHPIx+Dc6DyBeBx0xDfYbWfYQz1Vg7FPsR4w3ST5upE0wzpLfIb7T0xanQqRNSM+uT8Wzooea4Fv002TEc1XunFdD4zwm2I6IFf2aHC46KU+F6N/EJ+HuHn3Vwyi+aaIs9doDxtobSi8ySk3YzQWzRPSFXl2SLctRGAbiedfoTEQfDecYu94JnGT2UbHi9KRDFpN/JGl0AK1CHI85ITFjHWRYTUTwCSCeKD0OG9zjVJJqpwb3wYM0o7OZ8p4YtuG/oOzlKA6NzR+XG5ieYHpAfCDSQ9WDifSKjfJ/gbmxE/jcc4z/Q9nKPMI/456czDIwmngjlOCIMJ4sHTLY77Q/432uJYkbj
aHFKkd7hmPt2nOEU6sqg1rEQ4m8Yudme10M98RVilbWPzeTT6qRYbksToXSDx0lkwxiPbxITZuB+YF0q35rtYqtxKTMFOYtqNWbbheXX6kXPnd/Wsz34s4v7/ikXKt8Yi/TLsG2KRcAAW6bT5TbAI8MnGBfoGeJz4UQeLAP8I13Af4L+J9wos0rweiwRAC13gLyHzqjRYRynIWCCn/HmtYII0A3pI/8V5irtPy28z6Gv2uPx2O4W1FIRDQFbrGRHsyqWYXXx0ceduxPNT8Xj5+NdFUJmSdDjpkaTbOwg7D1R5DcgEJUMHRnGov2Z28eP+4jRbA9circqA/kR8YHzQj3cGHAX2E1ZzyL3sued9+z758EXX4HfvVHs77k4v3buT95ZmvzBtRYcRKuwFnOA0tIm6AteAeTnIsaPJ2yDUv07Pl7MvYe9peYEOqQrcPh0SYokeHRLyXVMPBLJw1q9PSQlTjrZ8k/ogfgN8M1PtGyIi2R11HEFPH0ETzjWdDFDd8SjcYQscM+MKwKZav4RpNN0d4jVNd4d6j2EyebJPJn/ByktNDqBAbTRI2RxEaopmYbsjulaOkMNG1+aRBkQokgT8FlByY7ZL9L5SQsasDVQ1ZYxeN2pbGJdLWkZdawjoJmeNjbrbVUlo0pij1FZpfezTEjp7aMcLNL4+chzWUKqnF2gP3EU4vdrcR6g1OoNx6qcsw9Nrzof76K5Jzad3Yev67xDRHOruca/sEKUakYhQQ6Clx+tWkvTIyJ4kHxU5ItqUbWD0KWkQ8YYqkhmhVrhFKEWEDqJtlJ41VMhtPNbbgzRCN1zTsnkovTKq57GcGdpQkg4nFtl0osqH86trQlmqCkhzyWtFElFX8hM+ScSRSBxdwiGpUZAmrypHjM6TRpPKUpJQa5JQK0DAhLzHQpOGWSPLbVJAEZNOP3J88l0p4rvOeGQbv+b1pH5AsgpUScim34sFz5v2T1uzdrykJYwBEaHFWDsDci1Cvit1SDYxZe/2Uqlij53kZfwBZazERblOtxEgzSTKolAR3+nQVlxp1T7Q7W/fzbfzPvvAy2w9mx5KCyvaS8lqk9LymDUe0ulPsfLRnPesDciqueiOweSd1L/JZ7v97j0BufeWfMpvSGIWphUlaMh2qWvSXVo9oHlEd8gGiVYSzd4pyIdDgDQPbUKULtEComnSfeZ33AYQRu6yZinQtO2k6RClKz+b99Qj+os+APO2D7hjuh0Zd0WwZolphmE9kGNdoL0VfY4QKWgWnHiKO5iJ8KJdk44Vim2mux3eWSWQ5qJ824xXxvcCEZmkuWQ9MWkurHtLc92QpX3DahQCrx4zn226NBW/h3fjSXPR2oNrrWk40kDlQUzrOjyINDFtGxt5JWq7DJuzS/1MC9J2ST4zAG3lfdLdS08x6z6saGrYdpkzGjQAVp+8FHTkA7NF+xygosy8w9jHRrEX7eyoaI3v6mDIjgUrjF4NdK81O9wUreaEDhw8BfRZRYgCvSYUvc1ubBp9i/DGTjXvC9ZHTsYu69ESVbc2Ihse2hWnmRcxqhEaImKEoSO8/rmTnsmgt/L3+poyF0wsww5PaxdNEy866BVQWG+OyRviuR28qp0QL+zicUEabuWEEX0g+gYSC/uQIM4GmhE6OhbIdlbgyrfZmqxpeQG74M601uSkYe1q2YNL0Cq5wy6E1ztYdWxiWn2nCLsYp5/724N9gVboFg6iX0D98STA2BO8+5bh5+PRLWCa0iCexZuoaHOZTvKpcVkrTfQM90e1UIyINoGsEW/3iunVIYSxR8/xAoQB40YewrhdlZiJfwEmVs+TR7Q3zTWNciwt5SHxLBg3Wrun2npJ+kq8qCFkJRbGOQoJUyJfhXVEC4QhD6DeczxiK0CC58zRJe7QQmQWKmeG8XeGqM1p+f1ZGhjYG3G2xRZvRB/8WZV5kD+StQDG1dAYNayL/Us+wTJK3f0+GCEfhLNJ9yASql+S38gzy2eJrGWds0QeCduzlJY28yPNz4r3scodttjgWvuuQp0UCwPgGuLfY9uwhpEvGe8FvJMsDGWtW8MytgwQtWX+LrGOik28aNvGR66D/TRaP2n7LlyfhUP+WNPM03DalHDaZgjijt7sPsA5QauJCHdz2ukDUN4RhixUEw0h7Bg2MXuxaFY32FN7J7sVcg91+Y45Y9vGRwsf0N9CPaseedEAdgq3nzPtjsQpasSkNS0dk7YKzoOlYy/a25osgHDeCcdOE8OzCjlbsNG5ezAiPUla0l4Bh7b4Xqtt5LlKQ7n/uH8LW78P44bQDK1Xjn56QOe19yh+fP6oHz9uZXYPxgrfhyp+tJnPEJ1w6H5pdA2fFY1mfUVNPcixMBayZAPCEB28yvIuWqVd+Vk5ywuaL/NJoN/oe6jLDRbSP3iP136OTTpE3myodyFdlMOeOqUmd7MugVEWnFuXx6N56aC8zuum0YbSYlqO3+Ub5q2+Zollna+P7eTnTEd/QqIl9JNFzyDS2xhWf7LKEY0CFET+pyYdY4tkS+uAphMPFJrcwnRMJFK/QHxBlW326tvCHql7n51voH3C6qMr7W+hPdBPF3WPiBAH2NCKfmukapWDk+mi7he5wpAe91jllB7d6MIFmrYA5DxkjLPqUI+oDXOBatJpGNDjHple7RFomovnGnjEYRsKrE6PDUWxyYAMf1aFblQVdTyov8WP+xOBdQbYFnAO7M6A/kRyoD+0DYP8OV1shthyoz7bsGojA/n1rCpQ71APGl91qL/SPavQtzsYDbCR9d+73R0ZRWhNFCXgf+DvL+uvz35sWMg6f7eW08fRw92nj99unXM4p8E6v354LkbvG8NatrWm3V+fP+f2qi8+YheZQnFJ4eP539Gkx/rZ+lt1bKa3jx/Ws3Rne1ajieIG23/+fmNnK7J3X0UPxXQbFQWSXvTt5nq8or7R6nf9+dufkwjk18uHCK356flDrOo/BVo6kRsvGjxtgMZEf7wOSDNIc1CLIvLOyehYIruWx5FpeUTOolv+ClOalb5oNUi62ROMc43RLOTXA+OJAZmh1AjS17OC1hCJrvi/MM+eWCayR1eB3R1PS21e6OVg0vwjqN/n5cEePYqRSkmq/6VeocsjvEKz39QTo41eI3s2yE2JHe3ia0LyDGtcj70XMY6qALkx6eNQA7wxen0fjvTGSOB2oudiyDYmka8Rf8I/5w29MfZa3l8oI7FOtTJ0qmlOnpKsU/UxYgOxrKGb9VnWJg84v2Z9T2LIjz5je/LWT0RM+gTNW89G3baMkaKx0Gddn+C23qPAF4HKFRsNswvyAvSkZpC994zIBOkBy56jFchtc9L1iO57Wi9HX9osSyWaZmtdYTvENBnpFqlQ6psWrqHvaOKtxyfq3CkeaaTrp3P2Cq18liuoTVDrNtvCZQ888hq00EMW8La53tJOgnpujnhAD2XdrkueCw1FCsE4ELOT3lB7V1mzlrncyZhibOjdy9FWR4iRHhXNY6TbddlbFL2oKDaMvImNM8JtHLa1BBbr6TU5kqI+pB2CzkhHjhRb3Y/LUR6BeoYsJS
bNkt6bjbpHEemVQgtlGpLV8bzptlSKspAxYg3LZp0YOluJoUMvWmqjy96sF4imOWvxu+dof6TSvE8vApTNiNn+X8gl7Z/NJYf4K/Z4Qf1WXDL3QLIBSSTUbb5bbKh+PwSVKxjZml0/WNei6KyAuSOwakZkAvOiW0C6s4vSwigPkFnI4xr4+ToG3gh3pv6Pt+KQuMQ+/H5YmuruY2nuYwcZ/Qjhh7hvaUIyKvGGC9cLpxmcLP8ZTrtFWAhp8W/payi5H1AfXVPK/oPZjuPS3SEN5wipuuqJRIiF/OmRY+TAMfRYyZg4Ivq/IyIppYYsdLttwoa17okgLyrha1rCmDSUSc0ci7Wt/ePmNl3rQM5xIUYsVdyNjz2nCPPpNi4QNaAydkTlgrDrhAQyQf2SxRDuiCi7bdBr7YL6AdpEcZMKxw/QsrHZWt80PxfUfHd8JbU57ItrRq1Yn6f8C2KmYI3Im870CLFYo9l6hJCW3sEz0PUIQQT2Q4+QmjILoIwLNFxDjGkitdCIWqgP0yPEYq04e4TEpMnNPP3soBXCZ0+HajHq8WiTbQLOPlDlLloQIhHqqHjqb9ehHUuooGsFFbdr1rF4UVzX1uIVsgVZs9SGI0LOVc6ZBVJERb6jnOltG7g3LqEoRK2AtPWYqHFr9eOxdCyAwTZ2CKRFHKunI3W2AMY7C6DhpUgWQBvnI39vzMNvSKMvNec967HfCtyL7l4k0dk9FnLUGPdY0zOyvioWhlDx9DT8TugMdb1oKzznha3SzYitKmZbuDtkPTP8XJIN0QvNshOiZUrzSI5YMnL1sfrkw2d4CJh5ERrOrqHMZ78fh/OGfhxVLsjukeamnMlRjOSXS36biDJGmn1IcGS/tOXYcRuF1xPZzx4+rR9ppkcSSl8MzhAgfTGmieF99A/5Yky3lB/GTvKgpdu9fen/m+04ZpeaMFWUbSJpY480ChyM+F0ZRyoSlQ5HuneQ9BNjH8FDp6J5y1Phe9v4Qi2XA1nYOTYyDRqKLazGmy7foDg8+ZlyNShWY1z5gjPfpCh9Zo4en0eygZD5FzbMV1RtTFBLT0NEdhbQOKBzoRF3SrSZ44I5JwfvpnrfhKTfqUZXatYCFTvNh+Z1TR7pmBVOxTeCvbY1ryfyR+vJDaLGYvApoDFy3grSClSa5slj73ppnW69y9Xbg7yPeQhprVArpGM4jPUgz2iX46N9z/we9mHD2g3cr4Ue95z6eBsawHYexV5znMlGx6A0N27TWWPMIaGdfEfyUfZ8hfPBGidNS9EwL2AME0lrsIlz2AIscU5aOuwRoFqJFxRzFpPXR26xhVcbo823MNTyaTDmaD18KFIDY5B1D3n2RBvJnCWCeYmRsyTdeWrhLSZtCqyvpvVjj3+HKUb3fO3FkZX/VjhSUGaijocyxoTjHlDsuYyFhfFp+4SYfmtht9Q1UWLJNwo1hPNP3vyNrmHNbPZmoNwveCe2WjSF4nMbzSeSNKAaF+AoADifqTaGVoOMZwV4PGlaAW9oc3HYl3XMcf5EX9iLUFkTT55L9sAiTw3YOx0DWiQvVAnjC4x3p5wFpTIX2UajD+Q1oWGCmD3JjLkE0ic13xA3ZS8PV/f4lp6NPM6G46rNufi29BxnDwrAKTwXjSPKyBzKSdBGcxhzkW20fSnrHm59bDSQxjdg/1gbWaEXeevhlwjDM2azi/QJmnh3NlUttq208dCjiDJkahg7kPHo5EkqpCfUThu7lwvPFm/Bhft6Dt4wViWUdElf95LmbO4Re2dlhr5uIeUPM1qN1sv0EW96oug2nUi71B+Kf/s1rS9EOnRi6siIOYpTxLYowRKnkhkadOlVUgjpQ7egzBWdOCqMYtm2AcnuotOPzNSQ1OQPRjaAwtO5EfmV1WyXoD5cncv5xKmkJ7zH2cYMXy4aq4woRW/2qeEfKdeBM34FHKOojkNDQQnbeLQ4jNBipJKMdtm7LDfW4+PIjtbGCqJfKebYkz6vqj8n3tD2OfrfORR3WJV6Vqwp5tPD58hBC8rahb6Mqh9lhBqOiTWKdf/gDfvmFrgGNUbeQhvgwHqORsT5Z2kptRLo52u5urwQUBY82QYxPL1LR3C8lnB/CSHxuVLmK+NT4Ttbz3wTOJzFjM8ha6t0X1COKBi3Pp4bXaZiREralGliUDfygZUUOeyct50GIZS/R+QV6lxh50OMUWBWjwxUK/55gOYIGVgG+pVzDBzK6MJ3zjMkfU9qK3S5js9glwLRHUh2men2x8y8qRQd4ukzY9akfi0RysqQPoBvaGhYs7OacgeKVi+bUe4SIyMkvYfi3TDmm/SypY7FuU1PtHTQF7/TsDRrxqpwH534HdKjaHpPz8AsttQ9Gv3xKTJiX9j7M+37vRH7whEdTqSdMHq3xdh1wfkL04WuWSDKwDGUEqPv26NBcZZ91s1Xe3z22HgUK6QTNe4mJLlrSNYUzBJ9wD9TxJOjfEj774nmnxlfuIAJUf4cNMZ+26vaoxs2LtzlBWoxh4yxP/5N6TEUaM2FHoFqDuqxP1ZK7dGJJq4NKHJgPps9+hmlxwT9XEfoFR0P8pzN9/hlKT1iNuwNRofBLR/SY32ox0xEc1eQRnDQOu7Bw1reIpg1WicQZQ/ocQ8OVnu0YdYjjDzArERvMWvACXPXDcmKMujO7PEFUHpEL2kvRoQ5bGcO3sLSivGEp6U37PTs8YBQewQOizmG8s0wSnF4ZxZ4wo/IAXXY23zhYG4htPWAHPwmd8bHTLXrkKQ+wPJD/If7uMKOggNHKdCrAf3+gOMP8Zce4BXbyWK/N6t9gtaZxP7w7fb0sro7XX6/XRq1Bybdmgq9niNfdE+RHKW4yVaLBFzcwnoQAmOs1Ni+szRcRynpthEn7j4ruE35XbePHYZbheJhGV+dzMPHaNnxPuj3M0J/j2HeKpU5f9rDrm9HO/dpx7P6gdDM/KMH+441OdYYvwX90zlCnyKM3wA658Vp760/PL+X73mfB/qvWBfoN4R1yUcUPzaXd0T4a9RSY9xb3527TUN3li4f8upzNUs7eW+7HiY9T1pvqQw1pH70/fa0XubO+fOt7fXkOUMPk9yun4vJiOybsKqj4nHzFDjjUXSF2WgyLzr1vT/7/PbJo6brPxI9zh59ROAdr6v2Lt2nmOMLvaV8Nwa0Qrn8KO/XGJ6hvISamB4aYlsiPs28/Cpocjvr4RaD4xeWeBoeprd2/W12FVlY7eZaJGZ9jy9D4wNMXyCmtL1VTuiUKHEJ6orBbVkQ1mTejrpgpPfMA7jKzdjqjeb5ySuT/SYr4yPCxSz9cJcW8xbrAwdHqzrGtf3T9MaMingrHmOfYdSaAEym8Zh8DVIrxhJiZtbdZ0WyVH7X7UPhMVEaPeSpX+fi/KHjhfh78ZgRSIeAK0qXcSnJTjbIJR5I0GiN6d3zg/N7+Z47vwmPQSyMViQ7vJD0s8owxgy9yuy4D4PZ1ii0Tx7C9KQCqt4jT76OSpS/CZXI6jPMhleN3S1nqfLNWRWiJGvvoZ8/dWUWv8nKJNZZVQDNwBjx7Zmxz8gPOBn9ipXxN
7/LbaK899vzkgG/TawII9h/xaq8XnYRuZVXl9XsH5Zd0EMfc9Xl6d2vk13auRs7HlXn8+gqWmwxOuDxsAq9eFrUO4wOfBVkl3Ca1b0Y/SprZmjruvKdn4LR006VuTenFe066LQCqONpMMrtpEWhG4pxr0KLo00IhYKMn3kh6Yr6bsVoGT4mQEtOHqKfgEJ98dNpxbCTg34da/S6B/TlbU8OIjKRORhR9mtOzs+XXwaeHMy3YLe0NE6LNWaciNC35pesy8+XXoati8/RVtVCwL+t9ALoFuMYvCjtjek6TFFfoSHrRu7sifHo1rrsn2drK5yMV9EctUAhSPOKXVCXbHqlHPV3PX1s7Va3ad6Ej1kdPSYuWlC656ZvJj9D47Gdv67xUDgwazyQc5LPvtXSDOJAmJkI/XR6NUKH5vjau1H8JndjAfi8sLC2BUkupA1aAI4v4b2+s0cbZIfV52WYzhbk7/jW3Mb9TbgNZ5KoCjsmWZeoBtq87Ihijn/Byni/ycoAPcjWGHFM9u3tnULNSGiT58yvuFPV+De5U/4G7eoRevpPy/bkoHbeAX4Dz3ttSasI+Pcs9e3ZFH0u3lY731dn+AX8xkWvIJBNNH5DdmWRjbTcybrE0yv9qL/r6UPhNyHWfoJ/5xXw41/Jb9r5G/wG8SiMupH8phpjvRT0OHK3ci3wGtQj77fMoH8w/DtF76033vserPEL16YBfoMWqp2OKPWBB5VYqUP0ZriyLcpaDP+sHKsDvDFVDf4B6UaugtCpKubQDOE+bKUbsl2hR3bRSjduNEHPkgRm369v/Zknp5sP6heenBRrRkHvabLVu8bTxRojQDHnxh673gHK8VqO8/MlnPaMGBxnR4mZ47iUBygNMW9Iy3Fqyi6VBl68R1f/U8/O20g4vRznp0s4V1h1NqnjK0ArV8Fvz3EAj6JNyo1kZgWmJeUa9t4K92pFDs3xtXfj50s4w+5GgrYZD+iE2NlrAoFST1Qt3N4cg/ZoHmE16XQJWP5y/uYc5x+QcAZxHBcohIdxTjt7N2Z8ddGDHfhxP9XIsdbr1PcAq45y0zvplVQj7sGpv+xWVcU6wrwNWElne6uSNcYOUUzlr7hV4ufLOMNuFVo8C4xwdna2Pt8B3As3LRn1W0Hf7FZ9wVsSPXWyFnV5iZzVxzWs3rJro8P/5btTcDFybq7OYRWfSvJOTDuZiv59ay/X0N/q5vp8WezNZBTVd1cnmBGntQE5pIGcU57gtV7lrc1e1FNF8QenA3c7tx96cna+OifT9d3z7NP5E5wFkGOCUTeX5/P3mysX+hs9351eNntzfTono/waPUG3eRnabIU9q7D9TpheA4dPQQeF/Q88BZ210WnHfjqKc61uTroeMa/N23pnLxdwjv4Ipmity9T1oYyxN48fnm8r6O9qac2ug873uX2ygTUSxfXlQ/GYdXO5ivCV+895K7s8w+QfP8zllCIWSGyKnWveJF9Tr834hRWzerMXRRRHZ1aLDGTmhMTT4r8ERbqafVgxZVsw+0hcipBRMTNGbuu5MCyumZdsjNqIG4zcjada9oKGrE9oZdEjgLiSst5WRtS8VZXI8z3RAS+IYe++dVf/srM7Mo+WHoUn6G+zVib9/Wa5n057M4m/pEIj1fuMpnq0Y5s1k2Ls1RinKUX0CSOOnGoJoZWfIvS4Jr2tVDk550oTGWfRpAg9qkzVYGRfp43IPa5XypVVjHxkFtdMovHK8Wj5wMLeOSlRp/31VwOKKdP2nbN+iu2eYK4Bgf2GtR7lSPkghMz1YLeZX/TKgpwzguPZuR+Mo9PyMfG76piqdvF4MBdFdNHZF27TOYdvdZuyPdESL8sshlGIiRHv6o+4yhbVf0N/kKbNR9rNGFBw7C7n2dmE00W3DVb14n4syhCgR0PK2GDcdcyCQnG2lqadTjGeYrxScwXJjCg2xQVTBqxSUJVANY9qypXf6ftUVorXxpeNOE8RxqMWHGcKp9N4B51grkFE4wB6nRkZ7DpruC+ysOmr3frCzIGc8UQ5aRyHnlEcOme3kbG/ehvUR2xzGWGk5zZLgpatZLGN9cUbz3VX9BpIMVUqwxpEiCBKznVs1tSrMHZ5wXmAiaokRt2fHGOCV5wVZGy1NZG07BXbWqtYowT2sjJilKtQZtbxMQbOrGdr99et9TdUYzDVY7Q5AhWkLu0+0M3HWiouZ1Wj2lpOf73WgO5Mp16rUGqoCp9zQYnEqItWuLt6rZlcs7EeC67X/BFM0fV7EVEO7LZea8D1XLr1Wu1dvdYQKbGRPxhzMbfZl3yZz9qo18qx3pIbIOWkbE9qrmlZ39bnGmWU92tAvdbsA2a6BcSerUK7z8rwUuwgK3FuVOwANJci1ykqXUFWMucLraJCtymCnTIuq7mFzH41frbLY6KgO1lNKNdPH4+lDo1Yaa685Bux0p1+92Zrnr0R1Ykws7WOFCmjNN1yLdMFUV+qCMx5qfSsBYhkscYhV0fGzBmJlpcJc1kSX6c8V3kNJ8Y1sy1wBg063Y2kRLWe+xMzdYdt9gmZ6c539IwaWBVqV/Uopvw5/ijWs8I3TP2pjRsypdqoMf3yxlnMw6kiX61TqYTq43I18pIzsGOVIi2GnSvKcQaIBeNZkZn5zrxtG4G+VJgjQMsuLqJt/D/mGcwblnEDt9sG5lVxlTUYD9wVvVrStkIWZsVP/U4+MuI0Mr8l5wOzLHVNZH7Fzpnp5ujkam9aVj3iTGPKR4Y1izgvqNZGVoulzOUNr6u55rKNIEwl+wEMnJadd2H+hXCbIzNpupRw14ayk3T6oWjS9l1Ind3ueGQbHrPsx5wXv0vOvW999t7xjp/CS+84UWsNzTC3JPxdGrmaZIYYvjcOV1xPaj3D/8LiXK5jqgkj8wcbueeoDWbO3/Zj5BusOdMNoz3M1o7c28geOeJs/KVxDtvsMSXnAW4wg6S2v4I5U4tSqfKCiWS5zRbtUhVnAxEvZGUF/D1VTdWQ4j5ZMeypErUn14TUE/+o6gfmLkVrGsmQB3NS9PCFAe8obYxQjLEacjNGrQyfn4Mx2ed9FcCkZeDHeqEEo1m8EGMlJ8e8sVdSGvJG8o1AHkPI9Yg39sbGD3pjaMFtdDEWHXU8R7yxr/bboDdiLn7M3VlQjq7BbzyMzoa8XYQYc4O1VpFiHjHfqE+XNOCNQL8p/gnuZ3PcKeqtSjHojSVWO3CjKWUNOuKNvTkbhrwx9UdnFVYxIEn1mDn2Vioa8kbMeYDZErMjz21vHY0hb6wSQXxLUJXSY27Ka7xAH+vlzL5cz056ciqYdqBOjPl+O8i4ml2dLPOrHG0336GH6ub63IvpRpSGfQi1XIiWD9hA5jvtDvDadUR1NmTe2bmr602hR67HYXzHkrlrfAcYqlzHXEkUqJWma919l4ZwBheNgbPteNJWxwrXWKWUsV3e/kYYv5F5o1ybKg2xlGq24Yx4F/1tMJrvDGUHrF5I2CJr36tqltQ28LtkreewNfqvsCIi5RPWx9Y+3/H6H9viD9qW/a4lY09lxZdnH8AKYRhzgVXaW/+/zNut
x0ti7I+tecN2I1iz11UBSQtH3WugRuhxg9nsXK2ai6zz94urfmDNGNI9jfVsm9vnmZbp7YxytltY90rNaIz1SlGvtTHqfjU9z/dJDaM3zPTP+fuqhR13cx6a2dja/H2uMXbKLWlk+sff69lyKX9gZuS1pByPDd9GjKCAtdW+91Hjv5LZxlmfrOsZ61196GLEUo1KaULMdNfmhBd9FSE4yzXqO+n3pr4UdW5KJulC1nbXM0lzrsdWN0vzQD9CvaazCEZthRS5PpjX3+uRsNs8o1rliDbPKFNMuCUT1P3p9cW4SsFim2fU1P1xzTGp+wOOTHPWuEebZ5Q4T5tnVK9gIcfOfQF/43p4lt5PSDps0oNux0K6W1UK5ZoE3KYJp2WnH54za/Pk/NX9258JsL8y0wvuR+FxVmxNB80ZIlMtY30dUyZptOmqbQupQdK4LmvfVO0ZcTXKrm1aQ6W+tmNZo5yjxhjk/TQtp4lpf3ZIL6xqJcluoPxurxVrn16336NzJ5m+PstbXu/k19dXNcb6CBijhfLUoKxfV92sX3p/xXH9VQeqQlehfdx8D1StrnKUVPF0v0leQJDIMJ+dSAbmgLw8mPkyqY/rcd9JNPMrxmlhD6wR2CsxHoWMer1VQzO3F7bfO/IXZxjCbDMucASsv9F6sI/iHQ4+PtvMC2vDdXJAlIZUdaAGru/oEgRmQIG9FKhH0vyLuMbfsdWf21tx8Eb1VHDQqvuhV1fhUf7rw1o7zN7y4zyEFeajkx4ah/vrxYBqf4A3jumvl2cq/TWcX2L4fPs127v+gAJile+mN0tJp+b9AWopkDvAGcFsmEP6O0DNI8BZx/QH2PJAf+ijCrhOoBVhSH/j43NMHpROeyqUDpZOh0aKMxeMMav2YOn0UO6Nl0qnnbrlr5NOyeuS4oN+S+nUr3vq88Gq+vbWq6AqR1zhINElE7LDth4M5D3UrZTAlcKkB0NbKSxpDElp5y0hK4X1eDB4OymG+jGlGJvtf7LGTOvN0B2vx/WCslFHipFrIT0YaqoSoXsw7Dw/eJyaJMbVoJN6K62lZMs2JTpZe6OV+ihLfkcyjGn9W4+csO7zyIkESVvSI6eQp83wyKE20iNH+FbXI2dRt3UJ5VhMSdlF1N/ahkOsRK3XsuLqauyRQx5ghkfO4CpvIIXoGhJeY1ObsKvytqsosFei683i+7rc7v196jWrLaLYw3JAH85dPIzXiQO8LkUkD/L/dDEwX/GPcQycFoyfcGPyU34DSagqjutPHOhPhKS1BhnVHsTbD81XYBzsAquT2UN48SFJEuRzzLgGMn2x6eVvx2KPdHxcfwckSbQUHjffH0uSQBkwcgnrTQ7bjwPYDfNAYv5tzFzXG/V07PqBBHlMfz1xzXrV7ekY8AvJkIPu78/Bbsk/gN1Kh+Y5zd0jLAsH8rW8FLt1sOqrsFuMcqIoPczF+ibY7T8c5j8c5j8c5j8cZiCH+fH+/h4cJnsNhzkmd00TTo+zXf84d81LOUyHo76GwzSczysZvZntehiHcXprXu04DOp/a6Jr07epteODxH5MHZsBPAvr2BxRa+ewJSEQx/WYH6wmFVD+yyNm3YeEdPtJA7y/iaattv71FhnUpWDFqwQjUt7GIuMc1+NhiwzmL4NZiwIrM/5Ki0zHKnCcRWZYhoegRjsGzPYoi8wPs4O81CIzelOLTEU+mRv27P0HLTIHcBzghjVWy0SUP8RC0cnY1LHIjDF/iRvvyXF6rIUHTs0a+sJb37yBhYfyHBzR30ELT5xmR833gIVHhFW45lp0w/ajP95p259F+1Fl1kAL1KH1A7x6VH/iwP5iDAhayLAK3ZD1E2GPH//h2238bfxC+3Zam/rR95QJ6ITtC4yH9CxA2t2c1g3SzOD0YXlzdfd0x7ltXfKZnH+UOT7aZ5klcyKJNmejzFOh3VkYH+VpIgw63T7v+o+8P3MiG+j8O2f6zvlYLOfPf92sHuCv+Zdv96v/8t7Z8H/L+2/HkZ/4P8AtvP8DP3hn2/DX9/uvq/sau7DtET96vvl6/2WlPHL8d87ksT69f3q8X31toIn8wf91PYt/08jmrvx7M7+joeCzD/Z/ex/48cP9vHyQXduWbHvzjR+U2/7h6Qm/FT481pP75XL759enp5Xy3enXm+eH8OnuHlv8Pw==</diagram></mxfile>

2210.00364/main_diagram/main_diagram.pdf
ADDED

Binary file (11.4 kB).

2210.00364/paper_text/intro_method.md
ADDED

@@ -0,0 +1,150 @@
# Introduction

A primary goal of representation learning is to learn representations r(x) of complex data x that "make it easier to extract useful information when building classifiers or other predictors" (Bengio et al., 2013). Disentangled representations, which aim to recover and separate (or, more formally, identify) the underlying factors of variation z that generate the data as x = g(z), are a promising step in this direction. In particular, it has been argued that such representations are not only interpretable (Kulkarni et al., 2015; Chen et al., 2016) but also make it easier to extract useful information for downstream tasks by recombining previously-learnt factors in novel ways (Lake et al., 2017).

While there is no single, widely-accepted definition, many evaluation protocols have been proposed to capture different notions of disentanglement based on the relationship between the learnt representation or *code* $c = r(x)$ and the ground-truth data-generative factors $z$ (Higgins et al., 2017; Eastwood & Williams, 2018; Ridgeway & Mozer, 2018; Kim & Mnih, 2018; Chen et al., 2018; Suter et al., 2019; Shu et al., 2020). In particular, the metrics of Eastwood & Williams (2018)—*disentanglement* (D), *completeness* (C) and *informativeness* (I)—estimate this relationship by learning a *probe* $f$ to predict $z$ from $c$ and can be used to relate many other notions of disentanglement (see Locatello et al. 2020, § 6).

In this work, we extend this DCI framework in several ways. Our main idea is that the functional capacity required to recover z from c is an important but thus-far neglected aspect of representation quality. For example, consider the case of recovering z from: (i) a noisy version thereof; (ii) raw, high-dimensional data (e.g. images); and (iii) a linearly-mixed version thereof, with each $c_i$ containing the same amount of information about each $z_j$ (precise definition in § 6.1). The noisy version (i) will do quite well with just linear capacity, but is fundamentally limited by the noise corruption; the raw data (ii) will likely do quite poorly with linear capacity, but eventually outperform (i) given sufficient capacity; and the linearly-mixed version (iii) will perfectly recover z with just linear capacity, yet achieve the worst-possible disentanglement score of D=0. Motivated by this observation, we introduce a measure of explicitness or ease-of-use based on a representation's loss-capacity curve (see Fig. 1).

<sup>1</sup>Max Planck Institute for Intelligent Systems, Tübingen, Germany

<sup>2</sup>School of Informatics, University of Edinburgh

<sup>3</sup>Department of Engineering, University of Cambridge

<sup>4</sup>Technical University of Denmark

<sup>\*</sup>Equal contribution.

Figure 1: Loss-capacity curves. Empirical loss-capacity curves (see § 4.1) for various representations (see legend), datasets (top: MPI3D-Real, bottom: Cars3D), and probe types (left: multi-layer perceptrons / MLPs, middle: Random Fourier Features / RFFs, right: Random Forests / RFs). The loss was first averaged over factors $z_i$, and then means and 95% confidence intervals were computed over 3 random seeds. Details in § 6.
|
| 22 |
+
|
| 23 |
+
**Structure and contributions.** First, we connect the DCI metrics to two common notions of linear and nonlinear identifiability (§ 3). Next, we propose an extended DCI-ES framework (§ 4) in which we: (i) introduce two new complementary measures of representation quality—*explicitness* (E), derived from a representation's *loss-capacity curve*, and *size* (S); and then (ii) elucidate a means to compute the D and C scores for arbitrary black-box probes (e.g., MLPs). Finally, in our experiments (§ 6), we use our extended framework to compare different representations on the MPI3D-Real (Gondal et al., 2019) and Cars3D (Reed et al., 2015) datasets, illustrating the practical usefulness of our E score through its strong correlation with downstream performance.
|
| 24 |
+
|
| 25 |
+
Given a synthetic dataset of observations x = g(z) along with the corresponding K-dimensional data-generating factors $z \in \mathbb{R}^K$, the DCI framework quantitatively evaluates an L-dimensional data representation or *code* $c = r(x) \in \mathbb{R}^L$ using two steps: (i) train a probe f to predict z from c, i.e., $\hat{z} = f(c) = f(r(x)) = f(r(g(z)))$; and then (ii) quantify f's prediction error and its deviation from the ideal one-to-one mapping, namely a permutation matrix (with extra "dead" units in c whenever L > K). For step (i), Eastwood & Williams (2018) use Lasso (Tibshirani, 1996) or Random Forests (RFs, Breiman 2001) as linear or nonlinear predictors, respectively, for which it is straightforward to read off suitable "relative feature importances".
|
| 26 |
+
|
| 27 |
+
<span id="page-1-2"></span>**Definition 2.1.** $R \in \mathbb{R}^{L \times K}$ is a matrix of relative importances for predicting z from c via $\hat{z} = f(c)$ if $R_{ij}$ captures some notion of the contribution of $c_i$ to predicting $z_j$ s.t. $\forall i, j: R_{ij} \geq 0$ and $\sum_{i=1}^{L} R_{ij} = 1$ .
|
| 28 |
+
|
| 29 |
+
For step (ii), Eastwood & Williams use **R** and the prediction error to define and quantify three desiderata of disentangled representations: *disentanglement* (D), *completeness* (C), and *informativeness* (I).
|
| 30 |
+
|
| 31 |
+
**Disentanglement.** Disentanglement (D) measures the average number of data-generating factors $z_j$ that are captured by any single code $c_i$. The score $D_i$ is given by $D_i = 1 - H_K(P_{i.})$, where $H_K(P_{i.}) = -\sum_{k=1}^K P_{ik} \log_K P_{ik}$ denotes the entropy of the distribution $P_{i.}$ over *row* $i$ of $\mathbf{R}$, with $P_{ij} = R_{ij} / \sum_{k=1}^K R_{ik}$. If $c_i$ is only important for predicting a single $z_j$, we get a perfect score of $D_i = 1$. If $c_i$ is equally important for predicting all $z_j$ (for $j = 1, \ldots, K$), we get the worst score of $D_i = 0$. The overall score D is then given by the weighted average $D = \sum_{i=1}^L \rho_i D_i$, with $\rho_i = \frac{1}{K} \sum_{k=1}^K R_{ik}$.
|
| 32 |
+
|
| 33 |
+
**Completeness.** Completeness (C) measures the average number of code variables $c_i$ required to capture any single $z_j$; it has also been called *compactness* (Ridgeway & Mozer, 2018). The score $C_j$ in capturing $z_j$ is given by $C_j = 1 - H_L(\tilde{P}_{.j})$, where $H_L(\tilde{P}_{.j}) = -\sum_{\ell=1}^L \tilde{P}_{\ell j} \log_L \tilde{P}_{\ell j}$ denotes the entropy of the distribution $\tilde{P}_{.j}$ over *column* $j$ of $\mathbf{R}$, with $\tilde{P}_{ij} = R_{ij}$. If a single $c_i$ contributes to $z_j$'s prediction, we get a perfect score of $C_j = 1$. If all $c_i$ equally contribute to $z_j$'s prediction (for $i = 1, \ldots, L$), we get the worst score of $C_j = 0$. The overall completeness score is given by $C = \frac{1}{K} \sum_{j=1}^{K} C_j$.

<sup>1</sup>W.l.o.g., it can be assumed that $z_i$ and $c_j$ are normalised to have mean zero and variance one for all $i, j$, for otherwise such normalisation can be "absorbed" into $g(\cdot)$ and $r(\cdot)$.
|
| 38 |
+
|
| 39 |
+
**Remark 2.2.** Together, D and C quantify the degree of "mixing" between c and z, i.e., the deviation from a one-to-one mapping. They are reported separately as they capture distinct criteria.
|
| 40 |
+
|
| 41 |
+
**Informativeness.** The informativeness (I) of representation c about data-generative factor $z_j$ is quantified by the prediction error, i.e., $I_j = 1 - \mathbb{E}[\ell(z_j, f_j(c))]$, where $\ell$ is an appropriate loss function. Note that $I_j$ depends on the capacity of $f_j$, as depicted in Fig. 1. Thus, for $I_j$ to accurately capture the informativeness of c about $z_j$, $f_j$ must have sufficient capacity to extract all of the information in c about $z_j$. This capacity-informativeness dependency motivates a separate measure of representation *explicitness* in § 4.1. The overall informativeness score is given by $I = \frac{1}{K} \sum_{j=1}^{K} I_j$.
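For concreteness, the three scores can be computed in a few lines. The following NumPy sketch is our own illustration (helper names ours), assuming the columns of R sum to 1 (Defn. 2.1) and factors normalised as in footnote 1:

```python
import numpy as np

def dci_scores(R, z_true, z_pred):
    """D, C, I from an (L x K) relative-importance matrix R (columns sum to 1)
    and held-out predictions z_pred of the true factors z_true (both N x K)."""
    L, K = R.shape
    eps = 1e-12

    # Disentanglement: per-code entropy over rows of R (log base K).
    P = R / (R.sum(axis=1, keepdims=True) + eps)          # rows -> distributions
    H_rows = -(P * np.log(P + eps) / np.log(K)).sum(axis=1)
    rho = R.sum(axis=1) / K                               # code weights, sum to 1
    D = float(np.sum(rho * (1.0 - H_rows)))

    # Completeness: per-factor entropy over columns of R (log base L).
    H_cols = -(R * np.log(R + eps) / np.log(L)).sum(axis=0)
    C = float(np.mean(1.0 - H_cols))

    # Informativeness: 1 - prediction error (here, per-factor MSE).
    I = float(np.mean(1.0 - ((z_true - z_pred) ** 2).mean(axis=0)))
    return D, C, I
```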
|
| 42 |
+
|
| 43 |
+
The goal of learning a data representation which recovers the underlying data-generating factors is closely related to blind source separation and independent component analysis (ICA, Comon 1994; Hyvärinen & Pajunen 1999; Hyvärinen et al. 2019). Whether a given learning algorithm provably achieves this goal up to acceptable ambiguities, subject to certain assumptions on the data-generating process, is typically formalised using the notion of *identifiability*. Two common types of identifiability for linear and nonlinear settings, respectively, are the following.
|
| 44 |
+
|
| 45 |
+
<span id="page-2-3"></span>**Definition 3.1.** We say that c = r(x) = r(g(z)) identifies z up to sign and permutation if c = Pz for some signed permutation matrix P (i.e., |P| is a permutation).
|
| 46 |
+
|
| 47 |
+
<span id="page-2-4"></span>**Definition 3.2.** We say c identifies z up to permutation and element-wise reparametrisation if there exists a permutation $\pi$ of $\{1,...,K\}$ and invertible scalar-functions $\{h_k\}_{k=1}^K$ s.t. $\forall j: c_j = h_j(z_{\pi(j)})$ .
|
| 48 |
+
|
| 49 |
+
We now establish theoretical connections between the DCI framework and these identifiability types.
|
| 50 |
+
|
| 51 |
+
<span id="page-2-2"></span>**Proposition 3.3.** If D = C = 1 and K = L (i.e., $dim(\mathbf{c}) = dim(\mathbf{z})$ ), then **R** is a permutation matrix.
|
| 52 |
+
|
| 53 |
+
All proofs are provided in Appendix A. Using Prop. 3.3, we can establish links to identifiability, provided the inferred representation c perfectly predicts the true data-generating factors z, i.e., I=1.
|
| 54 |
+
|
| 55 |
+
<span id="page-2-8"></span>**Corollary 3.4.** Under the same conditions as Prop. 3.3, if $z = W^{\top}c$ (so that I = 1) for some W with $R_{ij} = \frac{|w_{ij}|}{\sum_{l=1}^{L} |w_{ij}|}$ , then c identifies z up to permutation and sign (Defn. 3.1).
|
| 56 |
+
|
| 57 |
+
For nonlinear f, we give a more general statement for suitably-chosen feature-importance matrices R.
|
| 58 |
+
|
| 59 |
+
<span id="page-2-5"></span>**Corollary 3.5.** Under the same conditions as Prop. 3.3, let z = f(c) (so that I = 1) with f an invertible and differentiable nonlinear function, and let R be a matrix of relative feature importances for f (Defn. 2.1) with the property that $R_{ij} = 0$ if and only if $f_j$ does not depend on $c_i$ , i.e., $\left|\left|\partial_i f_j\right|\right|_2 = 0$ . Then c identifies z up to permutation and element-wise reparametrisation (Defn. 3.2).
|
| 60 |
+
|
| 61 |
+
<span id="page-2-6"></span>**Remark 3.6.** While the *if* part of Corollary 3.5 holds for most feature importance measures, the *only if* part, in general, does not: not using a feature $c_i$ is typically a *sufficient* condition for $R_{ij} = 0$ , but it need not be a *necessary* condition (as required for Corollary 3.5). E.g., measures based on *average* performance may not satisfy this since a feature may not contribute on average, but still be used—sometimes helping and sometimes hurting performance (see § 7 for further discussion). In contrast, Gini importances, as used in random forests, *do* satisfy the necessary condition. While the non-invertibility of random forests prevents an explicit link to identifiability (typically studied for continuous features), they can still be a principled choice in practice (where features are often categorical).
|
| 62 |
+
|
| 63 |
+
**Summary.** We have established that the learnt representation c identifies the ground-truth z up to:
|
| 64 |
+
|
| 65 |
+
- sign and permutation if D = C = I = 1 and f is linear;
|
| 66 |
+
- permutation and element-wise reparametrisation if D = C = I = 1 and $R_{ij} = 0 \Leftrightarrow ||\partial_i f_j||_2 = 0$ .
|
| 67 |
+
|
| 68 |
+
<span id="page-2-1"></span><sup>&</sup>lt;sup>2</sup>Here we deviate from Eastwood & Williams (who had $I_i = \mathbb{E}[\ell(z_i, f_i(c))]$ ) such that 1 is now the best score.
|
| 69 |
+
|
| 70 |
+
# Method
|
| 71 |
+
|
| 72 |
+
Motivated by our theoretical insights from § 3—considering different probe function classes provides links to different types of identifiability—and the empirically-observed performance differences between representations when probed with different-capacity probes (Fig. 1), we now propose several extensions of the DCI framework.
|
| 73 |
+
|
| 74 |
+
We first introduce a new complementary notion of disentanglement based on the functional capacity required to recover or predict z from c. The key idea is to measure the *explicitness* or *ease-of-use* (E) of a representation using its *loss-capacity curve*.
|
| 75 |
+
|
| 76 |
+
**Notation.** Let $\mathcal{F}$ be a probe function class (e.g., MLPs or RFs), let $f_j^* \in \arg\min_{f \in \mathcal{F}} \mathbb{E}[\ell(z_j, f(c))]$ be a minimum-loss probe for factor $z_j$ on a held-out data split<sup>3</sup>, and let $\operatorname{Cap}(\cdot)$ be a suitable capacity measure on $\mathcal{F}$ —e.g., for RFs, $\operatorname{Cap}(f)$ could correspond to the maximum tree-depth of f.
|
| 77 |
+
|
| 78 |
+
**Loss-capacity curves.** A loss-capacity curve for representation c, factor $z_j$ , and probe class $\mathcal{F}$ displays test-set loss against probe capacity for increasing-capacity probes $f \in \mathcal{F}$ (see Fig. 1). To plot such a curve, we must train T predictors with capacities $\kappa_1, \ldots, \kappa_T$ to predict $z_j$ , with
|
| 79 |
+
|
| 80 |
+
<span id="page-3-5"></span>
|
| 81 |
+
$$f_j^t \in \operatorname{arg\,min}_{f \in \mathcal{F}} \mathbb{E}\left[\ell(z_j, f(c))\right] \quad \text{s.t.} \quad \operatorname{Cap}(f) = \kappa_t. \tag{4.1}$$
|
| 83 |
+
|
| 84 |
+
Here $\kappa_1, \ldots, \kappa_T$ is a list of T increasing probe *capacities*, ideally<sup>4</sup> shared by all representations, with suitable choices for $\kappa_1$ and $\kappa_T$ depending on both $\mathcal{F}$ and the dataset. For example, we may choose $\kappa_T$ to be large enough for all representations to achieve their lowest loss and, for random-forest probes $f$, we may choose an initial tree depth of $\kappa_1 = 1$ and then T - 2 tree depths between 1 and $\kappa_T$.
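As a concrete illustration of Eq. (4.1) with RF probes, here is a minimal scikit-learn sketch (our own setup, not prescribed by the framework), with maximum tree depth as $\operatorname{Cap}(f)$ and MSE as $\ell$:

```python
import numpy as np
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import train_test_split

def loss_capacity_curve(c, z_j, depths=(1, 2, 4, 8, 16, 32)):
    """Test-set MSE of RF probes of increasing capacity (max tree depth)."""
    c_tr, c_te, z_tr, z_te = train_test_split(c, z_j, test_size=0.2, random_state=0)
    losses = []
    for d in depths:  # kappa_1, ..., kappa_T
        probe = RandomForestRegressor(n_estimators=100, max_depth=d, random_state=0)
        probe.fit(c_tr, z_tr)
        losses.append(np.mean((probe.predict(c_te) - z_te) ** 2))
    return np.array(depths), np.array(losses)
```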
|
| 85 |
+
|
| 86 |
+
**AULCC.** We next define the *Area Under the Loss-Capacity Curve* (AULCC) for representation c, factor $z_j$ , and probe class $\mathcal{F}$ as the (approximate) area between the corresponding loss-capacity curve and the loss-line of our best predictor $\ell_j^{*,c} = \mathbb{E}[\ell(z_j, f_j^*(c))]$ . To compute this area, depicted in Fig. 2, we use the trapezoidal rule
|
| 87 |
+
|
| 88 |
+
$$AULCC(z_j, c; \mathcal{F}) = \sum_{t=2}^{t^{*,c}} \left( \frac{1}{2} \left( \ell_j^{t-1,c} + \ell_j^{t,c} \right) - \ell_j^{*,c} \right) \cdot \Delta \kappa_t,$$
|
| 89 |
+
|
| 90 |
+
where $t^{*,c}$ denotes the index of c's lowest-loss capacity $\kappa_{*,c}$ ; $\ell_j^{t,c} = \mathbb{E}[\ell(z_j, f_j^t(c))]$ the test-set loss with predictor $f_j^t$ , see Eq. (4.1); and $\Delta \kappa_t = \kappa_t - \kappa_{t-1}$ the size of the capacity interval at step t. If the lowest loss is achieved at the lowest capacity, i.e. $t^{*,c} = 1$ , we set AULCC = 0.
|
| 91 |
+
|
| 92 |
+
**Explicitness.** We define the **explicitness** (E) of representation c for predicting factor $z_j$ with predictor class $\mathcal{F}$ as
|
| 93 |
+
|
| 94 |
+
$$E(z_j, c; \mathcal{F}) = 1 - \frac{\text{AULCC}(z_j, c; \mathcal{F})}{\frac{1}{2}(\kappa_T - \kappa_1)(\ell_j^b - \ell_j^*)},$$
|
| 95 |
+
|
| 96 |
+
<span id="page-3-4"></span>
|
| 97 |
+
|
| 98 |
+
Figure 2: Explicitness via the area under the loss-capacity curve (AULCC). Here, $\kappa_1, ..., \kappa_T$ (x-axis) are a sequence of increasing function-capacities and $\ell^{1,c}, ..., \ell^{T,c}$ (y-axis) are the losses achieved by the corresponding optimal predictors for **c**. The lowest loss $\ell^{*,c}$ is achieved at capacity $\kappa_{*,c}$ , while $\ell^b$ and $\ell^*$ are suitable baseline and best-possible losses for the probe class.
|
| 99 |
+
|
| 100 |
+
where $\ell_j^b$ is a suitable baseline loss (e.g., that of predicting $\mathbb{E}[z_j]$) and $\ell_j^*$ a suitable lowest loss (e.g., 0) for $\mathcal{F}$. Here, the denominator represents the area of the light-blue triangle in Fig. 2, normalising the AULCC such that $E_j \in [-1,1]$ so long as $\ell_j^* < \ell_j^b$. The best score $E_j = 1$ means that the best loss was achieved with the lowest-capacity probe $f_j^1$, i.e., $\ell_j^{*,c} = \ell_j^{1,c}$ and $\kappa_{*,c} = \kappa_1$, and thus our representation c was explicit or easy-to-use for predicting $z_j$ with $f \in \mathcal{F}$ since there was no surplus capacity required (beyond $\kappa_1$) to achieve our lowest loss. In contrast, $E_j = 0$ means that the loss reduced linearly from $\ell_j^b$ to $\ell_j^*$ with increased probe capacity, i.e., the AULCC equals the normalising area in Fig. 2. More generally, if $\ell^{*,c} = \ell^*$, i.e. the lowest loss for $\mathcal{F}$ can be reached with representation c, then $E_j < 0$ implies that the loss decreased sub-linearly with increased capacity while $E_j > 0$ implies it decreased super-linearly. The overall explicitness score is given by $E = \frac{1}{K} \sum_{j=1}^{K} E_j$.

<sup>3</sup>In practice, all expectations are taken w.r.t. the corresponding empirical (train/validation/test) distributions.

<sup>4</sup>True for RFs but not input-size-dependent MLPs (see § 6).
|
| 107 |
+
|
| 108 |
+
**E vs. I.** While the informativeness score $I_j$ captures the (total) amount of information in c about $z_j$ , the explicitness score $E_j$ captures the *ease-of-use* of this information. In particular, while $I_j$ is quantified by the *lowest prediction error with any capacity* $\ell^{*,c}$ , corresponding to a single point on c's loss-capacity curve, $E_j$ is quantified by the *area under this curve*.
|
| 109 |
+
|
| 110 |
+
A fine-grained picture of identifiability. Compared to the commonly-used mean correlation coefficient (MCC) or Amari distance (Amari et al., 1996; Yang & Amari, 1997), the D, C, I, E scores represent empirical measures which: (i) easily extend to mismatches in dimensionalities, i.e., L > K; and (ii) provide a more fine-grained picture of identifiability (violations), for if the initial probe capacity $\kappa_1$ is linear and R satisfies Corollary 3.5, we have that:
|
| 111 |
+
|
| 112 |
+
- $D = C = I = E = 1 \implies$ identified up to sign and permutation (Defn. 3.1);
|
| 113 |
+
- $D = C = I = 1 \implies$ identified up to permutation and element-wise reparametrisation (Defn. 3.2);
|
| 114 |
+
- $I = E = 1 \implies$ identified up to invertible linear transformation (cf. Khemakhem et al., 2020).
|
| 115 |
+
|
| 116 |
+
Thus, if D = C = I = E = 1 does not hold exactly, which score deviates the most from 1 may provide valuable insight into the type of identifiability violation.
|
| 117 |
+
|
| 118 |
+
**Probe classes.** As emphasized above, whether or not a representation c is explicit or easy-to-use for predicting factor $z_j$ depends on the class of probe $\mathcal{F}$ used, e.g., MLPs or RFs. More generally, the explicitness of a representation depends on the way in which it is used in downstream applications, with different downstream uses or probe classes resulting in different definitions of explicit or easy-to-use information. We thus conduct experiments with different probe classes in § 6.
|
| 119 |
+
|
| 120 |
+
We next introduce a measure of representation size (S), motivated by the observation that larger representations tend to be both more informative and more explicit (see Tab. 1, more details below). Reporting S thus allows size-informativeness and size-explicitness trade-offs to be analysed.
|
| 121 |
+
|
| 122 |
+
A measure of size. We measure representation size (S) relative to the ground-truth as:
|
| 123 |
+
|
| 124 |
+
$$S = \frac{K}{L} = \frac{\dim(z)}{\dim(c)}.$$
|
| 125 |
+
|
| 126 |
+
When $L \ge K$, as is often the case, we have $S \in (0, 1]$ with the perfect score being S = 1. However, if we also consider the L < K case, which would likely sacrifice some informativeness, we have $S \in (1, K]$.
|
| 127 |
+
|
| 128 |
+
**Larger representations are often more** *informative*. When L < K, it is intuitive that larger representations are more informative—they can simply preserve more information about z. When L > K, however, it is also common for larger representations to be more informative, perhaps due to an easier optimization landscape (Frankle & Carbin, 2019; Golubeva et al., 2021). Tab. 1 illustrates this point, where AE-5 denotes an autoencoder with L = 5. Note that K = 7 for MPI3D-Real (see § 6).
|
| 129 |
+
|
| 130 |
+
**Larger representations are often more** *explicit*. The explicitness of a representation also depends on its size: larger representations tend to be more explicit, as is apparent from the second column of Tab. 1. To explain this, we plot the corresponding loss-capacity curves in Fig. 3. Here we see that the increased explicitness (i.e., smaller AULCC) of larger representations stems from a substantially lower initial loss when using a linear-capacity MLP probe. The fact that larger representations perform better with linear-capacity MLPs is unsurprising since they have more parameters.
|
| 131 |
+
|
| 132 |
+
Finally, to meaningfully discuss more flexible probe-function choices within the DCI-ES framework, we point out that the D and C scores can be computed for arbitrary black-box probes f by using *probe-agnostic* feature-importance measures. In particular, in our experiments (§ 6), we use SAGE (Covert et al., 2020), which summarises each feature's importance based on its contribution to predictive performance, making use of Shapley values (Shapley, 1953) to account for complex feature interactions. Such probe-agnostic measures allow the *D* and *C* scores to be computed for probes with no inherent or built-in notion of feature importance (e.g., MLPs), thereby generalising the Lasso and RF examples of Eastwood & Williams (2018, § 4.3). While SAGE has several practical advantages over other probe-agnostic methods (see, e.g., Covert et al., 2020, Table 1), it may not satisfy the conditions required to link the *D* and *C* scores to different identifiability equivalence classes (see Remark 3.6). Future work may explore alternative methods which do, e.g., by looking at a feature's mean *absolute* attribution value (Lundberg & Lee, 2017) since, intuitively, absolute contributions do not allow for a cancellation of positive and negative attribution on average (cf. Remark 3.6).

| Representation | I    | E    | S    |
|----------------|------|------|------|
| AE-5           | 0.75 | 0.74 | 1.4  |
| AE-7           | 0.92 | 0.71 | 1.0  |
| AE-10          | 0.99 | 0.72 | 0.7  |
| AE-100         | 1.0  | 0.90 | 0.07 |
| AE-500         | 1.0  | 0.93 | 0.01 |

Table 1: I, E and S scores for auto-encoders of various sizes on MPI3D-Real with MLP probes.



Figure 3: Loss-capacity curves for auto-encoders of various sizes on MPI3D-Real with MLP probes.
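For illustration, here is a minimal sketch of constructing such a probe-agnostic importance matrix. We use scikit-learn's permutation importance as a simple stand-in for SAGE (note that, per Remark 3.6, such average-performance measures need not satisfy the *only if* condition), and the helper names are ours:

```python
import numpy as np
from sklearn.inspection import permutation_importance
from sklearn.neural_network import MLPRegressor

def importance_matrix(c, z, n_repeats=10):
    """(L x K) relative-importance matrix R (Defn. 2.1) for black-box MLP probes,
    using permutation importance as a simple stand-in for SAGE."""
    L, K = c.shape[1], z.shape[1]
    R = np.zeros((L, K))
    for j in range(K):
        probe = MLPRegressor(hidden_layer_sizes=(256,), max_iter=500).fit(c, z[:, j])
        imp = permutation_importance(probe, c, z[:, j], n_repeats=n_repeats).importances_mean
        imp = np.clip(imp, 0.0, None)          # enforce R_ij >= 0
        R[:, j] = imp / (imp.sum() + 1e-12)    # columns sum to 1
    return R
```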
|
2210.00828/main_diagram/main_diagram.drawio
ADDED
|
@@ -0,0 +1 @@
|
| 1 |
+
<mxfile host="app.diagrams.net" modified="2022-03-07T10:42:52.910Z" agent="5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/99.0.4844.51 Safari/537.36" etag="SD5y0QpqRdZh0C0OL5Fh" version="16.6.5" type="device"><diagram id="zqUXkllNMNKVq8QEYoBN" name="Page-1">7Vxbd5s4EP41Pmf3IT5IIAGPieM4e5K2OZumafqyhxjVpsWIlXFs769fAZK52Q5OzS11XgLSSMCMvm9GI8k9dTBbjZjlTz9Qm7g9qNirnnrZgxBgCPi/sGQdlxi6GRdMmGMLoaTg3vmPiEJFlC4cm8wzggGlbuD42cIx9TwyDjJlFmN0mRX7Tt3sU31rIp6oJAX3Y8slBbFHxw6m4itQSvqaOJOpfDJQRM3MksKii/nUsuky9Sx12FMHjNIgvpqtBsQNlSf1End0taN282KMeEGZBvPJ+Y1yZn3++Hj30/8wG14z/+FM9PJiuQvxwT2IXd7fxXfKu+VvHayFKvC/CyorzuaRoc65AND8VVLJrybh/1vHIxbj1X8gAHtY6aFB4My4IbECEf5TPoS/bfycuJVQ1OaRkNGFZ5PwAxRevZw6Abn3rXFYu+TjjZdNg5nL70D4Yo7rDqhLWdRWvURD41Lb9JpWlPxqwgKyShUJxY0InZGArbmIrFWFVcUohtLKy2RMAF2UTVPjQbazxDCcbLpOLMUvhLEOMRwoKIvYfOSKW8qCKZ1Qz3KHSelFVp2JzC2lvlDiDxIEawFDaxHQrIrJygm+hs37SNw9pWouV6Ln6GYtbzz+valG4e1Tui5pFt3JduHAEC8C9ppxThdsTPboShO8YbEJCfbIoVgu1OPeQcGIawXOS5Yhjm5hrQDN0fD24bgQ0YbgClUEEbUkRGBVEEGVcxunsnfIbUbj3AZP3FaW23BJbtNbxW14JzTnvuVJyMR8t0FTuqozNKiCpmlQP9HgmwyHGqdB9USDZWnQKEmDZqto0Ph9aLBsUFFdNIi7gqbjoeLoo100vaNO5DmEsSFAGWNres6IMSxFq5wdN6/xdtOa9Xs4rIDOezgNNu7hOoPJlIeDDbk4UDaNAVrl40Axj3Ht2DbxIrtbAdmDF1ACLzn95vBzdXUFB4Pj4MfMwgdsS3DALfDRKoOP3ix84JvwozeFH9RN/BTTWIMFiz4/tDzxO4MfqLYOQLsD8NPcOGU5FecsZ6KmIwezg5FDQ8QnIfUq8cF2MZ98766sgORRAmFJlFQ25918w4nfDrOc3jS/he1P/FaS39Sy/Ka1i9/UnejsevovDyhVaZwKi9PQKqiQv/+7Y0NVa5wN0YkNS7Nh2RVh2K4lYfh+14QLgCobXlTGhlrDmaM3AQqYZhpSZ0pfUcCBuIpq7wjj9BwQJoR+AWx6EWzfvj5eX3jr6cOjNxqdPxs/Ztfw+Dml7SsxWIf97FoMlORd01oMrGe7wXv0sxpo3M+esirloV92y4FMMrbFzxZznjckfLwvqITMnoltO95kD3DalVhWjSznNZ9Y1oyuAGmrg4w7wdmVztDfAv0VfB3Zv0pDVu03dRX3Yd5xan3NrNV3qsWE55cYBJxm1j7pJDgxah04u+jljFDHKT/HY1+12dhXrSmm1bHZ11Vz82fk49u+YiqbP1BvuCuVkIJsOGp4iUu8CR/fXYSs3rqdDnIvbrd2OlQfme6ba74amGpHP++0A8BaflKq6hy0qT+Y7TL+wsowixo+OqcrODWalL6pGoeFaLzyTTT+a6MF1zNaDKi3a7TArkQLRzwwWRMzYDO3cRjnHEjF3lur54jge0hWYSVnqm0noGWoX0uyCmldBWY6REgy2HKOjWqeY+8g9jLnpVE1HkB/GyucM2atU2JRQmu+5zkol3/Vcj+gcJg8v4jf4LgUtXtZrEqK6uL5Bj0/brZSVJ35dFTc4fGRrEKjTfdvgQ9CqYzeLNeZePx6zPUTgv4i1IszttxzUTHjPcYUR7jpreeoq9AgAga8X3TRQ5dhX5zV5oKP+O08YPQnkUbxqEdydpJFZeOLA7yKloMULjnTrO6YWDEk+JssLWaHDDnn8PidzZVPDLTAXMXlxi/R5claEOAdx/LS1gIVWevTzfh5RUffHp7OrBdLYZis2VlnplKpCE2DhV0HAFcWoW1VW12JVgX2Tbg70ar1Edw5Cz/eTG2fClIwv2PkxaGL+evOtK1pVgQbTLPuywUdP9a8//xB9vTMZOlfnr+IOk1aK+E2zbxcclxwIxj+qFhB7tMi4B3+k0hFgW1eqoGotjAktgyc0iESLLsb0zh8lPDb5PfoYugmv+qnDv8H</diagram></mxfile>
|
2210.00828/main_diagram/main_diagram.pdf
ADDED
|
Binary file (17.3 kB). View file
|
|
|
2210.00828/paper_text/intro_method.md
ADDED
|
@@ -0,0 +1,63 @@
| 1 |
+
# Introduction
|
| 2 |
+
|
| 3 |
+
Road layout modelling from satellite images constitutes an important task of remote sensing, with numerous applications in navigation. The vast amounts of data available from the commercialization of geospatial data, in addition to the need for accurately establishing the connectivity of roads in remote areas, have led to an increased interest in the precise representation of existing road networks. By nature, these applications require structured data types that provide efficient representations to encode geometry, in this case, graphs, a de facto choice in domains such as computer graphics, virtual reality, gaming, and the film industry. These structured-graph representations are also commonly used to label recent road network datasets [@van2018spacenet] and map repositories [@OpenStreetMap]. Based on these observations, we propose a new method for generating predictions directly as spatial graphs, allowing us to explicitly incorporate geometric constraints in the learning process, encouraging predictions that better capture higher-level dataset statistics.
|
| 4 |
+
|
| 5 |
+
<figure id="fig:statistics" data-latex-placement="t">
|
| 6 |
+
<embed src="figures/statistics.pdf" style="width:100.0%" />
|
| 7 |
+
<figcaption>Typical road network features (SpaceNet dataset). From left to right: (a) the distribution of angles between road segments leading to the same intersection is biased towards 0 and 90 degrees, parallel or perpendicular roads. The same also holds (b) for angles between random road pieces within a ground distance of 400 meters. (c) Most road vertices belong to a single road piece, with a degree of 2. (d) The average number of intersections for areas of 400<span class="math inline">×</span>400 meters by ground distance.</figcaption>
|
| 8 |
+
</figure>
|
| 9 |
+
|
| 10 |
+
In contrast, existing methods for road layout detection mostly rely on pixel-based segmentation models that are trained on masks produced by rasterizing ground truth graphs. Performing pixel-wise segmentation, though, ignores structural features and geometric constraints inherent to the problem. As a result, minor differences in the pixel-level output domain can have significant consequences in the proposed graph, in terms of connectivity and path distances, as manifested by the often fragmented outputs obtained after running inference on these models. In order to address these significant drawbacks, we propose a new paradigm where we: (i) directly generate outputs as spatial graphs and (ii) formalize the problem as a game where we sequentially construct the output by adding edges between key points. These key points can in principle come from any off-the-shelf detector that identifies road pieces with sufficient accuracy. Our generation process avoids having to resort to cumbersome post-processing steps [@batra2019improved; @montoya2015evaluation] or optimize some surrogate objectives [@mattyus2018matching; @mosinska2018beyond] whose relation to the desired qualities of the final prediction is disputed. Concurrently, the sequential decision-making strategy we propose enables us to focus interactively on different parts of the image, introducing the notion of a current state and producing reward estimates for a succession of actions. In essence, our method can be considered as a generalization of previous refinement techniques [@batra2019improved; @li2019topological] with three major advantages: (i) removal of the requirement for greedy decoding, (ii) ability to attend globally to the current prediction and selectively target parts of the image, and (iii) capacity to train based on demanding task-specific metrics.
|
| 11 |
+
|
| 12 |
+
More precisely, our contributions are the following:
|
| 13 |
+
|
| 14 |
+
- We propose a novel generic strategy for training and inference in autoregressive models that removes the requirement of decoding according to a pre-defined order and refines initial sampling probabilities via a tree search.
|
| 15 |
+
|
| 16 |
+
- We create a new synthetic benchmark dataset of pixel-level accurate labels of overhead satellite images for the task of road network extraction. This gives us the ability to simulate complex scenarios with occluded regions, allowing us to demonstrate the improved robustness of our approach. We plan to release this dataset publicly.
|
| 17 |
+
|
| 18 |
+
- We confirm the wide applicability of our approach by improving the performance of existing methods on the popular SpaceNet and DeepGlobe datasets.
|
| 19 |
+
|
| 20 |
+
# Method
|
| 21 |
+
|
| 22 |
+
We parametrize a road network as a graph ${\mathcal G}= \{ {\mathcal V}, {\mathcal E}\}$ with each vertex $v_i = [x_i, y_i]^\top\in {\mathcal V}$ representing a key point on the road surface. The set of edges $(v_i, v_j) \in {\mathcal E}$, corresponds to road segments connecting these key points. We can then generate a probability distribution over roads by following a two-step process: i) generation of a set of vertices and ii) generation of a set of edges connecting them. Formally, for an image ${\mathcal I}$, a road network ${\mathcal R}$ is derived as: $$\begin{equation}
|
| 23 |
+
{\mathcal R}= \mathop{\mathrm{arg\,max}}_{{\mathcal V}, {\mathcal E}} \displaystyle P({\mathcal V}, {\mathcal E}\:\vert\:{\mathcal I}) = \displaystyle P({\mathcal E}\:\vert\:{\mathcal V}, {\mathcal I}) \displaystyle P({\mathcal V}\:\vert\:{\mathcal I}).
|
| 24 |
+
\end{equation}$$ The graph nodes typically correspond to local information in an image, and we therefore resort to a CNN-based model to extract key points, providing the set ${\mathcal V}^{\prime}$, that sufficiently captures the information in the ground truth graph ${\mathcal G}$. The construction of edges, however, requires higher-level reasoning that can cope with parallel roads, junctions, occlusions, or poor image resolution, among other difficulties.
|
| 25 |
+
|
| 26 |
+
Considering probabilistic models over sequences and using the chain rule, we can factorize the joint distribution as the product of a series of conditional distributions $$\begin{equation}
|
| 27 |
+
\displaystyle P({\mathcal E}\:\vert\:{\mathcal V}, {\mathcal I}; \sigma) = \prod_{n=1}^{N_{{\mathcal E}}} \displaystyle P(e_{\sigma(n)} \:\vert\:e_{<\sigma(n)}, {\mathcal V}, {\mathcal I}),
|
| 28 |
+
\label{eq:chain-rule-edges}
|
| 29 |
+
\end{equation}$$ where $e_{<\sigma(n)}$ represents $e_{\sigma(1)}, e_{\sigma(2)}, \dots, e_{\sigma(n - 1)}$ and $\sigma \in S_{N_{{\mathcal E}}}$ is a permutation of the integers $1, 2, \dots, N_{{\mathcal E}}$, with $S_{N_{{\mathcal E}}}$ the set of all such permutations and $N_{{\mathcal E}}$ the number of edges. For our work, we consider the setting where these sequences are upper bounded in length, i.e. $N_{{\mathcal E}} \leq N_{\max}$, a reasonable assumption when dealing with satellite images of fixed size. Autoregressive models (ARMs) have been used to solve similar tasks in the past by defining a fixed order of decoding [@oord2016conditional; @vanwavenet; @nash2020polygen; @para2021generative]. In our case, this would correspond to sorting all key points by their $x$ and $y$ locations and generating edges for each of them consecutively. We call this the *autoregressive order*. There are, however, two major drawbacks.
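To make the *autoregressive order* concrete, here is a minimal sketch (our own illustration; names are ours) of flattening a ground-truth graph into the token sequence such a model would be trained on:

```python
import numpy as np

def edge_list_sequence(key_points, edges, eos_token):
    """Flatten a graph into the fixed 'autoregressive order' token sequence:
    key points sorted by (x, y); edges listed as successive endpoint indices."""
    order = np.lexsort((key_points[:, 1], key_points[:, 0]))  # sort by x, then y
    rank = {int(old): new for new, old in enumerate(order)}
    tokens = []
    for i, j in sorted((min(rank[a], rank[b]), max(rank[a], rank[b])) for a, b in edges):
        tokens += [i, j]                                      # one edge = two tokens
    return tokens + [eos_token]
```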
|
| 30 |
+
|
| 31 |
+
First, the evaluation metrics used for this task define a buffer region in which nodes in the ground truth and the predicted graph are considered to be a match. Therefore, a newly generated edge can be only partially correct, when only partially overlapping with the ground truth graph. This non-smooth feedback comes in clear contrast to the supervised training scheme of ARMs, minimization of the negative log-likelihood, that assumes perfect information regarding the key points' locations, i.e. that the sets ${\mathcal V}$ and ${\mathcal V}^{\prime}$ are the same. In practice, this condition is rarely met, as the exact spatial graph can be represented in arbitrarily many ways by subdividing long edges into smaller ones or due to small perturbation to key points' locations. It is thus imperative that our model can estimate the expected improvement of adding selected edges, which implicitly can also signal when to appropriately end the generation process.
|
| 32 |
+
|
| 33 |
+
Second, the requirement to decode according to the autoregressive order introduces a bias and limits the expressiveness of the model [@uria2014deep]. As a result, it can lead to failures in cases with blurry inputs or occlusions [@li2019topological]. Previous solutions include the use of beam search, either deterministic or stochastic [@meister2021conditional]. Beam search does not, however, eliminate the bias introduced in the selection order of the key points, while suffering from other deficiencies, such as degenerate repetitions [@holtzman2019curious; @fan2018hierarchical]. In order to address these shortcomings, we advocate for a permutation-invariant strategy. We present a novel generic strategy, which improves autoregressive models without requiring significantly more computation.
|
| 34 |
+
|
| 35 |
+
{#fig:whole-model width="95%"}
|
| 36 |
+
|
| 37 |
+
We start by introducing a base autoregressive model, illustrated in Fig. [4](#fig:whole-model){reference-type="ref" reference="fig:whole-model"}. Given an image and a set of key points, our model produces a graph by sequentially predicting a list of indices, corresponding to the graph's flattened, unweighted edge-list. Each forward pass produces probabilities over the set of key points, which leads to a new action after sampling. A successive pair of indices defines an edge as its two endpoints. A special end-of-sequence token is reserved to designate the end of the generation process.
|
| 38 |
+
|
| 39 |
+
Following @wang2018pixel2mesh [@smith2019geometrics], we begin by extracting visual features per key point, by interpolating intermediate layers of a ResNet backbone to the key points' locations, which are further augmented by position encodings of their locations. We then further process these features using two lightweight Transformer modules. The first transformer (Transformer I in Fig. [4](#fig:whole-model){reference-type="ref" reference="fig:whole-model"}) encodes the features of the key points as embeddings. The second transformer (Transformer II in Fig. [4](#fig:whole-model){reference-type="ref" reference="fig:whole-model"}) takes as input the currently generated edge list sequence, corresponding to the currently partially generated graph. Edges are directly mapped to the embeddings of their comprising key points, supplemented by position and type embeddings, to differentiate between them, as shown in Fig. [5](#fig:edge-token-embedding){reference-type="ref" reference="fig:edge-token-embedding"} (a). An additional global image embedding, also extracted by the ResNet, is used to initialize the sequence. The Transformer II module produces a single hidden state, which is linked with the $N_{{\mathcal V}^\prime} + 1$ (corresponding to the provided key points, supplemented by the special end of the generation token) key points' embeddings by a pointer network [@vinyals2015pointer], via a dot-product to generate the final distribution. This allows a variable number of actions that depends on the current environment state, instead of using a fixed action space.
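A minimal sketch (ours, with assumed shapes) of the final pointer-network step described above, scoring the $N_{{\mathcal V}^\prime} + 1$ candidates (key points plus the end-of-sequence token) with a dot product:

```python
import torch

def pointer_distribution(hidden, keypoint_emb, eos_emb):
    """Pointer-network head: dot products between the decoder's hidden state
    (B x d) and the key-point embeddings (B x N x d), plus a learned
    end-of-sequence embedding (d,), giving a distribution over N + 1 actions."""
    B = keypoint_emb.size(0)
    cand = torch.cat([keypoint_emb, eos_emb.view(1, 1, -1).expand(B, 1, -1)], dim=1)
    logits = torch.einsum("bd,bnd->bn", hidden, cand)  # (B, N + 1)
    return torch.softmax(logits, dim=-1)
```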
|
| 40 |
+
|
| 41 |
+
In order to address the problems of greedy decoding (analysed in Section [3](#sec:methodology){reference-type="ref" reference="sec:methodology"}), we frame our road extraction task as a classical Markov decision process (MDP). The generation of a graph for every image defines an environment, where the length of the currently generated edge list determines the current step. Let $\mathbf{o}_t$, $\alpha_t$ and $r_t$ correspond to the observation, the action and the observed reward, respectively, at time step $t$. The aim is to search for a policy that maximizes the expected cumulative reward over a horizon $T$, i.e., $\max_{\pi} J(\pi) \coloneqq \mathop{\mathbb{E}}_{\pi}[\sum_{t = 0}^{T - 1} \gamma^t r_t]$, where $\gamma \in (0, 1]$ indicates the discount factor and the expectation is with respect to the randomness in the policy and the transition dynamics. We set the discount factor to 1 due to the assumed bounded time horizon, and we note that although the dynamics of the environment are deterministic, optimizing the reward remains challenging.
|
| 42 |
+
|
| 43 |
+
Each action leads to the selection of a new key point, with new edges being added once every two actions. The addition of a new edge leads to a revision of the predicted graph and triggers an intermediate reward $$\begin{equation}
|
| 44 |
+
\label{eq:incremental-reward}
|
| 45 |
+
r_t = \text{sc}({\mathcal G}_{\text{gt}}, {\mathcal G}_{\text{pred}_t}) - \text{sc}({\mathcal G}_{\text{gt}}, {\mathcal G}_{\text{pred}_{t - 1}}),
|
| 46 |
+
\end{equation}$$ where $\text{sc}({\mathcal G}_{\text{gt}}, {\mathcal G}_{\text{pred}_t})$ is a similarity score between the ground truth graph ${\mathcal G}_{\text{gt}}$ and the current estimate ${\mathcal G}_{\text{pred}_{t}}$. Discussion of the specific similarity scores used in practice is postponed for Section [3.3](#sec:short_evaluation_metrics){reference-type="ref" reference="sec:short_evaluation_metrics"}.
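A minimal sketch (ours) of this incremental reward, with the similarity score `sc` injected as a callable placeholder for the metrics of Section 3.3:

```python
def incremental_reward(sc, g_gt, g_pred_prev, new_edge):
    """Reward for adding new_edge: the change in graph similarity.
    `sc` is any graph-similarity score; graphs are represented as edge sets."""
    g_pred_next = g_pred_prev | {new_edge}  # add the edge to the edge set
    return sc(g_gt, g_pred_next) - sc(g_gt, g_pred_prev), g_pred_next
```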
|
| 47 |
+
|
| 48 |
+
A proper spatial graph generation entails (i) correct topology and (ii) accurate location prediction of individual roads. For the latter, intermediate vertices of degree $2$ are essential. We call a *road segment* (RS) an ordered collection of edges between vertices of degree two (or a collection of edges forming a circle), where $d(\cdot)$ denotes vertex degree: $$\begin{align*}
\text{RS} = \{(\mathbf{v}_{\text{rs}_1}, \mathbf{v}_{\text{rs}_2}), \dots, (\mathbf{v}_{\text{rs}_{k - 1}}, \mathbf{v}_{\text{rs}_k})\} \text{ s.t. } (\mathbf{v}_{\text{rs}_i}, \mathbf{v}_{\text{rs}_{i + 1}}) \in {\mathcal E}\text{ for } i = 1, \dots, k - 1 \\
d(\mathbf{v}_{\text{rs}_i}) = 2, \text{ for } i = 2, \dots, k - 1, \quad (d(\mathbf{v}_{\text{rs}_1}) \neq 2 \text{ and } d(\mathbf{v}_{\text{rs}_k}) \neq 2 \text{ or } \mathbf{v}_{\text{rs}_1} = \mathbf{v}_{\text{rs}_k}).
|
| 51 |
+
\end{align*}$$ During the progression of an episode (i.e. the sequential generation of a graph), the topological nature of the similarity scores in Eq. [\[eq:incremental-reward\]](#eq:incremental-reward){reference-type="ref" reference="eq:incremental-reward"} implies that the effect of each new edge on the reward will mostly be reflected once its whole corresponding road segment has been generated. To resolve the ambiguity in the credit assignment and allow our agent to look ahead into sequences of actions, we rely on Monte Carlo Tree Search (MCTS) to simulate entire sequences of actions. We use a state-of-the-art search-based agent, MuZero [@schrittwieser2020mastering], that constructs a learnable model of the environment dynamics, simulating transitions in a latent representation and leading to significant computational benefits.
|
| 52 |
+
|
| 53 |
+
{#fig:edge-token-embedding width="90%"}
|
| 54 |
+
|
| 55 |
+
Specifically, MuZero requires three distinct parts (see also Fig. [5](#fig:edge-token-embedding){reference-type="ref" reference="fig:edge-token-embedding"}):
|
| 56 |
+
|
| 57 |
+
1. A representation function $f$ that creates a latent vector of the current state $\mathbf{h}_t = f_{\theta} (\mathbf{o}_t)$. For this step, we use the autoregressive model, as shown in Fig. [4](#fig:whole-model){reference-type="ref" reference="fig:whole-model"}. Our current latent representation $\mathbf{h}_t$ contains the graph's hidden state, along with the key points' embeddings used to map actions to latent vectors. As key points remain the same throughout the episode, image-based features (Components (1) and (2) in Fig. [4](#fig:whole-model){reference-type="ref" reference="fig:whole-model"}) are only computed once.
|
| 58 |
+
|
| 59 |
+
2. A dynamics network $g$ (we use a simple LSTM [@hochreiter1997long]) that predicts the effect of a new action by predicting the next hidden state and the expected reward: $(\hat{\mathbf{h}}_t, \hat{r}_t) = g_{\theta} (\tilde{\mathbf{h}}_{t - 1}, \alpha_t)$. We can replace $\tilde{\mathbf{h}}_{t - 1}$ with the latent representation $\mathbf{h}_{t - 1}$, or its previously computed approximation $\hat{\mathbf{h}}_{t - 1}$ for tree search of depth larger than 1.
|
| 60 |
+
|
| 61 |
+
3. A prediction network $\psi$ that estimates the policy and the value for the current state: $(\mathbf{p}_{t + 1}, v_t) = \psi_{\theta}(\tilde{\mathbf{h}}_t)$. We compute the policy via a pointer network, as described in Section [3.1](#sec:architecture){reference-type="ref" reference="sec:architecture"}. Value estimates are produced by a simple multi-layer network.
|
| 62 |
+
|
| 63 |
+
The dynamics network guides the search and evaluates the expected reward of actions. For every newly generated edge, we also explicitly inform the network regarding the creation of new intersections and the expected relative change in the overall road surface generated via embeddings (see Fig. [5](#fig:edge-token-embedding){reference-type="ref" reference="fig:edge-token-embedding"}). By using the dynamics network, we bypass the expensive call to the decoder module during the search, and can instead approximate small modifications in the latent representation directly. For our experiments, the dynamics network requires up to 90 times fewer floating-point operations to simulate trajectories, compared to using the edge embeddings' decoder. Effectively, our method does not involve a significantly larger computation budget compared to the base autoregressive model.
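A minimal sketch (ours, with assumed dimensions) of the latent dynamics and prediction step; the policy head via the pointer network is omitted here:

```python
import torch
import torch.nn as nn

class LatentDynamics(nn.Module):
    """Minimal MuZero-style dynamics + value step in latent space."""
    def __init__(self, d_hidden, d_action):
        super().__init__()
        self.cell = nn.LSTMCell(d_action, d_hidden)   # g: next latent state
        self.reward_head = nn.Linear(d_hidden, 1)     # g: expected reward
        self.value_head = nn.Linear(d_hidden, 1)      # psi: value estimate

    def forward(self, h, c, action_emb):
        # One simulated transition: new latent state, reward and value.
        h_next, c_next = self.cell(action_emb, (h, c))
        return h_next, c_next, self.reward_head(h_next), self.value_head(h_next)
```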
|
2210.01769/main_diagram/main_diagram.drawio
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2210.01769/paper_text/intro_method.md
ADDED
|
@@ -0,0 +1,104 @@
| 1 |
+
# Introduction
|
| 2 |
+
|
| 3 |
+
In an effort to understand visual encoding and decoding processes, researchers in recent years have curated multiple datasets recording fMRI signals while the subjects are viewing natural images [@allen2022massive; @chang2018bold5000; @shen2019end; @vanrullen2019reconstructing]. In particular, the Natural Scenes Dataset (NSD [@allen2022massive]) was built to meet the needs of data-hungry deep learning models, sampling at an unprecedented scale compared to all prior works while having the highest resolution and signal-to-noise ratio (SNR). In addition, all the images used in NSD are sampled from MS-COCO [@lin2014microsoft], which has far richer contextual information and more detailed annotations compared to datasets that are commonly used in other fMRI studies (e.g., Celeb A face dataset [@liu2018large], ImageNet [@deng2009imagenet], self-curated symbols, grayscale datasets). This dataset, therefore, offers the opportunity to explore the decoding of complex images that are closer to real-life scenes.
|
| 4 |
+
|
| 5 |
+
Human visual decoding can be categorized into stimuli category classification [@akamatsu2021perceived], stimuli identification [@takada2020question], and reconstruction. We focus on stimuli reconstruction in this study. Different from previous efforts in reconstructing images from fMRI [@beliy2019voxels; @fang2020reconstructing; @gaziv2020self; @mozafari2020reconstructing; @ren2021reconstructing; @shen2019end; @shen2019deep], we approach the problem with one more modality, that of text. The benefits of adding the text modality are threefold: first, the brain is naturally multimodal. Research [@cao2019causal; @ghazanfar2006neocortex; @pasqualotto2016multisensory] indicates that the brain is not only capable of learning multisensory representations, but a larger portion of the cortex is engaged in multisensory processing: for example, both visual and tactile recognition of objects activate the same part of the object-responsive cortex [@pietrini2004beyond]. Visual-linguistic pathways along the border of the occipital lobe [@popham2021visual] also bring a more intertwined view of the brain's representation of these two modalities. Second, multimodal deep models tend to explain the brain better (having higher representation correlations) than the visual-only models, even when compared with activities in the visual cortex [@choksi2021multimodal]. Lastly, our goal is to reconstruct complex images that have multiple objects in different categories with intricate interactions: it is natural to incorporate contextual information as an additional modality.
|
| 6 |
+
|
| 7 |
+
Instead of training a model to map all three modalities (fMRI, image, text) to a unified latent space, we propose to map fMRI to a well-aligned space shared by image and text, and use conditional generative models to reconstruct seen images from representations in that space. This design addresses the data scarcity issue of brain datasets by separating fMRI from the other two modalities. In this way, a large amount of data is readily available to learn the shared visual-language representation and to train a generative model conditioned on this representation. Furthermore, pre-trained models can be utilized to make the whole reconstruction pipeline more efficient and flexible.
|
| 8 |
+
|
| 9 |
+
Our contributions are as follows: (1) to the best of our knowledge, this is the first work on reconstructing complex images from human brain signals. It provides an opportunity to study the brain's visual decoding in a more natural setting than using object-centered images. Compared to previous works, it also decodes signals from more voxels and regions, including those outside the visual cortex, that are responsive to the experiment. This inclusion allows us to study the behavior and functionality of more brain areas. (2) We address the data scarcity issue by incorporating additional text modality and leveraging pre-trained latent space and models. For the reconstruction, we focus on semantic representations of the images while taking low-level visual features into account. (3) Our results show we can decode complex images from fMRI signals relatively faithfully. We also perform microstimulation on different brain regions to study their properties and showcase the potential usages of the pipeline.
|
| 10 |
+
|
| 11 |
+
# Method
|
| 12 |
+
|
| 13 |
+
<figure id="fig:pipeline" data-latex-placement="t">
|
| 14 |
+
<figure id="fig:pipeline_detail">
|
| 15 |
+
<img src="fig/pipeline.png" />
|
| 16 |
+
<figcaption aria-hidden="true"></figcaption>
|
| 17 |
+
</figure>
|
| 18 |
+
<figure id="fig:pipeline_gen">
|
| 19 |
+
<img src="fig/pipeline.png" />
|
| 20 |
+
<figcaption aria-hidden="true"></figcaption>
|
| 21 |
+
</figure>
|
| 22 |
+
<figcaption>The pipeline for reconstructing seen images from fMRI signals. <a href="#fig:pipeline_detail" data-reference-type="ref" data-reference="fig:pipeline_detail">1</a> details different components, from collected data to the reconstructed image. The pipeline is trained in two stages: during the first stage, mapping models <span class="math inline"><em>f</em><sub><em>m</em><em>i</em></sub>, <em>f</em><sub><em>m</em><em>c</em></sub></span> are trained to encode fMRI activities into the CLIP embedding space. In the second stage, conditional generator <span class="math inline"><strong>G</strong></span> and contrastive discriminator <span class="math inline"><strong>D</strong></span> are finetuned while both <span class="math inline"><em>f</em><sub><em>m</em><em>i</em></sub>, <em>f</em><sub><em>m</em><em>c</em></sub></span> are kept frozen. <a href="#fig:pipeline_gen" data-reference-type="ref" data-reference="fig:pipeline_gen">2</a> shows the image generation process once models are trained.</figcaption>
|
| 23 |
+
</figure>
|
| 24 |
+
|
| 25 |
+
Recent developments in contrastive models allow more accurate embeddings of images and their semantic meanings in the same latent space. This performance is realized using massive datasets: models such as CLIP [@radford2021learning] and ALIGN [@jia2021scaling] utilize thousands of millions of image-text pairs for representation alignment. In comparison, brain imaging datasets that record pairs of images and fMRI range from 1.2k to 73k samples, making it difficult to learn brain encoding and decoding models from scratch. However, we can utilize aligned embeddings obtained from pre-trained contrastive models as the intermediary and generate images conditioned on these embeddings. In our pipeline, as shown in [3](#fig:pipeline){reference-type="ref+label" reference="fig:pipeline"}, we first map fMRI signals to CLIP embeddings of the observed image and its captions, then pass these embeddings to a conditional generative model for image reconstruction.
|
| 26 |
+
|
| 27 |
+
<figure id="fig:cap_screen" data-latex-placement="t">
|
| 28 |
+
<img src="fig/cap_screen.png" style="width:80.0%" />
|
| 29 |
+
<figcaption>Image caption screening through CLIP encoders. For this sample, threshold is put at half of the largest probability: <span class="math inline">0.5 × 0.519</span>. Therefore, captions (2) and (3) of the image are kept.</figcaption>
|
| 30 |
+
</figure>
|
| 31 |
+
|
| 32 |
+
Each image $\mathbf{x}_{\text{img}}$ in the COCO dataset has five captions $\{\mathbf{x}_{\text{cap}_\text{1}}, \cdots, \mathbf{x}_{\text{cap}_\text{5}}\}$ collected through Amazon's Mechanical Turk (AMT), and, by nature, these captions vary in their descriptive ability. Fig. [4](#fig:cap_screen){reference-type="ref" reference="fig:cap_screen"} shows a sample image with its five captions, and we can tell captions (2) and (3) are more objective and informative than caption (4) when it comes to describing the *content* of that image, and are thus more helpful as the image-generation condition. We utilize pre-trained CLIP encoders to screen the high-quality captions since representations in the CLIP space are trained to be image-text aligned. A caption with an embedding more aligned to the image embedding is more descriptive than a less aligned one; it is also less general and more specific to this particular image because of the contrastive loss in CLIP. For the screening, we pass each image together with its five captions to the CLIP model, which outputs corresponding probabilities that the captions and image are proper pairs. We keep captions with probabilities larger than half of the highest probability. After screening out less informative captions, we have one to three high-quality captions per image.
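A minimal sketch of this screening step using OpenAI's `clip` package (our own illustration; the one-half threshold ratio is the described choice):

```python
import clip
import torch
from PIL import Image

model, preprocess = clip.load("ViT-B/32", device="cpu")

def screen_captions(image_path, captions, ratio=0.5):
    """Keep captions whose image-text matching probability exceeds
    `ratio` times the highest probability among the candidates."""
    image = preprocess(Image.open(image_path)).unsqueeze(0)
    text = clip.tokenize(captions)
    with torch.no_grad():
        logits_per_image, _ = model(image, text)
        probs = logits_per_image.softmax(dim=-1).squeeze(0)
    return [cap for cap, p in zip(captions, probs) if p >= ratio * probs.max()]
```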
|
| 33 |
+
|
| 34 |
+
[]{#ssc:fmri_clip label="ssc:fmri_clip"} Each fMRI signal that reflects a specific image is a 3D data volume, and the value on position $(i, j, k)$ is the relative brain activation on this voxel triggered by the image. We apply an ROI (region of interest) mask on this 3D volume to extract signals of cortical voxels that are task-related and have good SNRs. The signal is then flattened into a 1D vector and voxel-wise standardized within each scan session. The end results $\mathbf{x}_{\text{fmri}}$ are used by our image reconstruction pipeline. We choose to use the ROI with the widest region coverage, and the length $N$ of $\mathbf{x}_{\text{fmri}}$ ranges from 12682 to 17907 for different brains in the NSD dataset.
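A minimal NumPy sketch (ours; array layouts assumed) of this ROI extraction and session-wise standardisation:

```python
import numpy as np

def preprocess_fmri(volume, roi_mask, session_ids):
    """Extract ROI voxels from a (T x X x Y x Z) series, flatten to (T x N),
    and standardise each voxel within its scan session."""
    x = volume[:, roi_mask]                  # boolean (X, Y, Z) mask -> (T, N)
    out = np.empty_like(x, dtype=np.float64)
    for s in np.unique(session_ids):         # per-session z-scoring
        sel = session_ids == s
        mu, sd = x[sel].mean(axis=0), x[sel].std(axis=0) + 1e-8
        out[sel] = (x[sel] - mu) / sd
    return out
```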
|
| 35 |
+
|
| 36 |
+
Our goal is to train two mapping models, $f_{mi}$ and $f_{mc}$ in [3](#fig:pipeline){reference-type="ref+label" reference="fig:pipeline"} (collectively denoted as $f_m$), that encodes $\mathbf{x}_{\text{fmri}} \in \mathbb{R}^{N}$ to $\mathbf{h}_{\text{img}}=\operatorname{C_{img}}(\mathbf{x}_{\text{img}}) \in\mathbb{R}^{512}$ and $\mathbf{h}_{\text{cap}}=\operatorname{C_{txt}}(\mathbf{x}_{\text{cap}}) \in\mathbb{R}^{512}$ respectively. Here $\operatorname{C_{img}}$, $\operatorname{C_{txt}}$ are CLIP image and text encoders, and $\mathbf{x}_{\text{cap}}$ is one of the image captions chosen randomly from the vetted caption pool. We construct both $f_m$ as a CNN with one Conv1D layer followed by four residual blocks and three linear layers. The training objective is a combination of MSE loss, cosine similarity loss, and contrastive loss on cosine similarity. We use the infoNCE definition [@van2018representation] of contrastive loss, for the $i^{\text{th}}$ sample in a batch of size $B$: $$\begin{equation}
|
| 37 |
+
\begin{aligned}
\operatorname{Contra}(a^{(i)}, b^{(i)}) = -
\mathbb{E}_i \left[
\operatorname{log}\frac{\operatorname{exp}(\operatorname{cos}(a^{(i)}, b^{(i)})/\tau)}{\sum_{j=1}^B\operatorname{exp}(\operatorname{cos}(a^{(i)}, b^{(j)})/\tau)}\right]
\end{aligned}
\label{eq:contra}
|
| 46 |
+
\end{equation}$$ For the mapping model $f_{mi}$ that encodes fMRI to image embeddings, we have $\begin{small}{\mathbf{h}_{\text{img}}^{(i)}}' = f_{mi}(\mathbf{x}_{\text{fmri}}^{(i)})\end{small}$. The training objective is: $$\begin{equation}
|
| 47 |
+
\begin{footnotesize}
|
| 48 |
+
\begin{aligned}
\mathcal{L}_{mi} =
\mathbb{E}_i \left[
\alpha_1 ||{\mathbf{h}_{\text{img}}^{(i)}}' - {\mathbf{h}_{\text{img}}^{(i)}}||^2_2 + \alpha_2 (1 - \operatorname{cos}({\mathbf{h}_{\text{img}}^{(i)}}', {\mathbf{h}_{\text{img}}^{(i)}}))\right]
+ \alpha_3 \operatorname{Contra}({\mathbf{h}_{\text{img}}^{(i)}}', {\mathbf{h}_{\text{img}}^{(i)}}),
\end{aligned}
|
| 57 |
+
\end{footnotesize}
|
| 58 |
+
\label{eq:mapper_loss}
|
| 59 |
+
\end{equation}$$ where $\tau, \alpha_1, \alpha_2, \alpha_3$ are non-negative hyperparameters selected through sweeps. The loss $\mathcal{L}_{mc}$ for caption embedding mapping model $f_{mc}$ is defined similarly. Although CLIP embeddings are trained to be aligned, there are still systematic differences between image and text embeddings, with embeddings under each modality showing outlier values at a few fixed positions. In addition, we also notice the generated images emphasize either image content (object proximity, shape, etc.) or semantic features depending on which condition we use. Therefore, including both embeddings as the conditions for a generator can cover both ends, and that is why we train two mapping models for the two modalities. Since the outlier indices are fixed for each modality across images, clipping the value should not affect image-specific information. Therefore, before normalizing the ground truth embeddings into unit vectors, we set $\mathbf{h} = \operatorname{clamp}(\mathbf{h}, -1.5, 1.5)$. This can greatly improve the mapping performance during training.
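A minimal PyTorch sketch (ours) of $\mathcal{L}_{mi}$, combining the three terms above with the target clamping and normalisation just described; the hyperparameter values are placeholders:

```python
import torch
import torch.nn.functional as F

def mapping_loss(h_pred, h_true, alpha=(1.0, 1.0, 1.0), tau=0.07):
    """L_mi sketch: MSE + (1 - cos) + infoNCE over a batch (B x 512).
    Targets are clamped to [-1.5, 1.5] and unit-normalised as described."""
    h_true = F.normalize(torch.clamp(h_true, -1.5, 1.5), dim=-1)
    mse = (h_pred - h_true).pow(2).sum(dim=-1).mean()
    cos = F.cosine_similarity(h_pred, h_true, dim=-1)
    sim = F.normalize(h_pred, dim=-1) @ h_true.t() / tau   # (B, B) cosine logits
    nce = F.cross_entropy(sim, torch.arange(len(h_pred)))  # positives on diagonal
    return alpha[0] * mse + alpha[1] * (1 - cos).mean() + alpha[2] * nce
```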
|
| 60 |
+
|
| 61 |
+
The mapping models output fMRI-mapped CLIP embeddings $\mathbf{h}_{\text{img}}'$ and $\mathbf{h}_{\text{cap}}'$ that serve as conditions for the generative model. We aim to generate images that have both naturalness (being photo-realistic) and high fidelity (faithfully reflecting objects and relationships in the observed image). Our generation model is built upon $\operatorname{Lafite}$ [@zhou2021lafite], a text-to-image generation model: it adapts unconditional StyleGAN2 [@Karras2019stylegan2; @Karras2020ada] to conditional image generation conditioned on CLIP text embeddings.

In our generator $\mathbf{G}$, both conditions $\mathbf{h}_{\text{img}}'$ and $\mathbf{h}_{\text{cap}}'$ are injected into the StyleSpace: each goes through two fully connected (FC) layers and is transformed into condition codes $\mathbf{c}_{\text{img}}$ and $\mathbf{c}_{\text{cap}}$. These condition codes are max-pooled and then concatenated with the intermediate latent code $\mathbf{w}\in \mathcal{W}$, which is obtained by passing the noise vector $\mathbf{z}\in\mathcal{Z}$ through a mapping network (see [3](#fig:pipeline){reference-type="ref+label" reference="fig:pipeline"}). Using a mapping network to transform $\mathbf{z}$ into an intermediate latent space $\mathcal{W}$ is key to StyleGAN, as $\mathcal{W}$ is shown to be much less entangled than $\mathcal{Z}$ [@shen2020interpreting]. The conditioned style $\mathbf{s}$ is then passed to different layers of $\mathbf{G}$ as in StyleGAN2, generating image ${\mathbf{x}_{\text{img}}}'$: $$\begin{equation}
\begin{aligned}
\mathbf{s}= \mathbf{w}||\operatorname{max}(\mathbf{c}_{\text{img}},\mathbf{c}_{\text{cap}}), \quad {\mathbf{x}_{\text{img}}}'= \mathbf{G}(\mathbf{s}).
\end{aligned}
\end{equation}$$ We align the semantics of the generated ${\mathbf{x}_{\text{img}}}'$ and the condition vectors by passing ${\mathbf{x}_{\text{img}}}'$ through pre-trained CLIP encoders and applying a contrastive loss ([\[eq:c_loss\]](#eq:c_loss){reference-type="ref+label" reference="eq:c_loss"} $\mathcal{L}_{c2}$) between them. To further align lower-level visual features, such as prominent edges, corners, and shapes, we also pass the image through ResNet-50 and align the position-wise averaged representation obtained from $\operatorname{Layer2}$ ([\[eq:c_loss\]](#eq:c_loss){reference-type="ref+label" reference="eq:c_loss"} $\mathcal{L}_{c3}$).
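
As a sketch, the condition injection can be written as follows (layer sizes, module names, and the two-layer FC design details are illustrative assumptions, not the exact released implementation):

```python
import torch
import torch.nn as nn

class ConditionInjection(nn.Module):
    """Project the fMRI-mapped CLIP embeddings to condition codes, max-pool them
    elementwise, and concatenate with the intermediate latent w."""
    def __init__(self, clip_dim=512, cond_dim=512):
        super().__init__()
        self.proj_img = nn.Sequential(nn.Linear(clip_dim, cond_dim), nn.ReLU(),
                                      nn.Linear(cond_dim, cond_dim))
        self.proj_cap = nn.Sequential(nn.Linear(clip_dim, cond_dim), nn.ReLU(),
                                      nn.Linear(cond_dim, cond_dim))

    def forward(self, w, h_img, h_cap):
        c_img = self.proj_img(h_img)
        c_cap = self.proj_cap(h_cap)
        # s = w || max(c_img, c_cap)
        return torch.cat([w, torch.maximum(c_img, c_cap)], dim=-1)
```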

The discriminator $\mathbf{D}$ has three heads that share a common backbone: the first head $\mathbf{D}_d$ classifies images as real/fake, while the second and third semantic projection heads $\mathbf{D}_{si}$ and $\mathbf{D}_{sc}$ map ${\mathbf{x}_{\text{img}}}'$ to $\mathbf{h}_{\text{img}}'$ and $\mathbf{h}_{\text{cap}}'$. The latter two ensure the generated images are faithful to the conditions. Contrastive discriminators have also been shown to prevent discriminator overfitting and improve final model performance [@kang2020contragan; @jeong2021training]. Applying a contrastive loss ([\[eq:c_loss\]](#eq:c_loss){reference-type="ref+label" reference="eq:c_loss"} $\mathcal{L}_{c1}$) between the outputs of the discriminator's semantic projection heads and the condition vectors fed to $\mathbf{G}$ therefore helps stabilize training. To summarize the objective function, the standard GAN loss ensures the naturalness of the generated ${\mathbf{x}_{\text{img}}}'$: $$\begin{equation}
\begin{footnotesize}
\begin{aligned}
\mathcal{L}_{\operatorname{GAN}_\mathbf{G}} & = - \mathbb{E}_i \left[ \operatorname{log} \sigma(\mathbf{D}_d({\mathbf{x}_{\text{img}}^{(i)}}'))\right], \\
\mathcal{L}_{\operatorname{GAN}_\mathbf{D}} & = - \mathbb{E}_i \left[\operatorname{log} \sigma(\mathbf{D}_d({\mathbf{x}_{\text{img}}^{(i)}})) - \operatorname{log}(1 - \sigma(\mathbf{D}_d({\mathbf{x}_{\text{img}}^{(i)}}')))\right],
\end{aligned}
\end{footnotesize}
\end{equation}$$ where $\sigma$ denotes the sigmoid function. Meanwhile, contrastive losses align the semantics of the generated images with the fMRI-mapped condition vectors that supposedly reside in the CLIP space: $$\begin{equation}
\begin{footnotesize}
\begin{aligned}
&\mathcal{L}_{c1} =
\operatorname{Contra}(\mathbf{D}_{sc}({\mathbf{x}_{\text{img}}^{(i)}}'), {\mathbf{h}_{\text{cap}}^{(i)}}') + \operatorname{Contra}(\mathbf{D}_{si}({\mathbf{x}_{\text{img}}^{(i)}}'), {\mathbf{h}_{\text{img}}^{(i)}}'),\\
&\mathcal{L}_{c2} =
\operatorname{Contra}(\operatorname{C_{img}}({\mathbf{x}_{\text{img}}^{(i)}}'), {\mathbf{h}_{\text{cap}}^{(i)}}') + \operatorname{Contra}(\operatorname{C_{img}}({\mathbf{x}_{\text{img}}^{(i)}}'), {\mathbf{h}_{\text{img}}^{(i)}}'),\\
&\mathcal{L}_{c3} =
\operatorname{Contra}(\operatorname{ResNet}({\mathbf{x}_{\text{img}}^{(i)}}'), \operatorname{ResNet}({\mathbf{x}_{\text{img}}^{(i)}}))
\end{aligned}
\end{footnotesize}
\label{eq:c_loss}
\end{equation}$$ The overall training objectives are: $\begin{small}
\begin{aligned}
\mathcal{L}_{\mathbf{G}} = \mathcal{L}_{\operatorname{GAN}_\mathbf{G}} + \lambda_1\mathcal{L}_{c1} + \lambda_2\mathcal{L}_{c2} + \lambda_3\mathcal{L}_{c3}, \\
\mathcal{L}_{\mathbf{D}} = \mathcal{L}_{\operatorname{GAN}_\mathbf{D}} + \lambda_1\mathcal{L}_{c1},
\end{aligned}
\end{small}$ where $\lambda_1, \lambda_2, \lambda_3$ are non-negative hyperparameters.
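
In code, the two objectives combine as in the following sketch (the non-saturating GAN terms are written with `softplus`; the contrastive terms are assumed to be computed elsewhere, e.g., with the batch-contrastive form sketched earlier):

```python
import torch.nn.functional as F

def generator_loss(d_logits_fake, l_c1, l_c2, l_c3, lambdas=(1.0, 1.0, 1.0)):
    # -log sigmoid(D(x')) plus the weighted contrastive alignment terms.
    l_gan = F.softplus(-d_logits_fake).mean()
    return l_gan + lambdas[0] * l_c1 + lambdas[1] * l_c2 + lambdas[2] * l_c3

def discriminator_loss(d_logits_real, d_logits_fake, l_c1, lambda1=1.0):
    # -log sigmoid(D(x)) - log(1 - sigmoid(D(x'))), plus the shared L_c1 term.
    l_gan = F.softplus(-d_logits_real).mean() + F.softplus(d_logits_fake).mean()
    return l_gan + lambda1 * l_c1
```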

The whole generation pipeline, consisting of the mapping models and the GAN, is trained in two stages. First, the mapping models $f_{mi}$ and $f_{mc}$ are trained on fMRI-CLIP embedding pairs. Next, starting from the trained mapping model weights and the $\operatorname{Lafite}$ language-free model weights, we modify the losses and model structure and finetune the conditional generator. For the additional condition vector projection layers in $\mathbf{G}$ and the semantic head in $\mathbf{D}$, we duplicate the weights of the existing parallel layers to make the model converge faster. Note that $\operatorname{Lafite}$ is pre-trained on the Google Conceptual Captions 3M dataset [@sharma2018conceptual] and then finetuned on the MS-COCO dataset, both of which are much larger than NSD. Finetuning from it allows us to exploit the natural relationships between semantics and images despite sparse fMRI data. Even if no pre-trained conditional GAN like $\operatorname{Lafite}$ is available, for example when using a different generator architecture, we can still use two-stage training to compensate for data scarcity: in that case, we first train the conditional GAN on a large image dataset with noise-perturbed $\mathbf{h}_{\text{img}}$ and $\mathbf{h}_{\text{cap}}$ as pseudo input condition vectors.
2211.01452/main_diagram/main_diagram.drawio
ADDED
@@ -0,0 +1 @@
<mxfile host="app.diagrams.net" modified="2022-09-04T20:47:37.976Z" agent="5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/104.0.0.0 Safari/537.36" etag="o2C6NTQT0oz4CRWlwJhN" version="16.6.4" type="google"><diagram id="OPD3DZt4RhMtSNNf3L-a" name="Page-11">1VhLc9owEP41zLQHGNnCjxyBvHpIJ9NMp2lvGqxgTWzLkWUw+fWVLNlYhsZAakg4gLXaXUn7fbtaM4CzuLhhKA3vaICjgQ2CYgAvB7btep74loK1EkDoKsGCkUCJrI3ggbxiLQRampMAZ4YipzTiJDWFc5okeM4NGWKMrky1JxqZq6ZooVcEG8HDHEV4S+0XCXiopL7T0L7FZBFWK1tAz8SoUtaCLEQBXTVE8GoAZ4xSrp7iYoYjGbsqLt/y4DELh2Pv5vWZes+z26E7Hipn14eY1EdgOOFHux7evkzwKn788ZP8cSw0WX2nhTYBSxTlOl4D243EItMnKtaSsESUlTPuSy5POh3YEJSfpghkfK0jXgmlg2FW8mEiFCw/LZoW7kL+hhJOQS7EkfgpqsXFKdT6SkvHv17CZjRPAiwPBsT0KiQcP6RoLmdXgsXSMY8jMbL0RjQvLVuPZ/Wx4PV1eRg4zTijz7gxo48pLEgUVfKEJrje0hIzjosW0TpQsmrqiJTDNMacrcuzlyY123S62WNbjVcb8lpQ64QN4rpahnS+LGrXG06IB02LAyhibVEkzzDbgkTEgZtxN+Op47YjlCgii0QM5yJuwjGcyqgSkcETPRGTIJDL7ATapEITa38b6xrR3uBzHAM+CLbhc3egB/tCz+5K8Pclbqyvi/gcmVuj+e/MVTNV4bdOm8vOhdWdy/Ypcxn2xIaKBimjS3Hds33Z8PGKxls0640nvtdZNCz74oRVY7yDJy3ocBJMZHsmMYhQlpG5CeRxIcYF4Y/ag3z+Le1Hjh5dFtpdOVjrgdoaDqo+8CBQxJlozua4u0fiiC0wf0PP2w1yA0RnR65XMoYjxMnSPMMuYPUK95SUGas5NHZ8s9Y4rulCHVNbNdvEtiOrVbQAGIHGp7UzFZUttyXr6iAcT0Tn3EQEBxBR0I2tS6OR5/mVQFn6nlMJNsblyLC+x4yImMmKdhJu25+B23AMTUr69nHcduBH4rbb02WciYyQb8pfRFM2K75+rru4fsluJas97vEChm7rpcvbs1Hr7QL2euLGjMZpznFJj7uD6CE2TdIMK4hSXFZbmgfdTXxvoF2YoFlVtWmC5u8Aze8LNP+gy0onz7tvqr6viLOWfqfdjXgt8PZua/xWirdZ0HOxvzhbI1M1JdYIlIqNpsQF7hmaks5mA5yTcZ1E2ZtxXou69v9inBhu/nVW6pu/7uHVXw==</diagram></mxfile>
2211.01452/main_diagram/main_diagram.pdf
ADDED
Binary file (8.93 kB). View file
2211.01452/paper_text/intro_method.md
ADDED
@@ -0,0 +1,105 @@
# Introduction

[]{#sec:intro label="sec:intro"}

Pre-trained Transformer models can be easily fine-tuned on various downstream tasks with high performance and have been widely deployed as model inference services [@bommasani2021opportunities; @feng2020codebert; @yang2019xlnet; @clark2020electra]. However, these model inference services can pose privacy concerns. For instance, GitHub Copilot, a code-generating engine adapted from pre-trained GPT weights, requires either that users reveal their code prompts to the service provider, or that the service provider release Copilot's trained weights, which are business proprietary, to users [@chen2021evaluating; @brown2020language].

Secure Multi-Party Computation (MPC) offers a promising solution by keeping data and model weights private during inference [@evans2018pragmatic]. However, vanilla Transformer inference in MPC is unacceptably slow. For instance, $\text{BERT}_\text{BASE}$ inference takes ${<}1$ second without MPC, but ${\sim}60$ seconds with MPC (Figure [\[fig:breakdown\]](#fig:breakdown){reference-type="ref" reference="fig:breakdown"}). An intuitive way to accelerate MPC inference is to replace computational operations with faster approximations and retrain the approximated model, an approach that has been adopted for convolutional neural networks (CNNs) [@chou2018faster]. Unfortunately, adapting this solution to Transformers drastically decreases the model's performance (§ [5](#sec:experiment){reference-type="ref" reference="sec:experiment"}).

In this paper, we take the first step toward *privacy-preserving Transformer model inference in MPC* that remains *fast* and *performant*. We take inspiration from the approximation approach[^2] and attribute the performance degradation to two challenges. First, many MPC-friendly approximations make model training harder. For example, quadratic functions cause the gradient explosion problem in deep neural networks [@mishra2020delphi]. Second, the downstream datasets used for Transformer fine-tuning usually contain insufficient data to retrain an approximated Transformer with common task objectives [@zhang2018generalized; @hinton2012improving].

To address these two challenges, we resort to the *knowledge distillation* (KD) framework. KD can ease model training by matching intermediate representations between the teacher and the student model [@romero2014fitnets]; this intermediate supervision can alleviate the gradient explosion problem [@lee2015deeply]. At the same time, the KD objective is *data-efficient* and allows training an approximated Transformer on small downstream datasets [@touvron2021training].

<figure id="fig:intro" data-latex-placement="!t">
<embed src="iclr2023/figures/intro_fig.pdf" style="width:90.0%" />
<figcaption>An illustration of our proposed <span class="smallcaps">MPCFormer</span> framework. <span class="smallcaps">MPCFormer</span> takes a trained (or finetuned) Transformer model and adopts given MPC-friendly approximations, then uses KD on the downstream datasets to construct high-quality models. During inference time, <span class="smallcaps">MPCFormer</span> leverages an MPC engine to attain private model inference. For ease of illustration, we only show the service provider and the user. MPC systems such as CrypTen <span class="citation" data-cites="knott2021crypten"></span> may also involve a trusted third party (TTP) to help with the joint computation.</figcaption>
</figure>

# Method

In this paper, we build [MPCFormer]{.smallcaps}, an *easy-to-adopt* framework for privacy-preserving Transformer inference. [MPCFormer]{.smallcaps} takes in an MPC-friendly approximation and a trained Transformer, and returns a Transformer with low inference latency in MPC and high ML performance simultaneously. To do so, [MPCFormer]{.smallcaps} first replaces bottleneck functions in the input Transformer model with the given MPC-friendly approximations. The resulting approximated Transformer model has a faster inference speed in MPC. Next, it applies knowledge distillation to train the approximated Transformer to high performance, using teacher guidance from the original input Transformer. Finally, the model provider can use the distilled approximated Transformer on top of an MPC engine, *e.g.,* CrypTen, for private model inference service. The overall workflow of [MPCFormer]{.smallcaps} is shown in Figure [1](#fig:intro){reference-type="ref" reference="fig:intro"}.

We implement [MPCFormer]{.smallcaps} on an MPC system [@knott2021crypten], with various MPC-friendly approximations. In the process, we also design a new and faster MPC-friendly approximation to the Softmax function. We extensively evaluate our implementation with various Transformer models. On the IMDb benchmark, [MPCFormer]{.smallcaps} achieves ML performance similar to $\text{BERT}_\text{BASE}$ with a 5.3$\times$ speedup, and similar to $\text{BERT}_\text{LARGE}$ with a 5.9$\times$ speedup. On the GLUE benchmark, it achieves 97% of the performance of $\text{BERT}_\text{BASE}$ with a 2.2$\times$ speedup. [MPCFormer]{.smallcaps} is also effective when given different trained Transformer models, *e.g.,* $\text{RoBERTa}_\text{BASE}$.

In this section, we first describe the Transformer model. Then we describe how functions in Transformer models can be implemented in MPC, and analyze performance bottlenecks.

An $n$-layer Transformer model consists of three components: (1) the embedding layer, (2) a stack of $n$ Transformer layers, and (3) the prediction layer. The embedding layer maps a token (e.g., a word or an image patch) to a hidden representation [@devlin2018bert; @dosovitskiy2020image]. One Transformer layer consists of an attention module and several matrix multiplication modules [@bahdanau2014neural]. The prediction layer maps the output of the last Transformer layer to a task output (*e.g.,* a probability distribution for a classification task). A partial illustration of a Transformer model can be found in Figure [2](#fig:overview){reference-type="ref" reference="fig:overview"}.

The Transformer model inference process can be formulated as a 2-Party Computation (2PC). In 2PC, the user party inputs the data, and the model provider party inputs the Transformer model. They jointly compute an inference result. Throughout the entire inference process, 2PC guarantees that each party learns nothing beyond its own inputs and the result [@yang2019federated].

We describe the **secret sharing** scheme as a means of preserving privacy during the inference process [@damgaard2012multiparty; @goldreich2019play]. Assuming that the user provides a number $x$ as its input, the secret sharing scheme splits $x$ into two numbers, $x_1$ and $x_2$. It then lets the user party hold $x_1$ and distributes $x_2$ to the model provider party. The shares $x_1$ and $x_2$ have two properties. First, either $x_1$ or $x_2$ alone contains no information about $x$; this allows the user to hide the actual value $x$ from the model provider. Second, together they reconstruct $x$, for instance by adding up: $x = x_1 + x_2$. The second property enables joint computation.
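
A toy sketch of this additive secret sharing (the prime modulus is illustrative; practical systems such as CrypTen operate over fixed-point rings):

```python
import secrets

P = 2**61 - 1  # illustrative modulus

def share(x):
    """Split x into two additive shares: x = x1 + x2 (mod P)."""
    x1 = secrets.randbelow(P)  # uniformly random, so x1 alone reveals nothing
    x2 = (x - x1) % P
    return x1, x2

def reconstruct(x1, x2):
    return (x1 + x2) % P

x1, x2 = share(42)
assert reconstruct(x1, x2) == 42
```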

::: minipage
![Runtime breakdown of $\text{BERT}_\text{BASE}$ inference in MPC.]{width="90%"} []{#fig:breakdown label="fig:breakdown"}
:::

:::: minipage
::: {#tab:comm_analysis}
+----------------+----------------+----------+
| *Routines*     | Comm. (rounds) |          |
+----------------+----------------+----------+
| Addition       | 0              |          |
+----------------+----------------+----------+
| Multiplication | 1              |          |
+----------------+----------------+----------+
| Comparison     | 7              |          |
+----------------+----------------+----------+
| *Functions*    | Comm. (volume) | Time (s) |
+----------------+----------------+----------+
| MatMul         | 3.5GB          | 2.5      |
+----------------+----------------+----------+
| GeLU           | 14.8GB         | 9.6      |
+----------------+----------------+----------+
| Softmax        | 50.3GB         | 34.1     |
+----------------+----------------+----------+

: The communication statistics explain the left figure. For instance, the 50.3GB of communication in Softmax functions takes 34.1 seconds. In particular, the runtime in MPC is dominated by **communication** as opposed to computation. Overall, communication takes 46.4 seconds, which is **79%** of the whole inference process.
:::

[]{#tab:comm_analysis label="tab:comm_analysis"}
::::

We take multiplication via Beaver triples as an example of joint computation [@beaver1991efficient]. In multiplication, the user party provides $x$ and the model provider provides $y$, and they secret share $x$ and $y$. Thus, the user gets $x_1$ and $y_1$; the model provider gets $x_2$ and $y_2$. The Beaver protocol assumes a triple $c=ab$ has been generated[^3]. The triple is also secret shared, so that the user party gets $c_1, a_1, b_1$, and the model provider gets $c_2, a_2, b_2$. The user first computes $\epsilon_1 = x_1 - a_1$ and $\delta_1 = y_1 - b_1$ locally. The model provider similarly computes $\epsilon_2 = x_2 - a_2$ and $\delta_2 = y_2 - b_2$ locally. They communicate these four numbers and reconstruct $\epsilon=\epsilon_1+\epsilon_2$ and $\delta=\delta_1+\delta_2$. The user then uses these two values to compute $r_1 = c_1 + \epsilon b_1 + \delta a_1 + \delta \epsilon$. The model provider computes $r_2 = c_2 + \epsilon b_2 + \delta a_2$. At this point, the multiplication result $xy$ can be reconstructed as $xy = r_1 + r_2$.
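
The protocol, written out as a toy single-process sketch (a trusted dealer generates the triple; all arithmetic is modulo an illustrative prime):

```python
import secrets

P = 2**61 - 1

def share(v):
    v1 = secrets.randbelow(P)
    return v1, (v - v1) % P

# Dealer: generate and secret-share a Beaver triple c = a*b.
a, b = secrets.randbelow(P), secrets.randbelow(P)
c = (a * b) % P
a1, a2 = share(a); b1, b2 = share(b); c1, c2 = share(c)

# Parties secret-share their inputs x and y.
x, y = 6, 7
x1, x2 = share(x); y1, y2 = share(y)

# Each party masks its shares locally; the masked values are then exchanged.
eps = ((x1 - a1) + (x2 - a2)) % P  # eps = x - a, safe to reveal
dlt = ((y1 - b1) + (y2 - b2)) % P  # dlt = y - b, safe to reveal

# Local computation of the result shares; only one party adds the eps*dlt term.
r1 = (c1 + eps * b1 + dlt * a1 + eps * dlt) % P
r2 = (c2 + eps * b2 + dlt * a2) % P
assert (r1 + r2) % P == (x * y) % P
```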

There are two important observations in the multiplication example: (1) it does not leak information to the other party. For instance, the user does not send $x_1$ to the model party; instead, it sends $\epsilon_1 = x_1 - a_1$, where $a_1$ is a random mask. (2) It requires one extra round of communication compared to multiplication without MPC. This partially explains why vanilla Transformer models are slow in MPC. In particular, functions in Transformers (e.g., nonlinear activations) are mainly implemented with three routines[^4], i.e., addition, multiplication, and comparison. Any computational operation composed of these routines incurs extra complexity in MPC[^5].

Empirically, we show these complexities by running $\text{BERT}_\text{BASE}$ (Figure [\[fig:breakdown\]](#fig:breakdown){reference-type="ref" reference="fig:breakdown"}) and reporting communication statistics in Table [1](#tab:comm_analysis){reference-type="ref" reference="tab:comm_analysis"} with a secret-sharing-based MPC system [@knott2021crypten]. We observe that the GeLU and Softmax functions in Transformer layers are the major bottlenecks, which echoes findings in a concurrent study [@wang2022characterization]. $\operatorname{GeLU}(x) = x\times\frac{1}{2} \left[1+ \operatorname{erf}\left(\frac{x}{\sqrt{2}}\right)\right]$ is slow because the Gaussian error function $\operatorname{erf}(\cdot)$ is evaluated by a high-order Taylor expansion, which requires many multiplication routines. $\operatorname{Softmax}(\textbf{x}_i) = \frac{\exp(\textbf{x}_i)}{\sum_j \exp(\textbf{x}_j)}$ is slow because (1) the exponential function is evaluated by several iterations of squaring, which requires many multiplication routines, and (2) the maximum operation over $\mathbf{x}$ is required for numerical stability [@paszke2019pytorch], which requires comparison routines.

In this section, we present the [MPCFormer]{.smallcaps} framework. [MPCFormer]{.smallcaps} allows the model provider to convert its Transformer model into a faster yet performant one for private inference service. In [4.1](#sec:method_pipeline){reference-type="ref" reference="sec:method_pipeline"}, we introduce the workflow of [MPCFormer]{.smallcaps} (Figure [1](#fig:intro){reference-type="ref" reference="fig:intro"}), followed by the details of each step in [4.2](#sec:method_details){reference-type="ref" reference="sec:method_details"}.

In the inference service, a model provider holds a Transformer model $\mathcal{T}$, and the user holds data $X$. They agree on an MPC system to perform private inference. In §[2](#sec:background){reference-type="ref" reference="sec:background"}, we illustrate that using $\mathcal{T}$ to perform the inference in the MPC system is slow. Instead of using $\mathcal{T}$, the model provider can use [MPCFormer]{.smallcaps} to generate a better-suited model $\mathcal{S}$. $\mathcal{S}$ runs much faster than $\mathcal{T}$ in the MPC setting while having ML performance similar to $\mathcal{T}$.

To use [MPCFormer]{.smallcaps}, the model provider needs to provide the trained Transformer model $\mathcal{T}$, the downstream dataset $\mathcal{D}$, and MPC-friendly approximations $\mathcal{A}$. These MPC-friendly approximations $\mathcal{A}$ need to be fast in MPC and will be used to replace bottleneck functions in $\mathcal{T}$. The workflow can be concisely described as: $$\begin{equation}
\small
\begin{aligned}
\operatorname{Convert:} \;\; \mathcal{S} &= \operatorname{\textsc{MPCFormer}{}}(\mathcal{T}, \mathcal{D}, \mathcal{A}) \\
\operatorname{Inference:} \;\; y &= \operatorname{MPC}_{\mathcal{S}}(X)
\end{aligned}
\end{equation}$$

[MPCFormer]{.smallcaps} is a two-stage framework, as shown in Figure [2](#fig:overview){reference-type="ref" reference="fig:overview"}. The first stage leverages $\mathcal{A}$ and $\mathcal{T}$ to construct an MPC-friendly Transformer architecture $\mathcal{S}'$, which achieves *fast* inference in the MPC system. The second stage applies knowledge distillation (KD) to $\mathcal{S}'$ to learn the output model $\mathcal{S}$, which is fast in MPC *and* preserves the *high performance* of $\mathcal{T}$.

![The overview of the [MPCFormer]{.smallcaps} framework. The first stage uses MPC-friendly approximations and $\mathcal{T}$ to construct a faster Transformer architecture $\mathcal{S}'$. The second stage uses knowledge distillation on $\mathcal{S'}$ to learn a performant and fast Transformer model $\mathcal{S}$.](iclr2023/figures/overview.pdf){#fig:overview width="0.8\\columnwidth"}

In the first stage, [MPCFormer]{.smallcaps} replaces bottleneck functions in $\mathcal{T}$ with the given $\mathcal{A}$ to construct an MPC-friendly Transformer architecture $\mathcal{S}'$ (Figure [2](#fig:overview){reference-type="ref" reference="fig:overview"}). Below we show how we construct $\mathcal{A}$ for our experiments, *i.e.,* using the MPC system of @knott2021crypten. In §[2](#sec:background){reference-type="ref" reference="sec:background"}, we identify the bottlenecks to be the GeLU and Softmax functions; we thus construct $\mathcal{A}$ for these two functions.

The analysis in §[2](#sec:background){reference-type="ref" reference="sec:background"} shows that multiplication in MPC requires extra communication; thus, quadratics are the fastest nonlinear activations in MPC. Since the GeLU and ReLU functions share similar function values, we simply take the quadratic approximation designed for the ReLU function (§[3](#sec:related){reference-type="ref" reference="sec:related"}) to approximate the GeLU function: $\operatorname{GeLU}(x) \approx 0.125x^2 + 0.25x + 0.5.$ We denote this approximation as "Quad".
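
A quick numerical sketch of "Quad" against the exact GeLU (the driver loop is illustrative; as discussed below, the distillation stage, not value closeness, is what recovers accuracy):

```python
import math

def gelu(x):
    return 0.5 * x * (1.0 + math.erf(x / math.sqrt(2.0)))

def quad(x):
    # MPC-friendly: a single multiplication level instead of an erf expansion.
    return 0.125 * x * x + 0.25 * x + 0.5

for v in (-2.0, -1.0, 0.0, 1.0, 2.0):
    print(f"x={v:+.1f}  gelu={gelu(v):+.3f}  quad={quad(v):+.3f}")
```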

Prior works on CNNs have developed an MPC-friendly approximation to the Softmax function [@mohassel2017secureml]: $$\begin{equation}
\label{eq:2relu}
\small
\operatorname{softmax}(x) \approx \operatorname{ReLU}(x) / \sum \operatorname{ReLU}(x)
\end{equation}$$ We validate that this has a faster inference speed than the Softmax function in our setting (Figure [3](#fig:approx_breakdown){reference-type="ref" reference="fig:approx_breakdown"}). We denote this approximation as "2ReLU". However, this is not yet satisfactory: the analysis in §[2](#sec:background){reference-type="ref" reference="sec:background"} shows that evaluating the ReLU function requires heavy use of comparison routines, which is very expensive. Thus, we propose a more aggressive approximation for the Softmax by replacing the ReLU in Eq. [\[eq:2relu\]](#eq:2relu){reference-type="ref" reference="eq:2relu"} with a quadratic function: $$\begin{equation}
\label{eq:2quad}
\small
\operatorname{softmax}(x) \approx (x+c)^2 / \sum (x+c)^2
\end{equation}$$ We denote this as "2Quad". Importantly, "2Quad" and the Softmax function differ considerably in numerical values, while prior works argue that similarity in numerical values is crucial to the model's performance [@chou2018faster]. We are able to use this aggressive approximation because our distillation stage is effective enough to bridge the performance gap.
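
Both Softmax approximations on an attention-score vector, as a NumPy sketch (the value of $c$ here is a placeholder, not the paper's setting):

```python
import numpy as np

def softmax_2relu(x):
    # "2ReLU": still needs comparison routines to evaluate ReLU.
    r = np.maximum(x, 0.0)
    return r / (r.sum() + 1e-12)

def softmax_2quad(x, c=5.0):
    # "2Quad": multiplications only, no comparisons and no exponentials.
    q = (x + c) ** 2
    return q / q.sum()

scores = np.array([-1.0, 0.0, 2.0])
print(softmax_2relu(scores), softmax_2quad(scores))
```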

![Running time of the original functions and of the MPC-friendly approximations in [\[sec:approximation\]](#sec:approximation){reference-type="ref" reference="sec:approximation"}. Blue areas represent the total running time. Orange areas represent the communication time. MPC-friendly approximations greatly reduce both the communication time and the total time. In particular, our proposed "2Quad" is much faster than the original Softmax function and the previous "2ReLU" approximation.](iclr2023/figures/barplot.pdf){#fig:approx_breakdown width="0.65\\columnwidth"}

Figure [3](#fig:approx_breakdown){reference-type="ref" reference="fig:approx_breakdown"} compares the running time of the original GeLU and Softmax functions against their approximations. In particular, 2Quad has a much faster inference speed than 2ReLU.

In the second stage, we use KD to make the fast approximated Transformer model $\mathcal{S}'$ performant. The benefits of KD are two-fold. First, it allows us to use more aggressive approximations such as "2Quad", which leads to higher speedups. Second, its data efficiency allows us to effectively learn a good $\mathcal{S}$ with small downstream datasets. Concretely, we conduct layer-wise distillation to transfer knowledge from the input model $\mathcal{T}$ to $\mathcal{S}'$ by matching representations at the following four positions: (1) the embedding layer, (2) the attention matrix in each Transformer layer, (3) the hidden states after each Transformer layer, and (4) the final prediction layer. These four positions have been shown to store meaningful information in previous works [@hinton2015distilling; @jiao2019tinybert; @clark2019does]. We use the mean squared error (MSE) loss to match the representations between $\mathcal{T}$ and $\mathcal{S}$ at all positions. We follow the learning procedure of @jiao2019tinybert to first distill the embedding and Transformer layers (including the attention matrices and the hidden states) and then distill the prediction layer.
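
A minimal sketch of the layer-wise matching objective (the dictionary layout of teacher/student outputs is an assumption; the staged schedule follows @jiao2019tinybert):

```python
import torch.nn.functional as F

def distillation_loss(student, teacher):
    """MSE matching at the embedding layer, each layer's attention matrix,
    each layer's hidden states, and the prediction layer."""
    loss = F.mse_loss(student["embeddings"], teacher["embeddings"])
    for s_att, t_att in zip(student["attentions"], teacher["attentions"]):
        loss = loss + F.mse_loss(s_att, t_att)
    for s_hid, t_hid in zip(student["hidden_states"], teacher["hidden_states"]):
        loss = loss + F.mse_loss(s_hid, t_hid)
    return loss + F.mse_loss(student["logits"], teacher["logits"])
```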

An important component of knowledge distillation is the initialization of the student model [@sanh2019distilbert]. Taking advantage of the fact that $\mathcal{S}'$ and $\mathcal{T}$ share the same architecture, we initialize $\mathcal{S}'$ with the weights of $\mathcal{T}$. We find that this outperforms random weight initialization, especially on smaller datasets (§[5.3](#sec:ablation){reference-type="ref" reference="sec:ablation"}).
2211.07521/main_diagram/main_diagram.drawio
ADDED
@@ -0,0 +1 @@
<mxfile host="app.diagrams.net" modified="2021-08-18T14:32:50.154Z" agent="5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.164 Safari/537.36" etag="hPDBcN4_xutOD7vlRr0s" version="14.9.6" type="google"><diagram id="18sp0VDz4fwBFDSJWjcN" name="Page-1">7Zpdc5s4FIZ/jS+TQQjzcZk4abu73dnMZGea7k1HBhloMfKCHNt7sb99JSQMEmBjJ5htG18k6JUQQuc50pHQBM6W2/cZWkW/kwAnE9MIthN4NzFNx4TsLxd2QrAMUwhhFgdCMirhMf4HCxGU6joOcC41IVFCEhqvVNEnaYp9qmgoy8hGLbYgSaAIKxTihvDoo6SpfooDGgnVM4xK/4DjMJJPhmXGEpVlpZBHKCCbmgTvJ3CWEULF1XI7wwnvOrVb3nXk7tuV4ZT2ueHXP6f+l1v7w81fcbxM7D8ev8y3V6Y0xjNK1vKFZWvpruwBHLAOkUmS0YiEJEXJfaXeZmSdBpg/x2CpqsxHQlZMBEz8iindSeuiNSVMiugykbk5zci3ff9CpohW8Ed3vq6UcrLOfHzoHSU1KAsxPVAOuHurMJgxWWKa7diNGU4QjZ/VhiCJVbgvV3U9u5C9f4olWgxhJ5R3zgqlikXsv9ccmtsFSenVAi3jhPnWzYTb30bLVdF7EFqFYeaE9bUuF0VnvE6U5lc5zuIFl4qMotK8MBSv0rRX230exVt6hZI4TEVmghe0ag67CuX/ot3zmiCen85z/u8hw88xWeeixaJsVhb+LSWbpLB7W+5NGGY4ZOYgaZnJenuuP5ppotdKWUO6ApbTt4liih9XqKBow0axNjhnJCFZcTe0fRfPF9wAcZLU9ABhd+HzO0pXL27/hqkfSd+QRsYZ68rDYDc5LG9wwbVR/9miBjm+AkfWuKlGKzZeCS2qjVSePRDJsJNkzlY3yTp0R7l6t86PcyCeOQ4HYYLyXFp+UCYc01IhKKGoQQCA04QAlNPVq1Ng9ZhY0uCGz9As5fOeiv1js8LxqYh1WLZ7kj1bJD7zxDX0pqVwt61n3+3qqQc2FrL3x1kpbmP6JJvDr0VlU5mqauKJnWLPPlPX0SkJ9pzihGmbeNTMPzWa1i+13jOcfMIDiQtHlvTZnqHQ52pMiebLm+oRil4PMFnXKhwDrSrRY42qGEZoVyu24gXyQy1WB03XPNwwrTiwbM0/RAMqb9nb4HwHmg7iQGfwXHc6nMzJpo+/Td2z/G0M3+kI+y7jO8CYutfT2myuOgArULrEqd4E2ICvu5NtDeRO2nM852DTnDIeKd/SsA6/iuE2RgbHuMQt9skNMy1lLBlmbLD/L2NDf3896odwXDeEUDdk2amnOp7rqtMhmGqRdofXvRYcTgscrxF/P+0XpOt9oA3qK671yfE3X1AeCrJTkmItvpaSXIXe+YwzPnvc8vg59lFyIzOWcRAUM1RbVK9uWPAmlhOcJOdF0TjUQCrTNZStFpTNoUJxdyAg1Fo0SBqmb5Zt4ck8kac3zGqTaHPNd1HMvO8HM/iG2bmYAccdF7NySfg9cPapnbN/30BrAc1yFdCc/cpnNNJAC2kvD7O7VsxH1sv9Q+3jXzpG3S5isbZiaPfMONuyXS1kd7W2DBxoA/OSfDgXBMQckw9ts8204Hl8XNmGvqSDzmUBafsU8iMMIOPyoc4TJvBejQ8Arz3Dq34XxmWYbyYduJg/KS/AO3NDtckLcL3LAjLMN4F2QH4SOoABFJvqPt+bjqlOh1fM2Rfl47R9YbkmCFAe7b+B10jh+gOibJ2QFoppwAY/1gkkjGZhoA0AzpkBRaMiaF7WvEPt7DZOVnwkbEnIis8ykvNjOrMIpSk/16ce5Gmez/mFLyuR3+N8zpjnMoY+n6NQUkJy9DyO1w32y5ax3fu/DcO8jKP3CZmfx02vbZZzm9UOZcfGy3xwKPm2iTgIadpjQWpZ2oGh3qfG3JMpZcnqlKsYC6uTwvD+Pw==</diagram></mxfile>
2211.07521/main_diagram/main_diagram.pdf
ADDED
Binary file (44.5 kB). View file
2211.07521/paper_text/intro_method.md
ADDED
@@ -0,0 +1,115 @@
# Introduction

Over the years, CNN architectures have evolved with many ideas to better handle spatial image features. However, the localized nature of convolutions means such features lack a global view of the image. Deeper architectures emerged that stack multiple convolution layers, known by different names: backbone, bottleneck, feature extractor, or encoder. The main strength of such architectures is their ability to cover spatial features at multiple scales: as we go deeper, the feature maps get smaller while their content represents a wider region in space, which gets us closer to better semantics of the image contents [@luo2016understanding]. Since the emergence of AlexNet [@krizhevsky2012imagenet], much research has investigated how to further improve the performance of deep CNNs. [@simonyan2014very] [@he2016deep] [@szegedy2017inception] [@szegedy2016rethinking] [@srivastava2015training] have sought to strengthen CNNs by making them deeper and deeper, showing that increasing the depth of a network can significantly increase the quality of the learned representations. Many researchers continue to investigate further performance improvements by incorporating attention mechanisms that exploit the relationships between the learned spatial features.

Attention modules, in general, are designed to suppress noise while keeping useful information by refining the learned features through attention scaling. This mirrors the human perception process [@mnih2014recurrent], where high-level information guides the bottom-up learning process, capturing more sophisticated features while disregarding irrelevant details. Human perception and visual attention [@beck2009top] [@desimone1998visual] [@mnih2014recurrent] [@desimone1995neural] are enhanced by top-down stimuli, and non-relevant neurons are suppressed in feedback loops. With reference to the human visual system, various attention mechanisms [@cao2019gcnet] [@wang2018non] [@zhao2020exploring] [@vaswani2017attention] [@wang2017residual] have been explored and integrated into deep CNNs. Attention mechanisms were introduced in the context of CNNs to capture the relations between features, either across the spatial dimension as in [@carion2020end] [@hu2018gather], across the channel dimension as in [@wang2018non] [@cao2019gcnet] [@hu2018squeeze] [@lee2019srm] [@Wang_2020_CVPR], or across both dimensions as in [@park2018bam] [@woo2018cbam] [@fu2019dual] [@wang2017residual] [@linsley2018learning] [@roy2018recalibrating] [@chen2017sca]. Although these attention methods have achieved higher accuracy than their counterpart baselines that invoke no attention mechanisms, they often bring higher model complexity and exploit only the current feature map while refining it, which is why we call them local attention mechanisms.

Exploiting previous knowledge has been applied to image classification [@huang2017densely] [@iandola2014densenet], image segmentation [@ronneberger2015u], tracking [@ma2015hierarchical], and human pose estimation [@newell2016stacked], where it yields enhanced performance. DenseNets [@huang2017densely] encourage feature reuse by connecting each layer to every other layer in a feed-forward fashion. U-Net [@ronneberger2015u] consists of two paths, a contracting path to capture context and a symmetric expanding path that enables precise localization, where feature reuse is introduced through skip connections between the two paths. Driven by the significance of employing feature reuse while learning different tasks [@szegedy2015going] [@huang2017densely] [@iandola2014densenet] [@chen2016attention] [@ma2015hierarchical] [@newell2016stacked], a question arises: how can one incorporate previous knowledge aggregation while learning channel attention more efficiently?

To answer this question, we introduce PKCAM, a novel feature recalibration module based on channel attention, which improves the quality of the representations produced by a network by using global information to selectively emphasize informative features and suppress less useful ones. In contrast to the aforementioned attention mechanisms, our global context-aware attention block obtains additional inputs from all preceding attention blocks at the same depth and passes its refined feature maps to all subsequent blocks. This creates global awareness by exploiting previous knowledge aggregated from earlier layers, which capture fine-grained information useful for precise localization, while also attending to features that encode abstract semantic information robust to target appearance changes.

The contributions of this paper are summarized as follows:

- We propose a simple and effective attention module, PKCAM, which can be integrated easily with any CNN and applied across all of its blocks thanks to the lightweight computation of our novel architecture.

- We verify the effectiveness and robustness of PKCAM through extensive experiments with various baseline architectures on the KITTI dataset.

- Through detailed analysis and ablation studies, we examine the internal behavior and validity of our method.

# Method

In this section, we first give an abstract overview of our PKCAM. Then, we present the motivation for adopting the feature reuse concept, exploiting previous knowledge to create a globally aware attention block (i.e., PKCAM). In addition, we develop a method to efficiently fuse the local and global cross-channel interaction modules, and finally show how to integrate PKCAM into an arbitrary CNN architecture.

Scrutinizing the aforementioned channel attention techniques shows that previous knowledge aggregation has not been explored from the channel attention module perspective. We therefore study previous knowledge cross-channel interaction by proposing PKCAM.

Fig. [1](#Fig-MSCA_archeticture){reference-type="ref" reference="Fig-MSCA_archeticture"} illustrates our PKCAM, which exploits both local and global feature maps by fusing two parallel attention paths while recalibrating the current feature map. The first path consists of the local cross-channel interaction (LCCI) module, which models channel-wise relationships of the aggregated feature map; we call this module local as it operates on the current feature map only. The second path, which we call the global path, consists of two stacked modules: previous knowledge aggregation (PKA) and global cross-channel interaction (GCCI). The PKA module covers the channel interactions across different preceding aggregated feature maps, while the GCCI module utilizes the refined features produced by the PKA module to model channel-wise relationships in a computationally efficient manner. Both the LCCI and GCCI modules can be any of the off-the-shelf channel attention techniques studied in Section [2](#Related work){reference-type="ref" reference="Related work"}.

In contrast to the aforementioned channel attention techniques, which rely only on the current output of an arbitrary CNN block, our proposed PKCAM exploits both the current CNN block output, $x_0 \in \mathbb{R}^{H_0\times W_0\times C_0}$, and a range of earlier CNN block outputs, $X_p = [x_1, x_2, ..., x_R]$, where $R$ is the coverage region that delimits how many previous CNN block outputs are consolidated alongside the current CNN block, with $x_1 \in \mathbb{R}^{H_1\times W_1\times C_1}$, $x_2 \in \mathbb{R}^{H_2\times W_2\times C_2}$, and $x_R \in \mathbb{R}^{H_R\times W_R\times C_R}$.

In general, the earlier features $X_p$ have different channel dimensions, as the convention is to increase the depth as we go deeper. Therefore, the first operation in our Previous Knowledge Aggregation (PKA) block is aligning the channel dimension across the different CNN blocks. Since $C_0 \ge C_1 \ge C_2 \ge C_R$, the aligning operation can be done with learnable upsampling techniques or a simple repeating operation to match the channel dimension of the current CNN block $C_0$, producing channel-aligned feature maps $x'_0 \in \mathbb{R}^{H_0\times W_0\times C_0}$, $x'_1 \in \mathbb{R}^{H_1\times W_1\times C_0}$, $x'_2 \in \mathbb{R}^{H_2\times W_2\times C_0}$, and $x'_R \in \mathbb{R}^{H_R\times W_R\times C_0}$.

Analogous to aligning the channel dimensions, the spatial dimensions $H$ and $W$ are aligned through a squeeze operation by adapting the global average pooling equation as follows: $\widetilde{X} = \frac{1}{RWH}\sum_{k=1}^{R}\sum_{i=1}^{W}\sum_{j=1}^{H} X_{kij}$, where $\widetilde{X} \in \mathbb{R}^{R\times 1\times 1\times C_0}$ represents the squeezed feature maps from the channel-aligned aggregated feature maps $x'_i$, $i = 0, 1, ..., R$, producing $\widetilde{X} = [\widetilde{x}_0, \widetilde{x}_1, ..., \widetilde{x}_R]$ with $\widetilde{x}_0 \in \mathbb{R}^{1\times 1\times C_0}$, $\widetilde{x}_1 \in \mathbb{R}^{1\times 1\times C_0}$, and $\widetilde{x}_R \in \mathbb{R}^{1\times 1\times C_0}$.
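
A sketch of this PKA squeeze step in PyTorch (channel alignment by simple repetition, then global average pooling; function and variable names are ours):

```python
import torch

def pka_squeeze(feats):
    """feats: [x_0, x_1, ..., x_R], each of shape (B, C_k, H_k, W_k) with C_0 >= C_k.
    Returns squeezed, channel-aligned descriptors of shape (B, R+1, C_0)."""
    c0 = feats[0].shape[1]
    out = []
    for x in feats:
        reps = -(-c0 // x.shape[1])            # ceil(C_0 / C_k)
        x = x.repeat(1, reps, 1, 1)[:, :c0]    # align channels to C_0 by repeating
        out.append(x.mean(dim=(2, 3)))         # global average pool -> (B, C_0)
    return torch.stack(out, dim=1)             # (B, R+1, C_0)
```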

Given the aggregated feature $\widetilde{X}$, previous knowledge cross-channel attention can be learned by $Y=f(\widetilde{X})$, where $Y \in \mathbb{R}^{1\times 1\times C_0}$, $f(\widetilde{X}) = W' \widetilde{X}$, and $W'$ can take one of the following forms,

$$\begin{equation}
\label{eq_fc}
W^{'}=
\begin{cases}
W_{1}^{'}=
\begin{bmatrix}
W^{'}_{1,1} & \cdots & W^{'}_{1,RC_0}\\
\vdots & \ddots & \vdots\\
W^{'}_{RC_0,1} & \cdots & W^{'}_{RC_0,RC_0}
\end{bmatrix} \\
W_{2}^{'}=
\begin{bmatrix}
W^{'}_{1,1} & 0 & \cdots & 0\\
0 & W^{'}_{2,2} &\cdots & 0 \\
\vdots & \vdots & \ddots & \vdots\\
0 & 0 & \cdots & W^{'}_{RC_0,RC_0}\\
\end{bmatrix}
\end{cases}
\end{equation}$$ where $W'_1$ is an $RC_0 \times RC_0$ parameter matrix that learns the previous knowledge interaction in conjunction with the cross-channel interaction, whereas $W'_2$ is a diagonal matrix with $RC_0$ parameters that learns the previous knowledge interaction and channel interaction while neglecting cross-channel relations. The key difference is that $W'_1$ considers previous knowledge cross-channel interaction while $W'_2$ does not, making $W'_1$ more complex than $W'_2$. Interpreting Eq. [\[eq_fc\]](#eq_fc){reference-type="ref" reference="eq_fc"} in terms of neural networks, $W'_1$ and $W'_2$ can be regarded as a fully connected layer and a depth-wise separable convolution layer, respectively. However, as is obvious from Eq. [\[eq_fc\]](#eq_fc){reference-type="ref" reference="eq_fc"}, $W'_1$ and $W'_2$ have a tremendous number of parameters, leading to high model complexity, especially for large channel numbers, since typically $C_0 \gg R$.

Therefore, we divide learning the previous knowledge cross-channel interaction into two sub-modules, as shown in Fig. [1](#Fig-MSCA_archeticture){reference-type="ref" reference="Fig-MSCA_archeticture"}: learning the previous knowledge interaction, and exploiting the cross-channel interaction. Consequently, in contrast to Eq. [\[eq_fc\]](#eq_fc){reference-type="ref" reference="eq_fc"}, $f(\widetilde{X})$ is split into two cascaded functions, $f_1(\widetilde{X})$ and $f_2(\widetilde{X})$, where $f_1(\widetilde{X})$ learns the previous knowledge channel interaction and $f_2(\widetilde{X})$ learns the cross-channel interaction.

Previous knowledge channel attention can be learned by Eq. [\[eq_sum\]](#eq_sum){reference-type="ref" reference="eq_sum"}, where for each channel the global information is aggregated using a simple summation operation, with no learnable parameters involved. $$\begin{equation}
\label{eq_sum}
Y = f_1(\widetilde{X}) = \sum^{C_0}_{L=1}\sum^{R}_{K=1} \widetilde{X}_{LK}
\end{equation}$$ A possible compromise between Eq. [\[eq_fc\]](#eq_fc){reference-type="ref" reference="eq_fc"} and Eq. [\[eq_sum\]](#eq_sum){reference-type="ref" reference="eq_sum"} is Eq. [\[eq_ms\]](#eq_ms){reference-type="ref" reference="eq_ms"}, where only a tiny number of parameters is used ($W^{'} \in \mathbb{R}^{1 \times 1 \times R}$) compared to the tremendous number of parameters invoked in Eq. [\[eq_fc\]](#eq_fc){reference-type="ref" reference="eq_fc"} while learning the previous knowledge channel interaction. From the perspective of convolutional neural networks, Eq. [\[eq_ms\]](#eq_ms){reference-type="ref" reference="eq_ms"} can readily be interpreted as a 1-D convolution layer with kernel $k = \widetilde{W}$.

$$\begin{equation}
\label{eq_ms}
Y = f_1(\widetilde{X}) = \sum^{C_0}_{L=1} W^{'} \widetilde{X}_{L}
\end{equation}$$
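
Interpreted as a network layer, Eq. [\[eq_ms\]](#eq_ms){reference-type="ref" reference="eq_ms"} is a tiny learned mixture over the aligned maps, shared across channels (a sketch; sizes are illustrative):

```python
import torch
import torch.nn as nn

R = 3                                                  # illustrative coverage region
mix = nn.Conv1d(R + 1, 1, kernel_size=1, bias=False)   # only R+1 learnable weights

x_tilde = torch.randn(8, R + 1, 256)   # (B, R+1, C_0), e.g., from pka_squeeze above
y = mix(x_tilde).squeeze(1)            # learned weighted sum over maps -> (B, C_0)
```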

Global cross-channel interaction can be learned by adopting one of the local channel attention modules [@hu2018squeeze] [@cao2019gcnet] [@wang2018non] [@lee2019srm] [@Wang_2020_CVPR] [@gao2019global], producing $Z_1 = f(Y)$ with $Z_1 \in \mathbb{R}^{1\times 1\times C_0}$. [@Wang_2020_CVPR] [@gao2019global] [@hu2018squeeze] [@cao2019gcnet] [@wang2018non] [@lee2019srm] use the term global because they take the whole spatial dimension of the fed features into consideration using GAP (global average pooling); in contrast, we use the term global to refer to previous knowledge aggregation.

PKCAM contains two cross-channel attention modules, as shown in Figure [1](#Fig-MSCA_archeticture){reference-type="ref" reference="Fig-MSCA_archeticture"}: one learns the cross-channel interaction from the global feature map, while the other learns the cross-channel interaction from the local feature map. For both modules, ECA-Net [@Wang_2020_CVPR] is adopted empirically based on Section [4.1.2](#Ablation studies-basic local attention module){reference-type="ref" reference="Ablation studies-basic local attention module"}. Finally, the current convolution output $x_0$ is recalibrated by the learned scales $S$, following Eq. [\[eq_recalibration\]](#eq_recalibration){reference-type="ref" reference="eq_recalibration"}, where $F \in \mathbb{R}^{1 \times 1 \times C_0}$, and $S \in \mathbb{R}^{1 \times 1 \times C_0}$ is obtained by fusing the global learned scales $Z_1$ with the local learned scales $Z_2$, following Eq. [\[eq_fusion\]](#eq_fusion){reference-type="ref" reference="eq_fusion"}.

$$\begin{equation}
\label{eq_recalibration}
F = x_0 \odot S
\end{equation}$$ $$\begin{equation}
\label{eq_fusion}
S = \phi(Z), \quad Z = \operatorname{Concat}(Z_1,Z_2)
\end{equation}$$

where $\phi(Z)$ represents the mapping function from the global and local scales to the final scales $S$. The simplest mapping is summing the local and global scales as shown in Eq. [\[eq_sum_fusion\]](#eq_sum_fusion){reference-type="ref" reference="eq_sum_fusion"}, where no learnable parameters are included. We call this a shallow fusion mechanism, as a fixed mapping is followed regardless of the feature map's structure and nature. $$\begin{equation}
\label{eq_sum_fusion}
\phi(Z) = \sum_{L=1}^{C_0} \sum_{K=1}^{2} Z_{LK}
\end{equation}$$ Another possible mapping function considers the full cross-channel interaction between the local and global scales, which can be regarded as a fully connected layer with a tremendous number of parameters. We waive this costly full cross-channel interaction to abide by our objective of learning channel attention efficiently and in a lightweight manner, while also avoiding the shallow summation fusion mechanism of Eq. [\[eq_sum_fusion\]](#eq_sum_fusion){reference-type="ref" reference="eq_sum_fusion"}. Therefore, Eq. [\[eq_ms_fusion_M_form\]](#eq_ms_fusion_M_form){reference-type="ref" reference="eq_ms_fusion_M_form"} can be considered a compromise mapping function, where we only consider the channel interaction between each local channel and its counterpart in the global scales, which can be achieved easily using a 1-D convolution layer with a kernel size of two.

$$\begin{equation}
\label{eq_ms_fusion_M_form}
\phi(Z) =
\begin{bmatrix}
W_Z^1 & W_Z^2 \\
\end{bmatrix} \otimes
\begin{bmatrix}
Z_1^1 & \cdots & Z_1^{C_0}\\
Z_2^1 & \cdots & Z_2^{C_0}
\end{bmatrix}
\end{equation}$$
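
This compromise fusion amounts to one shared 1-D convolution of kernel size two over each (local, global) channel pair, as in this sketch (the sigmoid gate at the end is an assumption):

```python
import torch
import torch.nn as nn

fuse = nn.Conv1d(1, 1, kernel_size=2, bias=False)  # two weights: W_Z^1, W_Z^2

z1 = torch.randn(8, 256)                       # global scales Z_1, shape (B, C_0)
z2 = torch.randn(8, 256)                       # local scales  Z_2, shape (B, C_0)
z = torch.stack([z1, z2], dim=-1)              # (B, C_0, 2): one pair per channel
s = fuse(z.reshape(-1, 1, 2)).reshape(8, 256)  # weighted pair -> final scales S
s = torch.sigmoid(s)                           # squash to [0, 1] (assumed gating)
```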

![A general way of integrating PKCAM into an arbitrary CNN architecture.]{#Fig-PKCAM_integration width="95%"}

::: {#Tab-all-vs-last}
  Integration type   All blocks   Last block
  ------------------ ------------ ------------
  ResNet-18          71.1         **71.15**
  ResNet-34          74.25        **74.43**
  ResNet-50          77.50        **77.56**

  : Comparison of the various ways to integrate PKCAM into CNNs using the ImageNet dataset. Top-1 accuracy is reported.
:::

Figure [2](#Fig-PKCAM_integration){reference-type="ref" reference="Fig-PKCAM_integration"} illustrates a general way of integrating PKCAM into an arbitrary CNN architecture, where PKCAM is integrated into the last CNN block of each stage alongside an arbitrary local channel attention (LCA) module for the rest of the blocks. Due to the lightweight topology of our PKCAM, it can also be integrated into every block, completely replacing the LCA.

![Pseudocode for our PKCAM.]{#Fig-PKCAM_Pseudo_code width="95%"}

Figure [3](#Fig-PKCAM_Pseudo_code){reference-type="ref" reference="Fig-PKCAM_Pseudo_code"} shows pseudocode for our PKCAM, demonstrating how easily it can be integrated into any CNN architecture.

::: table*
:::
2211.07710/main_diagram/main_diagram.drawio
ADDED
@@ -0,0 +1 @@
<mxfile host="app.diagrams.net" modified="2022-10-24T08:11:07.847Z" agent="5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/106.0.0.0 Safari/537.36" version="20.4.2" etag="yhN1EByl9iNr-Mf9CUSe" type="google"><diagram id="-b3SLyvowz2A3mZ0mzGM">7Vxdc7I4FP41XvYdIIJ4aW273Zl25521M7u9TCEqWyRujFX3128iCQgJFdsg7Su9KTmQg5w8z/kiQw+MF9vfCFzOH3GI4p5jhdseuOk5jm37HvvHJbtUMgDDVDAjUSguygWT6D8khJaQrqMQrQoXUoxjGi2LwgAnCQpoQQYJwZviZVMcF++6hDOkCCYBjFXpX1FI56nUd61cfo+i2Vze2bbEmQWUFwvBag5DvDkQgdseGBOMaXq02I5RzI0n7ZLOu6s4m/0wghJaZ4KTTniD8Vo82+8J5XMd6ydBYRTQCCdscI9gKH4x3UkzELxOQsQ12T1wvZlHFE2WMOBnN2zhmWxOF7E4PYvhipvdYschXM2zeZAEYoVthw9fVjheUzTKxUz6hgiN2AKM4miWMNkLphQv2IkpTqi4zuGqxfOwy9G20iZ2ZmkGUYQXiJIdu0RMcPpicXaldd3kaw2kbH6wzkMhgwJes0x1vgLsQCyCfkGAsiBtmr0Ba7r9mtYElgFz9o+bE4WM22KY4IT9u2asXPKh8B+YMFnR6pjQOZ7hBMYPGC+F8B9E6U5YD64pLq5EDF9QfA2D19le1RjHXO9NiKZwHVN+V0rwK1LlHOJ3cBHF3IL3KH5DnApF7NuuGKvTURKOuM/jrIlx8Po0j5JUfBfF8setKCRSl5eePRiVsJKakNutgIkVXpNAiFzhlSGZIcmO2sghKIY0eitq/wwKXAUFzLldLQkO0GoVJTMFE0dIVcTClJlRmr3ngLu78Xg4zNZTBghbWeEehzj/Oycb3RIbbZWNWbQ6ZKNjgIxeDTLmYA2454qC9yzfLKXsSkq9zxad92DLQ3Z/cz/8w5XDZ+GW94ObbWG0k6NtRA+msdGzeHZ+nE/iAznndLYOVLa6tZF1gBydG5ey2qQWd/iJo30asi1FAwHcvl8CZPo8YtZhxlNWZJcUlZGdWkFRtAd39ti18D7QJFfLNV0pqGd8pkWcF4ErwtKhoxEiKPKhgK0SIppEaRGFIb+NNk/I2WSluE1zUUvrvGpxBlRyBrNbR5TP9SpBatS19QdndG1+l2e0kmcMVc9lO+0lGkMFBn9gsjCaXkynUycIvkt6oSmdGuOgdOwdCc9MQsm4Agv77bHQtltbd5mvyePnQu6mz9fy3LCQGeaJYkVueCLGDv1B+ygr9CJKLi50kR/2jUFR+pZWoKg22NyrGO5YruZY19HD5OnxClLecUv7bNfcnGbjhR+g7xMvhjXjhWsiXoDW3cQpZd1HS8jLCkV9TSjyW+S/2oDs+F/Nf887J//VtuC7DiHn+W0uLdXQH/AFHwC5p4IctFj12Gpjz+lAXgnygX9OkA++VZC7rGDla4KV1yKP1S7WU7RATDLGydsPo4xFNsvyB1rGLhFhd6WIcNX87cxhR/JLERnUJDIwQeRh60T+gkXtiQ6guj1tuqQ93RvIMrVQuraYujpqO+0hShDkQX3A0lp2mj2WNcFTuoBbs84BTCECJ4bzVnY3AP94ATtsKLQ7XZvrktpcJn1AxavRcjVm93+wiiz7Kyms/cbVP0mtufevLfb/O4JcBEH8ZgjyvlqDBGm/A9oR5AhBvhsnfKsRThxRa5AT/Y4Tv3adpdmb2ubmVM3u1KzOAhznXaGli5GaNwWNFVpe5xK+tkv4gBfQ7HkFDQTN2hhTN4tmXsC1vqIbaIv5utcnjTFft6/Ty0BfWAHv3zWWJ65WexOM2AU2WG73hpDn2dGM/x/PIYEBRWQlVbJfk2pNL1CW+PPbhGM05Roud5OwW9pG7lkqmPyGOvia3aHGwDRZv2wwCfcpg9XhqT08aTad65yTCTwBtUHfWp5yUblDE3mCvgouw8u33KIKc3WvfKhmvZNrde6pRfc0qPl6yoh7chRAPd6PjOat32rbj3/OvW2g6+x2nV3Tnd1m3nYcUWswwnWd3V8vFdM0c8HAPENq+913mrm8mdA1caqDYVNNHFDnuwNdndQAORsgYs06yW+uTlIbtcbqpPTbS10/sL0CyT9n/+bzzWXHq0DSaPInOy2+vHYuNH2Rj1A4Xnt4KufMuq9dZV/E+mSgY8P8Q3GpJ8s/twdu/wc=</diagram></mxfile>
2211.07710/main_diagram/main_diagram.pdf
ADDED
Binary file (27.1 kB). View file