Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes. See raw diff
- 2002.08247/main_diagram/main_diagram.drawio +1 -0
- 2002.08247/main_diagram/main_diagram.pdf +0 -0
- 2002.08247/paper_text/intro_method.md +301 -0
- 2103.13841/main_diagram/main_diagram.drawio +0 -0
- 2103.13841/paper_text/intro_method.md +90 -0
- 2105.12848/main_diagram/main_diagram.drawio +1 -0
- 2105.12848/main_diagram/main_diagram.pdf +0 -0
- 2105.12848/paper_text/intro_method.md +155 -0
- 2106.03598/main_diagram/main_diagram.drawio +1 -0
- 2106.03598/main_diagram/main_diagram.pdf +0 -0
- 2106.03598/paper_text/intro_method.md +3 -0
- 2106.12871/main_diagram/main_diagram.drawio +1 -0
- 2106.12871/main_diagram/main_diagram.pdf +0 -0
- 2106.12871/paper_text/intro_method.md +47 -0
- 2107.00085/main_diagram/main_diagram.drawio +0 -0
- 2107.00085/paper_text/intro_method.md +85 -0
- 2108.12126/main_diagram/main_diagram.drawio +0 -0
- 2108.12126/paper_text/intro_method.md +151 -0
- 2110.06021/main_diagram/main_diagram.drawio +0 -0
- 2110.06021/paper_text/intro_method.md +64 -0
- 2110.09035/main_diagram/main_diagram.drawio +1 -0
- 2110.09035/main_diagram/main_diagram.pdf +0 -0
- 2110.09035/paper_text/intro_method.md +150 -0
- 2110.10031/main_diagram/main_diagram.drawio +1 -0
- 2110.10031/main_diagram/main_diagram.pdf +0 -0
- 2110.10031/paper_text/intro_method.md +90 -0
- 2110.12257/main_diagram/main_diagram.drawio +0 -0
- 2110.12257/paper_text/intro_method.md +86 -0
- 2205.01826/main_diagram/main_diagram.drawio +1 -0
- 2205.01826/main_diagram/main_diagram.pdf +0 -0
- 2205.01826/paper_text/intro_method.md +81 -0
- 2207.10080/main_diagram/main_diagram.drawio +1 -0
- 2207.10080/paper_text/intro_method.md +54 -0
- 2207.12646/main_diagram/main_diagram.drawio +0 -0
- 2207.12646/paper_text/intro_method.md +91 -0
- 2210.02159/main_diagram/main_diagram.drawio +1 -0
- 2210.02159/main_diagram/main_diagram.pdf +0 -0
- 2210.02159/paper_text/intro_method.md +155 -0
- 2210.16530/main_diagram/main_diagram.drawio +1 -0
- 2210.16530/main_diagram/main_diagram.pdf +0 -0
- 2210.16530/paper_text/intro_method.md +109 -0
- 2211.06401/main_diagram/main_diagram.drawio +1 -0
- 2211.06401/main_diagram/main_diagram.pdf +0 -0
- 2211.06401/paper_text/intro_method.md +82 -0
- 2212.01007/main_diagram/main_diagram.drawio +1 -0
- 2212.01007/main_diagram/main_diagram.pdf +0 -0
- 2212.01007/paper_text/intro_method.md +145 -0
- 2212.09034/main_diagram/main_diagram.drawio +0 -0
- 2212.09034/paper_text/intro_method.md +413 -0
- 2302.02061/main_diagram/main_diagram.drawio +206 -0
2002.08247/main_diagram/main_diagram.drawio
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
<mxfile host="Electron" modified="2020-06-04T20:55:16.602Z" agent="5.0 (Macintosh; Intel Mac OS X 10_14_6) AppleWebKit/537.36 (KHTML, like Gecko) draw.io/13.0.3 Chrome/80.0.3987.163 Electron/8.2.1 Safari/537.36" etag="lkNybjPIRkIQEnsexdRT" version="13.0.3" type="device"><diagram id="K2Svo0xp8vtg11QXq_fE" name="Page-1">7V1bc+pGtv41VE0e3NWr7/24bW/POXUymZxJKjOZl5QA2VaCgQM4255ff1brhi4tLDASwsa7tg0S6PZ969qrV4/4zdPLX1fB8vFvi2k4GzE6fRnx2xFjwITCP27La7KFgU02PKyiafqh7Yafov+E6Uaabn2OpuG69MHNYjHbRMvyxsliPg8nm9K2YLVafCt/7H4xK591GTyEtQ0/TYJZfes/o+nmMdlqJN1u/68wenjMzgw03TMOJn88rBbP8/R8I8bv459k91OQHSs9w/oxmC6+JZviQ/CvI36zWiw2yaunl5tw5p5t9tiS89w17M2vexXON22+8HT/w9P8Xytj6O/w9R//mjw//R9ccZ4c5s9g9hxm9xFf7eY1e0J44Uv38n4WvnxxT3zEr8P5NH15O5kF63U0wY2Pm6cZbgB8ud6sFn+EN4vZYhUfhBs25krhnvtoNitsv6G3cI2XfF2/nfQOw2kJwPTm/hounsLN6hU/8G0LW4baYwGxbNsqnAWb6M8y7EHKnof8cPkZflxEeCWMpky/UjR9NCnRORVEmvJh1ovn1SRMv1nEonowyaoHk+UjbYLVQ7ipHQlfFO59uynGeg/clfDgrmYbB9AivtItAdT/PS+yHVfrWIC/4AdALF+2O/HVg/v79/EmiOa4e7ZAGRs5TiaHHa+yj4Qvy1kwRywW8zVuC56WuHE+Xi+Tw8ySw90l15F8pcbJTfiyKTMumEUPc0dH5E+I5Lr+M1xtIryGL+mOp2g6dV+/XoV4E8E4PhTF90v3kOPHLq9H8tYd63mzSG40PnQjNd0pwped5MzZA7wMOEvVZIG9uWop0lfQZqaWWLEvBYC1kH2n4Jbt7z9X0+njLak673ORnJaeC2T2pPBchE+suRQdPRhVfy53YbB5Rt6clodOIlIDCsLLyzeAbk1XUIYYRUFRISz+gjJ3uSUILNU6/V1DTHsAY0cg8rf/+TW8uv1l879Lsfol+GXy319e/4P3BzXAdhG5NWvbP9v6g8zorAkXLQnNGFGqI05b8/Yjin2ZcJqS7dtjtAl/WgYTt/cb+n5lmlfM+DQIzf3EZ/jVxITje9zzsAqmET7NbN98MXdSsMBTRBv3uDj1cvoNzFtzGi0WYVZrpQXVUhpeVsigNJHUcooaVxsmQddgM5IIYRjVwlAmZaapStoaCLdUMWmpBim47QhPecGTo7iUELSK8Lom0kTUYQLKie4IGqHf71opn2s1UnT5l5ff4Dt8MUg/qWifjKMCHvp+FgcJjl6OTY4c84f0gOm7nxfL8obrxWazeEq3dUggjfpAIG+k4hqMYKKsEDQQQ42QYDWlQsu6HveZOd6Vv2Y9Lrui499YTIeh4K68uHcHIjBLKALIpWIabKYXMwwtI1pziyrf4h8QdQXhM8XHcFX87kLm4XuUA5J/fnjclSoH/h1+dfwbL6uI5NBDUxG9UwXJAEYrpahFrlSE3RJjtRVKakOVaRmcdUYU47HviqLy/9TCLjTRilGuJMYdRtOysFvU51pwakAgkplDdCpR574MSwW4vbNoKv454BkPJInGKZGAIT4GlWDwhZUlBH0hUNuU2luHRjNhjLBMccMpsIr313G6jcsLGXolg5XECAWA9ywrasJ33K7hV3vBn0ZS02D9mIdvu4hAqfr65a4exME4gJDl38iGN/wp
zbOgDADiypSLrY1EUwBlaBFspBRHIy4NhvgKDmMPcpEYQF8Ao3ygTJcTBQojSiGV4FwLoQF6ppIvurxQaX8qoQdBwIUGXFmllbbl6A+jQsK1REeCA/qDgorDuPTGaZRBwhoMUQQDjrcm+iVTiyzShUxtyGQUagyXJJB4TMG7odKuk5yYSPZCpOP4RFoSDEG1cFACwiorVBKEU7b1Yw90j3af5bQWTnhSJIrOt5nPYUe/y3AV4SNw571duTGl+cMs/HG7sduMuLSCSocZR1cJKraGEkNrzlEPo3Z+mFuMPtdVRgHlkvaoqYyvX9R1HByVVYNTFvjcV6//cogTC/mGX92RCIXs/e1Lyonk3WvxXQHOg3LUA1E2ghKhtOCKaZcAE6ysa9Bz4RzjM+BaA7LmMF2DBosYbVCTKNQ1gvL6WRTGaJxhdC6ZhkpFTcfKRh4UmV1YeGQWSutGWqi0HFlQ4gcwQUBaKwTG8+j92AOdp4SGgMYsTu9aUzuLEFyC4lKAZtL2ykLFaiz8JX45GFvnLzc5luEqj+Ni9I2agguwFH1ZVBn1iqBeM7m6Wx1B4x+vE3vREa10hEUTgqLNtFIUtMnKnY6rIvAkVgplLSgtuTX9agjTPG74vnrNtLLN7R/5ijUHX2jQrWbiigBSQaBPLTVCApV4TFuCJoNLjR6MFqw+vGx6HTX0+dTHZQm7UKQ6rAwWtbMUqBSUBEr1sClSH5bcgss/C2Yu6jAYbKAel1xRWSntHxpmvtHDA8Q6FtVGyRafXI6ZscRIsNJKZTVUap4HRgnr8wcqOH3w+k8AQypDuxgekiy/UMxroRFndXAMyzcfHx9PSXqSxGSXJOabsFqmucSwUAkrqS1BrBlB31tIQ6VSmmeq8VRZTFuP3NNKvLPAuWOviLoaDIZxvK2qU5CGCK4x1NK4W3nqtntF8ZKLbga0p8oOxogwlmpLMdxH6S57ZGCQL9JNnOEWbXQ+F3PvGN8opCXlxplwISrhnDuLRn/QUZZJmc9s6inIt/tViV1o2BUNQUgAqRi3tpqaVERJSq0x2lmhA8vTHAsNGjl0NaXGWKRS+ebOYjlXGnmqONWs3zERe8l3Dp2GlBNumJXo/8SlIIdmPN/gIZ5GKSUZhj1WgbT9Fs1aT6mbq4H/5BNeOBBwlosZdH91zVDGJZAIqaCGg5IeB7nnGS8tJudejFq31WlACRoVFGNKGf6BMmUY1QRdcYqEkhT1jTmwqQXTgijmKksE5Wi5oHYWJYXCS5Bagsl425MyAVpPfF6sWt8lt5YwKtGx4Wi0VHkQDxRDvWbQJoF0yXRzoG/FNBotUEqjRVOCCVs/jeCA3ptSzJ2q3+I2oEfK5fomAL78xh1zk2mAZzBH+CQTABU3AjSXaD9Fxd1hlqBLhSTkSHnmm/Db7yRAoBdP/NQ6i7u0Pwdj0JNyE0JE2RN3k8jRorlZTML549WE8j61B1S4ZivWoNYCZmqnYUZq0FZSx9CeldZBk04uPDxqROgq1CzlFjmg0H5WRplBEak0RfZgGMcZP7Ra08SNf7jQLsNuef0sRlI8D1AMP0XPTcsAWox6XXjYuQ+HPESOSABD0eevMIQTYYTgloPUGAkcqA+dF4cRBChjXV0Xr8zBc6fRysQ0RJ3Lac9eHLQIaj/48CtGi0Rb9K852i0M93TZl9KoR6T7EKWcaqo94+WaIFE0Ry1ilNXZAFGpm5JLT0ltuJv05IKDrtpjQWa3u+rLIZK+HML5xJfWHB4yARFotFz/Da5YdSo4WCCoAiQYpayQKPQ1NvXsmcNlxPDUloi7mbWSgjWComOcl+RkJoIhpaRA02CkRHNUHctrPVdOCfSs8ERobgxwWT+L5tqgjlIYMUreb4oc4JLVOjUPmbUEBKAbonkyYaFiCRVBHwUjO6aoslQf2tJCcSIlUyye26er9RR4FulIqp1bJJngPfOwOa31bvuJ1nOUpLXEdxfj6TOelEgN6GopDlrmvW3y0WSKxtMN
FFmlrWXKnNx4XtJaQ1Ba3MQxvpvaCWXKYPjlsgmxVaXWmgOHl53K0lajkUp4aVTtLMZtNpYZ9P1s3zprv6zWeLaY/PHzYzQf7W5W8EVSKvz0qxYz49HvYu2QkBOp8VN66nA2Xnz7ut2wZS4UWYuC/wZpq4GloS4WcxcXrDbZrS3wEWTbihf0Em0SYVEyfZuKSvZ2e1L35rXw5r1yknBsJ3ypHktosvOTw5I9dFcJWnGXzAWULyhbcslQ9ATKHnXGvjYfep8OWAqdXjwCBuAoiRUJB0ZQUWKYjmcxTFdH2xpkDwkTvBY+ltrFHbda6e6e2qamqxZUEgRWGVDUNTCoXDVwTigGh0xI1/rMpH0rtoohub4jqwlfk5w9R+y4z7W5DSfROlrMY0Y9JC/qs6vuUXQZnQXjeN2V18EP6nU8H48LYoyrY9CGMSVkvbwlzxL3soqCOGA8dxbNw6vs6mJ+ODGRDn737GjbbtDjtB3owBmxsxV0qddzU0PoA23IXo1FwQCjUgDCa2h12MN1KpSucwoD3G3b1VQdoxO0/wazORFtVu1xlv+t7HM5VT1ZPLnek7dbH6bkXEtNPY4FpTc3yfo+VeTzhLRxX5tGbj6JU3Z4IeE6k5PCkdJFlBoz3A3YtidGc30AdbUhHNBlxVBd5o0HsqknQCRHVxWjeQyy896DJdVDnFXSWgiJhpxpXWcFcsl1N0AfURnJTdXwHo8kbQbLOiLJjebSR5L7QJlk+9FIcjI2uOUkuJQGTYuREkPr+noRw6JDiyGrjugwNtKteFKng5mEk8lBdDgZ7OhWA0NUqRZacpoVsQ0W9Raplo5Qv6bWqwRCPvkwSgAUEBNnN7SigvtSa4NiQ4uMx8Uk7GCDMcT1h3HTciSvlg+6NjQYmTKuQKOX5FlCYFBcaNH79fx9yBNxwXCiwXJquLDCjQ8OnAuniyfOyzfYKf9UEG2EkfgBy0CqetuyQYHOThcffAzXYCcZ3OJRwCQiaFHZ2YG7iW3Wp3hPPVuu2Rti+ireTWg2m5IKQzqEXTDi0usIPBOK28oqgpIIbVwbGQwNlGa6niriQCwXaCKocpXlLFuyqjT2ygiqELfEDXXZBiaOsFid/259jUqOiXyuxw9DvjKW6zMcJ0OelUVeYGSojHFrQArU/pzVZX5g0LM69HHCeSRv/vIwkl9H+vov0Xcjffsbvlo+4wvc812NHidYi7kbeBEdRbc/lToeTjgv7GWefE9XZRTe+2HUg98xRbfRGjfb84os9m+UUcYISGmFUcwqwyvhuiSUAmeWKSqNVLJelTwoEc078nWmnZui79YGOI/re1PDMcRlXIXIO4FVoWxArzPAPPWYzTp1/vF1qiAi7pYiFMbDUpkqbsx1l9fg7CrP52ifUKd6HOEd+M0+OH7Mra5KhWsLwJjU3FS83YpNhLo+tf3C50l373BpPjl8LcSvK/j8pTasRTV7lpK4n4Uvaf3a9d5LJxo25kmmsWwPb+gtXN+NfKUGw6ohc7l8Nw+v8FOCloMgUtf27ltFdsXfOEtnE0D9fPfQIyl+WY58hTZPeDnR/Grj6kmcFqBLd28xvDTdN07rTxp2z8L7tEaHRvN8ZyH/5HbNNqt81/M8miym4dU4mkbJ3vBpjN5Ztv/bYjW9Gq/C4I9k73yxegpmo6bSH39NfKvK+vvgKZq9puqPqeBpGX+Vc1ehhYorGq+i2vbmk72zO0X7ruEN1f355mV126fE/n1w/BS5hq64+zbYBE7RJRJ7465F0X8nBjO+6X+g0ZzGVWZu52sUzqZrJ/D4/3qxmIUxRAmH5uO1+3OzmP/+PE8e0ZsYfzwwmwDz3WxXMtV6yg1rOG4JUM97r4iii4Uuzr9/g6LCSY9IM+cLPM6X//Dj7MBffrgtnHLceJl4/srJZ40nTwKv9PyO8/gqmCdErx82vi3W6rbm3dwWa3Vb89JtNbHjrPjCTskX1hlf2Cn5wvbjS4P29hDLzZBx5Ah3KPFMQ79U
fovkT3KlY1SuLspKFOs8zHcsg+k0LgjeHuQA4+k3Mi7ou0rjvNgaOJNRMSPnbIHeZ21+WOAuDH2fnPn/9hgmF3yx77tu9nlW3TKL9qBeN0D++KND0BXkzwKHxrcI41UUuWDtxCyOrdZvA+tupLaxfr9OUxW0wpnqi2Hi+IMXx8UqwqcWhxXZ7Kw+sJxGfx7hiXiNT3zo1pNaWlcY3ZmvcOtyPdtJJnJUmYNyFdPFN6ZSmNDyGE2nTh1WKmBqGcNDRkh25/eusoRZ3r+e1jJ4wo1ae4dBRHPS6V1Z2D2KTPtI4lUydq2eb0893TQnvJxRE9UWR60bt7mGWaVDSVDE9puf26Ok9HMjX57WqviBi9peAeW7j9Qx4Nnpdk6hn82i5Tps0KPFIZb1Ep01x43oxWnR/UqK1dcvd3VGwDiAkI3qU1TfVZrwhlbmVBNpDLgRT1dtUOmuoJQiDG2SMZILDlkZR3HQ0xJlBDPSaGaUyJu4l2oDGz5zdHXepjaw2LogLezaghdXB6w2j4uHBfol3y9iy+oQ/T3cbF5TUNyYWJkO9Q4g23YJ1ZIFsYfMl3sL+McRU6VcbC2wg/91JrRWD+9Dpk0flc8nfa6xIKBcGKNBU15Zj/PMpC+buH5yjO/uKL3zYHx3Z22yvU+MhSGuibcbs5ZUlquBlFXEdR8FAW6JT18b70Eh3GaBjI+lXzPF9aZ+Tdh/Mv3awp/9fLInqt7reYlbFnBdxK2J8CcTtzb9uD4WMu0dTXVKZLIu3hdFWFSEriuxpUZzIeNSK3rejuZQgolhYSwUAaupMUyAckCes+0TZ6Jhm1t6HlH3ZkrtbSf0tLp3KCm2QcmlWxBHMcoN6l/B3TICZy2XZ5JiG5hcnjT5JtqsADAAzI6ITNbQ7m1k2CmRkQNFpll6OsQsmZLzDiiaJgZkZMhGG7ORsb6mArQBuR+zeC21r7zg7tbWmtlswX1n/pNbYt2ic5YDV1Kach2AVCpeydCtYg9CSE8roiGZvzbtZz4ulm7ZXNdVQAC4Jd8Z8HPGMu+9+imxTEZ+rTVu3NcAnLVYsk+tYpmgxGDYbzGIoNKy81axbWZWfr6hYmbc6q5guEEjqnTesiuLIinHINOt6wOonrWqz24eFsayBun7fdp0CYjs9a+F1w2LP+wZnxSWZMnXsvg1P1bzwhZH95FbxDXspBFnNu7WdRiTr/oh9wD9MBBPGMxmTufQQVcX0I8HevbBoYNuLqAfEfQzUe9DiYIH5aK5Si/FNWjt8vy0WszHJaES4mZezrdiw/bQONQR7VKu95dqOBeZFm1l2p5SpvlQqjcHJdN4ORhXKWGNNdoKni3nfKZC3dPowgGRWGaqP5pQ85OWhco2uZQPMb50/GEjZnXcF5uiTdeMVdb3a0iq7T0titnKVLjuBqca1gxs4cs9ICGWjcLi0bCTxXyOBiBtLTdK+7A1q1nDykN0zNPfjXnbK0LlcR2vw1uX5rApO9ycTy4b0LdXMXwD7LeXm8t5TiQ3QkH6u46LoIQa6ladkdaiPHgm8DZ85OiYsU+O2TBR6XRs5gxQyWa8AydUK4vGLvldbwM8INA6HYQ5I9AU+uZQ0IBDxqxNad6xMMujseb4bTdmu6O2o2KZ7ZWWoK8G2i16LkweZmWWTpCtbOLv+hDagKBuUx19PPFMq2ab62zPDGoAIKIo1vUFEgaEdVaacYJU3DbdvlcybptvPzBy9yiVdFWefTlTjOS9n/SMre3yLd9sqnz0wN5/Nf0Ou8AhnKAfnhNyWJywF06cnhN6UJzodKCuukKLdP98qKj4p+4JJD+degIm7grPLdNuiTuqyvl7Q0kxIvMsjD0gPyAbHL7Idxfy7Sm32CVRA5Fv6Lfm5pNxwlONcQacyAexLpzogBOegb8z4EQ2un/hRCe2wzPb8AwChp5rfC6kOIeIATqtAzr/kAG4Jrw4jsOGHDNAfaLFRcR7jxkymRqIiPsk+kKKY5GCnSkpLh7iACLJgZEC
LsMMAwglB0aKnud5fTJSQFunYlixZM8zPi+kOItYss8i2jOMJZlrp1KMJQcdSl4kfAihJAxLwutjkhcGdBw3DosBrN/qtE9GitZx48BIMeS0425wDyXNEOPGo5Mi/WplEttuJwf9k+IEomxRg56aL2YT8wou6G04idbxku90swrD+E8QzeMFVeOt9SUe8/Xj6X0YbJ5X8bKjlTWQK3yvL/dYW2XRuZHRJJh9SXc8RdPpLBaMcB39J50857iVro6Jx5XXbg1kPBbKwjotsR5101VMGEOspNufSrm85cQzHQKUZ5aeOMIkPS+6yjdYteeKpdK3YunPjhHuit0hoqflzNHkaTENZ41rlH40/IGVW4xJI4jydKf2rT3IOgO8HlH+fbxJsLpHzYunjA9SF+FpsHFg5gvNfmzprbSHExlIBeBsr4LKGgV1vYwV69uCan2Cej0LJn9cXS/ccsl/S+XTuzg9XnZypo8qr7rSWB486QSvdu5OWH0zP48B+veLSbxq9NeX5SyYB5vEmN9G68kq3CASyYZPygO3irKuzneqU8Er/91RwTef9Cjyn3tmN7PgOV4XPrUD+P+naP4QW+7UvfwcDABLtCn4bW+rBe5trdAZF3yTEo7BhVs08evQeX2L+7r40zZO/OdgiJCE0kaGSCbz1biLCqNX26GbPfv3kQQ9+/k6fVTn6jKYI5kKroGISlhXWIq96DgYD/i8M/DbjBtd2hb5GwopQTjtvG0RnkXY8qF7XlZD+0KKC0nakYRTpEGBJK0WctifJRwIiIKhgdMyxtc1Yc9skfDZlL+G83AVbMJ61qFqWsa5D7J6nu10QIaTXDqGpQFeXpMgdyaKNgb6zEiYfusds3GIw9sW5y0WjjVqpSYmHN97XItKYcU0CM39ZA99dczVZE/bFBWYz8js6YYqn8r4IfzmUhZ5xPItQjEoxig3i/nvz/OJS2Wsk1srxi03o3K+Mx7BwGgXX5YSnaStL7t+DJbu5eR1FiHjnQZ5ox5nnIjG9+N8QzD54yEWmL8/b/Aomf7Yo/frVI2VVHUK3t/fs8nEQ9XkCP9MlQhLYJm4B1F693MsrW5ALN/0fXi/KX/oH6nWYcfRedJU0zBSW2JsPbDijPi6wh7gXuPb1cLxbGs2EbfHJMrhX/8f</diagram></mxfile>
|
2002.08247/main_diagram/main_diagram.pdf
ADDED
|
Binary file (70.7 kB). View file
|
|
|
2002.08247/paper_text/intro_method.md
ADDED
|
@@ -0,0 +1,301 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Method
|
| 2 |
+
|
| 3 |
+
All the architectures were built using Keras. The loss is categorical cross entropy; the Adam optimizer with a learning rate of 0.001 was used.
|
| 4 |
+
|
| 5 |
+
``` {.python language="Python" style="mystyle"}
|
| 6 |
+
model.add(Dense(128, activation='relu', input_dim=input_shape))
|
| 7 |
+
model.add(Dropout(0.5))
|
| 8 |
+
model.add(Dense(256, activation='relu'))
|
| 9 |
+
model.add(Dense(256, activation='relu'))
|
| 10 |
+
model.add(Dropout(0.5))
|
| 11 |
+
model.add(Dense(128, activation='relu'))
|
| 12 |
+
model.add(Dropout(0.5))
|
| 13 |
+
model.add(Dense(64, activation='relu'))
|
| 14 |
+
model.add(Dropout(0.5))
|
| 15 |
+
model.add(Dense(output_shape, activation='softmax'))
|
| 16 |
+
```
|
| 17 |
+
|
| 18 |
+
``` {.python language="Python" style="mystyle"}
|
| 19 |
+
model = Sequential()
|
| 20 |
+
model.add(Dense(25,
|
| 21 |
+
kernel_initializer=keras.initializers.glorot_normal(seed=0), kernel_regularizer=keras.regularizers.l2(1e-4)))
|
| 22 |
+
model.add(Activation('relu'))
|
| 23 |
+
model.add(Dense(10, kernel_initializer=keras.initializers.glorot_normal(seed=0), kernel_regularizer=keras.regularizers.l2(1e-4)))
|
| 24 |
+
model.add(BatchNormalization())
|
| 25 |
+
model.add(Activation('relu'))
|
| 26 |
+
model.add(Dropout(0.3))
|
| 27 |
+
model.add(Dense(num_classes, kernel_initializer=keras.initializers.glorot_normal(seed=0), activation='softmax'))
|
| 28 |
+
```
|
| 29 |
+
|
| 30 |
+
``` {.python language="Python" style="mystyle"}
|
| 31 |
+
model = Sequential()
|
| 32 |
+
model.add(Dense(20, kernel_initializer=keras.initializers.glorot_normal(seed=5), activation='relu'))
|
| 33 |
+
model.add(Dense(10, kernel_initializer=keras.initializers.glorot_normal(seed=5), activation='relu'))
|
| 34 |
+
model.add(Dense(num_classes, kernel_initializer=keras.initializers.glorot_normal(seed=5)))#, activation='softmax'))
|
| 35 |
+
model.add(Activation('softmax'))
|
| 36 |
+
```
|
| 37 |
+
|
| 38 |
+
``` {.python language="Python" style="mystyle"}
|
| 39 |
+
model = Sequential()
|
| 40 |
+
model.add(Dense(35, kernel_initializer=keras.initializers.glorot_normal(seed=5)))
|
| 41 |
+
model.add(Activation('relu'))
|
| 42 |
+
model.add(Dense(10, kernel_initializer=keras.initializers.glorot_normal(seed=5)))
|
| 43 |
+
model.add(BatchNormalization())
|
| 44 |
+
model.add(Activation('relu'))
|
| 45 |
+
model.add(Dense(num_classes, kernel_initializer=keras.initializers.glorot_normal(seed=5)))#, activation='softmax'))
|
| 46 |
+
model.add(Dropout(0.5))
|
| 47 |
+
model.add(Activation('softmax'))
|
| 48 |
+
```
|
| 49 |
+
|
| 50 |
+
``` {.python language="Python" style="mystyle"}
|
| 51 |
+
model = Sequential()
|
| 52 |
+
model.add(Dense(15, kernel_initializer=keras.initializers.glorot_normal(seed=0), activation='relu'))
|
| 53 |
+
model.add(Dense(10, kernel_initializer=keras.initializers.glorot_normal(seed=0), activation='relu'))
|
| 54 |
+
model.add(Dropout(0.2))
|
| 55 |
+
model.add(Dense(num_classes, kernel_initializer=keras.initializers.glorot_normal(seed=0), activation='softmax'))
|
| 56 |
+
```
|
| 57 |
+
|
| 58 |
+
``` {.python language="Python" style="mystyle"}
|
| 59 |
+
model = Sequential()
|
| 60 |
+
model.add(Dense(40, kernel_initializer=keras.initializers.glorot_normal(seed=0), activation='relu'))
|
| 61 |
+
model.add(Dense(25, kernel_initializer=keras.initializers.glorot_normal(seed=0), activation='relu'))
|
| 62 |
+
model.add(Dense(10, kernel_initializer=keras.initializers.glorot_normal(seed=0), activation='relu'))
|
| 63 |
+
model.add(Dropout(0.2))
|
| 64 |
+
model.add(Dense(num_classes, kernel_initializer=keras.initializers.glorot_normal(seed=0), activation='softmax'))
|
| 65 |
+
```
|
| 66 |
+
|
| 67 |
+
::: small
|
| 68 |
+
``` {caption="We present below the top 5 boolean rule based features used by Logistic regression (with L1 penalty) on the Sky Survey Dataset along with their feature importances."}
|
| 69 |
+
|
| 70 |
+
GBFL rank 1 feature
|
| 71 |
+
|
| 72 |
+
1.51 >= dirf1 >= 0.0 & 0.66 >= dirf2 >= 0.0 &
|
| 73 |
+
0.26 >= dirf3 >= 0.0 & 629.05 >= fiberid >= 419.36 &
|
| 74 |
+
0.80 >= redshift >= 0.0 & 233.35 >= ra >= 137.26 &
|
| 75 |
+
57481 >= mjd >= 42354.42 & 14.42 >= dec >= 0.0 &
|
| 76 |
+
1770.52 >= plate >= 0.0
|
| 77 |
+
|
| 78 |
+
|
| 79 |
+
GBFL rank 2 feature
|
| 80 |
+
|
| 81 |
+
2.53 >= dirf1 >= 0.50 &
|
| 82 |
+
0.49 >= dirf2 >= 0.0 &
|
| 83 |
+
0.35 >= dirf3 >= 0.0 &
|
| 84 |
+
2213.15 >= plate >= 442.63 &
|
| 85 |
+
14.42 >= dec >= 0.0 &
|
| 86 |
+
0.80 >= redshift >= 0.0 &
|
| 87 |
+
262.10 >= fiberid >= 52.42 &
|
| 88 |
+
164.72 >= ra >= 109.81 &
|
| 89 |
+
57481 >= mjd >= 42354.42
|
| 90 |
+
|
| 91 |
+
|
| 92 |
+
GBFL rank 3 feature
|
| 93 |
+
|
| 94 |
+
1.51 >= dirf1 >= 0.0 &
|
| 95 |
+
0.49 >= dirf2 >= 0.0 &
|
| 96 |
+
0.35 >= dirf3 >= 0.0 &
|
| 97 |
+
260.81 >= ra >= 233.35 &
|
| 98 |
+
0.80 >= redshift >= 0.0 &
|
| 99 |
+
524.21 >= fiberid >= 157.26 &
|
| 100 |
+
14.42 >= dec >= 0.0 &
|
| 101 |
+
1770.52 >= plate >= 0.0 &
|
| 102 |
+
57481 >= mjd >= 42354.42
|
| 103 |
+
|
| 104 |
+
|
| 105 |
+
GBFL rank 4 feature
|
| 106 |
+
|
| 107 |
+
2.53 >= dirf1 >= 0.50 &
|
| 108 |
+
0.49 >= dirf2 >= 0.0 &
|
| 109 |
+
0.44 >= dirf3 >= 0.089 &
|
| 110 |
+
576.63 >= fiberid >= 366.94 &
|
| 111 |
+
260.81 >= ra >= 233.35 &
|
| 112 |
+
14.42 >= dec >= 0.0 &
|
| 113 |
+
0.80 >= redshift >= 0.0 &
|
| 114 |
+
1770.52 >= plate >= 0.0 &
|
| 115 |
+
57481.0 >= mjd >= 42354.42
|
| 116 |
+
|
| 117 |
+
GBFL rank 5 feature
|
| 118 |
+
|
| 119 |
+
2.53 >= dirf1 >= 0.50 &
|
| 120 |
+
0.49 >= dirf2 >= 0.0 &
|
| 121 |
+
0.35 >= dirf3 >= 0.0 &
|
| 122 |
+
10.82 >= dec >= 0.0 &
|
| 123 |
+
52.42 >= fiberid >= 0.0 &
|
| 124 |
+
178.44 >= ra >= 123.54 &
|
| 125 |
+
0.80 >= redshift >= 0.0 &
|
| 126 |
+
1770.52 >= plate >= 0.0 &
|
| 127 |
+
57481.0 >= mjd >= 42354.42
|
| 128 |
+
```
|
| 129 |
+
:::
|
| 130 |
+
|
| 131 |
+
::: small
|
| 132 |
+
``` {caption="We present below the top 5 boolean rule based features used by the decision tree on the WDBC dataset ranked by their importance."}
|
| 133 |
+
|
| 134 |
+
|
| 135 |
+
GBFL rank 1 feature
|
| 136 |
+
|
| 137 |
+
3575.86>=n2_area>=863.33 &
|
| 138 |
+
36.04>=n2_radius>=17.29 &
|
| 139 |
+
n1_fractald<=0.01 & n0_concavity<=0.28 &
|
| 140 |
+
n1_area<=363.87 & n1_compactness<=0.09 &
|
| 141 |
+
n1_concavepts<=0.03 & n0_symmetry>=0.13 &
|
| 142 |
+
n2_concavity<=0.83 & n2_fractald<=0.15 &
|
| 143 |
+
n0_area<=2108.08 & n0_smoothness<=0.14 &
|
| 144 |
+
n0_fractald>=0.04 & n0_concavepts<=0.16 &
|
| 145 |
+
n2_texture<=43.28 & n2_smoothness<=0.19 &
|
| 146 |
+
n0_perimeter<=188.5 & n2_symmetry>=0.15 &
|
| 147 |
+
n2_concavepts<=0.27 & n0_texture>=9.71 &
|
| 148 |
+
n0_compactness<=0.29 & n1_radius>=0.11 &
|
| 149 |
+
n1_texture>=0.36 & n1_perimeter>=0.75 &
|
| 150 |
+
n1_smoothness>=0 & n1_concavity>=0 &
|
| 151 |
+
n1_symmetry>=0 & n2_perimeter<=251.2 &
|
| 152 |
+
n0_radius>=10.5 & n2_compactness<=0.71
|
| 153 |
+
|
| 154 |
+
|
| 155 |
+
GBFL rank 2 feature
|
| 156 |
+
|
| 157 |
+
n0_concavity>=0.07 & n2_concavepts>=0.13 &
|
| 158 |
+
7.22<=n1_area<=274.71 &
|
| 159 |
+
0<=n1_fractald<=0.01 &
|
| 160 |
+
185.2<=n2_area<=2219.6 &
|
| 161 |
+
n0_perimeter<=140.26 &
|
| 162 |
+
0<=n1_compactness<=0.09 &
|
| 163 |
+
0<=n1_concavepts<=0.03 &
|
| 164 |
+
7.93<=n2_radius<=26.66 &
|
| 165 |
+
0.01<=n0_compactness<=0.29 &
|
| 166 |
+
0<=n0_concavity<=0.35 &
|
| 167 |
+
12.02<=n2_texture<=43.28 &
|
| 168 |
+
0.02<=n2_compactness<=0.88 &
|
| 169 |
+
0.05<=n2_fractald<=0.18 &
|
| 170 |
+
0.07<=n0_smoothness<=0.16 &
|
| 171 |
+
0.12<=n2_smoothness<=0.22 &
|
| 172 |
+
0.2<=n2_concavity<=1.25 &
|
| 173 |
+
0.09<=n2_concavepts<=0.27 &
|
| 174 |
+
21.06>=n0_radius>=6.98 &
|
| 175 |
+
n0_texture>=9.71 & n0_concavepts>=0.0 &
|
| 176 |
+
1322.25>=n0_area>=143.5 &
|
| 177 |
+
n0_fractald>=0.04 & 1.49>=n1_radius>=0.11 &
|
| 178 |
+
2.62>=n1_texture>=0.36 &
|
| 179 |
+
11.36>=n1_perimeter>=0.75 &
|
| 180 |
+
n1_smoothness>=0 & n1_concavity>=0 &
|
| 181 |
+
0.04>=n1_symmetry>=0 &
|
| 182 |
+
n2_perimeter>=50.41 &
|
| 183 |
+
n2_symmetry>=0.15 & n0_symmetry>=0.13
|
| 184 |
+
|
| 185 |
+
|
| 186 |
+
GBFL rank 3 feature
|
| 187 |
+
|
| 188 |
+
n0_concavity>=0.07 & n0_texture>=19.56 &
|
| 189 |
+
n1_area<=274.71 & n1_compactness<=0.06 &
|
| 190 |
+
n1_fractald<=0.01 & n2_compactness<=0.54 &
|
| 191 |
+
n2_fractald<=0.13 & n0_compactness<=0.23 &
|
| 192 |
+
n1_concavepts<=0.03 & n2_area<=2897.73 &
|
| 193 |
+
n2_concavity<=0.83 & n0_area<=2108.08 &
|
| 194 |
+
n0_smoothness<=0.14 & n0_concavity<=0.35 &
|
| 195 |
+
n0_concavepts<=0.16 & n1_smoothness<=0.01 &
|
| 196 |
+
n2_radius<=31.35 & n0_texture<=39.28 &
|
| 197 |
+
n0_perimeter<=188.5 & n2_texture<=49.54 &
|
| 198 |
+
n2_smoothness<=0.2226 & n0_symmetry>=0.106 &
|
| 199 |
+
n0_fractald>=0.04 & n1_radius>=0.11 &
|
| 200 |
+
n1_texture>=0.36 & n1_perimeter>=0.75 &
|
| 201 |
+
n1_concavity>=0 & n1_symmetry>=0 &
|
| 202 |
+
n2_perimeter>=50.41 & n2_symmetry>=0.15 &
|
| 203 |
+
n0_radius>=10.50 & n2_concavepts>=0.04
|
| 204 |
+
|
| 205 |
+
GBFL rank 4 feature
|
| 206 |
+
|
| 207 |
+
nucleus2_area >= 863.33 & nucleus2_radius >= 17.29 &
|
| 208 |
+
nucleus1_compactness <= 0.06 & nucleus1_fractal_dim <= 0.01 &
|
| 209 |
+
nucleus2_fractal_dim <= 0.13 & nucleus1_area <= 363.87 &
|
| 210 |
+
nucleus1_concave_pts <= 0.03 & nucleus2_compactness <= 0.71 &
|
| 211 |
+
nucleus0_smoothness <= 0.14 & nucleus0_compactness <= 0.29 &
|
| 212 |
+
nucleus2_texture <= 43.28 & nucleus2_concavity <= 1.04 &
|
| 213 |
+
nucleus0_perimeter <= 188.5 & nucleus0_area <= 2501.0 &
|
| 214 |
+
nucleus0_concavity <= 0.42 & nucleus0_concave_pts <= 0.20 &
|
| 215 |
+
nucleus2_radius <= 36.04 & nucleus2_area <= 4254.0 &
|
| 216 |
+
nucleus2_smoothness <= 0.22 & nucleus0_texture >= 9.71 &
|
| 217 |
+
nucleus0_symmetry >= 0.10 & nucleus0_fractal_dim >= 0.04 &
|
| 218 |
+
nucleus1_radius >= 0.11 & nucleus1_texture >= 0.36 &
|
| 219 |
+
nucleus1_perimeter >= 0.75 & nucleus1_smoothness >= 0 &
|
| 220 |
+
nucleus1_concavity >= 0 & nucleus1_symmetry >= 0 &
|
| 221 |
+
nucleus2_symmetry >= 0.15 & nucleus0_radius >= 14.02 &
|
| 222 |
+
nucleus2_perimeter >= 117.33 & nucleus2_concave_pts >= 0.09
|
| 223 |
+
|
| 224 |
+
GBFL rank 5 feature
|
| 225 |
+
|
| 226 |
+
nucleus2_concave_pts >= 0.13 & 7.22 <= nucleus1_area <= 274.71 &
|
| 227 |
+
0 <= nucleus1_fractal_dim <= 0.01 & 185.2 <= nucleus2_area <= 2219.6 &
|
| 228 |
+
0.01 <= nucleus0_compactness <= 0.23 & 0 <= nucleus0_concavity <= 0.28 &
|
| 229 |
+
0 <= nucleus0_concave_pts <= 0.13 & 0 <= nucleus1_smoothness <= 0.01 &
|
| 230 |
+
0 <= nucleus1_compactness <= 0.09 & 0 <= nucleus1_concave_pts <= 0.03 &
|
| 231 |
+
7.93 <= nucleus2_radius <= 26.66 & 0.02 <= nucleus2_compactness <= 0.71 &
|
| 232 |
+
0 <= nucleus2_concavity <= 0.83 & 0.05 <= nucleus2_fractal_dim <= 0.15 &
|
| 233 |
+
43.79 <= nucleus0_perimeter <= 164.38 & 0.05 <= nucleus0_smoothness <= 0.14 &
|
| 234 |
+
0.07 <= nucleus2_smoothness <= 0.19 & 18.27 <= nucleus2_texture <= 49.54 &
|
| 235 |
+
0.04 <= nucleus2_concave_pts <= 0.27 & nucleus0_radius >= 6.98 &
|
| 236 |
+
nucleus0_texture >= 9.71 & nucleus0_area >= 143.5 &
|
| 237 |
+
nucleus0_symmetry >= 0.10 & nucleus0_fractal_dim >= 0.04 &
|
| 238 |
+
1.49 >= nucleus1_radius >= 0.11 & nucleus1_texture >= 0.36 &
|
| 239 |
+
nucleus1_perimeter >= 0.75 & nucleus1_concavity >= 0.0 &
|
| 240 |
+
0.04 >= nucleus1_symmetry >= 0 & nucleus2_perimeter >= 50.41 &
|
| 241 |
+
0.42 >= nucleus2_symmetry >= 0.15
|
| 242 |
+
```
|
| 243 |
+
:::
|
| 244 |
+
|
| 245 |
+
::: small
|
| 246 |
+
``` {caption="Top 5 boolean rule based features used by the decision tree on the Magic dataset ranked by their importance."}
|
| 247 |
+
|
| 248 |
+
GBFL rank 1 feature
|
| 249 |
+
|
| 250 |
+
fSize >= 2.937951724137931 &
|
| 251 |
+
fSize <= 3.629234482758621 &
|
| 252 |
+
fAlpha <= 6.206896551724138 &
|
| 253 |
+
fLength >= 0.0 &
|
| 254 |
+
fWidth >= 0.0 &
|
| 255 |
+
fM3Long >= 0.0 &
|
| 256 |
+
fAlpha >= 0.0
|
| 257 |
+
|
| 258 |
+
GBFL rank 2 feature
|
| 259 |
+
|
| 260 |
+
fM3Long <= 0.0 &
|
| 261 |
+
fAlpha <= 9.310344827586206 &
|
| 262 |
+
fLength >= 0.0 &
|
| 263 |
+
fWidth >= 0.0 &
|
| 264 |
+
fAlpha >= 0.0 &
|
| 265 |
+
fSize >= 2.073848275862069
|
| 266 |
+
|
| 267 |
+
GBFL rank 3 feature
|
| 268 |
+
|
| 269 |
+
fSize >= 2.7651310344827587 &
|
| 270 |
+
fAlpha <= 34.13793103448276 &
|
| 271 |
+
fSize <= 3.456413793103448 &
|
| 272 |
+
fWidth <= 15.207889655172414 &
|
| 273 |
+
fLength >= 0.0 &
|
| 274 |
+
fWidth >= 0.0 &
|
| 275 |
+
fM3Long >= 0.0
|
| 276 |
+
|
| 277 |
+
GBFL rank 4 feature
|
| 278 |
+
|
| 279 |
+
fWidth >= 30.415779310344828 &
|
| 280 |
+
fSize >= 2.937951724137931 &
|
| 281 |
+
fWidth <= 60.831558620689655 &
|
| 282 |
+
fSize <= 3.629234482758621 &
|
| 283 |
+
fM3Long <= 0.0 &
|
| 284 |
+
fLength >= 0.0 &
|
| 285 |
+
fAlpha >= 12.413793103448276
|
| 286 |
+
|
| 287 |
+
GBFL rank 5 features
|
| 288 |
+
|
| 289 |
+
fM3Long >= 15.961793103448276 &
|
| 290 |
+
fM3Long <= 47.88537931034483 &
|
| 291 |
+
fWidth <= 7.603944827586207 &
|
| 292 |
+
fLength <= 34.57003448275862 &
|
| 293 |
+
fAlpha <= 21.724137931034484 &
|
| 294 |
+
fLength >= 0.0 &
|
| 295 |
+
fWidth >= 0.0 &
|
| 296 |
+
fAlpha >= 9.310344827586206 &
|
| 297 |
+
fSize >= 2.073848275862069
|
| 298 |
+
```
|
| 299 |
+
:::
|
| 300 |
+
|
| 301 |
+
**Remark:** Although, the method in [@macem] does not really perform the constrained optimization but uses regularization like 'Elasticnet' penalty to impose sparsity, we will assume that our PPs and PNs are the result of these optimizations just for simplicity of exposition. The only difference is that the sparsity $k$ cannot be pre-determined but is typically a constant for many training samples in practice.
|
2103.13841/main_diagram/main_diagram.drawio
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2103.13841/paper_text/intro_method.md
ADDED
|
@@ -0,0 +1,90 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Introduction
|
| 2 |
+
|
| 3 |
+
As deep neural networks progress to dramatically improve results in most standard computer vision tasks, there is a growing community interest in more ambitious goals. One of them is to improve the data efficiency of the standard supervised methods that rely on large amounts of expensive and time-consuming hand-labeled data. Just as human intelligence is capable of learning concepts from few labeled samples, *few-shot learning* [24, 33] aims at adapting a classifier to accommodate new classes not seen in training, given a few labeled samples from these classes.
|
| 4 |
+
|
| 5 |
+
Earlier works in few-shot learning focus on evaluating their methods in homogeneous learning tasks, *e.g.* Omniglot [25], miniImageNet [53], tieredImageNet [43], where both the meta-train and meta-test examples are sampled from a single data distribution (or dataset). Recently, the interest of the community has shifted to a more realistic and challenging experimental setting, where the goal is to learn few-shot models that can generalize not only within
|
| 6 |
+
|
| 7 |
+

|
| 8 |
+
|
| 9 |
+
<span id="page-0-0"></span>Figure 1. Universal Representation Learning (URL). To learn universal representations from multiple domains that can generalize to previously unseen domains, one strategy [13, 29] is to learn one feature extractor for each domain and learn to retrieve or combine feature extractors for the target task during meta-test stage as in (a). We propose a universal representation network (b) which is learned by distilling knowledge learned from multiple datasets $\{f_{\phi_*^*}\}_{\tau}^K$ to one single feature extractor $f_{\phi}$ shared across all domains. In meta-test stage, we use a linear transformation $A_{\vartheta}$ that further refines the universal representations for better generalization to unseen domains. Our universal representation network achieves better generalization performance than using multiple domain-specific ones while being more efficient than (a).
|
| 10 |
+
|
| 11 |
+
a single data distribution but also to previously unseen data distributions. To this end, Triantafillou *et al.* [52] propose a new heterogeneous benchmark, Meta-Dataset that consists of ten datasets from different domains for meta-training and meta-test. While, initially two domains were kept as unseen domains, later three more unseen domains are included to meta-test the generalization ability of learned models.
|
| 12 |
+
|
| 13 |
+
While the few-shot methods [14, 48, 49, 53], which were proposed before Meta-Dataset was available, can be directly applied to this new benchmark with minor modifi-
|
| 14 |
+
|
| 15 |
+
cations, they fail to cope with domain gap between train and test datasets and thus obtain subpar performance on Meta-Dataset. Recently several few-shot learning methods are proposed to address this challenge, which can be coarsely grouped into two categories, adaptation [\[2,](#page-8-6) [44\]](#page-9-5) and feature selection based methods [\[13,](#page-8-3) [29\]](#page-8-4). CNAPS [\[44\]](#page-9-5) consists of an adaptation network that modulates the parameters of both a feature extractor and classifier for new categories by encoding the data distribution of few training samples. Simple CNAPS [\[2\]](#page-8-6) extends CNAPS by replacing its parametric classifier with a non-parametric classifier based on Mahalanobis distance and shows that adapting the classifier from few samples is not necessary for good performance. SUR [\[13\]](#page-8-3) and URT [\[29\]](#page-8-4) further show that adaptation for the feature extractor can also be replaced by a feature selection mechanism. In particular, both methods [\[13,](#page-8-3) [29\]](#page-8-4) learn a separate deep network for each training dataset in an offline stage, employ them to extract multiple features for each image, and then select the optimal set of features either based on a similarity measure [\[13\]](#page-8-3) or on learning an attention mechanism [\[29\]](#page-8-4). However, despite their good performance, SUR and URT are computationally expensive and require multiple forward passes through multiple networks during inference time.
|
| 16 |
+
|
| 17 |
+
In this work, we propose an efficient and high performance few-shot method based on multi-domain learning. Like [\[13,](#page-8-3) [29\]](#page-8-4), our method builds on multi-domain representations that are learned in an offline stage. However, we learn a single set of *universal* representations (a single deep neural network) over multiple domains which has a fixed computational cost regardless of the number of domains at inference unlike them. Similar to the adaptation based techniques [\[2,](#page-8-6) [44\]](#page-9-5), our method further employs a simple adaptation strategy to learn the domain specific representations from few samples (an illustration in Fig. [1\)](#page-0-0).
|
| 18 |
+
|
| 19 |
+
In particular, we propose to *distill* the knowledge from multiple domains to a single model, which can efficiently leverage useful information from multiple diverse domains. Learning multi-domain representations is a challenging task and requires to leverage commonalities in the domains while minimizing interference (negative transfer) (*e.g*. [\[8,](#page-8-7) [41,](#page-9-6) [56\]](#page-9-7)) between them. To mitigate this, we align the intermediate representations of our multi-domain network with the ones of the domain-specific networks after carefully aligning each space by using small task-specific adapters and Centered Kernel Alignment (CKA) [\[22\]](#page-8-8). Finally, inspired from the use of Mahalanobis distance in [\[2\]](#page-8-6), we adapt the learned multi-domain features into the new task by mapping them into a task-specific space. However, unlike [\[2\]](#page-8-6), we *learn* the parameters of this mapping via adaptation in a discriminative way. We rigorously evaluate our method in Meta-Dataset benchmark and show that our method outperforms the stateof-the-art few-shot methods significantly in both seen and unseen domain generalization.
|
| 20 |
+
|
| 21 |
+
# Method
|
| 22 |
+
|
| 23 |
+
In this section, we describe the problem setting, introduce our method in two parts, multi-domain feature learning and feature adaptation.
|
| 24 |
+
|
| 25 |
+
Few-shot classification aims at learning to classify samples from a small training set with only a few samples for each class. The task contains two sets of images: a support set $\mathcal{S} = \{(\boldsymbol{x}_i, y_i)\}_{i=1}^{|\mathcal{S}|}$ that contains $|\mathcal{S}|$ image and label pairs that define the classification task, and a query set $\mathcal{Q} = \{(\boldsymbol{x}_j)\}_{j=1}^{|\mathcal{Q}|}$ that contains $|\mathcal{Q}|$ samples to be classified. In words, we would like to learn a classifier on the support set that can accurately predict the labels of the query set.
|
| 26 |
+
|
| 27 |
+
As in [13, 29], we solve this problem in two steps: i)
|
| 28 |
+
|
| 29 |
+
a meta-training step where a learning algorithm receives a large dataset $\mathcal{D}_b$ and outputs a general feature extractor f, ii) a meta-test step where the target tasks $(\mathcal{S}, \mathcal{Q})$ are sampled from another large dataset $\mathcal{D}_t$ by taking the subsets of the dataset to build $\mathcal{S}$ and $\mathcal{Q}$ . Note that $\mathcal{D}_b$ and $\mathcal{D}_t$ contain mutually exclusive classes.
|
| 30 |
+
|
| 31 |
+
Our focus is to learn few-shot image classification that generalizes not only within previously seen visual domains but also to unseen ones. As it is challenging to obtain the domain-specific knowledge from only few samples in a previously unseen domain, inspired from [3, 41] we hypothesize that using domain-agnostic or universal representations is the key to the success of cross-domain generalization. To this end, we propose learning a multi-domain network that works well for all the domain-specific tasks simultaneously and use this network as a feature extractor for the target tasks.
|
| 32 |
+
|
| 33 |
+
Let us assume that $\mathcal{D}_b$ consists of K subdatasets, each sampled from a different domain. One potential solution is to train a multi-domain network by jointly optimizing its parameters over the images from all K domains (datasets):
|
| 34 |
+
|
| 35 |
+
<span id="page-2-0"></span>
|
| 36 |
+
$$\min_{\phi, \psi_{\tau}} \sum_{\tau=1}^{K} \frac{1}{|\mathcal{D}_{\tau}|} \sum_{\boldsymbol{x}, y \in \mathcal{D}_{\tau}} \ell(h_{\psi_{\tau}} \circ f_{\phi}(\boldsymbol{x}), y), \tag{1}$$
|
| 37 |
+
|
| 38 |
+
where $\ell$ is cross-entropy loss, f is a multi-domain feature extractor that takes an image as input and outputs a d dimensional feature and is parameterized by a single set of parameters $\phi$ which is shared across K domains. h is a domain-specific classifier that takes in $f_{\phi}(x)$ and outputs a probability vector over the target categories and it is parameterized by $\psi_{\tau}$ . While minimizing Eq. (1) results in a multi-domain feature extractor f, several previous works report that this optimization is problematic due to the interference between the different tasks [8, 56], varying dataset sizes and difficulty [20, 27] and often leads to subpar results compared to individual single-domain networks.
|
| 39 |
+
|
| 40 |
+
Motivated by this challenge, we propose a two-stage procedure to learn multi-domain representations, inspired by the previous distillation methods [17, 27]. To this end, we first train domain-specific deep networks where each consists of a specific feature extractor $f_{\phi_{\tau}^*}$ and classifier $h_{\psi_{\tau}^*}$ with parameters $\phi_{\tau}^*$ and $\psi_{\tau}^*$ respectively, similarly to [13, 29]. However, instead of using K domain-specific feature extractors and selecting the most relevant feature like them, we propose to learn a single multi-domain network that performs well in K domains by distilling the knowledge of K pretrained feature extractors. This has two key advantages over [13, 29]. First, using a single feature extractor, which has the same capacity as each domain-specific one, is significantly more efficient in terms of run-time and number
|
| 41 |
+
|
| 42 |
+

|
| 43 |
+
|
| 44 |
+
<span id="page-3-0"></span>Figure 2. Illustration of our proposed method for multi-domain feature learning. Given training images from K different domains, we first train K domain-specific networks $f_{\phi_1^*}, \ldots, f_{\phi_K^*}$ and their classifiers $h_{\psi_1^*}, \ldots, h_{\psi_K^*}$ , freeze their weights and distill their knowledge to our multi-domain network by matching their features and predictions through two loss functions $\ell^f$ and $\ell^p$ respectively. As matching multiple features is challenging, we co-align all the features by using light-weight adaptors $A_{\theta_1}, A_{\theta_2}, \ldots, A_{\theta_K}$ and centered kernel alignment.
|
| 45 |
+
|
| 46 |
+
of parameters in the meta-test stage. Second learning to find the most relevant features for a given support and query set in [29] is not trivial and may also suffer from overfitting to the small number of datasets in the training set, while the multi-domain representations automatically contain the required information from the relevant domains.
|
| 47 |
+
|
| 48 |
+
In the second stage, we freeze the pretrained domain-specific feature extractors $f_{\phi^*}$ and transfer their knowledge into the multi-domain model at train time. Knowledge distillation can be performed at the prediction [17] and feature level [27, 45] by minimizing the distance between (i) the predictions of the multi-domain and corresponding single-domain network, and also between (ii) the multi-domain and single-domain features for given training samples. While Kullback-Leibler (KL) divergence is the standard choice for the predictions in [17], matching the multi-domain features to multiple single-domain ones simultaneously is an ill-posed problem, as the domain-specific features for a given image x can vary significantly across different domains. To this end, as in [27], we propose to map each domain-specific feature into a common space by using adaptors $A_{\theta_{\tau}} \in \mathbb{R}^{d \times d}$ with parameters $\theta_{\tau}$ and jointly train them along with the parameters of the multi-domain network:
|
| 49 |
+
|
| 50 |
+
$$\min_{\phi, \psi_{\tau}, \theta_{\tau}} \sum_{\tau=1}^{K} \frac{1}{|\mathcal{D}_{\tau}|} \sum_{\boldsymbol{x}, y \in \mathcal{D}_{\tau}} \left( \ell(h_{\psi_{\tau}} \circ f_{\phi}(\boldsymbol{x}), y) + \lambda_{\tau}^{p} \ell^{p}(h_{\psi_{\tau}} \circ f_{\phi}(\boldsymbol{x}), h_{\psi_{\tau}^{*}} \circ f_{\phi_{\tau}^{*}}(\boldsymbol{x})) + \lambda_{\tau}^{f} \ell^{f}(A_{\theta_{\tau}} \circ f_{\phi}(\boldsymbol{x}), f_{\phi_{\tau}^{*}}(\boldsymbol{x})) \right) \tag{2}$$
|
| 51 |
+
|
| 52 |
+
where $\ell^p$ is KL divergence on network predictions, $\ell^f$ is a distance function in the feature space, $\lambda^p_{\tau}$ and $\lambda^f_{\tau}$ are their
|
| 53 |
+
|
| 54 |
+
domain-specific weights. We illustrate this key idea in Fig. 2. In words, the multi-domain network is optimized to match the domain-specific features up to a transformation (i.e. $A_{\theta_{\tau}}$ ) and predict the ground-truth classes $y_{\tau}$ .
|
| 55 |
+
|
| 56 |
+
While Li and Bilen [27] show that L2 distance is effective to match the features across task-agnostic and task-specific networks, which are trained for different tasks on a single domain, here we argue that learning to match features that are trained on substantially diverse domains requires a more complex distance function to model non-linear correlations between the representations. To this end, inspired by [22], we propose to adopt the Centered Kernel Alignment (CKA) [22] similarity index with the RBF kernel that is shown to be capable of measuring meaningful non-linear similarities between representations of higher dimension than the number of data points.
|
| 57 |
+
|
| 58 |
+
Next we briefly describe CKA. Suppose $\mathbf{M} = [f_{\phi}(\boldsymbol{x}_1), \dots, f_{\phi}(\boldsymbol{x}_n)]^{\top} \in \mathbb{R}^{n \times d}$ and $\mathbf{Y} = [f_{\phi_{\tau}^*}(\boldsymbol{x}_1), \dots, f_{\phi_{\tau}^*}(\boldsymbol{x}_n)]^{\top} \in \mathbb{R}^{n \times d}$ denote the features that are computed by the multi-domain and domain-specific networks respectively for a given set of images $\{\boldsymbol{x}_1, \dots, \boldsymbol{x}_n\}$ . We first compute the Radial Basis Function kernel matrices $\mathbf{P}$ and $\mathbf{T}$ of $\mathbf{M}$ and $\mathbf{Y}$ respectively. Then we use the two kernel matrices $\mathbf{P}$ and $\mathbf{T}$ to measure the dissimilarity of $\mathbf{M}$ and $\mathbf{Y}$ as follows:
|
| 59 |
+
|
| 60 |
+
$$\ell^f(\mathbf{M}, \mathbf{Y}) = 1 - \text{tr}(\mathbf{PHTH}) / \sqrt{\text{tr}(\mathbf{PHPH})\text{tr}(\mathbf{THTH})},$$
|
| 61 |
+
(3)
|
| 62 |
+
|
| 63 |
+
where tr and $\mathbf{H}$ denote the trace of a matrix and centering matrix $\mathbf{H}_n = \mathbf{I}_n - \frac{1}{n}\mathbf{1}\mathbf{1}^{\top}$ respectively, the second term is the CKA similarity between the multi-domain and domain-
|
| 64 |
+
|
| 65 |
+
specific features. As the original CKA similarity requires the computation of the kernel matrices over the whole datasets, which is not scalable to large datasets, we follow [34] and compute them over each minibatch in our training. We refer to [22, 34] for more details.
|
| 66 |
+
|
| 67 |
+
During meta-test, given a support set $\mathcal{S} = \{(\boldsymbol{x}_i, y_i)\}_{i=1}^{|\mathcal{S}|}$ of a new learning task, we use the multi-domain model to extract features $\{f_{\phi}(\boldsymbol{x}_i)\}_{i=1}^{|\mathcal{S}|}$ and adapt them to the target task. To this end, we apply a linear transformation $A_{\vartheta}: \mathbb{R}^d \to \mathbb{R}^d$ with learnable parameters $\vartheta$ to the computed features, i.e. $\{\boldsymbol{z}_i\}_{i=1}^{|\mathcal{S}|} = \{A_{\vartheta} \circ f_{\phi}(\boldsymbol{x}_i)\}_{i=1}^{|\mathcal{S}|}$ where $\vartheta \in \mathbb{R}^{d \times d}$ . Then we follow a similar pipeline to the one in [13, 32, 48] to build a centroid classifier by averaging the embeddings belonging to this class:
|
| 68 |
+
|
| 69 |
+
$$c_j = \frac{1}{|\mathcal{S}_j|} \sum_{\boldsymbol{z}_i \in \mathcal{S}_j} \boldsymbol{z}_i, \quad \mathcal{S}_j = \{\boldsymbol{z}_k : y_k = j\}, \quad j = 1, \dots, C \tag{4}$$
|
| 70 |
+
|
| 71 |
+
where C is the number of classes in the support set. Next we estimate the likelihood of a support sample z by:
|
| 72 |
+
|
| 73 |
+
<span id="page-4-1"></span>
|
| 74 |
+
$$p(y = l|\mathbf{z}) = \frac{\exp(-d(\mathbf{z}, \mathbf{c}_l))}{\sum_{i=1}^{C} \exp(-d(\mathbf{z}, \mathbf{c}_i))},$$
|
| 75 |
+
(5)
|
| 76 |
+
|
| 77 |
+
where $d(z, c_l)$ is the negative cosine similarity.
|
| 78 |
+
|
| 79 |
+
We then optimize $\vartheta$ to minimize the following objective on the support set S:
|
| 80 |
+
|
| 81 |
+
<span id="page-4-0"></span>
|
| 82 |
+
$$\min_{\vartheta} \frac{1}{|\mathcal{S}|} \sum_{\boldsymbol{x}_i, y_i \in \mathcal{S}} [-\log(p(y = y_i | \boldsymbol{x}_i))]. \tag{6}$$
|
| 83 |
+
|
| 84 |
+
Solving Eq. (6) for $\vartheta$ results in high intra-class and low inter-class similarity in the adapted space. We then use $\vartheta$ and Eq. (5) to predict the label of the query sample from $\mathcal Q$ by picking the closest centroid $c_j$. Our meta-test pipeline is illustrated in Fig. 3.
|
| 85 |
+
|
| 86 |
+
**Discussion.** In [2], Simple CNAPS uses the (squared) Mahalanobis distance between the features of class centroid and a query image, $d(\boldsymbol{z}, \boldsymbol{c}) = \frac{1}{2} (f_{\phi}(\boldsymbol{x}) - \boldsymbol{c}')^{\top} \mathbf{Q}^{-1} (f_{\phi}(\boldsymbol{x}) - \boldsymbol{c}')$ where Q is a covariance matrix specific to the task and class and c' is the class centroid in the feature space (before the adaptation). The authors show that considering the class covariance enables better adaptation of the feature extractor to the target task. Our adaptation strategy can be seen as a generalization of the Mahalanobis distance computation. Alternatively, assuming that $Q^{-1}$ can be decomposed into a product of a lower triangular matrix and its conjugate transpose, i.e. $\mathbf{Q}^{-1} = \mathbf{L}\mathbf{L}^{\top}$ , one can first pre-transform the features by multiplication, i.e. $z = \mathbf{L}^{\top} f_{\phi}(x)$ and then compute the distance between these features and centroids. Similarly, we apply a linear transformation to the features but unlike [2], we learn its parameters $\vartheta$ by optimizing Eq. (6).
|
| 87 |
+
|
| 88 |
+

|
| 89 |
+
|
| 90 |
+
<span id="page-4-2"></span>Figure 3. Illustration of adaptation procedure in meta-test. Given a support set and query image, our method learns to map their features to a task-specific space through a linear transformation $A_{\vartheta}$ and assign the query image to the nearest class center.
|
2105.12848/main_diagram/main_diagram.drawio
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
<mxfile host="app.diagrams.net" modified="2021-05-10T21:48:41.763Z" agent="5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.93 Safari/537.36 Edg/90.0.818.56" etag="kFW1140biGBHwtp1_Qt0" version="14.6.11" type="google"><diagram id="rxm8TJNGeDZfb8SbZeqK" name="Page-3">7Vxtb5s6FP41kbYrtQIMJHzsknSb2k29baW1nyYHXMIGOBecNrm//tqAw0ucAAkvyW6lacTv4POc48fnHHUAxt7qcwAX82/YQu5AkazVAEwGiiKrhkQfrGYd1xhKUmEHjpV0SisenH9RUsm7LR0LhbmOBGOXOIt8pYl9H5kkVweDAL/lu71gN7/qAtrJilJa8WBCF211++FYZB7XjrRM7y/Ised8ZVlKWjzIOydThHNo4bfMWmA6AOMAYxL/8lZj5LLN4/sCFp6irm+Hd2PPfZnAh19P19ZFPPt1nSGbTwiQTw6e+uJvMEGu/nQ9//n6/HBx9+WZ6BfaKPk2suYbhiy6f0kRB2SObexDd5rWfgrw0rcQm1aipbTPLcYLWinTyl+IkHUCBrgkmFbNiecmrWjlkKdkOPv9zH5fDrWkOFll2iZrXvBJsH7KFuJhGi+mw6ISH/fiuO4YuziIPg+8jExkmrQ+JAH+jTIts5GmamxExe3msMDLwER79hgke0xgYCOyp6MR92MCyAAvkeZnhD1Ev4t2CJALifOaBzhM9MTe9EuGXgUBXGc6LLDjkzAz8x2roB0Slde5AiQKD0ABVyX9FSnXn/6I34CXMp+SVkVYrYFbVe4Tt3IOt+3AtmEQDvvCYAYH9cXcq3U6QykbFaUsKycl5uS1X6G7TFYaKLpLmPHGkalJAaD/s8S84SKMRHhFO8ijxSptZDwBmvkBj45HaYgifUdv9P977EE/O0C32XP85ds3vjT9knj1uGkLiSnOGFDe5g5BD4t41TdKpfKYqifnVxQQtNorGG58Qd74yqOk/JZynQ2jmWd4jiq1JEv53TLX01mui+VKq56U0vL3PkZrdaa18Z5KruOjCw7RqJXhVmtAq69sO0A2JBRgdBk4Q27I33QWbPRbG38YDNljRq8Y4dqjD1peD4aTgTb9K37Qig8UYlePH6Py+GN9YyH1ZyyGegVjoXRqLLSGMHQ0Rh4D6PjRVtCdUKTPyEcBdOkqDDPTyocClQTJCzN/zfCxjwp3kqSKLmb7tGhSoSNa/4nJ1aH32KukwXMsKzKWIvTk8dUWgDSjQPWVbQApotNGaQ1A+rkYoQLAbm4nbDkchu/QihwySjm0ZL1TaIlYaZHZ+NYV81GxzXVhGDpmgZaUMp9YhtwzBRjRWXoLPgYGsacCBoSvg+n78LprxxVjYaAASdL18bi2xErJRUYemkAcvK6ej2LLqbCZhx9Vw4KYY1qVjNrjnTCG2v6JYtq1NVFdr4kyEr/wrvcq6d+O14TryjGQ3gZsFZCXA7jgprM0NLJUMbANQ6pvik4E2EbRuyYfCuyChihGO8DWFfEL73QHFg25tr//1ndoXSgCP58Opw2KeiTvlMwNptNKim7VNIxt+nA3hyGig76eF12wmQnh6/9GxJwnDezdc0oNgK6zTjzIwjtxy6K2RmqLR40qYB6dklpulftEZwF1fyzsdB2ArpA2LDjrlGHfSAP1OG4iqYxwd6rxTjqa5w/K2Z7jRV45aoqg6tXO8aaOQh6d3G1sah9s29apwrWa25N46XDB5qxweS/YuXiWT9P7RwapAFmOSRzss0s29Jid8GfhIrMO3bJ4qQOX10XLp65E4rgW2uVVjP/tdyqKXq5RX2Pb0fHq1tEoEk1JYB1lgUa35p3kmthTcsQmfPGcaSkLZaTRi+ds
W0ehjOToLA1lcKNzKqEM9WjC1VAAklmui+/T+7MKQhY1VxhX6FRzNb1Pza0ThGxaA5vWqx2X/CKTlVpyPhRzizRtL5nqJRdJG54L2E4k4q2eV0Ic5T15YlIzIe4kE+iGx5943cTNEi6PvBmyLMe32YCIYEePHKtGf0KQXi26o0SHKejyMFV7pcFnaN+0iuZtWNG8cdwYO5BR/dzWpLwpq3puN2V1jne7Fy/8B1qVHwj+HqTJQJIczx4ZloJVWVEr8pM+5S6tS85p2ZqpkQtoEHm+RaYGtMbbj8dHc/ews7qDAX2HPzIjS96lk1NDaeBOXZraRfchsB24Lb+v/mIZrcGCCT4dVVGW4Rwu2E8Lm0svElGZQGex9G9nbUtYkcslLMze09oS8QnxAmAYrTADNskdCqjJiGJSbbAFparTTGuRLhyn6iKzvUOxoEmYR7k3MwnKjzyRDrV34qm9KFEnRLk8o71zAgwKKdDcoVQWtmss8aSpfNWmGPBDbJ4YAz6rCH9Xp6yQE3dqIYBSBpkqtKhCyogkimVeXl6+A0MIDFHyRqfAUN+BcQrA2LpFi4DR7S0alPOx0sz1AyPKdF6e2yNLl6o0knVAL55A0mW2iZnotmqMchR+pIP9FP6EqDg/xo9mLhWTlWp4+IrBimLuWNW8JM3YAeuOXIVqaV5SX77Cm3Jf4c0f7yvc5K33ZuXUCrmLJ3rrFG1et7dObftA+N/cOtWqyUcb471DeodfO4HS7bWTnyOnd+28eSeRotuF0Lw2ZCJoMf2jVTHC0j/9Bab/AQ==</diagram></mxfile>
|
2105.12848/main_diagram/main_diagram.pdf
ADDED
|
Binary file (30.2 kB). View file
|
|
|
2105.12848/paper_text/intro_method.md
ADDED
|
@@ -0,0 +1,155 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Introduction
|
| 2 |
+
|
| 3 |
+
Named entity recognition (NER), which aims to identify named entities from unstructured text, is an information extraction task fundamental to many downstream applications such as event detection [\(Li et al.,](#page-9-0) [2012\)](#page-9-0), relationship extraction [\(Bach and](#page-8-0) [Badaskar,](#page-8-0) [2007\)](#page-8-0), and question answering [\(Khalid](#page-9-1) [et al.,](#page-9-1) [2008\)](#page-9-1). Existing NER models are typically supervised by a large number of training sequences, each pre-annotated with token-level labels. In practice, however, obtaining such labels could be prohibitively expensive. On the other hand, many domains have various knowledge resources such as
|
| 4 |
+
|
| 5 |
+
knowledge bases, domain-specific dictionaries, or labeling rules provided by domain experts [\(Far](#page-8-1)[makiotou et al.,](#page-8-1) [2000;](#page-8-1) [Nadeau and Sekine,](#page-9-2) [2007\)](#page-9-2). These resources can be used to match a corpus and quickly create large-scale noisy training data for NER from multiple views.
|
| 6 |
+
|
| 7 |
+
Learning an NER model from multiple weak supervision sources is a challenging problem. While there are works on distantly supervised NER that use only knowledge bases as weak supervision [\(Mintz et al.,](#page-9-3) [2009;](#page-9-3) [Shang et al.,](#page-10-0) [2018;](#page-10-0) [Cao et al.,](#page-8-2) [2019;](#page-8-2) [Liang et al.,](#page-9-4) [2020\)](#page-9-4), they cannot leverage complementary information from multiple annotation sources. To handle multi-source weak supervision, several recent works [\(Nguyen et al.,](#page-9-5) [2017;](#page-9-5) [Safranchik et al.,](#page-10-1) [2020;](#page-10-1) [Lison et al.,](#page-9-6) [2020\)](#page-9-6) leverage the hidden Markov model (HMM), by modeling true labels as hidden variables and inferring them from the observed noisy labels through unsupervised learning. Though principled, these models fall short in capturing token semantics and context information, as they either model input tokens as one-hot observations [\(Nguyen et al.,](#page-9-5) [2017\)](#page-9-5) or do not model them at all [\(Safranchik et al.,](#page-10-1) [2020;](#page-10-1) [Li](#page-9-6)[son et al.,](#page-9-6) [2020\)](#page-9-6). Moreover, the flexibility of HMM is limited as its transitions and emissions remain constant over time steps, whereas in practice they should depend on the input words.
|
| 8 |
+
|
| 9 |
+
We propose the conditional hidden Markov model (CHMM) to infer true NER labels from multi-source weak annotations. CHMM conditions the HMM training and inference on BERT by predicting token-wise transition and emission probabilities from the BERT embeddings. These tokenwise probabilities are more flexible than HMM's constant counterpart in modeling how the true labels should evolve according to the input tokens. The context representation ability they inherit from BERT also relieves the Markov constraint and expands HMM's context-awareness.
|
| 10 |
+
|
| 11 |
+
Further, we integrate CHMM with a supervised BERT-based NER model with an alternate-training method (CHMM-ALT). It fine-tunes BERT-NER with the denoised labels generated by CHMM. Taking advantage of the pre-trained knowledge contained in BERT, this process aims to refine the denoised labels by discovering the entity patterns neglected by all of the weak sources. The fine-tuned BERT-NER serves as an additional supervision source, whose output is combined with other weak labels for the next round of CHMM training. CHMM-ALT trains CHMM and BERT-NER alternately until the result is optimized.
|
| 12 |
+
|
| 13 |
+
Our contributions include:
|
| 14 |
+
|
| 15 |
+
- A multi-source label aggregator CHMM with token-wise transition and emission probabilities for aggregating multiple sets of NER labels from different weak labeling sources.
|
| 16 |
+
- An alternate-training method CHMM-ALT that trains CHMM and BERT-NER in turn utilizing each other's outputs for multiple loops to optimize the multi-source weakly supervised NER performance.
|
| 17 |
+
- A comprehensive evaluation on four NER benchmarks from different domains demonstrates that CHMM-ALT achieves a 4.83 average F1 score improvement over the strongest baseline models.
|
| 18 |
+
|
| 19 |
+
The code and data used in this work are available at [github.com/Yinghao-Li/CHMM-ALT.](https://github.com/Yinghao-Li/CHMM-ALT)
|
| 20 |
+
|
| 21 |
+
# Method
|
| 22 |
+
|
| 23 |
+
In this section, we describe our proposed method CHMM-ALT. We first sketch the alternate-training procedure (§ 4.1), then explain the CHMM component (§ 4.2) and how BERT-NER is involved (§ 4.3).
|
| 24 |
+
|
| 25 |
+
The alternate-training method trains two models—a multi-source label aggregator CHMM and a BERT-NER model—in turn with each other's output. CHMM aggregates multiple sets of labels from different sources into a unified sequence of
|
| 26 |
+
|
| 27 |
+
labels, while BERT-NER refines them by its language modeling ability gained from pre-training. The training process is divided into two phases.
|
| 28 |
+
|
| 29 |
+
- In **phase I**, CHMM takes the annotations $\boldsymbol{x}_{1:K}^{(1:T)}$ from existing sources and gives a set of denoised labels $\boldsymbol{y}^{*(1:T)}$ , which are used to fine-tune the BERT-NER model. Then, we regard the fine-tuned model as an additional labeling source, whose outputs $\tilde{\boldsymbol{y}}^{(1:T)}$ are added into the original weak label sets to give the updated observation instances: $\boldsymbol{x}_{1:K+1}^{(1:T)} = \{\boldsymbol{x}_{1:K}^{(1:T)}, \tilde{\boldsymbol{y}}^{(1:T)}\}.$
|
| 30 |
+
- In **phase II**, CHMM and BERT-NER mutually improve each other iteratively in several loops. Each loop first trains CHMM with the observation $\boldsymbol{x}_{1:K+1}^{(1:T)}$ from the previous one. Then, its predictions are adopted to fine-tune BERT-NER, whose output updates $\boldsymbol{x}_{K+1}^{(1:T)}$ .
|
| 31 |
+
|
| 32 |
+
Figure 2 illustrates the alternate-training method. In general, CHMM gives high precision predictions, whereas BERT-NER trades recall with precision. In other words, CHMM can classify named entities with high accuracy but is slightly disadvantaged in discovering all entities. BERT-NER increases the coverage with a certain loss of accuracy. Combined with the alternate-training approach, this complementarity between these models further increases the overall performance.
|
| 33 |
+
|
| 34 |
+
The conditional hidden Markov model is an HMM variant for multi-source label denoising. It models true entity labels as hidden variables and infers them from the observed noisy labels. Traditionally, discrete HMM uses one transition matrix to model the probability of hidden label transitioning and *one* emission matrix to model the probability of the observations from the hidden labels. These two matrices are constant, i.e., their values do not change over time steps. CHMM, on the contrary, conditions both its transition and emission matrices on the BERT embeddings $e^{(1:T)}$ of the input tokens $w^{(1:T)}$ . This design not only allows CHMM to leverage the rich contextual representations of the BERT embeddings but relieves the constant matrices constraint as well.
|
| 35 |
+
|
| 36 |
+
In phase I, CHMM takes K sets of weak labels from the provided K weak labeling sources. In phase II, in addition to the existing sources, it takes
|
| 37 |
+
|
| 38 |
+
<span id="page-2-0"></span><sup>&</sup>lt;sup>1</sup>We represent vectors, matrices or tensors with bold fonts and scalars with regular fonts; $1: a \triangleq \{1, 2, \dots, a\}$ .
|
| 39 |
+
|
| 40 |
+
<span id="page-3-0"></span>
|
| 41 |
+
|
| 42 |
+
Figure 2: The illustration of the alternate-training method. Phase I is acyclic, starting from getting K weak labels from supervision sources and ending at the fine-tuning of BERT-NER with CHMM's denoised output. Phase II contains several loops, each trains CHMM with K+1 sources, including the additional BERT predictions from the previous loop, and fine-tunes BERT-NER using the updated denoised labels.
|
| 43 |
+
|
| 44 |
+
<span id="page-3-1"></span>
|
| 45 |
+
|
| 46 |
+
Figure 3: An illustration of CHMM's architecture. Shaded circles are observed elements; white circles are hidden elements; rectangles are matrices. Rounded rectangles are multi-layer perceptrons containing the trainable parameters. The arrows between $\boldsymbol{w}^{(t)}$ and $\boldsymbol{e}^{(t)}$ denote the context representation ability of BERT. MLP denotes the "multi-layer perceptron".
|
| 47 |
+
|
| 48 |
+
another set of labels from the previously fine-tuned BERT-NER, making the total number of sources K+1. For convenience, we use K as the number of weak sources below.
|
| 49 |
+
|
| 50 |
+
**Model Architecture** Figure 3 shows a sketch of CHMM's architecture. The hidden states are $z^{(t)} \in \mathcal{L}$, representing the underlying true labels to be inferred from multiple weak annotations. $\Psi^{(t)} \in \mathbb{R}^{|\mathcal{L}| \times |\mathcal{L}|}$ is the transition matrix, whose element $\Psi^{(t)}_{i,j} = p(z^{(t)} = j|z^{(t-1)} = i, e^{(t)}), i, j \in \{1, \dots, |\mathcal{L}|\}$ denotes the probability of moving from label i to label j at time step t. $\Phi^{(t)}_k \in \mathbb{R}^{|\mathcal{L}| \times |\mathcal{L}|}$ is the emission matrix of weak source k, each element in which $\Phi^{(t)}_{i,j,k} = p(x^{(t)}_{j,k} = 1|z^{(t)} = i, e^{(t)})$ represents the probability of source k observing label j when the
|
| 51 |
+
|
| 52 |
+
hidden label is i at time step t.
|
| 53 |
+
|
| 54 |
+
For each step, $e^{(t)} \in \mathbb{R}^{d_{\mathrm{emb}}}$ is the output of a pre-trained BERT with $d_{\mathrm{emb}}$ being its embedding dimension. $\Psi^{(t)}$ and $\Phi^{(t)}_{1:K}$ are calculated by applying a multi-layer perceptron (MLP) to $e^{(t)}$ :
|
| 55 |
+
|
| 56 |
+
$$s^{(t)} \in \mathbb{R}^{|\mathcal{L}|^2} = \text{MLP}(e^{(t)}),$$
|
| 57 |
+
(1)
|
| 58 |
+
|
| 59 |
+
$$\boldsymbol{h}^{(t)} \in \mathbb{R}^{|\mathcal{L}| \cdot |\mathcal{L}| \cdot K} = \text{MLP}(\boldsymbol{e}^{(t)}).$$
|
| 60 |
+
(2)
|
| 61 |
+
|
| 62 |
+
Since the MLP outputs are vectors, we need to reshape them to matrices or tensors:
|
| 63 |
+
|
| 64 |
+
$$S^{(t)} \in \mathbb{R}^{|\mathcal{L}| \times |\mathcal{L}|} = \text{reshape}(s^{(t)}),$$
|
| 65 |
+
(3)
|
| 66 |
+
|
| 67 |
+
$$\boldsymbol{H}^{(t)} \in \mathbb{R}^{|\mathcal{L}| \times |\mathcal{L}| \times K} = \text{reshape}(\boldsymbol{h}^{(t)}).$$
|
| 68 |
+
(4)
|
| 69 |
+
|
| 70 |
+
To achieve the proper probability distributions, we apply the Softmax function along the *label* axis so that these values are positive and sum up to 1:
|
| 71 |
+
|
| 72 |
+
$$\boldsymbol{\Psi}_{i,1:|\mathcal{L}|}^{(t)} = \sigma(\boldsymbol{S}_{i,1:|\mathcal{L}|}^{(t)}), \; \boldsymbol{\Phi}_{i,1:|\mathcal{L}|,k}^{(t)} = \sigma(\boldsymbol{H}_{i,1:|\mathcal{L}|,k}^{(t)}),$$
|
| 73 |
+
|
| 74 |
+
where
|
| 75 |
+
|
| 76 |
+
<span id="page-3-3"></span>
|
| 77 |
+
$$\sigma(\boldsymbol{a})_i = \frac{\exp(a_i)}{\sum_j \exp(a_j)}.$$
|
| 78 |
+
(5)
|
| 79 |
+
|
| 80 |
+
a is an arbitrary vector. The formulae in the following discussion always depend on $e^{(1:T)}$ , but we will omit the dependency term for simplicity.
|
| 81 |
+
|
| 82 |
+
**Model Training** According to the generative process of CHMM, the joint distribution of the hidden states and the observed weak labels for one sequence $p(\boldsymbol{z}^{(0:T)}, \boldsymbol{x}^{(1:T)}|\boldsymbol{\theta})$ can be factorized as:
|
| 83 |
+
|
| 84 |
+
$$p(\boldsymbol{z}^{(0:T)}, \boldsymbol{x}^{(1:T)} | \boldsymbol{\theta}) = p(\boldsymbol{z}^{(0:T)}) p(\boldsymbol{x}^{(1:T)} | \boldsymbol{z}^{(1:T)})$$
|
| 85 |
+
|
| 86 |
+
$$= p(z^{(0)}) \prod_{t=1}^{T} p(z^{(t)} | z^{(t-1)}) \prod_{t=1}^{T} p(\boldsymbol{x}^{(t)} | z^{(t)}),$$
|
| 87 |
+
(6)
|
| 88 |
+
|
| 89 |
+
where $\theta$ represents all the trainable parameters.
|
| 90 |
+
|
| 91 |
+
<span id="page-3-2"></span><sup>&</sup>lt;sup>2</sup>We relax plate notation here to present details.
|
| 92 |
+
|
| 93 |
+
HMM is generally trained with an expectation-maximization (EM, also known as Baum-Welch) algorithm. In the expectation step (E-step), we compute the expected complete data log likelihood:
|
| 94 |
+
|
| 95 |
+
$$Q(\boldsymbol{\theta}, \boldsymbol{\theta}^{\text{old}}) \triangleq \mathbb{E}_{\boldsymbol{z}}[\ell_c(\boldsymbol{\theta})|\boldsymbol{\theta}^{\text{old}}].$$
|
| 96 |
+
(7)
|
| 97 |
+
|
| 98 |
+
<span id="page-4-4"></span> $\theta^{\text{old}}$ is the parameters from the previous training step, $\mathbb{E}_{z}[\cdot]$ is the expectation over variable z, and
|
| 99 |
+
|
| 100 |
+
$$\ell_c(\boldsymbol{\theta}) \triangleq \log p(\boldsymbol{z}^{(0:T)}, \boldsymbol{x}^{(1:T)} | \boldsymbol{\theta})$$
|
| 101 |
+
|
| 102 |
+
is the complete data log likelihood. Let $\boldsymbol{\varphi}^{(t)} \in \mathbb{R}^{|\mathcal{L}|}$ be the observation likelihood where
|
| 103 |
+
|
| 104 |
+
$$\varphi_i^{(t)} \triangleq p(\boldsymbol{x}^{(t)}|z^{(t)} = i) = \prod_{k=1}^K \sum_{j=1}^{|\mathcal{L}|} \Phi_{i,j,k}^{(t)} x_{j,k}^{(t)}.$$
|
| 105 |
+
(8)
|
| 106 |
+
|
| 107 |
+
Combining (6)–(8) together, we have
|
| 108 |
+
|
| 109 |
+
$$Q(\boldsymbol{\theta}, \boldsymbol{\theta}^{\text{old}}) = \sum_{i=1}^{|\mathcal{L}|} \gamma_i^{(0)} \log \pi_i + \sum_{t=1}^{T} \sum_{i=1}^{|\mathcal{L}|} \sum_{j=1}^{|\mathcal{L}|} \xi_{i,j}^{(t)} \log \Psi_{i,j}^{(t)} + \sum_{t=1}^{T} \sum_{i=1}^{|\mathcal{L}|} \gamma_i^{(t)} \log \varphi_i^{(t)},$$
|
| 110 |
+
|
| 111 |
+
where $\pi_1 = 1$ and $\pi_{2:|\mathcal{L}|} = 0$;<sup>3</sup> $\gamma_i^{(t)} \triangleq p(z^{(t)} = i|\boldsymbol{x}^{(1:T)})$ is the smoothed marginal; $\xi_{i,j}^{(t)} \triangleq p(z^{(t-1)} = i, z^{(t)} = j|\boldsymbol{x}^{(1:T)})$ is the expected number of transitions. These parameters are computed using the *forward-backward* algorithm.<sup>4</sup>
|
| 112 |
+
|
| 113 |
+
In the maximization step (M-step), traditional HMM updates parameters $\theta_{\rm HMM} = \{\Psi, \Phi, \pi\}$ by optimizing (7) with pseudo-statistics. However, as the transitions and emissions in CHMM are not standalone parameters, we cannot directly optimize CHMM by this method. Instead, we update the model parameters through gradient descent *w.r.t.* $\theta_{\rm CHMM}$ using (9) as the objective function:
|
| 114 |
+
|
| 115 |
+
$$\nabla \boldsymbol{\theta}_{\text{CHMM}} = \frac{\partial Q(\boldsymbol{\theta}_{\text{CHMM}}, \boldsymbol{\theta}_{\text{CHMM}}^{\text{old}})}{\partial \boldsymbol{\theta}_{\text{CHMM}}}.$$
|
| 116 |
+
(10)
|
| 117 |
+
|
| 118 |
+
In practice, the calculation is conducted in the logarithm domain to avoid the loss of precision issue that occurs when the floating-point numbers become too small.
|
| 119 |
+
|
| 120 |
+
To solve the label sparsity issue, *i.e.*, some entities are only observed by a minority of the weak
|
| 121 |
+
|
| 122 |
+
sources, we modify the observations $\boldsymbol{x}^{(1:T)}$ before training. If one source k observes an entity at time step t: $\boldsymbol{x}_{j\neq 1,k}^{(t)}>0$ , the observation of non-observing sources at t will be modified to $\boldsymbol{x}_{1,\kappa}^{(t)}=\epsilon; \boldsymbol{x}_{j\neq 1,\kappa}^{(t)}=(1-\epsilon)/|\mathcal{L}|, \forall \kappa\in\{1,\ldots,K\}\backslash k,$ where $\epsilon$ is an arbitrary small value. Note that $\boldsymbol{x}_{1,\kappa}^{(t)}$ corresponds to the observed label 0.
|
| 123 |
+
|
| 124 |
+
<span id="page-4-1"></span>**CHMM Initialization** Generally, HMM has its transition and emission probabilities initialized with the statistics $\Psi^*$ and $\Phi^*$ computed from the observation set. But it is impossible to directly set $\Psi^{(t)}$ and $\Phi^{(t)}$ in CHMM to these values, as these matrices are the output of the MLPs rather than standalone parameters. To address this issue, we choose to pre-train the MLPs before starting CHMM's training by minimizing the mean squared error (MSE) loss between their outputs and the target statistics:
|
| 125 |
+
|
| 126 |
+
<span id="page-4-6"></span>
|
| 127 |
+
$$\ell_{\text{MSE}} = \frac{1}{T} \sum_{t} \| \boldsymbol{\Psi}^* - \boldsymbol{S}^{(t)} \|_F^2 + \| \boldsymbol{\Phi}^* - \boldsymbol{H}^{(t)} \|_F^2,$$
|
| 128 |
+
|
| 129 |
+
where $\|\cdot\|_F$ is the Frobenius norm. Right after initialization, MLPs can only output similar probabilities for all time steps: $\Psi^{(t)} \approx \Psi^*, \ \Phi^{(t)} \approx \Phi^*, \ \forall t \in \{1,2,\ldots,T\}$ . But their token-wise prediction divergence will emerge when CHMM has been trained. The initial hidden state $z^{(0)}$ is fixed to $\circ$ as it has no corresponding token.
|
| 130 |
+
|
| 131 |
+
**Inference** Once trained, CHMM can provide the most probable sequence of hidden labels $\hat{z}^{(1:T)}$ along with the probabilities of all labels $y^{*(1:T)}$ .
|
| 132 |
+
|
| 133 |
+
$$\begin{split} \hat{\pmb{z}}^{(1:T)} &= \argmax_{\pmb{z}^{(1:T)}} p_{\hat{\pmb{\theta}}_{\text{CHMM}}}(\pmb{z}^{(1:T)}|\pmb{x}_{1:K}^{(1:T)}, \pmb{e}^{(1:T)}), \\ y_i^{*(t)} &= p_{\hat{\pmb{\theta}}_{\text{CHMM}}}(z^{(t)} = i|\pmb{x}_{1:K}^{(1:T)}, \pmb{e}^{(1:T)}), \end{split}$$
|
| 134 |
+
|
| 135 |
+
where $\hat{\theta}_{\text{CHMM}}$ represents the trained parameters. These results can be calculated by either the Viterbi decoding algorithm (Viterbi, 1967) or directly maximizing the smoothed marginal $\gamma^{(1:T)}$ .
|
| 136 |
+
|
| 137 |
+
The pre-trained BERT model encodes semantic and structural knowledge, which can be distilled to further refine the denoised labels from CHMM. Specifically, we construct the BERT-NER model by stacking a feed-forward layer and a Softmax layer on top of the original BERT to predict the probabilities of the classes that each token belongs
|
| 138 |
+
|
| 139 |
+
<span id="page-4-2"></span><sup>&</sup>lt;sup>3</sup>This assumes the initial hidden state is always $\circ$ . In practice, we set $\pi_{\ell} = \epsilon, \forall \ell \in 2: |\mathcal{L}|$ and $\pi_1 = 1 - (|\mathcal{L}| - 1)\epsilon$ , where $\epsilon$ is a small value, to avoid getting $-\infty$ from log.
|
| 140 |
+
|
| 141 |
+
<span id="page-4-3"></span><sup>&</sup>lt;sup>4</sup>Details are presented in appendix A.1.
|
| 142 |
+
|
| 143 |
+
<span id="page-4-5"></span><sup>&</sup>lt;sup>5</sup>Details are presented in appendix A.2.
|
| 144 |
+
|
| 145 |
+
to (Sun et al., 2019). The probability predictions of CHMM, $y^{*(1:T)}$ , often referred to as *soft labels*, are chosen to supervise the fine-tuning procedure. Compared with the hard labels $\hat{z}^{(1:T)}$ , soft labels lead to a more stable training process and higher model robustness (Thiel, 2008; Liang et al., 2020).
|
| 146 |
+
|
| 147 |
+
We train BERT-NER by minimizing the Kullback-Leibler divergence (KL divergence) between the soft labels $y^*$ and the model output y:
|
| 148 |
+
|
| 149 |
+
$$\hat{\boldsymbol{\theta}}_{\text{BERT}} = \underset{\boldsymbol{\theta}_{\text{BERT}}}{\arg\min} \mathcal{D}[\boldsymbol{y}^{*(1:T)} || \boldsymbol{y}^{(1:T)}]$$
|
| 150 |
+
|
| 151 |
+
$$= \underset{\boldsymbol{\theta}_{\text{BERT}}}{\arg\min} \sum_{t=1}^{T} \sum_{i=1}^{|\mathcal{L}|} y_{i}^{*(t)} \log \frac{y_{i}^{*(t)}}{y_{i}^{(t)}}, \quad (11)$$
|
| 152 |
+
|
| 153 |
+
where $\theta_{\rm BERT}$ denotes all the trainable parameters in the BERT model. BERT-NER does not update the embeddings $e^{(1:T)}$ that CHMM depends on.
|
| 154 |
+
|
| 155 |
+
We obtain the refined labels $\tilde{y}^{(1:T)} \in \mathbb{R}^{T \times |\mathcal{L}|}$ from the fine-tuned BERT-NER directly through a forward pass. Different from CHMM, we continue BERT-NER's training with parameter weights from the last loop's checkpoint so that the model is initialized closer to the optimum. Correspondingly, phase II trains BERT-NER with a smaller learning rate, fewer epoch iterations, and batch gradient descent instead of the mini-batch version. This strategy speeds up phase II training without sacrificing the model performance as $y^{*(1:T)}$ does not change significantly from loop to loop.
|
2106.03598/main_diagram/main_diagram.drawio
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
<mxfile host="app.diagrams.net" modified="2021-04-28T03:40:56.443Z" agent="5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.141 Safari/537.36" etag="TEdK_pMm2ukuVwHqq1uI" version="14.6.6" type="google"><diagram id="j5UtGFnpkpKKWDSnm2Qn" name="Page-1">7VpZU+M4EP41qd2dKigfsZM8kkCYqYUtdgIFsy9TiizbWmTLI8k5+PUr2TI+kwEmB1M7PCTqVreO7v66JZGePYlWlwwk4TX1EOlZhrfq2ec9yzJtx5ZfirPOOUNbMwKGPS1UMmb4CWmmobkp9hCvCQpKicBJnQlpHCMoajzAGF3WxXxK6rMmINAzGiVjBgFBLbF77IlQ78KpSH9EOAiLmU1D90SgENYMHgKPLiss+6JnTxilIm9FqwkiyniFXXK96Ybe54UxFIuXKPxz8/lhPF8/zf++BLeWd/9neAJPHL02sS42jDy5f01SJkIa0BiQi5I7hilbIDWoKQlG09jLKENSpcIVpYkW+RcJsdaeBamgkhWKiOhen8ZiQgll2fS2kf1Jfnt7esecpgzqxV6YT8bg7svV1beLxbV9fwvT4eKkCBPAAiS27N3K5dSGKxNo410iGiHB1lKAIQIEXtQDAui4Cp7lStPLhrb+KzwxOLonpKHZ+kHrZ8QXRZw6BXm+qnaerzV1PA/a78qDw18efLUHzWN5cNuqF4CkeqYZxFM1bdOzpauU3ZchFmiWgMwkS1kIGykOE1IxLzI9Bw0knwtGH1GlZ+QObOBqh2gvm05B69k7En+xbsQEWm21o+51hroo6arcL+hlWePMonCFlfrmGvsyvdGy8ffR8+OAWWHxUGlX4CKpEi2KKMDyVpDVoP52ZFltZG2L5XeCLKuFrBjO8dcYycg/kx2fPGkI7GMo10JjyaC+/Di7mSitnjVRa1KhSCOJlkCOoSVEqJpAalN54KIpV1alZJ1Qjnl2LCRYiaWRtKsyb5okDHFOFXG6U1j7vm9B2AVrz527Thesd4Bk060j2Rm1kWy7B0Wy2TLrwevgAWH9diTbPyWS7RaS59CBHvvqYY4ARyWmZ4igABMcK4ieqHFjL4XSrwqiXKQMqMlCiVWBYp7DHquPG8Aeccwzxm/Kkur+mI+uskE2uMoGhMYBFqmH42wkOaSn7JDp5YkB+b68FPIiW3gsDTJoiNBjYJlp7TgJDCHqTgLzodN3jD0lAaueBOxBOwn0zY4kMNhbErCOnwQODuj+Twno/iZAwxBF/280n5vT6fS8C80De3w2nu4JzUbjcD5w2mg29oTmbbe0Soj0LJfIWcceXshmILKd5yxlESnsA40V91uqnrjGMxRQ5cW7T/l5LqNTfAM1CZSjpwzEEHNIc+ZHRBZIyFOhbP+F1LGvwVXkVQqxpyQupbKnZT7TORV6lLt5Gos0b98CeYbUitcYMsqpL/LpebYohv2894xhFVKqyWXnCc+7iu00t82TbANl5FUEGypboKROwWL9YRuoPuQyBbxKje1AK9V2D7ncAjK0ciPUDSPZeUi02LXgOW4RJmCOyBjAxyCbuFCJaYx2A+qB0wC10VGiu87pewN1+0pWhlItQsvIea9p3wWRioh4zpNdx9JxSsCgeavripbRIaOlfez/buKDua2Uo1kw/11f4y03/3bMPzJbGcp8Jz6IMFnnws/+1ONwXThS3OzR2TkvJIkuJC31bHl+WVi6xmhyw0rhibPC06UV1gtRU4gUhSnQhalrDFYpVGmlUIlKoWpqRpXCxfPCVVSnjhlAZyHrKkw/9gpTpo+Xv8eUuaX5MrOhphz3wWb/ZaJfB75rdjzndAF/F885ncAfbQS+Pui1gZ8hmmdmU3i2h8mqHWunp6cbTwgNJ0vbib
on6x7T5q+6V7MAwUEsSSg9oi4yY+UJGd7kTHdE2POyu2dX6NSvoNt9/+p/obw8JqxRoxg4HcWgIySsvV0HNt8HfsXEYWJiZDdiouvFZzcxIcnyRwpZX+WnHvbFfw==</diagram></mxfile>
|
2106.03598/main_diagram/main_diagram.pdf
ADDED
|
Binary file (30.8 kB). View file
|
|
|
2106.03598/paper_text/intro_method.md
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Introduction
|
| 2 |
+
|
| 3 |
+
Biomedical literature is widely accessible to the scientific community through databases such as Pubmed, PMC, and ScienceDirect. Within seconds, researchers can access millions of journal articles relating to an input query. Text generation tasks such as document summarization and question answering can allow researchers to quickly obtain important information from a large collection of papers, yet current methods generally underperform in these areas. Thus, new NLP methods are needed to parse the increasingly immense amounts of information.
|
2106.12871/main_diagram/main_diagram.drawio
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
<mxfile host="Electron" modified="2021-05-22T19:59:16.298Z" agent="5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) draw.io/14.1.8 Chrome/87.0.4280.88 Electron/11.1.1 Safari/537.36" etag="p-C7R2cJ24mvCOajGpdb" version="14.1.8" type="device" pages="2"><diagram id="4tRuhu1ZhObauQMwofnP" name="Page-1">5Vxtc6M2EP41nkk/JAMSrx/jJG0yl1xu6kyv91EGGdPDyMUitvPrTxgRA1IIbYhlsDMTwyKE2H2e1a5ePIJXi80fCVrOH4iPoxHQ/M0IXo8A0C3XYF+ZZMslmsMlQRL6XLYXTMIXXBTk0jT08apSkBIS0XBZFXokjrFHKzKUJGRdLTYjUfWpSxRgQTDxUCRKv4c+nRcvZhr7C7c4DObFo9k751cWqCjNX2U1Rz5Zl0TwZgSvEkJofrTYXOEoU1+hmPy+39+4+tqyBMe0zQ3h5Wp99/Xxcv3j2wR9OX/ZPt/+PNdBXs0zilL+ypM5q9JnspvFFPt+GAfs+AHRJNzwF6HbQj0JSWMfZw/QRnC8nocUT5bIy66uGSKYbE4XETvT2aGPVvNd2exkFkbRFYlIsqsIzszsj8lXNCE/cemKtftkd5CYluT5h8n5C+CE4s2bqtFfFc6wiskC02TLivAboMlv4TA1TG6zdcnmhR3nJXNDw+ZY4zgLXuveW4IdcGPIDTO+e9wuZ+dfArQxxtOn73+lT7fnBQFKhhG0j32GVX5KEjonAYlRdLOXjqv22Ze5J2TJDfEPpnTLiYdSSqo2y5+ZPahZtaxdJE083PRGnL4oCTBtKAfkpkpwhGj4XG1H92oX6cAIwHQJtAn+N8Wxlx0+4Q1lX3fxMqUNnNDf50SNBj7CzsyT0sBz8HTWEdyhWYE70FrC3dI+S+2i3nuOdtAS7VAl2kXnX/b592iLk07hPXM87EnhPXXMzOl2Am/DrXrzI4C30XM0w5ZoNlSiGQpovp88PbCqxjd/PrELUYbnVQ8BbRnOsQHaHJq/Nloi3FKJcEPQ+mUQJDhAFDfAukVoXoM11n0T2zJYu5YNkdUNrG23FobYbaPuT4O12CH2HNZmS1g7KmEtOpObOAhjjPM8dBdmM7+tneluVlXM/n1NWaCS/NapMz9M8F2PTqCu3JlbQ0O91RL1tkrUi1q/xvEqSy9HgClDu07IkmT5JT8fI+rNM+iTZIGi8IU1kcR9DmvcYpSl8P+mqZoJ9tCYYLdkgquSCaLWJ2RGF2hToLuH4NZ17ejQDVWAmekr2f6d3X9hFqc/yteuN7zy/GzLzzokgdOSBEqzV+fUu4N6lqs+MHIFkzymdMltYEXs8eMp801WkB2d2az5WhqHdNUUlv73ZIylYo5vyLTvgCm0OkrGdGDU/JUkG3MOmoyJjDhDomppNhZf0R9jQxCzY4+9Ous74DhTTOih6JJfWIS+nzs3vGLEme6qygyzJGFMd+9hjkfmdVYX82er3LXp3SjaAXYV5rYj6BlK9Aw60LN0ClDMvi52HxHi/1f+rsWqyI5JjGs04KL2lpVRrErCDkwJisyJm9LS4YXYyRuHNCaw1Pbx5R6+1OF32sdLX1wS6MonvA/VyTe1ssS1hzRbzLCbYLyLVxTFHs6GOno+w6jbVWYYpi5hxqf15nJq2CdKDUn4K6eGqZIaYm9/GtQwWX+rnBqOCmp0CHG3LcQPNX3T1MrhQ9ypDnFblqMe4u6Jev8ii36fG4caDW9s5smRw4bqyVHkm8cxMqgdkBySaX95wUNNkDY2842FWsWoVIeDTrMZtuRDfr7tTrWOEmhTq6+7VZ8mQF1QZL9ioQ+H8fzWb9lg2N5Wds1Wlvbqt4pKchbx+2pmeG
3IB1ggWScwRBYcQ0YAwTH1CJ8zV9SQIRx/jyCuHhgkF44hdYBK5k27xPRHI/uWPQI8eI8gjqcOkgUsR1DNATEC7TspWg+LHmptTGMzB7Lm1zJqU2Zaa//exUSzHNuKJwbUZb9th01L+1VVUEAcN803dvR3rYtTD/ld2czxgV286Gl67uJB26FPcKhdeI3NHIiLd4168GKrdvB93q0kh4xkM7W8IFCKbHFb74B3dliGUQG+YxxB7qpkPVCXSAe9iFGKZp7u8t16SHMU6Bcjx77TQbITW15Q6To3IO7FPjE66Hp9DKfAnjI2GGKc2Xc2tJ2+BUqXtgFx8GwAG5yAeWwAB2KcP+ANG9B1q/qX5FmftWFD7l/EaP9sOoANG8WvAnA9u4Uf7X7DBjvd/x5cPkmy/109ePML</diagram><diagram name="Copy of Page-2" id="dfRLQN110mPvCxcZRX5F">3VlRd5owFP41Pq4HiKI+Vuu69bRbz9y6dm8ppJAtJCyECvv1u0goQlxnV52o53hMvtyE5LvfvUmwh6ZRdi5xHF4Jn7CeY/lZD531HMe2Ry78FEheIsPBuAQCSX1tVANz+oto0NJoSn2SNAyVEEzRuAl6gnPiqQaGpRSLptmDYM2nxjggBjD3MDPRr9RXYYkiZFl1wztCg1A/Gpr0AiNcWWvTJMS+WKxAaNZDUymEKktRNiWsYK8ihpLrmw/y5su34OL69s65YHc/+ZtysLcv6fK0Bkm42vLQerGJyivGiA8E6qqQKhSB4JjNanQiRcp9UgxrQa22uRQiBtAG8DtRKtdqwKkSAIUqYrp1w/XodScilR55xg5pXWEZkOfGG5R2xQJXxKHZOiciIkrmYCAJw4o+NhWEtRCDJ7uaayhoul9AvZ71I2apftJUsDTigN0UWAKF9zxOleGhJv+LkCoyj/GSogVE8Wu4fiRSkexZdqrWoQ4LnRjssaZ3UYeZXYVOuBJhrrUjQh2T0SMQ92BDcQ87Je6B4YpZdE98n/IA4EucE9lNWcPO0JC1U8l8f7LuH6OshxvKetwpWQ8NV1zOP1/BUJPZp8/QwAphJ91U9sDunLLNLHEEyh5vqGzb6ZS0x2uOI9zDinD4dlPR7ril6JF5BBmtETTamaDdYxS0Xd3bDitXV9NeccZpmlFG8XISy6N1R1O12zpbo72n6ipZrZD5MVXrLicwGNztyd/Jw0lcXvgfaFYQvjM2R+0jnWuyuS5NjHaWJiyTtcPLCmjb0a67XgsKE6wP5O0kP2y5pUxLulfLM0/TeIXyj+2ebu/9nm4bTP0H+QOtMr8t+p8MqurdattZpgcva7mubTts+htupt16WWWbl9CDjgLU3/eOehxHw62rdLNNALWP8LveBMyb6kHLv+/sW/62+SrlZPnpOS6Dh0/uJZSCovSveMsRQJVqsp0oKX4QcKSQgHDBixB7oIy1IMxowKHqgVcI4JOCeOphdqobIur77E9H3qYAduVhp/12Z2R6uL/Gwc7LHQzV+k+vMsLq/w7R7Dc=</diagram></mxfile>
|
2106.12871/main_diagram/main_diagram.pdf
ADDED
|
Binary file (31 kB). View file
|
|
|
2106.12871/paper_text/intro_method.md
ADDED
|
@@ -0,0 +1,47 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Introduction
|
| 2 |
+
|
| 3 |
+
Robust data preprocessing pipeline enables organizations to take data driven business decisions. Most unstructured and semi-structured data after initial pre-processing is available in tabular format for further processing. These tabular datasets have to go through a process of data curation and quality check before consumption. Companies apply standard data quality checks/rules on business-critical columns to access and maintain the quality of data. Additionally, some of these datasets may contain sensitive information e.g. Protected Health Information (PHI), Personally Identifiable Information(PII), etc and need to be masked/de-identified i.e. identifying all columns which are Social Security Numbers, First Names, telephone numbers, etc. The first step in this data curation and quality check process is column level semantic tagging/mapping. There have been many attempts to automate this process [@powerbi; @Ramnandan; @Limaye; @Puranik2012ASA; @Hulsebos; @sato]. Traditionally, Semantic tagging is usually done using handcrafted rule-based systems[@google; @powerbi; @Trifacta]. In some cases, column descriptions are also available. However, as the data grows exponentially, the cost associated with maintaining rule-based system also increases. The semantic data type tagging is still largely manual where data stewards manually scan through databases and map columns of interest. Most ML based approaches [@sato; @Hulsebos; @Pham] use corresponding metadata, atomic datatypes, column description along with handcrafted features as input data for training the model.
|
| 4 |
+
|
| 5 |
+
We herein propose **DCoM**-**D**eep **Co**lumn **M**apper; a generic collection of Deep learning based Semantic mappers to map the columns to semantic data types. These models take the raw values of the columns (or instances) as inputs, considering them as texts, and build NLP-based deep learning architectures to predict the semantic type of the columns (or instances). We also extracted 19 engineered features and used them as auxiliary features to DCoM models. We refrained from extracting a large number of features, because we wanted the DCoM models to learn those features, along with some interesting high-level features, on their own, for better semantic data type detection. As we explore NLP in this problem, we leverage advanced NLP-based deep learning layers or models such as Bi-LSTM, BERT, etc.
|
| 6 |
+
|
| 7 |
+
# Method
|
| 8 |
+
|
| 9 |
+
Prior approaches [@Venetis; @Limaye] to semantic type detection trained models, such as logistic regression, decision trees, and feedforward neural networks [@Hulsebos], extracting various features from the data. We, on the other hand, treated the data as natural language (text) and used the data itself as the input to the model. We used a very small number of hand-engineered features, unlike Hulsebos et al. [@Hulsebos]. Therefore, our DCoM models have two inputs: the values of the instances as text or natural language, and hand-engineered features. We present two types of DCoM models based on the way we feed the text input to the model.
|
| 10 |
+
|
| 11 |
+
In this subsection, we discuss how the values of each instance can be fed to the model as a single sequence input to the DCoM. Considering the scenario, we cannot simply pass the inputs separating values of each input with any separator token. This gives the model wrong information about the sequence of the data, resulting in faulty training and poor performance on unseen data. For example, if we consider the last example from Table [1](#tab:sample_data){reference-type="ref" reference="tab:sample_data"} and create the input, `Deletes the property <SEP> Lets you edit the value of the property <SEP> Script execution will be stopped` for the model, the model gets to learn that the value `Lets you edit the value of the property` has a relative position between `Deletes the property` and `Script execution will be stopped`, which is wrong, because the values in an instance do not have any relative position between them.
|
| 12 |
+
|
| 13 |
+
To mitigate the problem we introduce permutations from mathematics. With permutation we order $r$ items from the set of $n_i$ items. Here $n_i$ items are all the values of an instance $i$, and $r \in [1, n]$. Doing so, the above instance can be broken down to multiple subsets. A sample of the subset is shown in the Table [2](#tab:permutation){reference-type="ref" reference="tab:permutation"}. If we feed all the new instances of the subset to the model, it does not learn any relative positional information of `Lets you edit the value of the property` with respect to other values in that instance unlike earlier. Therefore, for the value `Deletes the property`, the model only learns the relative positions of the tokens e.g. `Deletes, the, property,` etc in a value, but not the relative position of the values in an instance. This helps the model getting the actual information present in the data for accurate prediction on the output. This permutation method also helps in augmenting new instances which helps in training the data hungry deep learning models with enriched data. It is not feasible to generate all the possible permutations before the training because of the huge numbers of subsets. Instead, during training, we sample $r$ between $1$ and $n$ for each of the instances and generate one permutation for each instance. More than one instances can be generated, but training the model for multiple epochs will result same for both cases.
|
| 14 |
+
|
| 15 |
+
:::: adjustbox
|
| 16 |
+
width=,center
|
| 17 |
+
|
| 18 |
+
::: {#tab:permutation}
|
| 19 |
+
**New Instance** **class**
|
| 20 |
+
------------------------------------------------------------------------------------------------------------- -------------
|
| 21 |
+
`Deletes the property` description
|
| 22 |
+
`Lets you edit the value of the property` description
|
| 23 |
+
`Script execution will be stopped` description
|
| 24 |
+
`Deletes the property <SEP> Lets you edit the value of the property` description
|
| 25 |
+
`Lets you edit the value of the property <SEP> Script execution will be stopped` description
|
| 26 |
+
`Deletes the property <SEP> Lets you edit the value of the property <SEP> Script execution will be stopped` description
|
| 27 |
+
`Deletes the property <SEP> Script execution will be stopped <SEP> Lets you edit the value of the property` description
|
| 28 |
+
`Lets you edit the value of the property <SEP> Deletes the property <SEP> Script execution will be stopped` description
|
| 29 |
+
|
| 30 |
+
: Permutations on the values of a single instance from class `description`.
|
| 31 |
+
:::
|
| 32 |
+
::::
|
| 33 |
+
|
| 34 |
+
Input preparation with this method is straight-forward compared to the earlier method. In this method, we also use permutations to generate new instances, but the value of $r$ is fixed during training and it is used as a hyper-parameter, where $r \in [1, \infty)$. Once we decide the value of $r$, $r$ inputs are used as text inputs to the model. Aggregation of the embedding vectors of the inputs is performed once they are generated using the shared embedding weights. If $r > n$, where $n$ is the number of values of an instance, then this scenario can be handled in two ways. The first way is to pad $r-n$ inputs and aggregate the embedding vectors only for the non-padded inputs. Another way is, while generating new instances, to use permutation with replacement to always sample $r$ values out of $n$ values. Therefore, padding is not required for the latter method. We tried both approaches and did not observe any significant difference in the result.
|
| 35 |
+
|
| 36 |
+
We used 19 out of 27 global statistical features [@Hulsebos] as our engineered features for the DCoM models. These features are normalized before feeding to the model. The complete list of these features are shown in Table [8](#tab:feat_imp){reference-type="ref" reference="tab:feat_imp"}. Once text and engineered inputs are prepared, we attach LSTM/Transformer/BERT layers to the text input. The output of the same is aggregated with the engineered inputs. We use some feed-forward layer after that. Finally we use one `softmax` layer with 78 units to get the probability of each of the classes as output. This is our generalized architecture design of DCoM models. Extensive hyper-parameter tuning is performed to finalize the number of layers, number of units in a layer and many other hyper-parameters in the model. This topic is discussed in detail in section [5](#sec:training){reference-type="ref" reference="sec:training"}. To name the DCoM models, type of the text input and name of the deep learning layers are used as suffixes with the name `DCoM`, e.g. DCoM model with single instance input and LSTM layers is named as `DCoM-Single-LSTM`. The architecture design of DCoM models for both single sequence and multiple sequences inputs are shown in the Figure [2](#fig:arch){reference-type="ref" reference="fig:arch"}.
|
| 37 |
+
|
| 38 |
+
<figure id="fig:arch" data-latex-placement="ht">
|
| 39 |
+
<img src="images/arch.png" />
|
| 40 |
+
<figcaption>Architecture diagram of (a) <code>DCoM-Single</code> and (b) <code>DCoM-Multi</code> models.</figcaption>
|
| 41 |
+
</figure>
|
| 42 |
+
|
| 43 |
+
We trained our model on train dataset, validated it on validation dataset and finally reported our results on the test dataset. As class imbalance is present, like Hulsebos et al. [@Hulsebos], we also evaluated our model performance using the average F1-score, weighted by the number of columns per class in the test set (i.e., the support).
|
| 44 |
+
|
| 45 |
+
The DCoM models are trained in Tensorflow 2 [@tensorflow2015-whitepaper] and the hyper-parameters are tuned using `keras-tuner`[@kerastuner]. For inputs of DCoM models we tried permutations with replacement as well as without replacement, but we did not observe any significant difference in outputs. The value of $r$ for which we observed best performance for `DCoM-Multi` models is $45$. For tokenizing text inputs, we experimented with character based, word based and BERT Wordpiece[@wordpiece] tokenizers, and we found out BERT Wordpiece tokenizer stood out to be working better with respect to the other tokenizers because of the obvious reasons stated in the paper[@wordpiece]. We compared our result between with and without pre-trained embedding weights. It is observed that, though in the final output there is no considerable difference, training with pre-trained embedding weights take approximately 40% less amount of time to converge. We experimented with the small, base and large variations of several BERT architectures and finalized `DistilBERT-base`[@distilbert] and `Electra-small`[@electra] based on their performances. We used Bi-directional LSTM layers in all the `DCoM-LSTM` variants. For `Dropout` layers we used $0.3$ as our dropout rate. We experimented `mean, sum, concatenation` and `weighted sum` functions for aggregation, but there was not any significant difference in outputs based on these variations. We used `Adam`[@adam] optimizer with initial learning rate $10^{-4}$. Along with this, we implement learning rate reducer with factor 0.5, which reduces the learning rate by 50% if the model performance does not improve on validation dataset after consecutive 5 epochs. Assigning class weights does not have much effect in the test results. As this is a multi-class (78 classes) classification problem, the DCoM models are trained with `categorical-crossentropy` loss and validated with `accuracy` and `average F1` score metric.
|
| 46 |
+
|
| 47 |
+
We tried two approaches for inference on the test dataset. In the first approach we performed a single inference on each of the instances of the test dataset. While doing so, we set $r$ to be the total number of values ($n_i$) for each instance for `DCoM-Single` models to perform inference. For `DCoM-Multi`, as the value of $r$ is fixed beforehand, some values will be truncated for inputs with total values $n > r$. The other approach is to generate $k$ (where $k > 1$) instances with permutation, sampling $r$ values $k$ times between 1 and $n_i$, where $n_i$ is the total number of values for instance $i$. Therefore, we get $k$ class predictions for each of the instances, and finally with majority voting we pick the predicted class for each instance. We set the value of $k$ to 10 in our case, and for both `DCoM-Single` and `DCoM-Multi` we observed a $0.2-0.5\%$ improvement in the test `average F1` score, but this improvement comes at the price of inference time increased by approximately $k$ times.
|
2107.00085/main_diagram/main_diagram.drawio
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2107.00085/paper_text/intro_method.md
ADDED
|
@@ -0,0 +1,85 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Introduction
|
| 2 |
+
|
| 3 |
+
Deep Convolutional networks [\[30,](#page-11-0) [52\]](#page-12-0) have shown impressive performance in various computer vision tasks, e.g., image classification [\[19,](#page-10-0) [22\]](#page-11-1) and action recognition [\[48,](#page-12-1) [23,](#page-11-2) [57,](#page-12-2) [32\]](#page-11-3). However, there is an inherent problem of generalizability with deep-learning models, *i.e.*, models trained on one dataset (source domain) do not perform well on another domain. This loss of generalization is due to the presence of domain shift [\[11,](#page-10-1) [55\]](#page-12-3) across the datasets. Recent works [\[46,](#page-12-4) [29\]](#page-11-4) have shown that the presence of a few labeled samples from the target domain can significantly boost the performance of convolutional neural network (CNN) based models. This observation led to the formulation of Semi-Supervised Domain Adaptation (SSDA), which is a variant of Unsupervised Domain Adaptation where we have access to a few labeled samples from the target domain.
|
| 4 |
+
|
| 5 |
+
Unsupervised domain adaptation methods [\[42,](#page-12-5) [12,](#page-10-2) [36,](#page-11-5) [51,](#page-12-6) [35\]](#page-11-6) try to transfer knowledge from the label rich source domain to the unlabeled target domain. Many such existing domain adaptation approaches [\[42,](#page-12-5) [12,](#page-10-2) [51\]](#page-12-6) align the features of the source distribution with the target distribution without considering the category of the samples. These class-agnostic methods fail to generate discriminative features when aligning global distributions. Recently, owing to the success of contrastive approaches [\[6,](#page-10-3) [18,](#page-10-4) [39\]](#page-11-7), in self-representation learning, some recent works [\[26,](#page-11-8) [28\]](#page-11-9) have turned to instance-based contrastive approaches to reduce discrepancies across domains.
|
| 6 |
+
|
| 7 |
+
[\[46\]](#page-12-4) reveals that the direct application of the well-known UDA approaches in Semi-Supervised Domain Adaptation yields sub-optimal performance. [\[29\]](#page-11-4) has shown that supervision from labeled source and target samples can only ensure the partial cross-domain feature alignment. This creates
|
| 8 |
+
|
| 9 |
+

|
| 10 |
+
|
| 11 |
+
Figure 1: Conceptual description of the CLDA approach. (a) Initial distribution of samples from both domains. (b) Instance Contrastive Alignment ensures unlabeled target samples move into the low-entropy area, forming robust clusters. (c) Inter-Domain Contrastive Alignment minimizes the distance between the clusters of the same class from both domains. (d) The clusters of both domains are well aligned and samples are far away from the decision boundary.
|
| 12 |
+
|
| 13 |
+
aligned and unaligned sub-distributions of the target domain, causing intra-domain discrepancy apart from inter-domain discrepancy in SSDA.
|
| 14 |
+
|
| 15 |
+
In this work, we propose CLDA, a simple single-stage novel contrastive learning framework to address the aforementioned problem. Our framework contains two significant components to learn domain agnostic representation. First, Inter-Domain Contrastive Alignment reduces the discrepancy between centroids of the same class from the source and the target domain while increasing the distance between the class centroids of different classes from both source and target domain. This ensures clusters of the same class from both domains are near each other in latent space than the clusters of the other classes from both domains.
|
| 16 |
+
|
| 17 |
+
Second, inspired by the success of self-representation learning in semi-supervised settings [\[17,](#page-10-5) [6,](#page-10-3) [49\]](#page-12-7), we propose to use Instance Contrastive Alignment to reduce the intra-domain discrepancy. In this, we first generate the augmented views of the unlabeled target images using image augmentation methods. Alignment of the features of the original and augmented images of the unlabeled samples from the target domain ensures that they are closer to each other in latent space. The alignment between two variants of the same image ensures that the classifier boundary lies in the low-density regions assuring that the feature representations of two variants of the unlabeled target images are similar, which helps to generate better clusters for the target domain.
|
| 18 |
+
|
| 19 |
+
In summary, our key contributions are as follows. 1) We propose a novel, simple single-stage training framework for Semi-supervised Domain Adaptation. 2)We propose using alignment at class centroids and instance levels to reduce inter and intra domain discrepancies present in SSDA. 3)We evaluate the effectiveness of different augmentation approaches, for instance-based contrastive alignment in the SSDA setting. 4)We evaluate our approach over three well-known Domain Adaptation datasets (DomainNet, Office-Home, and Office31) to gain insights. Our approach achieves the state of the art results across multiple datasets showing its effectiveness. We perform extensive ablation experiments highlighting the role of different components of our framework.
|
| 20 |
+
|
| 21 |
+
# Method
|
| 22 |
+
|
| 23 |
+
In this section, we present our novel Semi-Supervised Domain Adaptation approach to learn domain agnostic representation. We will first introduce the background and notations used in our work and then describe our approach and its components in detail.
|
| 24 |
+
|
| 25 |
+

|
| 26 |
+
|
| 27 |
+
Figure 2: **Outline of our CLDA Framework** Our approach consists of aligning the outputs of the neural network at two levels. At the instance level, we try to maximize the similarity between features of unlabeled target images and strongly augmented unlabeled target images using Instance Contrastive Alignment. At the class level, we pass the images from both domains through the network, where we assign the labels to features of unlabeled target images and compute the centroids of each class of the target domain. Similarly, we compute the centroids for source domain features using their class labels. Finally, we maximize the similarity between centroids of the same class across domains by employing Inter-Domain Contrastive Alignment. In addition to the above components, our framework also uses cross-entropy loss on the labeled source and target images.
|
| 28 |
+
|
| 29 |
+
In Semi-Supervised Domain Adaptation, we have datasets sampled from two domains. The source dataset contains labeled images $\mathcal{D}_s = \{(x_i^s, y_i^s)\}_{i=1}^{N_s} \subset \mathcal{R}^d \times \mathcal{Y}$ sampled from some distribution $P_S(X,Y)$ . Besides that, we have two sets of data sampled from target domain distribution $P_T(X,Y)$ . We denote the labeled set of images sampled from the target domain as $\mathcal{D}_{lt} = \{(x_i^{lt}, y_i^{lt})\}_{i=1}^{N_{lt}}$ . The unlabeled set sampled from target domain $\mathcal{D}_t = \{(x_i^t)\}_{i=1}^{N_t}$ contains a large number of images $(N_t \gg N_{lt})$ without any corresponding labels associated with them. We also denote the labeled data from both domains as $\mathcal{D}_l = \mathcal{D}_s \cup \mathcal{D}_{lt}$ . Labels $y_i^s$ and $y_i^{lt}$ of the samples from source and labeled target set correspond to one of the categories of the dataset having K different classes/categories i.e., $Y = \{1, 2, ... K\}$ . Our goal is to learn a task-specific classifier using $D_s$ , $D_{lt}$ and $D_t$ to accurately predict labels on test data from target domain.
|
| 30 |
+
|
| 31 |
+
Labeled source and target samples are passed through the CNN-based feature extractor $\mathcal{G}(.)$ to obtain corresponding features, which are then passed through task-specific classifier $\mathcal{F}(.)$ to minimize the well-known cross-entropy loss on the labeled images from both source and target domains.
|
| 32 |
+
|
| 33 |
+
$$\mathcal{L}_{sup} = -\sum_{k=1}^{K} (y^i)_k \log\left((\mathcal{F}(\mathcal{G}(x_l^i)))_k\right)$$
|
| 34 |
+
(1)
|
| 35 |
+
|
| 36 |
+
Our method is based on the observation that the samples from the same category across domains must cluster in the latent space. However, this is observed only for the source domain due to the availability of the labels. Samples from the target domain do not align to form clusters due to the domain shift between the target and the source distributions. This discrepancy between the cluster of the same category across domains is reduced by aligning the centroids of each class of source and target domain. [6, 17] have shown that having a separate projection space is beneficial for contrastive
|
| 37 |
+
|
| 38 |
+
training. Instead of using a separate projection, we have used the outputs from the task-specific classifier as features to align the clusters across the domain.
|
| 39 |
+
|
| 40 |
+
We represent the centroid of the images from the source domain belonging to class k as the mean of their features, which can be written as
|
| 41 |
+
|
| 42 |
+
<span id="page-4-1"></span>
|
| 43 |
+
$$C_k^s = \frac{\sum_{i=1}^{i=B} \mathbb{1}_{\{y_i^s = k\}} \mathcal{F}(\mathcal{G}(x_i^s))}{\sum_{i=1}^{i=B} \mathbb{1}_{\{y_i^s = k\}}}$$
|
| 44 |
+
(2)
|
| 45 |
+
|
| 46 |
+
where B is the size of batch. We maintain a memory bank $(C^s = [C_1^s, C_2^s, .... C_K^s])$ to store the centroids of each class from source domain. We use exponential moving average to update these centroid values during the training
|
| 47 |
+
|
| 48 |
+
$$C_k^s = \rho(C_k^s)_{step} + (1 - \rho)(C_k^s)_{step-1}$$
|
| 49 |
+
|
| 50 |
+
where $\rho$ is a momentum term, and $(C_k^s)_{step}$ and $(C_k^s)_{step-1}$ are the centroid values of class k at the current and previous step, respectively.
|
| 51 |
+
|
| 52 |
+
We also need to cluster the unlabeled target samples for Inter-Domain Contrastive Alignment. The pseudo labels obtained from the task-specific classifier, as shown in Eq. (3), are used as the class labels for the corresponding unlabeled target samples.
|
| 53 |
+
|
| 54 |
+
<span id="page-4-0"></span>
|
| 55 |
+
$$\hat{y}_i^t = \operatorname{argmax}(\mathcal{F}(\mathcal{G}(x_i^t)))$$
|
| 56 |
+
(3)
|
| 57 |
+
|
| 58 |
+
Similar to the source domain, we also calculate the separate cluster centroid $C_k^t$ for each of the class k of the target samples present in the minibatch as per the Eq (2) where unlabeled target images replace the images from the source domain with their corresponding pseudo label. The model is then trained to maximize the similarity between the cluster representation of each class k from the source and the target domain. $C_k^s$ and $C_k^t$ form the positive pair while the remaining cluster centroids from both domains form the negative pairs. The remaining clusters from both domains are pushed apart in the latent space. This is achieved through employing a modified NT-Xent (normalized temperature-scaled cross-entropy) contrastive loss [6, 39, 49, 33] for domain adaptation given by
|
| 59 |
+
|
| 60 |
+
$$\mathcal{L}_{clu}(C_i^t, C_i^s) = -\log \frac{h(C_i^t, C_i^s)}{h(C_i^t, C_i^s) + \sum_{\substack{r=1\\q \in \{s,t\}}}^K \mathbb{1}_{\{r \neq i\}} h(C_i^t, C_r^q)}$$
|
| 61 |
+
(4)
|
| 62 |
+
|
| 63 |
+
where $h(\mathbf{u}, \mathbf{v}) = \exp\left(\frac{\mathbf{u}^{\top}\mathbf{v}}{||\mathbf{u}||_2||\mathbf{v}||_2}/\tau\right)$ measures the exponential of cosine similarity, $\mathbb{1}$ is an indicator function and $\tau$ is the temperature hyperparameter.
|
| 64 |
+
|
| 65 |
+
Recent works on contrastive learning [18, 39, 6] show encouraging results in single domain settings. [28] extends contrastive learning into multi-domain settings. Inspired by such success, we employ Instance Contrastive Learning to form stable and correct cluster cores in the target domain.
|
| 66 |
+
|
| 67 |
+
To perform contrastive alignment at the instance level, we first generate a strongly augmented version of the unlabeled target image i.e., $\tilde{x}_i^t = \psi(x_i^t)$ where $\psi(.)$ is the strong augmentation function [8]. Next, we employ the NT-Xent loss [6, 39] as defined in Eq. (5) to ensure that these two variants of the same image are closer to each other in the latent space while the rest of the images in minibatch of size B are pushed apart. This idea stems from the cluster assumption in an ideal classifier, which states the decision boundary should lie in the low-density region, ensuring consistent prediction for different augmented variants of the same image.
|
| 68 |
+
|
| 69 |
+
<span id="page-4-2"></span>
|
| 70 |
+
$$\mathcal{L}_{ins}(\tilde{x}_{i}^{t}, x_{i}^{t}) = -\log \frac{h\left(\mathcal{F}(\mathcal{G}(\tilde{x}_{i}^{t})), \mathcal{F}(\mathcal{G}(x_{i}^{t}))\right)}{\sum_{r=1}^{B} h\left(\mathcal{F}(\mathcal{G}(\tilde{x}_{i}^{t})), \mathcal{F}(\mathcal{G}(x_{r}^{t}))\right) + \sum_{r=1}^{B} \mathbb{1}_{\{r \neq i\}} h\left(\mathcal{F}(\mathcal{G}(\tilde{x}_{i}^{t})), \mathcal{F}(\mathcal{G}(\tilde{x}_{r}^{t}))\right)}$$
|
| 71 |
+
(5)
|
| 72 |
+
|
| 73 |
+
In SSDA, [\[29\]](#page-11-4) has shown that the target distribution gets divided into aligned and unaligned sub-distributions in the presence of very few labeled target data. Thus, aligning the unaligned sub-distribution can lead to improved performance, while perturbing the aligned sub-distribution can result in a negative transfer. Therefore, we only propagate the gradients for strongly augmented images to avoid perturbing the aligned sub-distribution in the target domain.
|
| 74 |
+
|
| 75 |
+
[\[6\]](#page-10-3) shows stronger augmentation in contrastive learning leads to improved performance. Consistent prediction across the input and strongly augmented unlabeled images in Instance Contrastive Alignment forces the unaligned target sub-distribution to move away from the low-density region towards aligned distribution. This ensures better clustering in the unlabeled target distribution, which is validated by improved accuracy as shown in Table [5](#page-8-0) after employing Instance Contrastive Alignment with Inter-Domain Contrastive Alignment.
|
| 76 |
+
|
| 77 |
+
Both of the components of the CLDA framework are necessary for the improved performance, as shown in Table [5](#page-8-0) . Instance Contrastive Alignment ensures that unlabeled target samples are consistent and are in the high-density region. However, it does not assure alignment between source and unlabeled target samples. Inter-Domain Contrastive Alignment reduces the discrepancy between unlabeled target samples and source domain but unlabeled target samples closer to the decision boundary might get pushed towards the wrong classes resulting in negative transfer. Thus, combining both components results in a much better alignment of the unlabeled target samples towards the source domain, leading to improved performance of the framework.
|
| 78 |
+
|
| 79 |
+
The overall training objective employs supervised loss, Inter-Domain Contrastive Alignment and Instance Contrastive Alignment which can be formulated as follows:
|
| 80 |
+
|
| 81 |
+
<span id="page-5-0"></span>
|
| 82 |
+
$$\mathcal{L}_{tot} = \mathcal{L}_{sup} + \alpha * \mathcal{L}_{clu} + \beta * \mathcal{L}_{ins}$$
|
| 83 |
+
(6)
|
| 84 |
+
|
| 85 |
+
We train the model in our framework by employing the overall training loss described in [\(6\)](#page-5-0).
|
2108.12126/main_diagram/main_diagram.drawio
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2108.12126/paper_text/intro_method.md
ADDED
|
@@ -0,0 +1,151 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Introduction
|
| 2 |
+
|
| 3 |
+
Medical reports are the primary medium, through which physicians communicate findings and diagnoses from the medical scans of patients. The process is usually laborious, where typing out a medical report takes on average five to ten minutes [@MedRepACL18]; it could also be error-prone. This has led to a surging need for automated generation of medical reports, to assist radiologists and physicians in making rapid and meaningful diagnoses. Its potential efficiency and benefits could be enormous, especially during critical situations such as COVID or a similar pandemic. Clearly a successful medical report generation process is expected to possess two key properties: 1) clinical accuracy, to properly and correctly describe the disease and related symptoms; 2) language fluency, to produce realistic and human-readable text.
|
| 4 |
+
|
| 5 |
+
Fueled by recent progresses in the closely related computer vision problem of image-based captioning [@ST; @Tran_2020_CVPR], there have been a number of research efforts in medical report generation in recent years [@MedRepACL18; @MedRepACL19; @MedRepNIPS18; @MedRepAAAI19; @xue2018multimodal; @yuan2019automatic; @tienet; @yin2019; @lovelace-mortazavi-2020-learning; @Srinivasan_2020_ACCV]. These methods often perform reasonably well in addressing the language fluency aspect; on the other hand, as is also evidenced in our empirical evaluation, their results are notably less satisfactory in terms of clinical accuracy. This we attribute to two reasons: one is closely tied to the textual characteristic of medical reports, which typically consists of many long sentences describing various disease related symptoms and related topics in precise and domain-specific terms. This clearly sets the medical report generation task apart from a typical image-to-text problem such as image-based captioning; another reason is related to the lack of full use of rich contextual information that encodes prior knowledge. This information includes, for example, the clinical document of the patient describing key clinical history and indication from doctors, and multiple scans from different 3D views -- information that typically exists in abundance in practical scenarios, as in the standard X-ray benchmarks of Open-I [@openi] and MIMIC-CXR [@johnson2019mimic].
|
| 6 |
+
|
| 7 |
+
<figure id="fig:classifier-generator-interpreter" data-latex-placement="t">
|
| 8 |
+
<img src="Fig/TMI_Model-main.png" style="width:80.0%" />
|
| 9 |
+
<figcaption>Our approach consists of three modules: a <em>classifier</em> that reads chest X-ray images and clinical history to produce an internal checklist of disease-related topics; a transformer-based <em>generator</em> to generate fluent text; an <em>interpreter</em> to examine and fine-tune the generated text to be consistent with the disease-related topics.</figcaption>
|
| 10 |
+
</figure>
|
| 11 |
+
|
| 12 |
+
The aforementioned observations motivate us to propose a categorize-generate-interpret framework that places specific emphasis on clinical accuracy while maintaining adequate language fluency of the generated reports: a classifier module reads chest X-ray images (e.g., either single-view or multi-view images) and related documents to detect diseases and output enriched disease embedding; a transformer-based medical report generator; and a differentiable interpreter to evaluate and fine-tune the generated reports for factual correctness. The main contributions are two-fold:
|
| 13 |
+
|
| 14 |
+
- We propose a differentiable end-to-end approach that consists of three modules (classifier-generator-interpreter) where the classifier module learns the disease feature representation via context modeling (section [3.1.3](#sec:context-modeling){reference-type="ref" reference="sec:context-modeling"}) and disease-state aware mechanism (section [3.1.4](#sec:enriched-embedding){reference-type="ref" reference="sec:enriched-embedding"}); the generator module transforms the disease embedding to medical report; the interpreter module reads and fine-tunes the generated reports, enhancing the consistency of the generated reports and the classifier's outputs.
|
| 15 |
+
|
| 16 |
+
- Empirically our approach is demonstrated to be more competitive against many strong baselines over two widely-used benchmarks on an equal footing (i.e., without access to additional information). We also find that the clinical history of patients (prior knowledge) plays a vital role in improving the quality of the generated reports.
|
| 17 |
+
|
| 18 |
+
# Method
|
| 19 |
+
|
| 20 |
+
Our framework consists of a *classification* module, a *generation* module, and an *interpretation* module, as illustrated in Fig. [1](#fig:classifier-generator-interpreter){reference-type="ref" reference="fig:classifier-generator-interpreter"}. The classification module reads multiple chest X-ray images and extracts the global visual feature representation via a multi-view image encoder. They are then disentangled into multiple low-dimensional visual embedding. Meanwhile, the text encoder reads clinical documents, including, e.g., doctor indication, and summarizes the content into text-summarized embedding. The visual and text-summarized embeddings are entangled via an "add & layerNorm" operation to form contextualized embedding in terms of disease-related topics. The generation module takes our enriched disease embedding as initial input and generates text word-by-word, as shown in Fig. [2](#fig:generator-interpreter){reference-type="ref" reference="fig:generator-interpreter"}. Finally, the generated text is fed to the interpretation module for fine-tuning to align to the checklist of disease-related topics from the classification module. In what follows, we are to elaborate on these three modules in detail.
|
| 21 |
+
|
| 22 |
+
For each medical study which consists of $m$ chest X-ray images $\{X_i\}_{i=1}^m$, we extract the corresponding latent features $\{x_i\}_{i=1}^m \in \mathbb{R}^c$, where $c$ is the number of features, via a shared DenseNet-121 image encoder [@huang2017densely]. Then, the multi-view latent features $x \in \mathbb{R}^c$ can be obtained by max-pooling across the set of $m$ latent features $\{x_i\}_{i=1}^m$, as proposed in [@su15mvcnn]. When $m=1$, the multi-view encoder boils down to a single-image encoder.
|
| 23 |
+
|
| 24 |
+
Let $T$ be a text document with length $l$ consisting of word embeddings $\{w_1, w_2,...,w_l\}$, where $w_i \in \mathbb{R}^e$ embodies the $i$-th word in the text and $e$ is the embedding dimension. We use the transformer encoder [@vaswani2017attention] as our text feature extractor to retrieve a set of hidden states $H=\{h_1, h_2, ..., h_l\}$, where $h_i \in \mathbb{R}^e$ is the attended features of the $i$-th word to other words in the text, $$\begin{equation}
|
| 25 |
+
\label{eqn:encoder}
|
| 26 |
+
h_i = \textrm{Encoder}(w_i|w_1,w_2,...,w_l).
|
| 27 |
+
\end{equation}$$
|
| 28 |
+
|
| 29 |
+
The entire document $T$ is then summarized by $Q=\{q_1,q_2,...,q_n\}$, representing $n$ disease-related topics (e.g., pneumonia or atelectasis) to be queried from the document. We refer to this retrieval process as *text-summarized embedding* $D_\textrm{txt} \in \mathbb{R}^{n \times e}$, $$\begin{equation}
|
| 30 |
+
\label{eqn:dtxt}
|
| 31 |
+
D_\textrm{txt} = \textrm{Softmax}\left(QH^\intercal\right) H.
|
| 32 |
+
\end{equation}$$ Here matrix $Q \in \mathbb{R}^{n \times e}$ is formed by stacking the set of vectors $\{q_1,q_2,...,q_n\}$ where $q_i \in \mathbb{R}^e$ is randomly initialized, then learned via the attention process. Similarly, the matrix $H \in \mathbb{R}^{l \times e}$ is formed by $\{h_1, h_2, ..., h_l\}$ from Eq. [\[eqn:encoder\]](#eqn:encoder){reference-type="eqref" reference="eqn:encoder"}. The term $\textrm{Softmax}(QH^\intercal)$ is the word attention heat-map for the $n$ queried diseases in the document. The intuition here is for each disease (e.g., pneumonia) to be queried from the text document $T$. We only pay attention to the most relevant words (e.g., *cough* or *shortness of breath*) in the text that associates with that disease, also known as a vector similarity dot product. This way, the weighted sum of these words by Eq. [\[eqn:dtxt\]](#eqn:dtxt){reference-type="eqref" reference="eqn:dtxt"} gives the feature that summarizes the document w.r.t. the queried disease.
|
| 33 |
+
|
| 34 |
+
The latent visual features $x \in \mathbb{R}^{c}$ are subsequently decoupled into low-dimensional disease representations, as illustrated in Fig. [1](#fig:classifier-generator-interpreter){reference-type="ref" reference="fig:classifier-generator-interpreter"}. They are regarded as the *visual embedding* $D_{\textrm{img}}\in \mathbb{R}^{n \times e}$, where each row is a vector $\phi_j(x) \in \mathbb{R}^e, j=1,\ldots,n$ defined as follows: $$\begin{equation}
|
| 35 |
+
\phi_j(x) = A_{j}^\intercal x + b_{j}. % Treat equation as a part of the equation, put comma, stop at the end.
|
| 36 |
+
\end{equation}$$ Here $A_j \in \mathbb{R}^{c \times e}$ and $b_j \in \mathbb{R}^{e}$ are learnable parameters of the $j$-th disease representation. $n$ is the number of disease representations, and $e$ is the embedding dimension. Now, together with the available clinical documents, the visual embedding $D_\textrm{img}$ and the text-summarized embedding $D_\textrm{txt}$ are entangled to form *contextualized disease representations* $D_\textrm{fused} \in \mathbb{R}^{n \times e}$ as $$\begin{equation}
|
| 37 |
+
D_\textrm{fused} = \textrm{LayerNorm}(D_\textrm{img} + D_\textrm{txt}). % --> Explain layernorm in 1 sentence, and reference to the fig.1
|
| 38 |
+
\end{equation}$$
|
| 39 |
+
|
| 40 |
+
Intuitively, the entanglement of visual and textual information allows our model to mimic the hospital workflow, to screen the disease's visual representations conditioned on the patients' clinical history or doctors' indication. For example, the doctor's indication in Fig. [1](#fig:classifier-generator-interpreter){reference-type="ref" reference="fig:classifier-generator-interpreter"} shows *cough* and *shortness of breath* symptoms. It is reasonable for a medical doctor to request a follow-up check of the *pneumonia* disease. As for the radiologists receiving the doctors' indication, they may prioritize diagnosing the presence of *pneumonia* and related diseases based on X-ray scans and look for specific abnormalities. As empirically shown in Table [1](#tab:openi-ablation){reference-type="ref" reference="tab:openi-ablation"}, the proposed contextualized disease representations bring a significant performance boost in the medical report generation task. Meanwhile, our current embedding is basically a plain mingling of heterogeneous sources of information such as disease type (i.e., disease name) and disease state (e.g., positive or negative). As shown by the ablation study in Table [1](#tab:openi-ablation){reference-type="ref" reference="tab:openi-ablation"}, this embedding by itself is insufficient for generating accurate medical reports. This leads us to conceive a follow-up enriched representation below.
|
| 41 |
+
|
| 42 |
+
The main idea behind *enriched disease embedding* is to further encode informative attributes about disease states, such as *positive*, *negative*, *uncertain*, or *unmentioned*. Formally, let $k$ be the number of states and $S \in \mathbb{R}^{k \times e}$ the state embedding. Then the confidence of classifying each disease into one of the $k$ disease states is $$\begin{equation}
|
| 43 |
+
\label{eqn:cls}
|
| 44 |
+
p = \textrm{Softmax}(D_\textrm{fused}S^\intercal).
|
| 45 |
+
\end{equation}$$ $S \in \mathbb{R}^{k \times e}$ is randomly initialized, then learned via the classification of $D_\textrm{fused}$. $D_\textrm{fused}$ acts as features for the multi-label classification, and the classification loss is computed as $$\begin{equation}
|
| 46 |
+
\mathcal{L_C} = - \frac{1}{n} \sum_{i=1}^{n} \sum_{j=1}^{k} y_{ij} \log(p_{ij}),
|
| 47 |
+
\end{equation}$$ where $y_{ij} \in \{0,1\}$ and $p_{ij} \in [0,1]$ are the $j$-th ground-truth and predicted values for the disease $i$-th, respectively. The state-aware embedding $D_\textrm{states} \in \mathbb{R}^{n \times e}$ are then computed as $$\begin{equation}
|
| 48 |
+
D_\textrm{states} =
|
| 49 |
+
\left\{\begin{matrix}
|
| 50 |
+
yS, & \textrm{if training phase}\\
|
| 51 |
+
pS, & \textrm{otherwise}\\
|
| 52 |
+
\end{matrix}\right.
|
| 53 |
+
\end{equation}$$ $y \in \{0,1\}^{n \times k}$ is the one-hot ground-truth labels about the disease-related topics, whereas $p \in [0,1]^{n \times k}$ is the predicted values. During training, the ground-truth disease states facilitate our generator in describing the diseases & related symptoms based on accurate information (teacher forcing). At test time, our generator then furnishes its recount based on the predicted states.
|
| 54 |
+
|
| 55 |
+
Finally, the *enriched disease embedding* $D_\textrm{enriched} \in \mathbb{R}^{n \times e}$ is the composition of state-aware disease embedding $D_\textrm{states}$ (i.e., good or bad), disease names $D_\textrm{topics}$ (i.e., which disease/topic), and the disease representations $D_\textrm{fused}$ (i.e., severity and details of the diseases), $$\begin{equation}
|
| 56 |
+
D_\textrm{enriched} = D_\textrm{states} + D_\textrm{topics} + D_\textrm{fused}.
|
| 57 |
+
\end{equation}$$ Like the disease queries $Q$, $D_\textrm{topics} \in \mathbb{R}^{n \times e}$ is randomly initialized, representing diseases or topics to be generated. It is then learned in training through the medical report generation pipeline. The enriched disease embedding provides explicit and precise disease descriptions, and endows our follow-up generation module with a powerful data representation.
|
| 58 |
+
|
| 59 |
+
Our report generator is derived from the transformer encoder of [@vaswani2017attention]. The network is formed by sandwiching & stacking a masked multi-head self-attention component and a feed-forward layer being on top of each other for $N$ times, as illustrated in Fig. [2](#fig:generator-interpreter){reference-type="ref" reference="fig:generator-interpreter"}. The hidden state for each word position $h_i \in \mathbb{R}^e$ in the medical report is then computed based on previous words and disease embedding, as $D_{\textrm{enriched}}=\{d_i\}_{i=1}^n$, $$\begin{equation}
|
| 60 |
+
h_i = \textrm{Encoder}(w_i|w_1,w_2,...,w_{i-1},d_1,d_2,...,d_n).
|
| 61 |
+
\end{equation}$$ This is followed by predicting future words based on the hidden states $H=\{h_i\}_{i=1}^l \in \mathbb{R}^{l \times e}$, as $$\begin{equation}
|
| 62 |
+
p_{\textrm{word}} = \textrm{Softmax}(HW^\intercal).
|
| 63 |
+
\end{equation}$$ Here $W \in \mathbb{R}^{v \times e}$ is the entire vocabulary embedding, $v$ the vocabulary size, and $l$ the document length. Let $p_{\textrm{word},ij}$ denote the confidence of selecting the $j$-th word in the vocabulary $W$ for the $i$-th position in the generated medical report. The generator loss is defined as a cross entropy of the ground-truth words $y_{\textrm{word}}$ and predicted words $p_{\textrm{word}}$, $$\begin{equation}
|
| 64 |
+
\mathcal{L_G} = - \frac{1}{l} \sum_{i=1}^{l} \sum_{j=1}^{v} y_{\textrm{word},ij} \log(p_{\textrm{word},ij}).
|
| 65 |
+
\end{equation}$$
|
| 66 |
+
|
| 67 |
+
Finally, the *weighted word embedding* $\hat{W} \in \mathbb{R}^{l \times e}$, also known as the *generated report*, is: $$\begin{equation}
|
| 68 |
+
\hat{W} = p_{\textrm{word}}W.
|
| 69 |
+
\end{equation}$$ It is worth noting that this set-up facilitates the back-propagation of errors from the follow-up interpretation module.
|
| 70 |
+
|
| 71 |
+
It is observed from empirical evaluations that the generated reports are often distorted in the process, such that they become inconsistent with the original output of the classification module -- the enriched disease embedding that encodes the disease and symptom related topics. Inspired by the CycleGAN idea of [@ZhuEtAl:ICCV17], we consider a fully differentiable network module to estimate the checklist of disease-related topics based on the generator's output, and to compare with the original output of the classification module. This provides a meaningful feedback loop to regulate the generated reports, which is used to fine-tune the generated report through the word representation outputs $\hat{W}$.
|
| 72 |
+
|
| 73 |
+
Specifically, we build on top of the proposed text encoder (described in section [3.1.2](#sec:TNN){reference-type="ref" reference="sec:TNN"}) a classification network that classifies disease-related topics, as follows. First, the text encoder summarizes the current medical report $\hat{W}$, and outputs the report-summarized embedding of the queried diseases $Q$, $$\begin{equation}
|
| 74 |
+
\hat{D}_\textrm{txt} = \textrm{Softmax}(Q\hat{H}^\intercal)\hat{H} \in \mathbb{R}^{n \times e}.
|
| 75 |
+
\end{equation}$$ Here $\hat{H}$ is computed from the generated medical reports $\hat{W}$ using Eq. [\[eqn:encoder\]](#eqn:encoder){reference-type="eqref" reference="eqn:encoder"}. Second, each of the report-summarized embedding $\hat{d}_i \in \mathbb{R}^e$ (i.e., each row of the matrix $\hat{D}_\textrm{txt} \in \mathbb{R}^{n \times e}$) is classified into one of the $k$ disease-related states (i.e., positive or negative), as $$\begin{equation}
|
| 76 |
+
p_{\textrm{int}} = \textrm{Softmax}(\hat{D}_\textrm{txt}S^\intercal) \in \mathbb{R}^{n \times k}.
|
| 77 |
+
\end{equation}$$ Finally, the interpreter is trained to minimize the subsequent multi-label classification loss, $$\begin{equation}
|
| 78 |
+
\mathcal{L_I} = - \frac{1}{n} \sum_{i=1}^{n} \sum_{j=1}^{k} y_{ij} \log(p_{\textrm{int},ij}).
|
| 79 |
+
\end{equation}$$ here $y_{ij} \in \{0,1\}$ is the ground-truth disease label and $p_{\textrm{int},ij} \in [0,1]$ is the predicted disease label of the interpreter.
|
| 80 |
+
|
| 81 |
+
In fine-tuning the generated medical reports $\hat{W}$, all interpreter parameters are frozen, which acts as a guide to force the word representations $\hat{W}$ being close to what the interpreter has learned from the ground-truth medical reports. If the weighted word embedding $\hat{W}$ is different from the learned representation -- which leads to incorrect classification -- a large loss value will be imposed in the interpretation module. This thus forces the generator to move toward producing a correct word representation.
|
| 82 |
+
|
| 83 |
+
Collectively our model is trained in an end-to-end manner by jointly minimizing the total loss, $$\begin{equation}
|
| 84 |
+
\mathcal{L}_\textrm{total} = \mathcal{L_C} + \mathcal{L_G} + \mathcal{L_I}.
|
| 85 |
+
\end{equation}$$
|
| 86 |
+
|
| 87 |
+
:::: table*
|
| 88 |
+
::: adjustbox
|
| 89 |
+
width=2,center
|
| 90 |
+
|
| 91 |
+
+----------+--------------------------------------------------------+-----------+-----------+-----------+-----------+-----------+-----------+----+----+----+----+
|
| 92 |
+
| Datasets | Methods | B-1 | B-2 | B-3 | B-4 | MTR | RG-L | SV | MV | AI | FT |
|
| 93 |
+
+:========:+:=======================================================+:=========:+:=========:+:=========:+:=========:+:=========:+:=========:+:==:+:==:+:==:+:==:+
|
| 94 |
+
| Open-I | S&T [@ST] | 0.316 | 0.211 | 0.140 | 0.095 | 0.159 | 0.267 | x | | | |
|
| 95 |
+
| +--------------------------------------------------------+-----------+-----------+-----------+-----------+-----------+-----------+----+----+----+----+
|
| 96 |
+
| | LRCN [@donahue2015long] | 0.369 | 0.229 | 0.149 | 0.099 | 0.155 | 0.278 | x | | | |
|
| 97 |
+
| +--------------------------------------------------------+-----------+-----------+-----------+-----------+-----------+-----------+----+----+----+----+
|
| 98 |
+
| | SA&T [@SAT] | 0.399 | 0.251 | 0.168 | 0.118 | 0.167 | 0.323 | x | | | |
|
| 99 |
+
| +--------------------------------------------------------+-----------+-----------+-----------+-----------+-----------+-----------+----+----+----+----+
|
| 100 |
+
| | Att-RK [@you2016image] | 0.369 | 0.226 | 0.151 | 0.108 | 0.171 | 0.323 | x | | | |
|
| 101 |
+
| +--------------------------------------------------------+-----------+-----------+-----------+-----------+-----------+-----------+----+----+----+----+
|
| 102 |
+
| | HRNN [@8970668] | 0.445 | 0.292 | 0.201 | 0.154 | 0.175 | 0.344 | x | | | |
|
| 103 |
+
| +--------------------------------------------------------+-----------+-----------+-----------+-----------+-----------+-----------+----+----+----+----+
|
| 104 |
+
| | 1-NN [@pmlr-v116-boag20a] | 0.232 | 0.116 | 0.051 | 0.018 | N/A | 0.201 | x | | | |
|
| 105 |
+
| +--------------------------------------------------------+-----------+-----------+-----------+-----------+-----------+-----------+----+----+----+----+
|
| 106 |
+
| | TieNet [@tienet] | 0.330 | 0.194 | 0.124 | 0.081 | N/A | 0.311 | x | | | |
|
| 107 |
+
| +--------------------------------------------------------+-----------+-----------+-----------+-----------+-----------+-----------+----+----+----+----+
|
| 108 |
+
| | Liu et. al. [@pmlr-v106-liu19a] | 0.359 | 0.237 | 0.164 | 0.113 | N/A | 0.354 | x | | | x |
|
| 109 |
+
| +--------------------------------------------------------+-----------+-----------+-----------+-----------+-----------+-----------+----+----+----+----+
|
| 110 |
+
| | CoAtt [@MedRepACL18] | 0.455 | 0.288 | 0.205 | 0.154 | N/A | 0.369 | x | | | |
|
| 111 |
+
| +--------------------------------------------------------+-----------+-----------+-----------+-----------+-----------+-----------+----+----+----+----+
|
| 112 |
+
| | HRGR-Agent [@MedRepNIPS18] | 0.438 | 0.298 | 0.208 | 0.151 | N/A | 0.322 | | x | | x |
|
| 113 |
+
| +--------------------------------------------------------+-----------+-----------+-----------+-----------+-----------+-----------+----+----+----+----+
|
| 114 |
+
| | KERP [@MedRepAAAI19] | 0.482 | 0.325 | 0.226 | 0.162 | N/A | 0.339 | | x | x | |
|
| 115 |
+
| +--------------------------------------------------------+-----------+-----------+-----------+-----------+-----------+-----------+----+----+----+----+
|
| 116 |
+
| | ReinforcedTransformer [@10.1007/978-3-030-32692-0_77] | 0.350 | 0.234 | 0.143 | 0.096 | N/A | N/A | x | | | x |
|
| 117 |
+
| +--------------------------------------------------------+-----------+-----------+-----------+-----------+-----------+-----------+----+----+----+----+
|
| 118 |
+
| | HRG-Transformer | 0.464 | 0.301 | 0.212 | 0.158 | N/A | N/A | | x | | |
|
| 119 |
+
| +--------------------------------------------------------+-----------+-----------+-----------+-----------+-----------+-----------+----+----+----+----+
|
| 120 |
+
| | SD&C [@MedRepACL19] | 0.464 | 0.301 | 0.210 | 0.154 | N/A | 0.362 | x | | | x |
|
| 121 |
+
| +--------------------------------------------------------+-----------+-----------+-----------+-----------+-----------+-----------+----+----+----+----+
|
| 122 |
+
| | Ours (SV) | 0.463 | 0.310 | 0.215 | 0.151 | 0.186 | 0.377 | x | | | |
|
| 123 |
+
| +--------------------------------------------------------+-----------+-----------+-----------+-----------+-----------+-----------+----+----+----+----+
|
| 124 |
+
| | Ours (MV) | 0.476 | 0.324 | 0.228 | 0.164 | 0.192 | 0.379 | | x | | |
|
| 125 |
+
| +--------------------------------------------------------+-----------+-----------+-----------+-----------+-----------+-----------+----+----+----+----+
|
| 126 |
+
| | Ours (MV+T) | 0.485 | 0.355 | 0.273 | 0.217 | 0.205 | 0.422 | | x | x | |
|
| 127 |
+
| +--------------------------------------------------------+-----------+-----------+-----------+-----------+-----------+-----------+----+----+----+----+
|
| 128 |
+
| | Ours (MV+T+I) | **0.515** | **0.378** | **0.293** | **0.235** | **0.219** | **0.436** | | x | x | x |
|
| 129 |
+
+----------+--------------------------------------------------------+-----------+-----------+-----------+-----------+-----------+-----------+----+----+----+----+
|
| 130 |
+
| MIMIC | 1-NN [@pmlr-v116-boag20a] | 0.367 | 0.215 | 0.138 | 0.095 | 0.139 | 0.228 | x | | | |
|
| 131 |
+
| +--------------------------------------------------------+-----------+-----------+-----------+-----------+-----------+-----------+----+----+----+----+
|
| 132 |
+
| | SA&T [@SAT] | 0.370 | 0.240 | 0.170 | 0.128 | 0.141 | 0.310 | x | | | |
|
| 133 |
+
| +--------------------------------------------------------+-----------+-----------+-----------+-----------+-----------+-----------+----+----+----+----+
|
| 134 |
+
| | AdpAtt [@lu2017knowing] | 0.384 | 0.251 | 0.178 | 0.134 | 0.148 | 0.314 | x | | | |
|
| 135 |
+
| +--------------------------------------------------------+-----------+-----------+-----------+-----------+-----------+-----------+----+----+----+----+
|
| 136 |
+
| | Liu et. al. [@pmlr-v106-liu19a] | 0.313 | 0.206 | 0.146 | 0.103 | N/A | 0.306 | x | | | x |
|
| 137 |
+
| +--------------------------------------------------------+-----------+-----------+-----------+-----------+-----------+-----------+----+----+----+----+
|
| 138 |
+
| | Transformer [@vaswani2017attention] | 0.409 | 0.268 | 0.191 | 0.144 | 0.157 | 0.318 | x | | | |
|
| 139 |
+
| +--------------------------------------------------------+-----------+-----------+-----------+-----------+-----------+-----------+----+----+----+----+
|
| 140 |
+
| | GumbelTransformer [@lovelace-mortazavi-2020-learning] | 0.415 | 0.272 | 0.193 | 0.146 | 0.159 | 0.318 | x | | | x |
|
| 141 |
+
| +--------------------------------------------------------+-----------+-----------+-----------+-----------+-----------+-----------+----+----+----+----+
|
| 142 |
+
| | Ours (SV) | 0.447 | 0.290 | 0.200 | 0.144 | 0.186 | 0.317 | x | | | |
|
| 143 |
+
| +--------------------------------------------------------+-----------+-----------+-----------+-----------+-----------+-----------+----+----+----+----+
|
| 144 |
+
| | Ours (MV) | 0.451 | 0.292 | 0.201 | 0.144 | 0.185 | 0.320 | | x | | |
|
| 145 |
+
| +--------------------------------------------------------+-----------+-----------+-----------+-----------+-----------+-----------+----+----+----+----+
|
| 146 |
+
| | Ours (MV+T) | 0.491 | 0.357 | 0.276 | 0.223 | 0.213 | 0.389 | | x | x | |
|
| 147 |
+
| +--------------------------------------------------------+-----------+-----------+-----------+-----------+-----------+-----------+----+----+----+----+
|
| 148 |
+
| | Ours (MV+T+I) | **0.495** | **0.360** | **0.278** | **0.224** | **0.222** | **0.390** | | x | x | x |
|
| 149 |
+
+----------+--------------------------------------------------------+-----------+-----------+-----------+-----------+-----------+-----------+----+----+----+----+
|
| 150 |
+
:::
|
| 151 |
+
::::
|
2110.06021/main_diagram/main_diagram.drawio
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2110.06021/paper_text/intro_method.md
ADDED
|
@@ -0,0 +1,64 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Method
|
| 2 |
+
|
| 3 |
+
Normalizing flows define a random variable as an invertible differentiable transformation $\boldsymbol{y} = f_{\boldsymbol{\phi}}(\boldsymbol{x}^{(0)})$ of a base variable $\boldsymbol{x}^{(0)} \sim p_0(\boldsymbol{x}^{(0)})$. In these expressions, $\boldsymbol{\phi}$ are parameters that control the form of the transformation and consequently the distribution of $\boldsymbol{y}$. The transformed (log-)density can then be given explicitly using the change of variables formula from probability theory, $$\begin{equation}
|
| 4 |
+
\label{eq: flow likelihood}
|
| 5 |
+
\log{p(\boldsymbol{y})} = \log{p_0(f_{\boldsymbol{\phi}}^{-1}(\boldsymbol{y}))} - \log{\left| \det{J}_{\boldsymbol{\phi}}(\boldsymbol{y}) \right|},
|
| 6 |
+
\end{equation}$$ where $J_{\boldsymbol{\phi}}(\boldsymbol{y})$ is the Jacobian matrix of $f_{\boldsymbol{\phi}}$. The base distribution $p_0$ is often taken to be standard normal ($\boldsymbol{x}^{(0)} \sim N(\boldsymbol{0}, \boldsymbol{I})$). In typical applications, the functional form of $f_{\boldsymbol{\phi}}$ is specified as the composition of a series of simpler invertible transformations: $$\begin{equation}
|
| 7 |
+
\boldsymbol{y} = f_{\boldsymbol{\phi}}(\boldsymbol{x}^{(0)}) = \underbrace{f^{(0)}_{\boldsymbol{\phi}^{(0)}} \circ \dots \circ f^{(m-1)}_{\boldsymbol{\phi}^{(m-1)}}}_{m\text{ times}}(\boldsymbol{x}^{(0)})~.
|
| 8 |
+
\end{equation}$$ We denote the resulting sequence of transformed variables as $\boldsymbol{x}^{(0)}, \boldsymbol{x}^{(1)}, \dots, \boldsymbol{x}^{(m-1)}, \boldsymbol{y}$. Each of these transformations (i.e. *flow layers*) is designed to have tractable formulas for inversion and log-Jacobian. The parameters $\boldsymbol{\phi}$ are usually trained by stochastic gradient descent to maximize the log-likelihood (given observed data) or evidence lower bound (in the variational inference setting).
|
| 9 |
+
|
| 10 |
+
Probabilistic programming allows a joint probability distribution over a set of random variables to be specified intuitively as a program that includes random sampling steps [@van2018introduction]. Computations on this distribution, including the joint log-density function, structured inference algorithms, and the bijective transformations described in this paper, may then be derived automatically as program transformations. Recently frameworks have emerged for 'deep' probabilistic programming, which exploit the automatic differentiation and hardware acceleration provided by deep learning frameworks; examples include Pyro [@bingham2019pyro], PyMC [@salvatier2016probabilistic], Edward [@tran2017deep], and TensorFlow Probability [@dillon2017tensorflow; @piponi2020joint]. These allow for straightforward implementation of gradient-based inference and parameter learning, and enable methods such as ours that integrate probabilistic programs with standard 'deep learning' components such as flows.
|
| 11 |
+
|
| 12 |
+
Probabilistic programs that contain only deterministic control flow may equivalently be viewed as directed graphical models, in which each random variable $\boldsymbol{x}_j$ is a graph node connected by incoming edges to some subset of the previous variables $\boldsymbol{x}_{<j}$. This leads to the following general expression for the joint density of a set of vector-valued variables $\boldsymbol{x}_{0:n} = \{\boldsymbol{x}_j\}_{j=0}^n$: $$\begin{equation}
|
| 13 |
+
\label{eq: probprog}
|
| 14 |
+
p(\boldsymbol{x}_{0:n}; \boldsymbol{\phi}) = p_0(\boldsymbol{x}_0; \boldsymbol{\theta}_0(\boldsymbol{\phi})) \prod_{j=1}^n p_j(\boldsymbol{x}_j | \boldsymbol{\theta}_j(\boldsymbol{x}_{<j}; \boldsymbol{\phi}))
|
| 15 |
+
\end{equation}$$ Here $\boldsymbol{\theta}_j(\boldsymbol{x}_{<j}; \boldsymbol{\phi})$ denotes the 'link function' associated with each node that takes as input the model parameters $\boldsymbol{\phi}$ and the values of all parent variables (excepting $\boldsymbol{\theta}_0$, where there are no parents), and outputs the parameters of its respective density function.
|
| 16 |
+
|
| 17 |
+
As a practical matter, flow architectures generally assume layers of fixed size, so the experiments in this paper focus on programs of fixed structure and we use graphical-model notation for simplicity. However, the methods we describe do not require access to an explicit graph structure and could be straightforwardly applied to programs with stochastic control flow.
|
| 18 |
+
|
| 19 |
+
In this section, we will show that a large class of differentiable probabilistic programs can be converted into equivalent normalizing flow layers. More precisely, we will construct an invertible function $F_{\boldsymbol{\phi}}$ that maps spherical normal random inputs to the joint distribution of the probabilistic program: $$\begin{equation}
|
| 20 |
+
\boldsymbol{x}^{(1)} = F_{\boldsymbol{\phi}}(\boldsymbol{x}^{(0)}) \sim p(\boldsymbol{x}^{(1)}; \boldsymbol{\phi}).\qquad (\boldsymbol{x}^{(0)} \sim \mathcal{N}(\boldsymbol{0}, \boldsymbol{I}))
|
| 21 |
+
\end{equation}$$ This is a special case of the so-called *Rosenblatt transform* [@rosenblatt1952remarks]. We interpret this transform as a flow layer and we extend it by including trainable gate variables that can be used to "disconnect" a set of variables from its parents so as to correct model misspecification. In our applications, the input to the bijective transformation may be arbitrary real-valued vectors, for example, the output of a previous flow layer, which likely will not be normally distributed. In such cases the transformed values will not be samples from the original probabilistic program, but we expect the transformation to capture some of its properties, such as multimodality and the dependence structure of its random variables.
|
| 22 |
+
|
| 23 |
+
We first consider the case of a univariate random variable $x^{(1)} \sim p_{\boldsymbol{\phi}}(x^{(1)})$, which we can express as a continuous bijective transformation $f_{\boldsymbol{\phi}}$ of a standard normal variable $x^{(0)}$ via a slight extension of inverse transform sampling. The probability integral transform theorem states that the cumulative distribution function (CDF) of $p_{\boldsymbol{\phi}}$, $C_{\boldsymbol{\phi}}(x) = \int_a^x p_{\boldsymbol{\phi}}(y) \text{d} y$, with $p_{\boldsymbol{\phi}}$ defined over a generic interval $(a,b)$ and $a < x < b$, is uniformly distributed, $u = C_{\boldsymbol{\phi}}(x) \sim \mathcal{U}(0, 1)$ [@angus1994probability]. If the CDF is invertible, i.e., strictly increasing within its domain, it follows that we can sample $x = C_{\boldsymbol{\phi}}^{-1}(u) \sim p_{\boldsymbol{\phi}}$ given only a uniform random variable $u\sim \mathcal{U}(0, 1)$. We obtain $u$ through a second application of the probability integral transform theorem to the standard normal CDF $\Phi_{0,1}$. Our univariate 'flow' is therefore given by $$\begin{equation}
|
| 24 |
+
x^{(1)} = f_{\boldsymbol{\phi}}(x^{(0)}) = C_{\boldsymbol{\phi}}^{-1}(\Phi_{0,1}(x^{(0)}))\qquad(x^{(0)} \sim \mathcal{N}(0, 1))
|
| 25 |
+
\end{equation}$$ Closed-form expressions for $f_{\boldsymbol{\phi}}$ may be available in some cases (see below). In the general univariate setting where the inverse distribution function $C_{\boldsymbol{\phi}}^{-1}(u)$ is not available in closed form, it may be evaluated efficiently using numerical root-finding methods such as the method of bisection (see Appendix [7.6](#Appendix 5){reference-type="ref" reference="Appendix 5"}), with derivatives obtained via the inverse function theorem and the implicit reparameterization trick [@figurnov2018implicit]. However, this is not needed for maximum likelihood training of the parameters $\boldsymbol{\phi}$, since [\[eq: flow likelihood\]](#eq: flow likelihood){reference-type="ref+label" reference="eq: flow likelihood"} only involves $f_{\boldsymbol{\phi}}^{-1}$ and its derivatives.
|
| 26 |
+
|
| 27 |
+
**Example: Gaussian variables**. The inverse CDF of a Gaussian variable $x^{(1)} \sim \mathcal{N}\!\left(\mu, \sigma^2 \right)$ can be expressed as a scale-shift transformation of the inverse CDF of a standard normal variable: $\Phi^{-1}_{\mu, \sigma}(u) = \mu + \sigma \Phi^{-1}_{0, 1}(u)$. We can obtain the forward flow transformation by composing this function with the CDF of a standard Gaussian: $f_{\mu, \sigma}(x^{(0)}) = \Phi^{-1}_{\mu, \sigma}(\Phi_{0, 1}(x^{(0)})) = \mu + \sigma x^{(0)}$. This is the famous reparameterization formula used to train VAEs and other variational methods [@kingma2013auto]. The inverse function is $f^{-1}_{\mu, \sigma}(x^{(1)}) = (x^{(1)} - \mu)/\sigma$ while the log-Jacobian is equal to $\log{\sigma}$.
|
| 28 |
+
|
| 29 |
+
It is not generally straightforward to evaluate or invert the CDF of a multivariate distribution. However, the most popular multivariate distributions, such as the multivariate Gaussian, can be expressed as transformations of univariate standard normal variables, and so may be treated using the methods of this section. More generally, the next section presents a simple approach to construct structured bijections (i.e., flows) from multivariate distributions expressed as probabilistic programs.
|
| 30 |
+
|
| 31 |
+
A probabilistic program samples a set of variables sequentially, where (as in [\[eq: probprog\]](#eq: probprog){reference-type="ref+label" reference="eq: probprog"}) the distribution of each variable is conditioned on the previously-sampled parent variables. Using the results of Section [3.1](#sec: univariate){reference-type="ref" reference="sec: univariate"}, we can represent this conditional sample as an invertible transformation of a standard normal input. Feeding the result back into the model defines a sequence of local transformations, which together form a multivariate flow layer ([\[fig: code\]](#fig: code){reference-type="ref+label" reference="fig: code"}). More specifically, as with autoregressive flows, the forward transformation of the $j$-th input requires us to have computed the preceding outputs $\boldsymbol{x}^{(i+1)}_{<j}$. This may be expressed recursively as follows: $$\begin{equation}
|
| 32 |
+
\label{eq: forward transform}
|
| 33 |
+
\boldsymbol{x}^{(i+1)}_{j} =
|
| 34 |
+
f^{(i)}_{j,\boldsymbol{\phi}}(\boldsymbol{x}^{(i)}_j; \boldsymbol{x}^{(i+1)}_{<j}),
|
| 35 |
+
\end{equation}$$ where $f^{(i)}_{j,\boldsymbol{\phi}}(\cdot; \boldsymbol{x}^{(i+1)}_{<j})$ denotes the invertible transformation associated to the conditional distribution of the $j$-th variable given the values of its parents. The recursive expression terminates since the parenting graph has an acyclic-directed structure.
|
| 36 |
+
|
| 37 |
+
In the reverse direction, the variables $\boldsymbol{x}^{(i+1)}_{1:n}$ are all given, so all transformations may be evaluated in parallel: $$\begin{equation}
|
| 38 |
+
\label{eq: inverse transform}
|
| 39 |
+
\boldsymbol{x}^{(i)}_{j} = f^{(i)-1}_{j,\boldsymbol{\phi}}(\boldsymbol{x}^{(i+1)}_j; \boldsymbol{x}^{(i+1)}_{<j})
|
| 40 |
+
\end{equation}$$ This is a generalization of the inversion formula used in autoregressive flows [@kingma2016improved; @papamakarios2017masked]. The Jacobian of the transformation is block triangular (up to a permutation of the variables) and, consequently, the log determinant is just the sum of the log determinants of the block diagonal entries: $$\begin{equation}
|
| 41 |
+
\label{eq: ldj}
|
| 42 |
+
\log{\left| \det{J^{-1}_\phi(\boldsymbol{x}^{(i+1)}_{1:n})} \right|} = \sum_j \log{\left|\det{J}^{-1}_{j,\phi}(\boldsymbol{x}^{(i+1)}_j; \boldsymbol{x}^{(i+1)}_{<j}) \right|}
|
| 43 |
+
\end{equation}$$ where ${J}^{-1}_{j,\phi}(\boldsymbol{x}^{(i+1)}_j; \boldsymbol{x}^{(i+1)}_{<j})$ is the Jacobian of the local inverse transformation $f^{(i)-1}_{j,\boldsymbol{\phi}}(\boldsymbol{x}^{(i+1)}_j; \boldsymbol{x}^{(i+1)}_{<j})$.
|
| 44 |
+
|
| 45 |
+
We denote this multivariate invertible transformation as a *structured layer*. If the input $\boldsymbol{x}^{(i)}_{1:n}$ follows a standard Gaussian distribution, then the transformed variable follows the distribution determined by the input probabilistic program. This is not going to be the case when the layer is placed in the middle of a larger flow architecture. However, it will still preserve the structure of the probabilistic program with its potentially complex chain of conditional dependencies.
|
| 46 |
+
|
| 47 |
+
:::: algorithm
|
| 48 |
+
[]{#fig: code label="fig: code"}
|
| 49 |
+
|
| 50 |
+
::: algorithmic
|
| 51 |
+
State $\log{|\det{J}|} \gets 0$ $\{p_l\}_{l=1}^{m_j} \gets \boldsymbol{x}^{(i)}_j$.parents_idx $\tilde{\boldsymbol{x}}^{(i+1)}_j \gets f^{(i)}_{j,\boldsymbol{\phi}}(\boldsymbol{x}^{(i)}_j; \{\boldsymbol{x}^{(i+1)}_{p_l}\}_{l=1}^{m_j})$ $\boldsymbol{x}^{(i+1)}_j \gets \lambda_j^{(i)} \tilde{\boldsymbol{x}}^{(i+1)}_j + (1 - \lambda_j^{(i)}) \boldsymbol{x}^{(i)}_j$ $\log{|\det{J}|} \gets \log{|\det{J}|} + \log\left(\det{|{J}_{j,\phi}(\{\boldsymbol{x}^{(i+1)}_k\}_{k=1}^n) + (1 - \lambda_j^{(i)}) I}| \right)$ $\tilde{\boldsymbol{x}}^{(i+1)}_j \gets f^{(i)}_{j,\boldsymbol{\phi}}(\boldsymbol{x}^{(i)}_j; \boldsymbol{\phi}_j)$ $\boldsymbol{x}^{(i+1)}_j \gets \lambda_j^{(i)} \tilde{\boldsymbol{x}}^{(i+1)}_j + (1 - \lambda_j^{(i)}) \boldsymbol{x}^{(i)}_j$ $\log{|\det{J}|} \gets \log{|\det{J}|} + \log\left(\det{|{J}_{j, \boldsymbol{\phi}_0^j}(\boldsymbol{x}^{(i+1)}_j) + (1 - \lambda_j^{(i)}) I}| \right)$
|
| 52 |
+
:::
|
| 53 |
+
::::
|
| 54 |
+
|
| 55 |
+
Most user-specified models offer a simplified and imperfect description of the data. In order to allow the flow to skip the problematic parts of the model, we consider *gated* layers. In a gated layer, we take each local bijective transformation $g_{j,\boldsymbol{\phi}}$ to be a convex combination of the original transformation $f_{j,\boldsymbol{\phi}}$ and the identity mapping: $$\begin{equation}
|
| 56 |
+
g^{(i)}_{j, \boldsymbol{\phi}}(x^{(i)}_j; \boldsymbol{x}^{(i+1)}_{<j}, \lambda_j^{(i)}) = \lambda_j^{(i)} f^{(i)}_{j, \boldsymbol{\phi}}(x^{(i)}_j; \boldsymbol{x}^{(i+1)}_{<j}) + (1 - \lambda_j^{(i)}) x^{(i)}_j
|
| 57 |
+
\end{equation}$$ This gating technique is conceptually similar to the method used in highway networks [@srivastava2015highway], recurrent highway networks [@zilly2017recurrent] and, more recently, highway flows [@ambrogioni2021automatic-b]. Here the gates $\lambda_j^{(i)} \in (0, 1)$ are capable of decoupling each node from its parents, bypassing part of the original structure of the program. As before, the local inverses $g^{-1}_{\boldsymbol{\phi}}(\boldsymbol{x}^{(i+1)}_j; \boldsymbol{x}^{(i+1)}_{<j}, \lambda_j^{(i)})$ can in general be computed by numeric root search, but the Gaussian case admits a closed form in both directions: $$\begin{equation}
|
| 58 |
+
g_{\mu,\sigma}(x^{(i)}, \lambda) = (1 + \lambda(\sigma - 1)) x^{(i)} + \lambda \mu;\qquad
|
| 59 |
+
g^{-1}_{\mu,\sigma}(x^{(i+1)}, \lambda) = \frac{x^{(i+1)} - \lambda \mu}{1 + \lambda (\sigma - 1)}
|
| 60 |
+
\end{equation}$$ Analogues of [\[eq: forward transform\]](#eq: forward transform){reference-type="ref+label" reference="eq: forward transform"}, [\[eq: inverse transform\]](#eq: inverse transform){reference-type="ref+label" reference="eq: inverse transform"}, and [\[eq: ldj\]](#eq: ldj){reference-type="ref+label" reference="eq: ldj"} then define the gated joint transformation, its inverse and log Jacobian determinant, respectively. Note that the gating affects only the diagonal of the Jacobian, so the overall block-triangular structure is unchanged.
|
| 61 |
+
|
| 62 |
+
All the techniques described in this section can be fully automated in modern deep probabilistic programming frameworks such as TensorFlow Probability. A user-specified probabilistic program in the form given by Eq. [\[eq: probprog\]](#eq: probprog){reference-type="ref" reference="eq: probprog"} is recursively converted into a bijective transformation as shown in the pseudo-code in Fig. [\[fig: code\]](#fig: code){reference-type="ref" reference="fig: code"}. In TensorFlow Probability, the *bijector* abstraction uses pre-implemented analytic inverse and Jacobian formulas and can be adapted to revert to root finding methods otherwise.
|
| 63 |
+
|
| 64 |
+
An EMF is a normalizing flow architecture that contains one or more (gated) structured layers. EMFs can be used to combine the inductive biases of explicit models with those of regular normalizing flow architectures. Several probabilistic programs can be embedded in an EMF either in parallel or sequentially. In the parallel case, a single flow layer comprises two or more (gated) structured layers acting on non-overlapping subsets of variables. The use of several programs embedded in a single architecture allows the model to select the most appropriate structure during training by changing the gating parameters and the downstream/upstream transformations. This allows the user to start with a "wrong but useful model" and then learn the deviation from the distribution of the collected data using flexible generic normalizing flow transformations. A visualization of a hypothetical EMF architecture is given in Fig. [1](#fig: abstract){reference-type="ref" reference="fig: abstract"}.
|
2110.09035/main_diagram/main_diagram.drawio
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
<mxfile host="app.diagrams.net" modified="2021-05-12T13:08:51.685Z" agent="5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.93 Safari/537.36" etag="KIv_OjTzFS0lK1OCEkXp" version="14.6.12" type="google"><diagram id="dJdUIphTvUCgs_umiYo5" name="Page-1">7V1bj5s4FP41eZzIxjaBx7mlq9VWqtSH3X2qaCAJWyakhOlk+uvXDjaXYMAJMZdMplUUjDFw7ufzsTNBjy/7T5GzXX8OXS+YGMDdT9DTxDAgNowJ+w/c96RlZvKGVeS7vFPW8NX/7fFGwFtffdfbFTrGYRjE/rbYuAg3G28RF9qcKArfit2WYVC869ZZ8TuCrOHrwgm8Ure/fTdeJ60WyfX+w/NXa3FnCPiZF0d05kPs1o4bvuXuhZ4n6DEKwzj59rJ/9AJGPEGXZKB5xdn0wSJvE6tcsFzd+69/fvI//4j37s9vzurp+fnOIskwv5zglb8xf9r4XZBgFYWvW97Ni2JvLyO88z04pmP2YDB9XSonXvjixdE77cIHwja/hIuIIY7fMoJDk7et88Qm1hRzVnM2r9LRM0rQL5wYpxDGbCYMHYaKIT14eFv7sfd16yzYmTeqCbRtHb/QOz5B+tXZbRPZXPp7j971YekHwWMYhNFhIOQSz3Ixbd/FUfjDy52xjO/INOmZSvrn6VzD4jL1c9RFlI4g/2eWiV3V5/KUn5UJ7VKd5IebcMMoTmVy4zJaPgF6FEbxOlyFGyf4Kwy3nOr/eXH8zi2K8xqHRZ54ez/+h10+Jfzo39yZpz0f+XDwLg429AVzF7HDf/PnsssOR9l17j2zRdnj05a5z+hyOJ+8MHvLc1hLSRW+RguvrqPFTacTrby6EW0kF5bIC5zY/1V8PhnrD5fSl3Xecx22ob+JdyXJSMdvISyWTjW9vNKJYeBsigp/JYs3LJ20O9VJmNPITD+rdLJv3TIVdSuR1Ra61YqFwqnq9fTneqDO/bsNy+Qwwa9v7POSjj7cxFzcDXBQkZjyOtzQYxt0HAjYQM4GHnZBoxx2DcoI2ca1hGRVnBipd7DRB2fMaDQIdx1aQ5Rz5XdgCoipGGEX4uss3D4zwmY3+eJFPqWjF7WMDIQnbYwMhI/RH3W3kwrStVSckHCNJbizKzCOblgIhcUtmGCq7fjThDzjbw/JQYnP1EDGR+Y38FcsOllQAjJNeWBm1F84wT0/8eK7bnAQCG/n/+bBIOMFT/LouORhQp7YWFQGdok4wJLB5vw8DpDYcc6mQ2zg+VwSOCnZ+HrcycRF3AkRPDVKBjw16gWLDbQxclZmJGfnYBmHsIRx9+yfHsahY8YhGeOwhG9EG9+QQgykG0i1mnFUmlOVqQJtXWSZSaRZThX04PoRff1EUjbMv9QJy9m5KGdUJQ0JKVsAaW6qj2YKkJamRD1hV32iTuDUsLEJLPppIyDQ2gJtTG20sZtpc36mUZ1RVOUgSsashsu1pMaM0gBRmbSAiQmUZA4VXS6P/8nAIzNgLmi3dTb0+yo+kCNpY76gwBTz52soTtwlnuWedqB02mcn01EYDgMPOEwyHH3kZMTiXWhz8eYXE4Q+UZoqwRA5JrSmM2CbsxmGNo1YrDI2MCS5gWWu5FKLLIt4zlqvK9MQ1rwx00hMW9uZl5f9FxbNZf5MXJGKj10cIUls+EUZt6tncJRuYx1NRZ/Sm35J7n7ZiaKrAQ+rDESKUYEpANiwyAwmnwVaE3Y2r/uDNh+o1nzowKvsI7zKwGfgVbABrTpOertEr5QNkvD5zfNaRkvL1U5GusU0T5ualIKZLaHMjjzR4BlPI6HKoJQHoIvU8GYxJgJguWSyXQo7U7wMfbsXeFlFADpkNEYGo83n
5tPBkp2FxpyVSN8J11J08j1Cawg3hwC6IZqjUjfUO0IDkXIFYHcQDa6lYf8QDUQK1YGaMBrOsMoE8U4yCdkvaAORAgo4jtibM34ssA1E1S7yhtt0JBqjBG4gqq/4+wDITWrkGwNmbuBu2I0m7AZiGfx8XQ7kCtEbiOvR3xt8o9kqqRb9c/3qLZHHEm2+QTiXcEljYL6scKI1inNDcNrn2XeoiOCYksQSdIrg4CGsVjyqsoESqnS9mAFirasVS4GQubC870tJ6OR41nLRTijxyJYrQnxbr3gic1X8F1YOXsjIliymrza0NYuVqjfSZSkQf7xVi6domGppOxfY3iJEopD96wLbFbxR996eDHPtonD+1eFCC9tERr54ERJDp9UfBC/G6ieI1vWLnUbPZ7JmPFp0W8F4CRBRuNTmKEH4mg7i8JaS8fFWMWoJ9ki/CxmJDMlIIL3bIkaFRYyWPbULfyWD3i1mZytXyhTLi968XVykGKwnWXtkj+aL0yPIcyZD93otprGVV9RdPBmyL1L7opE0WnGVTrehaF6HN6B5XmFjbkVGvQnGOGuMgASt+Fg1RsKaN28loryBX9tlZC1ZKoGmLu+emmboyFRMyoi6GVjWBGjaNJnLpcJY4qz0bRgAOtkwoI5LDahmv9SR1OpfO8QJKhLo1LgPHpzpdib0AnU8jaiM6i4n2lJ8YSaaU3xAFF0EBEa/PkKhimMkSHiDyt6lk0CD1Vkx8keCzbToVEUtVVc6ZVy9TolxEJiaksAkp3IyzRmuAnay3U9TxKyGfMlnuvUFgVp3++m07B80b/czLLEU7O8lPWkmVteiCIdZcqFVZmFjycUUlhegDEyMJRZjMOGNnoni88Mi1Q0iUsPcHBbBfmcT4QB2ZSSw5Fxt0L9zhcq7IZwwb3m+zYcNu0mICcqTq+71EVCSxl69S6hfMcIi84JdL8bm2Jgag3cYJ8xXX1QtTp6btiTbu/Y6/wqhbG76NhGoRxPr1xETqokFaSFMfkYzM2jIppEusbUTvC0MbC9cDO06tkamZFOebrd3gjK0QPDdyPg+eAaX7EqR4QDM59aFqseYV8H5arGiw57Jfomx4/3wZThExtVbTaCsJvAoWJ7N+mejLBHL9tu7sVHKxlLMR/o3s4Ysb8zc642TCgppk/4VUpK9cmYOlm21M/D1xcO6fnpkJtkUodsfHjEkCeuNkScz0sZ9MxJVRToT8rjxfo7Brnb9Q0DHv3hDVLcG1cdFU6W4pYTgu85ufZh/gEVesvYvTkz5eFgBQYdBKRvEz9EbzTw4Shlq5waaSwNJDUFb7nh35CdToEyMkExNlHa8K42TRrxVA1Vsnde8xwM9jEIGLmTdI2e7/hy6HuvxPw==</diagram></mxfile>
|
2110.09035/main_diagram/main_diagram.pdf
ADDED
|
Binary file (28.9 kB). View file
|
|
|
2110.09035/paper_text/intro_method.md
ADDED
|
@@ -0,0 +1,150 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Introduction
|
| 2 |
+
|
| 3 |
+
Network systems, such as infrastructure systems, supply chains, routing networks, and peer-to-peer computing networks, are vulnerable to malicious attacks. *Network resilience*, in the context of network science, is a measurement characterizing the ability of a system to defend itself from such failures and attacks [@schneider2011mitigation]. A resilient network should continue to function and maintain an acceptable level of utility when the network partially fails. Therefore, resilience is crucial when designing new systems or upgrading existing systems toward high reliability.
|
| 4 |
+
|
| 5 |
+
Current network resilience optimization methods improve the resilience of networks over graph topologies based on an atomic operation called *edge rewiring* [@schneider2011mitigation; @chan2016optimizing; @rong2018heuristic]. Concretely, for a given graph $G=(V,E)$ and two existing edges $AC$ and $BD$, an edge rewiring operation alters the graph structure by removing $AC$ and $BD$ and adding $AB$ and $CD$, where $AC, BD\in E$ and $AB, CD, AD, BC\notin E$. The edge rewiring operation has some nice properties compared to the simple addition or deletion of edges: 1) it preserves the node degrees, the number of edges, and the total degree of the graph, while addition/deletion may change them; 2) it causes minimal changes in the utility measured in terms of the graph Laplacian, while addition/deletion may lead to an unpredictable degradation of network utility [@DBLP:conf/kdd/0001WDWT21; @jaume20202]. The challenge of the resilience task lies in determining which two edges are selected for rewiring, with the complexity of $O(E^{2T})$ for $T$ successive steps of edge rewiring.
|
| 6 |
+
|
| 7 |
+
Traditional non-learning-based resilience optimization methods typically fall into the categories of evolutionary computation [@zhou2014memetic] or heuristic-based [@schneider2011mitigation; @chan2016optimizing; @rong2018heuristic; @yaziciouglu2015formation], with the following limitations:
|
| 8 |
+
|
| 9 |
+
- *Transductivity*. Traditional methods are transductive since they search the resilience topology on a particular problem instance. This search procedure is performed for every individual graph without generalization.
|
| 10 |
+
|
| 11 |
+
- *Local optimality*. It is NP-hard to combinatorially choose the edges to rewire to obtain the globally optimal resilience [@mosk2008maximum]. Previous studies predominantly adopt greedy-like algorithms, yielding local optimality in practice [@chan2016optimizing].
|
| 12 |
+
|
| 13 |
+
- *Utility Loss*. Rewiring operations in resilience optimization may potentially lead to a degradation of the network utility, which may jeopardize the functioning of the network.
|
| 14 |
+
|
| 15 |
+
Although the learning-based paradigm equipped with GNNs has proved powerful for a large variety of graph tasks with rich features [@guided_tree_search; @effcient_TSP; @TSP_pretrain; @CO_on_graph; @rl_for_vrp; @rl_for_vrp_with_attention; @online_vrp], it still remains opaque how to effectively adapt such approaches to resilience optimization where only topological structure is available. One may infer that such a lack of features can significantly hinder the learning ability, and a case agreeing with this has been discovered in solving the traveling salesman problem (TSP): Boffa showed that the performance of GNNs degenerates largely when node coordinates are missing (but pairwise distance is given), compared to the case when coordinates are available [@boffa2022neural]. Although Boffa adopted a distance encoding strategy to alleviate the performance gap in TSP [@boffa2022neural; @li2020distance], we empirically found that this encoding strategy does not work well for the more challenging resilience task (See Sec. [5.2](#sec:baseline){reference-type="ref" reference="sec:baseline"}). As such, it is demanding to devise a novel method that can be applicable for the resilience task without rich features. Readers are referred to Appendix [\[sec:whygnnfails\]](#sec:whygnnfails){reference-type="ref" reference="sec:whygnnfails"} for a more detailed analysis.
|
| 16 |
+
|
| 17 |
+
In this work, we present ResiNet, the first *inductive* learning-based framework for discovering resilient network topology using successive edge rewiring operations. To overcome the above limitation of GNNs in modeling graphs without rich features, we specially design a topology-oriented variant of GNN called **Fi**lt**r**ation **e**nhanced **GNN** (FireGNN). FireGNN creates a series of subgraphs (the filtration) by successively removing the node with the highest degree from the graph and then learns to aggregate the node representations from each subgraph. This filtration process innovation is inspired by persistent homology and the approximation of the persistence diagram [@edelsbrunner2008persistent; @aktas2019persistence; @hofer2020graph].
|
| 18 |
+
|
| 19 |
+
The main contributions of this paper are summarized as follows:
|
| 20 |
+
|
| 21 |
+
1. We propose ResiNet, the first data-driven learning-based framework to boost network resilience inductively in a degree-preserving manner with moderate loss of the network utility. ResiNet forms resilience optimization into a successive sequential decision process of edge rewiring operations. Extensive experiments show that ResiNet achieves near-optimal resilience gain while balancing network utilities. Existing approaches are outperformed by a large margin.
|
| 22 |
+
|
| 23 |
+
2. FireGNN, our technical innovation serving as the graph feature extractor, is capable of learning meaningful representations from pure topological structures, which provides sufficient training signals to learn an RL agent to perform successive edge rewiring operations inductively.
|
| 24 |
+
|
| 25 |
+
# Method
|
| 26 |
+
|
| 27 |
+
An undirected graph is defined as $G= (V, E)$, where $V = \{1, 2, \dots, N\}$ is the set of $N$ nodes, $E$ is the set of $M$ edges, $A \in \{0, 1\}^{N\times N}$ is the adjacency matrix, and $F \in \mathbb{R}^{N \times d}$ is the $d$-dimensional node feature matrix[^1]. The degree of a node is defined as $d_i = \sum_{j=1}^N A_{ij}$, and a node with degree 0 is called an isolated node.
|
| 28 |
+
|
| 29 |
+
Given the network resilience metric $\mathcal{R}(G)$ and the utility metric $\mathcal{U}(G)$, let $\mathbb{G}_G$ denote the set of graphs with the *same* node degrees as $G$. The objective of boosting the resilience of $G$ is to find a target graph $G^{\star} \in \mathbb{G}_G$, which maximizes the network resilience while preserving the network utility. Formally, the problem of maximizing network resilience is formulated as $$\begin{equation*}
|
| 30 |
+
\label{eq:maximizingrobustness}
|
| 31 |
+
G^{\star} = \mathop{\arg \max}\limits_{G^{'} \in \mathbb{G}_G}
|
| 32 |
+
\, \alpha \cdot \mathcal{R}(G^{'}) + (1 - \alpha) \cdot \mathcal{U}(G^{'}) \, ,
|
| 33 |
+
\end{equation*}$$ where $\alpha\in\mathbb{R}$ is the scalar weight that balances the resilience and the utility.
|
| 34 |
+
|
| 35 |
+
:::: wrapfigure
|
| 36 |
+
r0.4
|
| 37 |
+
|
| 38 |
+
::: center
|
| 39 |
+
{width="40%"}
|
| 40 |
+
:::
|
| 41 |
+
::::
|
| 42 |
+
|
| 43 |
+
Consistent with the conventional setting in network science, two families of resilience metrics $\mathcal{R(\cdot)}$ and two examples of utility metrics $\mathcal{U(\cdot)}$ are used in our experiments, with detailed definitions deferred to Appendix [\[sec:appendix_obj\]](#sec:appendix_obj){reference-type="ref" reference="sec:appendix_obj"}.
|
| 44 |
+
|
| 45 |
+
To satisfy the constraint of preserving degree, currently, the edge rewiring operation is the default atomic action for obtaining new graph candidates $G^{'}$ from $G$. As is shown in Figure [\[fig:explain_rewire\]](#fig:explain_rewire){reference-type="ref" reference="fig:explain_rewire"}, at each step, two existing edges $AC$ and $BD$ are first selected. Then the edge rewiring alters the graph structure by removing $AC$ and $BD$ and adding $AB$ and $CD$, where $AB, CD, AD, BC\notin E$. Combinatorially, a total of $T$ successive steps of edge rewiring has the complexity of $O(E^{2T})$.
|
| 46 |
+
|
| 47 |
+
<figure id="fig:policy_net" data-latex-placement="h">
|
| 48 |
+
<figure>
|
| 49 |
+
<embed src="figs/policy_architecture.pdf" style="width:100.0%" />
|
| 50 |
+
</figure>
|
| 51 |
+
<figcaption>Overview of the architecture of ResiNet to select two edges for rewiring.</figcaption>
|
| 52 |
+
</figure>
|
| 53 |
+
|
| 54 |
+
In this section, we formulate the task of boosting network resilience as a reinforcement learning task by learning to select two edges and rewire them successively. We first present the graph resilience-aware environment design and describe our innovation FireGNN in detail. Finally, we present the graph policy network that guides the edge selection and rewiring process.
|
| 55 |
+
|
| 56 |
+
We formulate the network resilience optimization problem via successive edge rewiring operations into the MDP framework. The Markov property denotes that the graph obtained at time step $t+1$ relies only on the graph at time step $t$ and the rewiring operation, reducing the complexity from original $O(E^{2T})$ to $O(TE^2)$.
|
| 57 |
+
|
| 58 |
+
As illustrated in Figure [1](#fig:policy_net){reference-type="ref" reference="fig:policy_net"}, the environment performs the resilience optimization in an auto-regressive step-wise way through a sequence of edge rewiring actions guided by ResiNet. Given an input graph, the agent first decides whether to terminate or not. If it chooses not to terminate, it selects one edge from the graph to remove, receives the very edge it just selected as the auto-regression signal, and then selects another edge to remove. Four nodes of these two removed edges are re-combined, forming two new edges to be added to the graph. The optimization process repeats until the agent decides to terminate. The details of the design of the state, the action, the transition dynamics, and the reward are presented as follows.
|
| 59 |
+
|
| 60 |
+
The fully observable state is formulated as $S_t=G_t$, where $G_t$ is the current input graph at step $t$. The detailed node feature initialization strategy is given in Appendix [\[appendix:node_features\]](#appendix:node_features){reference-type="ref" reference="appendix:node_features"}.
|
| 61 |
+
|
| 62 |
+
ResiNet is equipped with a node permutation-invariant, variable-dimensional action space. Given a graph $G_t$, the action $a_t$ is to select two edges and the rewiring order. The agent first chooses an edge $e_1=AC$ and a direction $A\to C$. Then, conditioning on the state, $e_1$, and the direction, the agent chooses an edge $e_2=BD$ such that $AB, CD, AD, BC\notin E$ and a direction $B\to D$. The heads of the two edges reconnect as a new edge $AB$, and so do the tails as $CD$. As $A\to C$, $B\to D$ and $C\to A$, $D\to B$ refer to the same rewiring operation, the choice of the direction of $e_1$ is randomized (this randomized bit is still an input of choosing $e_2$). This effectively reduces the size of the action space by half. In this way, the action space is the set of all feasible pairs of $(e_1, e_2)\in E^2$, with a variable size no larger than $2|E|(|E|-1)$.
|
| 63 |
+
|
| 64 |
+
The formulation of the action space implies that if the agent does not terminate at step $t$, the selected action must form an edge rewiring. This edge rewiring is executed by the environment and the graph transits to the new graph.
|
| 65 |
+
|
| 66 |
+
Note that in some other works, infeasible operations are also included in the action space (to make the action space constant through the process) [@GCPN; @trivedi2020graphopt]. In these works, if the operation is infeasible, it is not executed, and the state is not changed. This reduces the sample efficiency, causes biased gradient estimations [@huang2020closer], and makes the process prone to getting stuck at the same state (which requires manually disabling the specific action in the next step). ResiNet takes advantage of the state-dependent variable action space composed of only feasible operations.
|
| 67 |
+
|
| 68 |
+
ResiNet aims to optimize the resilience while preserving the utility, forming a complicated and possibly unknown objective function. Despite this, by [@wakuta1995vector], an MDP that maximizes a complicated objective is equivalent to an MDP that maximizes a linear combination of resilience and utility for some coefficient factor. This fact motivates us to design the reward as the step-wise gain of such a linear combination as $$\begin{equation*}
|
| 69 |
+
R_t = \alpha \cdot \mathcal{R}(G_{t+1}) + (1 - \alpha) \cdot \mathcal{U}(G_{t+1}) \, - (\alpha \cdot \mathcal{R}(G_{t}) + (1 - \alpha) \cdot \mathcal{U}(G_{t}) ) \,,
|
| 70 |
+
\end{equation*}$$ where $\mathcal{R}(G)$ and $\mathcal{U}(G)$ are the resilience and the utility functions, respectively. The cumulative reward $\sum_{t=0}^{T-1}R_t$ up to time $T$ is then the total gain of such a linear combination.
|
| 71 |
+
|
| 72 |
+
:::: wrapfigure
|
| 73 |
+
r0.25
|
| 74 |
+
|
| 75 |
+
::: center
|
| 76 |
+
{width="25%"}
|
| 77 |
+
:::
|
| 78 |
+
::::
|
| 79 |
+
|
| 80 |
+
Motivated by graph filtration in persistent homology [@edelsbrunner2008persistent], we design the **fi**lt**r**ated graph **e**nhanced **GNN** termed FireGNN to model graphs without rich features, or even with only topology. As shown in Figure [\[fig:filtration_process\]](#fig:filtration_process){reference-type="ref" reference="fig:filtration_process"}, for a given input graph $G$, FireGNN transforms $G$ from the static version to a temporal version consisting of a sequence of subgraphs, by repeatedly removing the node with the highest degree[^2]. Observing a sequence of nested subgraphs of $G$ grants FireGNN the capability to observe how $G$ evolves towards being empty. Then FireGNN aligns and aggregates the node, edge and graph embeddings from each subgraph, leading to meaningful representations at the node, edge, and graph levels. Formally, the filtration in FireGNN is constructed as $$\begin{equation*}
|
| 81 |
+
%\label{eq:filtration}
|
| 82 |
+
\begin{split}
|
| 83 |
+
G^{(k-1)} &= G^{(k)} - v_k, \, \quad v_k = \argmax_{ v_i \in G^{(k)}} \text{DEGREE}(v_i) \\
|
| 84 |
+
(V, \emptyset) &= G^{(0)} \subset G^{(1)} \subset \dots \subset G^{(N)} = G \\
|
| 85 |
+
\tilde{G} &= [G^{(0)}, G^{(1)}, \dots, G^{(N)}] \, ,
|
| 86 |
+
\end{split}
|
| 87 |
+
\end{equation*}$$ where $G^{(k)}$ denotes the remaining graph after removing $N-k$ nodes with highest node degrees, $v_k$ denotes the node with highest degree in current subgraph $G^{(k)}$, DEGREE$(\cdot)$ measures the node degree, $G^{(N)}$ is the original graph, and $G^{(0)}$ contains no edge. The sequence of the nested subgraphs of $G$ is termed the filtrated graph $\tilde{G}$.
|
| 88 |
+
|
| 89 |
+
Regular GNN only operates on the original graph $G$ to obtain the node embedding for each node $v_i$ as $h(v_i) = \phi(G^{(N)} = G)_i \, ,$ where $\phi(\cdot)$ denotes a standard GNN model. In FireGNN, by using the top $K+1$ subgraphs in a graph filtration, the final node embedding $h(v_i)$ of $v_i$ is obtained by $$\begin{equation*}
|
| 90 |
+
\label{eq:filtratedGNN}
|
| 91 |
+
% h(v_i) = \text{Agg}_N \left (\phi(G^{(N-K)})_i, \phi(G^{(N-k+1)})_i, \dots, \phi(G^{(N-1)})_i, \phi(G^{(N)})_i \right ) \, ,
|
| 92 |
+
h(v_i) = \text{AGG}_N \left(
|
| 93 |
+
h^{(N-K)}(v_i), \dots, h^{(N-1)}(v_i), h^{(N)}(v_i) \right ) \, ,
|
| 94 |
+
\end{equation*}$$ where $\text{AGG}_N(\cdot)$ denotes a node-level aggregation function, $h^{(k)}(v_i)$ is the node embedding of $i$ in the $k$-th subgraph $G^{(k)}$, and $K \in [N]$. In practice, $h^{(k)}(v_i)$ is discarded when calculating $h(v_i)$ if $v_i$ is isolated or not included in $G^{(k)}$.
|
| 95 |
+
|
| 96 |
+
The directed edge embedding $h^{(k)}(e_{ij})$ of the edge from node $i$ to node $j$ in each subgraph is obtained by combining the embeddings of the two end vertices in $G^{(k)}$ as $$\begin{equation*}
|
| 97 |
+
\label{eq:filtratedGNN_subedge_embedding}
|
| 98 |
+
h^{(k)}(e_{ij}) = m_f \left (\text{AGG}_{N \to E} \left (h^{(k)}(v_i), h^{(k)}(v_j)
|
| 99 |
+
% , \dots, h(G^{(N-1)}), h(G^{(N)}))_i
|
| 100 |
+
\right ) \right) \, ,
|
| 101 |
+
\end{equation*}$$ where $\text{AGG}_{N \to E}(\cdot)$ denotes an aggregation function for obtaining edge embedding from two end vertices (typically chosen from $\mathtt{min}$, $\mathtt{max}$, $\mathtt{sum}$, $\mathtt{difference}$, and $\mathtt{multiplication}$). $m_f(\cdot)$ is a multilayer perceptron (MLP) model that ensures the consistency between the dimensions of the edge embedding and the graph embedding.
|
| 102 |
+
|
| 103 |
+
The final embedding of the directed edge $e_{ij}$ of the filtrated graph $\tilde{G}$ is given by $$\begin{equation*}
|
| 104 |
+
\label{eq:filtratedGNN_edge_embedding}
|
| 105 |
+
% h(e_{ij}) = \text{Agg}_E \left (h(G^{(0)})_{i \to j}, h(G^{(1)})_{i \to j}, \dots, h(G^{(N-1)})_{i \to j}, h(G^{(N)})_{i \to j} \right ) \, ,
|
| 106 |
+
% \resizebox{0.485\textwidth}{!}{
|
| 107 |
+
h(e_{ij}) = \text{AGG}_E \left (
|
| 108 |
+
h^{(N-K)}(e_{ij}) , \dots, h^{(N-1)}(e_{ij}), h^{(N)}(e_{ij}) \right ) \, ,
|
| 109 |
+
% }
|
| 110 |
+
\end{equation*}$$ where $\text{AGG}_E(\cdot)$ denotes an edge-level aggregation function.
|
| 111 |
+
|
| 112 |
+
With the node embedding $h^{(k)}(v_i)$ of each subgraph $G^{(k)}$ available, the graph embedding $h^{(k)}(G)$ of each subgraph $G^{(k)}$ is calculated by a readout function (e.g., $\mathtt{mean}$, $\mathtt{sum}$) on all non-isolated nodes in $G^{(k)}$ as $$\begin{equation*}
|
| 113 |
+
\label{eq:filtratedGNN_subgraph_embedding}
|
| 114 |
+
% \begin{split}
|
| 115 |
+
h^{(k)}(G) = \, \text{READOUT} \left ( {h^{(k)}(v_i)}
|
| 116 |
+
% , h(G^{(k)}_2), \dots, h(G^{(k)}_N)
|
| 117 |
+
\right) \,
|
| 118 |
+
\forall v_i \in G^{(k)} \, \text{and} \, \, d^{(k)}_i \ge 1 \,. \\
|
| 119 |
+
% \end{split}
|
| 120 |
+
\end{equation*}$$ The final graph embedding of the filtrated graph $\tilde{G}$ is given by $$\begin{equation*}
|
| 121 |
+
\label{eq:filtratedGNN_graph_embedding}
|
| 122 |
+
h(G) = \text{AGG}_G \left (h^{(N-K)}(G), \dots, h^{(N-1)}(G), h^{(N)}(G) \right ) \, ,
|
| 123 |
+
\end{equation*}$$ where $\text{AGG}_G(\cdot)$ denotes a graph-level aggregation function.
|
| 124 |
+
|
| 125 |
+
Having presented the details of the graph resilience environment and FireGNN, in this section, we describe the policy network architecture of ResiNet in detail, which learns to select two existing edges for rewiring at each step. At time step $t$, the policy network uses FireGNN as the graph extractor to obtain the directed edge embedding $h(e_{ij}) \in \mathbb{R}^{2|E| \times d}$ and the graph embedding $h(G) \in \mathbb{R}^{d}$ from the filtrated graph $\tilde{G}_t$, and outputs an action $a_{t}$ representing two selected rewired edges, leading to the new state $G_{t+1}$ and the reward $R_t$.
|
| 126 |
+
|
| 127 |
+
To be inductive, we adopt a special autoregressive node permutation-invariant dimension-variable action space to model the selection of edges from graphs with arbitrary sizes and permutations. The detailed mechanism of obtaining the action $a_t$ based on the edge embedding and the graph embedding is presented as follows, further reducing the complexity from $O(TE^2)$ to $O(TE)$.
|
| 128 |
+
|
| 129 |
+
An edge rewiring action $a_t$ at time step $t$ involves the prediction of the termination probability $a_t^{(0)}$ and the selection of two edges ($a_t^{(1)}$ and $a_t^{(2)}$) and the rewiring order. The action space of $a_t^{(0)}$ is binary; however, the selection of two edges imposes a huge action space in $O(|E|^2)$, which is too expensive to sample from even for a small graph. Instead of selecting two edges simultaneously, we decompose the joint action $a_t$ into $a_t = (a_t^{(0)}, a_t^{(1)}, a_t^{(2)})$, where $a_t^{(1)}$ and $a_t^{(2)}$ are two existing edges which do not share any common node (recall that $a_t^{(1)}$ and $a_t^{(2)}$ are directed edges for an undirected graph). Thus the probability of $a_t$ is formulated as $$\begin{equation*}
|
| 130 |
+
\mathbb{P}(a_t|s_t) = \mathbb{P}(a_t^{(0)} |s_t) \mathbb{P}(a_t^{(1)} |s_t, a_t^{(0)}) \mathbb{P}(a_t^{(2)} |s_t, a_t^{(0)}, a_t^{(1)}) \,.
|
| 131 |
+
\end{equation*}$$
|
| 132 |
+
|
| 133 |
+
The first policy network $\pi_0(\cdot)$ takes the graph embedding as input and outputs the probability distribution of the first action that decides to terminate or not as $$\begin{equation*}
|
| 134 |
+
\label{eq:prob_ac0}
|
| 135 |
+
% \begin{split}
|
| 136 |
+
% h(G) &= \textmd{Enc}(s_t) \\
|
| 137 |
+
% l_0 &= m_{f_0}(h(G)) \\
|
| 138 |
+
\mathbb{P}(a_{t}^{(0)} |s_t) = \pi_0(h(G)) \,,
|
| 139 |
+
% \end{split}
|
| 140 |
+
\end{equation*}$$ where $\pi_0(\cdot)$ is implemented by a two layer MLP. Then $a_t^{(0)} \sim \text{Bernoulli}(\mathbb{P}(a_{t}^{(0)} |s_t)) \in \{0, 1\}$.
|
| 141 |
+
|
| 142 |
+
If the signal $a_t^{(0)}$ given by the agent decides to continue to rewire, two edges are then selected in an auto-regressive way. The signal of continuing to rewire $a_t^{(0)}$ is input to the selection of two edges as a one-hot encoding vector $l_c$. The second policy network $\pi_1(\cdot)$ takes the graph embedding and $l_c$ as input and outputs a latent vector $l_1 \in \mathbb{R}^{d}$. The pointer network [@vinyals2015pointer] is used to measure the proximity between $l_1$ and each edge embedding $h(e_{ij})$ in $G$ to obtain the first edge selection probability distribution. Then, to select the second edge, the graph embedding $h(G)$ and the first selected edge embedding $h(e_t^{(1)})$ and $l_c$ are concatenated and fed into the third policy network $\pi_2(\cdot)$. $\pi_2(\cdot)$ obtains the latent vector $l_2$ for selecting the second edge using a respective pointer network. The overall process can be formulated as $$\begin{equation*}
|
| 143 |
+
\label{eq:action:probs}
|
| 144 |
+
\begin{split}
|
| 145 |
+
l_1 &= \pi_1([h(G), l_c]) \\
|
| 146 |
+
\mathbb{P}(a_{t}^{(1)} |s_t, a_t^{(0)}) &= f_1(l_1, h(e_{ij})), \, \forall e_{ij} \in E \\
|
| 147 |
+
l_2 &= \pi_2([h(G), h_{e_t^{(1)}}, l_c]) \\
|
| 148 |
+
\mathbb{P}(a_{t}^{(2)} |s_t, a_t^{(1)}, a_t^{(0)}) &= f_2(l_2, h(e_{ij})), \, \forall e_{ij} \in E \, ,
|
| 149 |
+
\end{split}
|
| 150 |
+
\end{equation*}$$ where $\pi_i(\cdot)$ is a two-layer MLP model, $[\cdot,\cdot]$ denotes the concatenation operator, $h_{e_t^{(1)}}$ is the embedding of the first selected edge at step $t$, and $f_i(\cdot)$ is a pointer network.
|
2110.10031/main_diagram/main_diagram.drawio
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
<mxfile host="app.diagrams.net" modified="2021-10-05T20:33:03.948Z" agent="5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/93.0.4577.82 Safari/537.36" etag="uqOyarOnstxEKy02wqTK" version="15.3.8" type="google"><diagram id="WIdgV9lL6vWbblT1w2Wm" name="Page-1">7V1de5u4Ev41vnQeJPF5mY8m27PdbbfZfU57bvbBINu0GLyAHWd//ZEw2CCJhDhgyTbtRYwEAvS+Gs1ohtEI3S42D4m7nP8W+zgcQc3fjNDdCEKILI38oSXP2xIAob0tmSWBX5TtCx6Df/G20CgKV4GP09p5WRyHWbCsF3pxFGEvq5W5SRI/1U+bxmH9pkt3VtxQ2xc8em6IudP+G/jZfFuKkFY5/RcczOZZ+cbI2dYs3PLsoo107vrxU+Vm6MMI3SZxnG1/LTa3OKS9V/bL9hb3DbW7J0twlLW5QEcz34Z/Ws53/+On39cLy4LBGICio9duuCreeQTNkLR4M41Jw9Cc0d/1Mi3NnoseMv9ZxWXFOM3huyYnIG252VeWrTy6i2WIx09Bisk5HxfLOMncyMNl6+ThRTedJGwJdyKsPRTM8IaWz7NFSAoA+ZlmSfwT38ZhnJCSKI4wfeggDJkiNwxmETn0SK9iUn6zxkkWEEJcFxWLwPfpbW6e5kGGH5euR+/5ROhPypJ4FfmYdrhGb+9681WCH2jVnU4KlnFAW/2wJo2nxUk8jAWy9MZ4UykqYH3A8QJnyTM5pazVre0lxSDTC0yf9ozVUVE2r5BVL0jsFoNktmt5TyPyo2DSW1gl4BSDUL2nRH1ZAa9VR1bBHEE0dX3L9TjkSY2W/9vVlOMadQOGqZlXRg0OUAqLCh6glDdVPMy+8IAq4DH1PMeRgAfg8ACy8UAK4OF52JhORXggEznI7wsPw7EYPBz9ihdYxwVEVwEQH0/siYQBgrgBgmQPEJFScnSBZXvYkzGB6Bweumw8TAXw8DUPYygBD4PDw5CNh6UAHtPpFEoZHyaHhykbD1sNPKa2LQEPi8PDko2HowAeshQs0+bwsGXjUWrcxwWE797modE02XQBiMMB4kgHRAUTvXmE9C2xAAMIlG6iAzVsdDlTusXZ6FC6jQ5UMNJlzekW5ACB0gFRwUinq1hSRghnpEPpRrrQdXApqyYWZ6VD6VY6aGGGpHN3SX9OQ7y5pg4y0h048oufd17opmngjRjfiZtkhWsO6NsLikPrarumG68SD3/BSUDeBCcU4iCakXoqu8jVM5w1VDJw2hr9T6lSdKIlgnZ3Vh3a/FlLL1te/RNn3nz0iqMF+6UPsAHyCqCGAM+yLMGhmwXruudQBHJxhy+U3XtGIY3hk6GVDCsbKbp5e92eLFxTpsk1VW+ogIRtKGfd7sXfsb5tcLyjnfxYHBauNm+VrHPRQHGrC4o4yebxLI7c8FMcL4tTfuAsey54566yuE5SvAmyb/Ry0mnbo++VmrtN0XJ+8FweROQ9KxfRw+/Vuv1l+dH+ut2AKd6lswGwk1rkHvcB7fYGU6aRzttHeQEdWGg228d66US75QBpzfz3eU2sU2EVyw4Gy4O5wrOgiT3vZ0d3oItlFGDlHXAYadezkNL1U6FTV0LqFGn4upAylRJSsIXZSJoJlil+XSF20+U2qGkabCjtDl32YuCZAN+f7s+s1ADNQg7uRkMG0GAGOOTdvEigT6G+9GPYwoK8FGwsRkM0JWOj24M0VlgaGy2lcckPRaSxgQZWnQGrSsGtCKtgi4WvC5lH9CYlXto8Ango5Ix47aARD857xJttR7yj1IjXT2bp4SLnkdasUmse0Z2BVWfAKsV0XtgiWPNCtBOkq6admMOIV3jEW6c5j5TevYFVJ80q1
eaRFkHmFzKPQFu1eUQAhdJWLqiO9zO3ce22471t/MmRZpHBH3oWrFJsFmkRk3YpswhUbRYZ9EaVR7zT1hox1BrxLT6+upARjyzFIiDKhgdsADIUi4BAMtaGTliyHjKTHEMaW11L46b4SdYoNdpFi3cW5D1E7CisPZTG5anFT6LBJ3YWrOpcCr7P0hniwM6CVWpZOuVzD9o0QEAxS0e/uE/QTmnEt17NVM3T+lq21EMzo36OwiCiSVEfswS7iyHPqUjKIAivuO8W+I+uob1L71EVNbAvUWOIHKVdkKJIpzxQQeiSRTUi2LuskhUm2MekgQn7ogFexPT5Bh6IUvlAJg1D+SFRNQsDEGkeRl88UCFZoqzUSkY5Yb+U6UoTwMFmKOguVWKbJe/XEmB0pEzZLyhTNVW0OeNFI0aKZLEAmsHm9yXC+tBEFqBybXNrDR+KE0jd58pp+bBJX3h0ALmb2WXJnoLbZjtdSrVEhmQXU8efiRtEw8wh1CA0G10xHnmo8TqEUFr1ZrZaLT4fr0kKFUTXCUsr4LBDHpgHSyu71DN2eZutnkSVwz22eRxBJSUH21t0mKb0YB1IDMdiZzaRdqML2NqbzWGJ1iNUxmM6xWaeP68LPGxO0xCk8TwuHlISc79nfJR2Qhd4OA6HhyMZjzaxSNxKag/T54uBDOEkv3upNHEqUTO+qs+vfJbE3YZTb55cubTW0GZo09HkavEJ5oHx4rM5fP5au7YZVz+zsS1acZJk2hpnqR/avDHLyqq2/OW1B66prpRDrelO/bJRFOndhRH71Y38eEGq8+3jKAEHg1Yw/fJc3UWEvWrO9rUW6vS1sPFXSmBpSYM9HECCwtUBssDWYB1YkddLBOwO7e6R7cvp9TGaYtJXdFNI7Y8VHhwfTZl3mPBPwcck0Dqmru00y//JoduIQl1EkRG6f2XHUFI8aSEP+nC++HjqrvKHeJNZRl7iTQr+G5hi2MyyFBS4yI7qkwHaMaTHV5xSIM5PfPRJFt1hVEdBVnuhxg070CF+fYLfftXG//lncY8nqx//rtar34SbyW4h9YN1g2CZul6dPHMcrjEFpUqZQ0l27bvL3KiA2qevr0ui2mOKWfdabCDLsC6Q1hCDtAEFIRREBouCafrL0doipvJsd/nQQX0jZ/ne81JbuMg9PhDrz5EOB1BhYztZOzkzDi+BwntkMPpSYj5E6yCJowWOWmsvp2/jasxYs0VzUV8m7v+SHz/D8dfsj5sw+Tv7/LBe/aW9oHgM9oxMe0bTuJi/Y8oCIVfgwBUluaJbzMrZMW1fIVNEAT4DU+QzxSz9FzKY4rmJfTeHXz///eD8bt1G5vW3QDj/MCBd+m5ujJtQwAFZnkODDxY61HOIELt+x20M191WSUImima3BiYGC5dCUiUcuxyW0c/pdqWf3AkOv8RpkAUxrZ3EWRYvyAkhrbhxvZ+zXBQxu6LmQSv5za7LDycpLbivKIvnuZtn2TLNxSWRiveeH+lXgRdH04AIueTKo/7Fe9/NXPKHlqdUNrpk7IxXKU7GuTCji0pjHOJFLtPgvUWlwX2WuOl87JPyDI8TvIjXeExkx9WS+imFMqgVS4sL2H1ahJ+5GIIFGjYZRGdSqUXE2SCVVJVKQGcX/ZB+cGy+xS4VI/2oUqlFrN3ARGWZyLIH2FywdGsmsqwGpS53JCYKN2/vYiUoj3jB46cgpc6Gj4tlnGRu7t06JadWB/OgzewzI9j8GokiS53elPNmOy5dutHhkI/oF3T6yLhduNmcRoNaN7+MrLuieI/79i7njjsAusF+pSNI1NZXULEY+uYkAAP03UFvoPoX3uVn9tJgF33X25mQP7mwxg4Q1vW6u9PuDWFymMS00/ezPnmn+TbBAvrwfw==</diagram></mxfile>
|
2110.10031/main_diagram/main_diagram.pdf
ADDED
|
Binary file (24.1 kB). View file
|
|
|
2110.10031/paper_text/intro_method.md
ADDED
|
@@ -0,0 +1,90 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Introduction
|
| 2 |
+
|
| 3 |
+
Continual learning (CL) is a learning scenario where a model learns from a continuous and online stream of data and is regarded as a more realistic and practical learning setup than offline learning on a fixed dataset (He et al., 2020). However, many CL methods still focus on the offline setup (Kirkpatrick et al., 2017; Rebuffi et al., 2017; Saha et al., 2021) instead of the more realistic online setup. These methods assume access to a large storage, storing the entire data of the current task and iterating on it multiple times. On the other hand, we are interested extensively in the more realistic online setup where only a small memory is allowed as storage. Meanwhile, even for the online CL methods, we argue they have room for more practical and realistic improvements concerning multiple crucial aspects. The aspects include the class distributions such as the disjoint (Rebuffi et al., 2017) or the blurry (Aljundi et al., 2019c) splits and the evaluation metric that focuses only on the task accuracy such as average task accuracy (Aavg).
|
| 4 |
+
|
| 5 |
+
The two main assumptions on the class distributions in existing CL setups, *i.e*. , the disjoint and blurry splits, are less realistic for the following reasons. The disjoint split assumes no classes overlap over different tasks; already observed classes will never appear again.
|
| 6 |
+
|
| 7 |
+
The above is not plausible because already observed classes can still appear later on in real-world scenarios (see Fig. 2 of (Bang et al., 2021)). On the other hand, in the blurry split (Aljundi et al., 2019c) no new classes appear after the first task even though the split assumes overlapping classes over tasks. This is also not plausible as observing new classes is common in real-world scenarios.
|
| 8 |
+
|
| 9 |
+
The typical evaluation metric such as Aavg in which the accuracy is measured only at the task transition is also less realistic. It implicitly assumes that no inference queries occur in the middle of a task. However, in real-world scenarios, inference queries can occur *at any-time*. Moreover, there is no explicit task transition boundary in most real-world scenarios. Thus, it is desirable for CL
|
| 10 |
+
|
| 11 |
+
<sup>∗</sup> indicates equal contribution.
|
| 12 |
+
|
| 13 |
+
<sup>†</sup> indicates corresponding author.
|
| 14 |
+
|
| 15 |
+
This work was done while HK, DK and JC were interns and an AI technical advisor at NAVER AI Lab.
|
| 16 |
+
|
| 17 |
+
models to provide good inference results at any time. To accurately evaluate whether a CL model is effective at such 'any-time' inference, we need a new metric for CL models.
|
| 18 |
+
|
| 19 |
+
In order to address the issues of the current CL setups, we propose a new CL setup that is more realistic and practical by considering the following criteria: First, the class distribution is comprised of the advantages from both blurry and disjoint. That is, we assume that the model continuously encounters new classes as tasks continue, *i.e*. , class-incremental and that classes overlap across tasks, *i.e*. , blurry task boundaries, while not suffering from the restrictions of blurry and disjoint. Second, the model is evaluated throughout training and inference such that it can be evaluated for any-time inference. We call this new continual learning setup 'i-Blurry'.
|
| 20 |
+
|
| 21 |
+
For the i-Blurry setup, we first propose a plausible baseline using experience replay (ER) with reservoir sampling and a tuned learning rate scheduling. While existing online CL methods are applicable to the i-Blurry setup, they perform only marginally better than our baseline or often worse.
|
| 22 |
+
|
| 23 |
+
To better handle the i-Blurry setup, we propose a novel continual learning method, which improves the baseline in three aspects. We design a new memory management scheme to discard samples using a per-sample importance score that reflects how useful a sample is for training. We then propose to draw training samples only from the memory instead of drawing them from both memory and the online stream as is done in ER. Finally, we propose a new learning rate scheduling to adaptively decide whether to increase or decrease the learning rate based on the loss trajectory, *i.e*. a data-driven manner. To evaluate the algorithms in the new setup, we evaluate methods by conventional metrics, and further define a new metric called 'area under the curve of accuracy' (AAUC) which measures the model's accuracy throughout training.
|
| 24 |
+
|
| 25 |
+
We summarize our contributions as follows:
|
| 26 |
+
|
| 27 |
+
- Proposing a new CL setup called i-Blurry, which addresses a more realistic setting that is online, task-free, class-incremental, of blurry task boundaries, and subject to any-time inference.
|
| 28 |
+
- Proposing a novel online and task-free CL method by a new memory management, memory usage, and learning rate scheduling strategy.
|
| 29 |
+
- Outperforming existing CL models by large margins on multiple datasets and settings.
|
| 30 |
+
- Proposing a new metric to better measure a CL model's capability for the desirable any-time inference.
|
| 31 |
+
|
| 32 |
+
# Method
|
| 33 |
+
|
| 34 |
+
For a more realistic and practical CL setup, considering real-world scenarios, we strictly adhere to the online and task-free CL setup (Lee et al., 2019; Losing et al., 2018). Specifically, we propose
|
| 35 |
+
|
| 36 |
+

|
| 37 |
+
|
| 38 |
+
Figure 1: i-Blurry-N-M split. N% of classes are partitioned into the disjoint set and the rest into the BlurryM set where M denotes the blurry level (Aljundi et al., 2019c). To form the i-Blurry-N-M task splits, we draw training samples from a uniform distribution from the 'disjoint' or the 'Blurry M' set (Aljundi et al., 2019c). The 'blurry' classes always appear over the tasks while disjoint classes gradually appear.
|
| 39 |
+
|
| 40 |
+

|
| 41 |
+
|
| 42 |
+
Figure 2: Comparison of $A_{\rm AUC}$ with $A_{avg}$ . (a) online version of RM (Bang et al., 2021) (b) proposed CLIB. The two-stage method delays most of the training to the end of the task. The accuracy-to-{# of samples} plot shows that our method is more effective at any time inference than the two-stage method. The difference in $A_{avg}$ for the two methods is much smaller than that of in $A_{\rm AUC}$ , implying that $A_{\rm AUC}$ captures the effectiveness at any-time inference better.
|
| 43 |
+
|
| 44 |
+
a novel CL setup (named as **i-Blurry**), with two characteristics: 1) class distribution being class incremental and having blurry task boundaries and 2) allowing for any-time inference.
|
| 45 |
+
|
| 46 |
+
**i-Blurry-N-M Split.** We partition the classes into groups where N% of the classes are for disjoint and the rest of 100-N% of the classes are used for BlurryM sampling (Aljundi et al., 2019c), where M is the blurry level. Once we determine the partition, we draw samples from the partitioned groups. We call the resulting sequence of tasks as **i-Blurry-**N-M split. The i-Blurry-N-M splits feature both class-incremental and blurry task boundaries. Note that the i-Blurry-N-M splits generalize previous CL setups. For instance, N=100 is the disjoint split as there are no blurry classes. N=0 is the BlurryM split (Aljundi et al., 2019c) as there are no disjoint classes. M=0 is the disjoint split as the blurry level is M=0 (Bang et al., 2021). We use multiple i-Blurry-N-M splits for reliable empirical validations and share the splits. Fig. 1 illustrates the i-Blurry-N-M split.
|
| 47 |
+
|
| 48 |
+
A New Metric – Area Under the Curve of Accuracy $(A_{AUC})$ . Average accuracy $(i.e.\ , A_{avg} = \frac{1}{T} \sum_{i=1}^{T} A_i$ where $A_i$ is the accuracy at the end of the i<sup>th</sup> task) is one of the widely used measures in continual learning. But $A_{avg}$ only tells us how good a CL model is at the few discrete moments of task transitions (5-10 times for most CL setups) when the model could be queried at any time. Thus, a CL method could be poor at any-time inference but the $A_{avg}$ may be insufficient to deduce that conclusion due to its temporal sparsity of measurement. For example, Fig. 2 compares the online version of RM (Bang et al., 2021), which conducts most of the training by iterating over the memory at the end of a task, with our method. RM is shown as it is a very recent method that performs well on $A_{avg}$ but particularly poor at any-time inference. Only evaluating with $A_{avg}$ might give the false sense that the difference between the two methods is not severe. However, the accuracy-to- $\{\#$ of samples $\}$ curve reveals that our method shows much more consistently high accuracy during training, implying that our method is more suitable for any-time inference than RM. To alleviate the limitations of $A_{avg}$ , we shorten the accuracy measuring frequency to after every $\Delta n$
|
| 49 |
+
|
| 50 |
+
Figure 3: Overview of the proposed CLIB. We compute sample-wise importance during training to manage our memory. Note that we only draw training samples from the memory whereas ER based methods draw them from both the memory and the online stream.
|
| 51 |
+
|
| 52 |
+
samples are observed instead of at discrete task transitions. The new metric is equivalent to the area under the curve (AUC) of the accuracy-to-{# of samples} curve for CL methods when $\Delta n=1$ . We call it area under the curve of accuracy ( $A_{\rm AUC}$ ):
|
| 53 |
+
|
| 54 |
+
$$A_{\text{AUC}} = \sum_{i=1}^{k} f(i \cdot \Delta n) \cdot \Delta n, \tag{1}$$
|
| 55 |
+
|
| 56 |
+
where the step size $\Delta n$ is the number of samples observed between inference queries and $f(\cdot)$ is the curve in the accuracy-to- $\{$ # of samples $\}$ plot. High $A_{AUC}$ corresponds to a CL method that consistently maintains high accuracy throughout training.
|
| 57 |
+
|
| 58 |
+
The large difference in $A_{\rm AUC}$ (see Fig. 4) implies that delaying strategies like the two-stage training scheme are not effective for any-time inference, a conclusion harder to deduce with just $A_{avg}$ .
|
| 59 |
+
|
| 60 |
+
To address the realistic i-Blurry setup, we establish a baseline for the challenging online and task-free i-Blurry setup. For the memory management policy, we use reservoir sampling (Vitter, 1985) and for memory usage, we use experience replay (ER). For the LR scheduling, we use an exponential LR schedule but reset the LR when a new class is encountered. Please see Sec. A.1 for details.
|
| 61 |
+
|
| 62 |
+
Note that the above baseline still has room to improve. The reservoir sampling does not consider whether one sample could be more useful for training than the others. ER uses samples from the stream directly, which can skew the training of CL models to recently observed samples. While the exponential with reset does increase the LR periodically, the sudden changes may disrupt the CL model. Thus, we discuss how we can improve the baseline in the following sections. The final method with all the improvements is illustrated in Fig. 3.
|
| 63 |
+
|
| 64 |
+
In reservoir sampling, the samples are removed from the memory at random. Inspired by works on sample importance (Kloek & Van Dijk, 1978; LeCun et al., 1990; Chang et al., 2017; Katharopoulos & Fleuret, 2018; Csiba & Richtárik, 2018), we propose a novel sampling strategy specific for CL that removes samples from the memory based on sample-wise importance, as stated in Theorem 1.
|
| 65 |
+
|
| 66 |
+
**Theorem 1.** Let $C = M \cup \{(x_{new}, y_{new})\}$ , M be a memory from the previous time step, $(x_{new}, y_{new})$ be the newly encountered sample, $l(\cdot)$ be the loss and $\theta$ be the model parameters. Assuming that the model trained with the optimal memory, $M^*$ will induce maximal loss decrease on C, the optimal memory is given by $M^* = C \setminus \{(\bar{x}, \bar{y})\}$ with
|
| 67 |
+
|
| 68 |
+
$$(\bar{x}, \bar{y}) = \underset{(x_i, y_i) \in \mathcal{C}}{\operatorname{arg\,min}} \, \mathbb{E}_{\theta} \left[ \sum_{(x, y) \in \mathcal{C}} l(x, y; \, \theta) - l\left(x, y; \, \theta - \nabla_{\theta} l(x_i, y_i; \, \theta)\right) \right]. \tag{2}$$
|
| 69 |
+
|
| 70 |
+
*Proof.* Please see Sec. A.2 for the proof.
|
| 71 |
+
|
| 72 |
+
Theorem 1 states that when a new sample is appended to the memory and a sample has to be discarded, we should discard the sample that incurs the least loss decrease, *i.e.*, the sample least useful for training. We solve Eq. 2 by keeping track of the sample-wise importance $\mathcal{H}_i$:
|
| 73 |
+
|
| 74 |
+
$$\mathcal{H}_{i} = \mathbb{E}_{\theta} \left[ \sum_{(x,y) \in \mathcal{C}} l(x,y;\theta) - l(x,y;\theta - \nabla_{\theta} l(x_{i},y_{i};\theta)) \right]. \tag{3}$$
|
| 75 |
+
|
| 76 |
+
Intuitively, $\mathcal{H}_i$ is the expected loss decrease when the associated sample is used for training. We update $\mathcal{H}_i$ associated with the samples used for training after every training iteration. Specifically, the model is trained with a random batch from the memory. Then, for all samples in C, we measure the loss difference before and after training with the batch. If the loss decreases, the importance scores of the samples in the batch increase and vice versa. Note that because the importance score of a sample is relative to that of other samples in the memory, when the importance scores of the samples in the batch decrease, the scores of the samples not in the batch increase comparatively. Thus, two types of samples have high importance scores: 1) samples that were in the batch when the loss decreased and 2) samples that were not in the batch when the loss increased.
|
| 77 |
+
|
| 78 |
+
The expectation in Eq. 3 is taken over θ's optimization trajectory *i.e*. , over time. For computational efficiency, we use the exponential moving average as empirical estimates instead. The empirical estimates are calculated by the discounted sum of the differences between the actual loss decrease and the predicted loss decrease (see Alg. 1). The memory is updated whenever a new sample is encountered, and the full process is given in Alg. 2. The memory management strategy significantly outperforms reservoir sampling, especially with memory only training.
|
| 79 |
+
|
| 80 |
+
ER uses joint training where half of the training batch is obtained from the online stream and the other half from memory. However, we argue that using the streamed samples directly will skew the training to favor the recent samples more. Thus, we propose to use samples only from the memory for training, without using the streamed samples. The memory works as a distribution stabilizer for streamed samples through the memory update process (see Sec. 4.2), and samples are used for training only with the memory. We observe that the memory only training improves the performance despite its simple nature. Note that this is different from Prabhu et al. (2020) as we train with the memory during the online stream but Prabhu et al. (2020) does not.
|
| 81 |
+
|
| 82 |
+
The exponential with reset scheduler resets the LR to the initial value when a new class is encountered. As the reset occurs regardless of the current LR value, it could result in a large change in LR value. We argue that such abrupt changes may harm the knowledge learned from previous samples.
|
| 83 |
+
|
| 84 |
+
Instead, we propose a new data-driven LR scheduling scheme that adaptively changes the LR in a data-driven manner based on how good the LR is for optimizing over the memory.
|
| 85 |
+
|
| 86 |
+
Specifically, from the current base LR $\bar{\eta}$ and step size $\gamma < 1$, we try both a high LR $\bar{\eta}/\gamma$ and a low LR $\bar{\eta} \cdot \gamma$ for training. For each LR, we keep a history of length $m$ that tracks the loss decrease under that LR. When both histories are full, we perform a Student's t-test with significance level $\alpha = 0.05$ to compare the LRs. If one LR is better, the base LR is set to the better LR, *i.e.*, $\bar{\eta}/\gamma$ or $\bar{\eta} \cdot \gamma$.
|
| 87 |
+
|
| 88 |
+
We depict this scheme in Alg. 3 in Sec. A.4. The adaptive LR is important for CL methods because the data distribution changes over time and adapting to the current training data is more useful.
|
| 89 |
+
|
| 90 |
+
With all the proposed components, we call our method Continual Learning for i-Blurry (CLIB).
|
2110.12257/main_diagram/main_diagram.drawio
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2110.12257/paper_text/intro_method.md
ADDED
|
@@ -0,0 +1,86 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Introduction
|
| 2 |
+
|
| 3 |
+
Federated Learning (FL) [@mcmahan:2017] is a machine learning paradigm where the objective is to collectively train a high-quality central model while the training data remains distributed over a number of clients. The data that is owned by each client is private to itself and not transferable to other clients or the server. Hence, each client participates in FL by sharing updates derived from data rather than sharing the data itself. One popular algorithm that is used in FL settings is called federated averaging algorithm (FedAvg) [@mcmahan:2017]. This framework is different from distributed optimization setting [@smith-jmlr-2018; @recht-nips-2011] primarily in two aspects: (a) *Non-IID Data*: Any particular client's local dataset will not be representative of the population distribution; (b) *Unbalanced Data*: The size of private data owned by individual clients could vary significantly. FL setting assumes a synchronous update of the central model and the following steps proceed in each round of the learning process [@mcmahan:2017]. There is a fixed set of $K$ clients and each client owns a fixed local dataset. In each round, the central server randomly selects a fraction of clients. Each selected client downloads the state of current central model and then performs local computation based on its local dataset. Finally, it sends the model updates to the server. The server then aggregates the updates received from each selected client and applies them to its global model. The state of the central model is thus updated and this process repeats until the central model converges. Note that only a fraction of clients is selected in each round as adding more clients would lead to diminishing returns beyond a certain point [@mcmahan:2017]. 
Though initially the emphasis was on mobile-centric FL applications involving thousands of clients, recently there is significant interest in enterprise-driven FL applications that involve only a few tens of clients [@dinesh-verma-2019]. Though the FL paradigm has received significant interest recently from the research community, the problem of assessing the quality of an individual client's private data with respect to the central server's learning objective is still under-explored, and we refer to this as the *Federated Relevant Client Selection (FRCS) problem*. This is an important problem as selecting clients with irrelevant or corrupted data could annihilate the stability of the central model. We call a client *irrelevant* if the updates shared by it adversely affect the performance of the central model. Next, we motivate the need for studying FRCS problems.
|
| 4 |
+
|
| 5 |
+
<figure id="fig:motivation" data-latex-placement="!hbtp">
|
| 6 |
+
<img src="images/SFedAvg_plots.png" style="width:95.0%" />
|
| 7 |
+
<figcaption>Performance of FedAvg algorithm applied to (a) <span class="math inline">10</span> relevant clients (b) <span class="math inline">6</span> relevant and <span class="math inline">4</span> irrelevant clients.</figcaption>
|
| 8 |
+
</figure>
|
| 9 |
+
|
| 10 |
+
**Motivation:** To motivate the impact of clients' local data quality on the central model, we apply standard FedAvg algorithm to two cases -- (a) where all clients possess relevant data (b) where some clients possess irrelevant data. The central server tries to build a three layered neural network that classifies even labeled hand written digits i.e., {$0,2,4,6,8$}. In this setting, all the data with odd labeled digits becomes *irrelevant* w.r.t. the central server's objective. We work with MNIST dataset and distribute the training data to $10$ clients. In case (a), we distribute even-labeled data to each of the $10$ clients and in case (b) we distribute even labeled data to $6$ clients and irrelevant odd labelled data to remaining $4$ clients. To simulate irrelevance, we work with open-set label noise [@openset_noise] strategy, wherein we randomly flip each odd label of the $4$ irrelevant clients to one of the even labels. Figures [1](#fig:motivation){reference-type="ref" reference="fig:motivation"}$(a)$ and [1](#fig:motivation){reference-type="ref" reference="fig:motivation"}$(b)$ show the accuracy of the model on the test data across $100$ communication rounds for case (a) and case (b) respectively. The graph curves *run $1$* to *run $5$* show accuracy in $5$ independent runs and the dark red line corresponds to average of the accuracies in these runs. As expected, we can see that the model convergence is smooth in case (a) when all the clients are relevant. In case (b), even though the relevant clients are in a majority, the updates incorporated from a few irrelevant clients affect the overall stability of the model. This experiment demonstrates the need to address the limitation of existing federated learning methods to deal with clients having irrelevant or corrupted data.
|
| 11 |
+
|
| 12 |
+
**Contributions of Our Work:** Here we briefly summarize the primary contribution of this work:
|
| 13 |
+
|
| 14 |
+
- We model FL as a cooperative game with updates received from clients as players and performance of the model on the server's validation dataset as the characteristic function. We compute Shapley value for each client and use it to assess its relevance w.r.t. the server's learning objective.
|
| 15 |
+
|
| 16 |
+
- Using the Shapley values, we propose an algorithm called Shapley values based federated averaging (S-FedAvg) that empowers the server to select relevant clients with high probability.
|
| 17 |
+
|
| 18 |
+
- Using S-FedAvg, we propose techniques to solve two FRCS problems: (a) Class-specific best client selection: detect clients that have good quality data corresponding to a particular target label; and (b) Data Label Standardization: detect and correct corrupted data samples of clients.
|
| 19 |
+
|
| 20 |
+
- Extensive experimentation that shows the efficacy of our solution approach.
|
| 21 |
+
|
| 22 |
+
# Method
|
| 23 |
+
|
| 24 |
+
{#fig:frcs_block_giagram width="\\linewidth"}
|
| 25 |
+
|
| 26 |
+
We propose an algorithm that models updates received from clients as players of a co-operative game. Then, we derive Shapley values for clients and use them to approximate the likelihood of a client being relevant. This paves way to an algorithm called S-FedAvg, which is further used to solve FRCS problems. Before delving into the solution approach, we briefly introduce some preliminary game-theoretic concepts here.
|
| 27 |
+
|
| 28 |
+
**Cooperative Games** [@myerson:1997; @straffin:1993]: Let us now formalize the notions of a coalitional game and the Shapley value. To this end, we denote by $N = \{1,2,\ldots,n\}$ the set of players of a coalitional game. A *characteristic function* $v: 2^N \to\mathbb{R}$ assigns to every coalition $C \subseteq N$ a real number representing payoff attainable by this coalition. By convention, it is assumed that $v(\emptyset)=0$. A *characteristic function game* is then a tuple $(N,v)$.\
|
| 29 |
+
**Shapley Value:** It is usually assumed that the grand coalition, i.e., the coalition of all the agents in the game, forms. Given this, one of the fundamental questions of coalitional game theory is how to distribute the payoff of the grand coalition among the players. Among many different answers, Shapley [@Shapley1971] proposed to evaluate the role of each player in the game by considering its marginal contributions to all coalitions this player could possibly belong to. A certain weighted sum of such marginal contributions constitutes a player's payoff from the coalition game and is called the Shapley value [@myerson:1997; @straffin:1993]. Let us see how to compute Shapley values.
|
| 30 |
+
|
| 31 |
+
Formally, let $\pi \in \Pi(N)$ denote a permutation of agents in $N$, and let $C_{\pi}(i)$ denote the coalition made of all predecessors of agent $i$ in $\pi$ (if we denote by $\pi(j)$ the location of $j$ in $\pi$, then: $C_{\pi}(i) = \{j \in \pi: \pi(j) < \pi(i)\}$). Then the Shapley value is defined as follows [@shapley:1996]:
|
| 32 |
+
|
| 33 |
+
$$\begin{equation}
|
| 34 |
+
\label{originalShapley}
|
| 35 |
+
SV_i(v) = \frac {1}{|N|!} \sum_{\pi \in \Pi}[v(C_{\pi}(i) \cup \{i\}) - v(C_{\pi}(i))],
|
| 36 |
+
\end{equation}$$
|
| 37 |
+
|
| 38 |
+
i.e., the payoff assigned to $a_i$ in a coalitional game is the average marginal contribution of $a_i$ to coalition $C_{\pi}(i)$ over all $\pi \in \Pi$. It is easy to show that the above formula can be rewritten as: $$\begin{equation}
|
| 39 |
+
\label{Original_SV2nd_form}
|
| 40 |
+
SV_i(v) = \sum_{C \subseteq N \setminus \{i\}}\frac {|C|!(|N|-|C|-1)!} {|N|!} [v(C \cup \{i\}) - v(C)].
|
| 41 |
+
\end{equation}$$
|
| 42 |
+
|
| 43 |
+
In FL setting, in communication round $t$, each client $k \in S^t$ initializes its local model with parameters $\theta^t$ downloaded from the server and trains it using its private data $\mathbb{D}_k$. It runs Stochastic Gradient Descent algorithm for $E$ number of epochs to get the local parameters $\theta_k^{t+1}$. Then it derives an update $\delta_k^{t+1}$ by computing the difference of its local parameters with the global parameters as $\delta_k^{t+1} = \theta_k^{t+1} - \theta^t$ and shares it to the server. Now, the server computes importance of each client by making use of the following cooperative game and a validation dataset $D_V$. We define a cooperative game, $(\delta^{t+1},v)$, wherein the set of updates of individual clients $\delta^{t+1} = \{\delta_s^{t+1}\}_{s \in S^t}$ is the set of players and $v$ is a characteristic function that attaches a value for each $X \subseteq S^{t}$. We define the value of each $X \subseteq S^t$ as performance of the model built using updates only from $X$ on validation dataset $D_V$.
|
| 44 |
+
|
| 45 |
+
::: linenomath
|
| 46 |
+
$$\begin{align*}
|
| 47 |
+
\theta_X^{t+1} &= \theta^t + \frac{1}{|X|} \sum\limits_{s\in X}\delta_s^{t+1} \addtocounter{equation}{1}\tag{\theequation}\\
|
| 48 |
+
v(X, D_V) &= \mathcal{P}\big(f_{\theta_X^{t+1}}, D_V\big) \label{eq:char_perf} \addtocounter{equation}{1}\tag{\theequation}
|
| 49 |
+
\end{align*}$$
|
| 50 |
+
:::
|
| 51 |
+
|
| 52 |
+
where $\mathcal{P}$ function denotes the performance (accuracy) of the central model with parameters $\theta_X^{t+1}$ on the validation data $D_V$.
|
| 53 |
+
|
| 54 |
+
Let $\varphi = (\varphi_1, \varphi_2, \ldots, \varphi_K)$ be the relevance vector wherein $\varphi_k$ represents the relevance of client $k$. $\varphi$ is private to the server. We posit that more the relevance value for a client, more is its likelihood to be relevant and thereby more is its contribution to the objective of central model. The server initializes the relevance vector to be uniform $\varphi_k = \frac{1}{K};\; \forall k \in S$ at $t=0$. Then at each round $t$, we obtain the Shapley value for selected clients $S_t$ from the cooperative game $(\delta^{t+1}, v)$ mentioned above. Let us denote the computed Shapley values as $sv(s)$ for each $s \in S_t$. Now, the server updates the relevance vector $\varphi$ as follows.
|
| 55 |
+
|
| 56 |
+
::: linenomath
|
| 57 |
+
$$\begin{align*}
|
| 58 |
+
\varphi_s = \alpha * \varphi_s + \beta * sv(s);\;\forall s \in S_t \addtocounter{equation}{1}\tag{\theequation}
|
| 59 |
+
\end{align*}$$
|
| 60 |
+
:::
|
| 61 |
+
|
| 62 |
+
We note that even though the server updates the relevance vector for only a subset of clients at each round, the effect of this partial update is levied on all the $K$ clients. As we will see in the proposed S-FedAvg algorithm, we compute the relative relevance of each client by normalizing $\varphi$ as $P_{\varphi} = softmax(\varphi)$, where $softmax(\cdot)$ is the traditional softmax function. By definition, $sv(\cdot)$ can be negative. In our definition of the cooperative game, a client would receive a negative Shapley value when the update it shares adversely affects the performance of the central model when used in coalition with updates shared by other clients.
|
| 63 |
+
|
| 64 |
+
Based on the above framework, we propose the S-FedAvg algorithm, which is outlined in Algorithm [\[s-fedavg\]](#s-fedavg){reference-type="ref" reference="s-fedavg"}.
|
| 65 |
+
|
| 66 |
+
::: algorithm
|
| 67 |
+
$\varphi_k \gets \frac{1}{K}\;\; \forall k \in S$;\
|
| 68 |
+
initialize central model parameters $\theta_1$ randomly;\
|
| 69 |
+
:::
|
| 70 |
+
|
| 71 |
+
::: algorithm
|
| 72 |
+
[]{#update-shap-alg label="update-shap-alg"} $N\gets$ set of clients in $\delta^{t+1}$ $P \gets$ set of $R$ permutations of $\delta^{t+1}$\
|
| 73 |
+
:::
|
| 74 |
+
|
| 75 |
+
In the beginning, server initializes $\varphi$, the relevance scores of clients uniformly and the parameters of the central model $\theta_0$ randomly. Then, it runs the *outer for* loop in Algorithm [\[s-fedavg\]](#s-fedavg){reference-type="ref" reference="s-fedavg"} for $T$ communication rounds where in each round $t$, it does the following. First, the server computes a probability distribution $P_{\varphi}$ by applying softmax to the relevance vector $\varphi$. Then, it samples $m$ clients using $P_{\varphi}$ to form the sampled set of clients for receiving updates, call it $S_t$, for round $t$. In all our experiments, we set $T = 100$ and $m = 5$. Next, the server signals the clients in $S_t$ to share the updates. Each client on receiving the signal executes the *inner for* loop in Algorithm [\[s-fedavg\]](#s-fedavg){reference-type="ref" reference="s-fedavg"}.
|
| 76 |
+
|
| 77 |
+
Each client $k \in S^t$ first downloads the model parameters $\theta^t$ and initializes its local model with it. We note that the architecture of the local model is exactly the same as that of the server. Then, it learns a local model by minimizing its local loss function $l_k$ using its local dataset $\mathbb{D}_k$. Clients use the stochastic gradient descent algorithm to fit the local model for $E$ epochs with a local learning rate of $\eta_k$. In all our experiments, we set $\eta_k = 0.01$ and $E=5$. We further decay $\eta_k$ with a factor of $0.995$ every $20$ communication rounds at the server. Each selected client, on fitting its local model, derives the update using $\delta_k^{t+1} = \theta_{local} - \theta^t$ and shares it with the server. Once the server receives updates from all the clients in $S^t$, it first computes the Shapley values according to Algorithm [\[shapley_value_convex_games\]](#shapley_value_convex_games){reference-type="ref" reference="shapley_value_convex_games"} using a co-operative game $(\delta^{t+1},v)$. Since computing exact Shapley values has time complexity exponential in the number of players, they are generally approximated by Monte Carlo simulations [@castro-cor-2009; @fatima-aij-2008; @bachrach-aamas-2008]. We note that in all our experimental settings, we have $m=5$ players in the game. In such a setting, computing exact Shapley values requires computations over $m!=120$ permutations, which is manageable. However, to test the robustness of the proposed approach, we set the number of Monte Carlo simulations to $R=10$ and run the *for loop* in Algorithm [\[shapley_value_convex_games\]](#shapley_value_convex_games){reference-type="ref" reference="shapley_value_convex_games"} for $10$ permutations only. For each permutation $p$ in the *for loop*, we define $S_{p,i} = \{j | j \in N \land p(j) \leq i \}$ (wherein $p(j)$ indicates the position of client $j$ in the permutation $p$). 
Then we compute its marginal value $a^{p}_{i} = v(\{S_{p,i}\cup i\}, D_V) - v(S_{p,i}, D_V)$. We then compute the average of these marginal values and update the relevance scores according to the equation $\alpha * \varphi_k + \beta * sv[k]\;\; ; \forall k \in S^t; \alpha \in (0,1]$. In our experiments, we set $\alpha=0.75$ and $\beta=0.25$ respectively. Intuitively, $\alpha$ acts as the emphasis the server places on past performance of clients and $\beta$ serves as the emphasis the server places on current performance of clients. So for a selected client $k \in S^t$, on expanding its relevance computation equation, we get
|
| 78 |
+
|
| 79 |
+
::: linenomath
|
| 80 |
+
$$\begin{align*}
|
| 81 |
+
\varphi_k &= \beta * sv^t[k] + \alpha * \mathcal{I}_{k\in S^{t-1}} * sv^{t-1}[k] \\
|
| 82 |
+
&+ \alpha^2 * \mathcal{I}_{k\in S^{t-2}} * sv^{t-2}[k] \addtocounter{equation}{1}\tag{\theequation}
|
| 83 |
+
\end{align*}$$
|
| 84 |
+
:::
|
| 85 |
+
|
| 86 |
+
where $\mathcal{I}_{k\in S^l}$ is the indicator that $k$ was sampled during the $l^{th}$ communication round. $sv^{l}[k]$ here indicates the Shapley value of client $k$ at round $l$, which is undefined if $k$ is not sampled during round $l$. We can see that for a client to have a good relevance score at time $t+1$, it should not only share relevant updates in the current round $t$, but also should have shared relevant updates in the earlier rounds $\{1,2,\cdots,t-1\}$ whenever it was sampled by the server. The entire training schedule of $FRCS$ is elucidated in Figure [2](#fig:frcs_block_giagram){reference-type="ref" reference="fig:frcs_block_giagram"}.
|
2205.01826/main_diagram/main_diagram.drawio
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
<mxfile host="app.diagrams.net" modified="2022-05-03T21:48:05.879Z" agent="5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/100.0.4896.127 Safari/537.36" etag="czJBiORCBQrFAsP74R2b" version="17.5.0" type="device"><diagram id="4ILGjfBfkreejU0Xxxt_" name="Page-1">7V1tl5q6Fv41rnXuB10hvPpxdMZOu6Y9vcf2tv10V4SonEFiAeelv/4mgSAkqKiAc0+dM6sjSQyQ/exnv2TD6enj1cu7CK2XH4mHgx4E3ktPv+1BqBkQ9tgv8F7TFhtYacMi8r1s0LZh6v/CWSPIWje+h+PSwISQIPHX5UaXhCF2k1IbiiLyXB42J0H5rGu0yM4Itg1TFwVYGfbN95Jl2uqYhdH32F8sxZk1kPWskBicTREvkUeeC+fS73r6OCIkST+tXsY4YIsn1iWdaLKjN7+wCIdJnS+s/5xM4AN4jO8fR86vT/Ov2Fr2NTOd5gkFm+yOe9AK6ISjOaHz0stOXrO1sH5uiOjox1xSN3SABtcv2076acH+/oXCRz9ciMlmkeh4IHEsWunFpmdJu7J1yk8I6Q1QOdOD0fPST/B0jVzW80yhRtuWySqgRxr9iOJ1Kvy5/4K97BozMA3ZoR8EYxKQiM+rz032Xzas0J7+0PY4icgjLvRY/If2eChesjPw06oiyKTyhKMEvxSaMpG8w2SFk+iVDsl6dTtTkEw/oAEHmUyei3hLm5YFqIk2lCF8kc+9BQH9kOHgCExASxFERDahx28b0NsmUbIkCxKi4IGQdbYWf+Mkec2WHG0SUhaQJA8cejdMOekhodBLWyZ+IIbjFz/5Xvj8g515AM3s8PYluxJ+8Jod7JRGTDaRi/fcsZ6Ow94C75VZhAOU+E9laqha/+yrn4nPdUjIWrCGkDXUy1OkF5p9q6jK0kSGIFExkSlhIUHRAifKRBwO+f2cjhD9fNLQqkijBw36O/9vzx71zDHVoIR+wqFLrUrUs2/ZLx9Rlz+2sNV2cIhEDJ6JHc+oIgAHznROAAVQx3QuxnH6LWyIDLQyQDRbV6jAACoVQGF0GucCHaqkTLVkmh2GJGTs3CI9uAGKY99VGOJ0Zbe6UXY4lJVdmqKussugUCZqWdktRdmn/kuCccjWH5M1BQIFKI7Yn0cqIIoDem0h+xfaPeaUWGjFdI2r7Jf8MFNZIJR5I9TYD//eRGyWXMk3Ww2XpqOd0oyDwpQZG7m5Gm8JRwfAns/nKgfd4tiN/Bm7m2TJ/31dsz9kfuS1DtphKQ/P0YbPW+aobXvRLwr8Rci0iGoJpdAiZYHt0QOeJ4dsaH0S0yyjTGKOwyy3zGMadFQis/SB3ZZbozktubpiFl80TPEKhQklLcX5vVvNsOdxvxikMt5ixJfnO4Qbbh/LBqmEiIyci+DJmhRYMOn6NOi5yTpWvucFu/zuMttL1N2EFbQkKzh0VPRoFWZQb8sKCqhezApWOslyUOO42HWrfJeRYxqmcJe/F1zn1K2223KrtWE3ptYwbREzvUqe9tGetXDJxUSgW89aLNnv7lo3QCOGPXxrzrTeCY1Ua/l+JZeoR5HRjePYgE8e0pXIQ3N2UIzN2fH2BPzofBoRKcRU9brnG0rC6LUwYM2UP95PI5Lzr8l81DKNiCUr0MiMeH7AbmOJotX56jyxxxYAe6HSrDqbhlYOg4DqU9pVToHVljYbb1abdympfUhJD9FA7kmcoc36W9fmxpRwd5pszaNl1ZTPSMTsMbfjHEAwXWywoqvlh/2EgYZ1Ambjy30zkiRkpXavURpxKB2EKt88YP4l6+HIE13P9Dr6swijx7STf+wjfs/lL/dTetgOYl/NRzE/o59FG2xIwCLNnQHUP35Vdt0
4jcPD6jtH7uOCU0g/S2KwKaPF7A9oODwfQaEJIEsH55+Nf+0+0amBLpgjt/yFexw8YRYzqidDUeQ/FfZ+FFcyb07vW2nmONjferT5KoZZkiVzxnfj8Wlsd7r50i9tvpwGkvj10iHvw/WGzTdl2Yaw6ZxH/VxGhOmloxmfikk1Y346r0ljols2F9X29PaOSqc0nwLRbKfsP1rVOTRQGb+0hRijK8R8plLgFh2CBzRjFQVXxBziF73ML5pjVOwjDzvFS1u1BQpePuEFuuLlOLxAaQvZuTi9qBtNR8PFroLLH4h6ROBLhPxwn1fye+JAtwwpd6qJpNjFkKAZ3SbKjqoyyWPr/KAQkdcPrWtscSv+KOA/++R+OCUPnLrht974RvmZoDCvoNgHinqVaY0ixHljCFEL164IuShCDPi2EAIaqAG4bq+lyQpbKvO4+PaaBoYX1P/T1P/gtlmDlXGH1RU0TujV22O2CcvBR81C2MZ22Q27G6Ts2Ic5JPQSUTdgAW5Ho5HpnIeN5o19NTaGEq9QoukWG1oDaa6rjUhlqb85G6F1G0X8n9RgXL5iQlF7CJVS0B1FE+pcQym31bV50dRUlktWa5z4iU/C8xW58+ILZUVBVXa50/0rDTbA09X5whnLF74P5ziStquuCUMIHVGXsS9dWFWd2166sKPy3DY4PffM5GLe+RzyYt6mYnbYUS2uBqDsv0kMsIPFu6vLydei8Y2pmLg+YufHT5gzRTt+3ITeVHXFQg6nEmwUHmrZOJhqvX63pkFXTQNKEuQ+voG1D9gGZbrBTSqNw4M0IDcSO61Hu+LUwfDitl5vYG+wUmPBgNXBNKWnXLajvGarymOrNAjizQJV1Uldu3qOKfG3iIcPxGzi8aoWhN/Wg1tgoA87Eb7gjIPCr+0f5FO2LHzLrtD9bsUvzlYSv+zubZNimbdXIuHCIrH2z9QY4CjkLRDoiiSg4i0eeJ2EcNYU8XnWzDKtsyLvwrqbFesu2s502xxbH9hShaJ4M8Wxz1BVzWVKEXjbj1GJassOHvb0V36AIj9hwpq6JLrWN9bgGSlI0IaawjJd15zA1qwMsK9WZp+V0UVK4WI2xrzkk3l1KweO3ESqnU7eKdHDWQXhG16imqhuHlnPK6ePNWRD2Ruqn5PuMJthqsFuJ2BubZPyVMCf7mOd6zwNJT6DzuDUrQtdnmpYC7ut4O2L7fx07kdB9EH/z6/3ifHtz683/c4cqzEKPd9DSV7VHV8dq4OmdSjyMnsfA2gtP/9tfo8+xV/Rj79+ff/xPZq9j123X+VY/TP4aQB49FjgKAdaB1iKH33GkU8Xl8HustRFzeUAFH6kZ6xta+DohW6jIeNXsWVrDhxn93W0VgNSidiqbOMVsW8GsYqxNbTL4JIbeqdTaKoljpd6N+dRQUwr0UdHW5ry+6igfuprO6FEa+29tvPj/SSe3n2ajOz+rR+++0kmkV/x2s47bv75y4rdJY57+Yv8ese+tM+lIMJxcvJr+9gqsCtJ37nH908H+YhwFq8Lvl77r/Xbeze/64v9LGjJj4p1/2q/SmB39mDy9VH241FjSBFJx4+yw+8fP+Dh07/77gd/cu8+actoWiskuW4pnVsJpGvqnpIUQdQ1nZWTNWY+6eH2Pfzp8O3/zUC/+x8=</diagram></mxfile>
|
2205.01826/main_diagram/main_diagram.pdf
ADDED
|
Binary file (54.8 kB). View file
|
|
|
2205.01826/paper_text/intro_method.md
ADDED
|
@@ -0,0 +1,81 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Introduction
|
| 2 |
+
|
| 3 |
+
Semantic typing is a group of fundamental natural language understanding problems that aim at classifying tokens (or spans) of interest into semantic categories. This includes a wide range of long-standing NLP problems such as entity typing, relation classification, and event typing. Inferring
|
| 4 |
+
|
| 5 |
+
the types of entities, relations or events mentioned is not only crucial to the structural perception of human language, but also plays an important role in many downstream tasks such as entity linking [\(Onoe and Durrett,](#page-9-0) [2020\)](#page-9-0), information extraction [\(Zhong and Chen,](#page-11-0) [2021\)](#page-11-0) and question answering [\(Yavuz et al.,](#page-11-1) [2016\)](#page-11-1).
|
| 6 |
+
|
| 7 |
+
Most traditional methods tackle semantic typing problems by training task-specific multi-class classifiers with token or sentence representations from language models to predict a probability distribution over a pre-defined set of classes [\(Dai et al.,](#page-8-0) [2021;](#page-8-0) [Yamada et al.,](#page-10-0) [2020\)](#page-10-0). However, this approach comes with several limitations. First, these models simply convert labels into indices, thus completely ignoring the rich semantics carried by the label text itself. For example, given "*Currently Ritek is the largest producer of OLEDs.*", knowing what the entity type *company* means would naturally simplify the inference of "*Ritek*" is a *company* in this context. Second, models trained as classifiers do not generalize well to class labels that are rarely seen or unseen in the training data, as these models rely on the abundance of annotated examples to associate semantics to label indices. In particular, since these classifiers are limited by the pre-defined label set, they cannot infer any unseen labels unless being re-trained or incorporated with label mapping rules. As a result, these models struggle to handle more fine-grained semantic typing tasks in real-world scenarios [\(Choi et al.,](#page-8-1) [2018;](#page-8-1) [Chen et al.,](#page-8-2) [2020\)](#page-8-2) where any free-form textual labels may be used to represent the types, many of which may also be unseen during training.
|
| 8 |
+
|
| 9 |
+
In contrast to the aforementioned traditional paradigm for semantic typing, several studies have explored alternative approaches such as prompt-based learning [\(Schick and Schütze,](#page-10-1) [2021;](#page-10-1) [Ding](#page-8-3) [et al.,](#page-8-3) [2021\)](#page-8-3) and indirect supervision from NLI models [\(Yin et al.,](#page-11-2) [2019;](#page-11-2) [Sainz et al.,](#page-10-2) [2021\)](#page-10-2) to make more efficient use of label semantics. How-
|
| 10 |
+
|
| 11 |
+
<span id="page-0-0"></span><sup>∗</sup>Equal contributions.
|
| 12 |
+
|
| 13 |
+
<sup>1</sup>Our code and pre-trained models are available at [https:](https://github.com/luka-group/UniST) [//github.com/luka-group/UniST](https://github.com/luka-group/UniST).
|
| 14 |
+
|
| 15 |
+
<span id="page-1-1"></span>
|
| 16 |
+
|
| 17 |
+
Figure 1: UNIST projects the input sentence with task descriptions (in blue) and marked token span of interest (with enclosing special tokens), and candidate labels into a shared semantic embedding space. In training, it optimizes a margin ranking loss such that positive labels are closer to the input sentence than negative labels. During inference, UNIST simply ranks candidate labels based on the similarity between input and label embeddings.
|
| 18 |
+
|
| 19 |
+
ever, these methods usually require hand-crafted templates or mapping between labels and language model vocabulary that do not scale well to diverse, free-form labels across various semantic typing tasks. Instead, we seek a generalizable approach that captures label semantics while requiring minimal effort to be adapted to a different task.
|
| 20 |
+
|
| 21 |
+
In this paper, we propose UNIST, a unified framework for semantic typing that projects context sentences and candidate labels into a shared semantic embedding space. UNIST provides a unified solution to two major categories of semantic typing tasks, namely lexical typing (e.g., entity typing, event typing) and relational typing (relation classification). By optimizing a margin ranking loss, our model captures label semantics such that positive labels are encoded closer to their respective context sentences than negative labels by at least a certain similarity margin. Depending on the task requirement, either top-k candidate labels or any candidate labels with similarity above a certain threshold are given as the final predictions. Furthermore, we add a *task description* to the end of the context sentences to specify the task and token (spans) of interest, and use a single model for encoding both context sentences and labels. This simple technique allows us to unify different semantic typing tasks without introducing separate task-specific model components or learning objectives, while differentiating among distinct task prediction processes during inference. UNIST demonstrates strong performance on three semantic typing benchmarks: UFET [\(Choi et al.,](#page-8-1) [2018\)](#page-8-1) for (ultra-fine) entity typing, TACRED [\(Zhang et al.,](#page-11-3) [2017\)](#page-11-3) for relation classification, and MAVEN [\(Wang et al.,](#page-10-3) [2020\)](#page-10-3) for event typing, even achieving comparable performance with a single model trained to solve
|
| 22 |
+
|
| 23 |
+
all three tasks simultaneously.
|
| 24 |
+
|
| 25 |
+
The main contributions of this work are threefold. First, the proposed UNIST framework converts distinct semantic typing tasks into a unified formulation, where both input and label semantics can be effectively captured in the same representation space. Second, we incorporate a model-agnostic task representation scheme to allow the model to differentiate among distinct tasks in training and inference without introducing additional task-specific model components. Third, UNIST demonstrates substantial improvements in both effectiveness and generalizability on entity typing, relation classification and event typing. In addition, our unified framework makes it possible to learn a single model for all three tasks, which performs comparably to dedicated models trained separately on each task.
|
| 26 |
+
|
| 27 |
+
# Method
|
| 28 |
+
|
| 29 |
+
In this section, we present the technical details of UNIST, our unified framework for semantic typing. We first provide a general definition of a semantic typing problem ([§2.1\)](#page-1-0), followed by a detailed description of our model ([§2.2\)](#page-2-0), training objective ([§2.3\)](#page-3-0), and inference ([§2.4\)](#page-3-1).
|
| 30 |
+
|
| 31 |
+
Given an input sentence s and a set of one or more token spans of interest E = {e<sub>1</sub>, ..., e<sub>n</sub>}, e<sub>i</sub> ⊂ s, the goal of semantic typing is to assign a set of one or more labels Y = {y<sub>1</sub>, ..., y<sub>k</sub>}, Y ⊂ 𝒴 to E that best describes the semantic category E belongs to in the context of s. 𝒴 denotes the set of candidate labels, which may include a large number of free-form phrases [\(Choi et al.,](#page-8-1) [2018\)](#page-8-1) or ontological labels [\(Zhang et al.,](#page-11-3) [2017\)](#page-11-3). In this paper, we consider
|
| 32 |
+
|
| 33 |
+
<span id="page-2-1"></span>
|
| 34 |
+
|
| 35 |
+
| Task | Input Format |
|
| 36 |
+
|--------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------------|
|
| 37 |
+
| Entity Typing | Currently <e> Ritek </e> is the largest producer of OLEDs.<br>Describe the type of Ritek. |
|
| 38 |
+
| <subj> Herrera </subj> 's wife <obj> Ramona </obj> died in 1991.<br>Relation Classification<br>Describe the relationship between person Herrera and person Ramona. | |
|
| 39 |
+
| The siege <t> began </t> on 15 September.<br>Event Typing<br>Describe the type of began. | |
|
| 40 |
+
|
| 41 |
+
Table 1: Input formats for different semantic typing tasks. The four pairs of special tokens mark entities, subjects, objects and triggers respectively.
|
| 42 |
+
|
| 43 |
+
two categories of semantic typing tasks, *lexical typing* of a single token span (e.g., entity or event typing), and *relational typing* between two token spans (relation classification).
|
| 44 |
+
|
| 45 |
+
Overview. As illustrated in [Fig. 1,](#page-1-1) UNIST leverages a pre-trained language model (PLM) to project both input sentences and the candidate labels into a shared semantic embedding space, where the semantic relatedness between the input and label is reflected by their embedding similarity. This is accomplished by optimizing a margin ranking objective that pushes negative labels away from the input sentence while pulling the positive labels towards the input. This simple, unified paradigm allows our model to rank candidate labels based on the affinity of semantic representations with regard to the input during inference. Meanwhile, our model is not limited to a pre-defined label set, as any textual label, whether seen or unseen during training, can be ranked accordingly as long as the model captures its semantic representation. In order to specify the task at hand along with the tokens (or spans) we aim to classify, we add a task description to the end of the input sentence. This allows our framework to use unified representations from a single encoder for both inputs and labels, as well as support the inference of distinct semantic typing tasks without introducing task-specific model components.
|
| 46 |
+
|
| 47 |
+
Task Description. To highlight the tokens (or spans) we aim to type, we first enclose them with special marker tokens indicating their roles (entities, subjects, objects, or triggers). Next, we leverage the existing semantic knowledge in PLMs and add a natural language task description to the end of the input sentence to specify the task at hand along with tokens (or spans) of interest. The general format for lexical semantic typing is
|
| 48 |
+
|
| 49 |
+
*Describe the type of <tokens>*.
|
| 50 |
+
|
| 51 |
+
and that of relational semantic typing is
|
| 52 |
+
|
| 53 |
+
*Describe the relationship between <subject> and <object>*.
|
| 54 |
+
|
| 55 |
+
Examples of different input formats (including special tokens and task descriptions) can be found in [Tab. 1.](#page-2-1) In addition, relational typing (relation classification) tasks may incorporate entity types from NER models alongside input sentences. Entity type information has been shown to benefit relation classification [\(Peng et al.,](#page-9-1) [2020;](#page-9-1) [Zhong and Chen,](#page-11-0) [2021;](#page-11-0) [Zhou and Chen,](#page-11-4) [2021a\)](#page-11-4), and can be easily incorporated into our task description, as shown in the given example.
|
| 56 |
+
|
| 57 |
+
Input Representation. We use a RoBERTa model [\(Liu et al.,](#page-9-2) [2019\)](#page-9-2) to jointly encode the input sentence and the task description. Given an input s and its task description d, we concatenate s and d into a single sequence, and obtain the hidden representation of the <s> token as the input sentence representation, denoted by u:
|
| 58 |
+
|
| 59 |
+
$$\mathbf{u} = f_{\text{encoder}}([s, d]).$$
|
| 60 |
+
|
| 61 |
+
A traditional approach to semantic typing is to train classifiers on top of the representations of specific tokens of interest [\(Wang et al.,](#page-10-4) [2021a;](#page-10-4) [Yamada et al.,](#page-10-0) [2020\)](#page-10-0). In the case of relational typing where two entities are involved, their representations are usually concatenated, leading to dimension mismatch with lexical typing tasks and requiring a different task-specific module to handle. Instead, thanks to the introduction of task description, UNIST always uses the universal <s> token representation for both inputs and labels, and across different semantic typing tasks.
|
| 62 |
+
|
| 63 |
+
Label Representation. Most semantic typing tasks provide textual labels in natural language from which a language model can directly capture label semantics. Some relation classification datasets such as TACRED use extra identifiers *per:* and *org:* to distinguish the same relation type with different subject types. For example, *per:parent* refers to the parent of a person, while *org:parent* represents the parent of an organization such as a company. In this case, we simply replace *per:* and *org:* with *person* and *organization* respectively. The label text is encoded by the exact same model used to encode the input sentence. Given the label y, we again take the <s> token representation as the label representation, denoted by v:
|
| 64 |
+
|
| 65 |
+
$$\mathbf{v} = f_{\mathrm{encoder}}(y).$$
|
| 66 |
+
|
| 67 |
+
Let 𝒴 be the set of all candidate labels for a semantic typing task. Given an input [s, d] and the positive label set Y ⊂ 𝒴, we first randomly sample a negative label y′ ∈ 𝒴 \ Y for each training instance. Then, we encode the input [s, d], positive label y and negative label y′ into their respective semantic representations u, v, and v′. UNIST optimizes a margin ranking loss such that positive labels, which are more semantically related to the input than negative labels, are also closer to the input in the embedding space. Specifically, the loss function for a single training instance is defined as:
|
| 68 |
+
|
| 69 |
+
$$\mathcal{L}_{s,y,y'} = \max\{c(\mathbf{u}, \mathbf{v}') - c(\mathbf{u}, \mathbf{v}) + \gamma, 0\},\$$
|
| 70 |
+
|
| 71 |
+
where c(·) denotes cosine similarity and γ is a non-negative constant. The overall (single-task) training objective is given by:
|
| 72 |
+
|
| 73 |
+
$$\mathcal{L}_t = \frac{1}{N_t} \sum_{s \in S_t} \sum_{y \in Y_s} \mathcal{L}_{s,y,y'},$$
|
| 74 |
+
|
| 75 |
+
where S<sub>t</sub> is the set of training instances for task t, Y<sub>s</sub> is the set of all positive labels of s, and N<sub>t</sub> is the number of distinct pairs of training sentence and positive label. In addition to the single-task setting which optimizes an individual task-specific loss L<sub>t</sub>, we also consider a multi-task setting of UNIST where it is jointly trained on different semantic typing tasks and optimizes the following objective:
|
| 76 |
+
|
| 77 |
+
$$\mathcal{L} = \frac{1}{N} \sum_{t \in T} \sum_{s \in S_t} \sum_{y \in Y_s} \mathcal{L}_{s,y,y'}.$$
|
| 78 |
+
|
| 79 |
+
where T is the set of semantic typing tasks UNIST is trained on, and N is the total number of training instances.
|
| 80 |
+
|
| 81 |
+
UNIST supports different strategies for inference depending on the task requirement. If the number of labels for each input is fixed, we simply retrieve the top-k closest candidate labels to the input as the final predictions. Otherwise, all candidate labels with similarity above a certain threshold are given as predictions. Note that UNIST is not restricted to a pre-defined label set, as any textual label in natural language can be encoded by UNIST into its semantic representation and ranked accordingly during inference.
|
2207.10080/main_diagram/main_diagram.drawio
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
<mxfile host="app.diagrams.net" modified="2022-05-03T13:24:56.572Z" agent="5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/100.0.4896.127 Safari/537.36" etag="QnQX86RBK26vALk0rk3z" version="17.5.0" type="google"><diagram id="onlQtUbi14uvF-NsiEVn" name="Page-1">5L3XsqRKsi36Nf142tDiMdGZkIhE84bWWvP1l5hVtXSL3Xt13z52yqzmzAwgCMLdh4/hEVT9BWXbQ5zCoXj3Sdr8BYGS4y8o9xcEoQn6/gkazm8NOIZ8a8inMvnWBP3cYJZX+q0R/tG6lkk6f2/71rT0fbOUw68b477r0nj5VVs4Tf3+69Oyvkl+1TCEefqrYYAGMw6b9HenuWWyFN9aKfwXZ0tpmRc/7gxD34+04Y+TvzfMRZj0+y+aUP4vKDv1/fLtU3uwaQPm7tfzIvyNoz8NbEq75Z+54GM9SKukHOLthUxmcjg6Gf/nuzG2sFm/P/D3wS7njxm4xz2Aj2X7NVXMlk5LeU+QEkZpo/dzuZR9dx+P+mXp2/uEBhxgwrjOp37tErZv+uk+nqRZuDbLL3p4NGUOrlz64W4N5+GbAbPySO8xM183fPxohX603J+LZQHmf4CHRYR1aPow+ete1mWbJmX4137K72bwfQDf789Xcf9AwV/q/mHrTZ/38F+HLr87/f08fp9aMMz0+EXT93kV075Nl+m8T/l+FMOwvyLYt6u++znyw0P2n70GRuG/4t87L37hNMT3U8Pvvpr/dIOfzXl/+G7R/4F10T+wLvFlgqy/H/aXZibGtf9x4P/MX2F4Ty8E48Px88H7U/7991cv0Y8Guyv1qQc9cuESzks/pT/OuQce/fa6u+3bAH40/8bl7mkH7cXS3o/LwffHeZn6Ov3hSl3fAU/Myqb5TVP43aXi25jp9Ae+1pZJAm7D7EW5pOYQxuCe+41dd9uXvwLf+/K2P8MxUPyvJP4rx0DhP3AMDPq9V6D/Lq/A/6ZX/GQoMb1nE4G0brmnNwcPJHf93qRJDprvUP0b5v2vtCNwte+JBcb/HLuSyF9J6ldmxbA/MCtC/vUHoP5HLEv8YzRPu+QB0iIA7KaPa6sou1+b6O9P3jcD/kiFN57dbXefQtn8uP6e1en0wOV/haCfGvyvBuLHV+743v+3b+cvv+npdAP5l9n/fhwu4ZSny9+Zj+/c48tt/55Vf2Ez/A8i8UfblDbhUm6/ZgZ/ZMTvd9D78gtlvzvNT3b/4TL4b/xg7tcpTr9f9ctM/tuOKPqvJP3H7vejr29z87u+vtzqpyf/1z2N+p2n/R4yHl3XLyFgB/Pv3PDXTvZHMfwLj/xjH/yBGH+55/Xrz9/wzl+iy33ug2YxnvlzYAChf21R9A9B4O941J+OAPQ/tsv/E5bAkP+/LQHDvzPF7+Y+Xqfta+q/weS/AZmPcvkGxT+++T+Q9v78MwiDLz9j8B+j+a+RHP8HSP43bfoN5P4xtPxDcEf+q8Ad/S0UwP8iuP/kuT86Qv45ZL99Jzx/cdoATpj/zoDxPx7w3xwX9OvzEepX598fvo3gT00z8O/1KdAZ6R0ZCGSm45p2ty/990Aa/WBJRvhzII34DV1AcPz3kAb9RyHtj/Tkf55ffsc0+JeI9hO+/S1M+0+jGPJPohj83wVjtwvfyob++Q/xx2n1f0xZ6b/bL0r+Z+krjP3f6sn/67z6X+JoOPW7yshPHO5/6ly/RUqUwP+z3vS3Kyr/dJ0N+aM62x/lOuhzj7ZM73v931VN+zdUYX5QkN9Skn+YIf+MAsy1PbNIUlxjabUPo/WB6Uo/4covYeSON/P7135aij7vu7Dhf279zTz9fI7Sg7r4l8GqdFnO77MXrkv/a3P+Mcu
H/y6O/KJK81MS9H957G9kxF+YEflBn35f4f9DqPnXYAv+Az3wx3OP/rGz/NNA9b/jRr+viHyPXllE8P8iiipQLM+y/x6K+of1j/8sRf19AeRPTezI/0ho/3tC8Nck4n8RWf80R/1vow6/qfX8WbzhP11E/UGm/5USUdyE81zG/2u//ReF0c/+TtO/Ki39FaeIv+/195d/usL/D32Y/r/ThbE/ayngty6M/7bs9CdVi3Dkjwf8t8aF/7Yei/4HqkXI/6Lk+mfF009xgf0UB/9MyfVfj8I/L5awfzKW/rtCCflNXRL9l1fViH9NRf5PQwn+LWki/34oweQfj+vfG0q/L/X9wQIfUIFt+n06/rslJ/LnMF7st7gG/ZOM99+25o/8vvjwc9ng/0EL4eTvt9xA/2Tl/N9mpH+i3Phfus3up112+R3+/ffY/77D7s6YKUA6Ie//D9hR99cGIOT3bXVfnTD9lKS/8J1vjT+N+ecDc50ucfH9/t+e7OvS37rj36k2/K+8BuzT+k1Jmv6DyKaxv0Lo305lf3o96Z9YcPkv9JskXMLbb759RQTgDwhbOoz22SFZzPvH/Uc17YK38/tTON8/mI19+OC3Kf0FYWjy/pg/+IY3nA/WaWeSoHsMR+3dW3e9r5VYHBIOj8BpEVRaN+S+LMifPPt2bYYvHzW/rEb9NPlcqV85zw4Ctr5CUShek5Feurg5KX4napdE3Eax9BspBZ3w6WQ8HScSitVEr3sUcSXeP62ktd7kDL7T+Ilmzlk2wn2TzyOPoMvCCfhbfJxDD4c748+t8Wpg+AaCmBrz/HMfmt7gJ0aKZ/VS709yxEdnQz0CTcSLMNTdm3TpzUDjHWZ93kruPGXJe4RaI2hdVPQCyh/RoJQYypIdV8Ax04dFyz+wfr1HNT2qu8tXWTYzu0/03T8TDqM03b+Ln0eg48vFuE3R5ptMBOXgPwLLVR3VQOxFqoc4qKA0h2fX2pSat/It77vk03f+gQl0O7Cu2bwGPiBPg3x9wk6+O+9rz255FsyElisePFS4QfGIkMR7TMpOjjuLwwUQP6oviCEN+TWJQ/94Gp9QuhLe7AWbHZuH2XsWL39EkTC6T/We9YA8xEh8S8PALNxSruyLLxmuKO87BkoQHiFC98Mk4i1EklzETbFRaxOHU81sWn1bP6xHJ5qbZMsIUgTmyDNK/a6UeXoN2u4H/fX0u8Ll6+Z4m1mHg9E/c6N3wEwK73Ccp5CxfBeOuMM/wvs56v215iF79YjFbkRBcrRxgH3EwLHn7b6KQ/la6Ly3dyqYRClgZmi1b4w+9c6JzKCwXof6bn32+rDDyvOy96jWVo1nBbZ4VAUV24F82sPkkjmGQE3EDkOvTEJB2Dw3odcUvBvBeQ6N8MQSX7IgiQ+1SfhwjVxxHyURN37t3LJ74Aln0DFssGxTxPcN7UVf3m0HtZaDv6xgEWJyzXKxFfjKtdmVTFVVHChjeBYOKzdM3mF73MVvkn+mn2awfZY4wOwUO7sVbCU4H8fQMtKsWuhCnUWvFw3s/bTLQDTOKjAftRNWvEBsr3VupOKsm6seZtNoguGRCfvc3yBwIzVj2HR0X8m4MBdYdloWIyUIKQy9nBrLRjhnW4bXPa1HJNeRC1spoM7pOL5qurDAqhl/MaGEku8vv3gURg8C0251B1YV4JbworgK0eVj7+pYeM8Mm08at5ruo9P561m2ejzzu/iCUqtOH+1rjerjCVMecDCn+pByO12zEuJ1iNSWYMLA76TH3H06K5edZ7xut2+SABgc9Tz5Q9HLpioPMBIL+MV1u1bZG2+hRcmL3SbQbz28RYV/KPQqogpvJVk8fRh0ZDBhDbv3yW/1UXoPMHh7pcf60N6o5jC3ryekvDQeZB/qCJxOKF1cNs+xdmRLArPfjQ2zu9VtbYGRUptYUk398iud3q1pPOQRXrEnpDJwi2+HP4lMTAhjbTeN8Ta9x/EGMMe5XjtwwDTxMaSDqaTU2hMwgyx
BRrCmQTqLzPXH0gUi07zFA5UCVJDqgEiANcMfd6Qoa5QPe7wCJ34q4Y4f67tVDPpNBHoQ2L1gDu8Q10SaFSZVDAItFaTkLKrXQ1U+GCKNez2laCMoIfLEeZ85uuUhuy/WE3fbsRhU1xZph5RAfvDmV+TQeGj4I+4jyp0RmRmNZ1I7+6fJjQmkqpVk7mEXd6Vp1fULPfEqsY5FaulMGemCp+QbxgdafMbCLWeZU7Bsu4TCNs46CgqN9pDwj4FBxtOWefAuQS6iQuHAlnfgYkoizmSWXLmWamqztXanYcZUTc2lZaVrgvt8mljneSQ+Cxm587Nbz2lvb412DJLi+Exj2mF9eApHhPpTwoER7pvBRm88+YxYRIepkpTZBZ/+ersh5ZYPBiK6460F/9RHtN18lhn3OOipoNz4zB6+vEz8dLbh6dOJ1obgKPZDGcenrbnPyRaf78ialw4f8nirz2N95J9nbWEJIj0q1RV10wZvRmQN6LjpOclSWlJlbVw3P6OzE8FhDWfGc3HUb5KJpEmDZwtG6UkTlTNsSszjsOT2meTl0oLp0OIwa2P1jfRfGULP9cBm37Dv0y/rQogPEWaZzJvWJPaeNFTMpXWDLEHgIWfMzuWTlK8HCac6SmQShHrlE0f8+2geFgVvmoBVmM/RE4gGuYcsiKjzSHv6VfvS7bp7+HxOYIrMM3iHvYKyNm/eeKFz24e8m1U4jUm26wjqa3MVI7jS5zGCPuPileiy+nE1v3tlwfXh7ZcfR18uJhQlGpV0C294xwkeHrr5u8gsMGT7jCfF7z87fbxbvBJWTgpwLW6xDoSVlq4BN0QetV4n+vnYi6YY1TlE5qMlXltHXMIJzmpney2Cz+bnX14Xa6U3JyM04R9JX3D1RHly7B+NGNjpTma6WCfrpx49kqZT+/0iuyO8Mh56MdgUGk+2LV2OrAhts+++h97c7wdQ7NFoNDwndV/blPfVXHcK8JlS2xqSNUMNGQO70+kJwPP6bDlNvdFVYAlox8fkxOWg/sBuATyn/3AdBjt7CIYubMfnEd3pvo8TJ2Zt6728sKd/HRffa6Fk0XDgwpluU/Ry6yAGV8tw3WiWR/P3W8kCpzB5o3j67RsaaviJ0cFmidY4fM1KxOKsBc/RhSJIiQGAhcdnVh/1IFg5fESxW7lWcSj7lgyP26PNJyXSjw47nNi1lw1dUA2Lo+yDxt5DUN6d6r+nNFZ9Elvf+FTi4abND64ijirDecmgM2OFmYDb40EI/cH0c3TVJHn2rqssW8wTqBUmOB040Tu7yeBFzqTM1VHSPHu+oYilhOJL9e5xPoWrYr/5fnVCyFLBieJetbpFo5mj6rvIY2YzQ1+zVpbEIayeHZV7ed0Bp1/vKI10FIo98vbGA7CH/iWadzpJy9iTcIpAdnNrP8JREVmHpJ6QccVUT5APK4QVds+yyw6MoLXnHfKUMSc+diOL+aLatHUOo/RfgPGSEROTJzwTojw+XkFrCHGmekeXFaFg6jrKBh1J4xgEqc7MhWP1qLr3q8+k856HmKIZ2W16uotwTQ50FqtAIvYUP5JDiJFsFV2FQ5uaGip6Dx+D2xt1NIUen8aywOzN7e6RDgnI8GfTt0ttFh+ZEYnMyxthgY9ZlUToWRbHEO1PsGlyV5xhb2QpDoEFYso15vFTxrKsIBQRmHwg4+TKgfU/ITpcKkNs6zEI9Ao4GmY8uk/ifyMgxavoQ7ep9xQq+7f7ItNjnuDDO+HnFgF0UDN1k7QX5b27QBJ7xdJ8pO8eWFK8uX6qA2VM5zHMZJEXBMi6SS7OZiVVMasptnsohvx0tmO0au6Axx9B9C+9DEuWZuzhFXIE/JjXG+m3R6nHwkGMkcY5len4rP6pOMGlbmxjMuLj6k+r3kZaQiO2BmAmpO8IgOYjoT+STxnJovMZCUenzsUvFgRWdH22DaDhAzmAMeKxrEs/bB88IVWtE2w6XPAfXeJWwX61dn28nX0/amsF8a5WgYEy0PN
QBBRgmsR4QqhdNV7TrzW1qzTmB95VLkIet76mj9Sx1ckqObOIuR6ZB6YQb4S+ZLZNETavSDG07HoC794Z4p2ZqGzxn/5yaTU7vqabnSGnl2YodMoIJZWhqZOxyt5ChtH3Npu06gXvTyM8SQxOOLPvpFthMVVkBhYd8SEpiQWtNKnh3mzhoJ98Oz1IP4sdC1+9Co5mbPoEPVQArdXJ+GXE5hAeeauxT+AANxxYXdfq8/LRWVxznKuxX3JpHGKpG9uZxm4zCSMYenaSupLc/CR6ZmlmT34UAriPMfua+54YpxiVwNuFCF1Ez1GCxRVtkFnrnuZHpngJM3fgUFgFMvyVj59YGeUO7UOT7lFVQoJ4auIwtPx4UMKlM6XIGzO7y6nLZkTcV58+GEPEKsFEBWfxfJtv7xp3an+W6wsW3jEgfjq5yTthv/WI+BrxnuAEvXxwg5iwz8FjlYbcXC/4wumC6bgE1vSbf5kcRM6zaM7jtOpecF2djYHEXUx2j93zeqIF5NXDcjclTDbDnCOLJSBadHKIXc3tjdvhm0WutxOil4FSies4IR9PQwK9ME0jr9kaNGlY7oQHe/rZVRfjkET8lj92MNsY0E+UT1HdWwLvZoKeiWkQuSGEbKGwTaQJJBtavPqwsM4nTR7R/KwyDONSSzezZ8Z490TKgukl+3Xl6MB0Z1fIBAWloZspHWSD3z6ajSK8+Q6dPxG1Xc87/VGdahk0oW6+DcNcQgDTWIk5Ok8Lg5sPj2GXDTmH68DSrcTwhC3kchjbthho4/ZuJpfctH3hD1x9obQv8doFMmzRvJ4fZAIZKmGQ7fgmcLDuNVZvWH9Sk7NSX/AjvIlbvl6FAc1jsReTLK/DEOlOyZjWKj91qOGKs0Bcozl06VR3m2bAEBMSFVQdnWVi6u6bWkfsOY7qIpBKrp+9YPzpjKbnXlN1/PZny9A4mUBe+SNVR5pU8cykGJN9yumCHX7R3goKL5FNHpl+UlxbTpDR5UxCsfabC0ouds9YIrKxfyrnYJqHq5suB4QMgaOPD7Whs/fiBBPv0mckL9r+stucbaA7uy4Ds570Y6XTc4rO0/MgWjRplSc/pJKM1FChLQbhG8FMZHkdM9Lgx9lDzi5bhejs0OKDCs9k4iwRBeV6fpVdVHsC3Cw5zqu1sI+5BIqf6e9zQ080R/3n6C5zQ7AvnzgDrX7a53Bj+q1dFunwDkTuza0K4LcUL23MGZdirABmKNTKIqe9776AIEvC83yhF8hhDFQyvSPFgnYDBajt6CXMa3h/KWSGHHk8uc7mQ+rMfgJ4Hu3GtoGx4y8x9HY1ZG5mMFWC1CgfkeOOtg18BZhwBdhhOguA8jOT7IPBhih0+xP6MJJK6kxb6wnC6+6TSa1jdK47/iqXM6bqeKf0iBkYx0Bet3XvZ3OMZFdpqHqsd2/LOivRJq++tMGnxD/kzEIV9z7AfcCT0tpVSlHURVCWInzVndhE4Cp6pCm6itXLlEjwlL6XbC3p7d4rTdt31pKhY8NVWEyJynSJtX/ifhaNlzIlIlo8UIhI6q9XqRWqqMlntc2zY7mwJHkPnL6V31N5VIoF6GM15dgcdGqDNuhHSCxo20jEc9xF+SqbxWanWaUXzCbh5V+WeOvGpSWcADIZ+1hv8a5/Ip3Y6pwsV9lSswUHd4a5SKUTA24E7c4ASlQ+nfwzeaL9mT0ezHfUHtdrTXb6qXxwLb1T+MfeOIzFl2Al+HCAdE7CuXf3UmTASMBfNm8PyEK0m1cr36p6ncU24TZR/q4+nsyZlAdlXQTwR5VQm1AQIodatfd+EFgHmJeJoS21NRXkoxpHSy6+uoOAJoXrP1evU4DaGMzFfhEBuYbvNGjjt0ftwIG06+0ZyfgxpZMYgm0M+QoO7Zh89DITRfs5JCxXHnyGfjJYNSmNBIXSr/faqU8eH2OXYu0Ya6eW5bOmRmwxcDIgogQ+iUSuwZa6eNd8fF5XmqDRc+4aBKmCb5Rovu+H5w9fHEY
QHFjD+8qaQw9D1WfXkrEluPWSWJxEckydVdC609VvL38ENOUzcHDskOd8q9xy4UBnOw36dGbK10BO8o7qqJongouyUeKrT0ugqnLr+yQmQcnm0ns5Qxn7tJYLW7EnwACSkPv14QyscXDYAydegXnzUOT2C9Vgy3F8FPsKam3uBX/s2evMB0lDe0nxARbSJeegVZHEGuZxwFRa1a5WHmfJXLivsERNlbsp31YfKCAcKSslwFHn2CR77kVn/JqDIRDlccSgQiLQl3zTlHS45bRkw734DEUN6nUr++QZgmsPewKEtFcXkkOwagdAr7jR8DI1jF6ew/GMj9bsBbQLXiuUsg9QPYp0UShAaSGAGUOm6JpnbmF8fwfgoZTOYMgoKErmwD4VoHcYwlLdE6hUP/16dEvwY4xc8WrzOAR2QPYnb60vh1sJ+iWzHYteps5gipeoO1GNWJXnB6MiHd2aMuGos4IULYB1LMmUcq6125yH0nWXSBjfshXTank/TUutiJvrLPq6AFnNNO4ypnpmbc6Fw2ZNV2tHX2h4+Z0NNznQK6+EV1ddZinTEg4UcCkGD0wqi0RjHMJHyXtVYxgEzWLhjO/1KhPb1Cn3MzAoyG/Kuki0AxtD4cgmRWHx3lCp1q9kBPzTSiNVrG9Lm6hstrRVh2ygn5pzExFCBeUXfOeWJFEL2bMRNpnVaZUMQvJVxL3ldTpqlRdno9x/lnhxH7boCdrFvkGWNYTlEXtRiy2DhnVBHgZDvAGqAm9iIpCJhbpjqVmmtmSwBPNyrWuBNBp4atiQFT6IxHO8xdO7OhxIKnkkDI7AviS0hxxNwKWMTu09MYfnGO1gVIdzrfEDIFn6W11pI9eFSDOGqQ7iAw52AMxQJLPD+EkSSHpg6hBzXxQ/X1gohapqSx/RSCPXYynN6IEnFkhG560DBF0G3I3Eas6NOMyms1EK0H7oPJrUJZyOzZPLQXQiUq5AD5gzQLKo6q9yqi73GtnFoKBpUB6K7JvHw1S9OiUicUsGapEeSM04CPlMltkCpM6bjLsP/5Y3Jt2m/IudNMMx59y2Sgpu/KaAvrJ4TVQ+BIjQdnleApjxCVG3suY9eLJTjSZSYYYTO9n8ffzCYOCGTAf+8Q7Jv+mhs4i8f7vbJm4LNaYKxTW1ZVXLsR3mugmdclkXXu8YUXoDmFk8DNhoI48PeSFJBfrbKKWBVR7SSD3CSdVoy2cL+TV51cLDBA/HIMSWx2KOcS+UfeYg26wItWxg0DdRH2k0J6XuJnZg5UXXpVp5NCy/Xcl7I8OQmCaJvAizaN0kRevtMdEMnDum+cnel+YBsaUEl0FaQ4cT/OkUoORGEI/Vd1ozdd/nih0WYoJyuesgL/SkD3p9sCqPvqB+O1cddZn4wyfAOYwWH83zSa+dnYjrsEoSgF6tkSDv/UpMzK0VCkAsSlcv4N96cRRPvn5dyIs6Yx1fGqeA7zxRQXjyltazCVyhsI7Gw1oOtYQITGB4SuhGQm+5ct+RaK/NLjwx7ilFQfAIBcg9ebznbNUrQRp7q/6rl2lZF63kDbfTKYOaDY7LsCheg3cdWAQzSWCwskB071uFLwDzHurjXSWwQzoXSXNDCvAxYkZZSvRg5syr8p6O1CcSlEQOln1Z9WBS8Kju87lGb6k1eK4sF9l/s+ELRHJQvzlA7FrM4kXuo2J3oDavzJvbwZLDGYEFwYHdGykHiGDAwBHqpWZg0SxkOkqf5VQc+j1nijE64OdGgvpj/J7h9x5TPYdvbjm83uFQqRP76KIFVAjRIyDiQVTYgqtEDgD1NHMPuX29hhrCIS2FpnENOg69tKVF4Y/SQM3z9TCYp0ciXLuIH63FJXWW+eB4Wz4F3bemVmavBzcaB8CN/CB0qwNp+YeXz7TrrNyRvX1OB57wrN+nhSDpNice94U1VN5lmYZdXG++Ae84hkTp4RcPPzGf9vNP/rXq2YHUZe60N6PBJAI+cFMPF4FvTcU6x42UKOlu1THdaAS
mHZHbQGep85xRqXx29DdNU2+v86YtXdDPk/tS7tiPFZywjxR+IgYWv5/ztBhsI9iEJnnZJqcS3WjegqiuHLKzbrwfmrxdJwpImJDhSjQFBx/5CJ0mPFwRsW25ynq0Yz9KmL/KLD7iQeMlHSddmT6cc7ZPaILOJJJLs7Yfh5F9sbmowagKLiAhETWwTUEC8mxCoPVwAQ+z37M+mo8KnesGHH6xeaJr8tfiJh6zRVtgS2lRuebBNzKCKSXbFkNTEZdQK8GYm0/J5onMqqUc6APx9YnoynrR3jp7M1DojDV/ln1YZjZ8X57Yx2Xn01eBVyKFm0+yCynraxepOXTfbdYEr7ev+6+kGLziG+rZINmCRIynL1nhonqQi2Q2Om4+6nNtB+WrPss9TB5gilFIaCYeIyg5uEX0jOlbE2S0bjnP1Ps4Y9qyzgCMpVH0FvAf4jGpxFsnzUbYLyVAr2MMBY/7JDegju9CJZzS+3BPKCngBfjFYoTeVBTKVuOiBVzulhuCHBJZPRuBf8nkZhCNoN66CdMtCzv1ZdsoVAd+K3/Rhukopd7Udp/MquN0jhP1QPF2Yp64lol36kLcPdB5TB77KNWmposuAs6lECk/WgEFTWAtIAuIPZwBYzg11FR7sz7LkCHJl9glJW+gkgVXD3LtMuKk388g1qRjw7/WNtRcXooY6tOZaEYbi2FMVE+InuKscpIYpRKdbEi1Sk9VGxCkb57s1r1uSYsnWqBfT5InLapNNGk+HeFsEfLsmvKT3HmhO9+jZcZcA8MtcoN3rE+rUnj48YpX5UWk5EJnHQaD/OpSFE4WKE8kT3PHKXGocR2nK9vSRFiU7scG+RTUQqhbVTeC51B07AHFqIsr5EILXGPdZ2yGFKVIBXd06dT32PqgfqwvKzbiIZlNo1zufWaNJr8UqWSveklJIoHFxRN3wzYO+cNsrYWW1SQVVpbGvQXHVkqQ1Mor3trwwJ43Ryj0oPYhlDxmZ80THkqaOGuF55ci4umFQZL2HZEHWnJ6x7leWDD6JT17DfiFrN/2YWCBYfTxkUuv1ye9PXvke1LqlR514Pk50k2MJM5DbKDxCPheO/0uoQljlDPVcFJ9WcZANmtH7mcJdiDsicCP5CRd76Z+l5rasdxC+pKTgzh688Q9ByGtB2smQh2zLByjc3j4vPut/zjM/Tg81gkQFp0DKT++9itVKdyOYnfM45W/mexOVnCOn+McaQ+PPKz87UJcvH8G03uaHeAowhI3sd7NPNIJB3tnBAtqauLTWKtUITKDR0ufZBupWelIsxBmJA4syxSVdO/ToCu1h8cgxzOG8TquWXYZU6NXlg/r0922G7EakqK7U+S4AT+tt8zz/RGh+ejgO95TtHQLdRrEwjEKxpuWHy+ruWzL/2Il0IqkNT0G/fO9aKBumzQofr0JHeNBZRKNlcqAGOlBqGc/gFS7cfWrI9ESMU2sW7XDYBgVv8rajeXyucDx5nmvRY8ipVuPYnJJ+ONNFyKH2YRT8PvOc2L1CBPrA40CWTAUhvUHKk2LKr+IWtgto2IG64GBqGdN9UPqLsAvC9C8g+nXF0SMV/IVjrnytZtnUIyFkPgkS3VJlaKLzl2k358gEXRkgrEc0F9ku4tDFlAb+ZYGj/OiMD6y1pC3L38Mlp56lURnNcJLJL/p9vP0QSx5NgE5YuKhdLTi8c2Olocb1N5A6elmW4KYXftQN/HWfqjqaEWrTzmWosILr6hEA5gVPtxIXdzK5s/b489ZNuOwf69epYlLtfeDopgWRQwg4SoXhLdYW8Ug2fZZt6Jf6rsK+1HZQQW+HpmcPsBCruO90A9mhYneL1kXwB3jJ8NbcAaaOS+h5fnjAmnVAucG78BH9LUOLxdNjP1lvW/gf1VyQ5un9Tzrq94UVfSFLpFhvg/itHv1xxsbsOwGleYAdd9SuCgqx5yLIvHdQF2ejergYlo3FBOOGfgnvJ7ToIRHGnMaRW6o2mn7sEG8cTmIzVnfpvS
Ns1Oz73T+4RLr+kCJRGH6OehSt1iqXCWhMeyRjMUuplk0jolNU8Dr4Wf2OBFHp/IyoFn2m4ItgijnKSqG13VTyq+lCj55pRYCvIaEHm+Cm9MnP2iuWgiApVC0buyc6h6hcJx0+flGOnot+PDOZD4araCicXFucN0iZL5BiikHAaqjh21zwPOQ5nMzcSjYsIHtmNmW5P3GPEwtORXRqPDxiqajldRv3fpOqBfr4PujnoDaARNRZFMQZm4nNzVTqg/wqwWU+5yWIjTt5g+3QKqBB9DxTsthKCU72XTcGng63BvFyBNlGUBK1aBPwCJIEmmxvj0rDFXGSWiyfV+2aN3OzPyW9pdAUwFBOfdqMOQAtzSLvMcQzmunypOjlx+DVu8JTpWW8+EybC17qxLl8I9kNl/TltqDVKyb9HrLXMfjCYORuGo647WNPHsl9fyJUNovC4LosLHxjoUjV1a7RZpWJeC+VDUiTRVVrEaMgsYZvipid5YRaOWQRwLeduNqES+9pQADsXKgWrwHkGd4pMyTyaGpqOtCU5Ve23xQ6The2PnGsFYSUN15Wfrzqq/5uBqd64NilgpTyetVCSw6yAvGdyDS3UnnzTysT3vTiZvFTGlr1GNaPyspPE31Fk/PCrjKBXk+qwT9F82P15bXKCMTQe6xdZxCDSX2wqu3Ko/tI5WKLtTTSU0c+7BV0B7zMkVer3lQkE16c8Zs4zypvnpyu1byuhzdhLAMZJwKeoovGFdD/argDLZh1M8QDmcdeedOUqnMBUdOQ9MjLfhineQMwYUmDDiigwVs2us5z9+zzXJU8H07mBsv4YUjcMVzEpwIp+BbsOeAuOVPtzWcNcIVoPJJ0zYI8f1J2cS3t68qMjr3SttxvL0/RIsvG1AAzLR+89sXUIxSppUTdjRE6OEy63BR8EnTDcqdAJqhcriTXrWgvtb7ryOTZ7cxUQVHlgZUVTpr7Z67fYf/x7j1no7E50veJlGl8JIKd1/ExK3naEsf6HJOU5ngAmiP/MTp9E0xwdpRDmovhIRrbVDJMIXRswebwx2bM440DLsH7Kr6Tft5xpV5I5L2epw5NaUQfU45JjEDB3Lzis5U8yzP5034xemJmoxunAIpZgj2btpA8mUMlU0yHGBWC18s7juNplwFnGbQgWzRSRRz4kwDjkm28zoI8mIUbeKLY9ccao+u6FuMvcGKaqYMqEcQr+fw+cjnAJr2elf0Kohsg2P7TEqvcgRGzD5jO21GWezPHYhErcrpwUbfk4Tibux90J5dQbVu6f2itRDFQHFWSG7hQwKwynj+UbwSgf7YoJbUYADYhSA3ZIzclIJjZlfyxu0iNow0tSgVv8p3HaGGJhfEBCtfyRzFXA83wq2tLLAPggl6wOXTYRBANabC10H5FG8vOGlKtZE9RuNl3WmXW0osPJYwTTOLb06tP9ZHdCKJwQfdYg9Fn7fJeFLhQbOxxm2cnbx7AGXsnklowIZ0soRinEOe2juwk3T+W4UxfAXFRuEwNWnYXOyJHYdGdkg98fUrabMpxCdSLZzrmDPSUHMk7LlJGDZQ9X2OjB9wsY44PtTKOi1wMfV0e2JccxNpbibrMLkZa2aYtFqxV89ZJnocrk6ftx2JBsxY4Ff/3BfdnH3iw23j7hezGPR7eCufW0bCldIjK/sgvYVkidWXQN0DRVx/EL0DCQVxpfMtFDQ7r++pnPArAfS64vuwiOnQxiIwyu56QF3T0KXgkOa0Lj3+ku+5NVTu20KBIJllfjRMF/L2wBtkqMuhq7+e1Qd6Gvcp6RIFXkxr+yhtm5gxCks9Rj4zoEc7h9UIlPS1p6S3kpB24vJbDBQ5c+KN3eB1p0jOERKcfytayUC4/iQu61hkgGvv2k2bl/IavZMg6Ftf6QtjZrfsbM/SNiEf5pvSdmagPFcC8jQS64DosCWX5LCE6cN82Mlvac7NDzZekSrHxoQOYk80NpCfNVziSTyh8Oxjf9YM9k713DODOvorPOJ
AhdPypr2u2kAQSXTL232UnhJs57Hyjwf7eJj2h3HUVRnlFnHPXrDeHjZ7Yqy2S799bVJoA/Ld33ikCxrFEQKTkpuAl3DjSQoyvh7FuwZ1QpFMXUyPh765jRZzwYAkyAgZR/0+szqOAxbM4UGsi6C9ENbguYMaFrLlHm0TJhli6w8OCwqwLe7M5quMvmpCkLceVD45/kzGZkVR7zAoXoNd6on17KI3RFRmhZZG/zhOrRmUtslIh+8m//GI1I72zA4wu8KJqXgr70G3xqHI7idLnFVap0in4Qg9YbBL0DHcDnmbuZYr7yZIPbE+ALgQ6ws1GHUdN/ecaCkCBbyx7cNNOD8QfZw+91oi/Xp78Jfk6y4+L1rYF5ZZIGP3iXOTBpaltqnOm4dP2uK5UpOG930WvlS9nBlPzeKMCldAUYJlCpXPSVR9U8NyDr0XnVyImdgn83WaliiSJuSWD15IXRXmbKgOpphOzdrWC0rI15bTQk/HJ6XL0mWQo7AAKxycDTK6bmdwITVQ1dIl1tWOItAWNPdViUMt1w8MlgKeQ784Y83n9uXJR406y7C9BWbH7GczVAViWKsV0bRd5I1Whs7wll5QmqwvxcJ3RA/0zltyHRFJtZ+1e3wpXH+COVs2M+rkfOXa2pDNB3Nr9uisuA/KVfhHxzIeEioPrCIAxZNjMRAamoyOZ32DA+oeZmdCA/y1WNthXN3QB60UoE6OLjftmKKU8Su8HoTbCnXdPtmMVrrB8E0yOYRW8Ib0a+WRmZC3tVarciZFefNyxOblj3fELlGmRAWKN9UrFx9BV723cX7MtjGx29i7YAF440pugD9oejbL7EVxIqym6WsnxfWYP6czazXV3aHqA9aN7W3VC7X1qFEgdfIkScTCR0zHrAPfPwEceteaTTWtfh7p2psw+4A4M3XBwpav4XcUm8z7DNUdMj70+sCFj1NPQ1tPAlOKT4n2uYfmZIGPak/eX/a3HT6KUJeIkQA72YVpRjK9P8kIpzl/ukXvfhI8stGojuZiE8wsOClPZfZQXvq5I1BuCKNcPAzJwBKow7tYpKJuwJw1D33lbJeQGXdsI4ySZ/sc3hG1t3XrboN8/rO+ZqxHMKQNYSYhCV0ASdLAabRv8ATYVH0jKXdolmdcGtH0Jekau9/mIowey0uMDILwme8cHkGyWjQ+IltSpBJQEzZ16Sl5gcsQYyiHw6DZ4uLP0z4pjLPkvDhEw6uU9JxC43V2da/OKzcK0datKfLtQU/5tbZHNKpC9MEmERfiwTED2eiwztCu50hsMcxZDjRNAfbQAUMQDI2BwtmSNLtRS9MeRwDF05AJ3Jsq3zIhj7CteCTf0tPsKdf8VrsXGS97qE6G+Hqhj/qdYX2YIPPypA1bmKR927I+qbOiwYzny6oVPFUxLLgA6wqGSYIaX2j49ZZ5oA4w4a70BLqNHlIWC5CXbkpQZ6ee07IGKNO8onvCDGr32sdRuoYUIiRUaBaj5rjJ5/ZG3IN+7q7Uj+JD4SX6gbXd1gWg1gh0Kfa8GFomu2ZXfEjNilO884JToXPqzs2Wjc8jhKZsYSrSwDevpCnPYBnD4E/7yd06mdvb0GxfHzRZjIUzxfmREXdU8wahVnb67mal4UuL527BOiXb8qywoVyGEyHQGBmjXXLV6rFLUXm6vOeVSSPnVtSX+JSgkoZTMftWraEunwn56q+Gi+0A1ZcgTXU8aED+Za4kmJP4OkseaV5HxBSxc9BUUBqk0HTSjTzy+0sLdX2YfV4ewbJmn7YHLkeI0sxNCj/s0Ky1ct4oLww37YBCF41cSu3G54xnqL6+m4cmhR0g6K4lalE/soOv9PITLQzAczt/JY9FEU7SFG5fGxtcenC0Dy8pdyH5uT8G+N2pc8DE7RNabc10rcdUOZ1VoHYECrKtDOt5f7RGZ0fhJFZQH6w17Ls0/iBSVUrkRmhV9HyBNwvcOSH5ELerRzXgKNgXoPUuXQPJ0G3LkNqZvfN2KQSZs9J
Bm3bvW9fCx5NAtIx6iylTm3tX21bPmcm3csBKlWIpXR2srARRkztceH7SgJ3Ze/zxHHsRUnabYk378IXKP8fhKRuBEGyatzGmDkxQknPqPdftipUC4ooYX2OPI8h6QePA/8Qxd1iAfIuzsaXbE2K0cJbsbfewc3phgjO/xMIycYaX+zyx7xjm+puAuaNtSqI82mxbJ0moDs9uV4u333IXWDfyGZcx9KTihtKsvfawmDaDxjsx1OtXvWf2qtZC0c25nzzfVLNuTPJrrR3x9Xdk3cgTK+3N68Zbk7qs+LCTejjeHjINAxny2GmQsimz59KzsxOy0gPHEqmjO7bgpAF/7NqGsdcUowp53NiMINUGzwJL2DaVCdrQE6i/8/ASo2Chgg25mXpNqsPY9kxPJHKBXbsxNd4Iu/Y03C27Z+TztD23j1wGgOcqaiefOeps+pRmlWIuqHLjZRsntO8X24eVgKj0uNi/G6Vma4faFgoO3j/kWYw9jRYvUIIo6MBDbG7ENXb6RDcVqB4YzNHKQ54PUynm+NCtFREPvkwpG37OrnrSPPm4oF72wYoyOpXP8MCjo40lkRZZY3bY/NmJj4PKdg2yUak7SA6f1gO+ow5717eKXklKM3HZizNTGbVZVIbYEoIjbYQnlBr+zRUmB8LwwR354A75pycDMSzG71v6DVx7ZY8I6KDAEtapjN+GwIR3mo85sPxYqdYR79liDm+4+ChlKu4kg1aPktL7tstcBtFzRp0+hD3YLKreeKOMMNg7JYCyBs9sHStGdA7qQ1uo229PTs43cpMOzdDHgMU6iZsdKihCpUq0hcw1Z4uE0AcaqVYc0nrui/a1WehLitj0SzmS9BUksnKsmeWK0bzKV1CmR+8q4CF8rFkejRp5pOZwnUb5wvgskLC+1hxQYx/3h5JePpADCqijvnnW+2GR5XXFdMx0N/JURWq1tHy9HBCrxllr+qRZV5LYtFlCw/gp05Zin5hT93LUkbtr280UnDtugwfWpqBHH4SGxoIaJViqEU4UXSecS/I43Wqr99uPni5WZzG1D9P70yrfOaXrEaE4NPZV8LG9KpXlkoot/kDSEmXsT3m8IBsfkfyWZ7qozSWoXwuZu2TWyz0TXquXXQxCiKmqqVGz0HNYrmkZcrraj3ESUbWI7FbxluQR/KvU6Fcdm6Z3UylZSVOVfMjCHJ8ai3xeCB2fHFBL67A7DL4Z+YKxls/ZqkSCUn4ulZ5N2LCVMqZhP2WnJhR/m64BjU40m16yJ/YPGzV04oE1zIuj6nZLmT346AuaP1gXxmZtHpi4uzI0ZT3laHX+OYAlKQ+zgobb4m8kBFQIV4KKdltHUQi4a6xqeWQ4mGQSJk+o/WhCoo1IRxta1DghD2v7duUoh9Az8G7wGediRUOEi2e8HT5z3pBZ8jrnOZwzD/umRZlKGde+jyDYZxJIhpxz48X3typHcXgmqdKdCbdaSfQyKX5QhCuoE+rW2BzSI7wvypIbdORo7pBCdJ65jUl26BFFI8VMruE1/go0hAhz5m1eLzrjpmO2ECmv5U45x+lYtauqwVDYVUmU2tfIZgkJpenw153LwfBAIN3zF8pX4r2TRf82NTx9kakZ3c6LpFEE/jMFYZfDpcgu1ANXgLJwnjZ+6M4y4WKEtseFymGRCpigcxMu3GU+oFACZnt7kV+bMwxabbH18x41GT9p5uZ96Py1flrTOhmaJ/y1S2U0YjLwAWsi7QS5nas7VkGWvvZtrYMXAYHf+gbkOX4EAu55ucHyNn2uxfTSBQYKHQ/Lr1K8NAN7D3pFCKwx5GBHjA4zeohuqIwOqvE6rBd1Byu3tS9KbL5io4bDZhKbetA9CuDBqFM8i+NtXFNJ/GZfrSMiUB8ZllFQbKuuNJnsxEqK2wh/ve07Jamhl6c9yk1RFS5wdKEZQ3dbvVgyedJbo1DEVEZzrRffvN6u2dJnorqpPoG31IX8Zj7qofG2KyhFLFYzumzyddC+4TRNZAN
CQIjGFQ45PRp1weq5zUSbXIt3ki1b2QmWEbAXupNtb8UEs1ybyqCH8LPaCcf7YXuLtFwN0SVzNEzHcRihBi5qI6Vu73GXPgC4tcR6GiLWpPs8A4eEwBpFsMjvfRLf3cE7B4zSOv7CslsjruD/1fm2C0jQ+66+OnrhL+D3Bt1Ws3XrBgEpSD7XT9AYxPSle8AQxryKqOuqkhnUjH+8gZ+oED2CRbAr5WaVxWgd9A3WyUjea0ite7Rikvmeiuqksti778XMMJNPvw5SQaNGRkeYTzyVIfzZZEQp6dwLs+NRVg/3nT+ZGqx99pJQUtEHVi+RR6lt11Y0vlFKeXgKTIf7jfWdp5fO7hwX4lpnsdMlEn9tVHoDD4YbJiSM9WXpvmcccT9k+uuse03w0IcO0Dm+6DNAw7kK8Qlo8efRJ4OdTLblMdq5RuzN4rRcgCzx5jZEeNy8r+qwANQEl0QBa27MhgJl5Q08uVCgBkHkKUdi0+48TdTHJpO3VRK399fAcaaEv3qPJb3nmfGFO1EB+i7aTxNP13JeJQGUzurzy8DCw17fbAtiBgNs6hQuuigV7eje7YvRMvlUihogflGSfkqyH+cz6zH41zKFEG9CZJmwSLQ/YAOOsHDLC9pT6zVa+VedFIbUCD8Qx7Apna1cXHQ3V8k25aauX0UgJsx0a6dGUBDNLpWD9gC6Uyl5hJxanXq8uY2k042N0soYpVnExpez3jxieLj9xH8esfXa9/D9lCbio15x1KC+Yddcn3zt/PLFZD0HH2CcwMM2FbkwNSs7O9qMlxRkvHI3T2GOSpXlyuP3sKG8m/HFD3oCSAeXWeNJQPgv1IZBhVD6yxGnntCdjmzmG6qwWb5UbM71xirisQfLj+gjQUpUUy/Ro0qJm6IpQL6g7nhaygTnyvstfg5+T1GC5Rz4fl4rSshs/16bXPHFJ8EOwtCVwBLkM0HJkweGxqkrcKMx73wVGA7GASJxxE3ryyRO6RrD2ley6YH2SBYExtxQEScWXcRAmC4sqMSbF9cH/SUpX9kX7j7Js80fK049/LF8P/0szWWTvP3+LTnTa3Apmm6goHuhTi+VNd9wvkjpEyksi/8u/aCB9c1l9oX5WmpbX1lZLj1YN2VSLa6A0tTx9AVAW/M1beor9CXasLIRbo6+/OMV+57DZx/7XfldF6Zbdweco8cvjdi3vXkwuOjr/naPnb0Upz76EL9HpAWrrsNxBFDipEo2Wpas5PRAjKsevgYMtZ8QpYYEnjJP1Ltmd/h8TAQsTcDFCMO6rp5Yxiv4oLOHJHDnGz8RIMcipcw34DGKrJEbkdj7YDlfa8hg3QfeLfYpfxywxsFovtqSOTStElfjLdEbvHY2Cqsci1B/y9zz4+vVg9NN0KZFugsmiesBcfY81oMWXy9QiSSUwmPdQL3Tk5FuzAVdH4QkyG4BAH0jaEsJlqdG6hCIbh+Pe+vr0pSfeU4Yu9pHbdssU0ffOvXzZs8pw71iteoBvcXYs/Y3pzkJpS9OwjZxjnmwQI2eo1OJmkhiXy+4lOTb41rp5UCB5SzSYwbkf2YzV9EK/4Zk8Nj19EK6Dh8E2IW3nFdGKRR3+oF5WteAkiiw+7USNGol9PVunk8qbArc1x+1GOkQgycekl0VmAssL1r8/CChgkyhL7MsWBkG2S9OwnGdWAGFFwotMdCruzSch2nTZAyaXJZpncD2t3l98Zd8paYQUVSlUPBCHgCswucnJ3y3er/2MhCsW41Xzf/H03UsO44ry695e3qzFEUvOtGTO3rvPb/+Ejr9pqN7Js4JiQYoZGVWFQoLsI8HpDj4oWskPeb3RmUot7/44NI7/HPy3teIZgo6xEcFb2OfpBo8cGLkDRGnPI9Ej5QZhFHMJwkfHvebpenXoclgieIP9wG3vYqbSimMUOndwDcuHpGQFebNeFO+rFQMQl6p3aS+GJc6f27ZhUGsBLsRAcqpeNXVgtxdCOzWGM+n0FslKWNfxNAdrSx+rEL
5IIzqy7ghhopc2HXYOVxsaLpw4mh0pe0qixSxZLqg9dkdo5hkugJtWAFcoTsoZOkhpkBAvxA5balCk1uFsXqTadqSURACkLQ3TFHhdcUt+/6aYPYPfwGzjnUBNwu1VIO8IqgRzOoJiw0DpcJlmBByXV1ECl5DyoLRCODHHuzBF45TVUG9B7kJ6x5tBaVuJYR7+8dg7aW6FHWevH2E/Aa1N91YnLhiO4co0XOJ4jN3kt2xL7oNXY2sE3OKuh2E3MAWsR3HyWA6WWGSNNkK38kzIE3Yl/tcIUWuVWCjoKrSqYFlCS5h5GyQ3/7n2h6h7J/BIpTIiGtvHsNBOIfMshun84oA/NbI3vliuLlyM3Bqu16H4jWvmW4o8OzD6OwGen8pp6t/WzSzIfTFlM/bt+347pcC4xO+P/zj3XJR9UdI2omX4sUx1I0nelIolYj9ntJlHj/uFBg8nVmA3M80wmvn+2vLnXN//B5EnfljCSlEemgT3plV14bfyr0PWlR5w+gvubrD7Z7A3i4XNdDjXc0fBFI/nNyFcYeH+izaalFqEAneHLyZJtm/OMSyHbzZurz+j21v0kdjYnReDFQ4yRXDF5ceEpCQgWA52SrLWHXTXhR9inexfLjg926GyfQntfCXlywNaokZcYz3LLaqApZrsYz6MJ6nSlIrKSoaZK58bmftrj2sfeMqwgTk3abI6ql86H0AtuLt5TWtlhbFvrL+i+Qa3o5ZJrpiS9APJRhDKiK/dfuV1LfrPCIzjX3Wgxkx0VnKD90hPcYumnfx7Jtgy4UvT5MIUU4TcXbs3Hps4UCgQ49qaBvaLk3mUYk1BYO1iQ6askcYxHAX3FStUi8FJ6Oogc2tkIMrL1IT7tmlK4nHHzl41CuAB+jXwZBWWPpONmvo+x12kXNRPDrj+/tiBACn4uF1VXQoYg1NX2X7scHrmkYCbdy43ew1/LoUTeAtqc1poeolEuomhxCPNj5rs7x37pHmQ3ts8QwyGlqbpt3DDS9Xv6Z7fjcS2EjO75tgORZJeissjNO6GGKc/sqzsCE6rFqODAECQmhPYNTw49X+okiGqK+Cl8M+cH51F7oiKCzUcDSxSUZJl4NGpnRyjmGBGAdDee/dJfm/KCHeAqvJKSrXj/0ddWGuEM+jYmiL5EtP7xctkqSpJNkON2Zxr3qt3bjCqPGMIyL3QSOCSsolKo/fVqXMSPvdYDu+B09Mlqm0D+98hPHsJv7ut8jC60ZDYML9ZVGeUT+wSS1WAvuFzzDSHmlDbzyyhl4+1rvVimFYPtTbRsyGYRXDppPqI+yYghpXzxH1i8+9fKX4RX8Mhd2BGSZnHH/WAX+QQo+AMw8MPBIc+Y4g65kaQNPW7OLlyAlf9KVaxVquRaAd00/S6u2ufNyrBJxmmwQe/wb2d2epRzCF6nsTG0vTc5WBfE/7OtX50XypUYXvNOm4mAchC5M9eSz98iueazAS0ipBA0HX2xPPiY8U9FeeFGaice3N/eHOWiUkOZ3H4TONoFMMoxjetdLC+4FivXy17w6XbtNKLyTjL1SMTBZHld+uqU+65zfQZjlE48VrtEiFbs/s29McSgekrguGlRjkaSe+Fd7hF3ASZYcI/PV6VRpCAzM6GXTnfZcUnWFaNlVF57JlN11IabMRcI6sRJDq1KQkR/cFx03gpvsX9lAs00evsiGD8KPMa6EbWo/b3IA4nopAzPD6kf1h4vDDINg0vk/Ixb056gURfpWmB2suDKQm7VbbT9Y32bip0LknSpmh6P01coi9bjpTIFWTcmYY7vFAwWf5HFkeXKA74b6SM+V4u6H85+f+U+KUlyyub958Gb8fXe+QiVKo/CquWlTOFKmd6V2sO9kZBllFHMfUTO+V72ecAU9ZU3UcIEWhwh7Gxg+I3jGjVKH4qUns51fDDS8C50RNGUWumZk9hS8ZMPRceNgYn5KJUUKv+hHdwKsmI+MS1/aI2OP5HOD50mP4lnNBElroYAfYKBhH8iH
Qei1IZPF5rd7OD6vUtVbI294n7B8DSybUdktTH+cOrSugE1azKNKS0jx579vvueIx3JQufBxly9VQU8qpLl9CYargLkP3u8v/gXpN4vAWD0nRAiyUK4NoDfC7N+shD3b74mX6nFZwkcHD6lS/DcFqJgub5JNWpGC5dws6PzgOZYrFreGNrTvffaL0YZ2/0gRUNj3vEza0HHg5ykZiEulG+Hj0h97ouZ+lR9rN7C5SE7mGS4xZkDjS/uCvYll5ZBZbbyg4M2QQHpYrn6ofCsMDNiam82x0pK2Kz+RFA0UpTT6Zf4SOJWdu/+zxDUP+z9Nfh0YGpuyP74TTaAZvvmNkRNBAWDzx0Wb5csbCeVA/eegAiCElaeKtb10fsJjrY0dIAQLYbR8gB9hpzA/UxHWcPEhhqqbfcLKqtN3EuZ1CkPf4nIdmcVnJDEy4BKhzuRuigIqG+CxBrMDQwPTF9sfwid4gtpaaCK4d3GuiXqnQhioKvePHhrrsr66QJ7C+RkkPfUVCQ4h277n1TVHrCBB6jz9G/zkG7lGAaf0RNIiBe9AwwzpgwnGZiigrLQj0TodnQFqIWUxyzVHtKSr1RlZdBoiOXWgArx1YngC5aRdjroLxMExR4EPtCPFsdCOO9FEKmDJ6FSWieGT3MAV41F3ckkgjmIPolX6+wS7aSOe2+DgZ5PAVpCgtdQW1HvLuG3ezI+FJDxKIh1wlXEaYLIwD2MhiABrk9ikBzBhU8lREYzt9XzaWh/lcika93uO+mPfBCKgn+WawSfJEKdKhqxsUYQdbWkC4CS1AOKklO1cKS0tOLIJbdBo1A8Rfemm3isi+WTOebWryNqMjrvAABM+1CW56DYkepN8M9VmVhXCJAkgo/1Rd9axJe9ZW6wxydSo7U2xCdGO7sO5w219bLYEGcnQJHlfM91f+ONjPaawqDpz3mj8gOYhvR47kkULj5Lv1rQa95jIlCl349owt9iD0TT9au1bRGkI8KPl0MDPkboBqS9hOjtTJSmO+0nxHXo4GY3CZ2Bx3pYrkm8/ggFGcPw2+G275Av6C2qWUPSN46MrQq7mBabv+bL+AhvGGhSCe+WjvCuJMKr9pU6acK31I9g12IvLMHtPxkQ9raiOb9oJTyiB68y/2ZTkgHqnu0qXVy6eMqDx60yOdrxNtTAqrsOxWMwILof05Dg/zAdebMyHtNWf2dej2uuL9ZlHGt14Op+hxc+YGtvpqYOQ2Z09iKjkywDeJIqJcX8/Gvc+OiZpfR5h9cBDw/8s+vnOvt9+AqsMbrC77JAxI/u1bGfQx2j+qMbPWaOiiG3XNhcBkUrjfZdjOEkSHmSN9BeE6yuYX122OGvRY7g39WPkBNE69UzIguU2ot62Dyn4OQWBGKEM9mxs0/uqCPOswnqphUyJc8YtAD2KandPxKj7il9LbN/EryHMt4BqRA1ZEAYzbyoUa2I/B7NT3CPQYckUcIqCjq7MxfcHKAiBT+npbbhRCSKoQSWmUSAmWhmED15A9DY7q9kA0Xbk/vxJUhkaTSPnQjwCl4d5jBq8tECl8D2Vl8lS6PtKg66XHBBRoUAYcMVGRBEwzs/vQf3Eg1hRbboczRFZiByiDUSqn9YtJQVpevCc0xNEEIzXEkS0fVPngUATduogik2uUGnoxL/BqTI7lBkS/4GA9elA+WdrwAeAk2zAsbFAFVx2UnpnFtQWSnpOC0yuB4GOJeoF77pDbbcOGq+Tn8Y0nNTbotrlBqa8zj5upFqZi4v86ON2HAsjS5/1FhvC1YvMaqAm3z3KhKn/RguTkz8SG4koXWaivB3LWMeLHahWejkVTnsnXmfKn5a6ktppHp9gnjf4qREnWEZyHWISlEPLx82odnecKovKNvLlnsaDgFnbJovF1Gh/WVaxgkBbbtTr9+wX5eUcOmrNw/XvpW7JybCG8zd21PH375Tpq+ThPLdF7oBCfS71IEqGa69d87hYWihv8DcV3+7h1S+eMHa5V0Cma3/A
IMthZPr2H8LnFyq5n6p2juJetIg2K9BKGIxAXvdrsNskyfgD1rPNLvkvpNVJ5v/oR1842OTInVipLrKHpo4SZdw4iXyE5eoCF21PQUnmzbFSBJsolBHUh4c0t40knOn3d9jYCPk41j3g4hwPMAkrcnPYsZfWV1poeedD3CPWViqDE6ZIECTL/pndNE0te0ar38cuQm5/cRxSZ3JIpp14VJEFfcmaZT36P5AI2nvAZ6Gf3AAiAKnhF4UwbiQzAO4GNwLR8bulZnZsuLPrSUUEJ/lQv/jeNfuk69vOAgX3WdIosqEzroeP6s1UE8denyqlwNCU7na4VWIW7w0Ype2WVPY2kHzpRIX5404iZIYKQJZQG4TZm2zq0wNJHVOZXHkrkftAf4MJCEsVnZXRWYUFbkklWT+wUdm2CqHs0NrU37DOSHi6KYFNToFmfFr7QiSGcHrwiEoeP1/CW/VnSeUWqYcR8ZcQhj18aa/U8KZ7eejdS6Y3vfr/o+a/TpO9ncZ8e20NPfTBl0RTzF1F8emVnImIqnerqz5IW2c95V9j2wXBaCX32BQIgv/oflDLyFQ5N8mi40+0MB8THnYc2TiWk9VdaOHuPEoJKRDrzpWseTJhQPI896ketnw0y60ZKmpLzigz0Tjj4IpJKox3UmC+2kDuYys1izb8IirDmR+B1G2dTlDWxXUhULwmYfJ8Q5d1Wb5gkqoIgPx5+fZPPMElpKISc6uGYohkCfjNjzo7sgsukZn/EIGfdM1GDDoRmuXldtmsFYMVb22IDjNUxGsv7B17BIkzLcKc+P36tnlt3+P7Hc9UrgI09ZR3Gypk733vpEXIH/PAZMoI7dA0qUGCILyOQUaxFzkJbCUotZmb3aAfEfEk/TxP89jBNTkdfD1RaQ912cN5BtL0Q6MMX2+5zc8JJaKjqWERfQbsu/DYEUagSriCloNPu9kGUD1lv0qVcqPbLntDieLdhb24aq3as51cb95CMd3ByAplRlv5rbQylBV3Si0RoHrSGO4mATIvlZsQ0x00YvrXDGgmPvBJCD9WrU6JQF1VWQsuclEwOyTXJ3nN626YDCZq1tdVFJx4hFMCfI8u7xWZcNxNVAQqnbNtE/9SFwHDkXE3kt2c621zT+54BfukK5/aQyXSiwV43aX3I2ZmhsKZ08Er5oq9M57HQvTla8vddZF8fBSj2gYEunTosTrmf08Qmrk97f8ZrU7uywrTim3jUh55oUAL6wvLlEGeIoy0bFDhrgqsmUOAGK4fqAzMsHcmPFpkFF7YXfZmJpvRRjS+Q7MXLctAeevJo17H7YrqHvlUp7MfAc62XWETe2kM4Vhk8/lNAH77lpQdTBmJkew/e8BYjcDYTLwYyuOVCZCcug5rZE5ApYNC/pl19vaYIZ+4gGMQCdFIJc0ZaODX0qUSBqzoBu3agwcRtKzDqyd+0Okvt1wuh4bUrgJ0ZyGuXR0goK/7QZzPbQPya0HfcZJaQdlFQqUt75Pb5yK4usdlKMCimVljuV8XrJb0Y2eR4RxJgDQhmth+75l1SOnOcLGNON6079C5iIAYJ00RYQqkZ7iIxZ/MkZ80iv79NET7XCd9HKUKy9JawYqgH7F1M9fAejsNLhNxlzsJxzcLh8WHSJQqHeQb9co75IMf3W6tFJRmbtwigxTLzKEFo69Zpe1ajbY7LkBQTQBmGPebRm4mNXK2RRIEhUPeygu03FBoRzz0QmXiDmC5Tm9YlE3v6Eno0ry9pUgErAimWlTbaL4ihfAPaVAjDfQt4+lqN424R0zXDYNe32ZvBhtch38K3wAu9dLNfU2JCoCu+2mCFMOTOH8V1m5BNs+xwpm/AdfE8AYj8bluLKnXH1sC7ZUXSFqNfUCDaUVTl1xeievpUDsSwu0zv097DGdts11TJ+TvxGImRPF5ldxOEVQsaZOMUfWcJVVzng7c5kLlmxiIwonKEXW3uROP+vudFacm8dblezsKe7kSKksJ1irDk1Y9yCWNvkdZ
dS8wAg8xoHKbKYlypk+f4gVzQ1VrPGRX2kqnRzRzN9Ro9SoNJaheR/dLzVGdHswR3NrXaOOKw8nxIlX/xiFjO0xJ5+K0taiav+LJmmcf7xLaS+lTqeLvpF9P6Rgc561+l3Sps/IU2mpjOJKRsYudColWOqnZML8ACuUb5NYq05/cEogiuxH0mO2DGtXawXgUqBxm+Nk6s65JnCFo9b643tMkGiNAw7ISWpguzwXoKx/ZqPo/f5c4Fjsqte8O0azreuGAA9pco1fEhFj/kub9gyzEhitM8AYQ2kORqpjZ0wVHuzOToA2WSmwDlPSwMV/NISgW5HX2j6xeksd/ta37mMH7Bi9O/FGfdpDuLHuDHKoSLtzNRinljqcz/Ku9la71g5M6VT8XsFWfDUDQTzR7Zhe24neHD/O6gQxngmZseztfKFte2oZfYwlnLE/wpu/mAJ006ftkWu8GFN+wVjc5WQTOKzqQNyFxdliwNoYJrzsbEKfAcXSocPBxDm0VhttznAHyYswoU//gy1LBG+EIjHzuKK0KyItx4v1rOoT5QDwgt4bjhfZ2RlVfz8nAfStDaIoLPqLdxuoOnMxTZEXore1qv7YLvbHlQNsha9yesKLRnNavS2e0t7/sP0vKyPOkOieXWRVdJmOAXtK0cq3nrsKJEqCTUNk2Aow2PvK2YSmWBmS3v7PPqcr1F2S1/PKifqF+vKlPmKEeUIGW4fL8lkZ162Zm6Q8/lecy1xhy3wLBxaohZC5vIHs+kDJ35wo/5r0A6k34kIIpiMzKyaohH8t1tznpvr2IofBsVidWvrEw5cNqJXeX7bqi6/gw7y2BUx6/bFvmu0RVehfkrQiY2f6Zirpxx9pGGd7V6s8Du6Ay9TM7lmzDzS0J9pENMLPz0CIfqBKlM9khegSGu16S6mJz08pXp27oeLEriQWMFFl/a/IVztZHGlfOwnKoZPxbxb9eqQSNE6u3w5rru6X3OB2Ezu3VqfOUi+I4sH7gI8SSqvTmiZTGIRCTGj91hI3ATEHZQGF2aGApE3HAQHKh9tpe5xDT+rQ+OmHAayEcCymzfpRjOu2jmBQlpQSWLSTGEs+yYpgR6ZxTNDOClF/KFjmAbiBOIzKSH3KrR2sKP+k/oRWavJR5gpU00vnUNhJausXFYUtDWCM3tGP9XZQyUTkamG1LFf71GGFtekDnFC9x/ASQCdT3gUw3/sYrZHiG6x8Au2DAub3xBwUYUf4ZPNRKWSRftTaqCniF8+xfXzvPX/W/LgFhnma5q72L4EJo1hrYrTtN8YcOyU+s4zyf3XGF+FPMq2w+QUHa1M3IXdtYZrZ9avwm9GsYCT67YMPIsgW/X5SWfJH+m/lgSypWqDcA8XpGjUSIUHr8D9KP/DuLRHpY0VSULIkLl4rWWElfk0odAXBx6KBwpHxbc02eze7HSMgH5wjLb9GM9PCYvBqmUWQ7+4lZmbpD3I4E7e5v1lmvPVJ+DVBBqxE3FHv/VSOlxpLLmMZeTff2qFY1l+35d9IwRgWwN5XqveNzTFxTgzmf3DBrK+gmtTOVRABu/Ka6ICVMBCJQMCSZqY2gH/LLKQWNRV++X1KnollWiHbon2VGoABqggoez+GdUzCYUPLMNTc7mML36yJazGZvMmhDRylUgCHLlrVhsPZzgwRDs+6wZxnRc/teA5tf5I3q7EQzTJEHrLRsJxfCsi0Rin6l3PIUvny8kDsved4Ek4fbbSvX8RH9d1+crU+2vX/R0J85izEXFPMwhvKPqV66w8UH3/nIdz8dbHdqw5y1TNycZ6NHFPFKkLelqky0Gl5qHSua2+R6UYXYkp6lOz9H9fkcZiaJo2433u2E1mXxrTA2x3D34n7Q9CFdqBoIFelTGQVc0sPr42ueSu152A/Q6r3DQ/8wsR8bxWGwQp8j/DJ8zcryfOyy/0nt/O3ycGjSPbUZ9kIbxzB+WZBpGa7wo2OUhSaxfEHIGVvMYLhDRB5TwZpwrUCUxXJDkES0
3dp347V2q9Pp1cgSy3Yo1UB7XUdRPzYMYSbB834CqZlHcypmfkfH9WGiJh2keLs/17Pu+WQMitZ7Imkzzj8T6fqq95Lle0pLFO9qvp4svfLRwDsDcqY2brYEga3qS+3x3zJ1orX5HUhGw9rNEe+/YDx/E5nIHse/uZi5mSXioS9gi33joXesWGDVzWjT8w3GMa6Z42mIyaIrHK4WIpaBJO3iBZKyW+NFF6I4HgjY10PgyAx/5+pPARjPJ64jF+4oy8+THgLoXO+PRYAxtQqdpioqKEuegQxqzkqYrWxgVadLNnVxgMI5Afyh4fgUsU6hjK1lcXWBBkodbrveR1Y6+OMHvTvhN9PY4FG/mW7W3B3c/I4Ba3k5lI/fSi29AaYLiMcekBXGcAWnEEaCjZ4WrYTVln7b9lcggaQCl9Erk6U4f2wxLFeFfEwgMyq0W4Rt76pYqP8vanqFUfb8MspyNscNOAU+E9+s1VBMdJVhqh9xGEKuVsgwacyhNxxzPW84CEg5fslseHvUxvonS5BYhF9tMfokwiVSCo4lynuv3FJWNFx5ZcjruimhQJjFebLTKeEWtJ6mNFZIv2AwtOllgPSYtWMZB4HufCWkYqqW6/c9Vm6/RFVhTfpni6yUPepbMS8oo+b0uzubAjGUvebHuDxv+zO+q/Rl+bs2uaBEj4Ozd7HAO8zWZE8piZ+SaBoT76cy36Mn0HkdaOR25nQGaPISjXVcxllFbZkM1y3wRo9yG9SfuXelHbi7BI8GsFwlD/GTWnK8u04ms5uP/zdazHOhXbNF06Owgd64Gs2eDiClBda/QQT0Si/MWj3igKkDiI16wvD4xyORMllkQlvFm+FkZNUy+X8Mn2GZnAmtQ5Wem9w+M43TxxtVEfB3NFWpHV0DU5vk+Oj4i7x9LSlOa4lFOWOfPoTZUz+FqZ7+/h0o/FPf2whohwzUU3JZ7cPX63b3VbM/v7xHimCK4slxuchGmp+haQm1D4ocX4DbfXAzzLOQToygItRxYG73c6MGKDsAr59mMIuvPST7rPTdeDGi6yuTyfVD9p7XHW67OV4HEWvVCPbyn9doE/wbI92cj3jESB5CJM20Xv4+FnaZDQ8gUNR4G0z+/+DxUp736sB+uUO29n3M3nMwQG8+SX4EuqvXz5rQGE9j6K7Nppmokf5dsDyzQenZF2dkIb3BP/Bm975Dqq2UPS5pupNteZ/A8W/lKfO40/BnCJE4UF6ZRd3FdwWUqwOkXelpFw9gXdaMfcxWuBRgrHHa07bjyx2pmNoz5E0V9HyJb1If5qdbbgmf22kZXmusXV6jdjS1UNrxfCnH65hIbdCdRy4a2LdlIv1IKjMHKr+yy2a+qxp69XJyYugMe8VbG/WN4uMMfqDTGJ5AafAERv40e9hcyrHf9DXJ37BUofbWifaakXijKUjzLwcliBF+VtnS33RYELNArokdplKNxnYf87vhMk1EEtP/9xC6svCRoPvABjtD6vu1EfrhOZbBXAhGpkcF4vKHBav/6nWocRdOmJUStkaTZ7i3e/czSjJBqo2n09YK8sjmDwRWbHj4HKEbi8pTQie73/sZJDby0LydKmex7/rXeZ7BQ7IzWJURuepZlRDVNmyLLON1AkynLb6dV/R2J31IllQWd1dYfZDiP8Dkyg6/Y8tdFCBhmZljNpkxRMbzeL/vxZYsfP6QM/yOHCLxtmePdJL9q5a60wID7MUwzavzbIoAiCHSl+uhpwp1WC/0rygdfRL3zWh7TSayHajAPSSMz5bEUWhhp9QSsA6e+jU7fYeuDNQ1OYft17fhHS/m4huF2oY9oqobh437g/DqKZQ8ULSU1lqSSJF/QG0/KYPPVOtl8PV5z3Hd49sVBFHWEYF7euDKRJrhbXZbXme/j9aABEeq/iPWBbDm0ylYIqk6yO59hgkS9Pf3LWPIoJLBM/QKjrMq7dqoPmSBXNL7DDVXLvkdhob0Ar2b06wZBwNqfcboXsfwRngpfwd0
z8o9+md0fpijx5O2XA0rUeDsthoc0zDh2nAGny9oexyrYoOwXUPN3d4q8v48mQUdpIiaeVoViZagbkpkEzBqYB5AqEsNHbnKq8CqN6V0s76QLVCydJwn+pUdtP0aZPDkV2PmutXljqDWmIAaDYJQI4HDVv3BOjKQoOW7kdrCzuoOvktIV2XILijd4oIoSftAMBcMwqrZMZYYnqH4g0dVoHCvgR60FTvklj7UXpv7oHsq5VnR2Jy/xUQ8Se1CiZDCHBqMohTLFkQPT2rY9A3EcmiZrkpw3uR4w4dDdi3p4UQzLoee0UMqaN06RsL4Rbe+twDmPSNKr96v46qGKJDqS9uHMI2S73onPR4unisHiGSflC7NfpwuivhSFvB5EJXoLT8AL6ZyRUwx4Hx4ktPkDcpJ8zQdiiubPRPxZL7E7d+/gYtQNEz9srd3PNLTnVTK3wd4feHUmu9m67vM6wSF9ZQ7gsvMAZA94St/d4B0NmevV7s52KOaD2Q2yPDdNeMLTN8U8qx7lFkFkP29oxUPNMDbx5w8MtWcbO0d/x27M8ygvYD08nPCu9SSqmeIserE+Wz7zl3+tuYDi4NeOTN9zvbuxtAT3J+zY9EHmDY2vZFUCFAVClMmfIfCzQ9kHRwedZo3nsocQ6L03uRdrU+pfxvYIxa1ckQp/mT3nhmH0uW5Cvr24R5FFd2D7GSoWPgkH8zOii2VZP7OjDvbAbPzvlM6buuBt5rCjOXxq7m7XuW5t8pVGrQV/hxxtB0nEO8iG2W1RpKX5FD31tRMsseBKGNAOSdh6GZnQzQxYcwitZ0BhDVO7cStctxDf+T2DjB4NWjvnSqvwCNEyaZ4zwo04ZxlY3KPkW3gtM65C0A2sUmvfEbLs71tubiTGaf3RsjERGv1NPjxy8QETIFK9/2eV902S9GNX1DOnh3qEFJY8WFoR5e/YB1DUghJ0Zqze7lP7M4qkNsHW/Gc8PwWdDbjvtcNnE9IHu6KF+ljb2t3hMcvt9SB3aebB1jszYMEbcc68mfi/NsC+D+zg9frqALxfbec+Oprjxb5vGn9S0g4YcXs+Dz0zzWXKjpt5v4cmS+LPenl8ejjPprQtTHcAeWy5OF5hrpwAlaBWsZAVEmWapsvmx0Sayj5xWTOoXRRFUsMoT/5dEYINAPEUeJ/HVwX7I+ZEuakchyd32gioRE8+RJotr80wdJ3unhnUld8JRh3spjmePnb9/OA5ovbNKLo6A7yhMlt6xSQK80svXyHpo/2fGefBpFSPkn8jht6znT9+Y3rNVNmthRQoVdb1kcOUL+Yb+NqhvIEWWwBYXneEOftK54Umfh/nws8ZIQnpzt/qNVI/B4wwTJbbhOnitIEbvVj2oCy6fwCd0CfxuBr0Qn15YFIZGXfjbpGR9fCbIktM7GHsJU4yELOTh5TWNXTSRfR1p2BqqD7QWdBbel9Et2l3PDaNE4ninEloB+IArTp+HI45UnJMhcebtG9HZskgMavFtvAYQ44DyOm8p9mvw8ns9+S4wl33PXRk7gpei6i01kLkv+h09Xk/Hqw8eServdlc7pDCF7ouRNYcOZ8/EuxNOKINE5r+0h2O6DmMefATP3G1MB+++TfMomjQBVtimUBRyzD3lRK3lfRo1s1TdDH+bdZroQ/PguhjpT0mCxTt9lsVMGFHMfP8Li4f8Xh0FPSQhdkBY5PtAZU9k09QgmKENsczL5emqEdyn+jzKA/fWQrXdZ1H2+nvh6mQR7f67s1irQX2HvVQ8ojaV/Fgedryz1gwr++LBQGnW+/pKqiUx5dQ4sTrAMdW07ZPzuE5sw2fr/MXJWCaNTo4Jjbi/oxexpWf3Sy2CByBwCQxgA+6HviVlEjZalCJ3aC9DfkQV/HH/1H5mBpsGSJZDKfxG3elQwA1A4z2RjINj1ScADLJX/IdVMC4dShIlHQaoHArR5q8OimSnTOP/WQgVUGOlcNZXXexpE+8y8fDynU+wTW51jvYmMn82rgJL/g
9EMjgYbsyeq+rYo9zvH8BudveKlmwfKgb78wNZ1xy2y+IrRsZz3RzVc1RhxN24Dn6iWxIQn9Pm88qvzd+je373hkPbiHQ9kuDt3jkeuTH70F5qRRFFVTbDHOFdbw3u7uYArrDnGAxv4TY6FeoCugoevETC/nVyX+MDmUet2Gw1PwJvIfAg9gRDI9p9N1QVDix7AaYdh/8Tj8zDiFgVHCMTCnsLpu1b47XI9+4LCKiKM62epk/Vi0d6osdYaKxyLR34fsPUC31UJl3fDu/tsn7Vu4ZEKJQKpqpjifr40r+jJikVbNQU/t3ZoMBVul9fL8vJKYfNH/UYWagMEzy0fy4glKsre1x+NjRekYTycYIs0bCfVAJYsBwB04Dp4n/4wZ3JhbY97TuMUGmw4USdpNbnP7gBodLWL+hyvsrvYere/yGORsQ4Vjv1+5mAYEZvUMW1CPY7SrttbnSpF+jc0ZNS8Xo4/dH+vU9eN9/ESG4tCaijqA6Q6tfXS1w+PW8dfbne3bHOnUHYk9xP6TmhOqy5eoSyaTioyKdrDQHDSJchacYuriw+UOsaIddKitNn6pg0a6V6SMbP2NHCaw5c7/DySCZeq36XivAc+Jv8wHx15HooTZBkYVMMDLqpOuvgNWx6KMn+knmLMw3QPMT3j5fq/I+l1TbIowf7jdx7jHr7ZobCJI0VWB9CdXgqfgzCsZP0OPuhm6w4dAla0IKzzmdDaQ7fyah5bb2M4Xd8eeu9rJ+0AnHZd7Tw5iCSwYwPdIVQ3F519Gn9wNX59ViWVBAfYFL4iCzUxODDzkr7JUBv2YWcvjiscUMLQoNI+QR7We1UOqTim+CU9luxa2XlNr4vNLG7eFk8I1BBUXfAxKhB9obs74uc+ij441H6KJ6y5WwxkCL4HQEaDLN5OlZM727StozM52LG+eH1hyUZZyDmJh+fBxmM8ZG3K2Tz5BLWm+z+zunJsW3Rx1lVotnIuRVVDR1kRdrHYgwWB5fSkmmi476ZYRY77mH3nVIDU+wBvQzTkTA/zK5rk9UrvsKx5FnfBAuE/7b+P38N7K6bTUg7UsZbApSRDlKvaDVsHXf6GwZ9W8co59JnrYLFHX+f+SfNUkxZh8XzIoWRBvytUzTQqHZ7i+jclG99H6/LrWUDhmeeM4zwaoFVVZuT/rP0mgyOkrlh2KO5z/BhEpH8T6xB07J99HoZLrd2qN/TAEldUEgqFkoF+8rWoNIEJH7ZgsHHKbMWGo7qaNrinA0t5l5bw7SvWRI5+9MJxZ75R6NIbBr61RUV2+dNGC90zub8L3yjyNvJrQdpkCJVpkZGAyttcA6Dc9Tueg8flqDNgz9jB9pnrRstVw/PR4W78RiECKWW9Orc+b9F5tgLHucsVFPGuhYx8+zZuGguwfeqooY8wM6OrzsIzk4p/UYl5zLNLfmu5sphbo85ENa9n0p5pk833GHbbLltKGQ9gtan8EYrB4WLBd2RFowAzma4aiLeEmezxAWCaK4S1wTV9jGRKsbe9zcIqTaKdz79fAWUCEAAyVdpe50etGdZHegmjhNa6Ki78TDFPJgRaj88Z2/g0z/6dd7R1FRNbRx0QZaHDhLgk6gVY5CeVPG4/EUSQLu4vvwMPX2onqaps8AZQa1Zn9XKD0SdfAH7q36pJUx5a7MYNqffYFpa3At3MRXmfktjugw8qmWGYkfunGt/tyYJq9GuphsR5CoL/BMWQs4gsubziLY8sPR7eYxQNKtVrcmcZwxsixL0uFRRajS3jg6uw7QVA0Su7Zjz/j1qjEKWL09iI94JoN1NLjCAeXW3rfcyLd6vWInOmDT6eJ6RE5lOMaXFwhF2s/bxjreZDo3g8P6DSMobYljeV6Lm7LfbaKGt/71W1n3fejEPE3F89EwL70wl+DIuEQ/m7OlVltXoKvHcd2MHzZ2BlP7TZQ5+Xw/rN9krx4jvhILmdiXa07RL86VSwkTytFIQZ2WD81UwaVujt/RNFkDdeSUEgEZf0f
RbRmP83s9Mxwc8SPXlo1/n2mwCC/m4c2rVB9Urz0q93N6KzZB7iM/Zv7ClxYfaQAQjPSquZPKwN5OZj115lZ/m3p+sQ9N8BDuEPgKS97PMvUdH2RbbfnAmsTmTPGk8h+Z+e8vbRhn9HiB6SFXWCM5Ck8HOI4BG6+/g6PGaEzPMIEtXvzhstzoHiRzTEGupAWUdmZemMcL4mi8+fWZIxEp6h/9pN/FAtjPp+sQvCLXC/+pNzHP8zBdH5FxRtqrqIZNeWZ95g823dC0hyKZ73ezBiwXtFDDx7bvO9Hoa0gVbyIhCIKsd+VKHt3XDKzQpxD7Kj4gN1meUmODPLHq5Xr/jvYuiGK96Bcs5stc5JoGmsSaL1MYttvEEOHF/2abwrcadX4mSGaqaH/kmLKZKSKxDadaU0V6WptsfSIPQurnB5YolPE91JZzEsYKoqxYFDk8otnrKxsiG/lNXs8qaq1v72XlpFry6CoM5ts9GUBC9QV1zWKGsLT4fQhaLq6ogzY23C9VhSAKmmjc15y33dz5LjS3JdC6I+hRkvgk//JM9xFrCMlFj78xX5Eli+IjI/MT5jXDd92+SBXLWXGCuCCQ82HLg96TIry2x9I0DHcnODoDlXurtXQ92uuH+WK/L2oYbH42uo0GquP4hwoIjyb4gP4OBASAJlA797PQcf47kD2vQdJjV6h/Rx1dB1DhCxI/RIWeH6vRcHJ+po0tz2A29D/vdd33KTm8+v0Xy+AMtVbBRiFoRhalWFiuMv++R/yKF3478KkkOxEdoMxkQatm1Xp5nEFh0Y/rqlA/z01XcLKVVoDc8NFHa2ZMNiGzJDl6rQxyZJL6TPUUSnY/JDMVo+ZKz+UUNRsJfP/1fddrO/9mYjD599bnKj+t4xHARA9iqWKnhjBOB448+5BRqGd9oyh7TggIn8j15ENvjw/d10JVHxwK7qSv2WUXZtPx2jb8nTJJWaMoPRfGvWUSzTlGFBO11FOIbXiqNg1wI66QH3oAWJrzYKbJldJb6uQKNAFiv4Ot1sZNZNDKWPblmA6s+u8EzxXTCzu5IMTi8cGqRLtOBxiGz59mpZmVVEu0OKbv6vNtx0ioCL/C319O+rAEGYE5zf55D2pTBfvhJJL42Ed6gwgSbG/ZMksX9aw0SN/hVCeThx1ZidImwNcFpBFGf/UTzJ5m6kA8dHS8EvGB/4jsodwdFn4mCaLaAd8Y4U9ja/R5vECke8BSEJWVD12s+97PojYgIBAUhjLahTRzUk0fpvNbiO7xrHA9zZByU6qgwLLHZOTmykxZ6v8AL18eB/l+6T3xXqrL7kNYe6Sn+3V4hraGMEHlC1G0nkWeIchr8NZ/+RsCECNyHcOIaqWHT4sPhnFg6AFA8gnwxCgcaw8i5vu2Ad9ZDO83Eq/oBH9ZG/NfR5gMzr7fl3VKFrdWRGrtj9bmNb9eNFBO6lNMMSfQJwHYTGsfR3zvoDak5YpA+jw31/H7Bcuvquxs5zGNu0qqUqpT0387THHoHhKlaamWTexmlLwJf6uIXTMXZGvPlf0mOy5yvz0ZH+sc1SlAVzHopiojZQFBXofwGPYICsJYo+9hKOLZEp3yunLlqU0WjpEsp0knZRirAYcPMNlZ/qAIzDmlydVmz8ZxnMCbABeB8EpZiJebbaKRQCPDUo2JC6UazwtBn3ieagwbSp8rcnN93/VG+w0tW7kQ3C02lR4lDTQf4PepQekbni0O0+UqjudZvmCkMvov4cfQRdHjIx9tZ3G9qX4sSqpJiuKMz0VtOJ2GcWc/Dm7N5y7swukzfXaQ1eJFxygVd0MoEHe+eJlb9d6mk7f73Ub6IgOVlOjcXviEfdOspDnQwypwZW/ED7kLkMsl3PtmK+LzmqRGY6f7NVXTth4vcOTCVg50iY0PNxonEwT9lv7c5jshvV81JMZHQFOgbNm2FsnIRQi9Ifee83J9HlwUUXaA3hwA/f3uf2cNnamtmbzRV9VFP0wOSx/K9Ut43u91eDx
zWeLkmndbNxHSsPT3I4UVG7pbgn7X+55n2XgJ/ekp5fafA3/RJ+wpHts8nBFPXd+n3GoxX6A0nUlPduUHY3eED6gLrRtzJ9ItbOrC60kxNCCp39aKz0kQJFN+p1CJWonTU81FpnFrenW5yaDjEeNKF/mtNvgevBXwwpG8qGJ5eOASzUTrHELjU7eWZ4ruxcOZSp/ovkaDepkc16p29GgIIXrvb9TmcB2cLMhQVDEX7iTB2rROn8N8XEykCt4drVKn1q/rUTP9+zh+mpR73PJfVpdPwVqkYfBHsxYwpPH3yz+ruj6Lw1Pcmdt6fwaYhYqP+WYlpK/R/ou0+j6KyQfzWIkuuPeKHMHXyPN9WV8SRdNr2fIuUMxu1x93jRTWkVbfW8UaF9E9EbJb/1uhODUHrfkQQYTQBJQGlUOB+/YemeJFpgD7Y5hlbnIFbUcTNNHQrsa5Qjt495S5klPpsP8o9x5uDUtkFWTV8ij/fC66JKQlmyN+BJ6JtqV75tDGNYnwVGJmgq3RekD/FAaUoqNfFxn75n2kf/QIqkBg0zl/x6CWZX802kKb8wIDm6rfrtLuR7dnxy8jUZe/T1bPD+rDRj3wyQK+5QGu4O4Oe4Cpvms6pyK318H8ajPIeQcyvO1B/AXZysNjWrV8DM1h+nuzMmo/1dd7SNVy0UeNg41Xlcs9fiKFE1SvG8Xdi2CjjnHlUJdxJo/0/J5hY+R/df58CtuuHa7WBWjHJfHM41z57VncK1g1yIAY3GqpjHtaD/V5vtE17TqZ+Iei9QdbweJ7D+p7lnDD29hYuhO0PX99dmT2FwpsvtnroXS903Jf94u05YNrtYVrq+4JEgwwdY/Q/eYxSmC8+aGhIDMAUA3TmeJ38CCdljhFqZyH2DfdFYXKCrbW1S4FKob2/5Q5gxNvbKuH6BMaYBzxc/pQ5cNR5ZLSmPrEaE95UN11fqKwah2Miqj35xp21lAA02k2Uuf70nKn4jMdNSL7cUt7KtVBA/cBiE/fvwNdsoSUmdYksPCGABFDnY4OrmNuQHlL3/dGLuUs5L8nLmHbZmzw9fHa4gy2TJtJ9rvRQXdJPCOPtHFT506nbIGepdlfoFzGOM28oYPf5rbHr1fVAIOZ0uByFtfdxebBPX1pG1afEBXI8/BsRsxGmsUlgKOuhjoisU0r0Anu0eHgnm3yWT7SSOXXxixuq+vkW2Kzwfli7wSO2qapik+LW4S3z1GOK0Z4myR12VqoiD3NENPkS0DnPHDuylzbwPL7CH+dRKxWs/yFFw3Crtw35nHWMAlvjwHx2xWGi4/LTNIwrMY4j80riRAVg6dmyVfj9rKBiWZloNrdR87moxuk2Y8hOizvVlseSt4QFtq2aI7dqwkI78YYZp9fMa1ltiyLzfqaO0Pp0tpd9UBxkFy7pqkCO5NR/0qMn8cvSG1sU2xcxfHLy+7mPPKUJQsk5uCFrfJdxtHyIUZxC2/8/K7U6tW0qoPHE618Ie092LHeJJkqproorq+PRRFQRsRZw92VHyJiStaauhO8URzb+vqC5WkbeMp/cl0EJxhfxit6hO8jUTBfyTd2jmkdU2rhg87gs+XLIkWL+1jXgCdRlKBfMM3YutFbUlyEvEiuzmz3LbZ4Walnt77eUTHlhl97efSrF7OmKZTcNn+g2U/atEm+3oNjHqexoI+/5tuT9iwoB+8ezIN+Df4ChU9dIxo/j+pDWXXp1sXCzj1ZSWtpts6IdVstk7FjzBR9QUE4QELRx1T2Rb1vYpdJKwfkkq4u+wrTBZ4THT/A/pRMAo1q9MyiAhOetO3Gt7w75Eb11xUR5m3JusvlGaf5er5e+4JK7GjcFPIcCeWgSWknE3ytk35PH2rNua/tYV3elxOL4VBDkt1S6HdYOvlVNEM081cTsV/RUJHDYepXkE7VaLl6UOkULaQjoQ2l5D+/qzQ22J138+lex3v5sc0Ugrhp1kHIOz96wjpdZfXBUEK0yasxTCzs0Y/Ihm0FqIq
H5haOFkGaEgHATDzcizh8ZrdREcd/k0p83cR1inedLPVcnunrVwlEYord5nd3Dp9i3Scr/fzpcaaEvrP+dl+w3lL3ipEr2HX9uLycgCYJRC85N10NxyvhbG6HTbBcv4TYTdS4/LMm+O08zwR02Zs0JbRTL2Es8LWizyDHsIj0bBhr3mfoArU3++tjhxa1zTmM/E5LXC+zZoYI7ux9iuMX4X4Dw7bA+d88bmCtY7zmnsfj37EvVj3Krp8Vzmi+c9+maDbIXooKP9p1GZ45WyLYlDZ4MvCOA+nCjAOHAYsvxtC9X2mtZX6HuVfOcw7snqu12U/zR9IC4N1JzpJi5BcQTqHH82AXzbuJWDZM7xmYyVNAiBR+9naJvLd9Tbkt+Bf0/lZj6brSMGeFtLwHDBPNxbc3R0tb7zdbRi0tDyDy92fJtXB+FBsvOMjm4NGHLn1sE87ke29OTHH69SdeSj31SSEWabp3V8tcHeAHiqZ9oC4JVBbJz2LbR04bqIOG5jPt1ddOAMpjAQ3dphU1j0JVNPg3y2qCHO6Vp9Wlh5zncRANvT8r2wZgyyJEtFQa6/x7St71zCD0rsPXr7KEKoJzt+R5zYp1mj/nuM0sCdwZhHYfa4QCWk795BHdn+IrWs0Vc8u2RIFx0nmLG5o9ma6D5W/iEUZB58D1SMmj+bHmwQwdkoF+h9RilcCHX7DYnn8ZPgsv/lNvht+x8Nxrjr66SlhQU1O9jo8lJDqUw9H00n/qu8AccIk9CyQUGItkwqHDAFZp0b/eZuO3ZQbyf+VdybKjOBb9mlp2BbPNksHYDAYzGthhZhCDGAz217fwe1nxXrk7uheVkYv0Cq4Eks7RnZDAx0kceBdhNp+2ZbkleMynfT3ExTwVBHz0MTLZYNkL8ZgUILkFeiT55jZVjtIiaXG7lidkpzuIaNSrNFya+KQ+s+y0vZkJLhioRwd6xlxPnmSGJ558PRgR89KcoV+x98lBual9vYh9PsYGrstx1OjnbtQOvCiVS895kR8J7hlgErsZX4TVlktEp72c3OZK6kwpPXu75naDDahbb/e8izpjKiSErLGXgz5/9s91X6TN/apuMFr51NUEJ+3PRp/ouyQJdoJdJvbguJ3ki4rQY0KVrYXNJ8fNKmCCOZ6fLfMYM5nJ1FQZr7kXGvHVBSC0Qx+l0BXVlBGXXx6akM+VhburMOAcY4Hlll2PRrvt/C6XJ9kaXmKWkencnWcTur5kPdjHFnHxnZLvINJxK7qiPmq2qefTlVMTFKY3uExdR6zs6kc2H3ee4lZiF5C2cVZjvRQ4fNswlT8OzOiZTKy1SQLwnU2l02Dy9r31Yb3velUvWDVM7+vtPEzCvddWQZ6vts/1x0sDj8yZmSa9waV+m2qTLBOiF3BEcN3SlWPWRgENwACTopnZHhbzyIZLtrz++a9OVj9lt91Sp22SNZknJcOccMRr/l6sXJ1bSa8ta39SAXdKdxVdmOeFjUnaoODxftp7+MsCACy68Y+ZG7sOXiZ6V0Cs3aNY3H7UMn6dwrLuhZDjq5R067ND2a4f6815IQTarWjyEmTVw8mJVrpNx0NqtDsX5eIgtmYzWXZzEVuLl/jmlj0hgE/rpiGpygvToq/PW2bDJHmNpsb1Jt7ejqxwDbY4s4Kj75akMwfzDWTKidhZs9gGxHNppNRRh1kE50W4VzB+jiBGfTGQs3SZx2ZcDW9Rl2vAbsuxt2E9E2KAnN6k0e5Ln/tMk8zpql+dGmTiShYO0YcBl9h+Dzu4cx+UuT0DHkTmUhisXJ+VShuJlscfAkrqPet6vE1GSMrlvChhEt5NH8LRwx2yUvtLwbNkRVUe1lIv63GLc0/EA05H3Nyv6eODGwMAgLjhoXJmuF0smzjvLblxXmUbD0ge47h6PWUnks1auuAaWE5+QUsWlCq2AYoRe65CNCbRmgW58louAhSUT5fUa14fhzoTF8YqpJy94yikhy/ctxFJreAFOJBqSPjVwg13g5a
UOolOPu4SFbyVACkv7KZYaWScSbuMxjR24BlNIIrLqK6nLsLs9kbfsaaEO1WdMRclazKcl8rQ+9l66gN0zszI9fDByYxsM7d5D6mT2fNKf+BbV6cDpvCDWhauqOIVbMa1Sy/Gk4dj5FayZovQe2Tbt45MSjqZpSBrXJ+dr1O2Gb747o9rsu8OBchR3aTJxNw8aHtWc0jMkmq90PJg74zRXul03ZkmdeyQHTHUBiNUuBJ2OUySlNKqWiy1l9hOS/d80gKnhpOWO1abqsPtuZoyTXHdw1wUrUuoVS0Za0wE0JP+gpxmgLwacBUvF6DcG6m3H3yLvYQtHp6voetY1tVc5XDnwQbnirQ7ntzSyptznTOs08U1kwiKXMGEOdRPtYOqjUemhII8zTDs0Fpm/jZpaxraLn+EGK25dUjvnt288+uee/2ZZC0zkmMQu/mEkWQ1HnTiULysR6SZwHvgAW4Hw53KWTWr48ejqrPDAehFi1WmPR2Bjaf3SGKamuymaO/yI3+5GDUz7DX9iNVdhKapYymuUdu7wyVjeM6eqdg+MI2iPM3PaMqBjBJInthEV2Fbsa29kwj9NThh5w4i/215lnfsvVnAB0lfbBwZZX9VJ0exlGWgjrZ7tO7dU9UCh4zPgnBsjsyHshytaXo6potUrxf6GJfiOiBWkoL0dVMckCiqSnieVnZJCe6YqxEELolwKAHsjVCdwkrs+VjRjVrWlXsnzf3pjlH8evBOhyNHIKfkYzFyBdhHvH+OROTkZG2OQ/7q1hIold3dtRp6WyQFwLJCKYCDLxZgtY3jQV/py+YDIbB6yHp7YEZCUlE7IuzahduCI8FkO4scGf85E2dLDg5BxaerRwcfcUS/JYUH9wCx2fJdCt5cBiJP4Imd+7AXD0geak6M4OlAxUM3lqJ0cM9uLigDlmb4a2tuE2TcnWT3ore9e2fk0pML2HTEgVl/jMmI6CIOB/E1RiwD931GqmCOHF5Sbxq3/Q5Acmp7NhtEHom6jaJH7J6icGr9g0TpOoF/iPpoSNvpi4g8/EEKzXpMuwY5OUQI9nnB9nWO7YrH5+l+/+f28ZFNtJTJVHzeg6X/3B5+buIiLfPi89409uenNBo/JPlfLWybUT7a3ca+CikAP7rxOiawMvm45nmXs9tJu5pTY1i80YX29fSv3efgIjCn38Y2Tg/wKUG3KfsRnfBLUU6p3UfxVrIMUY9kxdSgFkUcHUZjn8Zbn7NyTVGrPIhuKeCjuM6Hbm4ToQPdgIqTNItmMP1VoRuSdPhR2Hbt1tRYp1O84YKhk6xrp/eLN6ldPre+4Mw/wxOxvSb1laht2+3fWCKod4Z+yP5xfvZv/BC/NT/Ud0WiaObX8sO+8UP+zvzQ7Hd+CIr9tfzgxBtB1O9MELP/m4HDf7GB+9H+F4IuQzelZaseGfqNqhfMG/Qv2P4HXV/xozfIp6Gr0x84/0GQ2Ov3V8n1EwP8z612VgLwpa60Fw7/VDjAkNh3M0Zg78EAhv2nSOBn0UC/0WClWYlmKoFZKYimsmvHNzYQAtN3yL9D/DnVvyL5KYpAmbfoNEaIpUjOb3iWcQS4z4KmTBLw31Ty+yz4SjPxk9SE/T8JIn8aQcwbQe/a8TsZMgrFyvTfSKJ+sS17j6Z/a2dDseQbR+TuF3P0HlH/1hzRxHeCKJr4WQRtS0hdN30pOyJAi3OXpFuNfwM=</diagram></mxfile>
|
2207.10080/paper_text/intro_method.md
ADDED
|
@@ -0,0 +1,54 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Introduction
|
| 2 |
+
|
| 3 |
+
Recent decades have witnessed the success of protein science with neural networks such as AlphaFold2 [@AlphaFold], which achieve remarkable performance in understanding the structure and functionality of proteins. However, relying solely on protein sequence information cannot adequately capture biological knowledge, which may be critical for many tasks. The effective use of knowledge in related fields can improve the upper limit of predicting protein structure and function. Note that biologists have contributed a wealth of domain knowledge in knowledge bases like Gene Ontology [@geneontology]. Thus, it is intuitive to leverage knowledge graphs (KGs) as vital support for protein understanding. Yet one major stumbling block is the scarcity of high-quality knowledge graphs that cover biological knowledge and protein sequences.
|
| 4 |
+
|
| 5 |
+
The research challenges of protein knowledge graph construction we face are as follows: various biological knowledge bases and protein data are publicly available in different formats on the Web; however, it is a non-trivial task to integrate those heterogeneous sources of data. For example, Gene Ontology contains extensive biological functional knowledge but is not directly associated with protein sequences. Moreover, protein IDs are represented differently across protein databases. To integrate protein information, we propose **Uniprot & GOA-based Protein Alignment**, a method that uses GOA and Uniprot ID mapping to align and merge data sources. Another major challenge is the extremely unbalanced distribution. Statistically, we observe that 97.9% of relational triples involve relations of *is_a*, *part_of* and *enables*, which makes it challenging to leverage long-tailed semantic knowledge for protein understanding. To alleviate this data imbalance, we propose **Gene Ontology-based Relation Refinement**, which exploits the transitivity of the Gene Ontology annotation principles. For example, if a protein has an oxidoreductase function, it must also have a catalytic function. Accordingly, we associate proteins with these high-level GO terms through their subfunctions. We construct a multimodal protein KG based on the above-mentioned technologies and further discuss its potential applications. The contributions of this study can be summarized as follows:
|
| 6 |
+
|
| 7 |
+
- We contribute a multimodal protein knowledge graph containing all the experimentally verified high-quality proteins with biological knowledge.
|
| 8 |
+
|
| 9 |
+
- We provide a prototype with an online demo to predict the secondary structure and function based on ProteinKG65. For details, please refer to the demo website: <http://proteinkg.zjukg.cn>.
|
| 10 |
+
|
| 11 |
+
<figure id="main" data-latex-placement="t">
|
| 12 |
+
<embed src="figures/2.pdf" style="width:50.0%" />
|
| 13 |
+
<figcaption> We show the format of ProteinKG65, taking the protein <code>A0A023GS28</code> as an example, showing some Go terms associated with this protein and their relationship and a wide range of application scenarios to ProteinKG65.</figcaption>
|
| 14 |
+
</figure>
|
| 15 |
+
|
| 16 |
+
We build ProteinKG65 based on Gene Ontology[^2] and UniprotKB[^3]. Figure [1](#main){reference-type="ref" reference="main"} shows the details of ProteinKG65 and some potential application scenarios. Gene Ontology describes the knowledge of the biological domain concerning molecular function, cellular components, and biological processes, which provides structured, computable knowledge about the function of genes and gene products. UniProtKB is the central hub for the collection of functional information on proteins with accurate, consistent and rich annotation.
|
| 17 |
+
|
| 18 |
+
<figure id="construct" data-latex-placement="t">
|
| 19 |
+
<embed src="figures/proteinkg_flow.pdf" style="width:50.0%" />
|
| 20 |
+
<figcaption> The construction process of ProteinKG65</figcaption>
|
| 21 |
+
</figure>
|
| 22 |
+
|
| 23 |
+
**Uniprot & GOA-based Protein Alignment:** In Figure [2](#construct){reference-type="ref" reference="construct"}, we show the basic process of the method; the main steps are as follows: Step 1 and Step 2 extract gene annotations and gene ontology information from the gene ontology knowledge base. In Step 3 and Step 4, we obtain all the proteins in the annotations and retrieve the corresponding protein sequences from the Swiss-Prot database. Then, we align and merge the three data sources according to their IDs.
|
| 24 |
+
|
| 25 |
+
**Gene Ontology-based Relation Refinement:** In Step 5, we refine the relations in ProteinKG25. We sample GO terms from protein-go triplets in the three ontologies, denoted as:
|
| 26 |
+
|
| 27 |
+
$$\begin{equation}
|
| 28 |
+
\mathcal{M} = \sum_{i}Sample(\mathcal{G}_{i}, k_{i}), i \in (BP, CC, MF)
|
| 29 |
+
\end{equation}$$ where $\mathcal{G}_{i}$ denotes the GO terms in protein-go triplets, and $k_{i}$ denotes the number of top nodes we select from each ontology. After that, we extract the corresponding descendant GO sets of these terms, denoted as: $$\begin{equation}
|
| 30 |
+
\mathcal{C} = filter(\sum_{g} F(g)), g \in \mathcal{M}
|
| 31 |
+
\end{equation}$$ where the function $F$ returns the subtrees of the nodes selected in $\mathcal{M}$. We use $filter()$ because there may be an overlap between the descendant sets of different GO terms in $\mathcal{M}$. $g$ is the name of a term among the sampled nodes. Finally, we update the original protein-go triplets, replacing the relation with a newly generated relation $\mathcal{R}^N$ denoted as:
|
| 32 |
+
|
| 33 |
+
$$\begin{equation}
|
| 34 |
+
\mathcal{R}^N = \{ \mathcal{R}^O \oplus g | \mathcal{R}^{O}_{tail} \in \mathcal{C} , g \in \mathcal{M} \} \cup \{ \mathcal{R}^{O} | \mathcal{R}_{tail}^{O} \notin \mathcal{C} \}
|
| 35 |
+
\end{equation}$$ where $\mathcal{R}^O$ denotes the original relation in the protein-go triplet. If the GO term appears in $\mathcal{C}$, we concatenate the original relation with the top $k$ nodes selected in $\mathcal{M}$; otherwise, we keep the original relation.
|
| 36 |
+
|
| 37 |
+
::: {#tab:Statistic}
|
| 38 |
+
Setting Protein GO Relation Triplet
|
| 39 |
+
-------------- ------- --------- -------- ---------- -----------
|
| 40 |
+
train 543,110 28,524 57 4,884,034
|
| 41 |
+
Transductive valid 25,241 5,009 44 51,243
|
| 42 |
+
test 217,463 17,908 57 575,160
|
| 43 |
+
train 543,110 28,524 57 4,884,034
|
| 44 |
+
Inductive valid 855 270 31 2,216
|
| 45 |
+
test 3,085 1,062 50 11,127
|
| 46 |
+
|
| 47 |
+
: The statistic of Protein-GO triplets in ProteinKG65.
|
| 48 |
+
:::
|
| 49 |
+
|
| 50 |
+
[]{#tab:Statistic label="tab:Statistic"}
|
| 51 |
+
|
| 52 |
+
To make ProteinKG65 consistent with real-world application settings, we adopt two different settings for the protein-go triplets dataset (transductive & inductive). We detail the statistics of the different settings in Table [1](#tab:Statistic){reference-type="ref" reference="tab:Statistic"}.
|
| 53 |
+
|
| 54 |
+
With the proposed ProteinKG65, we have implemented a prototype protein understanding system[^4] based on a knowledge-enhanced pre-trained protein language model OntoProtein[^5]. Biologists and computer scientists can utilize the prototype to analyze proteins for structure and function prediction. Note that ProteinKG65 contains rich sequence--structure-function relations with biological expert experience, which can be applied to protein-drug molecule binding prediction and biological QA systems.
|
2207.12646/main_diagram/main_diagram.drawio
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2207.12646/paper_text/intro_method.md
ADDED
|
@@ -0,0 +1,91 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Introduction
|
| 2 |
+
|
| 3 |
+
Conventional classifiers trained with the cross-entropy loss treat all misclassifications equally. However, certain categories may be more semantically related to each other than to other categories, implying that some classification mistakes may be more severe than others. For instance, an autonomous vehicle confusing a `car` for a `truck` is not as severe as mistaking a `pedestrian` for `road`, where the latter mistake could lead to a catastrophe. Similarly, falsely identifying a `pine tree` with an `oak tree` is less severe than identifying it as a `rose`.
|
| 4 |
+
Classifiers trained to make mistakes with lower severity could benefit and are often critical in many real-world applications.
|
| 5 |
+
|
| 6 |
+
[t!]
|
| 7 |
+
\centerline{\includegraphics[width=\textwidth]{figures/teaser_hist.pdf}}
|
| 8 |
+
\caption{Overview of HAF. We propose a probabilistic approach using to learn hierarchy-aware features that respect the label hierarchy in the feature space and thereby make semantically meaningful mistakes. We train separate classifiers, with a shared feature space, for each level of the label hierarchy. We model the relationship between the fine-grained classes and their respective coarser classes using the label hierarchy and impose consistency constraints on the probability distributions. We further impose simple geometric constraints on the weight vectors of classifiers from different levels to align the weight vectors of fine-grained classes with their corresponding weight vectors of coarser-classes. }
|
| 9 |
+
|
| 10 |
+
The severity of a mistake is typically defined based on some notion of semantic similarity between class labels. For example, a taxonomic hierarchy tree defined over the class labels can express specific semantic relationships between classes through its tree structure, thus enabling an ordering of classes. This ordering was obtained using the lowest common ancestor (LCA) measure in . These hierarchies are often readily available in the class label space as part of language datasets like WordNet or from biological taxonomies, e.g., the one used with the iNaturalist-19 dataset .
|
| 11 |
+
|
| 12 |
+
Bertinetto et al. proposed approaches to reduce the severity of mistakes by employing hierarchy-sensitive adaptations of the cross-entropy loss. They reported a reduction in the mistake severity based on the average hierarchical distance of top-$k$ predictions at the cost of an increased top-$1$ error, with the trade-off being controlled by a hyperparameter. A more desirable solution would be the one that reduces the severity of mistakes while maintaining or reducing the overall top-$1$ error. Karthik et al. highlighted this trade-off and pointed out that the classical approach of Conditional Risk Minimization (CRM) could reduce the mistake severity without a significant change in the top-$1$ error. Moreover, CRM is a test-time intervention that applies post-hoc corrections on the class likelihoods using the LCA measure between classes. Despite its simplicity, the CRM approach is versatile and its effectiveness is remarkable. In Sec. , we show that CRM, when combined with other approaches, almost always improves the mistake severity, without a significant impact on the top-$1$ error.
|
| 13 |
+
|
| 14 |
+
While CRM improves the quality of prediction errors, being a test-time approach, it does not affect the model. Consequently, the learned representations are inherently inadequate because the cross-entropy loss function ignores all semantic structure in the label space and treats each class independently.
|
| 15 |
+
|
| 16 |
+
{\color{red}}{ To overcome this limitation, the hierarchical cross-entropy (HXE) loss was proposed in , which essentially amounts to a weighted combination of the cross-entropy loss applied at different levels of the hierarchy\footnote{{\color{red}} See the supplementary material for a derivation}.
|
| 17 |
+
} Chang et al. pointed out that training with a coarse class cross-entropy loss deteriorates the accuracy at fine-grained levels. This is likely the reason why both variants proposed in , HXE and the soft-labels loss, result in a trade-off between top-$1$ error and the severity of mistakes. Chang et al. mitigate this trade-off by disentangling the coarse and the fine-grained features by explicitly partitioning the feature space. This disentangling approach proved to be successful for small hierarchies, however, the feature vector partitioning limits its scalability to larger hierarchies. We argue that for addressing the problem of mistake severity, while maintaining the top-$1$ error, it is important to learn a feature space that captures the structure available in the label space. To this end, we propose learning a hierarchy-aware feature (HAF) space that is explicitly trained to inherit the hierarchical structure of the labels.
|
| 18 |
+
|
| 19 |
+
We observe that a hierarchy-aware feature space should enable classification at all levels of the hierarchy, and simultaneously lead to a lower mistake severity at the finest level. The label hierarchy structure constrains the coarse-level class labels to be a composition of disjoint sets of its sub-classes in the hierarchy. We exploit two key properties of the classifiers acting on the feature space to help inherit this compositional structure from the label space.
|
| 20 |
+
|
| 21 |
+
{\color{red}}{First, we train a classifier using the fine-grained cross-entropy and use its predictions to obtain target soft labels (Fig. ) for training the coarse-level auxiliary classifiers. The coarse-level classifiers minimize the Jensen-Shannon divergence (JSD) between their predictions and the target soft labels. This loss avoids the use of hard labels at coarser levels and thus serves as a consistency regularization for the fine-grained classifier, which in turn leads to improved mistake severity without compromising the top-$1$ error. We take this approach to avoid the pitfall highlighted in , which states that fine-grained features can lead to better coarse-grained predictions; however, explicitly using the cross-entropy loss for coarse-level classifiers leads to feature spaces that worsen the performance at a finer granularity.}
|
| 22 |
+
|
| 23 |
+
Second, we impose geometric consistency constraints on the classifier weight vectors that align sub-classes belonging to the same super-class (Fig. (b)). The resulting loss promotes a feature space (Fig. (a)) that respects the semantic hierarchy of the label space (Fig. (c)). We present further details of the loss terms in Sec. .
|
| 24 |
+
|
| 25 |
+
{\color{red} our approach using hierarchical classifiers. } \\
|
| 26 |
+
We hypothesize that the label space hierarchies (Fig. (c)) can be used at the training time to learn hierarchy-aware features as in Fig. (a) while improving or preserving the overall number of mistakes. A hierarchy-aware feature space should facilitate classification at all levels of the hierarchy.
|
| 27 |
+
|
| 28 |
+
To this end, we train a classifier at each level of the hierarchy with inputs from the common feature space as shown in Fig. (b).\\
|
| 29 |
+
{\color{red} consistency constraints on probability distributions } \\
|
| 30 |
+
|
| 31 |
+
The relationship of a coarse class being the superset of its fine-grained classes exists in the label hierarchy. We take a probabilistic framework to model this label-hierarchy relationship in the feature space. With our proposed loss functions, we encourage the mixture distribution of the sub-classes to align with the distribution of their super-class. We enforce consistency constraints between the probability distribution of a sample from the coarse classifier and its prediction from the fine-grained classifier. We minimize the Jensen-Shannon divergence between the coarse-class probability estimated from the fine-grained predictions and the probability predicted by the coarse classifier, in a bottom-up fashion, pairwise from the finest classifier to the coarsest classifier.
|
| 32 |
+
|
| 33 |
+
{\color{red} consistency constraints on classifier weights. } \\
|
| 34 |
+
Our approach of learning hierarchical classifiers in the same feature space imposes geometric relationships between the coarse and their corresponding fine-grained classifiers (Fig. (b)). We, thus, compel the weight vectors of the coarse-grained classifier to orient with the weight vectors of their fine-grained classes.
|
| 35 |
+
In order to enforce these constraints, we maximize the similarity between the weight vectors of the super-classes and the corresponding weight vectors of their sub-classes.
|
| 36 |
+
This encourages the classifier weights at all levels to respect the label hierarchy. Consequently, in this learned feature space, the angular separation between the sub-classes of the same coarse class is reduced while increasing the separation between the fine-grained classes from different coarse-classes.
|
| 37 |
+
|
| 38 |
+
We summarize our contributions below.
|
| 39 |
+
|
| 40 |
+
- We introduce a novel approach for learning a hierarchy-aware feature (\ours) space by inheriting the structure of the label space. We design the loss functions that impose probabilistic and geometric constraints between coarse and fine level classifiers.
|
| 41 |
+
- We empirically demonstrate that \ours scales well with large label hierarchies and reduces mistake severity while maintaining the top-$1$ fine-grained error.
|
| 42 |
+
|
| 43 |
+
# Method
|
| 44 |
+
|
| 45 |
+
Consider a label hierarchy tree with $H+1$ levels, where the root is at level-0, and $h\in[1,\ldots,H]$ denote the hierarchical level with $h\!=\!1$ and $h\!=\!H$ the coarsest and finest levels respectively. We ignore the root node for our purposes as it denotes the universal super-set containing all classes. Let $\cal{X}$ = $\{ \bx_i, y_i^h \vert i=1,\ldots,N \}$ be the set of $N$ images and their respective ground-truth labels at level $h$. We denote the common feature extractor $f_{\phi}(\cdot)$, which is implemented using some backbone neural network and is parameterized by $\phi$. As illustrated in Fig. , we use classifiers at each level of the hierarchy in training \ours and denote the level-$h$ classifier as $g^h(\cdot)$ parameterized by the weight matrix $\bW^h$. The resulting prediction probabilities are denoted by $ p^h(\widehat{y}_i^h|\bx_i;\bW^h) = g^h(f_{\phi}(\bx_i))$, where $\widehat{y}_i^h$ is the label predicted for $\bx_i$ by $g^h(\cdot)$ and can take class labels from the set of classes at level-$h$ as $\calC^{h}=\left\{ \bigcup_{i=1}^{|A|} A_i, \bigcup_{i=1}^{|B|} B_i, \bigcup_{i=1}^{|C|} C_i, \ldots\right\}$, where we define the set of classes at level-$(h-1)$ as $\calC^{h-1}=\{A,B,C,\ldots\}$. With a slight abuse of notation, here we use $A$ to denote a super-class label at level-$(h-1)$ and the set of its sub-classes $\{A_1,A_2,\ldots\}$ at level-$h$.
|
| 46 |
+
|
| 47 |
+
}$)}
|
| 48 |
+
|
| 49 |
+
We use the ground truth labels only at the finest level of the hierarchy and apply the cross-entropy loss to train the level-$H$ classifier, i.e., ${g}^H(\cdot)$. The fine-grained cross-entropy loss for a sample is given by
|
| 50 |
+
|
| 51 |
+
L_{CE_{fine}} = - \sum_{c\in\calC^H} \mathbf{1}\!\left[y_i^H\!=\!c\right]\log \left(p^H(\widehat{y}_i^H\!=\!c|\bx_i;\bW^H)\right)
|
| 52 |
+
|
| 53 |
+
where $\mathbf{1}[\cdot]$ serves as an indicator function and takes a value of one when the argument is true, else zero.
|
| 54 |
+
|
| 55 |
+
$)}
|
| 56 |
+
For making better mistakes, we want the classifiers at all levels to use the same feature space and yet make predictions consistent with the label hierarchy. While it is natural to use the cross-entropy loss for training the classifiers at all levels, as noted in and observed during our initial experiments, this choice of loss compromises the fine-grained accuracy. Instead, we enforce the consistency across classifiers at different levels by using soft labels and a symmetric entropy-based loss function. We minimize the Jensen-Shannon Divergence (JSD) between the predictions of a coarse classifier $g^{h-1}(\cdot)$ and the soft labels obtained from the next fine-level classifier $g^h(\cdot)$. As defined above, for a given class label $A\in\calC^{h-1}$, let $P[\widehat{y}^{h-1}\!=\!A|\bx_i]$ denote the probability of the sample $\bx_i$ belonging to the class $A$, which is computed as
|
| 57 |
+
|
| 58 |
+
P\left[\widehat{y}_i^{h-1}\!=\!A|\bx_i\right] = \sum_{k=1}^{|A|} p^h(\widehat{y}_i^h=A_k|\bx_i;\bW^{h})
|
| 59 |
+
|
| 60 |
+
The probabilities $P[c], \forall c\in\calC^{h-1}$ are concatenated together to construct the probability vector $\widehat{p}^{h-1}(\widehat{y}_i^{h-1}|\bx_i)$, which is used as the soft label for $\bx_i$. This soft label generation process is illustrated in Fig. .
|
| 61 |
+
|
| 62 |
+
The JSD is minimized between the soft labels and the predictions from the classifier $g^h(\cdot)$. For convenience, we use $p_i^h$ to refer to $ p^h(\hat{y_i}^h|x_i;\bW^h) $ and similarly $\widehat{p}_i^h$ for the corresponding soft label. The JSD based total \JSD is computed by summing the pairwise losses across the levels
|
| 63 |
+
|
| 64 |
+
\text{L}_{shc} = \sum_{h=1}^{H-1}JS^h\left(p_i^h||\widehat{p}_i^h\right) = \frac{1}{2} \sum_{h=1}^{H-1} (\text{KL}(p_i^h || m) + \text{KL}(\widehat{p}_i^h || m))
|
| 65 |
+
|
| 66 |
+
where $m$ = $\frac{1}{2}(p_i^h + \widehat{p}_i^h)$ and KL$(\cdot||\cdot)$ refers to Kullback-Leibler divergence.
|
| 67 |
+
|
| 68 |
+
[24]{r}{0.44\textwidth}
|
| 69 |
+
|
| 70 |
+
\centering{
|
| 71 |
+
\includegraphics[width=0.42\textwidth]{figures/notations.pdf}
|
| 72 |
+
}
|
| 73 |
+
|
| 74 |
+
\caption{Constructing the soft labels for training the coarse-level classifiers. The super-class target probability is the sum of its sub-classes' predicted probability. The colors indicate the class relationships across levels $h-1$ and $h$.}
|
| 75 |
+
|
| 76 |
+
It is important to highlight the key difference between the soft labels generated above and those defined in . The latter are designed using the LCA-based distance between classes, whereas our choice of soft labels can be interpreted as a learned label-smoothing that better regularizes the coarse-level classifiers. Yuan et al. make a similar argument about label smoothing in the context of knowledge distillation. The use of a symmetric loss like in eqn. () further enables the classifiers at both levels to jointly drive the feature space learning. This behavior of the coarse classifiers improving the performance of the finer-level classifiers is analogous to the Reversed Knowledge Distillation (Re-KD) setting as presented in , where the authors showed that a student ($g^{h-1}(\cdot)$) is capable of improving the performance of the teacher ($g^h(\cdot)$).
|
| 77 |
+
|
| 78 |
+
While $\text{L}_{shc}$ successfully improves the mistake severity (as we show in Sec. ) by virtue of better regularization, it does not directly encourage discrimination between coarse-level classes. Therefore, we use a pairwise margin-based loss to promote a more discriminative feature space. We use this loss over coarser levels $h \in \calH$, where $\calH = \{k, \ldots, H-1\}$ and $k \in \{1, \ldots, H-1\}$. For a given batch of samples, we create pairs of samples that have dissimilar labels at a level $h$, i.e., $\calB^h = \{(i,j) | y^h_i\neq y^h_j\}$. Then we compute the margin loss over the batch as
|
| 79 |
+
|
| 80 |
+
L_{m} = \sum_{h\in\calH} \sum_{(i,j)\in\calB^h}\max\left(0, m - \text{JS}^h(p^h_i || p^h_j)\right)
|
| 81 |
+
|
| 82 |
+
where $p^h_i$ is the softmax probability generated by $g^h(f_\phi(\bx_i))$ and $m$ is the margin. The margin loss is only applied to the coarser levels of the hierarchy, as the cross-entropy loss of () is sufficient for fine-grained discrimination.
|
| 83 |
+
|
| 84 |
+
$)}
|
| 85 |
+
\ours uses classifiers at all the levels of hierarchy. In a hierarchy-aware feature space, the weight vectors of the coarse class and its fine-grained classes should be correlated. The losses introduced in the previous subsections impose probabilistic consistency across the classifier predictions, and only indirectly affect the feature space geometry. In order to better orient the feature space to inherit the label space hierarchy, we use a geometric consistency loss. As before, let $A\in\calC^{h-1}$ be a given super-class and its sub-classes be $A_k\in\calC^h, k=1,\ldots,|A|$. Let the weight vector corresponding to the super-class $A$ be $\bw^{h-1}_A$ and similarly the weight vectors corresponding to the sub-classes be $\bw^{h}_{A_k}$. Note that the classifier $g^{h-1}(\cdot)$ is defined by the weight matrix $\bW^{h-1}$, which is obtained by stacking the weight vectors $\bw_c, c\in\calC^{h-1}$. We further constrain each weight vector to be unit norm $||\bw^h_c||_2=1, \forall c,h $, across all classifiers. For the super-class $A\in\calC^{h-1}$, we define the target weight vector as $\widehat{\bw}_A^{h-1} = \widetilde{\bw}^{h-1}_A/||\widetilde{\bw}_A^{h-1}||_2$, where $\widetilde{\bw}_A^{h-1}=\sum_{k=1}^{|A|}\bw_{A_k}^h$. Thus, the \weightconst loss to be minimized is
|
| 86 |
+
|
| 87 |
+
L_{gc} = \sum_{h=1}^{H-1} \sum_{c\in\calC^h}\left(1 - \cos\left(\bw_c^{h},\widehat{\bw}_c^h\right)\right)
|
| 88 |
+
|
| 89 |
+
where $\cos\left(\bw_c^{h},\widehat{\bw}_c^h\right)$ refers to the cosine similarity between the weight vectors $\bw_c^{h}$ and $\widehat{\bw}_c^h$.
|
| 90 |
+
|
| 91 |
+
Finally, the total loss is given by $L_{total} =$ \CEfinestsymb + \JSDsymb + \dissimilarsymb + \weightconstsymb .
|
2210.02159/main_diagram/main_diagram.drawio
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
<mxfile host="app.diagrams.net" modified="2022-09-28T11:40:37.150Z" agent="5.0 (X11)" etag="8Tk-2DUegNgJim_qwfkO" version="20.3.6" type="browser"><diagram id="uxejzuZ-OBssMPIjlCyk" name="Page-1">7Vpbd5s4EP41Pmf3IT4CcfNjEidtt02aNm0uT3tkkDEJtqgsYju/fiUjMBfZ4ARSt1s/2DAahJj55tPMmB48nS7fURRNLoiHw54OvGUPDnu6rkHg8B8hWSUSZ2AlAp8GnlTaCK6DZyyFQErjwMPzgiIjJGRBVBS6ZDbDLivIEKVkUVQbk7B41wj58o5gI7h2UYgrareBxyZSqoGc+nsc+BN5a8eUA1OUKSeC+QR5ZJG7FzzrwVNKCEuOpstTHArjpXZJJjrfMpotjOIZa3LBefzuQp//e3k7+jqzvi0f/NGxewSTWZ5QGMsHlotlq9QClMQzD4tJQA+eLCYBw9cRcsXogvucyyZsGvIzjR8SPhIw4WtDaI+DMDwlIaHrqaCHsDN2uXzOKHnEuRHLdfBozEeqTyUf9AlThpc5kXzKd5hMMaMrriJHoSFdICGnW/J8sXEgTN09yfkuwwCSoPGzuTd25QfStGoz4wjd395MLx+dGLvnYP59QL4epWbdZWefGzpq/vgZ5NEoxGVUqcxiDYpmgVrVLAOFVbTOrKLZ9VbhUROJQzce4Xr4jRKsfhplAuQ++msEf45ZGMywlHuIPn7OsAr6wCwK9bVUgWgvoNzmAZlxAUZzJsCckJYK8COAIbZUgAfYAY7DR7gXGeILozLAqgjYjaj6sJD+NsyKu02775i2Zujpt1F1v232Qf6jdQUG53cHg21ZwEEqMBhQH5q2uIKDIScfrz9vApKUO/V+Dg62phUo49AhNPjdIYRNzQBABaETXVj2p0LoQFGRbl+/LyrGAEGgJJaToa1ERWLxAyCWn4+Z+Onh5uY9ev44+njmG9fnH2JrddQgMcEerxPkKaFsQnwyQ+HZRnpSTJw3Op8IiSRAHjBjK1n0oJiRIs62JoMMUR+zHWm91BNr3OkcikPEgqdiraOy5vrSY0rRKqcQkWDG5rmZr4Qg53OrlHEapQqlTn8ASp5MVrDlaqi+W7bcOYmpi+VVJYBkNnkFzzSopQ6NZySpmApSsQAaaLtykgrZ6EPbAmoS2buG042XFSuDFmqVXRFVcK4VMmmfgpetHzFJB44SCx9zBc2KlmvjpOP8yBe/X6746BWiaDpP5+RLTKZNNLbiiHHM4GciVn4SYRrwZ11TeSa/2gjr0DYOljhtwGilPYiHjrB6ZeNxXOwq6/kRp2+zJSxAp4QFWE1Dra4KVyUYVNvDnmBwtoDhHCMWU7xuIEVN4cANyYrOLDpkRgRVFLwnRSgMfOFgl7tnDRLhlsBF4bEcmAaeF27jquIOJ5Yo8aODKtdQwpCEU1skkQIhLU8UJJE16vLIgF0BQ1XDtgQMThAsEPbj9tbBOIFJY8L4nyKk3PMyzDdESPjhPPh2/ePqJobwbsouLxff54eeWSY50o7MWJGBKvVg2xnoq/L5rnfvaxJyUDYNxr066i1EgWEVeRJaViUKHLMaBFYLQaB0R4Nu+IFmyvDgMmXTLv3bYVcpzlEwnNMVwzXo4x40w8GGDKcfFMP9wsVnNaIAMAEe7xNRAGjDk9OWIqpcb1h2X29WcphO3zY78rDR3R528emqk72rXC4ib+CZKvchbGhQ72a30weKfy06SvnUyUeD7S7PiDgckUWeDNcCPjAhNHgWDdtQCGfesXi/QQR0iObzwC3avugobk66usuf3K/D0kxPh8v84HBVl4PX0qPRkB5rvJLK9utUttYcrExU3m6N8kSJYSoTvbp3Cmt6p+UH1Gr0tZJ+8e2RYq/1BS1SZWqgSsPLgbAfrJcBu5Mj4jgHan62wbQ4WfVeFwrrs1w/b
Xd81KYZRstpxtvEkamX4F9uuHYdR21BsQknvwiKG/jd59HXKhQboW5XG7sWdU3/QPqDur1QZypQ13Z/MJj5PdE9nj/+aQ6qQbslz8/AUJPkd9Yb1A5if3w5uTTt+b0NadilDMcEZh86L+ONyruTdme84Tzf0LuVcT+8vbh9OPqMxt4/gzZqv2280TNP/3rkX38fRSmDCPogXswn/FUJRLOyZSpenK1ge3stWcKQYVZrSc1UtE5fwBH8dPMyeAKczSv18Ow/</diagram></mxfile>
|
2210.02159/main_diagram/main_diagram.pdf
ADDED
|
Binary file (10.7 kB). View file
|
|
|
2210.02159/paper_text/intro_method.md
ADDED
|
@@ -0,0 +1,155 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Introduction
|
| 2 |
+
|
| 3 |
+
Object-centric representation learning aims to learn representations of individual objects in scenes given as static images or video [\(Burgess et al., 2019;](#page-9-0) [Locatello et al., 2020;](#page-9-1) [Elsayed et al., 2022\)](#page-9-2). One way to formalize object-centric representation learning is to consider it as an input partitioning problem. Here we are given a set of spatial scene features, and we want to *partition* the given features into k per-object features, also called slots, for some given number of objects k. A useful requirement for a partitioning scheme is that it should be *topology-aware*. For example, the partitioning scheme should be aware that points close together in space are often related and may form part of the same object. A related problem is to *match* object representations in two closely related scenes, such as frames in video, to learn *object permanence* across space and time.
|
| 4 |
+
|
| 5 |
+
In this paper we focus on differentiable solutions for the k-partition and matching problems that are also efficient and scalable for object-centric learning. We formulate the topology-aware k-partition problem as the problem of solving k *minimum* s*-*t *cuts* in the image graph (see Figure [1\)](#page-2-0) and the problem of matching as a *bipartite matching* problem.
|
| 6 |
+
|
| 7 |
+
An interesting feature of the minimum s-t cut and bipartite matching problems is that they can both be formulated as linear programs. We can include such programs as layers in a neural network by parameterizing the coefficients of the objective function of the linear program with neural networks. However, linear programs by themselves are not continuously differentiable with respect to the objective function coefficients [\(Wilder et al., 2018\)](#page-9-3). A greater problem is that batch solution of linear programs using existing solvers is too inefficient for neural network models, especially when the programs have a large number of variables and constraints. We solve these problems by 1) approximating linear programs by regularized equality constrained quadratic programs, and 2) precomputing the optimality condition (KKT matrix) factorizations so that optimality equations can be quickly solved during training.
|
| 8 |
+
|
| 9 |
+
The advantage of using equality constrained quadratic programs is that they can be solved simply from the optimality conditions. Combined with the appropriate precomputed factorizations for the task of object-centric learning, the optimality conditions can be solved very efficiently during training. A second advantage of using quadratic programming approximations is that quadratic programs can
|
| 10 |
+
|
| 11 |
+
# Method
|
| 12 |
+
|
| 13 |
+
<span id="page-1-0"></span>**Require:** Input features x of dimension $C \times H \times W$ with C channels, height H and width W
|
| 14 |
+
|
| 15 |
+
- 1: Compute quadratic program parameters $y_i = f_y^i(x)$ , for $i \in \{1, ..., k\}$ where $f_y^i$ are CNNs.
|
| 16 |
+
- 2: Optionally transform spatial features $x_f = f_x(x)$ , where $f_x$ is an MLP transform acting on the channel dimension. $x_f$ has dimension $D \times H \times W$ .
|
| 17 |
+
- 3: Solve regularized quadratic programs for minimum s-t cut and extract vertex variables $z_i = \operatorname{qsolve}(y_i)$ for each $y_i$ . Each $z_i$ has dimension $H \times W$ .
|
| 18 |
+
- 4: Normalize $z_i$ across cuts i = 1, ..., k for each pixel with a temperature-scaled softmax.
|
| 19 |
+
- 5: Multiply $z_i$ with $x_f$ along H, W for each i to obtain K masked features maps $r_i$ .
|
| 20 |
+
- 6: Return $r_i$ as the k-partition.
|
| 21 |
+
|
| 22 |
+
be differentiated relative to program parameters using the implicit function theorem as shown in prior literature (Barratt, 2019; Amos and Kolter, 2017).
|
| 23 |
+
|
| 24 |
+
To learn the objective coefficients of the cut problem by a neural network, the linear program needs to be solved differentiably, like a hidden layer. For this, we can relax the linear program to a quadratic program and employ techniques from differentiable mathematical programming (Wilder et al., 2018; Barratt, 2019) to obtain gradients. This amounts to solving the KKT optimality conditions which then result in the gradients relative to the parameters of the quadratic program (Amos and Kolter, 2017; Barratt, 2019). However, with a naive relaxation, the required computations for both the forward and backward pass are still too expensive for use in object-centric representation learning applications.
|
| 25 |
+
|
| 26 |
+
Given that the techniques generally employed for differentiably solving quadratic programs are limited to smaller program sizes (Amos and Kolter, 2017), we introduce optimizations in the gradient computation specific to the problem of solving graph cuts for image data. For instance, we note that the underlying *s-t* flow graph remains unchanged across equally-sized images, allowing us to pre-compute large matrix factorization. Furthermore, we replace the forward pass by a *regularized equality constrained quadratic program* constructed from the linear programming formulation of the minimum *s-t* cut problem. When combined with these task specific optimizations, equality constrained quadratic programs can be solved significantly more efficiently than general quadratic programs with mixed equality and inequality constraints (Wright and Nocedal, 1999). The regularization of slack variables ensures that the output of the new quadratic program can still be interpreted as an *s-t* cut solution. The use of sparse matrix computations in the forward and backward passes ensures that time and memory usage is significantly reduced.
|
| 27 |
+
|
| 28 |
+
To summarize, we make the following contributions in this paper.
|
| 29 |
+
|
| 30 |
+
- 1. We formulate object-centric representation learning in terms of partitioning and matching.
|
| 31 |
+
- 2. We propose s-t cuts in graphs for topology-aware k-partition with neural networks.
|
| 32 |
+
- 3. We propose to use regularized equality constrained quadratic programs as a differentiable, general, efficient and scalable scheme for solving partitioning and matching problems with neural networks.
|
| 33 |
+
|
| 34 |
+
We first describe the general formulations of the graph partitioning problem specialized for images and then describe limitations when considering graph partitioning in image settings. With these limitations in mind, we describe the proposed *neural* s-t cut and *matching* algorithms, which allow for efficient and scalable solving of graph partitioning and matching. Last, we describe how to learn end-to-end object-centric representations for static and moving objects with the proposed methods.
|
| 35 |
+
|
| 36 |
+
The problem of finding minimum s-t cuts in graphs is a well-known combinatorial optimization problem closely related to the max-flow problem (Kleinberg and Tardos, 2005). We are given a directed graph G=(V,E) with weights for edge (u,v) denoted by $w_{u,v}$ and two special vertices s and t. The minimum s-t cut problem is to find a partition of the vertex set V into subsets $V_1$ and
|
| 37 |
+
|
| 38 |
+
$V_2, V_1 \cap V_2 = \emptyset$ , such that $s \in V_1, t \in V_2$ and the sum of the weights of the edges going across the partition from $V_1$ to $V_2$ is minimized. In classical computer vision, the minimum cut problem has been used extensively for image segmentation (Boykov and Jolly, 2001; Shi and Malik, 2000). The problem of image segmentation into foreground and background can be reduced to minimum s-t cut by representing the image as a weighted grid graph with the vertices representing pixels (Figure 1) where the weights on the edges represent the similarity of neighbouring pixels. Next, the vertices s and t are introduced and for each pixel vertex v we include edges (s, v) and (v, t). The weights on edges $w_{s,v}, w_{t,v}$ represent the relative background and foreground weight respectively for the pixel vertex v. For example, $w_{s,v} > w_{t,v}$ may indicate that v is more likely to be in the foreground rather than the background. At the same time, the neighbouring edge weights $w_{u,v}$ indicate that similar pixels should go in the same partition. Solving the min-cut problem on this graph leads to a segmentation of the image into foreground and background. Note that this procedure encodes the underlying image topology in the graph structure.
|
| 39 |
+
|
| 40 |
+
**Solving Cut Problems.** Given a directed graph G=(V,E) with edge weights $w_{u,v}$ and two special vertices s and t, we can formulate the min-cut problem as a linear program (Dantzig and Fulkerson, 1955). To do so, we introduce a variable $p_u$ for each vertex u in the linear program. Similarly, for each edge (u,v), we introduce a variable denoted $d_{u,v}$ . The edge weights $w_{u,v}$ act as objective function parameters in the linear program, which is written as follows.
|
| 41 |
+
|
| 42 |
+
<span id="page-2-1"></span>minimize
|
| 43 |
+
$$\sum w_{uv}d_{uv}$$
|
| 44 |
+
subject to
|
| 45 |
+
$$d_{uv} \ge p_u - p_v \qquad (u,v) \in E,$$
|
| 46 |
+
|
| 47 |
+
$$p_s - p_t \ge 1, \qquad (1)$$
|
| 48 |
+
|
| 49 |
+
$$d_{u,v} \ge 0 \qquad (u,v) \in E,$$
|
| 50 |
+
|
| 51 |
+
$$p_v \ge 0 \qquad v \in V$$
|
| 52 |
+
|
| 53 |
+
The program can then be fed to a linear programming solver for the solution. Although there are other more efficient methods of solving min-cut problems, we resort to the linear programming formulation because of its flexibility and that it can be approximated with differentiable proxies.
|
| 54 |
+
|
| 55 |
+
We propose finding minimum s-t cuts in the image graph with weights parameterized by neural networks, $w_{u,v} = f(x)$ with image x, for partitioning image feature maps, x' = g(x), into k disjoint partitions. Solving the standard minimum s-t cut problem can divide the vertex set of an input graph into two partitions. We generalize the minimum s-t cut problem to partition
|
| 56 |
+
|
| 57 |
+
<span id="page-2-0"></span>
|
| 58 |
+
|
| 59 |
+
Figure 1: (Top) Image graph for 2-partition with an *s-t* cut and 3 parallel graphs for 3-partition (Below).
|
| 60 |
+
|
| 61 |
+
a given graph into any fixed k number of partitions by solving k parallel min-cut problems and subsequently normalizing the vertex (or edge) variables to sum to 1 across the k copies of the graph. We note that 2-partition can also be formulated as a subset selection problem (Xie and Ermon, 2019). The advantage of using minimum s-t cuts for partitioning is that the method then depends on the underlying topology and neighbourhood relations of image data represented in the image graph.
|
| 62 |
+
|
| 63 |
+
k-Partition Generalization. We generalize the minimum s-t cut method for 2-partition to k partitions by solving k parallel 2-partition problems and normalizing the vertex variables $p_u^i$ , i denoting partition index, from the optimum solution of equation 1 with a softmax over i. We also experimented with solving the k-partition problem with a single graph cut formulation. However, we found such a formulation to have high memory usage with increasing k and limited ourselves to the parallel cut formulation which worked well for our experiments.
|
| 64 |
+
|
| 65 |
+
**Obstacles.** There are some obstacles in the way of including linear programs as hidden layers in neural networks in a way that scales to solving large computer vision problems. The first of these is that linear programs are not continuously differentiable with respect to their parameters. One solution to this is to use the quadratic programming relaxation suggested by Wilder et al. (2018). However, the gradient computation for general large quadratic programs requires performing large matrix factorization which is infeasible in terms of computing time and memory when working with image-scale data.
|
| 66 |
+
|
| 67 |
+
A second obstacle is that established quadratic programming solvers such as OSQP (Stellato et al., 2020) and Gurobi (Gurobi Optimization, LLC, 2022) run on CPU and cannot solve programs in large batches essential for fast neural network optimization. There have been attempts to solve general quadratic programs with GPU acceleration (Amos and Kolter, 2017). However, in practice, these attempts do not scale well to quadratic program solving in batch settings with the tens of thousands of variables and constraints that arise when working with image data.
|
| 68 |
+
|
| 69 |
+
We describe our proposed method and optimizations to differentiably solve approximations of the s-t cut problems as quadratic programs. Our method depends on the fact that 1) quadratic program solutions, unlike linear programs, can be differentiated relative to the objective function parameters (Amos and Kolter, 2017; Wilder et al., 2018) and 2) that quadratic programs with equality constraints only can be solved significantly more quickly than general quadratic programs (Wright and Nocedal, 1999). Combining the method with specific optimizations allows us to solve large batches of quadratic programs on GPU, allowing us to scale to image data.
|
| 70 |
+
|
| 71 |
+
We use the fact that quadratic programs with equality constraints only can be solved much more quickly and easily than programs with mixed equality and inequality constraints. Given an equality constrained quadratic program, we can directly solve it by computing the KKT matrix factorization and solving the resulting triangular system (Wright and Nocedal, 1999).
|
| 72 |
+
|
| 73 |
+
In general, linear or quadratic programs (such as those for s-t cut) have non-negativity constraints for variables which cannot be represented in equality constrained quadratic programs. Instead, we use regularization terms in the objective function which ensure that variables remain within a reasonable range. After solving the regularized equality constrained program, the solutions can be transformed, for example by a sigmoid or softmax, to bring them within the required range.
|
| 74 |
+
|
| 75 |
+
We approximate the minimum s-t cut linear program (equation 1) by the following quadratic program with equality constraints only.
|
| 76 |
+
|
| 77 |
+
minimize
|
| 78 |
+
$$\sum w_{uv}d_{uv} + \gamma \sum_{u,v} (d_{uv}^2 + s_{uv}^2) + \gamma \sum_{v} p_v^2$$
|
| 79 |
+
subject to
|
| 80 |
+
$$d_{uv} - s_{uv} = p_u - p_v \qquad (u,v) \in E,$$
|
| 81 |
+
|
| 82 |
+
$$p_s - p_t - s_{st} = 1,$$
|
| 83 |
+
|
| 84 |
+
$$(2)$$
|
| 85 |
+
|
| 86 |
+
where the variables $s_{uv}$ are slack variables and $\gamma$ is a regularization coefficient. We achieve this by first adding one slack variable $s_{uv}$ per inequality constraint to convert them into equality constraints plus any non-negativity constraints. Next, we add a diagonal quadratic regularization term for all variables, including slack variables, in the objective function. Finally, we remove any non-negativity constraints to obtain a quadratic program with equality constraints only.
|
| 87 |
+
|
| 88 |
+
**Parameterization.** Given input features x, we can include regularized equality constrained programs for s-t cut in neural networks by parameterizing the edge weights $w_{u,v}$ by a ConvNet f as w=f(x). Using the image graph from Figure 1 as an example, each vertex is connected with 4 neighbouring vertices and s and t for a total of 6 edges per vertex (5 out-going and 1 incoming). Assuming the image graph has height H and width W, we can parameterize the weights for k-partition by using a ConvNet f with output dimensions (6k, H, W) giving the weights for 6 edges per pixel per partition. Our experiments show that the output of the regularized equality-constrained quadratic program for minimum s-t cut can easily be interpreted as a cut after a softmax and works well for image-scale data.
|
| 89 |
+
|
| 90 |
+
**Forward and backward pass computations.** We now describe the computations required for solving regularized equality constrained quadratic programs and their gradients relative to the objective function parameters. A general equality constrained quadratic program has the following form.
|
| 91 |
+
|
| 92 |
+
minimize
|
| 93 |
+
$$\frac{1}{2}z^tGz + z^tc$$
|
| 94 |
+
|
| 95 |
+
subject to $Az = b$ , (3)
|
| 96 |
+
|
| 97 |
+
where $A \in \mathbb{R}^{l \times n}$ is the constraint matrix with l constraints, $b \in \mathbb{R}^l$ , $G \in \mathbb{R}^{n \times n}$ and $c \in \mathbb{R}^n$ . For
|
| 98 |
+
|
| 99 |
+
our case of quadratic program proxies for linear programs, G is a multiple of identity, $G = \gamma I$ , and corresponds to the regularization term. For our particular applications, c is the only learnable parameter and A and b are fixed and encode the image graph constraints converted to equality constraints by use of slack variables. Any non-negativity constraints are removed.
|
| 100 |
+
|
| 101 |
+
<span id="page-4-2"></span>
|
| 102 |
+
$$\begin{array}{ll} \text{minimize} & \sum c_{uv}d_{uv} \\ \text{subject to} & p_u+d_{u,v}=1, \\ & p_v+d_{u,v}=1 \; \{u,v\} \in E, \\ & d_{u,v} \geq 0 \; \{u,v\} \in E \end{array} \tag{4}$$
|
| 103 |
+
|
| 104 |
+
Figure 2: Linear program for matching
|
| 105 |
+
|
| 106 |
+
Figure 3: Schematic of partitioning module
|
| 107 |
+
|
| 108 |
+
The quadratic program can be solved directly by solving the following KKT optimality conditions for some dual vector $\lambda$ (Wright and Nocedal, 1999),
|
| 109 |
+
|
| 110 |
+
<span id="page-4-1"></span>
|
| 111 |
+
$$\begin{bmatrix} G & A^t \\ A & 0 \end{bmatrix} \begin{bmatrix} -z \\ \lambda \end{bmatrix} = \begin{bmatrix} c \\ -b \end{bmatrix}, \tag{5}$$
|
| 112 |
+
|
| 113 |
+
where the matrix on the left is the KKT matrix. We can write the inverse of the KKT matrix using Gaussian elimination (the Schur complement method, (Wright and Nocedal, 1999)), with $G = \gamma I$ , as
|
| 114 |
+
|
| 115 |
+
<span id="page-4-0"></span>
|
| 116 |
+
$$\begin{bmatrix} \gamma I & A^t \\ A & 0 \end{bmatrix}^{-1} = \begin{bmatrix} C & E \\ E^t & F \end{bmatrix}, \tag{6}$$
|
| 117 |
+
|
| 118 |
+
where $C=\frac{1}{\gamma}(I-A^t(AA^t)^{-1}A)$ , $E=A^t(AA^t)^{-1}$ and $F=-(AA^t)^{-1}$ . We note that only $(AA^t)^{-1}$ appears in inverted form in equation 6 which does not change and can be pre-computed. We can pre-compute once using a Cholesky factorization to factor $AA^t$ as $AA^t=LL^t$ in terms of a triangular factor L. Then during training whenever we need to solve the KKT system by computing Cz or Ez (equation 6) for some z, we can do this efficiently by using forward and backward substitution with the triangular factor L.
|
| 119 |
+
|
| 120 |
+
Once we have the KKT matrix factorization equation 6 we can reuse it for the gradient computation. The gradient relative to parameters c is obtained by computing $\nabla_c l(c) = -C\nabla_z l(z)$ , where l(.) is a loss function and z is a solution of the quadratic program (see Amos and Kolter (2017) for details). We can reuse the Cholesky factor L for the gradient making the gradient computation very efficient.
|
| 121 |
+
|
| 122 |
+
**Sparse Matrices.** The final difficulty is that the constraint matrix A can be very large for object-centric applications since the image graph scales quadratically with the image resolution. For 64x64 images we get s-t cut quadratic programs with over 52,000 variables and over 23,000 constraints (resulting in a constraint matrix of size over 4GB) which have to be solved in batch for each training example. Since the constraint matrices are highly sparse (about 6% non-zero elements for 64x64 images) we use sparse matrices and a sparse Cholesky factorization (Chen et al., 2008). During training we use batched sparse triangular solves for the backward and forward substitution to solve a batch of quadratic programs with different right hand sides in equation 5. Assuming a graph size of N, solving the quadratic program directly (with dense Cholesky factorization) has complexity about $O(N^3)$ and is even greater for general quadratic programs. With our optimizations this reduces to $O(Nn_z)$, where $n_z$ is the number of nonzero elements, which is the time required for a sparse triangular solve.
|
| 123 |
+
|
| 124 |
+
We can extend the regularized quadratic programming framework to the problem of matching. Given two sets of k-partitions $P_1$ and $P_2$ we want to be able to match partitions in $P_1$ and $P_2$. We can treat this as a bipartite matching problem which can be specified using linear program 4. We denote node variables as $p_u$ for node u, edge variables for edge $\{u,v\}$ as $d_{u,v}$ and the edge cost as $c_{u,v}$.
|
| 125 |
+
|
| 126 |
+
Unlike the s-t cut problems, the matching problems that we use in this paper are quite small with
|
| 127 |
+
|
| 128 |
+
the bipartite graph containing no more than $2 \times 12 = 24$ nodes. For such small problems one could conceivably employ (after suitable relaxation (Wilder et al., 2018)) one of the CPU-based solvers or the parallel batched solution proposed by Amos and Kolter (2017). However, in this paper, we use the same scheme of approximating the regularized equality constrained quadratic programs that we use for graph cut problems for the matching problem and leave the more exact solutions to further work. See Appendix C for details of the formulation of regularized quadratic programs for matching.
|
| 129 |
+
|
| 130 |
+
We now discuss how to use the specific k-partition formulation described above for object-centric learning. Given scene features, x, of dimension $C \times H \times W$ as input, we transform the features into k vectors or *slots* where each slot corresponds to and represents a specific object in the scene. The objective is that, by means of graph cuts that preserve topological properties of the pixel space, the neural network can learn to spatially group objects together in a coherent fashion, and not assign far-off regions to the same slot arbitrarily.
|
| 131 |
+
|
| 132 |
+
We use a similar encoder-decoder structure as Slot Attention (Locatello et al., 2020). Crucially, however, we replace the slot attention module by our min-cut layer. Specifically, based on scene features that are extracted by a CNN backbone, we compute k minimum s-t cut quadratic program parameters using a small CNN. We solve the k quadratic programs and softmax-normalize the vertex variables. This gives us (soft) spatial masks $z_i$ normalized across i. We multiply the
|
| 133 |
+
|
| 134 |
+
<span id="page-5-0"></span>**Require:** Input features x of dimension $C \times H \times W$ with C channels, height H and width W
|
| 135 |
+
|
| 136 |
+
- 1: Add position encoding to features x.
|
| 137 |
+
- 2: Partition x using Algorithm 1 to obtain k-partition $r_i$ of dimensions $D \times H \times W$ .
|
| 138 |
+
- 3: Average $r_i$ across H, W.
|
| 139 |
+
- 4: Transform each $r_i$ to get $s_i = f(r_i)$ with MLP f.
|
| 140 |
+
- 5: Return slots $S = \{s_1, ..., s_k\}$ .
|
| 141 |
+
|
| 142 |
+
masks $z_i$ with the (optionally transformed) scene features x broadcasting across the channel dimension. This gives us masked features $r_i$ of dimensions $D \times H \times W$ that form a k-partition. The procedure is described in Algorithm 2. To obtain slot object representations, we add position encoding to the scene features x before partitioning. After partitioning, we average each partition $r_i$ of dimension $D \times H \times W$ across the spatial dimensions to obtain k vectors of dimension D which are then transformed by MLPs to obtain the slots. Finally, each slot is separately decoded. Following Locatello et al. (2020), we use a separate output channel to combine the individual slot images. This combined image is then trained to reconstruct the original scene x.
|
| 143 |
+
|
| 144 |
+
**Background slot.** The basic object-centric framework described in Algorithms 1 and 2 can easily be extended to work with a specialized background slot. We do this by computing one set of masks for a 2-partition and one for a k-1-partition, denoted mask\_fg, mask\_bg, mask\_object, and multiplying the foreground and object masks as mask\_fg\*mask\_object. In experiments, we find that computing slots in this way specializes a slot for the background.
|
| 145 |
+
|
| 146 |
+
Matching and motion. We extend the model for learning representations of moving objects in video by combining the model with the matching procedure described in Algorithm 3. We work with pairs of frames. For each frame, we compute k per-frame slot representations as before. Next, using the matching procedure, we match the two sets of slots to get new slots $r_i$ as in Algorithm 3, and transform the new slots by MLP transformations. The new slots are used in the decoder to reconstruct the input pair of frames by optimizing the sum of the reconstruction error for the two frames.
|
| 147 |
+
|
| 148 |
+
<span id="page-5-1"></span>**Require:** Input features $x_1, x_2$ of dimension $C \times H \times W$ with C channels, height H and width W
|
| 149 |
+
|
| 150 |
+
- 1: Compute slots S, T for $x_1, x_2$ respectively using Algorithm 2.
|
| 151 |
+
- 2: Compute a cost for each pair of slots $s_i \in S$ and $t_i \in T$ by computing inner products as $c_{i,j} = -\langle s_i, t_j \rangle$ .
|
| 152 |
+
- 3: Solve matching using method in 3.2 to obtain matching matrix $M_{i,j}$ .
|
| 153 |
+
- 4: Apply softmax with temperature to $M_{i,j}$ along dimension j.
|
| 154 |
+
- 5: Compute paired slot $r_i = M_{i,:}T$
|
| 155 |
+
- 6: Return k slots pairs $(s_i, r_i)$ .
|
2210.16530/main_diagram/main_diagram.drawio
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
<mxfile host="Electron" modified="2022-03-01T10:26:08.676Z" agent="5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) draw.io/16.5.1 Chrome/96.0.4664.110 Electron/16.0.7 Safari/537.36" etag="stNTYIHJIUV5Tb1_oGOW" version="16.5.1" type="device"><diagram id="JN010e5WW4VA-edj3ZMB" name="Page-1">7V1bc5s6EP41njl9SAZJXB8dJ2k6vSVNe3r61MEg27QYXIETu7/+CAwYJGFjGxzs0IfGCCFA+31a7Wq19NBgunhLzNnko29jtwcle9FD1z0INR3S/6OC5apAQcaqYEwce1UE1gWPzl+cFEpJ6dyxcVCoGPq+GzqzYqHlex62wkKZSYj/XKw28t3iXWfmGHMFj5bp8qXfHTucJKUQ6esTd9gZT5Jbq1Ly4FMzq7wqCCam7T+viuI66KaHBsT3w9Wv6WKA3ajv0n5ZNXRbcjZ7MIK9sNIF8O6Tcg0fPgdPI83V335Tl/YFSJp5Mt158sY9qLq0wauRT9uljx0uk75Q/8z99MRFEEuqTytAOKPCvlqfp7/G0d87BxOTWBOH9iat9xiSuRXOCU5vQB91dY9V9aSXstvB4NmZuqZHj64m4dSlhYD+DEKThAlO6BvTcz5x/tJ2zLSGFR04HiZfl7OoVkhw1IbpOmOPHlq0vzCJ3sNx3YHv+iS+HVJNSRqN4jsQ/zfOnUGaJA0GyZvnykfxP1pO/LlnYzu5f1QrfUCYvVheUGmnYxLiRa4oEdxb7E9xSJa0SnJWTumwLLLjeQ1JWUnKJjk0orTQTGgwzppeI4X+SMCyC3C0g4ED9BLgpA0ND2plQOsR33WprNeAG7L32AbC54kT4seZaUXHz3SIK6IxwtYtHVMcb5wr+eg/rQs8/Hxj0zElbVPLPSvOTqwfP3qr7Lw79HNX595xsD6IGdGPxrqSVjzfK7sWe/YOV2rXPG1GuoUtS0Sboa5EoNxAD6CL6bFxpKpMGrXIGajDS4WjDZQEtGmMNanWyAEM57Dhk3Dij33PdHOIubLm5CnrvXVfRj27vuCD78+SKr9wGC6THjbnoV9ELF444X/R5bQzVkc/cmeuF0nL8cEyPaBMWuYuig5/5M+tL4uP0uvyolYziKR6tGRs3CT8wJ8TC2+qmHQwpcQYb2wxGb+i7t8IJoJdM3SeitOB+odTJLUGGSCHizVKtiGjgIs1TFqNDGnxe36F+nffx0hxvt+/08ivhwvULmDobdez93SO5kXqrtOynZaFQOWnpkfWsVprRtIz1bFGVR2rt2oolUFrgHGmKrYqMFIfUEuAkT63QMcephpr09Q33tMlPf8Bm4RRtUW1KtLAnao9U1WL5MvUnnw5ZctTJ8HqozPlsBdMzNgjhxch14s5DIogapvBJKsbHdybYYiJF5dASRbIgZXraATFcrXVoaqoTUoOpUNMKjr1Eqqc6AAQiE6mVZXDpbfJ1MkJ79/4Zye9jdIDKrjUtaOKb+mOdPOm/8n+81H/eT98em//HAjE11MG/0x+AvrnDSe+cCW3gis935Xx0Fjs96SI85xHXRd59fvJialj2/FsSST74qQpL2ajSaEB1jCRICcxJHKZ1zBUPsoPV++Nv4PHi/nXT7fLpx8P4OuFYKUllhY8OWlxQqgswMrSysTwUtKCJdJCnbSiZaXigAg13rHelLSEzwshJ5Ltpt1JW3MFu7SSaderaMQlonwB6z6+lM7ezWWuwsx3vDDItXwfFfTKl0YNJQ+nXevTH6snWIMxe5UD8In2wOcrcz3Ujs/anQx74RPB3fDJ1leNI8BT7uB5dHjWvgD5IvBEiRpoFJ8A7APQk0dhgWFHgKTWjhETGrtBkq2vJK6VZiGpcAbCVzP4T
UveeSNMhURFwjlQ1gFtbIwY4+2wdUnSkMjb0VckSU7hsz0WrRAthzbBaAfLmhGPIljxE7lCjMZm/+VBjC3x37/98q3z2Z+Dz17ZlSxQS4POUoWpCKLQgKLwfJEbi91s7RJ5plx3WiIHLzjfS6dD2+d7SK1bux42aErcoHnvu461PEBtgaEJMBTRUJLUm/7tBrXFKaoiX2tQW4bOuBhFegsK9BaAjSmu8iDqliiuAXFCx+p01xnorozw1de9ioQROHkzt31x00FTdCmPhWwJXfpWSIXXsaVjS8aCF2MLao9T79QnebJadZKn1D3HE7s8AGCMCi3b25I2snqp5DoGSrW45Iw94HWkWNrtWCiOEfti7giISsehphHFzo5liZn0rp6Uw9Ouzj0WuYrO7Fpln4t17hXrN+TcQ/uEChcxdfThMg9c6VJ7yUWQ6uMlaATdu4NSLpqGGmoeZGkn5Waz77zZPOSQV93+ViXTAJpocgSvtWhnelW3canod3B8MdMhnbe2gZ45xwr2tt6UvS233t7+gp9NYncWRIMWhK1g3ZZFJNHhEKlqXRZExu/qlIGsr1jAGVlkQ3Czv/oo0/q9EY+hGZbnlOgY86oYA1pAGYX3rKc4DGamVwtrDuNe3wod3xOTIytbPWvHmbPnjGhmdmTKAGkfZ9ULG1zt8U9paWasE8t6ofGBGx/x1CeHrEGOTFWPLR2OeAMNKRttoObXIBVkMOSTkXaJ+L1EiiiAJttgVL8g4EH0O9eYz02Y3U7KymF38pFI+S3A5PPwV5QmD0quOcRuK2cnN4uZ61hO9JA7TVFquffMCXw7WvGW0pGozIKIZjRf4ux9OWlsZhnKyTWhZfU5FbdTqJtkjUZYLdkUqhlDqbbFQO1g75co5Eswwqu7D/D0MMfrSjxvSeKgOzwcOvGIs4lqGxII7cFAuWPgK2Eg0rZTUBTtVQ8FK067+CDy3ahlbCGoU0srlceLsoYGvhf4rmObjNuhxFXn7OS+4/feVt9TSzB9enMYNxVhNVkoou0qVz0lwns0aw0SvDLw3WH7+j4KhFn3rGqkN2YiqJ2JULuJgKqaCEeKK1HYGFk2d0VJVAnXkKZsaagknKC2FXxpnx23hzuUGoPePkQodVhsB2bVTF5aMwv4/LZDNjzFkCsBc+fd5EweCmYv2db6sgEZODcQOaDxcbBfsGnTtr4TJ9y476yOROpnN6U9naAKxMwKkKELdxOJnIcq2MN9fyKmZJK2qnn70ejsx5NcJsuGzOq7XNmNe0BSL1Vg5P7xxJNE2/jgPhnATtet8x4fw5OjSx0TXysTRTEeQuYd0ZUDpMNChk/QPm4sBZVc2fpoWR5hsM++6oPiFuAeCcbBYWLftplif7mn3bdV7qkx+MJh4wqzyiPLx8iFgyBn/SUpd5OPryVKnFTW4pwK/+AHAY9k13VmQYTYbco1mK2+xDZyFhEqtqV2tU2sj4RrCKql4+GoHtPJYJK4Qv0S8TO49HN1eTWSljWQh6E8Ae9ZioDNNaXIAutVBwIRNPfdnfbskmytj5sfvjdlk66QCaNln9hJlwY7DBw8dUNVp26ZGmsNCPjtWPf3n890JAaSzAYiAvmYg/Hnu9tgSv5+0hbk+d3N8r87+wGmOZfbb0HVmjCymTyQWzf4pokb697gq2mK8D51TYqFmfDFudWnJ5er+7Adl+z0SvApLmEGihpCBoRS4Q2VSCr/BLHnSjJXf8ib1yUlmaEhPJ6QhInuyz4icX6J7jeLhckzkIWP1Z/Snh6uv/q9GgHXn05HN/8D</diagram></mxfile>
|
2210.16530/main_diagram/main_diagram.pdf
ADDED
|
Binary file (53.7 kB). View file
|
|
|
2210.16530/paper_text/intro_method.md
ADDED
|
@@ -0,0 +1,109 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Introduction
|
| 2 |
+
|
| 3 |
+
A major problem with most model-free RL methods is their sample inefficiency and poor generalizability. Even for simple tasks, these methods require thousands of interactions with the environment. Furthermore, even after learning a task, slight changes in the environment dynamics or the underlying task will force us to basically retrain the agent from scratch. To combat these issues, a number of methods have been proposed that utilize ideas from multi-task and meta learning ([@zintgraf2019varibad; @wang2016learning]). Some of these methods treat the task-ID like an unobservable latent variable in the underlying POMDP and condition their policy on a learned, task belief state [@zintgraf2019varibad]. However, these methods generally use some simplifying assumptions that limit their generalizability. Our proposed method, BIMRL, is similar in that it learns to identify a suitable task-specific latent and uses it to quickly adapt, but relaxes some of these assumptions. Similar to some prior works ([@zintgraf2019varibad; @zintgraf2021exploration]), we use variational methods to estimate this latent variable. However, by introducing a new factorization of the log-likelihood of trajectories, our method learns a different context embedding that is more consistent with POMDP assumptions and can also be interpreted in a more meaningful way.
|
| 4 |
+
|
| 5 |
+
Another central issue with RL is the well-known exploration-exploitation dilemma [@zintgraf2019varibad]. If an agent wants to quickly adapt to a new task, it must be able to explore the environment and obtain relevant experiences. Nonetheless, with complex tasks and environments, conventional exploration methods, such as epsilon-greedy exploration, are not enough. Intrinsic rewards that encourage suitable exploration are currently one of the most successful approaches. Our novel memory module provides an intrinsic reward to guide the exploration; thus alleviating the problems that arise from exploration across different tasks. Aside from this reward our memory module, which consists of an episodic part that resets after each episode, and a Hebbian part ([@limbacher2020h]) which retains information across different episodes of a particular task, gives us a performance boost in memory-based tasks while helping deal with catastrophic forgetting that comes up in multi-task settings.
|
| 6 |
+
|
| 7 |
+
To summarize, our contributions are as follows:
|
| 8 |
+
|
| 9 |
+
1. Proposing a multi-layer architecture that bridges model-based and model-free approaches by utilizing predictive information on state-values. Each layer of our architecture corresponds to a different part of the Prefrontal Cortex (PFC) which is known to be responsible for important cognitive functionalities such as learning a world-model [@russin2020deep], planning [@miller2021multi], and shaping exploratory behaviours [@russin2020deep]. One of the objectives in this architecture is derived from our newly proposed factorization of the log-likelihood of trajectories. This factorization is more robust to violations of POMDP assumptions and also takes the predictive coding ability of the brain into consideration [@alexander2018frontal].
|
| 10 |
+
|
| 11 |
+
2. Introducing a neuro-inspired memory module compatible with discoveries in cognitive neuroscience. This module will help in memory-based tasks and give the agent the ability to preserve information, both within an episode and across different episodes of some task. The design of this memory module is reminiscent of Hippocampus (HP) as the main region assigned to episodic memory in the brain [@eichenbaum2017prefrontal].
|
| 12 |
+
|
| 13 |
+
3. Proposing a new intrinsic reward based on our memory module. This reward does not disappear over time, is task-agnostic, and is very effective in a multi-task setting. It will encourage exploratory behaviour that leads to faster task identification.
|
| 14 |
+
|
| 15 |
+
# Method
|
| 16 |
+
|
| 17 |
+
Injecting a modular inductive bias into policies is a promising strategy for improving out-of-distribution generalization in a systematic way. [@goyal2019recurrent] proposed a modular structure where each module can become an expert on a different part of the state space. These modules will be dynamically combined through attention mechanisms to form the overall policy. [@mittal2020learning] introduced an extension called the bidirectional recurrent independent mechanism (BRIMs). BRIM is composed of multiple layers of recurrent neural networks where each module also receives information from the upper layer. A modified version of BRIM is used in our architecture as well.
|
| 18 |
+
|
| 19 |
+
There have been efforts to combine model-based and model-free approaches. Dyna [@sutton1991dyna] and successor representation [@momennejad2017successor] are two such attempts. There have also been some investigations into whether our brains work in a model-based or model-free manner, which have resulted in some neuro-inspired computational models ([@kim2020reliability]). We propose a new way of combining these two approaches that is inspired by the predictive capabilities of PFC [@russek2017predictive].
|
| 20 |
+
|
| 21 |
+
Another central issue in RL is efficient exploration of the environment. Methods that encourage effective exploratory behaviour through the use of an intrinsic reward are among the most popular and effective ways of dealing with this problem. Intrinsic rewards can be obtained from a novelty or curiosity criterion ([@pathak2017curiosity]). It is worth noting that effective exploration becomes an even bigger issue in the meta-learning setting ([@zintgraf2021exploration]).
|
| 22 |
+
|
| 23 |
+
In this chapter we introduce our proposed method. We will first derive a factorization for a trajectory's log-likelihood. Using this factorization, we will design a multi-layered architecture with multiple loss functions. We will then shift our focus and introduce our memory module. Finally, we will give some intuitive interpretations of our proposed method and mention some of the connections to different regions of the human nervous system.
|
| 24 |
+
|
| 25 |
+
<figure id="fig:full_model" data-latex-placement="!ht">
|
| 26 |
+
<img src="model1" style="height:8cm" />
|
| 27 |
+
<figcaption>A bird’s-eye view of our model. The major components of BIMRL include the task inference module, the multi-layer hierarchical structure and the memory module.</figcaption>
|
| 28 |
+
</figure>
|
| 29 |
+
|
| 30 |
+
Our method builds upon VariBAD [@zintgraf2019varibad] and we use the same terminology as is used there. Similar to VariBAD, we aim to optimize the following ELBO loss corresponding to log-likelihood of a trajectory $\tau$: $$\begin{equation}
|
| 31 |
+
\begin{multlined}[t]
|
| 32 |
+
\mathrm{ELBO}_t = \mathbb{E}_{\rho}\left[\mathbb{E}_{q_{\phi}\left(m \mid \tau_{: t}\right)}\left[\log p_{\theta}\left(\tau_{: H^{+}} \mid m\right)\right] \right. \\ \left.
|
| 33 |
+
-K L\left(q_{\phi}\left(m \mid \tau_{: t}\right) \| p_{\theta}(m)\right)\right],
|
| 34 |
+
\end{multlined}
|
| 35 |
+
\end{equation}$$ where $\rho$ is the trajectory distribution induced by our policy, $m$ is the belief state, and $H^+$ is the time horizon for a task, which is set to be four times the horizon for an episode. Notice that this objective is comprised of two parts. The first part, $\mathbb{E}_{q_{\phi}\left( m \mid \tau_{: t} \right) } \left[ \log p_{\theta} \left( \tau_{: H^{+}} \mid m \right) \right]$, is known as the reconstruction loss and the second part is the KL-divergence between the variational posterior $q_\phi$ and the prior over the belief state, $p_\theta(m)$. Unlike VariBAD, we factorize the reconstruction loss as follows: $$\begin{align}
|
| 36 |
+
\label{bimrl}
|
| 37 |
+
\log p&\left(\tau_{: H^{+}} \mid m, a_{0,\cdots, n}\right) =\log p\left(s_{0} \mid m\right) \\ &+\sum_{i=0}^{H^{+}-1}\sum_{j=0}^{\text{min}(n, i)}\left[\log p\left(s_{i+1} \mid s_{i-j}, a_{i-j, \cdots, i}, m\right)\right]\nonumber
|
| 38 |
+
\\ &+\sum_{i=0}^{H^{+}-1}\sum_{j=0}^{\text{min}(n, i)}\left[\log p\left(r_{i+1} \mid s_{i-j}, a_{i-j, \cdots, i}, m\right)\right]\nonumber
|
| 39 |
+
\\ &+\sum_{i=0}^{H^{+}-1}\sum_{j=0}^{\text{min}(n, i)}\left[\log p\left(a_{i+1} \mid s_{i-j, \cdots, i+1}, m\right)\right]\nonumber
|
| 40 |
+
\end{align}$$ It is worth mentioning that unlike previous methods which use the Bayes Adaptive Markov Decision Process (BAMDP) formulation and assume perfect observability given the belief state (which will not hold in extreme partial observability scenarios), our factorization uses the predictive information in $m$ and asserts that our belief should contain enough information to predict the next $n$ states and rewards, given the next $n$ actions. Contrary to VariBAD that conditions the trajectory on the full history of actions, we condition a trajectory on only the first $n$ actions (hence, the added fourth summation term in [\[bimrl\]](#bimrl){reference-type="ref" reference="bimrl"}). This modification will result in an added action simulation network which will also help in learning better representations in the first level of the hierarchical structure. This predictive coding capability is compatible with those observed in the PFC ([@alexander2018frontal]).
|
| 41 |
+
|
| 42 |
+
Our proposed architecture is composed of a recurrent task inference module responsible for updating our estimate of belief state about the current task, followed by a hierarchical structure similar to BRIM [@mittal2020learning], that conditions on the inferred belief state. As we will see, this hierarchical structure is similar to PFC in how they function. The last layer of this hierarchical structure is a controller whose hidden state will be fed into an actor-critic network. We also have a memory module that directly interacts with the controller. Figure [1](#fig:full_model){reference-type="ref" reference="fig:full_model"} demonstrates how all these modules are put together. In what follows, we will briefly introduce each part[^3].
|
| 43 |
+
|
| 44 |
+
The task inference module is a recurrent network that processes the last (observation, action, reward) and produces a belief state that will be passed to each of the three layers in the hierarchical structure as well as the memory module.
|
| 45 |
+
|
| 46 |
+
The first level of BRIM (as shown in Fig. [1](#fig:full_model){reference-type="ref" reference="fig:full_model"}) is responsible for learning a world model. This layer uses the last observation, action and reward in addition to the current belief state to predict dynamics, inverse dynamics and the rewards. It does so using three simulation networks. This layer is trained to maximize the trajectories log-likelihood as is formulated in [\[bimrl\]](#bimrl){reference-type="ref" reference="bimrl"}. Figure [2](#fig:l1){reference-type="ref" reference="fig:l1"} illustrates this layer in more detail.
|
| 47 |
+
|
| 48 |
+
<figure id="fig:l1" data-latex-placement="H">
|
| 49 |
+
<p><img src="l1_brim" style="width:90.0%" alt="image" /> ,</p>
|
| 50 |
+
<figcaption>First level of the hierarchical structure. Responsible for learning a world-model.</figcaption>
|
| 51 |
+
</figure>
|
| 52 |
+
|
| 53 |
+
The second level (the "Planning" box in Fig. [1](#fig:full_model){reference-type="ref" reference="fig:full_model"}) is responsible for predicting the value of the next $n$ steps. It will take in the previous level's hidden state ($h_1$) as well as the current state and the next $n$ actions, and predicts the value for the next $n$ time-steps. It aims to minimize the following loss function: $$\begin{align}
|
| 54 |
+
Loss_{\mathrm{layer } 2} = \sum_{i=0}^{H^{+}-1}\sum_{j=0}^{n}\left[\left\Vert V_{\psi }\right(s_i, a_{i, \cdots i+j}, h_2\left) - G_{i+j+1: i+j+1+k} \right\Vert^{2}\right], \label{loss_l2}
|
| 55 |
+
\end{align}$$ where $G_{t:t+k}$ is the $k$-step TD return defined as $$\begin{align}
|
| 56 |
+
G_{t: t+k} = R_{t+1}+\gamma R_{t+2}+\cdots+\gamma^{k-1} R_{t+k}+\gamma^{k} V_{\theta}^{\pi}\left(S_{t+k}\right). \label{g_n}
|
| 57 |
+
\end{align}$$ In the above equation, $\psi$ denotes the parameters of $n$-step value decoder network, $h_2$ is the output of the second level and $\theta$ shows the parameters of the critic head in the actor-critic network.
|
| 58 |
+
|
| 59 |
+
The existence of this layer between the upper layer, responsible for learning the model of the environment, and the lower layer, which directly affects decision making, allows the following interpretations:
|
| 60 |
+
|
| 61 |
+
- **Model-based approach:** Model-based methods first obtain a model of the environment and then plan their next action using this model. In our proposed architecture, the first layer aims to learn as much as possible about the environment. The second layer receives this information and predicts the value for the next $n$ steps. To do so accurately, this layer must be able to perform some sort of planning. Therefore, in our model the two explicit stages in model-based approaches are performed implicitly by the networks in the first and second layer.
|
| 62 |
+
|
| 63 |
+
- **Context-based RL:** Methods such as VariBAD inform the agent of its task by providing it with a context vector obtained from a dynamics prediction network. The context vector that we provide is even more informative as it also contains the necessary information for predicting the value of states in the next several steps.
|
| 64 |
+
|
| 65 |
+
Finally, the third level of BRIM, known as the controller, aggregates the information from the previous BRIM layer and the most recent observations from the environment. Additionally, the hidden state of this layer modulates access to the memory, which will be discussed in the next section. The aggregated information will then be used by shallow actor and critic networks to determine the next action and value. We use PPO [@schulman2017proximal], an on-policy learning algorithm, to train our policy networks.
|
| 66 |
+
|
| 67 |
+
This hierarchical structure is reminiscent of certain parts of the brain. The first layer is similar to the medial Prefrontal Cortex (mPFC) as they are both responsible for learning a predictive model of the environment [@alexander2018frontal]. Similarly, the second layer corresponds to the Orbitofrontal Cortex (OFC) which, in some neuroscientific literature, is identified as the main region responsible for multi-step planning in the brain [@miller2021multi]. Lastly, a part of the nervous system that connects PFC to HP (episodic memory) and the Striatum (which corresponds to actor-critic networks [@sutton2018reinforcement]) is known as the Anterior Cingulate Cortex (ACC) [@monosov2020outcome]. This region manages the use of control or habitual behaviours based on the degree of uncertainty about the task. It is also known as one of the pathways for transferring information from PFC to HP to guide the transition between the working memory (corresponding to PFC [@wang2018prefrontal]) and the episodic memory [@eichenbaum2017prefrontal]. In our model this corresponds to the third layer which modulates the memory and aggregates information in time [@monosov2020outcome].
|
| 68 |
+
|
| 69 |
+
The memory module is itself composed of two parts: an explicit episodic memory and a Hebbian memory. At each time step, the output of these two parts are combined using an attention mechanism and is then sent to the controller (see Fig. [3](#fig:memory_full){reference-type="ref" reference="fig:memory_full"}). The hidden state of the controller is used as the query for this attention mechanism and weights the contribution of each module.
|
| 70 |
+
|
| 71 |
+
By concatenating the embedded observation of the agent along with the output of the task inference module at each time-step, we create the "event key" and the hidden state of the controller is used as the "event value". In what follows, we will reference these event keys and values.
|
| 72 |
+
|
| 73 |
+
<figure id="fig:memory_full" data-latex-placement="!htb">
|
| 74 |
+
<img src="full_mem" style="width:90.0%" />
|
| 75 |
+
<figcaption>The interaction between the controller and the memory module</figcaption>
|
| 76 |
+
</figure>
|
| 77 |
+
|
| 78 |
+
Explicit memory is a slot-based memory that stores the event keys and values. At each time-step, the hidden state of the last level of BRIM is used as the query and the explicit memory is accessed using a multi-head attention mechanism. At the end of each episode, we compute the normalized "reference time" for each slot, which indicates how often each stored event was called upon by the incoming queries, on average. Equation ([\[equ:ref_time\]](#equ:ref_time){reference-type="ref" reference="equ:ref_time"}) shows how it is calculated. $$\begin{equation}
|
| 79 |
+
r_{i}=\frac{\sum_{t=s_{i}}^{H} W_{i}^{t}}{H-s_{i}} \label{equ:ref_time}
|
| 80 |
+
\end{equation}$$ In the above equation $r_{i}$ is the reference time for the $i$-th slot, $s_{i}$ is the time when this slot was added to the memory, $W_{i}^{t}$ is the attention weight attributed to the $i$-th slot at time $t$ and $H$ is the episode length. As we will see, the reference time is needed for updating the Hebbian memory. Figure [4](#fig:episodic_memory){reference-type="ref" reference="fig:episodic_memory"} demonstrates how the episodic memory works.
|
| 81 |
+
|
| 82 |
+
<figure id="fig:episodic_memory" data-latex-placement="!htb">
|
| 83 |
+
<img src="epi1_mem" style="width:90.0%" />
|
| 84 |
+
<figcaption>Episodic memory</figcaption>
|
| 85 |
+
</figure>
|
| 86 |
+
|
| 87 |
+
The Hebbian memory works on the time scale of tasks, i.e., it won't be reset after each episode. Once an episode is completed, the top $k$ percent of slots with the largest reference times are transferred to the Hebbian memory through the process of memory consolidation [@camina2017neuroanatomical]. These transferred slots will update the Hebbian memory using the Hebbian learning rule. Equation ([\[equ:hebb_learning_rule\]](#equ:hebb_learning_rule){reference-type="ref" reference="equ:hebb_learning_rule"}) shows a general form of this learning rule: $$\begin{align}
|
| 88 |
+
\Delta W_{k l}^{\mathrm{assoc}}=\gamma_{+}\left(w^{\mathrm{max}}-W_{k l}^{\mathrm{assoc}}\right) v_{t, k}^{\mathrm{s}} k_{t, l}^{\mathrm{s}}-\gamma_{-} W_{k l}^{\mathrm{assoc}} \left(k_{t, l}^{\mathrm{s}}\right)^{2} \label{equ:hebb_learning_rule}
|
| 89 |
+
\end{align}$$ Here, $\gamma_{+}$ is the correlation coefficient, $\gamma_{-}$ is the regularization coefficient, $w^{\mathrm{max}}$ imposes a soft constraint on the maximum connection weight, and $W_{k l}^{\mathrm{assoc}}$ denotes the parameters of the Hebbian network. As can be seen in the above equation, this learning rule consists of multiple meta-parameters that control the plasticity of synaptic connections. Inspired by the phenomenon of meta-plasticity ([@farashahi2017metaplasticity]), a meta-learning mechanism is used so that these meta-parameters can be learned. Using the Hebbian learning rule as the learning mechanism in the inner loop will also circumvent the issue of facing a second-order optimization problem.
|
| 90 |
+
|
| 91 |
+
This transfer of information from a dynamic memory that grows into a fixed-size memory can also help with the decontextualization of the event [@duff2020semantic].
|
| 92 |
+
|
| 93 |
+
Our proposed intrinsic reward is added to the sparse extrinsic reward of the environment to encourage exploration, both on episodic and task-level time scales. This reward is obtained by multiplying two components. The first component, $r^{\mathrm{curiosity}}_t$, is a curiosity based reward that encourages visiting surprising states. It is a convex combination of reward, state, and action prediction errors. This reward is scaled using a second component, $\alpha_t$. We can think of this second part as a factor that adjusts for the *newness* of the current observation. It is defined as the distance between the most recent observation and the $k$-th nearest event stored in the episodic memory. If the agent encounters an observation that is unlike what has been seen in the on-going episode, this coefficient will be large, resulting in a high intrinsic reward. The following equations fully define the intrinsic reward: $$\begin{equation}
|
| 94 |
+
r_{t}^{\mathrm{intrinsic}}\left(r_{t-1}, s_{t-1}, a_{t-1}, m\right)=\alpha_{t}r^{\mathrm{curiosity}}_{t},
|
| 95 |
+
\end{equation}$$ where $$\begin{equation}
|
| 96 |
+
\alpha_{t} = \left\|\boldsymbol{x}_{t}-\mathrm{NN}_{k, \boldsymbol{X}}\left(\boldsymbol{x}_{t}\right)\right\|, \notag
|
| 97 |
+
\end{equation}$$
|
| 98 |
+
|
| 99 |
+
$$\begin{align}
|
| 100 |
+
\begin{array}{r}
|
| 101 |
+
r^{\mathrm{curiosity}}_t=-\mathbb{E}_{q_{\phi}\left(m \mid \tau_{: t}\right)}\left[\lambda_{\mathrm{reward}} \times \log p_{\theta}\left(r_{t} \mid s_{t-1}, a_{t-1}, s_{t}, m\right)\right. \\ \notag
|
| 102 |
+
\left.+ \lambda_{\mathrm{state}} \times \log p_{\theta}\left(s_{t} \mid s_{t-1}, a_{t-1}, m\right)\right. \\
|
| 103 |
+
\left.+ \lambda_{\mathrm{action}} \times \log p_\theta (a_t \mid s_t, s_{t-1}, m)\right],
|
| 104 |
+
\end{array}
|
| 105 |
+
\end{align}$$
|
| 106 |
+
|
| 107 |
+
$$\begin{align}
|
| 108 |
+
\lambda_{\mathrm{state}} + \lambda_{\mathrm{action}} + \lambda_{\mathrm{reward}} = 1. \notag
|
| 109 |
+
\end{align}$$ Note that $\mathrm{NN}_{k, \boldsymbol{X}}$ is the $k$-th nearest event to the current one, among the records that are stored in the episodic memory, $\boldsymbol{X}$.
|
2211.06401/main_diagram/main_diagram.drawio
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
<mxfile host="app.diagrams.net" modified="2022-10-15T22:07:04.085Z" agent="5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/105.0.0.0 Safari/537.36" etag="cONJv8nf0o0do9tfV6AM" version="20.4.1" type="device"><diagram id="TEzdxfDKJocyGb33DYir" name="Page-1">7V1dc6M2FP01mWkfzCAhvh5jJ9l2utvuTNpp+9TBINtsMHJBjpP++koYYRByLH+AE6/9EIPAAs49urrnSiI31mj+8ikLFrMvJMLJDTSjlxvr7gZCYNqAffGS13WJY6N1wTSLo/KkTcFj/B8WvyxLl3GE88aJlJCExotmYUjSFIe0URZkGVk1T5uQpHnVRTDFrYLHMEjapX/GEZ2tSz3b3JT/hOPpTFwZmOWRcRA+TTOyTMvrpSTF6yPzQFRTnprPgoisakXW/Y01ygih6635ywgnHFaB2Pp3D1uOVrec4ZTq/ODu2zfzy+Pk99Fj8gV/DsHzBPw+KGt5DpJlCUV5s/RVYMPue8E343kB4nBG5wnbBWzzGWc0ZijeJvE0ZWWULGqln4MxTr6SPKYx4UfHhFIyZyck/MCwQm5EEpIV17ImxYedUlzsNl+sjW2ykkDsTOIXHIlT2P6MUs6SW/7k8CGMUsuIGU8mcRrhzAjZFeFDFNCAffHynH1PMc7pYEXmOB08xZSVAOixv+Ng+k9xzsB0BqzIWKRTdqWcZuSpIgYwbFbWRl9AyZ4ev9SKSmt8wuxyNHtlp1TNxmVVFT8qmw1wy/3VhoRuSZ9ZjX+iLChpP63q3tifbZQU2IMOaDcdyJImcYpHVUs0K4RqhjRNcDccsSNRkM+4uYrTdjJDTaig3AsZ1jhrUnBCUlr6EwDFfnm3StosSMxruX9mleVlJYLi85cp929GsMqRkTN6zYMnnP0zL/zdum4VWSdxkjSe3RoOze54I6wvSIPapLEUpLG6Io27mzSsFubKcQ3qMCFLbo7VLKb4cRGEvHDF0JfM24T2ofiwcmanKMYbcxR+tzPEoe8bsNlUoQUNRWuFnt9GvrLQyaGHGv47m5H5eJnvjTUGkY1dVeP2HdcKnA4do9XEGkFoKKBWcBx2xXFoaQDNO7PC14G9wZ54IQ5DFdhjz0Z2h94EWJI7sfWgBp1Bbe/jT3bA3HL/Eu5RgL2JEncn9PB40h/JoakiObAVyHfmTZw+kbexFyEV8h4cW06n7gVKcRd03LNjv1cv+mGxt3wJedFfngt3C+7GXYVFDW05epwkZBXOgowaXHaMg60Wk/uACXbUfUDk+mPTPFH0KEkOX0V9Zd/amQl0HH4a3XKdX0SOQZ7HoWSDfeiKo0Y2oA1VDQlbgYQoy3AS0Pi5mUNQwVNe4SuXHjU/BJs9b6tLzckyC3H5q7qylysyd1REg2yKaauiwlrVYx+hGjXa0EUa0LRkAe+Yhln/WAdaFO5bc8cm9k/QRvFLTP9i26ZhA1Tu/832B+zJXKssuOMImGLntbbzFWcxewyeALjbOx5eA6/RD6xxfIvs74qC/qlcCJJUgNOvCwGmRux5sMAS2QGRgeXHp5yiJZWKVGKV4IWcnEEWlmklaHcXDjlyKkepAQBo8wds5MLJO2Sgo3avaeGu5Ikwt2jToo2fKykM4DUt/BHSwnLIUHmXcyWGQb8JhTOmcjwJegucPaEAdDIKPSnbMYiiidIhABb5+fhEDaBtBe28cXdW8DSawCVqI6ZgkL/5iDyPMI2MuL7W3a/eruNW6H+n9jXdZmfjHmpQuKOiri0our+LlbYVwju1bRXpvROS+XJIc7jbQK3oqHeeXaKoAjtF1QxnZC2ecpIwhOAD74AfIkLzgbj7uqjaptG3RdP9zgSoyW5BSVcRZVcnNe
LsDhW7Tg79w5Hroyp2hFTRf7+a3dKQXlfN/u40O1IKx35Vu6UjWS5RtSPFhK5+NbuloSZ6G42ewC2j0c7YsZ2ONDtSdKf9KnZ0Aj3wIRVda8wRea2IZR+ZvrOyriNupDE78iItKWlz1I48D1Xniqo6tyI83orvW58Lr79bn4uo7p0QraXPj/EYLYV+Bq55XptaDGoRhZbacjMOzclAMjojU5KyWJjwGLcg3DdM6WsZzAZLSo5wKrv5g3RZcaS5LWnCkmV6PbuC6yQBHvkox5Z6nySANGTCBRrDkleCIPAerGFr9JIdL1DYGKBDxWw50nCBDyuD1A2gWIbjGXLK94Twn399SD/wu/LkFEe5DKrPBSPA1pifcgFz55E8zde3lNTvNWNhaySeLwJ7afTId1W87xd75yJX/yF5WrznavmXztb+AUcn0vmAOFsSzv65V1kCRyP/eRndqNPA3jLB+btR90LdiRwwAlWv2S/PXY1w8RIzk8iWQxjf8LzaqpYDk0fI26/erhMVrkZEeuAI6htDPeOAkcTI2cO9JZ3fzQAckheveK7hWGdumWI+YccZQJGQFts8GW0aReJieyr61GlnW9M3nDptKA9dd90afdCPTVN2n3/Vd+pW5fsbsxZ79SGGc5OhHypUppBH3Pd1+DKnoKfHKdajBq+104opJLk+d8V1NhRd13hiwqrCAydhNh2OGzx2/l3y14cV01kGeUHMW3YC48DL5iDbmvLvR5wx5ywq4rOkxJFW82AenKqiDSk9q8jYtib4yL3YPI6iZFsuoNkGt3ZhfSkioHzHD+q1Q/KvoYSG5SyzNRJpnz+Y0Fk1fYlyFilWnvWrZf3v41U2yJSAV48N9pqV9DXWnX3ANELbw2i+KKtD96LKS657dx4Q6EQKAKoiBfYdzLkl0nHOvwZywQ1/qGbRKOGz8NmB4u2weS3MWN/LNdKos8lus0m59N9UjGN2SChV+vUUhNIh0K94xUo/JWQcJIJGVxa9ySJkt6Ie5v9Bm0bIUS1IAY7R2TRaaG53TvkiSA+XMTUltC74TMKCMX8sooDiXC1wqrL1xa88qvNoMzNCvIgEtAM40KvugeZ2X6TWwNuZspfvUlUkuabqPjJxwtvO6iq4tR1YlW+pE6/fN70K6nfvuH4Y/nh1VvtzRsome23G9OupAOyNMKMrYfYmDJRecKxyMT0TZnt699SEub0S5mjCQE8lzbqizC/3w5/i34BLqfdrSB/B8v7pz4HOdIFTZbPubu+9h1HL1jyLO/Luhw9vAt9CWWGLN3IssKlmlO997yqbpQRelXw/V4bl5zSmcRGAfmci+ThS2ag1Y18ZZHaUaVHSym03316XGUnjvFp4Hjv067bsYAHPMJHDLOQ6rgsdqc4tQ7iKMdrWmoza+9d2jCwfMGartGg/C8dOPFPgHDTwdswA0Le6FNYhs1eDC5n4nUwqOQNV2nMvfC0LK1yP/I+2WLesubT0+AkjvsS+vSaMsN3Nf4hbn775D3zW/f8=</diagram></mxfile>
|
2211.06401/main_diagram/main_diagram.pdf
ADDED
|
Binary file (69.4 kB). View file
|
|
|
2211.06401/paper_text/intro_method.md
ADDED
|
@@ -0,0 +1,82 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Introduction
|
| 2 |
+
|
| 3 |
+
Since the creation of emojis around the turn of the millennium [@stark2015conservatism; @alshenqeeti2016emojis], they have become a staple of informal textual communication, expressing emotion and intent in written text [@barbieri-etal-2018-interpretable]. This development in communication style has prompted research into emoji analysis and prediction for English [e.g. @barbieri-etal-2018-semeval; @barbieri-etal-2018-interpretable; @felbo-etal-2017-using; @tomihira2020multilingual; @zhang2020emoji]. Comparatively little research attention has been given to low-resource languages.
|
| 4 |
+
|
| 5 |
+
Emoji-prediction has posed a challenge for the research community because emojis express multiple modalities, contain visual semantics and the ability to stand in place for words [@padilla-lopez-cap-2017-ever]. The challenge is further compounded by the quantity of emojis sent and the imbalanced distribution of emoji use [@cappallo2018new; @padilla-lopez-cap-2017-ever]. Machine learning (ML) for emoji analysis and prediction has traditionally relied on traditional server-side architectures. However, training such models risks leaking sensitive information that may co-occur with emojis or be expressed through them. This can lead to potential breaches of data privacy regulation (e.g. the European General Data Protection Regulation and the California Consumer Privacy Act). In contrast, federated learning (FL) [@mcmahan2017communication] approaches the task of training machine learning models by emphasising privacy of data. Such privacy is ensured by training models locally and sharing weight updates, rather than the data, with a central server (see Figure [1](#fig1){reference-type="ref" reference="fig1"}). The FL approach assumes that some client-updates may be corrupted during transmission. FL therefore aims to retain predictive performance while emphasising user privacy in scenarios with potential data loss.
|
| 6 |
+
|
| 7 |
+
{#fig1}
|
| 8 |
+
|
| 9 |
+
Motivated by prior work in privacy preserving ML [e.g. @ramaswamy2019federated; @yang2018applied] and emoji prediction for low resource languages [e.g. @choudhary2018contrastive], we examine the application of FL to emoji topic prediction for Hindi. Specifically, we collect an imbalanced dataset of $118,030$ tweets in Hindi which contain $700$ unique emojis that we classify into $10$ pre-defined categories of emojis.[^2] We further examine the impact of two different data balancing strategies on federated and server-side, centralised model performance. Specifically, we examine: re-sampling and cost-sensitive re-weighting. We consider $6$ centralised models which form our baselines: Bi-directional LSTM [@Hochreiter_Schmidhuber_1997], IndicBert [@kakwani-etal-2020-indicnlpsuite], HindiBERT,[^3] Hindi-Electra,[^4] mBERT [@devlin-etal-2019-bert], and XLM-R [@conneau-etal-2020-unsupervised]; and LSTMs trained using two FL algorithms: FedProx [@li2018federated] and a modified version of CausalFedGSD [@francis2021towards].
|
| 10 |
+
|
| 11 |
+
We show that LSTMs trained using FL perform competitively with more complex, centralised models in spite of only using up to $50\%$ of the data.
|
| 12 |
+
|
| 13 |
+
# Method
|
| 14 |
+
|
| 15 |
+
<figure data-latex-placement="H">
|
| 16 |
+
<img src="images/cls_wt_train_loss.png" />
|
| 17 |
+
</figure>
|
| 18 |
+
|
| 19 |
+
<figure data-latex-placement="H">
|
| 20 |
+
<img src="images/cls_iid_p.png" />
|
| 21 |
+
</figure>
|
| 22 |
+
|
| 23 |
+
<figure data-latex-placement="H">
|
| 24 |
+
<img src="images/cls_iid_r.png" />
|
| 25 |
+
</figure>
|
| 26 |
+
|
| 27 |
+
<figure data-latex-placement="H">
|
| 28 |
+
<img src="images/cls_wt_valid_f1.png" />
|
| 29 |
+
</figure>
|
| 30 |
+
|
| 31 |
+
<figure data-latex-placement="H">
|
| 32 |
+
<img src="images/cls_wt_non-iid_train_loss.png" />
|
| 33 |
+
</figure>
|
| 34 |
+
|
| 35 |
+
<figure data-latex-placement="H">
|
| 36 |
+
<img src="images/cls_non-iid_p.png" />
|
| 37 |
+
</figure>
|
| 38 |
+
|
| 39 |
+
<figure data-latex-placement="H">
|
| 40 |
+
<img src="images/cls_non-iid_r.png" />
|
| 41 |
+
</figure>
|
| 42 |
+
|
| 43 |
+
<figure data-latex-placement="H">
|
| 44 |
+
<img src="images/cls_wt_non-iid_valid_f1.png" />
|
| 45 |
+
</figure>
|
| 46 |
+
|
| 47 |
+
This section provides detailed graphs for GPU usage in Watts for every variation of experiments run.
|
| 48 |
+
|
| 49 |
+
<figure data-latex-placement="H">
|
| 50 |
+
<img src="images/imbalanced_gpu.png" />
|
| 51 |
+
</figure>
|
| 52 |
+
|
| 53 |
+
<figure data-latex-placement="H">
|
| 54 |
+
<img src="images/balanced_gpu.png" />
|
| 55 |
+
</figure>
|
| 56 |
+
|
| 57 |
+
<figure data-latex-placement="H">
|
| 58 |
+
<img src="images/cls-wt_gpu.png" />
|
| 59 |
+
</figure>
|
| 60 |
+
|
| 61 |
+
We observe that when we run the original CausalFedGSD and our modification on the same hardware settings with the same number of parameters, the modified version finishes training in just 3.75 hours as opposed to the original CausalFedGSD implementation which takes around 47 hours to finish training. Figure [5](#fig:gpu_util){reference-type="ref" reference="fig:gpu_util"} is a run comparison based on the heaviest variant for both algorithms. The highest runtime recorded for both was for the class weight dataset $c=0.5$ variant. Similar performances are recorded for all other variants on all datasets.
|
| 62 |
+
|
| 63 |
+
<figure id="fig:gpu_util" data-latex-placement="H">
|
| 64 |
+
<img src="images/Causalfedgsd_vs_modified_causalfedgsd.png" />
|
| 65 |
+
<figcaption>Comparison between optimization times for the baseline CausalFedGSD vs Modified CausalFedGSD</figcaption>
|
| 66 |
+
</figure>
|
| 67 |
+
|
| 68 |
+
[^1]: The dataset and code can be accessed at https://github.com/deep1401/fedmoji
|
| 69 |
+
|
| 70 |
+
[^2]: These categories are obtained from the Emojis library, available at https://github.com/alexandrevicenzi/emojis.
|
| 71 |
+
|
| 72 |
+
[^3]: https://huggingface.co/monsoon-nlp/hindi-bert
|
| 73 |
+
|
| 74 |
+
[^4]: https://huggingface.co/monsoon-nlp/hindi-tpu-electra
|
| 75 |
+
|
| 76 |
+
[^5]: The developers of Hindi Electra also note similar under-performance on other tasks.
|
| 77 |
+
|
| 78 |
+
[^6]: Please refer to the appendices for additional details on model performance and training.
|
| 79 |
+
|
| 80 |
+
[^7]: https://simpletransformers.ai/
|
| 81 |
+
|
| 82 |
+
[^8]: https://pytorch.org/docs/stable/notes/randomness.html
|
2212.01007/main_diagram/main_diagram.drawio
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
<mxfile host="app.diagrams.net" modified="2022-04-03T13:01:09.305Z" agent="5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/99.0.4844.84 Safari/537.36" etag="FGTwqukZ8-zgTigLWj_s" version="17.2.4" type="device"><diagram id="P6RYTtqfngQ0z6XY3V_G" name="Page-1">7V1bc6M4Gv01rp19CKX75bGTTs9s7XT3zGa3tnteurCNbSbYeDHuJPPrV2BxlzGJMWC3XakKCCEE53D0ffokMcJ3y+efA3u9+OhPHW+EwPR5hN+PEIKCEvUvSnnZpQghdwnzwJ3qTFnCg/uXoxOBTt26U2dTyBj6vhe662LixF+tnElYSLODwH8qZpv5XvGqa3uurwiyhIeJ7TmVbP91p+FC3wXiWfovjjtfJFeGTN/f0k4y6yI2C3vqP+Wuhe9H+C7w/XC3tXy+c7zo4SXPZVehD3uOphULnFXY5ITwY/gF/fPl0/jT4zr8/nL3VXxmN4Ttivlue1t9x7q24UvyCAJ/u5o6USlghG+fFm7oPKztSXT0SYGu0hbh0lN7UG1O7c0izhvtzPxVqCGFUu1vwsB/dO58zw9U0spfOVEm1/OSpBHCd/FPn5xL3/1Uuq+u7YYRnVBUn+pj0E/muxOEznMuST+Wnx1/6YTBi8qijyK6O0NTFHFkcY3iU4Y5ZEBTZJEDnHFupXzTXJunV8jgUBsakVegI2DL6OQBIRVAoqcMCaakhIoGqgJt7rxZ/FPp6j0MbXflBLpCLcADSfJuaoAwYRYTVYAEtJInloeIKojkqSAi/OwgSoWoJXwILaHDjehI2jU04jA06lmso83Jduwcxma8A/LXcZpgTx7nMbyft6HnrpxUBYPHz6lOAQvQYiKKU2OQPHf9iz5ro2EHVVUEgAJnVgdqmSYg/hX0krQEOKcWg0XMlWTCKuTIBDk4FdzycuCe2UzE4JXgTkHtF27CpMV5z3gnBV8C3u8U3LdisK83ZdwipG+8GxhD54I3AMI2vd91eFPeGd6MUQuivvFGl4P3VKk5x4N9vzkzezzd4o0vB284tqGDBou3YMAivbff5HLwHpR5Dgkt+csc9m6dU3o5aA/KOoeESEtKLqmEAjOOSclUFxbEkmIihPLSgGR9E6FBj+O5EGFQZvsBIlBl04kcE3oXhAYdZ+fCg0GZ8wd4wBi2Ei3AAOHe3boL6qUblJl/gAecKaOAIS4Bk5zxpBO3Px5cUPfdoMz/AzwQVFiZGiDaoVuwfJz88dGn9//5/fOXh83vfzzdPz/eQFiB3ZnOnQe96wfhwp/7K9u7z1JvixGWP7fLdRJCiSOG6Tm/+v5aw/enE4YvOpe9Df0id5xnN/wSw0713lddeLT9/jm/85LsrNT9f8nvfM1KiHaz0+K95Lx9IVOcmZJJ6LkY/6QZQ6JHVOCHgTIbfxtMnJpXMPESQzuYO2ENRsxMuMDx7ND9XqxI60LBTP2AzAv1wytQh/1v6ycHbnYv7DuVAYP1c/zokuNqa777T6I/ehdF78ezEb99HvH3I3qvtn6Cf4+2o2KByvHgzpf2N5UO0xw3qmYfkNr9qVzCze6c5fabKmR3DV1ldcau1kkFStx/VeywIitsIpzxzNBQ2Y6YTQx0i2TFndjeO8+dr1Ti0p1O4zds4QfuX1FstxCn1PWM9j177Hi3qcKWApObiLSrub6NIqlhOzqHQSlIgajB68XUEMSXJxMzcBWzvsQMNxQz2quYmTo9uxAzZBYz1EDMkgqOgyOrx+xlJGCr8Sb6l2kkGrZGFpWvDeFCRQ8NVW0wnES3u5Gtqul9la2OZIs2lC3Sq2yZenO7kC1sli08BNnCP7ZsCQhNQaWTKZeZmKbe5U6dA3qnbmVuf4M
RAef2UhEUtkU9ejeZqpS6GkRDJW93lBw7oR37Gy3xvjtuT9mYUVbl9mw2QxMTt1ugsygFzczuAzGNAT4dm0195J1ahwmbUcZm1AmbkZHN6Mrmt7HZZFN2zGVTP3+nJkPCZZxxGXfCZWzkMr5y+W1cFhBbqBrG7pbO4qSOe9HE3EPvmEruStuf6tDE9tSxf6XEu1Ppobt0NirLp/iM1xBFoRPWscEwfUcn2brzcKIwjyaC7O9VNNGvSNBax6xtkxZKYHEKFZeIiAMmBd4hyiwK8zGzCgchxRZglQLyjBTQyg/HwCebO2QaXqX15ih2/mO13oY5Eo3PmUEtkIYLi7EsuJa0cwlpZDQ/LDfuosIZCS19psSCc2oItpFiiC6hVfuc2e/WH8eZz9vwSpqC0mAkLcxwJhZF1ghpMRlrDIGCAcPg+yHR5qRO98G28G/XxrC1oQJYWkBGbIIAAQJKWkaBlQQshtnowaTGR/RcH9FTnWzn+pwb9FRnndNf88f29FSX51Q7q+m7aJ672h97/uTx3wv1FsTJH1wvDdaWrXWHxdZ61b7ncgwqVKt2gO8l28EObqmlLN/BXdv5UepWP6LLW5/6m+/GopS5FxYGRDIlugQRWTT6qDqKCCQcUKyacVAqfne7usQSedMqvp3Pcr8Rt1nbq3acZt3DUxHE3RX2CKKqvLve7JOt3Lthb9a7dRlm7nNEqCahlvoVAj58uMWMtWS8xWOjUrmDxanMVMkhBWkjzKpj5JhyCZAkFDEJoDIDRVX79mRpXftkvfTpZ9mz1o1eFZWrXz9CUUHGv6IK6hvNBNA0UDMJkhwM7RXY2kj19o+YaiB6yQvfnugd2Z7WR4IvgVSHqLOfdH2TyhAqPg9SkSupBkuqpubZ4Eglz8Xyr10iqZE5v99srzf08+x5q/9xhOXfVK+SLrR+LX9OqEWlsp85J4BghBtZ/go8+yWXbR1l2NTUoTRqlbPSKl6l/IrnqO4EtbGrQrt+yP6Owbb9EHz1Qyp+CEfSApjSdP7eoP2QeiG+hNb9B/BD6MBad3zxpDpfk7GxHzI0UtErqQZLqsZ+yNBIxc/FD/lhIxBNG8FkMHG/foiy9ywsIACUQKKyJtXvKgSxP7jbtukPr6Z/xfTHWFiUZ2OOhh2CePViuB1JXQ5jVN/svbXh7cbwr59k24Fb0LoiHtnY1s+vvVpwZ+EWDI1U9StQ9Emq9sjxZnqei1vQke3GKbNy46XKrXe0Fno2fCCZV5sUv7uLNkw347MaSOObUu0GWCBe0z7PN9ZcDzEyEi3O/5sTuOp5lb470MqUXJjAdpB04jQcgxgDixGgf6hEMkay1ZkPOAWGooW0KJFQK0PJ92Assj6p4PrS8FT8rX/wTdaE8oP71B9Y58igvYQcPw6uG7VnmShzL7BZ8kRZK5N1UFr+9gLEoDTRi6Cqh2D6bsmpJsZAWN9VewEdICcI4KbtcpsdHaypWZ++aEMxwWD9CJFTcajlxSMGxwfeHx/2ND+pDr2+LWuvlakPA5yBYMFLIagYPEEFFlYyiat9ghrN+G5Cn0fQogr+Ybu7daQQKU6hYsmH+F5rEsczqxCj6dyscrEWqUzn68ihM80GiHrMZ9momT76yJusRrPHsG3EDH0CFfjAGJno6NDGyNR1jRmARD88kJQAi7AMyOp3SQYEpCk4tgMS/vBAYswtCXJvZHXNhgEhWR+2v7aySStLLFFsaAkvdSU0bmjjSc77GlpVbKGhBSWv4MRNbTeTqVqigzFk3SNHqCWYzH5FA5o1s54Ndhko0IWXi+3PLutmxOv5a0cJMireyoQ6Cz1a0L4/C73BR61yuE08e7NxJ0Wci0SpD/nnBzaICtxsVAnDlddlfTsbcs24aU3tJO3Y+Bou+d9UWuWmoClvBKqWRWG3/GjwGawrP47gB+NH8ANGtubB4k5NkQYfyLpS5AiKkHYpYiruzRRRu4EfjZHMsivHcPHRnzpRjv8D</diagram></mxf
ile>
|
2212.01007/main_diagram/main_diagram.pdf
ADDED
|
Binary file (58.8 kB). View file
|
|
|
2212.01007/paper_text/intro_method.md
ADDED
|
@@ -0,0 +1,145 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Introduction
|
| 2 |
+
|
| 3 |
+
Real-world image classification data usually exhibits an imbalanced distribution due to the natural scarcity of certain classes, industry barriers, and large data collection costs. The severely imbalanced data distribution causes substantial obstruction to the learning process, considering it is difficult to balance the classification performance of head and tail classes. The imbalanced learning problem attracts extensive research interests [\[4,](#page-8-0) [12,](#page-8-1) [34\]](#page-8-2). However, existing methods are incapable of deriving high accuracy on tail classes without hindering the performance of head classes or maintaining an efficient framework. This paper is targeted at learning with long-tailed training data while alleviating the above issues.
|
| 4 |
+
|
| 5 |
+
When learning deep convolutional neural networks (CNNs) with long-tailed samples, the optimization of network parameters is dominated by samples of head classes, which leads to relatively low performance for tail classes. Conventional solutions to the data imbalance problem involve biasing the optimization process towards less frequent classes, such as class-balanced re-sampling [\[4,](#page-8-0) [14\]](#page-8-3),
|
| 6 |
+
|
| 7 |
+
<sup>∗</sup> Corresponding author.
|
| 8 |
+
|
| 9 |
+
re-weighting [\[20,](#page-8-4) [38\]](#page-8-5), or classifier margin adjustment [\[18,](#page-8-6) [48\]](#page-8-7). However, these data rebalancing methods hamper the learning of head classes by interfering with the representation capacity of CNNs. A few works attempt to address this problem through ensembling multiple classifiers learned under diverse sampling strategies [\[49\]](#page-9-1) or adopting auxiliary classifiers to highlight the learning of tail classes [\[40\]](#page-8-8). However, such methods require increased network parameters and computation burden. Besides, the impact of data imbalance on feature representation learning cannot be thoroughly alleviated since they still depend on data resampling or reweighting algorithms to manage multiple classifiers.
|
| 10 |
+
|
| 11 |
+
Batch normalization is a critical component for mitigating the internal covariate shift in the feedforward calculation process of CNNs [\[21\]](#page-8-9). It can accelerate the optimization rate of network parameters and improve the generalization ability. Under the scenario of data imbalance, a single-modal Gaussian probability function can not fully model the feature space and is prone to overlook tail classes. Thus the conventional batch normalization can merely eliminate the global covariate shift, but neglect the internal covariate shift of tail classes. This harms the learning efficiency and generalization capacity on tail classes.
|
| 12 |
+
|
| 13 |
+
To address the above problem, we generalize the feature normalization by modeling the feature space with compound Gaussian distributions. As shown in Figure [1,](#page-0-0) the features of training samples are composed of several scattered clusters. For the purpose of fitting the features more comprehensively, we employ a compound set of mean and variance parameters to implement the feature normalization process. Every set of mean and variance parameters is applied for whitening a group of features within a local subspace, and independent affine parameters are utilized for reconstructing the distribution statistics. Such a compound feature normalization helps to eliminate the local covariate shift and alleviate the dominance of head classes. Based on the compound feature normalization, we set up the mainstream branch for the classification model and devise a moving average based expectation maximization algorithm to evaluate the statistical parameters.
|
| 14 |
+
|
| 15 |
+
The estimation of statistical parameters in the multi-modal Gaussian probability function easily falls into local minima, where multiple Gaussian distributions still concentrate on head classes while ignoring tail classes. Hence, we devise a dual-path learning framework to diversify those Gaussian distributions among all classes. An auxiliary branch is set up with the split normalization, which separates classes into different subsets and processes them with independent statistical and affine parameters. This benefits to disperse statistical parameters of different Gaussian distributions. Additionally, the mainstream and auxiliary branches interact with each other via the stop-gradient based consistency constraint [\[7\]](#page-8-10) for enhancing the representation learning. The main contributions of this paper are concluded as follows:
|
| 16 |
+
|
| 17 |
+
- We propose a novel compound batch normalization algorithm based on a mixture of Gaussian distributions, which can alleviate the local covariate shift and prevent the dominance of head classes.
|
| 18 |
+
- A dual-path learning framework based on the compound and split feature normalization techniques is devised to diversify the statistical parameters of different Gaussian distributions.
|
| 19 |
+
|
| 20 |
+
- Exhaustive experiments on commonly used datasets demonstrate significant improvement of our method compared to existing state-of-the-art methods.
|
| 21 |
+
|
| 22 |
+
# Method
|
| 23 |
+
|
| 24 |
+
First, we recall the calculation procedure of batch normalization. Suppose the input feature map be $\mathbf{X} \in \mathbb{R}^{B \times D \times H \times W}$ , where B, D, H, and W denote the batch size, number of channels, height, and width, respectively. We can flatten dimensions except for channels into a single dimension, resulting in a two-dimensional tensor $\mathbf{x} \in \mathbb{R}^{D \times N}$ (N = BHW). Channel-wise statistical variables including mean $\hat{\boldsymbol{\mu}} \in \mathbb{R}^{D \times 1}$ and variance $\hat{\boldsymbol{\sigma}}^2 \in \mathbb{R}^{D \times 1}$ are estimated as,
|
| 25 |
+
|
| 26 |
+
$$\hat{\boldsymbol{\mu}} = \frac{1}{N} \sum_{i=1}^{N} \mathbf{x}_i,\tag{1}$$
|
| 27 |
+
|
| 28 |
+
$$\hat{\sigma}^2 = \frac{1}{N} \sum_{i=1}^{N} (\mathbf{x}_i - \hat{\mu})^2, \tag{2}$$
|
| 29 |
+
|
| 30 |
+
where $\mathbf{x}_i \in \mathbb{R}^{D \times 1}$ represents the feature of the *i*-th point. During the network optimization, these statistical variables are accumulated with the moving average operation:
|
| 31 |
+
|
| 32 |
+
$$\mu := \lambda \mu + (1 - \lambda)\hat{\mu},\tag{3}$$
|
| 33 |
+
|
| 34 |
+
$$\sigma^2 := \lambda \sigma^2 + (1 - \lambda)\hat{\sigma}^2. \tag{4}$$
|
| 35 |
+
|
| 36 |
+
$\lambda$ is the momentum factor, determining the updating rate of $\mu$ and $\sigma$ . Afterwards, the input feature $\mathbf{x}$ is normalized as below:
|
| 37 |
+
|
| 38 |
+
$$\hat{\mathbf{x}} = \Sigma^{-\frac{1}{2}} (\mathbf{x} - \boldsymbol{\mu}), \tag{5}$$
|
| 39 |
+
|
| 40 |
+
where $\epsilon$ denotes a small constant added for numerical stability. $\Sigma = \operatorname{diag}(\sigma^2 + \epsilon)$ , where $\operatorname{diag}(\cdot)$ transforms the input vector into a diagonal matrix. Finally, the feature values are scaled and shifted with affine parameters $\gamma$ and $\beta$ ,
|
| 41 |
+
|
| 42 |
+
$$\mathbf{x}' = \gamma \hat{\mathbf{x}} + \beta. \tag{6}$$
|
| 43 |
+
|
| 44 |
+
The batch normalization operation can remove the internal covariate shift during the feedforward propagation of neural networks, which helps to stabilize and accelerate training. However, when the training samples are severely unbalanced, the calculation of the statistical variables is dominated by head classes, and the learning of affine parameters is also biased towards them. This leads to the overlooking of tail classes in the normalization process, which hinders the learning on less frequent classes.
|
| 45 |
+
|
| 46 |
+
For modeling the feature space more comprehensively, we adopt a mixture of statistical variables to fit the feature distribution. Suppose the number of Gaussian distributions be M. The j-th Gaussian distribution is represented with a triplet of variables, including prior probability, mean, and variance variables, which are defined by $\tau_j$ , $\mu_j$ , and $\Sigma_j$ , respectively. $\Sigma_j \in \mathbb{R}^{D \times D}$ is the diagonal variance matrix. Then, the batch normalization can be extended to compound
|
| 47 |
+
|
| 48 |
+
<span id="page-3-0"></span>
|
| 49 |
+
|
| 50 |
+
Figure 2: Compound batch normalization based on a mixture of Gaussian distributions.
|
| 51 |
+
|
| 52 |
+
<span id="page-3-1"></span>
|
| 53 |
+
|
| 54 |
+
Figure 3: Split batch normalization.
|
| 55 |
+
|
| 56 |
+
probability distributions. The input feature is normalized according to M Gaussian distributions independently, yielding M normalization branches. Provided a feature vector $\mathbf{x}_i$ , the normalization with the j-th Gaussian distribution is formulated as follows,
|
| 57 |
+
|
| 58 |
+
$$\hat{\mathbf{x}}_{i}^{(j)} = \Sigma_{j}^{-\frac{1}{2}} (\mathbf{x}_{i} - \mu_{j}). \tag{7}$$
|
| 59 |
+
|
| 60 |
+
For the estimation of compound statistic variables, we apply the moving average to implement the expectation-maximization algorithm. First, we calculate the probability values of every feature vector in M Gaussian distributions. Then, a temporary set of prior probability, mean, and variance variables is estimated based on input features and their probability values for the current batch of samples. These temporary variables are accumulated in the moving average manner. The algorithmic detail of the estimation process is introduced below.
|
| 61 |
+
|
| 62 |
+
**Expectation Step.** Based on the previously estimated statistic variables, we can calculate the probability values of all input feature vectors with respect to M Gaussian distributions. The probability of $\mathbf{x}_i$ belonging to the j-th distribution is estimated as follows,
|
| 63 |
+
|
| 64 |
+
$$w_{ij} = \frac{\tau_j f(\mathbf{x}_i, \boldsymbol{\mu}_j, \boldsymbol{\Sigma}_j)}{\sum_{k=1}^{M} \tau_k f(\mathbf{x}_i, \boldsymbol{\mu}_k, \boldsymbol{\Sigma}_k)}.$$
|
| 65 |
+
(8)
|
| 66 |
+
|
| 67 |
+
$f(\cdot)$ is the Gaussian probability density function, which is formulated as follows,
|
| 68 |
+
|
| 69 |
+
$$f(\mathbf{x}_{i}, \boldsymbol{\mu}_{j}, \Sigma_{j}) = \frac{\exp(-\frac{1}{2}(\mathbf{x}_{i} - \boldsymbol{\mu}_{j})^{T} \Sigma_{j}^{-1}(\mathbf{x}_{i} - \boldsymbol{\mu}_{j}))}{\sqrt{(2\pi)^{D} |\Sigma_{j}|}}.$$
|
| 70 |
+
(9)
|
| 71 |
+
|
| 72 |
+
**Maximization Step.** Temporary statistic parameters are estimated according to the probability values for each training batch. Practically, the temporary prior probability $\hat{\tau}_j^c$ , mean $\hat{\mu}_j^c$ , and variance
|
| 73 |
+
|
| 74 |
+
$\hat{\sigma}^c_j$ of the *j*-th Gaussian distribution are calculated as below,
|
| 75 |
+
|
| 76 |
+
$$\hat{\tau}_{j}^{c} = \frac{1}{N} \sum_{i=1}^{N} w_{ij},\tag{10}$$
|
| 77 |
+
|
| 78 |
+
$$\hat{\mu}_{j}^{c} = \frac{\sum_{i=1}^{N} w_{ij} \mathbf{x}_{i}}{\sum_{i=1}^{N} w_{ij}},$$
|
| 79 |
+
(11)
|
| 80 |
+
|
| 81 |
+
$$\hat{\sigma}_{j}^{c} = \frac{\sum_{i=1}^{N} w_{ij} (\mathbf{x}_{i} - \hat{\boldsymbol{\mu}}_{j}^{c})^{2}}{\sum_{i=1}^{N} w_{ij}}.$$
|
| 82 |
+
(12)
|
| 83 |
+
|
| 84 |
+
**Temporal Accumulation.** Similar to the conventional batch normalization, the moving average is applied to accumulate the above temporary variables, namely,
|
| 85 |
+
|
| 86 |
+
$$\tau_j := \lambda^c \tau_j + (1 - \lambda^c) \hat{\tau}_j^c, \tag{13}$$
|
| 87 |
+
|
| 88 |
+
$$\boldsymbol{\mu}_j := \lambda^c \boldsymbol{\mu}_j + (1 - \lambda^c) \hat{\boldsymbol{\mu}}_j^c, \tag{14}$$
|
| 89 |
+
|
| 90 |
+
$$\Sigma_j := \lambda^c \Sigma_j + (1 - \lambda^c) \operatorname{diag}(\hat{\sigma}_j^c). \tag{15}$$
|
| 91 |
+
|
| 92 |
+
Here, $\operatorname{diag}(\cdot)$ transforms the input vector into a diagonal matrix, and $\lambda^c$ is a constant.
|
| 93 |
+
|
| 94 |
+
Finally, separate scaling and bias coefficients are learned to redistribute the values of different normalization branches. Those redistributed values are combined by the following weighted summation operation,
|
| 95 |
+
|
| 96 |
+
$$\mathbf{x}'_{i} = \sum_{j=1}^{M} w_{ij} (\gamma_{j} \hat{\mathbf{x}}_{i}^{(j)} + \beta_{j}).$$
|
| 97 |
+
(16)
|
| 98 |
+
|
| 99 |
+
$\gamma_j$ and $\beta_j$ represent the scaling and bias coefficient of the j-th normalization branch respectively. The calculation process of the above normalization algorithm is illustrated in Figure 2. More details can be found in Appendix A.
|
| 100 |
+
|
| 101 |
+
The generalized batch normalization can be easily incorporated into existing convolutional neural networks by replacing their original normalization layers. However, learning compound distributions with the expectation-maximization algorithm may easily fall into local optima and suffer from model collapse. To overcome this problem, we set up a split normalization strategy to diversify the Gaussian distributions with different sets of training samples.
|
| 102 |
+
|
| 103 |
+
First, we uniformly split all class labels into M independent groups according to their serial numbers, resulting in $\{\mathbb{C}^j\}_{j=1}^M$ , where $\mathbb{C}^j$ represents the j-th set of class labels. For $j \neq k$ , $\mathbb{C}^j \cap \mathbb{C}^k = \emptyset$ . The union of all class sets $(\bigcup_{j=1}^M \mathbb{C}^j)$ is equal to the sequence of integers from 1 to the number of classes (K). According to these class sets, the input features in $\mathbf{x}$ can be separated into M groups as well, namely $\{\mathbf{x}^j \in \mathbf{R}^{D \times N_j}\}_{j=1}^M$ , where the features in $\mathbf{x}^j$ come from images with class labels in $\mathbb{C}^j$ , and $N_j$ represents the number of features in the j-th group. Then, a split normalization strategy illustrated in Figure 3 is devised to process M sets of features with M Gaussian distributions, respectively. Meanwhile, each set of features
|
| 104 |
+
|
| 105 |
+
<span id="page-4-0"></span>
|
| 106 |
+
|
| 107 |
+
Figure 4: The proposed dual-path learning framework.
|
| 108 |
+
|
| 109 |
+
is utilized for calculating the other temporary mean and variance,
|
| 110 |
+
|
| 111 |
+
$$\hat{\mu}_{j}^{s} = \frac{1}{N_{j}} \sum_{i=1}^{N_{j}} \mathbf{x}_{i}^{j}, \tag{17}$$
|
| 112 |
+
|
| 113 |
+
$$\hat{\sigma}_{j}^{s} = \frac{1}{N_{j}} \sum_{i=1}^{N_{j}} (\mathbf{x}_{i}^{j} - \hat{\mu}_{j}^{s})^{2}.$$
|
| 114 |
+
(18)
|
| 115 |
+
|
| 116 |
+
The above variables are also leveraged to update $\mu_j$ and $\Sigma_j$ : $\mu_j := \lambda^s \mu_j + (1 - \lambda^s) \hat{\mu}_j^s$ , and $\Sigma_j := \lambda^s \Sigma_j + (1 - \lambda^s) \operatorname{diag}(\hat{\sigma}_j^s)$. This process is beneficial for diversifying multiple Gaussian distributions and preventing the distribution collapse issue. The calculation process of the split batch normalization is illustrated in Algorithm 3 (Appendix A).
|
| 117 |
+
|
| 118 |
+
As shown in Figure 4, we build up the classification model with ResNet [17], which is learned with two branches. In the top branch, the compound normalization is applied for implementing the network feedforward process, while the split normalization is adopted for standardizing intermediate features in the bottom branch. Given an input image $\mathbf{I} \in \mathbb{R}^{3 \times H \times W}$ , we utilize a series of weak augmentation operations to transform it into $I_{weak}$ , and the other series of strong augmentation operations is leveraged to produce the other variant of the input image denoted by $I_{strg}$ . Feeding $I_{weak}$ and $I_{strg}$ into the branch based on the compound normalization, we can obtain predicted class-wise logits $\mathbf{o}_{weak}^{c} \in \mathbb{R}^{K \times 1}$ and $\mathbf{o}_{strg}^{c} \in \mathbb{R}^{K \times 1}$ , respectively. K denotes the number of classes. The split normalization branch also derives class-wise logits $\mathbf{o}_{weak}^s$ and $\mathbf{o}_{strg}^s$ from $I_{weak}$ and $I_{strg}$ , respectively. The maximization of the similarity between predictions of two branches is employed for optimizing network parameters,
|
| 119 |
+
|
| 120 |
+
<span id="page-4-1"></span>
|
| 121 |
+
$$L^{sim} = -S(\mathbf{o}_{strg}^{c}, \text{detach}(\mathbf{o}_{weak}^{s})) - S(\mathbf{o}_{strg}^{s}, \text{detach}(\mathbf{o}_{weak}^{c})). \tag{19}$$
|
| 122 |
+
|
| 123 |
+
The similarity metric $S(o_1, o_2)$ is implemented with the cosine function, $S(\mathbf{o}_1, \mathbf{o}_2) = \mathbf{o}_1^T \mathbf{o}_2/(||\mathbf{o}_1||_2||\mathbf{o}_2||_2)$. 'detach(o)' represents the stop gradient operation, which means $\mathbf{o}_{weak}^{s}$ and $\mathbf{o}_{weak}^{c}$ are regarded as constants during back-propagation.

Algorithm 1: Algorithm for one training epoch of the dual learning framework.
|
| 124 |
+
|
| 125 |
+
<span id="page-4-3"></span>**Input:** Training images: $\{\mathbf{I}_l\}_{l=1}^L$ , and their labels: $\{\mathbf{y}_l\}_{l=1}^L$ ; M sets of class labels: $\{\mathbb{C}^j\}_{j=1}^M$ ; M sets of Gaussian statistic variables: $\{\tau_j\}_{j=1}^M$ , $\{\mu_j\}_{j=1}^M$ , and $\{\Sigma_j\}_{j=1}^M$ . **Output:** Optimized network parameters.
|
| 126 |
+
|
| 127 |
+
- 1: Shuffle training images and separate into minibatches with size of B;
|
| 128 |
+
- 2: **for** i = 1 **to** $\frac{L}{B}$ **do**
|
| 129 |
+
- Fetch a batch of training images $\mathbf{B} \in \mathbb{R}^{B \times 3 \times H \times W}$ ;
|
| 130 |
+
- Distort images in B with weak augmentation operations, resulting in Bweak;
|
| 131 |
+
- Feed $\mathbf{B}_{weak}$ through the compound and split network paths, resulting in $\mathcal{O}^c_{weak}$ and $\mathcal{O}^s_{weak}$ , respectively;
|
| 132 |
+
- Distort B with strong augmentation operations, resulting in $\mathbf{B}_{strg}$;
|
| 133 |
+
- Feed $\mathbf{B}_{strg}$ through the compound and split network paths, resulting in $\mathcal{O}^c_{strg}$ and $\mathcal{O}^s_{strg}$ , respectively;
|
| 134 |
+
- Calculate training losses according to Eq. (19) and (20);
|
| 135 |
+
- Update network parameters by stochastic gradient descent;
|
| 136 |
+
- 10: end for
|
| 137 |
+
|
| 138 |
+
Finally, the balanced softmax function is employed for calculating the training loss on the network prediction of the strongly augmented image $I_{strg}$ ,
|
| 139 |
+
|
| 140 |
+
<span id="page-4-2"></span>
|
| 141 |
+
$$L^{cls} = -\log\left(\frac{n_y \exp(o_{strg}^c[y])}{\sum_{i=1}^K n_i \exp(o_{strg}^c[i])}\right). \tag{20}$$
|
| 142 |
+
|
| 143 |
+
$n_i$ denotes the number of samples in the *i*-th class, and $o_{strg}^c[i]$ is the *i*-th element in $\mathbf{o}_{strg}^c$ . y represents the ground-truth label of the input image I. The practical training procedure is implemented with the minibatch size of B. One training epoch of the dual learning framework is summarized in Algorithm 1.
|
| 144 |
+
|
| 145 |
+
In the training stage, the running variables (mean, variance, and prior probability) of each Gaussian distribution are updated with the temporary variables. During testing, the running variables are fixed, and only the calculation path of compound batch normalization is preserved, while the path of split batch normalization is no longer used in the inference process of the model.
|
2212.09034/main_diagram/main_diagram.drawio
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2212.09034/paper_text/intro_method.md
ADDED
|
@@ -0,0 +1,413 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Introduction
|
| 2 |
+
|
| 3 |
+
In the past decades, *Neural Networks* (NNs) have achieved great success in many areas. As a classic NN architecture, *Multi-Layer Perceptrons* (MLPs) [@rumelhart1986learning] stack multiple *Feed-Forward* (FF) layers with nonlinearity to universally approximate functions. Later, *graph neural networks* (GNNs) [@scarselli2008graph; @bruna2014spectral; @gilmer2017neural; @kipf2017semi; @velivckovic2017graph; @hamilton2017inductive; @klicpera2018predict; @wu2019simplifying] build themselves upon the MLP architecture, e.g., by inserting additional *Message Passing* (MP) operations amid FF layers [@kipf2017semi] to propagate features across instance nodes.
|
| 4 |
+
|
| 5 |
+
Two cornerstone concepts lying in the basis of deep learning research are model's *representation* and *generalization* power. While the former is concerned with what function class NNs can approximate and to what extent they can minimize the *empirical risk*, the latter instead focuses on the inductive bias in the learning procedure, asking how well the learned function can generalize to unseen in- and out-of-distribution samples, reflected by the *generalization gap*. There exist a number of works trying to dissect GNNs' representational power [@xu2018powerful; @maron2019provably; @oono2019graph] and optimization properties [@xu2021optimization], while their generalizability and theoretical connections with MLP are far less well-understood.
|
| 6 |
+
|
| 7 |
+
In this work, we show that GNNs are intrinsically good generalizers that naturally encourage low generalization gap in node-level prediction, by probing into the key component that indeed contributes to the superiority of GNNs over MLP. In specific, we bridge these two types of NNs by introducing a new class of MLP variants, named as *Propagational MLPs* (PMLPs). During training, PMLPs are exactly the same as a standard MLP (e.g., same architecture, training data, initialization, loss function, and optimization algorithm), and thus have the same training dynamics as MLP (e.g., values of weights, empirical risk, and loss landscape). The only difference is that in the testing phase, PMLPs additionally insert non-parametric message passing layers between (corresp. GCN [@kipf2017semi]), before (corresp. SGC [@wu2019simplifying]) or after (corresp. APPNP [@klicpera2018predict]) FF layers, as shown in Fig. [1](#fig_frame){reference-type="ref" reference="fig_frame"}(a). We have the following *results and empirical contributions:*
|
| 8 |
+
|
| 9 |
+
**$\bullet$ An intriguing phenomenon revealing GNNs' inherent generalizability.** The performance of the proposed Propagational MLP (PMLP) in inductive node prediction tasks is remarkably close or even better than its GNN counterpart and significantly outperforms MLP despite being the same model (see example in Fig. [1](#fig_frame){reference-type="ref" reference="fig_frame"}(b)), which is verified on ten benchmark datasets including three large-scale ones. This indicates that the GNN architecture, naturally (i.e., irrelevant to training) encourages lower generalization gap (Fig. [1](#fig_frame){reference-type="ref" reference="fig_frame"}(c)), and is also the main source of performance gain from MLP to GNN, providing a new perspective for understanding GNNs and in broader sense enabling us to leverage well-established theoretical frameworks for MLPs to enrich those for GNNs.
|
| 10 |
+
|
| 11 |
+
**$\bullet$ Consistency across different experimental settings.** The observed phenomenon is consistent across different architecture choices (i.e., number of layers, size of hidden states), model instantiations (i.e., activation function, message passing implementation) and data characteristics (i.e., data split, graph sparsity, noisy edges). In some cases (i.e., removing self-loops and adding noisy edges), PMLP can even outperform its GNN counterpart by a large margin.
|
| 12 |
+
|
| 13 |
+
**$\bullet$ Practical value.** PMLPs, though originally motivated from theoretical aspects, also possess practical significance such as better training efficiency ($6.82$ times more efficient than same-sized GNNs on large datasets), robustness to noisy edges, an ideal testbed for evaluating GNN architecture designs and serving as a new paradigm for model development.
|
| 14 |
+
|
| 15 |
+
<figure id="fig_frame" data-latex-placement="t!">
|
| 16 |
+
<embed src="fig/frame.pdf" style="width:90.0%" />
|
| 17 |
+
<figcaption><strong><em>(a) Model illustration</em></strong> for MLP, GNN (in GCN-style) and PMLP. <strong><em>(b) Learning curves</em></strong> for node classification on Cora that depicts a typical empirical phenomenon. <strong><em>(c) Intrinsic generalizability</em></strong> of GNN reflected by close generalization performance of GNN and PMLP. <strong><em>(d) Extrapolation illustration</em></strong>: both MLP and PMLP linearize outside the training data support (<span class="math inline">•</span>: train sample, <span class="math inline">∘</span>: test sample), while PMLP transits more smoothly and exhibits larger tolerance for OOD testing sample.</figcaption>
|
| 18 |
+
</figure>
|
| 19 |
+
|
| 20 |
+
As mentioned above, our empirical finding narrows down the many differing factors between MLPs and GNNs to a key one that accounts for their performance gap, i.e., improvement in generalizability due to the change in network architecture. Then, a natural question arises: 'Why is this the case, and what kind of inductive bias is injected when extending MLP to PMLP?'. In this work, we take an initial step towards answering this question with the *following insights and theoretical contributions:*
|
| 21 |
+
|
| 22 |
+
**$\circ$ Comparing MLP, PMLP and GNN at the infinite width limit.** We compare MLP, PMLP, and GNN in the *Neural Tangent Kernel* (NTK) regime [@jacot2018neural], where models are over-parameterized and gradient descent finds the global optima. From this perspective, the distinction of PMLP and MLP is rooted in the change of NTK *feature map* determined by model architecture while fixing their minimum RKHS-norm solutions (Proposition [1](#prop1){reference-type="ref" reference="prop1"}). We also derive the explicit formula for computing the feature map for PMLP/GNN by extending the definition of *Graph Neural Tangent Kernel* (GNTK) [@du2019graph] to the node regression setting (Lemma [2](#lemma3){reference-type="ref" reference="lemma3"}).
|
| 23 |
+
|
| 24 |
+
**$\circ$ PMLPs are more likely to generalize to OOD nodes.** We consider an important (yet overlooked) aspect of generalization analysis, i.e., *extrapolation* behavior for Out-of-Distribution (OOD) testing samples [@xu2021neural], and reveal that alike MLP, PMLP eventually converges to a linear function as the testing sample increasingly departs from the training data support (Theorem [4](#thm4){reference-type="ref" reference="thm4"}). Nevertheless, its convergence rate is smaller than that of MLP by a factor related to node degrees and features' cosine similarity (Theorem [5](#thm5){reference-type="ref" reference="thm5"}). That is, PMLP is more tolerant to OOD samples and thus has larger potential to generalize near the training data support, which is illustrated in Fig. [1](#fig_frame){reference-type="ref" reference="fig_frame"}(d).
|
| 25 |
+
|
| 26 |
+
# Method
|
| 27 |
+
|
| 28 |
+
[]{#sec_formulate label="sec_formulate"}
|
| 29 |
+
|
| 30 |
+
Assume a graph dataset $\mathcal{G}=(\mathcal{V}, \mathcal{E})$ where the node set $\mathcal{V}$ contains $n$ node instances $\{(\boldsymbol x_u, y_u)\}_{u\in \mathcal V}$, where $\boldsymbol x_u\in \mathbb R^{d}$ denotes node features and $y_u$ is the label. Without loss of generality, $y_u$ can be a categorical variable or a continuous one depending on specific prediction tasks (classification or regression). Instance relations are described by the edge set $\mathcal{E}$ and an associated adjacency matrix $\mathbf{A} \in\{0,1\}^{n \times n}$. In general, the problem is to learn a predictor model with $\hat y = f(\boldsymbol x;\mathbf \theta, \mathcal G^{k}_{\boldsymbol x})$ for node-level prediction, where $\mathcal G^{k}_{\boldsymbol x}$ denotes the $k$-hop ego-graph around $\boldsymbol x$ over $\mathcal G$.
|
| 31 |
+
|
| 32 |
+
**Graph Neural Networks and Multi-Layer Perceptrons.** The core design of GNNs lies in the message passing paradigm that recursively aggregates layer-wise neighbored information to encode structural features into node-level representations for downstream prediction. To probe into the connection between mainstream GNNs and MLP from the architectural view, we re-write the GNN formulation in a general form that explicitly disentangles each layer into two operations, namely a *Message-Passing* (MP) operation and then a *Feed-Forwarding* (FF) operation: $$\begin{equation}
|
| 33 |
+
\label{eqn-gnn-mpff}
|
| 34 |
+
\begin{split}
|
| 35 |
+
\mbox{(MP):\quad} \tilde{\mathbf{h}}^{(l-1)}_{u}= \sum\mathop{}_{v\in\mathcal N_{u}\cup\{u\}} a_{\mathcal G}(u,v) \cdot {\mathbf{h}}_{v}^{(l-1)},\quad
|
| 36 |
+
\mbox{(FF):\quad} {\mathbf{h}}_{u}^{(l)} = \psi^{(l)}\left(\tilde{\mathbf{h}}^{(l-1)}_{u} \right),
|
| 37 |
+
\end{split}
|
| 38 |
+
\end{equation}$$ where $\mathcal N_u$ is the set of neighbored nodes centered at $u$, $a_{\mathcal G}(u,v)$ is the affinity function dependent on graph structure $\mathcal G$, $\psi^{(l)}$ denotes a feature transformation mapping at the $l$-th layer, and $\mathbf h^{(0)}_u = \boldsymbol x_u$ is the initial node feature. For example, in Graph Convolution Network (GCN) [@kipf2017semi], $a_{\mathcal G}(u,v) = {\mathbf A_{uv}}/{\sqrt{\tilde d_u\tilde d_v}}$, where $\tilde d_u$ denotes the degree of node $u$ (with self-loop), and $\psi^{(l)}$ is a fully-connected layer with non-linearity. For an $L$-layer GNN, the prediction is given by $\hat y_u = \psi^{(L)}(\mathbf h_u^{(L-1)})$, where $\psi^{(L)}$ is often set as linear transformation for regression tasks or with Softmax for classification tasks. Note that GNN models in forms of Eq. [\[eqn-gnn-mpff\]](#eqn-gnn-mpff){reference-type="ref" reference="eqn-gnn-mpff"} degrade to an MLP with a series of FF layers after removing all the MP operations: $$\begin{equation}
|
| 39 |
+
\hat y_u = \psi^{(L)}(\cdots (\psi^{(1)}(\mathbf x_u))\cdots) = \psi(\mathbf x_u).
|
| 40 |
+
\end{equation}$$
|
| 41 |
+
|
| 42 |
+
**Typical Types of GNN Architectures.** Besides GCN, many other mainstream GNN models can be written as the architectural form defined by Eq. [\[eqn-gnn-mpff\]](#eqn-gnn-mpff){reference-type="ref" reference="eqn-gnn-mpff"} whose layer-wise updating rule involves MP and FF operations, e.g., GAT [@velivckovic2017graph] and GraphSAINT [@zeng2019graphsaint]. Furthermore, there are also other types of GNN architecture represented by SGC [@wu2019simplifying] and APPNP [@klicpera2018predict] where the former adopts multiple MP operations on the initial node features, and the latter stacks a series of MP operations at the end of FF layers. These two classes of GNNs are also widely explored and studied. For example, SIGN [@rossi2020sign], $\mbox{S}^2$GC [@zhu2020simple] and GBP [@chen2020scalable] follow the SGC-style, and DAGNN [@liu2020towards], AP-GCN [@spinelli2020adaptive] and GPR-GNN [@chien2020adaptive] follow the APPNP-style.
|
| 43 |
+
|
| 44 |
+
**Bridging GNNs and MLPs: Propagational MLP.** After we decouple the MP and FF operations from GNNs' layer-wise updating, we notice that the unique and critical difference of GNNs and MLP lies in whether to adopt MP (somewhere between the input node features and output prediction). To connect two families, we introduce a new model class, dubbed as *Propagational MLP* (PMLP), which has exactly the same architecture as conventional MLP, namely, the same feed-forwarding network. During the inference/testing stage, PMLP$_{GCN}$ incorporates a message passing layer into each layer's feed-forwarding, PMLP$_{SGC}$ adds multiple MP layers in the first layer, and PMLP$_{APP}$ adds them in the last layer. For clear head-to-head comparison, Table [\[tab-model\]](#tab-model){reference-type="ref" reference="tab-model"} summarizes these models with the models adopted in training and testing stages.
|
| 45 |
+
|
| 46 |
+
In this section, we conduct extensive experiments on a variety of node-level prediction benchmarks. **Section [3.1](#sec-exp-result){reference-type="ref" reference="sec-exp-result"}** shows that the proposed PMLPs can significantly outperform the original MLP though they share the same weights, and approach or even exceed their GNN counterparts. **Section [3.1](#sec-exp-result){reference-type="ref" reference="sec-exp-result"}** shows this phenomenon holds across different experimental settings. Due to space limit, we present the key experimental results in the main text and defer extra results and discussions to Appendix [9](#app_detail){reference-type="ref" reference="app_detail"} and [11](#app_add){reference-type="ref" reference="app_add"}.
|
| 47 |
+
|
| 48 |
+
**Datasets.** We use ten widely adopted node classification benchmarks involving different types of networks: three citations networks (Cora, Citeseer and Pubmed), two product co-occurrency networks (Amazon-Computer and Amazon-Photo), two coauthor-ship networks (Coauthor-CS, Coauthor-Computer), and three large-scale networks (OGBN-Arxiv, OGBN-Products and Flickr). More detailed descriptions for dataset statistics and splits are deferred to Appendix [9](#app_detail){reference-type="ref" reference="app_detail"}.
|
| 49 |
+
|
| 50 |
+
**Competitors.** For fair comparison, we set the layer number and hidden size to the same values for GNN, PMLP and MLP in the same dataset, namely all models can be viewed as sharing the same 'backbone'. Furthermore, for variants PMLP$_{SGC}$ and PMLP$_{APP}$, we consider comparison with SGC and APPNP, respectively, and slightly modify their original architectures: for SGC, we use a multi-layer neural network instead of one-layer after linear message passing, and for APPNP, we remove the residual connection (i.e., $\alpha=0$) such that the message passing scheme is aligned with other models. We basically use GCN convolution for MP layer and ReLU activation for FF layer for all models unless otherwise stated. PMLPs use the MLP architecture for validation. Detailed descriptions on hyperparameter settings are deferred to Appendix [9](#app_detail){reference-type="ref" reference="app_detail"}.
|
| 51 |
+
|
| 52 |
+
**Protocol.** We adopt inductive learning setting as the evaluation protocol, which is a commonly used benchmark setting by the community and close to the real scenarios where testing nodes are from new observations and unknown during training. More specifically, for node set $\mathcal V = \mathcal V_{tr} \cup \mathcal V_{te}$ where $\mathcal V_{tr}$ (resp. $\mathcal V_{te}$) denotes the set of training (resp. testing) nodes, the training process is only exposed to $\mathcal G_{tr} = \{\mathcal V_{tr}, \mathcal E_{tr}\}$, where $\mathcal E_{tr}\subset \mathcal V_{tr} \times \mathcal V_{tr}$ only contains edges for nodes in $\mathcal V_{tr}$ and the trained model is tested with the whole graph $\mathcal G$ for prediction on $\mathcal V_{te}$. Another important reason why we adopt the inductive learning setting is to guarantee a fair comparison between PMLP and its GNN counterpart as the information of validation/testing nodes are not used for training MLP/PMLP.
|
| 53 |
+
|
| 54 |
+
[]{#main_result label="main_result"}
|
| 55 |
+
|
| 56 |
+
[]{#tab_large label="tab_large"}
|
| 57 |
+
|
| 58 |
+
**How do PMLPs perform compared with GNNs and MLP on common benchmarks?** The main results for comparing the testing accuracy of MLP, PMLPs and GNNs on seven benchmark datasets are shown in Table. [\[main_result\]](#main_result){reference-type="ref" reference="main_result"}. We found that, intriguingly, three variants of PMLPs consistently outperform MLP by a large margin on all the datasets despite using the same model with the same set of trainable parameters (furthermore, even the same parameter weights are shared by the trained MLP and PMLPs since they adopt the same implementation during training). Moreover, PMLPs are as effective as their GNN counterparts and can even exceed GNNs in some cases. These results suggest two implications. First, the performance improvement brought by GNNs (or more specifically, the MP operation) over MLP may not purely stem from the more advanced representational power, but the generalization ability. Second, the message passing can indeed contribute to better generalization ability of MLP, though it currently remains unclear how it helps MLP to generalize on unseen testing data. We will later try to shed some light on this in-depth question via theoretical analysis in Section [4](#sec_theory){reference-type="ref" reference="sec_theory"}.
|
| 59 |
+
|
| 60 |
+
**How do PMLPs perform on larger datasets?** We next apply PMLPs to larger graphs which can be harder for extracting informative features from observed data. The results on three large-scale datasets are shown in Table [\[large_result\]](#large_result){reference-type="ref" reference="large_result"} where PMLP still considerably outperforms MLP. Yet differently, there is a certain gap between PMLP and GNN. These results are also valuable, and we conjecture that this is because in such large graphs the relations between inputs and target labels can be more complex, which requires more expressive architectures for learning desired node-level representations. This hypothesis is further validated by the results of training losses in Table [\[large_result\]](#large_result){reference-type="ref" reference="large_result"}, which indeed shows that GNNs can yield lower fitting error on the training data, presumably contributing to better accuracy on testing data. Critically though, these results also imply that the MP design works for learning on graph data due to two-fold aspects: better representational power as well as better generalization.
|
| 61 |
+
|
| 62 |
+
We next conduct more experiments and comparison as further investigation for verifying the consistency of the observed phenomenon across different settings regarding model implementation and graph property. We also try to reveal how PMLPs work for representation learning through a close look at the produced embeddings by different models, and the results are deferred to Appendix [11](#app_add){reference-type="ref" reference="app_add"}.
|
| 63 |
+
|
| 64 |
+
<figure id="fig_hyper" data-latex-placement="t!">
|
| 65 |
+
|
| 66 |
+
<figcaption>Performance variation with increasing layer number and size of hidden states. (See complete results in Appendix <a href="#app_add" data-reference-type="ref" data-reference="app_add">11</a>).</figcaption>
|
| 67 |
+
</figure>
|
| 68 |
+
|
| 69 |
+
<figure id="fig_model" data-latex-placement="t!">
|
| 70 |
+
|
| 71 |
+
<figcaption>Performance variation with different activation functions in FF layer and different transition matrices in MP layer. (See complete results in Appendix <a href="#app_add" data-reference-type="ref" data-reference="app_add">11</a>)</figcaption>
|
| 72 |
+
</figure>
|
| 73 |
+
|
| 74 |
+
<figure id="fig_info" data-latex-placement="t!">
|
| 75 |
+
<embed src="fig/split_coarsen_noise_gcn.pdf" style="width:90.0%" />
|
| 76 |
+
<figcaption>Impact of graph structural information by changing data split, sparsifying the graph, adding random structural noise on Cora. (See complete results in Appendix <a href="#app_add" data-reference-type="ref" data-reference="app_add">11</a>)</figcaption>
|
| 77 |
+
</figure>
|
| 78 |
+
|
| 79 |
+
**Q1: What is the impact of model layers and hidden sizes?** In Fig. [2](#fig_hyper){reference-type="ref" reference="fig_hyper"}(a) and (b), we plot the testing accuracy of GCN, PMLP$_{GCN}$ and MLP w.r.t. different layer numbers and hidden sizes. The results show that the observed phenomenon in Section [3.1](#sec-exp-result){reference-type="ref" reference="sec-exp-result"} consistently holds with different settings of model depth and width, which suggests that the generalization effect of the MP operation is insensitive to model architectural hyperparameters. The increase of layer numbers causes performance degradation for all three models, presumably because of over-fitting.
|
| 80 |
+
|
| 81 |
+
**Q2: What is the impact of different activation functions in FF layer?** As shown in Fig. [3](#fig_model){reference-type="ref" reference="fig_model"}(a), the relative performance rankings among GCN, PMLP and MLP stay consistent across four different activation functions (tanh, cos, ELU, ReLU) and in particular, in some cases the performance gain of PMLP over GCN is further amplified (e.g., with cos activation).
|
| 82 |
+
|
| 83 |
+
**Q3: What is the impact of different propagation schemes in MP functions?** We replace the original symmetric normalized transition matrix ${\mathbf P}_{\text{sym}} = \tilde{\mathbf D}^{-\frac{1}{2}}\tilde{\mathbf A}\tilde{\mathbf D}^{-\frac{1}{2}}$ used in the MP layer by other commonly used transition matrices: 1) ${\mathbf P}_{\text{no-loop}} = {\mathbf D}^{-\frac{1}{2}}{\mathbf A}{\mathbf D}^{-\frac{1}{2}}$, i.e., removing self-loop; 2) ${\mathbf P}_{\text{rw}} = \tilde{\mathbf D}^{-1}\tilde{\mathbf A}$, i.e., random walk matrix; 3) ${\mathbf P}_{\text{diff}} = \sum_{k=0}^{\infty}\frac{1}{e\cdot k!}(\tilde{\mathbf D}^{-1}\tilde{\mathbf A})^k$, i.e., heat kernel diffusion matrix. The results are presented in Fig. [3](#fig_model){reference-type="ref" reference="fig_model"}(b) where we found that the relative performance rankings of three models remain nearly unchanged when replacing the original MP with random walk and diffusion matrices. And, intriguingly, the performance of GNNs degrades dramatically after removing the self-loop, while the accuracy of PMLPs stays at almost the same level. The possible reason is that the self-loop connection plays an important role in GCN's training stage for preserving enough centered nodes' information, but does not affect PMLP.
|
| 84 |
+
|
| 85 |
+
**Q4: What is the impact of training proportion?** We show in Fig. [4](#fig_info){reference-type="ref" reference="fig_info"}(left) that the labeled portion for training data has negligible impacts on the relative performance of three models, indicating the phenomenon is consistent regardless of the amount of graph structure information used in training.
|
| 86 |
+
|
| 87 |
+
**Q5: What is the impact of graph sparsity?** As suggested by Fig. [4](#fig_info){reference-type="ref" reference="fig_info"}(middle), when the graph goes sparser, the absolute performance of GCN and PMLP degrades yet their performance gap remains unchanged. This shows that the quality of input graphs indeed impacts the testing performance and tends to control the performance upper bound of the models. Critically though, the generalization effect brought by PMLP is insensitive to the graph completeness.
|
| 88 |
+
|
| 89 |
+
**Q6: What is the impact of noisy structure?** As shown in Fig. [4](#fig_info){reference-type="ref" reference="fig_info"}(right), the performances of both PMLP and GCN tend to decrease as we gradually add random connections to the graph, whose amount is controlled by the noise ratio (defined as $\mbox{\# noisy edges} / |\mathcal E|$), while PMLP shows better robustness to such noise presumably because, again, noisy structures could have negative effects on GNNs in training but not PMLP.
|
| 90 |
+
|
| 91 |
+
Towards theoretically answering why "GNNs are inherently good generalizers" and explaining the superior generalization performance of PMLP and GNN, we next compare MLP, PMLP and GNN from the *Neural Tangent Kernel* (NTK) perspective, and examine their extrapolation behaviors.
|
| 92 |
+
|
| 93 |
+
**Linearization of Neural Networks.** For a neural network $f(\boldsymbol x; \boldsymbol{\theta}): \mathcal X \rightarrow \mathbb R$ with initial parameters $\boldsymbol{\theta}_0$ and a fixed input sample $\boldsymbol x$, performing first-order Taylor expansion around $\boldsymbol{\theta}_0$ yields the linearized form of NNs [@lee2019wide] as follows: $$\begin{equation}
|
| 94 |
+
\label{eq_linearization}
|
| 95 |
+
{f}^{lin}(\boldsymbol x; \boldsymbol{\theta})=f(\boldsymbol x; \boldsymbol{\theta}_0)+\nabla_{\boldsymbol{\theta}} f(\boldsymbol x; \boldsymbol{\theta}_0)^\top\left(\boldsymbol{\theta}-\boldsymbol{\theta}_0\right),
|
| 96 |
+
\end{equation}$$ where the gradient $\nabla_{\boldsymbol{\theta}} f(\boldsymbol x; \boldsymbol{\theta}_0)$ could be thought of as a feature map $\phi(\boldsymbol x): \mathcal X \rightarrow \mathbb R^{|\boldsymbol{\theta}|}$, depending on the specific initialization. As such, when $\boldsymbol{\theta}_0$ is initialized by Gaussian distribution with certain scaling and the network width tends to infinity (i.e., $m \rightarrow \infty$, where $m$ denotes the layer width), the feature map becomes constant and is determined by the model architecture (e.g., MLP, GNN and CNN), inducing a kernel called the *neural tangent kernel* (NTK) [@jacot2018neural]: $$\begin{equation}
|
| 97 |
+
\operatorname{NTK}(\boldsymbol x_i, \boldsymbol x_j) = \phi_{ntk}(\boldsymbol x_i)^\top \phi_{ntk}(\boldsymbol x_j) = \left\langle \nabla_{\boldsymbol{\theta}} f(\boldsymbol x_i; \boldsymbol{\theta}), \nabla_{\boldsymbol{\theta}} f(\boldsymbol x_j; \boldsymbol{\theta}) \right\rangle
|
| 98 |
+
\end{equation}$$
|
| 99 |
+
|
| 100 |
+
**Kernel Regression with NTK.** Recent works [@liu2020linearity; @liu2020toward] show that the spectral norm of Hessian matrix in the Taylor series tends to zero with increasing width by $\Theta(1/\sqrt{m})$, and hence the linearization becomes almost exact. Therefore, training an over-parameterized NN using gradient descent with infinitesimal step size is equivalent to kernel regression with NTK [@arora2019exact]: $$\begin{equation}
|
| 101 |
+
f(\boldsymbol x; \boldsymbol w) = \boldsymbol w^\top \phi_{ntk}(\boldsymbol x), \quad \mathcal{L}(\boldsymbol w)=\frac{1}{2} \sum_{i=1}^n\left(y_i-\boldsymbol w^\top \phi_{ntk}(\boldsymbol x_i )\right)^2.
|
| 102 |
+
\end{equation}$$
|
| 103 |
+
|
| 104 |
+
We next show the equivalence of MLP and PMLP in training at infinite width limit (NTK regime).
|
| 105 |
+
|
| 106 |
+
::: {#prop1 .proposition}
|
| 107 |
+
**Proposition 1**. *MLP and its corresponding PMLP have the same minimum RKHS-norm NTK kernel regression solution, but differ from that of GNNs, i.e., $\boldsymbol w^*_{mlp} = \boldsymbol w^*_{pmlp} \neq \boldsymbol w^*_{gnn}$.*
|
| 108 |
+
:::
|
| 109 |
+
|
| 110 |
+
**Implications**. From the NTK perspective, stacking additional message passing layers in the testing phase implies transforming the fixed feature map from that of MLP $\phi_{mlp}(\boldsymbol x)$ to that of GNN $\phi_{gnn}(\boldsymbol x)$, while fixing $\boldsymbol w$. Given $\boldsymbol w^*_{mlp} = \boldsymbol w^*_{pmlp}$, the superior generalization performance of PMLP (i.e., the key factor of performance surge from MLP to GNN) can be explained by such transformation of feature map to $\phi_{gnn}(\boldsymbol x)$ in testing: $$\begin{equation}
|
| 111 |
+
\begin{split}
|
| 112 |
+
f_{mlp}(\boldsymbol x) = \boldsymbol w^{*^\top}_{mlp} \;\phi_{mlp}(\boldsymbol x), \quad f_{pmlp}(\boldsymbol x) = \boldsymbol w^{*^\top}_{mlp} \;\phi_{gnn}(\boldsymbol x), \quad f_{gnn}(\boldsymbol x) = \boldsymbol w^{*^\top}_{gnn} \;\phi_{gnn}(\boldsymbol x).
|
| 113 |
+
\end{split}
|
| 114 |
+
\end{equation}$$ This perspective significantly simplifies the subsequent theoretical analysis and might inspire more works in this direction. In the following, we first derive the NTK feature map for PMLP and GNN in node regression, and will use the result for analysing the extrapolation behavior of PMLP.
|
| 115 |
+
|
| 116 |
+
**Graph NTK in Node-Level Regression.** Following existing works that analyse shallow and wide NNs [@arora2019fine; @chizat2020implicit; @xu2021neural], we focus on a two-layer GNN using average aggregation with self-connection. By extending the original definition of graph neural tangent kernel (GNTK) [@du2019graph] from graph-level regression to node-level regression as specified in Appendix [7](#app_lemma3){reference-type="ref" reference="app_lemma3"}, we have the following explicit form of GNTK feature map for a two-layer GNN denoted as $\phi_{gnn}(\boldsymbol x)$.
|
| 117 |
+
|
| 118 |
+
::: {#lemma3 .lemma}
|
| 119 |
+
**Lemma 2**. *The explicit form of GNTK feature map for a two-layer GNN $\phi_{gnn}(\boldsymbol x)$ with average aggregation and ReLU activation in node regression is $$\begin{equation}
|
| 120 |
+
\phi_{gnn}(\boldsymbol x_i) = c \sum_{j \in \mathcal N_i \cup\{i\}}\left[\mathbf{X}^{\top} \boldsymbol{a}_j \cdot \mathbb{I}_{+}\left(\boldsymbol{w}^{(k)^{\top}} \mathbf{X}^{\top} \boldsymbol{a}_j \right), \boldsymbol{w}^{(k)^{\top}} \mathbf{X}^{\top} \boldsymbol{a}_j \cdot \mathbb{I}_{+}\left(\boldsymbol{w}^{(k)^{\top}} \mathbf{X}^{\top} \boldsymbol{a}_j \right), \ldots\right],
|
| 121 |
+
\end{equation}$$ where $c=O(\tilde{d}^{-1})$ is a constant proportional to the inverse of node degree, $\mathbf X\in \mathbb R^{n\times d}$ is node features, $\boldsymbol a_i\in \mathbb R^{n}$ denotes adjacency vector of node $i$, $\boldsymbol{w}^{(k)} \sim \mathcal{N}(\mathbf{0}, \boldsymbol{I}_d)$ is a random Gaussian vector in $\mathbb R^d$, two components in the brackets repeat infinitely many times with $k$ ranging from $1$ to $\infty$, and $\mathbb{I}_{+}$ is an indicator function that outputs $1$ if the input is positive otherwise $0$.*
|
| 122 |
+
:::
|
| 123 |
+
|
| 124 |
+
As indicated by Proposition [1](#prop1){reference-type="ref" reference="prop1"}, the fundamental difference between MLP and PMLP at infinite width limit stems from the difference of feature map in the testing phase. This reduces the problem of explaining the success of GNN to the question of why this change is significant for generalizability.
|
| 125 |
+
|
| 126 |
+
**Extrapolation Behavior of MLP**. One important aspect of generalization analysis is regarding model's behavior when confronted with OOD testing samples (i.e., testing nodes that are considerably outside the training support), a.k.a. *extrapolation analysis*. A previous study on this direction [@xu2021neural] reveals that a standard MLP with ReLU activation quickly converges to a linear function as the testing sample escapes the training support, which is formalized by the following theorem.
|
| 127 |
+
|
| 128 |
+
::: {#thm_hownn .theorem}
|
| 129 |
+
**Theorem 3**. ***[@xu2021neural]**. Suppose $f_{mlp}(\boldsymbol x)$ is an infinitely-wide two-layer MLP with ReLU trained by square loss. For any direction $\boldsymbol{v} \in \mathbb{R}^d$ and step size $\Delta t>0$, let $\boldsymbol{x}_0 = t \boldsymbol{v}$, we have $$\begin{equation}
|
| 130 |
+
\label{eq_thm3}
|
| 131 |
+
\left|\frac{\left(f_{mlp}(\boldsymbol{x}_0+\Delta t \boldsymbol{v})-f_{mlp}(\boldsymbol{x}_0)\right)/\Delta t}{c_{\boldsymbol{v}}} - 1\right|= O(\frac{1}{t}).
|
| 132 |
+
\end{equation}$$ where $c_{\boldsymbol{v}}$ is a constant linear coefficient. That is, as $t\rightarrow \infty$, $f_{mlp}(\boldsymbol x_0)$ converges to a linear function.*
|
| 133 |
+
:::
|
| 134 |
+
|
| 135 |
+
The intuition behind this phenomenon is the fact that ReLU MLPs learn piece-wise linear functions with finitely many linear regions and thus eventually become linear outside the training data support. Now a naturally arising question is how PMLP compares with MLP regarding extrapolation.
|
| 136 |
+
|
| 137 |
+
**Extrapolation Behavior of PMLP**. Based on the explicit formula for $\phi_{gnn}(\boldsymbol x)$ in Lemma [2](#lemma3){reference-type="ref" reference="lemma3"}, we extend the theoretical result of extrapolation analysis from MLP to PMLP. Our first finding is that, as the testing node feature becomes increasingly outside the range of training data, alike MLP, PMLP (as well as GNN) with average aggregation also converges to a linear function, yet the corresponding linear coefficient reflects its ego-graph property rather than being a fixed constant.
|
| 138 |
+
|
| 139 |
+
::: {#thm4 .theorem}
|
| 140 |
+
**Theorem 4**. *Suppose $f_{pmlp}(\boldsymbol x)$ is an infinitely-wide two-layer MLP with ReLU activation trained using squared loss, and adds an average message passing layer before each feed-forward layer in the testing phase. For any direction $\boldsymbol{v} \in \mathbb{R}^d$ and step size $\Delta t>0$, let $\boldsymbol{x}_0 = t \boldsymbol{v}$, and as $t\rightarrow \infty$, we have $$\begin{equation}
|
| 141 |
+
\left(f_{pmlp}(\boldsymbol{x}_0+\Delta t \boldsymbol{v})-f_{pmlp}(\boldsymbol{x}_0)\right)/\Delta t \quad \rightarrow \quad c_{\boldsymbol v}\sum_{i\in \mathcal N_{0} \cup \{0\}} ({\tilde d}\cdot\tilde{d}_i)^{-1}.
|
| 142 |
+
\end{equation}$$ where $c_{\boldsymbol{v}}$ is the same constant as in Theorem [3](#thm_hownn){reference-type="ref" reference="thm_hownn"}, $\tilde d_0 = \tilde d$ is the node degree (with self-connection) of $\boldsymbol x_0$, and $\tilde d_i$ is the node degree of its neighbors.*
|
| 143 |
+
:::
|
| 144 |
+
|
| 145 |
+
**Remark**. This result also applies to infinitely-wide two-layer GNN with ReLU activation and average aggregation in node regression settings, except the constant $c_{\boldsymbol v}$ is different from that of MLP. The theoretical results presented here and in the following are applicable to both inductive and transductive settings since the variable $\boldsymbol w^{*}_{gnn}$ is not of interest in the analysis.
|
| 146 |
+
|
| 147 |
+
**Convergence Comparison**. Though both MLP and PMLP tend to linearize for outlier testing samples, indicating that they have common difficulty to extrapolate non-linear functions, we find that PMLP in general has more freedom to deviate from the convergent linear coefficient, implying smoother transition from in-distribution (non-linear) to out-of-distribution (linear) regime and thus could potentially generalize to out-of-distribution samples near the range of training data.
|
| 148 |
+
|
| 149 |
+
::: {#thm5 .theorem}
|
| 150 |
+
**Theorem 5**. *Suppose all node features are normalized, and the cosine similarity between node $\boldsymbol x_i$ and the average of its neighbors is denoted as $\alpha_i \in [0,1]$. Then, the convergence rate for $f_{pmlp}(\boldsymbol x)$ is $$\begin{equation}
|
| 151 |
+
\label{eq_thm5}
|
| 152 |
+
\left|\frac{\left(f_{pmlp}(\boldsymbol{x}_0+\Delta t \boldsymbol{v})-f_{pmlp}(\boldsymbol{x}_0)\right)/\Delta t}{c_{\boldsymbol v}\sum_{i\in \mathcal N_{0} \cup \{0\}} ({\tilde d}\cdot\tilde{d}_i)^{-1}} - 1\right| = O\left(\frac{1+(\tilde{d}_{max} -1) \sqrt{1-\alpha_{min}^2}}{ t}\right).
|
| 153 |
+
\end{equation}$$ where $\alpha_{min} = \operatorname{min}\{\alpha_i\}_{i\in \mathcal N_0 \cup \{0\}} \in [0,1]$, and $\tilde{d}_{max}\geq 1$ denotes the maximum node degree in the testing node $\boldsymbol x_0$'s neighbors (including itself).*
|
| 154 |
+
:::
|
| 155 |
+
|
| 156 |
+
This result indicates larger node degree and feature dissimilarity imply smoother transition and better compatibility with OOD samples. Specifically, when the testing node's degree is $1$ (connection to itself), PMLP becomes equivalent to MLP. As reflected in Eq. [\[eq_thm5\]](#eq_thm5){reference-type="ref" reference="eq_thm5"}, $\tilde{d}_{max} = 1$, and the bound degrades to that of MLP in Eq. [\[eq_thm3\]](#eq_thm3){reference-type="ref" reference="eq_thm3"}. Moreover, when all node features are equal, message passing will become meaningless. Correspondingly, $\alpha_{min} = 1$, and the bound also degrades to that of MLP.
|
| 157 |
+
|
| 158 |
+
**Other sources of performance gap between MLP and GNN / When PMLP fails?** Besides the intrinsic generalizability of GNNs that is revealed by the performance gain from MLP to PMLP in this work, we note that there are some other less significant but non-negligible sources that contribute to the performance gap between GNN and MLP in node prediction tasks:
|
| 159 |
+
|
| 160 |
+
$\bullet$ **Expressiveness:** While our experiments find that GNNs and PMLPs can perform similarly in most cases, showing great advantage over MLPs in generalization, in practice, there still exists a certain gap between their expressiveness, which can be amplified in large datasets and cause certain degrees of performance difference. This is reflected by our experiments on three large-scale datasets. Despite that, we can see from Table [\[tab_large\]](#tab_large){reference-type="ref" reference="tab_large"} that the intrinsic generalizability of GNN (corresponding to $\Delta_{mlp}$) is still the major source of performance gain from MLP, and requires further in-depth investigation.
|
| 161 |
+
|
| 162 |
+
$\bullet$ **Semi-Supervised / Transductive Learning:** As our default experimental setting, inductive learning ensures that testing samples are unobserved during training and keeps the comparison among models fair and reasonable. However in practice, the ability to leverage the information of unlabeled nodes in training is a well-known advantage of GNN (but not the advantage of PMLP). Indeed, we also empirically find that PMLP, combined with some classic semi-supervised learning methods such as manifold regularization [@belkin2006manifold] and label propagation [@zhu2003semi] can again approach GNN in semi-supervised learning setting. Therefore, it would also be valuable to theoretically study the efficacy of GNN in terms of how it benefits from unlabeled nodes in training in comparison with other semi-supervised learning methods.
|
| 163 |
+
|
| 164 |
+
**Current Limitations and Outlooks.** From modeling perspective, the current design of PMLP does not take into account parametrized MP layers such as the one used in GAT [@velivckovic2017graph], but could be further extended, e.g., by training parametrized MP layers as independent modules, which will be left as future work. For theoretical analysis, the result in Theorem [5](#thm5){reference-type="ref" reference="thm5"} provides a bound to show PMLP's better potential in OOD generalization, rather than guaranteeing its superior generalization capability. More theoretical works are expected to be done to answer when and why PMLPs perform closely to their GNN counterparts. Moreover, following most theoretical works on NTK, we consider the regression task with squared loss for analysis instead of classification. However, as existing evidence [@janocha2017loss; @hui2020evaluation] shows squared loss can be as competitive as softmax cross-entropy loss, the insights obtained from regression tasks could also apply to classification tasks.
|
| 165 |
+
|
| 166 |
+
**Conclusion.** In this work, we bridge MLP and GNN by introducing an intermediate model class called PMLP, which is equivalent to MLP in training, but shows significantly better generalization performance after adding non-parametric message passing layers in testing and can rival with its GNN counterpart in most cases. This phenomenon is consistent across different datasets and experimental settings. To shed some light on this phenomenon, we show that although both MLP and PMLP cannot extrapolate non-linear functions, PMLP converges more slowly, indicating a smoother transition and better tolerance for out-of-distribution samples.
|
| 167 |
+
|
| 168 |
+
To analyse the extrapolation behavior of PMLP and compare it with MLP, we first recall that, as mentioned in the main text, training an infinitely wide neural network using gradient descent with infinitesimal step size is equivalent to solving kernel regression with the so-called NTK by minimizing the following squared loss function: $$\begin{equation}
|
| 169 |
+
\label{eq_kernelreg}
|
| 170 |
+
f(\boldsymbol x; \boldsymbol w) = \boldsymbol w^\top \phi_{ntk}(\boldsymbol x), \quad \mathcal{L}(\boldsymbol w)=\frac{1}{2} \sum_{i=1}^n\left(y_i-\boldsymbol w^\top \phi_{ntk}(\boldsymbol x_i )\right)^2.
|
| 171 |
+
\end{equation}$$ Let us now consider an arbitrary minimizer $\boldsymbol w^* \in \mathcal H$ in NTK's reproducing kernel Hilbert space. The minimizer could be further decomposed as $\boldsymbol w^* = \hat{\boldsymbol w}^* + \boldsymbol w_{\bot}$, where $\hat{\boldsymbol w}^*$ lies in the linear span of feature mappings for training data and $\boldsymbol w_{\bot}$ is orthogonal to $\hat{\boldsymbol w}^*$, i.e., $$\begin{equation}
|
| 172 |
+
\hat{\boldsymbol w}^* = \sum_{i=1}^{n} \lambda_i \cdot \phi_{ntk}(\boldsymbol x_i), \quad \langle \hat{\boldsymbol w}^*, \boldsymbol w_{\bot} \rangle_{\mathcal H} = 0.
|
| 173 |
+
\end{equation}$$ One observation is that the loss function is unchanged after removing the orthogonal component $\boldsymbol w_{\bot}$: $$\begin{equation}
|
| 174 |
+
\begin{split}
|
| 175 |
+
\mathcal{L}(\boldsymbol w^*) &= \frac{1}{2} \sum_{i=1}^n\left(y_i-\langle\sum_{j=1}^{n} \lambda_j \cdot \phi_{ntk}(\boldsymbol x_j) + \boldsymbol w_{\bot}, \phi_{ntk}(\boldsymbol x_i )\rangle_{\mathcal H}\right)^2\\
|
| 176 |
+
&= \frac{1}{2} \sum_{i=1}^n\left(y_i-\langle\sum_{j=1}^{n} \lambda_j \cdot \phi_{ntk}(\boldsymbol x_j), \phi_{ntk}(\boldsymbol x_i )\rangle_{\mathcal H}\right)^2 = \mathcal{L}(\hat{\boldsymbol w}^*).
|
| 177 |
+
\end{split}
|
| 178 |
+
\end{equation}$$ This indicates that $\hat{\boldsymbol w}^*$ is also a minimizer whose $\mathcal H$-norm is smaller than that of $\boldsymbol w^*$, i.e., $\|\hat{\boldsymbol w}^*\|_{\mathcal H} \leq \|{\boldsymbol w}^*\|_{\mathcal H}$. It follows that the minimum $\mathcal H$-norm solution for Eq. [\[eq_kernelreg\]](#eq_kernelreg){reference-type="ref" reference="eq_kernelreg"} can be expressed as a linear combination of feature mappings for training data. Therefore, solving Eq. [\[eq_kernelreg\]](#eq_kernelreg){reference-type="ref" reference="eq_kernelreg"} boils down to solving a linear system with coefficients $\boldsymbol{\lambda} = [\lambda_i]_{i=1}^n$. Resultingly, the minimum $\mathcal H$-norm solution is $$\begin{equation}
|
| 179 |
+
\boldsymbol w^*=\sum_{i=1}^n\left[{\mathbf K}^{-1}\boldsymbol y\right]_i \phi_{ntk}(\boldsymbol x_i),
|
| 180 |
+
\end{equation}$$ where $\mathbf K \in \mathbb R^{n\times n}$ is the kernel matrix for training data and $\boldsymbol y = [y_1, \ldots, y_n]^\top$ stacks the training labels. We see that the final solution is only dependent on training data $\{\boldsymbol x_i, y_i\}_{i=1}^n$ and the model architecture used in training (since it determines the form of $\phi_{ntk}(\boldsymbol x_i)$). It follows immediately that the min-norm NTK kernel regression solution is equivalent for MLP and PMLP (i.e., $\boldsymbol w^*_{mlp} = \boldsymbol w^*_{pmlp}$) given that they are the same model trained on the same set of data. In contrast, the architecture of GNN is different from that of MLP, implying different form of feature map, and hence they have different solutions in their respective NTK kernel regression problems (i.e., $\boldsymbol w^*_{mlp} \neq \boldsymbol w^*_{gnn}$).
|
| 181 |
+
|
| 182 |
+
Graph Neural Tangent Kernel (GNTK) [@du2019graph] is a natural extension of NTK to graph neural networks. Originally, the squared loss function is defined for graph-level regression and the kernel function is defined over a pair of graphs: $$\begin{equation}
|
| 183 |
+
\operatorname{GNTK}(\mathcal G_i, \mathcal G_j) = \phi_{gnn}(\mathcal G_i)^\top \phi_{gnn}(\mathcal G_j) = \left\langle \nabla_{\boldsymbol{\theta}} f(\mathcal G_i; \boldsymbol{\theta}), \nabla_{\boldsymbol{\theta}} f(\mathcal G_j; \boldsymbol{\theta}) \right\rangle,
|
| 184 |
+
\end{equation}$$ $$\begin{equation}
|
| 185 |
+
\mathcal{L}(\boldsymbol{\theta})=\frac{1}{2} \sum_{i=1}^n\left(y_i-f(\mathcal G_i; \boldsymbol{\theta})\right)^2,
|
| 186 |
+
\end{equation}$$ where $f(\mathcal G_i; \boldsymbol{\theta})$ yields prediction for a graph such as the property of a molecule. The formula for calculating GNTK is given by the following (where we modify the original notation for clarity and alignment with our definition of GNTK in node-level regression setting): $$\begin{equation}
|
| 187 |
+
\begin{split}
|
| 188 |
+
\left[\operatorname{GNTK}^{(0)}(\mathcal G_i, \mathcal G_j)\right]_{uu'}& =\left[\boldsymbol{\Sigma}^{(0)}\left(\mathcal G_i, \mathcal G_j\right)\right]_{uu'} = \boldsymbol x_u^\top \boldsymbol x_{u'}, \\
|
| 189 |
+
\text{where}\quad&\boldsymbol x_u\in \mathcal V_i, \boldsymbol x_{u'}\in \mathcal V_j.
|
| 190 |
+
\end{split}
|
| 191 |
+
\end{equation}$$ The message passing operation in each layer corresponds to: $$\begin{equation}
|
| 192 |
+
\begin{split}
|
| 193 |
+
\left[\boldsymbol{\Sigma}_{mp}^{(\ell)}\left(\mathcal G_i, \mathcal G_j\right)\right]_{u u^{\prime}}&=c_u c_{u^{\prime}} \sum_{v \in \mathcal{N}_u \cup\{u\}} \sum_{v^{\prime} \in \mathcal{N}_{u^{\prime}} \cup\left\{u^{\prime}\right\}}\left[\boldsymbol{\Sigma}^{(\ell)}\left(\mathcal G_i, \mathcal G_j\right)\right]_{v v^{\prime}} \\
|
| 194 |
+
\left[\operatorname{GNTK}_{mp}^{(\ell)}\left(\mathcal G_i, \mathcal G_j\right)\right]_{u u^{\prime}}&=c_u c_{u^{\prime}} \sum_{v \in \mathcal{N}_u \cup\{u\}} \sum_{v^{\prime} \in \mathcal{N}_{u^{\prime}} \cup\left\{u^{\prime}\right\}}\left[\operatorname{GNTK}^{(\ell)}\left(\mathcal G_i, \mathcal G_j\right)\right]_{v v^{\prime}},
|
| 195 |
+
\end{split}
|
| 196 |
+
\end{equation}$$ where $c_u$ denotes a scaling factor. The calculation formula of feed-forward operation (from $\operatorname{GNTK}_{mp}^{(\ell-1)}\left(\mathcal G_i, \mathcal G_j\right)$ to $\operatorname{GNTK}^{(\ell)}\left(\mathcal G_i, \mathcal G_j\right)$) is similar to that for NTKs of MLP [@jacot2018neural]. The final output of GNTK (without jumping knowledge) is calculated by $$\begin{equation}
|
| 197 |
+
\operatorname{GNTK}\left(\mathcal G_i, \mathcal G_j\right)= \sum_{u \in \mathcal V_i, u^{\prime} \in \mathcal V_j}\left[\operatorname{GNTK}^{(L-1)}\left(\mathcal G_i, \mathcal G_j\right)\right]_{u u^{\prime}}.
|
| 198 |
+
\end{equation}$$
|
| 199 |
+
|
| 200 |
+
We next extend the above definition of GNTK to the node-level regression setting, where the model $f(\boldsymbol x; \boldsymbol{\theta}, \mathcal G)$ outputs prediction of a node, and the kernel function is defined over a pair of nodes in a single graph: $$\begin{equation}
|
| 201 |
+
\operatorname{GNTK}(\boldsymbol x_i, \boldsymbol x_j) = \phi_{gnn}(\boldsymbol x_i)^\top \phi_{gnn}(\boldsymbol x_j) = \left\langle \nabla_{\boldsymbol{\theta}} f(\boldsymbol x_i; \boldsymbol{\theta}, \mathcal G), \nabla_{\boldsymbol{\theta}} f(\boldsymbol x_j; \boldsymbol{\theta}, \mathcal G) \right\rangle,
|
| 202 |
+
\end{equation}$$ $$\begin{equation}
|
| 203 |
+
\mathcal{L}(\boldsymbol{\theta})=\frac{1}{2} \sum_{i=1}^n\left(y_i-f(\boldsymbol x_i; \boldsymbol{\theta}, \mathcal G)\right)^2.
|
| 204 |
+
\end{equation}$$ Then, the explicit formula for GNTK in node-level regression is as follows. $$\begin{equation}
|
| 205 |
+
\begin{split}
|
| 206 |
+
\operatorname{GNTK}^{(0)}(\boldsymbol x_i, \boldsymbol x_j) = \boldsymbol{\Sigma}^{(0)}\left(\boldsymbol x_i, \boldsymbol x_j\right) = \boldsymbol x_i^\top \boldsymbol x_j,
|
| 207 |
+
\end{split}
|
| 208 |
+
\end{equation}$$ Without loss of generality, we consider using random walk matrix as implementation of message passing. Then, the **message passing operation** in each layer corresponds to: $$\begin{equation}
|
| 209 |
+
\begin{split}
|
| 210 |
+
\boldsymbol{\Sigma}_{mp}^{(\ell)}\left(\boldsymbol x_i, \boldsymbol x_j\right)&=\frac{1}{(|\mathcal N_i|+1)(|\mathcal N_j|+1)} \sum_{i' \in \mathcal N_i \cup\{i\}} \sum_{j' \in \mathcal N_j \cup\left\{j\right\}} \boldsymbol{\Sigma}^{(\ell)}\left(\boldsymbol x_{i'}, \boldsymbol x_{j'}\right) \\
|
| 211 |
+
\operatorname{GNTK}_{mp}^{(\ell)}\left(\boldsymbol x_i, \boldsymbol x_j\right)&=\frac{1}{(|\mathcal N_i|+1)(|\mathcal N_j|+1)} \sum_{i' \in \mathcal N_i \cup\{i\}} \sum_{j' \in \mathcal N_j \cup\left\{j\right\}} \operatorname{GNTK}^{(\ell)}\left(\boldsymbol x_{i'}, \boldsymbol x_{j'}\right),
|
| 212 |
+
\end{split}
|
| 213 |
+
\end{equation}$$ Moreover, the **feed-forward operation** in each layer corresponds to: $$\begin{equation}
|
| 214 |
+
\begin{split}
|
| 215 |
+
\boldsymbol{\Sigma}^{(\ell)}\left(\boldsymbol x_i, \boldsymbol x_j\right)&=c \cdot \underset{u, v \sim \mathcal{N}\left(\mathbf{0}, \boldsymbol{\Lambda}^{(\ell)}\right)}{\mathbb{E}}[\sigma(u) \sigma(v)], \\
|
| 216 |
+
\dot{\boldsymbol{\Sigma}}^{(\ell)}\left(\boldsymbol x_i, \boldsymbol x_j\right)&=c \cdot \underset{u, v \sim \mathcal{N}\left(\mathbf{0}, \boldsymbol{\Lambda}^{(\ell)}\right)}{\mathbb{E}}[\dot{\sigma}(u) \dot{\sigma}(v)], \\
|
| 217 |
+
\operatorname{GNTK}^{(\ell)}(\boldsymbol x_i, \boldsymbol x_j) &= \operatorname{GNTK}_{mp}^{(\ell-1)}(\boldsymbol x_i, \boldsymbol x_j) \cdot \dot{\boldsymbol{\Sigma}}^{(\ell)}\left(\boldsymbol x_i, \boldsymbol x_j\right) + {\boldsymbol{\Sigma}}^{(\ell)}\left(\boldsymbol x_i, \boldsymbol x_j\right),\\
|
| 218 |
+
\text{where}\qquad\boldsymbol{\Lambda}^{(\ell)} &= \left[\begin{array}{cc}
|
| 219 |
+
\boldsymbol{\Sigma}_{mp}^{(\ell-1)}(\boldsymbol x_i, \boldsymbol x_i) & \boldsymbol{\Sigma}_{mp}^{(\ell-1)}(\boldsymbol x_i, \boldsymbol x_j) \\
|
| 220 |
+
\boldsymbol{\Sigma}_{mp}^{(\ell-1)}(\boldsymbol x_j, \boldsymbol x_i) & \boldsymbol{\Sigma}_{mp}^{(\ell-1)}(\boldsymbol x_j, \boldsymbol x_j)
|
| 221 |
+
\end{array}\right]
|
| 222 |
+
\end{split}
|
| 223 |
+
\end{equation}$$ Suppose the GNN has $L$ layers and the last layer uses linear transformation that is akin to MLP, the final GNTK in node-level regression is defined as $$\begin{equation}
|
| 224 |
+
\operatorname{GNTK}(\boldsymbol x_i, \boldsymbol x_j) = \operatorname{GNTK}_{mp}^{(L-1)}(\boldsymbol x_i, \boldsymbol x_j)
|
| 225 |
+
\end{equation}$$
|
| 226 |
+
|
| 227 |
+
We next derive the explicit NTK formula for a two-layer graph neural network in node-level regression setting. For notational convenience, we use $\boldsymbol a_i \in \mathbb R^n$ to denote adjacency, i.e., $$\begin{equation}
|
| 228 |
+
(\boldsymbol a_i)_j = \begin{cases}1/(|\mathcal N_i|+1) & \text{ if }(i,j)\in \mathcal E \text{ or } j = i \\
|
| 229 |
+
0 & \text{ if }(i,j)\notin \mathcal E\end{cases},
|
| 230 |
+
\end{equation}$$ and $\mathbf G = \mathbf X \mathbf X^\top \in \mathbb R^{n\times n}$ to denote the Gram matrix of all nodes. Then we have
|
| 231 |
+
|
| 232 |
+
*(First message passing layer)* $$\begin{equation}
|
| 233 |
+
\begin{split}
|
| 234 |
+
&\operatorname{GNTK}^{(0)}(\boldsymbol x_i, \boldsymbol x_j) = \boldsymbol{\Sigma}^{(0)}\left(\boldsymbol x_i, \boldsymbol x_j\right) = \boldsymbol x_i^\top \boldsymbol x_j, \\
|
| 235 |
+
&\operatorname{GNTK}_{mp}^{(0)}(\boldsymbol x_i, \boldsymbol x_j) = \boldsymbol{\Sigma}_{mp}^{(0)}\left(\boldsymbol x_i, \boldsymbol x_j\right) = \boldsymbol a_i^\top \mathbf G \boldsymbol a_j, \\
|
| 236 |
+
\end{split}
|
| 237 |
+
\end{equation}$$
|
| 238 |
+
|
| 239 |
+
*(First feed-forward layer)* $$\begin{equation}
|
| 240 |
+
\begin{split}
|
| 241 |
+
\boldsymbol{\Sigma}^{(1)}\left(\boldsymbol x_i, \boldsymbol x_j\right)&=c \cdot \underset{u, v \sim \mathcal{N}\left(\mathbf{0}, \boldsymbol{\Lambda}^{(1)}\right)}{\mathbb{E}}[\sigma(u) \sigma(v)], \\
|
| 242 |
+
\dot{\boldsymbol{\Sigma}}^{(1)}\left(\boldsymbol x_i, \boldsymbol x_j\right)&=c \cdot \underset{u, v \sim \mathcal{N}\left(\mathbf{0},\boldsymbol{\Lambda}^{(1)}\right)}{\mathbb{E}}[\dot{\sigma}(u) \dot{\sigma}(v)], \\
|
| 243 |
+
\boldsymbol{\Lambda}^{(1)} = \left[\begin{array}{cc}
|
| 244 |
+
\boldsymbol a_i^\top \mathbf G \boldsymbol a_i & \boldsymbol a_i^\top \mathbf G \boldsymbol a_j \\
|
| 245 |
+
\boldsymbol a_j^\top \mathbf G \boldsymbol a_i & \boldsymbol a_j^\top \mathbf G \boldsymbol a_j
|
| 246 |
+
\end{array}\right] &= \left[\begin{array}{c}
|
| 247 |
+
\mathbf X^\top \boldsymbol a_i \\
|
| 248 |
+
\mathbf X^\top \boldsymbol a_j
|
| 249 |
+
\end{array}\right]\cdot\left[\begin{array}{cc}
|
| 250 |
+
\mathbf X^\top \boldsymbol a_i & \mathbf X^\top \boldsymbol a_j
|
| 251 |
+
\end{array}\right]\qquad\qquad\quad
|
| 252 |
+
\end{split}
|
| 253 |
+
\end{equation}$$ By noting that $\boldsymbol a_i^\top \mathbf G \boldsymbol a_j = (\mathbf X^\top \boldsymbol a_i)^\top \mathbf X^\top \boldsymbol a_j$ and substituting $\sigma(k) = k\cdot \mathbb{I}_{+}(k)$, $\dot{\sigma}(k) = \mathbb{I}_{+}(k)$, where $\mathbb{I}_{+}(k)$ is an indicator function that outputs $1$ if $k$ is positive otherwise $0$, we have the following equivalent form for the covariance $$\begin{equation}
|
| 254 |
+
\begin{split}
|
| 255 |
+
\boldsymbol{\Sigma}^{(1)}\left(\boldsymbol x_i, \boldsymbol x_j\right)&=c \cdot \underset{\boldsymbol w \sim \mathcal{N}\left(\mathbf{0}, I_d\right)}{\mathbb{E}}\left[\boldsymbol w^\top \mathbf X^\top \boldsymbol a_i \cdot \mathbb{I}_{+}(\boldsymbol w^\top \mathbf X^\top \boldsymbol a_i)\cdot \boldsymbol w^\top \mathbf X^\top \boldsymbol a_j \cdot \mathbb{I}_{+}(\boldsymbol w^\top \mathbf X^\top \boldsymbol a_j)\right], \\
|
| 256 |
+
\dot{\boldsymbol{\Sigma}}^{(1)}\left(\boldsymbol x_i, \boldsymbol x_j\right)&=c \cdot \underset{\boldsymbol w \sim \mathcal{N}\left(\mathbf{0}, I_d\right)}{\mathbb{E}}\left[\mathbb{I}_{+}(\boldsymbol w^\top \mathbf X^\top \boldsymbol a_i)\cdot \mathbb{I}_{+}(\boldsymbol w^\top \mathbf X^\top \boldsymbol a_j)\right].
|
| 257 |
+
\end{split}
|
| 258 |
+
\end{equation}$$ Hence, we have $$\begin{equation}
|
| 259 |
+
\label{eq_gntk1}
|
| 260 |
+
\begin{split}
|
| 261 |
+
\operatorname{GNTK}^{(1)}(\boldsymbol x_i, \boldsymbol x_j) &= \operatorname{GNTK}_{mp}^{(0)}(\boldsymbol x_i, \boldsymbol x_j) \cdot \dot{\boldsymbol{\Sigma}}^{(1)}\left(\boldsymbol x_i, \boldsymbol x_j\right) + {\boldsymbol{\Sigma}}^{(1)}\left(\boldsymbol x_i, \boldsymbol x_j\right)\\
|
| 262 |
+
&= c \cdot \underset{\boldsymbol w \sim \mathcal{N}\left(\mathbf{0}, I_d\right)}{\mathbb{E}}\left[\boldsymbol a_i^\top \mathbf G \boldsymbol a_j\cdot \mathbb{I}_{+}(\boldsymbol w^\top \mathbf X^\top \boldsymbol a_i)\cdot \mathbb{I}_{+}(\boldsymbol w^\top \mathbf X^\top \boldsymbol a_j)\right]\\
|
| 263 |
+
&+c \cdot \underset{\boldsymbol w \sim \mathcal{N}\left(\mathbf{0}, I_d\right)}{\mathbb{E}}\left[\boldsymbol w^\top \mathbf X^\top \boldsymbol a_i \cdot \mathbb{I}_{+}(\boldsymbol w^\top \mathbf X^\top \boldsymbol a_i)\cdot \boldsymbol w^\top \mathbf X^\top \boldsymbol a_j \cdot \mathbb{I}_{+}(\boldsymbol w^\top \mathbf X^\top \boldsymbol a_j)\right]
|
| 264 |
+
\end{split}
|
| 265 |
+
\end{equation}$$
|
| 266 |
+
|
| 267 |
+
*(Second message passing layer / The last layer)*
|
| 268 |
+
|
| 269 |
+
Since the GNN uses a linear transformation on top of the (second) message passing layer for output, the neural tangent kernel for a two-layer GNN is given by $$\begin{equation}
|
| 270 |
+
\label{eq_finalgntk}
|
| 271 |
+
\begin{split}
|
| 272 |
+
\operatorname{GNTK}(\boldsymbol x_i, \boldsymbol x_j) &=\operatorname{GNTK}_{mp}^{(1)}(\boldsymbol x_i, \boldsymbol x_j)\\
|
| 273 |
+
&= \frac{1}{(|\mathcal N_i|+1)(|\mathcal N_j|+1)} \sum_{i' \in \mathcal N_i \cup\{i\}} \sum_{j' \in \mathcal N_j \cup\left\{j\right\}} \operatorname{GNTK}^{(1)}\left(\boldsymbol x_{i'}, \boldsymbol x_{j'}\right)\\
|
| 274 |
+
&= \left\langle \left[\phi^{(1)}(\boldsymbol x_1), \cdots, \phi^{(1)}(\boldsymbol x_n)\right]^\top \boldsymbol a_i, \left[\phi^{(1)}(\boldsymbol x_1), \cdots, \phi^{(1)}(\boldsymbol x_n)\right]^\top \boldsymbol a_j\right\rangle_{\mathcal H}
|
| 275 |
+
\end{split}
|
| 276 |
+
\end{equation}$$ where $\mathbf K^{(1)} \in \mathbb R^{n\times n}$ and $\phi^{(1)}:\mathbb R^{d}\rightarrow \mathcal H$ are the kernel matrix and feature map induced by $\operatorname{GNTK}^{(1)}$, respectively. By Eq. [\[eq_finalgntk\]](#eq_finalgntk){reference-type="ref" reference="eq_finalgntk"}, the final feature map $\phi_{gnn}(\boldsymbol x)$ is $$\begin{equation}
|
| 277 |
+
\label{eqn_phignn}
|
| 278 |
+
\phi_{gnn}(\boldsymbol x_i) = \left[\phi^{(1)}(\boldsymbol x_1), \cdots, \phi^{(1)}(\boldsymbol x_n)\right]^\top \boldsymbol a_i.
|
| 279 |
+
\end{equation}$$
|
| 280 |
+
|
| 281 |
+
Also, notice that in Eq. [\[eq_gntk1\]](#eq_gntk1){reference-type="ref" reference="eq_gntk1"}, $$\begin{equation}
|
| 282 |
+
\begin{split}
|
| 283 |
+
\boldsymbol a_i^\top \mathbf G \boldsymbol a_j\cdot \mathbb{I}_{+}(\boldsymbol w^\top \mathbf X^\top \boldsymbol a_i)\cdot \mathbb{I}_{+}(\boldsymbol w^\top \mathbf X^\top \boldsymbol a_j) &= \phi_i^\top \phi_j, \\
|
| 284 |
+
\boldsymbol w^\top \mathbf X^\top \boldsymbol a_i \cdot \mathbb{I}_{+}(\boldsymbol w^\top \mathbf X^\top \boldsymbol a_i)\cdot \boldsymbol w^\top \mathbf X^\top \boldsymbol a_j \cdot \mathbb{I}_{+}(\boldsymbol w^\top \mathbf X^\top \boldsymbol a_j) &= (\boldsymbol w^\top \phi_i)^\top \boldsymbol w^\top \phi_j,\\
|
| 285 |
+
\mbox{where\;\;} \phi_i = \mathbf X^\top \boldsymbol a_i \cdot \mathbb{I}_{+}(\boldsymbol w^\top \mathbf X^\top \boldsymbol a_i)&.
|
| 286 |
+
\end{split}
|
| 287 |
+
\end{equation}$$ Then, the feature map $\phi^{(1)}$ can be written as $$\begin{equation}
|
| 288 |
+
\label{eqn_phi1}
|
| 289 |
+
\begin{split}
|
| 290 |
+
\phi^{(1)}(\boldsymbol x_i) = c'\cdot\Big[\mathbf{X}^{\top} \boldsymbol{a}_i \cdot \mathbb{I}_{+}\left(\boldsymbol{w}^{(1)^{\top}} \mathbf{X}^{\top} \boldsymbol{a}_i \right)&, \boldsymbol{w}^{(1)^{\top}} \mathbf{X}^{\top} \boldsymbol{a}_i \cdot \mathbb{I}_{+}\left(\boldsymbol{w}^{(1)^{\top}} \mathbf{X}^{\top} \boldsymbol{a}_i \right), \\
|
| 291 |
+
\mathbf{X}^{\top} \boldsymbol{a}_i \cdot \mathbb{I}_{+}\left(\boldsymbol{w}^{(2){\top}} \mathbf{X}^{\top} \boldsymbol{a}_i \right)&, \boldsymbol{w}^{(2)^{\top}} \mathbf{X}^{\top} \boldsymbol{a}_i \cdot \mathbb{I}_{+}\left(\boldsymbol{w}^{(2)^{\top}} \mathbf{X}^{\top} \boldsymbol{a}_i \right), \\
|
| 292 |
+
&\cdots\\
|
| 293 |
+
\mathbf{X}^{\top} \boldsymbol{a}_i \cdot \mathbb{I}_{+}\left(\boldsymbol{w}^{(\infty){\top}} \mathbf{X}^{\top} \boldsymbol{a}_i \right)&, \boldsymbol{w}^{(\infty)^{\top}} \mathbf{X}^{\top} \boldsymbol{a}_i \cdot \mathbb{I}_{+}\left(\boldsymbol{w}^{(\infty)^{\top}} \mathbf{X}^{\top} \boldsymbol{a}_i \right)\Big]\\
|
| 294 |
+
\end{split}
|
| 295 |
+
\end{equation}$$ where $\boldsymbol{w}^{(k)}\sim \mathcal{N}\left(\mathbf{0}, I_d\right)$ is a random Gaussian vector in $\mathbb R^d$, with the superscript $^{(k)}$ denoting that it is the $k$-th sample among infinitely many i.i.d. sampled ones, and $c'$ is a constant. We write Eq. [\[eqn_phi1\]](#eqn_phi1){reference-type="ref" reference="eqn_phi1"} in short as $$\begin{equation}
|
| 296 |
+
\label{eqn_phi1short}
|
| 297 |
+
\begin{split}
|
| 298 |
+
\phi^{(1)}(\boldsymbol x_i) = c'\cdot\Big[\mathbf{X}^{\top} \boldsymbol{a}_i \cdot \mathbb{I}_{+}\left(\boldsymbol{w}^{(k)^{\top}} \mathbf{X}^{\top} \boldsymbol{a}_i \right)&, \boldsymbol{w}^{(k)^{\top}} \mathbf{X}^{\top} \boldsymbol{a}_i \cdot \mathbb{I}_{+}\left(\boldsymbol{w}^{(k)^{\top}} \mathbf{X}^{\top} \boldsymbol{a}_i \right), \cdots\Big].\\
|
| 299 |
+
\end{split}
|
| 300 |
+
\end{equation}$$ Finally, substituting Eq. [\[eqn_phi1short\]](#eqn_phi1short){reference-type="ref" reference="eqn_phi1short"} into Eq. [\[eqn_phignn\]](#eqn_phignn){reference-type="ref" reference="eqn_phignn"} completes the proof.
|
| 301 |
+
|
| 302 |
+
To analyse the extrapolation behavior of PMLP along a certain direction $\boldsymbol v$ in the testing phase and compare it to MLP, we consider a newly arrived testing node $\boldsymbol x_0 = t\boldsymbol{v}$, whose degree (with self-connection) is $\tilde{d}$ and its corresponding adjacency vector is $\boldsymbol a\in \mathbb R^{n+1}$, where $(\boldsymbol a)_i = 1/\tilde{d}$ if $(i,0)\in \mathcal E$ otherwise $0$. Following [@xu2021neural; @bietti2019inductive], we consider a constant bias term and denote the data $\boldsymbol x$ plus this term as $\hat{\boldsymbol x} = [\boldsymbol x | 1]$. Then, the asymptotic behavior of $f(\cdot)$ at large distances from the training data range can be characterized by the change of network output with a fixed-length step $\Delta t\cdot \boldsymbol v$ along the direction $\boldsymbol v$, which is given by the following in the NTK regime $$\begin{align}
|
| 303 |
+
\frac{1}{{\Delta t}} \left( f_{mlp}\left(\hat{\boldsymbol{x}}\right) - f_{mlp}\left(\hat{\boldsymbol{x}}_0\right) \right) &= \frac{1}{{\Delta t}} \boldsymbol w_{mlp}^{*^\top} (\phi_{mlp}(\hat{\boldsymbol{x}}) - \phi_{mlp}(\hat{\boldsymbol{x}}_0))\label{eq_mlp}\\
|
| 304 |
+
\frac{1}{{\Delta t}} \left( f_{pmlp}\left(\hat{\boldsymbol{x}}\right) - f_{pmlp}\left(\hat{\boldsymbol{x}}_0\right) \right) &= \frac{1}{{\Delta t}} \boldsymbol w_{mlp}^{*^\top} (\phi_{gnn}(\hat{\boldsymbol{x}}) - \phi_{gnn}(\hat{\boldsymbol{x}}_0)) \label{eq_target}
|
| 305 |
+
\end{align}$$ where $\boldsymbol x = \boldsymbol x_0 + \Delta t\cdot \boldsymbol v= (t+\Delta t)\boldsymbol v$, $\hat{\boldsymbol x} = [\boldsymbol x | 1]$ and $\hat{\boldsymbol x}_0 = [\boldsymbol x_0 | 1]$. As our interest is in how PMLP extrapolates, we use $f(\cdot)$ to refer to $f_{pmlp}(\cdot)$ in the rest of the proof. By Lemma [2](#lemma3){reference-type="ref" reference="lemma3"}, the explicit formula for computing this node's feature map is given by $$\begin{equation}
|
| 306 |
+
\phi_{gnn}(\hat{\boldsymbol x}_0) = \left[\phi^{(1)}(\hat{\boldsymbol x}_0), \phi^{(1)}(\boldsymbol x_1; \hat{\boldsymbol x}_0), \cdots, \phi^{(1)}(\boldsymbol x_n; \hat{\boldsymbol x}_0) \right]^\top \boldsymbol a,
|
| 307 |
+
\end{equation}$$ where $$\begin{equation}
|
| 308 |
+
\label{eqn_phi0}
|
| 309 |
+
\begin{split}
|
| 310 |
+
\phi^{(1)}(\hat{\boldsymbol x}_0) = c'\cdot\Big[[\hat{\boldsymbol x}_0,\mathbf{X}]^{\top} \boldsymbol{a} \cdot \mathbb{I}_{+}&\left(\boldsymbol{w}^{(k)^{\top}} [\hat{\boldsymbol x}_0,\mathbf{X}]^{\top} \boldsymbol{a} \right), \\
|
| 311 |
+
&\boldsymbol{w}^{(k)^{\top}} [\hat{\boldsymbol x}_0,\mathbf{X}]^{\top} \boldsymbol{a} \cdot \mathbb{I}_{+}\left(\boldsymbol{w}^{(k)^{\top}} [\hat{\boldsymbol x}_0,\mathbf{X}]^{\top} \boldsymbol{a} \right), \ldots\Big],
|
| 312 |
+
\end{split}
|
| 313 |
+
\end{equation}$$ and similarly $$\begin{equation}
|
| 314 |
+
\label{eqn_phii}
|
| 315 |
+
\begin{split}
|
| 316 |
+
\phi^{(1)}(\boldsymbol x_i; \hat{\boldsymbol x}_0) = c'\cdot\Big[[\hat{\boldsymbol x}_0,\mathbf{X}]^{\top} \boldsymbol{a}_i \cdot \mathbb{I}_{+}&\left(\boldsymbol{w}^{(k)^{\top}} [\hat{\boldsymbol x}_0,\mathbf{X}]^{\top} \boldsymbol{a}_i \right), \\
|
| 317 |
+
&\boldsymbol{w}^{(k)^{\top}} [\hat{\boldsymbol x}_0,\mathbf{X}]^{\top} \boldsymbol{a}_i \cdot \mathbb{I}_{+}\left(\boldsymbol{w}^{(k)^{\top}} [\hat{\boldsymbol x}_0,\mathbf{X}]^{\top} \boldsymbol{a}_i \right), \ldots\Big],
|
| 318 |
+
\end{split}
|
| 319 |
+
\end{equation}$$ $\boldsymbol{w}^{(k)} \sim \mathcal{N}(\mathbf{0}, \boldsymbol{I}_d)$, with $k$ going to infinity, $c^{\prime}$ is a constant, $\mathbb{I}_{+}(k)$ is an indicator function that outputs $1$ if $k$ is positive otherwise $0$, and $(\boldsymbol a_i)_1 = 1/(|\mathcal N_i| + 1)$ if $i$ is connected to the new testing node. It follows from Eq. [\[eq_target\]](#eq_target){reference-type="ref" reference="eq_target"} that $$\begin{equation}
|
| 320 |
+
\label{eq_target_new}
|
| 321 |
+
\begin{split}
|
| 322 |
+
&\frac{1}{\Delta t}\left(f\left(\hat{\boldsymbol{x}}\right) - f\left(\hat{\boldsymbol{x}}_0\right)\right)\\
|
| 323 |
+
= &\frac{1}{\Delta t} \boldsymbol{w}^{*^\top}_{mlp} \left[\phi^{(1)}(\hat{\boldsymbol x})-\phi^{(1)}(\hat{\boldsymbol x}_0), \cdots, \phi^{(1)}(\boldsymbol x_n; \hat{\boldsymbol x}) - \phi^{(1)}(\boldsymbol x_n; \hat{\boldsymbol x}_0) \right]^\top \boldsymbol a\\
|
| 324 |
+
= &\frac{1}{\tilde d \Delta t} \boldsymbol{w}^{*^\top}_{mlp} \left( \phi^{(1)}(\hat{\boldsymbol x})-\phi^{(1)}(\hat{\boldsymbol x}_0)\right) + \frac{1}{\tilde d \Delta t} \sum_{i\in \mathcal N_0} \boldsymbol{w}^{*^\top}_{mlp} \left(\phi^{(1)}(\boldsymbol x_i; \hat{\boldsymbol x}) - \phi^{(1)}(\boldsymbol x_i; \hat{\boldsymbol x}_0)\right)
|
| 325 |
+
\end{split}
|
| 326 |
+
\end{equation}$$ Now, let us consider $\boldsymbol{w}^{*^\top}_{mlp} \left(\phi^{(1)}(\hat{\boldsymbol x})-\phi^{(1)}(\hat{\boldsymbol x}_0)\right)$. Recall that $\boldsymbol{w}^{*^\top}_{mlp}$ is from an infinite-dimensional Hilbert space, and $\boldsymbol w^{(k)}$ is drawn from a Gaussian with $k$ going to infinity in Eq. [\[eqn_phi0\]](#eqn_phi0){reference-type="ref" reference="eqn_phi0"} and Eq. [\[eqn_phii\]](#eqn_phii){reference-type="ref" reference="eqn_phii"}, where each $\boldsymbol w^{(k)}$ corresponds to certain dimensions of $\boldsymbol{w}^{*}_{mlp}$. Let us denote the part that corresponds to the first line in Eq. [\[eqn_phi0\]](#eqn_phi0){reference-type="ref" reference="eqn_phi0"} as $\boldsymbol\beta_{\boldsymbol w^{(k)}}$ and the second line in Eq. [\[eqn_phi0\]](#eqn_phi0){reference-type="ref" reference="eqn_phi0"} as $\boldsymbol\gamma_{\boldsymbol w^{(k)}}$. Consider the following way of rearrangement for (the first element in) $\phi^{(1)}(\hat{\boldsymbol x})-\phi^{(1)}(\hat{\boldsymbol x}_0)$ $$\begin{equation}
|
| 327 |
+
\begin{split}
|
| 328 |
+
&[\hat{\boldsymbol x},\mathbf{X}]^{\top} \boldsymbol{a} \cdot \mathbb{I}_{+}\left(\boldsymbol{w}^{(k)^{\top}} [\hat{\boldsymbol x},\mathbf{X}]^{\top} \boldsymbol{a} \right)- [\hat{\boldsymbol x}_0,\mathbf{X}]^{\top} \boldsymbol{a} \cdot \mathbb{I}_{+}\left(\boldsymbol{w}^{(k)^{\top}} [\hat{\boldsymbol x}_0,\mathbf{X}]^{\top} \boldsymbol{a} \right)\\
|
| 329 |
+
= &[\hat{\boldsymbol x},\mathbf{X}]^{\top} \boldsymbol{a} \left(\mathbb{I}_{+}\left(\boldsymbol{w}^{(k)^{\top}} [\hat{\boldsymbol x},\mathbf{X}]^{\top} \boldsymbol{a} \right)-\mathbb{I}_{+}\left(\boldsymbol{w}^{(k)^{\top}} [\hat{\boldsymbol x}_0,\mathbf{X}]^{\top} \boldsymbol{a} \right)\right)\\
|
| 330 |
+
&\qquad\qquad\qquad\qquad\qquad\qquad\qquad+ \frac{1}{\tilde d}\;[\Delta t \boldsymbol{v} \;|\; 0] \cdot \mathbb{I}_{+}\left(\boldsymbol{w}^{(k)^{\top}} [\hat{\boldsymbol x}_0,\mathbf{X}]^{\top} \boldsymbol{a} \right),
|
| 331 |
+
\end{split}
|
| 332 |
+
\end{equation}$$ where $\frac{1}{\tilde d}\;[\Delta t \boldsymbol{v} \;|\; 0]$ is obtained by subtracting $[\hat{\boldsymbol x}_0,\mathbf{X}]^{\top} \boldsymbol{a}$ from $[\hat{\boldsymbol x},\mathbf{X}]^{\top} \boldsymbol{a}$. Then, we can re-write $\boldsymbol{w}^{*^\top}_{mlp} \left(\phi^{(1)}(\hat{\boldsymbol x})-\phi^{(1)}(\hat{\boldsymbol x}_0)\right)$ into a more convenient form: $$\begin{align}
|
| 333 |
+
&\frac{1}{\Delta t}\boldsymbol{w}^{*^\top}_{mlp}\left( \phi^{(1)}(\hat{\boldsymbol x})-\phi^{(1)}(\hat{\boldsymbol x}_0)\right) \label{eq_phiall} \\
|
| 334 |
+
=& \int \frac{1}{\tilde d}\; \boldsymbol{\beta}_{\boldsymbol{w}}^{{\top}} [\boldsymbol{v} \;|\; 0] \cdot \mathbb{I}_{+}\left(\boldsymbol{w}^{\top} [\hat{\boldsymbol x}_0,\mathbf{X}]^{\top} \boldsymbol{a} \right) \mathrm{d} \mathbb{P}(\boldsymbol{w}) \label{eq1term} \\
|
| 335 |
+
+& \int \boldsymbol{\beta}_{\boldsymbol{w}}^{{\top}} [\hat{\boldsymbol x}/\Delta t,\mathbf{X}/\Delta t]^{\top} \boldsymbol{a} \left(\mathbb{I}_{+}\left(\boldsymbol{w}^{{\top}} [\hat{\boldsymbol x},\mathbf{X}]^{\top} \boldsymbol{a} \right)-\mathbb{I}_{+}\left(\boldsymbol{w}^{{\top}} [\hat{\boldsymbol x}_0,\mathbf{X}]^{\top} \boldsymbol{a} \right)\right) \mathrm{d} \mathbb{P}(\boldsymbol{w}) \label{eq2term} \\
|
| 336 |
+
+& \int \frac{1}{\tilde d}\; \boldsymbol{\gamma}_{\boldsymbol{w}} \boldsymbol{w}^\top [\boldsymbol{v} \;|\; 0] \cdot \mathbb{I}_{+}\left(\boldsymbol{w}^{\top} [\hat{\boldsymbol x}_0,\mathbf{X}]^{\top} \boldsymbol{a} \right) \mathrm{d} \mathbb{P}(\boldsymbol{w}) \label{eq3term}\\
|
| 337 |
+
+& \int \boldsymbol{\gamma}_{\boldsymbol{w}} \boldsymbol{w}^\top [\hat{\boldsymbol x}/\Delta t,\mathbf{X}/\Delta t]^{\top} \boldsymbol{a} \left(\mathbb{I}_{+}\left(\boldsymbol{w}^{{\top}} [\hat{\boldsymbol x},\mathbf{X}]^{\top} \boldsymbol{a} \right)-\mathbb{I}_{+}\left(\boldsymbol{w}^{{\top}} [\hat{\boldsymbol x}_0,\mathbf{X}]^{\top} \boldsymbol{a} \right)\right) \mathrm{d} \mathbb{P}(\boldsymbol{w}) \label{eq4term}
|
| 338 |
+
\end{align}$$ **Remark.** For other components in Eq. [\[eq_target_new\]](#eq_target_new){reference-type="ref" reference="eq_target_new"}, i.e., $\frac{1}{\Delta t}\boldsymbol{w}^{*^\top}_{mlp}\left(\phi^{(1)}(\boldsymbol x_i; \hat{\boldsymbol x}) - \phi^{(1)}(\boldsymbol x_i; \hat{\boldsymbol x}_0)\right)$ where $i \in \mathcal N_0$, the corresponding result of the expansion in Eq. [\[eq_phiall\]](#eq_phiall){reference-type="ref" reference="eq_phiall"} is similar, which only differs by a scaling factor (since the first element in both $\boldsymbol a$ and $\boldsymbol a_i$ indicating whether the current node is connected to the new testing node is non-zero) and replacing $\boldsymbol a$ with $\boldsymbol a_i$. Therefore, in the following proof we focus on Eq. [\[eq_phiall\]](#eq_phiall){reference-type="ref" reference="eq_phiall"} and then generalize the result to other components in Eq. [\[eq_target_new\]](#eq_target_new){reference-type="ref" reference="eq_target_new"}.
|
| 339 |
+
|
| 340 |
+
We first analyse the convergence of Eq. [\[eq_phiall\]](#eq_phiall){reference-type="ref" reference="eq_phiall"}. Specifically, for Eq. [\[eq1term\]](#eq1term){reference-type="ref" reference="eq1term"}: $$\begin{equation}
|
| 341 |
+
\label{eq1cvg}
|
| 342 |
+
\begin{split}
|
| 343 |
+
&\int \frac{1}{\tilde d}\; \boldsymbol{\beta}_{\boldsymbol{w}}^{{\top}} [\boldsymbol{v} \;|\; 0] \cdot \mathbb{I}_{+}\left(\boldsymbol{w}^{\top} [\hat{\boldsymbol x}_0,\mathbf{X}]^{\top} \boldsymbol{a} \right) \mathrm{d} \mathbb{P}(\boldsymbol{w})\\
|
| 344 |
+
= &\int \frac{1}{\tilde d}\; \boldsymbol{\beta}_{\boldsymbol{w}}^{{\top}} [\boldsymbol{v} \;|\; 0] \cdot \mathbb{I}_{+}\left(\boldsymbol{w}^{\top} [\hat{\boldsymbol x}_0/t,\mathbf{X}/t]^{\top} \boldsymbol{a} \right) \mathrm{d} \mathbb{P}(\boldsymbol{w})\\
|
| 345 |
+
\rightarrow &\int \frac{1}{\tilde d}\; \boldsymbol{\beta}_{\boldsymbol{w}}^{{\top}} [\boldsymbol{v} \;|\; 0] \cdot \mathbb{I}_{+}\left(\boldsymbol{w}^{\top} [\boldsymbol v \;|\; 0] \right) \mathrm{d} \mathbb{P}(\boldsymbol{w}) = \frac{c'_{\boldsymbol v}}{\tilde d}, \quad \text { as } t \rightarrow \infty
|
| 346 |
+
\end{split}
|
| 347 |
+
\end{equation}$$ where the final result is a constant that depends on training data, direction $\boldsymbol{v}$ and node degree $\tilde d$. Moreover, the convergence of Eq. [\[eq2term\]](#eq2term){reference-type="ref" reference="eq2term"} is given by $$\begin{equation}
|
| 348 |
+
\label{eq2cvg}
|
| 349 |
+
\begin{split}
|
| 350 |
+
&\int \boldsymbol{\beta}_{\boldsymbol{w}}^{{\top}} [\hat{\boldsymbol x}/\Delta t,\mathbf{X}/\Delta t]^{\top} \boldsymbol{a} \left(\mathbb{I}_{+}\left(\boldsymbol{w}^{{\top}} [\hat{\boldsymbol x},\mathbf{X}]^{\top} \boldsymbol{a} \right)-\mathbb{I}_{+}\left(\boldsymbol{w}^{{\top}} [\hat{\boldsymbol x}_0,\mathbf{X}]^{\top} \boldsymbol{a} \right)\right) \mathrm{d} \mathbb{P}(\boldsymbol{w})\\
|
| 351 |
+
= &\int \boldsymbol{\beta}_{\boldsymbol{w}}^{{\top}} [\hat{\boldsymbol x}/\Delta t,\mathbf{X}/\Delta t]^{\top} \boldsymbol{a} \left(\mathbb{I}_{+}\left(\boldsymbol{w}^{{\top}} [[\boldsymbol v \;|\; \frac{1}{t+\Delta t}],\frac{\mathbf{X}}{t+\Delta t}]^{\top} \boldsymbol{a} \right)-\mathbb{I}_{+}\left(\boldsymbol{w}^{{\top}} [[\boldsymbol v \;|\; \frac{1}{t}],\frac{\mathbf{X}}{t}]^{\top} \boldsymbol{a} \right)\right) \mathrm{d} \mathbb{P}(\boldsymbol{w})\\
|
| 352 |
+
\rightarrow &\int \boldsymbol{\beta}_{\boldsymbol{w}}^{{\top}} [\hat{\boldsymbol x}/\Delta t,\mathbf{X}/\Delta t]^{\top} \boldsymbol{a} \left(\mathbb{I}_{+}\left(\boldsymbol{w}^{{\top}} [\boldsymbol v \;|\; 0] \right)-\mathbb{I}_{+}\left(\boldsymbol{w}^{{\top}} [\boldsymbol v \;|\; 0] \right)\right) \mathrm{d} \mathbb{P}(\boldsymbol{w}) = 0, \quad \text { as } t \rightarrow \infty
|
| 353 |
+
\end{split}
|
| 354 |
+
\end{equation}$$ The similar results in Eq. [\[eq1cvg\]](#eq1cvg){reference-type="ref" reference="eq1cvg"} and Eq. [\[eq2cvg\]](#eq2cvg){reference-type="ref" reference="eq2cvg"} also apply to analysis of Eq. [\[eq3term\]](#eq3term){reference-type="ref" reference="eq3term"} and Eq. [\[eq4term\]](#eq4term){reference-type="ref" reference="eq4term"}, respectively. By combining these results, we conclude that $$\begin{equation}
|
| 355 |
+
\label{eqn_coefficient}
|
| 356 |
+
\frac{1}{\Delta t}\boldsymbol{w}^{*^\top}_{mlp}\left( \phi^{(1)}(\hat{\boldsymbol x})-\phi^{(1)}(\hat{\boldsymbol x}_0)\right) \rightarrow \frac{c_{\boldsymbol v}}{\tilde d}, \quad \text { as } t \rightarrow \infty
|
| 357 |
+
\end{equation}$$ It follows that $$\begin{equation}
|
| 358 |
+
\label{eq_convergefinal}
|
| 359 |
+
\frac{1}{\Delta t}\left(f\left(\hat{\boldsymbol{x}}\right) - f\left(\hat{\boldsymbol{x}}_0\right)\right) \rightarrow c_{\boldsymbol v}{\tilde d}^{-1}\sum_{i\in \mathcal N_0 \cup \{0\}} \tilde{d}_i^{-1}, \quad \text { as } t \rightarrow \infty.
|
| 360 |
+
\end{equation}$$
|
| 361 |
+
|
| 362 |
+
In conclusion, both MLP and PMLP with ReLU activation will eventually converge to a linear function along directions away from the training data. In fact, this result also holds true for two-layer GNNs with weighted-sum style message passing layers by simply replacing $\boldsymbol{w}^{*}_{mlp}$ by $\boldsymbol{w}^{*}_{gnn}$ in the proof.
|
| 363 |
+
|
| 364 |
+
However, a remarkable difference between MLP and PMLP is that the linear coefficient for MLP is a constant $c_{\boldsymbol v}$ that is fixed for a specific direction $\boldsymbol v$ and not affected by the inter-connection between the testing node $\boldsymbol x$ and the training data $\{(\boldsymbol x_i, y_i)\}_{i=1}^n$. In contrast, the linear coefficient for PMLP (and GNN) is also dependent on the testing node's degree and the degrees of adjacent nodes.
|
| 365 |
+
|
| 366 |
+
Moreover, by Proposition [1](#prop1){reference-type="ref" reference="prop1"}, MLP and PMLP share the same $\boldsymbol w_{mlp}^*$ (including $\boldsymbol{\beta}_{\boldsymbol w}$ and $\boldsymbol{\gamma}_{\boldsymbol w}$), and thus the constant $c_{\boldsymbol v}$ in Eq. [\[eq_convergefinal\]](#eq_convergefinal){reference-type="ref" reference="eq_convergefinal"} is exactly the linear coefficient of MLP. This can also be verified by setting $\boldsymbol x$ to be an isolated node, in which case ${\tilde d}^{-1} \sum_{i\in \mathcal N_0 \cup \{0\}} \tilde{d}_i^{-1} = 1$ and PMLP is equivalent to MLP. Therefore, we can directly compare the linear coefficients for MLP and PMLP. As an immediate consequence, if the degrees of all adjacent nodes are larger than the degree of the testing node, the linear coefficient will become smaller, and vice versa.
|
| 367 |
+
|
| 368 |
+
We next analyse the convergence rate for Eq. [\[eq1cvg\]](#eq1cvg){reference-type="ref" reference="eq1cvg"} and Eq. [\[eq2cvg\]](#eq2cvg){reference-type="ref" reference="eq2cvg"} to see to what extent PMLP can deviate from the converged linear coefficient, as an indication of its tolerance to out-of-distribution samples. For Eq. [\[eq1cvg\]](#eq1cvg){reference-type="ref" reference="eq1cvg"}, we have $$\begin{equation}
|
| 369 |
+
\begin{split}
|
| 370 |
+
&\left|\int \frac{1}{\tilde d}\; \boldsymbol{\beta}_{\boldsymbol{w}}^{{\top}} [\boldsymbol{v} \;|\; 0] \cdot \left(\mathbb{I}_{+}\left(\boldsymbol{w}^{\top} [\hat{\boldsymbol x}_0,\mathbf{X}]^{\top} \boldsymbol{a} \right) - \mathbb{I}_{+}\left(\boldsymbol{w}^{\top} [\boldsymbol v \;|\; 0] \right)\right) \mathrm{d} \mathbb{P}(\boldsymbol{w})\right|\\
|
| 371 |
+
\leq \quad &\frac{c'_1}{\tilde d}\cdot\int \left| \mathbb{I}_{+}\left(\boldsymbol{w}^{\top} [\hat{\boldsymbol x}_0,\mathbf{X}]^{\top} \boldsymbol{a} \right) - \mathbb{I}_{+}\left(\boldsymbol{w}^{\top} [\boldsymbol v\;|\; 0] \right) \right| \mathrm{d} \mathbb{P}(\boldsymbol{w})\\
|
| 372 |
+
=\quad &\frac{c'_1}{\tilde d} \cdot\int \left| \mathbb{I}_{+}\left(\boldsymbol{w}^{\top} [[\boldsymbol x_0 \;|\; 1],\mathbf{X}]^{\top} \boldsymbol{a} \right) - \mathbb{I}_{+}\left(\boldsymbol{w}^{\top} [\boldsymbol x_0\;|\; 0] \right) \right| \mathrm{d} \mathbb{P}(\boldsymbol{w})\\
|
| 373 |
+
\end{split}
|
| 374 |
+
\end{equation}$$ Note that the integral of $| \mathbb{I}_{+}(\boldsymbol{w}^{\top} \boldsymbol v_1 ) - \mathbb{I}_{+}(\boldsymbol{w}^{\top} \boldsymbol v_2 ) |$ represents the volume of the non-overlapping part of two half-balls that are orthogonal to $\boldsymbol v_1$ and $\boldsymbol v_2$, which grows linearly with the angle between $\boldsymbol v_1$ and $\boldsymbol v_2$, denoted by $\measuredangle\left(\boldsymbol v_1, \boldsymbol v_2\right)$. Therefore, we have $$\begin{equation}
|
| 375 |
+
\label{eq_theta1}
|
| 376 |
+
\begin{split}
|
| 377 |
+
&\frac{c'_1}{\tilde d} \cdot\int \left| \mathbb{I}_{+}\left(\boldsymbol{w}^{\top} [[\boldsymbol x_0 \;|\; 1],\mathbf{X}]^{\top} \boldsymbol{a} \right) - \mathbb{I}_{+}\left(\boldsymbol{w}^{\top} [\boldsymbol x_0\;|\; 0] \right) \right| \mathrm{d} \mathbb{P}(\boldsymbol{w})\\
|
| 378 |
+
=\quad &\frac{c_1}{\tilde d}\cdot \measuredangle\left([[\boldsymbol x_0\;|\;1],\mathbf{X}]^{\top} \boldsymbol{a} \;,\; [\boldsymbol x_0\;|\; 0]\right),
|
| 379 |
+
\end{split}
|
| 380 |
+
\end{equation}$$ Note that the first term in the angle can be decomposed as $$\begin{equation}
|
| 381 |
+
\tilde{d}\cdot[[\boldsymbol x_0 \;|\; 1],\mathbf{X}]^{\top} \boldsymbol{a} = [\boldsymbol x_0 \;|\; 0] + [\boldsymbol 0 \;|\; 1] + \sum_{i \in \mathcal{N}(0)} \boldsymbol x_i.
|
| 382 |
+
\end{equation}$$ Suppose all node features are normalized, then we have $$\begin{equation}
|
| 383 |
+
\label{eq_rate1}
|
| 384 |
+
\begin{split}
|
| 385 |
+
\measuredangle\left([[\boldsymbol x_0, 1],\mathbf{X}]^{\top} \boldsymbol{a} \;,\; [\boldsymbol x_0, 0]\right) & \leq \measuredangle\left([\boldsymbol x_0, 0] \;,\; [\boldsymbol x_0, 1]\right) + \measuredangle(\hat{\boldsymbol x}_0 \;,\; \hat{\boldsymbol x}_0 + \sum_{i \in \mathcal{N}(0)} \boldsymbol x_i)\\
|
| 386 |
+
& = \operatorname{arctan}(\frac{1}{t}) + \operatorname{arctan}(\frac{(\tilde d-1)\sqrt{1-\alpha^2}}{(\tilde d-1) \alpha + \sqrt{t^2+1}})\\
|
| 387 |
+
& = O(\frac{1+(\tilde{d}-1)\sqrt{1-\alpha^2}}{t})
|
| 388 |
+
\end{split}
|
| 389 |
+
\end{equation}$$
|
| 390 |
+
|
| 391 |
+
where $\alpha$ denotes the cosine similarity of the testing node and the sum of its neighbors. The last step is obtained by noting $\operatorname{arctan}(x) < x$. Using the same reasoning, for Eq. [\[eq2cvg\]](#eq2cvg){reference-type="ref" reference="eq2cvg"}, we have $$\begin{equation}
|
| 392 |
+
\begin{split}
|
| 393 |
+
&\left| \int \boldsymbol{\beta}_{\boldsymbol{w}}^{{\top}} [\hat{\boldsymbol x}/\Delta t,\mathbf{X}/\Delta t]^{\top} \boldsymbol{a} \left(\mathbb{I}_{+}\left(\boldsymbol{w}^{{\top}} [\hat{\boldsymbol x}_0,\mathbf{X}]^{\top} \boldsymbol{a} \right)-\mathbb{I}_{+}\left(\boldsymbol{w}^{{\top}} [\hat{\boldsymbol x},\mathbf{X}]^{\top} \boldsymbol{a} \right)\right) \mathrm{d} \mathbb{P}(\boldsymbol{w}) \right|\\
|
| 394 |
+
\leq \quad & \left|\boldsymbol{\beta}_{\boldsymbol{w}}^{{\top}} [\hat{\boldsymbol x}/\Delta t,\mathbf{X}/\Delta t]^{\top} \boldsymbol{a}\right|\cdot \int \left| \mathbb{I}_{+}\left(\boldsymbol{w}^{{\top}} [\hat{\boldsymbol x}_0,\mathbf{X}]^{\top} \boldsymbol{a} \right)-\mathbb{I}_{+}\left(\boldsymbol{w}^{{\top}} [\hat{\boldsymbol x},\mathbf{X}]^{\top} \boldsymbol{a} \right) \right| \mathrm{d} \mathbb{P}(\boldsymbol{w})\\
|
| 395 |
+
= \quad &\left|\boldsymbol{\beta}_{\boldsymbol{w}}^{{\top}} [\hat{\boldsymbol x}/\Delta t,\mathbf{X}/\Delta t]^{\top} \boldsymbol{a}\right| \cdot \int \left| \mathbb{I}_{+}\left(\boldsymbol{w}^{{\top}} \Big[[\boldsymbol x_0\;|\;1],\mathbf{X}\Big]^{\top} \boldsymbol{a} \right)-\mathbb{I}_{+}\left(\boldsymbol{w}^{{\top}} \Big[[\boldsymbol x_0\;|\;\frac{t}{t+\Delta t}],\frac{t}{t+\Delta t}\mathbf{X}\Big]^{\top} \boldsymbol{a} \right) \right| \mathrm{d} \mathbb{P}(\boldsymbol{w})\\
|
| 396 |
+
= \quad &\left|\boldsymbol{\beta}_{\boldsymbol{w}}^{{\top}} [\hat{\boldsymbol x}/\Delta t,\mathbf{X}/\Delta t]^{\top} \boldsymbol{a}\right|\cdot \measuredangle\left( \Big[[\boldsymbol x_0\;|\;1],\mathbf{X}\Big]^{\top} \boldsymbol{a} \;,\; \Big[[\boldsymbol x_0\;|\;\frac{t}{t+\Delta t}],\frac{t}{t+\Delta t}\mathbf{X}\Big]^{\top} \boldsymbol{a}\right),
|
| 397 |
+
\end{split}
|
| 398 |
+
\end{equation}$$ Note that the second term in the angle can be re-written as $$\begin{equation}
|
| 399 |
+
\Big[[\boldsymbol x_0\;|\;\frac{t}{t+\Delta t}],\frac{t}{t+\Delta t}\mathbf{X}\Big]^{\top} \boldsymbol{a} =\frac{t}{t+\Delta t} \cdot\Big[[\boldsymbol x_0\;|\;1],\mathbf{X}\Big]^{\top} \boldsymbol{a} + \frac{\Delta t}{t+\Delta t} \cdot \Big[[\boldsymbol x_0\;|\;0],\mathbf{0}\Big]^{\top} \boldsymbol{a},
|
| 400 |
+
\end{equation}$$ and hence the angle is at most $\Delta t/(t+\Delta t)$ times of that in Eq. [\[eq_theta1\]](#eq_theta1){reference-type="ref" reference="eq_theta1"}. It follows that $$\begin{equation}
|
| 401 |
+
\label{eq_rate2}
|
| 402 |
+
\begin{split}
|
| 403 |
+
&\left| \int \boldsymbol{\beta}_{\boldsymbol{w}}^{{\top}} [\hat{\boldsymbol x}/\Delta t,\mathbf{X}/\Delta t]^{\top} \boldsymbol{a} \left(\mathbb{I}_{+}\left(\boldsymbol{w}^{{\top}} [\hat{\boldsymbol x}_0,\mathbf{X}]^{\top} \boldsymbol{a} \right)-\mathbb{I}_{+}\left(\boldsymbol{w}^{{\top}} [\hat{\boldsymbol x},\mathbf{X}]^{\top} \boldsymbol{a} \right)\right) \mathrm{d} \mathbb{P}(\boldsymbol{w}) \right|\\
|
| 404 |
+
= \quad &O(\frac{t+\Delta t}{\tilde d}) \cdot O(\frac{1+(\tilde{d}-1)\sqrt{1-\alpha^2}}{t}) \cdot O(\frac{\Delta t}{t+\Delta t})\\
|
| 405 |
+
= \quad &O(\frac{1+(\tilde{d}-1)\sqrt{1-\alpha^2}}{\tilde d t})
|
| 406 |
+
\end{split}
|
| 407 |
+
\end{equation}$$ For Eq. [\[eq3term\]](#eq3term){reference-type="ref" reference="eq3term"} and Eq. [\[eq4term\]](#eq4term){reference-type="ref" reference="eq4term"}, similar results can be derived by bounding $\boldsymbol w$ with standard concentration techniques. By substituting the above convergence rates to Eq. [\[eq_phiall\]](#eq_phiall){reference-type="ref" reference="eq_phiall"}, and dividing it by the linear coefficient in Eq. [\[eqn_coefficient\]](#eqn_coefficient){reference-type="ref" reference="eqn_coefficient"}, the convergence rate for Eq. [\[eq_phiall\]](#eq_phiall){reference-type="ref" reference="eq_phiall"} is $$\begin{equation}
|
| 408 |
+
\begin{split}
|
| 409 |
+
& \left|\frac{\frac{1}{\Delta t}\boldsymbol{w}^{*^\top}_{mlp}\left( \phi^{(1)}(\hat{\boldsymbol x})-\phi^{(1)}(\hat{\boldsymbol x}_0)\right) - c_{\boldsymbol v}/\tilde d}{c_{\boldsymbol v}/\tilde d}\right| = O(\frac{1+(\tilde{d} -1) \sqrt{1-\alpha^2}}{ t})\\
|
| 410 |
+
\end{split}
|
| 411 |
+
\end{equation}$$ It follows that the convergence rate for Eq. [\[eq_target\]](#eq_target){reference-type="ref" reference="eq_target"} is $O(\frac{1+(\tilde{d}_{max} -1) \sqrt{1-\alpha_{min}^2}}{ t})$, where $\tilde{d}_{max}$ denotes the maximum node degree among the testing node's neighbors (including itself) and $$\begin{equation}
|
| 412 |
+
\alpha_{min} = \operatorname{min}\{\alpha_i\}_{i\in \mathcal N_0 \cup \{0\}}
|
| 413 |
+
\end{equation}$$ denotes the minimum cosine similarity among the testing node's neighbors (including itself). This completes the proof.
|
2302.02061/main_diagram/main_diagram.drawio
ADDED
|
@@ -0,0 +1,206 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
<mxfile host="app.diagrams.net" modified="2023-05-18T01:10:28.894Z" agent="Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/113.0.0.0 Safari/537.36" version="21.3.2" etag="8e8NmURWx3KgQn5Xb0Wl" type="device">
|
| 2 |
+
<diagram name="Page-1" id="54jVFKupPCcHayrSYOD1">
|
| 3 |
+
<mxGraphModel dx="1274" dy="931" grid="1" gridSize="10" guides="1" tooltips="1" connect="1" arrows="1" fold="1" page="0" pageScale="1" pageWidth="850" pageHeight="1100" math="1" shadow="0">
|
| 4 |
+
<root>
|
| 5 |
+
<mxCell id="0" />
|
| 6 |
+
<mxCell id="1" parent="0" />
|
| 7 |
+
<mxCell id="2" value="<font style="font-size: 30px;">`s_t`</font>" style="ellipse;whiteSpace=wrap;html=1;aspect=fixed;strokeWidth=2;fillColor=#dae8fc;strokeColor=#000000;fontSize=30;" vertex="1" parent="1">
|
| 8 |
+
<mxGeometry x="770" y="260" width="80" height="80" as="geometry" />
|
| 9 |
+
</mxCell>
|
| 10 |
+
<mxCell id="3" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;entryX=0;entryY=0.5;entryDx=0;entryDy=0;strokeWidth=2;fontSize=30;" edge="1" source="4" target="5" parent="1">
|
| 11 |
+
<mxGeometry relative="1" as="geometry" />
|
| 12 |
+
</mxCell>
|
| 13 |
+
<mxCell id="4" value="<font style="font-size: 30px;">`a_t`</font>" style="ellipse;whiteSpace=wrap;html=1;aspect=fixed;strokeWidth=2;fillColor=#f5f5f5;fontColor=#333333;strokeColor=#000000;fontSize=30;" vertex="1" parent="1">
|
| 14 |
+
<mxGeometry x="920" y="260" width="80" height="80" as="geometry" />
|
| 15 |
+
</mxCell>
|
| 16 |
+
<mxCell id="5" value="<font style="font-size: 30px;">`s_{t+1}`</font>" style="ellipse;whiteSpace=wrap;html=1;aspect=fixed;strokeWidth=2;fillColor=#dae8fc;strokeColor=#000000;fontSize=30;" vertex="1" parent="1">
|
| 17 |
+
<mxGeometry x="1080" y="260" width="80" height="80" as="geometry" />
|
| 18 |
+
</mxCell>
|
| 19 |
+
<mxCell id="6" value="<font style="font-size: 30px;">`x`</font>" style="ellipse;whiteSpace=wrap;html=1;aspect=fixed;strokeWidth=2;fillColor=#dae8fc;strokeColor=#000000;fontSize=30;" vertex="1" parent="1">
|
| 20 |
+
<mxGeometry x="770" y="380" width="80" height="80" as="geometry" />
|
| 21 |
+
</mxCell>
|
| 22 |
+
<mxCell id="7" value="" style="curved=1;endArrow=classic;html=1;rounded=0;fontSize=30;exitX=0.758;exitY=0.093;exitDx=0;exitDy=0;entryX=0;entryY=0;entryDx=0;entryDy=0;exitPerimeter=0;strokeWidth=2;" edge="1" parent="1">
|
| 23 |
+
<mxGeometry width="50" height="50" relative="1" as="geometry">
|
| 24 |
+
<mxPoint x="829.46" y="267.44000000000005" as="sourcePoint" />
|
| 25 |
+
<mxPoint x="1090.535728752538" y="271.71572875253787" as="targetPoint" />
|
| 26 |
+
<Array as="points">
|
| 27 |
+
<mxPoint x="958.8199999999999" y="120" />
|
| 28 |
+
</Array>
|
| 29 |
+
</mxGeometry>
|
| 30 |
+
</mxCell>
|
| 31 |
+
<mxCell id="8" value="" style="endArrow=classic;html=1;rounded=0;strokeWidth=2;fontSize=30;entryX=0;entryY=1;entryDx=0;entryDy=0;" edge="1" source="6" target="5" parent="1">
|
| 32 |
+
<mxGeometry width="50" height="50" relative="1" as="geometry">
|
| 33 |
+
<mxPoint x="920" y="160" as="sourcePoint" />
|
| 34 |
+
<mxPoint x="970" y="110" as="targetPoint" />
|
| 35 |
+
</mxGeometry>
|
| 36 |
+
</mxCell>
|
| 37 |
+
<mxCell id="9" value="<font style="font-size: 30px;">`s_t`</font>" style="ellipse;whiteSpace=wrap;html=1;aspect=fixed;strokeWidth=2;fillColor=#dae8fc;strokeColor=#000000;fontSize=30;" vertex="1" parent="1">
|
| 38 |
+
<mxGeometry x="765" y="620" width="80" height="80" as="geometry" />
|
| 39 |
+
</mxCell>
|
| 40 |
+
<mxCell id="10" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;entryX=0;entryY=0.5;entryDx=0;entryDy=0;strokeWidth=2;fontSize=30;" edge="1" source="11" target="12" parent="1">
|
| 41 |
+
<mxGeometry relative="1" as="geometry" />
|
| 42 |
+
</mxCell>
|
| 43 |
+
<mxCell id="11" value="<font style="font-size: 30px;">`a_t`</font>" style="ellipse;whiteSpace=wrap;html=1;aspect=fixed;strokeWidth=2;fillColor=#f5f5f5;fontColor=#333333;strokeColor=#000000;fontSize=30;" vertex="1" parent="1">
|
| 44 |
+
<mxGeometry x="915" y="620" width="80" height="80" as="geometry" />
|
| 45 |
+
</mxCell>
|
| 46 |
+
<mxCell id="12" value="<font style="font-size: 30px;">`s_{t+1}`</font>" style="ellipse;whiteSpace=wrap;html=1;aspect=fixed;strokeWidth=2;fillColor=#dae8fc;strokeColor=#000000;fontSize=30;" vertex="1" parent="1">
|
| 47 |
+
<mxGeometry x="1075" y="620" width="80" height="80" as="geometry" />
|
| 48 |
+
</mxCell>
|
| 49 |
+
<mxCell id="13" value="<font style="font-size: 30px;">`x_{t+1}`</font>" style="ellipse;whiteSpace=wrap;html=1;aspect=fixed;strokeWidth=2;fillColor=#dae8fc;strokeColor=#000000;fontSize=30;" vertex="1" parent="1">
|
| 50 |
+
<mxGeometry x="1075" y="750" width="80" height="80" as="geometry" />
|
| 51 |
+
</mxCell>
|
| 52 |
+
<mxCell id="14" value="" style="curved=1;endArrow=classic;html=1;rounded=0;fontSize=30;exitX=0.758;exitY=0.093;exitDx=0;exitDy=0;entryX=0;entryY=0;entryDx=0;entryDy=0;exitPerimeter=0;strokeWidth=2;" edge="1" parent="1">
|
| 53 |
+
<mxGeometry width="50" height="50" relative="1" as="geometry">
|
| 54 |
+
<mxPoint x="824.46" y="627.44" as="sourcePoint" />
|
| 55 |
+
<mxPoint x="1085.535728752538" y="631.7157287525379" as="targetPoint" />
|
| 56 |
+
<Array as="points">
|
| 57 |
+
<mxPoint x="953.8199999999999" y="480" />
|
| 58 |
+
</Array>
|
| 59 |
+
</mxGeometry>
|
| 60 |
+
</mxCell>
|
| 61 |
+
<mxCell id="15" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;entryX=0;entryY=0.5;entryDx=0;entryDy=0;strokeWidth=2;fontSize=30;" edge="1" source="16" target="13" parent="1">
|
| 62 |
+
<mxGeometry relative="1" as="geometry" />
|
| 63 |
+
</mxCell>
|
| 64 |
+
<mxCell id="16" value="<font style="font-size: 30px;">`x_t`</font>" style="ellipse;whiteSpace=wrap;html=1;aspect=fixed;strokeWidth=2;fillColor=#dae8fc;strokeColor=#000000;fontSize=30;" vertex="1" parent="1">
|
| 65 |
+
<mxGeometry x="765" y="750" width="80" height="80" as="geometry" />
|
| 66 |
+
</mxCell>
|
| 67 |
+
<mxCell id="17" value="" style="endArrow=classic;html=1;rounded=0;strokeWidth=2;fontSize=30;exitX=0.903;exitY=0.769;exitDx=0;exitDy=0;exitPerimeter=0;" edge="1" source="9" target="13" parent="1">
|
| 68 |
+
<mxGeometry width="50" height="50" relative="1" as="geometry">
|
| 69 |
+
<mxPoint x="853.0542795902863" y="477.6763720899785" as="sourcePoint" />
|
| 70 |
+
<mxPoint x="1096.7157287525379" y="398.28427124746213" as="targetPoint" />
|
| 71 |
+
</mxGeometry>
|
| 72 |
+
</mxCell>
|
| 73 |
+
<mxCell id="18" value="" style="endArrow=classic;html=1;rounded=0;strokeWidth=2;fontSize=30;exitX=1;exitY=1;exitDx=0;exitDy=0;entryX=0;entryY=0;entryDx=0;entryDy=0;" edge="1" source="11" target="13" parent="1">
|
| 74 |
+
<mxGeometry width="50" height="50" relative="1" as="geometry">
|
| 75 |
+
<mxPoint x="847.24" y="691.52" as="sourcePoint" />
|
| 76 |
+
<mxPoint x="1087.7324516582958" y="785.4696923433066" as="targetPoint" />
|
| 77 |
+
</mxGeometry>
|
| 78 |
+
</mxCell>
|
| 79 |
+
<mxCell id="19" value="" style="endArrow=classic;html=1;rounded=0;strokeWidth=2;fontSize=30;exitX=0.966;exitY=0.307;exitDx=0;exitDy=0;entryX=0;entryY=1;entryDx=0;entryDy=0;exitPerimeter=0;" edge="1" source="16" target="12" parent="1">
|
| 80 |
+
<mxGeometry width="50" height="50" relative="1" as="geometry">
|
| 81 |
+
<mxPoint x="847.24" y="691.52" as="sourcePoint" />
|
| 82 |
+
<mxPoint x="1087.7324516582958" y="785.4696923433066" as="targetPoint" />
|
| 83 |
+
</mxGeometry>
|
| 84 |
+
</mxCell>
|
| 85 |
+
<mxCell id="20" value="Contextual MDP" style="text;html=1;strokeColor=none;fillColor=none;align=center;verticalAlign=middle;whiteSpace=wrap;rounded=0;strokeWidth=2;fontSize=30;" vertex="1" parent="1">
|
| 86 |
+
<mxGeometry x="930" y="440" width="60" height="30" as="geometry" />
|
| 87 |
+
</mxCell>
|
| 88 |
+
<mxCell id="21" value="Markov DCMDP" style="text;html=1;strokeColor=none;fillColor=none;align=center;verticalAlign=middle;whiteSpace=wrap;rounded=0;strokeWidth=2;fontSize=30;" vertex="1" parent="1">
|
| 89 |
+
<mxGeometry x="845" y="830" width="228" height="30" as="geometry" />
|
| 90 |
+
</mxCell>
|
| 91 |
+
<mxCell id="22" value="<font style="font-size: 30px;">`s_t`</font>" style="ellipse;whiteSpace=wrap;html=1;aspect=fixed;strokeWidth=2;fillColor=#dae8fc;strokeColor=#000000;fontSize=30;" vertex="1" parent="1">
|
| 92 |
+
<mxGeometry x="1230" y="131.72" width="80" height="80" as="geometry" />
|
| 93 |
+
</mxCell>
|
| 94 |
+
<mxCell id="23" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;entryX=0;entryY=0.5;entryDx=0;entryDy=0;strokeWidth=2;fontSize=30;" edge="1" source="24" target="25" parent="1">
|
| 95 |
+
<mxGeometry relative="1" as="geometry" />
|
| 96 |
+
</mxCell>
|
| 97 |
+
<mxCell id="24" value="<font style="font-size: 30px;">`a_t`</font>" style="ellipse;whiteSpace=wrap;html=1;aspect=fixed;strokeWidth=2;fillColor=#f5f5f5;fontColor=#333333;strokeColor=#000000;fontSize=30;" vertex="1" parent="1">
|
| 98 |
+
<mxGeometry x="1380" y="131.72" width="80" height="80" as="geometry" />
|
| 99 |
+
</mxCell>
|
| 100 |
+
<mxCell id="25" value="<font style="font-size: 30px;">`s_{t+1}`</font>" style="ellipse;whiteSpace=wrap;html=1;aspect=fixed;strokeWidth=2;fillColor=#dae8fc;strokeColor=#000000;fontSize=30;" vertex="1" parent="1">
|
| 101 |
+
<mxGeometry x="1540" y="131.72" width="80" height="80" as="geometry" />
|
| 102 |
+
</mxCell>
|
| 103 |
+
<mxCell id="26" value="<font style="font-size: 30px;">`x_{t+1}`</font>" style="ellipse;whiteSpace=wrap;html=1;aspect=fixed;strokeWidth=2;fillColor=#dae8fc;strokeColor=#000000;fontSize=30;" vertex="1" parent="1">
|
| 104 |
+
<mxGeometry x="1540" y="261.72" width="80" height="80" as="geometry" />
|
| 105 |
+
</mxCell>
|
| 106 |
+
<mxCell id="27" value="" style="curved=1;endArrow=classic;html=1;rounded=0;fontSize=30;exitX=0.758;exitY=0.093;exitDx=0;exitDy=0;entryX=0;entryY=0;entryDx=0;entryDy=0;exitPerimeter=0;strokeWidth=2;" edge="1" parent="1">
|
| 107 |
+
<mxGeometry width="50" height="50" relative="1" as="geometry">
|
| 108 |
+
<mxPoint x="1289.46" y="137.44000000000005" as="sourcePoint" />
|
| 109 |
+
<mxPoint x="1550.535728752538" y="141.71572875253787" as="targetPoint" />
|
| 110 |
+
<Array as="points">
|
| 111 |
+
<mxPoint x="1418.82" y="-10.000000000000028" />
|
| 112 |
+
</Array>
|
| 113 |
+
</mxGeometry>
|
| 114 |
+
</mxCell>
|
| 115 |
+
<mxCell id="28" value="<font style="font-size: 30px;">`x_t`</font>" style="ellipse;whiteSpace=wrap;html=1;aspect=fixed;strokeWidth=2;fillColor=#dae8fc;strokeColor=#000000;fontSize=30;" vertex="1" parent="1">
|
| 116 |
+
<mxGeometry x="1230" y="261.72" width="80" height="80" as="geometry" />
|
| 117 |
+
</mxCell>
|
| 118 |
+
<mxCell id="29" value="" style="endArrow=classic;html=1;rounded=0;strokeWidth=2;fontSize=30;exitX=0.966;exitY=0.307;exitDx=0;exitDy=0;entryX=0;entryY=1;entryDx=0;entryDy=0;exitPerimeter=0;" edge="1" source="28" target="25" parent="1">
|
| 119 |
+
<mxGeometry width="50" height="50" relative="1" as="geometry">
|
| 120 |
+
<mxPoint x="1312.24" y="203.24" as="sourcePoint" />
|
| 121 |
+
<mxPoint x="1552.7324516582958" y="297.18969234330666" as="targetPoint" />
|
| 122 |
+
</mxGeometry>
|
| 123 |
+
</mxCell>
|
| 124 |
+
<mxCell id="30" value="<font style="font-size: 30px;">`\sigma_t`</font>" style="ellipse;whiteSpace=wrap;html=1;aspect=fixed;strokeWidth=2;fillColor=#d5e8d4;strokeColor=#000000;fontSize=30;" vertex="1" parent="1">
|
| 125 |
+
<mxGeometry x="1230" y="390" width="80" height="80" as="geometry" />
|
| 126 |
+
</mxCell>
|
| 127 |
+
<mxCell id="31" value="<font style="font-size: 30px;">`\sigma_{t+1}`</font>" style="ellipse;whiteSpace=wrap;html=1;aspect=fixed;strokeWidth=2;fillColor=#d5e8d4;strokeColor=#000000;fontSize=30;" vertex="1" parent="1">
|
| 128 |
+
<mxGeometry x="1540" y="390" width="80" height="80" as="geometry" />
|
| 129 |
+
</mxCell>
|
| 130 |
+
<mxCell id="32" value="" style="endArrow=classic;html=1;rounded=0;strokeWidth=2;fontSize=30;entryX=0;entryY=0;entryDx=0;entryDy=0;" edge="1" source="22" target="31" parent="1">
|
| 131 |
+
<mxGeometry width="50" height="50" relative="1" as="geometry">
|
| 132 |
+
<mxPoint x="1310" y="191.72000000000003" as="sourcePoint" />
|
| 133 |
+
<mxPoint x="1393.535134516514" y="311.7264857853827" as="targetPoint" />
|
| 134 |
+
</mxGeometry>
|
| 135 |
+
</mxCell>
|
| 136 |
+
<mxCell id="33" value="" style="endArrow=classic;html=1;rounded=0;strokeWidth=2;fontSize=30;entryX=0.02;entryY=0.313;entryDx=0;entryDy=0;entryPerimeter=0;" edge="1" source="28" target="31" parent="1">
|
| 137 |
+
<mxGeometry width="50" height="50" relative="1" as="geometry">
|
| 138 |
+
<mxPoint x="1320" y="201.72000000000003" as="sourcePoint" />
|
| 139 |
+
<mxPoint x="1540" y="410" as="targetPoint" />
|
| 140 |
+
</mxGeometry>
|
| 141 |
+
</mxCell>
|
| 142 |
+
<mxCell id="34" value="" style="endArrow=classic;html=1;rounded=0;strokeWidth=2;fontSize=30;entryX=0.261;entryY=-0.011;entryDx=0;entryDy=0;entryPerimeter=0;" edge="1" target="31" parent="1">
|
| 143 |
+
<mxGeometry width="50" height="50" relative="1" as="geometry">
|
| 144 |
+
<mxPoint x="1430" y="211.72000000000003" as="sourcePoint" />
|
| 145 |
+
<mxPoint x="1420" y="301.72" as="targetPoint" />
|
| 146 |
+
</mxGeometry>
|
| 147 |
+
</mxCell>
|
| 148 |
+
<mxCell id="35" value="" style="endArrow=classic;html=1;rounded=0;strokeWidth=2;fontSize=30;entryX=0;entryY=0.5;entryDx=0;entryDy=0;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" source="30" target="31" parent="1">
|
| 149 |
+
<mxGeometry width="50" height="50" relative="1" as="geometry">
|
| 150 |
+
<mxPoint x="1308.2842712474621" y="340.00427124746216" as="sourcePoint" />
|
| 151 |
+
<mxPoint x="1394.7199999999998" y="386.8800000000001" as="targetPoint" />
|
| 152 |
+
</mxGeometry>
|
| 153 |
+
</mxCell>
|
| 154 |
+
<mxCell id="36" value="" style="endArrow=classic;html=1;rounded=0;strokeWidth=2;fontSize=30;exitX=0.97;exitY=0.263;exitDx=0;exitDy=0;exitPerimeter=0;entryX=0.008;entryY=0.638;entryDx=0;entryDy=0;entryPerimeter=0;" edge="1" source="30" target="26" parent="1">
|
| 155 |
+
<mxGeometry width="50" height="50" relative="1" as="geometry">
|
| 156 |
+
<mxPoint x="1466.7199999999998" y="372.60000000000014" as="sourcePoint" />
|
| 157 |
+
<mxPoint x="1543" y="316" as="targetPoint" />
|
| 158 |
+
</mxGeometry>
|
| 159 |
+
</mxCell>
|
| 160 |
+
<mxCell id="37" value="Logistic DCMDP" style="text;html=1;strokeColor=none;fillColor=none;align=center;verticalAlign=middle;whiteSpace=wrap;rounded=0;strokeWidth=2;fontSize=30;" vertex="1" parent="1">
|
| 161 |
+
<mxGeometry x="1300" y="480" width="260" height="30" as="geometry" />
|
| 162 |
+
</mxCell>
|
| 163 |
+
<mxCell id="38" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=0.5;exitY=1;exitDx=0;exitDy=0;strokeWidth=2;entryX=0.5;entryY=0;entryDx=0;entryDy=0;" edge="1" source="39" target="45" parent="1">
|
| 164 |
+
<mxGeometry relative="1" as="geometry">
|
| 165 |
+
<mxPoint x="1270" y="780" as="targetPoint" />
|
| 166 |
+
</mxGeometry>
|
| 167 |
+
</mxCell>
|
| 168 |
+
<mxCell id="39" value="<font style="font-size: 30px;">`s_t`</font>" style="ellipse;whiteSpace=wrap;html=1;aspect=fixed;strokeWidth=2;fillColor=#d5e8d4;strokeColor=#000000;fontSize=30;" vertex="1" parent="1">
|
| 169 |
+
<mxGeometry x="1230" y="630" width="80" height="80" as="geometry" />
|
| 170 |
+
</mxCell>
|
| 171 |
+
<mxCell id="40" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;entryX=0;entryY=0.5;entryDx=0;entryDy=0;strokeWidth=2;fontSize=30;" edge="1" source="41" target="43" parent="1">
|
| 172 |
+
<mxGeometry relative="1" as="geometry" />
|
| 173 |
+
</mxCell>
|
| 174 |
+
<mxCell id="41" value="<font style="font-size: 30px;">`a_t`</font>" style="ellipse;whiteSpace=wrap;html=1;aspect=fixed;strokeWidth=2;fillColor=#f5f5f5;fontColor=#333333;strokeColor=#000000;fontSize=30;" vertex="1" parent="1">
|
| 175 |
+
<mxGeometry x="1380" y="630" width="80" height="80" as="geometry" />
|
| 176 |
+
</mxCell>
|
| 177 |
+
<mxCell id="42" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=0.5;exitY=1;exitDx=0;exitDy=0;strokeWidth=2;entryX=0.5;entryY=0;entryDx=0;entryDy=0;" edge="1" source="43" target="46" parent="1">
|
| 178 |
+
<mxGeometry relative="1" as="geometry">
|
| 179 |
+
<mxPoint x="1580" y="770" as="targetPoint" />
|
| 180 |
+
</mxGeometry>
|
| 181 |
+
</mxCell>
|
| 182 |
+
<mxCell id="43" value="<font style="font-size: 30px;">`s_{t+1}`</font>" style="ellipse;whiteSpace=wrap;html=1;aspect=fixed;strokeWidth=2;fillColor=#d5e8d4;strokeColor=#000000;fontSize=30;" vertex="1" parent="1">
|
| 183 |
+
<mxGeometry x="1540" y="630" width="80" height="80" as="geometry" />
|
| 184 |
+
</mxCell>
|
| 185 |
+
<mxCell id="44" value="" style="curved=1;endArrow=classic;html=1;rounded=0;fontSize=30;exitX=0.758;exitY=0.093;exitDx=0;exitDy=0;entryX=0;entryY=0;entryDx=0;entryDy=0;exitPerimeter=0;strokeWidth=2;" edge="1" parent="1">
|
| 186 |
+
<mxGeometry width="50" height="50" relative="1" as="geometry">
|
| 187 |
+
<mxPoint x="1289.46" y="637.44" as="sourcePoint" />
|
| 188 |
+
<mxPoint x="1550.535728752538" y="641.7157287525379" as="targetPoint" />
|
| 189 |
+
<Array as="points">
|
| 190 |
+
<mxPoint x="1418.82" y="490" />
|
| 191 |
+
</Array>
|
| 192 |
+
</mxGeometry>
|
| 193 |
+
</mxCell>
|
| 194 |
+
<mxCell id="45" value="<font style="font-size: 30px;">`o_t`</font>" style="ellipse;whiteSpace=wrap;html=1;aspect=fixed;strokeWidth=2;fillColor=#dae8fc;strokeColor=#000000;fontSize=30;" vertex="1" parent="1">
|
| 195 |
+
<mxGeometry x="1230" y="755" width="80" height="80" as="geometry" />
|
| 196 |
+
</mxCell>
|
| 197 |
+
<mxCell id="46" value="<font style="font-size: 30px;">`o_{t+1}`</font>" style="ellipse;whiteSpace=wrap;html=1;aspect=fixed;strokeWidth=2;fillColor=#dae8fc;strokeColor=#000000;fontSize=30;" vertex="1" parent="1">
|
| 198 |
+
<mxGeometry x="1540" y="760" width="80" height="80" as="geometry" />
|
| 199 |
+
</mxCell>
|
| 200 |
+
<mxCell id="47" value="POMDP" style="text;html=1;strokeColor=none;fillColor=none;align=center;verticalAlign=middle;whiteSpace=wrap;rounded=0;strokeWidth=2;fontSize=30;" vertex="1" parent="1">
|
| 201 |
+
<mxGeometry x="1316" y="830" width="228" height="30" as="geometry" />
|
| 202 |
+
</mxCell>
|
| 203 |
+
</root>
|
| 204 |
+
</mxGraphModel>
|
| 205 |
+
</diagram>
|
| 206 |
+
</mxfile>
|