Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes. See raw diff
- 2003.13951/main_diagram/main_diagram.drawio +1 -0
- 2003.13951/main_diagram/main_diagram.pdf +0 -0
- 2003.13951/paper_text/intro_method.md +60 -0
- 2010.12810/main_diagram/main_diagram.drawio +1 -0
- 2010.12810/main_diagram/main_diagram.pdf +0 -0
- 2010.12810/paper_text/intro_method.md +185 -0
- 2103.00673/main_diagram/main_diagram.drawio +1 -0
- 2103.00673/main_diagram/main_diagram.pdf +0 -0
- 2103.00673/paper_text/intro_method.md +177 -0
- 2106.11539/main_diagram/main_diagram.drawio +0 -0
- 2107.12309/main_diagram/main_diagram.drawio +0 -0
- 2107.12309/paper_text/intro_method.md +91 -0
- 2109.02639/main_diagram/main_diagram.drawio +1 -0
- 2109.02639/main_diagram/main_diagram.pdf +0 -0
- 2109.02639/paper_text/intro_method.md +5 -0
- 2109.06253/main_diagram/main_diagram.drawio +1 -0
- 2109.06253/main_diagram/main_diagram.pdf +0 -0
- 2109.06253/paper_text/intro_method.md +85 -0
- 2201.01666/main_diagram/main_diagram.drawio +1 -0
- 2201.01666/main_diagram/main_diagram.pdf +0 -0
- 2201.01666/paper_text/intro_method.md +161 -0
- 2201.04450/main_diagram/main_diagram.drawio +1 -0
- 2201.04450/paper_text/intro_method.md +19 -0
- 2202.09852/paper_text/intro_method.md +102 -0
- 2203.07319/main_diagram/main_diagram.drawio +0 -0
- 2203.07319/paper_text/intro_method.md +63 -0
- 2203.08481/main_diagram/main_diagram.drawio +0 -0
- 2203.08481/paper_text/intro_method.md +75 -0
- 2203.09824/main_diagram/main_diagram.drawio +0 -0
- 2203.09824/paper_text/intro_method.md +107 -0
- 2204.12665/main_diagram/main_diagram.drawio +1 -0
- 2204.12665/main_diagram/main_diagram.pdf +0 -0
- 2204.12665/paper_text/intro_method.md +157 -0
- 2204.13516/main_diagram/main_diagram.drawio +1 -0
- 2204.13516/main_diagram/main_diagram.pdf +0 -0
- 2204.13516/paper_text/intro_method.md +161 -0
- 2205.01358/main_diagram/main_diagram.drawio +1 -0
- 2205.01358/main_diagram/main_diagram.pdf +0 -0
- 2205.01358/paper_text/intro_method.md +119 -0
- 2205.11258/main_diagram/main_diagram.drawio +1 -0
- 2205.11258/main_diagram/main_diagram.pdf +0 -0
- 2205.11258/paper_text/intro_method.md +89 -0
- 2205.11894/main_diagram/main_diagram.drawio +0 -0
- 2205.11894/paper_text/intro_method.md +213 -0
- 2206.01299/main_diagram/main_diagram.drawio +1 -0
- 2206.01299/main_diagram/main_diagram.pdf +0 -0
- 2206.01299/paper_text/intro_method.md +142 -0
- 2206.06614/main_diagram/main_diagram.drawio +0 -0
- 2206.06614/paper_text/intro_method.md +126 -0
- 2206.10027/main_diagram/main_diagram.drawio +1 -0
2003.13951/main_diagram/main_diagram.drawio
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
<mxfile host="www.draw.io" modified="2019-11-14T13:49:59.385Z" agent="Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36" version="12.2.6" etag="tas7PG4JZbc4CcnBgxXw" type="google" pages="2"><diagram id="ijLP0poUjWmY-Py2JvF3">rbzZsqs4sy76NHV5IgAbN5f0BkyPwHAHGNODaGyDn36nPMasqj/WXnvtE+dUhOcAIaWkVOaXXwpUf+2EblWmBJfGcM/bvxjqvv61E/9imD2928MfUrL9lJzJHSkopur+U0T/U+BVn/y3kPotfVb3fP6PisswtEuF/7MwG/o+z5b/KEumaXj/Z7XH0P5nrzgp8v9S4GVJ+19Lw+q+lD+lJ+b4T/klr4ryT8/04fzzpEv+VP6dyVwm9+H9r6Kd9NdOmIZh+bnqViFvifL+6OWnnfzfPP17YFPeL/83DX71/kra5+/cfse1bH8mC0PE5LLqvlrhX/m0VKCLa5LmrT3M1VINPTxPh2UZOqjQkgd8kjXFNDz7uzC0w/QVtXt8//uXDK6tCtJ2GTCUJjP+Wa1HteYwQP7bJfenlPpTAtf3ZEn+2nE/t4xc47z4ixHg6lzDP3uOc7wm1tyC4zlH4mKObzgORiAzb17kuELhi0zhneLCvzNNcApd5B1T5OaryDee9D6hizs8LiWVQIVCE/a9Kqizur4zXczwVeRWU3Soy5axtl9strD/mOL1j3SnAYlvkFSYoltsqvsujVrd/r/9iPQ3x7kCJx45ihM5T+W4UuK5VeLh0cnhLvCYcxCZ73fO//wnCdzbkWHGquAM15/aiwsPCknk3qrEvZEMTaBAPnOOCvpxXN5VSwNJikTLJb9pqyzqfJNIqkrp69sNPOrBNQajbUXR6HKZRYo7tFqVDbo3UKYf7SyxOTiUK7vNXfUQ9gM5CAO6jMPObOIw7hOlHbOdS9/7+z6/jWKphkq5r7QK636rhWHLVnE36jXWww4fKjxO+me5hsxyrNgnLNNq3JT1VGvbcvVp8xbS5zpmnpcKgIX3W/b5GA75tJ5eNnV+pDsotPvDHz2Bbt7SHz39TPz/oCeJ6EmAaiLPZT96crTdt/YqSbwnrXyp8ci5l5lrVMYgqbKku0N64QXP+KAXF0T91RXKqDEtqT25X111oDms14gxPwVrNabkovjiSa2OaDcI2nsUBriOlaCLmRInvUndb/Euv7SHYrCkMkouldrpDbZQGye3TuuSYbQanCT9qHfjPFnUkia757U7vGdbWrP0shm9Ti02ou/pjTH7ZP+0GzZP+4PVj/9FS+cBDMzgOU51YY6c/RbrUN2hvRmom/RCT6iSJOflbQ+t8YxZjNVPkPnpU59j3gnEOqLQM0pp09RAZNLy3AGtDIeHtsmgwAj86ERrti6+xSDqnpUw132vHzWDUjqOeiZDtaeqVpZlrqTVxDd5k3sPH9+DSyNYE903lYQXKLQjf28XtNaOD0+jbsWmCbVNDTdmHY7zUTQ9bymC2yRM1yFbwnApac1DS3TsnLVEsexL6F13Q1OFlS3L9upJgVyKideqGI8hV6OtTbp4VFaBWjTX48KqQErBDRib2nN48qquJ6LH4h0XNqas+l7Ee6g0ta7hFs2Lh4/35A6tbHz8mXpfql4Y3kkuBlrhCX2NfRnvJ1d0BvnqukEty1dPaMfGCKuq6jiE0IBrafULPWxl1Jhqpmd6Aj/aHco3pk3ZmlxllHHiFWgrE13X+dJ1PYkSUhkvSqLzN6FqPh47SKBNWeWgZtUZd9dPDEp8Wlj3qiJ4Y9dDewCV0vUPCe+f0MdVHUqc/BkhcQ6Pzt0PVj4QdqkYTa5zCmq0s8Z2tuVrDfoYJmmn1BO09T/gekOk3mTdeoV
9+2g2PVYsdkwPMkNr7lvnP13X9RqSOxuMSNG7ZlQfOT6V7DTl7HuKyl0bftCemFj7TiaTxuX4DOys7xLmGj/XQ8BrmVwj3tR8JJVWGu2XrjZNnNPIuXbtx090R3wnYRcPVwRzJHaTPgdtmPU9ilcvxkISTRWLDYppcLO7wCL5iVsXiM8CzPHIbT/IXxpqZzosY2jB3Usfjo/UpOdHr81udpZqH2T6y6ZXZkTLY/TZKdSho/aBoEmmkWARpXcdLaURiR+3q6q5cjCG5cKGoK2+Qev7/SjRrerEOKOYquFgsqYZATlwumrPg9kMo3PRk0c3rGBbuLC/xn+Hse84RDHtxwvUb7WBC1uy0NyloM4+cvDeRB1vLV7ATYGuTOFVsXmcxF17Caen8fBI40CNADQzasCFrt+7jnhcdDR1ANu3aRrS5qA9eivtJxzjofM46jAUYLRhBXcWThx4EsgVVZrmdY9x3rCakFH2x5PoyxMm9Iz6jijVQ4hTkrBZwTg52eIGDu/D0YU5Ms2KOt2puiCqTW0xvdKgVZnDodg8nbE0bek2oaeOhGJx3Asb4bfX73GGbjrrQuPRl02d4qPUrfo0MRduEtaT2S2KCnq8Bjz/CRblvbg2YvBBnu4FomwU7rteF8ObnmbrlR49M4lGVvZVI+i1bkY2CaIldp6iq/DPLbkBTql0l9QlzOn+tPTGiWUrdpugYZOuqofsZL/Q1HfN7UiLfUMfbHTaDa7piz4DJtvEc5R4vdKYyNM8qoUlkolGuoTnu0b7olk01QX4YTGYHN+1o9av/gMwpO88DZDOJdCpOxu6Oc+79EzK9rF4is+Nz8+xG7XPXOsYAX1kQKEdi5P1mZ3fZAGHwVc664reXdF1DYU9CRwtA7DUwvtGpB66hsVChrpc9xt9D2NLoL94J13dX8QqR7NEJa6Dq+Q2qLVMDXGAqn7toUKAMONQhQAr5zpcBxMosd7tweX0SPSLJIkAmWPwLu5HnJvx9NVPbqbBM0dZ10R0l+X3Vbc77CTj2neTTi/8TvEAqeRrOCpXD9BvdFaYV22qCSealmyufglkDcWDaWgjcxSfHj826BTcdoE6BUg7tG7jH586OJEehbi0zUDHjJ4xoIgOfDOezC3us1z0aXZU70lXX+SbGLQuHZ2ElhuGKdt5oqzzLoHSpnzLFuY96uPcwHLxgN+Douh3iBzgO7qeAqIq1oCfibNUxX3zmvH60W1DBia0abnOJdWOxe+kemerLlb9SDsFrS5ai1usz6Ki4xHWQW89b8auOWzXUQJD3snOpJ1eYRsU/o+hWjW0Ct6OiN5pBUFqP88DaZiHW4IFv8O8toZ+kepDRMJ2jif4l7uMCCBVOdC7s//Ji3fXKb3SdaMataZpYdGXYC2dxOs8WesKqjUa34TIRbVcuVFGgu56RHOMY+VJ7DMHaifHM4vTCrVXt8KaslCjjitLL+xwxl1WwtJqLrppMmBdA3FRfj91/xusr+j2jTucoDTY9xAEd4qBodAiKb56kuZJ6NB1ZeECCu0pZmY4CPs85jRPAU05QgQASC2cKOjJG7N4Cq6OqCOlowJOamH0tYm4Bf62H4cbgA34YKRP7glGKVQFvy0FnSGWGzs+aC0+XMzCF35CsM74+kUA7JRvGZ9RCLp9X+BZByEabE8xDdNOusAoosL09CT2qjIRHNCuBRG0Gaeao4g6Go51Gzq6cKPMQxzMu0mZTZ7WdYDQYK4DoSIiISiZkiUrSdrLZny4FqwGDp0J+nj8eOnnQIGDw7LS7v3jxeOoeaK/XrvS8/XANXpiwENB0OBmouAjwR3zIBwHaMZcoXJNatOAEP50s4bH2+JzOoIIbvSI1oFfmX5xlmvX0860HRymcswUHOqoYjRLz5pODJDSp3NgA7Fws9ORP1Cnm7rWZlyD+X/8A/WKcXlMVmKxuQXDvrbZPdl8Hf5NNHieb06+uxK7PHsQrt/kwg8P1Fv9+D4Nq4YP1BZqvu+PAA4djNw
DO2lKP7l7Sqd6oEOc8PQgXVEaqHnrmo7uKvmiVS3gzrxGne6DN+pts4jJ9HrToRjcYSVeypw819Yg8QiC/vqMa+jAoJDBLwpQoYYDLJOoGN+AtXUsATYVUVuPKAq4VYwBAbTnmFYXAENgCIBY3ZujyoEEVw4gDikN16pJWFCFNJBQ63ULmYLP1cNgyjFn8YQ53ExrL7Dy1/YgegvsAClLtRfHsLl4vW50oyNVgMZ4BILwkjlbtnhcYDpRIeZawAw96lhfEKLQYXTE0i9peLofKZOjwQbz4YCWd4QOALiy4PDefoCwTaancxqwRxNiYEEvcteENs72mFCIiZ3kEyyD/PBygI0qiXxkDZRzqLWuA1Nkkhoo4WR+0p2WdeftO/Bq5Dhwi4hvaGMMkMQEslm3B3Q0Vb5qIvo1149YkIAVARsfr116XwmDMNACIev+5V0GpC4rk2SirxuUfNMc2hIDQfTnvWxgT1u1Aq04t/SSxKZGo3n2oWGTNnASfOwuggHfveNYE8pEsriIkX9MCb/3pko795UEqixxaFdUKIo6NKbhUN8L3IAfkLC0ir43WsDIJ7An6Jz2bXRFQmMS/qUmzRZBsAobbECcml7nPR2bQcyX43s8KmgFYIeJAMH7yGASDonTKfB6DevBE/I8cDmDVm4s5uj49K6cxffOwNpgLiznHQkup2xrsKuf0dfmrQlMkn4jc/D+QiNY0A0gChg393EJVHJXIATlwIHJQsqgchoMEmZ35VKIMxqYMagRtd5adoCFCKk8kKlBRqW4+jS4FwLVNI5WJJle82AJCfBE2ZbdgjopkyMAIR9Tf6aj1aVwAlfUWlX9lcatdZuCVSk/ZyB8x7NeDd3ZyVlVRxTt+7UfviGF6RIdhk3ajdyFo46mCbG50n8mgneXLAE8V5rJnAvgaQ126cERCdMDDF0QkMPDte6wC0zN+oSHoLlLCDx+BbdbIUUTBf+pua6jN0VY1RB1A9FBwPgcShaoCHdH2Q4K171zXSerK1YsWJhy9psr6BPcq5zL+UeV+OlnN/P6VfO4P3Zs3TUJcEUs84dYcf0s0BbtgPJwd6aoYzUDVeowGJra2K4Cf/XGXEii//mkp/FhlNZVZ7Hem9PPdsuJ+daBpf14ZDvg577/qJDaRMUwordljf1pjJB6RMdmMWoVH4Q6SVAmFJ6zjlUUGBDbZMC9SqUoWg1MQ8WK70DqscrmGDqrGISI8P+grdcghq6W7BwfFNOpwNyqgnrT29I5p142rmAl0GQV6ZQtffZTWUN7+Qj4iB+7CI/Haj0LbVcokLsaQaXee0Da/bD6XBNkK80Hm5wNSZHL7xV6QTQanlskBVd/5dXH/quN9lT58Gy/YyEewMVb1PUHK/Xn9bXh94J9yqGgOLxF90qvpgujp9SFpdYpCayR2vnNQsmdsx+AeXl4Bmg3LcjMONmElgDgxTesk7watfwAf1pCjcHAXZek1i3m3YCDfOVtGgPQu1XamzY/PhW8z5dAFTIFoO4mUuebHGLOB0RHN0DyvhNGtuGjHYrHF7twDT4dlLBIGie41Q91fLfJ+74Omv2kOf+DqJ0QhB9vtJGWNlTM4uNFdtaeLdEnq8bqOMizCTHgUgBj9jOexF2kDCdqb/q+wOqiV9AKYBqdRMcaMl4vHQn/Fp+QkLoerRYDJkR4pYaP8/KqRSjfWdWeNoBh/+4e3ZplfH4mWhxk0+foTLvp1bGiFlUPF9HJeP9t6REmiVztmQVDK3umNn20OQ81o8xEbBahC9kBAKiTTmDOT22L11t8ZUtTV0al15Oke6PqYUKUAR+h2AqSXZhQPOCbsD+aXy6OOvNyn7DQH1dLf8DSx+3zhSQaS3vZgWyG1VfZC4824ZMRoimIVSFvVUKv6La9I0lE4MUrM6SDWGGWNY9tNqOlsrfmTDQWaGySt96c6/ETMttZadxWOvO+r+CjgJWkY0eKcaiyb4bH/ivNyLcyywZ95iAT/DiHZjg7514+i9e
Hu1cOXH0IBKYkwxHYKVU5/JLD2Uc39MkV/AL6e1D6k99Qf32Dxhrx9CecqlZoKFuirf1VoJ5r82C+ad7d1E11eZ7uqbI1xv16oO2nfZj8o2JqAz3v39J61jE4PH+hoXrRqMQr78JGcCVZV8LV2NYWEE4H1kqdtHW8MhgHLwlyPY8Um5LvbxelwmKnGXOwPNzSekhTGa/FPj4f5PXsIr4EyjG9WOlDy/HpPJbbfSAuONaKVFdjfE/j/gIdOexBTVCYrWQk5l2ivDHjhIG5QYIfJZeM0K1RS+d1P8nF+3oKTDs97s/Dgb6nsruE4/BGKX9U9KBWE6q5nKykMq71iKeMwxl3LsPru7nNH3N/vyuR9a4Xv2jT63udrUAOGODepyV+CUuaNN7gubeF3q9BG6yeJgWmt58GTB2vaJSuV0TytsV7A1UCAu8kuu5L22KmI8FbBZgoA0FZGSC+Iuymz2irqvrcJQWzewUzRVXzxavciNSO6WNXb9L50IPCvb3q3gzhXCV1crnkkzuah0+G1LEV9HQZ72QXL3qqEiSzshdhc/H2hbECLTomPuRpjrLHzmIx1EnohsJglF7QamaMgzEIIHGk3npZx/kAOcHzmDQBl9PhGKzdHYg7jP2uXHS42ioY1ZMeZtOEFFaMWxQMCz6wxHIrnUDmpyR8Cz+CQx/Z8XZUHl49haQwJkzsBT/s1f7b3o/Jm/bny8zeAUAbYcpiMO2TWz9Sn7AeMXD86mh4VHR4HNsgWfxsCtLUDciGhJbWJBidg37Up1Hvb9jHRVlnaOtfvo6W1YAE1i+pgQPeWRGi4UE407lLDLl2ooejr9gD5LV0otjylYGr80i5idrtgZUDEgpdmmzN4imQtEJuiXL9Jgupoy3Su5H5RtFLLJKBUPHN2vxPJaLxxtnhuGhdM86NMHru43R2d6MVsqjmxt7177lP6ffEhzixZVRLUrb4ZhQmpEi9yZlaWpANJki7ppC6e40GFOJpJfdAk0OFQkf5Wo2sy9i+ICR+lCxXCNXOITidj0F6+UZj1MkGO763E7WLj8GMD0paudl+NAo6ZkhWZeqB12mrvvK4E7vJ9TpZp9F24eqbGaIbm9SBPuuGMB8gwXZVEDfGYFRhK9eJJHuqnshj4KJPtB9uskq2XZOZQpu8IjGIs4B7T7j4KIla08Df2LBrIPHTmlQ0A0xg3qFTdVScmJ9CEbjueDByD6Ka4j5aM5Goe/C5NFLArE3+sPhuoXGJj5nhHhCz1K0kW5A+A3EkOyCQCouzK4NZIwaMnXuikkFp0q1eNMhbw8tahSu9Vt+Je62j2EN7YBxXg1GTyPUhFwuz2nKBFCmjQ72rOVY/MaRZkHHZOtUxc9f5UaXiWjT9o+7H6HmVo7FN/DiYtoN9He24jMYUnoDqT896O9zm9xyYrTaPO/ausY/FO3fqeNTRNei3g78dLIevBFzoQM1GJFaLOiScgAcKFkeYjMeOv7lefe8a+V63ENF11/Xf+oGaLoGi7mQNfRM/AnAo4f3CAmO8Vp2pDjs9aj7IuZg3TO3c5nzA+6kZ34vObnTEELiW1VGdEAYlzlhnm3WQ/dHSAVb1UaAvUqxaZ6cJouai6BVD4Qkpi9vpYR8NKwrl787iHr+9btgJ66VqGVpXD3njRnuOHWk/1VG4cULPBQPGtRRHewxp/8uhE0gQYLjT5yiLsiXQsQGJSt/uR9qL8i1RDeC4LayZPjViEwsBoJV/5N0Z5x+RiW8MlSZzfiORJ3CPqh0bU5okAajX1JdF3e0QSZIc1Hsig6p2HphgqP0Uv57MiUDNXvTL4QytdztIIQhh5Sfz7e0iAkvelTC77bwozBIdxLG/HUUT6uw+yE/sUXjkM1t+WS8/SsMrLk52cGNu5w+UbcYx9dLk2b2f9IGhSKDaC0mSetk9im2QS166BVkpYv/mtj7w3ytiiwbW8ULL2qwOgXFTV8LaFk62uSk3fM9wn0wrUCAY1CMtrnJMALthAfw52Po
prhRQ4jtSsng8nV+fXnaPF+d0qV6lHBwvzP3YoZNtO7QUzfmSqo61iqLsegzDd8/K0g5G0MTh7gJRw/am1N3b3rAt6YgzMOvpiDoqsFzf0ym0hkDazrD4ZIs5e4dqRSKJhHYywsJsQlKzJ6nlcSddPaXhB23wuCipEuyjeEo0CYj9u8I+OO6UXI2wUiGhGuOxSidgKLobtuE4XT/B5gRhFNBOvH1C3Ny8VPvUQ1LKt+lsQcB35Du63S+8GTZvnNxkRR8ahPZCB+ktKsDFgVZygIgY67zoo+s96ZKhspJYKku82Ax4ME6MrjNMc0VPpk7R09xyt7GYhqHtZWCoV1ydOyfVzRb822yvTJR/CMnf6jioY3cezt2VGCHKb7/3zWXeXuxd2fC5VbaWWGT72A5mG5pwXzPOV4DcUt02WVvDoFcsnLvgdcXOTxVocgMRy/gugBYwaLhVUym2cRaF0QlBRIYclTH3FSTASOa9PeTmBad+QdONMjo/BmEsdkOvsG1Q5ytfwVzM+TyabURiblxuRilQ52+wv6WgFW6QAHhNqLVgsxULsYJsZEWvFqr34XLzX+Kye0hGcHradDKzmhOwiTr2zhho2SzfO6tJrUzHNjUybavzbatOi3pXlqMgCoPAWKGJWFVuHhyY2vRuZ/JdixxPGGdH2opOTb0Eh3VmdkfmVi3Q6dVrLCH4QGhOcbrejSBdkUJSKIt9idzTfhkb5sZroLs0vlYx1kRwcOVZSWnaSewuL+gnpB16u3b9VfGDnTHd9DWEFVvSmFWYAUf9zN8gIu4vdB8Ir4bJCvT1Yl/kJn26YdTmWFOzxMCpytrbltOelr5URnLmhyO8ou87WlH6GNOhvWOlvyPWvJ+JixeiYypxrKaOtxHmfbmedoeAXVIrkUw9YfvzprVH/bJ6r0NC+hQ5u3+up8uBfezNF56Pek7AqHr2hJfvvcMxi4xT8v7uUl9ddjqI5LW7wllg8qcN95bWnsz29Dp9gJTLD1E87r5zMXKgAO+irN5p5+f5npswsKXHIU0KPpLIi8Qn2LrLjOo2HYSXOe67hfAUK318zKDWd0x6pdzWyZWjJUWhyQMD3D87R7U4MlFhUjPprGee7QQfxm31V9CQ7fQmeM2VOLhOGjwI8pZBzT4Qii+jflX7vt3p8RyFhJBJJaHhEJwGmlDF3RSSIBTwwd7jlH7c23nXkCr0eI3nNbYFpvYfD4rZ2SeO2QBoHtytfEYPE5tAFXmBC+goyhddsJNFthKGDOf1OHHd9fAJjvM+9/fEA2RsPfvycab2k/l6FOd93pHFjz+2sJuz6uUtZHrPqljHnNHpusrcS5KSAZfnpCfJjfZgL2VItkT4DspL0cbnbX5fTtc6SQgfPZ3EXL1qG9CD7XSnFfdGa2x0yVtGAPzuCSXUqdeNmAtkXnKo2nvS552I7B5aTImBbInRh7zjPC7WpTwGPjXHr5+gkZBoqck3S/gmP51g3nfOow3JNwhP+sGSZE5LRmp6kE97SDe8y+6uYjXWa2SmaA7QrmKBD57whUJ62ee3I6pdaaoWcUui6DB15iNRXI3/UON8Ci06rd8FpF9X5q0egklZosV3S6PTa52ePMYh4BOO0nxVc/cW++Po5rXO6IKe5Xp6c0rR9wgEY0FSge2t5CoCqkL2hRdB6TGKR4dWErpyNoOiqFPVdQuKjTqBmBCMVbIMHar4QxEFwEDsmyuxuPUON8MdywXo4Nfat2My5bebzzPuSsv3RiFo/sVZQOadynpiktz4MSVILL9Z7wLB4ssqruXWaIQFEriu9hm+qfi5TXZLpfp9zcqQVJoJuXOzmxqcCXX7zMPpCXV/H8L0by5QOTeznCnVb3HQxy57h26EhfCN+PRsTi9meF7UYIFeQDgx60tL3cRGZ0gs6Qlb/FZ4XX9aAFeEoclp7KX6kt9+5U3s/dLSyc/j6nsP8m5klqR3IJLmEj6u5/FX2owiM4Tr73AnErl0I7DZ+/UrLnRQt/v
nOYbpyD/T8b5zIhKBWsEAvmUr8/lYgOG+exfO/cOVHZcd1/TwuGZP2fVrespnW/xIR8ys/ULsMWGkTJFdt3oqr/pwtw71+HkN8V3P57MUd31+UdVqeppkI/h859RSGhCXBaosJdk9qdro6B7ovXmTAv00bUCi9VpoJieo6ofUsy4yp4RbetnrVMimps0VdIMT23HXBueed6st97v2YziuPsZHYfYR5vY6vdzWU0dzl1039WnBfrrFEC/dw/rYsrdcWrfy3SiihWByGl8bL5vnU9Mu2ihbakVUleWuxiE7MMV2wDnEpxPk3JhZXgVAB8G7F2b7F46Xl3A/nKDAMG567WVDCokdXmv501PWeCJ7peTTOt/UKi/PtfhYT23ZCM9PIDuR6wxnzkCfXELcfYy0yr9tzaHBr647zqd8QmPQ5/5Vls1gBq4T8a4n7VdITDmDwE/LcHszjl+zWXQTM+ieeLM+06Jpthl5OT5AXr22F7Tbp4O5k33loR7ojyRT26s2k1vfb3cmgWwAl3nQsnh+rIH/NFFd7K8n2ssPWqok75RxunjubVO4f4YhMr3H6+LUt+GQmY8K0Rv/kLlmfIIrnB1sD/fuVSnfPaFQXfI6KuKpdDW0eRcS14Qk6GorvKGaSi4oYvk+tz/jtrwJIlZy/nDA5fYaeZW46Aysjcd3dZ/Y9sim6T68j4p415NYFo9enqxTivwe5uor7M5P31k3sZ8iV5o+7gZnqvnRiOoHP44toxl7S5EJGafGN3tEx6FmsnbIVk3uzeBSsVPY1AQDk7edlcg4racA0s8bU40ytTp3KW4h+Vt8r1+CsvT5Lbl2YxNc6k+gs8VOza2E3vnVSFXxoZpq/fNM3l22GhjRYrcQHrhsdQpZ2idU9HPSml5FIk2kdJREldNddRbTvHYNroSua001wYIiIWDOepRQ7lUF6zLtFKNU+WKkWSNf6ee37Mzvaajqvtm0DxL7CndKUusB5VJWHjDBtgCIrQSaHkZ8KoG+/pLRnLY7eZMfBHTmo53U8RHYPcG5nAacUT7MAFcX49p8SXADSEXa9h8eGv1Dcru/YY1QYrZghdcXt2IClxZByS+ghff28oOE1g/wnZ7/QGNG4PWLcz/4CnIInv7gHxnOh8GkTvCts6R2Alwaav5bOgylh6HsvkHgZyw/MjR4vPx5/Povj3+GClI+gMC3byJPQyN4vv49KJIO2D/Sf0ZMf1VB5q5+AXUC8bFLNBO5aSfUzyygljrW0vV6GnZS1s0VXZH01J5u/sD51VO8BOSN13GWvXDursoL8hrgCiijjTa/ovuHlux3aAMTk+0QC5z0uVlsqARNqWT7EbnNxvWD3gUuG+U9LmhprCXCeDCPtUckzFW40z43tDP0Z94GylzsnAAwQB4lVOvYQ2e/E5Pe9srUAXpAvc6dkVe02jWzf2aeC9vy15+t0+EdpahebrNd6G+5HJw2eth6Rq/nchC3J47F2syrUCDMA0wWCON6a/Y7JWcWOb1casIXzCuT3Z43HD8TdbclZRSPiZbsLcyKHvJYJWlHja+wG1aWFe6lWOYOkPCsMjE8nzX2Uzs9jc5pxixg0JZHnb7hq0rWwu1mvNK2827vTog34tzyUzbD4nZqruHetP37eeuv+CAOIsrA7kOj+PSl4uVtTLOJyXYQew7ns20HwYhNfJROkxsPFI42/f6pmZn1KUR9jpqBWYdmvCR7HBcC/W++O5bbEnw8q2RL+SJVfEmHvd5Z9L26q6hvXa92h+cB3aKe7MAlpl9NmbUPni+gRGQHIyFMeN7cvBjDzjvd/CaIenU8i8+LKRzuGq3ifX239OvFtLI4lbKm1tTL7ZTcr1lK8/x7l/c5+igDm8ab47qj9/QOT9+DXDpeCzpr3i49Pvebruiizt5Mx3PpVAGKrTsZqnOjzz6stCLboZNWDpiwOOxtp45YqEis3DfEClLSCANlR9r+fJCU2cqNYeWnoZdja1Ct5Z4rfRLUmHx9UTb
95KHpnsTdK/T4evSBAZrGNUjqrJ3bTXeaQHya2tMMNoGtnEYYmAjRHS4/YRNJ4MqGPm2Wl+uNquFuWwasrg2zcUUH2BMRh1tyGLLwImBmwz9fCPjyNIJXJL/O9k+n+CIccUifuPi39rd13cubRKoFJN3/gx/HP/n+xbo2qtn+C/oIUp3b6w98IgJAty8HTL+17R9cbL/SgQn+1Fa2XywoCbS6kIv/VidP++VfBT/tSVc/Aqzf0UB4qf/g0+uby/8ALYweGKL7ZaRkNPnn37j7h39+RT2sP+zT+qOV7zyJIOjwn8qEqcIPn/6e25f8fsqj4a0+mKcHPA9FkTJ3H1/exHDdTZ851k/U/SnXiQYm4I16ze1HBxCsuceq3t54RS80fFDm+OoI2Z7ZH9HZElFyolWBD68Sjxr6KorPh6JLqjaySSEfPq2GJa+NlSlJ7hLlaG8RnfJejbNrQWsJd+T4TX9dJXMfQUGR+2jy/ByY12QiJKYBjgqRwipj8R0uEspz2YzdUlrpLLxv9nhgTX+wdKy+e425lBk6VNVkZd+vRISaO5O/ElNTA/zIPtJTWy7Yk2UbQc3iOqdsIs66fb1RmJbl4rm+2umAyhruz7Jc7qK3O1o0X6Fz6R/0K3+vbf128TrITLRDYLpuzGHz7kkhFR2RsnNqIAWyvl0+27G3NmGoskAPxeDWWBRdfOj9dTAezGhAMDfKEvkncFc7mrZAoD3v0nWSKPMQ39XwIjt2UB4mTxJKhfLIR8H8pNPqKZq8Np/JpkOzZZAr5yY7HAb1c7FNQDLF99LkjBNVMUxDHVOyd4m92aO+m0EMQ8hB8KlYT6pNNRgf6RFBzO3BptsfuvAnAsuJdCr5Gf8YLTx5LBwriMAkOubHBc/DL3/4bqJBDTvhv56xgFjiDY+v2xGS8oBmxHb/1WA7kIj824L4IUl24JeI5+7H2X5IxfW7uT6Q7bi/nQCaHQ0w+9vP7h3OqcvfYsCBckr8088xiH8bkArHPy5+ZQisged8udHfQv/Tr8l8v176x21hFOB7v57+JQ5M8SSp4t+7gsV/4MiPgH8N+t9e/h3kr5d/tcv/h9f/zhgLLTAcFRHFwJzrBa2GGDZLoNLMrC/T7Xpr7ztzTOZJKg7oRL5dBtZuKqu2fFB1LVDVBEbBTwKaOAHtqeeuahQn2XaS71bTfBFFGQ3PJQkm4TNbYmh89OGo7hZ7Q0pC0XKQWpJa7CdIvjbP5qeP36xUNN6wM9La/Anw5aKHwwiRwJYDGlW4kVEt5Bgcww06FWI94r7fQfJQ4HD/wz3/mUYZBGG3m4yVNYPOSliwWPHpjgZifOy1c6AcY6wcQnxLJ5QanJ6z7Pg8oosr3xTPMgK/fUxorvcyim+aGciXwK/czaYn7ZnN7HhW/SAetnWCSFEde69B68Vf2vChr0A37/bizAHhnBNf48cVExsU1oDk/s/fDP2Irr908kO2C+Dnn0e5+kniw61r9WSs5j8oTTxlvOggk2w5LOfRT8F1oOD8WrxfUwmOzk3/aRCSEHNEFvqxq3u/eMRKmSHDwJCeX4ngO4vDesuvcfYbcdeWoD2Etq29gaE//2xPXIEHk8rTr4Mtefjw/pEA1pn8horhO8zHl/Q+ftnwVwLzhr6BLv++Dft3va9CIGG121/+/fqnhv1Pje+Exu/uw586RLGMvLX+V5E7wtapnBFbvf9bjj6c53/LIT4EOPXL5Gcb/60ZRMg8UcIUQhCEm6+6iEK+suPhRD47kVOe9PejLCLxQuGHDmJINwTyoKNx6qF4OA1/6jsuBDdgsLXi+neMLZxdRf1m2vtj1YBfHoK1+n7QvLhLLwdB8Ajx8Oay47iOmhHnDsmVbUHfhYPKfILWUJ5eWF6vwZLWqianLVZkbj/pi1jU1JQtJ4mrp5ut7WLyhl1rPm55cYJELHMZ3CEKufqzGxP1+EwSW+0Wb6tn/1NNwqmuxmE/rsp+pLKZZrQ39Erp9zemwyb7pAI5VqP
+RMWKvB7/ftb5P92zz+icjHMWpN2RRskz7p/oXGk1EjvMHAJhP1iVdsvo2wMHuERiVRxUY9hidhwO3SI4wvgp4sm7WqOoa/WW2DopwFmAPzP1DGydrA/lepFs99FNxxVZAIt3yT4+RNonWBmGNrsCdbp1L7Px+/UNmMfEDW/b6z/zlZgD2NACgdMwTT86ttlKUTgoKGZEjMXyJE0MQgMNHD4dBfZ2GuttXCJZ4SGsrHK6vcAaJmZcCxTJF+v9fa0DfgS/0BBqUvQMyA4t2Mq4VrV8JUP9U4mOV68QWjKQ9AcSfsX8tCHzUTmc/akBLvh3R98a1x+vnCWheX+nWJzGFhoBKEI/E3nn5JPhYod3f2ZLhtszI8XpNxAMM0Jwix8aCGnQmyIlYCqXpmbGsKq76KbWgu5vJVGpLvDfou/wOQQ54wMYkgH86vQhqll8l3VggHmr8D3+dvjYXC8Pgp7Dxu+iODnSbV3296BCqBJsYsQzwBMGfYku2HGMMzBmecNzBHPTU3rccwZ68ocvqTCu7WXrzmAJ5i0TGvWC3aNbfmR7G32Y2lTCeNLip+p9W1j1rDtvkRrZ75gOEDZh9Fpxhjx5fwzu0N7PYPgEp2CmERVBJLpMPjoEeh/OGPIm5pYGlWDU6nRfjLLIh0kg+6AD3JI3M8kb4gdxteuWSftDkJSHIPBfwfNyLDEs3+AX3KWh9WI/iko4fhRLXy5X25VODx6SO5d857BI2byQj39+zpr938W6n/vH96stOf5rx/+eRs6nJV//2xPN9N/npLtVyYcuXyYANOq3wf/D7E+/jd7/Opu9+z3YXP7rXDbD/jkT/nsevPhb2j9HpsmLjJ9T0//7E9S7//kE9fcYNDnQ/D3C/C6rJfdwkpGn7ykhJ5/LpQP5Ig2Xj6pt/xyZ7oeenLiel2lo/j5pvv+75F8nqylKlhnu/ycN0ufd7/H03/P59J/j6v/S6N9H2P+t0TP1/1qhcPvPcffvs3/9TwN20v8C</diagram></mxfile>
|
2003.13951/main_diagram/main_diagram.pdf
ADDED
|
Binary file (13.5 kB). View file
|
|
|
2003.13951/paper_text/intro_method.md
ADDED
|
@@ -0,0 +1,60 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Introduction
|
| 2 |
+
|
| 3 |
+
![**Overall Architecture** The image encoding process is highlighted in part *a)*. The input monocular image is encoded using a ResNet encoder and then passed through the Self-Attention Context Module. The computed attention maps are then convolved with a 2D convolution with the number of output channels equal to the number of dimensions for the Discrete Disparity Volume (DDV). The DDV is then projected into a 2D depth map by performing a *softargmax* across the disparity dimension resulting in the lowest resolution disparity estimation (Eq. [\[eq:DDV_low_res\]](#eq:DDV_low_res){reference-type="ref" reference="eq:DDV_low_res"}). In part *b)* the pose estimator is shown, and part *c)* shows more details of the Multi-Scale decoder. The low resolution disparity map is passed through successive blocks of UpConv (nearest upsample + convolution). The DDV projection is performed at each scale, in the same way as in the initial encoding stage. Finally, each of the outputs is upsampled to input resolution to compute the photometric reprojection loss. ](figures/CVPR-Arch4.pdf){#fig:arch width="100%"}
|
| 4 |
+
|
| 5 |
+
Perception of the 3D world is one of the main tasks in computer/robotic vision. Accurate perception, localisation, mapping and planning capabilities are predicated on having access to correct depth information. Range finding sensors such as LiDAR or stereo/multi-camera rigs are often deployed to estimate depth for use in robotics and autonomous systems, due to their accuracy and robustness. However, in many cases it might be unfeasible to have, or rely solely on such expensive or complex sensors. This has led to the development of learning-based methods [@saxena2006learning; @saxena2009make3d; @karsch2014depth], where the most successful approaches rely on fully supervised convolutional neural networks (CNNs) [@eigen2014depth; @eigen2015predicting; @fu2018deep; @guo2018learning; @mayer2018makes]. While supervised learning methods have produced outstanding monocular depth estimation results, ground truth RGB-D data is still limited in variety and abundance when compared with the RGB image and video data sets available in the field. Furthermore, collecting accurate and large ground truth data sets is a difficult task due to sensor noise and limited operating capabilities (due to weather conditions, lighting, etc.).
|
| 6 |
+
|
| 7 |
+
Recent studies have shown that it is instead possible to train a depth estimator in a self-supervised manner using synchronised stereo image pairs [@garg2016unsupervised; @godard2017unsupervised] or monocular video [@zhou2017unsupervised]. While monocular video offers an attractive alternative to stereo based learning due to widespread availability of training sequences, it poses many challenges. Unlike stereo based methods, which have a known camera pose that can be computed offline, self-supervised monocular trained depth estimators need to jointly estimate depth and ego-motion to minimise the photometric reprojection loss function [@garg2016unsupervised; @godard2017unsupervised]. Any noise introduced by the pose estimator model can degrade the performance of a model trained on monocular sequences, resulting in large depth estimation errors. Furthermore, self-supervised monocular training makes the assumption of a moving camera in a static (i.e., rigid) scene, which causes monocular models to estimate 'holes' for pixels associated with moving visual objects, such as cars and people (i.e., non-rigid motion). To deal with these issues, many works focus on the development of new specialised architectures [@zhou2017unsupervised], masking strategies [@zhou2017unsupervised; @monodepth2; @vijayanarasimhan2017sfm; @luo2018every], and loss functions [@godard2017unsupervised; @monodepth2]. Even with all of these developments, self-supervised monocular trained depth estimators are less accurate than their stereo trained counterparts and significantly less accurate than fully supervised methods.
|
| 8 |
+
|
| 9 |
+
In this paper, we propose two new ideas to improve self-supervised monocular trained depth estimation: 1) self-attention [@non-local; @vaswani2017attention], and 2) discrete disparity volume [@kendall2017end]. Our proposed self-attention module explores non-contiguous (i.e., global) image regions as a context for estimating similar depth at those regions. Such an approach contrasts with the currently used local 2D and 3D convolutions that are unable to explore such global context. The proposed discrete disparity volume enables the estimation of more robust and sharper depth estimates, as previously demonstrated by fully supervised depth estimation approaches [@kendall2017end; @liu2019neural]. Sharper depth estimates are important to improving accuracy, and increased robustness is desirable to allow self-supervised monocular trained depth estimation to address common mistakes made by the method, such as incorrect pose estimation and matching failures because of uniform textural details. We also show that our method can estimate pixel-wise depth uncertainties with the proposed discrete disparity volume [@kendall2017end]. Depth uncertainty estimation is important for refining depth estimation [@fu2018deep], and in safety critical systems [@kendall2017uncertainties], allowing an agent to identify unknowns in an environment in order to reach optimal decisions. As a secondary contribution of this paper, we leverage recent advances in semantic segmentation network architectures that allow us to train larger models on a single GPU machine. Experimental results show that our novel approach produces the best self-supervised monocular depth estimation results for KITTI 2015 and Make3D. We also show in the experiments that our method is able to close the gap with self-supervised stereo trained and fully supervised depth estimators.
|
| 10 |
+
|
| 11 |
+
# Method
|
| 12 |
+
|
| 13 |
+
In the presentation of our proposed model for self-supervised monocular trained depth estimation, we focus on showing the importance of the main contributions of this paper, namely self-attention and discrete disparity volume. We use as a baseline the Monodepth2 model [@monodepth2] based on a UNet architecture [@ronneberger2015u].
|
| 14 |
+
|
| 15 |
+
We represent the RGB image with $\mathbf{I}:\Omega \rightarrow \mathbb R^3$, where $\Omega$ denotes the image lattice of height $H$ and width $W$. The first stage of the model, depicted in Fig. [2](#fig:arch){reference-type="ref" reference="fig:arch"}, is the ResNet-101 encoder, which forms $\mathbf{X} = resnet_{\theta}(\mathbf{I}_t)$, with $\mathbf{X}:\Omega_{1/8} \rightarrow \mathbb R^{M}$, $M$ denoting the number of channels at the output of the ResNet, and $\Omega_{1/8}$ representing the low-resolution lattice at $(1/8)^{th}$ of its initial size in $\Omega$. The ResNet output is then used by the self-attention module [@non-local], which first forms the query, key and value results, represented by: $$\begin{equation}
|
| 16 |
+
\begin{split}
|
| 17 |
+
f(\mathbf{X}(\omega)) = & \mathbf{W}_f\mathbf{X}(\omega), \\ g(\mathbf{X}(\omega)) = & \mathbf{W}_g\mathbf{X}(\omega), \\
|
| 18 |
+
h(\mathbf{X}(\omega)) = & \mathbf{W}_h\mathbf{X}(\omega),
|
| 19 |
+
\label{eq:self_attention_defintion_key_query_value}
|
| 20 |
+
\end{split}
|
| 21 |
+
\end{equation}$$ respectively, with $\mathbf{W}_f,\mathbf{W}_g,\mathbf{W}_h \in \mathbb R^{N \times M}$. The query and key values are then combined with $$\begin{equation}
|
| 22 |
+
\mathbf{S}_{\omega} = softmax(f(\mathbf{X}(\omega))^T g(\mathbf{X}) ),
|
| 23 |
+
\label{eq:self_attention_query_times_key}
|
| 24 |
+
\end{equation}$$ where $\mathbf{S}_{\omega}: \Omega_{1/8} \rightarrow [0,1]$, and we abuse the notation by representing $g(\mathbf{X})$ as a tensor of size $N \times H/8 \times W/8$. The self-attention map is then built by the multiplication of value and $\mathbf{S}_{\omega}$ in [\[eq:self_attention_query_times_key\]](#eq:self_attention_query_times_key){reference-type="eqref" reference="eq:self_attention_query_times_key"}, with: $$\begin{equation}
|
| 25 |
+
\mathbf{A}(\omega) = \sum_{\tilde{\omega} \in \Omega_{1/8}} h(\mathbf{X}(\tilde{\omega})) \times \mathbf{S}_{\omega}(\tilde{\omega}),
|
| 26 |
+
\label{eq:self_attention}
|
| 27 |
+
\end{equation}$$ with $\mathbf{A}:\Omega_{1/8} \rightarrow \mathbb R^N$.
|
| 28 |
+
|
| 29 |
+
The low-resolution discrete disparity volume (DDV) is denoted by $\mathbf{D}_{1/8}(\omega) = conv_{3 \times 3}(\mathbf{A}(\omega))$, with $\mathbf{D}_{1/8}:\Omega_{1/8} \rightarrow \mathbb R^K$ ($K$ denotes the number of discretized disparity values), and $conv_{3 \times 3}(.)$ denoting a convolutional layer with filters of size $3 \times 3$. The low resolution disparity map is then computed with $$\begin{equation}
|
| 30 |
+
\sigma(\mathbf{D}_{1/8}(\omega)) = \sum_{k=1}^{K} softmax(\mathbf{D}_{1/8}(\omega)[k]) \times disparity(k),
|
| 31 |
+
\label{eq:DDV_low_res}
|
| 32 |
+
\end{equation}$$ where $softmax(\mathbf{D}_{1/8}(\omega)[k])$ is the softmax result of the $k^{th}$ output from $\mathbf{D}_{1/8}$, and $disparity(k)$ holds the disparity value for $k$. Given the ambiguous results produced by these low-resolution disparity maps, we follow the multi-scale strategy proposed by Godard [@monodepth2]. The low resolution map from [\[eq:DDV_low_res\]](#eq:DDV_low_res){reference-type="eqref" reference="eq:DDV_low_res"} is the first step of the multi-scale decoder that consists of three additional stages of upconv operators (i.e., nearest upsample + convolution) that receive skip connections from the ResNet encoder for the respective resolutions, as shown in Fig. [2](#fig:arch){reference-type="ref" reference="fig:arch"}. These skip connections between encoding layers and associated decoding layers are known to retain high-level information in the final depth output. At each resolution, we form a new DDV, which is used to compute the disparity map at that particular resolution. The resolutions considered are (1/8), (1/4), (1/2), and (1/1) of the original resolution, respectively represented by $\sigma(\mathbf{D}_{1/8})$, $\sigma(\mathbf{D}_{1/4})$, $\sigma(\mathbf{D}_{1/2})$, and $\sigma(\mathbf{D}_{1/1})$.
|
| 33 |
+
|
| 34 |
+
Another essential part of our model is the pose estimator [@zhou2017unsupervised], which takes two images recorded at two different time steps, and returns the relative transformation, as in $$\begin{equation}
|
| 35 |
+
\mathbf{T}_{t \rightarrow t'} = p_{\phi}(\mathbf{I}_t,\mathbf{I}_{t'}),
|
| 36 |
+
\label{eq:pose}
|
| 37 |
+
\end{equation}$$ where $\mathbf{T}_{t \rightarrow t'}$ denotes the transformation matrix between images recorded at time steps $t$ and $t'$, and $p_{\phi}(.)$ is the pose estimator, consisting of a deep learning model parameterised by $\phi$.
|
| 38 |
+
|
| 39 |
+
The training is based on the minimum per-pixel photometric re-projection error [@monodepth2] between the source image $\mathbf{I}_{t'}$ and the target image $\mathbf{I}_{t}$, using the relative pose $\mathbf{T}_{t \rightarrow t'}$ defined in [\[eq:pose\]](#eq:pose){reference-type="eqref" reference="eq:pose"}. The pixel-wise error is defined by $$\begin{equation}
|
| 40 |
+
\ell_p = \frac{1}{|\mathcal{S}|}\sum_{s \in \mathcal{S}} \left ( \min_{t'} \mu^{(s)} \times pe(\mathbf{I}_t,\mathbf{I}^{(s)}_{t \rightarrow t'}) \right ),
|
| 41 |
+
\label{eq:photo_rep_loss}
|
| 42 |
+
\end{equation}$$ where $pe(.)$ denotes the photometric reconstruction error, $\mathcal{S}=\{ \frac{1}{8},\frac{1}{4},\frac{1}{2},\frac{1}{1} \}$ is the set of the resolutions available for the disparity map, defined in [\[eq:DDV_low_res\]](#eq:DDV_low_res){reference-type="eqref" reference="eq:DDV_low_res"}, $t' \in \{ t-1, t+1\}$, indicating that we use two frames that are temporally adjacent to $\mathbf{I}_t$ as its source frames [@monodepth2], and $\mu^{(s)}$ is a binary mask that filters out stationary points (see more details below in Eq.[\[eq:automasking\]](#eq:automasking){reference-type="ref" reference="eq:automasking"}) [@monodepth2]. The re-projected image in [\[eq:photo_rep_loss\]](#eq:photo_rep_loss){reference-type="eqref" reference="eq:photo_rep_loss"} is defined by $$\begin{equation}
|
| 43 |
+
\mathbf{I}^{(s)}_{t \rightarrow t'} = \mathbf{I}_{t'} \big < proj(\sigma(\mathbf{D}^{(s)}_t), \mathbf{T}_{t \rightarrow t'} , \mathbf{K}) \big >,
|
| 44 |
+
\end{equation}$$ where $proj(.)$ represents the 2D coordinates of the projected depths $\mathbf{D}_t$ in $\mathbf{I}_{t'}$, $\big <. \big >$ is the sampling operator, and $\sigma(\mathbf{D}^{(s)}_t)$ is defined in [\[eq:DDV_low_res\]](#eq:DDV_low_res){reference-type="eqref" reference="eq:DDV_low_res"}. Similarly to [@monodepth2], the pre-computed intrinsics $\mathbf{K}$ of all images are identical, and we use bi-linear sampling to sample the source images and $$\begin{equation}
|
| 45 |
+
\quad pe(\mathbf{I}_t, \mathbf{I}^{(s)}_{t^\prime}) = \frac{\alpha}{2} (1 - \mathrm{SSIM}(\mathbf{I}_t, \mathbf{I}^{(s)}_{t^\prime})) + (1 - \alpha) \|\mathbf{I}_t - \mathbf{I}^{(s)}_{t^\prime}\|_1,
|
| 46 |
+
\end{equation}$$ where $\alpha = 0.85$. Following [@godard2017unsupervised] we use an edge-aware smoothness regularisation term to improve the predictions around object boundaries: $$\begin{eqnarray}
|
| 47 |
+
\ell_s &=& \left | \partial_x d^*_t \right | e^{-\left | \partial_x \mathbf{I}_t \right |} + \left | \partial_y d^*_t \right | e^{-\left | \partial_y \mathbf{I}_t \right |},
|
| 48 |
+
\label{eq:smoothness}
|
| 49 |
+
\end{eqnarray}$$ where $d^*_t = d_t / \overline{d_t}$ is the mean-normalized inverse depth from [@wang2017learning] to discourage shrinking of the estimated depth. The auto-masking of stationary points [@monodepth2] in [\[eq:photo_rep_loss\]](#eq:photo_rep_loss){reference-type="eqref" reference="eq:photo_rep_loss"} is necessary because the assumptions of a moving camera and a static scene are not always met in self-supervised monocular trained depth estimation methods [@monodepth2]. This masking filters out pixels that remain with the same appearance between two frames in a sequence, and is achieved with a binary mask defined as $$\begin{equation}
|
| 50 |
+
\mu^{(s)} = \big [ \min_{t'}pe(\mathbf{I}_t,\mathbf{I}^{(s)}_{t' \rightarrow t}) < \min_{t'}pe(\mathbf{I}_t,\mathbf{I}_{t'}) \big ],
|
| 51 |
+
\label{eq:automasking}
|
| 52 |
+
\end{equation}$$ where $[.]$ represents the Iverson bracket. The binary mask $\mu$ in [\[eq:automasking\]](#eq:automasking){reference-type="eqref" reference="eq:automasking"} masks the loss in [\[eq:photo_rep_loss\]](#eq:photo_rep_loss){reference-type="eqref" reference="eq:photo_rep_loss"} to only include the pixels where the re-projection error of $\mathbf{I}^{(s)}_{t' \rightarrow t}$ is lower than the error of the un-warped image $\mathbf{I}_{t'}$, indicating that the visual object is moving relative to the camera. The final loss is computed as the weighted sum of the per-pixel minimum reprojection loss in [\[eq:photo_rep_loss\]](#eq:photo_rep_loss){reference-type="eqref" reference="eq:photo_rep_loss"} and smoothness term in [\[eq:smoothness\]](#eq:smoothness){reference-type="eqref" reference="eq:smoothness"}, $$\begin{equation}
|
| 53 |
+
\ell = \ell_{p} + \lambda\ell_s
|
| 54 |
+
\label{eq:final-loss}
|
| 55 |
+
\end{equation}$$ where $\lambda$ is the weighting for the smoothness regularisation term. Both the pose model and depth model are trained jointly using this photometric reprojection error. Inference is achieved by taking a test image at the input of the model and producing the high-resolution disparity map $\sigma(\mathbf{D}_{1/1})$.
|
| 56 |
+
|
| 57 |
+
<figure id="fig:qual-results">
|
| 58 |
+
|
| 59 |
+
<figcaption><span><strong>Qualitative results on the KITTI Eigen split <span class="citation" data-cites="eigen2015predicting"></span> test set.</strong></span> Our models perform better on thinner objects such as trees, signs and bollards, as well as being better at delineating difficult object boundaries.</figcaption>
|
| 60 |
+
</figure>
|
2010.12810/main_diagram/main_diagram.drawio
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
<mxfile host="app.diagrams.net" modified="2020-05-25T09:24:29.185Z" agent="5.0 (Macintosh; Intel Mac OS X 10_15_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Safari/537.36" version="13.1.3" etag="3o28evVuaOTmBJbeFNo2" type="google"><diagram id="WRsPlDLglBqI_sLvmbQO">7V1bb9s2FP41BtqHBLpfHhOn2wqswbAUWPpUKBJtC5UlT1Yaeb9+pEXaukZ0LR5KhtOgMSmJEr9z4XcOaXGmz9f576m3WX1JAhTNNCXIZ/rDTNNMTbXxH1KzK2pU11CKmmUaBrTuWPEU/odoJTvtNQzQtnJiliRRFm6qlX4Sx8jPKnVemiZv1dMWSUTvStvfeEtUOYNUPPle1Kz9JwyyVVHraNax/g8ULlfszqrlFkdePP/HMk1eY3q/OIlRcWTtsWZo49uVFyRvpcfSP830eZokWfFpnc9RRHANK8/+W8fRwyOnKM54LtCKC3560SvtNX2ubMdgeFuFGXraeD4pv2FZz/T7VbaOcEnFH73tpgB/EeYIt3q/CKNonkRJur9cf1GDYKHg+m2WJj9Q6Yiq2LqLG72nz4DSDOWd/VAP6GCNQ8kaZekOn8IuUByqblTbcNNF+e0oOpMKflWSGqvzqLIsD00fUcMfKHDtIOr9IKI4uCMqiUt+5G23oV9FEXcz3T3jgsIK30jh1jZZ+SEvH33YsVIeZs+sEfy5uMykpeNFpMCuKZ4OBTX13yavqU+rbNqFzEuXiElAUbjFwol6iiIvC39Wn6NNFPQOfyUhvvFB6jWZ62athaJH9KKyFdTasWsN3bhutaEChkZDe8U49JpLV4x+XSGuY9M0pMUCWb7fZkiB7b4oynuGRF2k98LuwS/JLgPTND4DM4awMFO4mzoH3TKKxln2cA5G1tQx2lWxAIDMvhDIDp4ZADPnUjAD1DP3QjDTAPWMRQHTBw1Q0VT1QkDTITVNfBAEBBqkpnEEPZMAzYDUtDb2b0UZ6XuyD0iO6Fn/vibswM12nxy5wyfoyiY/HsSflsVfg/ya8yyMAjSz78lHlGcvC/zZn9kP+Pd7QM+id8QPW9yUNVGTHrm+KrAtFmUYL3HJPJa+JlisDzcqEUebwBMsnUW0j3xXYRCguCbofXKkLmNWybIjpPF9QoXoyb5EZB76XnQXhUvc5MMat02e/N6jFT6WP0oH4gp2S6zjNHXEfieC5dYRjljnCIXagXpJajWr8n1kLhZtVqVbuqsHA+VjsAefVWPzJoSHMb6M4eHCs0BsC4ZONDSzamjKwvOrF3wN12iLDzyiN/z/38nai5uW+eQnKWL3fklZ9SPK3pL0B77uw9PjxzNMss1sWsyrYRWd9tOmTVXTG0Q9zKp6GLp1azYURLVbFMQ1B9APjsivgJZlfvUa7itvQ85b50uS/b4lHs5feWl268VxknlZmMTf1RLwEVoQ0W1ICgmln35ioLa0rZSeT4qO0jIOmuRfm8Va+x+qu2VL3v8MJCrV0CqiauOzetuAqQ5hyBzhZl9utSnI0wEubtpImY4mE6qqtlE1KFuvtsGbC2147kZLwyVDVY7A+CpdzCss69Yt/ajDiNrRwCStcUTzV0kTRqQPJFySsL9Vjj89zQ4oao4cRDHF0TU4DTBbYTg11WZ5y5IYHbcpxiEYPOuvwNA4UHyEtDZNNy1TtRHv0N8HY23KR21gKCpSbp1THTZSZuHx9hob/5qSQJgSx3Tpddgg/s7oGcq5hw3DtcBGCo5UB5sM5w10BQ4qdboEm9Lgmd8dQ16oEOq7hiAQpLa4vnDpG64xw2wfMyojz6mJoIFu+4utzPFJZLRqpKA6x7oh7lrKb83fyW/1VW8uIRXWZRH0ippXsW3IRFjrdL7wGYn8OiNx
9npHu6Y2KmCAM0Da5nQO1uRxo2dlSi2VZlu1MW6YYL7R7HAUTefI25wby5rICYw20Tvai16i32cZjMkmrBl50+2GvQhbHyx+VQYQiJZEEAGyKiAgWjI1UfyqDSAQZWoiR1JiEiDaMjVR/JJuSQOLAwii+DXfkgYWSBDFrwKXNLBAgih+WbikgQUSRPHrxCUNLIAgGpcasRiAo7NxqRELKIgCIhYBwwQoJALiDwFOHxQSAdGEABcOComA2ECEQ4Yc1QQwfRHuFRISAbxdhHuFhEQACxfhXiEhEcCpRbhXQEhMAQxZgHs1AUccUwDfFeBeQSGZBnsFhWQa7BUUkmmwV1BIuFe1dfXz/KVqN7Xlz4fv4pRXYbUBMAwCHGT1fARMjoVm4rrIQT4H6KIls4uyyGSB7S8v2jirz7LYYlef82rfIF4jJIsd9kAA+Y4bSxYd7MMAUg9k8b8eDCDfQWPJInxdGMC/IsuSRfA4PaIERGRlJ3kdpARIZGUnef2lBEhkZSd53acESMZGKCW8Pc8aOcGUAAlbyD6BdxCf6IFBURw7ZZWBydgprAxMRkppIUMbexqcFhSSiZBaUEwmwmpBMZkIrQXFZKS8FjJTYk+D10JCwqZgxgZJ3aWCYjIRlgqKyURYKigmcK//ya9fRD9LTRyFva+AfRu5+VYUUV9Ed7h3U+nq6PlT7XZtqt0ymmYibpLWAVls4PTvaiKwiyCrCRyZqwkckNUEjszVBA4HSR6gi/1LBwR2kYMDD9BFR2IXXVmctsBWypoQVxZn7eoz/JoQVxZF7YEAMnHqykqc9mEAqQeyEqc9GECGJq6sTGkXBvATua6sxCinR5SAiKy0KK+DlACJrKwor7+UAImsJCmv+4SH5KCgo8FEwpysqoycYkrBRADnFLQs5EQnDAvj2GmrFFDGzmOlgDJSYgsZ4KjKNKgtLCYTIbewoEyE3sKCMhGCCwqKiI2Mh/CroJs5i9iYWIBfhcVEAMMV4VdhQZkIX4UFZSJ8FRaUNr563boAyV8x0qM1MleMqOPZGdgLsA4pcRJu0YxM9FKlW3vZCosCq9oj1rMP+MCcHDbn23C59r7j+j9x/eePo9G/Dr1ST9Kr4ba8XZLdJmaD7VRs6GpNWy32GpjyjDtLs5T1VWXfATxPYTmir77NNlAeZs+lz98IPreaSYsPOcVrX9ixQowf9blcKC4zWfF42b60q4De2JWj2OqCWjLtQrFPBQWaf+UV0Kar7AVzVO43bNuTUzfvsN1aQ3atoQE31G3dL3la7i28urd33NsALq0e9Bia07LllEiX1pYmGJS5rbysnbeNRq3Gx9p6XkXn6rVx0GBUCoK2te4ffdjYT+o2fZ/jzWvWqVGXtZVdz1vHapuQ2qa4nexwMU2IHI4jJe7V6ksSIHLG/w==</diagram></mxfile>
|
2010.12810/main_diagram/main_diagram.pdf
ADDED
|
Binary file (28.7 kB). View file
|
|
|
2010.12810/paper_text/intro_method.md
ADDED
|
@@ -0,0 +1,185 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Introduction
|
| 2 |
+
|
| 3 |
+
Autoregressive models play a crucial role in modeling high-dimensional probability distributions. They have been successfully used to generate realistic images [18, 21], high-quality speech [17], and complex decisions in games [29]. An autoregressive model defines a probability density as a product of conditionals using the chain rule. Although this factorization is fully general, autoregressive models typically rely on simple probability density functions for the conditionals (e.g. a Gaussian or a mixture of logistics) [21] in the continuous case, which limits the expressiveness of the model.
|
| 4 |
+
|
| 5 |
+
To improve flexibility, energy-based models (EBM) represent a density in terms of an energy function, which does not need to be normalized. This enables more flexible neural network architectures, but requires new training strategies, since maximum likelihood estimation (MLE) is intractable due to the normalization constant (partition function). Score matching (SM) [9] trains EBMs by minimizing the Fisher divergence (instead of KL divergence as in MLE) between model and data distributions. It compares distributions in terms of their log-likelihood gradients (scores) and completely circumvents the intractable partition function. However, score matching requires computing the trace of the Hessian matrix of the model's log-density, which is expensive for high-dimensional data [14].
|
| 6 |
+
|
| 7 |
+
To avoid calculating the partition function without losing scalability in high dimensional settings, we leverage the chain rule to decompose a high dimensional distribution matching problem into simpler univariate sub-problems. Specifically, we propose a new divergence between distributions, named Composite Score Matching (CSM), which depends only on the derivatives of *univariate* log-conditionals (scores) of the model, instead of the full gradient as in score matching. CSM training is
|
| 8 |
+
|
| 9 |
+
particularly efficient when the model is represented directly in terms of these univariate conditional scores. This is similar to a traditional autoregressive model, but with the advantage that conditional scores, unlike conditional distributions, do not need to be normalized. Similar to EBMs, removing the normalization constraint increases the flexibility of model families that can be used.
|
| 10 |
+
|
| 11 |
+
Leveraging existing and well-established autoregressive models, we design architectures where we can evaluate all dimensions in parallel for efficient training. During training, our CSM divergence can be optimized directly without the need of approximations [15, 25], surrogate losses [11], adversarial training [5] or extra sampling [3]. We show with extensive experimental results that our method can be used for density estimation, data generation, image denoising and anomaly detection. We also illustrate that CSM can provide accurate score estimation required for variational inference with implicit distributions [8, 25] by providing better likelihoods and FID [7] scores compared to other training methods on image datasets.
|
| 12 |
+
|
| 13 |
+
Given i.i.d. samples $\{\mathbf{x}^{(1)},...,\mathbf{x}^{(N)}\}\subset\mathbb{R}^D$ from some unknown data distribution $p(\mathbf{x})$ , we want to learn an unnormalized density $\tilde{q}_{\theta}(\mathbf{x})$ as a parametric approximation to $p(\mathbf{x})$ . The unnormalized $\tilde{q}_{\theta}(\mathbf{x})$ uniquely defines the following normalized probability density:
|
| 14 |
+
|
| 15 |
+
$$q_{\theta}(\mathbf{x}) = \frac{\tilde{q}_{\theta}(\mathbf{x})}{Z(\theta)}, \ Z(\theta) = \int \tilde{q}_{\theta}(\mathbf{x}) d\mathbf{x},$$
|
| 16 |
+
(1)
|
| 17 |
+
|
| 18 |
+
where $Z(\theta)$ , the partition function, is generally intractable.
|
| 19 |
+
|
| 20 |
+
To learn an unnormalized probabilistic model, [15] proposes to approximate the normalizing constant using one dimensional importance sampling. Specifically, let $\mathbf{x} = (x_1, ..., x_D) \in \mathbb{R}^D$ . They first learn a set of one dimensional conditional energies $E_{\theta}(x_d|\mathbf{x}_{< d}) \triangleq -\log \tilde{q}_{\theta}(x_d|\mathbf{x}_{< d})$ , and then approximate the normalizing constants using importance sampling, which introduces an additional network to parameterize the proposal distribution. Once the partition function is approximated, they normalize the density to enable maximum likelihood training. However, approximating the partition function not only introduces bias into optimization but also requires extra computation and memory usage, lowering the training efficiency.
|
| 21 |
+
|
| 22 |
+
To avoid computing $Z(\theta)$ , we can take the logarithm on both sides of Eq. (1) and obtain $\log q_{\theta}(\mathbf{x}) = \log \tilde{q}_{\theta}(\mathbf{x}) - \log Z(\theta)$ . Since $Z(\theta)$ does not depend on $\mathbf{x}$ , we can ignore the intractable partition function $Z(\theta)$ when optimizing $\nabla_{\mathbf{x}} \log q_{\theta}(\mathbf{x})$ . In general, $\nabla_{\mathbf{x}} \log q_{\theta}(\mathbf{x})$ and $\nabla_{\mathbf{x}} \log p(\mathbf{x})$ are called the *score* of $q_{\theta}(\mathbf{x})$ and $p(\mathbf{x})$ respectively. Score matching (SM) [9] learns $q_{\theta}(\mathbf{x})$ by matching the scores between $q_{\theta}(\mathbf{x})$ and $p(\mathbf{x})$ using the Fisher divergence:
|
| 23 |
+
|
| 24 |
+
$$L(q_{\theta}; p) \triangleq \frac{1}{2} \mathbb{E}_{p}[\|\nabla_{\mathbf{x}} \log p(\mathbf{x}) - \nabla_{\mathbf{x}} \log q_{\theta}(\mathbf{x})\|_{2}^{2}].$$
|
| 25 |
+
(2)
|
| 26 |
+
|
| 27 |
+
Ref. [9] shows that under certain regularity conditions $L(q_{\theta}; p) = J(\theta; p) + C$ , where C is a constant that does not depend on $\theta$ and $J(\theta; p)$ is defined as below:
|
| 28 |
+
|
| 29 |
+
$$J(\theta; p) \triangleq \mathbb{E}_p \left[ \frac{1}{2} \| \nabla_{\mathbf{x}} \log q_{\theta}(\mathbf{x}) \|_2^2 + \operatorname{tr}(\nabla_{\mathbf{x}}^2 \log q_{\theta}(\mathbf{x})) \right],$$
|
| 30 |
+
|
| 31 |
+
where $\operatorname{tr}(\cdot)$ denotes the trace of a matrix. The above objective does not involve the intractable term $\nabla_{\mathbf{x}} \log p(\mathbf{x})$ . However, computing $\operatorname{tr}(\nabla_{\mathbf{x}}^2 \log q_{\theta}(\mathbf{x}))$ is in general expensive for high dimensional data. Given $\mathbf{x} \in \mathbb{R}^D$ , a naive approach requires D times more backward passes than computing the gradient $\nabla_{\mathbf{x}} \log q_{\theta}(\mathbf{x})$ [25] in order to compute $\operatorname{tr}(\nabla_{\mathbf{x}}^2 \log q_{\theta}(\mathbf{x}))$ , which is inefficient when D is large. In fact, ref. [14] shows that within a constant number of forward and backward passes, it is unlikely for an algorithm to be able to compute the diagonal of a Hessian matrix defined by any arbitrary computation graph.
|
| 32 |
+
|
| 33 |
+
To make SM more scalable, we introduce Composite Score Matching (CSM), a new divergence suitable for learning unnormalized statistical models. We can factorize any given data distribution $p(\mathbf{x})$ and model distribution $q_{\theta}(\mathbf{x})$ using the chain rule according to a common variable ordering:
|
| 34 |
+
|
| 35 |
+
$$p(\mathbf{x}) = \prod_{d=1}^{D} p(x_d | \mathbf{x}_{< d}), \quad q_{\theta}(\mathbf{x}) = \prod_{d=1}^{D} q_{\theta}(x_d | \mathbf{x}_{< d})$$
|
| 36 |
+
|
| 37 |
+
where $x_d \in \mathbb{R}$ stands for the d-th component of $\mathbf{x}$ , and $\mathbf{x}_{< d}$ refers to all the entries with indices smaller than d in $\mathbf{x}$ . Our key insight is that instead of directly matching the joint distributions, we can match the conditionals of the model $q_{\theta}(x_d|\mathbf{x}_{< d})$ to the conditionals of the data $p(x_d|\mathbf{x}_{< d})$ using the Fisher divergence. This decomposition results in simpler problems, which can be optimized efficiently using one-dimensional score matching. For convenience, we denote the conditional scores of $q_{\theta}$ and p as $s_{\theta,d}(\mathbf{x}_{< d},x_d)\triangleq \frac{\partial}{\partial x_d}\log q_{\theta}(x_d|\mathbf{x}_{< d}): \mathbb{R}^{d-1}\times\mathbb{R}\to\mathbb{R}$ and $s_d(\mathbf{x}_{< d},x_d)\triangleq \frac{\partial}{\partial x_d}\log p(x_d|\mathbf{x}_{< d}): \mathbb{R}^{d-1}\times\mathbb{R}\to\mathbb{R}$ respectively. This gives us a new divergence termed $Composite\ Score\ Matching\ (CSM)$ :
|
| 38 |
+
|
| 39 |
+
$$L_{CSM}(q_{\theta}; p) = \frac{1}{2} \sum_{d=1}^{D} \mathbb{E}_{p(\mathbf{x}_{< d})} \mathbb{E}_{p(x_d|\mathbf{x}_{< d})} \left[ \left( s_d(\mathbf{x}_{< d}, x_d) - s_{\theta, d}(\mathbf{x}_{< d}, x_d) \right)^2 \right]. \tag{3}$$
|
| 40 |
+
|
| 41 |
+
This divergence is inspired by composite scoring rules [1], a general technique to decompose distribution-matching problems into lower-dimensional ones. As such, it bears some similarity with pseudo-likelihood, a composite scoring rule based on KL-divergence. As shown in the following theorem, it can be used as a learning objective to compare probability distributions:
|
| 42 |
+
|
| 43 |
+
**Theorem 1** (CSM Divergence). $L_{CSM}(q_{\theta}, p)$ vanishes if and only if $q_{\theta}(\mathbf{x}) = p(\mathbf{x})$ a.e.
|
| 44 |
+
|
| 45 |
+
*Proof Sketch.* If the distributions match, their derivatives (conditional scores) must be the same, hence $L_{CSM}$ is zero. If $L_{CSM}$ is zero, the conditional scores must be the same, and that uniquely determines the joints. See Appendix for a formal proof.
|
| 46 |
+
|
| 47 |
+
Eq. (3) involves $s_d(\mathbf{x})$ , the unknown score function of the data distribution. Similar to score matching, we can apply integration by parts to obtain an equivalent but tractable expression:
|
| 48 |
+
|
| 49 |
+
$$J_{CSM}(\theta; p) = \sum_{d=1}^{D} \mathbb{E}_{p(\mathbf{x}_{< d})} \mathbb{E}_{p(x_d | \mathbf{x}_{< d})} \left[ \frac{1}{2} s_{\theta, d} (\mathbf{x}_{< d}, x_d)^2 + \frac{\partial}{\partial x_d} s_{\theta, d} (\mathbf{x}_{< d}, x_d) \right], \tag{4}$$
|
| 50 |
+
|
| 51 |
+
The equivalence can be summarized using the following results:
|
| 52 |
+
|
| 53 |
+
**Theorem 2** (Informal). Under some regularity conditions, $L_{CSM}(q_{\theta}; p) = J_{CSM}(\theta; p) + C$ where C is a constant that does not depend on $\theta$ .
|
| 54 |
+
|
| 55 |
+
*Proof Sketch.* Integrate by parts the one-dimensional SM objectives. See Appendix for a proof. $\Box$
|
| 56 |
+
|
| 57 |
+
**Corollary 1.** Under some regularity conditions, $J_{CSM}(\theta, p)$ is minimized when $q_{\theta}(\mathbf{x}) = p(\mathbf{x})$ a.e.
|
| 58 |
+
|
| 59 |
+
In practice, the expectation in $J_{CSM}(\theta; p)$ can be approximated by a sample average using the following unbiased estimator
|
| 60 |
+
|
| 61 |
+
$$\hat{J}_{CSM}(\theta; p) \triangleq \frac{1}{N} \sum_{i=1}^{N} \sum_{d=1}^{D} \left[ \frac{1}{2} s_{\theta, d} (\mathbf{x}_{< d}^{(i)}, x_{d}^{(i)})^{2} + \frac{\partial}{\partial x_{d}^{(i)}} s_{\theta, d} (\mathbf{x}_{< d}^{(i)}, x_{d}^{(i)}) \right], \tag{5}$$
|
| 62 |
+
|
| 63 |
+
where $\{\mathbf{x}^{(1)},...,\mathbf{x}^{(N)}\}$ are i.i.d samples from $p(\mathbf{x})$ . It is clear from Eq. (5) that evaluating $\hat{J}_{CSM}(\theta;p)$ is efficient as long as it is efficient to evaluate $s_{\theta,d}(\mathbf{x}_{< d},x_d) \triangleq \frac{\partial}{\partial x_d} \log q_{\theta}(x_d|\mathbf{x}_{< d})$ and its derivative $\frac{\partial}{\partial x_d} s_{\theta,d}(\mathbf{x}_{< d},x_d)$ . This in turn depends on how the model $q_{\theta}$ is represented. For example, if $q_{\theta}$ is an energy-based model defined in terms of an energy $\tilde{q}_{\theta}$ as in Eq. (1), computing $q_{\theta}(x_d|\mathbf{x}_{< d})$ (and hence its derivative, $s_{\theta,d}(\mathbf{x}_{< d},x_d)$ ) is generally intractable. On the other hand, if $q_{\theta}$ is a traditional autoregressive model represented as a product of normalized conditionals, then $\hat{J}_{CSM}(\theta;p)$ will be efficient to optimize, but the normalization constraint may limit expressivity. In the following, we propose a parameterization tailored for CSM training, where we represent a joint distribution directly in terms of $s_{\theta,d}(\mathbf{x}_{< d},x_d)$ , $d=1,\cdots D$ without normalization constraints.
|
| 64 |
+
|
| 65 |
+
We introduce a new class of probabilistic models, named *autoregressive conditional score models* (AR-CSM), defined as follows:
|
| 66 |
+
|
| 67 |
+
**Definition 1.** An autoregressive conditional score model over $\mathbb{R}^D$ is a collection of D functions $\hat{s}_d(\mathbf{x}_{< d}, x_d) : \mathbb{R}^{d-1} \times \mathbb{R} \to \mathbb{R}$ , such that for all $d = 1, \dots, D$ :
|
| 68 |
+
|
| 69 |
+
- 1. For all $\mathbf{x}_{< d} \in \mathbb{R}^{d-1}$ , there exists a function $\mathcal{E}_d(\mathbf{x}_{< d}, x_d) : \mathbb{R}^{d-1} \times \mathbb{R} \to \mathbb{R}$ such that $\frac{\partial}{\partial x_d} \mathcal{E}_d(\mathbf{x}_{< d}, x_d)$ exists, and $\frac{\partial}{\partial x_d} \mathcal{E}_d(\mathbf{x}_{< d}, x_d) = \hat{s}_d(\mathbf{x}_{< d}, x_d)$ .
|
| 70 |
+
- 2. For all $\mathbf{x}_{< d} \in \mathbb{R}^{d-1}$ , $Z_d(\mathbf{x}_{< d}) \triangleq \int e^{\mathcal{E}_d(\mathbf{x}_{< d}, x_d)} dx_d$ exists and is finite (i.e., the improper integral w.r.t. $x_d$ is convergent).
|
| 71 |
+
|
| 72 |
+
Autoregressive conditional score models are an expressive family of probabilistic models for continuous data. In fact, there is a one-to-one mapping between the set of autoregressive conditional score models and a large set of probability densities over $\mathbb{R}^D$ :
|
| 73 |
+
|
| 74 |
+
**Theorem 3.** There is a one-to-one mapping between the set of autoregressive conditional score models over $\mathbb{R}^D$ and the set of probability density functions $q(\mathbf{x})$ fully supported over $\mathbb{R}^D$ such that $\frac{\partial}{\partial x_d} \log q(x_d|\mathbf{x}_{< d})$ exists for all d and $\mathbf{x}_{< d} \in \mathbb{R}^{d-1}$ . The mapping pairs conditional scores and densities such that
|
| 75 |
+
|
| 76 |
+
$$\hat{s}_d(\mathbf{x}_{< d}, x_d) = \frac{\partial}{\partial x_d} \log q(x_d | \mathbf{x}_{< d})$$
|
| 77 |
+
|
| 78 |
+
The key advantage of this representation is that the functions in Definition 1 are easy to parameterize (e.g., using neural networks) as the requirements 1 and 2 are typically easy to enforce. In contrast with typical autoregressive models, we do not require the functions in Definition 1 to be normalized. Importantly, Theorem 3 does not hold for previous approaches that learn a single score function for the joint distribution [25, 24], since the score model $s_{\theta} : \mathbb{R}^{D} \to \mathbb{R}^{D}$ in their case is not necessarily the gradient of any underlying joint density. In contrast, AR-CSM *always* define a valid density through the mapping given by Theorem 3.
|
| 79 |
+
|
| 80 |
+
In the following, we discuss how to use deep neural networks to parameterize autoregressive conditional score models (AR-CSM) defined in Definition 1. To simplify notations, we hereafter use $\mathbf{x}$ to denote the arguments for $s_{\theta,d}$ and $s_d$ even when these functions depend on a subset of its dimensions.
|
| 81 |
+
|
| 82 |
+
We propose to parameterize an AR-CSM based on existing autoregressive architectures for traditional (normalized) density models (e.g., PixelCNN++ [21], MADE [4]). One important difference is that the output of standard autoregressive models at dimension d depend only on $\mathbf{x}_{< d}$ , yet we want the conditional score $s_{\theta,d}$ to also depend on $x_d$ .
|
| 83 |
+
|
| 84 |
+
To fill this gap, we use standard autoregressive models to parameterize a "context vector" $\mathbf{c}_d \in \mathbb{R}^c$ (c is fixed among all dimensions) that depends only on $\mathbf{x}_{< d}$ , and then incorporate the dependency on $x_d$ by concatenating $\mathbf{c}_d$ and $x_d$ to get a c+1 dimensional vector $\mathbf{h}_d = [\mathbf{c}_d, x_d]$ . Next, we feed $\mathbf{h}_d$ into another neural network which outputs the scalar $s_{\theta,d} \in \mathbb{R}$ to model the conditional score. The network's parameters are shared across all dimensions similar to [15]. Finally, we can compute $\frac{\partial}{\partial x_d} s_{\theta,d}(\mathbf{x})$ using automatic differentiation, and optimize the model directly with the CSM divergence.
|
| 85 |
+
|
| 86 |
+
Standard autoregressive models, such as PixelCNN++ and MADE, model the density with a prescribed probability density function (e.g., a Gaussian density) parameterized by functions of $\mathbf{h}_d$ . In contrast, we remove the normalizing constraints of these density functions and are therefore able to capture stronger correlations among dimensions with more expressive architectures.
|
| 87 |
+
|
| 88 |
+
To sample from an AR-CSM model, we use one dimensional Langevin dynamics to sample from each dimension in turn. Crucially, Langevin dynamics only need the score function to sample from a density [19, 6]. In our case, scores are simply the univariate derivatives given by the AR-CSM.
|
| 89 |
+
|
| 90 |
+
Specifically, we use $s_{\theta,1}(x_1)$ to obtain a sample $x_1 \sim q_{\theta}(x_1)$, then use $s_{\theta,2}(x_1, x_2)$ to sample from $x_2 \sim q_{\theta}(x_2 \mid x_1)$ and so forth. Compared to Langevin dynamics performed directly on a high dimensional space, one dimensional Langevin dynamics can converge faster under certain regularity conditions [20]. See Appendix C.3 for more details.
|
| 91 |
+
|
| 92 |
+
During training, we use the CSM divergence (see Eq. (5)) to train the model. To deal with data distributions supported on low-dimensional manifolds and the difficulty of score estimation in low data density regions, we use noise annealing similar to [24] with slight modifications: Instead of performing noise annealing as a whole, we perform noise annealing on each dimension individually. More details can be found in Appendix C.
|
| 93 |
+
|
| 94 |
+
In this section, we first compare the optimization performance of CSM with two other variants of score matching: Denoising Score Matching (DSM) [28] and Sliced Score Matching (SSM) [25], and compare the training efficiency of CSM with Score Matching (SM) [9]. Our results show that CSM is more stable to optimize and more scalable to high dimensional data compared to the previous score matching methods. We then perform density estimation on 2-d synthetic datasets (see Appendix B) and three commonly used image datasets: MNIST, CIFAR-10 [12] and CelebA [13]. We further show that our method can also be applied to image denoising and anomaly detection, illustrating broad applicability of our method.
|
| 95 |
+
|
| 96 |
+
# Method
|
| 97 |
+
|
| 98 |
+
Setup To illustrate the scalability of CSM, we consider a simple setup of learning Gaussian distributions. We train an AR-CSM model with CSM and the other score matching methods on a fully connected network with 3 hidden layers. We use comparable number of parameters for all the methods to ensure fair comparison.
|
| 99 |
+
|
| 100 |
+

|
| 101 |
+
|
| 102 |
+
Figure 1: Comparison with SSM and SM in terms of loss variance and computational efficiency.
|
| 103 |
+
|
| 104 |
+
CSM vs. SM In Figure 1a, we show the time per iteration of CSM versus the original score matching (SM) method [9] on multivariate Gaussians with different data dimensionality. We find that the training speed of SM degrades linearly as a function of the data dimensionality. Moreover, the memory required grows rapidly *w.r.t* the data dimension, which triggers memory error on 12 GB TITAN Xp GPU when the data dimension is approximately 200. On the other hand, for CSM, the time required stays stable as the data dimension increases due to parallelism, and no memory errors occurred throughout the experiments. As expected, traditional score matching (SM) does not scale as well as CSM for high dimensional data. Similar results on SM were also reported in [25].
|
| 105 |
+
|
| 106 |
+
CSM vs. SSM We compare CSM with Sliced Score Matching (SSM) [25], a recently proposed score matching variant, on learning a representative Gaussian $\mathcal{N}(0, 0.1^2 I)$ of dimension 100 in Figure 2 (2 rightmost panels). While CSM converges rapidly, SSM does not converge even after 20k iterations due to the large variance of random projections. We compare the variance of the two objectives in Figure 1b. In such a high-dimensional setting, SSM would require a large number of projection vectors for variance reduction, which requires extra computation and could be prohibitively expensive in practice. By contrast, CSM is a deterministic objective function that is more stable to optimize. This again suggests that CSM might be more suitable to be used in high-dimensional data settings compared to SSM.
|
| 107 |
+
|
| 108 |
+

|
| 109 |
+
|
| 110 |
+
Figure 2: Training losses for DSM, SSM and CSM on 100-d Gaussian distribution $\mathcal{N}(0, 0.1^2 I)$. Note the vertical axes are different across methods as they optimize different losses.
|
| 111 |
+
|
| 112 |
+

|
| 113 |
+
|
| 114 |
+
(a) MNIST negative loglikelihoods (b) 2-d synthetic dataset samples from MADE MLE baselines with n mixture of logistics and an AR-CSM model trained by CSM.
|
| 115 |
+
|
| 116 |
+
Figure 3: Negative log-likelihoods on MNIST and samples on a 2-d synthetic dataset.
|
| 117 |
+
|
| 118 |
+
CSM vs. DSM Denoising score matching (DSM) [28] is perhaps the most scalable score matching alternative available, and has been applied to high dimensional score matching problems [24]. However, DSM estimates the score of the data distribution after it has been convolved with Gaussian noise with variance $\sigma^2 I$. In Figure 2, we use various noise levels $\sigma$ for DSM, and compare the performance of CSM with that of DSM. We observe that although DSM shows reasonable performance when $\sigma$ is sufficiently large, the training can fail to converge for small $\sigma$. In other words, for DSM, there exists a tradeoff between optimization performance and the bias introduced due to noise perturbation for the data. CSM on the other hand does not suffer from this problem, and converges faster than DSM.
|
| 119 |
+
|
| 120 |
+
Likelihood comparison To better compare density estimation performance of DSM, SSM and CSM, we train a MADE [4] model with tractable likelihoods on MNIST, a more challenging data distribution, using the three variants of score matching objectives. We report the negative loglikelihoods in Figure 3a. The loss curves in Figure 3a align well with our previous discussion. For DSM, a smaller σ introduces less bias, but also makes training slower to converge. For SSM, training convergence can be handicapped by the large variance due to random projections. In contrast, CSM can converge quickly without these difficulties. This clearly demonstrates the efficacy of CSM over the other score matching methods for density estimation.
|
| 121 |
+
|
| 122 |
+
In this section, we focus on a 2-d synthetic data distribution (see Figure 3b). We compare the sample quality of an autoregressive model trained by maximum likelihood estimation (MLE) and an AR-CSM model trained by CSM. We use a MADE model with n mixture of logistic components for the MLE baseline experiments. We also use a MADE model as the autoregressive architecture for the AR-CSM model. To show the effectiveness of our approach, we use strictly fewer parameters for the AR-CSM model than the baseline MLE model. Even with fewer parameters, the AR-CSM model trained with CSM is still able to generate better samples than the MLE baseline (see Figure 3b).
|
| 123 |
+
|
| 124 |
+
In this section, we show that our method is also capable of modeling natural images. We focus on three image datasets, namely MNIST, CIFAR-10, and CelebA.
|
| 125 |
+
|
| 126 |
+
Setup We select two existing autoregressive models — MADE [4] and PixelCNN++ [21], as the autoregressive architectures for AR-CSM. For all the experiments, we use a shallow fully connected network to transform the context vectors to the conditional scores for AR-CSM. Additional details can be found in Appendix C.
|
| 127 |
+
|
| 128 |
+

|
| 129 |
+
|
| 130 |
+
Figure 4: Samples from MADE and PixelCNN++ using MLE and CSM.
|
| 131 |
+
|
| 132 |
+
**Results** We compare the samples from AR-CSM with the ones from MADE and PixelCNN++ with similar autoregressive architectures but trained via maximum likelihood estimation. Our AR-CSM models have comparable number of parameters as the maximum-likelihood counterparts. We observe that the MADE model trained by CSM is able to generate sharper and higher quality samples than its maximum-likelihood counterpart using Gaussian densities (see Figure 4). For PixelCNN++, we observe more digit-like samples on MNIST, and less shifted colors on CIFAR-10 and CelebA than its maximum-likelihood counterpart using mixtures of logistics (see Figure 4). We provide more samples in Appendix C.
|
| 133 |
+
|
| 134 |
+

|
| 135 |
+
|
| 136 |
+
Figure 5: Salt and pepper denoising on CIFAR-10. Autoregressive single-step denoising on MNIST.
|
| 137 |
+
|
| 138 |
+
Besides image generation, AR-CSM can also be used for image denoising. In Figure 5, we apply 10% "Salt and Pepper" noise to the images in CIFAR-10 test set and apply Langevin dynamics sampling to restore the images. We also show that AR-CSM can be used for single-step denoising [22, 28] and report the denoising results for MNIST, with noise level $\sigma=0.6$ in the rescaled space in Figure 5. These results qualitatively demonstrate the effectiveness of AR-CSM for image denoising, showing that our models are sufficiently expressive to capture complex distributions and solve difficult tasks.
|
| 139 |
+
|
| 140 |
+
We show that the AR-CSM model can also be used for out-of-distribution (OOD) detection. In this task, the generative model is required to produce a statistic (e.g., likelihood, energy) such that the outputs of in-distribution examples can be distinguished from those of the out-of-distribution examples. We find that $h_{\theta}(\mathbf{x}) \triangleq \sum_{d=1}^{D} s_{\theta,d}(\mathbf{x})$ is an effective statistic for OOD. In Tab. 1, we compare the Area Under
|
| 141 |
+
|
| 142 |
+
| Model | PixelCNN++ | GLOW | EBM | AR-CSM(Ours) |
|
| 143 |
+
|---------------|------------|------|------|--------------|
|
| 144 |
+
| SVHN | 0.32 | 0.24 | 0.63 | 0.68 |
|
| 145 |
+
| Const Uniform | 0.0 | 0.0 | 0.30 | 0.57 |
|
| 146 |
+
| Uniform | 1.0 | 1.0 | 1.0 | 0.95 |
|
| 147 |
+
| Average | 0.44 | 0.41 | 0.64 | 0.73 |
|
| 148 |
+
|
| 149 |
+
Table 1: AUROC scores for models trained on CIFAR-10.
|
| 150 |
+
|
| 151 |
+
the Receiver-Operating Curve (AUROC) scores obtained by AR-CSM using $h_{\theta}(\mathbf{x})$ with the ones obtained by PixelCNN++ [21], Glow [10] and EBM [3] using relative log likelihoods. We use SVHN,
|
| 152 |
+
|
| 153 |
+
| | MNIST (AIS) | | CelebA (FID) |
|
| 154 |
+
|------------|-------|--------|--------------|
|
| 155 |
+
| Latent Dim | 8 | 16 | 32 |
|
| 156 |
+
| ELBO | 96.74 | 91.82 | 66.31 |
|
| 157 |
+
| Stein | 96.90 | 88.86 | 108.84 |
|
| 158 |
+
| Spectral | 96.85 | 88.76 | 121.51 |
|
| 159 |
+
| SSM | 95.61 | 88.44 | 62.50 |
|
| 160 |
+
| SSM-AR | 95.85 | 88.98 | 66.88 |
|
| 161 |
+
| CSM (Ours) | 95.02 | 88.42 | 62.20 |
|
| 162 |
+
|
| 163 |
+

|
| 164 |
+
|
| 165 |
+
Table 2: VAE results on MNIST and CelebA.
|
| 166 |
+
|
| 167 |
+
Figure 6: CSM VAE MNIST and CelebA samples.
|
| 168 |
+
|
| 169 |
+
constant uniform and uniform as OOD distributions following [3]. We observe that our method can perform comparably or better than existing generative models.
|
| 170 |
+
|
| 171 |
+
In this section, we show that CSM can also be used to improve variational inference with implicit distributions [8]. Given a latent variable model $p_{\theta}(\mathbf{x}, \mathbf{z})$ , where $\mathbf{x}$ is the observed variable and $\mathbf{z} \in \mathbb{R}^D$ is the latent variable, a Variational Auto-Encoder (VAE) [11] contains an encoder $q_{\phi}(\mathbf{z}|\mathbf{x})$ and a decoder $p_{\theta}(\mathbf{x}|\mathbf{z})$ that are jointly trained by maximizing the evidence lower bound (ELBO)
|
| 172 |
+
|
| 173 |
+
$$\mathbb{E}_{p_{data}(\mathbf{x})} [\mathbb{E}_{q_{\phi}(\mathbf{z}|\mathbf{x})} \log p_{\theta}(\mathbf{x}|\mathbf{z}) p(\mathbf{z}) - \mathbb{E}_{q_{\phi}(\mathbf{z}|\mathbf{x})} \log q_{\phi}(\mathbf{z}|\mathbf{x})], \tag{6}$$
|
| 174 |
+
|
| 175 |
+
Typically, $q_{\phi}(\mathbf{z}|\mathbf{x})$ is chosen to be a simple *explicit* distribution such that the entropy term in Equation (6), $H(q_{\phi}(\cdot|\mathbf{x})) \triangleq -\mathbb{E}_{q_{\phi}(\mathbf{z}|\mathbf{x})}[\log q_{\phi}(\mathbf{z}|\mathbf{x})]$ , is tractable. To increase model flexibility, we can parameterize the encoder using implicit distributions—distributions that can be sampled tractably but do not have tractable densities (*e.g.*, the generator of a GAN [5]). The challenge is that evaluating $H(q_{\phi}(\cdot|\mathbf{x}))$ and its gradient $\nabla_{\phi}H(q_{\phi}(\cdot|\mathbf{x}))$ becomes intractable.
|
| 176 |
+
|
| 177 |
+
Suppose $z_d \sim q_\phi(z_d|\mathbf{z}_{< d},\mathbf{x})$ can be reparameterized as $g_{\phi,d}(\epsilon_{\leq d},\mathbf{x})$ , where $g_{\phi,d}$ is a deterministic mapping and $\epsilon$ is a D dimensional random variable. We can write the gradient of the entropy with respect to $\phi$ as
|
| 178 |
+
|
| 179 |
+
$$\nabla_{\phi} H(q_{\phi}(\cdot|\mathbf{x})) = -\sum_{d=1}^{D} \mathbb{E}_{p(\epsilon_{< d})} \mathbb{E}_{p(\epsilon_{d})} \left[ \frac{\partial}{\partial z_{d}} \log q_{\phi}(z_{d}|\mathbf{z}_{< d}, \mathbf{x})|_{z_{d} = g_{\phi, d}(\epsilon_{\leq d}, \mathbf{x})} \nabla_{\phi} g_{\phi, d}(\epsilon_{\leq d}, \mathbf{x}) \right],$$
|
| 180 |
+
|
| 181 |
+
where $\nabla_{\phi} g_{\phi,d}(\epsilon_{\leq d}, \mathbf{x})$ is usually easy to compute and $\frac{\partial}{\partial z_d} \log q_{\phi}(z_d | \mathbf{z}_{< d}, \mathbf{x})$ can be approximated by score estimation using CSM. We provide more details in Appendix D.
|
| 182 |
+
|
| 183 |
+
**Setup** We train VAEs using the proposed method on two image datasets – MNIST and CelebA. We follow the setup in [25] (see Appendix D.4) and compare our method with ELBO, and three other methods, namely SSM [25], Stein [26], and Spectral [23], that can be used to train implicit encoders [25]. Since SSM can also be used to train an AR-CSM model, we denote the AR-CSM model trained with SSM as SSM-AR. Following the settings in [25], we report the likelihoods estimated by AIS [16] for MNIST, and FID scores [7] for CelebA. We use the same decoder for all the methods, and encoders sharing similar architectures with slight yet necessary modifications. We provide more details in Appendix D.
|
| 184 |
+
|
| 185 |
+
**Results** We provide negative log-likelihoods (estimated by AIS) on MNIST and the FID scores on CelebA in Tab. 2. We observe that CSM is able to marginally outperform other methods in terms of the metrics we considered. We provide VAE samples for our method in Figure 6. Samples for the other methods can be found in Appendix E.
|
2103.00673/main_diagram/main_diagram.drawio
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
<mxfile host="app.diagrams.net" modified="2021-02-04T14:09:37.961Z" agent="5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.141 Safari/537.36" etag="ArGrX3Mnjos6oL0IAYEN" version="14.2.9" type="google"><diagram id="tTIBuuKjo8MK0GvrSuut" name="Page-1">7X1Zc6rKF++nOVX3PvxTzMMjMwoqoqj4cot5EEUZ1U9/6SRmJzGDyY5KdpLadY422mD/Vq95rf4P5ZZbKbPWYS91veQ/BHK3/6H8fwhCIMh/4B/k7u4GUJS4GwiyyL0bgv8MjKK9dz8I3Y+WkevlTz5YpGlSROung066WnlO8WTMyrK0fvoxP02e3nVtBd7RwMixkuPRaeQW4f0oDEF/LsheFIT3t6bw+wtL6/Dh+4E8tNy0fjSECv+hXJamxd2r5ZbzErB2h3W5+574ytWHB8u8VfHkRlWUR/bh+d+eBMPELk3WyP8znE5u0MuAXG7+d49YZSXl/ST3P6DYHWatw6jwRmvLAe/rBvj/UDYslknzDm5eWvn6Dgs/2nrNrVg/ShIuTdLs9uuo73uE4zTjeZGlC+/RFZek7WZpUfb4193/4MrLCm/7aOj+l0leuvSKbNd8ZPsA0t1X7kkPOUBT/wGSuh8KH0N4P2bdk07wMPOflWxe3C/mywubkGgtleb/erGjDkYzn6SV/H/EP7qwGHzlhYXPQLKvr+CzNRdFTqDFr1lZhIBuMLxli3sGsv3A4lLQV5Et1jqypcijpWWtwgn7abZshv+P5fvRynP/79F6N7+4eLrET1d0la68Z4t5P2QlUbBq3jrNOnrNOAvWL2rkHXN/YRm5LrgN+xKmWVquXIAgDzB5kGjgAVwrD2+vHN5oVtHcYXU7gkAY+PzCa37c/Ue+AtBn/J1Cb9AjRMkXEEXwGww9G6jUEahcuqp+Mf0MphgGPzDEy4Aq/6/WCkMaeHZSCqakMLIT/Y9uj3w5WsoXFvwt+XI1lejFhUVaJFz+amUxGG/XysL0CUTb7PY1eFlYtwbD42V9adXzwsqKexsNrFtjdBVWw82y++84aZJY64P1cfuJMEpc1dqlZXG4zeHdS5zsCDpRZFGCOA/xvyT/SfoGxy9J/wc78hSUmp9aRFaiN+RurYJTADsGxM3S9djKAq+4H1inEZAaQtWsYn4/9gIyRbq+v5h4/uG7dloU6fL+TXa/XA+T3q4Vzjb/mt/IQTf4f3jza7jmPfznffMPfDwrGhnZoN9QE5jDs/Ki9vLiVODf2gHH9PA+3gh5Nrzh8+J96wWx/uD911CmzdL7ya1KEDZqhLf6K0zu6f1NTA6LfyFATrAdvxKQv1/9OyZ58EbBQAtbe1nUrIeXgdtHq+Aw/hKL/aCCdwast08xfQw9cVHo0e8G/Qt4ng8f/NiYuvDWxH5l46Vl4+6/1/bma6KSOBv8+A8XldjnROX5ADnBWGsvv2ysCRYhCPFkU/pjqHxQqJ0PpGOf4jcC6QuE2tv4nCrUzofPsXvwV6hdRqi9gP3l7b8PeGX+SaFGfU6onQ0Q+MwOmO8KSFuMNPinO0zexufqRtopsXdv5TIgF+fPkj6Cpfn92W4GFu+GINHDgAmu3kA4chjgt/fre/du9/iddnB/PAy6YgR+x11I66m/BEAEvCQFmAKBnntB7h7ec4/Sgt51Mz+CAH/B038Yy7zEKqLq6fQv4XJ/Bw2I1j+Aw/QrXuzDFHlaZo53/60/6B5NhMLoDU2RMEbf/6FP50WpG/pggR/mLm4ViaO5bynnYSX+hphO8Mi8Q0z/MPJH+Qvw55BHDnkHD1CTF4b5BMfOV8L8iMWgGPyYxUA3NI19hsV8inIQqDho4q/z+/et/kvSGP1J7v
KcxnCYuDCNneA9OhON3QYsH5EYBKHtIrH3dfBLCrDPkhhKXJvEPuBq+RER9kYXOSV98eJBdvjX53Iun8v9JmhXkB3+4U4WuG1OFuTCTpYfFGR/Deu2+G+QC/tv2haPeAefq/tvDnD8ysbLycYWBdmRC+fAtE1U3v3cFgXZkTMnvXyLIPtrqLQlyI5cODWlbULtHXyuHmQ/pazgV6idRai1IciOXDgHpnVCjficUDsfIGd2wHxXQFpjpP1wh8k7+FzdSENP8J/8Btn/+7IYBYo2IushMN78veLU/mjIAiPQGxx9FHN/Ou01Qu7oCf6ZHxtyPy4Z/GTI/SHEfq2QO3qCm+dfDLlfi3KwQ57vAXACvyFgDCcgDKMJ9BAl+wQdNctH/OFNxLO7IDcUhkEPf/iFqezC+TstCbpfTUxhx/HWz0bTAenQOPJH5D0jLPQGouCLUhOB/b3W88uzPibtbijyD/OAv4ayUIq6QZ4lF5LwDQJRh1YpFyIo+IWCQmNVREXSQIRAqrVrcDuC5ONd1V65+ccSkuwkdRbjMFq9RtJPCRiFTiOwtxbmahlBz7uSfDrpjMZuEOrtuc5OYye4Hn8mzCjyRTBjeAtgPnYxXpCVnOBO++tOgj7ySidBwibwkxPG3lq9V/0t8HNKOTCIqzXTOaVs6+/7FB1W9qhPkSg2+tg5V/yho1ZrVvyUuqyzrvjpnaE+teIYgrdtxT/mCfqg5ML+TnK9n2V4TgXl2e5Aoc+qwtCR5Dqa69yS64MFWz8IZhR9rqDQwK3wSXP6BR3leLozg42dkgn7L2kKGHJlLoqdkurakl6Rn9UUjnxO11/0E0RXS9pIflJZaB2ZnyBFWlP78liHPoKVJ9hzc6nn2vVL8F26KAY7d+bnD86Rut8db+oxl06Kwi6dWNqyHI97em9PUhT2/ZJIv0tRzKtYtyTfCjt3fmrL84ffw+fa+VbYuRNIf2XjMfbtKYrBfni66j35f1xUng+QC+entrEo5lVUWlIUg52SpNpekL5AqL2Nz7WLYrBTMj1/hdpZhFoLimKwUzJA/2Whhr6fW3VZJfOUZMmfCEhbjDT0hztM3sHn6kYa+vcdvr68KOaJW+OD6ad3D//toqYPRxg9d25/ODiOvN2QErt8dQyGfiyb75jGfiBBHMdkP9unkn5WNINdtmjmoUjnKuifIRv9LwnqhN6Cp7gOLkh6+GcTdZ6THv7cLDs76Z3ggroE6X1RWc0FKO8E/f6CUvDTlIeS16a8D/ja2hLZh/6Q9WUj+42yc0o2zMWD+6eUTv36ej5pF71yysg1nTvYzz4l4Z7eW+TcwS7sbftBwf3XsG6L3wj72cdKvofP1f1G2O+xkheXjS0K7mM/+1jJe/JvUXAf+9bJUF8U3H8NlbYE97ETXGHtBekLhFq7j5XEsN8jDq4l1NoQ3Md+dgO/e/Jvkf2HXzgZ6rsA0hYjDf/pDpN2H0uA4Sf4T36D+/dgnbPD2NuNMD8b68fINxthXiPUj/9t77ofSB7HhY2f7Y+JXTnUj//tkZT/Vqj/igT1Rgu6TwdVG3J60jaTwp5SG3nVtpkY/reZbP9UsP9qsg4HsVzoiAr+mvhI0FnzeQPEy/fThF84XflyPctOOSzzLzuRuLhHudh/L2QCUIh9d8hloygXgwf6p/5Q66eSA+C3yxjh5/1kyOPMgAu3eDpD47jXF/sZPDR934LhoiA8rGZ7QLhEL7nXQIAgjrvNlrkoCA+52q0B4ZQ2PZ/vgoW/ucKfkIyvre+F0iqf7SH0s+cfoPBx/7Pnc51bCp7SK+gX+T8dxp+iBYEeZkf6+cdVohf64L0587lVcPoE6/8fV08eCniv1lSKPsEIP3/vtEurJ0cJpNfH4QR79Pzt1C6sobRuM3wgLaINedgP6v0R0jTJQyR5bcX/JUQvnpdNnzmN4tuH6b+URO73ULtStel/obnMF8P0yX4z58Po2/WbuWz29mXgb0muwIPj9/tQw0t7/pKQXT
t9AIdOMPJ/5ezZ5Wx70r5xCPkVuy9ukvZkguPQt66ceZwJfhmgWpIcjr9wrto3wu0cAvI9yK6dL46fchrdr4C8lIBsQQo5fsrBdT9NQOKfE5Dnw+jCVTjfGKPWGI+/7p4PQnZ94/EE7893yj3/d8LRCPyKT//DiQjo273m8MsnoOOnnHh4tizQf4dGjsPan0xSQKFnOen4ZXPS8Q8eyNj6nPSvp7ET+oKd4uW4IDXin06dekaNOHrZvmD4B8+NbHuS+pWI8X2L4oLi8/PESF2bGD/gKWxDcsRD2m1LkiMaxemUtKNL50fg8G9vngu6pfATNLWL+6Hgf6E/zxfD9MmWPefD6Nu17PnG+RGvwt8WF9cppV3tooazh3/egezqLq5TCsF+5ezZ5WyL8iNOqUr7aWL3k02FzobRKUVr7WW058uPeA2otuRHnFJy1l7cziMgv6b/0PkgO8Hb9SsgLyUg25AfgfwLx7V9LWp3K9Aiu/SfOOP+Mhi1xXhEft09H4Ts6sbjKafX/+ZHXKeF0dvt+j6bLoFRb7bru0a2BHKC0+k3W+L9kOBRlexnWzrgV86WQFpyYlp7syWuSGNvNfX7bBC6obA3m/pRV23qhyN/m0X4r+dLXE1IEm/3+fs0PVIv9PmjrtDn7wXfxal9/j7W0++U03H/uk8I3aiI1ovt5FD0L0/dgN/RtLHn3Y9eONrvok1B8HN0yHm2rh7s4h75JhLnW3H42Yo/IHC9Fb8Ejb/bC+dyNH71FSc+cn6aU9reK6b/IwDstFy5nqvaDwOWswgyMDooiyQCkvde5GaLP0KukZX400HkdhQ+BupPb6+7BDIYegnmhz5fZ9xAJ/Qxol+Akz4fnB9w8FwNzjdhIxzKs/0XQLc8ynfOCucpTYwuDOcHkj2uknnZKvjo4zTLF/jrpdMsid/Tov4712lR9xukVTmVxA8/Hopo2/FQxKWPh/puCZRnwLolAS/i0idPtewkxffwuXZ0izjl5Klf2fi1srE9eZDEKcdA/cuiEn9Fo71WJhZxyslM7eWXjZ1BCgwh/J0T51VUWpLhSJxyglF7Qfp7ofYOPtdOZyTwMxfs/Aq1NucuEvgHnDb/pFB7vw/0hZXMf6Er0BkAaY2R9sMdJu/gc3UjjfiAEd3OoIJN4Rj+QsjPpxzPOa9X+pSzLi4bVPhI2X87ggrXhA97Kahwc8j1vGZc4dy1+z9ZzTyhCcDF9cpL9wFom5iEW9YglPh+pfjfJq7wGtZtUVnPXdLfdhfMO/hcXWU9d4H9r2xsdVzh0rX7rROV9OdE5fkA+dZx2MbUEBiC/ctswldRaUtc4YDJ9wTpC4Ta2/hcPa5w7qL4X6HW6rjCP1Fv/xcQIe8XMl1WyfwnqunPAEhbjLRTSud/MD5XN9KID+DTirjCx8+Y/zx6J9SePHdMo/C1QwvEB9wirUT0UKXwel3D+RB94XT6qyMKox/r/GAnqbMYh9HqKYp/Cqqflk8TH9pVosjTt7vqqRsTeUDlE9XRr2FyoWJ9BDmCnPhkRTRCkUdz4fQNedlm8QT5ASOyjSzgIab4ehTyfCwAQxrAqLaxgI/1VvgKFvAqG/73WABKfh0LwNBjFkAAir8oB6A+4HxtR37Bg7r2mig6oxrX3vwC6sw+25/sbrpDrV35BdSF3b9tM5epT57QfT5Avt0J3d8mv+A1rNviuqK+dR3O34di3sHn6q4r6jf37uKysUX5BdQP9yxTn0zFOx8g3y4V74v55dfky50Pnx+eL/cOPldPLaB+8+WuJc/akFpA//B8OeqT+XLnA+TC+XLfBZC22Gf0T/eVfE2q3Pnw+XY1blf2QVOPzjGgn0WgXmjEe3FnNP1rcJ9NQaFbWOx2Sifgf5nB0m0rdqO/nYX9bZzRr2HdGmXnhxvv7+BzfWXn13i/uGxsjzOahH648U63rNiNhC5svH+VqHzPGvkCTtruCjgSurBZ3zpJ1+4KOBL6rYC7lqRrgZuahH52Bdw9+b
fHKCShn10B9yogLbHcSOhne1Hew+falhsJfcBya4eb+srnu9yvV4s80yR8ZvPrB+sk9/ujVZ5pEv7Zodl7em+REgJ/O6Ptu3imX8W6LfoN/O0y5L/UXn8Pn6vrNx9pNfsrG79GNrbIM33uDrWtF5XvB9Yv6z+7dAvZ1vHLVwBpi//5+3WUvSg+V/c/n7vt6688a7X/+dxdZVsvz1p2sif5w/u8vgpIW+yzS7d9/Wb4XN0+O8DxffzP7ToKpBUO6N+2sOdTSl4jiWtqIT+8DyzZtj6w5KX7wP4gB3TLW8ySl24x2zaDveUtZknkzA6VX9nYagc08rNP/rwn/xY5oJELe1Baxy9fAaQtDmj0wg6Vb4bP1R3Q6G9PxmvJszY4oNEP+Gv+RXmGtqxFI4leOAHluwDSFvsM/eG+knfwubp9dvXjBR53EH+92/jdM3273uLNV25o6NHfPdyHRtPPfdKnHzVAvDkvefFjB0j0BEvvrE3q/2lCAk3q3wMc/hwtgZ7170xNwZelJewEp94jWrrn84/I6IlP9A9NQcee19vrzePOwOUbFHsYMG8HaBo7DPDb+xnu3u0ev9MOvtuDEHkgXPp9Dvj6sUXfjkxhDHpCPDjySQYHE8QNBaEPNAk/mxa5gSDk+dWLkecHfJrXO2HlmCnexYSRl7Wk91jmmfpePeU1Lx+8QL1AsOeL5mLf4gitI3RfgvQP4NdA97hcDMOOo/UXBZc4pVTl35Ys3jYqZoc7NK/NR69vHwQ5vAE3oQ6Xnj/HXwqoZsFvJcH7bPaaqvuzY2Eo7AanMIygaQKnqEMu4kdFGwqTNzBC4Nih2d+zu9DITXOTP+oYflHhRpxS0XOWHXJHnX82CFBEf1WvD9DrF6leKIbc4BDypxnlM3uARm+gCxsExCkln19HlD+VhI7VsU+alOihjOIwEUFemF5OiKK9SS+P+RKJPmZMcKOVIZ9hTL8k+Aku9qAxfpyLoTc0RcLYK1yMoG7oQzebS1ElcVHl87OdI74fzdBfxLYe2NS12Bbxt7rXtdnWj6E5lKBv/mjwx0r8Z9kW0K5w9BHbagHXOiGC12qT+XWifMkWhh/bwg+G8eEG/xgZP2edjdHw4ID7BPe8AZbxgXQPJ+i1xK4lTsgRb7Nd+3NYKwmqfKAjQvlbuxYQII1f35glseMUTNYqnLCfZsv/ECIBWVR21rwK7vKp7kb89PbH/KFWYlOmhwv/u3P1Ms0HYGi9/XPxa2f5P4dpmh9+N9NXzl5HDXU1MPg+8IgjUJFZq9x/tChnuev/fWf2o+FnPKPwtsVTRvF0p94zkhfc81YSBaBKw/FWt9ucBU74yLES5v7CMnLd5LX4QnYXVPjvsTv2LF58/OlufBYeRrHjeA0MvZSTd7aADf6BlMzfgM3H6iuvHrEh8WOjmUtX1S+z/GWW345ZYgR8dWb5gXzZX2b5dnT7qSB8oRL9wpzye6altA9aDKVvyPePYLswut8zLaWFSUcNuo/P2qPJliF9SpjggHS0tIB5/yANVcv2Ei3NoyJKgVQ8pLmzCbjAPgD8uHHH7d8LEvU2b5618rXngN/sR1sgQdnbWzKHUegw0rx2rcJqtIe7t4i4XgX/IVw0YQd6DSlSkDLNX39khIIRNK84DLyfc4zZ/I8dbMM6AqPsIhGGE93kHGS2HxUrQ49ljaqD3t6cisEmWU6UHS0Se5GrcA4ZJdu0s9N764HpiNzUSOY9Ns92bqmIk1V/lCnmfCH2TaqPjUYxBKsr2FWrRgSyxgpdlZVBzvwBhe41b0w0Twyh2sAf4Qt4jzGqwSZuEFbRvtY0ZtBcDWbbOBrKAauFoaXucC5lbRbFRLw3EdRALlcDLNVLbYIO1sG856FxLy5GlT+G5GQfj5aBMdI2674KS+N1lYWQCEtbQZtwBJLxZQ/mPErsOfxGlRYFhfJlYDVPqYRUGVWNgiB6YUNvIjmMiQYzNvV3Bp5Q+p7PI0nnpziT9kwEFrh5c7EYlo
WRrvQYFVZbJDBmcbQeOHzONXqZ2MVHZUnrmIoXeZ6zZXfmh9B6hm3znSw013dTv2vVg0kzj79othy7wozZbqo6a9neJrUUUGgxJxyLZcczBA5hHeMKCRaXCXg8H+2NuCSoPGRjclNz050xYC8g7CTL5XUVr+05taPg3m60h5UZyzh7ZeWrjLAMhxxY4bWJp1IWMFzSi2NxifWGw4Whws0ltl4OV+NaBDDw9KjK+ws12S0lN4IX5ERTtgEuzpqPwRicgBvS7gxW1nUi9xiD3uxjfJhw63A/6NjcoluKmJgy6IZRS1bdqjWRsvnQH8371criGMSfdPEVMzXkDJ7iNm0uYT0CYHSGkxIeefNaTFh4O8Umc9q00BjdmX69Hg5m3n5XapUwSKIymEnNk7IcYhTNfhcjuvn60GM5i9koNjNLfKQu5HU31cBGYJiRMRnoCs6ZnQ7Yk2dVW1D8qIcOht4cjod/XIfyAtODoRvqbKUop0S//gG+J95CntzxPX4C1or0m5eCf8f5UtZDrGlDMMFkoo9kDSpVSusESeEY/VE5nvBsH9yFRzodGUt5wgjZLkt0BgttNtzmjC+OoHCU6tyomSLiFC0IRWWjdGbOzJDDMV8ic1x1eruxzw942l5W29Vg1V3QZJ7s+aXPOj3CQaZVUdjkPG/u43pWjUHFasjq8RyJBhOpqD2MqcOFUXe6GlM5rI8J08DfwMlsnxoAfCHees2va14plIiM5yHFiuFmOCVUUhQyTPT2mV4ZQr8jEdJu3DznUg3cuhjG9AJdjHrO3DY13bbJurdd82Endemas1fxKh+CvW3aJmCIEqMnNL+1JWZAMdlQmIemkPNyINM57cGTYVjDJtnJA3XNzo1a3XgMhHlpqgTUZMLVKsaZgJMKc7bC1AkdCKugT7LLmBEGTJIrSsPbc2YVdCvGhziZibFJs5cbAm5YEQRxPlMHFTN3NVoTeK0/gnhMrJZ7NJvt/ObhtohrIcI8Rf0870KmhwouM+1OxgqjxjOmg2nu0DQZue7ZjDdgNs4uzjWmwVXryHi2mgJGDReuu9G4eSg60NhM+7naDOL+XqhddyD52WxdLuEp2kDE4tqaRmrZ8baKT0XNx0xajHfzqIbr1F81HxgNp7fSDSdkLfYCwsMNeKWrFDMhUQcY7W6XAbiZG5Mlh8o0MYUuyvYwRuuNLAphODiiGSdbcHA+MHRSl2BGyFb9jBGsAbW2YkD0cOABceFy5KSaZh0ND8JgokczPPfkToAHgWP7+AYfjCYZBMuOiGvGTMnKIqOSvrLEwO8ghD1tsSiN1/BC1QsBV+lEH3eYbO/66AJHFiup1AV96E7HZQc8tV0a6aaAu5RImfFs4auTqcYb1mjHqKvZGFBJuLDiYSFuMTFeFVvwfH1jRJRTe5tNmA0K9YTQoxaWx3d3/ZnqqazPTjiUlfdlLHkVB1miNQ4xKYMTgldsr0cNWJr1aqRDLcWu7bNcAMeMu82rzazRMpZlw8LEHjMvFz26WJdjfa0MtnRZw9PYDpGgJJoZu47cM0xxMJwzsO5NygRWM4Vd516C7r2BMjadaTTdRInbEPHcGisjehoMPMPvBFu5dExGcueBVLqL/aL5OTy+GfSL/UQXjFFR2xSHjgo4iWdhAETlEhVQat0hRmaodbe5sOo5zEZQOnoeq4yPlv0ukg88CLZ2PgmRWk6QMCvG21WNuRi/iqXZMKZWgyG+4Ye7SRfbAxHfQ/beGHAtegwl8nSR+AKEVCODm5DL5oG26+bSAq8Raq0OtiaPFYE7RLBsWJadCIIdtiOMvEYcitlkbOwUKC5micgtoVibd+RViSZkEno+5JshrhcNCXL9QLKYHFPTWTQgIWbKiRNjKKzhFJ9l7GQwV7xJvh4uAcMkvFR1tK1HMOMBlcCINdlO4nUZN/KXCK3M6IuzaS3LwT4k1KXSfAFZFwrQO5heiNtmARGyrDfDOoUkw6iUCwGSpj2GVZEO0ZcnmzJV2M06I0
KqV0bNPFIixn1rHG/SkKtswBpQ3Vq6RmByaS3pIdbsPdHeDBNkvYSsgZH5peuJ23i8wLeG5Su521cMnoG9wPLX2Kbm5uPm+3bzpWIabI0Q3hGmIBRgZ0qkjmXMyrD2JsrElF3LYr9TCsKEkUdEf+D1xnrGeUD5Y8YTAuFsajXZKM2XdrU81UR2OJwOgdhhu7qBC9miGwTBrapxbm0DuXmWGoES1A3yKLv0cM70Y83jBY8YDN/g59M8PlCk/I01D/5W8wjvNA/Oj7gIBy9Q497icqcmUBrC/lDfNzZXkJmWKOiqtBe2ncTm9ovcZnRlWypKj6JGS2EtEdRoPpX40JwMd3HDYhZdnOTrSDeCaL/fE3jcCEPWgag9Tu3wOUKWgGvP1qsY7DqU2lIVGKDmKLC+Bi5H95ubMutaUgNNDgZTBskqDw74iuRTVWZmJdNIQZYVKX5hL+d13WE8tmPOAoVER526YT2smjUL6rAoUBQ4PCPFjCO6kb1ZMt1BhC6tAbiIybzY63q9oD+X7HmvlOlxmbh2qFZMTxI7up9DHVQhOhy5aZ4LGensHNOk7nC+mTuGGWAcKq4afhbbKRkOgfycV2ggk+zUz61km2CkMc+AXM7G5H7Nmpxac3Q4ZAxt5C0bRSNyO+o4dLp12GgYGDd3iwisEycEPtUZM0g0QuK8YZTN1xAbSO4KyHDj9rcHq3qnKjWw5NS+hTQ3Z5kcT0cSxjGNvK5ZwnZsaeUMhXIWD+Id23HLYdLr4/ze1ki2FuxAkBnBELrjOl4njrFuiGzncBpcTZs5lAJYOl1u3wnsOU1gkJNhU0zo7uxA08GTjFmZFMVKHw7BnR2N6vBIb2zXUKBkLKxFu2yQQRWdywaqVKizKbi54d4ZgbsMRxm2nOE+43bhXXcPlggbOc2dCjknkqqies2DbI28o5eujslGN0ImM0Aa9S7VhGBrgZnofErqQMroDdRLnN2MgZ65Q+IEyCenzyibacebhpRFl8MAA6pic6P+mJ5F0Hg9YQvTtpRccS0t6cQ8Xw/idCkpmaatckD9i2FEJaKp5CwSSvZOqmbjyjawkOI7+61PuoXDcpjVURC+F+bdmTozZYidJWwXkDSVMVA1yrlOr+tW63EsW4C+5xI2zSEcTXnwqDsl6yezXuZzOwKJSXkzdDoVUwXmFDa6poDvmACIjYr2t9u6+c5mrPEeGUycLjnuj227J3Qoh1XAVFiFVeVoV6sIvQ98KdzNBtDG2w/jgisHPlpZK49ZV4jBTyZEIRXgCUnNlj1AsjSuAcN8SGRVSXcX4BK8thervQcsVg8C1kNBUUsbX1NCsADg19o09kaS76OeGFxBmqA0fGy7Qg/VyY+DKtQNDb8gRNCzSZAPdKD8xhKEy8F7606CMKVHzDq3okT447MrkWIy1DlZ21LsdhpYHCVysUKPemqjdPFUIBg6y9W5KPqQpqrDYa4z+gaLOqbUG6miaq5VZkcZIqfkM2m4gKlJjI16o/S/W1bnyLk7jlGt69gDZjDTpi46r2wbcRe4VwtTgaNUDEJjtUvx1q7fKzFmVg+gDoJE44piIEhcsTXccMAdb1vbWd4omMqYASYdY0VUD+zVHEqcJBijaa/PToXK7KQiajC5Vsk2d7sFMMXhV6nK9KW9I2amCJkjaTlOO8XY7XbdQdibyVOGoT2UXXcl2q6BQsnsfNrj8xpPeeCkgkQyDqQJ0RjkgKO7GBfXUpUBkaFSvJLwScUAM27sgm3J0BTKShTDbzEVWY5pEd8q9S4Q1xMIY2QIXQVhNca2WdBFmQ3k0pjoMzxLbHmBKQmxG3BVILuxvrRkwFbmPp/uq3TrsY2KP8LXfcDUnZzdElYApAqT8WjW6866kMev8/7S5bYQTg7RMTpdEEgqV3V34Swpaxh0m8edM1tImJj1JpWVhT0BSrjidEVoKdpyOEfglblQGzOMtf0BjAj6LJ8SpdpYs7g8WTuSywdm0DEZ3WRHQSKWMIsMXIas1V
AcyKuUC+HJpM+wMLslbRsrdN9WlnoyljN9kgWN4Txl3LJaDHE/qDAOq6fjMRuH46E5GLAm3GlWkO0lmbeDglFWxJ6Z2YEDhFvE2fCC4oky00d0EVMOxiqWss6VWrB4HqmrsrTrBaNOE7gU+9NQA6ZUSIf6ri/QM2BV9hoziA2SITFPi6nBDuGBpjLl0IKGk2Gfnq00dWVuUCspJsKtB9RRdxvTBJJVjV2tl883sI1axpILJslw3hOcPr0aS56CEPZ2mSsLtdAB2VgyNJvV400+XFHBZC5CMUHQMq+aFGms2JmOTqZDVKTkWwobYRswPzFSAOncanpCIo4Xo3K45Lgz82XoBiWeafnkDQkdJ/E95tH0y1r+2Vj0B8Kj35lF14/DKo2mXt86F7mDkm8jM5eKK2MbM0ABsGByN1nMYXiRhItwbkwjlx2ge2loIKNxV65WvBwlSsKKo0W665FdaD4dAYLr9BOSJpcejiL+vqRJtHInMOAlRoFqg5zC8X2sDShsuq7NjBFz1qsVkiFyhjO2A1fbEJNgNmBh7NZPTmJ+wHpsirEi0OFps2FRfaaH8X5wyz6XmEwH/pTFTXYeCDaTOgxdAzJg5hSj1z01tjLS3wsZ4wBb2R5j3YwvRXJs8rMAATERjEnWEbOfweGU4THBD+YDljUdSE2hvB8w/lqud8A4AROwksjYNh7uO1I1kd0SmciZmayWct3zekyIMBrGkSEc9eOAV9Ai8BtbXi0cuYggfY6N1BjZd+cjOAPe0wW0zemSt7FYy72yT1U6UGH3tdzh92old+1VxRqJL1aB4jGJOegKfEoYOU/vSNOZTlme6OhsrWxqjedgcmDkjJr6ntQIqH7k7vc5MfSZbUhKOyPaNDrteBaamj6elsya1LNZ3Q+7CFcQyoKcT/FeNNkNA0ogawsxu6tdDXj/khrJgeHuhWl/WZmWMwvVGjAqvsB4uVR3uSIja2gnu3ZA5hzg73NDWGtmV3OHcJVP1W5/DbFuF8BoYFrobJdxvZzAm9XQyhtezmlAvlZCw/IhGpNcaQvNgDHQW1gWh7oiGg7IOHcMFDQZZJeD2u8bnQKGEnhnQOOA5uUAbbhaDxplwTbf0Iy+H3pjrjcBhOGxPS3co3EEIlduhxDlmZ0muZ5BATodeewOQ/b5jNmIOZVwE20GHJWx16WB6AoSkg/o1Ixmxpq3umurMT8wUt8tp7s9BCVcEZu0xNqpZDVyWZQppqgZm+6MImOuznC07oyjYOb0jTtDiNrI6+lYJwqUz2fzsd6To0JpJJkHmLEyy5Lx1Mq6+MKp1guT6RsBeHpF3aDEXmBpyvLM3c5n1l5jFhAuKkYGEJqpK1cjDSuXcJWkdMZ1LRJ1pynU06p+I8uyBONsuWqU72IG++iAN0T5VgJvlQ1WaMSE6CujebDcdSI9F5Bc8xs+iNXCbh9Rm5LP3E0YEy5nQA5Rzspxb8oXXZSTZHtrDuFARuxVLjZrwPtdeNBzl+6eAQRDsRLtownum8XSIBNfnfVSkghm9iSDaGZJS8MJXYS8MliKEEL5qyLQZzKyAjHMuBbqha8xCWoDs0xe1mjD/wSXg7d5yndwfavLk3i/51U4r8LtApV0w1324/W8m/oROvC9ULVG7kDOcy2jxgsEkguqN+ujuqnQW98LsDTvj6ExOfacimQdoaa6LEF0sO5iJXglfmvYjKLtbZQ03RZWf6WXmY3N8nAEb4Nq1l27irnuE4Q546V5xysGyaAretIC2oTGar3XVJmy0HwTjvxAS8HuqfszZmh2HZQWnP1mnDUsvxirtunnctTszFqW2VL2eT6lQ7u56UbB6I07X5tiMB5w8KaYShGC1Jqtw31j0O9jhAnWOBA3a10bmVvgFU/qqzju8OeOO4w4Tow4iNAnMhw5owz/QBLMd5bh6WMZLqvNjgTuuz10kOHTmedEk4nOSz5hLXGGmEuhWerzjVUqfVOJ8DDCBJVgaTZaFKy6NvRg2imXzLAcDk
fsMBpZ6UBRpl0J+FEsMZ2uN1GSbuadNMJMbpqs48bCAC45mt43kh1s1phP9gPPc6g5he16FFD+gTiYdU2oEYjsJFA0RjCUktrMJ3IxVJeM6bGD/RLoBBhP192KqfGZPy4XCm3y+K3wYTSKsUxuX/N9xrSYlOLImrUZ/pajNsytluARBU/0ojdCpAFK77R6wq5UlwHpCfk4sQPDV5ZiLx7JWtCXGR5YNUrPtToEsBbjjCg9T1jmfbfUhu7UwSfRmHdScOt5byn2OUUHTihGHW01dnEbCQAT78O8VwZzRiMY2+D8tBpVGh+wVtlTbZqC4RxGdIjKxFUyAs6puU7ZcSy6cxJiIkya1k6Bp7PC1smt6w43ZsNtxKnJa0GvYsfYCjiZllzc5S29nIixC0eJmkt7bCdrsm1BK3UiJtSWH6L7HcKhwVxix1Nkp/J2PVAygwC6F1XtivmiFtiqs2GwCT6ageBg6PHOhg1TiORTCPjCVpLsc3ipkyYU9lFLzmco59d+znXKlICXOYPWZjehGqMpmRZkD5K0HDGhAZbtB8TYMTdZhUF7nmIVMXHpAeStODtpzOieTE6mI0taOBYzoLIIt0f+EF2q8haiuGDfQ8R0wltG4AmW1uOygDeHCO2JMsHwWcdi6G5jAS46m1016NbMrb4mqckyy1QcEAuAlnWGy8inGxOU910xC/plv4KdQWgTt55EcZ0q3Wndw0RGhRAk3aU7hB46/toCkXGWtheBbelhru/WRN8NbXHMd7NwPqvn0qhKWU/uAz8f0ljZow3LW+vOpNdTN4iDjLeOvFzzW2aycccxFWxUHcQMuZ49ZWzKLmbbysuhyQAda8sxJRoDaSRU0goY1pg3JiwmiynZH3R8eIN19GpojcdjWhr1F5SlmUNlH0QU2kX6XD/aAi2w27NBdHa8ZVYlro7wAMPD3dIgio2TyFOlIduku2qAsmclrewmskw5Li9m8T5lqe1o2tMnxJiuFvAIKb14InUTHNpOst6KdIeutZmGiqixGdSoD6GBzvhFxuqw3U92WzlQCo4wO3a0k2h2j+jKluMjfAmtGY3d5/3plhGdXZd1NmhmzeL+drbpxnVsNeqpluqRWSllsfOnG2MGxHifS+bCTtsTu3i07QhqjNFcFeTYkK7GW3/ZQbucuTdqSmE3u1x2KChd4N5c0IFjW9d3877Fmpq03ouIKfdoajPp13tlGXhTaW5JXMxPXMgAnotJKkRrcuvNsF7iuAy5HXYzc7heuXjpTIGftWQLtjQ5CJPWRZZUtritQHgBY4qohKYDme3njByo4RQbsaXIl2koalE11FKDnc5K1Byzzq2XSVzmanRhA/rIsXmytEVv3qiw+ztpS36ksCZL89y2svfTTv/k4r9YMX7nLjjKJYVfTiANMsuNmsV/NpylhXUv5jH8zNmk0A38rCQKhm4OK/XY34HeIC+4PPDzaUvkR3L9fzB+z7ceerz1rgDeBw42+saqLnsbUXDuY9JDyBkX4IV4p+p2eAeZalA0NfSx5EOLZMRFHWHTZTHgJ9rudZvtOB2nq0SOkOiKMF0qa2FSO6IqCH0dav6TKn1urZuBxRSwLWRSVBviVhClhY+blBNTWNWnm43iTkSgzVKV5nkUvt+jGkg6xXGcxNidFkFMTLFDk/UsBqY4ZgHTHZKHNImxLKFSAyqwa6EIgBdiXawQqgaWZq0akkGNxwW9mffxNTdmRJIv40aBaubEh6RSG+PxbAjIY7vbN6SDIrtFnVeB2EOZJJYxrLuTSAnjJGouQTNMlYxo5SNKTDhJYmpjl7M785QqLT7MaptgEm4rTGcMyGuRgHpRaFUYM3RnRWhD1qGCVQwU0Wa/BdKwKy7yEqalKBytlyN9taXXXgf4Dsw0RAInhUxF761gfJEjK2yONXbBwF2nnNbpLUfaIp6LAY84GgXNR8tuDNx+jeI94jNfNhbpKr1V0wSs7BgCMkfjQWcJfDtTrUMHzASBe/ZyMp4B2p
CNaXcbj4B0j9ReYsAQRtiBPPMFPJ8zNJUKMT4x+JnRzTWbWhK+wnY84PgJQq7aYyN0vmQoozNxt5CIIt1Ud3sbkHOjDMe9dJ5LrjzaDLNIDhe1WPK1QJeRNwCWwSZ3pDAC2sFckDEBZmciH8q0Q8JK0oHKQT/HeWNiLJJugC7RHawLqNePOMQrKMkOVLxbFWLAbqyFxchTvu9OUkmIgerJbOIZtJ+OdxPFXYJlYDbLJecqA43BZtusgkHemuNaaR+Hy3Q9XNn8tO+wSUUkvlNzW2aO7eDdUEBfz9Z5xBvPW8wGP/PpIzR1Q5DHPU8esckXWCR9czgz4ww88me49H955C+P/OWRLeWRz1qGgcyTx3/Y2XgkINQUVGc/XJMaXT3spa4HPvH/AQ==</diagram></mxfile>
|
2103.00673/main_diagram/main_diagram.pdf
ADDED
|
Binary file (34.3 kB). View file
|
|
|
2103.00673/paper_text/intro_method.md
ADDED
|
@@ -0,0 +1,177 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Introduction
|
| 2 |
+
|
| 3 |
+
In the past decade, Convolutional Neural Networks (ConvNets) have achieved phenomenal success in many machine learning and computer vision applications [1–7]. Normalization is one of the most important components of modern network architectures [8]. Early normalization techniques, such as batch normalization (BatchNorm) [4], are cornerstones for effective training of models beyond a few layers. Since then, the values of normalization for optimization and learning is extensively studied, and many normalization techniques, such as layer normalization [9], instance normalization [10], and group normalization [11] are proposed. Many of such normalization techniques are based on estimating certain statistics of neuron inputs from training data. However, precise estimations of the statistics may not always be possible. For example, BatchNorm becomes ineffective when the batch size is small [12], or batch samples are statistically dependent [13].
|
| 4 |
+
|
| 5 |
+
Weight normalization [14] is a powerful alternative to BatchNorm that improves the conditioning of neural network training without the need to estimate statistics from neuron inputs. Weight normalization operates by either reparameterizing or regularizing the network weights so that all the weights have unit Euclidean
|
| 6 |
+
|
| 7 |
+
<sup>\*</sup>The first two authors contributed to this work equally.
|
| 8 |
+
|
| 9 |
+

|
| 10 |
+
|
| 11 |
+
Figure 1: **Comparison between BatchNorm and ConvNorm** on activations of $k = 1, \ldots, C$ channels. BatchNorm subtracts and multiplies the activations of each channel by computed scalars: mean $\mu$ and variance $\sigma^2$, before a per-channel affine transform parameterized by learned parameters $\beta$ and $\gamma$; ConvNorm performs per-channel convolution with precomputed kernel $v$ to normalize the spectrum of the weight matrix for the convolution layer, followed by a channel-wise convolution with learned kernel $r$ as the affine transform.
|
| 12 |
+
|
| 13 |
+
norm. Since then, various forms of normalization for network weights are proposed and become critical for many tasks such as training Generative Adversarial Networks (GANs) [15] and obtaining robustness to input perturbations [16, 17]. One of the most popular forms of weight normalization is enforcing orthogonality, which has drawn attention from a diverse range of research topics. The idea is that weights in each layer should be orthogonal and energy-preserving. Orthogonality is argued to play a central role for training ultra-deep models [18–22], optimizing recurrent models [23–26], improving generalization [27], obtaining robustness [28, 29], learning disentangled features [30, 31], improving the quality of GANs [32, 33], learning low-dimensional embedding [34], etc.
|
| 14 |
+
|
| 15 |
+
**Exploiting convolution structures for normalization.** Our work is motivated by the pivotal role of weight normalization in deep learning. In the context of ConvNets, the network weights are multi-dimensional (e.g., 4-dimensional for a 2D ConvNet) convolutional kernels. A vast majority of existing literature [27, 28, 35–39] imposes orthogonal weight regularization for ConvNets by treating multi-dimensional convolutional kernels as 2D matrices (e.g., by flattening certain dimensions) and imposing orthogonality of the matrix. However, this choice ignores the translation-invariance properties of convolutional operators and, as shown in [22], does not guarantee energy preservation. On the other hand, these methods often involve dealing with matrix inversions that are computationally expensive for deep and highly overparameterized networks.
|
| 16 |
+
|
| 17 |
+
In contrast, in this work we introduce a new normalization method dedicated to ConvNets, which explicitly exploits translation-invariance properties of convolutional operators. Therefore, we term our method *Convolutional Normalization* (ConvNorm). We normalize each output channel for each layer of ConvNets, similar to recent preconditioning methods for convolutional sparse coding [40]. The ConvNorm can be viewed as a reparameterization approach for the kernels that normalizes the weight of each channel to be a tight frame.<sup>1</sup> While extra mathematical hassles do exist in incorporating translation-invariance properties, this turns out to be a blessing, rather than a curse, in terms of computation: it allows us to carry out the inversion operation in our ConvNorm via fast Fourier transform (FFT) in the frequency domain, so that the computational complexity can be significantly reduced.
|
| 18 |
+
|
| 19 |
+
**Highlights of our method.** In summary, for ConvNets our approach enjoys several clear advantages over classical normalization methods [41–43], that we list below:
|
| 20 |
+
|
| 21 |
+
• **Easy to implement.** In contrast to weight regularization methods that often require hyperparameter tuning and heavy computation [41, 43], the ConvNorm has no parameter to tune and is efficient to compute.
|
| 22 |
+
|
| 23 |
+
<sup>1</sup>Tight frame can be viewed as a generalization of orthogonality for overcomplete matrices, which is also energy preserving.
|
| 24 |
+
|
| 25 |
+
Moreover, the ConvNorm can serve as a simple plug-and-play module that can be conveniently incorporated into training almost any ConvNets.
|
| 26 |
+
|
| 27 |
+
- Improving network robustness. Although the ConvNorm operates on each output channel separately, we show that it actually improves the overall layer-wise Lipschitzness of the ConvNets. Therefore, as demonstrated by our experiments, it has superior robustness performance against noise corruptions and adversarial attacks.
|
| 28 |
+
- Improving network training. We numerically demonstrate that the ConvNorm accelerates training on standard image datasets such as CIFAR [44] and ImageNet [45]. Inspired by the work [40, 46], our high-level intuition is that the ConvNorm improves the optimization landscape, so that optimization algorithms converge faster to the desired solutions.
|
| 29 |
+
|
| 30 |
+
**Related work.** Besides our work, a few very recent work also exploits the translation-invariance for designing the normalization techniques of ConvNets. We summarize and explain the difference with our method below.
|
| 31 |
+
|
| 32 |
+
- The work [22,43] derived a similar notion of orthogonality for convolutional kernels, and adopted a penalty based method to enforce orthogonality for network weights. These penalty methods often require careful tuning of the strength of the penalty on a case-by-case basis. In contrast, our method is parameter-free and thus easier to use. Our method also shows better empirical performance in terms of robustness.
|
| 33 |
+
- Very recent work by [29] presented a method to enforce *strict* orthogonality of convolutional weights by using Cayley transform. Like our approach, a sub-step of their method utilizes the idea of performing the computation in the Fourier domain. However, as they normalize the whole unstructured weight matrix, computing expensive matrix inversion is inevitable, so that their running time and memory consumption is prohibitive for large networks.<sup>2</sup> In contrast, our method is "orthogonalizing" the weight of each channel instead of the whole layer, so that we can exploit the convolutional structure to avoid expensive matrix inversion with a much lower computational burden. In the meanwhile, we show that this channel-wise normalization can still improve layer-wise Lipschitz condition.
|
| 34 |
+
|
| 35 |
+
**Organizations.** The rest of our paper is organized as follows. In Section 2, we introduce the basic notations and provide a brief overview of ConvNets. In Section 3, we introduce the design of the proposed ConvNorm and discuss the key intuitions and advantages. In Section 4, we perform extensive experiments on various applications verifying the effectiveness of the proposed method. Finally, we conclude and point to some interesting future directions in Section 5. To streamline our presentation, some technical details are deferred to the Appendices. The code of implementing our ConvNorm can be found online:
|
| 36 |
+
|
| 37 |
+
https://github.com/shengliu66/ConvNorm.
|
| 38 |
+
|
| 39 |
+
# Method
|
| 40 |
+
|
| 41 |
+
**Review of deep networks.** A deep network is essentially a *nonlinear* mapping $f(\cdot): x \mapsto y$ , which can be modeled by a composition of a series of simple maps: $f(x) = f^{L-1} \circ \cdots \circ f^1 \circ f^0(x)$ , where every $f^{\ell}(\cdot)$ $(1 \le \ell \le L)$ is called one "layer". Each layer is composed of a linear transform, followed by a simple nonlinear activation function $\varphi(\cdot)$ .3 More precisely, a basic deep network of L layers can be defined recursively by interleaving linear and nonlinear activation layers as
|
| 42 |
+
|
| 43 |
+
$$\boldsymbol{z}^{\ell+1} = f^{\ell}(\boldsymbol{z}^{\ell}) = \varphi \circ \mathcal{A}^{\ell}(\boldsymbol{z}^{\ell}) \tag{1}$$
|
| 44 |
+
|
| 45 |
+
for $\ell=0,1,\ldots,L-1$, with $\boldsymbol{z}^0=\boldsymbol{x}$. Here $\mathcal{A}^\ell(\cdot)$ denotes the linear transform and will be described in detail soon. For convenience, let us use $\boldsymbol{\theta}$ to denote all network parameters in $\left\{\mathcal{A}^\ell(\cdot)\right\}_{\ell=0}^{L-1}$. The goal of deep learning
|
| 46 |
+
|
| 47 |
+
<sup>&</sup>lt;sup>2</sup>In [29], the results are reported based on ResNet9, whereas our method can be easily added to larger networks, e.g. ResNet18 and ResNet50
|
| 48 |
+
|
| 49 |
+
$<sup>^3</sup>$ The nonlinearity could contain BatchNorm [4], pooling, dropout [47], and stride, etc.
|
| 50 |
+
|
| 51 |
+
is to fit the observation y with the output $f(x, \theta)$ for any sample x from a distribution $\mathcal{D}$ , by learning $\theta$ . This can be achieved by optimizing a certain loss function $\ell(\cdot)$ , i.e.,
|
| 52 |
+
|
| 53 |
+
$$\min_{\boldsymbol{\theta} \in \boldsymbol{\Theta}} L(\boldsymbol{\theta}; \left\{ \left(\boldsymbol{x}^i, \boldsymbol{y}^i\right) \right\}_{i=1}^m) \; := \; \frac{1}{m} \sum_{i=1}^m \ell\left(f(\boldsymbol{x}^i, \boldsymbol{\theta}), \boldsymbol{y}^i\right),$$
|
| 54 |
+
|
| 55 |
+
given a (large) training dataset $\{(x^i,y^i)\}_{i=1}^m$ . For example, for a typical classification task, the class label of a sample x is represented by a one-hot vector $y \in \mathbb{R}^k$ representing its membership in k classes. The loss can be chosen to be either the cross-entropy or $\ell_2$ -loss [48]. In the following, we use (x,y) to present one training sample.
|
| 56 |
+
|
| 57 |
+
An overview of ConvNets. The ConvNet [49] is a special deep network architecture, where each of its linear layer can be implemented much more efficiently via convolutions in comparison to fully connected networks [50]. Because of its efficiency and popularity in machine learning, for the rest of the paper, we focus on ConvNets. Suppose the input data x has C channels, represented as
|
| 58 |
+
|
| 59 |
+
$$\boldsymbol{x} = (\boldsymbol{x}_1, \boldsymbol{x}_2, \cdots, \boldsymbol{x}_C), \tag{2}$$
|
| 60 |
+
|
| 61 |
+
where for 1D signal $x_k \in \mathbb{R}^m$ denotes the kth channel feature of x.<sup>4</sup> For the $\ell$ th layer $(0 \le \ell \le L - 1)$ of ConvNets, the linear operator $\mathcal{A}^{\ell}(\cdot) : \mathbb{R}^{C_{\ell} \times m} \mapsto \mathbb{R}^{C_{\ell+1} \times m}$ in (1) is a convolution operation with $C_{\ell+1}$ output channels,
|
| 62 |
+
|
| 63 |
+
$$\begin{aligned} \boldsymbol{z}^{\ell+1} &= \left(\boldsymbol{z}_1^{\ell+1}, \boldsymbol{z}_2^{\ell+1}, \cdots, \boldsymbol{z}_{C_{\ell+1}}^{\ell+1}\right), \\ \boldsymbol{z}_k^{\ell+1} &= \left(\sum_{j=1}^{C_\ell} \boldsymbol{a}_{kj}^\ell * \boldsymbol{z}_j^\ell\right) \quad (1 \leq k \leq C_{\ell+1}), \end{aligned}$$
|
| 66 |
+
|
| 67 |
+
where \* denotes the convolution between two items that we will discuss below in more detail. Thus, for the $\ell$ th layer with $C_{\ell}$ input channels and $C_{\ell+1}$ output channels, we can organize the convolution kernels $\{a_{kj}\}$ as
|
| 68 |
+
|
| 69 |
+
$$A^\ell = \begin{bmatrix} a^\ell_{11} & a^\ell_{12} & \cdots & a^\ell_{1C_\ell} \\ a^\ell_{21} & a^\ell_{22} & \cdots & a^\ell_{2C_\ell} \\ \vdots & \vdots & \ddots & \vdots \\ a^\ell_{C_{\ell+1}1} & a^\ell_{C_{\ell+1}2} & \cdots & a^\ell_{C_{\ell+1}C_\ell} \end{bmatrix}.$$
|
| 70 |
+
|
| 71 |
+
**Convolution operators.** For the simplicity of presentation and analysis, we adopt *circular* convolution instead of linear convolution.<sup>5</sup> For 1D signal, given a kernel $a \in \mathbb{R}^n$ and an input signal $x \in \mathbb{R}^m$ (in many cases $m \gg n$ ), a circular convolution \* between a and x can be written in a simple matrix-vector product form via
|
| 72 |
+
|
| 73 |
+
$$y = a * x = C_a \cdot x,$$
|
| 74 |
+
|
| 75 |
+
where $C_a$ denotes a circulant matrix of the (zero-padded) kernel $a$,
|
| 76 |
+
|
| 77 |
+
$$C_{a} := \begin{bmatrix} s_0[a] & s_1[a] & \cdots & s_{m-1}[a] \end{bmatrix},$$
|
| 78 |
+
|
| 79 |
+
which is the concatenation of all cyclic shifts $s_k[a]$ $(0 \le k \le m-1)$ of length k of the (zero-padded) vector a. Since $C_a$ can be decomposed via the discrete Fourier transform (DFT) matrix F:
|
| 80 |
+
|
| 81 |
+
<sup>&</sup>lt;sup>4</sup>If the data is 2D, we can assume $x \in \mathbb{R}^{m_1 \times m_2}$ . For simplicity, we present our idea based on 1D signal.
|
| 82 |
+
|
| 83 |
+
<sup>&</sup>lt;sup>5</sup>Although there are slight differences between linear and circulant convolutions on the boundaries, actually any linear convolution can be *reduced* to circular convolution simply via zero-padding.
|
| 84 |
+
|
| 85 |
+
$$C_{a} = F^* \operatorname{diag}(\widehat{a}) F, \quad \widehat{a} = Fa,$$
|
| 86 |
+
(3)
|
| 87 |
+
|
| 88 |
+
where $\hat{a}$ denotes the Fourier transform of a vector a. The computation of a \* x can be carried out efficiently via fast Fourier transform (FFT) in the frequency domain. We refer the readers to the appendix for more technical details.
|
| 89 |
+
|
| 90 |
+
In the following, we introduce the proposed ConvNorm, that can fully exploit benign convolution structures of ConvNets. It can be efficiently implemented in the frequency domain, and reduce the layer-wise Lipschitz constant. First of all, we build intuitions of the new design from the simplest setting. From this, we show how to expand the idea to practical ConvNets and discuss its advantages for training and robustness.
|
| 91 |
+
|
| 92 |
+
Let us build some intuitions by zooming into one layer of ConvNets with both input and output being *single-channel*,
|
| 93 |
+
|
| 94 |
+
$$\boldsymbol{z}_{out} = \mathcal{A}_L(\boldsymbol{z}) = \boldsymbol{a} * \boldsymbol{z}_{in}, \tag{4}$$
|
| 95 |
+
|
| 96 |
+
where $z_{in}$ is the input signal, a is a single kernel, and $z_{out}$ denotes the output before the nonlinear activation. The form (4) is closely related to recent work on blind deconvolution [46]. More specifically, the work showed that normalizing the output $z_{out}$ via *preconditioning* eliminates bad local minimizers and dramatically improves the optimization landscapes for learning the kernel a. The basic idea is to multiply a preconditioning matrix which approximates the following form<sup>6</sup>
|
| 97 |
+
|
| 98 |
+
$$P = \left( C_{\boldsymbol{a}} C_{\boldsymbol{a}}^{\top} \right)^{-1/2}. \tag{5}$$
|
| 99 |
+
|
| 100 |
+
As we observe
|
| 101 |
+
|
| 102 |
+
$$\widetilde{\boldsymbol{z}}_{out} \ = \ \boldsymbol{P} \boldsymbol{z}_{out} \ = \ \underbrace{\big(\boldsymbol{C_a}\boldsymbol{C_a}^\top\big)^{-1/2}\boldsymbol{C_a}}_{\boldsymbol{Q}(\boldsymbol{a})} \cdot \boldsymbol{z}_{in},$$
|
| 103 |
+
|
| 104 |
+
the ConvNorm is essentially *reparametrizing* the circulant matrix $C_a$ of the kernel a to an *orthogonal* circulant matrix $Q(a) = \left(C_a C_a^\top\right)^{-1/2} C_a$ , with $QQ^\top = I$ . Thus, the ConvNorm is improving the conditioning of the *vanilla* problem and reducing the Lipschitz constant of the operator $\mathcal{A}_L(\cdot)$ in (4). On the other hand, the benefits of this normalization can also be observed in the frequency domain. Based on (3), we have $P = F^* \operatorname{diag}(v) F = C_v$ with $v = F^{-1} \left( |\widehat{a}|^{\odot -1} \right)$ . Thus, we also have
|
| 105 |
+
|
| 106 |
+
$$\boldsymbol{Q}(\boldsymbol{a}) = \boldsymbol{C_v} \cdot \boldsymbol{C_a} \ = \ \boldsymbol{C}_{\boldsymbol{v}*\boldsymbol{a}} = \boldsymbol{F}^* \operatorname{diag}(\widehat{\boldsymbol{g}}(\boldsymbol{a})) \boldsymbol{F}, \quad \widehat{\boldsymbol{g}}(\boldsymbol{a}) \ = \ \widehat{\boldsymbol{a}} \odot |\widehat{\boldsymbol{a}}|^{\odot - 1} \,,$$
|
| 107 |
+
|
| 108 |
+
with $\odot$ denoting entrywise operation and $g = F^{-1}\left(\widehat{a} \odot |\widehat{a}|^{\odot - 1}\right)$ . Thus, we can see that:
|
| 109 |
+
|
| 110 |
+
- Although the reparameterization involves matrix inversion, which is typically expensive to compute, for convolution it can actually be much more efficiently implemented in the frequency domain via FFT, reducing the complexity from $O(n^3)$ to $O(n \log n)$ .
|
| 111 |
+
- The reparametrized kernel g is effectively an *all-pass* filter with flat normalized spectrum $\hat{a} \odot |\hat{a}|^{\odot -1}$ . From an information theory perspective, this implies that it can better preserve (in particular, high-frequency) information of the input feature from the previous layer.
|
| 112 |
+
|
| 113 |
+
<sup>6</sup>In the work [46], they cook up a matrix by using output samples $\tilde{P} = \left(\frac{1}{m} \sum_{i=1}^{m} C_{z_{out}^i} (C_{z_{out}^i})^{\top}\right)^{-1/2}$ . When the input samples $z_{in}^i$ are i.i.d. zero mean, it can be shown that $\tilde{P} \approx P$ for large m. For ConvNets, we can just use the learned kernel a for cooking up P. <sup>7</sup>An all-pass filter is a signal processing filter that passes all frequencies equally in gain, but can change the phase relationship among various frequencies.
|
| 114 |
+
|
| 115 |
+
So far, we only considered one layer ConvNets with single-channel input and output. However, recall from Section 2, modern deep ConvNets are usually designed with many layers; each typical layer is constructed with a linear transformation with *multiple* input and output channels, followed by strides, normalization, and nonlinear activation. Extension of the normalization approach in Section 3.1 from one layer to multiple layers is easy, which can be done by applying the same normalization repetitively for all the layers. However, generalizing our method from a single channel to multiple channels is not obvious, that we discuss below.
|
| 116 |
+
|
| 117 |
+
In [40], the work introduced a preconditioning method for normalizing multiple kernels in convolutional sparse coding. In the following, we show that such an idea can be adapted to normalize each output channel, reduce the Lipschitz constant of the weight matrix in each layer, and improve training and network robustness. Let us consider any layer $\ell$ ( $1 \le \ell \le L$ ) within a *vanilla* ConvNet using 1-stride, and take one channel (e.g., k-th channel) of that layer as an example. For simplicity of presentation, we hide the layer number $\ell$ . Given $z_{k,out} = \sum_{j=1}^{C_I} a_{kj} * z_{j,in}$ , the k-th output channel can be written as
|
| 118 |
+
|
| 119 |
+
$$\begin{aligned} \boldsymbol{z}_{k,out} &= \underbrace{\begin{bmatrix} \boldsymbol{C}_{\boldsymbol{a}_{k1}} & \boldsymbol{C}_{\boldsymbol{a}_{k2}} & \cdots & \boldsymbol{C}_{\boldsymbol{a}_{kC_I}} \end{bmatrix}}_{\boldsymbol{A}_k} \cdot \underbrace{\begin{bmatrix} \boldsymbol{z}_{1,in} \\ \boldsymbol{z}_{2,in} \\ \vdots \\ \boldsymbol{z}_{C_I,in} \end{bmatrix}}_{\boldsymbol{z}_{in}}, \end{aligned}$$
|
| 120 |
+
|
| 121 |
+
with $C_I$ and $C_O$ being the numbers of input and output channels, respectively. For each channel $k=1,\cdots,C_O$ , we normalize the output by
|
| 122 |
+
|
| 123 |
+
$$P_k = \left(\sum_{j=1}^{C_I} C_{\boldsymbol{a}_{kj}} C_{\boldsymbol{a}_{kj}}^{\top}\right)^{-1/2} = \left(\boldsymbol{A}_k \boldsymbol{A}_k^{\top}\right)^{-1/2},$$
|
| 124 |
+
(6)
|
| 125 |
+
|
| 126 |
+
so that
|
| 127 |
+
|
| 128 |
+
$$\widetilde{\boldsymbol{z}}_{k,out} = \boldsymbol{P}_k \boldsymbol{z}_{k,out} = \underbrace{\left(\boldsymbol{A}_k \boldsymbol{A}_k^{\top}\right)^{-1/2} \boldsymbol{A}_k}_{\boldsymbol{Q}_k(\boldsymbol{A}_k)} \cdot \boldsymbol{z}_{in}.$$
|
| 129 |
+
(7)
|
| 130 |
+
|
| 131 |
+
Thus, we can see the ConvNorm is essentially a reparameterization of the kernels $\{a_{kj}\}_{j=1}^{C_I}$ for the k-th channel. Similar to Section 3.1, the operation can be rewritten in the form of convolutions
|
| 132 |
+
|
| 133 |
+
$$\boldsymbol{Q}_k(\boldsymbol{A}_k) \ = \ \boldsymbol{P}_k\boldsymbol{A}_k \ = \ \begin{bmatrix} \boldsymbol{C}_{\boldsymbol{v}_k*\boldsymbol{a}_{k1}} & \cdots & \boldsymbol{C}_{\boldsymbol{v}_k*\boldsymbol{a}_{kC_I}} \end{bmatrix}$$
|
| 134 |
+
|
| 135 |
+
with
|
| 136 |
+
$$P_k = C_{v_k}$$
|
| 137 |
+
and $v_k = F^{-1} \left(\sum_{i=1}^{C_I} |\widehat{a}_{ki}|^{\odot 2}\right)^{\odot -1/2}$ ; it can be efficiently implemented via FFT.
|
| 138 |
+
|
| 139 |
+
Here, as for multiple kernels the matrix $A_k$ is overcomplete (i.e., $A_k$ is a wide rectangular matrix), we cannot normalize the channel-wise weight matrix $A_k$ to be exactly orthogonal. However, it can be normalized to a tight frame with $Q_kQ_k^{\top} = I$. This further implies that we can normalize the spectral norm $\|Q_k\|$ of the weight matrix $Q_k$ in each channel to unity (see Figure 2 (Left)).
|
| 140 |
+
|
| 141 |
+
Combining the operation for all the channels, the ConvNorm for each layer overall can be summarized as follows:
|
| 142 |
+
|
| 143 |
+
$$\widetilde{\boldsymbol{z}}_{out} = \begin{bmatrix} \boldsymbol{P}_1 \boldsymbol{z}_{1,out} \\ \vdots \\ \boldsymbol{P}_{C_O} \boldsymbol{z}_{C_O,out} \end{bmatrix} = \underbrace{\begin{bmatrix} \boldsymbol{Q}_1 \\ \vdots \\ \boldsymbol{Q}_{C_O} \end{bmatrix}}_{\boldsymbol{Q}} \boldsymbol{z}_{in},$$
|
| 144 |
+
(8)
|
| 145 |
+
|
| 146 |
+
i.e., we normalize each output channel $k$ by a different matrix $P_k$. The proposed ConvNorm has several advantages that we discuss below.
|
| 147 |
+
|
| 148 |
+
**Proposition 3.1** The spectral norm of Q introduced in (8) can be bounded by
|
| 149 |
+
|
| 150 |
+

|
| 151 |
+
|
| 152 |
+
Figure 2: Condition number for each channel (averaged over all channels) (Left), and spectral norm for each layer (Right) on ResNet18 except for skip connection layers. ConvNorm normalizes the channel-wise condition number to 1 and reduces the layer-wise spectral norm. We use the method in [51] to calculate the singular values of the weight matrix.
|
| 153 |
+
|
| 154 |
+
$$\|Q\| \le \sqrt{\sum_{k=1}^{C_O} \|Q_k\|^2},$$
|
| 155 |
+
|
| 156 |
+
that is, the spectral norm of $\boldsymbol{Q}$ is bounded by the spectral norms of all the channel-wise weight matrices $\{\boldsymbol{Q}_k\}_{k=1}^{C_O}$.
|
| 157 |
+
|
| 158 |
+
**Proof** We defer the proof to the Appendix A.3.
|
| 159 |
+
|
| 160 |
+
- Efficient implementations. There are many existing results [29, 39, 41] trying to normalize the whole layerwise weight matrix. For ConvNets, as the matrix is neither circulant nor block circulant, computing its inversion is often computationally prohibitive. Here, for each layer, we only normalize the weight matrix of the individual output channel. Thus similar to Section 3.1, the inversion in (6) can be much more efficiently computed via FFT by exploiting the benign convolutional structure.
|
| 161 |
+
- Improving layer-wise Lipschitzness. As we can see from Proposition 3.1, although ConvNorm only normalized the spectral norm of each channel, it can actually reduce the spectral norm of the whole weight matrix, improving the Lipschitzness of each layer; see Figure 2 (Right) for a numerical demonstration on ResNet18. As extensively investigated [28,29,42], improving the Lipschitzness of the weights for ConvNets will lead to enhanced robustness against data corruptions, for which we will demonstrate on the proposed ConvNorm in Section 4.1.
|
| 162 |
+
- Easier training and better generalization. For deconvolution and convolutional sparse coding problems, the work [40,46] showed that ConvNorm could dramatically improve the corresponding nonconvex optimization landscapes. On the other hand, from an algorithmic unrolling perspective for neural network design [52,53], the ConvNorm is analogous to the preconditioned or conjugate gradient methods [54] which often substantially boost algorithmic convergence. Therefore, we conjecture that the ConvNorm also leads to better optimization landscapes for training ConvNets, that they can be optimized faster to better solution qualities of generalization. We empirically show this in Section 4.2.
|
| 163 |
+
|
| 164 |
+
To achieve the full performance and efficiency potentials of the proposed ConvNorm, we discuss some essential implementation details in the following.
|
| 165 |
+
|
| 166 |
+
**Efficient back-propagation.** For ConvNorm, as the normalization matrix in (6) is constructed from the learned kernels, it complicates the computation of the gradient in back-propagation when training the network. Fortunately, we observe that treating the normalization matrices {Pk} as constants during back-propagation usually does *not* affect the training and generalization performances, so that the computational complexity in training is not increased. We noticed that such a technique has also been recently considered in [55] for self-supervised learning, which is termed as *stop-gradient*.
|
| 167 |
+
|
| 168 |
+
**Learnable affine transform.** For each channel, we include an (optional) *affine transform* after the normalization $P_k \cdot z_{k,out} = C_{v_k} \cdot z_{k,out} = v_k * z_{k,out}$ in (7) as follows:
|
| 169 |
+
|
| 170 |
+
$$\overline{\boldsymbol{z}}_k = \boldsymbol{r}_k * \widetilde{\boldsymbol{z}}_{k,out} = \boldsymbol{r}_k * \boldsymbol{v}_k * \boldsymbol{z}_{k,out},$$
|
| 171 |
+
|
| 172 |
+
where the extra convolutional kernel $r_k$ is learned along with the original model parameters. The idea of including this affine transform is analogous to including a learnable rescaling in BatchNorm, which can be considered as an "undo" operation to make sure the identity transform can be represented [4]. The difference between our affine transform and BatchNorm is that we apply channel-wise convolutions instead of simple rescaling (see Figure 1). Note that when $r_k$ is an inverse kernel of $v_k$ (i.e., $r_k * v_k = 1$), the overall transformation becomes an identity. The effectiveness of the affine transform is demonstrated in the ablation study in Appendix C.4.
|
| 173 |
+
|
| 174 |
+
**Dealing with stride and 2D convolution.** There are extra technicalities that we briefly discuss below. For more details, we refer the readers to Appendix B.
|
| 175 |
+
|
| 176 |
+
- *Extension to 2D convolution.* Although we introduced the ConvNorm based on 1D convolution for the simplicity of presentations, it should be noted that our approach can be easily extended to the 2D case via 2D FFT.
|
| 177 |
+
- *Dealing with stride.* Strided convolutions are universal in modern ConvNet architectures such as the ResNet [5], which can be viewed as downsampling after unstrided convolution. To deal with stride for our ConvNorm, we first perform an unstrided convolution, normalizing the activations using ConvNorm and then downsampling the normalized activations. In comparison, the method proposed in [29] is incompatible with strided convolutions.
|
2106.11539/main_diagram/main_diagram.drawio
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2107.12309/main_diagram/main_diagram.drawio
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2107.12309/paper_text/intro_method.md
ADDED
|
@@ -0,0 +1,91 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Introduction
|
| 2 |
+
|
| 3 |
+
A scene graph is a structural representation that summaries objects of interest as nodes and their relationships as edges [@johnson2015image; @krishna2017visual]. Recently, scene graphs have been successfully applied in different vision tasks, such as image retrieval [@johnson2015image; @schuster2015generating], object detection, semantic segmentation, human-object interaction [@gkioxari2018detecting], image synthesis [@johnson2018image; @ashual2019specifying], and high-level vision-language tasks like image captioning [@gao2018image; @yang2019auto] or visual question answering (VQA) [@johnson2017inferring]. It is treated as a promising approach towards holistic scene understanding and a bridge connecting the large gap between vision and natural language domains. Therefore, the task of scene graph generation has caught increasing attention in communities.
|
| 4 |
+
|
| 5 |
+
<figure id="fig:tease" data-latex-placement="t!">
|
| 6 |
+
<div class="center">
|
| 7 |
+
<embed src="./figures/teaser.pdf" style="width:99.0%" />
|
| 8 |
+
</div>
|
| 9 |
+
<figcaption>The difference between scene graph generation from image and video. In the video, the person is watching TV and drinking water from the bottle. Dynamic Scene graph generation can utilize both spatial context and temporal dependencies (3rd row) compared with image-based scene graph generation (2nd row). Nodes in different colors denote objects (<code>person,bottle,tv</code>) in the frames.</figcaption>
|
| 10 |
+
</figure>
|
| 11 |
+
|
| 12 |
+
While the great progress made in scene graph generation from a single image (static scene graph generation), the task of scene graph generation from a video (dynamic scene graph generation) is new and more challenging. The most popular approach of static scene graph generation is built upon an object detector that generates object proposals, and then infers their relationship types as well as their object classes. However, objects are not sure to be consistent in each frame of the video sequence and the relationships between any two objects may vary because of their motions, which is characterized by *dynamic*. In this case, temporal dependencies play a role, and thus, the static scene graph generation methods are not directly applicable to dynamic scene graph generation, which has been fully discussed in [@ji2020action] and verified by the experimental results analyzed in Sec. [4](#sec:exp){reference-type="ref" reference="sec:exp"}. Fig. [1](#fig:tease){reference-type="ref" reference="fig:tease"} showcases the difference between scene graph generation from image and video.
|
| 13 |
+
|
| 14 |
+
Action recognition is an alternative to detect the dynamic relationships between objects. However, actions and activities are typically regarded as monolithic events that occur in videos in action recognition [@caba2015activitynet; @kay2017kinetics; @sigurdsson2016hollywood; @li2021pose]. It has been studied in Cognitive Science and Neuroscience that people perceive an ongoing activity by segmenting them into consistent groups and encoding into a hierarchical part structure [@kurby2008segmentation]. Let's take the activity \"drinking water\" as an example, as shown in Fig. [1](#fig:tease){reference-type="ref" reference="fig:tease"}. The person starts this activity by holding the bottle in front of her, and then holds it up and takes water. More complex, the person is looking at the television at the same time. Decomposition of this activity is useful for understanding how it happens and what is going on. Associating with the scene graph, it is possible to predict what will happen: after the person picks up the bottle in front of her, we can predict that the person is likely to drink water from it. Representing temporal events with structured representations, dynamic scene graph, could lead to more accurate and grounded action understanding. However, most of the existing methods for action recognition are not able to decompose the activity in this way.
|
| 15 |
+
|
| 16 |
+
In this paper, we explore how to generate a dynamic scene graph from sequences effectively. The main **contributions** are summarized as: (1) We propose a novel framework, Spatial-Temporal Transformer (STTran), which encodes the spatial context within single frames and decodes visual relationship representations with temporal dependencies across frames. (2) Distinct from the majority of related works, multi-label classification is applied in relationship prediction and a new strategy to generate a dynamic scene graph with confident predictions is introduced. (3) With several experiments, we verify that temporal dependencies have a positive effect on relationship prediction and our model improves performance by understanding it. STTran achieves state-of-the-art results on Action Genome [@ji2020action].
|
| 17 |
+
|
| 18 |
+
# Method
|
| 19 |
+
|
| 20 |
+
A dynamic scene graph $G_{dyn}(\mathcal{V}_{t},\mathcal{E}_{t})$ can be modeled as a static scene graph $G_{stat}(\mathcal{V},\mathcal{E})$ with an extra index $t$ representing the relations over time as an extra temporal axis. Inspired by the transformer characteristics: (1) the architecture is permutation-invariant, and (2) the sequence is compatible with positional encoding, we introduce a novel model, Spatial-Temporal Transformer (**STTran**), in order to utilize the spatial-temporal context along videos (see Fig. [2](#fig:model){reference-type="ref" reference="fig:model"}).
|
| 21 |
+
|
| 22 |
+
<figure id="fig:model" data-latex-placement="ht!">
|
| 23 |
+
<embed src="figures/framework3.pdf" style="width:99.0%" />
|
| 24 |
+
<figcaption>Overview of our method: the object detection backbone proposes object regions in RGB video frames and the relationship feature vectors are pre-processed (Sec. <a href="#Sec:representation" data-reference-type="ref" data-reference="Sec:representation">3.2</a>). The encoder of the proposed Spatial-Temporal Transformer (Sec. <a href="#Sec:sttran" data-reference-type="ref" data-reference="Sec:sttran">3.3</a>) first extracts the spatial context within single frames. The relation representations refined by encoder stacks from different frames are combined and added to learned frame encodings. The decoder layers capture temporal dependencies and relationships are predicted with linear classifiers for different relation type (such as <span class="math inline"><em>a</em><em>t</em><em>t</em><em>e</em><em>n</em><em>t</em><em>i</em><em>o</em><em>n</em></span>, <span class="math inline"><em>s</em><em>p</em><em>a</em><em>t</em><em>i</em><em>a</em><em>l</em></span>, <span class="math inline"><em>c</em><em>o</em><em>n</em><em>t</em><em>a</em><em>c</em><em>t</em></span>). <span class="math inline">⊕</span> indicates element-wise addition while FFN stands for feed-forward network.</figcaption>
|
| 25 |
+
</figure>
|
| 26 |
+
|
| 27 |
+
First, we briefly review the transformer structure. The transformer was proposed by Vaswani et al. [@vaswani2017attention] and consists of a stack of multi-head dot-product attention based transformer refining layers. In each layer, the input $\bm{X} \in \mathbb{R}^{N\times D}$ that has $N$ entries of $D$ dimensions, is transformed into queries ($\bm{Q} = \bm{X}\bm{W}_Q$, $\bm{W}_Q \in \mathbb{R}^{D\times D_q}$), keys ($\bm{K}= \bm{X}\bm{W}_K$, $\bm{W}_K \in \mathbb{R}^{D\times D_k}$) and values ($\bm{V} = \bm{X}\bm{W}_V$, $\bm{W}_V \in \mathbb{R}^{D\times D_v}$) through linear transformations. Note that $D_q$, $D_k$ and $D_v$ are normally the same in the implementation. Each entry is refined with other entries through dot-product attention defined by: $$\begin{equation}
|
| 28 |
+
\label{eq:attention}
|
| 29 |
+
\centering
|
| 30 |
+
\begin{aligned}
|
| 31 |
+
Attention(\bm{Q},\bm{K},\bm{V}) = Softmax\left(\frac{\bm{Q}\bm{K}^T}{\sqrt{D_k}}\right) \bm{V},
|
| 32 |
+
\end{aligned}
|
| 33 |
+
%\label{eq:attention}
|
| 34 |
+
\end{equation}$$ To improve the performance of the attention layer, multi-head attention is applied which is defined as : $$\begin{equation}
|
| 35 |
+
\centering
|
| 36 |
+
\begin{aligned}
|
| 37 |
+
& MultiHead(\bm{Q},\bm{K},\bm{V}) = Concat(h_1,\dots,h_h)\bm{W}_O,\\
|
| 38 |
+
& h_i = Attention(\bm{X}\bm{W}_{Q_i},\bm{X}\bm{W}_{K_i},\bm{X}\bm{W}_{V_i}).
|
| 39 |
+
\end{aligned}
|
| 40 |
+
\label{eq:multiheawd_1}
|
| 41 |
+
\end{equation}$$ A complete self-attention layer contains the above self-attention module followed by a normalization layer with residual connection and a feed-forward layer, which is also followed by a normalization layer with residual connection. For simplicity, we denote such a self-attention layer as $Att(.)$. In this work, we design a Spatio-Temporal Transformer based on $Att(.)$ to explore the spatial context, which works on a single frame, and temporal dependencies that work on sequence, respectively.
|
| 42 |
+
|
| 43 |
+
We employ Faster R-CNN [@ren2015faster] as our backbone. For the frame $I_t$ at time step $t$ in a given video with $T$ frames $V=[I_1,I_2,\dots,I_T]$, the detector provides visual features $\lbrace\bm{v}^1_t,\dots,\bm{v}^{N(t)}_t\rbrace\subset\mathbb{R}^{2048}$, bounding boxes $\lbrace\bm{b}^1_t,\dots,\bm{b}^{N(t)}_t\rbrace$ and object category distributions $\lbrace\bm{d}^1_t,\dots,\bm{d}^{N(t)}_t\rbrace$ of object proposals where $N(t)$ indicates the number of object proposals in the frame. Between the $N(t)$ object proposals there is a set of relationships $R_t=\lbrace r^1_t,r^2_t,\dots,r^{K(t)}_t\rbrace$. The representation vector $\bm{x}^k_t$ of the relation $r^k_t$ between the $i$-th and $j$-th object proposals contains visual appearances, spatial information and semantic embeddings, which can be formulated as: $$\begin{equation}
|
| 44 |
+
\bm{x}^k_t = \left< \bm{W}_{s}\bm{v}^i_t,\bm{W}_{o}\bm{v}^j_t,\bm{W}_{u}\varphi(\bm{u}^{ij}_t\oplus f_{box}(\bm{b}^i_t,\bm{b}^j_t)),\bm{s}^i_t,\bm{s}^j_t \right>
|
| 45 |
+
\label{Eq:rel_features}
|
| 46 |
+
\end{equation}$$ where $\left<,\right>$ is concatenation operation, $\varphi$ is flattening operation and $\oplus$ is element-wise addition. $\bm{W}_s$, $\bm{W}_o\in\mathbb{R}^{2048\times512}$ and $\bm{W}_u\in\mathbb{R}^{12544\times512}$ represent the linear matrices for dimension compression. $\bm{u}^{ij}_t\in\mathbb{R}^{256\times7\times7}$ indicates the feature map of the union box computed by RoIAlign [@he2017mask] while $f_{box}$ is the function transforming the bounding boxes of subject and object to an entire feature with the same shape as $\bm{u}^{ij}_t$. The semantic embedding vectors $\bm{s}^i_t$, $\bm{s}^j_t\in\mathbb{R}^{200}$ are determined by the object categories of subject and object. The relationship representations exchange spatial and temporal information in Spatial-Temporal Transformer.
|
| 47 |
+
|
| 48 |
+
The Spatio-Temporal Transformer maintains the original encoder-decoder architecture [@vaswani2017attention]. The difference is that the encoder and decoder are delegated more concrete tasks.
|
| 49 |
+
|
| 50 |
+
concentrates on the spatial context within a frame whose input is a single $\bm{X}_t=\lbrace \bm{x}^1_t,\bm{x}^2_t,\dots,\bm{x}^{K(t)}_t\rbrace$. The queries $\bm{Q}$, keys $\bm{K}$ and values $\bm{V}$ share the same input and the output of the $n$-th encoder layer is presented as: $$\begin{equation}
|
| 51 |
+
\bm{X}^{(n)}_t = Att_{enc.}(\bm{Q}=\bm{K}=\bm{V}=\bm{X}^{(n-1)}_t)
|
| 52 |
+
\label{Eq:enc_formula}
|
| 53 |
+
\end{equation}$$ The encoder consists of N identical $Att_{enc.}$ layers that are stacked sequentially. The input of the $(n)$-th layer is the output of the $(n-1)$-th layer. For simplicity, we remove the superscript $n$ in the following discussion. Unlike the majority of transformer methods, no additional position encoding is integrated into the inputs since the relationships within a frame are intuitively parallel. Having said that, the spatial information hiding in the relation representations (see Eq. [\[Eq:rel_features\]](#Eq:rel_features){reference-type="ref" reference="Eq:rel_features"}) plays a crucial role in the self-attention mechanism. The final output of the encoder stacks is sent to the Temporal Decoder.
|
| 54 |
+
|
| 55 |
+
is introduced for the temporal decoder. Without convolution and recurrence, the knowledge of sequence order such as positional encoding must be embedded in the input for the transformer. In contrast to the word position in [@vaswani2017attention] or the pixel position in [@carion2020end], we customize the frame encodings to inject the temporal position in the relationship representations. The frame encodings $\bm{E}_{f}$ are constructed with learned embedding parameters, since the amount of the embedding vectors depending on the window size $\eta$ in the Temporal Decoder is fixed and relative short: $\bm{E}_{f} = [\bm{e}_{1},\dots,\bm{e}_{\eta}],$ where $\bm{e}_{1},\dots,\bm{e}_{\eta}\in\mathbb{R}^{1936}$ are the learned vectors with the same length as $\bm{x}^k_t$.
|
| 56 |
+
|
| 57 |
+
The widely used sinusoidal encoding method is also analyzed (see Table [\[tab:ablation\]](#tab:ablation){reference-type="ref" reference="tab:ablation"}). We adopt the learned encoding method because of its overall better performance. The window size $\eta$ is fixed and therefore the video length does not affect the length of frame encodings.
|
| 58 |
+
|
| 59 |
+
captures the temporal dependencies between frames. If attention were computed over all frames of a video, not only would the required computation and memory consumption increase greatly, but useful information would also easily be overwhelmed by a large number of irrelevant representations. In this work, we adopt a sliding window to batch the frames so that the message is passed between the adjacent frames in order to avoid interference from distant frames.
|
| 60 |
+
|
| 61 |
+
Different from [@vaswani2017attention], the self-attention layer of our temporal decoder is identical to the spatial encoder $Att_{enc.}()$, the masked multi-head self-attention layers are removed. A sliding window of size $\eta$ runs over the sequence of spatial contextualized representations $[\bm{X}_1,\dots,\bm{X}_T]$ and the $i$-th generated input batch is presented as: $$\begin{equation}
|
| 62 |
+
\bm{Z}_{i} = [\bm{X}_{i},\dots,\bm{X}_{i+\eta-1}], i\in\lbrace1,\dots,T-\eta+1 \rbrace
|
| 63 |
+
\label{Eq:batch_input}
|
| 64 |
+
\end{equation}$$ where the window size $\eta\leq T$ and $T$ is the video length. The decoder consists of $N$ stacked identical self-attention layer $Att_{dec}()$ similar as the encoder structure. Considering the first layer: $$\begin{equation}
|
| 65 |
+
\centering
|
| 66 |
+
\begin{aligned}
|
| 67 |
+
& \bm{Q} = \bm{K} = \bm{Z}_i + \bm{E}_{f},\\
|
| 68 |
+
& \bm{V} = \bm{Z}_i, \\
|
| 69 |
+
&\hat{\bm{Z}}_i = Att_{dec.}(\bm{Q},\bm{K},\bm{V}).
|
| 70 |
+
\end{aligned}
|
| 71 |
+
\label{eq:multiheawd}
|
| 72 |
+
\end{equation}$$ Regarding the first line in Eq. [\[eq:multiheawd\]](#eq:multiheawd){reference-type="ref" reference="eq:multiheawd"}, the same encoding is added to the relation representations in the same frame as queries and keys. The output from the last decoder layer is adopted for final prediction. Because of the sliding window, the relationships in a frame have different representations in different batches. In this work, we choose the earliest representation appearing in the windows.
|
| 73 |
+
|
| 74 |
+
We employ multiple linear transformations to infer different kinds of relationships (such as attention, spatial, contacting) with the refined representations. In reality, the same type of relationship between two objects is not unique in semantics, such as synonymous actions $<$`person-holding-broom`$>$ and $<$`person-touching-broom`$>$. Thereby, we introduce the multi-label margin loss function for predicate classification as follows: $$\begin{equation}
|
| 75 |
+
L_{p}(r,\mathcal{P}^{+},\mathcal{P}^{-}) = \sum_{p\in\mathcal{P}^{+}} \sum_{q\in\mathcal{P}^{-}}max(0,1-\phi(r,p)+\phi(r,q))
|
| 76 |
+
\label{Eq:pred_loss1}
|
| 77 |
+
\end{equation}$$ For a subject-object pair $r$, $\mathcal{P}^{+}$ are the annotated predicates while $\mathcal{P}^{-}$ is the set of the predicates not in the annotation. $\phi(r,p)$ indicates the computed confidence score of the $p$-th predicate.
|
| 78 |
+
|
| 79 |
+
During training, the object distribution is computed by two fully-connected layers with a ReLU activation and a batch normalization in between. The standard cross entropy loss $L_o$ is utilized. The total objective is formulated as: $$\begin{equation}
|
| 80 |
+
L_{total} = L_p+L_o
|
| 81 |
+
\label{Eq:pred_loss2}
|
| 82 |
+
\end{equation}$$
|
| 83 |
+
|
| 84 |
+
There are two typical strategies to generate a scene graph with the inferred relation distribution in previous works: (a) **With Constraint** only allows each subject-object pair to have at most one predicate while (b) **No Constraint** allows a subject-object pair to have multiple edges in the output graph with multiple guesses. **With Constraint** is more rigorous and indicates the ability of models to predict the most important relationships, but it is incompetent for the multi-label task. Although **No Constraint** can reflect the ability of multi-label prediction, tolerant multiple guesses cause wrong information in the generated scene graph.
|
| 85 |
+
|
| 86 |
+
In order to make the generated scene graph closer to ground truth, we propose a new strategy named **Semi Constraint** allowing that a subject-object pair has multiple predicates such as $<$`person-holding-food`$>$ and $<$`person-eating-food`$>$. The predicate is regarded as positive iff the corresponding relation confidence is higher than the threshold.
|
| 87 |
+
|
| 88 |
+
At test time, the score of each relationship triplet $<$`subject-predicate-object`$>$ is computed as: $$\begin{equation}
|
| 89 |
+
s_{rel} = s_{sub}\cdot s_{p}\cdot s_{obj},
|
| 90 |
+
\label{Eq:pred_loss3}
|
| 91 |
+
\end{equation}$$ where $s_{sub}$,$s_{p}$,$s_{obj}$ are the confidence score of subject, predicate and object respectively.
|
2109.02639/main_diagram/main_diagram.drawio
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
<mxfile><diagram id="oy_I6MdvfGH_0F1L6v73" name="Page-1"><mxGraphModel dx="774" dy="622" grid="1" gridSize="10" guides="1" tooltips="1" connect="1" arrows="1" fold="1" page="1" pageScale="1" pageWidth="827" pageHeight="1169" math="0" shadow="0"><root><mxCell id="0"/><mxCell id="1" parent="0"/><mxCell id="262" value="" style="shape=table;html=1;whiteSpace=wrap;startSize=0;container=1;collapsible=0;childLayout=tableLayout;" vertex="1" parent="1"><mxGeometry x="580" y="590" width="420" height="420" as="geometry"/></mxCell><mxCell id="263" value="" style="shape=partialRectangle;html=1;whiteSpace=wrap;collapsible=0;dropTarget=0;pointerEvents=0;fillColor=none;top=0;left=0;bottom=0;right=0;points=[[0,0.5],[1,0.5]];portConstraint=eastwest;" vertex="1" parent="262"><mxGeometry width="420" height="84" as="geometry"/></mxCell><mxCell id="264" value="" style="shape=partialRectangle;html=1;whiteSpace=wrap;connectable=0;overflow=hidden;fillColor=none;top=0;left=0;bottom=0;right=0;" vertex="1" parent="263"><mxGeometry width="84" height="84" as="geometry"/></mxCell><mxCell id="265" value="" style="shape=partialRectangle;html=1;whiteSpace=wrap;connectable=0;overflow=hidden;fillColor=none;top=0;left=0;bottom=0;right=0;" vertex="1" parent="263"><mxGeometry x="84" width="84" height="84" as="geometry"/></mxCell><mxCell id="266" value="" style="shape=partialRectangle;html=1;whiteSpace=wrap;connectable=0;overflow=hidden;fillColor=none;top=0;left=0;bottom=0;right=0;" vertex="1" parent="263"><mxGeometry x="168" width="84" height="84" as="geometry"/></mxCell><mxCell id="267" value="" style="shape=partialRectangle;html=1;whiteSpace=wrap;connectable=0;overflow=hidden;fillColor=none;top=0;left=0;bottom=0;right=0;" vertex="1" parent="263"><mxGeometry x="252" width="84" height="84" as="geometry"/></mxCell><mxCell id="268" value="" style="shape=partialRectangle;html=1;whiteSpace=wrap;connectable=0;overflow=hidden;fillColor=none;top=0;left=0;bottom=0;right=0;" vertex="1" parent="263"><mxGeometry 
x="336" width="84" height="84" as="geometry"/></mxCell><mxCell id="269" value="" style="shape=partialRectangle;html=1;whiteSpace=wrap;collapsible=0;dropTarget=0;pointerEvents=0;fillColor=none;top=0;left=0;bottom=0;right=0;points=[[0,0.5],[1,0.5]];portConstraint=eastwest;" vertex="1" parent="262"><mxGeometry y="84" width="420" height="84" as="geometry"/></mxCell><mxCell id="270" value="" style="shape=partialRectangle;html=1;whiteSpace=wrap;connectable=0;overflow=hidden;fillColor=none;top=0;left=0;bottom=0;right=0;" vertex="1" parent="269"><mxGeometry width="84" height="84" as="geometry"/></mxCell><mxCell id="271" value="" style="shape=partialRectangle;html=1;whiteSpace=wrap;connectable=0;overflow=hidden;fillColor=#d80073;top=0;left=0;bottom=0;right=0;strokeColor=#A50040;fontColor=#ffffff;opacity=50;" vertex="1" parent="269"><mxGeometry x="84" width="84" height="84" as="geometry"/></mxCell><mxCell id="272" value="" style="shape=partialRectangle;html=1;whiteSpace=wrap;connectable=0;overflow=hidden;fillColor=#d80073;top=0;left=0;bottom=0;right=0;opacity=50;strokeColor=#A50040;fontColor=#ffffff;" vertex="1" parent="269"><mxGeometry x="168" width="84" height="84" as="geometry"/></mxCell><mxCell id="273" value="" style="shape=partialRectangle;html=1;whiteSpace=wrap;connectable=0;overflow=hidden;fillColor=#d80073;top=0;left=0;bottom=0;right=0;opacity=50;strokeColor=#A50040;fontColor=#ffffff;" vertex="1" parent="269"><mxGeometry x="252" width="84" height="84" as="geometry"/></mxCell><mxCell id="274" value="" style="shape=partialRectangle;html=1;whiteSpace=wrap;connectable=0;overflow=hidden;fillColor=none;top=0;left=0;bottom=0;right=0;" vertex="1" parent="269"><mxGeometry x="336" width="84" height="84" as="geometry"/></mxCell><mxCell id="275" value="" style="shape=partialRectangle;html=1;whiteSpace=wrap;collapsible=0;dropTarget=0;pointerEvents=0;fillColor=none;top=0;left=0;bottom=0;right=0;points=[[0,0.5],[1,0.5]];portConstraint=eastwest;" vertex="1" parent="262"><mxGeometry 
y="168" width="420" height="84" as="geometry"/></mxCell><mxCell id="276" value="" style="shape=partialRectangle;html=1;whiteSpace=wrap;connectable=0;overflow=hidden;fillColor=none;top=0;left=0;bottom=0;right=0;" vertex="1" parent="275"><mxGeometry width="84" height="84" as="geometry"/></mxCell><mxCell id="277" value="" style="shape=partialRectangle;html=1;whiteSpace=wrap;connectable=0;overflow=hidden;fillColor=#d80073;top=0;left=0;bottom=0;right=0;strokeColor=#A50040;fontColor=#ffffff;opacity=50;" vertex="1" parent="275"><mxGeometry x="84" width="84" height="84" as="geometry"/></mxCell><mxCell id="278" value="" style="shape=partialRectangle;html=1;whiteSpace=wrap;connectable=0;overflow=hidden;fillColor=#60a917;top=0;left=0;bottom=0;right=0;strokeColor=#2D7600;fontColor=#ffffff;opacity=50;" vertex="1" parent="275"><mxGeometry x="168" width="84" height="84" as="geometry"/></mxCell><mxCell id="279" value="" style="shape=partialRectangle;html=1;whiteSpace=wrap;connectable=0;overflow=hidden;fillColor=none;top=0;left=0;bottom=0;right=0;" vertex="1" parent="275"><mxGeometry x="252" width="84" height="84" as="geometry"/></mxCell><mxCell id="280" value="" style="shape=partialRectangle;html=1;whiteSpace=wrap;connectable=0;overflow=hidden;fillColor=none;top=0;left=0;bottom=0;right=0;" vertex="1" parent="275"><mxGeometry x="336" width="84" height="84" as="geometry"/></mxCell><mxCell id="281" value="" style="shape=partialRectangle;html=1;whiteSpace=wrap;collapsible=0;dropTarget=0;pointerEvents=0;fillColor=none;top=0;left=0;bottom=0;right=0;points=[[0,0.5],[1,0.5]];portConstraint=eastwest;" vertex="1" parent="262"><mxGeometry y="252" width="420" height="84" as="geometry"/></mxCell><mxCell id="282" value="" style="shape=partialRectangle;html=1;whiteSpace=wrap;connectable=0;overflow=hidden;fillColor=none;top=0;left=0;bottom=0;right=0;" vertex="1" parent="281"><mxGeometry width="84" height="84" as="geometry"/></mxCell><mxCell id="283" value="" 
style="shape=partialRectangle;html=1;whiteSpace=wrap;connectable=0;overflow=hidden;fillColor=none;top=0;left=0;bottom=0;right=0;" vertex="1" parent="281"><mxGeometry x="84" width="84" height="84" as="geometry"/></mxCell><mxCell id="284" value="" style="shape=partialRectangle;html=1;whiteSpace=wrap;connectable=0;overflow=hidden;fillColor=none;top=0;left=0;bottom=0;right=0;" vertex="1" parent="281"><mxGeometry x="168" width="84" height="84" as="geometry"/></mxCell><mxCell id="285" value="" style="shape=partialRectangle;html=1;whiteSpace=wrap;connectable=0;overflow=hidden;fillColor=none;top=0;left=0;bottom=0;right=0;" vertex="1" parent="281"><mxGeometry x="252" width="84" height="84" as="geometry"/></mxCell><mxCell id="286" value="" style="shape=partialRectangle;html=1;whiteSpace=wrap;connectable=0;overflow=hidden;fillColor=none;top=0;left=0;bottom=0;right=0;" vertex="1" parent="281"><mxGeometry x="336" width="84" height="84" as="geometry"/></mxCell><mxCell id="287" value="" style="shape=partialRectangle;html=1;whiteSpace=wrap;collapsible=0;dropTarget=0;pointerEvents=0;fillColor=none;top=0;left=0;bottom=0;right=0;points=[[0,0.5],[1,0.5]];portConstraint=eastwest;" vertex="1" parent="262"><mxGeometry y="336" width="420" height="84" as="geometry"/></mxCell><mxCell id="288" value="" style="shape=partialRectangle;html=1;whiteSpace=wrap;connectable=0;overflow=hidden;fillColor=none;top=0;left=0;bottom=0;right=0;" vertex="1" parent="287"><mxGeometry width="84" height="84" as="geometry"/></mxCell><mxCell id="289" value="" style="shape=partialRectangle;html=1;whiteSpace=wrap;connectable=0;overflow=hidden;fillColor=none;top=0;left=0;bottom=0;right=0;" vertex="1" parent="287"><mxGeometry x="84" width="84" height="84" as="geometry"/></mxCell><mxCell id="290" value="" style="shape=partialRectangle;html=1;whiteSpace=wrap;connectable=0;overflow=hidden;fillColor=none;top=0;left=0;bottom=0;right=0;" vertex="1" parent="287"><mxGeometry x="168" width="84" height="84" 
as="geometry"/></mxCell><mxCell id="291" value="" style="shape=partialRectangle;html=1;whiteSpace=wrap;connectable=0;overflow=hidden;fillColor=none;top=0;left=0;bottom=0;right=0;" vertex="1" parent="287"><mxGeometry x="252" width="84" height="84" as="geometry"/></mxCell><mxCell id="292" value="" style="shape=partialRectangle;html=1;whiteSpace=wrap;connectable=0;overflow=hidden;fillColor=none;top=0;left=0;bottom=0;right=0;" vertex="1" parent="287"><mxGeometry x="336" width="84" height="84" as="geometry"/></mxCell><mxCell id="293" value="" style="shape=image;verticalLabelPosition=bottom;labelBackgroundColor=#ffffff;verticalAlign=top;aspect=fixed;imageAspect=0;image=data:image/png,iVBORw0KGgoAAAANSUhEUgAAAF4AAAA9CAYAAAAkst6aAAAJo0lEQVR4Xr2ai5HkSBVFBw/ABfAAXAAPwAXwAFwAD8AF8ABcAA/ABfAAVqdj7sTRrcxXWbUdRJy7s9L7SEqlXn6qv3z58uUHG+348aXfX/rrpX9e+u+lf389/vWlH17a4fw/ufSHS/+69L9L5CEfeX55yb6NbZMmVv47mZU9eolVAtT89BKNQgNZNHqf48WsSG4anMaO/nOp83D+d5dW95I8zzSx8t/JrOzRS6wSIENPToPQKzmm5wd6+R8vueF4SQ15/3Ypjf2bSz+65Ov9/NKfL+Wl/OlS4/ucNLHy38ms7NFLPAt0g9Lg4BjH/faSG58GtN9fLtGY//h6bOLDy6D8OA9fm4lv54BT26R3mfI82G4HBSWDB6cMuIc7puNS96NfXcIn5SWN3nEc58Ug5+jSlfjOAae2Se8y5Xmw3Q5Eyks3Ojim47rk0Ht/dikNyqC6iuMFxacbni/H7K4Np7ZJ7zLlebDdDr5CQ6cBfnHJPs/EgNgNmIGTl2Icl9rvuIivxb5WM9nexTknNSuf6H7wlb9f4uGnB96JGr1rwOnLYbDdxU0vv5ls7+Kck5qVT3Q/uEiJoSFsO9Wqx6PdLCfa9fgeoFvNZHsX55zUrHyi+8FFBkd6rm2nYvq3akAWRY3jqP1ZTCUupcl+rWayvYtzTmpsYwxbTZ8/oHF4YGqygx4cC/v1IisNeZqDssJNTrMoa2Llz0BPA/Dvyo4mVv5Rwzka2tNj2vZhdZ9Go6c9S2rsl4E0otFX8/bGOdq3bdZE+3q6ijLVbU2s/KOGcymhbpOsh74RA73tWVITH8qFL4C4aAbpieSITNusCfv1oI+8prAmVv5Rw7lcy23yUXYdSE/f7bMYx1j0IF8gYhsA2t+0bZI5tdHz+r68PrBvc2pbqWdr375+O53iGIuBtR8OhfY3bZtkTm2re9uNI82pbSXGEmo8L4BS921wtdMpjrF6jwWxJgjtb9o2yZzaeOCMYdzT/3v/p7U8+UwrGKm70dErq05z6tdMcae2Sa8wxtl4qhWZirZeWXWaU79miju1TXqFMc7GU63ozbFoFR9NnPo1U9ypbdIrjHE2nmpFbwcjaukqPpo49WumuFPbpFcY40ajmPx29Z2pqeNan8Eq705mZd/JTLZm9BuNYvLb1Xfm745rfQarvDuZlX0nM9ma0W80islvV9/5EhzX+gxWeXcyK/tOZrI1o99oFPZrub4/rNAGnKOxbdIKvjTm7CxaEPfCdrVZ5YomVv6sEcjP6tjXp
ENmQ6xj7gcD9rO4qHt5Gp6VIvYJ5zH8aM4qz/adGu+O8vAo90QHyWp1lSuaaF///kCD0/i5Zu6DTbGOux8M2M/q/ZncRHb+JpwnZKAmh+07BeKYRRFL47vMuSGyknaO1oT9svNIg/d6hYVj2gS1/X4wYD+r90DS8N/2JAacJ+RPRNh+sH2n4C3tEJ/8OJN7ZDLgHK2J+KTR+TfPavVOLS/H9tsBCfJDhM+3DJ9vGjsPx7kVU56cy69QfMIrP8j52DKr4rptA5cfdLIDazpnJhPehwL7rUrw7S8s8j98Gm5AtPuVJrDRlKQRxzzoil0e4JiXTg56h3tRk/OxZXDnftoGsUf+Kk5wTgZucqx+SbJf93iey+Xm4z+rRkfUxjhaIW/eMRzvHmyXBzhOLXZvbz+wLb09L9s2YDDNw0cPvwA9wTnzEvMbg7Efjexr8lzuyB9vxg0XJSA4aUTPtG9EnQ72n3C5gCnOtrysXXlcdSqeuXHMjow/uy/a5K/wIq57u8bqJzHkRgAHoUyjaHxfAK0GOLTDvXL1a1WT85Sj3IP9rXSOKF9x45gdLmnPyAwr6uqxLTM4exBykL8SXhw/ofki7hGOW0GdzAPtrtfkPPfBYNylKcqYYXG/2BrH7eD+TspUpsQW7Xy7hv/C1zfYI7bJLCG/WVK7EpdcPfCAL4zosZnF9ILJ+Pwrtv6rAu7rhM5pNSufvPC0BfLAim5vB2c+Td7OjvTuLkMpPcnlH5KDL8yNpAwwF7YNmXdsPBe5rdU9reicVrPySRVJWyDP0tAH6fU4U4tolIbBLyWBf92jfUFfjAe1HxdnEMziA/UnGJl3bJSEXCNazURWdE6rWflkwE87UD3a5xs0rAciApir9h8odWOCE/LAHZM8yY3ygh1rmXdsPKyvx7Od0jmtpu0Z8FGen7Gh/e4HF/QKGpcbTwPSw5mpZDR3TOIaXgBjQV4m/9LTWb6vFmZN261nZJaUBkBdPld5o4mVv5X6bq0qyC3oFMc8i2vfnZqVT/SM1cKu5+6rvNHEyt9Kfbc4/4CDTnHMs7j23alZ+UTPyJeaB8/gbTqnNbHyt1Lfo8zfH+jAqFn5nMis7FGz8olM27ICtjgH7cucnhK4WskaxzS2eYYYZW1iP/RwImpWPicyK3vUrHwi07asMSJ6f2jfrCE+q+GzgWatVuLo4UTUrHxOZFb2qFn5RMbnmVH0g++2LzIIZt9/wnGNbRlbrGA/9HAialY+JzIre9SsfCLj8/1rGPK+in2zqt1tIRjHNbZlnRN59W8/dGNruLCtZVb26BWmuJ2NhWAGNeQyY5huYqe+O9eJmpz3/D3y3tPIyQVWMit79ApT3M7Wq9XdflMWV7vNtUlNzvO1+drodKV8dIGVzMoevcIUt7NRVvzgq33zbI9kmveqmpzP77rWyJT0HZzv++Q8zWE/5H2gLiWp6wyo07bHO1Dfc1307MXeDz4B5/s+OU9z2A9Ra72IoZF9zP/j0zjHq2T+nmug3eZfdD/4BJzv++Q8zWE/i9kKvZ9en30i/zzYOPZVsmhzw08bgOjGymGlZuUTTZz6Ne/GTThny6zsq/rOedMxN9q4U7PyiSZO/Zp34yacs2VW9qyAo9R30zE32rhTs/KJJk79mnfjJpyzZdrmnz2j1aKs4260cadm5RNNnPo178ZNOCeDL3N8SkjvpdsP7coMMo75sN0OhM+3rWlfy6zsUbPyOZGZbE182DDLtnLkhZDzrfaGWCescBy6Hwifb1vTvpZZ2aNm5XMiM9ma+DDX78bc/dkJtdx+u5UyOA7dD4TPt61pX8us7FGz8jmRmWxNfJh6ujHRqsenxMSn/wCgSVx0PxiwX+uUV+Imv1PbO2KG4kbvXkx58eoY7RZlpq9zPxiwX+uUV+Imv1PbO2JGkkan1rPxxh4QvZ6SwxfhRvdG24Sv8eF7OxiwX+uUV+Imv1Pbu/LfELVobL4KGpxe7rgJ+3343g4G7Nc65ZW4ye/U9
q4CPZ2XwEyFnk+v78a2Jm6+3wGLpVS3+6/lHgAAAAx0RVh0bGF0ZXgAeF97aWp9TcI/8QAAAA50RVh0cmVzb2x1dGlvbgA2MDDyft0mAAAADHRFWHRjb2xvcgAwMDAwMDDCn/ahAAAAFnRFWHRTb2Z0d2FyZQBsYXRleDJwbmcuY29tSgWpCgAAAABJRU5ErkJggg==;" vertex="1" parent="1"><mxGeometry x="759.18" y="790" width="61.64" height="40" as="geometry"/></mxCell></root></mxGraphModel></diagram></mxfile>
|
2109.02639/main_diagram/main_diagram.pdf
ADDED
|
Binary file (4.49 kB). View file
|
|
|
2109.02639/paper_text/intro_method.md
ADDED
|
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Method
|
| 2 |
+
|
| 3 |
+
We highlight that the score $\hat{p}_{nl}(x)$ that we use to conduct OOD detection allows a principled likelihood interpretation: **the unnormalized likelihood of a non-local model.** We believe this to be the first time that the likelihood of a non-local model is considered in the literature. However, other likelihood ratio variants have been previously explored for OOD detection. We briefly discuss related work and highlight where our method differs from relevant literature.
|
| 4 |
+
|
| 5 |
+
In [@ren2019likelihood], it is assumed that each data sample $x$ can be factorized as $x=\{x_b,x_s\}$, where $x_b$ is a background component, characterized by population level background statistics: $p_b$. Further, $x_s$ then constitutes a semantic component, characterized by patterns belonging specifically to the in-distribution data: $p_s$. A full model, fitted on the original data, (*e.g*.. Flow) can then be factorized as $p_f(x)=p_f(x_b,x_s)=p_b(x_b)p_s(x_s)$ and the semantic model can correspondingly be defined as a ratio: $p_s(x)=\frac{p_f(x)}{p_{b}(x)},$ where $p_f(x)$ is a full model. In order to estimate $p_b(x_b)$, the authors of [@ren2019likelihood] design a perturbation scheme and construct samples from $p_{b}(x_b)$ by adding random perturbation to the input data. A further full generative model is then fitted to the samples towards estimating $p_b$. In our method, both $p_l(x)$ and $p_{nl}(x)$ constitute distributions of the same sample space (that of the original image $x$) whereas $p_s$ and $p_b$ in [@ren2019likelihood] form distributions in different sample spaces (that of $x_s$ and $x_b$, respectively). Additionally, in comparison with our local and non-local experts factorization of the model distribution, their decomposition of an image into 'background' and 'semantic' parts may not be suitable for all image content and the construction of samples from $p_b(x_b)$ (adding random perturbation) lacks principled explanation. In [@serra2019input; @schirrmeister2020understanding], the score for OOD detection is defined as $s(x)=\frac{p_f(x)}{p_c(x)}$ where $p_f$ is a full model and $p_c$ is a complexity measure containing an image domain prior. In practice, $p_c$ is estimated using a traditional compressor (*e.g*.. PNG or FLIF), or a model trained on an additional, larger dataset in an attempt to capture general domain information [@schirrmeister2020understanding]. 
In comparison, our method does not require the introduction of new datasets and our explicit local feature model, $p_l$, can be considered more transparent than PNG or FLIF. Additionally, our likelihood ratio can be explained as the *(unnormalized) likelihood of the non-local model* for the in-distribution dataset, whereas the score described by [@schirrmeister2020understanding; @serra2019input] does not offer a likelihood interpretation. In Section [4](#sec:lossless){reference-type="ref" reference="sec:lossless"}, we discuss how the proposed local model may be utilized to build a lossless compressor, further highlighting the connection between our OOD detection framework and traditional lossless compressors (*e.g.* PNG or FLIF). In Table [4](#tab:comp:rate){reference-type="ref" reference="tab:comp:rate"}, we report experimental results showing that a lossless compressor based on our model significantly improves compression rates *cf.* PNG and FLIF, further suggesting the benefits of the introduced OOD detection method.
|
2109.06253/main_diagram/main_diagram.drawio
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
<mxfile host="app.diagrams.net" modified="2021-04-19T11:14:01.600Z" agent="5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36" etag="szaIRJe6_4bAWjpuFHvb" version="14.6.2" type="google"><diagram name="Page-1" id="9f46799a-70d6-7492-0946-bef42562c5a5">7Vxtc6M2EP41nrYffAPGBvtj4iTtdHKdNsldpx9lkEFzGBEhx879+q6EiN/AAYMS4pDJGLTojd2HfVaL7J41Xax/ZygOvlIPh72B4a171lVvMDDHxgQOQvKsJIZlpxKfEU/JNoJ78hNnFZV0STyc7FTklIacxLtCl0YRdvmODDFGV7vV5jTcHTVGPj4Q3LsoPJT+SzweKKlpGJsLf2DiB2ro8UhdmCH3h8/oMlLj9QbWXP6llxco60vVTwLk0dWWyLruWVNGKU/PFuspDoVyM7Wl7W4Krr7Mm+GIl2kQ/De9/XPy7VsUzO/Mx+FNMP/nez+7mScULpVCHhgikbAo4ijBXE2eP2cKg/uIxSlHMyG6DPgihKIJp6uAcHwfI1dcXwFgQJZwxLgyu2WAAAzJYQTMVCOXhiGKEyJ7u5I1AhJ6t+iZLnk2Tla6nEPrezUZ0RqFxI/g3AUtiC7TCulw5hjK6v4w43hdqDnzxR4AdEwXmLNnqKIaTJSSFMQtc5SWVxu8jBxVJ9iCiumoikhh1H/pemMnOFGmqmI288BshXaC++QEhXfw8KDIL2OyQ4t4jMYPiPmYK0FMidD39ROoMFGyOQnDKQ2pMGxEIzEQp7G6GOJ51nZGOacLVWBKXS+dSlWNLuEfNDo1vox6I7ibKZTNTRn+RXXGpzRKuACs6AOjhK9wwk9FwZEn5BAbGRiMclgY2rqgMMiBgh1mSpCeM7UJiB+XwtuANizXtW3h4jYi2xfHhwCLJkg0TOQnFc4ApR7tS9Y1TDXtPW3VKPikn0cb8NXGFQW7z0PpewPieThqFiAFzmMLEUAab4kIqyFEbBk0E4oO+onU2wVUMCfxOgXPHoxuJYoCCaDe1OpdTMDnc3m3CUlE10sGn8tI1IsQ5/g8oTXRAq31HqzeD2rDjofawUOOXZKHxrqgMDrJ6xhGPg9NkRxygVWkfZ7uoSYkhicyjzYM2A1hoAbz3KE4hqni/oKS8wRNTU4pAE1lTtEGIqfjlHZwijkYliOVbDnaPBbGJzoUw3DdQ99wRcXN0aXIMQUk+iGPMkaFoohK5YdY6/iUenJyGPWsm7N0IzWh45zGPfqgMmkIKjW4528cJfhn/4kuBY4el3IR9It4arOlzkzYOb0VuSrC54qumiRVgK6qJKUNbfZh3vTrMuSkn4g8ZOQKGzOcoEUcksiXM4WJDoy/oK5VaNkPkFItbdfjS1dnL6f6kiXfsqhj5FCNlfFP8ybtcqrl4456j7ddOaeai4WRrvS6XZxTTWIU5XKEYhfBD8yf/TowhkLDQBbGID0xfkvppSyZlM/EpnPKWMMGpwMn0SyJ03KleRs1Z31s3b47z8/LbvbrGWNr9KZ4L84Yvx3e66WM2/QMFKUgOvwfx/96D/vv9zx0ae2WhAI5gX5uKDDUBoXitHZHqS1yKa+nwnNdiD7cFKfCOxpqN2aq0pA2DGVL3Y513ph1zLIrUMfRZfq8ZMRJ7kMdID6v6D4aSJbvBeXHp192XkXRfdXx3o2lm9OD7GjGGummpvreL9/xeQnMKfBirxGWPq9VP29W12vVfgXzmXxWuQixXjKlXYmhzlnUjXb1OY/i92JdtKsz2h0YTskky1CX6fPybR882v3EnsY6MRejDV6N5fDaEpZ04KqdtNEGNrujsfehsWyzz6tbSCa6TJ+3c7VbRrfepdin8ZU+HBXveu1
WXeeBrap0pQ1rdrfbvi38NSn50mGsbQdkG/zOydvePkKmrj3L026XYNupw379Gxa5VKHPPRR/w+JjhiXn7TKafhPSban8gM6iapypzXmM82KLAnslRFjpQvy4k4gYCQNTECq+kqJisjyTbVl0X62McqQ66Jtjo6yij+9StTNVZV9acA5VPMnRsF1dw1Dc/EyUvLb1Y1zW9f8=</diagram></mxfile>
|
2109.06253/main_diagram/main_diagram.pdf
ADDED
|
Binary file (33.3 kB). View file
|
|
|
2109.06253/paper_text/intro_method.md
ADDED
|
@@ -0,0 +1,85 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Introduction
|
| 2 |
+
|
| 3 |
+
In this work, we address the *beam-search problem* in Neural Machine Translation [\(Koehn and](#page-8-0) [Knowles,](#page-8-0) [2017\)](#page-8-0). Beam Search is the standard hypothesis search method for autoregressive sequence generation. Large beams provide more probable hypotheses than small beams; however, the overall translation quality drops with growing beam size after a certain point. This effect is especially strong for long sentences, connecting with the fact that NMT models are biased to giving high probabilities to short hypotheses. [Stahlberg and Byrne](#page-9-0) [\(2019\)](#page-9-0) showed that exact search by likelihood for neural machine translation finds empty string as the optimal hypothesis in more than 50% of cases.
|
| 4 |
+
|
| 5 |
+
One of the most famous methods to mitigate quality degradation with growing beam size is length normalization [\(Bahdanau et al.,](#page-8-1) [2016;](#page-8-1) [Wu](#page-9-1) [et al.,](#page-9-1) [2016\)](#page-9-1). This technique normalizes log-likelihoods of a hypothesis in beam search by its length, thus promoting long hypotheses. Other methods examine adding a reward for each token's score during the decoding process [\(Yang et al.,](#page-9-2) [2018;](#page-9-2) [Murray and Chiang,](#page-8-2) [2018\)](#page-8-2).
|
| 6 |
+
|
| 7 |
+
While the beam-search problem has been extensively studied [\(Sountsov and Sarawagi,](#page-8-3) [2016;](#page-8-3) [Murray and Chiang,](#page-8-2) [2018;](#page-8-2) [Kumar and Sarawagi,](#page-8-4) [2019;](#page-8-4) [Meister et al.,](#page-8-5) [2020;](#page-8-5) [Eikema and Aziz,](#page-8-6) [2020;](#page-8-6) [Yang et al.,](#page-9-3) [2020;](#page-9-3) [Wang and Sennrich,](#page-9-4) [2020\)](#page-9-4) there is still no consensus on the underlying reason for such model behavior. Furthermore, prior work has investigated this problem primarily for NMT models, giving little attention to other domains that are also known to suffer from it, such as Automatic Speech Recognition (ASR) [\(Chorowski and](#page-8-7) [Jaitly,](#page-8-7) [2017;](#page-8-7) [Zhou et al.,](#page-9-5) [2020\)](#page-9-5). [Murray and Chi](#page-8-2)[ang](#page-8-2) [\(2018\)](#page-8-2) noticed that since in each step of beamsearch generation, negative log-probability is added to the hypothesis' score, if a model overestimates the probability of an already generated sequence of tokens, there is no way to downgrade this probability afterward. Consequently, models are biased towards finalizing a short hypothesis by generating an end-of-sequence token (EOS) rather than generating a long continuation. Their experiments show a connection between quality degradation and decreasing length of hypotheses with growing beam size. Our work is in agreement with their explanation. Moreover, we show that the main quality degradation with large beams in NMT and ASR comes from short translations obtained by early termination of long hypotheses from small beams.
|
| 8 |
+
|
| 9 |
+
Our work examines how the distribution of sentence lengths in a dataset affects the beam-search problem. We demonstrate that the *beam-search problem is strongly connected with the distribution of sentence lengths in training datasets*. Specifically, we show that common NMT datasets, such as
|
| 10 |
+
|
| 11 |
+
IWSLT and WMT, exhibit a strongly skewed distribution of sentence lengths, with the mode focused on short sentences. NMT models learn biased probability distributions and fail on long sentences during inference. In contrast, for ASR models trained on Librispeech [\(Panayotov et al.,](#page-8-8) [2015\)](#page-8-8), where the distribution of sentence lengths is more symmetrical and biased towards longer sentences, the beam-search degradation occurs at much larger lengths. Based on our findings, we propose a simple and effective dataset augmentation technique that makes training examples longer – *Multi-Sentence Resampling*. It creates a new dataset where each training sample can be a concatenation of multiple sentences. Our method alleviates quality degradation with growing beam size and increases the final quality of the model.
|
| 12 |
+
|
| 13 |
+
The key contributions of our work are as follows:
|
| 14 |
+
|
| 15 |
+
- We show that quality degradation with growing beam size comes mostly from short translations, which are early finalized prefixes of long hypotheses;
|
| 16 |
+
- We show that training datasets that are biased towards short sentences strongly contribute to the beam-search problem;
|
| 17 |
+
- We introduce *Multi-Sentence Resampling* – a simple and effective dataset augmentation technique that alleviates the beam-search problem and increases the final translation quality[1](#page-1-0) .
|
| 18 |
+
|
| 19 |
+
# Method
|
| 20 |
+
|
| 21 |
+
```
|
| 22 |
+
D ← old train dataset with pairs of
|
| 23 |
+
sentences;
|
| 24 |
+
```
|
| 25 |
+
|
| 26 |
+
N ← maximum number of sentences in one example in a new train dataset;
|
| 27 |
+
|
| 28 |
+
S = |D| · M ← number of examples in the new train dataset;
|
| 29 |
+
|
| 30 |
+
R ← ∅ – new dataset;
|
| 31 |
+
|
| 32 |
+
for i *in* 1..S do
|
| 33 |
+
|
| 34 |
+
```
|
| 35 |
+
n ← random integer from 1 to N
|
| 36 |
+
new_source = ""
|
| 37 |
+
new_target = ""
|
| 38 |
+
for k in 1..n do
|
| 39 |
+
(cur_source, cur_target) = sample
|
| 40 |
+
random example from D
|
| 41 |
+
new_source += cur_source
|
| 42 |
+
new_target += cur_target
|
| 43 |
+
end
|
| 44 |
+
R.append((new_source, new_target))
|
| 45 |
+
```
|
| 46 |
+
|
| 47 |
+
end
return R;
|
| 48 |
+
|
| 49 |
+
<span id="page-5-0"></span>
|
| 50 |
+
|
| 51 |
+
| | | The cat sat on a |
|
| 52 |
+
|----------------------------------|------------------------------------|--------------------|
|
| 53 |
+
| Train | dataset | Call |
|
| 54 |
+
| The cat sat on a mat. | Le chat était assis sur une natte. | Do you think t |
|
| 55 |
+
| Call me back. | Rappelle-moi. | The cat s |
|
| 56 |
+
| Do you think that this is a good | Pensez-vous que c'est une | Do you think that |
|
| 57 |
+
| idea? | bonne idée? | The cat s |
|
| 58 |
+
| | | The est set on a r |
|
| 59 |
+
|
| 60 |
+
| | Multi-sentence res | sampling with N=3 | |
|
| 61 |
+
|---|----------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------|--|
|
| 62 |
+
| | The cat sat on a mat. Call me back. | Le chat était assis sur une<br>natte. Rappelle-moi. | |
|
| 63 |
+
| | Call me back. | Rappelle-moi. | |
|
| 64 |
+
| > | Do you think that this is a good<br>idea? Call me back.<br>The cat sat on a mat. | Pensez-vous que c'est une bonne idée? Rappelle-moi. Le chat était assis sur une natte. | |
|
| 65 |
+
| | Do you think that this is a good idea? | Pensez-vous que c'est une bonne idée? | |
|
| 66 |
+
| | The cat sat on a mat. | Le chat était assis sur une natte. | |
|
| 67 |
+
| | The cat sat on a mat. Do you think that this is a good idea? Call me back. | Le chat était assis sur une<br>natte. Pensez-vous que c'est une bonne<br>idée? Rappelle-moi. | |
|
| 68 |
+
|
| 69 |
+
Figure 4: Example of Multi-Sentence Resampling with N=3. In the new train dataset with equal probability there are examples with 1,2 and 3 sentences from the original dataset.
|
| 70 |
+
|
| 71 |
+
In this section, we introduce *Multi-Sentence Resampling* (MSR) – a simple data augmentation method that alleviates the beam search problem by addressing dataset length bias and which increases the overall quality of translation models. Specifically, MSR augments a dataset such that instead of one sentence, each training example consists of 1 to N sentences, randomly chosen from a dataset and concatenated one after another. It preserves the order of sentences: the source side is concatenated to the source side, and the target side is concatenated to the target side of a new train example. For each new training example, MSR randomly chooses from 1 to N sentences, so that the model does not overfit to the particular number of sentences.
|
| 72 |
+
|
| 73 |
+
Figure [4](#page-5-0) illustrates the algorithm. The original training dataset, with 3 examples, is on the left, and the dataset created by *Multi-Sentence Resampling* is shown on the right. The new dataset contains 6 train examples: 2 examples with one sentence, 2 examples with a concatenation of two original sentences, and 2 examples with a concatenation of 3 original sentences. The full procedure is described in Algorithm 1. The algorithm takes an original train dataset, desired number of examples in the augmented dataset S, and the maximum number of sentences in an example N. *Multi-Sentence Resampling* increases the average length of the dataset and provides a diverse set of long training examples. In contrast to other methods that use rescoring of hypotheses and per-token rewards [\(Yang et al.,](#page-9-2) [2018\)](#page-9-2) or predict target length separately [\(Yang](#page-9-3) [et al.,](#page-9-3) [2020\)](#page-9-3), our method does not change the search procedure.
|
| 74 |
+
|
| 75 |
+
Figure [5](#page-5-1) illustrates how train examples length distribution changes in IWSLT17 Fr-En dataset for N from 2 to 5. With growing N distributions be-
|
| 76 |
+
|
| 77 |
+
<span id="page-5-1"></span>
|
| 78 |
+
|
| 79 |
+
Figure 5: Distribution of sentence lengths for different N in MSR for IWSLT17 Fr-En dataset (en part). Vertical lines represent mean of the corresponding distribution.
|
| 80 |
+
|
| 81 |
+
come flatter for the lengths presented in the test set. The average length of examples in a new train dataset can be approximately calculated as
|
| 82 |
+
|
| 83 |
+
$$new\_length \simeq \sum_{n=1}^{N} \frac{L \cdot n}{N} = L \cdot \frac{N+1}{2},$$
|
| 84 |
+
|
| 85 |
+
where L is the average length of the original dataset.
|
2201.01666/main_diagram/main_diagram.drawio
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
<mxfile host="macaques.ca" modified="2021-11-09T22:33:47.739Z" agent="5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/95.0.4638.69 Safari/537.36" etag="w2Vjg2LPWJp2iYy9Y3Yi" version="14.6.5" type="device"><diagram id="s91LsO4ZEgU5-gJD804P" name="Page-1">7Vpdb9owFP01aE+d8l14LLTrtGlTt07b+jRZxCRWnZg5pkB//RxikzgfpaTghIkXFF87TnzPuefG1wzsSbS6pWAefiE+xAPL8FcD+3pgWa5j8N/UsM4MtmlmhoAiPzMVDPfoGQqjuC9YIB8mykBGCGZorhqnJI7hlCk2QClZqsNmBKtPnYMAVgz3U4Cr1l/IZ2FmHbpGbv8IURDKJ5uG6ImAHCwMSQh8siyY7JuBPaGEsOwqWk0gTn0n/ZLd96Ghd/tiFMbsNTf8HDuTEfv0OXHAbRCHePL45/lCzPIE8EIseGB5mM83RvwiSC+kYUb4c7gHwTQb6f1dpK8+/oEiDpBlfIVL/vudRCDOO+UkfMAkRUROxl8zm099BjcXnrvxGltLKPhyOOq8MV6GiMH7efYmS847bgtZhHnL3N5Z9IxcJqQMrgom4albSCLI6JoPEb22BFjQdiiay5wDnjCFBfgl1kCwLthOnAPDLwQ2e+Bk1eBUdk/sX6WE560pBkmCpqpX4Aqx3/zaeO+K1kOh5zpduCEba9Fo9GRCFlTw4EViQV8Jr6q/Cw51axwqbRRiwNCTGpR1XhZPuCNow1YBp2OocDploLL1iLuKQVSaqMwLszwRAzSArDLRBvTtstvzwO4ZD3qCrz0swWK0xNcxu8XX1aTH9GSE2G4ApEAsR6cSe40IvQmQCszJu3osGmDrEqJRzyC6PIBIxvxFCiqZNh+kMqaNXCc3rXWxdQcph5xBuks9MzF5YSWjXqlsRRydtlm0zBhXr8qONMUwOJ0Y9hoQ6SqGJdW6COL28Wpa/Q5Y71CfRUO9AWvW7VM1fPcenEE7902jE2VaaYNlXpamOBmm1e20j5EbvmlJDTO0ugZJuIuWb/jYq6mM6E0U+22JYxLDV2bPvoTW0US8/Pl27NBy/nOkKsWhsna1rjLpRqquDKFrB6W3TGm9Nt96vaKaY7kKQ6y2Ba+ynm8PXnRRrbmectiK14+TqXiVgt+qKadoPXswO6ynNPrz1AL2YLnB7TqL1xVP9sniCaPkcXuYegp53TG8EnZ2O+wcc8dER8ZOQrWH2OaoSkFNOy6SzUH9FR9gGvNVUW1byrP5FnnmysrqSDYhmNCchDOEcckEMAriVLQ49dJ67TjVaTQF+Ep0RMj3cZPwU7KIfegLrfL5RmvTONjRh62wZeRV8oDWnZZVt9PqB3usM3t2bdQ7Z0/d7q8f7LHP7KmeBzhqprJGHdNn/5NxXfRxzvSp0GdoqvRxu6bP/ttMXfRxz/Spqo+l0ud4yYs3879gZt/Z+f9Y7Zt/</diagram></mxfile>
|
2201.01666/main_diagram/main_diagram.pdf
ADDED
|
Binary file (12.4 kB). View file
|
|
|
2201.01666/paper_text/intro_method.md
ADDED
|
@@ -0,0 +1,161 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Introduction
|
| 2 |
+
|
| 3 |
+
Deep reinforcement learning (DRL) methods have proven to be powerful at solving sequential decision-making tasks across domains (Silver et al., 2016; OpenAI et al., 2019). Combining the flexibility of the reinforcement learning framework with the representational power of deep neural networks enables policy optimization in complex and high-dimensional environments with unknown dynamics models to maximize the expected cumulative reward (Sutton & Barto, 2018).
|
| 4 |
+
|
| 5 |
+
An important limitation of DRL methods is their sample inefficiency: an enormous amount of data is necessary and makes training expensive. This makes applying DRL in the real world challenging, for example in robotics (Sünderhauf et al., 2018; Dulac-Arnold et al., 2019). In tasks like manipulation, sample collection is a slow and costly process (Liu et al., 2021). It is even more expensive in risk-averse applications like autonomous driving (Kothari et al., 2021).
|
| 6 |
+
|
| 7 |
+
Among the current state-of-the-art approaches to improve learning efficiency, a promising direction is to exploit the prevalence of uncertainty in the underlying DRL algorithm. By adopting a Bayesian framework, we can consider the sampled quantities in DRL as random variables and leverage information about their distributions to improve the learning process (Osband et al., 2018). In this paper, we consider the particular problem of unreliable supervision in the temporal difference update and the policy optimization process. In DRL, value predictions are used to supervise the training: in temporal difference-based algorithms, they are included in bootstrapped target values which are used as labels; in actor-critic frameworks, the policy is trained to optimize them. That these value predictions are noisy slows the learning and brings instability (Kumar et al., 2019; 2020). The amount of noise in the supervision depends on the uncertainty of the value prediction, which evolves during the training process and depends on the state (and action) evaluated. It is therefore *heteroscedastic*.
|
| 8 |
+
|
| 9 |
+
While there is an extensive body of literature focused on using the uncertainty of the value prediction to guide the exploration/exploitation trade-off (Dearden et al., 1998; Strens, 2001; Osband et al., 2016; Pathak et al., 2017; Chen et al., 2017; Osband et al., 2018; Fortunato et al., 2019; Osband et al., 2019; Flennerhag et al., 2020; Clements et al., 2020; Jain et al., 2021; Aravindan & Lee, 2021), there are very few works focused on leveraging it to mitigate the impact of unreliable supervision.
|
| 10 |
+
|
| 11 |
+
<sup>∗</sup>Canada CIFAR AI Chair
|
| 12 |
+
|
| 13 |
+
Distributional RL (Bellemare et al., 2017) considers the value function as a distribution to be learned as such. It is orthogonal to our proposition: we consider the uncertainty of the labels used to learn a scalar value function. In the offline RL setting, where the dataset is limited, uncertainty-weighted actor-critic (UWAC) (Wu et al., 2021) uses inverse-variance weighting to discard out-of-distribution state-action pairs using Monte Carlo dropout (Gal & Ghahramani, 2016) for uncertainty estimation.
|
| 14 |
+
|
| 15 |
+
Closer to our work, Lee et al. (2021) propose SUNRISE, in which each sample of the Bellman backup in the TD update step is weighted to lower the importance of the targets which have a high standard deviation. The weights w(s',a') are computed based on a sigmoid of the negative standard deviation $\hat{Q}_{\rm std}(s',a')$ scaled by a temperature hyperparameter T, and then offset such that they are between 0.5 and 1: $w(s',a') = \sigma(-\hat{Q}_{\rm std}(s',a') \cdot T) + 0.5$ . The uncertainty of the target is estimated by sampled ensembles. While SUNRISE proposes other contributions such as an exploration bonus, the heuristic weighting scheme and the limitations of sampled ensembles in capturing the predictive uncertainty leave space for improvement in the mitigation of the effects of unreliable supervision.
|
| 16 |
+
|
| 17 |
+
We propose inverse-variance reinforcement learning (IV-RL). IV-RL also uses weights to reduce the importance of uncertain targets in training. It does so by addressing the problem from two viewpoints. First, we use variance networks (Kendall & Gal, 2017), whose loss function for regression is the negative log-likelihood instead of the L2 distance. For a given state-action pair (s, a), the network learns the target's noise, due for example to the stochasticity of the environment or the update of the policy. It then naturally down-weights the highly noisy samples in the training process. Second, we use variance ensembles (Lakshminarayanan et al., 2017) to estimate the uncertainty of the target due to the prediction of Q(s', a') during the temporal-difference update. We merge the predicted variances of several variance networks through a mixture of Gaussians, which has been shown to be a reliable method to capture predictive uncertainty (Ovadia et al., 2019). We then use Batch Inverse-Variance (BIV) (Mai et al., 2021), which has been shown to significantly improve the performance of supervised learning with neural networks in the case of heteroscedastic regression. BIV is normalized, which makes it ideal to cope with different and time-varying scales of variance. We show analytically that these two different variance predictions for the target are complementary and their combination leads to consistent and significant improvements in the sample efficiency and overall performance of the learning process.
|
| 18 |
+
|
| 19 |
+
In summary, our contribution is threefold:
|
| 20 |
+
|
| 21 |
+
- 1. We present a systematic analysis of the sources of uncertainty in the supervision of model-free DRL algorithms. We show that the variance of the supervision noise can be estimated with two complementary methods: negative log-likelihood and variance ensembles.
|
| 22 |
+
- 2. We introduce IV-RL, a framework that accounts for the uncertainty of the supervisory signal by weighting the samples in a mini-batch during the agent's training. IV-RL uses BIV, a weighting scheme that is robust to poorly calibrated variance estimation.<sup>1</sup>
|
| 23 |
+
- 3. Our experiments show that IV-RL can lead to significant improvements in sample efficiency when applied to Deep Q-Networks (DQN) (Mnih et al., 2013) and Soft-Actor Critic (SAC) (Haarnoja et al., 2018).
|
| 24 |
+
|
| 25 |
+
In section 2, we introduce BIV as a weighting scheme for heteroscedastic regression, and variance ensembles as an uncertainty estimation method. We analyse the sources of uncertainty in the target in section 3, where we also introduce our IV-RL framework. We finally present our experimental results in section 4.
|
| 26 |
+
|
| 27 |
+
# Method
|
| 28 |
+
|
| 29 |
+
In supervised learning with deep neural networks, it is assumed that the training dataset consists of inputs $x_k$ and labels $y_k$ . However, depending on the label generation process, the label may be noisy. In regression, we can model the noise as a normal distribution around the true label: $\tilde{y}_k = y_k + \delta_k$ with $\delta_k \sim \mathcal{N}(0, \sigma_k^2)$ . In some cases, the label generation process leads to different variances for the label noises. When these variances can be estimated, each sample is a triplet $(x_k, \tilde{y}_k, \sigma_k^2)$ .
|
| 30 |
+
|
| 31 |
+
<sup>&</sup>lt;sup>1</sup>The code for IV-RL is available at https://github.com/montrealrobotics/iv\_rl.
|
| 32 |
+
|
| 33 |
+
Batch Inverse-Variance (BIV) weighting (Mai et al., 2021) leverages the additional information $\sigma_k^2$ , which is assumed to be provided, to learn faster and obtain better performance in the case of heteroscedastic noise on the labels. Applied to L2 loss, it optimizes the neural network parameters $\theta$ using the following loss function for a mini-batch D of size $K$:<sup>2</sup>
|
| 34 |
+
|
| 35 |
+
$$\mathcal{L}_{BIV}(D,\theta) = \left(\sum_{k=0}^{K} \frac{1}{\sigma_k^2 + \xi}\right)^{-1} \sum_{k=0}^{K} \frac{(f_{\theta}(x_k) - \tilde{y}_k)^2}{\sigma_k^2 + \xi}$$
|
| 36 |
+
(1)
|
| 37 |
+
|
| 38 |
+
This is a normalized weighted sum with weights $w_k = 1/(\sigma_k^2 + \xi)$ . Normalizing in the mini-batch enables control of the effective learning rate, especially in cases where the training data changes over time, such as in DRL. By focusing on the relative scale of the variances instead of their absolute value, it also provides robustness to poor scale-calibration of the variance estimates.
|
| 39 |
+
|
| 40 |
+
As explained in Mai et al. (2021), $\xi$ is a hyperparameter that is important for the stability of the optimization process. A higher $\xi$ limits the highest weights, thus preventing very small variance samples from dominating the loss function for a mini-batch. However, by controlling the discrimination between the samples, $\xi$ is also key when the variance estimation is not completely trusted. It provides control of the effective mini-batch size EBS, according to:
|
| 41 |
+
|
| 42 |
+
$$EBS = \frac{\left(\sum_{k}^{K} w_{k}\right)^{2}}{\sum_{k}^{K} w_{k}^{2}} = \frac{\left(\sum_{k}^{K} \frac{1}{(\sigma_{k}^{2} + \xi)}\right)^{2}}{\sum_{k}^{K} \frac{1}{(\sigma_{k}^{2} + \xi)^{2}}}$$
|
| 43 |
+
(2)
|
| 44 |
+
|
| 45 |
+
For example, imagine a mini-batch where most samples have very high variances, and only one has a very low variance. If $\xi=0$ , this one low-variance sample is effectively the only one to count in the mini-batch, and EBS tends towards 1. Increasing $\xi$ would give more relative importance to the other samples, thus increasing EBS. With a very high $\xi$ compared to the variances, all weights are equal, and EBS tends towards K; in this case, the BIV loss tends towards L2.
|
| 46 |
+
|
| 47 |
+
**Tuning the** $\xi$ **parameter** The simplest way to set $\xi$ is to choose a constant value as an additional hyperparameter. However, the best value is difficult to evaluate a priori and can change when the profile of variances changes during a task, as is the case in DRL.
|
| 48 |
+
|
| 49 |
+
It is instead possible to numerically compute the value of $\xi$ which ensures a minimal EBS for each mini-batch. This method allows $\xi$ to automatically adapt to the different scales of variance while ensuring a minimal amount of information from the dataset is accounted for by the algorithm. The minimal EBS is also a hyper-parameter, but it is easier to set and to transfer among environments, as it is simply a fraction of the original batch size. As such, it can be set as a batch size ratio.
|
| 50 |
+
|
| 51 |
+
The predictive uncertainty of a neural network can be considered as the combination of aleatoric and epistemic uncertainties (Kendall & Gal, 2017). Aleatoric uncertainty is irreducible and characterizes the non-deterministic relationship between the input and the desired output. Epistemic uncertainty is instead related to the trained model: it depends on the information available in the training data, the model's capacity to retain it, and the learning algorithm (Hüllermeier & Waegeman, 2021). There is currently no principled way to quantify the amount of task-related information present in the input, the training data, or the model. The state of the art for predictive uncertainty estimation instead relies on different sorts of proxies. These sometimes capture other elements, such as the noise of the labels, which we can use to our advantage. We focus here on the relevant methods to our work.
|
| 52 |
+
|
| 53 |
+
An ensemble of size N is formed by training several networks independently; it can be interpreted as a distribution over predictions. The expected behaviour is that different networks will only make similar predictions if they were sufficiently trained for a given input. The sampled variance of the networks' outputs is thus interpreted as epistemic uncertainty. It is possible to include a random Bernoulli mask
|
| 54 |
+
|
| 55 |
+
<sup>&</sup>lt;sup>2</sup>We replaced $\epsilon$ in Mai et al. (2021) with $\xi$ to avoid confusion with the reinforcement learning conventions.
|
| 56 |
+
|
| 57 |
+
of probability p to each training sample, to ensure that each network undergoes different training. This method, used by Clements et al. (2020) and Lee et al. (2021), has the same principle as single network Monte-Carlo dropout (Gal & Ghahramani, 2016). As the variance is sampled, the standard deviation is usually on the same scale as the prediction.
|
| 58 |
+
|
| 59 |
+
When used at the very beginning of the training process, sampled ensembles present one particular challenge: as the networks are initialized, they all predict small values. The initial variance, instead of capturing the lack of knowledge, is then underestimated. To address this problem, Randomized Prior Functions (RPFs) enforce a prior in the variance by pairing each network with a fixed, untrained network which adds its predictions to the output (Osband et al., 2019). RPFs ensure a high variance at regions of the input space which are not well explored, and a lower variance when the trained networks have learned to compensate for their respective prior and converge to the same output. The scale of the prior is a hyper-parameter.<sup>3</sup>
|
| 60 |
+
|
| 61 |
+
With variance networks, the uncertainty is predicted using loss attenuation (Nix & Weigend, 1994; Kendall & Gal, 2017). A network outputs two values in its final layer given an input $x$: the predicted mean $\mu(x)$ and variance $\sigma^2(x)$. Over a minibatch D of size K, the network parameters $\theta$ are optimized by minimizing the negative log-likelihood of a heteroscedastic Gaussian distribution:
|
| 62 |
+
|
| 63 |
+
$$\mathcal{L}_{LA}(D,\theta) = \sum_{k=0}^{K} \frac{1}{K} \frac{(\mu_{\theta}(x_k) - y(x_k))^2}{\sigma_{\theta}^2(x_k)} + \ln \sigma_{\theta}^2(x_k)$$
|
| 64 |
+
(3)
|
| 65 |
+
|
| 66 |
+
Variance networks naturally down-weight the labels with high variance in the optimization process. The variance prediction is trained from the error between µθ(x) and y(x). Hence, noise on the labels or changes in the regression task over time will also be captured by loss attenuation. As the variance is predicted by a neural network, it may not be well-calibrated and may be overestimated (Kuleshov et al., 2018; Levi et al., 2020; Bhatt et al., 2021). This can (1) give wrong variance estimates but also (2) affect the learning process by ignoring a sample if the variance estimate is too high.
|
| 67 |
+
|
| 68 |
+
Variance ensembles, or deep ensembles (Lakshminarayanan et al., 2017), are ensembles composed of variance networks. The predictive variance is given by a Gaussian mixture over the variance predictions of each network in the ensemble. This method, when trained, is able to capture uncertainty more reliably than others (Ovadia et al., 2019), with N = 5 networks in the ensemble being sufficient. Similarly to sampled ensembles, variance ensembles suffer from underestimated early epistemic variance estimation. However, compared to variance networks, they seem empirically less prone to calibration issues (1) in the final variance estimation because the mixture of Gaussians dampens single very high variances, and (2) in the learning process because even if one network does not learn correctly, the others will.
|
| 69 |
+
|
| 70 |
+
For better readability, we will use the terms var-network and var-ensemble in the rest of the paper.
|
| 71 |
+
|
| 72 |
+
While our work focuses on using uncertainty estimates to mitigate the impact of unreliable supervision, we can take advantage of the structure in place to better drive the exploration/exploitation trade-off. In particular, we used BootstrapDQN (Osband et al., 2016) for exploration. In this method, a single network is sampled from an ensemble at the beginning of each episode to select the action. This method is improved with the previously described RPFs (Osband et al., 2018). In continuous settings, we instead followed Lee et al. (2021) and added an Upper Confidence Bound (UCB) exploration bonus based on uncertainty prediction. As the variance in UCB is added to Q-values, it must be calibrated: we evaluate it with sampled ensembles. Possible interactions between exploration strategies and IV-RL are discussed in Appendix H.
|
| 73 |
+
|
| 74 |
+
<sup>3</sup>Using RPFs in the context of DRL can destabilize the exploration due to a significant initial bias added to the value of each state-action. It must be compensated for with uncertainty-aware exploration, such as Bootstrap (Osband et al., 2016), where it leads to a significant improvement in performance for exploration-based tasks.
|
| 75 |
+
|
| 76 |
+
Many model-free DRL algorithms use temporal difference updates. In methods such as DQN (Mnih et al., 2013), PPO (Schulman et al., 2017) and SAC (Haarnoja et al., 2018), a neural network is trained to predict the Q-value of a given state-action pair $Q^{\pi}(s,a)$<sup>4</sup> by minimizing the error between the target T(s,a) and its prediction $\hat{Q}(s,a)$ . T(s,a) is computed according to Bellman's equation:
|
| 77 |
+
|
| 78 |
+
$$T(s,a) = r + \gamma \bar{Q}(s',a') \tag{4}$$
|
| 79 |
+
|
| 80 |
+
s' and r are sampled from the environment given (s,a), and a' is sampled from the current policy given s'. $\bar{Q}(s',a')$ is predicted by a copy of the Q-network (called the target network) which is updated less frequently to ensure training stability. The neural network's parameters $\theta$ are optimized using stochastic gradient descent to minimize the following L2 loss function:
|
| 81 |
+
|
| 82 |
+
$$\mathcal{L}_{\theta} = \left| \left| T(s, a) - \hat{Q}_{\theta}(s, a) \right| \right|^{2} \tag{5}$$
|
| 83 |
+
|
| 84 |
+
The target T(s,a) is a noisy approximation of $Q^{\pi}(s,a)$ that is distributed according to its distribution $p_T(T|s,a)$ . The generative model used to produce samples of T(s,a) is shown in Figure 1, and has the following components:
|
| 85 |
+
|
| 86 |
+
- 1. if the reward r is stochastic, it is sampled from $p_R(r|s,a)$<sup>5</sup>;
|
| 87 |
+
- 2. if the environment dynamics are stochastic, the next state s' is sampled from $p_{S'}(s'|s,a)$ ;
|
| 88 |
+
- 3. if the policy is stochastic a' is sampled from the policy $\pi(a'|s')$ ;
|
| 89 |
+
- 4. $\bar{Q}$ is a prediction from a var-network $p_{\bar{Q}}(\bar{Q}|s',a')$ ;
|
| 90 |
+
- 5. T is deterministically generated from r and $\bar{Q}$ by equation (4).
|
| 91 |
+
|
| 92 |
+
As the variance of the noise of T(s,a) is not constant, training the Q-network using $\mathcal{L}_{\theta}$ as in equation (5) is regression on heteroscedastic noisy labels.
|
| 93 |
+
|
| 94 |
+
As seen in section 2.1, BIV can be used to reduce the impact of heteroscedastic noisy labels in regression, provided estimates of the label variances. We thus aim to evaluate $\sigma_T^2(T|s,a)$ based on the sampling
|
| 95 |
+
|
| 96 |
+

|
| 97 |
+
|
| 98 |
+
Figure 1: Bayesian network representing the target sampling process
|
| 99 |
+
|
| 100 |
+
process described in section 3.1.1. r and Q-value estimation are independent given s and a, hence:
|
| 101 |
+
|
| 102 |
+
$$\sigma_T^2(T|s,a) = \sigma_R^2(r|s,a) + \gamma^2 \sigma_{S'A'\bar{Q}}^2(\bar{Q}|s,a)$$
|
| 103 |
+
|
| 104 |
+
$$\tag{6}$$
|
| 105 |
+
|
| 106 |
+
where $p_{S'A'\bar{Q}}$ is the compound probability distribution based on components 2-4 in Figure 1:
|
| 107 |
+
|
| 108 |
+
$$p_{S'A'\bar{Q}}(\bar{Q}|s,a) = \iint p_{\bar{Q}}(\bar{Q}|s',a')p_{S'A'}(s',a'|s,a)da'ds'$$
|
| 109 |
+
(7)
|
| 110 |
+
|
| 111 |
+
where $p_{S'A'}(s',a'|s,a)=p_{A'}(a'|s')p_{S'}(s'|s,a)$ . Using the law of total variance, the variance of $\bar{Q}$ is given by:
|
| 112 |
+
|
| 113 |
+
$$\sigma_{S'A'\bar{Q}}^{2}\left(\bar{Q}|s,a\right) = \mathbb{E}_{S'A'}\left[\sigma_{\bar{Q}}^{2}\left(\bar{Q}|s',a'\right)|s,a\right] + \sigma_{S'A'}^{2}\left(\mathbb{E}_{\bar{Q}}\left[\bar{Q}|s',a'\right]|s,a\right) \tag{8}$$
|
| 114 |
+
|
| 115 |
+
Plugging (8) into (6) gives:
|
| 116 |
+
|
| 117 |
+
$$\sigma_{T}^{2}\left(T|s,a\right) = \gamma^{2}\underbrace{\left(\mathbb{E}_{S'A'}\left[\sigma_{\bar{Q}}^{2}\left(\bar{Q}|s',a'\right)|s,a\right]\right)}_{\text{Predictive variance of Q-network}} + \underbrace{\gamma^{2}\left(\sigma_{S'A'}^{2}\left(\mathbb{E}_{\bar{Q}}\left[\bar{Q}|s',a'\right]|s,a\right)\right) + \sigma_{R}^{2}(r|s,a)}_{\text{Policy and environment induced variance}}$$
|
| 118 |
+
(9)
|
| 119 |
+
|
| 120 |
+
<sup>&</sup>lt;sup>4</sup>In this paper, we focus on Q-values, although the principle can also apply to state values.
|
| 121 |
+
|
| 122 |
+
<sup>&</sup>lt;sup>5</sup>Stochastic reward can happen for example if the reward is corrupted or estimated (Romoff et al., 2018; Campbell et al., 2015)
|
| 123 |
+
|
| 124 |
+
We can identify two distinct components in equation 9 that contribute to the overall variance of the target. The first is the (expectation of the) variance that is due to the uncertainty in the neural network prediction of the value function, $\mathbb{E}_{S'A'}[\sigma_{\bar{Q}}^2(\bar{Q}|s',a')]$ . The second is the uncertainty due to the stochasticity of the environment and of the policy, $\sigma_R^2(r|s,a) + \gamma^2 \sigma_{S'A'}^2(\mathbb{E}_{\bar{Q}}[\bar{Q}|s',a'])$ .
|
| 125 |
+
|
| 126 |
+
Uncertainty in neural network prediction For a given policy $\pi$ and a given s', a', the agent may not have seen enough samples to have an accurate approximation of $Q^{\pi}(s', a')$ . This corresponds to an epistemic source of uncertainty that should be captured by sampled ensembles. However, as $\pi$ is updated, the regression target, $Q^{\pi}(s', a')$ , is also changing. This can be interpreted as variability in the underlying process which will be captured by var-networks. We can thus combine both sampling-based and var-network methods and evaluate $\sigma^2_{\bar{Q}}\left(\bar{Q}(s',a')\right)$ with var-ensembles.
|
| 127 |
+
|
| 128 |
+
We assume that the estimate of $\sigma_{\bar{Q}}^2(\bar{Q}|s',a')$ given a sampled (s',a') is unbiased and can therefore use it to directly approximate the expectation $\mathbb{E}_{S'A'}[\sigma_{\bar{Q}}^2(\bar{Q}|s',a')]$ . These values are used in the BIV loss $\mathcal{L}_{BIV}$ (equation 1) across a mini-batch sampled from the replay buffer. In this case, $\xi$ is used to control the trust in the variance estimation, as explained in section 2.1.
|
| 129 |
+
|
| 130 |
+
**Stochastic environment and policy** The other potential source of variance in the target is the result of the stochasticity of the environment encapsulated by $p_R(r|s,a)$ and $p_{S'}(s'|s,a)$ and of the policy represented by $\pi(a'|s')$ . Note that in model-free RL, we have no explicit representation of $p_R(r|s,a)$ or $p_{S'}(s'|s,a)$ , which are necessary to estimate this source of uncertainty.
|
| 131 |
+
|
| 132 |
+
$Q^{\pi}(s,a)$ is defined as the expected value of the return. As a result, even in the case where $\bar{Q}(s',a') = Q^{\pi}(s,a)$ in (4), there is still noise over the value of the *target* that is being used as the label due to the stochasticity of the environment and policy that generate r,a' and s'. Assuming a zero mean and normally distributed noise, this underlying stochasticity of the generating process is well-captured by a var-network with a loss attenuation using the negative log-likelihood formulation described in equation (3).
|
| 133 |
+
|
| 134 |
+
Based on the motivation above, we propose our IV-RL loss over minibatch D of size K as a linear combination of $\mathcal{L}_{BIV}$ and $\mathcal{L}_{LA}$ , balanced by hyperparameter $\lambda$ :
|
| 135 |
+
|
| 136 |
+
$$\mathcal{L}_{IVRL}(D,\theta) = \mathcal{L}_{BIV}(D,\theta) + \lambda \mathcal{L}_{LA}(D,\theta)$$
|
| 137 |
+
|
| 138 |
+
$$= \sum_{k=0}^{K} \left[ \left( \sum_{j=0}^{K} \frac{1}{\gamma^{2} \sigma_{\bar{Q},j}^{2} + \xi} \right)^{-1} \frac{\left( \mu_{\hat{Q}_{\theta}}(s_{k}, a_{k}) - T(s_{k}, a_{k}) \right)^{2}}{\gamma^{2} \sigma_{\bar{Q},k}^{2} + \xi} + \frac{\lambda}{K} \left( \frac{\left( \mu_{\hat{Q}_{\theta}}(s_{k}, a_{k}) - T(s_{k}, a_{k}) \right)^{2}}{\sigma_{\hat{Q}_{\theta}}^{2}(s_{k}, a_{k})} + \ln \sigma_{\hat{Q}_{\theta}}^{2}(s_{k}, a_{k}) \right) \right]$$
|
| 139 |
+
(10)
|
| 140 |
+
|
| 141 |
+
The var-network with parameters $\theta$ predicts both $\mu_{\hat{Q}_{\theta}}(s_k, a_k)$ and $\sigma_{\hat{Q}_{\theta}}^2(s_k, a_k)$ for each element k of the mini-batch. $\sigma_{\bar{Q},k}^2$ is instead provided by the target var-ensemble which predicts $\bar{Q}(s_k', a_k')$ when estimating the target $T(s_k, a_k)$ . In $\mathcal{L}_{\mathrm{BIV}}$ of equation (1), $\sigma_k^2$ is replaced by $\gamma^2 \sigma_{\bar{Q},k}^2$ according to equation (9). Empirically, the value of $\lambda$ is usually optimal around 5 or 10. Its impact is studied more in details in appendix C. High-variance samples generated from the target estimation will be downweighted in the BIV loss, while high-variance samples due to the stochasticity of the underlying environment and policy will be down-weighted in the LA loss. In the remainder of the paper, we show how this loss can be applied to different architectures and algorithms, and demonstrate that in many cases it significantly improves sample efficiency.
|
| 142 |
+
|
| 143 |
+
In section 3.1, we discussed how the target's uncertainty can be quantified and how the Bellman update can then be interpreted as a heteroscedastic regression problem. This is applicable in most
|
| 144 |
+
|
| 145 |
+
model-free DRL algorithms, whether they are based on Q-learning or policy optimization. In the special case of actor-critic algorithms, the state-values or Q-values predicted by the critic network are also used to train the policy $\pi_{\phi}$ 's parameters by gradient ascent optimization. An estimate of their variance can also be used to improve the learning process.
|
| 146 |
+
|
| 147 |
+
The objective is to maximize the expected Q-value:
|
| 148 |
+
|
| 149 |
+
$$\mathbb{E}_{s \sim D, a \sim \pi_{\phi}(s)}[Q(s, a)] \tag{11}$$
|
| 150 |
+
|
| 151 |
+
where D is the state distribution over the probable agent trajectories. The expectation is computed by sampling a mini-batch of size K from a replay buffer containing state-action pairs $(s_k, a_k)$ . The Q-value is approximated by the critic as $\hat{Q}(s_k, a_k)$ . The actor's parameters $\phi$ are then trained to maximize the unweighted average:
|
| 152 |
+
|
| 153 |
+
$$1/K \sum_{k=0}^{K} \hat{Q}(s_k, a_k) \tag{12}$$
|
| 154 |
+
|
| 155 |
+
If we instead consider the critic's estimation $\hat{Q}(s_k, a_k)$ as a random variable sampled from $p_{\hat{Q}}(\hat{Q}|s,a)$ , with mean $\mu_{\hat{Q}}(s_k,a_k)$ and variance $\sigma^2_{\hat{Q}}(s_k,a_k)$ , we can instead infer the expected value in equation (11) using Bayesian estimation (Murphy, 2012):
|
| 156 |
+
|
| 157 |
+
$$\left(\sum_{k=0}^{K} 1/\sigma_{\hat{Q}}^{2}\left(s_{k}, a_{k}\right)\right)^{-1} \sum_{k=0}^{K} \frac{1}{\sigma_{\hat{Q}}^{2}(s_{k}, a_{k})} \mu_{\hat{Q}}(s_{k}, a_{k}) \tag{13}$$
|
| 158 |
+
|
| 159 |
+
The normalized inverse-variance weights are a direct fit with the BIV loss in equation 1: we therefore also use BIV to train the actor. As the target and the Q-networks have the same structure, $\sigma_{\hat{Q}}^2(s_k, a_k)$ can be estimated using the same var-ensembles used in section 3.1.2.
|
| 160 |
+
|
| 161 |
+
We have adapted IV-RL to DQN and SAC to produce IV-DQN and IV-SAC. Comprehensive pseudocode, as well as implementation details for each algorithm, can be found in appendix A.
|
2201.04450/main_diagram/main_diagram.drawio
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
<mxfile host="app.diagrams.net" modified="2021-11-11T12:21:10.305Z" agent="5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/11.1.2 Safari/605.1.15" etag="5Ap8BDno_rK_y9-185Kn" version="15.7.3"><diagram id="_YQDJr4yNO_PoCpfpSXb" name="Page-1">7Vpdc5s6EP01fuwdECDwY+2k7UOSydTNpH3qyLAYWoy4shzb99dfYQTmQ4lpDDjO9CWxDtICZ89KqxUjY7rcfmYkCW6pB9EIad52ZFyNEHIMXfxNgV0GWI4EFiz0MqgEzML/QIKaRNehB6tKR05pxMOkCro0jsHlFYwwRjfVbj6NqndNyAIawMwlURN9DD0eyNeytAP+BcJFkN9Z1+SVJck7S2AVEI9uSpBxPTKmjFKe/VpupxCl3OW8ZOM+PXO1eDAGMW8z4Go3vfn08PVx8/vX958Pd1/uHrH+QVp5ItFavvAI4UjYm/hUmBVPzXeSCvzvmuYXPqz2jvooOiAr2R4uphQTtzrgW7gUHkTaHWzE3690SeLyALyQ/0+6bWbl+uohNySIyGx1aT+3slrPc0gv3bAEH3mMBowqD4UYXccepJ7TxOVNEHKYJRmxGxFnAgv4MhItPX2aXFlpXz+MoimNKBPtmMaQXueM/oYcHCEDuw7M/eK+T8A4bJ+VlV6IVQQ50CVwthNd5ABjLPUtAxzn7U0pXCQUlCLFlBiRAbooLB80LH5IGf+BpNFbkXRJjCVhLP9QLxcrDGy/MWEYpwsDdyIMWM77dbdec7fwrGeB45kqnztobmDc02SgtfO51ZfPzWd9nrx6ZehfBAWcdCOM1LpMrew3oZPG3KDSiTWgTiyFTmqMQ+x9TBPKw/TpkVWwd4FepTvF7wnnwOI9gjSjIDRPIgU5k6oPS07S9YJm8Br56VGSSyRaLwQbg4jw8KlqXsWsvMM9DfcRIn1o6VUf2tr4H6tqZEXXzAU5rpya1kxh46gpTtgCeMPU3tnFq7/e/7jh/8nN7NvtqrO4049N0L7jguuqAm/uWKal9TNBm0Yz8AyVaHBv6ZrdjDSh+ZlsUsYDuqAxia4PaC1yDn1uKE0kvb+A850kn6w5rXoGtiH/ng4XOstaP0pXrrbS8r6xyxuxeN9skI3tHPixB3DePAzdtypj74GJlUFMCxJsG+GCnH0gvcCiJDGLkpfY1tTyaD0XnORr+/QMTOtk8fUYTeiaD5CF1WaAetD7PlIHvYfn2OopK7PQmbMy568OzqGDetZ1dh2Mj2dd/VE+yHpramaeyeQrroL0YVfcPG87JfycTsJvEqZJ1uz84TeMFvRxVQm6QgnjQZXQQfn370T8CiWg6kRcxH9ZCXjcVILRmxJUVdOhSAfds8BWkT7GtkF6yoKKsCqTbg4afqqK5Psi3dRqpJstSbd7I11VEnxvpFcXGqTa5Jv6kKSr6mtnSTlub+5XZ19lBpJBLd9AysxzUBkoymwh8f0whp8BEG84D3gEHF+5znd7Nlr1gK4ocysDsb8lp4MSTFe5f+Z40SMiO2Dnj8phNGHWat2KU1HDGVQSqmpMjXNBY5L+XIXxIgJ5DDLxQgYuD2l6yhGnRdgT/dHJnFc7WFIleYp0o7/EukWN42LYtbU3xi5SlTIuld26di27ya4qVe6PXVV54FLZrWtXxe6w2m2x5b4Ydk2tVuVULGum4kC/P3Zb7K0vhl1cP7VVsTuodltsoi+GXavGruGce2Zo8TXK5bKr2IWiQWeG5ib0/bCrKzIyFbuv2E2I5uFj9ey7m8MX/8b1/w==</diagram></mxfile>
|
2201.04450/paper_text/intro_method.md
ADDED
|
@@ -0,0 +1,19 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Introduction
|
| 2 |
+
|
| 3 |
+
Discourse parsing aims at uncovering the structure of argumentation and information flow of a text. It is associated with the coherence and naturalness of a text. Applications that may benefit from discourse information include text summarization [@marcu-1997-discourse], sentiment analysis [@mukherjee-bhattacharyya-2012-sentiment], essay scoring [@nadeem-etal-2019-automated], text classification [@ji-smith-2017-neural], machine translation [@sim-smith-2017-integrating] and so on.
|
| 4 |
+
|
| 5 |
+
Several frameworks for discourse parsing have been proposed. Rhetorical Structure Theory (RST) [@mann1988rhetorical] assumes that discourse structure can be represented by a tree. It addresses text organization by means of relations that hold between discourse units, and a text is a hierarchically connected structure, in which each part has a role to play [@taboada2006rhetorical]. RST discourse parsing resembles constituency-based syntactic parsing, and facilitated by the creation of the RST discourse corpus [@carlson-etal-2001-building], the RST framework is widely used in computational discourse processing. Another influential framework is the Penn Discourse Treebank (PDTB) style discourse parsing [@prasad-etal-2008-penn; @prasad-etal-2018-discourse], which focuses on explicit and implicit local discourse relations. The Segmented Discourse Representation Theory (SDRT) [@asher2003logics] and the Graphbank [@wolf-gibson-2005-representing] use graph to represent discourse structure.
|
| 6 |
+
|
| 7 |
+
@li-etal-2014-text introduce the dependency framework into discourse parsing. Compared with RST discourse parsing, discourse dependency parsing is more flexible and potentially capable of capturing non-projective structures.
|
| 8 |
+
|
| 9 |
+
Because of a lack of discourse corpora annotated under the dependency framework, previous studies center around finding automatic means of converting RST tree structures to discourse dependency structures. Studies in this direction include @li-etal-2014-text, @hirao-etal-2013-single, @yoshida-etal-2014-dependency and @morey-etal-2018-dependency. These methods mostly originate from the syntactic parsing field. @muller-etal-2012-constrained propose a method of deriving discourse dependency structures from SDRT graphs.
|
| 10 |
+
|
| 11 |
+
@lee2006complexity points out that discourse structure is likely to be less complex than syntactic structure. The biaffine neural dependency parser by @Manning2019 achieves good performance in syntactic parsing, and in this study, we investigate empirically whether it also performs well if applied to discourse parsing. The SciDTB corpus [@yang-li-2018-scidtb] is a domain-specific and manually-annotated discourse corpus with data from abstracts of the ACL anthology. It uses dependency trees to represent discourse structure. With this corpus, it is possible to develop a discourse dependency parser directly.
|
| 12 |
+
|
| 13 |
+
Dependency parsing generally can be implemented in transition-based approaches or graph-based approaches. In studies using graph-based approaches, dependency parsing is treated as a task of finding the highest-scored dependency tree. The arc-factored model proposed by @mcdonald-etal-2005-non is commonly used. The score of an edge is defined as the dot product between a high-dimensional feature representation and a weight factor, and the score of a dependency tree is obtained by summing the scores of the edges in the tree. Margin Infused Relaxed Algorithm (MIRA) [@crammer2003ultraconservative] is generally used to learn the weight factors. The Eisner algorithm [@eisner1996three] and the Chu-Liu-Edmonds algorithm [@chu1965shortest; @edmonds1967optimum] can be used to find the highest-scored dependency tree from the scores of all the possible arcs. The Eisner algorithm is a dynamic programming approach and because of its restriction on the position and orientation in finding the head, it can only generate projective structures. The Chu-Liu-Edmonds algorithm aims at finding the maximum spanning tree (MST) from the scores and takes an iterative approach in removing cycles. It can produce non-projective structures.
|
| 14 |
+
|
| 15 |
+
@li-etal-2014-text's discourse dependency parser is an example using this approach. Since there were no discourse corpora annotated with dependency structure, an algorithm for converting RST trees to dependency trees is used. However, based on the study by @hayashi-etal-2016-empirical, the dependency trees converted with this algorithm are all projective, and this may be the reason why in @li-etal-2014-text's experiments, the accuracy is higher when the Eisner algorithm is used than when the MST algorithm is used.
|
| 16 |
+
|
| 17 |
+
The work by @kiperwasser-goldberg-2016-simple addresses the problem of feature representation in developing neural dependency parsers. Instead of using feature templates, in their study, each word is represented by embedding vectors, which are fed to BiLSTM layers. The resulting BiLSTM representations are scored using a multi-layer perceptron (MLP) and trained together with the parser to learn feature representations for the specific task.
|
| 18 |
+
|
| 19 |
+
On this basis, @Manning2019 propose a biaffine dependency parser. The embedding vectors of a sequence of words are passed to a multi-layer BiLSTM network. Instead of using an MLP scorer, the output from the BiLSTM network is fed to four MLP layers, from which four vectors are produced. These vectors are passed to two biaffine layers, one deciding the most probable head for a word, and the other determining the label for the pair formed by the word and its head predicted by the previous biaffine layer. The two biaffine classifiers are trained jointly to minimize the sum of their softmax cross-entropy losses. To ensure the tree is well-formed, Tarjan's algorithm is used to remove cycles[^1].
|
2202.09852/paper_text/intro_method.md
ADDED
|
@@ -0,0 +1,102 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Introduction
|
| 2 |
+
|
| 3 |
+
Online recommender systems often involve predicting various types of user feedback such as clicking and purchasing. Multi-Task Learning (MTL) [@caruana1997multitask] has emerged in this context as a powerful tool to explore the connection of tasks for improving user interest modeling [@ma2018entire; @lu2018like; @wang2018explainable].
|
| 4 |
+
|
| 5 |
+
Common MTL models consist of low-level *shared network* and several high-level *individual networks*, as shown in Fig. [1](#fig_motiv){reference-type="ref" reference="fig_motiv"}(a), in the hope that the shared network could transfer the knowledge about "how to encode the input features\" by sharing or enforcing similarity on parameters of different tasks [@ruder2017overview]. Most prior works [@ma2018modeling; @tang2020progressive; @ma2019snr] put efforts on designing different shared network architectures with ad-hoc parameter-sharing mechanisms such as branching and gating. In these models, each task is trained under the supervision of its own binary ground-truth label ($1$ or $0$), attempting to rank positive items above negative ones. However, using such binary labels as training signals, the task may fail to accurately capture user's preference for *items with the same label*, despite that learning the auxiliary knowledge about these items' relations may benefit the overall ranking performance.
|
| 6 |
+
|
| 7 |
+
To address this limitation, we observe that the predictions of other tasks may contain useful information about how to rank same-labeled items. For example, given two tasks predicting 'Buy' and 'Like', and two items labeled as 'Buy:$0$, Like:$1$' and 'Buy:$0$, Like:$0$', the task 'Buy' may not accurately distinguish their relative ranking since both of their labels are $0$. In contrast, another task 'Like' will identify the former item as positive with larger probability (e.g. $0.7$), and the latter with smaller probability (e.g. $0.1$). Based on the fact that a user is more likely to purchase the item she likes [^1], we could somehow take advantage of these predictions from other tasks as a means to transfer ranking knowledge.
|
| 8 |
+
|
| 9 |
+
Knowledge Distillation (KD) [@hinton2015distilling] is a teacher-student learning framework where the student is trained using the predictions of the teacher. As revealed by theoretical analysis in previous studies [@tang2020understanding; @phuong2019towards], the predictions of the teacher, also known as *soft labels*, are usually seen as more informative training signals than binary *hard labels*, since they could reflect 'whether the sample is true positive (negative)'. From the perspective of the backward gradient, KD can adaptively re-scale the student model's training dynamics based on the values of soft labels. Specifically, in the above example, we could incorporate predictions $0.7$ and $0.1$ in the training signals for task 'Buy'. Consequently, the gradients w.r.t. the sample labeled 'Buy:$0$ & Like:$0$' in the example will be larger, indicating it is a more confident negative sample. Through this process, the task 'Buy' could hopefully give accurate rankings of same-labeled items. Motivated by the above observations, we proceed to design a new knowledge transfer paradigm on the optimization level of MTL models by leveraging KD. It is non-trivial due to three critical and fundamental challenges:
|
| 10 |
+
|
| 11 |
+
- **How to address the task conflict problem during distillation?** Not all knowledge from other tasks is useful [@yu2020gradient]. Specifically, in online recommendation, the target task may believe that a user prefers item$_A$ since she bought item$_A$ instead of item$_B$, while another task may reversely presume she prefers item$_B$ since she puts it in the collection rather than item$_A$. Such conflicting ranking knowledge may be harmful for the target task and could empirically cause a significant performance drop.
|
| 12 |
+
|
| 13 |
+
- **How to align the magnitude of predictions for different tasks?** Distinct from vanilla KD where teacher and student models have the same prediction target, different tasks may have different magnitudes of positive ratios. Directly using another task's predictions as training signals without alignment could mislead the target task to yield biased predictions [@zhou2021rethinking].
|
| 14 |
+
|
| 15 |
+
- **How to enhance training when teacher and student are synchronously optimized?** The vanilla KD adopts asynchronous training where the teacher model is well-trained beforehand. However, MTL inherently requires synchronous training, where each task is jointly learned from scratch. This indicates the teacher may be poorly-trained and provide inaccurate or even erroneous training signals, causing slow convergence and local optima [@wen2019preparing; @xu2020privileged].
|
| 16 |
+
|
| 17 |
+
<figure id="fig_motiv" data-latex-placement="t">
|
| 18 |
+
<embed src="images/fc_motiv.pdf" style="width:49.0%" />
|
| 19 |
+
<figcaption>Illustration of the motivation of CrossDistil.</figcaption>
|
| 20 |
+
</figure>
|
| 21 |
+
|
| 22 |
+
In this paper, we propose a novel framework named as Cross-Task Knowledge Distillation (CrossDistil). Different from prior MTL models where knowledge transfer is achieved by sharing representations in bottom layers, CrossDistil also facilitates transferring ranking knowledge on the top layers, as shown in Fig. [1](#fig_motiv){reference-type="ref" reference="fig_motiv"}(b). To solve the aforementioned challenges: **First**, we introduce augmented tasks to learn the knowledge of the ranking orders of four types of samples as shown in Fig. [1](#fig_motiv){reference-type="ref" reference="fig_motiv"}(c). New tasks are trained based on a quadruplet loss function, and could fundamentally avoid conflicts by only preserving the useful knowledge and discarding the harmful one, as shown in Fig. [1](#fig_motiv){reference-type="ref" reference="fig_motiv"}(d). **Second**, we consider a calibration process that is seamlessly integrated in the KD procedure to align predictions of different tasks, which is accompanied with a bi-level training algorithm to optimize parameters for prediction and calibration respectively. **Third**, teachers and students are trained in an end-to-end manner with a novel error correction mechanism to speed up model training and further enhance knowledge quality. We conduct comprehensive experiments on a large-scale public dataset and a real-world production dataset that is collected from our platform. The results demonstrate that CrossDistil achieves state-of-the-art performance. The ablation studies also thoroughly dissect the effectiveness of its modules.
|
| 23 |
+
|
| 24 |
+
# Method
|
| 25 |
+
|
| 26 |
+
<figure id="fig_frame" data-latex-placement="t">
|
| 27 |
+
<embed src="images/fc_frame.pdf" style="width:85.0%" />
|
| 28 |
+
<figcaption>Illustration of computational graph for CrossDistil.</figcaption>
|
| 29 |
+
</figure>
|
| 30 |
+
|
| 31 |
+
This paper focuses on multi-task learning for predicting different user feedback (e.g. click, like, purchase, look-through), and considers two tasks denoted as task $A$ and task $B$ to simplify illustration. As shown in the left panel of Fig. [2](#fig_frame){reference-type="ref" reference="fig_frame"}, we first split the set of training samples into multiple subsets according to combinations of tasks' labels: $$\begin{equation}
|
| 32 |
+
\begin{split}
|
| 33 |
+
% \mathcal D^{++} = \{\mathbf x_i \in \mathcal D &| y^A_i = 1, y^B_i = 1\}, \\
|
| 34 |
+
\mathcal D^{+-} = \{(\mathbf x_i, y^A_i, y^B_i) &\in \mathcal D ~|~ y^A_i = 1, y^B_i = 0\},\\
|
| 35 |
+
\mathcal D^{-+} = \{(\mathbf x_i, y^A_i, y^B_i) &\in \mathcal D ~|~ y^A_i = 0, y^B_i = 1\},\\
|
| 36 |
+
% \mathcal D^{--} = \{\mathbf x_i \in \mathcal D &| y^A_i = 0, y^B_i = 0\},\\
|
| 37 |
+
\mathcal D^{-\cdot} = \mathcal D^{--} \cup \mathcal D^{-+}&, \;\mathcal D^{+\cdot} = \mathcal D^{+-} \cup \mathcal D^{++},\\
|
| 38 |
+
\mathcal D^{\cdot-} = \mathcal D^{--} \cup \mathcal D^{+-}&, \;\mathcal D^{\cdot+} = \mathcal D^{-+} \cup \mathcal D^{++},
|
| 39 |
+
\end{split}
|
| 40 |
+
\end{equation}$$ where $\mathbf x$ is an input feature vector, $y^A$ and $y^B$ denote hard labels for task $A$ and task $B$ respectively. The goal is to rank positive samples before negative ones, which can be expressed as a bipartite order $\mathbf x_{_{+\cdot}} \succ \mathbf x_{_{-\cdot}}$ for task $A$ and $\mathbf x_{_{\cdot+}} \succ \mathbf x_{_{\cdot-}}$ for task $B$, where $\mathbf x_{_{+\cdot}} \in \mathcal D^{+\cdot}$ and so forth. Note that these bipartite orders may be contradictory among different tasks, e.g., $\mathbf x_{_{+-}} \succ \mathbf x_{_{-+}}$ for task $A$ while $\mathbf x_{_{+-}} \prec \mathbf x_{_{-+}}$ for task $B$. Due to the existence of such conflicts, directly conducting KD by treating one task as teacher and another task as student may cause inconsistent training signals and is empirically harmful for the overall ranking performance.
|
| 41 |
+
|
| 42 |
+
To enable knowledge transfer across tasks via KD, we introduce auxiliary ranking-based tasks that could essentially avoid task conflicts while preserving useful ranking knowledge. Specifically, we consider a quadruplet $(\mathbf x_{_{++}}, \mathbf x_{_{+-}}, \mathbf x_{_{-+}}, \mathbf x_{_{--}})$ and the corresponding multipartite order $\mathbf x_{_{++}} \succ \mathbf x_{_{+-}} \succ \mathbf x_{_{-+}} \succ \mathbf x_{_{--}}$ for task $A$. In contrast with the original bipartite order, the multipartite order reveals additional information about the ranking of samples, i.e., $\mathbf x_{_{++}} \succ \mathbf x_{_{+-}}$ and $\mathbf x_{_{-+}} \succ \mathbf x_{_{--}}$, without introducing contradictions. Therefore, we refer to such an order as *fine-grained ranking*. Based on this, we introduce a new ranking-based task called *augmented task $A+$* for enhancing knowledge transfer by additionally maximizing $$\begin{equation}
|
| 43 |
+
\label{eqn-obj-a+}
|
| 44 |
+
\begin{split}
|
| 45 |
+
&\ln p(\Theta | \succ)\\
|
| 46 |
+
% = &\ln p(\mathbf x_{_{++}} \succ \mathbf x_{_{+-}} \wedge \mathbf x_{_{-+}} \succ \mathbf x_{_{--}} | \Theta) \cdot p(\Theta)\\
|
| 47 |
+
= &\ln p(\mathbf x_{_{++}} \succ \mathbf x_{_{+-}} | \Theta) \cdot p(\mathbf x_{_{-+}} \succ \mathbf x_{_{--}} | \Theta) \cdot p(\Theta)\\
|
| 48 |
+
= &\sum_{(\mathbf x_{_{++}}, \mathbf x_{_{+-}},\atop \mathbf x_{_{-+}}, \mathbf x_{_{--}})} \ln \sigma(\hat r_{_{++\succ+-}}) + \ln \sigma(\hat r_{_{-+\succ--}}) - Reg(\Theta),\\
|
| 49 |
+
\end{split}
|
| 50 |
+
\end{equation}$$ where $r$ is the logit value before activation in the last layer, $\hat r_{_{++\succ+-}} = \hat r_{_{++}} - \hat r_{_{+-}}$, and sigmoid function $\sigma(x) = 1/(1+exp(-x))$. The loss function for augmented task $A+$ is $$\begin{equation}
|
| 51 |
+
\label{eqn_apr}
|
| 52 |
+
\begin{split}
|
| 53 |
+
\mathcal L^{A+} = &\sum_{(\mathbf x_{_{++}}, \mathbf x_{_{+-}}, \atop\mathbf x_{_{-+}}, \mathbf x_{_{--}})} -\beta_1^A \ln \sigma(\hat r_{_{++\succ+-}}) -\beta_2^A \ln \sigma(\hat r_{_{-+\succ--}})\\
|
| 54 |
+
+ &\sum_{(\mathbf x_{_{+\cdot}}, \mathbf x_{_{-\cdot}})} - \ln \sigma(\hat r_{_{+\cdot\succ-\cdot}}),
|
| 55 |
+
\end{split}
|
| 56 |
+
\end{equation}$$ which consists of three terms that respectively correspond to three pair-wise ranking relations of samples, where coefficients $\beta_1$, $\beta_2$ balance their importance. The loss function for augmented task $B+$ could be defined in a similar spirit. These augmented ranking-based tasks are jointly trained with original regression-based tasks in MTL framework as shown in the second panel of Fig. [2](#fig_frame){reference-type="ref" reference="fig_frame"}. The original regression-based loss function is formulated as: $$\begin{equation}
|
| 57 |
+
\label{eqn_la}
|
| 58 |
+
\begin{split}
|
| 59 |
+
\mathcal L^{A} = &CE(y^A, \hat y^A), \qquad \mathcal L^{B} = CE(y^B, \hat y^B),\\
|
| 60 |
+
CE(y,\hat y) &= \sum_{\mathbf x_i \in \mathcal D} - y_i \ln \hat y_i - (1- y_i) \ln (1-\hat y_i),
|
| 61 |
+
\end{split}
|
| 62 |
+
\end{equation}$$ where $\hat y = \sigma(r)$ is the predicted probability.
|
| 63 |
+
|
| 64 |
+
The introduced auxiliary ranking-based tasks could avoid task conflicts and act as prerequisites for knowledge transfer through KD. Besides, the task augmentation approach itself is beneficial for the generalizability of main tasks [@hsieh2021boosting] by introducing more related tasks that may provide hints about what shall be learned and transferred in shared layers.
|
| 65 |
+
|
| 66 |
+
We next design a cross-task knowledge distillation approach that can transfer fine-grained ranking knowledge for MTL. Since the prediction results of another task may contain the information about unseen rankings between samples of the same label, a straightforward approach is to use soft labels of another task to teach the current task by the vanilla hint loss (i.e. distillation loss) as in Eqn. [\[eqn_vnkd\]](#eqn_vnkd){reference-type="eqref" reference="eqn_vnkd"}. Unfortunately, such a naive approach may be problematic and even impose negative effects in practice. This is because the labels of different tasks may have contradictory ranking information that would harm the learning of other tasks as mentioned previously. To avoid such conflicts, we instead treat augmented ranking-based tasks as teachers, original regression-based tasks as students, and adopt the following distillation loss functions:
|
| 67 |
+
|
| 68 |
+
$$\begin{equation}
|
| 69 |
+
\label{eqn_dis}
|
| 70 |
+
\begin{split}
|
| 71 |
+
\mathcal L^{A-KD} &= CE(\sigma(\hat r^{A+} / \tau), \sigma(\hat r^A/ \tau)),\\
|
| 72 |
+
\mathcal L^{B-KD} &= CE(\sigma(\hat r^{B+}/ \tau), \sigma(\hat r^{B} / \tau)).
|
| 73 |
+
\end{split}
|
| 74 |
+
\end{equation}$$ Note that soft labels $\hat y^{A+} = \sigma(\hat r^{A+} / \tau)$ and $\hat y^{B+} = \sigma(\hat r^{B+} / \tau)$ are invariant when training the student model, and hence the student will not mislead the teacher. The loss functions for students are formulated as $$\begin{equation}
|
| 75 |
+
\label{eqn_stu}
|
| 76 |
+
\begin{split}
|
| 77 |
+
\mathcal L^{A-Stu} &= (1 - \alpha^A) \mathcal L^{A} + \alpha^A \mathcal L^{A-KD},\\
|
| 78 |
+
\mathcal L^{B-Stu} &= (1 - \alpha^B) \mathcal L^{B} + \alpha^B \mathcal L^{B-KD},\\
|
| 79 |
+
\end{split}
|
| 80 |
+
\end{equation}$$ where $\alpha^A \in [0,1]$ is the hyper-parameter to balance two losses. The soft labels output by augmented ranking-based tasks are more informative training signals than hard labels. As an example, for samples $\mathbf x_{_{++}}, \mathbf x_{_{+-}}, \mathbf x_{_{-+}}, \mathbf x_{_{--}}$, the teacher model for augmented task $A+$ may give predictions $0.9$, $0.8$, $0.2$, $0.1$ which intrinsically contains auxiliary ranking orders $\mathbf x_{_{++}} \succ \mathbf x_{_{+-}}$ and $\mathbf x_{_{-+}} \succ \mathbf x_{_{--}}$ that are not revealed in hard labels. Such knowledge is then explicitly transferred through the distillation loss and can meanwhile regularize task-specific layers from over-fitting the hard labels.
|
| 81 |
+
|
| 82 |
+
However, an issue of the aforementioned approach is that augmented tasks are optimized with pair-wise loss functions and thus are not predicting a probability, i.e., the prediction $\sigma(\hat r^{A+})$ does not agree with the actual probability that the input sample is a positive one. Directly using the soft labels of teachers may mislead students and cause performance deterioration. To solve this problem, we propose to calibrate the predictions so as to provide numerically sound and unbiased soft labels. Platt Scaling [@niculescu2005predicting; @platt1999probabilistic] is a classic probability calibration method. We adopt it for calibration in this work. Still, one can replace it with any other more complex methods in practice. Formally, to get calibrated probabilities, we transform the logit values of teacher models through the following equation: $$\begin{equation}
|
| 83 |
+
\begin{split}
|
| 84 |
+
\tilde r^{A+} = P^A \cdot \hat r^{A+} + Q^A, \;\; \tilde y^{A+} = \frac{1}{1+\exp{ \tilde r^{A+}}}
|
| 85 |
+
\end{split}
|
| 86 |
+
\end{equation}$$ where $\tilde r$ and $\tilde y$ are the logit value and probability after calibration, respectively. The same process is also used for task $B+$. $P$, $Q$ are learnable parameters specific to each task. They are trained by optimizing the calibration loss $$\begin{equation}
|
| 87 |
+
\label{eqn_cal}
|
| 88 |
+
\mathcal L^{Cal} = \mathcal L^{A-Cal} + \mathcal L^{B-Cal} = CE(y^A, \tilde y^{A+}) + CE(y^B,\tilde y^{B+}).
|
| 89 |
+
\end{equation}$$ We fix MTL model parameters when optimizing $\mathcal L^{Cal}$ as shown in the third panel of Fig. [2](#fig_frame){reference-type="ref" reference="fig_frame"}. Note that, since the calibrated outputs of the teacher model are linear projections of the original outputs, the ranking result is unaffected so that the latent fine-grained ranking knowledge in soft labels is preserved during the calibration process. Distillation losses in Eqn. [\[eqn_dis\]](#eqn_dis){reference-type="eqref" reference="eqn_dis"} are then revised by replacing $\hat r^{A+}, \hat r^{B+}$ with $\tilde r^{A+}, \tilde r^{B+}$.
|
| 90 |
+
|
| 91 |
+
Conventional KD adopts a two-stage training process where the teacher model is trained in advance and its parameters are fixed when training the student model [@hinton2015distilling]. However, such an asynchronous training procedure is not favorable for industrial applications such as online advertising. Instead, due to simplicity and easy maintenance, a synchronous training procedure where teacher and student models are trained in an end-to-end manner is more desirable, as done in [@xu2020privileged; @anil2018large; @zhou2018rocket]. In our framework, there are two sets of parameters for optimization, namely, parameters in the MTL backbone for prediction (denoted as $\Theta$) and parameters for calibration including $P^A$, $P^B$, $Q^A$ and $Q^B$ (denoted as $\Omega$). To jointly optimize prediction parameters and calibration parameters, we propose a bi-level training procedure where $\Theta$ and $\Omega$ are optimized in turn for each iteration as shown in the training algorithm. For sampling, it is impractical to enumerate every combination of samples as in Eqn. [\[eqn_apr\]](#eqn_apr){reference-type="eqref" reference="eqn_apr"}. Instead, we adopt a bootstrap sampling strategy as used in [@rendle2012bpr; @shan2018combined] as an unbiased approximation.
|
| 92 |
+
|
| 93 |
+
::: algorithm
|
| 94 |
+
Construct set $\mathcal D^{++}, \mathcal D^{+-},\mathcal D^{-+},\mathcal D^{--}, \mathcal D^{+\cdot}, \mathcal D^{-\cdot},\mathcal D^{\cdot+},\mathcal D^{\cdot-}$
|
| 95 |
+
:::
|
| 96 |
+
|
| 97 |
+
In KD-based methods, the student model is trained according to predictions of the teacher model, without considering if they are accurate or not. However, inaccurate predictions of the teacher model that are contradictory with the hard labels could harm the student model's performance in two aspects. First, at the early stage of training when the teacher model is not well-trained, frequent errors in soft labels may distract the training process of the student model, causing slow convergence [@xu2020privileged]. Second, even at the later stage of training when the teacher model is relatively well-trained, it is still likely that the teacher model would occasionally provide mistaken predictions that may cause performance deterioration [@wen2019preparing]. A previous work [@xu2020privileged] adopts a warm-up scheme by removing the distillation loss in the earliest $k$ steps of training. However, it is not clear how to choose an appropriate hyper-parameter $k$, and it cannot prevent errors after $k$ steps.
|
| 98 |
+
|
| 99 |
+
In this work, we propose to adjust predictions of the teacher model $\tilde y$ to align with the hard label $y$. Specifically, we clamp logit values for the teacher model (if the prediction is inconsistent with the ground truth) as follows: $$\begin{equation}
|
| 100 |
+
\label{eqn_corr}
|
| 101 |
+
r^{Teacher}(\mathbf{x}) \leftarrow \mathds{1}[y]\cdot Max\left\{\mathds{1}[y] \cdot r^{Teacher}(\mathbf{x}),m\right\}
|
| 102 |
+
\end{equation}$$ where $r^{Teacher}$ could be $\tilde r^{A+}$ or $\tilde r^{B+}$, $\mathds{1}[y]$ is an indicator function that returns $1$ if $y=1$ else returns $-1$, and $m$ is the error correction margin, a hyper-parameter. This procedure could accelerate convergence by eliminating inaccurate predictions at the early stage of training, and further enhance knowledge quality at the later stage to improve the student model's performance. The proposed error correction mechanism has the following properties: 1) It does not affect the predictions of the teacher model if they are sufficiently correct (i.e., they predict the true label with at least probability $\sigma(m)$); 2) It does not affect training of the teacher model since the computation of distillation loss has no backward gradient for teachers as shown in Fig. [2](#fig_frame){reference-type="ref" reference="fig_frame"}.
|
2203.07319/main_diagram/main_diagram.drawio
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2203.07319/paper_text/intro_method.md
ADDED
|
@@ -0,0 +1,63 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Introduction
|
| 2 |
+
|
| 3 |
+
Face image super resolution (face SR or face hallucination) algorithms have been developing rapidly in recent years, with wide applications in video restoration and AI photographing. Face SR has a close relationship with general image SR [@srcnn; @fsrcnn; @esrgan; @sftgan; @ranksrgan; @bsrgan; @realesrgan] and face generation tasks [@stylegan1; @stylegan2; @progan; @stylegan3; @swagan]. Similar to general image SR, face SR is a restoration problem, whose goal is to reconstruct correct structures and preserve identity information. In contrast, face SR has to deal with very large upscaling factors (8-64) [@gpen; @gfpgan; @glean], thus requiring the generation of a large amount of finer details, which is similar to face generation. As a combination of restoration and generation problems, face SR has unique solution pipelines, which always involve various additional facial priors [@fsrnet; @bulat2018super; @kim2019progressive; @zhu2016deep; @kim2019progressive; @yu2018face; @shen2018deep; @psfrgan], like parsing map and attribution map.
|
| 4 |
+
|
| 5 |
+
Recent advances have found that a face GAN can take the place of all previous facial priors, and produce realistic face details. This is based on the observation that a well-trained GAN model has already contained enough face information, which is sufficient to provide strong priors. For instance, GLEAN [@glean] adopts the intermediate features of a StyleGAN [@stylegan2] as latent banks, and achieves superior performance on large-factor SR tasks. While GFPGAN [@gfpgan] and GPEN [@gpen] introduce face GAN models to solve blind face restoration problem, and both can recover promising facial details. Their success can be attributed to the utilization of GAN priors and motivates later works to find more applications.
|
| 6 |
+
|
| 7 |
+
However, if we must rely on such a prior, face SR will face two apparent limitations. First, as face GAN is trained on specific datasets (e.g., FFHQ [@ffhq]), the corresponding face SR methods can only deal with the same kind of face images (e.g., frontal faces with a fixed size), significantly restricting its applications. Second, as face GAN is not specially designed for face SR, we have to add additional operations in the network for adaptation [@gpen; @gfpgan; @glean; @dgp], which is a waste of computation resources. Then we will ask: can we get rid of these priors, and design a pure data-driven framework?
|
| 8 |
+
|
| 9 |
+
Another issue unsolved in face SR is the flexibility of generation. Existing methods can only output a single restoration result with a fixed style. However, in real scenarios, users might want to adjust the generative strength to meet personalized requirements. For example, they will desire more details in old photo restoration, but less hallucination effects in surveillance video enhancement. "How to control the amount of generated details\" is a practical demand. Furthermore, real-world images may have various sizes, but conventional SR models (e.g., GLEAN [@glean] and ESRGAN [@esrgan]) for fixed upscaling factors cannot handle such diverse cases.
|
| 10 |
+
|
| 11 |
+
To address the problems, we propose a generative and controllable face SR framework, called GCFSR, which has three appealing properties. First, it could reconstruct faithful images with promising identity information. This is also the basic requirement of the face SR task. Second, it could generate realistic face details, without reliance on any additional priors, including facial priors and GAN priors. This shows that GAN prior is not an essential part in the face SR task. Third, its generative strength can be interactively adjusted (Figure [\[fig:figure1\]](#fig:figure1){reference-type="ref" reference="fig:figure1"}). This can also be used in handling different and continuous upscaling factors. These three properties are guaranteed by three special designs in GCFSR, which are the encoder-generator architecture, style modulation and feature modulation modules. GCFSR enjoys a very concise architecture without extra priors or initialization. We will detail our designs in the Method section. More importantly, GCFSR has a nice training property. It is end-to-end trainable and converges fast. When the upscaling factor is small ($\leq8$), it is possible to discard all pixel-wise constraints and use a single GAN loss to achieve state-of-the-art performance. This has never been revealed in previous SR methods. Extensive experiments and ablation studies have demonstrated the effectiveness of each module. Combining them together, GCFSR could achieve superior performance to GAN-based methods in both small and large upscaling factors. We can also observe vivid face details and gradually modulated effects in qualitative results (see Figure [\[fig:figure1\]](#fig:figure1){reference-type="ref" reference="fig:figure1"}).
|
| 12 |
+
|
| 13 |
+
# Method
|
| 14 |
+
|
| 15 |
+
Before introducing the proposed GCFSR, we give a brief review on previous state-of-the-art GAN-prior-based image restoration/super resolution methods: GLEAN [@glean], GFPGAN [@gfpgan], and GPEN [@gpen]. Here we provide the detailed descriptions of these methods in Table [6](#table:observation){reference-type="ref" reference="table:observation"}.
|
| 16 |
+
|
| 17 |
+
[]{#table:observation label="table:observation"}
|
| 18 |
+
|
| 19 |
+
::: {#table:observation}
|
| 20 |
+
+--------------------------------------------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------+
|
| 21 |
+
| | **GLEAN** [@glean] | **GFPGAN** [@gfpgan] | **GPEN** [@gpen] |
|
| 22 |
+
+:===============================================================================================================================+:==============================================================================================================================:+:==============================================================================================================================:+:==============================================================================================================================:+
|
| 23 |
+
| Degradation | single | multiple | multiple |
|
| 24 |
+
+--------------------------------------------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------+
|
| 25 |
+
| ::: {#table:observation} | ::: {#table:observation} | ::: {#table:observation} | ::: {#table:observation} |
|
| 26 |
+
| ------------- | ----------- | ----------- | ----------- |
|
| 27 |
+
| Network | encoder | encoder | encoder |
|
| 28 |
+
| Description | GAN prior | CS-SFT | Concat |
|
| 29 |
+
| ------------- | decoder | GAN prior | GAN prior |
|
| 30 |
+
| | ----------- | ----------- | ----------- |
|
| 31 |
+
| : The detailed descriptions of the state-of-the-art GAN-prior-based methods: GLEAN [@glean], GFPGAN [@gfpgan], GPEN [@gpen]. | | | |
|
| 32 |
+
| ::: | : The detailed descriptions of the state-of-the-art GAN-prior-based methods: GLEAN [@glean], GFPGAN [@gfpgan], GPEN [@gpen]. | : The detailed descriptions of the state-of-the-art GAN-prior-based methods: GLEAN [@glean], GFPGAN [@gfpgan], GPEN [@gpen]. | : The detailed descriptions of the state-of-the-art GAN-prior-based methods: GLEAN [@glean], GFPGAN [@gfpgan], GPEN [@gpen]. |
|
| 33 |
+
| | ::: | ::: | ::: |
|
| 34 |
+
+--------------------------------------------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------+
|
| 35 |
+
| ::: {#table:observation} | $188.29$M | $90.76$M | $71.21$M |
|
| 36 |
+
| ------------ | | | |
|
| 37 |
+
| Parameters | | | |
|
| 38 |
+
| ------------ | | | |
|
| 39 |
+
| | | | |
|
| 40 |
+
| : The detailed descriptions of the state-of-the-art GAN-prior-based methods: GLEAN [@glean], GFPGAN [@gfpgan], GPEN [@gpen]. | | | |
|
| 41 |
+
| ::: | | | |
|
| 42 |
+
+--------------------------------------------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------+
|
| 43 |
+
| GAN prior | fixed | fixed | finetuned |
|
| 44 |
+
+--------------------------------------------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------+
|
| 45 |
+
|
| 46 |
+
: The detailed descriptions of the state-of-the-art GAN-prior-based methods: GLEAN [@glean], GFPGAN [@gfpgan], GPEN [@gpen].
|
| 47 |
+
:::
|
| 48 |
+
|
| 49 |
+
First of all, GLEAN [@glean] is proposed for image super resolution on single upscaling factor, while GFPGAN and GPEN could deal with multiple degradations. From Table [6](#table:observation){reference-type="ref" reference="table:observation"}, we observe that GLEAN relies heavily on additional modules for SR. Specifically, GLEAN utilizes a RRDBNet [@esrgan] for feature extraction, and then combines the fixed GAN prior with an additional decoder to generate the final output. Thus, GLEAN has significantly more parameters than the other two methods (Table [6](#table:observation){reference-type="ref" reference="table:observation"}). Similarly, GFPGAN [@gfpgan] adopts an additional UNet [@unet] trained with L1 loss for the degradation removal, and then transforms the features in Unet to the parameters of scaling and shifting operations, which will be used to modify the fixed GAN prior. With the pretrained GAN prior, GLEAN and GFPGAN could achieve better performance than others at the beginning of training (Figure [1](#fig:convergence){reference-type="ref" reference="fig:convergence"}). Differently, GPEN [@gpen] directly concatenates the features from the encoder and GAN prior. As the concatenation operation introduces new parameters to GAN prior, GPEN gives the GAN prior a small learning rate for further finetuning. However, as shown in Figure [1](#fig:convergence){reference-type="ref" reference="fig:convergence"}, this strategy leads to slow convergence and inferior performance compared with other methods.
|
| 50 |
+
|
| 51 |
+
<figure id="fig:convergence" data-latex-placement="htbp">
|
| 52 |
+
<embed src="convergence.pdf" />
|
| 53 |
+
<figcaption>The convergence curves of GCFSR (ours), GFPGAN <span class="citation" data-cites="gfpgan"></span>, GPEN <span class="citation" data-cites="gpen"></span>, and GLEAN <span class="citation" data-cites="glean"></span>. The x and y axes denote the training iterations (k) and FID scores on CelebA-HQ for <span class="math inline">64×</span> SR.</figcaption>
|
| 54 |
+
</figure>
|
| 55 |
+
|
| 56 |
+
In conclusion, previous GAN-prior-based methods either design complicated modules to modify the fixed GAN prior, or further finetune the GAN prior for adaptation. These observations indicate that utilizing GAN prior in face restoration/super resolution is not a trivial task. If this is the case, can we design a new generative model without reliance on the pretrained GAN prior? Thus, in this work, we propose a very concise architecture -- GCFSR without extra priors. As we can see from Figure [1](#fig:convergence){reference-type="ref" reference="fig:convergence"}, the end-to-end trainable GCFSR converges fast and outperforms the state-of-the-art GAN-prior-based methods.
|
| 57 |
+
|
| 58 |
+
<figure id="fig:overview" data-latex-placement="t">
|
| 59 |
+
<div class="center">
|
| 60 |
+
<embed src="framework.pdf" style="width:95.0%" />
|
| 61 |
+
</div>
|
| 62 |
+
<figcaption>The architecture of GCFSR. It contains an encoder (red) and a generator (green & blue). The encoder network uses several strided convolutional layers to extract the multi-level features and latent codes <span class="math inline"><strong>w</strong></span>. The generator takes the topmost encoded feature maps and latent codes <span class="math inline"><strong>w</strong></span> to generate realistic face details by a sequence of style-modulated convolutions <span class="citation" data-cites="stylegan2"></span>, namely as <em>style modulation</em> (green) here. While the <em>feature modulation</em> (blue) module controls how much the encoded and generated features are expressed under the conditional upscaling factor <span class="math inline"><em>s</em></span>. We train the whole network in an end-to-end manner. (The colored blocks are trained from scratch, while the other blocks are fixed or contain no trainable parameters.)</figcaption>
|
| 63 |
+
</figure>
|
2203.08481/main_diagram/main_diagram.drawio
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2203.08481/paper_text/intro_method.md
ADDED
|
@@ -0,0 +1,75 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Introduction
|
| 2 |
+
|
| 3 |
+
Visual grounding (VG) task [\[13,](#page-8-0) [24,](#page-8-1) [40,](#page-9-0) [65\]](#page-10-0) has achieved great progress in recent years, with the advances in both computer vision [\[16,](#page-8-2)[20,](#page-8-3)[21,](#page-8-4)[25,](#page-8-5)[26,](#page-8-6)[46,](#page-9-1)[56,](#page-9-2)[57,](#page-9-3)[59\]](#page-9-4) and natural language processing [\[4,](#page-8-7)[14,](#page-8-8)[41](#page-9-5)[,50](#page-9-6)[,53\]](#page-9-7). It aims to localize the
|
| 4 |
+
|
| 5 |
+

|
| 6 |
+
|
| 7 |
+
Figure 1. Comparison with fully and weakly-supervised visual grounding method. (a) Fully-supervised VG utilizes image region-query pairs as supervision signals. (b) Weakly-supervised VG adopts only language queries. (c) Our Pseudo-Q method is free of any task-related annotations.
|
| 8 |
+
|
| 9 |
+
objects referred by natural language queries, which is essential for various vision-language tasks, *e.g.*, visual question answering [\[2\]](#page-8-9) and visual commonsense reasoning [\[67\]](#page-10-1).
|
| 10 |
+
|
| 11 |
+
Most existing visual grounding methods can be categorized into two types: fully-supervised [\[8,](#page-8-10) [13,](#page-8-0) [22,](#page-8-11) [23,](#page-8-12) [33,](#page-9-8) [35\]](#page-9-9) and weakly-supervised [\[6,](#page-8-13) [10,](#page-8-14) [19,](#page-8-15) [36,](#page-9-10) [38,](#page-9-11) [49,](#page-9-12) [55,](#page-9-13) [58\]](#page-9-14). Although these two lines of works have made remarkable successes, they rely heavily on manually annotated datasets. However, obtaining a large quantity of manual annotations, especially natural language queries, is expensive and time-consuming. To annotate queries, humans need to firstly recognize the visual objects and identify their attributes, and then determine diverse relationships between them on a case-by-case basis, such as spatial (*e.g.*, *left* and *right*), preposition (*e.g.*, *in* and *with*), action (*e.g.*, *throwing some-*
|
| 12 |
+
|
| 13 |
+
<sup>\*</sup>Equal contribution.
|
| 14 |
+
|
| 15 |
+
<sup>†</sup>This work was done during an internship at Tsinghua.
|
| 16 |
+
|
| 17 |
+
<sup>‡</sup>Corresponding author.
|
| 18 |
+
|
| 19 |
+
<span id="page-1-0"></span>*thing*), and comparative (*e.g.*, *smaller* and *bigger*). Among them, *spatial* relation is the most frequently queried one.
|
| 20 |
+
|
| 21 |
+
To reduce the burden of human annotation, we propose a pseudo language query based approach (Pseudo-Q) for visual grounding. Our inspiration comes from previous works [\[17,](#page-8-16) [31\]](#page-8-17) that address the high annotation cost issue in image captioning task, by leveraging an unlabelled image set, a sentence corpus, and an off-the-shelf object detector. However, the visual grounding task is more complicated and challenging, as it involves the modelling of relations between objects.
|
| 22 |
+
|
| 23 |
+
To accurately ground objects by language queries, it's fundamental to recognize their categories, attributes, and relationships. Thus, when it comes to generating pseudo region-query pairs for an unlabelled image set, we need to focus on three key components: (1) salient objects (nouns) which are most likely to be queried, (2) intrinsic attributes possessed by the queried objects, and (3) the important spatial relationships between the objects. Motivated by [\[17,](#page-8-16) [42\]](#page-9-15), we leverage an off-the-shelf object detector [\[1\]](#page-8-18) to locate the most notable candidates with high confidence, and an attribute classifier [\[1\]](#page-8-18) to recognize common attributes. However, these detectors are unable to distinguish the spatial relations between objects. Thus, we present a heuristic algorithm to determine the *spatial relationships* between the objects of the same class by comparing their areas and relative coordinates. With these three essential components, pseudo-queries with respect to spatial relations between objects can be generated.
|
| 24 |
+
|
| 25 |
+
To further improve the performance of our method, we also propose a query prompt module which attentively tailors generated pseudo queries into task-related query templates for visual grounding. For the visual-language model, we put forward a multi-level cross-modality attention mechanism in the fusion module to encourage a deeper fusion between visual and language features.
|
| 26 |
+
|
| 27 |
+
Extensive experiments have verified the effectiveness of our method. First, in a fully supervised manner, it can reduce human annotation costs by *31*% without sacrificing the original model's performance on RefCOCO [\[65\]](#page-10-0). Second, without bells and whistles, it can obtain superior or comparable performance even compared with state-of-the-art weakly-supervised visual grounding methods on five datasets, including RefCOCO [\[65\]](#page-10-0), RefCOCO+ [\[65\]](#page-10-0), RefCOCOg [\[40\]](#page-9-0), ReferItGame [\[28\]](#page-8-19) and Flickr30K Entities [\[44\]](#page-9-16).
|
| 28 |
+
|
| 29 |
+
In summary, this paper makes three-fold contributions:
|
| 30 |
+
|
| 31 |
+
- (1) We introduce the first pseudo-query based visual grounding method that deals with the most dominant spatial relationships among objects.
|
| 32 |
+
- (2) We propose a query prompt module to specifically tailor pseudo-queries for visual grounding task, and a visual-language model equipped with multi-level cross-modality attention is put forward to fully capture
|
| 33 |
+
|
| 34 |
+
- the contextual relationships of different modalities.
|
| 35 |
+
- (3) Extensive experiments demonstrate that our approach can not only dramatically reduce the manual labelling costs without performance sacrifice under the fully supervised condition, but also surpass or achieve comparable performance with state-of-the-art weakly-supervised visual grounding methods.
|
| 36 |
+
|
| 37 |
+
# Method
|
| 38 |
+
|
| 39 |
+
In this section, we explain our Pseudo-Q method in detail. In Sec. 3.1, we introduce the overview of Pseudo-Q. In Sec. 3.2, we elaborate the pseudo-query generation module. In Sec. 3.3, the details of the task-related query prompt module are shown. Finally, we illustrate the mechanism of our multi-level cross-modality attention in Sec. 3.4.
|
| 40 |
+
|
| 41 |
+
Previous visual grounding methods rely on expensive human annotations, *i.e.*, image region-query pairs for fully-supervised approaches [13, 22, 35] or image-query pairs for weakly-supervised approaches [36, 37, 49]. We firstly propose a pseudo language query based method without using any task-related annotations at training.
|
| 42 |
+
|
| 43 |
+
Specifically, the Pseudo-Q approach consists of three components, including: (1) pseudo-query generation module, (2) query prompt module, and (3) visual-language model. The illustration of Pseudo-Q is shown in Figure 2. Taking an unlabeled image as an explanation, the detector
|
| 44 |
+
|
| 45 |
+
can produce several object proposals. Then, these proposals are fed into pseudo-query generation module, which can automatically generate *nouns*, *attributes*, and *relationships* for these proposals. Together with these elements, we can easily create pseudo language queries.
|
| 46 |
+
|
| 47 |
+
Subsequently, the query prompt module refines created pseudo language queries for visual grounding task. Finally, we propose a visual-language model to fully capture the contextual relationship between the image regions and corresponding pseudo language queries.
|
| 48 |
+
|
| 49 |
+
In general, the first step for visual grounding is recognizing the categories of queried objects. However, such a simple grounding strategy leads to ambiguities in complex scenes, e.g., "a tall person on the left" or "a short person on the right", without understanding their spatial relations or attributes. Thus, to accurately locate visual objects by language queries, a visual grounding model needs to understand queried objects' categories, attributes, and their relationships. Based on the above analysis, generating pseudo language queries for candidate objects involves three key components: nouns, attributes and relationships.
|
| 50 |
+
|
| 51 |
+
**Nouns.** Inspired by works [17, 31, 42], we adopt an off-the-shelf detector [1] to obtain the object proposals. Unlike image classification task where each image contains only one major object, scenes in visual grounding task are more complex due to plenty of candidate objects. While it is natural to select the most salient objects as candidates, such a process requires intensive manual labor which is not available in our setup. Instead, we use detection confidence as a criterion. Concretely, the top-N objects with highest confidence are kept as our proposals. Furthermore, we empirically discover that the detector will focus on a large quantity of tiny objects which are less likely to be queried. Thus, we propose to remove tiny objects before generating proposals.
|
| 52 |
+
|
| 53 |
+
<span id="page-3-3"></span><span id="page-3-2"></span>
|
| 54 |
+
|
| 55 |
+
Figure 3. (a) The **pseudo-query generation module** produces spatial relationships and attributes for corresponding objects. (b) The **visual-language model** consists of a visual encoder, a language encoder, and a cross-modality fusion module.
|
| 56 |
+
|
| 57 |
+
**Attributes.** They are important semantic cues that help models understand scenes better. We observe that, in existing datasets [28, 40, 65], common attributes include color, size (tall), material (wooden) and human state of motion (e.g. standing and walking), etc. Similar to obtaining the nouns, we take advantage of an off-the-shelf attribute classifier [1] to predict the above common attributes of corresponding objects. In general, one object may have several attributes, such as "a tall person is walking", and it is ideal to recognize as many attributes as possible. However, limited by the capability of the model, we only keep the attribute with the highest confidence and exceeding a predefined threshold as the final proposal. Furthermore, clothes are also important attributes for a person which can be determined by calculating the IoU value between clothes and person, as shown in Figure 3(a).
|
| 58 |
+
|
| 59 |
+
**Relationships.** We observe that *spatial relationship* is one of the most frequently used relations in most existing datasets [40,65] to distinguish different objects. In order to excavate latent spatial relationships, we propose a heuristic algorithm as shown in Figure 3(a).
|
| 60 |
+
|
| 61 |
+
In general, spatial relationship can be divided into three dimensions: horizontal (*i.e.*, *left*, *middle*, and *right*), vertical (*i.e.*, *top* and *bottom*) and depth (*i.e.*, *front* and *behind*). Note that each previously generated object proposal is represented by a set of coordinates which naturally embrace spatial information. We can obtain the horizontal and vertical spatial relationships by comparing the center coordinates of objects along with these two dimensions. Meanwhile, to increase the robustness of the algorithm, the numerical difference of two objects' coordinates in the same dimension is required to be greater than a pre-defined threshold. Finally, we can determine the spatial relations, such as *left*, *right* and *middle*, for different visual objects of the same category.
|
| 62 |
+
|
| 63 |
+
In the depth dimension, we assume that, for the same kind of object, the closer the object is to a camera the larger the object region. Concretely, we calculate the ratio of the area of the largest object region to the smallest object region and set a threshold to determine whether there is a *front* and *behind* relationship. If the ratio exceeds the threshold, we assign *front* and *behind* relationships to the largest and smallest objects respectively.
|
| 64 |
+
|
| 65 |
+
**Pseudo-queries.** After obtaining three key elements, we can generate all possible pseudo-queries for an image following the templates in Appendix. Finally, we sample up to M pseudo image region-query pairs if the number of candidates is greater than M, otherwise, we sample all.
|
| 66 |
+
|
| 67 |
+
With the advances of pre-trained language models [4, 14], prompt engineering is proposed to better utilize their learned knowledge at pre-training stage. Inspired by the recent success of prompt engineering in visual-language tasks, *e.g.*, image-language pre-training [45], we propose a query prompt module to excavate the hidden knowledge of pre-trained language model (Sec. 3.4) by refining generated pseudo language queries for visual grounding task.
|
| 68 |
+
|
| 69 |
+
While the prompt templates proposed in CLIP [45] work well for the image classification task, we empirically find that they are ineffective for the challenging visual grounding task. Consequently, in this work, we explore prompt templates exclusively for visual grounding. Our introduced query prompt module follows certain templates, e.g., "find the region that corresponds to the description {pseudo-query}" or "which region does the text {pseudo-query} describe?". Such a design is specifically tailored for the visual grounding task, since the focus of this task lies on locating the regions of referred objects.
|
| 70 |
+
|
| 71 |
+
Our visual-language model consists of a visual encoder, a language encoder and a cross-modality fusion module to fuse information from two modalities. The designs of <span id="page-4-1"></span>the visual encoder and the language encoder are following TransVG [\[13\]](#page-8-0). We elaborate them in Appendix.
|
| 72 |
+
|
| 73 |
+
Cross-modality fusion module. Previous method [\[13\]](#page-8-0) naively utilizes final features of visual and language encoders to acquire cross-modality information. However, such a simple approach is suboptimal, since each level of visual feature possesses valuable semantic information [\[21,](#page-8-4) [34\]](#page-9-27). To be more specific, low-level features usually denote coarse information, *e.g.*, shape and edge, while high-level features can represent finer information, *e.g.*, intrinsic object properties. Thus, we further propose multi-level crossmodality attention (ML-CMA) to thoroughly fuse textual embedding with multi-level visual features.
|
| 74 |
+
|
| 75 |
+
The mechanism of ML-CMA is shown in Figure [3\(](#page-3-2)b). Features of each visual transformer layer are passed into a cross-modality fusion module with the extracted textual embedding to calculate cross-modality self-attention. Then, we concatenate all updated visual or textual features from different levels respectively, and utilize a fully connected layer to map them into the original dimension. Finally, all features are concatenated and fed into a regression head to predict referred object regions. The regression head consists of three fully connected layers.
|
2203.09824/main_diagram/main_diagram.drawio
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2203.09824/paper_text/intro_method.md
ADDED
|
@@ -0,0 +1,107 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Introduction
|
| 2 |
+
|
| 3 |
+
This work studies to what extent voice can hint face geometry motivated by recent studies on voice-face matching and cross-modal learning [\[30,](#page-8-0) [59,](#page-9-0) [67\]](#page-10-0). Many physiological attributes are embedded in voices. For example, speech is produced by articulatory structures, such as vocal folds, facial muscles, and facial skeletons, which are all densely connected. Such a fact intuitively indicates potential correlations between voices and face shapes [\[20\]](#page-8-1). Experiments in cognitive science point out that audio cues are associated with visual cues in human perception– especially in recognizing a person's identity [\[5\]](#page-8-2). Recent neuroscience research further shows that two parallel processing of low-level audi-
|
| 4 |
+
|
| 5 |
+
<span id="page-0-0"></span>
|
| 6 |
+
|
| 7 |
+
Figure 1. Cross-Modal Perceptionist. We study the correlations between voices and face geometry under both supervised and unsupervised learning settings. This work aims at more explainable human-centric cross-modal learning for biometric applications.
|
| 8 |
+
|
| 9 |
+
tory and visual cues are integrated in the cortex, where voice processing affects facial structural analysis for the perception purpose [\[64\]](#page-9-1).
|
| 10 |
+
|
| 11 |
+
Traditional research in the voice domain focuses on utilizing voice inputs for predicting more conspicuous attributes which include speaker identity [\[7,](#page-8-3) [29,](#page-8-4) [46\]](#page-9-2), age [\[16,](#page-8-5) [45,](#page-9-3) [51\]](#page-9-4), gender [\[28\]](#page-8-6), and emotion [\[58,](#page-9-5) [66\]](#page-10-1). A novel direction in recent development goes beyond predicting these attributes and tries to reconstruct *2D face images from voice* [\[9,](#page-8-7)[36,](#page-9-6)[60\]](#page-9-7). Their research is built on an observation that one can approximately envision how an unknown speaker looks when listening to the speaker's voice. Attempts towards validating this assumptive observation include the work [\[36\]](#page-9-6) for image reconstruction and works [\[9,](#page-8-7)[60\]](#page-9-7) using generative adversarial networks (GANs). They aim to output face images from only a speaker's voice.
|
| 12 |
+
|
| 13 |
+
However, face images from voices are inherently ill-posed: the task involves predicting extraneous attributes that voices cannot hint, including image backgrounds, hairstyles, headgears, or beards. These are attributes that one can apparently choose without changing voices. Similar concerns arise regarding the correlations between voices and facial textures or ethnicity. [\[36\]](#page-9-6) demonstrates a t-SNE plot in which ethnicity is scattered across all samples, indicating its low correlations to voices. As a result, quantifying the differences between an output face image and a <span id="page-1-0"></span>reference is hard and less grounded.
|
| 14 |
+
|
| 15 |
+
Instead of producing face images, our analysis moves to the 3D domain with mesh representations and predicts one's face geometry or skull structures from voices, which is free from the above issues. Working on 3D meshes is less ambiguous than images because the former includes less noisy variations unrelated to a speaker's voice, such as stylistic variations, hairstyles, background, and facial textures. Moreover, meshes enable more straightforward quantification of differences between prediction and groundtruth in the Euclidean space– unlike the case in using face images, where sources of differences involve backgrounds and hairstyles.
|
| 16 |
+
|
| 17 |
+
From the perspective of 3D faces, much research attention has been paid to 3D reconstruction from monocular images [\[17,](#page-8-8) [50,](#page-9-8) [62,](#page-9-9) [69\]](#page-10-2) or video sequences [\[13,](#page-8-9) [26\]](#page-8-10) for 3D face animation or talking face synthesis. In contrast, we are the first to investigate the correlations between one's 3D face geometry and voices, and we focus on the analysis of the face geometry gleaned from one's voices. Our goal is to validate the correlations between voices and face geometry towards more explainable human-centric cross-modal learning with neuroscience support.
|
| 18 |
+
|
| 19 |
+
The analysis inevitably involves acquiring large-scale 3D face scans with paired voices, which is expensive and subject to privacy. To deal with this issue, we propose a novel *Voxceleb-3D* dataset that includes paired voices and 3D face models. Voxceleb-3D is inherited from two widely used datasets: Voxceleb [\[31\]](#page-8-11) and VGGFace [\[39\]](#page-9-10), which include voice and face images of celebrities, respectively. The approach [\[70\]](#page-10-3) we adopt to create Voxceleb-3D is inspired by 300W-LP-3D [\[69\]](#page-10-2), the most-used 3D face dataset, and we will describe details in Sec[.3.2.](#page-2-0)
|
| 20 |
+
|
| 21 |
+
Our analysis framework Cross-Modal Perceptionist (CMP), investigates the feasibility to predict face meshes using 3D Morphable Models (3DMM, Sec[.3.1\)](#page-2-1) from voices on the following two scenarios (Fig. [1\)](#page-0-0). We first train neural networks directly from Voxceleb-3D in a *supervised learning* manner using the paired voices and 3DMM parameters (Sec[.3.2\)](#page-2-0). We further investigate an *unsupervised learning* setting to inspect whether face geometry can still be gleaned without paired voices and 3D faces, which is a more realistic scenario. In this case, we use *knowledge distillation* (KD) [\[21\]](#page-8-12) to transfer knacks from the state-of-the-art method for 3D faces from images, SynergyNet [\[62\]](#page-9-9), into our student network and jointly train speech-to-image and image-to-3D blocks (Sec[.3.3\)](#page-3-0).
|
| 22 |
+
|
| 23 |
+
We design a set of metrics to measure the geometric fitness based on points, lines, and regions for both the supervised and the unsupervised scenarios. The evaluation attempts to show correlations between 3D faces and voices with straightforward neural network-based approaches. The analysis with CMP enables us to comprehend the correlations between face geometry and voices. Our research lays explainable foundations for human-centric cross-modal learning and biometric applications using voice-face correlations, such as security and surveillance when only voice is given.
|
| 24 |
+
|
| 25 |
+
Our goal is not to recover high-quality 3D face meshes from voices comparable to synthesis from visual modalities such as image or video inputs, but we try to answer the core question under our CMP framework: can face geometry be gleaned from voice? We break down the question into four parts and will answer them through experiments.
|
| 26 |
+
|
| 27 |
+
- Q1. Is it feasible to predict visually reasonable face meshes from voice?
|
| 28 |
+
- Q2. How stable is the mesh prediction from different utterances of the same person?
|
| 29 |
+
- Q3. Compared with face meshes produced by cascading separately trained speech-to-image and image-to-3Dface methods, can the performance of a joint training flow, where mesh prediction is trained with voice information, improve? How much?
|
| 30 |
+
- Q4. What is the major improvement that voice information can bring in the joint training flow?
|
| 31 |
+
|
| 32 |
+
Our contributions are summarized.
|
| 33 |
+
|
| 34 |
+
- 1. Towards explainable human-centric cross-modal learning, we are the first to study the correlations between face geometry and voices.
|
| 35 |
+
- 2. We devise an analysis framework, Cross-Modal Perceptionist, which studies both supervised and unsupervised approaches to learn face meshes from voices.
|
| 36 |
+
- 3. We show extensive analysis and discussion and answer four breakdown questions to validate the correlations between voices and face shapes.
|
| 37 |
+
|
| 38 |
+
# Method
|
| 39 |
+
|
| 40 |
+
Our goal is to analyze how a person's voice relates to one's face geometry in the 3D space. Thus, we learn 3D face meshes using 3D Morphable Models (3DMM) from input speech and analyze the correlations under supervised and unsupervised learning settings. The supervised setting learns the correlation from a paired voice and 3D face dataset. The unsupervised learning studies a realistic case
|
| 41 |
+
|
| 42 |
+
<span id="page-2-2"></span>
|
| 43 |
+
|
| 44 |
+
Figure 2. **Supervised learning framework.** Given a speech input, voice embedding is extracted by $\phi_v$ . $\phi_{dec}$ then estimates 3DMM parameters $\alpha$ for 3D face modeling. The supervision is computed with groundtruth $\alpha^*$ .
|
| 45 |
+
|
| 46 |
+
when such a paired dataset is not available: is it still possible to predict face geometry from voice?
|
| 47 |
+
|
| 48 |
+
3DMM [11] is a popular method for 3D face modeling using principal component analysis (PCA). By estimating the weights of basis matrices, a 3D face can be constructed. We can decompose a face into two components: the average face and face shape variation. That is, for a face A,
|
| 49 |
+
|
| 50 |
+
<span id="page-2-3"></span>
|
| 51 |
+
$$A = \bar{A} + V\alpha,\tag{1}$$
|
| 52 |
+
|
| 53 |
+
where $\bar{A} \in \mathbb{R}^{3N}$ is the average face with N three-dimensional vertices, $V \in \mathbb{R}^{3N \times P}$ is a basis matrix for the face shape variation, and $\alpha \in \mathbb{R}^P$ is the coefficient vector. Note that we can reshape A into $A_r \in \mathbb{R}^{3 \times N}$ , a matrix representation suitable for 3D rotation and translation.
|
| 54 |
+
|
| 55 |
+
We set N=53490 vertices following BFM [42], a particular form of 3DMM. Per the dimensionality of shape variation basis, we choose P=50 following Synergy-Net [62], the state-of-the-art 3D face reconstruction method from images using BFM. There are 12 additional pose parameters in SynergyNet used to align reconstructed 3D faces to its 2D image inputs: a rotation matrix $R \in \mathbb{R}^{3\times 3}$ and a translation vector $t \in \mathbb{R}^3$ , i.e., $A_p = RA_r + t$ . In our analysis, we only use these pose parameters for visualizing how well a predicted face mesh fits a 2D shape outline.
|
| 56 |
+
|
| 57 |
+
We first describe the supervised learning setting, illustrated in Fig. 2. Given a paired speech sequence and 3DMM parameters for an identity, we build an encoder-decoder structure first to extract voice embedding $v \in \mathbb{R}^{64}$ from a mel-spectrogram [15], which is a commonly used time-frequency representation for speech, of the input speech. Following [60], the voice encoder $\phi_v$ is pretrained on the large-scale speaker recognition task. Then, we train a decoder $\phi_{dec}$ to estimate 3DMM parameters, $\alpha$ . We use groundtruth 3DMM parameters to supervise the training with $L_2$ loss.
|
| 58 |
+
|
| 59 |
+
$$\mathcal{L}_{reg} = \|\alpha - \alpha^*\|^2 \tag{2}$$
|
| 60 |
+
|
| 61 |
+
where $\alpha^*$ is groundtruth 3DMM parameters.
|
| 62 |
+
|
| 63 |
+
<span id="page-3-3"></span>In addition, we adopt the triplet loss on the estimated 3DMM parameters $\alpha$ . The triplet loss minimizes the difference of pairwise relations between (anchor, positive) and (anchor, negative) pairs with a soft margin.
|
| 64 |
+
|
| 65 |
+
<span id="page-3-2"></span> $\mathcal{L}_{tri} = \max\{\|\alpha - \alpha_p\|_2 - \|\alpha - \alpha_n\|_2 + 1, 0\},$ (3) where $\alpha$ serves as an anchor, $\alpha_p$ is a positive sample for the anchor, representing the same identity but regressed from different images, and $\alpha_n$ , coming from a different identity, is a negative sample for the anchor. The triplet loss aims at coherent 3DMM parameters for the anchor and positive samples due to the same identities while simultaneously contrasting with the negative sample due to a different identity. The overall loss function is $\mathcal{L}_{sup} = \mathcal{L}_{reg} + \mathcal{L}_{tri}$ .
|
| 66 |
+
|
| 67 |
+
The challenge of this supervised learning problem is how to obtain $\alpha^*$ . Most large voice datasets, such as Voxceleb [31], only contain speech for celebrities, and most large face datasets, such as VGGFace [39], only consist of publicly scraped face images. We first follow [60] to fetch the intersection of voice and image data from Voxceleb and VGGFace. Then, we propose to fit 3D faces from 2D to create a novel dataset, Voxceleb-3D, using an optimization-based approach adopted by 300W-LP-3D [69], the most-used 3D face dataset. In detail, we use an off-the-shelf 3D landmark detector [6] to extract facial landmarks from collected face images and then optimize 3DMM parameters to fit in the extracted landmarks. Our Voxceleb-3D contains paired voice and 3D face data to fulfill our supervised learning.
|
| 68 |
+
|
| 69 |
+
Obtaining real 3D face scans is very expensive and limited by privacy, and the workaround of optimization-based 3DMM fitting with facial landmarks is time-consuming. An unsupervised framework may serve real-world scenarios. As a result, we propose an unsupervised framework with knowledge distillation. By leveraging a well-pretrained expert, it helps to validate whether face geometry can still be gleaned with neither real 3D face scans nor optimized 3DMM parameters.
|
| 70 |
+
|
| 71 |
+
Our unsupervised framework, illustrated in Fig. 3, has two stages: (1) synthesizing 2D face images from voices with GAN and (2) 3D face modeling from synthesized face images. The motivation is that we first use the GAN to generate 2D faces from voices to obtain the speaker's appearance. However, 2D images contain variations of backgrounds, textures, hairstyles that are irrelevant to voice. Thus, the second-stage image-to-3D-face module disentangles geometry from other variations.
|
| 72 |
+
|
| 73 |
+
Synthesizing face images from voices with GANs. Previous research develops a GAN-based speech-to-image framework [60]. A voice encoder $\phi_v$ extracts voice embeddings from input speech. Then a generator $\phi_g$ synthesizes face images from the voice embeddings, and a discriminator $\phi_{dis}$ decides whether the synthesis is indistinguishable
|
| 74 |
+
|
| 75 |
+
<span id="page-3-1"></span>
|
| 76 |
+
|
| 77 |
+
Figure 3. Unsupervised learning with KD. The unsupervised framework contains a GAN for face image synthesis with voice encoder $\phi_v$ , generator $\phi_g$ , discriminator $\phi_{dis}$ , and classifier $\phi_c$ . Then, knowledge distillation is used to achieve unsupervised learning, where information of image-to-3D-face mapping distilled from the expert network (yellow block) is exploited to train the student network (blue block). 2D face is a latent representation in this fashion. Besides using pseudo-groundtruth $\alpha^E$ to train the student, we also distill knowledge at intermediate layers using conditional probability distributions.
|
| 78 |
+
|
| 79 |
+
from a real face image. Last, a face classifier $\phi_c$ learns to predict the identity of an incoming face, ensuring that the generator produces face images that are truly close to the identity in interest. Here we overload notations of $\phi_v$ and other components introduced later for 3D face modeling in both Sec.3.2 and 3.3 due to the same functionalities.
|
| 80 |
+
|
| 81 |
+
In detail, given a speech input S, its corresponding speaker ID id, and real face images $I_r$ for the speaker, the image synthesized from the generator is $I_f = \phi_g(\phi_v(S))$ . The loss formulation is divided into two parts: real and fake images. For real images, the discriminator learns to assign them to "real" (r) and the classifier learns to assign them to id. The loss for real images is $\mathcal{L}_r = \mathcal{L}_d(\phi_{dis}(I_r), r) + \mathcal{L}_c(\phi_c(I_r), id)$ showing the discriminator and classifier losses respectively. For fake images, after producing $I_f$ from $\phi_g$ , the discriminator learns to assign them to "fake" $(\bar{r})$ and the classifier also learns to assign them to id. The loss counterpart for fake images is $\mathcal{L}_f = \mathcal{L}_d(\phi_{dis}(I_f), \bar{r}) + \mathcal{L}_c(\phi_c(I_f), id)$ .
|
| 82 |
+
|
| 83 |
+
3D face modeling from synthesized images. After image synthesis by GAN, we build a network to estimate 3DMM parameters from fake images. The parameter estimation consists of an encoder $\phi_I$ and a decoder $\phi_{dec}$ to obtain 3DMM parameters $\alpha = \phi_{dec}(\phi_I(I_f))$ . 3D face meshes are then reconstructed by Eq. 1.
|
| 84 |
+
|
| 85 |
+
To fulfill the unsupervised training, we distill the knowledge of image-to-3D-face reconstruction from a pretrained expert network. The expert, consisting of encoder $\phi_I^E$ and decoder $\phi_{dec}^E$ , reconstructs 3D face models from syn-
|
| 86 |
+
|
| 87 |
+
<span id="page-4-3"></span><span id="page-4-0"></span>
|
| 88 |
+
|
| 89 |
+
Figure 4. **Samples of face meshes in Voxceleb-3D.** We overlay the 3D faces with associated images to show how well 3D meshes fit in 2D face outlines.
|
| 90 |
+
|
| 91 |
+
thesized face images and produces pseudo-groundtruth of 3DMM parameters $\alpha^E$ . $\alpha^E$ is used to train the student network by $L_2$ loss:
|
| 92 |
+
|
| 93 |
+
$$\mathcal{L}_{p-gt} = \|\alpha^E - \alpha\|^2. \tag{4}$$
|
| 94 |
+
|
| 95 |
+
This KD strategy *circumvents the needs of paired voice and 3D face data* and helps us achieve unsupervised learning.
|
| 96 |
+
|
| 97 |
+
In addition to pseudo-groundtruth, we also distill knowledge at intermediate layers and minimize their distribution divergence between the expert and the student. We measure the distributions in the feature spaces by the extracted image embeddings $z^E \in \mathbb{R}^{B \times \nu}$ and $z \in \mathbb{R}^{B \times \nu}$ of the expert and the student network. We maintain the batch dimension B and collapse the rest to $\nu$ . Then as in [40], we calculate the conditional probability between feature points as follows.
|
| 98 |
+
|
| 99 |
+
$$z_{i|j} = \frac{K(z_i, z_j)}{\sum_{k, k \neq j} K(z_k, z_j)}, z_{i|j}^E = \frac{K(z_i^E, z_j^E)}{\sum_{k, k \neq j} K(z_k^E, z_j^E)}, \quad (5)$$
|
| 100 |
+
|
| 101 |
+
where $K(\cdot, \cdot)$ is a scaled and shifted cosine similarity whose outputs lie in [0,1]. Kullback-Leibler (KL) divergence is then used to minimize the divergence between the two conditional distributions.
|
| 102 |
+
|
| 103 |
+
$$\mathcal{L}_{div} = \sum_{i} \sum_{j \neq i} z_{j|i}^{E} \log \left( \frac{z_{j|i}^{E}}{z_{j|i}} \right). \tag{6}$$
|
| 104 |
+
|
| 105 |
+
The KD loss is $\mathcal{L}_{KD} = \mathcal{L}_{p-gt} + \mathcal{L}_{div}$ . The overall unsupervised learning loss combines the GAN losses and the triplet loss in Eq. 3.
|
| 106 |
+
|
| 107 |
+
$$\mathcal{L}_{unsuper} = \mathcal{L}_f + \mathcal{L}_r + \mathcal{L}_{KD} + \mathcal{L}_{tri}. \tag{7}$$
|
2204.12665/main_diagram/main_diagram.drawio
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
<mxfile host="app.diagrams.net" modified="2022-01-12T02:41:05.491Z" agent="5.0 (X11)" version="16.2.4" etag="ZD99ta0xXjHlJEeDB537" type="device"><diagram id="ObqIvV5U78cAvIUt-a7y">7Vpbc5s4FP41zLQPZYRAEjwm7mUfsp1uOzttn3YwyDZbjLyyHNv761cCyVwETtLgkmbL5IIO4gjOp++TdITjz9aHdzzerH5nKc0dCNKD4792IPQCCB31A9KjtkQQVJYlz1Jtqw2fsn+pNppquyyl21ZFwVgusk3bmLCioIlo2WLO2b5dbcHydqubeKlbBLXhUxLn1Kr2OUvFqrKGqFH7N5otV6ZlD+gr69hU1i62qzhl+0Zb/hvHn3HGRHW2PsxorqJn4lI5ejtw9fRgnBbiPjdoJLbiaN6NpvJVdZFxsWJLVsT5m9p6zdmuSKlyAGSprnPD2EYaPWn8mwpx1LjFO8GkaSXWub5KD5n40jj/qly5SJdeH7TnsnA0hULw45dmoXGXKta3lSVzX/V+6qXacWc7nmiTr3tQzJfUIAbtQHoneGTHpmxNZTOyCqd5LLLbtvtYd7DlqV6NgTzRMPRDoh/nNs532qmDZi+UU/krT19akO1XmaCfNnH5QnvJuXa44+2mIsEiOyjYrhesEBocT1VYZHk+YznjpTt/IY8wlPat4OwbbVzxsR/56Smqt5QLehjseQMB0zdAgt0AEMn9KIQBwl5QedCagAlxsY+9CASk/Ftd3dd084Hrez7yCY5IBImJ26pBvIEqj0En+EUYiJ4WYdAQYbaXIswwMYao9AMIA7ELgGELwf4TIQweQuePv+SJDIfIhEOu3328ccjrF9LEZJTyrKDSuJUmB84qGBv2WNpf9iKr/LXBbGNVsIJ2YNKmOM+WhSwmEg4q7dcKrEyO+Vf6wjpL05LOfb2nTfFufxkBfazQh23Eg9AlkQUzDt0A90Aruwd6PJxkSv2rNe9r48pd+ldL3teW4vXrnzU4ynKH0YvF/XQytHXS0GEKnQwtJr6dvbfgbIN1h1J2g8WZkM/MFF1eReBhSpnIYxyuBGHogo46IuKG0OKKh5FLkE0WL3x8uKOzTKH5nO0vPkk4T5EmesFwl75/4D0sRSqM6iNsgYBQ5KIzHVn7/cAy2dzJKQKeGzadtnwSj0hdBKej476io/bYXAf1NIJ8XB/+Q1qpGG61UvaQU5Tu1WnMCvd/Pb80k8nWBHNC4fS8X6D0gxJMCAq0hrOPZRsqyA7EuZoHzrk8W4ryDSvLdhMXLSjxPzuVbCnF8NW2BONKVvC8zaG+aLxczeWAFiflEKcdyuesfJp2xhxPHzR8SmUcZfj0MXS7M03iGhI0Z5rIhT0zzQCMgK7/81HuYfPM+1AusCkXTsi4wGKcXsrZq7PJV95JSufhfKz5pO9iUq+8O+R4opkqD/2i0Gna8FQoNJgN6Ut8PGMKeW0KdXJX9kJtIgoRG64uHCZNdBPPaf6BbTO9/p0zIdhaxdts8agum8bb1YleVsqpAWM3+yQU/zR2Zs/Jq7xv1JOsD0u12eauWfJtt3HXMVf/kh3Pj9e87CuDmYzmot0LwTiAQ7XWa6yYOutxYhY6DYwhAa6BvgksHGM+YSc/xpkRGi+q8vd7mbFCRVsJ1Yo65cZlwqmgpce4/G/ltEFcpOqvno0Ca5cISG2WZT1ltV2dkZwB32fGedlbEsG4erzdNiuWjZbbk+UqUO3g3TWHfrZZXYiQZEmbGbCPGaGLeyQPjcGM6FlpXHsIAwABujijfSNgiALskna6CGFiYegZXMeWNuOjR9oeJ0oXEcjkBEJd0a9GiOF758bwnoo949+knyueKMImYsdpQ0bm1q38+x9dggo+0ps/tRLeSvVMq+2oxS7Pj87pAxNlBnl8pKX+7TOJOQRYjnigoDvOCmWlcbK6QwYf86yDUjrQ4k+h
sB3Wqk4yVpIDAeJ2FnKoT3kBdAPvMsoLJ00rPmQvbcQ9MTO6NBdkE6Z2zeM8/00xhEF7zfXjd8TgpEm9H7F5fB8C9CT1pvx6Bg5m9d5eKgPRzTMkFJXycdHvzXwSnclAoJA8ya9n4KRJvKdCmZ4kHpySMnYW77kOGtB3QzS09375IUQW62+hq+30+pNy/81/</diagram></mxfile>
|
2204.12665/main_diagram/main_diagram.pdf
ADDED
|
Binary file (21 kB). View file
|
|
|
2204.12665/paper_text/intro_method.md
ADDED
|
@@ -0,0 +1,157 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Introduction
|
| 2 |
+
|
| 3 |
+
Deep Reinforcement Learning (DRL) has been successfully used for sequential decision making in tasks using image-based state representations [@DBLP:journals/corr/MnihKSGAWR13]. However, many problems in the real world cannot be readily expressed as such and are naturally described by using factored representations in a symbolic representation language such as PDDL or RDDL [@DBLP:journals/jair/LongF03; @Sanner:RDDL]. For example, in a logistics problem, the objective consists of delivering packages to their destined locations using a truck to carry them. Symbolic description languages such as first-order logic (FOL) can easily capture states and objectives of this scenario using predicates such as *in-truck($p$)* where $p$ is a parameter that can be used to represent any package. Symbolic representations for such problems are already available in the form of databases and converting them to image-based representations would require significant human effort. Due to their practical use, symbolic descriptions and algorithms utilizing them are of keen interest to the research community.
|
| 4 |
+
|
| 5 |
+
A key difficulty in applying RL to problems expressed in such representations is that their state spaces generally grow exponentially as the number of state variables or objects increases. However, solutions to such problems can often be described by compact, easy-to-compute "generalized policies\" that can transfer to a class of problems with differing object counts, significantly reducing the sample complexity for learning a good, instance-specific policy.
|
| 6 |
+
|
| 7 |
+
**Running Example** We illustrate the benefits of computing generalized policies using the Sysadmin$(n)$ domain that has been used in many planning competitions. A problem in this domain consists of a set of $n$ computers connected to each other in an arbitrary configuration. At any timestep, the computers can shutdown with an unknown probability distribution that depends on the network connectivity. The agent is also awarded a positive reward that is proportional to the total number of computers that are up. Similarly, at each timestep, the agent may reboot any one of the $n$ computers bearing a small negative reward or simply do nothing. In our problem setting, action dynamics are not available as closed-form probability distributions, making RL the natural choice for solving such problems.
|
| 8 |
+
|
| 9 |
+
A state in this problem is succinctly described by a factored representation with boolean state variables (propositions) that describe which computers are running and their connectivity. It is easy to see that the state spaces grow exponentially as $n$ increases. However, this problem has a very simple policy that can provide a very high reward; reboot any computer that is not running or do nothing. Even though a general policy for such a problem is easy to express, traditional approaches to RL like Q-learning cannot transfer learned knowledge and have difficulties scaling to larger problems with more computers. Our major contribution in this paper is learning a *generalized, relational Q-function* that can express such a policy and use it to efficiently transfer policies to larger instances, reducing the sample complexity for learning.
|
| 10 |
+
|
| 11 |
+
Many existing techniques that compute generalized policies do so by using human-guided or automatic feature engineering to find relevant features that facilitate efficient transfer (see Sec. [5](#sec:related_work){reference-type="ref" reference="sec:related_work"} for a detailed discussion of related work). For example, @DBLP:conf/aips/NgP21 \[[-@DBLP:conf/aips/NgP21]\] use an external feature discovery module to learn first-order features for Q-function approximation. API [@DBLP:journals/jair/FernYG06] uses a taxonomic language with beam search to form rule-based policies.
|
| 12 |
+
|
| 13 |
+
In this paper, we approach the problem of learning generalized policies from a Q-function approximation perspective. We utilize deep learning along with an automatically generated feature list to learn a nonlinear approximation of the Q-function. Our approach learns a generalizable, relational Q-function that facilitates the transfer of knowledge to larger instances at the propositional level. Our empirical results show that our approach can outperform existing approaches for transfer (Sec. [4](#sec:empirical_evaluation){reference-type="ref" reference="sec:empirical_evaluation"}).
|
| 14 |
+
|
| 15 |
+
The rest of the paper is organized as follows. The next section presents the required background. Sec. [3](#sec:our_approach){reference-type="ref" reference="sec:our_approach"} describes our approach for transfer followed by a description of using our algorithm for generalized reinforcement learning (Sec. [3.3](#sec:grl){reference-type="ref" reference="sec:grl"}). Sec. [4](#sec:empirical_evaluation){reference-type="ref" reference="sec:empirical_evaluation"} presents an extensive empirical evaluation along with a discussion of some limitations of our approach. We then provide an account of related work in the area (Sec. [5](#sec:related_work){reference-type="ref" reference="sec:related_work"}) and conclude by summarizing our contributions (Sec. [6](#sec:conclusion){reference-type="ref" reference="sec:conclusion"}).
|
| 16 |
+
|
| 17 |
+
# Method
|
| 18 |
+
|
| 19 |
+
We establish our problem in the context of reinforcement learning for Markov Decision Processes (MDPs). Adapting @DBLP:journals/jair/FernYG06 \[[-@DBLP:journals/jair/FernYG06]\], we represent relational MDPs as follows: Let $D = \langle \mathcal{P}, \mathcal{A} \rangle$ be a problem domain where $\mathcal{P}$ is a set of predicates of arity no greater than 2, and $\mathcal{A}$ is a set of parameterized action names. An MDP for a domain $D$ is a tuple $M = \langle O, D, S, A, T, R, \gamma, s_0\rangle$ where $O$ is a set of objects. A fact is an instantiation of a predicate $p \in \mathcal{P}$ with the appropriate number of objects from $O$. A state $s$ is a set of true facts and the state space $S$ is a finite set consisting of all possible sets of true facts. Similarly, the action space $A$ is composed of all possible instantiations of action names $a \in \mathcal{A}$ with objects from $O$. $T$ is a transition system, implemented by a simulator, that returns a state $s'$ according to some fixed, but *unknown* probability distribution $P(s'|s, a)$ when applying action $a$ in a state $s$. We assume w.l.o.g. that the simulator only returns actions that are executable in a given state and that there is always one such action (which can be easily modeled using a *nop*). $R: S \times A \rightarrow \mathbb{R}$ is a reward function that is also implemented by the simulator. $\gamma$ is the discount factor, and $s_0$ is the initial state.\
|
| 20 |
+
**Example** The Sysadmin domain introduced in the preceding section can be described by predicates running$(c_x)$ and link$(c_x,c_y)$. The possible actions are reboot$(c_x)$, and nop$()$. $c_x$ and $c_y$ are parameters that can be grounded with objects of a specific problem. A state of a problem $M_\emph{eg}$ drawn from Sysadmin$(2)$ with connectivity $K_2$ using computer names $c_0$ and $c_1$ where only $c_0$ is up can be described as $s_\text{eg} = \{ \text{running}(c_0), \text{link}(c_0, c_1), \text{link}(c_1, c_0) \}$. The action space of $M_\emph{eg}$ would consist of actions nop$()$, reboot$(c_0)$, and reboot$(c_1)$, with their dynamics implemented by a simulator.
|
| 21 |
+
|
| 22 |
+
A solution to an MDP is expressed as a deterministic *policy* $\pi: S \rightarrow A$, which is a mapping from states to actions. Let $t$ be any time step, then, given a policy $\pi$, the value of taking action $a$ in a state $s$ is defined as the expected return starting from $s$, executing $a$, observing a reward $r$ and following the policy thereafter [@DBLP:books/lib/SuttonB98].
|
| 23 |
+
|
| 24 |
+
$$\begin{equation*}
|
| 25 |
+
q_\pi(s, a) = \mathbb{E}_\pi \bigg[ \sum_{i=0}^{\infty} \gamma^i r_{t+i+1} \bigg| s_t=s, a_t=a \bigg]
|
| 26 |
+
\end{equation*}$$
|
| 27 |
+
|
| 28 |
+
The optimal action-value function (or Q-function) is defined as the maximum expected return over all policies for every $s \in S$ and every $a \in A$; $q_*(s, a) = \max\limits_\pi q_\pi(s, a)$. It is easy to prove that the optimal Q-function function satisfies the *Bellman* equation (expressed in action-value form): $$\begin{equation*}
|
| 29 |
+
q_*(s, a) = \mathbb{E}_{s'\sim T} \bigg[ r_{t+1} + \gamma \max\limits_{a' \in A} q_*(s', a') \bigg| s_t=s, a_t=a \bigg]
|
| 30 |
+
\end{equation*}$$
|
| 31 |
+
|
| 32 |
+
Reinforcement learning algorithms iteratively improve the Q-function *estimate* $Q(s, a) \approx q_*(s, a)$ by converting the Bellman equation into update rules. Given an observation sequence $(s_t, a_t, r_{t+1}, s_{t+1})$, the update rule for Q-learning [@Watkins:1989] to estimate $Q(s_t, a_t)$ is given by: $$\begin{equation*}
|
| 33 |
+
Q(s_t, a_t) = Q(s_t, a_t)
|
| 34 |
+
+ \alpha \delta_t
|
| 35 |
+
\end{equation*}$$ where $\delta_t = r_{t+1} + \gamma \max\limits_{a' \in A} Q(s_{t+1}, a') - Q(s_t, a_t)$ is the temporal difference, TD$(0)$, error, and $\alpha$ is the learning rate. Q-learning has been shown to converge to the optimal Q-function under certain conditions [@DBLP:books/lib/SuttonB98]. Q-learning is an *off-policy* algorithm and generally uses an $\epsilon$-greedy exploration strategy, selecting a random action with probability $\epsilon$, and following the greedy policy $\pi(s) = \mathop{\mathrm{argmax}}_{a}Q(s, a)$ otherwise.
|
| 36 |
+
|
| 37 |
+
Let $f$ be a feature, we then define a feature kernel $\phi_f(s)$ as a function that maps a state $s$ to a set of objects. We utilize description logic to derive and express feature kernels building upon the recent work by @DBLP:conf/aaai/BonetFG19 \[[-@DBLP:conf/aaai/BonetFG19]\]. This is described in Sec. [3.1](#subsec:abstraction){reference-type="ref" reference="subsec:abstraction"}.
|
| 38 |
+
|
| 39 |
+
Our goal is to compute approximate Q-functions whose induced policies generalize to problem instances with differing object counts in a way that allows RL approaches to find good policies with minimal learning. To do so, we use the sampled state space of a small problem to automatically generate domain-specific relational abstractions that lift problem-specific characteristics like object names and numbers (Sec. [3.1](#subsec:abstraction){reference-type="ref" reference="subsec:abstraction"}). Sec. [3.2](#subsec:deep_learning_for_value_approximation){reference-type="ref" reference="subsec:deep_learning_for_value_approximation"} describes our method of representing these abstractions as input features to a deep neural network. Finally, Sec. [3.3](#sec:grl){reference-type="ref" reference="sec:grl"} expands on how our algorithm, Generalized Reinforcement Learning (GRL), utilizes the deep neural network to learn approximate Q-values of abstract states and use them for transfer learning.
|
| 40 |
+
|
| 41 |
+
One challenge in Q-function approximation is the use of a representation language from which it is possible to extract features that can provide useful information for the decision making process. We now provide a formal description of the general classes of abstraction-based, domain-independent feature synthesis algorithms that we used in this paper.
|
| 42 |
+
|
| 43 |
+
**Description Logics (DL)** are a family of representation languages popularly used in knowledge representation [@DBLP:books/daglib/dlbook]. Building upon many different threads of research in learning and generalization for planning [@DBLP:journals/apin/MartinG04; @DBLP:journals/jair/FernYG06; @DBLP:conf/aaai/BonetFG19; @DBLP:conf/aaai/FrancesBG21], we chose DLs due to the diversity of features they can represent while providing a good balance between expressiveness and tractability.
|
| 44 |
+
|
| 45 |
+
In the relational MDP paradigm, unary predicates $\mathcal{P}_1 \in \mathcal{P}$ and binary predicates $\mathcal{P}_2 \in \mathcal{P}$ of a domain $D$ can be viewed as *primitive* *concepts* $C$ and *roles* $R$ in DL. DL includes constructors for generating *compound* concepts and roles from primitive ones to form expressive languages. Our feature set $F_{\emph{DL}}$ consists of concepts and roles formed by using a reduced set of grammar from @DBLP:conf/aaai/BonetFG19 \[[-@DBLP:conf/aaai/BonetFG19]\]:
|
| 46 |
+
|
| 47 |
+
::: linenomath
|
| 48 |
+
$$\begin{align*}
|
| 49 |
+
C, C' &\rightarrow \mathcal{P}_1 \mid \neg C \mid C \sqcap C' \mid \forall R.C \mid \exists R.C \mid R = R'\\
|
| 50 |
+
R, R' &\rightarrow \mathcal{P}_2 \mid R^{-1}
|
| 51 |
+
\end{align*}$$
|
| 52 |
+
:::
|
| 53 |
+
|
| 54 |
+
where $\mathcal{P}_1$ and $\mathcal{P}_2$ represent the primitive concepts and roles, and $R^{-1}$ represents the inverse. $\forall R.C = \{ x \mid \forall y \text{ } R(x,y) \land C(y) \}$ and $\exists R.C = \{ x \mid \exists y \text{ } R(x,y) \land C(y) \}$. $R = R'$ denotes $\{x \mid \forall y \text{ } R(x, y) = R'(x, y)\}$. We also use $\emph{Distance}(c_1, r, c_2)$ features [@DBLP:conf/aaai/FrancesBG21] that compute the minimum number of $r$-steps between two objects satisfying concepts $c_1$ and $c_2$ respectively.
|
| 55 |
+
|
| 56 |
+
We control the total number of features generated by only considering features up to a certain complexity $k$ (a tunable hyperparameter) that is defined as the total number of grammar rules required to generate a feature.\
|
| 57 |
+
**Example** The primitive concepts and roles of the Sysadmin domain are running$(c_x)$ and link$(c_x, c_y)$ respectively. For the running example $M_\emph{eg}$, a feature $f_\emph{up} \equiv \text{running}(c_x)$ evaluates to the set of objects satisfying it, i.e., $\phi_{f_\emph{up}}(s_\emph{eg}) = \{ c_0 \}$. This feature can be interpreted as tracking the set of computers that are running (or up). It is easy to see that DL features such as ${f_\emph{up}}$ capture relational properties of the state and can be applied to problems with differing object names and quantities.
|
| 58 |
+
|
| 59 |
+
It is clear that when using the DL features defined above, $\phi_f(s)$ evaluates to a set of objects that satisfy $f$. Thus, both $|\phi_f(s)|$ and $o \in \phi_f(s)$ are well-defined. The only exceptions are *Distance* features that directly return numerical values. We simply define $|\phi_f(s)| = \phi_f(s)$ and $\forall o \in O, o \in \phi_f(s) = 0$ for such distance-based features.
|
| 60 |
+
|
| 61 |
+
We used the D2L system [@DBLP:conf/aaai/FrancesBG21] for generating such DL based features. We describe our use of the D2L system in more detail in Sec. [4.2](#subsec:tasks){reference-type="ref" reference="subsec:tasks"}.
|
| 62 |
+
|
| 63 |
+
Given a set of DL features $F$, another key challenge is to identify a subset $F' \subseteq F$ of features that can learn a good approximation of the Q-function. We use deep learning utilizing the entire feature set $F$ for Q-value estimation.
|
| 64 |
+
|
| 65 |
+
Given a feature set $F$ and a domain $D$, the input to our network is a vector of size $|F| + |\mathcal{A}| +N \times |F|$ where $|\mathcal{A}|$ is the total number of actions in the domain, and $N$ is the maximum number of parameters of any action in $\mathcal{A}$.
|
| 66 |
+
|
| 67 |
+
<figure id="fig:network_architecture" data-latex-placement="ht">
|
| 68 |
+
<embed src="images/network_architecture.drawio.pdf" />
|
| 69 |
+
<figcaption>Our process for estimating Q-values.</figcaption>
|
| 70 |
+
</figure>
|
| 71 |
+
|
| 72 |
+
Given a concrete state $s$, the *abstract state feature vector* is defined as $\overline{s} = \langle |\phi_{f_1}(s)|, \ldots, |\phi_{f_n}(s)| \rangle$ for all features $f_i \in F$. Similarly, given a parameterized action $a(o_1, \ldots, o_n)$, the *abstract action feature vector* is defined as a vector $\overline{a} = \langle A_\emph{name} | F_{o_1} | \ldots | F_{o_N} \rangle$ where $A_\emph{name}$ is a one-hot vector of length $|\mathcal{A}|$ encoding the action name $a$, $F_{o_i}$ is a vector of length $|F|$ encoded with values $1_{[o_i \in \phi_{f_j}(s)]}$ for every feature $f_j \in F$, and $|$ represents vector concatenation. Together, $\overline{s}$ and $\overline{a}$ comprise the input to the network.\
|
| 73 |
+
**Example** Let $f_\emph{up} \equiv \text{running}(c_x)$ and $F_\emph{DL} = \{ f_\emph{up} \}$ for the Sysadmin domain. Then, the abstract state vector $\overline{s_\emph{eg}}$ for the concrete state $s_\emph{eg}$ of the running example would be $\langle 1 \rangle$ and it indicates an abstract state with one computer running. The same vector would be generated for a state where computer $c_1$ was running instead of $c_0$. Assuming actions are indexed in alphabetical order, nop$()$ would be encoded as $\langle 1, 0 | 0 \rangle$. Similarly, reboot$(c_0)$ would be encoded as $\langle 0, 1 | 1 \rangle$.
|
| 74 |
+
|
| 75 |
+
Fig. [1](#fig:network_architecture){reference-type="ref" reference="fig:network_architecture"} illustrates our process for estimating the Q-values. Given a concrete state $s$ and action $a$, our network (that we call $Q_\emph{GRL}$) predicts the estimated Q-value $Q_\emph{GRL}(\overline{s}, \overline{a}) \approx q_*(s, a)$ by converting $s$ and $a$ to abstract state $\overline{s}$ and action $\overline{a}$ feature vectors based on the feature set $F$. Since the dimensionality of the abstract state and action vectors are fixed and do not depend upon a specific problem instance, the same network can be used to predict Q-values for states and actions across problem instances. This is the key insight into our method for transfer.
|
| 76 |
+
|
| 77 |
+
The abstract state captures high-level information about the state structure, whereas the abstract action captures the membership of the instantiated objects in the state, allowing our network to learn a generalized, relational Q-function that can be transferred across different problem instances.
|
| 78 |
+
|
| 79 |
+
Intuitively, Alg. [\[alg:grl\]](#alg:grl){reference-type="ref" reference="alg:grl"} presents our approach for Generalized Reinforcement Learning (GRL). For a given MDP $M$, an initial $Q_\emph{GRL}$ network, and a set of features $F$, GRL works as follows: Lines $1-5$ transfer knowledge from the $Q_\emph{GRL}$ network by converting every concrete state $s$ and action $a$ to abstract state $\overline{s}$ and abstract action $\overline{a}$ vectors using the approach in Sec. [3.2](#subsec:deep_learning_for_value_approximation){reference-type="ref" reference="subsec:deep_learning_for_value_approximation"}. Next, every concrete Q-table entry $Q(s, a)$ is initialized with the predicted value $Q_\emph{GRL}(\overline{s}, \overline{a})$. Note that the Q-tables for different problems $M' \neq M$ are different since their state and action spaces are different; however, $\overline{s}$ and $\overline{a}$ are fixed-sized vector representations of any state and action in these problems. This allows $Q_\emph{GRL}$ to transfer knowledge to any problem $M$ with any number of objects.
|
| 80 |
+
|
| 81 |
+
Lines $9-12$ do Q-learning on the problem $M$ to improve the bootstrapped policy further. Lines $13-16$ further improve the generalization capabilities of $Q_\emph{GRL}$ by incorporating any policy changes that were observed while doing Q-learning on $M$. GRL returns the task specific policy $Q$ and the updated generalized policy $Q_\emph{GRL}$.
|
| 82 |
+
|
| 83 |
+
**Optimization** Lines $2-5$ can be intractable for large problems. We optimized transfer by only initializing entries in a lazy evaluation fashion. An added benefit is that updates to $Q_\emph{GRL}$ for any abstract state can be easily reflected when encountering a new state that maps to the same abstract state.
|
| 84 |
+
|
| 85 |
+
::: theorem
|
| 86 |
+
**Theorem 1**. *Solving problem M using GRL converges under standard conditions of convergence for Q-learning.*
|
| 87 |
+
:::
|
| 88 |
+
|
| 89 |
+
::: proof
|
| 90 |
+
*Proof (Sketch).* The proof is based on the following intuition. $Q_{\emph{GRL}}$ is used to initialize every $Q(s, a)$ entry of $M$ exactly once after which Q-learning operates as normal. The rest of the proof follows from the proof of convergence for Q-learning [@DBLP:books/lib/SuttonB98]. ◻
|
| 91 |
+
:::
|
| 92 |
+
|
| 93 |
+
:::: algorithm
|
| 94 |
+
::: algorithmic
|
| 95 |
+
MDP $M$, GRL network $Q_{\emph{GRL}}$, features $F$,\
|
| 96 |
+
epsilon $\epsilon$, learning rate $\alpha$
|
| 97 |
+
|
| 98 |
+
$Q \gets$ initializeEmptyTable$()$ $\overline{s}, \overline{a} \gets \text{abstraction}(F, s, a)$ $Q(s, a) = Q_\emph{GRL}(\overline{s}, \overline{a})$ $\mathbb{B} \gets \emptyset$ $s \gets s_0$
|
| 99 |
+
|
| 100 |
+
$a \gets$ getEpsilonGreedyAction$(s)$ $s', r \gets$ executeAction$(s, a)$ $\delta = r + \gamma \max\limits_{a' \in A} Q(s', a') - Q(s, a)$ $Q(s, a) = Q(s, a) + \alpha \delta$ $\overline{s}, \overline{a} \gets \text{abstraction}(F, s, a)$ Append $(\overline{s}, \overline{a}, Q(s, a))$ to $\mathbb{B}$
|
| 101 |
+
|
| 102 |
+
Sample a mini-batch $B$ from $\mathbb{B}$ Train $Q_{\emph{GRL}}$ using $B$ $s \gets s'$
|
| 103 |
+
|
| 104 |
+
$Q, Q_{\emph{GRL}}$
|
| 105 |
+
:::
|
| 106 |
+
::::
|
| 107 |
+
|
| 108 |
+
Transfer capabilities can often be improved if the training strategy uses a curriculum that organizes the tasks presented to the learner in increasing order of difficulty [@DBLP:conf/icml/BengioLCW09]. However, the burden of segregating tasks in order of difficulty often falls upon a domain expert.
|
| 109 |
+
|
| 110 |
+
We adopt *leapfrogging* [@DBLP:conf/aips/GroshevGTSA18; @DBLP:conf/aaai/KariaS21], an approach that follows the "learning-from-small-examples\" paradigm by using a problem generator to automatically create a curriculum for learning. Leapfrogging is an iterative process for speeding up learning when used in conjunction with a transfer learning algorithm such as GRL. Leapfrogging is analogous to a loose curriculum, enabling self-supervised training in contrast to curriculum learning that does not enable automatic self-training.
|
| 111 |
+
|
| 112 |
+
Leapfrogging operates by initially generating a small problem $M_\emph{small}$ that can be easily solved by vanilla Q-learning without any transfer. It applies GRL (Alg. [\[alg:grl\]](#alg:grl){reference-type="ref" reference="alg:grl"}) to this problem using an uninitialized $Q_{\emph{GRL}}$ network. Once this problem is solved, leapfrogging generates a slightly larger problem and invokes GRL again. The $Q_{\emph{GRL}}$ network from the previous iteration allows GRL to utilize knowledge transfer to solve this new problem relatively quickly while also improving the generalization capabilities of the next generation $Q_{\emph{GRL}}$ network.
|
| 113 |
+
|
| 114 |
+
We performed an empirical evaluation on four different tasks and our results show that GRL outperforms the baseline in zero-shot transfer performance. We also show that GRL is competitive with approaches receiving additional information like closed-form action models. We now describe the evaluation methodology used for assessing these hypotheses.
|
| 115 |
+
|
| 116 |
+
<figure id="fig:results" data-latex-placement="ht">
|
| 117 |
+
<embed src="images/results_ijcai-22.pdf" />
|
| 118 |
+
<figcaption>Zero-shot transfer performance of GRL (higher values better) compared to MBRRL. The problem number refers to the instance number in the IPPC problem collection. We interpolate the data between any two epochs. We also compare with SymNet, an approach that needs closed-form action models, and thus, is not applicable in our setting. As such, we plot SymNet results as greyed out. We assume minimum reward is obtained when the Q-values are set to NaN as was the case for some problems in Academic Advising for MBRRL. For Academic Advising, we report instances 5, 7 and 9 since instances 6 and 10 contained settings that were incompatible with our system.</figcaption>
|
| 119 |
+
</figure>
|
| 120 |
+
|
| 121 |
+
We ran our experiments utilizing a single core and 16 GiB of memory on an Intel Xeon E5-2680 v4 CPU containing 28 cores and 128 GiB of RAM.
|
| 122 |
+
|
| 123 |
+
We used the network architecture from Fig. [1](#fig:network_architecture){reference-type="ref" reference="fig:network_architecture"} for all of our experiments. Our system is implemented in Python and we used PyTorch [@NEURIPS2019_9015] with default implementations of mean squared error (MSE) as the loss function and Adam [@DBLP:journals/corr/KingmaB14] as the optimization algorithm for training each domain-specific $Q_{\emph{GRL}}$ network.
|
| 124 |
+
|
| 125 |
+
Our system uses RDDLsim as the simulator, and thus, accepts problems written in a subset of the Relational Dynamic Influence Definition Language (RDDL) [@Sanner:RDDL].
|
| 126 |
+
|
| 127 |
+
As our baseline, we compare our approach with a first-order Q-function approximation-based approach for transfer; MBRRL [@DBLP:conf/aips/NgP21]. We also compare our approach with SymNet [@DBLP:conf/icml/GargBM20], an approach that requires closed-form action models, information that is unavailable to MBRRL and GRL in our setting.[^1]
|
| 128 |
+
|
| 129 |
+
MBRRL computes first-order abstractions using conjunctive sets of features and learns a linear first-order approximation of the Q-function over these features. They employ "mixed approximation\" wherein both the concrete $Q(s, a)$ values as well as the approximated Q-values are used to select actions for the policy.
|
| 130 |
+
|
| 131 |
+
SymNet uses a Graph Neural Network (GNN) representation of a parsed Dynamic Bayes Network (DBN) for a problem. SymNet thus has access to additional domain knowledge in the form of closed-form action models, and as a result, it is not directly applicable in the RL setting that we consider. Nevertheless, it can serve as a good indicator of the transfer capabilities of GRL that does not need such closed-form representations of action models. We also tried modifying TraPSNet [@DBLP:conf/aips/GargBM19], a precursor of SymNet that does not require action-models, but could not run it due to the limited support for the domains we considered.
|
| 132 |
+
|
| 133 |
+
We consider tasks used in the International Probabilistic Planning Competition (IPPC) [@Sanner:IPPC], which have been used by SymNet and MBRRL as benchmarks for evaluating transfer performance.
|
| 134 |
+
|
| 135 |
+
Sysadmin: SYS$(n)$ is the Sysadmin domain that was described earlier in the paper. Academic Advising: AA$(l, c, p)$ is a domain where the objective is to pass a certain number of levels $l$ containing $c$ courses each of which need $p$ pre-requisites to be passed first. Game of Life: GoL$(x, y)$ is John Conway's Game of Life environment on a grid of size $x \times y$ [@DBLP:journals/scholarpedia/IzhikevichCS15]. Wildfire: WF$(x, y)$ is an environment set in a grid of size $x \times y$. Cells have a chance to burn which also influences their neighbor's probability of burning.
|
| 136 |
+
|
| 137 |
+
The original versions of Game of Life and Wildfire contain 4-ary predicates that we converted to an equivalent binary version by converting predicates like *neighbor*$(x_1,y_1,x_1,y_2)$ to *neighbor*$(l_{11},l_{12})$ for use in our evaluation.
|
| 138 |
+
|
| 139 |
+
**Training** For SymNet, we utilized the same problems (IPPC instances 1, 2, and 3) for training as published by the authors. We trained each problem for 1250 episodes. For MBRRL, we utilized the same training procedure as that of the authors wherein we used IPPC instance #3 for training. We trained each problem for 3750 episodes using Q-learning with an initial $\epsilon = 1$ and a decay rate of $0.997$.
|
| 140 |
+
|
| 141 |
+
For our leapfrogging approach with GRL, we used a problem generator to generate the problems for training. We used SYS$(3)$, SYS$(4)$, SYS$(6)$; AA$(2, 2, 2)$, AA$(3, 3, 3)$, AA$(4, 4, 4)$; GoL$(2, 2)$, GoL$(3, 3)$, GoL$(4, 4)$, and WF$(2, 2)$, WF$(3, 3)$, WF$(4, 4)$ for training Sysadmin, Academic Advising, Game of Life, and Wildfire respectively. We trained each problem for 1250 episodes using GRL.
|
| 142 |
+
|
| 143 |
+
**Testing** We used the same set of problems as SymNet; instances $5-10$ from the IPPC problem collection. The state spaces of these problems are much larger than the training problems used by GRL. For example, the state space size of SYS$(n)$ is $2^n$. For training, the largest problem used by GRL was SYS$(6)$, whereas the test problem, instance 10 (Sysadmin #10) of the IPPC is SYS$(50)$. Due to space limitations, we report results obtained on instances 5, 6, and 10 and include the others in the supplemental information.
|
| 144 |
+
|
| 145 |
+
**Hyperparameters** We used the IPPC horizon $H$ of 40 timesteps after which the simulator was reset to the initial state. For GRL, we used Q-learning with $\epsilon = 0.1$. To train a $Q_{\emph{GRL}}$ network, we used a replay buffer of size $20000$, a mini-batch size of 32, and a training interval of 32 timesteps with 25 steps of optimization per interval. For MBRRL and GRL, we used $\gamma = 0.9$ and $\alpha = 0.05$ for Sysadmin and Game of Life domains as they provide positive rewards. For Academic Advising and Wildfire, we used $\gamma = 1.0$ and $\alpha = 0.3$.
|
| 146 |
+
|
| 147 |
+
For MBRRL and SymNet, we used the default values of all other settings like network architecture, feature discovery threshold etc. that were published by the authors.
|
| 148 |
+
|
| 149 |
+
**Evaluation Metric** To showcase the efficacy of transfer learning, our evaluation metric compares the performance of MBRRL and our approach after zero-shot transfer. We freeze the policy after training, transfer it to the test instances and run it greedily for 100 episodes. We report our results using mean and standard deviation metrics computed using 10 individual runs of training and testing.
|
| 150 |
+
|
| 151 |
+
**Feature Generation** We used the D2L system [@DBLP:conf/aaai/FrancesBG21], that *requires* a sampled state space as input, for generating description logic features. We modified their code to not require action models and set a complexity bound of $k=5$ for feature generation. We sampled the state space from the first problem used by GRL for training per domain and provided it to D2L for generating the feature set $F_\emph{DL}$.
|
| 152 |
+
|
| 153 |
+
Our results are shown in Fig. [2](#fig:results){reference-type="ref" reference="fig:results"}. From the results it is easy to see that GRL has excellent zero-shot transfer capabilities and can easily outperform or remain competitive with both MBRRL and SymNet. We now present our analysis followed by a brief discussion of some limitations and future work.
|
| 154 |
+
|
| 155 |
+
**Comparison with MBRRL** Our approach is able to outperform MBRRL significantly on Sysadmin, Academic Advising and Wildfire. The DL abstractions used by the GRL features are more expressive than the conjunctive first-order features used by MBRRL, allowing GRL to learn more expressive policies. Additionally, leapfrogging allows scaling up training and learning of better policies in the same number of episodes in contrast to using a fixed instance for training.
|
| 156 |
+
|
| 157 |
+
**Comparison with SymNet** SymNet utilizes significant domain knowledge. Edges are added between two nodes if an action affects them. This information is unavailable when just observing states as sets of predicates. It is impressive that despite not using such knowledge, GRL is able to remain competitive with SymNet in most of the problems.
|
2204.13516/main_diagram/main_diagram.drawio
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
<mxfile host="app.diagrams.net" modified="2021-10-11T12:48:56.141Z" agent="5.0 (Macintosh)" etag="EnPmXSV3jqEtFQnlQN5n" version="15.3.8" type="device"><diagram id="SBC1E5kvOKq1K5xZTNfM" name="Page-1">7Vhbb9owFP41SNvDqtyg7SPQtEXaaAWol0cTG2LViZljbvv1O0nsJE5CS7dW6rTygv35+Pqdz+c4HXcY7a4EWoU/OCas41h413EvOo5je04P/lJknyOnGlgKipVRCUzpL6JAS6FrikliGErOmaQrEwx4HJNAGhgSgm9NswVn5qwrtFQzWiUwDRAjDbN7imWYo2fdivU1octQz2xbqiVC2lgBSYgw31Yg1++4Q8G5zEvRbkhYenj6XPJ+lwdai4UJEstjOszHG5eg8cC7HFy77G64602335x8lA1ia7XhjtNjMN4A0026aLlXJ9H7uU5XOljwWH5LMp76YGBbK+B6ULZDaan+mbb/q4Em/Xs9FmwuH86cAuBstSb6YTYw8x9mr9yBY0zogOus0mLMJfwNtiGVZLpCQYptQXeAhTJiULOhOEfB01LwdYxv1pLRmCgcI/F0A72oTLVonVhdABMp+FPh2q7a1lTNbel6rkvbKlZX9TvlihsiJNlVIOWHV4RHRIo9mKjWcyUJdSeAnPL6tlTYqTIJK+LS3ZDS9LIYuHR7KCjPf4UK7NOGDPzxbDQb+VPN0Fxodr7cj2bXYDx7vPWnXxtcwXRwNx1BE0pW+YW1oDuC26mgjA054wLqmCzQOltKbqfxjuNa2U9RdYkiytKDnaGQR+h9CHS8GoOamgqDZy0Mnr0bg+cNBif+9/5sdDOe/p8UedYHo0jHwXaKrKqq/k/Gur2PxpjdYMwIrq1xcpFTU4bBOyIwitHhADvXwNifwDp/XDTv3EqcnLdgfxBSSYz7aXoItYChJKGB6UhAo9g/qBCYVR6zoNnV1YtdtfFir2oH4qnyFA/qqUNQSDD7jC5jAOdcSh5BA0Nzwm55QiXlaUMAIxNR6fG9ZiB56v1IjVOYh1zQXzAp0nvJBh4UWYF28pinqcFz/l1kAYecO+Froeh+Nr+USCyJPCICE6zT8QNa2ZvPg4o0ui3S0JggDEm6MXP6Nr2oCW85zZxZhzvHVKbn1iSXn4PqVc3BawN5tbjp1rWbH9QzA2lDvlgkxLDJJF6c1F+ovte8p0ef+nmlfvI+XGAiWuy1sloecfZHFoJbE4LrnJ90/1AK1otDvSiGfes23lEazXfCZPgpjU9ppP589mbSqCeC/4I03GZ23xCG/owRrAXbDwR4TZoTvJTSZ46V5vJZbQEPgWtdfjEz957N7I99GwgukdLF+Rvl+m63a1Bstzir0+asp++V7LuHPwUWKffEb0/EazzDGUiTQ5MDrfbKYSuoceHUb7SIYswOvQNLTznu3ea9EZeeyaXXwqXXwqXzeiqhWn4wzqVbfnZ3/d8=</diagram></mxfile>
|
2204.13516/main_diagram/main_diagram.pdf
ADDED
|
Binary file (20.9 kB). View file
|
|
|
2204.13516/paper_text/intro_method.md
ADDED
|
@@ -0,0 +1,161 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Introduction
|
| 2 |
+
|
| 3 |
+
Information Extraction (IE) is a key step in Natural Language Processing (NLP) to extract information, which is useful for question answering and knowledge base population, for example. Relation Extraction (RE) is a specific case of IE [@grishman2012information] with the focus on the identification of semantic relations between entities (see Figure [1](#fig:sample-re){reference-type="ref" reference="fig:sample-re"}). The aim of the most typical RE setup is the extraction of informative triples from texts. Given a sequence of tokens $[t_0, t_1 ..., t_n]$ and two entities (spans), $s_A = [t_i, \ldots, t_j]$ and $s_B = [t_u, \ldots, t_v]$, RE triples are in the form $(s_A, s_B, r)$, where $r \in R$ and $R$ is a pre-defined set of relation labels. Because of the directionality of the relations, $(s_B, s_A, r)$ represents a different triple.
|
| 4 |
+
|
| 5 |
+
We survey existing RE datasets---outside the biomedical domain---with an additional focus on the task definition.[^1] Existing RE surveys mainly focus on modeling techniques [@bach2007review; @pawar2017relation; @aydar2021neural; @liu_survey_2020]. To the best of our knowledge, we are the first to give a comprehensive overview of available RE datasets. We also revisit RE papers from the ACL community over the last five years, to identify what part(s) of the task definition recent work focuses on. As it turns out, this is often not easy to determine, which makes fair evaluation difficult. We aim to shed light on such assumptions.[^2]
|
| 6 |
+
|
| 7 |
+
<figure id="fig:sample-re">
|
| 8 |
+
|
| 9 |
+
<figcaption><span id="fig:sample-re" data-label="fig:sample-re"></span>RE annotation sample. The sentence contains two annotated spans denoting two entities, with respective types <code>METHOD</code> and <code>TASK</code>, and a semantic relation between them labeled as <code>USED-FOR</code>.</figcaption>
|
| 10 |
+
</figure>
|
| 11 |
+
|
| 12 |
+
Moreover, recent work in NLP has shown that single test splits and in-distribution evaluation overestimate generalization performance, arguing for the use of multiple test sets or split evaluation [@gorman-bedrick-2019-need; @sogaard-etal-2021-need]. While this direction has started to be followed by other NLP tasks [@petrov2012overview; @pradhan-etal-2013-towards; @williams-etal-2018-broad; @yu-etal-2019-sparc; @zhu-etal-2020-crosswoz; @liu2021crossner], for RE *cross-dataset* and *cross-domain* evaluation have received little attention. We explore this direction in the scientific domain and propose to study the possible presence of distinctive *sub-domains* [@lippincott-etal-2010-exploring]. Sub-domains are differences between subsets of a domain that may be expected to behave homogeneously. Using two scientific datasets, we study to what degree: (*a*) they contain overlapping data; (*b*) their annotations differ; and (*c*) sub-domains impact Relation Classification (RC)---the task of classifying the relation type held between a pair of entities (details in Section [3](#sec:the-task){reference-type="ref" reference="sec:the-task"}).
|
| 13 |
+
|
| 14 |
+
The contributions of this paper are:
|
| 15 |
+
|
| 16 |
+
- To the best of our knowledge, we are the first to provide a comprehensive survey on currently available RE datasets.
|
| 17 |
+
|
| 18 |
+
- We define RE considering its modularity. We analyze previous works and find unclarity in setups; we call for more rigour in specifying which RE sub-part(s) are tackled.
|
| 19 |
+
|
| 20 |
+
- We provide a case study on Relation Classification in the scientific domain, to fill a gap on cross-domain and cross-dataset evaluation.
|
| 21 |
+
|
| 22 |
+
RE has been broadly studied in the last decades and many datasets were published. We survey widely used RE datasets in chronological order, and broadly classify them into three domains based on the data source: (1) news and web, (2) scientific publications and (3) Wikipedia. An overview of the datasets is given in Table [\[tab:relatedworks\]](#tab:relatedworks){reference-type="ref" reference="tab:relatedworks"}. Our empirical target here focuses on the scientific domain as so far it has received no attention in the cross-domain direction; a similar investigation on overlaps in data, annotation, and model transferability between datasets in other domains is interesting future work.
|
| 23 |
+
|
| 24 |
+
:::: table*
|
| 25 |
+
::: threeparttable
|
| 26 |
+
+----------------------+---------------------------------+-------------------------------------+-----------------------+
|
| 27 |
+
| **Dataset** | **Paper** | **Data Source** | **\# Relation Types** |
|
| 28 |
+
+:=====================+:================================+:====================================+======================:+
|
| 29 |
+
| **News and Web** | |
|
| 30 |
+
+----------------------+---------------------------------+-------------------------------------+-----------------------+
|
| 31 |
+
| CoNLL04 | @roth-yih-2004-linear | News articles | 5 |
|
| 32 |
+
+----------------------+---------------------------------+-------------------------------------+-----------------------+
|
| 33 |
+
| ACE | @doddington-etal-2004-automatic | News and conversations | 24 |
|
| 34 |
+
+----------------------+---------------------------------+-------------------------------------+-----------------------+
|
| 35 |
+
| NYT | @NYT | New York Times articles | 24-57 |
|
| 36 |
+
+----------------------+---------------------------------+-------------------------------------+-----------------------+
|
| 37 |
+
| SemEval-2007 | @girju-etal-2007-semeval | Sentences from the web | 7 |
|
| 38 |
+
+----------------------+---------------------------------+-------------------------------------+-----------------------+
|
| 39 |
+
| SemEval-2010 | @hendrickx-etal-2010-semeval | Sentences from the web | 10 |
|
| 40 |
+
+----------------------+---------------------------------+-------------------------------------+-----------------------+
|
| 41 |
+
| TACRED | @zhang-etal-2017-position | Newswire and web text | 42 |
|
| 42 |
+
+----------------------+---------------------------------+-------------------------------------+-----------------------+
|
| 43 |
+
| FSL TACRED | @few-shot-tacred | TACRED data | 42 |
|
| 44 |
+
+----------------------+---------------------------------+-------------------------------------+-----------------------+
|
| 45 |
+
| DWIE | @ZAPOROJETS2021102563 | Deutsche Welle articles | 65 |
|
| 46 |
+
+----------------------+---------------------------------+-------------------------------------+-----------------------+
|
| 47 |
+
| **Scientific publications** | |
|
| 48 |
+
+----------------------+---------------------------------+-------------------------------------+-----------------------+
|
| 49 |
+
| ScienceIE | @augenstein-etal-2017-semeval | Scientific articles | 2 |
|
| 50 |
+
+----------------------+---------------------------------+-------------------------------------+-----------------------+
|
| 51 |
+
| SemEval-2018 | @gabor-etal-2018-semeval | NLP abstracts | 6 |
|
| 52 |
+
+----------------------+---------------------------------+-------------------------------------+-----------------------+
|
| 53 |
+
| [SciERC]{.smallcaps} | @luan-etal-2018-multi | Abstracts of AI proceedings | 7 |
|
| 54 |
+
+----------------------+---------------------------------+-------------------------------------+-----------------------+
|
| 55 |
+
| **Wikipedia** | |
|
| 56 |
+
+----------------------+---------------------------------+-------------------------------------+-----------------------+
|
| 57 |
+
| GoogleRE | \- | Wikipedia | 5 |
|
| 58 |
+
+----------------------+---------------------------------+-------------------------------------+-----------------------+
|
| 59 |
+
| mLAMA | @kassner-etal-2021-multilingual | GoogleRE data | 5 |
|
| 60 |
+
+----------------------+---------------------------------+-------------------------------------+-----------------------+
|
| 61 |
+
| FewRel | @han-etal-2018-fewrel | Wikipedia | 100 |
|
| 62 |
+
+----------------------+---------------------------------+-------------------------------------+-----------------------+
|
| 63 |
+
| FewRel 2.0 | @gao-etal-2019-fewrel | FewRel data + Biomedical literature | 100 + 25 |
|
| 64 |
+
+----------------------+---------------------------------+-------------------------------------+-----------------------+
|
| 65 |
+
| DocRED | @yao-etal-2019-docred | Wikipedia and Wikidata | 96 |
|
| 66 |
+
+----------------------+---------------------------------+-------------------------------------+-----------------------+
|
| 67 |
+
| SMiLER | @seganti-etal-2021-multilingual | Wikipedia | 36 |
|
| 68 |
+
+----------------------+---------------------------------+-------------------------------------+-----------------------+
|
| 69 |
+
:::
|
| 70 |
+
::::
|
| 71 |
+
|
| 72 |
+
The CoNLL 2004 dataset [@roth-yih-2004-linear] is one of the first works. It contains annotations for named entities and relations in news articles. In the same year, the widely studied ACE dataset was published by @doddington-etal-2004-automatic. It contains annotated entities, relations and events in broadcast transcripts, newswire and newspaper data in English, Chinese and Arabic. The corpus is divided into six domains.
|
| 73 |
+
|
| 74 |
+
Another widely used dataset is The New York Times (NYT) Annotated Corpus,[^3] first presented by @NYT. It contains over 1.8 million articles by the NYT between 1987 and 2007. NYT has been created with a distant supervision approach [@mintz-etal-2009-distant], using Freebase [@bollacker2008freebase] as knowledge base. Two further versions of it followed recently: @zhu-etal-2020-towards-understanding (NYT-H) and @jia-etal-2019-arnor published manually annotated versions of the test set in order to perform a more accurate evaluation.
|
| 75 |
+
|
| 76 |
+
RE has also been part of the SemEval shared tasks for four times so far. The two early SemEval shared tasks focused on the identification of semantic relations between nominals [@nastase2021semantic]. For SemEval-2007 Task 4, @girju-etal-2007-semeval released a dataset for RC into seven generic semantic relations between nominals. Three years later, for SemEval-2010 Task 8, @hendrickx-etal-2010-semeval revised the annotation guidelines and published a corpus for RC, by providing a much larger dataset (10k instances, in comparison to 1.5k of the 2007 shared task).
|
| 77 |
+
|
| 78 |
+
Since 2017, three RE datasets in the scientific domain emerged, two of the three as SemEval shared tasks. In SemEval-2017 Task 10 @augenstein-etal-2017-semeval proposed a dataset for the identification of keyphrases and considered two generic relations (`HYPONYM-OF` and `SYNONYM-OF`). The dataset is called ScienceIE and consists of 500 journal articles from the Computer Science, Material Sciences and Physics fields. The year after, @gabor-etal-2018-semeval proposed a corpus for RC and RE made of abstracts of scientific papers from the ACL Anthology for SemEval-2018 Task 7. The data will be described in further detail in Section [4.1](#datasets){reference-type="ref" reference="datasets"}. Following the same line, @luan-etal-2018-multi published [SciERC]{.smallcaps}, which is a scientific RE dataset further annotated for coreference resolution. It contains abstracts from scientific AI-related conferences. From the existing three scientific RE datasets summarized in Table [\[tab:relatedworks\]](#tab:relatedworks){reference-type="ref" reference="tab:relatedworks"}, in our empirical investigation we focus on two (SemEval-2018 and [SciERC]{.smallcaps}). We leave out ScienceIE as it focuses on keyphrase extraction and it contains two generic relations only.
|
| 79 |
+
|
| 80 |
+
The Wikipedia domain has been first introduced in 2013. Google released GoogleRE,[^4] a RE corpus consisting of snippets from Wikipedia. More recently, @kassner-etal-2021-multilingual proposed mLAMA, a multilingual version (53 languages) of GoogleRE with the purpose of investigating knowledge in pre-trained language models. The multi-lingual dimension is gaining more interest for RE. Following this trend, @seganti-etal-2021-multilingual presented SMiLER, a multilingual dataset (14 languages) from Wikipedia with relations belonging to nine domains.
|
| 81 |
+
|
| 82 |
+
Previous datasets were restricted to the same label collection in the training set and in the test set. To address this gap and make RE experimental scenarios more realistic, @han-etal-2018-fewrel published FewRel, a Wikipedia-based few-shot learning (FSL) RC dataset annotated by crowdworkers. One year later, @gao-etal-2019-fewrel published a new version (FewRel 2.0), adding a new test set in the biomedical domain and the `None-Of-The-Above` relation (cf. Section [3](#sec:the-task){reference-type="ref" reference="sec:the-task"}).
|
| 83 |
+
|
| 84 |
+
Back to the news domain, @zhang-etal-2017-position published a large-scale RE dataset built over newswire and web text, by crowdsourcing relation annotations for sentences with named entity pairs. This resulted in the TACRED dataset with over 100k instances, which is particularly well-suited for neural models. @few-shot-tacred used TACRED to make a FSL RC dataset and compared it to FewRel 1.0 and FewRel 2.0, aiming at a more realistic scenario (i.e., non-uniform label distribution, inclusion of pronouns and common nouns).
|
| 85 |
+
|
| 86 |
+
All datasets so far present a sentence level annotation. To address this, @yao-etal-2019-docred published DocRED, a document-level RE dataset from Wikipedia and Wikidata. The difference with a traditional sentence-level corpus is that both the intra- and inter-sentence relations are annotated, increasing the challenge level. In addition to RE, DocRED annotates coreference chains. DWIE by @ZAPOROJETS2021102563 is another document-level dataset, specifically designed for multi-task IE (Named Entity Recognition, Coreference Resolution, Relation Extraction, and Entity Linking).
|
| 87 |
+
|
| 88 |
+
Lastly, there are works focusing on creating datasets for specific RE aspects. @cheng-etal-2021-hacred, for example, proposed a Chinese document-level RE dataset for *hard cases* in order to move towards even more challenging evaluation setups.
|
| 89 |
+
|
| 90 |
+
Given our analysis, we observe a shift in target domains: from news text in seminal works, over web texts, to emerging corpora in the scientific domain and the most recent focus on Wikipedia. Similarly, we observe the emerging trend for FSL.
|
| 91 |
+
|
| 92 |
+
Different datasets lend themselves to study different aspects of the task. Concerning cross-domain RE, we propose to distinguish three setups:
|
| 93 |
+
|
| 94 |
+
1. Data from different domains, but same relation types, which are general enough to be present in each domain (limited and often confined to the ACE dataset) [e.g., @plank-moschitti-2013-embedding].
|
| 95 |
+
|
| 96 |
+
2. Stable data domain, but different relation sets [e.g., FewRel by @han-etal-2018-fewrel]. Note that when labels change, approaches such as FSL must be adopted.
|
| 97 |
+
|
| 98 |
+
3. A combination of both: The data changes and so do the relation types [e.g., FewRel 2.0 by @gao-etal-2019-fewrel].
|
| 99 |
+
|
| 100 |
+
In the case study of this paper, given the scientific datasets available, we focus on the first setup.
|
| 101 |
+
|
| 102 |
+
Conceptually, RE involves a pipeline of steps (see Figure [2](#fig:RE-task){reference-type="ref" reference="fig:RE-task"}). Starting from the raw text, the first step consists in identifying the entities and eventually assigning them a type. Entities involve either nominals or named entities, and hence it is either Named Entity Recognition (NER) or, more broadly, Mention Detection (MD).[^5] After entities are identified, approaches start to be more blurry as studies have approached RE via different angles.
|
| 103 |
+
|
| 104 |
+
One way is to take two steps, Relation Identification (RI) and subsequent Relation Classification (RC) [@ye-etal-2019-exploiting], as illustrated in Figure [2](#fig:RE-task){reference-type="ref" reference="fig:RE-task"}. This means to first identify from all the possible entity pairs the ones which are in some kind of relation via a binary classification task (RI). As the proportion of positive samples over the negative is usually extremely unbalanced towards the latter [@gormley-etal-2015-improved], a priori heuristics are generally applied to reduce the possible combinations (e.g., entity pairs involving distant entities, or entity type pairs not licensed by the relations are not even considered). The last step (RC) is usually a multi-class classification to assign a relation type $r$ to the positive samples from the previous step. Some studies merge RI and RC [@seganti-etal-2021-multilingual] into one step, by adding a `no-relation` (`no-rel`) label. Other studies instead reduce the task to RC, and assume there exists a relation between two entities and the task is to determine the type (without a `no-rel` label). Regardless, RI is influenced by the RC setup: Relations which are not in the RC label set are considered as negative samples in the RI phase. Some studies address this approximation by distinguishing between the `no-rel` and the `None-Of-The-Above` (`NOTA`) relation [@gao-etal-2019-fewrel]. Note that, in our definition, the `NOTA` label differs from `no-rel` in the sense that a relation holds between the two entities, but its type is not in the considered RC label set.[^6]
|
| 105 |
+
|
| 106 |
+
{#fig:RE-task width="\\columnwidth"}
|
| 107 |
+
|
| 108 |
+
RE studies rarely address the whole pipeline. We analyze all the ACL papers published in the last five years which contain the *Relation Extraction* keyword in the title and determine which sub-task is performed (NER/MD, RI, RC). Table [\[tab:ACL-RE-history\]](#tab:ACL-RE-history){reference-type="ref" reference="tab:ACL-RE-history"} shows such investigation. We leave out from this analysis (*a*) papers which make use of distant supervision or which somehow involve knowledge bases, (*b*) shared task papers, (*c*) the bioNLP field, (*d*) temporal RE, and (*e*) Open RE.
|
| 109 |
+
|
| 110 |
+
The result shows that gold entities are usually assumed for RE, presumably given the complexity of the NER/MD task on its own. Most importantly, for end-to-end models, recent work has shown that ablations for steps like NER are lacking [@taille-etal-2020-lets]. Our analysis further shows that it is difficult to determine the RI setup. While RC is always performed, the situation is different for RI (or `no-rel`). Sometimes RI is clearly not done (i.e., the paper assumes a scenario in which every instance contains at least one relation), but most of the times it is either not clear from the paper, or done in a simplified scenario (e.g., datasets which already clear out most of the `no-rel` entity pair instances). As this blurriness hampers fair evaluation, we propose that *studies clearly state which step they include*, i.e., whether the work focus is on RC, RI+RC or the full RE pipeline and how special cases (`no-rel` and `NOTA`) are handled. These details are utterly important as they impact both model estimation and evaluation.
|
| 111 |
+
|
| 112 |
+
The traditional RE pipeline is, by definition of pipeline, prone to error propagation by sub-tasks. Joint entity and relation extraction approaches have been proposed in order to alleviate this problem [@miwa-bansal-2016-end; @zhang-etal-2017-end; @bekoulis-etal-2018-adversarial; @BEKOULIS201834; @wang-lu-2020-two; @wang-etal-2021-unire]. However, @taille-etal-2020-lets recently discussed the challenge of properly evaluating such complex models. They surveyed the evaluation metrics of recently published works on end-to-end RE referring to the *Strict*, *Boundaries*, *Relaxed* evaluation setting proposed by @bekoulis-etal-2018-adversarial. They observe unfair comparisons and overestimations of end-to-end models, and claim the need for more rigorous reports of evaluation settings, including detailed datasets statistics.
|
| 113 |
+
|
| 114 |
+
While some recent work shifts to joint models, it is still an open question which approach (joint or pipeline) is the most robust. @zhong-chen-2021-frustratingly found that when incorporating modern pre-trained language models (e.g., BERT) using separate encoders can surpass existing joint models. Since the output label space is different, separate encoders could better capture distinct contextual information. At the moment it is not clear if one approach is more suitable than the other for RE. For this reason and because of our final goal, which is a closer look to sub-domains in the scientific field, we follow the pipeline approach and, following most work from Table [\[tab:ACL-RE-history\]](#tab:ACL-RE-history){reference-type="ref" reference="tab:ACL-RE-history"}, we here restrict the setup by focusing on the RC task.
|
| 115 |
+
|
| 116 |
+
To summarize, open issues are: *1)* The unclarity of RE setups, as illustrated in Table [\[tab:ACL-RE-history\]](#tab:ACL-RE-history){reference-type="ref" reference="tab:ACL-RE-history"}---especially regarding RI---leads to problematic evaluation comparisons; *2)* A lack of cross-domain studies, for all three setups outlined in Section [2](#sec:survey){reference-type="ref" reference="sec:survey"}.
|
| 117 |
+
|
| 118 |
+
In this section, we present the two English corpora involved in the experimental study (Section [4.1](#datasets){reference-type="ref" reference="datasets"}), explain the label mapping adopted for the cross-dataset experiments (Section [4.2](#sec:cross-data){reference-type="ref" reference="sec:cross-data"}), discuss the overlap between the datasets and the annotation divergence between them (Section [4.3](#sec:annotation-disagreement){reference-type="ref" reference="sec:annotation-disagreement"}), and introduce the sub-domains considered (Section [4.4](#sec:exp-sub-domains){reference-type="ref" reference="sec:exp-sub-domains"}).
|
| 119 |
+
|
| 120 |
+
The corpus contains 500 abstracts of published research papers in computational linguistics from the ACL Anthology. Relations are classified into six classes. The task was split into three sub-tasks: (1.1) RC on clean data (manually annotated), (1.2) RC on noisy data (automatically annotated entities) and (2) RI+RC (identifying instances + assigning class labels). For each sub-task, the training data contains 350 abstracts and the test data 150. The train set for sub-task (1.1) and (2) is identical.
|
| 121 |
+
|
| 122 |
+
The dataset consists of 500 abstracts from scientific publications annotated for entities, their relations and coreference clusters. The authors define six scientific entity types and seven relation types. The original paper presents a unified multi-task model for entity extraction, RI+RC and coreference resolution. [SciERC]{.smallcaps} is assembled from different conference proceedings. As the data is released with original abstract IDs, this allows us to identify four major sub-domains: AI and ML, Computer Vision (CV), Speech Processing, and NLP, sampled over a time frame from 1980 to 2016. Details of the sub-domains are provided in Table [\[tab:SciERC-domains\]](#tab:SciERC-domains){reference-type="ref" reference="tab:SciERC-domains"} in Appendix [7](#sec:appendix-conferences){reference-type="ref" reference="sec:appendix-conferences"}. To the best of our knowledge, we are the first to analyze the corpus at this sub-domain level.
|
| 123 |
+
|
| 124 |
+
We homogenize the relation label sets via a manual analysis performed after an exploratory data analysis, as we find that most of the labels in SemEval-2018 and [SciERC]{.smallcaps} have a direct correspondent, and hence we mapped them as shown in Table [\[tab:gold-labels\]](#tab:gold-labels){reference-type="ref" reference="tab:gold-labels"}. The gold label distribution of the relations on the two datasets is shown in Figure [6](#fig:gold-label-distribution){reference-type="ref" reference="fig:gold-label-distribution"} in Appendix [8](#sec:appendix-gold-label-distribution){reference-type="ref" reference="sec:appendix-gold-label-distribution"}. We decided to leave out the two generic labels from [SciERC]{.smallcaps} and one relation from SemEval-2018 which does not have any correspondent and is rare.
|
| 125 |
+
|
| 126 |
+
Our analysis further reveals a high overlap in articles between SemEval-2018 and [SciERC]{.smallcaps} corresponding to 307 ACL abstracts.[^7] Interestingly, the overlap contains a huge annotation divergence. In more detail, we identify three main annotation disagreement scenarios between the two datasets (represented by the 3 samples in Table [\[tab:annotation-disagreement\]](#tab:annotation-disagreement){reference-type="ref" reference="tab:annotation-disagreement"}):
|
| 127 |
+
|
| 128 |
+
- **Sample 1**: *The annotated entities differ and so the annotated relations do as well.* SemEval-2018 annotates just one entity and thus there can not even exist a relation; as the corresponding sentence in [SciERC]{.smallcaps} is annotated with two entities, it contains a relation.
|
| 129 |
+
|
| 130 |
+
- **Sample 2**: *The amount of annotated entities and the amount of annotated relations are the same, but the annotations do not match.* The relations involve non-mutual entities and so do not correspond.
|
| 131 |
+
|
| 132 |
+
- **Sample 3**: *The annotated entities are the same, but the relation annotations differ.* This involves conflicting annotations, e.g., the bold arrow shows the same entity pair annotated with a different relation label.
|
| 133 |
+
|
| 134 |
+
Table [1](#tab:datasets-annotation){reference-type="ref" reference="tab:datasets-annotation"} shows the annotation statistics from the two corpora and their overlap. Overall both datasets contain the same amount of abstracts, but the amount of annotated relations differs substantially. The overlap between the two corpora reports a similar trend. Even the fairer count of the common labels (see Table [\[tab:gold-labels\]](#tab:gold-labels){reference-type="ref" reference="tab:gold-labels"}) reveals that the annotation gap still holds (ratio of 1:1.8). In more detail, the entity pairs annotated in both datasets by using a strict criterion (i.e., entity spans with the same boundaries) are only 394 (considering relations from the whole relation sets). Out of them, only 327 are labeled with the same relation type, meaning that there are 67 conflicting instances, as shown by the bold arrow in Table [\[tab:annotation-disagreement\]](#tab:annotation-disagreement){reference-type="ref" reference="tab:annotation-disagreement"} (Sample 3).
|
| 135 |
+
|
| 136 |
+
::: {#tab:datasets-annotation}
|
| 137 |
+
+------------------------------------------------------------------+
|
| 138 |
+
| **Whole corpus** |
|
| 139 |
+
+:====================+:===================:+:====================:+
|
| 140 |
+
| | SemEval-2018 | [SciERC]{.smallcaps} |
|
| 141 |
+
+---------------------+---------------------+----------------------+
|
| 142 |
+
| \# abstracts | 500 | 500 |
|
| 143 |
+
+---------------------+---------------------+----------------------+
|
| 144 |
+
| \# relations | 1,583 | 4,648 |
|
| 145 |
+
+---------------------+---------------------+----------------------+
|
| 146 |
+
| **Datasets Overlap** (307 abstracts) |
|
| 147 |
+
+---------------------+---------------------+----------------------+
|
| 148 |
+
| \# relations | 1,087 | 2,476 |
|
| 149 |
+
+---------------------+---------------------+----------------------+
|
| 150 |
+
| \# common relations | 1,071 | 1,922 |
|
| 151 |
+
+---------------------+---------------------+----------------------+
|
| 152 |
+
| Same entity pair | 394 |
|
| 153 |
+
+-------------------------------------------+----------------------+
|
| 154 |
+
| Same entity pair + same relation type | 327 |
|
| 155 |
+
+-------------------------------------------+----------------------+
|
| 156 |
+
|
| 157 |
+
: SemEval-2018 and [SciERC]{.smallcaps} annotation comparison. The common relations are the ones with a direct correspondent in both datasets (see Table [\[tab:gold-labels\]](#tab:gold-labels){reference-type="ref" reference="tab:gold-labels"}).
|
| 158 |
+
:::
|
| 159 |
+
|
| 160 |
+
::: table*
|
| 161 |
+
:::
|
2205.01358/main_diagram/main_diagram.drawio
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
<mxfile host="app.diagrams.net" modified="2022-03-29T06:12:00.687Z" agent="5.0 (X11)" etag="vlFYgldB_2umjxRvv46Y" version="16.6.6" type="device"><diagram id="C5RBs43oDa-KdzZeNtuy" name="Page-1">7V1dd5s4E/41Pie9iA8CJOCydpq8727bbZvttrs3OcTIDhtsHIyTuL9+JRDGCAHyBx8hSU8aGECA5tFoZvQIDbTx/PkqsJd3n3wHewNVcZ4H2sVAVYEOTPKHSjaxxEBaLJgFrsNOSgXX7i/MhAqTrl0HrzInhr7vhe4yK5z4iwWehBmZHQT+U/a0qe9l77q0ZzgnuJ7YXl76w3XCu1hqqkYq/x92Z3fJnQGy4iNzOzmZFbG6sx3/KRZFL6d9GGjjwPfDeGv+PMYerbykXn78f/PD+3iPrn77unqwv49+//PzX+dxtVzuc8n2FQK8CA8uGlxPP99sfnO/39+PZh+fL/4cb6bnADJtPtremtUYe9twk1Rh4K8XDqbFKANt9HTnhvh6aU/o0ScCGiK7C+ce2QNkc+bZqxU7deovwkt77noUO2N/Hbg4IEV/xk/sIIMLIHU5WoWBf7/VES1q6nre2Pf8IHoMzbGxOZ1sz9w5giYmvp2SIz55LDekd4P0/pL1xur3EQchft5ROavHK+zPcRhsyCnsqB5fwFqEqcF4/2kXXwz+d7vYstiJNsP0bFtyqjeywVS3lxrVF6PG6RSjiVCNjmHdKkpbakSaIqdGVTmBGr/9/Of60XwwPy3gcml+u/z69bt9njxBmRaxQ8wb2/WD8M6f+Qvb+5BKR6n0o+8vmRL+xWG4YXqy16Gf1XYKDbrn2bfYG9mT+1kkTzTk4Km99sKc5lK5Y6/utqXsp7UVgdUEl9QNYCfS9y9VboA9O3Qfs+ZfpCp26RffJQ+4BYUKlQwsjKQjS4oI7WCGQ3ZVqvD3QWBvdk5b0hNWxfcxdPF9Lvd8rhRv8ROk6NvWyRGABDlAUlwsA9JCZ3aIc+gkLS/MQsv23NmCbE8IDIjZ0Ea0fbqkf37PDsxdx4lwG+CV+8u+jYqizZ7VICkXjgbwgpZFgLtitiYHw4W/wJy1SURl9qs246LCoaJouoEMhejOytoaK29rABDamppMDTBOYWqyXcoBhiffW+BnN/xJCxxCtvc3K55uXzzv7mzYzuGmRtibGg1ZGqXC0sQPmrM01QVBU8pkHWAlxN6HRLdFu5LlkW1tGyQwIzHY9cNFbRCgbMUgU9DFC1qdfoJWV4assooiUcaSbobEJcO/fFrcaIkDlzwANZ+p/EsqrHLmpu4zTkKzuLsPCXB9an3PgankfTQMHIgNkY9mIUOzkbDNlWND2nDCIUAEwRrUiG4UQ7UwcW/bVZspUBuKPB7atdCo1GYGBT2saTg4urIDe+4vnFRCtmbsb3TlbSL49PFLIiMPd8ufR2TxTRLxS+lxE7wZdYIFKKSTBWb6z8ghBQmQotWFFJh3mNrvVtW9u8i6uz5N43os88CuTwd6eUEn8tY1U/zARc9Vcf7R3roQfJaoe4mtiOM+5gxQIDA1gvN2RXB8Rn6ngT0ZGHSP4Cl0bfpgU/I7MC54cUhl1J6ML7AX2jfk+BMRTc/WA3UcviPyd/J3Fz9QXJbyjj4AfWeFPsDKvVHO1nuWvy2MPJh2MaPXq0QTyJ7T/nRxu1pGFyrr+Cbugvz960apvolYmlfAQVoqzPIACceAS97sZn3yrvk2ASrwGKbTqVqQ1UG3CKLitALrMuKjfuDgQNCZ7JSnkB/LqjNyU5ShzqX4DEG8ZqB8x2JqQ6OuJJ9V7Ibk2ygFQbDCpP2N/o2xCsdkm7z5eDgk/91H+xfbJkvMJPl/bj9TSXTq9Ca61riI21EsLwQ6h8IK3GVBmsFVHnaZVCSHOnOCxai7NaFea/IQwWxIb4pCetikl2pZnfA9SD
0Hm5/s+mgnCuDjcJ7upjF8tLcRKn5/nyXu6SXaT93ODUwMA0OGJRnXny4cF8TjxCo8RX3bI+sUX0IYUVfbVZFOc3MAmKphQWRlW7Kl5FuyKIg4RW6uQIGCKCI261nH5jXrEEBzaFmmCTVFJ7YY8iqkCt7+qDmFivIHNSpUFZjmFxrLZ7Lp2zxHjYpW+Y42nw3SmozxgSIaM3/Tppzp1fSuaVOvdpvayV5zuNfz3ZIJhzBfV+AUA9QFlQUPrqyK2j+8ulj9iFgYwuox6oMSqq4dUo67XOHq2NxeLWM+VJS9L2mquSa9d2QurSxYoKy8Una0IOZQ1KYDmYHNpnUgQV2qQQesYQjsRsMqESUu2lZJO80iSQ+DjGmHrWtIJnXwOhrNNr0zzIYUats6khllf52tqD2VyIz2LZz3lMacuuDCPGiOZ1mig0MzbPtqojKjtrUblSm1HRWJcqKJ7GhGTabJ6pwbLDuoaGSLOeeKqZtNA0TJgtZQVYGphK6VULf+3tmWJWuVo7CSrrV1uyXwKpsCbgWv6oF4Na2htfvD8ZrlRsRPB1+JGQWNwRe8dpMIQSkYZCEGVKUMY3x2pnaMSeRsOmMiXxvGDqWxVmDsnPfjageZRK6rzyDrdF8JDwTZOURDOuKX/mSKtZrGmEzGsMcYk6XaN4Ox7Lg9b2+kw4dsMeeSE4hOhymZDGifo9KkwjuCKs5yHdo98lEpzyOqHVaVLPgUXwnpnR44j0cp35MTgLp8zjPio4nnvaK77znhbM+RseIBw3yKUgTrUwylCueUiRKUPaIfF3CNSW3j0PUczG5OGTpb1uS7Y173jQLcGgWYn1YA1WF+eBno5tC0BP1GXfPDhfnmHG4++qvVWYbMrjKO+phKZwyZ+yGf/EamlzSgOxxg2q6iUgeldPi9odsQb9iB2HR0EVRN9VZD4mGLE/GGuTmJBspb7dp4w2JUyeSb+8sbrvzygJL3Oxv6QsFRo1NIoNY3FnCGBQxLWMCGYMpHoyxgJMijx4a4yuN4zUoFml5MCyY6raAFAxH3sDYVG6JIvQ8xTxO0YH4KKWqZSGqI4uM3ZUqygmG3lGlJDDC1QgrmkkmGIOZvmhSMDq+r8rrvASUYyYwhvVzSFpLPUbVF0kJd5GSfjtq4hwY6QgdGXSRot9IkukkGRl1ka7fSYLpKBU6+UNspDXWhBbWlkMR96NeIa6KHygFX1GmmyMtkARudopZXAKo2EnACwUoOMJIlB6BuMU76yAA21A5B90QE4JdqC3tJ/zW6xDA/EWmuJwDrCffX6CG/XB5hne4i+0H8NXrILZcHGOoUwHrB+jU6RSRvPgTt1oyYnlB+9f1ANaGEK3cijSu5pWQ43p8ioHNdXl6q43F0hR2EHMYj2aVLX50B0WF74lVKSoHYEXwZ2YTkoW49VwzQjWHDWQ5RJvONU45F6dYTcMoLR1aLWRbNUcrFJkjbbzJLP0zQUZ9mb6qHy/ZNpqLytuPQ5UvMpifhaRLjKa2QPTiqsAXTr0vvaFkXLRxU30fOBGtCnXLyQrL+yWQdeJtRYE/uqeNXNYqVJYbXt3jdtvnPAttxcToroZDWWAE5eUssMMUiPmqNihcTUrffyafM/4e17QwKpgS8tF5XPVGve4CyrWzDF1ICmhzgBJogIVZJRi4DRby9wuHcXaxXDCbqG1KkkcKFllX9QX3QSILSI3x41RL58PEV/fDgD0DIiWjSJvdlbMEqp41y3kmQeTRexDHfte+ReqFFf3g4A7B8EtqLhpNqtQUnnctsmQI4ARH3hk/bnxBPdc1Lj/EE3kBzNGh0jt0vWM/PaBYzMgy6LmUVjlhZuSMJA+1Uq53mCmr6A3dQ5PB0GTyvJCXFA+OIlFSuqKZTUlBEQChfcjSLm+p1R7+4S+y5CwxK/aT+rUfamiuOeFqewHdqdlkaKGIhFKfyeOSoeyOnLPH3BhxZ4AhXAmkWOPmBYRpzgT7kbhqY7moonELb/tYAgDIO8du6xf
y6xRbgipBet5iuMQr5soYJ4eLUqxcjVHK3QidI5qrBsSsZr/+Bjyvn96s/pvgPczr9thnZD4IF1z/7DqZeD7bDNbEL7dqZLM4zX8za/STRMV+7aj5RyKla5J2AujLL4u/pyLCPmrZJ2S8VDXUTDXaIb+fKUDGtQcVsGbr3BQcuqSYKzAJC3FFrnTa01CmwrCE0s7jhByALbJhUWZJk32J7KB+Ykd3ApxFTejrlEH0idoee8R8=</diagram></mxfile>
|
2205.01358/main_diagram/main_diagram.pdf
ADDED
|
Binary file (62.4 kB). View file
|
|
|
2205.01358/paper_text/intro_method.md
ADDED
|
@@ -0,0 +1,119 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Introduction
|
| 2 |
+
|
| 3 |
+
Node classification on graphs has various real-world applications [Song *et al.*[, 2021\]](#page-6-0), ranging from computer vision and natural language processing to social networks and biomedical sciences. Often it is done in a semi-supervised setting in which the labeled data is significantly less than the unlabeled data, and the goal is to use both the labeled and unlabeled data to obtain a good classification score. This makes it an ideal approach for problems where labeled data is expensive or scarcely available.
|
| 4 |
+
|
| 5 |
+
Many state-of-the-art methods for node classification use machine learning on graphs [\[Kipf and Welling, 2017;](#page-6-1) [Veličković](#page-6-2) *et al.*, 2018; Monti *et al.*[, 2017\]](#page-6-3). Graph Neural Networks (GNNs) are emerging as powerful tools for graph representation learning. The earliest method for node classification on graphs used Laplacian regularization [\[Zhu](#page-6-4) *et al.*[, 2003;](#page-6-4) [Belkin](#page-5-0) *et al.*, 2004; [Zhou and Schölkopf, 2005](#page-6-5)]. The Laplacian regularization can also be formulated as a PDE approach on graphs (Dirichlet problem [\(10\)](#page-1-0)). Several non-trivial Laplacians have been proposed, and the Dirichlet problem associated with each one of them could be used for node classification [\[Calder, 2018;](#page-5-1) Kyng *et al.*[, 2015;](#page-6-6) [Calder and Slepčev, 2020;](#page-5-2) [El Alaoui](#page-6-7) *et al.*, 2016]. In this work, we consider the Dirichlet problem associated with the normalized Laplacian as defined in [\[Zhou and Schölkopf, 2005](#page-6-5)]. More specifically, we consider the time-dependent formulation of the Dirichlet problem [\(15\)](#page-2-0) and argue about making its solution as competitive as state-of-the-art methods for transductive learning (Table [2\)](#page-4-0).
|
| 6 |
+
|
| 7 |
+
The triggering point of this work was the comparison of GCN's [\[Kipf and Welling, 2017\]](#page-6-1) accuracy and the solution of the Dirichlet problem [\(15\)](#page-2-0) on handcrafted *knn* graphs for MNIST [\[LeCun](#page-6-8) *et al.*, 1998], FMNIST [Xiao *et al.*[, 2017\]](#page-6-9), and the popular citation graphs Cora, Citeseer, Pubmed [\[Sen](#page-6-10) *et al.*[, 2008\]](#page-6-10). We observed that while the solutions of the PDE and the GCN are comparable for node classification on the handcrafted *knn* graphs, the latter performed significantly better on the citation graphs (Table [1\)](#page-2-1). For the handcrafted graphs, the nodes correspond to the images, and node features are the pixel values of the images. The plausible reason for the similar performances on handcrafted *knn* graphs would be that edges connect the similar nodes, as the euclidean distance between the images happens to be a good metric for the graph construction in this case. The citation graphs are given to us by default; the nodes represent documents, edges correspond to citation links between the documents rather than their similarity, and each node has a high dimensional bag-of-words feature vector. The apparent reason for the superior performance of the GCN on citation graphs would be that not only does it take the graph as an input, but it also takes the node features as input and utilizes their hidden representations.
|
| 8 |
+
|
| 9 |
+
Main contributions. Recent work has shown that GNNs are good at utilizing the node feature information [\[Faber](#page-6-11) *et al.*[, 2021\]](#page-6-11). The GCN beats the PDE-based approach on citation graphs by employing the node features along with the graph. This led us to ask: *How to make the solution of the PDE* [\(15\)](#page-2-0) *at par with the GCN by utilizing the node feature information?*
|
| 10 |
+
|
| 11 |
+
- We show that this is achieved by utilizing the node features to learn the initial condition [\(15c\)](#page-1-1) of the PDE (Figure [1\)](#page-3-0).
|
| 12 |
+
- We benchmark the improved solution against the several state-of-the-art methods for node classification. Our approach yields competitive results (Table [2,](#page-4-0) Row 08).
|
| 13 |
+
- Although we learn the initialization vector to boost the performance of the PDE, further marginal gains are also added by learning the graph weights (Table [2,](#page-4-0) Row 09).
|
| 14 |
+
- Unlike GNNs, which need to be retrained every time to incorporate new labels, this approach can easily incorporate new labels after training. We demonstrate this by incorporating validation labels (Table [2,](#page-4-0) Row 11). Importantly, we do not use the new labels to retrain.
|
| 15 |
+
|
| 16 |
+
In this section, we review notation on graphs and formulate node-classification as the *time-dependent* Dirichlet problem on graphs.
|
| 17 |
+
|
| 18 |
+
# Method
|
| 19 |
+
|
| 20 |
+
A weighted graph G=(V,E,w) consists of a finite set V of N nodes and a finite set $E\subset V\times V$ of edges. Let (u,v) be an edge connecting the nodes u and v. A weighted graph is associated with a weight function $w:V\times V\to [0,1].$ It represents a measure of similarity between two nodes. The set of edges according to the weight function is given as: $E=\{(u,v)|w(u,v)\neq 0\}.$ The set of nodes in the neighborhood of node u is denoted as N(u). The notation $v\in N(u)$ means node $v$ is in the neighborhood of node $u$. The weighted degree of a node is defined as $\delta_w(u) := \sum_{v \in N(u)} w(u,v)$; it is used below to normalize the graph gradient and Laplacian operators. (The tail of this paragraph was garbled in extraction; the degree definition is reconstructed from its use in Eqs. (2) and (8).)
|
| 21 |
+
|
| 22 |
+
Let H(V) be a Hilbert space of the real-valued function on the nodes of the graph. A function $f:V\to R$ of H(V), represents a signal on a node and assigns a real value f(u) to each node $u\in V$ . Similarly, let H(E) be a Hilbert space of the real-valued function defined on the edges. These two spaces are equipped with the following inner products:
|
| 23 |
+
|
| 24 |
+
$$\langle f, g \rangle_{H(V)} = \sum_{u \in V} f(u)g(u), \quad \forall f, g \in H(V)$$
|
| 25 |
+
|
| 26 |
+
$$\langle F, G \rangle_{H(E)} = \sum_{(u,v) \in E} F(u,v)G(u,v), \ \forall F, G \in H(E)$$
|
| 27 |
+
(1)
|
| 28 |
+
|
| 29 |
+
The weighted graph gradient (or difference) operator $\nabla_w: H(V) \to H(E)$ is defined as:
|
| 30 |
+
|
| 31 |
+
$$\nabla_w(f)(u,v) := \sqrt{\frac{w(u,v)}{\delta_w(v)}} f(v) - \sqrt{\frac{w(u,v)}{\delta_w(u)}} f(u)$$
|
| 32 |
+
(2)
|
| 33 |
+
|
| 34 |
+
The norm of the graph gradient on each node is given as:
|
| 35 |
+
|
| 36 |
+
$$\|\nabla_w(f)(u)\| = \left(\sum_{v \in N(u)} (\nabla_w(f))^2(u, v)\right)^{\frac{1}{2}}$$
|
| 37 |
+
(3)
|
| 38 |
+
|
| 39 |
+
The Dirichlet energy associated with the signal f(u) is defined as :
|
| 40 |
+
|
| 41 |
+
$$J(f) := \frac{1}{2} \sum_{u \in V} \|\nabla_w(f)(u)\|^2 \tag{4}$$
|
| 42 |
+
|
| 43 |
+
Often it is also known as *Tikhonov regularization* [Belkin *et al.*, 2004]. The weighted graph divergence operator $div_w: H(E) \to H(V)$ satisfies the discrete version of stokes law:
|
| 44 |
+
|
| 45 |
+
$$\langle \nabla_w(f), F \rangle_{H(E)} = \langle f, -div_w(F) \rangle_{H(V)}$$
|
| 46 |
+
where $f \in H(V)$ and $F \in H(E)$ (5)
|
| 47 |
+
|
| 48 |
+
Using (1) and (5), and letting $f = \mathbf{1}_{\{u\}}$, it can be shown that the divergence operator is given as:
|
| 49 |
+
|
| 50 |
+
$$div_w(F)(u) = \sum_{v \in N(u)} \sqrt{\frac{w(u,v)}{\delta_w(u)}} (F(u,v) - F(v,u)) \quad (6)$$
|
| 51 |
+
|
| 52 |
+
The graph Laplacian is defined as $\Delta_w: H(V) \to H(V)$ :
|
| 53 |
+
|
| 54 |
+
$$\Delta_w(f)(u) := \frac{1}{2} div_w(\nabla_w(f))(u) \tag{7}$$
|
| 55 |
+
|
| 56 |
+
Using the definitions of $\nabla_w$ and $div_w$ , the Laplacian is given as:
|
| 57 |
+
|
| 58 |
+
$$\Delta_w(f)(u) = \sum_{v \in N(u)} w(u, v) \left( \frac{f(v)}{\sqrt{\delta_w(u)\delta_w(v)}} - \frac{f(u)}{\delta_w(u)} \right)$$
|
| 59 |
+
(8)
|
| 60 |
+
|
| 61 |
+
Let us consider the problem of node classification on graphs in transductive settings for a multi-class problem. Let $V_0 = \{u_1, u_2, ..., u_m\}$ be a subset of nodes in V over which the labels are given as $y_1, y_2, ..., y_m \in \{\mathbf{e_1}, \mathbf{e_2}, ..., \mathbf{e_k}\}$ , where $\mathbf{e_i} \in R^k$ represents the $i^{th}$ class out of k classes (one-hot vector). The goal of transductive learning is to extend these labels to the unlabeled nodes. One way to achieve this is via solving the following inverse problem:
|
| 62 |
+
|
| 63 |
+
<span id="page-1-5"></span>
|
| 64 |
+
$$\underset{f(u)}{\arg\min} \left\{ \sum_{u \in V} \frac{1}{2} \|\nabla_w(f)(u)\|^2 : f(u) = g(u) \quad \forall u \in V_0 \right\}$$
|
| 65 |
+
|
| 66 |
+
<span id="page-1-2"></span>Here, $g: V_0 \to R^k$ is the label function on the nodes, such that $g(u) \in \{\mathbf{e_1}, \mathbf{e_2}, ..., \mathbf{e_k}\}$ . The other way to extend the known labels is via solving the following PDE (a.k.a the Dirichlet problem) on the graph, which is obtained through the E-L equation of the above optimization:
|
| 67 |
+
|
| 68 |
+
<span id="page-1-0"></span>
|
| 69 |
+
$$\Delta_w(f)(u) = 0 \quad \forall u \in V \setminus V_0$$
|
| 70 |
+
|
| 71 |
+
$$f(u) = g(u) \quad \forall u \in V_0$$
|
| 72 |
+
(10)
|
| 73 |
+
|
| 74 |
+
<span id="page-1-4"></span>It is worth remembering that the above equation is vectorvalued PDE, with k components. The final label decision for the unlabeled node u is determined by the largest component of f(u):
|
| 75 |
+
|
| 76 |
+
$$l(u) = \arg\max_{j \in \{1, \dots, k\}} \{f_j(u)\}$$
|
| 77 |
+
(11)
|
| 78 |
+
|
| 79 |
+
We now digress briefly to mention the connection with the widely-used label propagation algorithm [Zhu, 2005]. For a symmetric graph and under somewhat different definitions of gradient and divergence
|
| 80 |
+
|
| 81 |
+
$$\nabla_{w}(f)(u,v) = \sqrt{w(u,v)}(f(v) - f(u))$$
|
| 82 |
+
|
| 83 |
+
$$div_{w}(F)(u) = \sum_{v \in V} \sqrt{w(u,v)}(F(u,v) - F(v,u)), \quad (12)$$
|
| 84 |
+
|
| 85 |
+
<span id="page-1-3"></span>the Laplacian on the left-hand side of (10) yields the classic unnormalized (*a.k.a combinatorial*) Laplacian [Chung and Graham, 1997]. The Dirichlet problem then becomes:
|
| 86 |
+
|
| 87 |
+
$$\sum_{v \in N(u)} w(u, v)(f(v) - f(u)) = 0, \quad \forall u \in V \setminus V_0$$
|
| 88 |
+
|
| 89 |
+
$$f(u) = g(u), \quad \forall u \in V_0$$
|
| 90 |
+
(13)
|
| 91 |
+
|
| 92 |
+
<span id="page-2-1"></span>
|
| 93 |
+
|
| 94 |
+
| Dataset | Eq15 | GCN |
|
| 95 |
+
|----------------------------|----------------------------------------------------|----------------------------------------------------------------|
|
| 96 |
+
| MNIST<br>FMNIST | $93.2 \pm 0.0$<br>$76.0 \pm 0.0$ | $91.3 \pm 0.2$<br>$77.4 \pm 0.2$ |
|
| 97 |
+
| Cora<br>Citeseer<br>Pubmed | $72.5 \pm 0.0$<br>$49.7 \pm 0.0$<br>$72.5 \pm 0.0$ | $81.5^{\dagger}$<br>$70.3^{\dagger}$<br>$79.0^{\dagger}$ |
|
| 98 |
+
|
| 99 |
+
Table 1: Performances of (15) and the GCN on handcrafted and citation graphs. Observe the significant differences in scores over the latter. †Values taken from the GCN paper.
|
| 100 |
+
|
| 101 |
+
The above equation can be solved by Jacobi's iterative method, which yields the classic label propagation:
|
| 102 |
+
|
| 103 |
+
$$f^{n+1}(u) = \frac{\sum_{v \in N(u)} w(u, v) f^n(v)}{\sum_{v \in N(u)} w(u, v)}, \quad \forall u \in V \setminus V_0$$
|
| 104 |
+
$$f^{n+1}(u) = g(u), \quad \forall u \in V_0$$
|
| 105 |
+
(14)
|
| 106 |
+
|
| 107 |
+
A traditional approach to solve E-L equation of a variational problem is to make it time-dependent. A celebrated example of this is R.O.F. PDE, which is used in image processing for anisotropic denoising [Rudin et al., 1992]. Formulating the Dirichlet problem in (10) as time-dependent, it becomes:
|
| 108 |
+
|
| 109 |
+
$$\frac{\partial f(u,t)}{\partial t} = \Delta_w(f)(u,t) \qquad \forall u \in V \setminus V_0 \qquad (15a)$$
|
| 110 |
+
|
| 111 |
+
$$f(u,t) = g(u) \qquad \forall u \in V_0 \qquad (15b)$$
|
| 112 |
+
|
| 113 |
+
$$f(u,t) = g(u) \qquad \forall u \in V_0 \tag{15b}$$
|
| 114 |
+
|
| 115 |
+
$$f(u,0) = \psi_0(u) \qquad \forall u \in V \qquad (15c)$$
|
| 116 |
+
|
| 117 |
+
At steady-state, when $\frac{\partial f}{\partial t} \to 0$ , the solution of above equation is equivalent to that of (10). Basically, it is a gradient flow in the direction of minimizing the Dirichlet energy while satisfying the boundary constraint. The above equation can also be viewed as heat diffusion with non-homogeneous boundary condition on graphs. Notice that the Dirichlet problem in (10) had only a boundary condition, but now it also has an initial condition, denoted as $\psi_0$ .
|
| 118 |
+
|
| 119 |
+
A common choice of setting $\psi_0$ is to let the signal be $g(u) \in \mathbb{R}^k$ (one-hot vectors) on the labeled nodes and zero vectors elsewhere. It can be seen as an initial unnormalized probability vector on nodes. In this paper, we choose to address it as 'front $\psi_0$ ' since the approach in (15) is similar to level-set (front-propagation) methods in computer vision [Osher, 1993; Osher and Paragios, 2003].<sup>1</sup>
|
2205.11258/main_diagram/main_diagram.drawio
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
<mxfile host="app.diagrams.net" modified="2021-10-05T16:02:18.813Z" agent="5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/93.0.4577.82 Safari/537.36" version="15.4.1" etag="5hcsscNF4_8eqtDHJV1u" type="google"><diagram id="BCCHRfq1-4QzMlCQZTpj">7V1bk6M2Fv41XbW7Ve1CXAQ8zvQkuw9JaiqzVbt5xCDbZLDxYnq6O79+JZBskOQ2dkvCFHJSiS2BoM93LjoXSQ/e0/b1n1Wy3/xaZqh4cJ3s9cH78uC6wHfdB/Kvk721LWEYtg3rKs/oRaeGb/lfiDY6tPU5z9Chd2FdlkWd7/uNabnbobTutSVVVb70L1uVRf+p+2RNn+icGr6lSYGEy/6TZ/WmbY3c8NT+L5SvN+zJAMZtzzZhF9MhDpskK186z/J+evCeqrKs22/b1ydUEOIxurQv9POZ3uOLVWhXD7mBAvEjKZ7p35bQF6vf2F9blc+7DJEbnAfv88smr9G3fZKS3heML27b1NsC/wL46yoviqeyKKvmXi9LULRKcfuhrsrvqNMD0wgtV+SOcldTiAGkv39OtnlBmOPf+RYD7Tq/oRf839/LbbJjt9AXZH8Bqmr0epYK4EhbzJSo3KK6esOXsBuCYBHD9i7KkkdeezkB7PoLx29bNx14PXplQrlqfRz/RHj8hdJejoNncSC94TAcHD0g+BYE0huPCkIggLCcIQhYJY0JAhRIjjJsAenPsqo35brcJcVPp9bPfVBO1/xSlnsKxZ+ort8oZZPnuuSAukzm9rXIu1xL1woVSZ3/6N8nIxG99WuZ4xFPeIROHwzGp2yIQ/lcpYje1TW43EBecGGgOqnWqBYGahA7/j2DQAytJBECe6NKUmRtypkJlh+bnGDFAg7pHHGQTLBkOGgSBiZ3c0dBMsMyiQIQaG6tex8N6Cuy7vxA6qw7sK77GcviOa5JywKs737GtEiB0KXUROd9jrNdiWkxioLovc/RwEu8d6MoWPf9goH3QKjGwAsDKTTwov8+S1kS/XejsiQ68KJw7bJPJKuEf+3KHRGmLDlsGlRAHwHS/jWpa1TtmhbX8Y70Z5kkV0b3q5G7XdQ6RA0kRGVtH5RI4Ic9TP3gxoAaCP33BzojkRix5K1z2Z5ccHjnhWG8iLsf4f0Xnu+cPh7HZe3zblYGYvziwYVFTXmlx4/wf88l63g8NFz0CV8A4P711Im/rcn/v5aHnMB4YMPhN2lHbPsFZseSXPeZus+EVAK6HEubkiJfE7ZPMQci3P6Z6IU8TYpPtGObZ1lji2R6rK/pblBMKmZXorMBIjGWGMqdDVeBOnLFCIpaZ2MVpSiVqpZlFPjBjbRXbRSCwO2hYDhby1547iiEA1DQZJeZGdY2O5oIBPGIEOgOfEwDAghGhECMesxRCqA3IgRiyGOOUiBYZLPpPVaxoC38NxEUeItsMK0kqTeYpSDwFtkkBGKwYo7mgNdFhhNC2msNJoJCOAAFTYLgiW7yLM1BPCIEoo88Rwh478AoBKKPPEeLzHsHRiEQfWQBAePpgwGwtS95v+kDYZYVc2ANTR9AFlk/N5Ci9AFkqfHTc3TmCzwxMPCO5IPLkj88eM8z37pIDgeqXpIqpV1NT5VkOR6vy5rNR54/UKAK3IhDQaYKZGwLfBXKQAwVqEni/IbWiU3i3OgdGM7geGKkgqF22OO/8QNM8Fwlxbd9kRMD0eGEdtgznHCV+a/KGvNZSWB+JGQTLctq5cotSwaXMCAgZ3mFUjrIjpSZvMdDArdtyir/C1MjKUblIc/h6jxlAcdoEXsSTeL6CphId6xl6SAPQRmQDoqcKLqPmZ0HB8CgcdGm7sUdE0EhGoCCrvm1GGuZIwT+EH2ka9Ws9lhLkoW
J3F3xYYCF+z4g8EaEQHusZRoQ8KsvTEKgO9YyDUUkWGSD2Q9/QKzlZgSYR/RREmP8vqM63dA3OO7/cQq6qNVCJumvuxghc1KEXJkIBDAAoRJ8FEDAT4dMQqB9/cUkIAj46ZBJCET/3hoC4zlY3zrIMtfMZOrDF73jWU5KvREh0L7rwSTMAW+RTUIQiN7xHCHgLbJRCETvWEDAeA52gP1oX/J+c7BeGC0YjB9Nw/pOcHEsRZlY3wGSR+lMxrKE0107phocH5O7xsl8fxWpVbDAXV/yH3mGCDHoajkil6/Jdl/YlOsQznA5zgChzB+TsIaKhGsgK4VQwRouYY1vb7t6g5pL8UDPywqt0avliiFcASS7JASiygCRvmR8IKvIOKuZZ1MmA1xuthZ6AiwBk+Fecptt5PwhUGwEpellNVwMhNgfZE6VyIUNnzS9wYgQ2OKCVqOMCIEtLpApIt81BwEUwycWApJkEA2yFIJ3nPrBEOje7GAiEAQjQiAGEOa4kMN1sMMQXTYH7jG+o1wd2S0PWgoTIDrxMndUUOwmCA0p/bsCxW6L0JASDlNZuqZPois9S9GIxkXB7oxwxnxLXQmNSkl7ScI0gHjffBv173S72BNB5H3bbRKRUPS4Z2kyJMoKSlDQl5sIRb97lkBcUFYyUPRZELanydwtyAV9JQNFl74SXfJZIiI6GkZR0H124kRQEB0NoygMSGfb3UZuOD7ggrq7de9ybE4EdtG0fTn2fSSP0ln1Fso2OriZGcWClQuiffnEi3vnOo+rwgiAv7j11EmhoAPwQyk8d1IMd1jgrwHej4NFwEEPbwNeHOpYy6MB+Hs8K2O1QvDMliRhvHSmYH6wkuYrlm82OR4ML46lyOR40JM8SqvJEaNJc1xy5HkuT3iT4e5IjCDNEoVgXBQGrHq5DoQ7IKnI2CZDo5ErkHSWjC1BwWAxTGRrMBoUGB3Gye9HNuDTML4zLgq693uYCAruuCjoXrAwERT8ASjoMs26D8ucCARwRAh0L1iYBgS8XTZc4hJpd4EngQJvl026CbHo/85RFwmCYLZ8IrblE1JBMFsvwbbpn7s6cgegoEsd2QIJ2ezUKATWWZbNTo1CYEsj9OQqec12a2KK37xXV1bK9yH/HJ0pqVhpFYQivhuwZ9i9810AooUD1LAeFrWLYynivsAHkkdpZUDdkZGJbEDnOQLhTfqEuk/mnAgKwBkPArsZphSCoak7BRAAR3doZCIYSJSRURh0b+kwDX8kINXacQ8Gw5kjwAaeORLQGR8JMUwyx3ghdMdHwkZLGiT8YUhoMxK2tqCBAY4Mgy0uOGOrDWdWgSN60nMMpEtstUk3Dji2zuCMoTYLg253ehowyPSSzJXTl+gGQLdTPQ0kZHpJhoRGCwFEv3qWFkKimkyGN4DoVM8SBtGBMAuDLdQ/40CYhUF2/gGHgs18X5+BlGm5WzOQMCAbAcTHj5k9AaAfSf4CnclIAK46cGEaq9L4jIrR8ABbGN4V5osL4PsUPl3zS1nuKV3/RHX9RgmbPNdln+roNa//S25fBPTXH52eL6905ObH28PgpftnRL6VoraJrcFrRaDfNgCywTI/HIBwKgBg2lRvnZvIzz+6fafbml9qgTueKdJFjhWAqUTuNlXIr9Sli5jP7tzCTsiQX69AUUaWrW5mK7YsbnJs5fNnvihnq9iy1c1sdcwRTo6voKdZXbliDEpgtMMm2ZOvmHp1nhS/o7ROduuGwy5M8pZlXZdb+qOiMyjyvUAr9rVueNHhXA3qzWAOrvPd+qk5lYsw1WPLVf2zvfALZo0PREbpnsd1+I7qdHMtg31sUhlwCPuuMKkEsiPzIgVzmqNEzFlLdD2LaKjWkMxOj2e3jK41jiOyXaXgBWvEHdrLXa9Aa4iBOjVHM/5OTmE8Hc1YDT2O8SrXU5m64M/DSdIoO0VVOj1YC/hBpkbB8EcKBSx43VUwbK8w1ce1ArZqwGqYazWML9Mw9zLf9Z1+5Oq
Shgn4LQ6Vaxhdh79aDXNRw0j2CZIqmQAcdyZVr2cCq2du0zNQpmegej0zHErZSiorykZEmQ9xyycLUJcQ2xCrmOgYJMSRRIjZpHscIR6wRayNE1w1jff4Q9R9thNPfyYvLUBSEi2wMcXbxNOTxRhdDdGCwVB6NoynVjxle2XKxPPMHFiFeHo2mHejeMqCee6YU2D2RlY8VUXZ+cWwUtMZahJMsT5OjUPjLXDXU7ld5hgW/MAdFk78rmTAvGEFwpvY5xnq6GD61pw303M2KPwSjkiKfE3qxNKGJXADQStPk+IT7djmWXaWN8WjH66s9FExv3JC/iwGdgZYr6hY4vsoqe/xzgew8kEcEsk4JGFjHJ73J9g/dxii13Fsze/NGb5QZRiQf2TOMWw+dIROe/tRZPt95yLjAFlhmBKn2ZNV2r3HOGlLBcI01Xr5N/wO+F/8HKfz7e/kKyGO07DZiqLzqbkCJtt90+lhpsWmgiK2axCrGsT4a/rDvVAakOH89rFNZ4FIvenjAfMT1l9t/66stklxvISoqEeqbUg3VTi97hwz6I4O7xCp6HbWVbI7rPCgbPhGdbYXvJRV1n969/Zlkn5fN+z/yJEQm5Mj6Vw/Pn0PeEJektelRF7/YaVVpbT6cFRpPR/ctNI6NWlNrbTqltYgGlValZ5wpmpTtwRFK+mSCphGaLk67/XezZIKEAeLGHZWH/RLFI47K199zqLrXjWuqjMXQbSIg/jcsg78WK1rLTx7HJueLS+9mHMJb2ZMH8JLQ6na9hK64pP0sp+4QHx89ls6yENQxn4Oipzonez4/bBfJKiR25gvAGZY75gs1sJ4+GdVkunX6XI8J9r8WmaIXPF/</diagram></mxfile>
|
2205.11258/main_diagram/main_diagram.pdf
ADDED
|
Binary file (25.3 kB). View file
|
|
|
2205.11258/paper_text/intro_method.md
ADDED
|
@@ -0,0 +1,89 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Introduction
|
| 2 |
+
|
| 3 |
+
Regular expressions (regexes in short) are a powerful tool for processing sequence data in NLP or information retrieval. A regex---a finite sequence---is a formal representation of (infinite) symbol sequences. However, it is not easy to write a good regex for a set of sequences. Especially for those who have little experience with formal expressions, a single mistake in regex composition leads to an invalid regex or, even worse, a regex with different semantics to one's intention. An automatic regex synthesis tries to moderate this human error.
|
| 4 |
+
|
| 5 |
+
Examples [@OncinaG92; @BartoliDDMS14] and natural language descriptions [@LocascioNDKB16; @ZhongGYPXLLZ18] are the two major sources of regex synthesis. Examples are specific in their meaning without any implicit semantics, but are hard to provide the complete user intent described by a regex. On the other hand, natural language descriptions are ambiguous but describe the high-level concept of a regex. As these two different types of information are complementary to each other, there have been many studies [@ChenWYDD2020; @LiLXCCHCC21; @ZhongGYXGLZ2018] that suggest using both modalities as they can use examples to fix the ambiguous expressions generated from natural language descriptions.
|
| 6 |
+
|
| 7 |
+
The *regex synthesis problem* is to make a proper regex [$R$]{style="color: red"} from both positive and negative sets such that all the positive samples are generated by [$R$]{style="color: red"} and all the negative strings are not generated by [$R$]{style="color: red"}. This problem is also called the regex *inference* or *learning* problem in the literature.
|
| 8 |
+
|
| 9 |
+
The traditional approaches for the regex synthesis problem are often inapplicable in practical settings. For example, @Angluin87's [-@Angluin87] $L^*$ is impractical because it requires the equivalence test between a target regex and a generated regex, which is known to be PSPACE-complete and thus intractable [@StockmeyerM73]. @Gold78 shows that the state characterization approach is NP-complete and thus intractable. Heuristic approaches based on state-merging DFA induction algorithms such as RPNI [@OncinaG92] and blue-fringe [@LangPP98] are not very promising at synthesizing complex regexes because it is almost impossible to obtain a concise regex even when they are able to produce a concise DFA from examples. As recent approaches, both AlphaRegex [@LeeSO16] and RegexGenerator++ [@BartoliDDMS14; @BartoliDMT16] generate candidate regexes and match against the given examples until the target regex matches all examples successfully. While these approaches generate correct regexes when the time limit is very long, they often fail when the example size is large. In addition, both approaches do not use any characteristics of the given examples, which is a useful feature to speed up the process. In other words, these methods are impractical for generating regexes within a reasonable time limit.
|
| 10 |
+
|
| 11 |
+
For practical usage, we notice that the divide-and-conquer approach is promising for synthesizing formal representation. For example, @AlurRU17 synthesize a program with hierarchical partition of the given input-output pairs. A decision tree reflecting the hierarchical partition forms the control flow of the synthesized program. This method noticeably improves program synthesis time due to the partitioned problem space. @FarzanN21CAV [@FarzanN21PLDI] propose an effective method that converts a program into an equivalent program by splitting a problem into several subproblems. This divide-and-conquer approach is efficient since individual tasks for solving subproblems can run simultaneously. Recently, @BarkePP20 [@ShrivastavaLT21] propose program synthesis methods that first find partial solutions satisfying subsets of input-output examples and combine the solutions to find the global solution.
|
| 12 |
+
|
| 13 |
+
In this paper, we propose a novel regex synthesis framework based on the divide-and-conquer paradigm for boosting existing regex synthesis algorithms. Given a set of positive examples, we split examples into multiple parts by grouping similar substrings using a neural network and solve subproblems defined over the multiple parts in serial order. We train a neural network called the *NeuralSplitter* that embeds a set of positive examples into a fixed-size vector and produces the most probable split for each example in the set. As far as we are aware, this is the first work that aims to synthesize a regex by processing given positive and negative examples with a neural network.
|
| 14 |
+
|
| 15 |
+
Our main contributions are as follows:
|
| 16 |
+
|
| 17 |
+
1. We propose a regex synthesis framework *SplitRegex* based on the divide-and-conquer paradigm, which has been employed in many sequential algorithms. As far as we are aware, this is the first attempt to synthesize regexes in the divide-and-conquer manner.
|
| 18 |
+
|
| 19 |
+
2. We design a neural network called the *NeuralSplitter*---a two-stage attention-based recurrent neural network trained to predict the most probable split of input positive strings.
|
| 20 |
+
|
| 21 |
+
3. We conduct exhaustive experiments on various benchmark datasets including random dataset and practical regex datasets for demonstrating the effectiveness of the proposed framework. We show that our framework can be easily combined with the off-the-shelf regex synthesis algorithms with minor modifications.
|
| 22 |
+
|
| 23 |
+
Namely, our framework solves regex synthesis problem, which is PSPACE-complete, much faster when applied to any existing regex synthesis models, and thus boosts the overall performance.
|
| 24 |
+
|
| 25 |
+
# Method
|
| 26 |
+
|
| 27 |
+
Let $\Sigma$ be a finite alphabet and $\Sigma^*$ be the set of all strings over the alphabet $\Sigma$. The empty string is denoted by $\lambda$. A *regular expression* (regex) over $\Sigma$ is $a \in \Sigma$, or is obtained by applying the following rules finitely many times. For regexes $R_1$ and $R_2$, the union $R_1 + R_2$, the concatenation $R_1
|
| 28 |
+
\cdot R_2$, the star $R_1^*$, and the question $R_1^?$ are also regexes. We denote the set of strings (language) represented by a regular expression $R$ by $L(R)$. Note that $L(R_1^?)$ is defined as $L(R_1) \cup \{\lambda \}$.
|
| 29 |
+
|
| 30 |
+
Given positive and negative strings, we consider the problem of synthesizing a concise regex that is consistent with the given strings. The examples are given by a pair $(P, N)$ of two sets of strings, where $P \subseteq \Sigma^*$ is a set of *positive strings* and $N \subseteq \Sigma^*$ is a set of *negative strings*.
|
| 31 |
+
|
| 32 |
+
Then, our goal is to find a regex $R$ that accepts all positive strings in $P$ while rejecting all negative strings in $N$. Formally, $R$ satisfies the following condition: $P \subseteq L(R) \textrm{ and } L(R) \cap N = \emptyset.$
|
| 33 |
+
|
| 34 |
+
<figure id="fig:overview">
|
| 35 |
+
<div class="center">
|
| 36 |
+
<embed src="images/overview.pdf" style="width:75.0%" />
|
| 37 |
+
</div>
|
| 38 |
+
<figcaption>Overview of the proposed approach.</figcaption>
|
| 39 |
+
</figure>
|
| 40 |
+
|
| 41 |
+
Our approach involves the following procedure:
|
| 42 |
+
|
| 43 |
+
1. Split the input positive strings in the set $P$ into multiple partitions $P_1, P_2, \ldots, P_S$, where $S$ is the number of splits, using the NeuralSplitter model.
|
| 44 |
+
|
| 45 |
+
2. Run the regex synthesis engine for each $P_i$ for $1 \le i \le S$ with the set $N$ of negative strings. We will explain in more detail about how to use the set $N$ of negative strings for final regex synthesis.
|
| 46 |
+
|
| 47 |
+
3. Concatenate the obtained subregexes $R_1,R_2,\ldots, R_S$ such that $P_i \subseteq L(R_i)$ to produce the final regex $R$.
|
| 48 |
+
|
| 49 |
+
In the first step, NeuralSplitter splits positive examples into the same number of substrings for 'dividing' the entire regex synthesis problem into less complex subregex synthesis problems. In order to train the NeuralSplitter, we generate target labels for positive examples. For instance, when we are given $P = \{ {\sf aabbccabca, abcbbc, bbbcabb, aabbbc}\}$ and $R = {\sf a^*b^*c^*.^*}$, we obtain ground truth labels for the four positive examples in $P$ as follows: $\{{\sf 1122330000, 123000, 2223000, 112223}\}.$ The digits ${\sf 1}$, ${\sf 2}$, and ${\sf 3}$ in the ground truth labels imply that the corresponding symbols are generated from the first, second, and third subregex of $R$: ${\sf a^*}$, ${\sf b^*}$, and ${\sf c^*}$, respectively. Note that the output label for symbols generated from the wildcard pattern regex ${\sf .^*}$ (capturing zero or more of any characters) is always ${\sf 0}$ regardless of the position of the corresponding subregex.
|
| 50 |
+
|
| 51 |
+
Note that we cannot split negative examples just as we do for positive examples using the NeuralSplitter model as there may be another split (not produced by the NeuralSplitter) that results in negative strings recognized by the synthesized regex. For instance, consider a simple case when given $P = \{ {\sf abbaaa, abaaa, aaa}\}$ and $N = \{ {\sf aaaa} \}$. The NeuralSplitter may simply predict that substrings with the same repeated symbols should be grouped to form a subregex. Then, the first part has positive examples $P_1 = \{ {\sf a} \}$ and $N_1 = \{ {\sf \lambda }\}$, the second part has $P_2 = \{ {\sf bb, b, \lambda} \}$ and $N_2 = \{ {\sf aaaa}\}$, and finally the last part has $P_3 = \{ {\sf aa, aaa }\}$ and $N_3 = \{ {\sf \lambda}\}$. It is possible that the synthesis algorithm generates ${\sf ab^*aaa^?}$, which accepts the negative example. Note that there is no other split admitting a regex satisfying both $P$ and $N$.
|
| 52 |
+
|
| 53 |
+
We resolve this problem by introducing the concept of *prefix-conditioned subregex synthesis*. The basic idea of our approach is to first synthesize $n-1$ subregexes once the NeuralSplitter produces $n$ splits ($P_1, P_2, \ldots , P_n$) from $P$. Then, we concatenate the resulting $n-1$ regexes to be $R_1R_2 \cdots R_{n-1}$ and use this regex to obtain the final regex from the last split sample $P_n$ that satisfies all prefix subregexes as well as the negative samples. Namely, for each regex $R_i$, we only consider the corresponding split examples $P_i$ from $P$. We use the negative samples to prevent a regex synthesis algorithm from returning a trivial regex such as the wildcard pattern regex ${\sf .^*}$. One may consider using the NeuralSplitter approach for the negative samples. Our empirical observation confirms that it causes a substantially larger amount of additional computations, and the performance worsens.
|
| 54 |
+
|
| 55 |
+
When there are common strings between the split positive examples $P_i$ and the negative examples $N$, we remove the common strings from $N$. If $P_i$ has only one example (singleton set), then we simply return the example as $R_i$. The proposed method generates a regex $R$ from a set $P$ of positive examples and a set $N$ of negative examples that satisfy the following conditions:
|
| 56 |
+
|
| 57 |
+
- $P_i \subseteq L(R_i)$ and $L(R_i) \cap (N \setminus P_i) = \emptyset$ for $1 \le i \le n-1$ (independent subregex synthesis).
|
| 58 |
+
|
| 59 |
+
- $L(R_1 R_2 \cdots R_n) \cap N = \emptyset$ (prefix-conditioned subregex synthesis).
|
| 60 |
+
|
| 61 |
+
- $L(R_i) = P_i$ if $|P_i| = 1$ for $1 \le i \le n$ (singleton subregex synthesis).
|
| 62 |
+
|
| 63 |
+
While the proposed method can generate a regex quickly by dividing a complex synthesis problem into several easier problems, one limitation is that it cannot generate the wildcard pattern regex, ${\sf .^*}$, as a subregex $R_i$ due to the negative examples. Because the wildcard pattern is common and handy in practice for representing complicated sequences, we treat the symbols generated from the wildcard pattern differently compared with the rest of the symbols---we assign a special label ${\sf 0}$ for the symbols in the data preprocessing step. For instance, if a string ${\sf abbccdd}$ is generated from ${\sf a^*b^*c^*d^*}$, then the label is ${\sf 1223344}$. However, if ${\sf abbccdd}$ is generated from ${\sf a^*.^*}$, then the label is ${\sf 1000000}$. When reading these special ${\sf 0}$ symbols, our framework immediately generates wildcard patterns without going through the prefix-conditioned subregex synthesis.
|
| 64 |
+
|
| 65 |
+
Since both AlphaRegex and Blue-Fringe synthesize regexes at the symbol level, they cannot be used for synthesizing raw practical regexes that cover potentially all ASCII characters. Therefore, we preprocess or exclude regexes as described below:
|
| 66 |
+
|
| 67 |
+
- Regex simplification: raw examples generated from practical regexes are typically very long. In order to effectively reduce the lengths of examples for faster regex synthesis, we replace substrings from regexes with special reserved symbols (let us denote by '[A]{.sans-serif}'--'[Z]{.sans-serif}') and control characters with '[!]{.sans-serif}'. For example, a regex [function:(public\|private\|protected)]{.sans-serif} can be preprocessed to [A!(B\|C\|D)]{.sans-serif} by replacing substrings [function]{.sans-serif}, [public]{.sans-serif}, [private]{.sans-serif} and [protected]{.sans-serif} by [A]{.sans-serif}, [B]{.sans-serif}, [C]{.sans-serif} and [D]{.sans-serif}, respectively. In practice, we can find common substrings used throughout the given positive examples to simplify them and synthesize a regex in the simplified form.
|
| 68 |
+
|
| 69 |
+
- Backreference: we do not consider backreference for regex synthesis as some baseline approaches such as AlphaRegex and Blue-Fringe algorithm do not support regex features that capture non-regular languages.
|
| 70 |
+
|
| 71 |
+
- Lookahead/lookbehind: while regexes with these features still only capture regular languages, we do not consider these features as they require the regex engine to perform backtracking that causes catastrophic runtime in some extreme cases.
|
| 72 |
+
|
| 73 |
+
- Quantifier with exact count '${\sf \{n\}}$' or range '${\sf \{n, m\}}$': we replace numerical quantifiers with '${\sf *}$' to 1) reduce the search space and 2) control the length of randomly generated strings.
|
| 74 |
+
|
| 75 |
+
- Negated class: A simple negated class $[\ \hat{}x]^*$ generates all strings except for $x$ and, then, the model outputs $.^*$ unless a specific negative input containing $x$ is provided. If provided, then the learning is very trivial because the negative set is $\{x\}$ only. Thus, we do not consider negated classes in our study.
|
| 76 |
+
|
| 77 |
+
For each regex, we randomly generate 20 positive and 20 negative examples up to a certain length (10 for the random dataset and 15 for the other datasets). In order to generate positive examples, we utilize a Python library called Xeger.[^1] For generating negative examples, we first generate positive examples and randomly substitute symbols with other symbols to make the examples outside the language represented by the regex. Then, we use Python library pyre2, which is a Python wrapper for Google's RE2 regex library, to perform matchings of perturbed examples against the regex. We find this is effective compared to the complete random generation of negative examples.
|
| 78 |
+
|
| 79 |
+
In order to guarantee that every regular expression has 20 positive and 20 negative examples, we exclude regular expressions that do not generate more than 20 positive strings (e.g., regexes without quantifiers such as $*$ and $+$). From the generated 40 strings, we use 10 positive/negative strings for generating regexes and use the remaining strings for measuring the semantic accuracy of synthesized regexes by membership queries. We utilize the fullmatch method of pyre2 to find subregexes from the whole regex that match corresponding substrings from given positive strings.
|
| 80 |
+
|
| 81 |
+
We introduce a neural network called the *NeuralSplitter* that is trained to split positive examples generated from each regex. For a set $P = \{p_1, p_2, \ldots, p_n \}$ of positive examples, where $p_i \in \Sigma^*$ for $1 \le i \le n$, we embed each string $p_i$ with an RNN encoder into $d$ dimensional space as follows: $h_i = {\rm RNN}_{\sf enc1}(p_i),$ where $h_i \in \mathbb{R}^{d}$. Then, we embed the set $P$ of positive examples with the second RNN encoder ${\rm RNN}_{\sf enc2}$ as follows: $h_P = {\rm RNN}_{\sf enc2}(h_1, h_2, \ldots, h_n),$ where $h_P \in \mathbb{R}^{d}$.
|
| 82 |
+
|
| 83 |
+
From the hidden vectors $h_i$ for $1 \le i \le n$ and $h_P$, the RNN decoder is trained to produce the ground truth label for each positive string $p_i$ as follows: $$o_1, o_2, \ldots, o_{l_i} = {\rm RNN}_{\sf dec}(W_{\sf dec}^\top \cdot [h_i;h_P]),$$ where $l_i = |p_i|$, $o_j \in \mathbb{R}^d$ for $1 \le j \le l_i$, $W_{\sf dec} \in \mathbb{R}^{2d \times d}$.
|
| 84 |
+
|
| 85 |
+
Finally, we retrieve the predicted label for each positive example $p_i$ as follows:$y_j = {\rm softmax}(W_{\sf output}^\top \cdot o_j),$ where $y_j \in \mathbb{R}^{|V|}$, $W_{\sf output} \in \mathbb{R}^{d \times |V|}$, and $V$ is the output vocab. Note that $y_j$ is the predicted probability distribution for the label of $j$th symbol of the positive example $p_i$.
|
| 86 |
+
|
| 87 |
+
During the decoding process, the RNN decoder exploits the attention mechanism to better point out the relevant parts from the entire set of positive strings. Since the split label for a positive string relies on the information from all positive strings, we implement a two-step attention mechanism that enables our model to attend at both the set level and the example level. For instance, when we have two strings ${\sf aabbbc}$ and ${\sf aabbcc}$, the most probable labels for these two strings are ${\sf 112223}$ and ${\sf 112233}$. However, if the second string is ${\sf caabbcc}$, where only the first symbol ${\sf c}$ is appended in front of ${\sf aabbcc}$, then the most probable labels for two strings become ${\sf 223334}$ and ${\sf 1223344}$. For this reason, we need to use the information of all given positive strings to correctly predict the output labels.
|
| 88 |
+
|
| 89 |
+
We use bi-directional GRU network for RNN encoders and decoder of NeuralSplitter. Cross entropy loss is used for training the neural network.
|
2205.11894/main_diagram/main_diagram.drawio
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2205.11894/paper_text/intro_method.md
ADDED
|
@@ -0,0 +1,213 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Introduction
|
| 2 |
+
|
| 3 |
+
A broad spectrum of dynamical systems consists of multiple interacting objects. Since their interplay is typically *a priori* unknown, learning interaction dynamics of objects from data has become an emerging field in dynamical systems [1, 2, 3]. The ever-growing interest in interaction modeling is due to the diversity of real-world applications such as autonomous driving [4], physical simulators [5], and human-robot interactions [6]. Standard time-series algorithms or deep learning approaches (e.g. recurrent neural networks), that have been designed for single-object systems, do not scale to a large number of interacting objects since they do not exploit the structural information of the data.
|
| 4 |
+
|
| 5 |
+
In recent years, graph neural networks (GNNs) have emerged as a promising tool for interactive systems, where objects are represented as graph nodes. State-of-the-art methods learn interactions by sending messages between objects in form of multi-layer perceptrons [1] or attention modules [7]. These methods yield highly flexible function approximators that achieve accurate predictions when trained on large-scale datasets. However, their predictions come without calibrated uncertainties, hindering their reliable implementation for uncertainty-aware applications.
|
| 6 |
+
|
| 7 |
+
In contrast, Gaussian processes (GPs) are well-known for providing calibrated uncertainty estimates. They have been successfully employed on discrete time-series data [8, 9, 10] and, more recently,
|
| 8 |
+
|
| 9 |
+
<sup>∗</sup>Work performed during internship at Bosch Center for Artificial Intelligence
|
| 10 |
+
|
| 11 |
+

|
| 12 |
+
|
| 13 |
+
Figure 1: An overview of our predictive model. (*Top-left*) An input bouncing balls sequence with four balls, which move independently other than collision (interaction) times. An encoder is used to extract initial values and global latents. (*Bottom-left*) The differential function is formed by sampling from the GP posteriors on the independent kinematics and interaction functions. (*Middle&right*) Given the samples, predicted trajectories are computed using the forward integration of the differential function.
|
| 14 |
+
|
| 15 |
+
to continuous-time generalizations of these methods [11, 12, 13]. Importantly, none of these works addresses dynamical models for interacting systems. While it is possible to study each object in isolation, ignoring the interaction effects might lead to inaccurate predictions.
|
| 16 |
+
|
| 17 |
+
In this work, we address the shortcomings of both model families by presenting an uncertainty-aware continuous-time dynamical model for interacting objects. Our formulation decomposes the dynamics into independent (autonomous) and interaction dynamics. While the former governs the motion of an object in isolation, the latter describes the effects that result from interactions with neighboring objects. For successful uncertainty characterization, we propose to infer the unknown independent and interaction dynamics by two distinct GPs. We demonstrate that having a function-level GP prior on the individual dynamics components is the key to successfully disentangling these dynamics, which in turn allows for interpretable predictions and leads to improved extrapolation behavior.
|
| 18 |
+
|
| 19 |
+
We employ latent Gaussian process ordinary differential equations (GP-ODEs) for dynamics learning, allowing us to learn complex relationships between interacting objects without requiring access to fully observed systems. Thanks to the recently proposed decoupled sampling scheme [14], the computational complexity of our model scales linearly with the number of time points at which the ODE system is evaluated. As a result, our algorithm scales gracefully to datasets with thousands of sequences. To demonstrate the benefits of our framework, we exhaustively test our method on a wide range of scenarios varying in function complexity, signal-to-noise ratio, and system observability. Our model consistently outperforms non-interacting dynamical systems and alternative function approximators such as deterministic/Bayesian neural networks.
|
| 20 |
+
|
| 21 |
+
In this section, we give background on continuous-time systems and Gaussian processes. Both together form the backbone of our uncertainty-aware framework for interactive dynamical systems.
|
| 22 |
+
|
| 23 |
+
Continuous-time dynamical systems are often expressed using differential functions $\dot{\mathbf{x}}(t) \equiv \frac{d}{dt}\mathbf{x}(t) \equiv \mathbf{f}(\mathbf{x}(t))$, where $\mathbf{x}(t) \in \mathbb{R}^D$ represents the state of an ODE system at time $t$ and $\mathbf{f} : \mathbb{R}^D \mapsto \mathbb{R}^D$ is the *time differential function* governing the dynamics evolution. The state solution $\mathbf{x}(t_1)$ at an arbitrary time $t_1$ is characterized by the initial value at time point $t_0$ and the differential function:
|
| 24 |
+
|
| 25 |
+
$$\mathbf{x}(t_1) = \mathbf{x}(t_0) + \int_{t_0}^{t_1} \mathbf{f}(\mathbf{x}(\tau)) d\tau.$$
|
| 26 |
+
|
| 27 |
+
Existing work aims to approximate the unknown differential by Gaussian processes [11, 15] or neural networks [16]. These methods have shown to accurately capture the dynamics and outperform
|
| 28 |
+
|
| 29 |
+
their discrete-time counterparts in a wide range of applications such as time series forecasting [17], classification [18] or reinforcement learning [19]. Furthermore, ODE models allow to easily inject domain knowledge into the system, enabling interpretable and flexible hybrid models [20, 21, 22].
|
| 30 |
+
|
| 31 |
+
Gaussian processes (GPs) define priors over functions [23]:
|
| 32 |
+
|
| 33 |
+
$$f(\mathbf{x}) \sim \mathcal{GP}(\mu(\mathbf{x}), k(\mathbf{x}, \mathbf{x}')),$$
|
| 34 |
+
|
| 35 |
+
where $f: \mathbb{R}^D \to \mathbb{R}$ maps D-dimensional inputs into one-dimensional outputs. GPs are fully specified in terms of their mean and their covariance:
|
| 36 |
+
|
| 37 |
+
$$\mathbb{E}[f(\mathbf{x})] = \mu(\mathbf{x}), \qquad \operatorname{cov}[f(\mathbf{x}), f(\mathbf{x}')] = k(\mathbf{x}, \mathbf{x}'),$$
|
| 38 |
+
|
| 39 |
+
where $\mu: \mathbb{R}^D \to \mathbb{R}$ is the mean and $k: \mathbb{R}^D \times \mathbb{R}^D \to \mathbb{R}$ is the kernel function. GPs can be treated as an extension of a multivariate normal distribution to infinitely many dimensions, where any fixed set of inputs $\mathbf{X} \in \mathbb{R}^{N \times D}$ follows the Gaussian distribution
|
| 40 |
+
|
| 41 |
+
$$p(\mathbf{f}) = \mathcal{N}(\mathbf{f} \mid \boldsymbol{\mu}_{\mathbf{X}}, \mathbf{K}_{\mathbf{X}\mathbf{X}}), \tag{1}$$
|
| 42 |
+
|
| 43 |
+
where the mean function $\mu_{\mathbf{X}}$ is evaluated at inputs $\mathbf{X}$ , and $\mathbf{K}_{\mathbf{X}\mathbf{X}}$ is the kernel function evaluated at all input pairs in $\mathbf{X}$ . While GPs provide a natural mechanism to handle uncertainties, their computational complexity grows cubically with the number of inputs. This problem is often tackled by *sparse GPs*, which rely on augmenting the GP with M inducing inputs $\mathbf{Z} = [\mathbf{z}_1^T, \dots, \mathbf{z}_M^T]^T \in \mathbb{R}^{M \times D}$ and corresponding output variables $\mathbf{u} = [u_1, \dots, u_M]^T \in \mathbb{R}^{M \times 1}$ with $u_m \equiv f(\mathbf{z}_m)$ [24, 25]. Assuming the commonly used zero-mean prior, the conditional distribution over $f(\mathbf{X})$ follows the GP:
|
| 44 |
+
|
| 45 |
+
$$p(\mathbf{f} \mid \mathbf{u}) = \mathcal{N}(\mathbf{f} \mid \mathbf{K}_{\mathbf{XZ}} \mathbf{K}_{\mathbf{ZZ}}^{-1} \mathbf{u}, \ \mathbf{K}_{\mathbf{XX}} - \mathbf{K}_{\mathbf{XZ}} \mathbf{K}_{\mathbf{ZZ}}^{-1} \mathbf{K}_{\mathbf{ZX}}), \tag{2}$$
|
| 46 |
+
|
| 47 |
+
where $K_{ZZ}$ is the covariance between all inducing points Z, and $K_{XZ}$ between the input points X and the inducing points Z. The inducing points can thereby be interpreted as a compressed version of the training data in which the number of inducing points M acts as a trade-off parameter between the goodness of the approximation and scalability.
|
| 48 |
+
|
| 49 |
+
In this work, we employ the squared exponential kernel $k(\mathbf{x},\mathbf{x}') = \sigma^2 \exp\left(-\frac{1}{2}\sum_{d=1}^D \frac{(x_d-x_d')^2}{\ell_d^2}\right)$ , where $x_d$ denotes the d-th entry of the input $\mathbf{x}$ , $\sigma^2$ is the output variance and $\ell_d$ is the dimension-wise lengthscale parameter.
|
| 50 |
+
|
| 51 |
+
In Sec. 3.1, we describe our continuous-time formulation for systems of interacting objects. It decomposes the dynamics into independent kinematics and an interaction component that takes the interactions to neighboring objects into account. Placing a GP prior over the individual components is essential in order to arrive at (i) calibrated uncertainty estimates and (ii) disentangled representations as we later on also verify in our experiments. In Sec. 3.2, we embed the GP dynamics into a latent space that can accommodate missing static or dynamic information. Both together allow the application of our continuous-time formulation to a wide range of scenarios and allow for learning interpretable dynamics. We conclude this section by our variational inference framework (Sec. 3.3) based on sampling functions from GP posteriors.
|
| 52 |
+
|
| 53 |
+
We assume a dataset of P sequences $\mathcal{Y} = \{\mathbb{Y}_1, \dots, \mathbb{Y}_P\}$ , where each sequence $\mathbb{Y} \equiv \mathbf{Y}_{1:N} \equiv \mathbf{y}_{1:N}^{1:A}$ is composed of measurements of A objects at time points $\mathcal{T} = \{t_1, \dots, t_N\}$ . Without loss of generality, we assume that the measurement $\mathbf{y}^a(t_n) \equiv \mathbf{y}_n^a \in \mathbb{R}^O$ is related to the physical properties of object a, such as position and velocity, which can routinely be measured by standard sensors. The *dynamic state* of object a at any arbitrary time t is denoted by a latent vector $\mathbf{h}^a(t) \in \mathbb{R}^D$ , which does not necessarily live in the same space as the observations. We furthermore assume that each object a is associated with a global feature vector $\mathbf{c}^a \in \mathbb{R}^C$ , which corresponds to the static attributes that remain constant over time. Finally, we denote the concatenation of all states by $\mathbf{H}(t) = [\mathbf{h}^1(t), \dots, \mathbf{h}^A(t)] \in \mathbb{R}^{A \times D}$ and all globals by $\mathbf{C} = [\mathbf{c}^1, \dots, \mathbf{c}^A] \in \mathbb{R}^{A \times C}$ .
|
| 54 |
+
|
| 55 |
+
In the following, we propose to disentangle the complex continuous-time dynamics into *independent kinematics* and *interaction* differentials. More concretely, we introduce the following dynamics:
|
| 56 |
+
|
| 57 |
+
$$\frac{d}{dt}\mathbf{H}(t) = \left[\frac{d}{dt}\mathbf{h}^{1}(t), \dots, \frac{d}{dt}\mathbf{h}^{A}(t)\right],\tag{3}$$
|
| 58 |
+
|
| 59 |
+
$$\frac{d}{dt}\mathbf{h}^{a}(t) = \mathbf{f}_{s}\left(\mathbf{h}^{a}(t), \mathbf{c}^{a}\right) + \sum_{a' \in \mathcal{N}_{a}} \mathbf{f}_{b}\left(\mathbf{h}^{a}(t), \mathbf{h}^{a'}(t), \mathbf{c}^{a}, \mathbf{c}^{a'}\right),\tag{4}$$
|
| 60 |
+
|
| 61 |
+
where $\mathcal{N}_a$ denotes the set of neighbors of object a in a given graph. The first function $\mathbf{f}_s: \mathbb{R}^{D+C} \mapsto \mathbb{R}^D$ models the independent (autonomous) effects, which specifies how the object would behave without any interactions. The second function $\mathbf{f}_b: \mathbb{R}^{2D+2C} \mapsto \mathbb{R}^D$ models the *interactions* by accumulating messages coming from all neighboring objects. Since message accumulation is the *de-facto* choice in interaction modeling [2, 26], the additive form of the differential function is a very generic inductive bias.
|
| 62 |
+
|
| 63 |
+
Our formulation models interactions between pairs of objects explicitly via the differential equation (Eq. (5)). Higher-order interactions are taken into account via the continuous-time formulation that allows information to propagate through the complete graph via local messages over time such that the state of the object $\mathbf{h}_n^a$ can also depend on objects that are not directly connected in the graph. In contrast to discrete formulations, for which the message passing speed is limited by the sampling rate, our continuous-time formulation enjoys instant propagation of information across objects. Finally, please see Section for an investigation of our interaction component under a kernel perspective.
|
| 64 |
+
|
| 65 |
+
**Remark-1** In Sec A.1 we demonstrate two straightforward extensions of our formulation with non-linear message accumulation, which we empirically show to have no gain over our formulation.
|
| 66 |
+
|
| 67 |
+
**Remark-2** Previous GP-based ODE methods [11, 12, 15] assume a black-box approximation to the unknown system $\frac{d}{dt}\mathbf{H}(t) = \mathbf{f}(\mathbf{H}(t))$ whereas our state representation gracefully scales to a varying number of objects and also includes global features.
|
| 68 |
+
|
| 69 |
+
Real-world data of interactive systems necessitates embedding the dynamics into a latent space in order to allow for missing information; the observations may contain only partial estimates of the states and the globals **C** might not be observed at all. We account for those circumstances by treating the states **H** and the globals **C** as latent variables, leading to following generative model (see Figure 1):
|
| 70 |
+
|
| 71 |
+
$$\mathbf{h}_{1}^{a} \sim \mathcal{N}(\mathbf{0}, \mathbf{I}), \quad \mathbf{c}^{a} \sim \mathcal{N}(\mathbf{0}, \mathbf{I}),$$
|
| 72 |
+
|
| 73 |
+
$$\mathbf{f}_{s}(\cdot) \sim \mathcal{GP}(\mathbf{0}, k_{s}(\cdot, \cdot)), \quad \mathbf{f}_{b}(\cdot) \sim \mathcal{GP}(\mathbf{0}, k_{b}(\cdot, \cdot)),$$
|
| 74 |
+
|
| 75 |
+
$$\mathbf{h}_{n}^{a} = \mathbf{h}_{1}^{a} + \int_{t_{1}}^{t_{n}} \mathbf{f}_{s} \left(\mathbf{h}^{a}(\tau), \mathbf{c}^{a}\right) + \sum_{a' \in \mathcal{N}_{a}} \mathbf{f}_{b} \left(\mathbf{h}^{a}(\tau), \mathbf{h}^{a'}(\tau), \mathbf{c}^{a}, \mathbf{c}^{a'}\right) d\tau,$$
|
| 76 |
+
|
| 77 |
+
$$\mathbf{y}_{n}^{a} \sim p(\mathbf{y}_{n}^{a} | \mathbf{h}_{n}^{a}),$$
|
| 78 |
+
(5)
|
| 79 |
+
|
| 80 |
+
where we introduced a standard Gaussian prior over the initial latent state, and assumed that the data likelihood decomposes across time and objects. We furthermore model unknown functions $\mathbf{f}_s$ and $\mathbf{f}_b$ under independent vector-valued GP priors.
|
| 81 |
+
|
| 82 |
+
In our experiments, we further set $p(\mathbf{y}_n^a|\mathbf{h}_n^a) = \mathcal{N}(\mathbf{y}_n^a|\mathbf{B}\mathbf{h}_n^a, \mathrm{diag}(\boldsymbol{\sigma}_e^2))$ , where $\mathbf{B} \in \mathbb{R}^{O \times D}$ maps from the latent to the observational space and $\boldsymbol{\sigma}_e^2 \in \mathbb{R}_+^O$ is the noise variance. We further fix $\mathbf{B} = [\mathbf{I}, \mathbf{0}]$ where $\mathbf{I} \in \mathbb{R}^{O \times O}, \mathbf{0} \in \mathbb{R}^{O \times (D-O)}$ , in order to arrive at an interpretable latent space in which the first dimensions correspond to the observables. This assumption is fairly standard in the GP state-space model literature since more complex emission models can be subsumed in the transition model without reducing the model complexity [27].
|
| 83 |
+
|
| 84 |
+
Modeling partially observed systems often leads to non-identifiability issues that hamper optimization and ultimately lead to deteriorated generalization performance. One way to counteract this behavior is to inject prior physical knowledge into the system by decomposing the state space of each object $\mathbf{h}^a(t) \equiv [\mathbf{s}^a(t), \mathbf{v}^a(t)]$ into position $\mathbf{s}^a(t)$ and velocity $\mathbf{v}^a(t)$ components [28]. Using elementary physics, the differential function has then the form of $\frac{d}{dt}\mathbf{h}^a(t) = [\mathbf{v}^a(t), \frac{d}{dt}\mathbf{v}^a(t)]$ with $\frac{d}{dt}\mathbf{v}^a(t) = \mathbf{f}_s(\mathbf{h}^a(t), \mathbf{c}^a) + \sum_{a' \in \mathcal{N}_a} \mathbf{f}_b(\mathbf{h}^a(t), \mathbf{h}^{a'}(t), \mathbf{c}^a, \mathbf{c}^{a'})$ .
|
| 85 |
+
|
| 86 |
+

|
| 87 |
+
|
| 88 |
+
Figure 2: Test predictions for I-GPODE (left) and I-NODE (right) on bouncing balls dataset. The solid curves are the groundtruth trajectories and the shaded regions denote the predicted 95% confidence intervals. I-GPODE (ours) yields better calibrated long-term predictions than I-NODE. Additional results can be found in Figure 5.
|
| 89 |
+
|
| 90 |
+
**Remark** Unlike previous work, our formulation incorporates global features c that modulate the dynamics. In many applications such as control engineering and reinforcement learning, the dynamics are modulated by external control signals [29] which can also be incorporated into our framework.
|
| 91 |
+
|
| 92 |
+
Next, we derive an efficient approximate inference scheme that provides a high level of accuracy and model flexibility. In the above formulation, the model unknowns are the initial values $\mathbf{H}_1 \equiv \mathbf{h}_1^{1:A}$ , global variables $\mathbf{C} = \mathbf{c}^{1:A}$ and the differentials $\mathbf{f}_s$ and $\mathbf{f}_b$ . Since the exact posterior $p(\mathbf{f}_s, \mathbf{f}_b, \mathbf{H}_1, \mathbf{C} | \mathcal{Y})$ in non-linear ODE models is intractable, we opt for stochastic variational inference [30]. We first describe the form of the approximate posterior and then discuss how to optimize its parameters.
|
| 93 |
+
|
| 94 |
+
**Variational family** Similarly to previous work [16], we resort to amortized inference for initial values and global variables, $\mathbf{H}_1 \sim q_\phi(\mathbf{H}_1|\mathbf{Y}_{1:N})$ and $\mathbf{C} \sim q_\rho(\mathbf{C}|\mathbf{Y}_{1:N})$ , where $\phi$ denotes the parameters of a neural network encoder that outputs a Gaussian distribution with diagonal covariance. For the unknown time differentials $\mathbf{f} = \{\mathbf{f}_s, \mathbf{f}_b\}$ , we follow the standard sparse GP approximation in which each output dimension $d \in [1, D]$ has its own independent set of inducing values $\mathbf{U}_{s,d}, \mathbf{U}_{b,d} \in \mathbb{R}^M$ and kernel output variances $\sigma_{s,d}^2, \sigma_{b,d}^2 \in \mathbb{R}_+$ . Inducing locations $\{\mathbf{Z}_s, \mathbf{Z}_b\}$ and lengthscales $\{\ell_s, \ell_b\}$ are shared across output dimensions. Denoting the collection of inducing points by $\mathbf{U} = \{\mathbf{U}_s, \mathbf{U}_b\}$ , we arrive at the mean-field variational posterior:
|
| 95 |
+
|
| 96 |
+
$$q(\mathbf{U}) = \prod_{d=1}^{D} \mathcal{N}(\mathbf{U}_{s,d} \mid \mathbf{m}_{s,d}, \mathbf{S}_{s,d}) \mathcal{N}(\mathbf{U}_{b,d} \mid \mathbf{m}_{b,d}, \mathbf{S}_{b,d}),$$
|
| 97 |
+
|
| 98 |
+
where the means $\{\mathbf{m}_{s,d}, \mathbf{m}_{b,d}\}_{d=1}^{D}$ and the covariances $\{\mathbf{S}_{s,d}, \mathbf{S}_{b,d}\}_{d=1}^{D}$ are free variational parameters. Putting everything together, our variational approximation becomes as follows [31]:
|
| 99 |
+
|
| 100 |
+
$$q(\mathbf{H}_1, \mathbf{C}, \mathbf{f}, \mathbf{U}) \equiv q(\mathbf{H}_1)q(\mathbf{C})p(\mathbf{f}_s|\mathbf{U}_s)p(\mathbf{f}_b|\mathbf{U}_b)q(\mathbf{U}),$$
|
| 101 |
+
|
| 102 |
+
where $p(\mathbf{f}_s|\mathbf{U}_s)$ and $p(\mathbf{f}_b|\mathbf{U}_b)$ follow Eq. (2). Our variational family makes two assumptions that are fairly standard in the (deep) GP literature (e.g. [32]): (i) we apply the same independence assumptions in the approximate posterior as in the prior resulting in a mean-field solution, and (ii) we assume that the inducing outputs $\mathbf{U}$ capture the sufficient statistics of the training data allowing the use of the prior $p(\mathbf{f} \mid \mathbf{U})$ in the approximate posterior.
|
| 103 |
+
|
| 104 |
+
**Variational bound** We then seek to optimize the parameters of the approximate posterior q by maximizing a lower bound to the evidence [30]:
|
| 105 |
+
|
| 106 |
+
$$\log p(\mathcal{Y}) \geq \int q(\mathbf{H}_1, \mathbf{C}, \mathbf{f}, \mathbf{U}) \log \frac{p(\mathcal{Y}, \mathbf{H}_1, \mathbf{C}, \mathbf{f}, \mathbf{U})}{q(\mathbf{H}_1, \mathbf{C}, \mathbf{f}, \mathbf{U})} d\mathbf{H}_1 d\mathbf{C} d\mathbf{f} d\mathbf{U}.$$
|
| 107 |
+
|
| 108 |
+
In the following, we detail its computation for a single data instance $\mathbf{Y}_{1:N}$ , omitting its generalization to multiple sequences for the sake of better readability,
|
| 109 |
+
|
| 110 |
+
$$\log p(\mathbf{Y}_{1:N}) \ge \mathbb{E}_q \left[\log p(\mathbf{Y}_{1:N}|\mathbf{H}_1, \mathbf{C}, \mathbf{f}, \mathbf{U})\right] - \text{KL}[q(\mathbf{H}_1)||p(\mathbf{H}_1)] - \text{KL}[q(\mathbf{C})||p(\mathbf{C})] - \text{KL}[q(\mathbf{U})||p(\mathbf{U})], \tag{6}$$
|
| 111 |
+
|
| 112 |
+
where KL denotes the Kullback-Leibler divergence.
|
| 113 |
+
|
| 114 |
+
**Likelihood computation via decoupled sampling from GP posteriors** Computing the conditional log-likelihood $\log p(\mathbf{Y}_{1:N}|\mathbf{H}_1,\mathbf{C},\mathbf{f},\mathbf{U})$ entails a forward pass in time (Eq. (5)) which can be done with any standard ODE solver. The difficulty lies in marginalizing over the approximate posterior of the initial latent states $q(\mathbf{H}_1)$ , global variables $q(\mathbf{C})$ , and the GP functions $q(\mathbf{f},\mathbf{U})$ . Each marginalization step alone is already analytically intractable, let alone their combination. We therefore opt for Monte Carlo integration which gives us an unbiased estimate of the expected log-likelihood. We start by drawing L samples from the approximate posteriors
|
| 115 |
+
|
| 116 |
+
$$\mathbf{H}_{1}^{(l)} \sim q_{\phi}(\mathbf{H}_{1}|\mathbf{Y}_{1:N}), \quad \mathbf{C}^{(l)} \sim q_{\rho}(\mathbf{C}|\mathbf{Y}_{1:N}), \quad \mathbf{U}^{(l)} \sim q(\mathbf{U}), \quad \mathbf{f}^{(l)}(\cdot) \sim p(\mathbf{f}|\mathbf{U}),$$
|
| 117 |
+
(7)
|
| 118 |
+
|
| 119 |
+
where l denotes the sample index and $\mathbf{f}^{(l)}(\cdot)$ is a function drawn from the sparse GP posterior. Sampling from the GP posterior naively scales cubically with the number of data points. Moreover, since we do not know a-priori on which points the ODE solver evaluates the function, we would have to sequentially draw points from the posterior. While this can still be done cubically in time by performing low-rank updates, it often leads to numerical instabilities for small step sizes. To overcome this challenge, we resort to the decoupled sampling scheme proposed in [14], where we first draw the prior samples from a set of random Fourier features and then update them using Matheron's rule to obtain posterior samples. After having sampled the quadruple via Eq. (7), we can compute the trajectory $\mathbf{H}_{1:N}^{(l)}$ deterministically by forward integrating Eq. (5). The Monte Carlo estimate of the log-likelihood becomes
|
| 120 |
+
|
| 121 |
+
$$\mathbb{E}_{q}\left[\log p(\mathbf{Y}_{1:N}|\mathbf{H}_{1},\mathbf{C},\mathbf{f},\mathbf{U})\right] \approx \frac{1}{L} \sum_{l,n,a} \log p(\mathbf{y}_{n}^{a}|\mathbf{h}_{n}^{a^{(l)}}),$$
|
| 122 |
+
|
| 123 |
+
where the log-likelihood term decomposes between objects and between time points, enabling doubly stochastic variational inference [33].
|
| 124 |
+
|
| 125 |
+
**KL regularizer** The prior distributions over the inducing variables follow the Gaussian process $p(\mathbf{U}) = p(\mathbf{U}_s)p(\mathbf{U}_b)$ with $p(\mathbf{U}_s)$ and $p(\mathbf{U}_b)$ following the equivalent of Eq. (1). Since we furthermore assumed a standard Gaussian prior $p(\mathbf{H}_1) = \mathcal{N}(\mathbf{0}, \mathbf{I})$ and $p(\mathbf{C}) = \mathcal{N}(\mathbf{0}, \mathbf{I})$ with suitable dimensions for the initial values, all KL terms can be computed in closed form.
|
| 126 |
+
|
| 127 |
+
**Computational complexity** Evaluating the differential function costs $O(M^2(A+I))$ for a graph with A objects, I interactions and M inducing points. The term $O(M^2A)$ stems from the independent kinematics, the term $O(M^2I)$ from the interaction effects. As a consequence, forward integration using RK4 solver takes $O(TM^2(A+I))$ time, where T is the number of time points.
|
| 128 |
+
|
| 129 |
+
Our approach enhances the capabilities of the GPODE model family in the following three aspects:
|
| 130 |
+
|
| 131 |
+
Explicit modeling of interactions Standard GPODEs model interactions by allowing the time differential to take the whole state vector $\mathbf{H}(t)$ as input and to learn one independent GP for each latent dimension. This shared latent space assumption entails three major drawbacks: (i) obligation to fix the object count as a model hyperparameter, (ii) dependency of the learned model on a predefined ordering of the objects in the scene, (iii) inevitable growth of the latent dimensionality proportional to the object count. The latter sets a severe bottleneck especially for GP modeling as the performance of many kernel functions in widespread use is highly sensitive to input dimensionality. (For example, on the bouncing ball dataset with $N_a$ balls and D latent states per object, GPODE needs to learn a latent function with $N_aD$ -dimensional inputs and outputs.) In contrast, I-GPODE needs only to learn two functions, the independent kinematics $\mathbf{f}_s$ and the interaction function $\mathbf{f}_b$ , whose input sizes scale independently of $N_a$ . Our Table 2 indicates that learning interaction dynamics without the strong inductive bias of our model is difficult and the GPODE model chooses to stay at the prior instead, leading to deteriorated MSEs and ELLs.
|
| 132 |
+
|
| 133 |
+
**Disentangled representation** We infer object-specific latent variables that modulate the dynamics, which allows our model to disentangle the dynamics from static object properties (e.g. charge information). The interpretability and likely physical correspondence of the disentangled factors have the potential to facilitate the use of our approach in transfer learning and explainable AI applications.
|
| 134 |
+
|
| 135 |
+
**Inference of latent state dynamics** We perform the learning in a latent space, where the initial value of a trajectory is given by an encoder, leading to the following two advantages: First, the
|
| 136 |
+
|
| 137 |
+
Table 1: A taxonomy of the state-of-the-art ODE systems with NN/GP dynamics, with/without interactions, and with/without latent dynamics. These methods also form the baselines we compare with [neural ODE (NODE), Bayesian NODE (BNODE), latent dynamics (L), interacting (I)].
|
| 138 |
+
|
| 139 |
+
| Function uncertainty | Interacting | Latents | Reference idea | Abbreviation |
|
| 140 |
+
|----------------------|--------------|--------------|-------------------|--------------|
|
| 141 |
+
| X | X | X | Neural ODE [16] | NODE |
|
| 142 |
+
| X | X | ✓ | Latent NODE [17] | NODE-L |
|
| 143 |
+
| X | ✓ | X | GDE [34] | I-NODE |
|
| 144 |
+
| X | ✓ | ✓ | LG-ODE [35] | I-NODE-L |
|
| 145 |
+
| ✓ | X | X | Bayesian ODE [11] | GPODE |
|
| 146 |
+
| ✓ | X | ✓ | GP-SDE [13] | GPODE-L |
|
| 147 |
+
| ✓ | X | ✓ | $ODE^2VAE$ [28] | BNODE-L |
|
| 148 |
+
| ✓ | ✓ | X | Our work | I-GPODE |
|
| 149 |
+
| ✓ | $\checkmark$ | $\checkmark$ | Our work | I-GPODE-L |
|
| 150 |
+
|
| 151 |
+
dynamical system and the data points may live in different spaces, which facilitates learning from high-dimensional sequences. Second, Bayesian modeling of state dynamics on a latent space enables reliable quantification and principled treatment of sources of uncertainty, such as imprecision of modeling assumptions, approximation error, and measurement noise.
|
| 152 |
+
|
| 153 |
+
# Method
|
| 154 |
+
|
| 155 |
+
We consider three bouncing ball datasets with varying noise levels to reflect different levels of problem difficulty. To demonstrate the merits of our decomposed formulation in Eq. (4), we compare it against a standard, GP-based non-interacting dynamical model (GPODE) in which the time differential takes the whole state vector $\mathbf{H}(t)$ as input [11]. As shown in Table 2, our interaction model consistently outperforms its standard counterpart irrespective of the noise level and function approximator. We also
|
| 156 |
+
|
| 157 |
+
Table 2: Comparison of standard and interacting GP-ODE systems on bouncing ball datasets.
|
| 158 |
+
|
| 159 |
+
| Noise<br>Level | Model | MSE ↓ | ELL↑ |
|
| 160 |
+
|----------------|------------------|-----------------------------------------------------------------------------------|--------------------------------------|
|
| 161 |
+
| No<br>Noise | I-GPODE<br>GPODE | $\begin{array}{ c c c } 17.3 \pm 1.2 \\ 23.9 \pm 0.9 \end{array}$ | $-25.9 \pm 3.6$<br>$-597.6 \pm 28.5$ |
|
| 162 |
+
| Low<br>NOISE | I-GPODE<br>GPODE | $\begin{array}{ c c } \textbf{17.8} \pm \textbf{0.9} \\ 23.6 \pm 0.7 \end{array}$ | $-26.5 \pm 4.5$ $-580.8 \pm 19.8$ |
|
| 163 |
+
| HIGH<br>NOISE | I-GPODE<br>GPODE | $egin{array}{ c c c c c c c c c c c c c c c c c c c$ | $-29.4 \pm 1.1 \\ -569.3 \pm 27.5$ |
|
| 164 |
+
|
| 165 |
+
note that the results are consistent when we replace the GP approximation with deterministic and Bayesian neural networks (NNs), which indicates the robustness of the inductive bias (see Table 12).
|
| 166 |
+
|
| 167 |
+
Next, we compare our GP interaction model (I-GPODE) against neural network variants with/without uncertainty (I-NODE, I-BNODE), obtained by replacing GPs with NNs and Bayesian NNs. The findings in Table 3 strongly indicate that I-GPODE learns better calibrated uncertainties, measured by ELL, than I-BNODE and I-NODE (see Figure 2 and 5 for visual illustrations). In terms of MSE, we observe that I-NODE tends to attain the smallest values. However, increasing noise levels deteriorates the performance of I-NODE and eventually the results become comparable to I-GPODE. The results stay consistent
|
| 168 |
+
|
| 169 |
+
Table 3: Comparison of our GP approximation against alternative function approximators.
|
| 170 |
+
|
| 171 |
+
| NOISE<br>LEVEL | MODEL | MSE ↓ | ELL ↑ |
|
| 172 |
+
|----------------|------------------------------|----------------------------------------|----------------------------------------------|
|
| 173 |
+
| NO<br>NOISE | I-GPODE<br>I-NODE<br>I-BNODE | 17.3 ± 1.2<br>8.5 ± 0.9<br>20.4 ± 0.6 | −25.9 ± 3.6<br>−61.9 ± 8.6<br>−37.8 ± 4.2 |
|
| 174 |
+
| LOW<br>NOISE | I-GPODE<br>I-NODE<br>I-BNODE | 17.8 ± 0.9<br>13.3 ± 0.9<br>21.0 ± 0.8 | −26.5 ± 4.5<br>−96.2 ± 12.5<br>−36.4 ± 2.8 |
|
| 175 |
+
| HIGH<br>NOISE | I-GPODE<br>I-NODE<br>I-BNODE | 18.4 ± 0.9<br>15.6 ± 1.5<br>20.8 ± 0.7 | −29.4 ± 1.1<br>−128.9 ± 23.2<br>−47.6 ± 10.5 |
|
| 176 |
+
|
| 177 |
+
when we repeat the experiment with two balls (see Table 11).
|
| 178 |
+
|
| 179 |
+
In the following, we study whether the estimated functions can disentangle independent kinematics from interaction effects. We train I-GPODE, I-NODE and I-BNODE on a dataset with three balls and evaluate the trained independent dynamics function $\mathbf{f}_s$ on a test dataset with one ball (since the test dataset incorporates a single object, the dynamics do not involve the interaction function $\mathbf{f}_b$ ). Three test sequences as well as the independent dynamics function predictions are illustrated in Figure 3 (see Table 9
|
| 180 |
+
|
| 181 |
+

|
| 182 |
+
|
| 183 |
+
Figure 3: Disentanglement between independent kinematics and interactions. Our model variants are trained on a bouncing ball dataset with three balls and tested with a single ball using the independent dynamics function $\mathbf{f}_s$ only. We show the mean predictions with circles darkened as time lapses.
|
| 184 |
+
|
| 185 |
+
for a quantitative comparison and Figure 4 for additional illustrations). We observe that I-NODE predictions tend to deviate from the test trajectory more quickly compared to I-GPODE predictions. We conjecture that this behaviour is because neural networks are overflexible and thus the learned functions may not necessarily decompose independent kinematics from interaction effects whereas the function-level regularization of I-GPODE helps with disentanglement.
|
| 186 |
+
|
| 187 |
+
In our last bouncing ball experiment, we move to a setting in which the velocities are no longer observed. First, we keep the velocities as latent states and contrast two variants of our model, i.e. with structured latent space (I-GPODE-L-S) and with unstructured latent space (I-GPODE-L). As shown in Table 4, injecting strong prior knowledge helps in this challenging setting in which the first order model clearly fails (see also Table 13 for training results). Finally, we compare I-GPODE-L-S with I-GPODE, which drops the velocity component from the latent space (hence learning the dynamics
|
| 188 |
+
|
| 189 |
+
Table 4: Comparison of I-GPODE variants on a bouncing ball dataset without velocity observations. The suffixes "-L" and "-S" stand for latent variable model and structured state space.
|
| 190 |
+
|
| 191 |
+
| NOISE | MODEL | MSE ↓ | ELL ↑ |
|
| 192 |
+
|---------------|-------------------------------------|------------------------------------------|--------------------------------------|
|
| 193 |
+
| NO<br>NOISE | I-GPODE<br>I-GPODE-L<br>I-GPODE-L-S | 24.1 ± 1.2<br>23.3 ± 0.8<br>18.2 ± 1.0 | −300 ± 23<br>−176 ± 301<br>−33 ± 2 |
|
| 194 |
+
| LOW<br>NOISE | I-GPODE<br>I-GPODE-L<br>I-GPODE-L-S | 23.3 ± 0.8<br>47.0 ± 47.4<br>20.5 ± 0.9 | −239 ± 11<br>−761 ± 922<br>−76 ± 22 |
|
| 195 |
+
| HIGH<br>NOISE | I-GPODE<br>I-GPODE-L<br>I-GPODE-L-S | 23.8 ± 0.7<br>119.3 ± 94.3<br>21.8 ± 0.7 | −254 ± 16<br>−920 ± 982<br>−115 ± 19 |
|
| 196 |
+
|
| 197 |
+
in the data space). As demonstrated in Table 4, I-GPODE is clearly outperformed by I-GPODE-L-S. It can thus be suggested that our latent variable construction is necessary in the presence of missing states.
|
| 198 |
+
|
| 199 |
+
In the final part of the experiments, we consider a more challenging dataset of charged particles. Since the dynamics are modulated by unknown charges, we turn to our global latent variable formulation. In other words, our learning task becomes simultaneously inferring the functions $\mathbf{f}_s$ and $\mathbf{f}_b$ , the initial latent states $\mathbf{H}_1$ , as well as a latent variable $\mathbf{c}^a \in \mathbb{R}$ associated with each observed trajectory $\mathbf{y}_{1:N}^a$ . To form the upper and lower performance bounds, we include two baselines in which the charges are either observed or completely dropped from the model description. The results are shown in Table 5. We notice that the structured
|
| 200 |
+
|
| 201 |
+
|
| 202 |
+
|
| 203 |
+
Table 5: Comparison of interacting GPODE and NODE systems on the charges dataset (no noise).
|
| 204 |
+
|
| 205 |
+
| | MODEL | MSE↓ | ELL ↑ |
|
| 206 |
+
|------------------------------|-----------------------|----------------------------------|--------------------------------|
|
| 207 |
+
| WITHOUT<br>GLOBAL<br>LATENTS | I-GPODE<br>I-NODE | $19.2 \pm 0.9$<br>$14.1 \pm 0.8$ | $-171 \pm 7$<br>$-460 \pm 36$ |
|
| 208 |
+
| WITH<br>GLOBAL<br>LATENTS | I-GPODE<br>I-NODE | $15.4 \pm 2.3$<br>$9.9 \pm 0.6$ | $-172 \pm 88$<br>$-282 \pm 14$ |
|
| 209 |
+
| | I-GPODE-S<br>I-NODE-S | $12.9 \pm 2.1$<br>$9.7 \pm 0.2$ | $-177 \pm 60$ $-282 \pm 8$ |
|
| 210 |
+
| GLOBALS<br>OBSERVED | I-GPODE<br>I-NODE | $10.7 \pm 1.1$<br>$7.5 \pm 1.2$ | $-97 \pm 9$<br>$-148 \pm 27$ |
|
| 211 |
+
| | | | |
|
| 212 |
+
|
| 213 |
+
state space formulation boosts the performance of I-GPODE. However, the effect is less pronounced compared to the previous setting in which dynamic information is missing. See Table 14 for more results with different global variable encoders.
|
2206.01299/main_diagram/main_diagram.drawio
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
<mxfile host="app.diagrams.net" modified="2022-05-13T21:35:32.204Z" agent="5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/100.0.4896.127 Safari/537.36" etag="g8CxFuvlBfqyZcdZGPE2" version="18.0.3" type="google"><diagram id="ngF9ZonbGfZ6tKLtqhvf" name="Page-1">7Z1dc5s4FIZ/jS+bQV8gLptk0+3Mdqcz3ZltL6lRbKbYZDFukv31KwyyQcI29gJHKTQXNQJko+flSOfoa0buVi8f0uBp+SkJRTzDTvgyI/czjDFysPwvT3ktUhDiZcoijcIy7ZDwJfpXlIlOmbqNQrGpXZglSZxFT/XEebJei3lWSwvSNHmuX/aYxPVvfQoW5Tc6h4Qv8yAWxmV/R2G2LFIJcSqX/y6ixbL8aury8pZVoK4uL90sgzB5riSR32bkLk2SrPi0erkTcV58qmCK+x6OnN3/slSsszY3LO/vPrIlWfq320j8uf4Qsfn8necX2fwM4m35yOWvzV5VGaTJdh2KPBdnRm6fl1EmvjwF8/zss8Qu05bZKpZHSH58TNZZiRFxebzJ0uSHuEviJJVJ62Qt9omqQHF+WxTH6qIZJgKFTHgyPQw2y91Xo/Lgc5BlIl3vUrCTp5YPINJMvBwtGrQvcKlVkaxElr7KS8obuINvPFbcpZTql9ieD9iRVyQtK8DlQxZiK4W22Gd+QCE/lDQuIMOdiYwk41pIBk1kMPYxvsG2kcETGUmGU8uwkAlL3vahyDIudOIiz+7vsYYLm7jIs8SxjYs7ccm5cMu4eJMdk2c8bDSVMYMmM1mynAy3kMxky+QZTg0nBp4Mn8hIJ8ahdmHxJ68/P4sJt4zL5PPvGmXINi6T07/j4mtcqA/MZWos52epZxkXb6pf5BmXkhoW4kBjmaoXecZzbMMy1S45FhffMMvAeBMY6VdipvuV4GT8KRaza5AR27hMkZhdgwzZxmWyZDsuvm1cpvhYfpZ5lnEx/ZeHGXbjLC+87fcaH/efbT7qbVfy7za7on8vL8D06WVXOuq8/LTI/3dURvKH7fIqkk9gR+ex14kSk+jjo3Dnc0MQ8gy9z//q0sG0G7CYOnWu2DO5qjGFVbDqvs7Bmk3t2wns5WApsg2sGQnq7I1FIGClBeYhbQLr7v4N88ZilwGDNVv6nb2xYwKrv7HwYE1XobM3FsOADQR/BK9jsZqnAAbW9DU6e2PHBNZ4Y8HBms5K/Y0tYBCYdhCfC/gGLqINPZWDMjLnwHT28o0JrP7ywYNFLfrUxDp8n08Uk0ff42T+469ltK4Xdz2YoBecvP8hio+jkYWXvn7Nb75h6vBbefXu4P6lzLk4elVHL1H2tfL52yELeXS4KT9Q97S3wZw9cGePXYRqFtwx6LLIkm06FycKG5eNkyxIFyI7RaVZRRWVsAaRqLRUxEEW/az/3ibllN/wOYnkk+xFSjzNC1NhPpVF8ZzlXQf9GRm5rJ4RRVpGRTkYGe2EvH/s/6Ft/Atoe6/nb1U526dtNfX0nLQpqLQ1+0tVD9Sl0tYzIlzLqG9pt+jcnaTdkbRpS2m7NkmbMNyRtLGWUd/SbjH6apJ2R9Ju2yDhoNLGmiI9rTXcWtp6Rnqzum9ptxhf0Lu0r5Do4XW45GUAFTZvKexjDttAyiZ6M+JaZWtNbeQMrOwWIzSGM9pVlVYs+Bmjja53Ix9Z/ncymtChtts2tQmotPVOWvdKL9LIiAzsRbYY5DJJe+CmNgOVNjvj/F1rtQkb2ItsMU5okvbATW0PVNquFiBRvUMXS9vV2+wDe5EtVu2apD1wY9sHlbanx/6ubWsb8fFh29rY7LL5I9lsTHnHcfS0kQcXjO7Ep0eQnO9WU7rL86ykF/9mnXSi6
Q1CtXZSRUa8QUa9daEpy2KDV181GeikwTgW4nLOGKfW/ageJx6h3Roa3Nb1gbU0ei8vu9LSMD0jFRsdytLY0MvQGK/6BbXtv4n2oRZBRe7VlSi68RDzMPcYZsZ6SZjeIIoRYa68kjj+sH4RtqELYiy691rqHtblJ7rur/WL+Gndu6C6t7Z/4hfUPWupe6uiuMi9NtTln9a9D6p7s/fi4/ppmxniv3bS3Ft0q/Z9UWB+VY+DTuVvvrt8ZlUL6JbOrEL74SAnplYNSldVqD3NwAGiCzMLB2GmrdDnmjNdh6Vrxqi6nK0BRRdkxgZCTFuIqWEe87B0zYhXl9MBoCwzyJQA5LhaW6hhma1h6fY4hW5s9S7j2qpQDdWuOyhcczRDlzOaR1XtuioIcKLWHRbuual0U63bfuEwbSuKpkp3WLimO9QZ3LFVuhyfr3MHhUtNb6jTOvfyNQnebp1rLJAJXedS0xnqFC4eEVxjmU1wuKYv1Cncy6dHv2G4rm1wzQ7wTlvLMGYZprXsYctay7TfxZ2AzDIQXG4bXLMrs1O4MGYZBi6ntsHtd5EnKLMM4udyxzI/l/YYocJwZhkGrmsb3J4jVEBmGQSuj22D23OECiiIAROh0kfIQEeoWI8RKgIXxACB6+vbHIPD7TFCReCCGEAduvqmvOB0zRBVp931Y7LL5hbl0N31rMfFx8dmmM2NzsHpmkGqTumOyjIb26WD0+1xGfKxBTLMfVehh8kxM0zVKd0xRTLM3VvB6fa7IPmoQhnmHrDgdM1AVWd0x9bzZ2woBz48nfU4tWRsXX/GtnTgdF0zVNUp3TH1/Rmb28HT7XFqydiGyiGqpnLYMi1MDanuiy5QNAOGLkO20e13o7xRDZZDzLeN7gDLBxwv4qb116pLDRQ/5fzUfbPAh5lqzxSrY8uKtZ1qj1w9J+7Xc+p5Or1rxj2yaCUMLZx57y4RQuMWtPqrWl7UwbvH9NUNOfRGla4ZjbgXPyNZtFiNmK6UvHzyrMnIaSXVUHhBHC3yfXrnsqSETL/NyzGaB/H78sQqCsP8axqt6mmkXRhFfX0i2rAfodMAhvQGxgwk7MGY5nE0YLDT8MYMC8aMAezBoBGDUatbgoHxTPd9D8YZLxiiXDUwML75xsyT1Wq7loWWRcnaYDOGPeZVG1dVOGrliyqlplYj9S+mJA/TJPd5Do01WYrLT0ko8iv+Aw==</diagram></mxfile>
|
2206.01299/main_diagram/main_diagram.pdf
ADDED
|
Binary file (19.7 kB). View file
|
|
|
2206.01299/paper_text/intro_method.md
ADDED
|
@@ -0,0 +1,142 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Introduction
|
| 2 |
+
|
| 3 |
+
Recent efforts in improving communication efficiency for distributed learning have significantly decreased the dependency of training deep learning models on fast data center networks --- the *gradient* can be compressed to lower precision or sparsified [@alistarh2016qsgd; @zhang2017zipml; @bernstein2018signsgd; @wen2017terngrad], which speeds up training over low bandwidth networks, whereas the *communication topology* can be decentralized [@koloskova2019decentralized; @li2018pipe; @lian2017can; @lian2018asynchronous; @tang2018communication; @tang2018d], which speeds up training over high latency networks. Indeed, today's state-of-the-art training systems, such as Pytorch [@li13pytorch; @pytorchlightning], Horovod [@sergeev2018horovod], Bagua [@gan2021bagua], and BytePS [@jiang2020unified], already support many of these communication-efficient training paradigms.
|
| 4 |
+
|
| 5 |
+
However, with the rise of large foundation models [@DBLP:journals/corr/abs-2108-07258] (e.g., BERT [@devlin2018bert], GPT-3 [@brown2020language], and CLIP [@radford2021learning]), improving communication efficiency via compression becomes more challenging. Current training systems for foundation models such as Megatron [@shoeybi2019megatron], Deepspeed [@rasley2020deepspeed], and Fairscale [@baines2021fairscale], allocate different layers of the model onto multiple devices and need to communicate --- *in addition to* the gradients on the models --- the *activations* during the forward pass and the *gradients on the activations* during the backward pass. Compressing these *activations* leads to a very different behavior compared with compressing the gradient --- *simply compressing these activations in a stochastically unbiased way will lead to biases in the gradient that cannot be measured easily or expressed in closed form*. This either breaks the unbiasedness assumptions made by most gradient compression results [@alistarh2016qsgd; @zhang2017zipml; @bernstein2018signsgd; @wen2017terngrad] or makes error compensation over gradient biases [@tang2019doublesqueeze; @tang2019deepsqueeze] difficult to adopt.
|
| 6 |
+
|
| 7 |
+
<figure id="tab:my_label" data-latex-placement="!t">
|
| 8 |
+
<div class="minipage">
|
| 9 |
+
<div class="minipage">
|
| 10 |
+
<figure id="fig:intro" data-latex-placement="H">
|
| 11 |
+
<figure id="fig:intro_ppl">
|
| 12 |
+
<img src="imgs/intro_wiki_ppl" />
|
| 13 |
+
<figcaption>Fine-tune WikiText2</figcaption>
|
| 14 |
+
</figure>
|
| 15 |
+
<figure id="fig:intro_vars">
|
| 16 |
+
<img src="imgs/intro_abs_means" />
|
| 17 |
+
<figcaption>Activation and delta</figcaption>
|
| 18 |
+
</figure>
|
| 19 |
+
<figcaption>(a) Fine-tuning GPT2-1.5B with different activation precisions in communication; (b) Average absolute value of activations and their changes for GPT2-1.5B during training. </figcaption>
|
| 20 |
+
</figure>
|
| 21 |
+
</div>
|
| 22 |
+
<div class="minipage">
|
| 23 |
+
|
| 24 |
+
</div>
|
| 25 |
+
</div>
|
| 26 |
+
<figcaption>Summary of technical highlights among standard SGD, AC-GC, TinyScript and <code>AC-SGD</code>. AC-GC <span class="citation" data-cites="evans2021ac"></span> and TinyScript <span class="citation" data-cites="fu2020don"></span> assume that the returned gradient is unbiased, whereas the <code>AC-SGD</code> algorithm does not rely on such an assumption.</figcaption>
|
| 27 |
+
</figure>
|
| 28 |
+
|
| 29 |
+
Previous efforts on activation compression [@han2016deep; @hubara2017quantized; @jain2018gist; @chakrabarti2019backprop; @chen2021actnn] illustrate, albeit mostly empirically, that large deep learning models can tolerate some compression errors on these activation values. However, when it comes to the underlying theoretical analysis, these efforts mostly make assumptions that do not apply to neural networks with non-linear activation functions --- the only two recent efforts that claim theoretical convergence analysis [@evans2021ac; @fu2020don] assume that an unbiased compression on activations leads to an unbiased error on the gradient. Not surprisingly, these algorithms lead to suboptimal quality under relatively aggressive compression, illustrated in Figure [1](#fig:intro_ppl){reference-type="ref" reference="fig:intro_ppl"} --- in many cases, using activation compression to fine-tune a model might be worse than zero-shot learning without any fine-tuning at all.
|
| 30 |
+
|
| 31 |
+
In this paper, we focus on the problem of activation compression for training language models over slow networks by asking the following:
|
| 32 |
+
|
| 33 |
+
- **Q1.** *Can we design an algorithm for activation compression with rigorous theoretical guarantees on SGD convergence?*
|
| 34 |
+
|
| 35 |
+
- **Q2.** *Can such an algorithm be implemented efficiently without additional runtime overhead and outperform today's activation compression algorithms without accuracy loss?*
|
| 36 |
+
|
| 37 |
+
Our answers to both questions are ***Yes***. Our technical contributions are listed below:
|
| 38 |
+
|
| 39 |
+
**[Contribution 1.]{.underline}** We propose `AC-SGD`, a novel algorithm for activation compression. The idea of `AC-SGD` is simple --- instead of directly compressing the activations, *compress the change of activations for the same training example across epochs*. Intuitively, we expect `AC-SGD` to outperform simply compressing the activations because it enables an interesting "self-enforcing" dynamics: *the more training stabilizes $\rightarrow$ the smaller the changes of the model across epochs $\rightarrow$ the smaller the changes of activations for the same training example across epochs $\rightarrow$ the smaller the compression error using the same #bits $\rightarrow$ training stabilizes more.*
|
| 40 |
+
|
| 41 |
+
**[Contribution 2.]{.underline}** The theoretical analysis of `AC-SGD` is non-trivial since we have to analyze the above dynamics and connect it to SGD convergence, which is quite different from most of today's results on gradient compression and error compensation. Under mild technical conditions and quantization functions with bounded error, we show that `AC-SGD` converges with a rate of $O(1/\sqrt{T})$ for non-convex objectives, the same as vanilla SGD [@bottou2018optimization; @liu2021distributed]. To the best of our knowledge, `AC-SGD` is the first activation compression algorithm with rigorous theoretical analysis that shows a convergence rate of $O(1/\sqrt{T})$ (without relying on assumptions of unbiased gradient).
|
| 42 |
+
|
| 43 |
+
**[Contribution 3.]{.underline}** We then show that `AC-SGD` can be optimized and implemented efficiently, without adding additional end-to-end runtime overhead over non-compression and other compression schemes (it does require us to utilize more memory and SSD for storage of activations).
|
| 44 |
+
|
| 45 |
+
**[Contribution 4.]{.underline}** We then conduct extensive experiments on sequence classification and language modeling datasets using DeBERTa-1.5B and GPT2-1.5B models, respectively. We show that `AC-SGD` can aggressively compress activations to 2-4 bits without sacrificing convergence performance, where direct quantization of activations fails to converge; in slow networks, `AC-SGD` achieves up to $4.3\times$ end-to-end speedup.
|
| 46 |
+
|
| 47 |
+
**[Contribution 5.]{.underline}** Last but not least, we also show that `AC-SGD` can be combined with state-of-the-art gradient compression algorithms to enable "end-to-end communication compression": *All data exchanges between machines, including model gradients, forward activations, and backward gradients are compressed into lower precision*. This provides up to $4.9\times$ end-to-end speed-up, without sacrificing model quality.
|
| 48 |
+
|
| 49 |
+
<figure id="fig:illustration" data-latex-placement="t!">
|
| 50 |
+
<embed src="imgs/illustration.pdf" style="width:80.0%" />
|
| 51 |
+
<figcaption>The communication pattern of training large language models with both data parallelism and pipeline model parallelism. <span class="math inline"><em>C</em></span> denotes a compression module. The goal of this paper is to understand the design of <span class="math inline"><em>C</em></span> for <em>forward activation</em> and <em>backward gradient</em>.</figcaption>
|
| 52 |
+
</figure>
|
| 53 |
+
|
| 54 |
+
# Method
|
| 55 |
+
|
| 56 |
+
Training large language models over multiple devices is a challenging task. Because of the vast number of parameters of the model and data examples, state-of-the-art systems need to combine different forms of parallelism. Figure [5](#fig:illustration){reference-type="ref" reference="fig:illustration"} illustrates an example in which such a model is distributed over four machines: *(Pipeline Model Parallelism)* The model is partitioned onto Machine 1 and Machine 2 (similarly, Machine 3 and Machine 4), each of which is holding a subset of *layers*. To compute the gradient over the model using backpropagation, these two machines need to communicate the *activations* during the forward pass and the *gradient on activations* during the backward pass. *(Data Parallelism)* Machine 1 and Machine 3 (similarly, Machine 2 and Machine 4) process the same set of *layers* for different *macro-batches*. In this case, each of them will hold a replica of the same model. After the gradient over their model parameters are ready, they need to communicate the *model gradient*, usually by taking an average [@li13pytorch; @sergeev2018horovod; @gan2021bagua].
|
| 57 |
+
|
| 58 |
+
In slow networks, the communication among all machines often becomes the bottleneck [@liu2021distributed]. To improve the speed of training, one can conduct *lossy compression* of the data before they are communicated, illustrated as the $C$ module in Figure [5](#fig:illustration){reference-type="ref" reference="fig:illustration"}. When the model fits into a single machine, there have been intensive efforts on compressing the model gradient [@alistarh2016qsgd; @zhang2017zipml; @bernstein2018signsgd; @wen2017terngrad]. However, when it comes to pipeline model parallelism, such compression techniques are less studied. In this paper, we focus on designing efficient communication compression algorithms to compress both forward activations and backward gradients. As we will show later, both can be compressed significantly with `AC-SGD` without hurting the model quality. We also show that it is possible to combine `AC-SGD` with state-of-the-art gradient compression techniques to enable the end-to-end compression scheme illustrated in Figure [5](#fig:illustration){reference-type="ref" reference="fig:illustration"}.
|
| 59 |
+
|
| 60 |
+
In this paper, we focus on the following technical problem. Note that, for the simplicity of notations, we present here the case where the model is partitioned onto $K = 2$ machines. `AC-SGD` works for cases with $K > 2$: (1) in experiments, we consider $K=8$, i.e., a single model is partitioned onto 8 machines; (2) in the supplementary material we provide the theoretical analysis for $K > 2$.
|
| 61 |
+
|
| 62 |
+
Given a distribution of samples $\mathcal{D}$, we consider the following optimization task:
|
| 63 |
+
|
| 64 |
+
$$\begin{align}
|
| 65 |
+
\label{eqnMainTask}
|
| 66 |
+
\min_{x\in\mathbb{R}^d} \hspace{5mm} f(x) := \mathbb{E}_{\xi \sim \mathcal{D}} F ( b ( a(\xi, x^{(a)}), x^{(b)}) ),
|
| 67 |
+
\end{align}$$ where $F$ is a loss function, $a(-)$ and $b(-)$ correspond to two sets of *layers* of the model --- $a(-)$ has model $x^{(a)}$ and $b(-)$ has model $x^{(b)}$. In Figure [5](#fig:illustration){reference-type="ref" reference="fig:illustration"}, Machine 1 would hold $x^{(a)}$ and Machine 2 would hold $x^{(b)}$. In the following, we call the machine that holds $x^{(a)}$ Machine $a$ and the machine that holds $x^{(b)}$ Machine $b$. In the standard backpropagation algorithm, the communication between these two machines is as follows:
|
| 68 |
+
|
| 69 |
+
- Given a data sample $\xi$, Machine $a$ sends to Machine $b$ the forward activation: $a(\xi, x^{(a)})$
|
| 70 |
+
|
| 71 |
+
- Machine $b$ sends to Machine $a$ the backward gradient on $a(\xi, x^{(a)})$.
|
| 72 |
+
|
| 73 |
+
A natural way of compressing forward activations is to send, instead of $a(\xi, x^{(a)})$, a quantized version $m(\xi, x^{(a)}) = Q(a(\xi, x^{(a)}))$. This is the quantization scheme that state-of-the-art methods such as AC-GC [@evans2021ac] and TinyScript [@fu2020don] use. Both AC-GC [@evans2021ac] and TinyScript [@fu2020don] assume that the gradient is unbiased when $m(\xi, x^{(a)})$ is an unbiased estimator of $a(\xi, x^{(a)})$. This enables their convergence rates of $\mathcal{O}(1/\sqrt{T})$. However, because of the non-linearity of $F$ and $b$ in a deep learning model with non-linear activation functions, an unbiased $m(\xi, x^{(a)})$ *will* lead to biases on the gradient. In the Appendix, we will provide an example showing that such a gradient bias will hurt SGD convergence even for a very simple optimization problem. On the theory side, previous efforts on understanding gradient bias [@biasedSGD] have also shown that even bounded bias on the gradient can impact the convergence of SGD. As we will show later, empirically, this bias can indeed lead to suboptimal models under aggressive compression.
|
| 74 |
+
|
| 75 |
+
Throughout the paper we use the following definitions:
|
| 76 |
+
|
| 77 |
+
- $f^*$ is the optimal value of [\[eqnMainTask\]](#eqnMainTask){reference-type="ref" reference="eqnMainTask"}.
|
| 78 |
+
|
| 79 |
+
- $N$ is the number of samples.
|
| 80 |
+
|
| 81 |
+
- $x_{t} = (x_{t}^{(a)}, x_{t}^{(b)})$ is the full model at iteration $t$.
|
| 82 |
+
|
| 83 |
+
- $\nabla f(\cdot)$ is the gradient of function $f$.
|
| 84 |
+
|
| 85 |
+
- $g_{\xi_t}(x_t) = \nabla F(\xi_t; x_t)$ is the stochastic gradient.
|
| 86 |
+
|
| 87 |
+
- $Q(\cdot)$ is the quantization function used to compress activations.
|
| 88 |
+
|
| 89 |
+
- $m(\cdot)$ is the message exchanged between $a$ and $b$ in the feed forward path.
|
| 90 |
+
|
| 91 |
+
- $\|\cdot \|$ denotes the $L_2$-norm.
|
| 92 |
+
|
| 93 |
+
:::: algorithm
|
| 94 |
+
::: algorithmic
|
| 95 |
+
$x_0$, learning rate $\gamma$, sub-network $a(-)$ weights $x^{(a)}$, sub-network $b(-)$ weights $x^{(b)}$, quantization function $Q$, array of previous messages $m$ initialized to 0\
|
| 96 |
+
Randomly sample $\xi_t$ Set $m(\xi_t) = a(\xi_t, x_{t}^{(a)})$ Update $m(\xi_t) \leftarrow m(\xi_t) + Q\big( a(\xi_t, x_{t}^{(a)}) - m(\xi_t)\big)$ // Machine $a$ sends $Q\big( a(\xi_t, x_{t}^{(a)}) - m(\xi_t)\big)$ to Machine $b$, which knows $m(\xi_t)$ through a local version of $m$\
|
| 97 |
+
Update $x_{t+1}^{(b)} \leftarrow x_{t}^{(b)} - \gamma \cdot \nabla_{x^{(b)}} (f\circ b)\vert_m$\
|
| 98 |
+
// Machine $b$ sends $Q(\nabla_{a} (f\circ b)\vert_m)$ to Machine $a$\
|
| 99 |
+
Update $x_{t+1}^{(a)} \leftarrow x_{t}^{(a)} - \gamma \cdot Q(\nabla_{a} (f\circ b)\vert_m) \cdot \nabla_{x^{(a)}} a$\
|
| 100 |
+
$x = (x_{T}^{(a)}, x_{T}^{(b)})$\
|
| 101 |
+
:::
|
| 102 |
+
::::
|
| 103 |
+
|
| 104 |
+
In this section we present `AC-SGD`, with the goal of mitigating the above-mentioned difficulties that appear in direct quantization of the activations.
|
| 105 |
+
|
| 106 |
+
Algorithm [\[alg:SHwT\]](#alg:SHwT){reference-type="ref" reference="alg:SHwT"} illustrates the `AC-SGD` algorithm. The idea behind Algorithm [\[alg:SHwT\]](#alg:SHwT){reference-type="ref" reference="alg:SHwT"} is simple --- *instead of compressing the activations directly, compress the changes of activations for the same training example across epochs*. As illustrated in Algorithm [\[alg:SHwT\]](#alg:SHwT){reference-type="ref" reference="alg:SHwT"}, for iteration $t$ and the data sample $\xi_t$, *if* it is the first time that $\xi_t$ is sampled, Machine $a$ communicates the full precision activations without any compression: $m(\xi_t) = a(\xi_t, x_t^{(a)})$ (Lines 4-5). Both machines will save $m(\xi_t)$ in a local buffer. If $\xi_t$ has been sampled in previous iterations, Machine $a$ communicates a compressed version: $$Q(a(\xi_t, x_t^{(a)}) - m(\xi_t)),$$ where $m(\xi_t)$ was the previous message, stored in the local buffer. Both machines then update this local buffer: $$m(\xi_t) \leftarrow m(\xi_t) + Q(a(\xi_t, x_t^{(a)}) - m(\xi_t)).$$ Machine $b$ then uses $m(\xi_t)$ as the forward activations, computes backward gradients, and communicates a quantized version of the backward gradient to Machine $a$ (Line 11). We use $$\delta_\xi = a(\xi_t, x_t^{(a)}) - m(\xi_t)$$ to denote the *message error* in sending the activations.
|
| 107 |
+
|
| 108 |
+
**Update Rules.** The above algorithm corresponds to the following update rules, at iteration $t$ with sample $\xi_t$:
|
| 109 |
+
|
| 110 |
+
$$\begin{align*}
|
| 111 |
+
x_{t+1}^{(a)} &= x_{t}^{(a)} - \gamma \cdot Q(\nabla_{a} (f\circ b)\vert_{(m(\xi, x_{t}^{(a)}), x_{t}^{(b)})}) \cdot \nabla_{x^{(a)}} a\vert_{x_{t}^{(a)}}, \\
|
| 112 |
+
x_{t+1}^{(b)} &= x_{t}^{(b)} - \gamma \cdot \nabla_{x^{(b)}} (f \circ b) \vert_{(m (\xi, x_{t}^{(a)}), x_{t}^{(b)})},
|
| 113 |
+
\end{align*}$$ where $\gamma$ is the learning rate, $\nabla_{x^{(b)}} (f \circ b) \vert_{(m (\xi, x_{t}^{(a)}), x_{t}^{(b)})}$ is the gradient on $x^{(b)}$ using the compressed forward activations $m(\xi, x_{t}^{(a)})$, and $Q(\nabla_{a} (f\circ b)\vert_{(m(\xi, x_{t}^{(a)}), x_{t}^{(b)})})$ is the quantized backward gradient.
|
| 114 |
+
|
| 115 |
+
Setting $x_{t} = (x_{t}^{(a)}, x_{t}^{(b)})$, we can rephrase the update rule as $$x_{t+1} = x_{t} - \gamma \cdot \left( g_{\xi} (x_t) + \Delta_{\xi} (x_t) \right),$$ where $g_{\xi} (x_t)$ is the stochastic gradient and $\Delta_{\xi} (x_t)$ is the *gradient error* introduced by communication compression. We have $\Delta_{\xi} = (\Delta_{\xi}^{(a)} + \Delta_{\xi}^{(Q)}, \Delta_{\xi}^{(b)})$ given by: $$\begin{align*}
|
| 116 |
+
\Delta_{\xi}^{(Q)} (x_t) &= Q(\nabla_{a} (f\circ b)\vert_{(m(\xi, x_{t}^{(a)}), x_{t}^{(b)})}) \cdot \nabla_{x^{(a)}} a\vert_{x_{t}^{(a)}} - \nabla_{a} (f\circ b)\vert_{(m(\xi, x_{t}^{(a)}), x_{t}^{(b)})} \cdot \nabla_{x^{(a)}} a\vert_{x_{t}^{(a)}}, \\
|
| 117 |
+
\Delta_{\xi}^{(a)} (x_t) &= \nabla_{a} (f\circ b)\vert_{(m(\xi, x_{t}^{(a)}), x_{t}^{(b)})} \cdot \nabla_{x^{(a)}} a\vert_{x_{t}^{(a)}} - \nabla_{a} (f\circ b \circ a)\vert_{(x_{t}^{(a)}, x_{t}^{(b)})} \\
|
| 118 |
+
\Delta_{\xi}^{(b)} (x_t) &= \nabla_{x^{(b)}} (f \circ b)\vert_{(m(\xi, x_{t}^{(a)}), x_{t}^{(b)})} - \nabla_{x^{(b)}} (f \circ b)\vert_{(a(\xi, x_{t}^{(a)}), x_{t}^{(b)})},
|
| 119 |
+
\end{align*}$$ where $\Delta_{\xi}^{(Q)} (x_t)$ is the error introduced by the gradient quantization in the backpropagation part, whilst $\Delta_{\xi}^{(a)} (x_t)$ and $\Delta_{\xi}^{(b)} (x_t)$ are the errors that the gradients of $a$ and $b$, respectively, inherit from the bias introduced in the forward pass.
|
| 120 |
+
|
| 121 |
+
We now prove the main theorem which states that, under some standard assumptions that are often used in the literature [@bottou2018optimization; @liu2021distributed], the convergence rate of `AC-SGD` algorithm is $O(1/\sqrt{T})$ for non-convex objectives, same as vanilla SGD.
|
| 122 |
+
|
| 123 |
+
We make several assumptions on the networks and the quantization. It is important to note that we put no restrictions on either the message error $\delta_\xi$ or the gradient error $\Delta_\xi$.
|
| 124 |
+
|
| 125 |
+
- **(A1: Lipschitz assumptions)** We assume that $\nabla f$, $\nabla (f\circ b)$ and $a$ are $L_{f}$, $L_{f\circ b}$-, and $\ell_a$-Lipschitz, respectively, recalling that a function $g$ is $L_g$-Lipschitz if $$\| g(x) - g(y) \| \leq L_g \| x-y\|, \hspace{5mm} \forall x, \forall y.$$ Furthermore, we assume that $a$ and $f\circ b$ have gradients bounded by $C_a$ and $C_{f\circ b}$, respectively, i.e. $\| \nabla a(x) \| \leq C_a$, and $\| \nabla (f\circ b) (x) \| \leq C_{f\circ b}$.
|
| 126 |
+
|
| 127 |
+
- **(A2: SGD assumptions)** We assume that the stochastic gradient $g_\xi$ is unbiased, i.e. $\mathbb{E}_\xi [g_\xi (x)] = \nabla f(x)$, for all $x$, and with bounded variance, i.e. $\mathbb{E}_{\xi} \| g_\xi (x) - \nabla f(x) \|^2 \leq \sigma^2$, for all $x$.
|
| 128 |
+
|
| 129 |
+
::: {#thmMainThm .thm}
|
| 130 |
+
**Theorem 1**. *Suppose that Assumptions A1, A2 hold, and consider an unbiased quantization function $Q(x)$ which satisfies that there exists $c_Q < \sqrt{1/2}$ such that $\mathbb{E}\| x-Q(x) \| \leq c_Q \|x\|$, for all $x$.[^2] Let $\gamma = \frac{1}{3(C+3L_f)\sqrt{T}}$ be the learning rate, where $$C = \frac{4c_{Q} \ell_{a} (1+C_a) L_{f\circ b} N}{\sqrt{1-2c_{Q}^2}}.$$ Then after performing $T$ updates one has $$\begin{equation}
|
| 131 |
+
\label{eqnMainResult}
|
| 132 |
+
\frac{1}{T} \sum_{t\in [T]} \mathbb{E}\| \nabla f(x_t)\|^2 \lesssim \frac{(C+L_f)(f(x_1) - f^*)}{\sqrt{T}} + \frac{\sigma^2 + (c_{Q}C_aC_{f\circ b})^2}{\sqrt{T}}.
|
| 133 |
+
\end{equation}$$*
|
| 134 |
+
:::
|
| 135 |
+
|
| 136 |
+
We present the full proof of Theorem [1](#thmMainThm){reference-type="ref" reference="thmMainThm"} in Appendix [\[appMainThm\]](#appMainThm){reference-type="ref" reference="appMainThm"}, whereas here we explain the main intuition. The usual starting point in examining convergence rates is to use the fact that $f$ has $L_f$-Lipschitz gradient. It is well known that this implies $$\gamma \langle \nabla f(x_t), g_{\xi_t}(x_t) \rangle + f(x_{t+1}) - f(x_t) \leq - \langle \nabla f(x_t), \Delta_{\xi_t}(x_t) \rangle + \frac{\gamma^2L_f}{2} \| g_{\xi_t}(x_t) + \Delta_{\xi_t}(x_t) \|^2.$$ After taking the expectation over all $\xi_t$ and summing over all $t=1,\ldots,T$, we easily see that the key quantity to bound is $\sum_{t=1}^{T} \mathbb{E}\|\tilde\Delta_{\xi_t}(x_t)\|^2$, where $\tilde\Delta_{\xi_t}(x_t) = (\Delta_{\xi_t}^{(a)}(x_t),\Delta_{\xi_t}^{(b)}(x_t))$. On the other hand, the main object that we can control is the message error, $\delta_\xi$. Therefore, we first prove an auxiliary result which shows that $\|\tilde \Delta_{\xi_t}(x_t) \| \leq (1+C_a) \ell_a \| \delta_{\xi_t}(x_t)\|$, for all $t$. The key arguments for bounding $\delta_{\xi_t}$ closely follow the self-improving loop described in the introduction, and can be summarized as follows. Since we are compressing the information in such a way that we compare with all the accumulated differences, this allows us to unwrap the changes which appeared since the last time that we observed the current sample, in an iterative way. However, since these are gradient updates, they are bounded by the learning rate --- as long as we have a quantization method that keeps enough information about the signal, we can recursively build enough saving throughout the process. In particular, the more stability we have in the process, the smaller the changes of the model and the compression error gets, further strengthening the stability.
|
| 137 |
+
|
| 138 |
+
The bound is tight with respect to quantization --- setting $c_Q = 0$ (implying $C=0$), i.e. quantization does not incur any loss, recovers the original SGD convergence (cf. [@bottou2018optimization; @liu2021distributed]).
|
| 139 |
+
|
| 140 |
+
**Additional storage and communication.** `AC-SGD` requires us to store, for each data example $\xi$, the compressed activation $m(\xi)$ in a local buffer in memory or SSD. For example, in GPT2-XL training, a simple calculation shows that we need approximately an extra 1TB of storage. When using data parallelism, it reduces to 1TB / $\#$ parallel degree, but also incurs communication overhead if data is shuffled in every epoch. In addition, when the example $\xi$ is sampled again, $m(\xi)$ needs to be (1) loaded from this local buffer to the GPU, and (2) updated when a new value for $m(\xi)$ is ready.
|
| 141 |
+
|
| 142 |
+
**Optimization.** It is easy to implement and optimize the system such that these additional loading and updating steps do not incur significant overhead on the end-to-end runtime. This is because the GPU computation time for a forward pass is usually much longer than the data transfer time to load the activations --- for GPT2-XL with 1.5 billion model parameters, a single forward pass on 6 layers requires 44 ms, whereas loading $m(\xi)$ needs 0.2 ms from memory and 12 ms from SSD. One can simply pre-fetch $m(\xi)$ right before the forward pass, and hide it within the forward pass of other data examples. Similarly, updating $m(\xi)$ can also be hidden in the backward computation. It is also simple to reduce the communication overhead by shuffling data only once or less frequently.
|
2206.06614/main_diagram/main_diagram.drawio
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2206.06614/paper_text/intro_method.md
ADDED
|
@@ -0,0 +1,126 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Introduction
|
| 2 |
+
|
| 3 |
+
In recent years, the Transformer architecture [@10.5555/3295222.3295349] achieved exceptional performance on many machine learning applications, especially for text [@devlin2019bert; @raffel2020exploring] and image processing [@dosovitskiy2021image; @caron2021emerging; @yuan2021tokenstotoken]. This intrinsically relates to its few-shot learning nature [@brown2020language]: the attention weights work as context-dependent parameters, inducing better generalization. Furthermore, this architecture parallelizes token processing by design. This property avoids backpropagation through time, making it less prone to vanishing/exploding gradients, a very common problem for recurrent models. As a result, they can handle longer sequences more efficiently.
|
| 4 |
+
|
| 5 |
+
This work argues that these two capabilities are essential for a Meta-Reinforcement Learning (meta-RL) agent. We propose TrMRL (**Tr**ansformers for **M**eta-**R**einforcement **L**earning), a memory-based meta-Reinforcement Learner which uses the transformer architecture to formulate the learning process. It works as a memory reinstatement mechanism [@Rovee-Collier2012] during learning, associating recent working memories to create an episodic memory which is used to contextualize the policy.
|
| 6 |
+
|
| 7 |
+
Figure [1](#fig:tmrl){reference-type="ref" reference="fig:tmrl"} illustrates the process. We formulated each task as a distribution over working memories. TrMRL associates these memories using self-attention blocks to create a task representation in each head. These task representations are combined in the position-wise MLP to create an episodic output (which we identify as episodic memory). We recursively apply this procedure through layers to refine the episodic memory. In the end, we select the memory associated with the current timestep and feed it into the policy head.
|
| 8 |
+
|
| 9 |
+
Nonetheless, transformer optimization is often unstable, especially in the RL setting. Past attempts either failed to stabilize [@mishra2018simple] or required architectural additions [@parisotto2019stabilizing] or restrictions on the observation space [@loynd2020working]. We hypothesize that this challenge arises because the instability of early stages of transformer optimization harms initial exploration, which is crucial for environments where the learned behaviors must guide exploration to prevent poor policies. We argue that this challenge can be mitigated through a proper weight initialization scheme. For this matter, we applied T-Fixup initialization [@pmlr-v119-huang20f].
|
| 10 |
+
|
| 11 |
+
<figure id="fig:tmrl" data-latex-placement="t">
|
| 12 |
+
<embed src="1.pdf" style="width:100.0%" />
|
| 13 |
+
<figcaption>Illustration of the TrMRL agent. At each timestep, it associates the recent past of working memories to build an episodic memory through transformer layers recursively. We argue that the self-attention works as a fast adaptation strategy since it provides context-dependent parameters.</figcaption>
|
| 14 |
+
</figure>
|
| 15 |
+
|
| 16 |
+
We conducted a series of experiments to evaluate meta-training, fast adaptation, and out-of-distribution generalization in continuous control environments for locomotion and robotic manipulation. Results show that TrMRL presents comparable or superior performance and sample efficiency compared to the meta-RL baselines. We also conducted an experiment to validate the episodic memory refinement process. Finally, we conducted an ablation study to show the effectiveness of the T-Fixup initialization, and the sensitivity to network depth, sequence size, and the number of attention heads.
|
| 17 |
+
|
| 18 |
+
# Method
|
| 19 |
+
|
| 20 |
+
We define a Markov decision process (MDP) by a tuple $\mathcal{M} = (\mathcal{S}, \mathcal{A}, \mathcal{P}, \mathcal{R}, \mathcal{P}_{0}, \gamma, H)$, where $\mathcal{S}$ is a state space, $\mathcal{A}$ is an action space, $\mathcal{P} : \mathcal{S} \times \mathcal{A} \times \mathcal{S} \rightarrow [0, \infty)$ is a transition dynamics, $\mathcal{R} : \mathcal{S} \times \mathcal{A} \rightarrow [-R_{max}, R_{max}]$ is a bounded reward function, $\mathcal{P}_{0}: \mathcal{S} \rightarrow [0, \infty)$ is an initial state distribution, $\gamma \in [0, 1]$ is a discount factor, and $H$ is the horizon. The standard RL objective is to maximize the cumulative reward, i.e., $\max \mathbb{E} [\sum_{t=0}^{T} \gamma^{t} \mathcal{R} (s_{t}, a_{t})]$, with $a_{t} \sim \pi_{\boldsymbol{\theta}} (a_{t} \mid s_{t})$ and $s_{t} \sim \mathcal{P}(s_{t} \mid s_{t-1}, a_{t-1})$, where $\pi_{\boldsymbol{\theta}} : \mathcal{S} \times \mathcal{A} \rightarrow [0, \infty)$ is a policy parameterized by $\boldsymbol{\theta}$.
|
| 21 |
+
|
| 22 |
+
In the meta-RL setting, we define $p(\mathcal{M}) : \mathcal{M} \rightarrow [0, \infty)$ a distribution over a set of MDPs $\mathcal{M}$. During meta-training, we sample $\mathcal{M}_{i} \sim p(\mathcal{M})$ from this distribution, where $\mathcal{M}_{i} = (\mathcal{S}, \mathcal{A}, \mathcal{P}_{i}, \mathcal{R}_{i}, \mathcal{P}_{0, i}, \gamma, H)$. Therefore, the tasks[^1] share a similar structure in this setting, but reward function and transition dynamics vary. The goal is to learn a policy that, during meta-testing, can adapt to a new task sampled from the same distribution $p(\mathcal{M})$. In this context, adaptation means maximizing the reward under the task in the most efficient way. To achieve this, the meta-RL agent should learn the prior knowledge shared across the distribution of tasks. Simultaneously, it should learn how to differentiate and identify these tasks using only a few episodes.
|
| 23 |
+
|
| 24 |
+
The transformer architecture [@10.5555/3295222.3295349] was first proposed as an encoder-decoder architecture for neural machine translation. Since then, many variants have emerged, proposing simplifications or architectural changes across many ML problems [@dosovitskiy2021an; @DBLP:journals/corr/abs-2005-14165; @parisotto2019stabilizing]. Here, we describe the encoder architecture as it composes our memory-based meta-learner.
|
| 25 |
+
|
| 26 |
+
The transformer encoder is a stack of multiple equivalent layers. There are two main components in each layer: a multi-head self-attention block, followed by a position-wise feed-forward network. Each component contains a residual connection [@he2015deep] around them, followed by layer normalization [@ba2016layer]. The multi-head self-attention (MHSA) block computes the self-attention operation across many different heads, whose outputs are concatenated to serve as input to a linear projection module, as in Equation [\[eq:mhsa\]](#eq:mhsa){reference-type="ref" reference="eq:mhsa"}:
|
| 27 |
+
|
| 28 |
+
$$\begin{align}
|
| 29 |
+
\label{eq:mhsa}
|
| 30 |
+
\text{MHSA}(K,Q,V) &= \text{Concat}(h_{1}, h_{2}, \dots, h_{\omega})W_{o}, \nonumber \\
|
| 31 |
+
& h_{i} = \textit{softmax}(\frac{Q K^{T}}{\sqrt{d}} \cdot M) V,
|
| 32 |
+
\end{align}$$
|
| 33 |
+
|
| 34 |
+
where $K,Q,V$ are the keys, queries, and values for the sequence input, respectively. Additionally, $d$ represents the dimension size of keys and queries representation and $\omega$ the number of attention heads. $M$ represents the attention masking operation. $W_{o}$ represents a linear projection operation.
|
| 35 |
+
|
| 36 |
+
The position-wise feed-forward block is a 2-layer dense network with a ReLU activation between these layers. All positions in the sequence input share the parameters of this network, equivalently to a $1 \times 1$ temporal convolution over every step in the sequence. Finally, we describe the positional encoding. It injects the relative position information among the elements in the sequence input since the transformer architecture fully parallelizes the input processing. The standard positional encoding is a sinusoidal function added to the sequence input [@10.5555/3295222.3295349].
|
| 37 |
+
|
| 38 |
+
The training of transformer models is notoriously difficult, especially in the RL setting [@parisotto2019stabilizing]. Indeed, gradient optimization with attention layers often requires complex learning rate warmup schedules to prevent divergence [@pmlr-v119-huang20f]. Recent work suggests two main reasons for this requirement. First, the Adam optimizer [@kingma2017adam] presents high variance in the inverse second moment for initial updates, proportional to a divergent integral [@liu2020variance]. It leads to problematic updates and significantly affects optimization. Second, the backpropagation through layer normalization can also destabilize optimization because the associated error depends on the magnitude of the input [@xiong2020layer].
|
| 39 |
+
|
| 40 |
+
Given these challenges, @pmlr-v119-huang20f proposed a weight initialization scheme (T-Fixup) to eliminate the need for learning rate warmup and layer normalization. This is particularly important in the RL setting, since current RL algorithms are very sensitive to the learning rate for learning and exploration.
|
| 41 |
+
|
| 42 |
+
T-Fixup appropriately bounds the original Adam update to make variance finite and reduce instability, regardless of model depth. We refer to @pmlr-v119-huang20f for the mathematical derivation. We apply the T-Fixup for the transformer encoder as follows:
|
| 43 |
+
|
| 44 |
+
- Apply Xavier initialization [@Glorot10understandingthe] for all parameters excluding input embeddings. Use Gaussian initialization $\mathcal{N}(0, d^{-\frac{1}{2}})$, for input embeddings, where $d$ is the embedding dimension;
|
| 45 |
+
|
| 46 |
+
- Scale the linear projection matrices in each encoder attention block and position-wise feed-forward block by $0.67N^{-\frac{1}{4}}$.
|
| 47 |
+
|
| 48 |
+
<figure id="fig:tasksdistribution" data-latex-placement="t">
|
| 49 |
+
<embed src="2.pdf" style="width:50.0%" />
|
| 50 |
+
<figcaption>The illustration of two tasks (<span class="math inline">𝒯<sub>1</sub></span> and <span class="math inline">𝒯<sub>2</sub></span>) as distributions over working memories. The intersection of both densities represents the ambiguity between <span class="math inline">𝒯<sub>1</sub></span> and <span class="math inline">𝒯<sub>2</sub></span>.</figcaption>
|
| 51 |
+
</figure>
|
| 52 |
+
|
| 53 |
+
In this work, we argue that two critical capabilities of transformers compose the central role of a Meta-Reinforcement Learner. First, transformers can handle long sequences and reason over long-term dependencies, which is essential to the meta-RL agent to identify the MDP from a sequence of trajectories. Second, transformers present context-dependent weights from self-attention. This mechanism serves as a fast adaptation strategy and provides necessary adaptability to the meta-RL agent for new tasks.
|
| 54 |
+
|
| 55 |
+
<figure id="fig:attn" data-latex-placement="t">
|
| 56 |
+
<embed src="3.pdf" style="width:80.0%" />
|
| 57 |
+
<figcaption>Illustration of causal self-attention as a fast adaptation strategy. In this simplified scenario (2 working memories), the attention weights <span class="math inline"><em>α</em><sub><em>i</em>, <em>j</em></sub></span> drives the association between the current working memory and the past ones to compute a task representation <span class="math inline"><em>μ</em><sub><em>t</em></sub></span>. Self-attention computes this association by relative similarity.</figcaption>
|
| 58 |
+
</figure>
|
| 59 |
+
|
| 60 |
+
We represent a working memory at the timestep $t$ as a parameterized function $\phi_{t}(\boldsymbol{s}_{t}, \boldsymbol{a}_{t}, r_{t}, \eta_{t})$, where $\boldsymbol{s}_{t}$ is the MDP state, $\boldsymbol{a}_{t} \sim \pi(\boldsymbol{a}_{t} \mid \boldsymbol{s}_{t})$ is an action, $r_{t} \sim \mathcal{R}(\boldsymbol{s}_{t}, \boldsymbol{a}_{t})$ is the reward, and $\eta_{t}$ is a boolean flag to identify whether this is a terminal state. Our first hypothesis is that we can define a task $\mathcal{T}$ as a distribution over working memories, as in Equation [\[eq:taskdist\]](#eq:taskdist){reference-type="ref" reference="eq:taskdist"}: $$\begin{equation}
|
| 61 |
+
\label{eq:taskdist}
|
| 62 |
+
\mathcal{T}(\phi) : \Phi \rightarrow [0, \infty),
|
| 63 |
+
\end{equation}$$
|
| 64 |
+
|
| 65 |
+
where $\Phi$ is the working memory embedding space. In this context, one goal of a meta-RL agent is to learn $\phi$ to make a distinction among the tasks in the embedding space $\Phi$. Furthermore, the learned embedding space should also approximate the distributions of similar tasks so that they can share knowledge. Figure [2](#fig:tasksdistribution){reference-type="ref" reference="fig:tasksdistribution"} illustrates this concept for a one-dimensional representation.
|
| 66 |
+
|
| 67 |
+
We aim to find a representation for the task given its distribution to contextualize our policy. Intuitively, we can represent each task as a linear combination of working memories sampled by the policy interacting with it:
|
| 68 |
+
|
| 69 |
+
$$\begin{align}
|
| 70 |
+
\label{eq:taskrep}
|
| 71 |
+
\mu_{\mathcal{T}} &= \sum_{t = 0}^{N}\alpha_{t} \cdot \mathcal{W}(\phi_{t}(\boldsymbol{s}_{t}, \boldsymbol{a}_{t}, r_{t}, \eta_{t})), \nonumber \\
|
| 72 |
+
&\text{with } \sum_{t = 0}^{N}\alpha_{t} = 1
|
| 73 |
+
\end{align}$$
|
| 74 |
+
|
| 75 |
+
where $N$ represents the length of a segment of sampled trajectories during the policy and task interaction. $\mathcal{W}$ represents an arbitrary linear transformation. Furthermore, $\alpha_{t}$ is a coefficient to compute how relevant a particular working memory $t$ is to the task representation, given the set of sampled working memories. Next, we show how the self-attention computes these coefficients, which we use to output an episodic memory from the transformer architecture.
|
| 76 |
+
|
| 77 |
+
In this work, our central argument is that self-attention works as a fast adaptation strategy. The context-dependent weights dynamically compute the working memories coefficients to implement Equation [\[eq:taskrep\]](#eq:taskrep){reference-type="ref" reference="eq:taskrep"}. We now derive *how* we compute these coefficients. Figure [3](#fig:attn){reference-type="ref" reference="fig:attn"} illustrates this mechanism.
|
| 78 |
+
|
| 79 |
+
Let us define $\phi_{t}^{k}$, $\phi_{t}^{q}$, and $\phi_{t}^{v}$ as a representation[^2] of the working memory at timestep $t$ in the keys, queries, and values spaces, respectively. The dimension of the queries and keys spaces is $d$. We aim to compute the attention operation in Equation [\[eq:mhsa\]](#eq:mhsa){reference-type="ref" reference="eq:mhsa"} for a sequence of $T$ timesteps, resulting in Equation [\[eq:mhsatrmrl\]](#eq:mhsatrmrl){reference-type="ref" reference="eq:mhsatrmrl"}:
|
| 80 |
+
|
| 81 |
+
$$\begin{align}
|
| 82 |
+
\label{eq:mhsatrmrl}
|
| 83 |
+
\textit{softmax}(\frac{Q K^{T}}{\sqrt{d}} \cdot M) V &= \frac{1}{\sqrt{d}}\begin{bmatrix}
|
| 84 |
+
\alpha_{1, 1} & \alpha_{1,2} & \dots \\
|
| 85 |
+
\vdots & \ddots & \\
|
| 86 |
+
\alpha_{T, 1} & & \alpha_{T, T}
|
| 87 |
+
\end{bmatrix}\begin{bmatrix}
|
| 88 |
+
\phi_{1}^{v} \\
|
| 89 |
+
\vdots \\
|
| 90 |
+
\phi_{T}^{v}
|
| 91 |
+
\end{bmatrix} = \begin{bmatrix}
|
| 92 |
+
\mu_{1} \\
|
| 93 |
+
\vdots \\
|
| 94 |
+
\mu_{T}
|
| 95 |
+
\end{bmatrix}, \nonumber \\
|
| 96 |
+
&\text{where }
|
| 97 |
+
\begin{cases}
|
| 98 |
+
\alpha_{i, j} = \frac{\exp{\langle\phi_{i}^{q}, \phi_{j}^{k}\rangle}}{\sum_{n = 1}^{i}\exp{\langle\phi_{i}^{q}, \phi_{n}^{k}\rangle}} & \text{if $j \leq i$}\\
|
| 99 |
+
0 & \text{otherwise.}
|
| 100 |
+
\end{cases}
|
| 101 |
+
\end{align}$$
|
| 102 |
+
|
| 103 |
+
where $\langle a_{i}, b_{j}\rangle = \sum_{n = 1}^{d} a_{i, n}\cdot b_{j, n}$ is the dot product between the working memories $a_{i}$ and $b_{j}$. Therefore, for a particular timestep $t$, the self-attention output is:
|
| 104 |
+
|
| 105 |
+
$$\begin{align}
|
| 106 |
+
\label{eq:eprep}
|
| 107 |
+
\mu_{t} &= \frac{1}{\sqrt{d}} \cdot \frac{\phi_{1}^{v} \cdot \exp{\langle\phi_{t}^{q}, \phi_{1}^{k}\rangle} + \cdots + \phi_{t}^{v} \cdot \exp{\langle\phi_{t}^{q}, \phi_{t}^{k}\rangle}}{\sum_{n = 1}^{t}\exp{\langle\phi_{t}^{q}, \phi_{n}^{k}\rangle}} \nonumber \\
|
| 108 |
+
&= \frac{1}{\sqrt{d}}\sum_{n = 1}^{t} \alpha_{t, n} W_{v}(\phi_{n}).
|
| 109 |
+
\end{align}$$
|
| 110 |
+
|
| 111 |
+
Equation [\[eq:eprep\]](#eq:eprep){reference-type="ref" reference="eq:eprep"} shows that the self-attention mechanism implements the task representation in Equation [\[eq:taskrep\]](#eq:taskrep){reference-type="ref" reference="eq:taskrep"} by associating past working memories *given that* the current one is $\phi_{t}$. It computes this association with *relative similarity* through the dot product normalized by the softmax operation. This inductive bias helps the working memory representation learning to approximate the density of the task distribution $\mathcal{T}(\phi)$.
|
| 112 |
+
|
| 113 |
+
We now argue that the transformer model implements a memory reinstatement mechanism for episodic memory retrieval. An episodic memory system is a long-lasting memory that allows an agent to recall and re-experience personal events [@tulving2002]. It complements the working memory system, which is active and relevant for short periods [@BADDELEY2010R136] and works as a buffer for episodic memory retrieval [@zilli2008modeling]. Adopting this memory interaction model, we model an episodic memory as a transformation over a collection of past memories. More concretely, we consider that a transformer layer implements this transformation:
|
| 114 |
+
|
| 115 |
+
$$\begin{equation}
|
| 116 |
+
\label{eq:trblock}
|
| 117 |
+
e^{l}_{t} = f(e^{l-1}_{0}, \dots, e^{l-1}_{t}),
|
| 118 |
+
\end{equation}$$
|
| 119 |
+
|
| 120 |
+
where $e^{l}_{t}$ represents the episodic memory retrieved from the last layer $l$ for the timestamp $t$ and $f$ represents the transformer layer. Equation [\[eq:trblock\]](#eq:trblock){reference-type="ref" reference="eq:trblock"} provides a recursive definition, and $e^{0}_{t}$ (the base case) corresponds to the working memories $\phi_{t}$. In this way, the transformer architecture recursively refines the episodic memory by interacting with memories retrieved from the past layer. We show the pseudocode for this process in Algorithm [\[alg:cap\]](#alg:cap){reference-type="ref" reference="alg:cap"} (Appendix [13](#ap:pseudocode){reference-type="ref" reference="ap:pseudocode"}). This refinement is guaranteed by a crucial property of the self-attention mechanism: it computes a consensus representation across the input memories associated to the sub-trajectory, as stated by Theorem [\[theor1\]](#theor1){reference-type="ref" reference="theor1"} (Proof in Appendix [12](#ap:theorproof){reference-type="ref" reference="ap:theorproof"}). Here, we define consensus representation as the memory representation that is closest on average to all likely representations [@kumar-byrne-2004-minimum], i.e., minimizes the Bayes risk considering the set of episodic memories.
|
| 121 |
+
|
| 122 |
+
::: restatable
|
| 123 |
+
theoremfta []{#theor1 label="theor1"} Let $\mathcal{S}^{l} = (\boldsymbol{e}^{l}_{0}, \dots, \boldsymbol{e}^{l}_{N}) \sim p(\boldsymbol{e}|\mathcal{S}^{l}, \boldsymbol{\theta}_{l})$ be a set of normalized episodic memory representations sampled from the posterior distribution $p(\boldsymbol{e}|\mathcal{S}^{l}, \boldsymbol{\theta}_{l})$ induced by the transformer layer $l$, parameterized by $\boldsymbol{\theta}_{l}$. Let $K$, $Q$, $V$ be the Key, Query, and Value vector spaces in the self-attention mechanism. Then, the self-attention in the layer $l+1$ computes a consensus representation $e^{l+1}_{N} = \frac{\sum_{t=1}^{N}{\boldsymbol{e}_{t}^{l, V}}\cdot\exp{\langle\boldsymbol{e}^{l, Q}_{N}, \boldsymbol{e}^{l, K}_{t} \rangle}}{\sum_{t=1}^{N}{\exp{\langle\boldsymbol{e}^{l, Q}_{N}, \boldsymbol{e}^{l, K}_{t} \rangle}}}$ whose associated Bayes risk (in terms of negative cosine similarity) lower bounds the Minimum Bayes Risk (MBR) predicted from the set of candidate samples $\mathcal{S}^{l}$ projected onto the $V$ space.
|
| 124 |
+
:::
|
| 125 |
+
|
| 126 |
+
Lastly, we condition the policy head on the episodic memory from the current timestep to sample an action. This complete process resembles a memory reinstatement operation: a reminder procedure that reintroduces past elements in advance of a long-term retention test [@Rovee-Collier2012]. In our context, this "long-term retention test" identifies the task and acts accordingly to maximize rewards.
|
2206.10027/main_diagram/main_diagram.drawio
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
<mxfile host="app.diagrams.net" modified="2021-12-09T00:23:16.096Z" agent="5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.93 Safari/537.36" etag="C8vAqdsTeGWu_d0L_FfR" version="15.9.4" type="google"><diagram id="mS_iGQeKi8B5CuxLVy-u" name="Page-1">7L3XtuM4si36NfV4xqA3j/SeIkUrvtGI3omi6L7+AmtlVVdXV+2zq3vc2/chNVIrRQcCgYgZMQMg8QsuDIeypHNtTcWz/wVDiuMXXPwFwyiaBn/hjvN7B05i3zuqpSm+d6H/2OE11/PHTuTH3k9TPN//dOI6Tf3azP+8M5/G8Zmv/7QvXZZp/+fTyqn/57vOafX8lx1envb/ujdqirX+3suQyD/2q8+mqn+9M4r8ODKkv578o4h3nRbT/r3r6xxc+gUXlmlav38Nh/Dsoex+lct3QfJfHP2tYstzXP83F0xrHdVTlf+fhqdHkmH6MOj+D/5dypb2nx8N/lHZ9fxVAqDeM/zZDF+i4rfnsjZAQGaaPXtnejdrM43geDat6zSAE3p4gE/zrlqmz1gIUz8tX0Xh5dfnd2VwfVPBa9dpBnvT9/zdhWVzPEGt+a9bcr/uRX7dA34X6Zr+gnPfm5g8j9UvmNCE/O2+I4ZSTRz42F5QS0EFfiHwj60JnAb+5886bCzwI0fwOYjUu/eI8ypX9S0Z+nfi8cgz5ntNCYk0enCc2qZLMs9Ff879FIN7WcSU7q+AtfgrXaNJscuLshtV71bHSLD8F4w/0VSa6C72FyvbbzjsEoxnwX8t+E7gDFl+b22wJGnUdE6J6qYZe0/0VqIzPKsE327E4U/Mp2qnjDf8/GwroYE9gjcuOCyO9c/n5m8LzbJEid8Lku794/liwDHMZsrI7nvKmUoSmUqeavjXfXyBQ7hAR7jAxPvzfQOlxakApbdFQfjBL9lKwClORsJbH4ECazyy3hYvbwf8PsvTpgz8ViqWcXNamhFbheVrEb3o7fEMTeKC4iEmUE0+IUlZBVsuucmnVW3E4jNYH9Cf57sUrwunL3BSgJqwSSpeMLkZP/TUOYBW8+AfvdkOaFmB2Ihb0vM18iwU42ZQZmlSCO7DTiex69Wu44ZlcXAqHRmhHo3eSFCyXFUyMh7lC98fYOsyHbOkgXXIzdx+XsWs7ixzjIxvs4wm4KpOswzLlBu48dWyiVbNtMIxdSvqbUgtSKCZ1kVBcQk+kAzPW+91qRsdtJb3WSl5ine2ZconrHdBfsB/75cNLI9/+ntsHdf1FBDqSwnk4w37QL5mVYctYIOLbY6sEQxgnIbqUFdaaI0IW1ok962yv9Rho9pNepwD6A/+YRzO+8Lr4jH44FjYf14TXvlq2VfkpoYpS0fYHmhSWd5EK1GtjQualYH9glYa2aLHcvPGnJINhLjzwnjdHr5qdIr21o1cwZNtG0xwl1L0Byau0suTvOH9uILVbvXQNR8vnjX7+/K6PdMgP+kkfyMre+imsQzBo4srFKj+e7jky5hQ6oqjULVWNJrRtYif7vThHhjVR+lnXQuKTJ+lMUYs7qwiLQi3m+1o0448iqy866AO+Y4w5+UnY3RdGS001SePlDh5vT7rtmFDr2bpMylWKG4Kg3/UwG+vmZRvtzFeUurOJtAWqBBdQOsZqGd0BP9u0f7GAmeoyuh5WxJYQMxohr/o5/2dcIziF32labdz7f0yWHgjjg2ssIQ4kcSJOYxrMw2F7y2MKGCpUvkpB//jsWTubcwrA8VJbI5QMzREtruaHsUx/7PgI30v0ghTgEbySIFYLxcJUaH0yvGoLIIXQN/fI2QZwGGCuxVbnLif4VpCDnib6Y7fpqt3FxpIgX5f/uC/7pM4MtR1zTYhAejIczKT8AX
a1ZzXKSvNw/m0+BvbO2+mJresGC1ZMvOdHLAFE8dWVqGx5drTxWZHgdaxfyyonlFRv1c06xvhXN/m5lhhRzymJ/7KP6nYuON53E28tbNbE6/SPLKKb1uauXWTXkQuVxiswN3LSSdPKjfH2zBTg71YKB/HqBDSDfaUtvoWhjE0N0VmQ56riXVzA2S1lEFPxjgccyWanGWhnhjnMeQpvefyQcqJ/46YHbs2nh1RXXwWnF7eHlOlsZsBygq9T+CUKWiH35VAYFcRD8aI+uY+PtIpYFz/fgnFZ0LIl6oyN03UgiDDvGC6L/7N+qA7gulP9hhEO9coTe4b8XE00oh6iWMrLDT2UT48wQw/fLZ23LIppJ1FY87MkkpMNb56bnmzZveDS0gRtrZGYVQR2vor4DDr+U6dm6cMfb85WS7jocVvl6OEX1ZcD+8cl78054Cgn9EzBMFQpnuNJ0X8Um6ufuOJcMUfXap66R3CZtJP7cK6WY/PXhfeFyN+2z1LNfeziNUdnND3A7NLzb5zlsgVzLzXkXZEn015P3dd0h3g8ltWLTsUIpeGpoOciVlK7HnrvFy8mwTUBLr1FMIGN27QhSnbU1dId+6ewk1Kt3KgRSJPL3R1MwjHjHKjyZ7vncfDkIXRTS1Em+2zJXSfpR/kIPVaJ5Lyu5/ScYE+YqgGG1xoq3NVvh7NOb/aAGx/UOc0yioQgLvkdW2pbmNCxxjNVd0hu1n1csiEete0n3XBkBCfws/RKA0cd9Geaql2j14y6IHu7nM/FpmIE7lgPe6nQ59s3mMsvZLOct56oUwH6Uy/nCqf6duzqIoB28ny9BSys5UDYDa/udHLn93pNQQnwVMgisX4muQa78mBX7crWfDIWuQ3UtTcSCaxlBiwR7mPvouSdBF+uwrQJHdx+dzBARP3Gn0+nkSp6eKsWu62Km7coD77AKhJAseqyU20jxUl4Pa9DUkcs+rweV8NxqxCEpQknoJ125WCCJjbbZV8K4kq7hMilQBMflRuC9m+Na6rrJvWVbUlBWuNc9TtrGRbMSpdj/H8nu+JraLePFG91hK7unWPULnGO/hgFJW7YlapIwr6hrek93FL2lvrVL1oqPdV2DMcDw0bQq6kNerYSZQ8DHqzXSXG9cHT5RMQW07BtKcFDC4ez0Sr8UDdHQZUX1akpZLeiYLZ8T2fDh7XF3fCtuqFLb0+lbbuII9b0/GoFhoeRO7ieuGU/InU7tW6zkfHWhYRm45DlSfDtlWomWNoXoglqBcPI5YY4W4J296b6f7hNk65M/p7HrnGlG7vXVmGs2YVzX+T2b5bj5QAXZUubwvPU4Xl6As9xNq9tKoQLpF2Wvdmh+VtFDgGBNy9F0EQ6O7UY24qzdLFVn71r1oEAXW4nG1owtsvzHsfcSZNt97zYkGzQgCq8pvxMMeHgKtThXl8TiWnnn6y6zFm1IfM2/tjecsWk3ggvuqfkx2nbroYlbA+eMS96oc6rXgxyWXizerwfArQa52PVwYNBq84nPNDBLo7gUxp4aytFEYLwRnUCxnvFYa0HnIgIyHq7GJ5l05GsUCS94Fm43sTccq7pwAOZh4v1vWS64sYVdmAIHzBJw+UglFPdhsc0grwL4frZZ8qPuiHo1KBgy8v4/6hJBfJ28PZQMzhUvWnLEesqzN76d2eRvSmmltCCbilU5FX6ZN7+onWcRR7YX/D4m9tZLYz8FAGfnnkTU3Z0wq5a1Ou12VKfuhi8bVSrt94mSzkLh4Nnw64jceXiqFc/SGZY6retw4GaE9CvcdKaXpZgPDXrvSpN5xaeGM199nASFbLzFEWGc4lQz4i5XG3WGUvtIcg4mrFVRB4QEA1P1YtiWug4DFZaWiLN6uxNdeH9Y5mmFAdKDBToZELSgwE003EitMfmf1C2ClfDLyvLWrtTwpXQ15xW3OrPq1Y0P1Ab9N1IVfYmI8lkuJFnYghmGLGQjoya5LUIVOFNpC39u7M8XBGzmDyzTj67lhDvep
zQuRyQQGRFa9CjEpc19JuRvwkstQDrdYtcyeWt1nU9qNNVcDtcq1a8XY4anTWEKGbCipns0eFdfJh1JiW+p/AVl7qC6E77DHHFaWnQquV/lWERn/TYEBfQUF/lLSsd2N460MovuWTz+9Cdnq2ef8Kx4fJ5wo+j1PegiSS1ydE0+7vPVY6faKr+RYU1qBICde8F5+vEzPs+MlW+GCIK7N7FUQcBMZdd9GQ/9g+CFHI+VgoUydiJ7JxGBRqWXCek7WJtR9OdzV7UsO0IjU4Yvhc5qVvGZ4lc/QRoaauIOgcdHEt4CUvhqz0FQmW2p1AymNCPmmP6S+tRtPcmeyt1xJDheH57ZuC9O5dTis0E/pBzXKbf+ziOXzWOa9Gn+br1LdpPnoMzUAQKrM19eOjcQfkT5U/OL71AJWSh3SuYLSns3uo7NN6+dugkuNaD9GdUAEZ119jyXgLiwTwtDKyKL54kkzWE69AaStsEOtOMT2OdiW21sAFQu4lQ3dYq8lSA4gQ8jY5NW+MnFyiXuJB4eHq7mOLJhNLIwIOdLzynLOpLbZSdel1RMwRBv6KvRoVCPCIX+yiuq9q+m5zFzZuRCnpq8394/iKCYUyfkYBjw/cgELptIabrYCh8lyEu3NVUOwtbfzHi3ugDm8oqVO7Nal7mUT1IS8Q73ss+/v9TnAZwwTSx5S7m4+LQUoceK4RUdRLea4PDReFMomg7g1JQ4Ze9+SsxCQJOAip7szuxSlWK7irejGNV6BxD804YVVvkgcemut+bsU5PfwgfnHi5Iq5YYi5fM9LP9Ly4+Ek0kRMFSAhqPfOfGFhKAqtxHFTy1qCvS1J9IOIDR/ANiddrj2uZBGmcQ/LNrJ8pxPI3zARlXrpbhDGY9ei5zuece/OildgIbzHqeV9s5cml1KENMmHeo2hKOndsLcsVoFob2yLuuW55iFUwP3ytCYJXM1wkk9q6uU/W16y07tnDEaEdSJhpogYcApAh/7YjVozG2SS7V5EKFPgRD6XCtwZHJVzpXsDiNspG1m4DDVXTWIfL5SVatn92HVhnoKAfk/Ton9uXoz5JVGJCmRCtWM1sW3dhVDYAH+Z+ogGjS3EGca6bKFCWK6AkL4gVa2JO6dl+F7E/ltjDTUO9DQiqMonrEJnHyJfHZzNZPeHPNaw9EnhzTuSU23xilTkgWhvhc7OPe3vd44ZxdcnIKQMSUZelSiBffNYrmKasFp85GkH4VZTg2tMtFbTxC3uEx8OAZAYZUR0gcFuRerHksK5zl0WW1RWdKZfMxA8fSiMqb5o8jj7LXPPjCi6S+EDFfJIqj+Ui96KVt7dAsDSce4adkOPh0Jy0e0hoMp8jNzj84KRnejE1cvohHsl9bt1AdVGT5LBSxHmCDi5w+haH+7QaIdMBIx7unmCpscviT7lQSox8kbqGz0oidf5nLRW+4ONAsOs3h0P2Sj1IKRSR8x3RN+Vw8VTYcBXsTX0wRBLLUsp+jMIG+8O4uTMWNABAl2xxNRwiXbFWVcA52BcnjZHEenelDTHksSOsqU4VOHQh4fAe5+llWNLblcuKF7cVHZTSqdZut5LXVuJm9PLz7PqQiKcp2weJCXv2zbaiqK6K+tTjict7SND+SDWUNFIzui0UNI7V9VM7EVpGb9JdznT+l5a7wV0D12hJH+g77Eygz4IOLFh0rdKBjfc1nQLUM2HSLgK8xblBNAipuQR1IacTY59Jw/qPitgzsuAAMP2BsGmmYcgViohkz0BaIEsAbsnxvF2GCkpiNCt3dZb0pF7hqf3stxIdBCEhh03yfow3C0vvF/Tw9AGPtuLQvTq+mm0Ghqh5Fb1kSt7tvBWRlZSfMV6lOrV8dnWamJ+1jXnnnpHrVQphC/AJBqel5GVp96Y19htzwBHwkMDEVIzzyEkjXgfJ+WjTN12bxyzHj/R56anCiqqpAeIOn+mgIbEDKEPUSk/aapGOZcLJFtMQJTerO/YjQehRz4vM0KZinzye1RU0Kf
xnXOsy1rLB2RokfK47UEiMeL+yF8IWQSjCFv8TLv9qlW8dikjjfjK6l4Wz5XKeH/52rDea/Yr4SgDQXOAtj7uV4hq5hBg+p77n2pqAdAvknRHdQwhupIsuvfptvxjprsbBAQtnPV4jB8VlSOrqw1+zT+eeL9un/MqycW65a0kTCLzrFxcJSNZJRyYaeRxhPAe/SQTh25xCDPwrvC+DdoDKUyCvCMfvjhQmGy6X3uqIQYXMFPdQOvAywYm+/aGpt2DUXj9pKJHQjwlji64dvSy9uj7nlftk2jXTW+gDJqKpjwdUfWOf6ValD3CiI89y9Vr8ZgUimEqrk/kYI2V+/x0aTzxu7fLOQNvOQ4xlnHyotQA5ovUVjB3DQHyUqPjph1GJKwu5wVeMFkSl9Z2Wp2iOUrqfHfeMKNIEuSurrFIczsijwLdVNNNMsi5AkhfKaZsYcq6y7ouUhHyFi7SFk1Cxzln1tp4Tw/8MUHPy7+rSr4+GPEwIIm4ENwHJFm6FC8VxKcBT+EWkbkiHSYWEYyRCDnh9IjoG2uM5dplVrRFLJYeT4NV27FXUGuTzABmoe5RFCcqe3WHhpTIMWcutjOokwiY1HSNsD/vzItDUNFdwzXTwmtdOrO8fULYizL9BrSWaNiHk7bEhJqN0q8v5HD55oWmzoeJjG7AK1bpzJcUXZnhn9pN3k9OTVN7EVPMgDkyHtE+kJvduRDrEk9AxM7UiK+YQ9ZGblEqpziVja4PTc0e2+NRWSUZL+4QLFssbokw8Ys2PmIaYganckrV8GPq4C89UKGhjBtlq3VkfYYrzRu+VMTDSRdrejGAHM+ZdLvfcTmijtt7swtMUvr9WImk3ndy1HT/upiqC6qxwEOXVWw/Yj+JdT1fWiMah9Jbz2ZREQGLBXN6kiXxBlwmOD19QmGSZXdyAYckbMDIcY7G2zO9j8UDIe7QZjEvZz+QH7DZa12z2Zdg1kuTF1ngekBzifRuKqpyGZp367U3tqBBOeFp96FSktach52TnjTve8PFQViz+aOo3V2f9jHaiXiKmmXHVHvNWAP4nLoY+Ka77Ejnc8zJJLFPUiyoaYfQpjQIPLVe4yyMqxUELKjiYsC2ZSptqw/zuOZdM7OKqyv6rkHbc+53vySNkjSbKx3gibULE0D71iPRPL9CVc4QVn0vr0GNHteteUuTS0+7lMyb0VDJJ8KcMXr1nJIGrbS/Fl+p3EN9YJ8BphgHEQRm0+6QASvEiDvlgqsCLtw9CmOxRXcS11X9AIdwbl5OmTCF3yai44rabNshJR6Pd47hCOgFtX+vcguipQsZKp9BJsnQkRHmAK0nJOvpJBqGKnH3ZH9LWXhyBHcdT4N1tMSu8p569R498XjGBgn/6eXOSqxFl/iXOt2GrE5FRhcI7FHrYZQpTc49AHfPscrxwuojYx4USZzPzxLXn41zJ2FK/wnY0I51ZmPvIF65CuCezstcnylMptQvwR9Guct0ggDObLp9ZG5ZAcxG+f6wTzFSgJe0wmo+onsgktkUd4la0K7a7/h49xvXTwktH15LLyuVyayWt29ptL2rHZs+vFAwdXGZH6AMEfLohku90OzOlACWbOypcy9+MjBoQefdBsHaehwwVLo7C1Ei4ruwyieHa97D/eSNvNxR48A++2XuvmqDgg1DWt3bW4IJsGodvOw+OihlbqqjWzvfVutyRjXVVDopEVQYdEbXSI6UiSAYeIh6TRMxp7z1tWXH4lkXctvQ3qcgC0hfoSJmAjfZjzVcJELDZmcqgKLLDR3HGtepUwbUDTvcsn5EbTiWkZDnIY44zIq8XeYdZT63YDbF8skLKq+GXZdR8a4IQrXOMmWnVp/nXbZuoxoTLIaSkMreugxfspI5Q+DhBVsukgxS1iN5dsEDEHGCw23vFJnJygxjJmSbl3J8Qi/MqXAnyObgnayBUALPiLjSTTwpTzpMwD4edEeTUzZoForCdM++FLaZ84CaiwK6Q3+NPOxF0hY1IDF
MeROge1H+Sqeofl4YslAxjZ4aUr+6VxwS0lth0P3YhU47hcsXIWjKiSVc7icBwUU35z27vrUBQZigFTW3h1Q9yAJzcw0nJWRddEdNe14cxTUSHINKb4GDDypg+DbkuWJwapp0hCTrUfeDJdW7KiDs2Z/d0e6fYMDi2jK6niZkGKBhwzZrzGyR8w31JgAO4zZt1jn576EgMYKo4eCn5fhTS3Fqo3jm9OjaMYoq776NXEDKTYqeYTn5GzUmyP1JeuSsswg3Qp9nzLQZUsj+wpSwsw9qacL0Hp0z/nF2N3s4OspOXPeI1YcuEymKdJTK8XMYS2pQc2uvmLHiKYUYe7zPr2fM9hPXm2N10+rg2JvXWTWxmUWndXPoLuV9mjjdBk9N7tX0enbT15o0gteYCkYJW+EtTuPmPfS7psgy9ixGXK0RnP/UA/puYFtyooKkQh+ml4O9RvYNEUhRpdhC8PdOfDTWvtEzTAXfrQGf76Fns0QQrE1ZcQ7MMsQdjynX5KvAcR22wWNvsh8wt06MeKZq7xEV9LjH/BXtryliJOjIw8eB3EblUQWVKnz5T5iMCxQ0Ij9uF8JTSiAaLe5el7D6ndSnPMSkLunetD6jgzK/eOiNDst1LR4PSZudaqLwDmAs++JDhDeCHHmMOWetcaiTAOuCitk95N3aS9ptGVqm2/N7LBNrvx24L7z8cqaehFLNWoObr0WUTX0rA+S6gHGbrY6ZL68JM7RTezGVhn16Bn44miX/jONnq4cVw8i5kdwwW1LayEjm0YIJCd5ElGEuMHp+Co0aup1snC9UviJD1WaGtznzyVTCNKjuxzYF0318vNvMkKcHR6HdGMV1vl3v6CNlAFec3kqWKHdljC+ShtnTeuLw8wHTCITpJIlAc+HeuTd15EwFkEFXt5eeDQ4uHmorMFz1phnR55qwd/JK28FZwmctlx4tvyw4HN9jdHGWZR1oRfoypXYsiCV6gpDEbvaem4yKecCpGZL5mG5ic8sRvWq5OZBdlq/ch5rbfEwmezK+jUF/ZwM8t4psPdbGCS8WcYjLgjcxAzAbLU2B13QtwLE4o345eRdZ6G0XMmp1z1eYrHBEBR3rREdheLp68Q7HblDPMFA9hOTzSStPKtQ4MjygVbvGa1mb9LXEXat0Exf6UqfIFTwCIuW782Kn0jmrz1jaM7eza65f6v1Qt54cB7lq7dqB5vtCOl+hcrT1/HviT4A9AS8xa952h6mtDoc+WaacAEsIMeXyYtJq4nyTa6fSko4W4h4G3P1WYJptTDngAGoZ6u+RlymqOnUnZC1Xu9qy4K7Q1ErOOoQ3Oa/+SyxVj+7nzn3RerRz5htdYedGEwp6VxTVAelfVPhEQxNb7rAOYkNmKCeYlpN/5mNAX1rwOh+DUvHNpB5I07Szz9LPN72QtLu/I1dq21vyYpHAQjth9F+ds2LvZT4s3reQt/kMXwvvXfzbkqRwp7Z0x/SMxEalU7L0xRGUzJXLXZiBunJFxfBEsLuI/WxvsWBXiqDa+k3AGhhJir6o2NSchXn5EW7BRXQeljbR4AzAw2WV3MNzxN04tPtrttPTengKX2FE8dpp0gRQdrjjgXQ6TGcRJbuTaxOsQT7ZfeGGZ/XIcqFpGVsx35rzzDmXlZ/LrcpdYP7kNmeyB6K58OIe/SykmVlPz/qNdqu9UGK+M0fwiBQBvQj1gJBY+da1VMeS+7dm2US1YzCny2eWu7+hS/cyOOMApwDUCsh79D/EHSaRCNGQqHAM+8MtXNrk9A4GajJFwEzTGwITjRzTMiK+4bGAhmsYUjBye9LbwPX35qwzWthgbokTNt0aYbIV8KT5QS4rKblkx7xcCcZkTcDpXH3bPxUVtDMqaoxfnPLmwnAnu58NiOoWNHfKFs9p5BFO6cTmCuowxqbwgjVAxnKzu+DG2y1nM6L9QAcjMjbqiSS2zGOGh0bzm6ITrWw8nvp0GxkivZG6AZLSxhsOKDLxy1+M4ZUpy9CTZu4
RqyZMaDKJvqLtymlsQM/dSO5dDfJv0CfrE2C+lbuDqcvIrhjS/nlFXL6+n5V6H/02sw47hgNomioRnNOqbO8wH2xER8cUdkYaPSMycfLVayxufpo8F7xAcW8m06AiWelWQgSNsGpFZzOL0kAbEYGVN3c2B39mbBztqY3q5mVk2khUhKYqEzWtuBsYb3Wq8Ld5633TH0HQl7cx1IEauSFsY1I3uQMkJPpk+DtgVc5Q3rsaYyw+cu7NqHCajyXXvTeC9dm24koKohbvfPKpBbvL6WMwH7Ph2Jz8klQGwCo/3qnk/YpYTZY541jDlH7h9z3ah2ct6Yfmv+hGzKjQ+XQD3eP4whKYizwC51nKmjAIVQ+HIrK6VKMcO4oPkykQyQpKnffHott2vGjR6NvAccq6Q5l2Jo0wam/ffD10W/Vu5E2Ixah6ok90N5MOoWSt8wx/jnYYOtsQVM+XtSXHvBFBeOs4L5p5nVay7cyXIWOi1x1wLb/kMqf+4lIzjLugWqaU53i8vbB6occzHJ8LLV1zQWgYiVUwPF1KuQefjQUq69088sDfb9xFhuIOA9PY80nWqjXSRnelfgUJZnIOBrxr24ts7v2YZNaScEDI9+E0gs+3m84yukC/fzJ/+E2Wn0mZsS1iZ3zimxJPNyfiR+bBWs9lRoX9TTywKV6SoohirjVc1knwZn+NdW74qxud9v0uNi9o8XMVDlLiec1Q55cBgvlnA+1Ae8n0qQuku7LadcLoF+ZY8GeNAp+2+iccNtrwDU4feJQ4nPi2riy6ljaJBvxdPwiWQphPdZGLCKKKUPMT/bilNQB8mHsRWvsO3Z2OMepauJLq8IiYbAjREBPrOUJFg2hrw3Hhu8l3ENrYd0/Y0ynVMSQ1nHYciXSV9V3R6+v4JLIapmTCPBO5qWlxJcKMlu/E1He0GjmMCqsJEHOGAdAFZ4DytONsNDuzWFmWF+3TsDYWnxHWRLozXgfGvsrcKrFmVOsE3u911U+uVeqO68DIoy1O1rNDGs4L7M5azG7tpMKs8vPYRcofBG6jmkO8S3uRvHhS5aZ8eCyiaKrjwxnKcHboeY8J5irzFWXIQgpbemOcBumz+wZ9/XODEyGRNA3vj0KubV+c2wxDPo/ovj2G4PNUACf5/jAOXu+PyF6S2K0eQzimEcFqMMCkt1Or0shltT5HfZ+3c8nuU6XXnqPeB/h8C4HfSjGLTATY5041MU9xJrKYX5P4XmtqvWYKed3GitUGQH4xvb8NYHtA+0KRNq3hidtYr7l6XxxPf/ghV7kYeyYeh5it+7EEVH4qPWZBjeKFhmPy4T7cel26gyuz6Pjk13wmygOUXiOFylHmyeIFnn+KywKwpI/mJZE3v9ssUdusVlvh9WZsk9l4r58C+slPqwG1rjTxgOVQdyW8Hrg+gxrNGUZUX/cb5CbDQ8TxtAbUodKE3+4NW2r7PtKk6h3JxWkzMfIyf7TUHOwt81jiEXOb5RG7iaGreWqGJoAyfpOMjdzjBJStT4V6328Ns2Wj9VsvPTB2NfG6zgXmMFtuy9E7mSvBFgzhBep0PrBQLobwUwjWH+vWPa7f1Qvn0XzYv2WC3VsTC99JhPZADheQ0cfymD9er3oB6v6+XgWQrDkUcxZz3+XgoOeEf7kOSaNk+J/bc5+T4fEtk3+RByhT/F/K88e1UJP9AAF9x7bFd5v0OygjwXpQ1gyutedEnIFWo/uttUggs9ZuXdL09frhV6ut2H0iIMTjqnszcjFL7FvL73vr4hDL++d7eBF5FYoM2hF+3QPuN4celD8djzbAk8juHyey38QKMX33sK98vfkumngImoh8DcptbT/sH/6P/gLtziXUvnXsmUZAtoP8/mM7vu9xh/e47Eg7H77ePBq0s1q9AffYLb9bb2LYWB462JdEJK1G3EQNsdrH9z2Ue3vrki0f0LqAso3QOsGCLVfIUYO6L/BffX3rZNcDumw1xP5P+3v+/YjvA+jzFfSrlUYk+hS
+5XJHejsQ5wCUCfDgXgPdYLVWwqzWPW0QJd98UA+x222x+q6LVOjAyqlwCM8c67es/brfYQnEaf8os1D6IWlnJx96KIMabH+f97u+uHX2mUQyAuoUZBj7BvX6qqsmah/L575tWuCY3/pN6UG/sefTJ7717Ye9u4M8/jj3t32Z0n2fA745fpeL0aUAOgK9Zj/gPtcjToC+aP96DlbX2VD0CZDxb3r9u+PpX+rO3NnK47Da7rx5aAv0bgf9CmTYrY/hsScNAmQZ4DdfAnIMrkT5FYeg/nzjB8CRLRnv+CPWezcqgNwAZndftlp9y5ebfKX/FDFg6r+2V+Av4DewQpzxVAlB27QT4ORh+hxm+toRihIK7OUA39O4JGA73K41zDcSf0nXxPUO9JTwiPJf94r79hcy//VsKYntFvTt/t2v4Bi45jdcB9uOwH57BeXLS/zjfv/tr/Ffvv/3l5v++3UA35998rNPfvbJzz752Sc/++Rnn/zsk5998l1XR2SJXJHbFAsRDUTVkA0U51d2pIWPvtxtmHJRpDQAsbzUi+7J7/nADml09ODA1MXY3Qla0eUvOUbt5e5Nt7Dm7mcoFKbWGDdlCpteqFHutd7mr3HXKrSVWQtkpOTXp1gdmBnueZiEexG7d5HhVPnKUR3p7nBKbX/c00Otd1xjcfYgcXoX6nhIX5gv3V763M837bASu5Aft42COXWZq2slUIzBmQtL/3Dk9qFrOAK5Fr1SkDCHul6sUJHdDmP1Lm4i6pHImyQxWzTOo/PUd2qfjNwfD8VS4kQkuufnPGFqMxd2wnRE+z3LznUocvMuRCWvP9f0kjz/kFVZjtpSUNv3Wycb2k6XxRycsjPdq1+TZFxsLXeP04g6zQXCjoJAS2XDQFLDS7wVDlFeDMfALGzjkYlqhLvVHBU3B55GwJQjB4cfCmwb9LPMZ5mzBPN6sO9ck+xdVtoYpv1aCqPzh06ojLIsXrShmpq0pL7QEXLyqvvZ0Tw2c+QJ87wEzknKOPp6x1+6jzoaSag4fAjkNZg2jwu4t30yBGVxleIMb34xrp2rnNbyj+nJ4i0e4utWovS8jqV49GE+2c9WjFCoMfygm3fE5eDEI/iY6NdQcrWlTx8eFWJei2L4aPMbbpvSLnHDvD++rsTvuh8EKuk+Qv7OVF0TPTUX6CAu63dJDp72suLFeoXPuIAiEz+jmnv5mUr8pNmH5wVa5TKtUVdp/rGyqKAQBrYJRWn0tTwufBBGagA0UH6I4TRJ9UPIFawWghFm7Ta83DbHglMO8uH9sfuFLmMSV4mvWQgbuUTlSNEzKAbOmuJ+zUn+1z4CnPwmc3buqZSIzll02fie3a43olCfZXmNH+EY5ldaLR62tqm8QcuGqffx5TrjgpxyUwbSanok6Lgi6hm1eEbo20Rew2xETv9OX6/XZCATZbU5mgclm6/mycZY5RcHuH9TIxynTQWoiaG639uVogEt4Rt+F7hHlX9v61AV7FjNcXvPZ+t5U4ON2V2mdLqLgTlyRZfZOEQDmaIKo9d4JFP2zSOeaq1peOmHzMdDVtMtl+M4GDWN7PYB58fAiQ83wxyyrYBTTsbnJ7ZGpvTblZY2syEUqryYUgetY3M1jffn8TYFd/5xJfhH0VullIj13rbj7EANiibNd/vK45lDbPEBJ5UB1OAdmvpgOL7T3vWmb8K60SFF5kKhLyhF9ONI0k/8bSwXfOTg2sZ93+HQ1Wo8Rx1pHBU+WwdVxnymg15DgDWd4wYHIWX2ZJ+bXhGXpzs0kKksvoEZuPBncNt/bFvBjeNEWaw47nCJ721BULeP2jUPzG7z+0wbN8fpFsb8HkXo9IvRkcE4IsMsPyE33XmMNIZF78ib78ZLvU9Bpd6rh2CbTxyKC7fe5QANdYYP1vPRnGTsKtQuIqUja7kXVSNPTKae7azBx+zHrjs59unwVWelY4VdffYps2EkmfNNzwj1FFkjimOgenA86rwjIx4fcFSUbGn8utq
V/yxQFdWX+13jTXGqCwLTHFkLTvgHi6SMUjFPR2oZM16qXZMq+rl9FSjj8fNiwcdxHBSi42bESTnQIxI9MU7iDBXAiVBjQIpTAaT2tf0HTf3alrhbmTu6BOzlhagUZSvtARWD6f2Cvxj6xn9G4+xertkgq17SObpmkXbD37R0MRETpQP59kuMZJ6d915SZPVuN3y3yia/Wi+NyI+5zhrwUNHwNe8FPnV5kgx8uYLyxLmluFYozBnx/etbI3P6AcebylrqmFg+cq9U4Hhq81bwbH3D65k+uVUaHJiB8tP96Ebnn8QhcnbLsOO2HZt55h/TyaJ+9sqTjOhxIVYG+MLVYHGocwbLorArOnqQ/k15wXkS2f4CUjvgtHdG63ydWfzE+HgFOxJ0ER9wUGZsbRraEJM/sib+YUzqDWe2jASCRA+dyTBmoQ+6aAySyfxTfQpwWCSKEwH9kBg+nv3jcMa1OR5ffkv+aPF2tneaPkP6ayrRrJ/rKPYI8/H193ZEXy0bcGuZTSZsrKAtS8exD6eAQ01mn92itTbwgqZv+bfm4SOs4occmW0zwuvx5xhmiA/OFSoorcmu+O/t/yr8f3/0qHtww0mvYRzvuPfOlAPgZ/n1bLW35yqvOfh5Qp0y8wWloW0TNrKk2NpwhXLvdAR2RCOu9G23kM8WL2TR2w/i6dxfHUqRBQB8BAO6BUeofaCnjI3MadQ3YrEOaEaMwQcO1vrtFn+LE1oj8gNbjG3bFoO14CMTZVFbvRf8BcT97udvhwToNlT1vN8yWT+1/Da6k3fbFgo04DlOlKdt8uMHmsPb0nQSDjZ8oQe918AHwoCzvGky+owr2ktIktQ7oA3btr77Y7sjhdpxhCOez+e3Zn7XnmeZKotqcCcVEY+dkAwPWhbUEcL3R3JIhMIAeDEZcBf9hQ4AgvEZF6k33/9tFE+bg0cr4G3ceKZXjJ5QspT9mB5n6gn6rqBxKt7XSMPpR7ld0PLsdqKGt7ngWbFBg4JVhw8NNnmfv7LyM8KJWeuSRrP4LZuvyXAXez0XCluXTJx3KqWxQ/tXm//a/oPNf23zsC/amtS22+tFfcRC6WcjqABEPYGfw1bzEccfMsBPibV9N5xLUeehz1a+gg5ThOr4EDY/w9D1KmKEwBlyLKwNL0gHAlz4IZ+nuwTwvDF+8/aG0Q+u+1cpfm3/QYrf21VJ7kwX91C3IZT+OgXwQrWeBCqA36dUaGSUXqHXBhr6zgRWuxKGkCgXRAC80AHnfhnhabNljGoiCo1l9JsbVgBl5mPkYk9SatzrjdtktFDXheOxFkIApID3fb12KrcHx5TE/72O/+cAUPHaEmnG+0UNs1Bo0Bs+a9UtFhcBLSq+NeApFuZFwgGRMRdjjF6RbJaAygEr2UwQkwGlZyS1Ga+QReAsDQq09SA6z0XXMh5wUDSMaLJXpkl/rh+/Q8rfIWcnsrfx3TBQQ/F3gqgpxr6z2YK4/K07tkrAV0qw0RIiK7JlwADcxevOwuGHnkE7k+E3GMN2Vfn8rCszQSTzy7IUbQ22jc6HuKWE/Q2jw7qGc37S8v0a1e92swcAxEuCBrFQzAZjw+5KorB/eP63tTtQYGfrILeuaYq/0P6/aJ1koQAHF2KzxONdqgBkPksDH5NEKdnwv16Lg/LRi7q+3haUf+63zg9Q273DuL304YsSchyhZvw35BxxHzJvcT6wYnbUdtccFUmwZDCGi2KCLgO2twwJxNPMevhQf/5gBV/bLtRBSTi4nefervW9bVW1jrBdPJ7YXQagT/JYEQdymsouQGocH1+LaRc0+7kSgDvogjOwChl5QLUp+ebbOhIo4tJMJMfBAdUgKrxoAdJr3/V/9o6RwFAmw+lrOAX7wobHMC8GWt5x2q7uM8RvEGl9vQAJul3vip9frRk8kaUjKLQBbymEdfz/rJ34x+8SnYdDkRKVhmZPaEjZFuxthbNgWS1jMUqOMGUYKnoFQTCcBs1TXzxiyMoZp1nSBm0
EAp9A3NlOL1NgnWuilJcL6oaEAKEyG2MjzLnXFknDaOAPOPq9/Wcs6f+DDxO+JS+xcReRBbcYCxzq4PB04o/A2kbvIV/hY2n2mgpUEDCwZQH0yJc16PM6p6b08zFUm89XO6TaPHz/Aeyw1r6QH16F/ooxgBu9X9KPqV3PZ+njJxt96QBxA+xAocttxZHxS7r6mY8S8l3Qm4F6xWBPLTSGk+f+Np9MHhVMl2QbDlWyY8KtakDE1wkEoIrxnML2sfLLG/0ZeSFl8HTGnfBsAO3EDQGIMnu3ciAnDXGWqImV3KHtD24i9ZhgEJco57tJyyudQtuh6efNwhv25tC3jH4+hdi0S6S9E8B7FMDXfgx2o4W9U1vKn4kKhLJQK//Mef2pc6j442Q6H4YU5QYhCXBN4OZvwAv9QIYT9GKI8s+R/RZ/CeI5xJcOm8fWMVFSeLrNf+bwMvK5BwEgw0SLlDGnQ5PH6cfsmbfmAcgXyXxiCYHgU46wL2r2Zs2HfXN8Zp1S7LO8TYiX/XMY8+9biUn777UGz1AaukfVwZeDtEBYyO+AGdonmWMRiW+VzcDn9ximW5r1sZQxfNRyf+uiQH2BO7RTgQgF4tM5Ind2ubMcmCer7QFCwHbFaQAopbLIxEdvQD+BO/3q74QZPvW6OSWccnHZOP5ohb+H7v/xRwZlBauLAe3AJgMBESMFNG4kQdUfkVTXRZEFqOXPIHpaAuCHF+I5y7YfkAB+xpV+knwUhlGyfcB2F97G61zelfvnvOyPFvK97TqAiJ/m7usI0NIMkOX60xVxiLrZC1vrtPuizhlOmMg5B4ueLYBfQ7MBWkexSuMen2IHhOLN30aAmrDnvDk4AqgesBiPY8vyhVAJfF4wICEfgxEsZApeGKK0DlD/b2Ojy9EhjGcG8pUKq/kDa4iqhV2LA/6PyIYL9N6aN5smCoKWX8fXFNL2NW5Y6YxvP/7QJzOa42yexGfYKNzaUITeMFWESBElTwjgMHh3kZvy/Gt296d1vj39rsmPLEYZB6nfeFb50HU4B9OpCYgUky9HkhHIPCSDO8J0xQ2/AO3PGzER/cwthCkUSPgONpktihKC6wiAwwD2fUcShaaM8700aVHGIVZ8nsMG9CdMnQWoBAgEcBIg+xyxLdk159+NV+znSqIkTBn9Gh+CcMQH8UjpkfItpNhnG9E4AxjjEWmHZ7DFB03qL6x4jjC8yQbyG+uhvsyw26WQ9HCqKAGHzJ7Gc8id4VKk6y/4zh999df2f25q/7dPZeUvqFM3vN0fNwN5ZxJo3XOYJxPBYxBMFiZwTZ0XR6zpIR9XbVfMlJoffBDf9I3Orx8RiyNVP3yd669fGZiHA9kXxGlgYV++aGcc0Q2ntzw5CJHfVBCbQpL59b60BP7A1+WhUM/PtqFUQ4XQWeaC/COIUpySbR6z9Ap19y9Iw59BsSYyr0ySorL0vJORqQShC0JHDoIomuIWxzhBenDYo7j1UUMReLnCicAZdnw6GBIBW+P3XIUP4PFX16R5O270bfQ/ImultoJIlC168DEZhdi+GT98pGgBV370ApkZv7PgK5hk6hNnIMa4gbYhBHxKIW63Gz4Iq6NKYvXHvv/e/tM2qqYGqH/CsiRN7//w5yCOZGEGYyA/3lcGY4aUAIqQyYMYgSnYOZRgMmI54DM/MM5ny2/JznEnAu1fluR8kQhCszsRGF/sPcHoDVJVmqJgxhMZfWx9pfsCW8o8TVkTkEDowBHaux/PUfxyL1IPdPtPY9A/5X/ieUCnmBU/PPijikO2FNkfOduvfBSwyxC9R514EIwBYAum0WQpw3rA7fTv7I2cZvJDtI2wvwd4xA4JbJgDK59F6OoJxaBXGgKD9dqHV2eAD9e6vdHUl+JR42OsISb8Ae2+t/80Eyj2wZu7JyiLF2NZj0B1fJTNIv4zwsE1tU6WFkYKmgcVHPp7PMflE8g1gPHKm1ZOwJkv68ODOAoN6RLgj4a0rYWN3qfMgYP5iLZ
IhYdNjM719yKN//ijcZ6BNsICecm+eVOq1NoN/9AA7QAcbl9Rc2Jhhffhp23b6qa70cn7LDTIyqrNWxaaMJDXGi/0FPolM3+bee+Ux6uTT/bpOyebfz10vXniCn0k8BsrMCwA7tCAIIkaRqi2sSGHH+c/iIaBpd/IjEiOd6r8iGvnJqWewYKz2yjWO6NSqSzWABWSKOiZe1A4OMF4OsIwsS8hB/OEJJZ8xv2P+Am+XkdmNKvUbQLAAwMCY1M+8uiM4ZDYXUMKmCYbfZLDxhVo0t/IJ4ltT2qbBdBiCmYFXbNApXY3L/nbsJVbtGgCXmB345t9OjycgQvHYd1yyxKYSQA8OpidrxcQRxT2braC/coH8r/lAzeYb+fll8y9ZM3GHxjXevUjkkH/vqhq8V7pML/7a/vEzui1PvtZXpTGIw/FFNGT5/92Jm8Uz/uzlDIUjmzVZ5fR13GfhVurAQaaD75OOAh8LbLMpmgfEec3o+KD7etNIuMb8En+88UAnLbIzjmE+rFtOP0dpYeQ7ZQtqhGk9SAWK+rnNKpaOGaypnqOj+CT+/4Pmta2gOVMqcqTjS7+vZyUwKnC/KhaEPrxAEn6D4hx4HO7skzTML8M/t9YuJO+P0whuZm/vg9ZTYxP3FItgFgYUzih8KFc5iZW2dx/JdOGsi7Vekpfi0Bv2Ye+IU0GR1B8nEUK2x+0PmD+0h/8OSj8Xz6i1mpazxkVY9LX7n71xvHQXrV2TZP8t2RivA6g4GGC1ZMhNYXDKeFZW3+Rjfwbfu5/yA3+r3Me39v/uFBoDqV7c4E6/Bgl1cxm50hS6/6KK/ypN/h//WPxreztPBabO2FJ1QJfQLGJrJbZGGAsJXD1J7rxwbdOk8BCXB8+dO0MJGldOIvdnmWJEfz56F4eaPYf6/+1/aeYJJIBZumnnv3wxfmPUeXvnBiV9v58Hht2B+GbwGs18VSnr8BoqJevoToc+cop03BkPdFg3G6KRLgff08fTkGC2NnST3yVG9W3iMXpW5F1pO9aBUIFpAADRpZGqqLUpD53/41sGMTN2+u3Ye/h5crTCgTvM873uHsaosXY5JUOEfOvbO3Ps+41uTPuc5EykttkboJv/pkLnC6mcDaLDb/SdKYEmMF9s2hYxhArXHyZ0ROrXk/nRUNax9yQGTCpY7KVp6H/FX/50969eW53llpXfhHXRrCvA1QIQMo7wrN10S3nIund6nwfgU8FFQlkLfL7zTDKK7HGk0/+DZw/eGrzEVauJE5qub/HdkRN8wqnE1+y28jaRRKsQnl4xgIZfGDGt5ypz+t1QN5Iw4G232WA9Bkmh5lNuY7gotnv8a0+ar8YowwjD4N4/P/Twl2bQGw/+DV21Ru3MwWjDxP3b47xYBOANRFE4PWhdXUW9S+B5X54Us2CTw+CAPIvbOFPO5k/T6AdHB9NK53jnvimbpOBbAsMbT3tqdSAIzKf2JThuzKNk/83LHzng0kDvtQT1hf1NhEYaDTApJdxrFZfl2CU/Hejp1eMkFoPiFsiZ9ndUPdIg1kN3SEiyCr8rxG/TYWG+HaS1K3/Xg6GCMUaoUL94bQrW+Ar6DozvOsIy3jkKxsAAQM4iXp8HssnUrmKCaivbXV2FqJuPJM3r+Of/qECTv2vLfof7uvQAeuUeETE+euFrYCXowCgTF4WXg7DoXYAGNi18zBI+rts0piO2uyw/LJ8nf+AMOahTZw3JFgB7gIzHHFv3jnjb8+HuMVdg6JJHJ6I4MrDezGOB2+S6N/r0T9HvP9h5sFXmYkFs0icCihdcjEa+ci+bE7qZb/zPu4gCL+uRsJPS/GEC5wUzzL99OsvcDzgj0uz/FitBa578jx+t+vHUi3Kcxqe6wLoNvLjKE79WInlx7o55K/b++9Wofmx1Ez9uwVoft2X/lj3pvqt5H8sDQMHgr5Xh/kbK8UQP1eK+blSzM+VYn6uFPNzpZifK8X8XCnm50oxP1eK+blSzM+VYn6uFPNzpZi
fK8X8XCnm50oxP1eK+blSzC8/V4r5f9q71542oTiO46+GZHugodxKH24zu7rEzGzZ9mSh5aBkFSpF7Xz1O4fSAuVsNl0pLn4xjQUsl//vnGNB0w9SDFIMUgxSDFIMUgxSDFIMUgxSDFIMUoyBFIMUYyDFIMUgxSDFIMUgxSDFIMUgxSDFIMUgxSDFIMVsrEeKQYo52CeWd/4gEzIhEzIhEzIhEzIhk+WxIsUgxSDFIMUgxSDFIMUgxSDFIMUgxSDFIMUgxSDFIMUgxSDFIMUgxSDF7DQhxSDFIMUgxSDFIMUgxSDFIMUgxSDFIMUgxVQTUgxSjIEUgxSzbWtDikGKQYp5NFKM+pNiXYpZzfclxbhIMUgxSDFIMUgxSDFIMUgxSDFIMUgxSDFIMUgxSDFIMUgxSDEGUgxSDFIMUgxSDFIMUgxSDFIMUgxSDFIMUoyBFIMUYyDFIMUgxSDFIMUgxSDFIMUgxSDFIMUgxSDFIMVsrEeKQYo52CeWd/4gEzIhEzIhEzIhEzIhk+WxIsUgxSDFIMUgxSDFIMUgxSDFIMUgxSDFIMUgxSDFIMUgxSDFIMUgxSDF7DQhxSDFIMUgxSDFIMUgxSDFIMUgxSDFIMUgxVQTUgxSjIEUgxSzbWtDikGKQYp5PFKMsyHFOP1KMR5SDFIMUgxSDFIMUgxSDFIMUgxSDFIMUgxSDFIMUgxSDFIMUoyBFIMUgxSDFIMUgxSDFIMUgxSDFIMUgxSDFGMgxSDFGEgxSDFIMUgxSDFIMUgxSDFIMUgxSDFIMUgxSDEb65FikGIO9onlnT/IhEzIhEzIhEzIhEzIZHmsSDFIMUgxSDFIMUgxSDFIMUgxSDFIMUgxSDFIMUgxSDFIMUgxSDFIMUgxO01IMUgxSDFIMUgxSDFIMUgxSDFIMUgxSDFIMdWEFIMUYyDFIMVs29qQYpBikGIejxRjbkgxZr9SjHqX+RAVI5LwRZald3JuMg3m83gii3GZX8kdyaHWfikWcf5VSS7HnumV89+KedMclfMni5J6KWZ+1WbORBbLcylqbf6tyiK8EI0F8/Qmm4iHGJx2FrVau5par5ZlYhrk8W1zn7oAyj2cpbE84irq0fB4OGik7VgbMS7PoHxhlaRmW35jQ8UVTn1DeZBdiLy1oaJJrM9891biaxqJV/SJcaOpeNc3qVoapUl+NI/v5VJ5rW36s0W1Tj67UN9fpYm6LeaVXUtBCcsV601nO2/bVxfB8rVZHMr3gqb8zWQ+W9jW81bTLigjhRIVTe/uMs7F+SwoGtVdFsya7TyKp9MaexQGwo9UV5D7SX+K2hpv4otxVB7reXGoJ/6eBhDHbg4gqw5ca9SDgaZVW3ZHI4htaRqHrLgz/5Evn7SqLs81b5a2WcIkTcRGvctFQSlLTWQF1YjRIqeu4jBUu9Fm2Uy7g3BctxGO51itcEa6bPYwut+KLx/u359896/fOG/fmXY6FdHRoBXNJ3H6eb+9wBV+6Oh6gW+Nbc/7U6FbVdXUfvteYG7ZCwZdVVrXCfYxQlodjZDOwqmPkFYxQnqOHCG94Eolnozns3VYPY+Ye20rzvCAI6a2rdhPpFc6Xt+90umoV9od9Up7Ydd75eAJ9Uq7717pPpVeafXdK9s66WmciCDba62jKLIm2lYdemPPPVCte39fMmzV+rW8qs1Fst9iu+pLO4QUU7vYarb2Y8upmxDsUd8h6O6xqAuj+//xCunfslm9JVjfyBi2stHd/9rhCkn9P0uqfrtWN0LkOV5+TEOhfuI3</diagram></mxfile>
|