Eric03 committed
Commit dcb58d0 · verified · 1 parent: 5e32908

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes. See the raw diff for the full change set.
Files changed (50)
  1. 2008.10858/main_diagram/main_diagram.drawio +1 -0
  2. 2008.10858/main_diagram/main_diagram.pdf +0 -0
  3. 2008.10858/paper_text/intro_method.md +15 -0
  4. 2009.00585/main_diagram/main_diagram.drawio +1 -0
  5. 2009.00585/main_diagram/main_diagram.pdf +0 -0
  6. 2009.00585/paper_text/intro_method.md +238 -0
  7. 2101.10043/main_diagram/main_diagram.pdf +0 -0
  8. 2105.09437/main_diagram/main_diagram.drawio +1 -0
  9. 2105.09437/main_diagram/main_diagram.pdf +0 -0
  10. 2105.09437/paper_text/intro_method.md +128 -0
  11. 2105.09821/main_diagram/main_diagram.drawio +0 -0
  12. 2105.09821/paper_text/intro_method.md +48 -0
  13. 2105.13022/main_diagram/main_diagram.drawio +1 -0
  14. 2105.13022/main_diagram/main_diagram.pdf +0 -0
  15. 2105.13022/paper_text/intro_method.md +39 -0
  16. 2106.13195/main_diagram/main_diagram.drawio +1 -0
  17. 2106.13195/main_diagram/main_diagram.pdf +0 -0
  18. 2106.13195/paper_text/intro_method.md +30 -0
  19. 2110.01823/main_diagram/main_diagram.drawio +1 -0
  20. 2110.01823/main_diagram/main_diagram.pdf +0 -0
  21. 2110.01823/paper_text/intro_method.md +29 -0
  22. 2110.05886/main_diagram/main_diagram.drawio +1 -0
  23. 2110.05886/main_diagram/main_diagram.pdf +0 -0
  24. 2110.05886/paper_text/intro_method.md +189 -0
  25. 2110.14805/main_diagram/main_diagram.drawio +1 -0
  26. 2110.14805/main_diagram/main_diagram.pdf +0 -0
  27. 2110.14805/paper_text/intro_method.md +77 -0
  28. 2111.00160/main_diagram/main_diagram.drawio +1 -0
  29. 2111.00160/paper_text/intro_method.md +82 -0
  30. 2112.00054/main_diagram/main_diagram.drawio +0 -0
  31. 2112.00054/paper_text/intro_method.md +73 -0
  32. 2112.07436/main_diagram/main_diagram.drawio +1 -0
  33. 2112.07436/main_diagram/main_diagram.pdf +0 -0
  34. 2112.07436/paper_text/intro_method.md +192 -0
  35. 2202.01651/main_diagram/main_diagram.drawio +1 -0
  36. 2202.01651/main_diagram/main_diagram.pdf +0 -0
  37. 2202.01651/paper_text/intro_method.md +0 -0
  38. 2202.03057/main_diagram/main_diagram.drawio +0 -0
  39. 2202.03057/main_diagram/main_diagram.pdf +0 -0
  40. 2202.03057/paper_text/intro_method.md +53 -0
  41. 2204.00559/main_diagram/main_diagram.drawio +0 -0
  42. 2204.00559/paper_text/intro_method.md +114 -0
  43. 2205.00274/main_diagram/main_diagram.drawio +1 -0
  44. 2205.00274/main_diagram/main_diagram.pdf +0 -0
  45. 2205.00274/paper_text/intro_method.md +23 -0
  46. 2205.14393/main_diagram/main_diagram.drawio +1 -0
  47. 2205.14393/main_diagram/main_diagram.pdf +0 -0
  48. 2205.14393/paper_text/intro_method.md +64 -0
  49. 2207.00046/main_diagram/main_diagram.drawio +1 -0
  50. 2207.00046/main_diagram/main_diagram.pdf +0 -0
2008.10858/main_diagram/main_diagram.drawio ADDED
@@ -0,0 +1 @@
+ <mxfile host="app.diagrams.net" modified="2020-06-22T15:30:10.331Z" agent="5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.106 Safari/537.36" etag="hVj61gA20KShSrwmgIL7" version="13.3.0" type="device"><diagram id="fvk-823djI9nrVz6E-Es" name="Page-1">7Z1dd6M2E4B/TS7DQR8IcZlkN+1Fe86ek3bfbm96iI1jt7bJYrJJ9te/woDNx4CxLYSItRdtEAKM5tEgjWZGV+Ru9fZL5D/Pfw+nwfIK29O3K/LpCmMXMfHfpOA9LaB2VvAULaZpkb0veFj8DNJClJe+LKbBJitLi+IwXMaL53LhJFyvg0lcKvOjKHwtV5uFy2mp4Nl/CmoFDxN/WS/932Iaz9NS7tj78l+DxdM8fzKyszMrP6+cFWzm/jR8LRSRz1fkLgrDOP1r9XYXLJO2K7fLfcPZ3Q+LgnXc5QLnJ1/h2ctvf/izDXtYveN/7r9fo+w2P/zlS/bG2a+N3/MmiMKX9TRI7oKuyO3rfBEHD8/+JDn7KmQuyubxapmd3sRR+F9wFy7DaHs14fiRMCbOzBbLZaF86gR8SkV5/T3y3xREcfBWKMre65cgXAVx9C6qZGepxyzPS6/KOBPoWdhJi173gsOOm5bNi0JzrKymn9HytHvEvkHFH1mbwu377RP5Hs9C9vX159PXvycPv5JwfY05BxqYLeOkQULxwsWWZt9fwvzE9WbbFW5EBYSf3/YnxV9Pyf+D/Dabl8e8TLQS81eJRNaPm+e08jK96L5QDboUn34pOf1SClctlKaNlBdXyBR4xG34rcN1UCEvK/KXi6e1OJwI6AJRfpvAthDd/iY7sVpMp8ljQN73PcKWBbDTEWDmQPyeTy+oHYhh17B7iF2H2RC77sDsUsOuYfcQuwyDendodh3DrmH3ILseqHeHHjOwRnZrospgnuwmBXteyWxGxL86wlVuUKPcq3zZ1aJaQcc6Xa7rWE3mdbrV6fu6XOoFrKR39WUwiwfv6NDslhLoI4UZqff03P4gvae7ant6Q0fv1hu7/Azb3rJX+RmSFUxH5TGyXzyGOkYf9aiPmAb6qNnSpps+0nTkoZHS0a2OUR7SlEd9xs01UB7eh1Ie5/bk6v3P6N0XNhNSpEONPupPHxFbA32Ur3/3tzDLJjx4nF3VF2b9gM8mslq3PlS0j1iYdWUszMLti8+3sTqQjbX22WgyurbbMKGyy7BrOsi2PO7t/zkVfrhVx4faCk2cSMK66OnsYMNOMzsusxxWYEc7dCQsS56ODjHoNC+nUNti+qkaCUuBp/NyeKntcnlxxSiR6qdfoOW3igjSBs+9GXFFGnP/Oam3entK/Dmt2TJ8ncz9KLb89TqM/XgRrv/B9VH80n8Mll/CzSKpIIqj9GVvn8NFIq7PP0RLb7JnVAaczL259e63MkrvL4o9WR8ETi2OS1JCDq/JCHkWoXUxMdaXmKC1k4qYgvX0JnFg3feAqb+Z74b7bUP7ZHp5n7SoaLjo/a8Eeguz/PibOL62LRvlBZ/esm6RHr0Xj74E0UK8c9Lh2ntOMC050tYlUmhuB+gSeVkULAUEP4LSzSEBZE/4kgBWEDhD4s2aRgAEY4sWz1YEvAlfokmQ3XIvY+Ap9bX3ZF5DHXv3j5RvHfvRUxDXbr3FZ9dKZxAFWb8NURKIYti1SEGuqE4Urp09FqjtQ3ArUPmMVRVQkEXUACUDKE+oqDoykoECvIOGBQp3sGidPzRB9aFJhyHIjXdHP9/KGXJQz7V48RtDy1KgDmRdRABvvC/j4u6ldBZFYTQovm6yRu2UlI1L+smm2TC5kDjd+09U3d7GngITv+Ckid/i48/6HOaKAWQJGsQwBI3j7UqVLBhgZZalyBi0j/sgCBS6KRqlpgKszLQEAYMNMEcBwzi0aqYWmGbXbgXAGNv1ccBwDYBp9hBWAIwxXh8FDLE1AKbZhXNRlZVkglJCupNxSYNeF1gawxiMNHJBO3p/g95mt72+gQkf/zXAHAGM3ZkWtJ85yY9oh4xQaoCJksQoBhj4e0TKRjJid54f9aZbCGQkK5ORZ6vJZY9aMThMkmhFgKSvZsiS6BSbVtfbkesNjEizra4mqI8d7NrtLUcVr6bRK0mso8srGUf0in5DjFuM7df9cv+BncUQtDKrdUonzY6vvei7BuRG1nslRu0peqsPGDCj9nuolZSMCs11pQ4qtHmZTjcVOjLUz9NbY9JBGgtK5tBPIyldkAp1W1Uo10GFNi9cj1GFnsU1cH99Y7U10kEjV7MjlJJRoblpWwcVqiRMaHhfXMQ8i7bEElM6uL8n6RAKNLgoevHFpcixmNPsi6uBbCQkbzdJAnpRsgS1W0tBeJQ6s5Bm3wSTJUBreBiYoUQpPPnDTJ6AscHDNYDnsOeCSRqgIzwEzo2kFp5mnwbjgzkcOd6BSWXn8U5vc0p6OCuSGn+pP42qSZBwsUUKccR2eeo7uO8UhYbHFWHIiC9vbEpNosExoZYQRpOJgmAiPgkFMZ6YsSJ5TMXJMglCw5XPSM8x4A70cblEoXue5QF5SAo5AKqS6Spo0QiWzQvAVIVOlQsd+jBcoNAJO9DTzxA6S3p3qzlEtdAhPwfpZs3xJKLCjmsVBVSVkI2gkb/qvFRO89KqST4w+JAOE689MyqrAaQ6CwFX7CGqrQegTj5FI1vcliWojnV0eSXjzgkkUqpE/FDsABk2FW8zMx4PzpHR3bma1j42F6BZNXolozU7aE2qgdb8WE6bGtF9nqoak9rRWFAyB3gaSemCtCaraU2mgdZUvJtyz1rzLJSB+xtX94+vWUcopYvWmlwDrTkGl2opzc8t0pxpmmIXEIVSB2reYVeCwSXRU6JpWo480E40ytyTo7zE+LZ369bMrts9IV6U+gV69pC8YMPLUbzkLguD0aLMBRmixfivH0eLOzQteEhajMP6cbR4Q9PS7GNgcnjqk8OT8q5jlt4mgvmEaHCXdJPCczsnqqfwJM7AbugetC5VEcAlOKc6jFi2vZ+u8pKYnFPdzpPbItZ0W5onfFfklOp1SHtwCcJmmO/cAncSPtH1OLmXZ7cYOtRKWNHGqtpL2GvvzqcKW9wWuXsnZjSssJVYF8fjY+5wZuFKYGDurTagU7nXbGlU51QeXZ0yybuAYbvDeHm7ygo/XIfN7RC4lVBFJPumQQ2N16baOX4kjNU77NQJ+JTKaettgtJixhu3rD/t+mAYO269ndFuja2HlpaQ8wZDHRYyvSBgRbbJIQHqzBULsaRbEXm3Omxbuoyp3zYpWhv5oJphgI7pzayEWjYTMuQb8k8kHxOsPfktuyIZ8g35p5K/zVmiIe0S1iYN7Yb2Sr4VhvWkHTfSXhPWx47I/QBOrbrV6fu6i/GkPWQZ0CDuFZndnwZWTpKDirSqYzSJIk2iQSwo+
mCbIGkUtHeeRtBFZ+pWxygnacqp3QysQcglMtsLtf1WjWMudaujSKEa5aRIOWkQ2Yi6bNxz3jo4m/DgcXZVXwf3Az6byGrp1jEq4V3Xwd3+1sHBfXl6cSo3e78cSQ9vXVEjnj14En007M5B2NDTvCpFuSUYafJ30gKeQXcOMsFzLYs8KEmqrx8xw24XZALo2rZhcCzENCQGWgSvCOGSfKexk6QyKeTnrgwqiAfuRqjYlRqBu/NUxCYjvkE0ZPT+V9INLMzy42/i+Nq2bJQXfHrLOkp69F48+hJEC/HSSRds70uaRExs/bSKCTvK0YyOTUuBTN7J+3C0D145t3Z5vRWFUSBw5x4DlQyoPM+yC5vw5Pq8CFU9nOZoprwEmv1tMIRU00N6h6vDRhAGrlPg2noceX0ABfgyDU1RhzDdwRNJSRiG1DasLglBg7RRiI5h6/AB9qvWQjbNNksTaTf4VDDZvsUu7v9UCdWkFABIeaQdNXuea8pPunlsB6Wj2JJg9jkfETBDJ4ZDzqCmSmPcPg6XoTPDIUdZIkFj2T4fl6FTwyFwg9ndCBh03jK54RTkhquvm9F8Y9piZJQLmtP7G+yCW9OqwSV8/Nfg0h0XwhkYSwcTg/azph6gafZe7xua9BtkoIGgAaKNh89AiVp21FWbgvJPM2hJ1IhLrHzpu/8UlOIwChMp7G3hicnz93AaJDX+Dw==</diagram></mxfile>
2008.10858/main_diagram/main_diagram.pdf ADDED
Binary file (31.8 kB).
 
2008.10858/paper_text/intro_method.md ADDED
@@ -0,0 +1,15 @@
+ # Introduction
+
+ Knowledge graphs (KGs) are large collections of structured knowledge, organized as subject and object entities and relations, in the form of fact triples $\langle$*sub*, *rel*, *obj*$\rangle$. The usefulness of knowledge graphs, however, is limited primarily by their incompleteness. The task of *link prediction* or *knowledge graph completion* (KGC) aims to infer missing facts from existing ones by *scoring* a triple of entities and a relation to predict its validity, thereby avoiding the cost and time of extending knowledge graphs manually. Several models, both linear and non-linear, have been proposed for this task. Bilinear models have additionally been used in multi-modal learning due to their expressive nature, where the fusion of features from different modalities plays a key role in a model's performance, with concatenation and element-wise summation being commonly used fusion techniques. The underlying assumption is that the distributions of features across modalities may vary significantly, so the representation capacity of the fused features may be insufficient, limiting the final prediction performance [@yu2017multi]. In this work, we apply this assumption to knowledge graphs by considering that the *entities* and *relations* come from different multi-modal distributions, and that a good fusion between them can potentially construct a KG.
+
+ A major drawback of bilinear modeling methods is the quadratic growth of parameters, which results in high computational and memory costs and risks overfitting. In multi-modal learning, *factorization* techniques have therefore been researched to address these challenges [@kim2016hadamard; @fukui2016multimodal; @yu2017multi; @ben2017mutan; @li2017factorized; @liu2018efficient], while *constraint*-based bilinear maps have become the prevalent standard in link prediction [@yang2014embedding; @trouillon2016complex; @kazemi2018simple]. Applying constraints can be seen as hard regularization, since it allows for incorporating background knowledge [@kazemi2018simple], but it restricts the learning potential of the model due to limited parameter sharing [@balazevic2019tucker]. We focus on a constraint-free approach, using the low-rank factorization of bilinear models, as it offers flexibility, generalizes well, and naturally leads to other models under certain conditions. Our work extends the multi-modal factorized bilinear pooling (MFB) model, introduced by @yu2017multi, and applies it to the link prediction task.
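
To make the low-rank idea concrete, here is a minimal NumPy sketch of MFB-style factorized bilinear fusion applied to triple scoring. This is illustrative only, not the exact model proposed here: the dimensions, the rank-$k$ factor layout, and the dot-product scoring against the object embedding are all assumptions made for the example.

```python
import numpy as np

rng = np.random.default_rng(0)
d_e, d_r, k = 64, 32, 4        # entity dim, relation dim, factorization rank

# Rank-k factors in place of a full d_e x d_r x d_e bilinear tensor:
# the factors cost (d_e + d_r) * k * d_e parameters instead of d_e^2 * d_r.
U = rng.normal(size=(d_e, k * d_e), scale=0.1)
V = rng.normal(size=(d_r, k * d_e), scale=0.1)

def mfb_fuse(e_s, r):
    """Factorized bilinear pooling: element-wise product of the two
    projections, then sum-pooling over the rank dimension k."""
    joint = (e_s @ U) * (r @ V)               # shape (k * d_e,)
    return joint.reshape(d_e, k).sum(-1)      # shape (d_e,)

def score(e_s, r, e_o):
    # validity score of the triple <sub, rel, obj>
    return mfb_fuse(e_s, r) @ e_o

e_s, e_o, r = rng.normal(size=d_e), rng.normal(size=d_e), rng.normal(size=d_r)
print(score(e_s, r, e_o))
```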
+
+ Our contributions are outlined as follows:
+
+ - We propose a simple and parameter-efficient linear model for link prediction by extending multi-modal factorized bilinear (MFB) pooling [@yu2017multi].
+
+ - We prove that our model is *fully expressive* and provide bounds on the entity and relation embedding dimensions, along with the factorization rank.
+
+ - We relate our model to the family of bilinear link prediction models and to the Tucker-decomposition [@tucker1966some] based TuckER model [@balazevic2019tucker], generalizing them as special cases. We also show the relation to the 1D-convolution-based HypER model [@balavzevic2019hypernetwork], bridging the gap from bilinear to convolutional link prediction models.
+
+ - On real-world datasets, the model achieves on-par or state-of-the-art performance; at extremely low ranks, with a limited number of parameters, it outperforms most prior art, including deep-learning-based models.
2009.00585/main_diagram/main_diagram.drawio ADDED
@@ -0,0 +1 @@
+ <mxfile host="www.draw.io" modified="2019-11-21T01:29:07.330Z" agent="Mozilla/5.0 (X11; Linux x86_64; rv:70.0) Gecko/20100101 Firefox/70.0" version="12.2.7" etag="BNw2c6cvxGDyWLX_kLwa" type="device" pages="1"><diagram id="7O8NLXl4dIR6XBTcBj0B">7V1Lk5s4EP41rsoexgWI5zEzk0fVbra2KofdnLawkW2y2CKAZzz59StAsgHJGNtCwg45ZCwhZNTf11J3S7Qn4Gm9+5T48eoLCmA0MbRgNwHPE8PQNc/Af/Kat7LG8fSyYpmEAWl0qPga/oT0TlK7DQOY1hpmCEVZGNcr52izgfOsVucnCXqtN1ugqP6tsb+ETMXXuR+xtX+HQbYqa11LO9R/huFyRb9Z18iVtU8bky7SlR+g17KqaAM+TMBTglBWflrvnmCUC4/Kpezo45Gr+wdL4CbrcgMBIs3e6NhggIdKiijJVmiJNn704VD7ON8mLzC/X8eFBG03QVHScOn7dh0TuCxcPNz/B0IxueM7zLI30sjfZghXrbJ1RK7CXZj9k/c2tUjpG+k7//y8qxbeaGGTJW+Vm/LiN9pfXjjcVpTofWnmJxl5kvJqUCmlWYL+2+Ob97VAm6wyurz4hCKUFIIDGkZZA7i+FGkuxzrUaJvMSRUgpPWTJSQ42YDFTt8zAqsSRGuInx43SWDkZ+FLvXufcHq5b3eAHX8gyPNZQL76xY+2pFOGFgecc0m8rsIMfo39YjyvWMnrIJLuYJLB3eQYMY8Mjt5gE5UhU4RBlev1oHAeqVpVdM3VrheH2aoUG7SBwyO+5oJzqF+U/oJJiIUDk+v1wblOHyxWHyyF+mANTx8M22vog87og8HRByBAH+wb1AfD9ar6oE090x2+TogkO7n1LxTiHvc8AkaDRyaod1GqJbmrajkwHblT2/H2/1yz3q/uTk1XO/wz6l9TqjrzNQUV96PtxE7nFtmJLeDqbD3VdEcyO83rZmx3WDO2O8AZ22tohMQZ27tBnZBlul9pqlAfdCjMp88zJOoDXZ2xouvM8IfPfe8w/RNr5WDOD9daqamFMTC1MAaoFgpteJ318afTKSMSPLisPu6SNJQVRH0WYRQ1qvwoXG5wcY6FkfPxMRdVOPej9+TCOgyCIn7EE3RdH0XI2mysvjaYWp2kbQkQtimYfIGfrvZtq8LHWgr1wIIOgxS+4tkO8G1B8RinEY+x2XiMw5OmLUCcHfzxJZZnfHScJBDsz2hz7fzxO43x28z4LYsdvymCTvbp8eM7wjg9pl4VKvlpXIbEF+Eup1RHcljt0rFqwgGspukcbugi5jVn6LIBmjLZdPCK1MrGVMcbb7CykS8Mo4MPoZYo9cmXEluGaG7RnejVlZ7wnQEWzIaDQBaxqoNQ2fLs1xvg8VkiineDoaMSQzBiKAJDTuRWHoYXbK0OfPo8BVMT1gYlL0XRU4liB39MrblAH0iBvXCLm6WDnKd49sIR1MUz/BY3FQeJIs9ikIaiO1oMfVkM0jBUupWp3w2GPHtBFoZg8OEFoCy+AMb4Qn/2ApBlEQO18YU7QpFjL8hDUW2E4Y5Q5FgM8lBUenz7ri0GeRgOPsLgyLMQxoiCGEbTXa4ao6XZwGNEQRCKnHdF5KGoNqJwPygCpbo43L3/RvxawvJiaiOl+1tePEmUNscwQo/LizQUxzBCj8uLNBQ7vE49jHCnjOXlFl+mHiKlectLyTQZlLZGFHtbXuShOEYS+lte5KHIO/GOZ1nzx7uJ9ZRnmJktJs7jz4nzPHGeqlU7XPVb2ZYhwjBe/Gl/H8180jTP67wGti6B+xfE6Glrzp6f2deayDuXb0cZGXENGPvHFtELD2khmve4gRvvDtfwp2X+98+PtJt0O6OVOq3DT1WprtSW30mrb5AXjfcUr+OF03xvjH1Hj2srAQG84MUlBPBCMLkYygh8xApDjTOJO/KZn4XAU8Zni3fu45flMxj5LIDPwFA3P9PXtUc+5x9/H/ksgs8K7Q2rPdyoNPujc4GzuPcOe84gY5+kRHuyO172R7szcc5LAPY+Sfy3SoM4z7iVVnpupvWyGvaC3kggyqQBs9va4w/lExwIeV52L4sNpr7AeYblbmhogf+L0DKXW4Jm/iyMwiyEKcPqs3ItNLN81rMtBBZ0A5OZlvAV15gBW1C2BSbbn86J2PKSV4hwT632kC1/WviVokYdV4faTNllXuCduBI/LXSmQXvMd8wN3FcuVA4NLIU0aA8ajzSQSANXIQ3az6+NNJBIA7rIK+FB+wm4kQcyeWAo5AEvVp3vLO2ObDB1PvEgMTbayFkMaAKzExniDAFGtt1+7O7mfG9Nkhpd6XvbQ/a9m2mOTvneTIZhwb43DUUNkaPGgONDpniOOj04ghdxVHeNsziqN1MBi+ZoewzzFzzT0jMRPXU2h30qFjhH6xgDXvDvtViwMZNtf51bGJtZGk84Gw4CY4OSMrE2fxmHExvk/TKOiNCgzUtse2J7KQo38IE+SLHBhB/Ewn8LcWiN3SeLt/v04ichplKuormoUJrBJCxgX6Dk1U+Ch9hPU3lbTBFcFI8lZIMJtChpgrJi3FTZcZfhZkmeOggTzP7yKlbbHHshVrFWT8q5T8FR4Rd9x1a4VTzGHC+b6dtI1GWm5yRPsB2FMz0v4/F529jciSRA2UOcoGA7z7rOFpf4rafRuNJvdaZ1HdVdNlksPbMtfA3gnT8VcsSgiBuQ86qzifP4YeI8/4v//siPslqP8bvmYdbG+dafxflW65lGILoBXJ8xuoPc5okuMGc+T6o7P1ceSqwv+brJBipczpIv5Me/ejqseibcd4stNWpVQNvbedMco4cWbDseVb9DuHXbUoa3M+CY46Ctqyv9aEdnras+AjqdecA7x5jrGW/KvZlovlefSPe2joRgvtMehBoVqy/F4kXzFW6SO7w3fwW4LfMEpelDLnUUv92w4+JqjN/C/opOX36LoyZ2dTifOManBHm/jfhn1x9KEaHgvLxkI4dujkOgGeOUyaELYlsjhwbIIaBwHrogBjdyaIAcUrmWjbns+nNMOKf1aLRNiWNyfaSPOyHcvkuiu4a8vRRcTFAuwP21T3joqy8ogHmL/wE=</diagram></mxfile>
2009.00585/main_diagram/main_diagram.pdf ADDED
Binary file (26.1 kB).
 
2009.00585/paper_text/intro_method.md ADDED
@@ -0,0 +1,238 @@
+ # Method
+
+ The best-known and most studied probability distributions, which are analytically manageable, are rarely expressive enough for real-world complex datasets, such as images or signals. However, they have properties that make them amenable to work with: they allow for tractable parameter estimation, they have closed-form likelihood functions, and sampling from them is simple.
+
+ One way to obtain more expressive models is to assume the existence of latent variables, leverage certain factorization structures, and use well-known distributions for the individual factors of the product that constitutes the model's joint distribution. By using these structures and specific, well-chosen combinations of distributions (namely, conjugate prior-likelihood pairs), these models are able to remain tractable, normally via bespoke estimation/inference/learning algorithms.
+
+ Another approach to obtaining expressive probabilistic models is to apply transformations to a simple distribution, and use the *change of variables* formula to compute probabilities in the transformed space. This is the basis of *normalizing flows*, an approach proposed by Rezende and Mohamed [2015], which has since evolved into the basis of multiple state-of-the-art techniques for density modelling and estimation [Kingma and Dhariwal, 2018; Dinh, Sohl-Dickstein, and Bengio, 2017; De Cao, Titov, and Aziz, 2019; Papamakarios, Pavlakou, and Murray, 2017].
+
+ Given a random variable $z \in \mathbb{R}^D$, with probability density function $f_Z$, and a bijective and continuous function $g(\cdot; \boldsymbol{\theta}) : \mathbb{R}^D \to \mathbb{R}^D$, the probability density function $f_X$ of the random variable $\boldsymbol{x} = g(\boldsymbol{z})$ is given by
+
+ $$f_X(\boldsymbol{x}) = f_Z(g^{-1}(\boldsymbol{x}; \boldsymbol{\theta})) \left| \det \left( \frac{d}{d\boldsymbol{x}} g^{-1}(\boldsymbol{x}; \boldsymbol{\theta}) \right) \right| \tag{1}$$
+
+ $$= f_Z(g^{-1}(\boldsymbol{x};\boldsymbol{\theta})) \left| \det \left( \frac{d}{d\boldsymbol{z}} g(\boldsymbol{z};\boldsymbol{\theta}) \Big|_{\boldsymbol{z} = g^{-1}(\boldsymbol{x};\boldsymbol{\theta})} \right) \right|^{-1}, \tag{2}$$
+
+ where $\det\left(\frac{d}{d\boldsymbol{x}}g^{-1}(\boldsymbol{x};\boldsymbol{\theta})\right)$ is the determinant of the Jacobian matrix of $g^{-1}(\cdot;\boldsymbol{\theta})$, computed at $\boldsymbol{x}$. Since $g(\cdot;\boldsymbol{\theta})$ is a transformation parameterized by a parameter vector $\boldsymbol{\theta}$, this expression can be optimized w.r.t. $\boldsymbol{\theta}$, with the goal of making it approximate some arbitrary distribution. For this to be feasible, the following have to be easily computable:
+
+ - $f_Z$, the starting probability density function (also called the *base density*). It is assumed to have a closed-form expression; in practice, it is typically one of the basic distributions (Gaussian, Uniform, etc.).
+
+ - $\det\left(\frac{d}{d\boldsymbol{x}}g^{-1}(\boldsymbol{x};\boldsymbol{\theta})\right)$, the determinant of the Jacobian matrix of $g^{-1}$; for most transformations, this is not "cheap" to compute.
+ - The gradient of $\det\left(\frac{d}{d\boldsymbol{x}}g^{-1}(\boldsymbol{x};\boldsymbol{\theta})\right)$ w.r.t. $\boldsymbol{\theta}$; this is crucial for gradient-based optimization of $\boldsymbol{\theta}$ to be feasible. In most cases, this is not easily computable.
+
+ As will become clear, the crux of the *normalizing flows* framework is to find transformations that are expressive enough, and for which the determinants of their Jacobian matrices, as well as the gradients of those determinants, are "cheap" to compute.
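
To make Eq. (1) concrete, here is a minimal one-dimensional sketch, assuming a standard-normal base density and the element-wise affine map $g(z) = az + b$; the parameter values are illustrative.

```python
import numpy as np

a, b = 2.0, 1.0                      # illustrative parameters of g(z) = a*z + b

def f_Z(z):
    # base density: standard normal
    return np.exp(-0.5 * z**2) / np.sqrt(2 * np.pi)

def f_X(x):
    """Eq. (1): f_X(x) = f_Z(g^{-1}(x)) * |det d/dx g^{-1}(x)|.
    Here g^{-1}(x) = (x - b)/a and, in one dimension, the Jacobian
    'determinant' of the inverse is just 1/|a|."""
    return f_Z((x - b) / a) / abs(a)

# sanity check: the transformed density still integrates to ~1
xs = np.linspace(-12.0, 14.0, 20001)
print(np.trapz(f_X(xs), xs))
```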
+
+ Consider $L$ transformations $h_\ell$, for $\ell=0,1,...,L-1$, that fulfill the three requirements listed above. Let each of those transformations be parameterized by a parameter vector $\boldsymbol{\theta}_\ell$, for $\ell=0,1,...,L-1$. The dependence on the parameter vectors will be implicit from here on. Let $\boldsymbol{z}_\ell=h_{\ell-1}\circ h_{\ell-2}\circ ...\circ h_0(\boldsymbol{z}_0)$, where $\boldsymbol{z}_0$ is sampled from $f_Z$, the base density. Notice that, with this notation, $\boldsymbol{z}_L=\boldsymbol{x}$. Furthermore, let $g$ be the composition of the $L$ transformations. Applying the change of variables formula to
+
+ $$z_0 \sim f_Z \tag{3}$$
+
+ <span id="page-3-0"></span>
+ $$x = h_{L-1} \circ h_{L-2} \circ \dots \circ h_0(z_0), \tag{4}$$
+
+ noting that $g^{-1}=h_0^{-1}\circ h_1^{-1}\circ ...\circ h_{L-1}^{-1}$ and using the chain rule for derivatives, leads to
+
+ $$f_X(\boldsymbol{x}) = f_Z(g^{-1}(\boldsymbol{x})) \left| \det \left( \frac{d}{d\boldsymbol{x}} g^{-1}(\boldsymbol{x}) \right) \right| \tag{5}$$
+
+ $$= f_Z(g^{-1}(\boldsymbol{x})) \prod_{\ell=0}^{L-1} \left| \det \left( \frac{d}{d\boldsymbol{z}_{\ell+1}} h_{\ell}^{-1}(\boldsymbol{z}_{\ell+1}) \right) \right| \tag{6}$$
+
+ $$= f_Z(g^{-1}(\boldsymbol{x})) \prod_{\ell=0}^{L-1} \left| \det \left( \frac{d}{d\boldsymbol{x}_{\ell}} h_{\ell}(\boldsymbol{x}_{\ell}) \Big|_{\boldsymbol{x}_{\ell} = h_{\ell}^{-1}(\boldsymbol{z}_{\ell+1})} \right) \right|^{-1}. \tag{7}$$
+
+ Replacing $h_{\ell}^{-1}(\boldsymbol{z}_{\ell+1}) = \boldsymbol{z}_{\ell}$ in (7) leads to
+
+ <span id="page-3-1"></span>
+ $$f_X(\boldsymbol{x}) = f_Z(g^{-1}(\boldsymbol{x})) \prod_{\ell=0}^{L-1} \left| \det \left( \frac{d}{d\boldsymbol{z}_{\ell}} h_{\ell}(\boldsymbol{z}_{\ell}) \right) \right|^{-1}; \tag{8}$$
+
+ taking the logarithm,
+
+ $$\log f_X(\boldsymbol{x}) = \log f_Z(g^{-1}(\boldsymbol{x})) - \sum_{\ell=0}^{L-1} \log \left| \det \left( \frac{d}{d\boldsymbol{z}_{\ell}} h_{\ell}(\boldsymbol{z}_{\ell}) \right) \right|. \tag{9}$$
+
+ Depending on the task, one might prefer to replace the second term in (9) with a sum of log-absolute-determinants of the Jacobians of the inverse transformations. This choice would imply replacing the minus sign before the sum with a plus sign:
+
+ $$\log f_X(\boldsymbol{x}) = \log f_Z(g^{-1}(\boldsymbol{x})) + \sum_{\ell=0}^{L-1} \log \left| \det \left( \frac{d}{d\boldsymbol{z}_{\ell+1}} h_\ell^{-1}(\boldsymbol{z}_{\ell+1}) \right) \right|. \tag{10}$$
+
+ We started by assuming that the transformations $h_{\ell}$ fulfill the requirements listed above. For that reason, the above expression is a feasible objective for gradient-based optimization. In practice, this is carried out by leveraging modern automatic differentiation and optimization frameworks [Ho et al., 2019; Kingma and Dhariwal, 2018; Dinh, Sohl-Dickstein, and Bengio, 2017]. Sampling from the resulting distribution is achieved simply by sampling from the base distribution and applying the chain of transformations. Because of this, normalizing flows can be used both as flexible variational posteriors in variational inference settings and as density estimators.
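
As a minimal sketch of Eqs. (9)-(10) in code, assume each transform in the stack exposes its inverse and the log-abs-det of its forward Jacobian (the class and function names here are illustrative):

```python
import numpy as np

class ElementwiseAffine:
    """h(z) = a*z + b applied element-wise; one possible h_l in the stack."""
    def __init__(self, a, b):
        self.a, self.b = a, b
    def inverse(self, x):
        return (x - self.b) / self.a
    def log_abs_det(self, z):
        # log|det dh/dz| at z; constant D*log|a| for this map
        return z.shape[1] * np.log(abs(self.a)) * np.ones(z.shape[0])

def flow_log_prob(x, transforms, base_log_prob):
    """Eq. (9): peel the stack off in reverse, subtracting each forward
    log-abs-det (equivalently, adding the inverse ones as in Eq. (10))."""
    log_det = np.zeros(x.shape[0])
    for h in reversed(transforms):
        z = h.inverse(x)
        log_det -= h.log_abs_det(z)
        x = z
    return base_log_prob(x) + log_det

std_normal = lambda z: -0.5 * (z**2).sum(-1) - 0.5 * z.shape[1] * np.log(2 * np.pi)
flows = [ElementwiseAffine(2.0, 0.0), ElementwiseAffine(0.5, 1.0)]
print(flow_log_prob(np.random.randn(4, 3), flows, std_normal))
```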
+
+ An affine transformation is arguably the simplest choice; it can stretch, shear, shrink, rotate, and translate the space. It is achieved simply by multiplication by a matrix $A$ and addition of a bias vector $b$:
+
+ $$z \sim p(z) \tag{11}$$
+
+ $$x = Az + b. \tag{12}$$
+
+ The determinant of the Jacobian of this transformation is simply the determinant of $A$. However, in general, computing the determinant of a $D \times D$ matrix has $\mathcal{O}(D^3)$ computational complexity. For that reason, it is common to use matrices with a certain structure that makes their determinants easier to compute. For instance, if $A$ is triangular, its determinant is the product of its diagonal elements. The downside of using matrices that are constrained to a certain structure is that they correspond to less flexible transformations.
+
+ It is possible, however, to design affine transformations whose Jacobian determinants are of $\mathcal{O}(D)$ complexity and that are more expressive than simple triangular matrices. Kingma and Dhariwal [2018] propose one such transformation. It constrains the matrix $A$ to be decomposable as $A = PL(U + \operatorname{diag}(s))$, where $\operatorname{diag}(s)$ is a diagonal matrix whose diagonal elements are the components of the vector $s$. The following additional constraints are in place:
+
+ - P is a permutation matrix
+ - L is a lower triangular matrix, with ones in the diagonal
+ - U is an upper triangular matrix, with zeros in the diagonal
+
+ Given these constraints, the determinant of matrix $A$ is simply the product of the elements of $s$.
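
A quick NumPy sketch, under exactly the constraints just listed, checking that the decomposition yields an $\mathcal{O}(D)$ log-determinant:

```python
import numpy as np

rng = np.random.default_rng(0)
D = 5
P = np.eye(D)[rng.permutation(D)]                        # permutation matrix
L = np.tril(rng.normal(size=(D, D)), -1) + np.eye(D)     # ones on the diagonal
U = np.triu(rng.normal(size=(D, D)), 1)                  # zeros on the diagonal
s = rng.normal(size=D)

A = P @ L @ (U + np.diag(s))

# |det P| = 1 and det L = 1, so |det A| = prod_i |s_i|: an O(D) computation
print(np.log(abs(np.linalg.det(A))))      # O(D^3) reference value
print(np.log(np.abs(s)).sum())            # O(D) value from the decomposition
```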
+
+ ![](_page_4_Figure_11.jpeg)
+
+ Figure 1: (a) Density of a Gaussian distribution with $\mu = [0,0]$ and $\Sigma = I$. (b) Density of the distribution that results from applying some affine transformation to the Gaussian distribution in (a).
+
+ Intuitively, introducing non-linearities endows normalizing flows with more flexibility to represent complex distributions. This can be done in a similar fashion to the activation functions used in neural networks. One example of that is the parameterized rectified linear unit (PReLU) transformation. It is defined in the following manner, for a *D*-dimensional input:
+
+ $$f(z) = [f_1(z_1), f_2(z_2), ..., f_D(z_D)], \tag{13}$$
+
+ where
+
+ $$f_i(z_i) = \begin{cases} z_i, & \text{if } z_i \ge 0, \\ \alpha z_i, & \text{otherwise.} \end{cases} \tag{14}$$
+
+ In order for the transformation to be invertible, it is necessary that $\alpha > 0$. Let us define a function $j(\cdot)$ as
+
+ $$j(z_i) = \begin{cases} 1, & \text{if } z_i \ge 0, \\ \alpha, & \text{otherwise;} \end{cases} \tag{15}$$
+
+ it is trivial to see that the Jacobian of the transformation is a diagonal matrix, whose diagonal elements are $j(z_i)$:
+
+ $$J(f(z)) = \begin{bmatrix} j(z_1) & & & \\ & j(z_2) & & \\ & & \ddots & \\ & & & j(z_D) \end{bmatrix}. \tag{16}$$
+
+ With that in hand, it is easy to arrive at the log-absolute-determinant of this transformation's Jacobian, which is given by $\sum_{i=1}^{D} \log |j(z_i)|$.
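
A minimal sketch of the PReLU transformation as a flow layer, directly implementing Eqs. (13)-(16):

```python
import numpy as np

def prelu_forward(z, alpha):
    # Eq. (14): identity for z_i >= 0, slope alpha otherwise (alpha > 0)
    return np.where(z >= 0, z, alpha * z)

def prelu_inverse(x, alpha):
    return np.where(x >= 0, x, x / alpha)

def prelu_log_abs_det(z, alpha):
    # Eqs. (15)-(16): diagonal Jacobian with entries j(z_i) in {1, alpha}
    return np.log(np.where(z >= 0, 1.0, alpha)).sum(-1)

z = np.random.randn(4, 3)
x = prelu_forward(z, alpha=0.5)
assert np.allclose(prelu_inverse(x, alpha=0.5), z)
print(prelu_log_abs_det(z, alpha=0.5))
```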
+
+ ![](_page_5_Figure_5.jpeg)
+
+ Figure 2: (a) Samples from a Gaussian distribution with $\mu = [0,0]$ and $\Sigma = I$. The samples are colored according to the quadrant they belong to. (b) Samples from the distribution in (a) transformed by a PReLU transformation.
+
+ ![](_page_5_Figure_7.jpeg)
+
+ Figure 3: Samples from a Gaussian with $\mu=[0,0]$ and $\Sigma=I$, transformed by PReLU transformations with different $\alpha$ parameters. (a) $\alpha=0.1$ (b) $\alpha=0.5$ (c) $\alpha=5$
+
+ Dinh, Sohl-Dickstein, and Bengio [2017] propose a batch-normalization transformation, similar to the well-known batch-normalization layer commonly used in neural networks. This transformation simply applies a rescaling, given the batch mean $\tilde{\mu}$ and variance $\tilde{\sigma}^2$:
+
+ $$f(z) = \frac{z - \tilde{\mu}}{\sqrt{\tilde{\sigma}^2 + \epsilon}},\tag{17}$$
+
+ where $\epsilon \ll 1$ is a term used to ensure that there is never a division by zero. This transformation's Jacobian determinant is trivial:
+
+ $$\prod_{i=1}^{D} \frac{1}{\sqrt{\tilde{\sigma}_i^2 + \epsilon}}. \tag{18}$$
+
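A small sketch of Eqs. (17)-(18) over a batch (the statistics here are recomputed per call; a real layer would also track running statistics):

```python
import numpy as np

def batchnorm_forward(z, eps=1e-5):
    """Eq. (17) applied per dimension over a batch; returns the transformed
    batch and the log-abs-det of the Jacobian (shared by all samples)."""
    mu, var = z.mean(0), z.var(0)
    x = (z - mu) / np.sqrt(var + eps)
    log_abs_det = -0.5 * np.log(var + eps).sum()   # log of Eq. (18)
    return x, log_abs_det

z = 4.0 * np.random.randn(128, 3) + 2.0
x, ld = batchnorm_forward(z)
print(x.mean(0).round(6), x.std(0).round(3), ld)
```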
+ As mentioned previously, one of the active research challenges within the normalizing flows framework is the search for and design of transformations that are sufficiently expressive and whose Jacobians are not computationally heavy. One brilliant example of such a transformation, proposed by Dinh, Sohl-Dickstein, and Bengio [2017], is the affine coupling layer.
+
+ This transformation is characterized by two arbitrary functions $s(\cdot)$ and $t(\cdot)$, as well as a mask that splits an input $z$ of dimension $D$ into two parts, $z_1$ and $z_2$. In practice, $s(\cdot)$ and $t(\cdot)$ are neural networks, whose parameters are to be optimized so as to make the transformation approximate the desired output distribution. The outputs of $s(\cdot)$ and $t(\cdot)$ need to have the same dimension as $z_1$. This should be taken into account when designing the mask and the functions $s(\cdot)$ and $t(\cdot)$. The transformation is defined as:
+
+ $$\begin{cases} \boldsymbol{x_1} &= \boldsymbol{z_1} \odot \exp\left(s(\boldsymbol{z_2})\right) + t(\boldsymbol{z_2}) \\ \boldsymbol{x_2} &= \boldsymbol{z_2}. \end{cases} \tag{19}$$
+
+ To see why this transformation is suitable for use within the framework of normalizing flows, let us derive its Jacobian.
+
+ - $\frac{\partial x_2}{\partial z_2} = I$, because $x_2 = z_2$.
+ - $\frac{\partial x_2}{\partial z_1}$ is a matrix of zeros, because $x_2$ does not depend on $z_1$.
+ - $\frac{\partial x_1}{\partial z_1}$ is a diagonal matrix, whose diagonal is simply given by $\exp(s(z_2))$, since those values are constant w.r.t. $z_1$ and they multiply each element of $z_1$.
+ - $\frac{\partial x_1}{\partial z_2}$ is not needed, as will become clear ahead.
+
+ Writing the above in matrix form:
+
+ $$J_{f(z)} = \begin{bmatrix} \frac{\partial x_1}{\partial z_1} & \frac{\partial x_1}{\partial z_2} \\ \frac{\partial x_2}{\partial z_1} & \frac{\partial x_2}{\partial z_2} \end{bmatrix} \tag{20}$$
+
+ $$= \begin{bmatrix} \operatorname{diag}\left(\exp\left(s(z_2)\right)\right) & \frac{\partial x_1}{\partial z_2} \\ \mathbf{0} & I \end{bmatrix} \tag{21}$$
+
+ shows that the Jacobian matrix is (upper) triangular. Its determinant (the only thing we need, in fact) is therefore easy to compute: it is simply the product of the diagonal elements. Moreover, part of the diagonal is simply composed of ones. The determinant and the log-absolute-determinant become
+
+ $$\det (J_{f(z)}) = \prod_{i} \exp (s(\boldsymbol{z}_{2}^{(i)})) \tag{22}$$
+
+ $$\log |\det (J_{f(z)})| = \sum_{i} s(\boldsymbol{z}_{2}^{(i)}), \tag{23}$$
+
+ where $z_2^{(i)}$ is the *i*-th element of $z_2$. Since a single affine coupling layer does not transform all of the elements of $z$, in practice several layers are composed, and each layer's mask is changed so as to make all dimensions affect each other. This can be done, for instance, with a checkerboard pattern which alternates for each layer. In the case of image inputs, the masks can operate at the channel level.
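
A minimal sketch of one affine coupling layer; `s_net` and `t_net` stand in for the neural networks $s(\cdot)$ and $t(\cdot)$ (here tiny fixed functions, chosen only so the example runs):

```python
import numpy as np

def s_net(z2):  # stand-in for the scale network s(.)
    return np.tanh(z2)

def t_net(z2):  # stand-in for the translation network t(.)
    return 0.5 * z2

def coupling_forward(z, mask):
    """Eq. (19): the masked half z1 is scaled and shifted as a function of
    the untouched half z2; log|det J| is the plain sum of Eq. (23)."""
    z1, z2 = z[:, mask], z[:, ~mask]
    x = np.empty_like(z)
    x[:, mask] = z1 * np.exp(s_net(z2)) + t_net(z2)
    x[:, ~mask] = z2
    return x, s_net(z2).sum(-1)

def coupling_inverse(x, mask):
    x1, z2 = x[:, mask], x[:, ~mask]
    z = np.empty_like(x)
    z[:, mask] = (x1 - t_net(z2)) * np.exp(-s_net(z2))
    z[:, ~mask] = z2
    return z

mask = np.array([True, False, True, False])   # alternating, checkerboard-style
z = np.random.randn(8, 4)
x, log_abs_det = coupling_forward(z, mask)
assert np.allclose(coupling_inverse(x, mask), z)
```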
+
+ Another ingenious architecture for normalizing flows has been proposed by Papamakarios, Pavlakou, and Murray [2017]: the masked autoregressive flow (MAF). Let $z$ be a sample from some base distribution, with dimension $D$. MAF transforms $z$ into an observation $x$, of the same dimension, in the following manner:
+
+ $$x_i = z_i \exp(\alpha_i) + \mu_i \tag{24}$$
+
+ $$(\mu_i, \alpha_i) = g(\boldsymbol{x}_{1:i-1}). \tag{25}$$
+
+ In the above expression, $g$ is some arbitrary function. The inverse transform of MAF is trivial because, like the affine coupling layer, MAF uses $g$ to parameterize a shift, $\mu$, and a log-scale, $\alpha$, which means that the function $g$ itself never needs to be inverted:
+
+ $$z_i = (x_i - \mu_i) \exp(-\alpha_i). \tag{26}$$
+
+ Moreover, the autoregressive structure of the transformation constrains the Jacobian to be triangular, which renders the determinant effortless to compute:
+
+ $$\det(J_{f(z)}) = \prod_{i=1}^{D} \exp(\alpha_i), \tag{27}$$
+
+ $$\log \left| \det \left( J_{f(\boldsymbol{z})} \right) \right| = \sum_{i=1}^{D} \alpha_{i}. \tag{28}$$
+
+ As stated above, the function $g$ used to obtain $\mu_i$ and $\alpha_i$ can be arbitrary. In the original paper, however, the proposed $g$ is a masked autoencoder for distribution estimation (MADE), as described by Germain et al. [2015].
+
+ Much like the partitioning in the affine coupling layer, the assumption of autoregressiveness (and the ordering of the elements of $\boldsymbol{x}$ for which that assumption holds) carries an inductive bias with it. Again, as with the affine coupling layer, this effect is minimized in practice by stacking layers with different element orderings.
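
A sketch of Eqs. (24)-(26) with a toy conditioner in place of MADE (any function of the prefix works, since $g$ is never inverted; the choice below is arbitrary and only for illustration):

```python
import numpy as np

def conditioner(prefix):
    """Stand-in for g (MADE in the original paper): maps x_{1:i-1}
    to (mu_i, alpha_i)."""
    if prefix.size == 0:
        return 0.0, 0.0
    return prefix.mean(), 0.1 * prefix.sum()

def maf_forward(z):
    # sampling direction is sequential: x_i depends on x_{1:i-1} (Eqs. 24-25)
    x = np.zeros_like(z)
    for i in range(len(z)):
        mu, alpha = conditioner(x[:i])
        x[i] = z[i] * np.exp(alpha) + mu
    return x

def maf_inverse(x):
    # density direction (Eq. 26); all (mu_i, alpha_i) depend only on x,
    # so a real implementation computes them in one parallel pass
    z, log_abs_det = np.zeros_like(x), 0.0
    for i in range(len(x)):
        mu, alpha = conditioner(x[:i])
        z[i] = (x[i] - mu) * np.exp(-alpha)
        log_abs_det += alpha                  # Eq. (28)
    return z, log_abs_det

x = maf_forward(np.random.randn(5))
z, ld = maf_inverse(x)
```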
+
+ Generally speaking, normalizing flows can be used in one of two scenarios: (direct) density estimation, where the goal is to optimize the parameters so as to make the model approximate the distribution of some observed set of data; or in a variational inference scenario, as a way of having a flexible variational posterior. The second scenario is out of the scope of this work.
+
+ The task of density estimation with normalizing flows reduces to finding the optimal parameters of a parametric model. In general, there are two ways to estimate the parameters of a parametric model given data: maximum likelihood estimation (MLE) and maximum a posteriori (MAP) estimation. In the case of normalizing flows, MLE is the usual approach.² To fit a normalizing flow via MLE, a gradient-based optimizer is used to minimize $\hat{\mathcal{L}}(\theta) = -\mathbb{E}[\log p(\boldsymbol{x}|\theta)]$. However, this expectation is generally not accessible, since we have only finite samples of $\boldsymbol{x}$. Because of that, the parameters are estimated by optimizing an approximation of that expectation: $-\frac{1}{N}\sum_{i=1}^{N}\log p(\boldsymbol{x}_i|\theta)$.
+
+ To perform optimization on this objective, stochastic gradient descent (SGD) and its variants are the most commonly used algorithms. In general terms, SGD is an approximation of gradient descent which, rather than using the actual gradient at time step $t$ to update the variables under optimization, works by computing several estimates of that gradient and using those estimates instead. This is done by partitioning the data into mini-batches, and computing the loss function and respective gradients over those mini-batches. This way, one pass through the data, an *epoch*, results in several parameter updates.
+
+ <span id="page-7-0"></span>²In theory it is possible to place a prior on the normalizing flow's parameters and do MAP estimation. To accomplish this, strategies similar to those used in Bayesian neural networks would have to be used.
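
A skeletal mini-batch MLE loop as just described; `grad_neg_log_prob` is an assumed routine (in practice an autodiff framework supplies it), and all names and hyperparameters are illustrative:

```python
import numpy as np

def fit_mle(data, params, grad_neg_log_prob, lr=1e-3, epochs=10, batch_size=64):
    """Minimize -1/N sum_i log p(x_i | theta) by mini-batch SGD."""
    n = len(data)
    for _ in range(epochs):
        order = np.random.permutation(n)
        for idx in np.array_split(order, max(1, n // batch_size)):
            g = grad_neg_log_prob(data[idx], params)   # stochastic gradient estimate
            params = params - lr * g                   # plain SGD update
    return params
```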
+
+ The ability to leverage domain knowledge to endow a probabilistic model with structure is often useful. The goal of this work is to devise a model that combines the flexibility of normalizing flows with the ability to exploit class-membership structure. This is achieved by learning a mixture of normalizing flows, via optimization of a variational objective, for which the variational posterior over the class-indexing latent variables is parameterized by a neural network. Intuitively, this neural network should learn to place similar instances of data in the same class, allowing each component of the mixture to be fitted to a cluster of data.
+
+ Let us define a mixture model, where each of the $K$ components is a density parameterized by a normalizing flow. For simplicity, consider that all of the $K$ normalizing flows have the same architecture[3](#page-8-1), i.e., they are all composed of the same stack of transformations, but they each have their own parameters.
+
+ Additionally, let $q(z|\boldsymbol{x}; \gamma)$ be a neural network with a $K$-class softmax output, with parameters $\gamma$. This network receives as input an instance from the data, and produces the probability of that instance belonging to each of the $K$ classes.
+
+ Recall the evidence lower bound (the dependence of $q$ on $\boldsymbol{x}$ is made explicit):
+
+ $$ELBO = \mathbb{E}_q[\log p(\boldsymbol{x}, z)] - \mathbb{E}_q[\log q(z|\boldsymbol{x})].$$
+
+ Let us rearrange it:
+
+ $$ELBO = \mathbb{E}_q[\log p(\boldsymbol{x}|z)] + \mathbb{E}_q[\log p(z)] - \mathbb{E}_q[\log q(z|\boldsymbol{x})] \tag{29}$$
+
+ <span id="page-8-2"></span>
+ $$= \mathbb{E}_q[\log p(\boldsymbol{x}|z) + \log p(z) - \log q(z|\boldsymbol{x})]. \tag{30}$$
+
+ Since $q(z|\boldsymbol{x})$ is given by the forward pass of a neural network, and is therefore straightforward to obtain, the expectation in [(30)](#page-8-2) is computed by evaluating the expression inside the expectation for each possible value of $z$ and summing the obtained values, weighted by the probabilities given by the variational posterior:
+
+ $$ELBO = \sum_{z=1}^{K} q(z|\boldsymbol{x}) \left( \log p(\boldsymbol{x}|z) + \log p(z) - \log q(z|\boldsymbol{x}) \right). \tag{31}$$
+
+ Thus, the whole ELBO is easy to compute, provided that each of the terms inside the expectation is itself easy to compute. Let us consider each of those terms:
+
+ - $\log p(\boldsymbol{x}|z)$ is the log-likelihood of $\boldsymbol{x}$ under the normalizing flow indexed by $z$. The previous section showed how to compute this.
+ - $\log p(z)$ is the log-prior of the component weights. For simplicity, let us assume this is set by the modeller. When nothing is known about the component weights, the best assumption is that they are uniform.
+ - $-\log q(z|\boldsymbol{x})$ is the negative logarithm of the output of the encoder.
+
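Eq. (31) is a finite sum, so it translates directly into a few lines of code; the numeric values below are illustrative placeholders:

```python
import numpy as np

def elbo(log_p_x_given_z, q_z_given_x, log_p_z):
    """Eq. (31) for a single data point x.
    log_p_x_given_z : (K,) log-likelihood of x under each flow
    q_z_given_x     : (K,) softmax output of the encoder for x
    log_p_z         : (K,) log-prior, e.g. uniform = -log K"""
    q = q_z_given_x
    return np.sum(q * (log_p_x_given_z + log_p_z - np.log(q)))

K = 3
q = np.array([0.7, 0.2, 0.1])            # illustrative encoder output
log_lik = np.array([-3.2, -5.0, -7.1])   # illustrative per-component log-probs
print(elbo(log_lik, q, np.full(K, -np.log(K))))
```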
+ Let us call this model a *variational mixture of normalizing flows* (VMoNF). For an overview of the model, consider Figures [4](#page-9-0) and [5](#page-9-1).
+
+ <span id="page-8-1"></span><sup>3</sup>This is not a requirement, and in cases where we have classes with different levels of complexity, we can have components with different architectures. However, the training procedure does not guarantee that the most flexible normalizing flow is "allocated" to the most complex cluster. This is an interesting direction for future research.
+
+ <span id="page-9-0"></span>![](_page_9_Picture_0.jpeg)
+
+ Figure 4: Plate diagram of a mixture of K normalizing flows. θ<sup>k</sup> is the parameter vector of component k.
+
+ <span id="page-9-1"></span>![](_page_9_Picture_2.jpeg)
+
+ Figure 5: Overview of the training procedure of the VMoNF.
+
+ In a similar fashion to the variational auto-encoder, proposed by Kingma and Welling [2014](#page-14-1), a VMoNF is fitted by jointly optimizing the parameters of the variational posterior $q(z|\boldsymbol{x}; \gamma)$ and the parameters of the generative process $p(\boldsymbol{x}|z; \theta)$. After training, the variational posterior naturally induces a clustering of the data, and can be used directly to assign new data points to the discovered clusters. Moreover, each of the fitted components can be used to generate samples from the cluster it "specialized" in.
+
+ To implement and test the proposed model, Python was the chosen language. More specifically, this work relies heavily on the PyTorch [Paszke et al., [2017](#page-14-14)] package and framework for automatic differentiation. Moreover, the parameter optimization is done via stochastic optimization, namely using the Adam optimizer, proposed by Kingma and Ba [2015](#page-14-15).
+
+ Figure [5](#page-9-1) gives an overview of the training procedure; a sketch of one update follows the list below.
+
+ 1. The *log-probabilities* given by each component of the mixture are computed.
+ 2. The values of the variational posterior probabilities for each component are computed.
+ 3. With the results of the previous steps, all three terms of the ELBO are computable.
+ 4. The ELBO and its gradients w.r.t. the model parameters are computed, and the parameters are updated.
+ 5. Steps 1 to 4 are repeated until some stopping criterion is met.
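
As referenced above, here is a hedged PyTorch sketch of steps 1-4; the `flows` modules are assumed to expose a `log_prob(x)` method and `encoder` to return $(batch, K)$ softmax probabilities, which is an interface assumption, not the exact code of this work:

```python
import torch

def train_step(x, flows, encoder, optimizer, log_prior):
    """One pass of steps 1-4 for a batch x."""
    log_px_z = torch.stack([f.log_prob(x) for f in flows], dim=1)  # step 1
    q = encoder(x)                                                 # step 2
    # step 3: Eq. (31), averaged over the batch
    elbo = (q * (log_px_z + log_prior - torch.log(q + 1e-12))).sum(1).mean()
    optimizer.zero_grad()
    (-elbo).backward()          # step 4: maximize the ELBO via its negative
    optimizer.step()
    return elbo.item()

# step 5: call train_step in a loop until a stopping criterion is met
```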
2101.10043/main_diagram/main_diagram.pdf ADDED
Binary file (8.51 kB).
 
2105.09437/main_diagram/main_diagram.drawio ADDED
@@ -0,0 +1 @@
+ <mxfile host="app.diagrams.net" modified="2020-11-12T21:36:33.154Z" agent="5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/14.0 Safari/605.1.15" etag="Wk3jW8AFrXUbsfdJAEXc" version="13.9.7" type="google"><diagram id="nMhcQkM5K66BNZaovuNt" name="Page-1">7Vxbc5s4FP41nn0ygyTE5dF20nSmu213OrNtHgkoNhtsXMCN3V+/EggQQrYxxjbZiZOM4QAC6XzfuUiHjNBsuX2I3fXir8gn4Qjq/naE7kYQQh0g+sUku1wCbAvnknkc+FxWCb4FvwkX6ly6CXyS1E5MoyhMg3Vd6EWrFfHSmsyN4+i1ftpzFNbvunbnpCH45rlhU/o98NNFLrWhVck/kmC+KO4MTCc/snSLk3kTycL1o9dclHUO3Y/QLI6iNN9abmckZKNXjEs+Ah/2HC0fLCartM0Fn+5TEz9uvO8ff/7++mkBd8nfuzFv5ZcbbniHR9AMaXtTP/hFN+dssxA9R/ROtCPpjo+O+XMTFQfGSaa7CT0BOettdbBzK7qqlc9RkOwYoCJvs2Q9563SfucNFzfTiyNPsfwEjXNLsaLTvY/DjxMfA9ZuBSmM1mzT2zzRr+nrIkjJt7XrMdkrJSGVLdJlSPcA63y0WfnE//OpFLjeyzxm0i+bNAxWhMt9N375QpsJUsZUXdNxXQgzKTvzOQzW/7A9vv2Rt5D3+w4weZLG0QuZRWEUZ0+NzOzDh0iQ5x8mD8JQkM8c9lP2X0R5AVkSp2QriDjqH0i0JGnMMMKPjjGw82u4ERojnVuX14rSpcVZCHQuhS43I/Oy9YppdIOT7QTiGXuJ1xZdlpIf9NtdMhCsnhL2pdEz+V9rOkiIo8Oc1lHlhsF8xTBIlUKoxqZMGQE1mhN+YBn4Prt8GhP6wO5T1hQDxjoKVmk2lng6wnesrU0aFdBhF0SpmwYRa8TRe9K/pdfVbza1D7BC+8allG+erXxDqfwHOnSrOT38maSvUfzSXedxbje41o4YmdKzZdR/Iam3KOxDndVT516fMMMyj10/IJUlWEXMEPWhbGBLyrYso6FtQ1doGziXUrelULc03mTlT1i0wkgVukkSePUhJtsg/cEHlW0/Ctt3W3Fnl9lgzHe/kjignWAkzU75d7Nc8yALgNJQF5GN1VSZ7xL72VOZdMeeTT7MDmktiTaxRw4MDB8H4tdisKZuBdWpeFrIYhJSAvyqR24qbfI7fGXGSIAOlKADdQkReYf4ZWKoJbVUuo2iJQPhekupG89J2mgpg1fZ8e6Is//fiKNAi3flw7Gd7Ok0XOxWD5jt7fjeXqTm6jgUJ4NhQLUAFHCwZqOe0Klq7MIAdd4BehJAj5pS3BbIA7G5HHs0ktFoylx9zG6YRpLbL+O5I3imCHN3wmk8Pt3rI8p+F/cxakk33chb7JUsBV8PsaVIC5PN8j6kWVnC4qm1AHWSSwX0H80d96SKPURpTQdpN2NyWwE7+1IxGlDNhLztMXbkcMa49RgjxRgLsyzH0x7I0p4si2mkvQb7xTOqG77dbjqFlIo8oik3Wefzi8/BliVG2aNxvwF7ylSxPjiV4fMd9ekOFhPbN2putPKcpZs/4EbL0EADlimEB0DTgbU3RCh25ACh4eYte2LiD2e5a7Otu9aH4a73uL+xCaxuDhvKyZYBr5sigR5ypFsiG1mGhGxwPrKxbU0Msw9kDwSwCNGREsJL26yjDtlAszpCmLa9x1pfCcKwRVx4DQh3ATAQwMs5sBe6tSnFS1jjwsoeN8doUOjGjq4ZevXBErjlJKgtsnEjdNRtDSLhTvC6QG8RnPc9XbDPZnafLmhDGKYykTJ82oLtHzXe0wn7OY8ILXkwkEmEcoEPOppg5B2JBxBrFNEVdmX0tqXFGEFdE0igGyfe6NI0ge80GRRNhuUtxhgcpAmooxcYHefixghJ95FuREP9Go2uzBLVLMTFo6YLobsvut6SJXhgLJHDetQxiqLu4ggPwE15oCpB6dtbVIg7mIj2zqejKy23BPywUuSx4RyEqUFTDCGBxoY0T9OeDrL7kemgd6HDqSs4Y2Q05rCusIajmkfto+RntvNC8jD5PKoKHoPi2EqYAw9GzcrIY0VAQy81VFQYHS9KOaFuDEhpcFHEIfASqYh5uarB/Qsol0MRfEdRryii8cTNcaRKFC+NI/COo35xhBWLg9fG0W2XB8+dS+4teOxh3QS0jR4HNgeN5DVro2u+1Jh1Bo7dKgTsLSNS1WVfD81KMIIjUKwWBB0AazyA0DyBCa3mBIry7TNQDt8oyhsFS7Ajys1G9aV927UVVXX6oF8BuyOrKEiIP3rLb4E9/nH2k72x+Aj0VCRlyvm7Kp6+8ttc8KbFJA1LfnJQJFv6WfY5z9IPy4Cbvb3c0cBfo6VLVy6psrc9xuBtlKoi6Q09oHg/86plj7CH9xNuWRyGbYH9QNP1w/y/VvTXuuoRDsp2GMXzVDPGjqZ3nAY3APWWQFhmrSPfRJpjiiUGPZkWulv9q4H89Oo/NqD7/wA=</diagram></mxfile>
2105.09437/main_diagram/main_diagram.pdf ADDED
Binary file (29.6 kB).
 
2105.09437/paper_text/intro_method.md ADDED
@@ -0,0 +1,128 @@
+ # Introduction
+
+ Please follow the steps outlined below when submitting your manuscript to the IEEE Computer Society Press. This style guide now has several important modifications (for example, you are no longer warned against the use of sticky tape to attach your artwork to the paper), so all authors should read this new version.
+
+ All manuscripts must be in English.
+
+ Please refer to the author guidelines on the ICCV 2021 web page for a discussion of the policy on dual submissions.
+
+ Papers, excluding the references section, must be no longer than eight pages in length. The references section will not be included in the page count, and there is no limit on the length of the references section. For example, a paper of eight pages with two pages of references would have a total length of 10 pages. **There will be no extra page charges for ICCV 2021.**
+
+ Overlength papers will simply not be reviewed. This includes papers where the margins and formatting are deemed to have been significantly altered from those laid down by this style guide. Note that this LaTeX guide already sets figure captions and references in a smaller font. The reason such papers will not be reviewed is that there is no provision for supervised revisions of manuscripts. The reviewing process cannot determine the suitability of the paper for presentation in eight pages if it is reviewed in eleven.
+
+ The LaTeX style defines a printed ruler which should be present in the version submitted for review. The ruler is provided in order that reviewers may comment on particular lines in the paper without circumlocution. If you are preparing a document using a non-LaTeX document preparation system, please arrange for an equivalent ruler to appear on the final output pages. The presence or absence of the ruler should not change the appearance of any other content on the page. The camera ready copy should not contain a ruler. (LaTeX users may uncomment the `\iccvfinalcopy` command in the document preamble.) Reviewers: note that the ruler measurements do not align well with lines in the paper --- this turns out to be very difficult to do well when the paper contains many figures and equations, and, when done, looks ugly. Just use fractional references (e.g. this line is $095.5$), although in most cases one would expect that the approximate location will be adequate.
+
+ Please number all of your sections and displayed equations. It is important for readers to be able to refer to any particular equation. Just because you didn't refer to it in the text doesn't mean some future reader might not need to refer to it. It is cumbersome to have to use circumlocutions like "the equation second from the top of page 3 column 1". (Note that the ruler will not be present in the final copy, so is not an alternative to equation numbers). All authors will benefit from reading Mermin's description of how to write mathematics: <http://www.pamitc.org/documents/mermin.pdf>.
+
+ Many authors misunderstand the concept of anonymizing for blind review. Blind review does not mean that one must remove citations to one's own work---in fact it is often impossible to review a paper unless the previous citations are known and available.
+
+ Blind review means that you do not use the words "my" or "our" when citing previous work. That is all. (But see below for tech reports.)
+
+ Saying "this builds on the work of Lucy Smith \[1\]" does not say that you are Lucy Smith; it says that you are building on her work. If you are Smith and Jones, do not say "as we show in \[7\]", say "as Smith and Jones show in \[7\]" and at the end of the paper, include reference 7 as you would any other cited work.
+
+ An example of a bad paper just asking to be rejected:
+
+ > ::: center
+ > An analysis of the frobnicatable foo filter.
+ > :::
+ >
+ > In this paper we present a performance analysis of our previous paper \[1\], and show it to be inferior to all previously known methods. Why the previous paper was accepted without this analysis is beyond me.
+ >
+ > \[1\] Removed for blind review
+
+ An example of an acceptable paper:
+
+ > ::: center
+ > An analysis of the frobnicatable foo filter.
+ > :::
+ >
+ > In this paper we present a performance analysis of the paper of Smith \[1\], and show it to be inferior to all previously known methods. Why the previous paper was accepted without this analysis is beyond me.
+ >
+ > \[1\] Smith, L and Jones, C. "The frobnicatable foo filter, a fundamental contribution to human knowledge". Nature 381(12), 1-213.
+
+ If you are making a submission to another conference at the same time, which covers similar or overlapping material, you may need to refer to that submission in order to explain the differences, just as you would if you had previously published related work. In such cases, include the anonymized parallel submission [@Authors14] as additional material and cite it as
+
+ > \[1\] Authors. "The frobnicatable foo filter", F&G 2014 Submission ID 324, Supplied as additional material `fg324.pdf`.
+
+ Finally, you may feel you need to tell the reader that more details can be found elsewhere, and refer them to a technical report. For conference submissions, the paper must stand on its own, and not *require* the reviewer to go to a tech report for further details. Thus, you may say in the body of the paper "further details may be found in [@Authors14b]". Then submit the tech report as additional material. Again, you may not assume the reviewers will read this material.
+
+ Sometimes your paper is about a problem which you tested using a tool which is widely known to be restricted to a single institution. For example, let's say it's 1969, you have solved a key problem on the Apollo lander, and you believe that the ICCV70 audience would like to hear about your solution. The work is a development of your celebrated 1968 paper entitled "Zero-g frobnication: How being the only people in the world with access to the Apollo lander source code makes us a wow at parties", by Zeus et al.
+
+ You can handle this paper like any other. Don't write "We show how to improve our previous work \[Anonymous, 1968\]. This time we tested the algorithm on a lunar lander \[name of lander removed for blind review\]". That would be silly, and would immediately identify the authors. Instead write the following:
+
+ > We describe a system for zero-g frobnication. This system is new because it handles the following cases: A, B. Previous systems \[Zeus et al. 1968\] didn't handle case B properly. Ours handles it by including a foo term in the bar integral.
+ >
+ > \...
+ >
+ > The proposed system was integrated with the Apollo lunar lander, and went all the way to the moon, don't you know. It displayed the following behaviours which show how well we solved cases A and B: \...
+
+ As you can see, the above text follows standard scientific convention, reads better than the first version, and does not explicitly name you as the authors. A reviewer might think it likely that the new paper was written by Zeus et al., but cannot make any decision based on that guess. He or she would have to be sure that no other authors could have been contracted to solve problem B.
+
+ FAQ\
+ **Q:** Are acknowledgements OK?\
+ **A:** No. Leave them for the final copy.\
+ **Q:** How do I cite my results reported in open challenges? **A:** To conform with the double blind review policy, you can report results of other challenge participants together with your results in your paper. For your results, however, you should not identify yourself and should not mention your participation in the challenge. Instead present your results referring to the method proposed in your paper and draw conclusions based on the experimental comparison to other results.\
+
+ <figure id="fig:onecol" data-latex-placement="t">
+ <div class="center">
+
+ </div>
+ <figcaption>Example of caption. It is set in Roman so that mathematics (always set in Roman: <span class="math inline"><em>B</em>sin <em>A</em> = <em>A</em>sin <em>B</em></span>) may be included without an ugly clash.</figcaption>
+ </figure>
+
+ Compare the following:\
+
+ --------------------- -------------------
+ `$conf_a$` $conf_a$
+ `$\mathit{conf}_a$` $\mathit{conf}_a$
+ --------------------- -------------------
+
+ \
+ See The TeXbook, p165.
+
+ The space after *e.g.*, meaning "for example", should not be a sentence-ending space. So `\eg` is correct, *e.g.* is not. The provided `\eg` macro takes care of this.
+
+ When citing a multi-author paper, you may save space by using "et alia", shortened to "*et al.*" (not "*et. al.*" as "*et*" is a complete word). However, use it only when there are three or more authors. Thus, the following is correct: "Frobnication has been trendy lately. It was introduced by Alpher [@Alpher02], and subsequently developed by Alpher and Fotheringham-Smythe [@Alpher03], and Alpher *et al.* [@Alpher04]."
+
+ This is incorrect: "\... subsequently developed by Alpher *et al.* [@Alpher03] \..." because reference [@Alpher03] has just two authors. If you use the `\etal` macro provided, then you need not worry about double periods when used at the end of a sentence as in Alpher *et al.*
+
+ For this citation style, keep multiple citations in numerical (not chronological) order, so prefer [@Alpher03; @Alpher02; @Authors14] to [@Alpher02; @Alpher03; @Authors14].
+
+ <figure id="fig:short">
+ <div class="center">
+
+ </div>
+ <figcaption>Example of a short caption, which should be centered.</figcaption>
+ </figure>
+
+ All text must be in a two-column format. The total allowable width of the text area is $6\frac78$ inches (17.5 cm) wide by $8\frac78$ inches (22.54 cm) high. Columns are to be $3\frac14$ inches (8.25 cm) wide, with a $\frac{5}{16}$ inch (0.8 cm) space between them. The main title (on the first page) should begin 1.0 inch (2.54 cm) from the top edge of the page. The second and following pages should begin 1.0 inch (2.54 cm) from the top edge. On all pages, the bottom margin should be 1-1/8 inches (2.86 cm) from the bottom edge of the page for $8.5 \times 11$-inch paper; for A4 paper, approximately 1-5/8 inches (4.13 cm) from the bottom edge of the page.
+
+ All printed material, including text, illustrations, and charts, must be kept within a print area 6-7/8 inches (17.5 cm) wide by 8-7/8 inches (22.54 cm) high.
+
+ Page numbers should be included for review submissions but not for the final paper. Papers submitted for review should have page numbers in the footer, centered, .75 inches (1.905 cm) from the bottom of the page, starting on the first page with the number 1.
+
+ Page numbers will be added by the publisher to all camera-ready papers prior to including them in the proceedings and before submitting the papers to IEEE Xplore. As such, your camera-ready submission should not include any page numbers. Page numbers should automatically be removed by uncommenting (if it's not already) the line
+
+ % \iccvfinalcopy
+
+ near the beginning of the .tex file.
+
+ Wherever Times is specified, Times Roman may also be used. If neither is available on your word processor, please use the font closest in appearance to Times to which you have access.
+
+ MAIN TITLE. Center the title 1-3/8 inches (3.49 cm) from the top edge of the first page. The title should be in Times 14-point, boldface type. Capitalize the first letter of nouns, pronouns, verbs, adjectives, and adverbs; do not capitalize articles, coordinate conjunctions, or prepositions (unless the title begins with such a word). Leave two blank lines after the title.
+
+ AUTHOR NAME(s) and AFFILIATION(s) are to be centered beneath the title and printed in Times 12-point, non-boldface type. This information is to be followed by two blank lines.
+
+ The ABSTRACT and MAIN TEXT are to be in a two-column format.
+
+ MAIN TEXT. Type main text in 10-point Times, single-spaced. Do NOT use double-spacing. All paragraphs should be indented 1 pica (approx. 1/6 inch or 0.422 cm). Make sure your text is fully justified---that is, flush left and flush right. Please do not place any additional blank lines between paragraphs.
+
+ Figure and table captions should be 9-point Roman type as in Figures [1](#fig:onecol){reference-type="ref" reference="fig:onecol"} and [2](#fig:short){reference-type="ref" reference="fig:short"}. Short captions should be centered.
+
+ Callouts should be 9-point Helvetica, non-boldface type. Initially capitalize only the first word of section titles and first-, second-, and third-order headings.
+
+ FIRST-ORDER HEADINGS. (For example, **1. Introduction**) should be Times 12-point boldface, initially capitalized, flush left, with one blank line before, and one blank line after.
+
+ SECOND-ORDER HEADINGS. (For example, **1.1. Database elements**) should be Times 11-point boldface, initially capitalized, flush left, with one blank line before, and one after. If you require a third-order heading (we discourage it), use 10-point Times, boldface, initially capitalized, flush left, preceded by one blank line, followed by a period and your text on the same line.
+
+ Please use footnotes[^1] sparingly. Indeed, try to avoid footnotes altogether and include necessary peripheral observations in the text (within parentheses, if you prefer, as in this sentence). If you wish to use a footnote, place it at the bottom of the column on the page on which it is referenced. Use Times 8-point type, single-spaced.
2105.09821/main_diagram/main_diagram.drawio ADDED
The diff for this file is too large to render. See raw diff
 
2105.09821/paper_text/intro_method.md ADDED
@@ -0,0 +1,48 @@
+ # Introduction
+
+ In each generation $g$, DE uses an evolutionary search based on difference vectors to generate new candidate solutions. DE is a population-based EA which uses three basic iterative steps (mutation, crossover, and selection). At the beginning of the search on a $D$-dimensional problem, we initialize a population of $N$ individuals $x_{i,g}=(x^{1}_{i,g},x^{2}_{i,g},...,x^{D}_{i,g})$ randomly within the search range of the problem being solved. Each individual $x_{i,g}$ is evaluated by computing its corresponding objective function value. Then the mutation operation generates a new offspring for each individual. The canonical DE uses a mutation strategy called *rand/1*, which selects three random parents $x_{r_1}, x_{r_2}, x_{r_3}$ to generate a new mutant vector $v_{i,g}$ for each $x_{i,g}$ in the population, as shown in Eq. [\[Eq.mutation\]](#Eq.mutation){reference-type="ref" reference="Eq.mutation"}, where $F$ is a scaling factor parameter and takes a value within the range (0,1\]:
+
+ $$v_{i,g}= x_{r_1,g} + F\cdot(x_{r_2,g} - x_{r_3,g}). \label{Eq.mutation}$$
+
+ The crossover operation then combines each individual $x_{i,g}$ and its corresponding mutant vector $v_{i,g}$ to generate the final offspring/child $u_{i,g}$. The canonical DE uses a simple binomial crossover to select values from $v_{i,g}$ with a probability $p$ (called the crossover rate) and from $x_{i,g}$ otherwise. For the members $x_{i,g+1}$ of the next generation, DE then uses the better of $x_{i,g}$ and $u_{i,g}$. More details on DE can be found in Appendix [\[More-detail-DE\]](#More-detail-DE){reference-type="ref" reference="More-detail-DE"}.
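
A minimal sketch of one canonical DE generation as just described (minimization assumed; the forced inheritance of at least one mutant gene is a common convention, and the hyperparameter values are illustrative):

```python
import numpy as np

def de_generation(pop, fitness, F=0.5, p=0.9, rng=np.random.default_rng()):
    """One generation of canonical DE: rand/1 mutation, binomial
    crossover with rate p, then greedy selection."""
    N, D = pop.shape
    next_pop = pop.copy()
    for i in range(N):
        candidates = [j for j in range(N) if j != i]
        r1, r2, r3 = rng.choice(candidates, size=3, replace=False)
        v = pop[r1] + F * (pop[r2] - pop[r3])        # rand/1 mutant vector
        cross = rng.random(D) < p
        cross[rng.integers(D)] = True                # take at least one gene from v
        u = np.where(cross, v, pop[i])               # trial/child u_{i,g}
        if fitness(u) <= fitness(pop[i]):            # keep the better of the two
            next_pop[i] = u
    return next_pop
```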
9
+
10
+ Successive Halving (SH) [@jamieson-aistats16a] is a simple yet effective multi-fidelity optimization method that exploits the fact that, for many problems, low-cost approximations of the expensive blackbox functions exist, which can be used to rule out poor parts of the search space at little computational cost. Higher-cost approximations are only used for a small fraction of the configurations to be evaluated. Specifically, an *iteration* of SH starts by sampling $N$ configurations uniformly at random, evaluating them at the lowest-cost approximation (the so-called lowest *budget*), and forwarding the top $1/\eta$ fraction of them to the next budget (function evaluations at which are expected to be roughly $\eta$ times more expensive). This process is repeated until the highest budget, used by the expensive original blackbox function, is reached. Once the runs on the highest budget are complete, the current SH iteration ends, and the next iteration starts with the lowest budget. We call each such fixed sequence of evaluations from lowest to highest budget an *SH bracket*. While SH is often very effective, it is not guaranteed to converge to the optimal configuration even with infinite resources, because it can drop poorly-performing configurations at low budgets that might actually be the best at the highest budget.
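
A minimal sketch of a single SH bracket may make the schedule concrete; the multi-fidelity objective `f(config, budget)` (lower is better) and the helper `sample_config` are placeholders, not part of the method's specification.

```python
import numpy as np

def sh_bracket(f, sample_config, n=27, b_min=1, b_max=27, eta=3):
    """One SH bracket: keep the top 1/eta of configurations at each rung."""
    configs = [sample_config() for _ in range(n)]
    budget = b_min
    while True:
        scores = [f(c, budget) for c in configs]   # cheap at low budgets
        order = np.argsort(scores)
        if budget >= b_max:
            return configs[order[0]]               # incumbent at full budget
        configs = [configs[i] for i in order[: max(1, len(configs) // eta)]]
        budget *= eta                              # ~eta times more expensive
```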
11
+
12
+ Hyperband (HB) [@li-iclr17a] solves this problem by hedging its bets across different instantiations of SH with successively larger lowest budgets, thereby being provably at most a constant factor slower than random search. In particular, this procedure also makes it possible to find configurations that are strong at higher budgets but would have been eliminated at lower budgets. Algorithm [\[alg:HB\]](#alg:HB){reference-type="ref" reference="alg:HB"} in Appendix [\[More-detail-HB\]](#More-detail-HB){reference-type="ref" reference="More-detail-HB"} shows the pseudocode for HB with the SH subroutine. One iteration of HB (also called an *HB bracket*) can be viewed as a sequence of SH brackets with different starting budgets and different numbers of configurations for each SH bracket. The precise budgets and numbers of configurations per budget are determined by HB given its 3 parameters: *minimum budget*, *maximum budget*, and $\eta$.
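
For illustration, the bracket schedule can be derived from these three parameters with the usual Hyperband formulas; the helper below is a sketch of that bookkeeping, not DEHB-specific code.

```python
import math

def hyperband_brackets(b_min=1, b_max=27, eta=3):
    """Yield (initial #configs, starting budget) for each SH bracket of HB."""
    s_max = round(math.log(b_max / b_min, eta))
    for s in reversed(range(s_max + 1)):
        n = math.ceil((s_max + 1) / (s + 1) * eta**s)  # initial #configs
        b = b_max * eta**(-s)                          # starting budget
        yield n, b                                     # then run SH(n, b, b_max, eta)
```

For *minimum budget* $1$, *maximum budget* $27$, and $\eta = 3$, this yields the four SH brackets $(27, 1)$, $(12, 3)$, $(6, 9)$, and $(4, 27)$.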
13
+
14
+ The main advantages of HB are its simplicity, theoretical guarantees, and strong anytime performance compared to optimization methods operating on the full budget. However, HB can perform worse than BO and DE for longer runs since it only selects configurations based on random sampling and does not learn from previously sampled configurations.
15
+
16
+ We design DEHB to satisfy all the desiderata described in the introduction (Section [1](#sec:introduction){reference-type="ref" reference="sec:introduction"}). DEHB inherits several advantages from HB to satisfy some of these desiderata, including its strong anytime performance, scalability and flexibility. From the DE component, it inherits robustness, simplicity, and computational efficiency. We explain DEHB in detail in the remainder of this section; full pseudocode can be found in Algorithm [\[alg:dehb\]](#alg:dehb){reference-type="ref" reference="alg:dehb"} in Appendix [\[More-detail-DEHB\]](#More-detail-DEHB){reference-type="ref" reference="More-detail-DEHB"}.
17
+
18
+ ![Internals of a DEHB iteration showing information flow across fidelities (top-down), and how each subpopulation is updated in each DEHB iteration (left-right).](Figures/DEHB-flow-new.pdf){#fig:DEHB-bracket width="0.9\\columnwidth"}
19
+
20
+ A key design principle of DEHB is to share information across the runs it executes at various budgets. DEHB maintains a *subpopulation* for each of the budget levels, where the *population size* of each subpopulation is set to the maximum number of function evaluations that HB allocates for the corresponding budget.
21
+
22
+ We borrow nomenclature from HB and call the HB iterations that DEHB uses *DEHB iterations*. Figure [1](#fig:DEHB-bracket){reference-type="ref" reference="fig:DEHB-bracket"} illustrates one such iteration, where *minimum budget*, *maximum budget*, and $\eta$ are $1$, $27$, and $3$, respectively. The topmost sphere for *SH Bracket 1* is the first step, where $27$ configurations are sampled uniformly at random and evaluated at the lowest budget $1$. These evaluated configurations now form the DE subpopulation associated with budget $1$. The dotted arrow pointing downwards indicates that the top-$9$ configurations ($27/\eta$) are *promoted* to be evaluated on the next higher budget $3$ to create the DE subpopulation associated with budget $3$, and so on until the highest budget. This progressive increase of the budget by a factor of $\eta$, and decrease of the number of evaluated configurations by the same factor, is simply vanilla SH. Indeed, each SH bracket of this first DEHB iteration basically executes vanilla SH, starting from different minimum budgets, just like in HB.
23
+
24
+ One difference from vanilla SH is that random sampling of configurations occurs only once: in the first step of the first SH bracket of the first DEHB iteration. Every subsequent SH bracket begins by reusing the subpopulation updated in the previous SH bracket, and carrying out a DE evolution (detailed in Section [4.2](#dehb-sh-de){reference-type="ref" reference="dehb-sh-de"}). For example, for SH bracket 2 in Figure [1](#fig:DEHB-bracket){reference-type="ref" reference="fig:DEHB-bracket"}, the subpopulation of $9$ configurations for budget $3$ (topmost sphere) is propagated from SH bracket 1 and undergoes evolution. The top $3$ configurations ($9/\eta$) then inform the population for the next higher budget $9$ of SH bracket 2. Specifically, these will be used as the so-called parent pool for that higher budget, using the modified DE evolution to be discussed in Section [4.2](#dehb-sh-de){reference-type="ref" reference="dehb-sh-de"}. The end of *SH Bracket 4* marks the end of this DEHB iteration. We dub DEHB's first iteration its *initialization iteration*. At the end of this iteration, all DE subpopulations associated with the higher budgets are seeded with configurations that performed well in the lower budgets. In subsequent SH brackets, no random sampling occurs anymore, and the search runs separate DE evolutions at different budget levels, where information flows from the subpopulations at lower budgets to those at higher budgets through the modified DE mutation (Fig. [3](#fig:mutation-types){reference-type="ref" reference="fig:mutation-types"}).
25
+
26
+ ![[]{#fig:SH-using-DE label="fig:SH-using-DE"} Modified SH routine under DEHB](Figures/SH_with_DE.pdf){#fig:SH-using-DE width="0.8\\columnwidth"}
27
+
28
+ We now discuss the deviations from vanilla SH by elaborating on the design of an SH bracket inside DEHB, highlighted with a box in Figure [1](#fig:DEHB-bracket){reference-type="ref" reference="fig:DEHB-bracket"} (SH Bracket 1). In DEHB, the top-performing configurations from a lower budget are not simply promoted and evaluated on a higher budget (except for the *Initialization* SH bracket). Rather, in DEHB, the top-performing configurations are collected in a *Parent Pool* (Figure [2](#fig:SH-using-DE){reference-type="ref" reference="fig:SH-using-DE"}). This pool is responsible for the transfer of information from a lower budget to the next higher budget, but not by directly suggesting the best configurations from the lower budget for re-evaluation at a higher budget. Instead, the parent pool represents a good-performing *region* w.r.t. the lower budget, from which *parents* can be sampled for mutation. Figure [3](#fig:mutation-types){reference-type="ref" reference="fig:mutation-types"}b demonstrates how a parent pool contributes to a DE evolution in DEHB. Unlike in vanilla DE (Figure [3](#fig:mutation-types){reference-type="ref" reference="fig:mutation-types"}a), in DEHB, the parent vectors involved in DE mutation are extracted from the *parent pool* instead of the population itself. This allows the evolution to incorporate and combine information from the current budget, and also from the decoupled search happening on the lower budget. The *selection* step as shown in Figure [3](#fig:mutation-types){reference-type="ref" reference="fig:mutation-types"} is responsible for updating the current subpopulation if the newly suggested configuration is better. If not, the existing configuration is retained in the subpopulation. This guards against cases where performance across budget levels is not correlated and good configurations from lower budgets do not improve higher-budget scores. However, search on the higher budget can still progress, as the first step of every SH bracket performs vanilla DE evolution (there is no parent pool to receive information from). Thereby, search at the required budget level progresses even if lower budgets are not informative.
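
A minimal sketch of this modified mutation, as we read it: the three parents of *rand/1* are drawn from the parent pool rather than from the subpopulation being evolved, falling back to the global population pool (introduced next) when the parent pool is too small. The function and argument names are ours.

```python
import numpy as np

def dehb_mutant(parent_pool, global_pool, F=0.5, rng=np.random.default_rng()):
    """rand/1 mutant whose parents come from the parent pool."""
    pool = list(parent_pool)
    # Edge case: rand/1 needs three parents; top up from the global pool.
    while len(pool) < 3:
        pool.append(global_pool[rng.integers(len(global_pool))])
    r1, r2, r3 = rng.choice(len(pool), 3, replace=False)
    return pool[r1] + F * (pool[r2] - pool[r3])
```

Crossover and selection then proceed exactly as in vanilla DE, mixing this mutant with the target individual of the current subpopulation.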
29
+
30
+ Additionally, we also construct a *global population pool* consisting of configurations from all the subpopulations. This pool does not undergo any evolution and serves as the parent pool in the edge case where the parent pool is smaller than the minimum number of individuals required for the mutation step. For the example in Figure [2](#fig:SH-using-DE){reference-type="ref" reference="fig:SH-using-DE"}, under the *rand/1* mutation strategy (which requires three parents), we see that for the highest budget, only one configuration ($3/\eta$) is included from the previous budget. In such a scenario, the two additional required parents are sampled from the global population pool.
31
+
32
+ ![[]{#fig:mutation-types label="fig:mutation-types"} Modified DE evolution under DEHB](Figures/mutation-types.pdf){#fig:mutation-types width="0.85\\columnwidth"}
33
+
34
+ <figure id="fig:cifar10-speed">
36
+ <p><span><img src="misc/cifar10_speed.png" style="width:4cm" alt="image" /></span></p>
37
+ <figcaption><span id="fig:cifar10-speed" data-label="fig:cifar10-speed"></span> Runtime comparison for DEHB and BOHB based on a single run on the Cifar-10 benchmark from NAS-Bench-201. The <span class="math inline"><em>x</em></span>-axis shows the actual cumulative wall-clock time spent by the algorithm (optimization time) in between the function evaluations.</figcaption>
38
+ </figure>
39
+
40
+ As mentioned previously, DEHB carries out separate DE searches at each budget level. Moreover, the DE operations involved in evolving a configuration take constant time. Therefore, DEHB's runtime overhead does not grow over time, even as the number of performed function evaluations increases; this is in stark contrast to model-based methods, whose time complexity is often cubic in the number of performed function evaluations. Indeed, Figure [4](#fig:cifar10-speed){reference-type="ref" reference="fig:cifar10-speed"} demonstrates that, for a tabular benchmark with negligible cost for function evaluations, DEHB is almost 2 orders of magnitude faster than BOHB at performing 13336 function evaluations. GP-based Bayesian optimization tools would require approximations to even fit a single model with this number of function evaluations.
41
+
42
+ We also briefly describe a parallel version of DEHB (see Appendix [\[app:parallel\]](#app:parallel){reference-type="ref" reference="app:parallel"} for details of its design). Since DEHB can be viewed as a sequence of predetermined SH brackets, the SH brackets can be asynchronously distributed over free workers. A central *DEHB Orchestrator* keeps a single copy of all DE subpopulations, allowing for asynchronous, *immediate* DE evolution updates. Figure [5](#fig:parallel-DEHB){reference-type="ref" reference="fig:parallel-DEHB"} illustrates that this parallel version achieves linear speedups for similar final performance.
43
+
44
+ <figure id="fig:parallel-DEHB">
46
+ <p><span><embed src="Experiments-plots/plots-parallel/parallel_letter.pdf" style="width:4cm" /></span></p>
47
+ <figcaption><span id="fig:parallel-DEHB" data-label="fig:parallel-DEHB"></span> Results for the OpenML Letter surrogate benchmark, where <span class="math inline"><em>n</em></span> represents the number of workers used for each DEHB run. Each trace is averaged over <span class="math inline">10</span> runs.</figcaption>
48
+ </figure>
2105.13022/main_diagram/main_diagram.drawio ADDED
@@ -0,0 +1 @@
 
 
1
+ <mxfile host="app.diagrams.net" modified="2021-01-29T03:47:55.144Z" agent="5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.141 Safari/537.36" etag="mpGMnNdeo5sKY8CkJmvs" version="14.2.4" type="device"><diagram id="txuyIxdVINwuQH8d30RI" name="Page-1">7V1Rc6M4Ev41rpp9CIUkBOgxiZPbqr3JbdVs3dXdyxSxZYcaG7yAJ5n99SdhwEDLjuxINvasdyvjyKQN39ctdUvd0ojcL9/+kUWrl8/plC9G2J2+jch4hDEijIp/ZMuPTUvos03DPIun1UXbhi/xX7xqdKvWdTzleefCIk0XRbzqNk7SJOGTotMWZVn62r1sli6637qK5hw0fJlEC9j6n3havFStPvW2H/zK4/lL9dWYEH/zyTJqrt405C/RNH3dNJVPRx5G5D5L02Lzbvl2zxcSvRqYDQSPOz5t7izjSaHzB09uFt8vn7z//mv223L26+S7N49ukFfJ+R4t1tUzV7db/KhByNJ1MuVSjDsid68vccG/rKKJ/PRV0C7aXorlQvyGxNtZmhQVj0hePosXi/t0kWalLMLRlPJAtFdfy7OCv+18ItTgJDSMp0teZD/EJdUfBKzSkh+10njICar7f23RVV320iIKkUpJKgWZN9K3EIo3FYoHIEooAFQS4y/EF989Zx1k/T/Xkv0Ss5u8BO1WXIDw6m37oXg3l/9KwQak+Cak+EaeyHGctphNY2jkMXuijxWDQiNwYdf9mFHlRZZ+4y0zIj5hZNozNwzN7d4fh+N7M+bm0a65hRQ7tZSWtVGVtXmeg6kli6MEWNw0zosoEWAa7cpsYIi1MSSuJfwYHAIW0bMcyPu2OTg4qTtAODGAUxglN9GPCKAKE3KGdj/KgcCS8GPFlJ6iMczOMhg8+vI/S5Y3kMGAhcD6Iok1F6C76Uz8GNH7T9/Ej1+enuQTZ3waT4o4TQbXuQV+D+Ia8ha+p+3aUH0H7WihhFNiJCAdPobEdQJ63gECuXCEKDupgWEXoiFiB529WgNFK8GD1EI2SC30L0IL2RC1kMBRxixwvZF7NuP+ZGKoSwz6sybE0501wfYQZadFdBrxcGYK0RDMQ2kjiqwh6il6yktBNASIEuj7KBV0C7wFRIPLtXoGrN4LdHU0sAboibtRkyrKoNFTXUCZNUBP3IuanM1nYDafBA7WAxQFjq2AElEY86j8pDKu72AXLeJ5It5PBABcAHYn4Ykn0eK2+mAZT6fyz+8ynsd/Rc+lKInyKo2TonwQejeiYylrXaR5xYMlbx8r4Id+FjmpV0Dh1OQ1Ys8GiT1cyALA82R6K1dZxW9JmnCdYIBPwYrruzC94+fXbRlfREX8vSteBUz1Db9LqlsW4NMeCxR6E3m6zia8+sstxEAYczWEFVE25wUQVvLVPP5HKDzxcGDSYZHjaQ/BMHQInGJUjgdhEyOaNwwfDghWUTXptQg17KPqM4f6WqgSi6Osf8F5CALDPqhMG1SLmIbeIZiiI1Ye3PKlQLtcQPxdDJvlzLpiHP5n74LntCjSpc7IXaQrQ7SFIgLyO8Q1E3Ut0kJ6ylE31Bl1F4t4lfP3+Yry1SYpaha/SY4VS0e31CMa5uJPw8D3qus6Pb58GeKDKfjACj8IKRhB9qwIzpNaZQR5hHoajHiBHwZ2GRGvQTKiMQ9jkBGP4DEN3mck8H03jCwz4g3RRrBi3uGPLEpy6Y/LlVX3XkCiWvg+W+RG69+re0EGyEFew0TtCKAdc+v+KUM5rMiQGsPcqLNxgWxw0Z/vbOLZcwXUmEJ37AFmHZyNBWyBBewPj4XDpjUmiyjP48nu2AJdwMxGA/E2PtmG0ofObEBhARRmeWYDU+gCPH3+QzRUJQgfinLqRH1pQnPJf/U+/8aLyUv9i5absM9l7rkPzA3obWijK/RdB/s9zrB6XPIUSujZWv/BvoYtputiEScC6LrAw9WHtPGRd3vZ0yh/aaYU3g1O1bEo6KB3dBX9HtZVuZ9ln82zh+9803VXGrmSf7B8m8syGyd6zT0nTouvURItfog7yr9OoyL6mhdpVo4DHXXX02cTisao44Vs+wo7She6vsPgpEggNDGAWofc0JraaUR0gJcWp4phta8B8bIsLAL0V+3jeDkX976In8XP6K91xuUzzXnCs0jc/+MXHmWTFyf/Pj+SzT0zNV0TCb1bEjApKoumMbcX3AfMYXt0Q524reyOXMf3rKkGHFfGwrI2hjUYV80bWRglmBglaIug7ojR5Li0Z8ZO6rgFhh037wIcN9LPsvZCJ+hlD+j6bZ6GrB1um8A0+tG6rFJWG45doOib6f2n3z4R/Msmt9OPlnLQTZ7zzTzwJiU/X0VJRxd2JeV7qqT8J9Hf8lyC9SRJfE6zfNcXicfafNeOkpizdwo7p+cNdhZ9XQpC5dDe1L2dpocIVQtzWuqxaZb6EReCrs1iW19J/iceQqqIGCbl7X8r1uKm5FsxQvDY0VUQM7FIucByF02+zUt5NenVKnzHF2gpiQ8dgLHH8XTbF35wIawf7rtYlayLiGpNxbe3ZotDVTLkcaoB9OLfMX8VHxcvUhcm66xE6zKV4fF+FvLQjDJg0p8LVSsDa1ZAT6cNGguj1+ZKUNYf/qmD/ONciaCX3CJleXquhDFHQWeh9PJqrEHNja+sdsCnXJPGOiugl1fBGPb6p0FArSgersubvEGWNoUDLG3CzHTpwzD6hj7U+gprs+IWs4PSjC6mewiP7h7son2V4x6okNRHGyGrcF/l2MeOV27LcJsusRqGdh/v1QVW0TZdfzUM5T7esbOJNkEXnOIcwq3W9AoykcWCTILU3vLq6yi4E2/KqdjgTiYGBGP5v8qBPtd8rb9nutYAYQHI0qCqkiJ00ppkgjQc86uqKQpM1hSFA6gpIuiwubPLp5CZpBC5g+DQ8FLqJdAYhJ5Dgtb6doeGppr74KnQvWIF3a7uIqs5dnUKFq6M3ZAIz81zmxc2w+5+sedhV6f44drYDbs0eIbY3Sv2POxqRGTXxi4TRkbxlgZkht39Ys/DrkY99tWxG1LHbb2IIXb3ij0Lu1gjvr82dj3GHNZOGuznmSPsEAzco0PZ7u82itxQyD01v3C24eGtyKKJvNFHHhXrDOaISurKdHJ7cwwfnVSozk6ovmPUrG62lWyfxu+cg7hxHYy6CnGDjahd1w/DDgsUfUEtLp3Nyu1fbagEDIZr9OxPoU7jjE+qCgVhSNLarW7/7vVtkClrSLCit7G2YE4wDGU/vIvlINYMvH7p3DDghrFlfeDJZaMNdnMbBNow1vNP1r1YRBsO54NAG8Ze19GVULC92CDghsHQdZyTQgc5UNbrQS240ZnAfqSP3qOhlHPglSA0BLD19i28QLhBQf8g4IaRGQy9LxDsvlMyDLDhGu41gA1
8kmGADQPM6+hIgE8yDLhhNEmvAexhDpIwlhxX57blAPQrTdMhMMpnKmYCBTP20nSIogpeMBMn5VRnOZ358zBEge0oGWInZciD3jwg5IC1hvcxHO4SBAU7fbmBQ1sTwrgrUXfNwUeHybW9/KA4qfczL6Kb5jyzPQPViYpL9TazhLtXgMHvIRizh1szBgx4dAOlAStTIX17FvzzpUKCveB8x+/tLaWfRwesUyHNukmazaO75F44gP6821mgP7IXhlqzX651yo3XA1ko6tfbHa3Zr9ZAJwvynJGnSjf3VAVF9rpY6MZe4+kh4OQWRFQ7Ap90m01i/Gin67CT0IdbBLu6lkLtsQXXYq7RUsA5O0OwFMX5Un9bCqwIFlzp2olFrqCdTNJkFk95MpG7/8wEEthd53EyH7VOmhbvEx6n9cZiQ7GqfTuGWfEJApW1KTd5tEehf1jcddU+e9DPmmXCt/74zAly8WGCbTvtin2Go/k84/P6eIKN3c7iRO6+uu9M+NOmcx5nocfnc25sY18+p3BWenn0RjQROai7H6zrsHbWMGO9yN5iTqfO7sBXNTGzreVuu6TukXuDIResWiukWbd4GPql62K1hieQ/CR9PXIpc2jXyJDruw4lx/IMlq6QPAcAFNycjHJVcY2hLR6Fn8v/3tJR9hSo2SCjqeDxnHoHqHYMRZo9N061rSMJVBpwvLlfyNEe/bq2vuusf65Hr/CO9QTZNmDl9r1XT1+3E2XHdsdAUP9erLMHlyf/XPPKrH8eF3qjxPtcaBJSM04zpt0N+3tHKtxQx+11tRa9ZsUGy5P6zDfxoCKkygVy0VUHVe8kkvQmH9m+tWU14zf1hkeVCM/7OL/i1yyVzs72cnnGzOYcJfLwfw==</diagram></mxfile>
2105.13022/main_diagram/main_diagram.pdf ADDED
Binary file (44.7 kB).
 
2105.13022/paper_text/intro_method.md ADDED
@@ -0,0 +1,39 @@
1
+ # Introduction
2
+
3
+ In this section, we briefly introduce the background of $k$NN-MT, which involves two steps: creating a datastore and making predictions based on it.
4
+
5
+ The datastore consists of a set of key-value pairs. Formally, given a bilingual sentence pair in the training set $(x, y) \in (\mathcal{X}, \mathcal{Y})$, a pre-trained autoregressive NMT decoder translates the $t$-th target token $y_t$ based on the translation context $(x, y_{<t})$. Denote the hidden representations of translation contexts as $f(x, y_{<t})$, then the datastore is constructed by taking $f(x, y_{<t})$ as keys and $y_t$ as values, $$\begin{equation*}
6
+ (\mathcal{K}, \mathcal{V}) = \bigcup_{(x, y) \in (\mathcal{X}, \mathcal{Y})} \{(f(x, y_{<t}), y_t), \forall y_t \in y \}.
7
+ \label{equ:datastore}
8
+ \end{equation*}$$ Therefore, the datastore can be created through a single forward pass over the training set $(\mathcal{X}, \mathcal{Y})$.
9
+
10
+ During inference, at each decoding step $t$, the $k$NN-MT model aims to predict $\hat{y}_t$ given the already generated tokens $\hat{y}_{<t}$ as well as the context representation $f(x, \hat{y}_{<t})$, which is used to query the datastore for the $k$ nearest neighbors w.r.t. the $l_2$ distance. Denoting the retrieved neighbors as $N^t = \{(h_i, v_i), i \in \{1, 2, ..., k\} \}$, their distribution over the vocabulary is computed as: $$\begin{align}
11
+ p_{\textrm{kNN}}(y_t & |x, \hat{y}_{<t}) \propto \label{equ:knn_prob} \\
12
+ &\sum_{(h_i, v_i)} \mathbb{1}_{y_t = v_i} \exp (\frac{-d(h_i, f(x, \hat{y}_{<t}))}{T}), \nonumber
13
+ \end{align}$$ where $T$ is the temperature and $d(\cdot, \cdot)$ indicates the $l_2$ distance. The final probability when predicting $y_t$ is calculated as the interpolation of two distributions with a hyper-parameter $\lambda$: $$\begin{equation}
14
+ \label{equ:prob_ip}
15
+ \begin{split}
16
+ p(y_t|x, \hat{y}_{<t}) &= \lambda \ p_{\textrm{kNN}}(y_t|x, \hat{y}_{<t}) \\
17
+ & + (1-\lambda) \ p_{\textrm{NMT}} (y_t|x, \hat{y}_{<t}),
18
+ \end{split}
19
+ \end{equation}$$ where $p_{\textrm{NMT}}$ indicates the vanilla NMT prediction.
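
The two equations above combine into a short numpy sketch (brute-force retrieval for clarity; a real datastore would use an approximate-nearest-neighbor index, and `T`, `lam`, and the vocabulary-sized `p_nmt` are illustrative inputs).

```python
import numpy as np

def knn_mt_prob(query, keys, values, p_nmt, k=32, T=10.0, lam=0.5):
    """Interpolate the kNN distribution with the vanilla NMT distribution."""
    d = np.linalg.norm(keys - query, axis=1)   # l2 distance to every key
    nn = np.argsort(d)[:k]                     # indices of the k nearest neighbors
    w = np.exp(-d[nn] / T)                     # kernel weight per neighbor
    p_knn = np.zeros_like(p_nmt)
    np.add.at(p_knn, values[nn], w)            # aggregate weights per token id
    p_knn /= p_knn.sum()
    return lam * p_knn + (1.0 - lam) * p_nmt
```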
20
+
21
+ The vanilla $k$NN-MT method utilizes a fixed number of translation contexts for every target token, which fails to exclude the noise contained in the retrieved neighbors when there are not enough relevant items in the datastore. We show an example with $k=32$ in [1](#fig:model){reference-type="ref+Label" reference="fig:model"}. The correct prediction *spreadsheet* is retrieved among the top candidates. However, the model will finally predict *table* instead, because it appears more frequently in the datastore than the correct prediction. A naive way to filter out the noise is to use a small $k$, but this will in turn cause over-fitting problems in other cases. In fact, the optimal choice of $k$ varies when utilizing different datastores in vanilla $k$NN-MT, leading to poor robustness and generalizability of the method, as empirically discussed in [\[sec:different k\]](#sec:different k){reference-type="ref+Label" reference="sec:different k"}.
22
+
23
+ To tackle this problem, we propose a dynamic method that allows each untranslated token to utilize different numbers of neighbors. Specifically, we consider a set of possible $k$s that are smaller than an upper bound $K$, and introduce a light-weight *Meta-$k$ Network* to estimate the importance of utilizing different $k$s. Practically, we consider the powers of $2$ as the choices of $k$ for simplicity, as well as $k = 0$ which indicates ignoring $k$NN and only utilizing the NMT model, i.e., $k \in \mathcal{S}$ where $\mathcal{S}=\{0 \} \cup \{ k_i \in \mathbb{N} \mid \log_2 k_i \in \mathbb{N}, k_i \leq K \}$. Then the Meta-$k$ Network evaluates the probability of different $k$NN results by taking retrieved neighbors as inputs.
24
+
25
+ Concretely, at the $t$-th decoding step, we first retrieve $K$ neighbors $N^{t}$ from the datastore, and for each neighbor $(h_i, v_i)$ we calculate its distance from the current context representation, $d_i=d(h_i, f(x, \hat{y}_{<t}))$, as well as the count of distinct values among the top $i$ neighbors, $c_i$. Denoting the distances as $d=(d_1,...,d_K)$ and the value counts as $c=(c_1,...,c_K)$ for all retrieved neighbors, we concatenate them as the input features to the Meta-$k$ Network. The reasons for doing so are twofold. Intuitively, the distance of each neighbor is the most direct evidence when evaluating its importance. In addition, the value distribution of the retrieved results is also crucial for making the decision: if the values of the retrieved results are all distinct, then the $k$NN predictions are less credible and we should rely more on the NMT predictions.
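
A sketch of this feature computation (again with brute-force retrieval; only the construction of $[d; c]$ is the point here).

```python
import numpy as np

def meta_k_features(query, keys, values, K=32):
    """Return the concatenated [d; c] features for the Meta-k Network."""
    dist = np.linalg.norm(keys - query, axis=1)
    nn = np.argsort(dist)[:K]                 # K nearest neighbors, sorted
    d = dist[nn]
    c, seen = [], set()
    for v in values[nn]:
        seen.add(v)
        c.append(len(seen))                   # c_i: distinct values in top-i
    return np.concatenate([d, np.asarray(c, dtype=d.dtype)])
```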
26
+
27
+ We construct the Meta-$k$ Network $f_{\textrm{Meta}}(\cdot)$ as two feed-forward Networks with non-linearity between them. Given $[d;c]$ as input, the probability of applying each $k$NN results is computed as: $$\begin{equation}
28
+ p_{\textrm{Meta}}(k) = \textrm{softmax} (f_{\textrm{Meta}}([d;c])).
30
+ \label{equ:prob_estimate}
31
+ \end{equation}$$
32
+
33
+ Instead of introducing the hyper-parameter $\lambda$ as [\[equ:prob_ip\]](#equ:prob_ip){reference-type="ref+Label" reference="equ:prob_ip"}, we aggregate the NMT model and different $k$NN predictions with the output of the Meta-$k$ Network to obtain the final prediction: $$\begin{equation}
34
+ \label{equ:adaptive_prob}
35
+ p(y_t| x,\hat{y}_{<t}) = \sum_{k_i \in \mathcal{S}} p_{\textrm{Meta}}(k_i) \cdot p_{k_i\textrm{NN}}(y_t|x, \hat{y}_{<t}),
37
+ \end{equation}$$ where $p_{k_i\textrm{NN}}$ indicates the $k_i$ Nearest Neighbor prediction results calculated as [\[equ:knn_prob\]](#equ:knn_prob){reference-type="ref+Label" reference="equ:knn_prob"}.
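
A PyTorch sketch of the Meta-$k$ Network and this aggregation; the two-layer feed-forward shape follows the text, while the hidden width and the convention that the $k=0$ slot of `p_knn_per_k` holds the plain NMT distribution are our assumptions.

```python
import math
import torch
import torch.nn as nn

class MetaK(nn.Module):
    def __init__(self, K=32, hidden=32):
        super().__init__()
        self.ks = [0] + [2**i for i in range(int(math.log2(K)) + 1)]  # the set S
        self.net = nn.Sequential(
            nn.Linear(2 * K, hidden), nn.ReLU(), nn.Linear(hidden, len(self.ks)))

    def forward(self, feats, p_knn_per_k):
        # feats: (B, 2K) concatenated [d; c]
        # p_knn_per_k: (B, |S|, V), with the k=0 slice equal to p_NMT
        w = torch.softmax(self.net(feats), dim=-1)         # p_Meta(k)
        return (w.unsqueeze(-1) * p_knn_per_k).sum(dim=1)  # p(y_t | x, y_<t)
```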
38
+
39
+ We fix the pre-trained NMT model and only optimize the Meta-$k$ Network by minimizing the cross-entropy loss following Equation ([\[equ:adaptive_prob\]](#equ:adaptive_prob){reference-type="ref" reference="equ:adaptive_prob"}); this is very efficient, requiring only hundreds of training samples.
2106.13195/main_diagram/main_diagram.drawio ADDED
@@ -0,0 +1 @@
 
 
1
+ <mxfile host="app.diagrams.net" modified="2021-05-04T15:00:01.820Z" agent="5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.93 Safari/537.36" etag="-qUd6tZEIl0ZRxbwM7-d" version="14.6.9" type="google"><diagram id="0UKKD8_hs1gR9J5-D_AK" name="Page-1">7V1rd5s6Fv01XmvmQ1lIQgI+Nsm9nUc6tzO5nXv7qQvbOGbiGBeTxu2vH2SDY5Cwhc3jYJSs1RhCqM3e2trn6HFG5PZ58yHyVvOP4dRfjLA53YzI3QhjZGGW/OBnfqRnHEx2Zx6jYLo7Z76deAh++umF2dmXYOqv03O7U3EYLuJglT85CZdLfxLnznlRFL7mL5uFi2nuxMp79HNvg594mHgLX7jsj2Aaz9OzyDy4/G9+8DhP/2uHpr949vYX706s5940fD34v8gvI3IbhWG8e/W8ufUX/Onln8uvJb/dv7HIX8Yqf/CP+MPs9v713//98O3pZvF+sXnGP97h7O1+9xYv6UceMXPzNU7+Td93/CN7GMlHWPGXk5dx8uPmdR7E/sPKm/BzrwkBknPz+HmRHKHk5Th8WU796f14f8KbPD1G/OxvL/EiWPrp+akXPf2W3CaIOUtMw6T5k3h7ll+53vEDJe8tuSKIEsSDcJmcWYcv/HHfzILF4jZchNH2DZOZM/EnE/62wij4GS5jj78X/sd7NLYHT348macHs+SylIccpJt1HIVP/sFNb2/pHXWT34gIpKB896PY3xycShH54IfPfhz9SC5Jf4uJgZzdH2UtxEqf++sB3Zz03PyAaVYGkJdS/HF/9zcWJC9SIlQghSPhBGaLmD+MlbfMcYJ9e+EE3j61dzt43icXYLzabJ9Q9vvk1SP/OeZ/PsLJsYk2KLtt8jZ3d95dBJZ4rirvZjNcwrsim6ZszCirxMiFN/YXN/vPlN1sGfIPVgcrbebmOEmwhJNIwkns1MDJL58ffqL/3bzehhR//TK/mXye373DjsBJZgk8ST5fnOdC/nmnj+gQrfSUtwgeOaDR7sPc8IcVJN3A+/T8czCdLsq4F+0Id1JaojD2UuLIcBXIUgOUNI8kMy0BSSwDsikcCdY4ntUku8NR2kcQJOB4IO3r12A93x+RDQGr6OpWolFJr7+lA/ARkrauOdIpR2xwHLGowJF1YgO9Z47u7l+//8Tw0ZT6thIxXGYTr3ViuF0SQ24ULG0UzkESEWiOj2kgzwISnHUXQzCUhWUayWNJHrOIpKiuVqvmnWhjBsuYFVUbgDOTdMCaJN2SBF6IZ4mdu7bv7dt31GlgJ3UL1NZu4Ty3gLtzC3Ikr8UtWNfSEWCUpwiWUAQxWXM3G3MLYkegSdItSQg4klhin9Ajt6DKDPBuoThU0C4z5H2Mzi2ciaUDzC0wcWBQI6mCJOkwSyRHUvR9mDKN5GkkhcytiKRMXpvL94n9rjZn3ZozF5w5k6T3NUk6JQmBF+ZZIkm0g2/fwZNOYzupW7BF36fdghKWwtS+9tyCHEnt+85D0hKmXnSMJMNX0qWjq+nSC419z4dD4SYSjhCrMd/napIAI0kxOOieJJZIkh75PlVmgPd9VjEiaJUZcrcgTuDVbkEJyw6zRHIkxTZOEdZInkSSKmRuZZMvGsv3ZY1dd+lQuvRiY4fQpV/LCq7rIQm84ECyPYD2fR34vk4jAqlbcMTcgnYLSm5BYX5fq24Bi1O3NJJKSCrMvWgXSXGcVyOpgiTr0MHLY7ErGYyFsWFKHW29kNtvd0sUuYG/kuz+9XCEguMIFYO8/vh3VWKAt+/UBkcM60oWdVyNeDAEjyNXsgL0ejgCz4RQ0YToDqZ9YgB0HmIHw6xNIzt/TJIH50cthKN8t6ITASmqB9B3iOYAxbLNPzAzsixcK+kFKvYHBG+yXb40pCfWaDvYQFYeVcoM1xFbqusa2bB6O8CKY/iIbVATo/hXCCyhpmHncXWp4dgirtTan24HVzEf6GyaWIt1hahS0zGyOc1Zr2rZBpU0V0KMLJxqBVZbhLWJ/ZnzBp9uqN6tGeZuzQ448ydJX1dlKHKrMnS7n3j6ml+kyVqJrMhtg6yOCY+sfZ7bCiOEbZ42GBxtHDG1rqskwJCyljjZaZUEOSfFbJx2hgNmKDxn6IhpQe0MgZO1HWfownOGjpjw1M4QmMa58Jyhe3Rm/VaMgHIGyFz65klTsG4AZto7Ffbo2TouTaFuKeTAo1CFSaNFS6TZ1CmbkGnCo1Of82JA1v60QBwMjjiumIXQ/gcYa+AZILfCjFRtgCBwCJ4DcivMWNUOCBadEDwH5Pa6OOFQHBCC54CQeVSIQFsgGNuntkCbogXqfndVt0IAD9oCDYZDRQsEgEMVtvPriwUaCp2KhSIB0Alld+6nB4Kx43MLzCl6IAjMEd2z9kDAaAPPAyGzQiUxbYIgkAieCUJmhdFU7YJg8alYCAkEn3Tdix64IALPBbHLV8Jc6SxxIHW5W2BlwWQBKNtNxSTBADbtbgFqYUdAybJ92VzExuRHjN4b2YJhYDhbwn6BXePczmKk3i/8GE6n44DrdCTViJrnaL+XfgyGrkVBhUBXcT5Sj0I+VeL0PeSzMDziaHPdShzVuemyRXOtY/uB9VvwYvthFmJtAWposb0t+mkd21+Os1jNq2ucL18yr2P7q+p04MX2tjjfVcf2mq5SQYVA115P7B9KbF+slAWBOOLkJG2uG4ijOjdd+9VIw6ql1f6UeVmprXaRRmIaB+EmdkkeGNLF2ewAkBZTI7pN19GmBfXuHGkxN6KRrqNNw1NvMTui1bsGpIuzcAEgLeYYdJuuo03DU29JPRIT69z2pVAX9yzrPspCOp5uZTsxAEiLK3J0m66jTYPLnEiqkGuka2nU4Mw3Fle3aPmuI6AGZ76zVLxu1HU3anDum4j5UGxaOqS+FOpinS4ASOt8aBs1rgAALaZDdZOuo0nDE28xdaKRrqNNwxNvMXWixbuGeBqeeIvxtG7SdTRpeOIthtNkQ0b72Y/B43OY3KEIfO8mpvV+RiO8VR+IiAH6/cPvH+GSRZErU+o7U0uJKw4eE1aNKzWwgbl5MljUMrJB6QM+yDYgRtZbnfj6GWGJ46WaEW0wwkZQGSHbjKjABn85fR9F22c1WXjrdTDJ4198Xvz44Imb26/kvL8J4j/TZ8xffzl4fbc5PPhxcPDJj4Lkw/pRDtE/0ofGQU9giX7sb8wPvhwevN16e8TvjfeHxbu/WZcdYdfz3MEnL06uXW7PYBNJ+jHKv6XEs/n3MTol5I4m/hG0aEqP2Ise/fjIhVm5OX/66B+l5wH3qIR72bnIXyRG7bufe7syMqb/w6cwSD7bnv00WwuSFeRDIqV3Hz79wzdWC/dizul77Z6PcK9tC9l/+Esajdix1ra0JbvROKrlNqeWvnCG3HN3lG/Smf2f
JBzlbaPc/0d+8m688fZ+vAGt+FPf4kBvRvSugjwcN2k5ZUfSZnRC4Ypijc1JuFwmXUv67kepjS8VcdMgyCKXtYjsknA2W/vNsJOKmT0t6WAlHStKehY7AJF0RvKGJvG8hnvw5Zyn7pIKQkfv27TS0wYX2mqlr1np98I3GKWXLbHVSg9T6TMfclLp95WCoEi9hQ2UN93IzquyfaaZt0nlWzeu+OJomVZ8sIqfCeBgFF8c+NGKD1bxmaLiZzuCAxH84qwkQmrx9o5d6baNC32D27Broa9b6DPdG4rQO7LAswA7f8SrAjbOMcW6/OnZWao1a8NZRacDZbIkykQaG+SU7Fsvto/FIlitOQFPjGR561XyMJKDWbDhHckJOhcbhdjXKXc3bPslNqzdV4WWsieO8tiUiKC0MmpjCDrlG5WPleTTKZXPQwUdl8rn5bNkTivs6Wky6lRzRJrs6dMATbJMnFJDb24mjKOQ7ACgiNjqWhEdWQQ7ZEUsCxLhKqI4j/NAEfPekD8dZZU8IZG7ewne83qUU+JcqyunOp2gKKfCHA8AyrkXru6UU3tJOXF6o5xu+f7rWjk7V051OgFRTlcWmgBUTkm7a1c5XayVU0qc/ihn+UwHrZxdK2cFOkFRTpjROiqMQeDOPaero3U5cfqjnEejdZ2/bCR/WYEmUBRRYei+A0XENK+IpHsvqbIkZUiKWDb2CVcRy9dHaEVsTBHVaQJDEbEpS9Z0r4iEAFNEPnNJK6KMOH1RxETStCK2rYhVaAJFEWHO+rEKq5cJ7VwRVVY+DEkR+zbrB5vl8+y1IjamiH2b9YNNWXIFgCK64BRRZWXAkBSRlWANVxHLJ6TrEZiOR2Cq0AmKcipMZulAOWlxFYjbtXJKir0NWzndEqzBKqekiJtWTjDKqU4nIMopqRQHQTlZcaSme+VUSFcMSTn3xOmPcpYXaddReFNReAWaQFFEhUkqHSiig8EpokK6YlCKSEuwhquIsjyKVsSGFVGdJlAUEeaaGheeIuo1NXLi9EYRsSw/oqPry5Wzjui6Ap2AKCdWmMzSxcxwsyidnQ/pZA9GS2eBOf2RzqOLarSZbMRMVqAJFEkEulgGwZNEvVpGzpz+SKJeLdOBJPZttQyWVAwHIYm4KImdTw6XFNwetiT2bbnMnlJaEtuUxN4tl5GUZgYhiUSQRNaxJEpqGw9aEknvAmeiA+f2JbECTaBI4pmBM25YEq2CJHa+zQSWFAEftiT2LnCWFPfWkti4JPYucJaUBgchiVSQxM5dog6c5czpjyTqwLkDSexd4GzBXFXtQvOIkurggxZEq3eLqi29qLp9QaxAExiCiCwxk4gpawDMhT+LL4NyPfem21Jm24MnP57MR1npr9iLg3CZHhYFJFdq6DQp1HW+tPHbZl7QLWIZ2QqZA6hxu1CLqVAN9eVQY3hQY0uSIGDmX35+jUf41vsa/zU5EoDPQ3CiT7+0Cxc0vtinOxN/MikjXw24MUZzuLEMjQPQsl3Kcz02bgw0lenRlxV03HdoeUD4lSqlGJFFjW0hRJV6jPuakQYdqVaNFAycuf0+hvjJ0otYufYiyoJIIMUXWSEsoGZBMJRr61oFjaKFGzVcYBFT0WWQBjqeaAcBjJ6nBo3CxMgmdWcqZYoq1W7Xsq+srqGsAiWizIWGpEIupDVPIC1SWuYJasCDFArbMjFeZ2xv5NoJ2Wn5CuyVNGRfBEv/XfbeeNCOOFvo9gmZxXi+rLLs35erl/ggol9dTUQvMUDme/5dD4UsZhtptdvdV74SS9beO8t10vJRwgbpdBsuv2s2nRGPINNwcI5BtthDtMwgmCtbESHEsAutzVJ6Vg2Kt17cKudObzLmWUrk7Ix5ee3268yYH0swNEATGBlzzMQAqIVu9T/+Opi+JBGJ7lqrx17EZQaBZc6Yyn5ZlyX7dqZ0dJCJG6nm4fhBMZ8ndipvcPN3M/XW89zBJy9O/nq5PcPrFlTohRybfx9DH0iKDiFEjEIoabsGo+7+y7byN1VN2iHEJLfO36vpvB0rj0jXK295Ya+Y9q1RLbdJPuDuLZVoIifMPVekgtNSFrwoUeCfqUXl7WHFn/oWB3ozoncV2uNxYcxln1CFrpSVDT5XNtimQVA2lH1uA8kuCWeztd8QO8unODTYEz88Bfzut7tHyrOIukuu3iUj6hjZUtTyPhklWkolg3AN9svle341yalvL76fXJV8ouU0+feXzSSBUhPrjOF4ig0LveXkUL7/lOTc0dsgS90UCz/Ovn28n4R/ju/++evNvz5/c+5/faewdA9W1n02w5KR+OQ3UzZm9C3Yk/Q7ApTlwT/BRqGUdbspeSlWslAPMlY+mlLflmHlMpt4NWHFOh07kQLVt6GsBCZnaklDHzwmrCagXD5CAQ2quga5sFnexwod7P3D7x8H2p1exCBkWgKFGkudJIdRyGF7ixKShzP/GE551PbL/wE=</diagram></mxfile>
2106.13195/main_diagram/main_diagram.pdf ADDED
Binary file (43.5 kB).
 
2106.13195/paper_text/intro_method.md ADDED
@@ -0,0 +1,30 @@
1
+ # Introduction
2
+
3
+ Following prior work [@finn2016unsupervised; @babaeizadeh2017stochastic; @denton2018stochastic; @rubinstein1997optimization; @oprea2020review; @wu2021greedy], we define the problem of pixel-level video prediction as follows: given the first $c$ frame of a video $\mathbf{x}_{<c} = \mathbf{x}_0, \mathbf{x}_1, \ldots, \mathbf{x}_{c-1}$, our goal is to predict the future frames by sampling from $p(\mathbf{x}_{c:T}|\mathbf{x}_{<c})$. Optionally, the predictive model can be conditioned on additional given information $\mathbf{a}_t$, such as the actions that the agents in the video are planning to take. This is typically called action-conditioned video prediction.
4
+
5
+ Variational video prediction follows the variational auto-encoder [@kingma2013auto] formalism by introducing a set of latent variables $\mathbf{z}$ to capture the inherent stochasticity of the problem. The latent variables can be fixed for the entire video [@babaeizadeh2017stochastic] or vary over time [@denton2018stochastic]. In both cases, we can factorize the likelihood model to $\prod_{t=c}^T p_\theta(\mathbf{x}_t | \mathbf{x}_{<t}, \mathbf{z}_{\leq t})$ which is parametrized in an autoregressive manner over time; i.e. at each timestep $t$ the video frame $\mathbf{x}_t$ and the latent variables $\mathbf{z}_t$ are conditioned on the past latent samples and frames. By multiplying the prior we can factorize the predictive model as $$p(\mathbf{x}_{c:T}|\mathbf{x}_{<c})=\prod\nolimits_{t=c}^T p_\theta(\mathbf{x}_t | \mathbf{x}_{<t}, \mathbf{z}_{\leq t})p(\mathbf{z}_t | \mathbf{x}_{<t}, \mathbf{z}_{<t})$$ where the prior $p(\mathbf{z})=p(\mathbf{z}_t | \mathbf{x}_{<t}, \mathbf{z}_{<t})$ can be either fixed [@kingma2013auto; @babaeizadeh2017stochastic] or learned [@chung2015recurrent; @denton2018stochastic; @castrejon2019improved]. For inference, we need to compute a marginalized distribution over the latent variables $\mathbf{z}$, which is intractable. To overcome this problem, we use variation inference [@jordan1999introduction] by defining an amortized approximate posterior $q(\mathbf{z}|\mathbf{x})=\prod_{t} q(\mathbf{z}_t|\mathbf{z}_{<t}, \mathbf{x}_{\leq t})$ that approximates the posterior distribution $p(\mathbf{z}|\mathbf{x})$. The approximated posterior is commonly modeled with an inference network $q_\phi(\mathbf{z}|\mathbf{x})$ that outputs the parameters of a conditionally Gaussian distribution $\mathcal{N}(\mu_\phi(\mathbf{x}), \sigma_\phi(\mathbf{x}))$. This network can be trained using the reparameterization trick [@kingma2013auto], according to: $$\mathbf{z}= \mu_\phi(\mathbf{x}) + \sigma_\phi(\mathbf{x}) \times \epsilon, \hspace{1cm} \epsilon \sim \mathcal{N}(\mathbf{0}, \mathbf{I})$$ Here, $\theta$ and $\phi$ are the parameters of the generative model and inference network, respectively. To learn these parameters, we can optimize the variational lower bound [@kingma2013auto; @rezende2014stochastic]: $$\begin{equation}
6
+ \label{eqn:elbo}
7
+ \mathcal{L}(\mathbf{x}) = -\mathop{\mathrm{\mathbb{E}}}_{q_\phi(\mathbf{z}|\mathbf{x})}\big[\log p_\theta(\mathbf{x}_{t:T}|\mathbf{x}_{<t},\mathbf{z})\big] + \beta D_{KL}\big(q_\phi(\mathbf{z}|\mathbf{x})||p(\mathbf{z})\big)
8
+ \end{equation}$$
9
+
10
+ where $D_{KL}$ is the Kullback-Leibler divergence between the approximated posterior and the prior $p(\mathbf{z})$ which is fixed to $p(\mathbf{z})=\mathcal{N}(\mathbf{0}, \mathbf{I})$. The hyper-parameter $\beta$ represents the trade-off between minimizing frame prediction error and fitting the prior [@higgins2016beta; @higgins2016early; @denton2018stochastic].
11
+
12
+ <figure id="fig:model" data-latex-placement="t">
13
+ <embed src="figs/network2.pdf" />
14
+ <figcaption>The FitVid architecture. In the figure, <strong>(bn)</strong> is batch-normalization <span class="citation" data-cites="ioffe2015batch"></span>, <strong>(swish)</strong> is the activation <span class="citation" data-cites="ramachandran2017searching"></span>, <strong>(s&amp;e)</strong> is Squeeze and Excite <span class="citation" data-cites="hu2018squeeze"></span> and <span class="math inline"><strong>(</strong><strong>N</strong> <strong>×</strong> <strong>N</strong><strong>)</strong></span> is a convolutional layer with kernel size of <span class="math inline"><strong>N</strong> <strong>×</strong> <strong>N</strong></span>. The strides are always one, except when down-sampling which has a stride of two. For up-sampling we use nearest neighbour. The number under each box shows the number of filters while the top numbers indicate the input size. To model the dynamics, we use two layers of LSTMs <span class="citation" data-cites="hochreiter1997long"></span>.</figcaption>
15
+ </figure>
16
+
17
+ # Method
18
+
19
+ Striving for simplicity, we propose the FitVid model for stochastic video prediction, a convolutional non-hierarchical variational model with a fixed prior of $\mathcal{N}(\mathbf{0}, \mathbf{I})$. The architecture of FitVid is visualized in Figure [1](#fig:model){reference-type="ref" reference="fig:model"}.
20
+
21
+ **Encoder and decoder.** Following the recent advances in image generation, we use similar residual encoding and decoding cells as NVAE [@vahdat2020nvae]. Each cell includes convolutional layers with batch-normalization [@ioffe2015batch] and swish [@ramachandran2017searching; @elfwing2018sigmoid; @hendrycks2016gaussian] as the activation function, followed by Squeeze and Excite [@hu2018squeeze]. The encoder is made of four encoding blocks with two cells in each block. There is down-sampling after each encoder block using a strided convolution of size three in the spatial dimensions. The decoder also consists of four decoding blocks with two cells in each block, and a nearest neighbour up-sampling after each block. The number of filters in each encoding block is doubled while the number of filters in each decoding block is halved from the previous one. There is a residual skip connection between the encoder and the decoder after each cell which are fixed to the output from the last context frame. The statistics for batch-normalization is averaged across time.
22
+
23
+ **Dynamics model.** Similar to @denton2018stochastic, the encoded frame $\mathbf{h}_t$ is used to predict $\mathbf{h}_{t+1}$ using two layers of LSTMs [@hochreiter1997long]. Likewise, $q(\mathbf{z}_t|\mathbf{x}_{<t})$ is also modeled using a single layer of LSTMs with $\mathbf{h}_{t+1}$ as the input that outputs the parameters of a conditionally Gaussian distribution $\mathcal{N}(\mu_\phi(\mathbf{x}), \sigma_\phi(\mathbf{x}))$. During the training, $\mathbf{z}$ is sampled from $q(\mathbf{z}_t|\mathbf{x}_{<t})$ while at the inference time $\mathbf{z}$ is sampled from the fixed prior $\mathcal{N}(\mathbf{0}, \mathbf{I})$. The input to the model is always the ground-truth image (which is usually referred to as teacher-forcing [@goodfellow2016deep; @cenzato2019difficulty]). At inference time, the predicted image in the previous time-step is used as input to predict the next frame.
24
+
25
+ **Data augmentation.** We find that FitVid can [substantially]{.underline} overfit on some of the video prediction datasets (read Section [5](#sec:exp){reference-type="ref" reference="sec:exp"} and [6](#sec:analysis){reference-type="ref" reference="sec:analysis"}). To prevent the model from overfitting we use augmentation. To the best of our knowledge, this is the first use of augmentation in video prediction, perhaps because prior state-of-the-art models tend to underfit already and therefore would not benefit from it. Given the rich literature in image augmentation, we augment the videos using RandAugment [@cubuk2020randaugment]. We randomize the augmentation per video but keep the randomization constant for frames of a single video. RandAugment substantially improves the overfitting, however not entirely, as it can be seen in Figure [\[fig:augment\]](#fig:augment){reference-type="ref" reference="fig:augment"}. We improve the augmentation by selecting a random crop of the video before resizing it to the desired resolution at the training time, called RandCrop. The combination of RandCrop and RandAugment successfully prevents the overfitting, leading to models that both fit the training set and generalize well to held-out videos.
26
+
27
+ **What FitVid does *not* need.** Prior works on variational video prediction [@finn2016deep; @babaeizadeh2017stochastic; @lee2018stochastic; @villegas2019high; @wu2021greedy], generally require a range of additional design decisions for effective training. Common design parameters include using curriculum training, commonly by scheduled sampling [@bengio2015scheduled], to mitigate distributional shift between training and generation time [@oh2015action; @finn2016unsupervised; @lee2018stochastic; @cenzato2019difficulty]; heuristically tuning $\beta$ in Eqn [\[eqn:elbo\]](#eqn:elbo){reference-type="ref" reference="eqn:elbo"} to balance the prediction vs fitting the prior [@denton2018stochastic] by annealing it over the course of training [@babaeizadeh2017stochastic; @lee2018stochastic] or learned priors [@denton2018stochastic; @villegas2019high; @castrejon2019improved; @wu2021greedy]. Each of these design choices introduces hyperparameters, tuning burden, and additional work when applying a model to a new task. FitVid does not require any of these details: we simply train optimizing $\mathcal{L}(\mathbf{x})$ from Eqn [\[eqn:elbo\]](#eqn:elbo){reference-type="ref" reference="eqn:elbo"} using Adam [@kingma2014adam].
28
+
29
+ Check the appendix for more architecture details. The source code is available at\
30
+ <https://github.com/google-research/fitvid>.
2110.01823/main_diagram/main_diagram.drawio ADDED
@@ -0,0 +1 @@
 
 
1
+ <mxfile host="Electron" modified="2021-06-01T00:22:13.663Z" agent="5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) draw.io/14.5.1 Chrome/89.0.4389.82 Electron/12.0.1 Safari/537.36" etag="knh3HqD6atfoGVqVF0s8" version="14.5.1" type="device"><diagram id="tRWMP5fLpBWVF-rBI5lD" name="Page-1">7V1bc6O4Ev41rso82MUd8xjnNnXOzNbUSfbMnPNGbNlmFyMPxpNkfv1KBmEQbXMx4pIwW7UxEgjo71PTUrdaI/Vm8/rg29v1V7xA7kiRFq8j9XakKIpsSOQPLXmLSiRpGpasfGcRlsnHgkfnN4oKowtXe2eBdqkTA4zdwNmmC+fY89A8SJXZvo9f0qctsZu+69ZeRXeUjgWPc9tFmdO+O4tgHZZO9cTZn5GzWrM7y1JUs7HZyVHBbm0v8EuiSL0bqTc+xkH4a/N6g1wqPSaX8Lr7E7Xxg/nIC4pcMDX3wcO/v0iPL7dPwZfNn9c/Z3+Mo1Z+2e4+euHoYYM3JgHy3Fv6k9zJdl3k4pVvb0bqbIt8Z4MC5PN1344Vs5e1E6DHrT2nLbwQjpCydbBxyZFMfi6dV8RAp8c+DuzAwR45HFsSPcF1tp/jk133BrvYPzyXutTpf6R8F/j4b5SoMQ7/6BXYCxLl4T9SnhUdkwPyA/SaKIpE+YAweSP/jZwS1ZoRqhGv5Wl0/HIkicw4vE4QhF1nR7xcxS0foSM/IvRKIKkNSFZDUtG7BqU+QFlLp1TltpE0BiSrIWl1DUlzQLIakjFMnYFyOkBZEUq1a1BaA5QVodS7BiW72YBlaSw7Z/bIw7CyKpadM3xkZcCy4sCyc5aPrA5YVsSyc6aPPEz4VMWye7ZPgSHJysf7bfG3j2fE7WfWgnRWKpqUlops6FmpsLKkVCxhUikxDxYQfqLfmLaXYndcXg+zQ2Knybyw0XQ5B8k8n6LnJUja8zQoTGU2VRhhlkXMAmg8FQZYiemuhgAbdw0xs1uIQQNoww0iBZyCzvi5x6xivDtI+5qcQKT0eqwkv1bR3xpaeUAe8gmUvxA5ZRM6F8N2n312ztX1PsBj5M1Jtf+J1RNphLeOH8feUNZ4z7tt4vEyp3FsJaAGaa7ZrrOixJoTYhy4SaF35rZ7HVVsnMWCXj7zEXm9SPtSEm6x4wUHAPXZSL+lbZEn3x1p25SWAEgXDyGTrNNEsU7p01C/BoNDnxrpT6vZtsGhFBigk2ac7Y4ymWExd/F+UUS8RbSthz1Uj3gNKV+8GmC4qMKkCw2zyulD2TqnVXdb22Nl/0Eroml2lL6cjiTPnjwxqzqh5q7WRECkpQBttph0L/KTNI/dPe0gn3KazlOnxIT1FmgRKcRyNFoukTEHP9oL03qWpHq4xFvB4NjAaLSrQuO8kmTSC3yi57FAjycy0SaKqt7tanSnjCxjNFNfY9rtn1mtnCTWsXikEJlJ8ZVvZa/cFb/g0+nu8T5siQpfLSttNlhAV1AatRugAWF3SKxUJvHZKyESn7gAJvGHZKsqA1Zus2yFRsPvTXE/Veb80+Ec+GKQ8yduNSju3K5gtt4VoOiZKiQOEZpkIT9X9N7wNVQOXzX7YYbg1UXBq5eYqF266PWaxmUTWSBvEf0kQ0ubjGHmaRhOT8+FNSwg+6xc0YIFeJ+QakJq+hmp+cgNZ6KSjUOijO7wjTLhCJrKOR0U9n1iTezw3p+j6KojINmGrJyGAttfoSDT0AHZ+LUvALvEJO9HBVs3DA4jbVIRbn5yA2hKNOAlQh8vAjwex/cPcL53q5pRT+/ONCQa7BIRdR8VbL53q1rl3q1buU0JBpzNZbXpejb5T5qUNWhUCQBUVjlh1WbSqAXE0mEXxXmoizuOlDQwmpzBpVm/hdphv8WlMj8t40adF6oyMP84FH5LI9Ae8QtE7lUmPkhwrjfM1Fv9/kakyI20yI3sN6DZftCnZSLCQOFjWLUsKg13hAIGqtCOYJnXU7EdgY9PBNajNtoTtMEWorVSx3qCJtIWKtQTpJkmtifInP0JhKSK6gn/WqlvT9++7/S/vt6NJe3h8/QnhpVPHS6dyaTLs9XACK8g2IXXuQBfexXoTIqozmSUsHrne999m/n2/G8U5PekY8wLMMaIu1DNEWcmt1wBymmgAfKVrRoG1D///MP58v3H59+fX9YPyv6zF/zf/3Axf6qmpBDQoahLUd8LGIEOj53Li9fIF6+oTwMsXUiB9M/bvykRYqXethLU9TgEdV0Q3ziVJ9wKBGCyVYZmz+uIDoC7Tm64bAukVyqS/tR1QxBYI+w2eHZDn13I8hHH7vcRvwv1kVPBX3l9ZAga60XXmbbedeqK9x3CxiieWgZhYA5LVOAYDHCBWCLRzmd+oGYY5iRLfHHuZ1gwfUo3lgXnPNi9cD/Dr1DAJ9TWEPpSmTfvfoafuE8pvYQxv0X3M/gGRcJh2nI/1yTy9tzPsMT7lEJLGChtup9hWKBpviY7whn3c10yb8/9DIu8T0mrhKHSpvsZhqVA/qm23M91ybw99zMs8gLDpfne/xV7G0tHZd/fz3QzG5WtHNoir/Ajbpgc/I8cSBOdHd5SqUnx0Rs7enWCH4nfiavI0fEiesCuKYloGG197sQIkY4EkGeCvtMNFA0el3WunWm6nROR44QR9lvitGju4eTTstscmRu2UDUMHaZ2gQFvL6gtf2RiK2xzF8ZIpRqzFYNrp9gimLLM5pM6MnGKZXqJuFLRkSZ8ZrssPcvPI5vp6TRojrHZyBMVmkXuo2qRJkrZz6ZcUo+EHevciVanFA4fBmKYZtWlWBrflJZpqia1o6ncnXR2J6GKp0gYr7h+wPg8sSw9xWlZ1s6Smh4kRms1Eb0r/FU4KkiV+auaaqopU5IE8Vc1lQmXxtLQ9UYoLDqlSpjUDGglcjTSFvzV89Vh0b0UOqxl/fhb+kR/HrzRh/su7Y3jvoXXxZ5kms6Ycn6N3F+I+gczNelGoqehbXjY3xxysiWqXyJa0vpD+rKo0kUB6TFj8kpzx1uB11MX5jjyWtLqyHGZqnaITeFFzR+SdCYrA9/2dkvSKGveQ/EJL9hfpO+evPyZGDWrg8Uy5mWrTWN5Kpp1/K0npLtwdlvXjiTreK6TuPHSxXaQfKCTsQwjOI3dIVBgIEF3ScADejKgpJQnP8e8rsE41k3eBZ+wMnIMZGGzSlpucp5Bqb7z/jQo1YEEGUDVnihV7Tjm65BShWZ5BqX6kfrToFQHEmQAVfqiVKUuKlVRy28HpdqX/jQo1YEEGUBPLhfsmFJVdX7xnaE2qVTBnAZaRh50bvwxOsR+sMYr7Nnu3bF0lt7H4XjOF4y3kbj+Isx7i6Km6FqGtDBjt5g00aexn4w6BMbSRNLNUY53jB7xToEzKQahhIWxQyLpjijivedcB7l+CDiTx
Cjjb4PzMcKEKud2KB/xkSKpqmu1Tu2DAtEr0BDCtVZqJokp5XAyby+xRmlzQg81Sht2LJQ2l++0A+88duOigyn4X2eBMPl779sbtCv6RUlHYhT6sOTkWMlZKMP5rhXmKMtb022I+qrU4KgzIFyejtsSPRJ7yaV2RXfX610GqpbuT1AOFxBURRSolzsK4M52vSCC2tm+c8CVfNeDvf8cBiaV7Xl9A9m0uG8tsNQHXHsoDOTLJy7fhUblVmCNNRUw1MHup6uCkOnTyri6N3y+CExZVzgwlSyUohZPgFCymw1YlsbS7ByWfVqp1y0src5hqQxYVsNSkTuHZZ/WDXYLS7VzWBZYbDhgCa/m7RyWJXYLG7BMYdk926fEZmADliksu2f79CnnUKewVLtn+/Rp84puYdk920folmzZjdfyHVVtrZbi8zEQmscLkUovnjfy26pv6zUQWKXAJFAi+13hVHdl6K5N0/PPsScpSfcpAKmVWahWG+GLJCpnhA+I6kG/MW0vpbji8nqUVmJ1dI52OYP06clmWeHiQsYKMNtsQTAIA6HETEhDIIzFojDlIx7HUGwOBIIsCUNBlH+7nlboBdID8pAfqnVF2uAFcrNJVa+u9wEeI29Oqv1PJ50+vUm1K0IJcB83IHG1AjkC68i+Cz9viTmX9213aHyekAvsDs1s3+4Qul17n4A1+AkVZmuXRdXkR4B8Q6IhLZdIWYgpaSrc55PNcOSmURYWhdTDNMqVvixGqS9Lk2mU4eftUySDMFhazPELh1L2yY8tDJU2M87CsBQYiDWZcVaEzNvLOAuLvE+eY2GotJlxFoalhLMpOPypg/7GzfRudi9S0NymHAWTzIqaciiSiZNJeWEH9mOAfXtVQNg8q4sI//b6TrDuMdJZs3QrI31wLzRh0i/huMEFVEkRIZt318bdhVNrOaqEU/AqFOTcoJTBVHgffh/fSsjy+/hCE9eidvKFe1DuplHDKoUs9uW2E2Yh7q0tUgD3Pi8JsgKBfGUfVm+zCWxWnJrj/hrOcUtLokCP6xk4QrArsDdJNEheNm7zae3s6HvS/+13aBHKifxvRFdG71C0qJ0IbI0XPVYr56nF+R9lYDdSC1oPLY5a0HirFmo9F6YW7weRZnZIkAO1EmvgbtEck4OdE7GuRkrivf/+2TfmDBMNsExk2WyUfzW4/GD+zUH+8Ww7w64Xh8iE8ujwqs6ccpElf2C8OunCq5qWoTdckjlVNlWyqsww6qESOfQxRffoRCCm/zpUH+rdPw==</diagram></mxfile>
2110.01823/main_diagram/main_diagram.pdf ADDED
Binary file (33.7 kB).
 
2110.01823/paper_text/intro_method.md ADDED
@@ -0,0 +1,29 @@
1
+ # Introduction
2
+
3
+ Adversarial attacks are designed to expose vulnerabilities of Deep Neural Networks (DNNs). With real-world applications of video classification based on DNNs emerging [\[1–](#page-9-0)[3\]](#page-9-1), a key question that arises is "*what type of adversarial inputs can mislead, and thus render video classification networks vulnerable*?" Designing such adversarial attacks not only helps expose security flaws of DNNs, but can also potentially stimulate the design of more robust video classification models.
4
+
5
+ Adversarial attacks against image classification models have been studied in both *white-box* [\[4–](#page-9-2)[8\]](#page-9-3) and *black-box* [\[9](#page-9-4)[–13\]](#page-9-5) settings. In the white-box setting, an adversary has full access to the model under attack, including its parameters and training settings (hyper-parameters, training data, etc.) In the black-box setting, an adversary only has partial information about the victim model, such as the predicted labels of the model. In the case of video classification models, adversarial attacks in both white-box and black-box settings have garnered some interest [\[14–](#page-9-6)[22\]](#page-10-0), although the body of work here is more limited than the case of image classification models.
6
+
7
+ A common black-box attack paradigm is query-based, wherein the attacker can send queries to the victim model to collect the corresponding predicted labels, and thereby estimate the gradients needed
8
+
9
+ <sup>∗</sup>Equal contribution. Corresponding author: Shasha Li (sli057@ucr.edu)
10
+
11
+ <span id="page-1-0"></span>Table 1: **Comparison with state-of-the-art.** GEO-TRAP, compared to current black-box attack methods for videos, doesn't train a different network to craft perturbations, and parameterizes the temporal dimension of videos in searching for effective perturbation directions.
12
+
13
+ | Methods | WITHOUT training a<br>"perturbation" network | CONSIDER temporal dimension? | PARAMETERIZE temporal dimension? |
14
+ |----------------------------|----------------------------------------------|------------------------------|----------------------------------|
15
+ | PATCHATTACK[19] | × | X | X |
16
+ | HEURISTICATTACK [20] | <b>√</b> | X | X |
17
+ | SPARSEATTACK [21] | X | X | X |
18
+ | MOTION-SAMPLER ATTACK [22] | <b>√</b> | <b>√</b> | X |
19
+ | GEO-TRAP (Ours) | ✓ | <b>√</b> | <b>√</b> |
20
+
21
+ for curating the adversarial examples. Unlike static images, videos naturally include additional information from the temporal dimension. This high dimensionality (i.e., sequence of frames instead of one image) poses challenges to black-box adversarial attacks against video classification models; in particular, significantly more queries are typically needed for estimating the gradients for crafting adversarial samples [19–22]. [19] reduces the number of queries by adding perturbations on the patch level instead of at the pixel level; [20, 21] propose to add perturbations only on key pixels. [22] considers the intrinsic differences between images and videos (i.e., the temporal dimension), and proposes to use the optical-flow of clean videos as the motion prior for adversarial video generation. Similar to [22], we also explicitly consider the temporal dimension of video. However, rather than fixing the temporal search space using the motion prior of clean videos, we propose to parameterize the temporal structure of the space with geometric transformations. This results in a better structured and reduced search space, which allows us to generate successful attacks with much fewer queries in black-box settings than the state-of-the-art methods, including [22].
22
+
23
+ Contributions. In this paper, we propose a novel query-efficient black-box attack algorithm against video classification models. Due to the extra temporal dimension, generating video perturbations by searching for effective gradients remains a challenging task given the exceedingly large search space. These gradients are estimated by searching for 'directions' that maximize the probability of the victim model mis-classifying the crafted inputs. Our approach drastically reduces this large search space by defining this space with a small set of parameters that describe the geometric progression of gradients in the temporal dimension, resulting in a reduced and temporally structured search space. Conceptually, this parameterization of the temporal structure of the search space is performed using geometric transformations (e.g. affine transformations). We refer to our algorithm as Geometrically TRAnsformed Perturbations, or GEO-TRAP. Despite this surprisingly simple strategy, GEO-TRAP outperforms existing black-box video adversarial attack methods by significant margins ( $\sim 1.8\%$ improvement in attack success rate with $\sim 73.55\%$ fewer queries for targeted attacks in comparison to the state-of-the-art [22] on the Jester dataset [23]).
24
+
25
+ # Method
26
+
27
+ We have three sources of randomness in our experiments: *a*) the sampling of rframe in both GEO-TRAP and MOTION-SAMPLER ATTACK [\[22\]](#page-10-0) and the sampling of Φwarp in GEO-TRAP; *b*) direction initialization sampling in HEURISTICATTACK [\[20\]](#page-10-1); *c*) target label sampling in targeted adversarial attacks for all three methods. To account for all these three randomness, we run the targeted attack against I3D model on Jester dataset under perturbation budget ρmax = 16 for the three methods for five times. Using targeted attack strategy allows us to include the randomness of the target label sampling. We choose Jester dataset as it generally takes few queries to attack Jester dataset, thus saving testing time. We choose perturbation budget ρmax = 16 as we observe that the attacks under such budget generally take few queries. We choose I3D model because compared to C3D and SlowFast, the attack success rates against I3D are not always 100%; which is good for measuring the error bars for the attack success rates. In addition, compared to TPN, it generally takes fewer queries to launch the attack against I3D. We observe that the gradient estimated by HEURISTICATTACK [\[20\]](#page-10-1) becomes zero after a certain number of iterations, in which case, no further queries are performed (and hence resulting in a low success rate).
28
+
29
+ We report the mean, standard deviation, and standard error in Table [3](#page-14-1) and present the error bar plot (with mean and standard error) in Figure [1.](#page-15-1) GEO-TRAP, compared to other methods, requires statistically fewer number of queries while achieving statistically higher attack success rates than the baseline methods.
2110.05886/main_diagram/main_diagram.drawio ADDED
@@ -0,0 +1 @@
 
 
1
+ <mxfile host="Electron" modified="2021-04-18T05:29:09.510Z" agent="5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) draw.io/14.5.1 Chrome/89.0.4389.82 Electron/12.0.1 Safari/537.36" etag="MQRTFAXyVO9itK_qEynK" version="14.5.1" type="device"><diagram id="OUi-Y2nj0ZsdHzkgz28G" name="Page-3">7V1Xc+LKtv41U3Xuw1DqqO5Hcs6Zl11CIgeBEPHX35ZtbBACBCbIHjz7HNuAJei1vpXDHxQcraKGMummda01/AMlbfUHhf5AKFMk/t96YP3+AOEfD3SMnvb+EPh6oNjbtD4elD4enfe01mzvhaauD83eZP9BVR+PW6q595hiGPpy/2Vtfbh/14nSaR08UFSV4eGj1Z5mdt8fZVD+ejzW6nW62zsDyt+fGSnbF39cYtZVNH35/tDbh0PhPyho6Lr5/tNoFWwNrbPbnkv/vx7vgQQMVGuBYStUWPJF4+/7sUQu+ZPPj2C0xubVl05P1eF62EqnI5tB9r+aSv9TBn/B9sMtlOH848Q+Pq253h6hoc/HWsu6jPQHBZbdntkqThTVenYpeEY81jVHQ/EbED/OTEMftIL6UDfe/hpJb1/imXZvONx5PASsf+Jxl59u+0Zbhtla7RDm49NGW/qoZRpr8ZKPZxHEPvL+R+vtIx9MufziBJlsX9TdZQP4weXKB/t1Pi//dcTih49TvoCY0OG86dC0Tm6ijPcOnk7nFm8F2vrY/Dt7Q5ZfvACAyerrSfFTx/reXU9aRseC8DuYBB3mqtnTx9vLi3f7fof3128fbhrX33P3Gtc9cv2dv8Oj9+I4zKQ9foMyPOA3wKVDdpOlO3EbPziolibE48evumF29Y4+Vobhr0cD+0f59ZqUrk8+DrDfMs31h6xX5qa+f7ziVI11zfp7Aa2PX+sfl3v7JbTa+229/W3VM2vba4if61+XEL99/ZH1y/ZvLiPlTJ8baus8Ok3F6LROXe9DZFiHeZIxjNZQMXuLfV3kROWPP83pPfE5PhmK8n2GQsAmlt4/0Mdf2Xjl8224Yh9n9QDkpzIQ2GOfL246w0BfTFPfYaeHMBBxyUDyXRjIbxjKeucFE4sxZsf5S96quQ/+woDY2Oj9itcy1akj2tGAe7prqDTfLNCJoQvjTHlXYUelPTgv7S2F8sFqAN5R+h+AFaED6U8dbA0q30n4y0dNDetIrle+o9ZIN9Z/m8pMkMAyNhRj1vpr6n/bvXHrj2Vj2VX+bC4IvOjN9s2R93fxkxQ6YocG5GMV+taterBAvkIzf1kBlwnxGwtk5FIgs1sL5G+RGd0NuqaiKabyV1mKA7au0xv1horRM9fvLq/RW12OUBdC+G4ItQldyNkBQhk+BChid6Icc9Zub3xld8x+glYjLg5YdpCAtzjg2X/9RqvYXvV4p6aZNdKvtBZ/bx2wsIUlIm9fjoEMZv2741HbvUd+qGw+w2p7wQoK73TWTsEK21nPusrE+rE3egvEBayP21OFbrHMuJw+672xOgo1ddPUR+IFb/ZdQFEHnTcy7Rxw++1r5xr+Ya9j/a1pqamAMpu8BwjbvZVF3MDbLf3bR6XtI+JnS8QJMfj+K4z0J63OH2gdAe+L/8N+f744aCQKHX/An/eL38R3v3gHEbgMhPz+TjTQUaOBfCcWWKqJYL6TDAXymZB/lgoFBsXwkpVjBb0d60qKeEEnEcTjeDA+i6+WajKkTlIh/yoTykuxtUpypc46F8SbTCi1vXp+IK64FFfqZEKFzjpeWHbT/fj6e/+zrr70+wtBf9DvD/tD/mLc7++GA/5V2BLjLO+PLa3PWRafN2z9z//1FQ76l/mI+MTxYF5Pvb/aLIgnOuGQfxkP+5flyPsxRbg/Hxfnky8ECvFuuhyOhkGkG1gnVpFQMjBQwvG4lFwtC5Wi1PYP0jCx7nQGyUhXrUcL+jDRU/VkUZcypTrKhgY0LxUihYEWL5YnpUqkUq2AbqM6ygwa1cZYiQ6nKioAbazhVm0a6sar0S7uJXqTZGmYqFaHpNcYTZP9SbI6mtDeZGokN2aqCk25R+aCTKt0Lbpi/cTaTJVAplYFvN+A81hPKIJAaUjmbZ22jBVb5CTebgpdG8iN6facxNksw9tzev/gJ84pbJ1TULwsFPCr7+eUT6C3V6/C4UAxvAp0E4FyXuuqhXQvrYfjkXCyoDdjgWAxvSkv/JX6OFUIduuDTDY8ZIW3sxqJk5sk+2WY2XRIdpAJF8qNWDE8TJZBoVIZavVqZdJvRCujBuxOlHFG0moN1IoNaUfPhrt1JdaLj5KDSbY8bCi1UWKk6NPsYKIo42lyNJ0ZWclsKmieGtHlLBdeqc3YOj1OSmauDLRmDWbGCp7nBqTVHNPseHpwSlwXDJYWiIkXxGf055ahTXQ+ZBm5zpPzySgbqeYHYxqccZ7tV1gqumnQQL6owExSghUtOa9bkUhpvs4P6o2gTqJZrPuJXM60x/NqPT+v6xspyNQWb+RBkvfbSZTu5MTNeXUN+5NmkoTjjUg2uFlUWsSs97r+bl4820+q9Wl1UygjhbRZthWXR7UylqNyfVPYqMkVnSpymIX7uRI0eu0g7K3XdDmg0xqbFJd+uYLiJpkX13Kbk06OjAwabVZ6c8JpL5dnYzY1TT9Mi/s0hJgIpNdtDML1drUFY2BcEp8nOghomXFjWOI6K9ZWMTWj9sJtPWlYL9/E60tzkYqp/QSKVlmrLIRRpFvzd6bBaqOS2IwTkVJQ60pqpwUCkpTO0rm5UcBgvQmbzVkX9hP9QmtMQCeTSiXVdLY96ZmLYjtMqVYvwjQfDKSmOWdC0szbdUJBdQzW62V7BodJIybun6ywPutWm6W5lpslUnnJyFajqQIO8VG/pmSMdaS/VkaMBDuwCuZLXq0L2yRgql0VmSttHZiQzDKGKuVmpUb9lVpGWse6/UpiUFz20FybJZatgALj4k/0phUxyCYnoeyS5qvWQ4OGeGhl9HFqUCpO+5nOTPWDSUpJjBW2Em8u0kz0K7FEdrzOLkcz2shM+Hww1+hwFp5mhxW9gePNTGLcZbSxzk/nxXA5BGOJjEabvQho5GbrZjEdDg/AuixuNiwU1rNZMzlT04tRoCpARvtwkqbpXHY96wVIalyfzLVIV4nM85VuMtMHgUhjyOKyUmkntVF3WNNicyoe5ataZVMrjOrJ4aqZnKwGo3wYbKKz9oKw/KobH6wjJJaQR3oJZLuVRHyTSHX4tF9S5YZcznbz4YGWa80lqZ+XrfcVq8sjYnYa0fK82szRSknJDxNrIeI687KaobGMVotMOFcLBRTMr2aDHonW/BuLBjWeYrFJu2Co7cZYTavTDq5FNEum18tDI6wPBiaOxPzlhWSdfFnaDIxieawXZqNYNM5CMCAs++Z80myWm936RPWPQ6FIq
4PWm2pmkVI3kFSz8eYoETfWlqUwigCtiKLpaMVcRcTHG0X7IFk2N6lUJ12Jx9pKoJyo9PL+YLG0DCBjwmMsW5W4yeeg2l6n2uIdBzPBjPzGBRHFD2JFLdqv00ZDH+jiQaKs21lLVQUDuRJLlgaDZrFQkWpLEqjrc12hHX9sU6j2Y8NFfkFDcDJftJJ9rZTsjcPJQTu+8K9opUfFB+fiKsnNjBDK2Gg2XEqjfHaCp81slYwTc9iVDcGKgXgqJKci/voED8L1cr2Wi+bKlVq60o3Deqbgr/NAthtpZDrRWlFOo+DayMYrlU6FCxmZVnE1L/yoAMko1WxmZrRSTVizwBwxIw3ElvKoHB6TQm2sTpIL6Y37jUVGYyPMJsKuClQLBY41tdU1hiniL4C1YuYXU6XM8rQ9h8nwfFKa+4OwmA3IleJgOBzkcplisZ0YZ5P9ao3Mk7V4b1KReTBeKM7V5EgZV8OrcISp4XAfTCuN0aokbM24P7cJ40h2XRgmzQSN894skxe8EGCzVIuEy4NGK91OzQOrsrGu5mpaegHKlkgwGyyYiAXCHdhOdgJRVcVTrdzbrHuGTKbV+NJsa9FFiCipWDcwrNDgvDIvrhKC4tX+cDBOw0hwUF9XKdhoMIa0bEkG+rgRLZUW6opgNBwWo+Io+oOkXpsrwuiNhHPdjlKoVzWimJn4JB+kIUsXFywBEFPM2nI4WQuuKjeGgXqsEQ5BXqqVN8nUHIzMRqS8Wk8rJBfJw0qYddctkOsPhELMt1v5bldcJlKqVYaJBgpB1hsK7SOIG82YE6zPEzpXZ/V4c1wSAqg2yDbiy1Wo2e2AODBili1o/c+fyre5qbY3kcZoMMdSdRMIVLphf761iFSHzQRvLzfFRmNEN43VbNlQxR3rhbXKlVUqV9Sb43KkOwylR9KwE+qAeU2CtFjoqf5KBqvSFC/EHTKLZaeaiq7TA4xak0g93xxs9GkyWe0KrjKNRnXZMXEu1S8TP8lYyq7Q71ho6oDJQuew1cuIX0LBvsV/TVLoWGASB5drjFOthJHNEomVIum1DjM4lk3izVrVNqNFpdkvpabYJPVqP7cJ+ddJtlrUkZlaF9GgrNNYQPBeul/NzCeo2tuYbSUVyKqR1SizUHtGPTccaIHRNNYt5hpY66FY1HpjKCZtqtNyWcXlMQWN0WZk5sPDVasAFTUqNHEk3c5X5oUsNyKxUCS51jvL6VQcxhjiaq2SkHvpyNAP+7FsrmoY87w69LfrQodWBkqlUWso/YWxyK03xbaF8ELZMjcmtalFperakhyhdj7TmAohEpDjxUxBriRjA7mZrkzq6/mazQ3aSzejMdCoGF3rjCTV3Ez7KBXPpbX2KNn3G1O1aG5i4dmgi6OlTmUzmlarEyNbm8vCjSvzNqKs3S0XC2axr41LSm5qoiZNNVarVSES6ftJ1cBAsfiW+atQmyGLf4S9BMAKJ4yW1BlWlUw22uhCo6mwXj1sJEchWikmpgW1Uc1Z7hiMNLauipAZWsueJ3+Qe4nAYXLSyZEH+AaxTEfv0inI9fIuX97ly7t8eZc73uU4NQloKK2qdRjuGwVNqWu5QlQNzPoDIBWpptSUehG1gnUtulaWWmHWxKVIrbnpLPINuTJaTyINY50ONUmr3skqzUCb1mIbopDNOlQr0kz9TSRb5mJpVmmweryuKVhepXBzLgd5Kr+c1wXWYsVuqJgwC6BdGpW79aEwYP1GlEkk0adaK9ls0kYLlYyRUG50wCqLXB1ysxnbBObtmPCL65loJRJX69X0bCXuNgcpdbXJ9hZJmM9VJ7jZy2aCvaR/JQ8qgUR81Q5klXBRi+VwH2h4lsuqYfGvMRn1q9mZMtFGhca6FzPiq82KC0qWahoeBJblWLSwqVWhvFLa/rg+a6zKhXCPTCzPCApxSTbFTCLbxovWuBZKtXKMknzLbMyG6002WE3gSUBttroZBSZLmUoqVGrUTaPQrdLYaCotukk0sNSuP45zLFPJmdNVZ8BApkGn1ZFfKMRA28iHGutxL1SKG9W8Ppyti32YTZLuItdvBnqJBJ9yDWgFDY5HRd2ItPGssmbmsImKrapfT9CYNK2W1K40xJVOWs/Wmjw5X5YSqXg+NI9n6oPKgCbEfXB9uAKrEo8NMotpodrULHN7NY33s93BUojf6UxmoDGohwehKALTSXwO5pYN1vQX1plhMTtZVUqlRqRW9hdX/uka5lGpHymXTVjEaAqzSskvT2YlWqfTYCtJwk3dGILkOD7dhKYa7UWF1Z8rhyvxmVpeLom//iYcq8pmsGLjspAS4s2sC8lZsTnKoVEmO08lN4P4UCmUB/51Rt8IVzUOQuXgoGK2NqWCpUJZVrKukgagOlRKk2UnvC6Kq6iraGk4UyWsZ8vReC1Qm/QNKuHwOhTAmVIp3sHlemxSVvypNYoowVgQrXOjGAaBRF0vcdwzcuN6rSNli/oattcTGCSjtRUqqSwnKGhIZc3gg7S5ytZnoaxaKY/mEbaM5yMJrqR6wYrcGFTTi9g66G+1BJOsq5Op2U2YeYnSHpsXC0E9xgrEGEcXFWHwhqC0ycTGwxaotKJI1YNJo0jYsL9MKnHVGJdzWLe8koa/EW0UA+tApVpaNeuhbqUxawFVWddJLjHNzPxachKOVRd42bPcX9lk0rq2MlQ5GO7UmEIq+WUtkw2p7bRSj64zlXlzXS8WZwHqrwAr2ECna11vy5Z53BMepsJIIF4Ph+tGySQonV2uY3W0qKrmWuJBbnZHtWQniUilPF8M8lMFFM1Vq9iqNSEadsbypgKz/VZAbjTrpZo0aYizLSf65dR4IKcnNFwJl7OqKRmgWRG3C7X61VkjrQwycizbloxRwoz1B7l1MiPs/wCo1MK4mh6X6vHxDKZWU5X5x2ZVyyjZ2SZQQSkr4BKuQHOog36ww2h50CHLbKCykVPKutit42WAJ7P1SJAsg1RT0yN5ncgsZkl1lsbL6WqxgqvahLBeELJZPFRV2nHUXlruqx5J8lU205tM1MFgpBdy6UIsMO7rSRmVteZMrq5q64SgQlurdFoK28wTk7h1fAvLjNehOU6o+TjTl8VsMLkpF4fJVUqrqUBObzaZTnvZz8RjlUBnvsCjDs1P6sOJRTctG+3n1eYk2mTKLIXicNgrk5QhnglMi0BrJNLTUs9IKiUlmZaDqrZOqkPDOsSwHuSsbKQnxWLSksraVE/WLWzkK+MCKiTWg3DfsqQ0Y1qXG/HVutPOLTZlGC7Xq7nYrJDXe2p/kFIaRk+Lo3xoPE/SSbhQB2XVH9QXs2m3A6VGrB0pyqXSLJ+FKw4CE9VyW1CglJnoMCxJ3Vqitw5GutHCPI4Duda8OTbFX8xIL9zI84yhTdEqGCVCTBcnMTNbGaotqPjnqlyf4L5pdNL1nhybRmlFYqlac7KYD3qFhTwdDdYWS48qlseaSZlIq27ykY1lueUtC9X67wleASBecwvwyy14uQUvt+DlFpx2CxKguJqvpunKJhaO9fxqLCxwmxMGqSXQAjVLsKYVoHYE3hqDN1ZVaLRdjowq
OsZrg/fj3VW1zafj/iKuw2HfCkxnc0MBw/IqGmlDK+Q2HKxawXIhqZKUX5AxErfC7EVLU/WVrn8wM6X6sN7WCupo3mmRdF7cJwDDuUBfUdVGgr/dlVZwnejlSqJRH/iX/i5eVSNVdTjTMtF0bd6Qah2Z01VmNe+vUDTbXk2zQFL4rDHx6/FBLerXN/PYNAkXs+EwEfLncqP0ut0KCEM4Mq4uoquYf2X6/dHkZrIZoMbIrIYrUd4fwmZ51E6Yk3VlwyncWO87OJTmCjHNUoHISjSJSqpcHARUNSbljFqpmC6mI7002NS7wXV2Peng5KDbWmRb7ep8Cvq97CYcDQwnCf+EF3NMycNZO5HFbyE5f7es91MkuVozrnXqQFci9agMlGjCaJVXwpoOrnE0QMk0PFAii1LOiszFMtGEPEsErZhiPR4JWaedbQWqGt5IoRJPmPWwWUehSHHV6HMZxprIkHiirKRnQX9v0E/gVW82rmhFKViUkkpfq5YHCSsIX4EVNjY7KgNKxxx1k81SLhDuDvqjyiasr1vBAjCk9hJhdZHJm8PIqhEN+pllsqQzRpv5/RWL2carmgZbXVoRtzONer6QLgzSs7ol1XKG8HYCVUVflebKfA6NOENggds1tWgK636SQ+nwJJhu6zAdY2q7ukib+VmjaX3MQXdu5MpNRY0lSlZwV+eFmtQltDkLdJdqjFr8phKMWBa2e7F8I5SJloTTsl7lomo6rRXGUlyuTAeqPFejOJXOo/GsrYYtCgQUKwwZhGY1219kYq1uPTxa9XC7YyXf5gM+XVbmiUCql7BC/01aaYJMcGiQejxuHVlbTXfAMDmJhkCGaplNBc20vqpIzVghZWUSTB3LZj3f8I/k7lDGuXafUUZBtFYeNVZNqKajo1qaiDupVS4crYDUCajFaLqOOoSqPFQrtIKpQMMMDc3ZKBmtav1eru9fpzvlUo90WXNFa2rezE3KowJRjXA4GSH1ZC1bjmkzf8OY9BNFUOgnM+O6HMub+dHcTJj9djzeXDS7eF0YtLrxGCoWNn2zDLi66SU6o8gShnhyaaZZVqlMmpm2ojVbhVSGy43kZrommA565pQ1203LxpTXbaNULguJ1ZwYi8x0KPXTamCdt7K986xCK+Xamo/S/WiSLcdgXDZC5UZDzsRRs2GKP2/5cQ0ZCRCq5CLW5fzVIagsLRbv9sQVyiE2qwi81xcVmIlLpAgzShXNTGqGstKsk4s0uJxuNAr+NLZ823rf6C8qI6kv1AqZFFfZbLdOpsKugBEl1hhV4+2sStYq5kk8misNSUUJvxxazmeJVKJZmA8UbYQzNWyGOrNUkZZKdW4S1d+dkEm1AS0aLcKWLOxKRpYlCyg+b8BWvBdr9NXAMlQrDYqquZrXJMPsmnXSyMrJKKxGN93+YF2t5OdFlXBFzUjD6drfzi42/paplPKTbGSQyJUmlYqsKNlSOzqXY00wlaS4sOdyaBY2omG9Pe/Hl0K6VEh8lhnVKxivaqrSmZmcTFR/IizUW8xExsayHUNmMg7Ks1Uu2F/UYj2gTWK1vKabo37CqPQTi1mzEYhLLb+stUeZFLGYNF9Lzjr+dTURTVd6MNotKO1cLrcYrJK4HcVtQKa52ltaMj1dsVE8HR3gtarlltNpJYX6UbNSqnZpDOdYnobUqj5rNsv+1FitKsk2iVmyFqq8wJN0Xc2sF7KhRGOlTDuZkdvF5gJJyiA0kSua9CbvVWU10YMwayKpGJpEa2iaWzbjhqEl4SbbHs3bJSXHR+UgaVQCucBmNRoO5kVhk421kNRIZ2rd0djKXORWyXJtM5GLNK8RWbUExhobBUvDpKvTKq1nRzBlJdAbqWSOglSHr6bFXDFTXJimke6thkFLqlnwXlSWTcH92hDWQ03K2ptpqtiOrsA8t5F7sNdprPOVwqw3RlF9tTa70Ua+BGIRNVPV6NCItaOxVHFejIZKKBon/misVbCYuzhLraKDYg9Vk+Uws1yyTac9rloyzGhWobaEQprWikT4oJFhzcJSvNmLIz9s1boCzrn5ptYs1haxMcj0h1opGGjlBXdkSLc08PuBqfRVeQw7YGA0FkK2qsXBcFxP6lbsLDvvNaU01gXrpaVZIZoNSFOuDoK5AZgqSNclpbox6+PCQPG302NV2ASMrpvTeEEvIL0nTzFUYt1Go9GcN8JjNqmUtHWwMM5mBZuFmnG9MmkkYlq7UujNalir+hd1mXd62Uoymd8IL0sIU9n6/KFlveEv5BrVVHtj9IS9ktXKtczKlNIULTIZEywzyGCr/KDd781brGaEWgVDVdT0ulPXMstEvaUvA+VZe9AryXJ3DiNGXngendq6JMQM9ectp7c6jsDGAGixYVjarNIaDedoZlXOTNqo0m2ltCqb9IdlUp62hq0BylpvC3ZGKboeNFEQx62ylHkNNJcqmEWHZnQAQ+OGMJnb/eoit1HSxUqPWGyVgqxQzYwD6irQnAfW7Ww/hjqFzQrKlQES11inB0ydKasKW2/mIGSYgzIPVWebrt7tDCLdcSBbqJo5RXDwQqU53FKmoyGyYnLGOsRJdLGWiyrO1foU0OC4Fy4J4dJvrFsjmFkwhY0lgeN8o6MH0o1oQajEiuWwRrTqMm4SAXtuqrwaJP1qeiQp6ci42Wt0FrCL/IFsoO+XK4qWqFg1FnkTWs7UIjgR3lLRTPfn1WZnTNvdcrdR2Cxak1mgMtONnmAYvVgaDOeRSncQYslZsRodz4LR3lhtyW9pPUD74Xm0rS3iG7aQJlKazxMgsyGLYUiGoV6/HnpLLVbFxRNqrwqFOcJm7XQ1Rytry+r0B/Wc0VtYQYWI35IJ5bRFmXo/2Y0W05qCM/qoMhvEtUV9HE1S2iwPtWjZjCqLbnY5WjTnQrEse0L49qOlWtXUJUmRN7P4WtDF0vKz5lJLT3NaqF6aw0E4NOnnrDRpv1Yxs528rK+X/k0fGMGppZxS/Vnbsp4HzwovQOi18AI9H16wggSTb57IZ0e70txeVjp5UvbqX4dWE8fqX3SD9iHHg3JqH/pWofVXxTr7+HUnDvP+dVGb+EnynufQLUe6LbK+0zEfNgz8ymMG/MnnzB3OWfiv2PwPvP9wcOzik5n7Z6t8xAZVcTiWCD0IGo56mvbectWa9TYf0LeI9tGJKa5LAn+IJYutLqvZRwOHM9m2TV0O8x++SZNtb8eexNn2+u5QCDsQCN5LMG+54YBC6j9IoR0ikIcSwam95h0m8J8jgrNmfjpOnLpy3nHyz5JIJgc0eSxsnGrZ3mGD/lWa2DOLT8eNU2LxHTf/Lo3wk4HjZv5Ua6z5rUlg1rEPldmsp/45M3Jq26l52hb+fNk17e0zYYD3xh3x2+Xdt2ebzM9QY/vYN6fLQPvACmaj8pHpMgcXQlvoHLvQe/f9LcbUJCr6INattuozo1VIB80UkJRLKgbUebN13odqvjtcqebnA5+lA9m5ObTGXLw/rinGIGtxgvnGFz6J7D8I3x59Q/ewN4n9uW8zPELnJ5AgB37iN+jVdiTM4agXbxPmQxrDe0bEEKb
7RMKHetGJSPReRHIR5/EikQC6J5GobXIgkX3wUFVCfkgnzLcDBW9OKUfnzK4sd6bBtIZNfbk7CObtAfFEVzd6G6EMlaH14Bn1eu0gl+OTIL/mdHGO93QuIvyk1rV+ybWMnjjPtxD29wbGnB6JeXZkDPSUNseSTfjbZxC51ub0jFlwZ23u6N96wyTE4Ax7HoEKdDO57nZ8fSqc/8O4Gtm5msMruRpxHwXS5xewXRb4mIQfy+eHZuuiN5srb384buvG6Nzwu8uyAXcdE4Qltn+gDqN2KT1kE3QDx9L5dA/zfNvhVz/jRJEtfAIZODhRgh2HF98gI+h8poc5FHBwmOI6vcnMhZl4UFq9e7L3dJOIBPdP1iF66JiauhevOgRBHFSe588VM753rljCvgcebXa2pHNQN8cg7q9WpfUwSNS/Tjml47bEWLd8E+GCzLqfc9iuHmLnEQ1qy17ga81CJp250O3UpSMlL7MKfyMlMdinAGXkOlLaL4S3c8pvT0q+nKFSU5r+LTLT0Ja8A7TA389igGfNJb/Mkb2MWc46mE7+peMLCXLJfjefSXryfbsJFM3mo/CnvprsODYfWmzH1zkbTToSPNotYPlAu0vf7nhg4l6aESDs27q12z0qTPY9Tjc6whAQ9lwYXrYe4NYwhC5hCN1qgW+Kd4D2zSfCJZ/9KkcE86Vz3w9uRdj2Vrca/X7yzA9SsMX//sgCl3/k0P/+kKCq6eb/3SEl6yRp9tn5SJ/ooXwB0qGAOVZb1ybWvzuKF5uJR/DhiN4dAXTrvK6zbNkGEb0hW8AZ2fLY1SPO2HCYVP5IefRdGUKh/BgZcqzU5k2GLF4i5BYihDoU69xRhDgT2kVa/ymV+p9lS1v3iR/WNlPkI+DwsAi/12G5SLVfNF/+wEimb18nWNNl4fkhtU5T/7JaWe6TZYgFSSAljGOH6Oj9SOCis+QmQTx2SIRIhIsvJ7J9PnP7c9+GU30YIUYoA4BIDKB9PwdQHwVAPCn+kwAhhz4P8kEqQVnGgMlIEoaDU5rgyGtuT0QX1RC/kYhA8jEqywBRaJ0vtNU/CAIIwjJrrYgsYZluU2WeJaOLrppfSUZLRzPriCXOIQB4vzAPEB+BmEECZSIBCW7P37NkdGraeRAZOQ8GnReBfD7zLDJi2SdhoZCBhDmgBDhU+XmJishNEuYXUhFiH+dEnC9CkCPxfY+IiPkQZkziTAhVqwLB40R0U3v2G4kIfczqXmRUYghhQPYVI+bC6ZWRjJiwcmTCgMftm6119q+RERFh3yBMMCeEE7TdubilIvFxYcBKmGIECJeZ16noYnT2vahIJYUD+dDB+xykd+g5Sv73v7gbdTH0IYYFQAHksiDz/lZTAAVdMCRUIpRzQZzDQhpvUddF4OOfoi7wYVliBAuvXqJE2q+fh8Jzka0cO+NcEuiVHSJXnqKui0jNv0RdQnx74QNkp66gBofC5SRIwhL0OnWfGATyInUx9VGMEZchoJQyZLOBkY8SxAkjgrgypx53R9ETg0NeJC5kvjetSrCwj7H4dd+q+mlBI+zGqjooMXM9euWrt+ArScneCLRTiyRLblOV+0vuP7OTzs2qx1txrGuf7zw4yTDnU5sOvQcnQ+geKbgTvh/igCEZCEedQsoPgqKCLZEsGJwL10H4+Ps3cN2agH2AylaAQMaccVuhEOQ+CWMqAUyQzDlAtqK/W5WEQB8Wjo8sCTGNqfhuGw4j3oX4lFQcgzCyZESoDW53yPRiN3bwLfD4gai/Vgfifj8aZvK1wHLo6NkF/lsqbQf4FJ7D/UOQyn4mUoHs44ItoYxlWXiADNmQynyEykCCQDgSsvDxbSrELVLPZEkA9TGBUkIEXyEBJFsfxq2QeibGz4S3RCiWhDhBQED5EUh149PcVnNeV97jk+Q9rSmcC3hDgN8cjtwlHLchdq/AEflkmQhdJTMm8IAlm1OHfRAjIsw5ijgjErmF4hTexX4L2p7iFDLBlve/ERqhcHAgYVxYCRSJb7bwsPxmIiACZIoJ/ajSuC8W3XigN8XiviL7flv4IUohRSdR6i0sQm9hkVoRFCI8MIQJtFxxO4cKD03wrjAuseDj7XlcPgNGGE9cJkR4aQIGErbfZS9j+V0sfqMjxRk0bjx7T4PmEnfw1tDYRnV+GjSEA2dZaRZGMCNQwvvCmyCfkNxYWJXCnhQnjq+EBgY+yIUtJnSAjKkwG+13EXap8DEhE14WJrYxL7dSU7JQlRwyLBFZ6GNKD/SUkxS4laJyLBFH4Lmgc2sz7sAKnITVJX1F3wQcdgs4b7lp8D3IQCBCEDHB8bYpO/sBQ2Lzn1x7acwnCU0kISw8NCBRW0WicAYZt3w4QADBhNg+xY0Ah22x0NtafkcA5abq7QaAcmXC2Qb7yKebKU6GS6zADID7GlN44F4ImCC3oc0tZD2CRNukOHhlQMSWad/y2zPsuiOQcFNBeNvIhESxjVUZv0LXnB1A+dyYvnsVRDzF+IT4gCRxjoSWwcIx2mfgdw1EIBPGiUyBfKXFR6gPCDVkBewlJGw+e/2mJO4CsNA/kMqMSt/VQDdHDXRTsfkQRbLv1py2v27XWPxNaLjt5POYThB+CsXCRUFcuDvCJ7KluxD2IUKweBnmkrCfbI6Ka2zYEOc5leFqJLHbLL/DlIK9mSRP6BGwspZfc9CsNoA9gjAf3/3Ch8XlQPiS7JAL79ZLjBxbPh9GkfsXpzIf5TsUgfsFboj6uLRDEYcKKOJDdyKI84hM6XcMAgO2SZeYwUNmv9NEDueDddyBQodWu/Jsooz3DphO57r1hPrOqn5LAXSa/xNXkN72Tks7P/3fn4+ZxpJ1tn/byqg3XL//jbiQMpq8PYmQZbl2W8NFy+p+Pnhm/yLvc3itSwAwWdmee3+X1pNja5DhcP/p5cdhWs/j9/f59uSwZQqL9e/nbHWHv7eat/9+9GtbT3+0bO893Rtrb5xiPS/tvLW3J01DGc+s6YrbywsRsH3BUje0/bvv/vnXJJq/tjOH1sSFj7OGmH/9THZOXuvNJkPl49R747dRNp+nMtQVc/cNbYkrfuq8NauvJy2jI6DT/fPWryyk1Fx9Hw/5zh6C39455P31B2j8aHo/A8W7IU22xxuRz0mzOC2EuMWIyCNogwdo+4ZieeLcPc+JMSeFfUaMnZIpx0BvR8moNdKN9d+mMhMEsHCiGLPWX1P/234D24c4Gzdn71LtCHIe8EZncwHnRW/2YwCMuSsAw20q6EEAdhPduf1A9K1vekG05tM3/SxVcZf4O+csXDW4bNeDdXyd26Dm56x0z3iw9mnRVw67PBimztzVct3KCXURf7loaPGsq2hvDH1+Qs1s0DLV7p/dIGP1g3j28cfWKw5G5xx0w1tfJ2M9d5NZ8j4RCYVOI/e2wy/3lm3cbS7Wi7C3JyyF/OmEPTQl72NJ3qqv9j6mJ5GxAyUcmjpu0MPhHCv6J8lAJOjjeCeEZCtkkZ1mjT6QKk9saH0qVfbTPoQ+mQxP7Dx9qozabzoknD2XDE9sEX0mGbh8UkY9mypP7O18KlX2kw
10GxN7DhGeOLarrWiyojp62f6Q7A/eU1EQ5LOh4dmq4omDt55KCG4nhPxkseSYEfoHKIHoASUcpqw+kA5uPPeuPmrOZ4/02juGovVaX9U17sYMf3roB07+cQ10zP2/n9lmmy8tObmW+JADbrB905kDXLj4Lw64qW0CbUrxUAI8kv6XjYf4SB2Uur3xPumPUHuXKOejc8fIKd5ApDf8GuKsGJ9/9v7sx2/79XK7OYkLemnhtnf2o8YbXt6lZ+fet4kmCnn7LHZt1FY1eKRqkPNvpkFOxfR20yAnJYNXsiDMFndgkn0lses8iB2CMrRf6ukFew57NH8LLn3MNiWCgLObZJ2Kv7+qaSW0X2NO8en+d9eY1bjjcPh2G6qOFuS9MOtQlv4TMEuEdcPt8UIuicfAV+XfdRi2Li1h2X6ZT+EgfE/Jc5i+bKDEj8K0/DXq5UNv8u+CmrE9UMsS/12KmLoFtcfKEayQp0yP7JymEvNhcm3jFYfC/mK2CwoPScLH5MXzQX3ZZIofBeqrxtFc0rPvaXw61Av9DHwyq08LQQlhRAi3dTNSgHx09+lr9a99txL5XIvsHWy6SH88IbhxUVHK9pKPD1biw7DxYZfGI0MVlzWC/yBJC66Ss3vW0elCTU87Nw4jiE6i2SNy1iq8g5gc9UCwNWbz226OlUTb7aI6uAkFnpO6l3Wnfx+nO89otEkJfYathCnaRfHfvZb56zwgRPbDGhye9oCuQbm7IWPfwrfD9M2fgW++Cz2+b0YRfmXwkdCTiOZeRLSrzvlfmK49TJxvC0uek669aRP3D6LDQdqcSk8tX9ge+79Gh4OKHrr1fZ9EBxfJy5d/59q/oxJ4bjGCm+XIP9PD20tGn2+Q87S/ttVCZw26LTy9YtAxcsJhs+LM7AYOm0xPmHdCb/mA98y7X5uX2u9LvSKwcno2k7dhKv1QmHJ4wu+y4szXel7sJDSBJ6H5j7ZV2DrwKHayTB5pabrJJDynxfXupZKfL3UwW+/GANzGAMipGXZbx3Xr+R/OLPDQNg5xndgHye0VqlvH4PEuA7YL46fD8okdHZ8z7S6ZdncbOnxGCL3SWPPMZejPpMPWuvFKX80z95k/kQ7EHpyXnxqWeuY68qdKJXtLMnkyGp44N+GZdLCX5D03SPvMxd/P1Q0+2VPRcjc7un9Bq5friT4PAOL+ugwio+fG193s8X5xwB1VouCIJ3PAZduJflCod6+N6D3LAr/2UF9XgON+Sr+nC5m39ujZSLDHFisRAn2YsqOdBlwo2GvLlzE90T5k1Uhjz1Uzo19b/yr5ZMT3sUsBP4fdnfY/ugNUydpLd3OwHl3lZH/wcah22xToMVTTrb77sor30jK2C7pFNH3be2i7MvMR5jkYP7M8VubNJyln+2p4QKhrgEPK9wDO8BMBfmMYO23gPem+eQXGCAgD62g1hbUNezux/Rooc3K0wxAKbrJX3z4d1dhNBPxnKud93J7t6v1ELce2rnyLlx6A2sdpYLeFUF6DLoTCvrK15Vupi6sBC7iQ0fgIYK0uFfjdBcA3x6u3MyW7+d/LuPb6Sa5Pz+Zi+KIJlD1GE2/Pnn4ITQD1GE28PXn6ITRB3GM0eeIYaq/ILuKxoj182WiOw4Wv17jYNzHTP91meNHO1x2P2X3o+jJ2OO/Puh2bAT3W7rllji330usLjWXbpQ5rlu+8aQdflubxIudfyfgvvr+Y7201nV9Nrxfzvf1S+OBS9+b7y1IkXuR7+UrGl1+cfynnY5td/w3Ot1/q8Zzv7Uroh9if1KZ2n21/kidWRXvFT5M9FmMibuJ+O8s1Z63OSJzJ3nrNM5s07x3C3wnSO4TMuaxJsnzIEBppMQ07MoQ/KAcu7+byihQH9kiAfK0Ux9zGrMSt3X48bu3yTX/e6Yu33695UyVB3ARYXTP/N6yn73P+NRb/V/rr2dg5P/AZewtkkPt2SjTsGdnrIXeQg7gf5LbZhQdDzk38/AU5D0BuCxWvQI7AU5Aj8i2Kpoi9Yup+8JPlI3e6L/zcpEp+MvykC+D3MChts/1egZJ0tG358hkStkuhewHmYIchegxg3OSxXoC5tbnnLZ9K8J7PPh7lG5iRD692N9gQW8/Vo2DjJtX4gs2tYQM9BRthTZ1aBvwtCBHfUX1wcwgh6cid7gshNznLfxZCIRJmIXwHCHmrNJXAo7VWV2GGHeHkm2OGHLML74sZN/nOfxYzd1M73FuYITYu/457Iz0QM5QcudN9MfPMTOkTZ3d97cv8rHB68tQo6iI/+hqqd0MW4Ics4DSi56Fj9ehDGzG8OlbPc9CETxSSHikn+VrN7hmqeHug1YMaZDxHlYe2Y3hSghFwsKHj6VR5YkOGV4qv0MGGh6dT5YnzrL0iwYj3DPGf3y7w8xpltt7P2bJp6rHCBAjt/Mu/EaQ7uNhh9OLOpdP05zcN/LRmmZ/L+weeGru+TezwYvKjG8XoZcOIvMj7P69h5sdyPz6w8r/B/YcXezj3y94eW/+gphmvWaPyKyJIZM/FnrY9za/Gmd/TOHMggNG10hzzA4Zlbu34K5pnjt7rrplD+VXL/0Nq+WWPQY1Ap2r9T+5FN6rlfxwE5YMo/IMg+Krnf0LByzYb6BE4WeXJ21bGQ/Poqo2t9qvdDTjSYfrqMcB51fU/AzgeW60quA+fKFD+HojsMzDuByJ0mEF5DIheVf7PAJG35tcTeCLXfRVu2BFevkOR8pNw8yrtf0Jpv+yx6DFhN8SNcDceh5uvKskH4+ZV3v8MfeOxlhh62D75HeA4uDo7FiG8D4IwOrzr/fEDgFMqAYv/Uv/9kQPiLRrKHzn0/tABsszWyrQlE+xl3VZEv6cqQ//HE6Oepr3BzikrsV/Kfm2tulv4uV6C9/kXl8HogiV4NpYj1GlLMnZAELxX0TkA8AxjCPL+dMbwLgNw+dnkd4rk06H5cYziGfXzfOh0rptvp7M9qZ2HaOf9+w7rzMwdznm/pniT75fdvvzFUDdlqJ1xm89jqVtWgWs9o6WaPd2itNDs1icSRDWVj4fAozuHiG29E9nO1dk5bgg/9x3vnvhW89/hxG9Z4e1wmLsHLt+1BsM2d5Bw6GPgkJ+hAz8zH2J3O+BbFms/klsRsPlrxLHPzVFA3Kt+AgA3QZAfwa4HLTiEk0/sn3GKILZ7Ljc84V+8IBXzz13Gb5l6n3R2t/HW4971tz+97wtWsGn8bXfjwQq2NlTV7V3vtYPtbIkk8NpMcej7Won4uaZwixMGfEz+fqYfkZ2b2FcvWjfZujeeWbcGtgNv/7Gtp3uYdQvYp+07vTE2Pbf2UEhNCR2FjeSjxIYb9wsQkfjj45eGPoQP8OodcEI3ddA/VHESsN8f4OMSv0Jzni6N+3l6E3qsMoEQH0aMypwSiimzL/IWMlAGCEoII2Hd4yv1pjWn2n5l+O2k0e3h6KYA/qfCcQ+MrjWi+6TRNbrwcdaqt+pSMbMNy7Snf1yXf9su9BnR8BCo4D8CKvdmpoRsC7oxPAktJ5PxGs33OLhBT8GNSMI5ROTTDLRhRuY+xq8FIMc+w
Hc8S9u1hXH7GS/1ECZvOc/mkYO0DsrfCT0MiD82vAhvmX948FmeqEkl1mwseuA77RzzV7B3/6B97G6BRugm83Cm2e1LfF9kEe2pALuCcGglvSoT50Kwems/Dd6mSz7l3bXlMch2IXSfkjJsWxFLmPyQghjoJqXzgznXI/xIkF3/2pJ07vmR+SSMMZUQlAm1rw2TrPgGx8IrhsI75rZU662Kt+B+oSX5HCN5Z169bCrRi1ev41Uq7Y97J+zK8AqVwAleJVbT1v15ldsVAX0Mr16VerTz6nmu2zqV8AHRDK/wp605j3wNMr6YQw+07sGlbsSHlB6705358Ko02z4fHpdvl3LoD+U3W63yV1/15fx29lI347djd7ovv6HfnDm6Lqr2rOTtbxP7d8zWSraLycx7SaCtI/orkfUWHt7JyQJCPY+vu+ZrvQI6QH3UtrLdGtGJJPjdoiUBOh+C2H6Zz5t4LmGE4K8F4HWKzf2kIE/nYJG32smpfWEhQD6EDgLel8ON+uzLfaHkw9B7is5NEuhn4syHhVm+g7W/kqAJPht8ctAzXyhEZD9pyyG/HIvnSpN+v3lpFRxRK/b18WVz12UBQnsNg+uYLQE+JINPANuip9thCB5CoIvU4WsP0znGvmRbhn1ZoFP72kO3MAF0y2aqHzoKl+2nnncm0T9n5CpAT9yW4ZUdJrYqGQ8QxUVa6iUtb8gB6Ku5zEsC003G53cLTOG6HFDm+fB0kwH53TKTAu49umA3mYLXLKPLeMIrzhSTD0YUXb+EmErHdmnfvFrA4X0/ZgkxwG7C+/8sHK4ciecROFAAbgkHKB/h0JsnkR3e96PgAH85HH7KaH2vQEg6GPGLr595Bw8WRpC7YejwjZMHQei1neIFoT29YTOjvgEg+3z8+8HH/qYfBR433TM/ZruRPVv1q/ia2OvzvmFa2bo97uhpEOSD9Mj7Pg67wz96lD32Whfxa711y8yXj0bVr3BPDsOz9/RQ7G/9UYhwkYt5hf3PQeCSsD/1ARtfOU7xe2zYH99yit/PDC8TRg4p8+TtlAC/0jFWh5X36PLEdIxH6OKUJns6XYibdMzvXrLrlCZ7Pl1c5AVeZsYNhSZAXjQztgGXX+t/vYJ53wvmXb+q+SCYd7ctseJNP2dLLCA3jYU/PZz3rQjGLwwBWr1E9jDA9SuRKDwsMLvbLjHHt/4gUNw0xu1BnfIPx/TsCzq+AweAffxhq/UO3veDsPCKb/9eLDisa/wGGsADFxs/a9EkIK/Nxr8XD0z2UXLQUXoLbEg+etiqen+YWB/oGCbvjJObzCS7bIrOL2NHSmwijnDftvzi8gE75691swk7R291V5aD26XzRySxbphdvaOPleGuMN4PEX69JqXrkw9G67dMc/3BeMrc1O1M6jg472sozpHu5ctYcncXb3qqDsWbT6cjm0H2v5pK/1MGf7eO/G5Xv+MLt33tZ5ncNfd+K4AHtz7ei2ont1BAt7LpUWTjL7K5INs2q/d4sp184wfbfBP/+0OCqqab/+d6la99JgKx/jmlL76xkfV0GuaM2fv+dceUiLU9cFfTWeuLDvMhyMFCuNt21c8rn7K7rMOcfPNQrMW/43FLNZXm9rLSycP6nPK+PaztIL7d/CH3MXa4r23PtuJ3O7mLEorARULRbo7St68T0LFng13z8xleOM/RuxatFXeFWIguYboxvi1wfBAR4HkiPLEK4g4nv3XVfBghRigDgEjsc6bWB1isaWMAiCfFfxIgxEHK+KylNLKMAZORBKjkkIs/9po7kPGW+yp+Ehmt/VtUlgGi0DphaPOFBAkEaRmUgNAdWKbS4TIMjxHylssyfhQhsQ9AZh2yxDkEn30PW0ASH4GYWSt+iQQkuKWAdwn5xLE1Lsoxn0ZILPskLDQzkDAH9HPgkHfp6O1JN/ejI8Q+zok4YWTNxJe3beEfZETMhzBjEmdCtIpfHbbfeIuM3q6OviMZoY+J13BGJYYQBrYYJ+Y+4VYhGTFh78iEAc9bOk8sp34qIRERlg7CBHNCOEF0v1QDEx8XxqyEKUaAcJl5no5PLL+mksKBfOjwtd++HD1Jyf/+F/ejL4Y+xLAAKYBcFoTeL6sDUFAGCydesnaOCfIAj9PX1fz9f4q+wLefGYQOJHSg2N0I5GbOy79EIEJ8exEBZKOWTwCGQ+FDEiRhaWtQeBeAT4zseJK+mPooxojLEFBKma3DGmEfJYgTRgR5ZU697l+6Gk79L5EXMt+beiRYGLtY/LpvIP24SJCb2ccnK5tOB7clZv3bTfF95vs+q5LcDN//47Zz4lLK7yYFT5uQ57OCW7B4pNBE6BIZEQxl+LaSzjY2AkGf4E5ZRlSY+QwDmwXgfvO5j4vLAIAkRGRkX3pnhb8RFe+CiE9FZFuC4UalKdaOSPExhD3LBDDptlNpvXMMFEpcvA0khC6wgeYeNSuupknfElV7iXPXKy1+BqokT6EKUMtCI0K6Cy8ZWor+LrBCPixLjGDMoUSJbZPFPwury7b3PglWe/spHgyrrTnuAlbeqoq0epFPKCsuWJELLpShMLykaze0Ctv8hLICwEryUSzuQoTtBe07i5++qAK6mu18U/73sTd7eqcmS5au25h0GgnHe6Ed18HcQR+5L9mDngIOhD7EAUMywIxRKNTOvpgWroTQRrLwSATr04O9w26hIxxXQGUrQSNjzjiwKQNubT6mEsAEyZyDO22NB9CHucxkSXjWmIrv+59VyBDrU1JxDBICQpzQB+gkV3O9b4HJD1RZO5QI3gWWD7PTGwBPgOvYnr8P8Nv3FFJ4DvsPQiv+oWgFso8L1oSyMBEtwwrZ0Mp8hMpAgkBYfzJGwOb3u0XrmXoVYcMygVRCBGshAabvbmQ68ibO1FowYeISiiUhUhAQcH4IWi/bKnoLDXrVGl3gk+Q97WmtoLwhyO8ASeIakt5aQAiQT5aJ0FkyYwIT2DZ9H2Kf8MYIE84O4kx4W7dQoJSy/Q7jPQUq5MJ9PDpIhQQnjAtrgSLxzZakl99MBUSAZQRT+IB+Muhq7vndPLrzHQanUeWMVPi+9PB4s6XX8Mi8hcfTARbBpQxKgn+FoYkFL8MrfUEEhR3FZUIAkwUUJGy/y1792HfxeHNX0NW0c08D5xL38A7wkH8oPIRDZ1lsFk4wI1DC+0KcWNtzCRYWprAtxaFf21mKgQ9yYZcJXSBjKkxI+12EjSp8TsiE1yVgeB91JQuVySHDEpGFXrZ3oB6RBHdVWOimhWEOc940Zdb97ER5RgEY89G9Xp19OwXJPsKlry+Hulri266Ge0hrFLppidfFFHlAzTrycbBz5NDmywEfxTv0kg8rRoTTCR5JEuhmGK0nutWo5XAfnNdT+9XcDIz9Wf1qn9zwc/rVHjsd9uFlLxdQ5Ff2sbkZMvtPkfe39be5Gh/rGQLDkDjSS9ycKwj8y/reXA04vRWBn0aWn9bF5mrCpmdg9wC5+ru621wNjfQ+6H5br5qr2YXeJ8sv6zxzNSnP+1T5Zf1i5Il9naCpgBZ0jo3TsD9yQnfdkb4nC00B9zHZ
Cl0zziWBO4fgksfo+8h+z/tRheBTTWLvGbIf1CRGHxlOuSNWyKnWLsj3W7u2osa7VHlFQX51bxf6dm+Xc9PWNe0lp9O7x5jjdkxwPiG8RcP5hPA2uOSVhLB9AN+146xtYw8PrnMkq3uz1Oq3m6Z2iu4g36tcBT4J3759w7HY1Qu8TH4qLyPmeytAA5wjAu0tRwj5KBCik1IuyQhua9Av5XPB2dzqIRGK+q0Sz96Cz4R1JVNMAZSELmDcdpu7w+C7TU7Xluw4AuCzSMe7knvLez+O24UpQK2CUowpkyiwF4Pfit2JT+JIopwJ71dYqDajhgkwMGGNSFTmlptvs0Zux+2pJgpv2m0WqlfNbLFTCi9n2b+XrRM4KfKvqrP+WTbKNr7+4zjdJtfZfpTBxuiS7fJXyXViNTsdl+vCT3usWIfMqxOchcLxbYtOPm1Ip4nXFPm2hsVjqjCYCwf1Z5XCMPfFX14phWHQhYj2/qjYC07+V5a8sN8xuvlyMv620hb2xBz7cwn5y0pY2COz6fcbMXpzQv60ohf2yPS7l+j4u4pb2O8Y3XwFGX9ZOQz7HaObLyfkLyug2fZA/9L05OX0/WWlOPx391hcQd/fVYrDf3d5wRUBhd9V1LPNi73o+zvLg/jvHv18haH8u8qDIP9lo58PeCIYlKS36P6lPHE+E7c1Ts9n4vhrKPRrKPQ7Jzx8KLRPyI89zPmQjM/hzqmo6deAEXoKjK9Z0k9E4y+bJX0AOOvrqojTecBtyecCcN6aMn1G+7HbTJk+qf3e85E3nDLt/CaEs8kY4NY4XioECbDVOD4DcN+t9fqeuem6/FHyAbA/X1c47Ceh5zTQ76FwdF3uu425eQSOBPlkTgATKgpxJNlVkzOXXoxHKvkg5oxZA6OtSpl9PELsI1bYhRNMKcTSPwNHN2mS+w0QdA1Gq/56F4xUxt8H4+cwwjsYo+51o7c8Q6G2hIuCJcAY5hID1AZGa844oJLMOUMAQ3rt3E3qs6YTUoysuYJMtt1lb5C8LHFb9ONWkwVP2gFPmSO/FUuv3Q63h6Tr3Q5b8HoEkq/dDk/FpGDUB2Hy9+92uACtrpscPIbW126HrzfxhN0OSHKTcH7tdrgGkq5nyW+LIb0Cydduhw/j/vG7HSymfqaH+eN2O9wFj94aXv/a7eAKOK/dDt+Dh+vdDh6Dx2u3w1N3O2zlxB7uxGO49Z8gpfT+8wESzdbK/HOizfQDng6bBZRhrzMWv6qC8y1tEbBqkXqqMvR/PDHqaZp1G8fqqq/OV8k95C8F2wXVUcKDxIwKv0NYIEjiNlNLWGKECcFPhEa1eFw+LH/DPuSUp97FGfUJcFsV7JL4Ju5hC0XcbnSS5FSr8c4K8MUK5xoRsLC6rQpIwhE9FGPCd5SZeBoIc1wWLuTWb/MqKzgVCryzAn6xwjmpwAXsAbf0GRAOFqT7rpHQRNbkB2GqSYgL/+iwJ8VTnOCUwX7nBPTihHPV78gHhffJ6VvZg83wF9YA5sJ9hjLkAAjb57B511OMwA/tcWFBFj9+1Q2zq3f0sTIMfz1qo8fXa1K6PvlgjH7LNNcfVFLmpv7nJnOdDml6aqDIrnmdnqpD8ebT6chmkP2vptL/lMFf94sFmdvin8vM5otrz+wT8j52eB33Hm2v/yh3uq/xCaR/g6eObMVyXcJye6b6niwA4F+m23Zw0I8VBts+HI8JAydPlA7ND829x250Ote3T/ydvTGMX7xAnqy+nhM/dazvnaHeVIbbS4n39n619yc9arlcxrKX9L1zn636BG8LW3fsDuYQ7rnfMkPg5Gy8U2s2UcZXEz6ZyexQ/f1S/yzVtxmYrQgQtqX0dMI7BRxugPi4EP/KX1UZtQzlhfv3IKvAvXR0wymhkk8+jEhQqyz4kCEA9Nk7Jq7hCcfZmFcVn+xtK92h4GWn6ZF4OAY2Zc1tLp37iPf+hbAM9y9076G+W9o9Z0ntiebYG2GKEp+879KT7ZHv9r8iB5HKjnPFN8ff33LAxLkzN3RTcLpuybi/yDFJ9gAi7Oc5iUx88DCQsh0ssCfG7reD4JZTIC4hAn0SEZjkszXYscNJDttD2KPB3VxUdMuJSKclzecqyx1SoG380amR34Gk91T8u1udbTFIiiXf9mB2ZRb3Ue4gtrjvbpLLcVTwGSdAfT9Dyw40Os3/iSuI/8TtpZ2f/u/QPPyIWZPgSDG7wk77IwfSf+TQf+K7sBnFT9ugtjvfYd+COzeNt6tob6aL9dqh0mwNA4o66Lxdw8YSs0HLVLt/dssPqh8ksssFi7mY2lLV/aA3kJzM00saQ27DhNgWT8CcOIxaBttJ3w9xPi6bs3x3yu5SUrL+uaE5VKx/DjS3Zzq2BD7ghUdRnAAninMHKxrfjeAurNEXlH8iYZ0qx/aiCOrnAX2pgu1RHdEO7T8kLPTB/9T/wP9ZugEc6IST0YRnMdLFDKO0sDXl+5AHFY1r5OmMhB7KSGcD0DdgJPFzRnCTg5HxYqjvMRTzHkNdNfzmWDTLejynmOLcx2+PQAkd0AweJ/iudnBrL9xKv5wNrV1kKJ4Kvl0QZyM2lxWRa5fIYXsY6PBSd461uZkr+rJmf5HR42bQ6MuavQ1hgZPAuR9lnfyUlznryDHeNFqhQxzjfuzyCO/nlxmtD2eb7aYqD7HNVXNQ/2HT1D7aGEgOJfw3skzxgWV6pV16aOI+1CoFjsXjLy470UVid2Kt/QeHy4BuxGfswG25js+IdOZCd+czF3MtX9bw2W4F4Ma6oY80hoHklMa9nXkTfFnD9+aXh5o1YNulcld+eTOHgy9z+Fq+Id7jG+AigPayVHYtFWDT+dhpffJtDBX7/vZvhGoJfmCo1rHCFTrZxA8vdOmNZ+ar0uXmom07ZOhJlS7ODOeiEu6VG7gVxR+bG3AmuIupqi8o/0DCOhZ3v1IDHi9McMdI9ypMcGYkp+zhK2nwUxiKeY+h4HmV83KfLjYUb+M+UbqfuviG+0S3VbRPc5+QizmBL2v2Nxk9LjKVL2v2NoS9W6WLM2XPtn9fY4X8FgPWm2bqvaKyzgxy1bqef9iqIPbKgXumj6ltg8zV6WOZHNgnDzUp8F28of8yLzl0jRySyTPlEJ3Mu2xcT3Yqg3w3P2fE1MBfJ/44LobUoTKb9dRD4XO8gzgSuY9YeMzECmqbLYOpjRCuBQE6c6HbCQJHOl/mxDrS+XcQ0I6kqwlov9CdCXjoG7Z1Y6kY2gEdf/e4H5nsu+l4GwzanS16p1FPjoQ59OGaQl+9KAMxeRxlwGiGsVHQ5XagGyriXnDDVVcDl1yPKtk3g9+/Dk2IILf+HSPskRN1S4yjJ2/bI/dZv3AmZLo9j5sf/E1nxHj54IUW8LGHHb741dAte/xLr4jj6qZ1rWW94v8B</diagram></mxfile>
2110.05886/main_diagram/main_diagram.pdf ADDED
Binary file (92.3 kB).
 
2110.05886/paper_text/intro_method.md ADDED
@@ -0,0 +1,189 @@
# Introduction

Given a person of interest as the query, person re-identification (ReID) aims to retrieve the same identities from a gallery. It has been widely used in many real-world applications, such as video surveillance systems, robotics, and human-computer interaction. Due to the expensive annotation cost, recent research focuses on unsupervised person ReID, which requires no manual annotations. Moreover, ReID is generally carried out in a distributed camera network, where a wealth of auxiliary metadata (e.g. camera index, timestamp, etc.) is attached to the captured images. As shown in Figure [1,](#page-0-0) the metadata can provide auxiliary guidance for unsupervised person ReID [\[23,](#page-8-0) [24,](#page-8-1) [31,](#page-8-2) [33,](#page-8-3) [40,](#page-8-4) [43,](#page-8-5) [49,](#page-8-6) [51,](#page-8-7) [66,](#page-9-1) [68\]](#page-9-2), but how to adequately utilize such a heterogeneous structure in unsupervised ReID is still an open problem.

<span id="page-1-0"></span>![](_page_1_Picture_0.jpeg)

Figure 2: Illustration of hypergraph construction. We investigate different hyperedge construction strategies, i.e. KNN, clustering, and camera-aware clustering. Global Clustering: perform global clustering and group the instances in a cluster as a hyperedge. Intra-camera Clustering: camera information is combined with clustering to perform intra-camera clustering. Global KNN: given a vertex (i.e., a centroid), a hyperedge connects the vertex and its nearest neighbors.

Recently, unsupervised person ReID approaches [\[9,](#page-8-8) [12,](#page-8-9) [60\]](#page-9-3) follow a clustering-and-finetune pipeline, which iteratively assigns pseudo labels to the data and then trains the feature extractor with the pseudo labels. In practice, since ReID image data is generated from a distributed camera network, the correlation among images is usually diverse, heterogeneous, and complicated. The relations are therefore various, such as visual connections and camera connections, which causes unreliable pseudo labels in unsupervised person ReID. To solve this issue, a hypergraph representation [\[16\]](#page-8-10) is an appropriate tool to model the multi-modal and heterogeneous data correlation by means of a variety of hyperedges [\[10\]](#page-8-11). Due to the heterogeneous information in the metadata, the relationships among instances vary across viewpoints. To address this issue, we propose three kinds of hypergraph construction techniques: "Global Clustering", "Intra-camera Clustering", and "Global KNN". As illustrated in Figure [2,](#page-1-0) "Global Clustering" performs DBSCAN on all data and then groups the instances in a cluster as a hyperedge; this strategy aims to capture the global density-based mode in the data. "Intra-camera Clustering" considers the camera information and performs DBSCAN within an individual camera; this strategy concentrates on capturing the camera-conditioned marginal density mode. "Global KNN" builds a hyperedge by connecting one vertex with its nearest neighbors; this strategy focuses on the local neighborhood to capture smoothness locally. By merging hyperedges created by the different approaches, the hypergraph is able to perceive the camera topology and model the heterogeneous data correlation across cameras. With this hypergraph, we can generate more reliable pseudo labels in the learning process.

Given the generated pseudo labels, the ReID model can be finetuned iteratively. Several approaches based on mutual learning [\[3,](#page-8-12) [11,](#page-8-13) [44,](#page-8-14) [58\]](#page-9-4) and memory-based contrastive losses [\[43,](#page-8-5) [68\]](#page-9-2) have been proposed to learn robust feature representations with noisy labels. Although these classification losses are verified to be effective, the end-task metric, such as average precision, is not directly optimized by these approaches. To address this issue, we propose a listwise loss combined with an instance-level memory, which provides fine-grained supervision by directly optimizing average precision in an approximate manner. Besides, we combine the proposed listwise loss and a camera-aware contrastive loss as coarse-to-fine supervision for model updating.

Based on the motivation above, we propose a novel method termed Metadata Guided Hypergraph (MGH) to mutually guide the processes of label refinement and feature learning. Specifically, we construct a heterogeneous hypergraph based on the metadata to generate pseudo labels, and utilize memory-based coarse-to-fine supervision to update the model. To achieve this, we first generate noisy pseudo labels by clustering the visual feature representations of all unlabeled images, and then utilize the hypergraph for label refinement. Specifically, we construct the hyperedges based on a joint similarity matrix [\[40\]](#page-8-4) that considers visual information and spatio-temporal context simultaneously. Hyperedges are grouped to generate a hypergraph, which models the complicated high-order relationships among the data. Then, we perform label propagation on the hypergraph structure, which rectifies the label errors caused by the preceding clustering algorithm. For model updating, an instance-level memory and a camera-aware prototype memory are constructed to simultaneously capture the local and global distributions. We propose a listwise loss based on the instance-level memory, which is combined with a camera-aware contrastive loss to provide coarse-to-fine supervision.

In summary, our main contributions are three-fold:

- We propose a heterogeneous hypergraph to model the complicated data correlation among the metadata, which facilitates unsupervised person ReID in real-world scenarios.
- We propose a novel unsupervised person ReID model named MGH, which consists of label generation with a hypergraph and model updating through memory-based coarse-to-fine supervision.
- On three public person ReID benchmarks with readily available metadata, i.e. camera index and timestamp, our proposed method outperforms the state-of-the-art approaches.

# Method

Unsupervised person ReID aims to adapt a pretrained model to the target domain, which contains plenty of images without annotations but rich metadata such as camera indices and timestamps. In this paper, we apply metadata including the camera index and timestamp to improve the performance of the ReID model.

Given an unlabeled target dataset $\mathcal{D} = \{(x_i, c_i, t_i)\}_{i=1}^N$ with N images, where $x_i$ is the i-th image and $c_i$, $t_i$ are its camera index and timestamp, respectively, we adopt a hypergraph G = (V, E, w) to encode the complex relationships, where V, E, and w are the vertex set, hyperedge set, and hyperedge weights, respectively. A hyperedge is built based on the similarity matrix $\mathcal{J}$, where $\mathcal{J}(i,j)$ measures the pairwise similarity between vertices i and j. With the hypergraph, we perform label propagation to generate robust pseudo labels. Furthermore, we can fine-tune the ReID model $f_\theta$ with parameters $\theta$, initialized by the pretrained model. Table 1 lists the main notation used hereinafter.

<span id="page-2-0"></span>

| Symbol | Brief Description |
|-----------------------------------|----------------------------------------------|
| x, c, t | image, camera index, timestamp |
| $\mathcal{D}$ | dataset |
| N | number of images |
| $f_{\theta}$ | ReID model |
| $\mathcal{J}$ | joint similarity matrix |
| G, V, E | hypergraph, vertices, hyperedge |
| w, H | hyperedge weight, incidence matrix |
| $d(v), \delta(e)$ | vertex, hyperedge degree |
| $D_v, D_e$ | vertex, hyperedge degree matrix |
| W | hyperedge weight matrix |
| $\mathcal{S}_v, \mathcal{S}_{st}$ | visual and spatio-temporal similarity matrix |
| A | hypergraph adjacency matrix |
| $\Delta_t$ | time interval in histogram |
| $N_c$ | number of clusters |
| $n_{c_i,c_j}^b$ | number of image pairs with the same labels |
| $\mathcal{B}$ | histogram |
| $y, \mathcal{Y}$ | pseudo label |
| $\hat{\mathcal{Y}}$ | corrected pseudo label |
| $\mathcal{E}_r, \mathcal{E}_u$ | error matrix |
| $\mathbb{S}_r, \mathbb{S}_u$ | instance set |
| $\mathcal{Y}_{soft}$ | soft pseudo label |

Table 1: Notation table.

Metadata information is crucial for modeling the high-order heterogeneous data correlation in a distributed camera network, and such data correlation plays a vital role in distinguishing hard ReID matchings in real-world scenarios. To model the diverse, heterogeneous, and complicated correlation well, a hypergraph is an appropriate tool thanks to its flexible hyperedges. How to define the hypergraph directly affects the effectiveness of the data correlation modeling.

<span id="page-3-0"></span>![](_page_3_Figure_0.jpeg)

Figure 3: Diagram of our proposed method MGH. Our proposed method is a pseudo-label-based method, which follows the clustering-and-finetune pipeline that iteratively assigns pseudo labels to the data and then finetunes the feature extractor until training converges.

As illustrated in Figure [2,](#page-1-0) we propose three kinds of hyperedge construction approaches, termed "Global Clustering", "Intra-camera Clustering", and "Global KNN"; the different hyperedges complement each other to encode complicated high-order data correlations.

- Global Clustering: we use clustering to directly group all vertices into clusters, and connect the vertices in each cluster by a hyperedge.
- Intra-camera Clustering: using the camera information, all vertices are first separated by camera; we then perform clustering to group the vertices within each camera.
- Global KNN: we build a hyperedge by connecting one vertex with its K nearest neighbors (a construction sketch follows this list).

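To make the three constructions concrete, here is a minimal sketch in Python. It is not the authors' code: the `eps`, `min_samples`, and `k` values are illustrative placeholders, cosine distance over raw features stands in for the joint similarity $\mathcal{J}$ that the paper actually uses, and `build_hyperedges` is a hypothetical helper name.

```python
import numpy as np
from sklearn.cluster import DBSCAN
from sklearn.neighbors import NearestNeighbors

def build_hyperedges(feats, cam_ids, eps=0.5, min_samples=4, k=10):
    """Collect hyperedges (arrays of vertex indices) from the three strategies."""
    hyperedges = []

    # Global Clustering: one hyperedge per DBSCAN cluster over all vertices.
    labels = DBSCAN(eps=eps, min_samples=min_samples, metric="cosine").fit_predict(feats)
    for c in set(labels) - {-1}:          # label -1 marks DBSCAN noise points
        hyperedges.append(np.where(labels == c)[0])

    # Intra-camera Clustering: run DBSCAN separately within each camera.
    for cam in np.unique(cam_ids):
        idx = np.where(cam_ids == cam)[0]
        sub = DBSCAN(eps=eps, min_samples=min_samples, metric="cosine").fit_predict(feats[idx])
        for c in set(sub) - {-1}:
            hyperedges.append(idx[sub == c])

    # Global KNN: every vertex spawns a hyperedge with its k nearest neighbors.
    nbrs = NearestNeighbors(n_neighbors=k + 1, metric="cosine").fit(feats)
    _, nn_idx = nbrs.kneighbors(feats)    # column 0 is the vertex itself
    hyperedges.extend(list(nn_idx))

    return hyperedges
```
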
By merging hyperedges created by the different approaches, the hypergraph is able to perceive the camera topology and model the heterogeneous data correlation across cameras. Besides, the weight of each hyperedge can be measured by

$$w(e) = \sum_{i,j \in e} \exp\left(\frac{\mathcal{J}(i,j)^2}{\sigma^2}\right),\tag{1}$$

where $(i, j)$ denotes a pair of vertices in the hyperedge $e$, and $\sigma$ is the median similarity over all vertex pairs. The hypergraph can then be represented by an incidence matrix $H \in \mathbb{R}^{|V|\times |E|}$:

<span id="page-3-2"></span>
$$H(v_i, e_j) = \begin{cases} \mathcal{J}(j, i) & \text{if } v_i \in e_j \\ 0 & \text{otherwise.} \end{cases}\tag{2}$$

According to this definition, a hyperedge with a higher weight contains vertices with higher inner similarity. Besides, the vertex degree and hyperedge degree are defined as $d(v) = \sum_{e \in E} w(e)H(v,e)$ and $\delta(e) = \sum_{v \in V} H(v,e)$, respectively. The corresponding diagonal matrices of the vertex degrees, hyperedge degrees, and hyperedge weights are $D_v$, $D_e$, and $W$, respectively. We then define the hypergraph adjacency matrix as

$$A = H W D_e^{-1} H^{\top}.\tag{3}$$

With the adjacency matrix, we can perform label propagation to correct the label noise.

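As a sanity check on the linear algebra, Eqs. (1)-(3) can be assembled as below. This is a sketch under two stated assumptions: $\sigma$ is taken as the median pairwise similarity (as in the text), and the soft incidence weight of Eq. (2) is read as the similarity between each member vertex and a representative vertex of the hyperedge, which is one plausible interpretation of the $\mathcal{J}(j,i)$ entry.

```python
import numpy as np

def hypergraph_adjacency(J, hyperedges):
    """Assemble H (Eq. 2), hyperedge weights w (Eq. 1), and A (Eq. 3)."""
    n, m = len(J), len(hyperedges)
    sigma = np.median(J)                        # median similarity over vertex pairs
    H = np.zeros((n, m))
    w = np.zeros(m)
    for j, e in enumerate(hyperedges):
        rep = e[0]                              # representative vertex of e_j (assumption)
        H[e, j] = J[rep, e]                     # soft incidence weights
        sub = J[np.ix_(e, e)]
        w[j] = np.exp(sub ** 2 / sigma ** 2).sum()       # Eq. (1)
    delta = H.sum(axis=0)                       # hyperedge degrees delta(e)
    A = H @ np.diag(w) @ np.diag(1.0 / delta) @ H.T      # Eq. (3)
    return H, w, A
```
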
To construct the hyperedges, we present the calculation of the joint similarity matrix $\mathcal{J}$. As shown in Figure 3, by jointly considering the visual similarity and the spatio-temporal similarity, the joint pairwise similarity is

<span id="page-3-1"></span>
$$\mathcal{J}(i,j) = \frac{1}{1 + \lambda_0 e^{-\gamma_0 S_v(i,j)}} \cdot \frac{1}{1 + \lambda_1 e^{-\gamma_1 S_{st}(i,j)}},\tag{4}$$

where $S_v(i, j)$ and $S_{st}(i, j)$ are the visual and spatio-temporal similarities of images $x_i$ and $x_j$, respectively; $S_v(i, j)$ is the cosine similarity in our implementation.

To obtain the spatio-temporal similarity, we first perform clustering based on the features $\{f_{\theta}(x_i)\}_{i=1}^N$ and get the noisy pseudo labels $\mathcal{Y}=\{y_i\}_{i=1}^N$, where $y_i\in\mathbb{R}^{N_c}$ and $N_c$ is the number of clusters. With the pseudo labels, we can build the spatio-temporal pattern through the spatio-temporal distribution, which is represented as normalized histograms:

$$\mathcal{B}(\mathbb{I}(y_i, y_j) | c_i, c_j, b) = \frac{n_{c_i, c_j}^b}{\sum_l n_{c_i, c_j}^l}, \tag{5}$$

where b, c, and $\mathbb{I}(\cdot)$ are the index of the histogram bins, the camera index, and the indicator function, respectively, and $n^b_{c_i,c_j}$ is the number of image pairs with the same labels whose time differences fall in the interval $((b-1)\Delta_t, b\Delta_t)$. Based on this spatio-temporal distribution, we can estimate the spatio-temporal consistency of two images $(x_i, c_i, t_i)$ and $(x_j, c_j, t_j)$ as follows:

$$S_{st}(i,j) = \mathcal{B}(\mathbb{I}(y_i, y_j) | c_i, c_j, \lfloor \frac{|t_i - t_j|}{\Delta_t} \rfloor), \tag{6}$$

where $\lfloor \cdot \rfloor$ is the floor function.

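The histogram of Eqs. (5)-(6) and the fusion of Eq. (4) are straightforward to estimate once pseudo labels exist. A sketch follows; the $\lambda$ and $\gamma$ values are illustrative placeholders (this excerpt does not give their settings), and hard pseudo labels are assumed for the indicator.

```python
import numpy as np

def st_histogram(y, cams, times, n_cams, delta_t, n_bins):
    """Normalized histogram of same-label time gaps per camera pair (Eq. 5)."""
    hist = np.zeros((n_cams, n_cams, n_bins))
    n = len(y)
    for i in range(n):
        for j in range(n):
            if i != j and y[i] == y[j]:
                b = min(int(abs(times[i] - times[j]) // delta_t), n_bins - 1)
                hist[cams[i], cams[j], b] += 1
    return hist / np.maximum(hist.sum(axis=2, keepdims=True), 1)

def joint_similarity(S_v, cams, times, hist, delta_t,
                     lam=(1.0, 2.0), gamma=(5.0, 5.0)):
    """Look up S_st (Eq. 6) and fuse it with S_v (Eq. 4)."""
    n, n_bins = len(S_v), hist.shape[2]
    S_st = np.zeros((n, n))
    for i in range(n):
        for j in range(n):
            b = min(int(abs(times[i] - times[j]) // delta_t), n_bins - 1)
            S_st[i, j] = hist[cams[i], cams[j], b]
    return (1.0 / (1.0 + lam[0] * np.exp(-gamma[0] * S_v))
            * (1.0 / (1.0 + lam[1] * np.exp(-gamma[1] * S_st))))
```
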
In unsupervised person ReID, pseudo-label-based methods generally follow a pipeline that alternately assigns pseudo labels and finetunes the feature extractor until training converges. Given the extracted feature representations, the first step is to generate reliable pseudo labels, which means grouping the same identities into an individual cluster. In previous methods, several clustering algorithms such as K-Means and DBSCAN [13, 26, 36] have been explored. These methods generally perform label generation based on pairwise visual relationships, which neglects the auxiliary spatio-temporal constraints. Here we adopt a hypergraph to model the diverse and heterogeneous data structure, and then refine the noisy pseudo labels by either correcting wrong labels or smoothing noisy labels on the hypergraph.

Label propagation [20, 22, 56, 69] has been adopted to smooth label predictions. Following the intuition that label predictions are highly correlated along the edges of a graph, we perform label propagation on the hypergraph for label refinement.

Specifically, we first select 4 instances from each cluster as the most reliable instances $\mathbb{S}_r$ with reliable predictions $\mathcal{Y}_r$; the remaining unreliable instances are defined as $\mathbb{S}_u$. We can then define the error matrices $\mathcal{E}$ as follows:

$$\mathcal{E}_r = \mathcal{Y} - \mathcal{Y}_r, \quad \mathcal{E}_u = 0, \tag{7}$$

where $\mathcal{Y}_r \in \mathbb{R}^{N \times N_c}$ consists of one-hot vectors for the selected reliable predictions and zeros elsewhere, $\mathcal{Y}$ is the initial noisy label, and $\mathcal{E}_u$ is the error matrix for the unreliable instances. We then perform iterative label propagation upon the hypergraph following the scaled fixed diffusion method proposed by [15], which iteratively propagates the residual error as follows:

<span id="page-4-0"></span>
$$\mathcal{E}_{u}^{(t+1)} = \alpha_{1} [D^{-1/2}AD^{-1/2}\mathcal{E}^{(t)}]_{u}, \tag{8}$$

where $[\cdot]_u$ means selecting the unreliable entries of the error matrix. We fix the reliable prediction $\mathcal{E}_r^{(t)} = \mathcal{E}_r$ until convergence to $\hat{\mathcal{E}}$, and obtain the corrected label prediction $\hat{\mathcal{Y}}_r = \mathcal{Y}_r + s\hat{\mathcal{E}}$, where $s$ is a scale coefficient. Further, we smooth the prediction by iterative label propagation as follows:

<span id="page-4-2"></span><span id="page-4-1"></span>
$$\mathcal{Y}^{(t+1)} = (1 - \alpha_2)\mathcal{Y} + \alpha_2 D^{-1} A \mathcal{Y}^{(t)}.\tag{9}$$

The iteratively updated prediction $\mathcal{Y}^{(t)}$ converges to the final prediction.

**Algorithm 1: MGH training.**

```
Input:  Unlabeled dataset D = {(x_i, c_i, t_i)}_{i=1}^N, pretrained ReID model f_\theta,
        number of epochs Epoch, number of iterations in each epoch Iters.
Output: Finetuned ReID model \hat{f}_\theta.

1   while 1 <= i <= Epoch do
        /* Meta Hypergraph Guided Label Generation */
2       Construct the joint similarity matrix based on Eq. 4;
3       Construct the incidence matrix H based on Eq. 2;
4       Generate pseudo labels \hat{Y} based on Eq. 8 and Eq. 9;
        /* Memory-based Coarse-to-fine Supervision */
5       Construct instance-level memory M^{inst} and camera-aware prototype memory M^{cam};
6       for j = 1 : Iters do
7           Calculate the entire loss by Eq. 14;
8           Update the ReID model f_\theta;
9       end
10  end
11  Output the finetuned ReID model \hat{f}_\theta.
```

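The two propagation steps reduce to repeated matrix products. The sketch below is one reading of Eqs. (8)-(9), assuming $D$ is the diagonal degree matrix of $A$, a fixed iteration budget in place of a convergence test, and that the smoothing step is seeded with the corrected labels, a detail the excerpt leaves implicit; `refine_labels` and all hyperparameter values are illustrative.

```python
import numpy as np

def refine_labels(A, Y, reliable, alpha1=0.5, alpha2=0.9, s=1.0, iters=50):
    """Error propagation (Eq. 8) followed by label smoothing (Eq. 9)."""
    d = np.maximum(A.sum(axis=1), 1e-12)
    S = A / np.sqrt(np.outer(d, d))            # D^{-1/2} A D^{-1/2}

    Y_r = np.where(reliable[:, None], Y, 0.0)  # one-hot only on reliable rows
    E = Y - Y_r                                # Eq. (7)
    E[~reliable] = 0.0
    for _ in range(iters):                     # Eq. (8): E_r stays fixed
        E_new = alpha1 * (S @ E)
        E[~reliable] = E_new[~reliable]
    Y_hat = Y_r + s * E                        # corrected prediction

    P = A / d[:, None]                         # D^{-1} A
    Y_s = Y_hat.copy()
    for _ in range(iters):                     # Eq. (9)
        Y_s = (1 - alpha2) * Y_hat + alpha2 * (P @ Y_s)
    return Y_s
```
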
After label correction with hypergraph label propagation, we obtain the reliable labels $\hat{\mathcal{Y}}$. We propose to utilize memory-based coarse-to-fine supervision to guide the model training. More specifically, we build two memory banks: an instance-level memory $\mathcal{M}^{inst}$ and a camera-aware prototype memory $\mathcal{M}^{cam}$, which are initialized with the extracted features every epoch and updated in a moving-average manner during training.

In the instance-level memory, all feature representations are stored and updated during training. Based on this memory bank, we adopt a listwise loss named Smooth-AP [2] to directly optimize mean average precision. Different from Smooth-AP, which treats the in-batch images as the gallery, we compute the relevance score between the query and the features in the memory bank. This lets more hard negative samples participate in the loss calculation. For a query image $x_q$, the smoothed average precision is defined as:

$$AP_{q} \approx \frac{1}{|\mathbb{S}_{P}|} \sum_{i \in \mathbb{S}_{P}} \frac{1 + \sum_{j \in \mathbb{S}_{P}} \mathcal{G}(q, j)}{1 + \sum_{j \in \mathbb{S}_{P}} \mathcal{G}(q, j) + \sum_{j \in \mathbb{S}_{N}} \mathcal{G}(q, j)},$$

$$\mathcal{G}(q, j) = \frac{1}{1 + e^{-f_{\theta}(\mathbf{x}_{q})^{\top} \mathcal{M}_{j}^{inst}}},\tag{10}$$

where $\mathbb{S}_P$ and $\mathbb{S}_N$ are the positive and negative sets for query $q$, which together consist of the 1000 instances with the highest similarity to the query. Then $L_{inst}$ is defined as:

$$L_{inst} = \frac{1}{N} \sum_{q=1}^{N} (1 - AP_q). \tag{11}$$

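A compact sketch of the memory-bank Smooth-AP of Eqs. (10)-(11): each query is scored against its 1000 most similar memory entries, and the sigmoid of Eq. (10) relaxes the counts of positives and negatives. This is not the released implementation; pseudo labels for the memory entries are assumed available, and `smooth_ap_loss` is a hypothetical helper name.

```python
import torch

def smooth_ap_loss(q_feats, q_labels, memory, mem_labels, topk=1000):
    """Listwise loss L_inst (Eq. 11) from the smoothed AP of Eq. (10)."""
    sims = q_feats @ memory.t()                 # f_theta(x_q)^T M^{inst}
    ap_sum = q_feats.new_zeros(())
    for q in range(q_feats.size(0)):
        s, idx = sims[q].topk(min(topk, sims.size(1)))
        pos = mem_labels[idx] == q_labels[q]    # split the top-k into S_P and S_N
        G = torch.sigmoid(s)                    # G(q, j) in Eq. (10)
        g_pos, g_neg = G[pos].sum(), G[~pos].sum()
        ap_sum = ap_sum + (1 + g_pos) / (1 + g_pos + g_neg)
    return 1 - ap_sum / q_feats.size(0)         # Eq. (11)
```
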
Because of the variation between different scenes, intra-camera matching is more robust than inter-camera matching, so we build a camera-aware prototype memory to store the camera-aware prototypes, which depicts the camera-aware data distribution more precisely. For instance, the same identity has a different prototype under each camera. Moreover, we adopt a weighted intra-camera loss and an inter-camera loss to keep intra-camera discrimination and encourage inter-camera matching, formulated as follows:

$$\mathcal{L}_{intra} = -\sum_{c=1}^{C} \frac{1}{N_c} \sum_{\mathbf{x}_i \in \mathbb{S}_c} \sum_{k=1}^{Z_c} \hat{\mathcal{Y}}_{soft}(i, k) \log \frac{\phi(i, k)}{\sum_{k=1}^{Z_c} \phi(i, k)},$$

$$\mathcal{L}_{inter} = -\sum_{i=1}^{N'} \frac{1}{|\mathcal{P}|} \sum_{p \in \mathcal{P}} \log \frac{\phi(i, p)}{\sum_{u \in \mathcal{P}} \phi(i, u) + \sum_{q \in Q} \phi(i, q)},\tag{12}$$

where $\phi(i,k) = \exp(f_{\theta}(\mathbf{x}_i)^{\top} \mathcal{M}_k^{cam}/\tau)$, $\mathbb{S}_c$ is the dataset of the c-th camera, $Z_c$ is the number of identities in $\mathbb{S}_c$, $\mathcal{P}$ and Q denote the index sets of positive and negative prototypes for $\mathbf{x}_i$, and $\hat{\mathcal{Y}}_{soft}$ is the soft label calculated from $\mathcal{J}$ and $\hat{\mathcal{Y}}$ as follows:

$$\hat{\mathcal{Y}}_{soft}(i, n_c) = \sum_{m \in M} \frac{\mathcal{J}(i, m)}{|M|}, \quad M = \{m \mid \mathcal{Y}(m, n_c) = 1\},\tag{13}$$

where $n_c \in \{1, 2, ..., N_c\}$ is the class index. To train the network, we formulate the entire loss function as follows:

<span id="page-5-0"></span>
$$\mathcal{L} = \lambda_1 \mathcal{L}_{intra}(X, \hat{\mathcal{Y}}) + \lambda_2 \mathcal{L}_{inter}(X, \hat{\mathcal{Y}}) + \lambda_3 \mathcal{L}_{inst}(X, \hat{\mathcal{Y}}), \tag{14}$$

where $\mathcal{L}_{intra}$, $\mathcal{L}_{inter}$, and $\mathcal{L}_{inst}$ are the weighted intra-camera loss, the inter-camera loss, and the instance loss, respectively. The overall procedure is summarized in Algorithm 1.

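For completeness, a sketch of how the camera-aware terms of Eq. (12) could be wired up and combined per Eq. (14). The prototype bookkeeping (`proto_cam`, `proto_cls`), the loss weights, and $\tau$ are assumptions for illustration, not the authors' interface; a softmax over all other-camera prototypes stands in for the $\mathcal{P} \cup Q$ normalization of the inter-camera term.

```python
import torch
import torch.nn.functional as F

def camera_losses(feats, cam_ids, y_hard, y_soft,
                  proto_mem, proto_cam, proto_cls, tau=0.05):
    """Intra- and inter-camera contrastive terms in the spirit of Eq. (12)."""
    logits = feats @ proto_mem.t() / tau            # log phi(i, k)
    l_intra = feats.new_zeros(())
    l_inter = feats.new_zeros(())
    for i in range(feats.size(0)):
        same = proto_cam == cam_ids[i]
        # Intra-camera: soft cross-entropy over this camera's prototypes.
        logp = F.log_softmax(logits[i][same], dim=0)
        l_intra = l_intra - (y_soft[i, proto_cls[same]] * logp).sum()
        # Inter-camera: positives are same-identity prototypes under other cameras.
        pos = (~same) & (proto_cls == y_hard[i])
        if pos.any():
            logp_all = F.log_softmax(logits[i][~same], dim=0)
            l_inter = l_inter - logp_all[pos[~same]].mean()
    n = feats.size(0)
    # Adding lambda_3 * L_inst (Eq. 11) with placeholder weights completes Eq. (14).
    return 1.0 * (l_intra / n) + 1.0 * (l_inter / n)
```
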
2110.14805/main_diagram/main_diagram.drawio ADDED
@@ -0,0 +1 @@
+ <mxfile host="app.diagrams.net" modified="2021-05-26T12:32:03.351Z" agent="5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.212 Safari/537.36" etag="YMKjSAv8a0JfCgtFiGVp" version="13.11.0" type="google"><diagram id="iNF8s7MMhPUeoEgm8xgT" name="Page-1">7V1fk5s4Ev80rtt9wIUk/j7OTDKXq0tqU5W7yuYpxdjYZoONFzOZcT79SYCMEMIIGwGew7O1MRIW0P3rVqu71czQw/b1n7G333yKln44g/rydYbezSCEwET4H9JyzFpsI29Yx8EyawJFw5fgl5836nnrc7D0D6UTkygKk2BfblxEu52/SEptXhxHL+XTVlFYvureW/uVhi8LL6y2fg2WySZrdUy9aP/gB+sNvTLQ854nb/FjHUfPu/x6M4hW6Sfr3np0rPwKh423jF6ypnQI9H6GHuIoSrJv29cHPyS0pWTLrvNY03u679jfJTI/QNp/PwSPm/f/fnE/fP+xM7++2F80AIxsnJ9e+OzTB0lvNzlSEqUP6ZNh9Bm6f9kEif9l7y1I7wsGBW7bJNsQHwH89ZDE0Q//IQqjGLfsoh0+7T6/hh8n/mvt7YMTUTDY/GjrJ/ERn5L/QEMoJ3wONMfOj18KtgFAYbVheGbQM70cK+vT6AW98JecZK3IZzaTj4BkX0uBHNfeEz1db00ZaJcp4+pVyri2iDAQzZGpijYCylghvvJ9gL+skxQWWcMqwk/K0sz6+zmiHdoh1Rl3+AQE969FJx3llQ6DbzMbqTw6bmauWItt0IztVRCGFNlE3E3yV8E87rHST/4ATHv2kRWIc5CroiFnPlbImKd68YFVLAig4KgSEdgXDH7+Y8IBoxWAWVYK+KKjwgXqDRcTLM7Awh4XKiSsEX+3vCN2Hz5ahN7hECzKXPBfg+RP5vs3YrfMzfzo3WtuxqQHR3qwwzf/J3vA/IocFj9Lj+jvspvzlxUTU45L+Lmi53jhN8+iiRev/XMjIjHXGT6aAj7SttgPvST4WX4KEXPzK3yOglQkc1CZsAyqGlVDB8weOh+DtVj5YTm7RgPW3DLc4mPb5YEzKlUGTpF4Isrl4JSw9VSAsyeQvTHwVOY/u2rrKsaL3Qov+VJp9GAZVNPwS52a6astWE6THDWiwflxFQPHuT3gDAYIgxdzWBHzS1EgGku1yhAtlzr3J1xkOVaow4tIbtEzfEeuUWX8yY9zjfGof/sD7BfOGmnvg9f7j3//66umaaaEwr3GoL933+t35klWGil6YibPpXrr3CqrIbPqyDEEsqSMopZ+6xS1zcEoKl7gWIMJeEa8s4paKLDAUAUvKLHcY4jR+IQn4spSbSBiiPWvxKx/PTLO8OEsMSCy5jboFRzN5DhsvD35mmA94/+KyHD3ez8O8A34Mdv+uWhs1lCvPg1TkeNlEGOaBtEOH2NDgtBDVludONqsrUqUFugjdWSWWNS20vAienFaf6kvfB+KHGOmZQLbV0JhasVwRgzxKFjFh04wLD+cuQGrHHGIPDio+KgSgxZyEGzTYCfLDkKZYOGFd2GwJixJoj3T+tF78sPP0SHIGfYUJUm0xSeEpOP+FOFkvZpZjBPdpxe7O+yzoCxZQnj0IJUhego+3iQJiebeEUrAx8VyB+YBVlarAMMqni/wFeHj0ks84orF7Qf871/P2z35rj15h2ChPQfaOjzuN8RV8YgISbXv76JE+xTFvvbHnty+9snfPWsAs2u/WyvDDzDA3HENG1mOAYFuwrLFZsG5Y7CfCpyAMbfKH5G0i8/p3r5zBeDK3OJPl3rhgSPywq9m5sNv3/H/ko1PGK3jr783e+afaj3zmF+JROSZVT15k5eLwgJjI50ReBnZBssluYxQ1ZWD4d3jLAeSMYc2+3HEeilFIAKmjTFpVoEETQ5IjirjpeuFQpfTyEVxFOesGnDOTiO2XpX7/qYR8fOMeZ5XwCDAeyvGxCGhLkaixdC1utgU6eLQO2K9h3tvX//GUeLlICSuYYWGI5ibDuPt0k2Dn/gtkw1NVeBlmXNYM9fbqiAlEnqlkIITpOQhZRlzw2YghcqIwoamUe1l3eou/n3PiBK5r5QiajchqoWSssHJS8OuSM7CyBIYjh1hSPxYotQfjnGycfQiqvVtxgS1Oo1wUa8iGxmtd6UMFQg75SVTvnMOI9kgGGfZ8qmziuNfMunI48PGcDy3scnKCLfN8W6O7ZRTJwCXIQIgd1BISERMJkgUaS9OH5BwB9UShkSgt+cg2pEjDLuuFKXeq4sjGb3Ekc7w5Xx0o2diiBycPRGjxkHRV1BNnMIo4acbNqp2iS8o4/JAUTUxnUW7X27Z3VZH4hsIq4kZBOUFYQqrlcNqCvBzs2E1Mbhabaq8fem/uWiI+daiIU3iNeZoiJhDvTsa3240RAWgxh0NEUNKtGqcoiFjgdS4oyFiRImW2lM0ZCyIuoloiC1aKZ4wJMDLImMNwUq8fvoNT+z4P3xtnfn2O/lKKKun6Fp52yA8Zr/BA3nbfdqJkEHg4Ic/fcLMSk95EAai6T7rUl92l6RzF8VbLzx1h36CEaMdMCICvH4QnUJgqeUII905yErdZElDoED6debqaWcSe7vDCg9Kh8eYpSe8RPGyfHX250UNF40jKyTum5yckOCMfjcJcXnx7DhH7jWYdZYg12C4KhM+NLfNYjUHbVTeiKGBOTILK7JItmfXdM4cGqYFDB2ZumNTJ0IpwQ2qUu0SjsNRRR1y/dS4fdMcdLPv24hSymyLGh84piilSkhIOHwnSPxfRSktCRdzh5snL5nD66KZdBhnZLsnrQ5ThzR9DizECtrcMsBZYSMHTNCLKx1TVIv5xvZ1XDpGYhenQsFF5hzajmFRv07Z7UPKJmBbD9l5N79okhVcVAO7vgS3iySkGliANwqLEsPwyrsbxvN1plQzvsMKP5N6uQBHboN6MTpRLy6vXkC/KOswoW1C2SU1Y3SjD5hB3RgWZ+3qT03TGHTLDLt4GuPG6X0aa1c/amI8FlXegLEu4zw/ELDdflkv4bac0pq6S2tqWkAbxJPBpDWV/RzASrtvJa3JbWceP4XR4gfm69I7bE4ujkrwkRbdJ5kxEYnTJIQ4JrVUMhVkyzvEGMUFW9ky5Sglfo7HgJCHHuXJq85MkMDz+EjcVFdrP4ktRvl5jeVgzebyRirde6DseNN4h5C0OrXKAwFdrhwfhqB3ZE7bkxMOZ/S/Kb5O7X3x5+f3KXs+buCkMbvjTucC+8b22QlLkNZLwWC+Ds6rrJlcVWKuxsal1Y7dQUuQ0jXRTYNnhKrRRLymUVMrGzr9lzuurzbQcUn/41TS/1jP+pG9AMSuz+TtGhbTG0DO4QIaIwPG26yPbg9aBoC3TiqFzLmEfdn5xXJHZZ20y866GegMunXc5nKzqtjpBjoDmCZU
h6mfg35NU9CxnvUjM02c3t5R92syTc7hYmymiQNrgSGd4m2LcFDZHvKAfxV7h1Rt076YOTvLyN89Hcg/H6PDQZz7XYOtLnG0WsHFQikqHG4CkniVIRCVk+CTiruDRYcetcteT9YuPC+qVavSfqHy2OiedkblwatRPq1z0lEXJnJb17XD5WiKr6rU0ex06CvsQiyaYtS9i4XsS/yGFQveNVmjfVt7Jrm9GvyoUO4lftfKBbPqUCsMlwVJ2VoCbLz0XByyjOMieOroqbnnxUl+sj0TRVsrBqH7QP6uF4nB0nck7Ye2CLb53UZ2H4qdfxrxVdViWYHrFlhlu1hfnABYNCJ3Qf4k7Od17C0DP7V55c1iiSXXOPZKz5TZ2jadlKiyh42mtiWQXKjK1HZFHptGD19dqgmnG+te91skjNRYDmIrBZ43U6qat6zUU03Nzwdpo3wGyq2rbotL9LAtJa7HhmEVeyHddsvHBkyfS5+6DuVvDl0mRCrQZXLFJQZGV7tV2IDomtQdpKW16KIdzm2LQU5HL7BtGFY1IFVUxxF6VT/52yjl3r23+3FNzYxWDlJ1xhnvCEWwiJg1OT/5vZLdWWQdVMQoa5X6HLGGHUw1qqdJ89Rl+ILrFUmXXiC3uXCwSueomiAwt4QGVi/OUb7qDDKcXpxApzInMivny5WhIbOUXq2yLQG80tzip9wlz9tW+nJaOUOTz/22G6NUIj2tbOUM9PqoNkVWZloVmABnPS0SSHRF0/LPwH+5Jlz51pEEuJeTI6cJSKJiGwqBJFpRDAGkfRz9lZfXneBUn1RBs+KoYjLEqX+sBQn6BRQcCaD87ZO/XJKSgBOe6ic6q6qAhIBRtuQAegdb2CvOg0waxKuRIqJstwopX7RVUf2agy7BRx55drn9gghUM0SlnXL83kPIbc+y5eLNnS0FgEjpcRhW9AqaJglH3A4B5Ar9DJAWruilxhfQJcoXDFw3LWfqWXnptTAaEL6Au8XUKl47Gh75u3YKFvvsvrzHT3rvxSFW3VD/z0tAdvLr4ZUpjrc/KdcgKx/H4MK2VWHtyKTDh3FEOFYoQ/ykm0/R0idn/A8=</diagram></mxfile>
2110.14805/main_diagram/main_diagram.pdf ADDED
Binary file (53.3 kB).
 
2110.14805/paper_text/intro_method.md ADDED
@@ -0,0 +1,77 @@
# Introduction

Self-supervised learning (SSL) [@noroozi2016unsupervised; @doersch2015unsupervised; @zhang2016colorful; @he2020momentum; @chen2020simple; @caron2020unsupervised; @zbontar2021barlow; @caron2021emerging] helps in learning useful representations from large unlabeled datasets that can be used as pre-trained initialization points for different downstream tasks. These pretrained models are particularly useful in settings where we have only a small labeled dataset to learn from. Recent advances have shown that such self-supervised methods are capable of learning representations that are competitive with those learned in a fully supervised manner for natural images [@wu2018unsupervised; @hjelm2018learning; @zbontar2021barlow; @caron2021emerging]. In application domains such as medical imaging, where high-quality labeled data is scarce and data modalities shift rapidly, SSL has the potential to make a significant impact.

Contrastive self-supervised learning methods [@he2020momentum; @chen2020simple; @caron2020unsupervised; @zbontar2021barlow; @caron2021emerging] are currently the state of the art in SSL. In these methods, the final layer's feature representations for positive pairs of images are brought closer and those for negative pairs are pushed away using a contrastive loss function such as InfoNCE [@oord2018representation] or NT-Xent [@chen2020simple]. A positive pair of images could be two different views of the same image, while a negative pair could be views of different images. These contrastive methods have also been applied to various medical imaging tasks such as chest x-ray interpretation [@sowrirajan2021moco; @vu2021medaug], image segmentation [@chaitanya2020contrastive], covid diagnosis [@chen2021momentum] and dermatology classification [@azizi2021big].

In this work, we extend the idea of contrastive learning further and hypothesize that ensuring closeness of representations for the intermediate layers, and not just the last one, significantly improves the learning of network parameters. Concretely, we expect the features learned this way to be more reusable for the downstream task than those from a standard contrastive learning method. We use the Momentum Contrastive (MoCo) method [@he2020momentum] as the base SSL method, and study two additional loss terms based on the closeness of feature representations between intermediate layers. The first loss function is based on mean squared error, and the second is based on the distance between the cross-correlation matrix and the identity matrix [@zbontar2021barlow].

We test our method on three diverse medical datasets: NIH chest x-ray, diabetic retinopathy, and breast cancer histopathology. We show that both proposed loss functions either outperform standard MoCo or have comparable performance for classification tasks on all three datasets. The performance gap between standard MoCo and our approach is larger when we have a small labeled dataset to fine tune our model. This suggests that the features learned via our self-supervision approach are of higher quality than those of standard MoCo. To confirm this finding, we first investigate how much the features are *reused* by the model, by comparing the similarity of the features before and after fine-tuning on the labeled dataset. Based on prior work [@neyshabur2021transferred], higher feature reuse indicates better feature quality. Additionally, we also perform layer-wise probing of the model to conclude that the features learned earlier in the model by our approach are indeed more informative than those of standard MoCo. Finally, to understand the importance of feature quality for learning from a small labeled dataset, we compare the output probability distribution of models fine tuned on 1% and 6% of labeled data versus on 100% of labeled data. The proposed method of pre-training leads to a lower Kolmogorov-Smirnov (KS) distance against the best performing model, as compared to standard MoCo. To the best of our knowledge, this is the first work to show that the proposed approach has high performance on such diverse non-natural imaging datasets. Additionally, this is the first work to investigate the feature quality of a self-supervised method by going beyond linear probing and focusing on feature reuse and on learning informative features earlier in the model.

Self-supervised learning helps to learn feature representations of data in an unsupervised manner. This is accomplished by performing a formulated task where the labels for the task are extracted from the data itself. Classic examples of handcrafted formulated tasks include solving a jigsaw puzzle [@noroozi2016unsupervised], relative patch prediction [@doersch2015unsupervised] and colorization [@zhang2016colorful]. These tasks are based on heuristics, and therefore the representations learned using them may not be useful for performing downstream tasks. Consequently, contrastive methods to learn representations in an unsupervised manner emerged. They outperformed the heuristic-task-based methods and have gradually reached a fully supervised level of performance [@caron2021emerging]. Some of the recent contrastive methods include SimCLR [@chen2020simple], MoCo [@he2020momentum], BYOL [@NEURIPS2020_f3ada80d], SwAV [@caron2020unsupervised], Barlow Twins [@zbontar2021barlow] and Dino [@caron2021emerging].

All these contrastive methods treat the backbone model as a black box and work with the final layer's feature representations only. A recent work [@gong2021fast] attempted to incorporate a contrastive loss for the intermediate layers of a model trained via MoCo, with a focus on accelerating model training. They employed partial back-propagation, by randomly choosing a layer to back-propagate from, to reduce model computation and training time. In contrast, our work focuses on investigating the impact of the intermediate loss terms on feature quality, using state-of-the-art analysis methods such as feature reuse, layer-wise probing and model output similarity. We investigate these intermediate loss functions with two distinct formulations (mean squared error, and a loss based on the cross-correlation matrix) and on three diverse medical imaging datasets. We do not employ partial back-propagation, and our formulation of the intermediate layer loss does not require storing intermediate representations for the negative pairs.

Out of the above-mentioned contrastive methods, MoCo has been widely adopted by the research community across many domains including medical imaging. The momentum contrastive method (MoCo) differs from other contrastive methods by having an additional memory bank of negative examples. The memory bank helps the model to learn even when the batch size is small, which is especially important for medical imaging tasks, because medical images are typically large (e.g. 1024 $\times$ 1024) and it becomes computationally infeasible to train with a large batch size. Increasingly, many recent works have used the momentum contrastive method with minor modifications as the self-supervised method of choice for diverse medical imaging tasks such as histopathology classification [@ciga2020self; @dehaene2020self], chest x-ray interpretation [@sowrirajan2021moco] and dermatology classification [@azizi2021big]. Given the widespread adoption of the momentum contrastive method, we focus on it in our work.

# Method

In section [3.1](#sec:std_moco){reference-type="ref" reference="sec:std_moco"}, we first briefly discuss the standard MoCo method. Then, we describe our proposed approach in section [3.2](#sec:proposed_method){reference-type="ref" reference="sec:proposed_method"}. In section [3.3](#sec:ft_n_eval){reference-type="ref" reference="sec:ft_n_eval"}, we describe the fine tuning and evaluation methodology for the self-supervised methods. Finally, in section [3.4](#sec:analysis){reference-type="ref" reference="sec:analysis"}, we describe the methods used for feature analysis, which help us quantify the quality of the representations learned by different methods.

With the help of MoCo, we want to learn feature representations for images in an unsupervised manner. We achieve this by minimizing the InfoNCE loss [@oord2018representation] $$\begin{equation}
\label{eq:info_nce}
\mathcal{L}_{InfoNCE}(x) = -\log \frac{\exp(g(\tilde{x_1}) \cdot h(\tilde{x_2})/\tau)}{\exp(g(\tilde{x_1}) \cdot h(\tilde{x_2})/\tau) + \sum_{i=0}^{K}\exp(g(\tilde{x_1}) \cdot h(\tilde{z_i})/\tau)}
\end{equation}$$

where $\tilde{x_1}$ and $\tilde{x_2}$ are different views/augmentations of the same image $x$; $\tilde{x_1}$ and $\tilde{x_2}$ are called a positive pair. There are $K$ negative pairs of $\tilde{x_1}$ and $\tilde{z_i}$, where $\tilde{z_i}$ is a random augmentation of a different image. $\tilde{z_i}$ comes either from a queue in the case of MoCo or from a minibatch in the case of SimCLR [@chen2020simple]. We minimize equation [\[eq:info_nce\]](#eq:info_nce){reference-type="ref" reference="eq:info_nce"} to optimize the parameters of the encoder $g$. $h$ is an identical copy of $g$ whose parameters are an exponential moving average of those of $g$. $\tau$ is a temperature-scaling hyper-parameter.

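In code, this is the familiar MoCo formulation: the positive logit and the queue logits are concatenated and fed to a cross-entropy whose target is the positive at index 0. A minimal sketch, assuming L2-normalized embeddings and a queue tensor holding the $K$ negatives:

```python
import torch
import torch.nn.functional as F

def moco_info_nce(q, k_pos, queue, tau=0.2):
    """InfoNCE of the equation above: q = g(x~1), k_pos = h(x~2),
    and queue holds the K momentum-encoded negatives h(z~i)."""
    l_pos = (q * k_pos).sum(dim=1, keepdim=True) / tau        # (B, 1) positive logit
    l_neg = q @ queue.t() / tau                               # (B, K) negative logits
    logits = torch.cat([l_pos, l_neg], dim=1)
    labels = torch.zeros(q.size(0), dtype=torch.long, device=q.device)
    return F.cross_entropy(logits, labels)    # -log softmax at the positive index
```
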
In our work, we extend the idea of contrastive learning and propose that the encoder should be encouraged to learn augmentation-invariant representations not only at the end of the encoder but also at intermediate layers. We make use of an additional loss function to ensure closeness of the representations for the intermediate layers. Such intermediate losses have been shown to improve the performance of deep learning models such as InceptionNet [@szegedy2015going], PSPNet [@zhao2017pyramid] and DARTS [@liu2018darts]. An intermediate loss helps to improve performance for various reasons. In @mostajabi2018regularizing, it acted as a regularizer. In InceptionNet, PSPNet and DARTS, it improved training of the model by facilitating gradient flow. In our case, we hypothesize that it facilitates learning of augmentation-invariant features early in the model, thereby contributing to learning high-quality features for the downstream tasks.

We experiment with two intermediate loss functions that ensure closeness of representations: mean squared error, and a loss function inspired by @zbontar2021barlow that minimizes the difference between the cross-correlation matrix of representations and an identity matrix. Figure [1](#fig:MoCo_schematic){reference-type="ref" reference="fig:MoCo_schematic"} includes an overview of our approach.

<figure id="fig:MoCo_schematic">
<p><img src="figures/moco_updated.png" style="width:80.0%" alt="image" /> <span id="fig:MoCo_schematic" data-label="fig:MoCo_schematic"></span></p>
<figcaption>Schematic diagram for our approach of self-supervised learning. In addition to a contrastive loss, we use an intermediate loss such as mean squared error (MSE) or the Barlow Twins loss to bring the intermediate representations of the two views of the same image closer together. The “momentum” in the figure denotes that the weights of the second encoder are an exponential moving average of the weights of the first encoder.</figcaption>
</figure>

We compute the mean squared error of the intermediate features between the two views of the same image. We extract the intermediate features and pass them through an adaptive average pooling layer to reduce the spatial variance between the two views that may be caused by augmentations such as rotations. For ResNet-50 as the backbone architecture, the spatial resolutions of the features after the average pooling layers are $16 \times 16$, $16 \times 16$, $4 \times 4$ and $4 \times 4$ for "Block 1", "Block 2", "Block 3" and "Block 4" of ResNet-50, respectively. The mean squared error is calculated between these spatially down-sampled features of the two views for each block.

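A sketch of this intermediate MSE term, assuming the four ResNet-50 block outputs are exposed as lists of feature maps; the pooled resolutions follow the ones quoted above.

```python
import torch.nn.functional as F

def intermediate_mse(blocks_q, blocks_k, out_sizes=(16, 16, 4, 4)):
    """MSE between spatially pooled block features of the two views.

    blocks_q / blocks_k: lists of feature maps from Blocks 1-4 of the two
    encoders for the two augmented views of the same image.
    """
    loss = 0.0
    for fq, fk, s in zip(blocks_q, blocks_k, out_sizes):
        pq = F.adaptive_avg_pool2d(fq, s)   # pool to reduce spatial variance across views
        pk = F.adaptive_avg_pool2d(fk, s)
        loss = loss + F.mse_loss(pq, pk)
    return loss
```
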
We took inspiration from @zbontar2021barlow for this loss function. The loss function is termed *Barlow Twins (BT)*, and is designed to bring the cross-correlation matrix of two feature representations close to an identity matrix. The loss function is defined as follows:

$$\begin{equation*}
\mathcal{L}_{BT} = \underbrace{\sum_i (1-\mathcal{C}_{ii})^2}_\text{invariance term} + ~~\lambda \underbrace{\sum_{i}\sum_{j \neq i} {\mathcal{C}_{ij}}^2}_\text{redundancy reduction term}
\label{eq:lossBarlow}
\end{equation*}$$

$$\begin{equation*}
\mathcal{C}_{ij} = \frac{\sum_b z^A_{b,i} z^B_{b,j}}{\sqrt{\sum_b {(z^A_{b,i})}^2} \sqrt{\sum_b {(z^B_{b,j})}^2}}
\label{eq:crosscorr}
\end{equation*}$$

57
+ where, $\lambda$ is a weight that trades off between the *invariance term* and *redundancy term*. Each $\mathcal{C} \in [-1,1]^{d \times d}$ is the cross-correlation matrix of two feature representations of $d$ dimension. Here, $z^A$ and $z^B$ are a batch of intermediate feature representations for two augmented versions of the same image with batch size $b$. $i$ and $j$ are feature components of $d$ dimensional feature.
58
+
59
+ The *invariance term* tries to equate the diagonal entries of the cross-correlation matrix to one, thereby making the representations invariant to augmentations. The *redundancy reduction term* tries to equate the off-diagonal entries to zero, thereby decorrelating the different components of the feature representation. This decorrelation reduces the redundancy encoded across components of the representation.
60
+
61
+ We follow @zbontar2021barlow's procedure for implementing the Barlow Twins loss function. We pass the intermediate feature representation of dimension $\mathbb{R}^{C \times H \times W}$ through global average pooling and obtain a feature vector of dimension $\mathbb{R}^{C}$. Here, $C$ is the number of channels, and $H$ and $W$ are the height and width of each channel. The resulting feature vector is passed through three linear layers, each with output dimension 2048. The first two linear layers are followed by ReLU non-linearity and batch normalization. The resulting output representations are fed to the Barlow Twins loss function.
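+
+ A minimal PyTorch sketch of this projector and of the BT loss is given below. Names are ours; the BN/ReLU ordering follows the common Barlow Twins implementation, the batch standardization stands in for the normalization in the cross-correlation equation above, and `lam=5e-3` is the value reported by @zbontar2021barlow.
+
+ ```python
+ import torch
+ import torch.nn as nn
+
+ def make_projector(c_in, dim=2048):
+     # three linear layers; the first two followed by BN and ReLU as described
+     return nn.Sequential(
+         nn.Linear(c_in, dim), nn.BatchNorm1d(dim), nn.ReLU(inplace=True),
+         nn.Linear(dim, dim), nn.BatchNorm1d(dim), nn.ReLU(inplace=True),
+         nn.Linear(dim, dim),
+     )
+
+ def barlow_twins_loss(z_a, z_b, lam=5e-3):
+     # standardize along the batch so that c below is a cross-correlation matrix
+     n = z_a.shape[0]
+     z_a = (z_a - z_a.mean(0)) / (z_a.std(0) + 1e-6)
+     z_b = (z_b - z_b.mean(0)) / (z_b.std(0) + 1e-6)
+     c = z_a.T @ z_b / n                                  # (d, d)
+     invariance = (1.0 - torch.diagonal(c)).pow(2).sum()  # diagonal -> 1
+     off = c - torch.diag(torch.diagonal(c))
+     redundancy = off.pow(2).sum()                        # off-diagonal -> 0
+     return invariance + lam * redundancy
+ ```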
62
+
63
+ Intermediate feature similarity applies only to positive pairs: for both loss functions, we do not need negative pairs to learn the representations. We therefore do not have to store the intermediate representations of negative pairs, which could be computationally expensive.
64
+
65
+ In order to evaluate the utility of the representations learned by models using our approach and the standard MoCo, we use the pre-trained models to perform downstream classification tasks. We perform two types of fine-tuning: fine-tuning only the final linear classifier (LL-fine-tuning) or fine-tuning the entire model (E2E fine-tuning). We fine-tune the model using different fractions of labeled training data, i.e., 1%, 6% and 100%. For example, if a model is fine-tuned with a 1% label fraction, it has access to only 1% of the labels of the training data that was used for self-supervised pre-training. We ensured that the class distributions of the 1% and 6% label fractions match that of the 100% label fraction training data (see Appendix [\[app_sec:class_dist\]](#app_sec:class_dist){reference-type="ref" reference="app_sec:class_dist"}). The fine-tuned model with the best validation performance is used for obtaining predictions on the test set. We report all performance measures with 95% confidence intervals, which we calculate using 1000 bootstrap replicates of the test set. We calculate the upper and lower confidence limits as $\mu \pm 1.96\sigma/\sqrt{N}$, where $\mu$ and $\sigma$ are the mean and standard deviation of the performance measure over the 1000 bootstrap replicates, and $N$ is the number of bootstrap replicates.
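+
+ The confidence-interval computation can be sketched as follows; this is a minimal NumPy version that follows the formula above verbatim, assuming NumPy arrays and a generic `metric` callable such as accuracy (names are ours).
+
+ ```python
+ import numpy as np
+
+ def bootstrap_ci(y_true, y_pred, metric, n_boot=1000, seed=0):
+     """95% CI from bootstrap replicates of the test set."""
+     rng = np.random.default_rng(seed)
+     n = len(y_true)
+     scores = np.array([
+         metric(y_true[idx], y_pred[idx])            # score on one replicate
+         for idx in (rng.integers(0, n, size=n) for _ in range(n_boot))
+     ])
+     mu, sigma = scores.mean(), scores.std()
+     half = 1.96 * sigma / np.sqrt(n_boot)           # N = number of replicates
+     return mu, mu - half, mu + half
+ ```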
66
+
67
+ We analyze the features of the models learned via self-supervised methods to understand their quality for the downstream tasks. To measure quality objectively, we investigate the re-usability of the features. Additionally, we probe the pre-trained models in a layer-wise manner to understand how informative the intermediate features are for the downstream tasks. Finally, we compute the KS distance between the distributions of output probabilities of models fine-tuned on small vs. large data. This shows the utility of different self-supervised methods when only a small labeled dataset is available for learning the downstream task.
68
+
69
+ Higher feature reuse indicates better feature quality [@neyshabur2021transferred]. To assess feature reuse in SSL, we measure the similarity of the features of the models trained by our proposed method before and after fine-tuning them on a labeled dataset. We compute the similarity of features extracted from different layers of the pre-trained and E2E-fine-tuned models. Intuitively, if two models pre-trained via self-supervised learning reach a similar performance after fine-tuning on a downstream task, the SSL method that leads to the model whose features are closer to those of the fine-tuned model has learned better, more useful features during self-supervised learning.
70
+
71
+ Following @neyshabur2021transferred, we use Centered Kernel Alignment (CKA) [@kornblith2019similarity] as the measure of feature similarity. CKA has desirable properties such as invariance to invertible linear transformations, orthogonal transformations and isotropic scaling. We use CKA with an RBF kernel and $\sigma = 0.8$ following prior work. To compute the feature similarity, we extract the intermediate features from the model, which have dimensions $\mathbb{R}^{C \times H \times W}$, where $C$ is the number of channels and $H$ and $W$ are the height and width of each channel. We pass each channel through a global average pooling layer and use the resulting vector of dimension $\mathbb{R}^{C}$ for computing feature similarity.
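+
+ A compact NumPy sketch of RBF-kernel CKA is given below. We read $\sigma = 0.8$ as the bandwidth expressed as a fraction of the median pairwise distance, following the convention of @kornblith2019similarity; treat that reading as our assumption.
+
+ ```python
+ import numpy as np
+
+ def rbf_gram(x, frac=0.8):
+     """RBF Gram matrix; bandwidth = frac * median pairwise distance."""
+     sq = (x * x).sum(1)
+     d2 = np.maximum(sq[:, None] + sq[None, :] - 2.0 * x @ x.T, 0.0)
+     sigma = frac * np.median(np.sqrt(d2)) + 1e-12
+     return np.exp(-d2 / (2.0 * sigma ** 2))
+
+ def cka_rbf(x, y, frac=0.8):
+     """CKA between pooled features x (N, C1) and y (N, C2)."""
+     n = x.shape[0]
+     h = np.eye(n) - np.ones((n, n)) / n          # centering matrix
+     kc = h @ rbf_gram(x, frac) @ h
+     lc = h @ rbf_gram(y, frac) @ h
+     hsic = (kc * lc).sum()
+     return hsic / (np.linalg.norm(kc) * np.linalg.norm(lc) + 1e-12)
+ ```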
72
+
73
+ We probe the intermediate layers of the pre-trained models to understand how informative the intermediate features are for performing the downstream tasks. We expect the intermediate features learned via our approach to be more informative than those of standard MoCo, because by bringing the intermediate features of two augmented versions of an image closer together, our approach encourages the model to learn augmentation-invariant features not only late in the network but at all levels.
74
+
75
+ **Procedure for layer-wise probing**: We extract the intermediate features from the model, which have dimensions $\mathbb{R}^{C \times H \times W}$. We pass them through a global average pooling layer and use the resulting feature vector of size $\mathbb{R}^{C}$ for performing the downstream task. The feature vector is finally passed through a linear classifier. The entire network is frozen except the final linear classifier. Since all our experiments use ResNet-50 as the backbone architecture, we extract features at the end of each ResNet block, namely \"Block 1\", \"Block 2\", \"Block 3\" and \"Block 4\".
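+
+ A minimal sketch of such a probe, assuming a torchvision-style ResNet-50 (the class name and block indexing are our own illustration; only the final linear layer is trained):
+
+ ```python
+ import torch
+ import torch.nn as nn
+ import torch.nn.functional as F
+ from torchvision.models import resnet50
+
+ class BlockProbe(nn.Module):
+     """Linear classifier on globally pooled features of one frozen ResNet block."""
+
+     def __init__(self, backbone, block=2, num_classes=10):
+         super().__init__()
+         # stem + the first `block` residual stages of a torchvision ResNet
+         stages = [backbone.conv1, backbone.bn1, backbone.relu, backbone.maxpool,
+                   backbone.layer1, backbone.layer2, backbone.layer3, backbone.layer4]
+         self.trunk = nn.Sequential(*stages[:4 + block])
+         for p in self.trunk.parameters():
+             p.requires_grad = False                      # freeze everything
+         channels = [256, 512, 1024, 2048][block - 1]     # ResNet-50 block widths
+         self.fc = nn.Linear(channels, num_classes)       # the only trained layer
+
+     def forward(self, x):
+         with torch.no_grad():
+             feats = self.trunk(x)                        # (N, C, H, W)
+         v = F.adaptive_avg_pool2d(feats, 1).flatten(1)   # global average pooling
+         return self.fc(v)
+
+ # probe = BlockProbe(resnet50(), block=2, num_classes=10)
+ ```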
76
+
77
+ In the medical domain, we typically encounter a small labeled dataset from which we want to learn a generalizable model. This is often challenging, and in such scenarios self-supervised pre-trained models can be useful. In this analysis, we investigate how close a self-supervised model fine-tuned on a small labeled dataset gets to one that utilizes a larger labeled dataset. To this end, we compare the distributions of the output probabilities of models fine-tuned on small versus large labeled data using the Kolmogorov-Smirnov (KS) distance [@massey1951kolmogorov]. The KS distance between two distributions is the greatest separation between their cumulative distribution functions (CDFs). We compute the KS distance between models trained via our SSL methods and fine-tuned on 1% and 6% of the labeled training data, and the best model fine-tuned on 100% of the labeled training data (the detailed procedure to compute the KS distance is described in Appendix [\[app_sec:ks\]](#app_sec:ks){reference-type="ref" reference="app_sec:ks"}). The KS distance depends not only on the difference in the performance of the two models but also on how similarly or differently calibrated they are [@gupta2020calibration]. Therefore, this metric goes beyond standard linear probing evaluation, as it also takes model calibration into account. A lower KS distance indicates a better method for self-supervised learning when only a small labeled dataset is available.
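+
+ For instance, with SciPy the KS distance between the output-probability distributions of two fine-tuned models can be computed as follows; the arrays here are placeholders for illustration.
+
+ ```python
+ import numpy as np
+ from scipy.stats import ks_2samp
+
+ # Output probabilities on the same test set from a model fine-tuned on a small
+ # label fraction vs. the best model fine-tuned on 100% of the labels.
+ p_small = np.random.rand(1000)   # placeholder values for illustration
+ p_large = np.random.rand(1000)
+
+ ks_stat, _ = ks_2samp(p_small, p_large)   # max separation between the two CDFs
+ print(f"KS distance: {ks_stat:.3f}")      # lower = closer to the 100%-label model
+ ```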
2111.00160/main_diagram/main_diagram.drawio ADDED
@@ -0,0 +1 @@
 
 
1
+ <mxfile host="Electron" modified="2021-10-29T02:37:09.281Z" agent="5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) draw.io/15.4.0 Chrome/91.0.4472.164 Electron/13.5.0 Safari/537.36" version="15.4.0" etag="-_yt_42fqlAq75ZDIdC8" type="device"><diagram id="wcHKSHjoD1NvAXouJQqB">7V1dc6M4Fv01rtp9SIrvj8eOnZ7Zqu2d1GR2+3EKG7VNNQYv4El6f/0KjGyQ5A5OpCsLk5maSQQGfM5FnHO5kmb2fPv6SxHtNl/yGKUzy4hfZ/ZiZlmm7Zr4f3XLj0NL4BuHhnWRxO1Op4bn5H+obSS77ZMYlb0dqzxPq2TXb1zlWYZWVa8tKor8pb/btzztn3UXrdszGqeG51WUIma3r0lcbdpv4Xb2/hUl6w05s2m0W7YR2bk9RLmJ4vylcy77cWbPizyvDr9tX+corcEjuBwO9PnM1uOFFSirhnzAOnzgryjdt9+tva7qB/myRb7PYlTvb87sh5dNUqHnXbSqt75genHbptqm7ebj92n++I6q1Yb8URX5dzTP07xojmsbzc9xCwESQ/DwLUlTsmeWZ/hUD+siihP8pRZJgRlN8gxvKvN9/ZGHOCo3xwuMilUbMPWx26+Higq9noXIPAKPIxblW1QVP/Au7Qdw1N27hw+14eo6hz9fTtx7fkh22nSYt3yPNEdtzK2PJzjRgn9pmeGzZE8sXc6S7fikpUNUYPCJclwRRDmXEGUMIOqnoGN64ggF31Y/JU4EuLbdgzY0GFwdgwWVtH0EUXekiPrKEPXGiahtKkPUHymirjJEg5EiGipDNBwnoo66JxMxI6ODVN2jyTTHCSmtn0wbENOLLJ5GmPoKMb3IkOmDKa2hQDEdqXeiVRQopiN1T7SOAsV0pP6JVlKgmI7UQdFSChTTkXooRkv5gJiO1EUxWgoQU2ukNorRUpCYjtRHMVoKEtOR+ihGS0FiOlIfxWgpSExH6qMYLQWJ6Uh9FK2ljjlNCExH6qNoLQWK6Uh9FK2lQDEdqY+itRQopiP1UbSWgsTUHqmPorUUKKYj9VG0lgLFdKQ+itFSLiCmI/VRjJaCxHSkPorRUpCYjtRHMVoKEtOR+ihGS0FiOlIfxWgpSExH6qMYLQWJKc9HeWlVY5Hjy7e8ddV8y25bB3Dvv/ucbLgrm3r8T3gHy929njaejuLU/7pzjFVVVTP/4W+fk1cU/33mL9pt7XnwZfNOvyzoFmZHKh7qM/FIp0YdcAYiRGmyrgcfrDCLCLc/1Bwnqyj91G7YJnFcn4YbZf04rC/xc7RN0prhP5ItKvFV/gu94P/+nm+jrN3lub1uU0BQ2bZFjWawwpAJK8vhjGTwBMSVw/OSH48r2/hpXNWDozBDOLC+1iHlzpOsbV0ucePvTeMj/m1bB2HLQzZFn/DoY8fSwEaffNctACSPjOmDL5N15FtoEQA56gCS74f7o+a64+nq/oDcjdG+yllhEhj1P4JQDtShLN8hCwDIt9QBJN/uigDIUweQfO8qAKBAYU8v34heS0cWKHxcyLem14IyrVogK1Id+W/9RCDkqEOIpA6uHKFAIULyvcG13Kq0cAGFWQuHQSsXUITkWwwRD1WF3b2rhT2gZQcoQlr4A0YyABZeuvINwrV094zugIRZvs0QgRCtOyARkm8RRDwQackAiZB8dX8ttyqjOwBhJqe+7kBkdAckQrdjERjxAgmzFhaBFi+QlY6eFhaB1h2gCGlhEWjdAYqQFhaB1h2gCN2ORaB1ByjMWlgEWneAIqSFRaAlAyhCt2MRGN0BWBVIROB1ByKjOyARuh2LwIgXSJi1sAiMeIFE6HYKlRjxAgmzFj6DES+QCGnhMxjxAokQz2cA1kQ/z/zFnyapisYN+DvNzebXqSwapCzasxyVZdH+AAeG+/dd/Wu53z6mabIrazB3qMD4NIgv0KH16dT01o28jFbf1w32v+2rNMkQ71ECtYaEABZdMmBOQc7U18Ii0iOZbc56J9IQ0qJGjB6XDIkQ0fNXjhAdQyEgQlqMH2FiCBIhLYwZHUOODYiQFu9u6BgCRUgLT8XEkA+IkBaeiokhSIS0GP9Bx5BrAiKkxWsXOoZAEdJCU9t+P3PhAJaZBlpoasdUh1CohaZ2XIUIaaGpnVAhQlpoatdWiJAWmtpV2FOHWmhqT2VPrYWmZp72gDnGUAtNzTztIRHSQlMzT3tIhLTQ1MzTHhIhLTQ187SHXCuHvM2+cogUdtWmoYWqZp73oBDxZDXga/d/D5+KrJjeuQt/525SVUMuGVQB8cbdNHiOBTD4/vPz4Cumgg+5szAGPlXw4ZIV4GDCj2cHz4dfucMovHt6Twx+UaK7x+0SxTXqlrFAq3y7y8ukKcKwvGhbs5Ity10/eA7nnaLsvVFm0FFm817j8Z6wYqJsgKU+1P1Q8sLBLTGnSKdDYbf0qC4kWmLGurVHuzzJqubi3YeZu8At+aHIaJ5nGT5wy8r5WqML64eKvIraJnzXivH6tnNP5jYmAolM7NDhz+TQZwuhb4DfHyF9d6L4s3An77sUf+z9h2+Te4vT0YvhkJeRAJzH+Y8iSrJomaJpLmfx/Tud6rJ9UA0xIJVDqkbzYioaPWNDDCpNYDvBPdvJy3PBA9JNhMXVfnnuXugydLgt/rl8i7I4Kr7/hg+TVPVXN+4Nt99oNa0NhU3Hs/ANhlt61EmH5vnc8xqau6NRVkVelnebqDmEkCyP1WPPdNhbMOBwF4jgzhyQB5u4G85dwGaf5HE3IEE3cTeYO8uAvO94mcOJu3dzZ0NyN6BUYOJuOHecjL087gYUMUzcDeeOM8ZMHncDckETd4O5swNI7gYkgibuBnPnWJB95gVjOyfu3uYO1N/xMiyyknf/yHb7+uNsHu7w4ibJ1uWUuhOVuqPKZziJO5MsgiM8cWcOSPmgLP5UFM2tt0qjskxWPJpI6szGLfgTn5P0uP3sXXw4FYrX6FLoOuC4nFuOtBUojarkr/7heYC1Z3iqXzV0EnLkDqfdCTlEme+LFWo/dYKdORDWV/dh54eMsj933Coq1qhijtvQeURhGMPWgMRQzQCJ6BKttxj9x1PTw5sB0Mu3MtHQ4//qKTdM6jWN907OTSPoR49jGPJYHpBCmlg+TfES9IeSHa3rO0juv5W16Uy8SJIH5JpuuLM+PswIFWSuxItJNQPmSOQ9jAxaL6v/+lgBDpbljbab6moElQ4a1NR3tsc6Onn6zbogDTZN13OWxZDW4C7nvSuvMMNyRZB4QT5sens+lEPztL4wCIlajIgKqVkGTMfngcR5SltCuivglSYlzjTJIMnvM6QhKSkfxC0ab2TO3UuCex3LeCryeL+qpvyPKAkRUvNNklWAumHEK+wUIyB4CSCQOPoUx6TmewokQYHk9AKJTKcAEkg2L88kwM
VwA+lYSor3+Np8k24+erI0H3yyUXNXWtwnm7xI4uWyZEXSU7HPmnEoUxiJDiOrn+Q2XdDuaECu7ASROUBpqrBFUbF6PrwPFTQOISQfaUnxuPKfvGzssmKedv0YMRcUXU1m9xyPnkcP54KcldG0B6Sd1Jtdn8oIQE6ga9pazCHj0yMOQCG6nUW1Anp4DijOWkxFE7gqIZI/F82w8Ymi4y5UCaoW09fQeWNQiByeax5nFxiqfNQ48ufAuRacGdUDOOW76Vxku65G9YBCpMUElYxgAYVIvr043q29qQPoEuL2bhWBJ61uQPHUwoswWgUUIk1e36qESAsbwcgMUIhGaiNoTQGabHLk2wjopwEtQEDxdAE8hxKvy8wYDQkqgMFQAiqzHAAkqPLdBLgOZBYPgMRTC+tBixxYiACth+qEDDNvMyTOWlgSRveAQqSFJWGkDChEWlgSRpiAQqTFLPuMzACFaHwGg5EZkHh68g2GiMefSg/mybcLMiACjSJA8X9lr2wgVzY0PfmmQE3OylcJ6u3YCFrdwOKshY2g1Q0sRDdUZRWqxFkLL0KbfliItPAitOqBhUjOeKzBi8E8z/zFn9bAtYim5WDED5vwHWY5GNDViHyeeYNbDuapQHdVPbprGpQjZa4xuoDAY7s2aWvA+JcN7PrYHHZvBdLUTX1sQRPXoLop0AUn/AHJgRueCsmhlgryDXrs1gUz1711KIFTIfmQUyHdNz/TswW1l9ld9ygU5MjM/lBi22U7CXmTI/kXJWGufwxofbg4KjfHyxVAkOPckyUMjlPC2JwxoaF7XL+qtzBV6AkZFerz8jiypqE4Z3swUvhKDZ4fmkyOUPUQ2H3tYIasDpUoHngZMVkTDBwsDt78JaqK5HV63IiKoZASOWZgsTEkaH4B/GeR17SehA1GYfMlj1G9x/8B</diagram></mxfile>
2111.00160/paper_text/intro_method.md ADDED
@@ -0,0 +1,82 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Introduction
2
+
3
+ Most recent NLP applications have been following the pre-train then fine-tune paradigm, where we start from a gigantic pre-trained model and fine-tune it towards downstream tasks. Conventional *fine-tuning* works by updating all of the parameters of the pre-trained model. As the size of pre-trained models grows, updating all parameters becomes less feasible in most practical scenarios, due to the expensive memory and computational requirements. For example, BERT$_\mathrm{base}$ has $110$M trainable parameters, while GPT-2 has up to $1.5$B and the largest version of GPT-3 has an astonishing $175$B trainable parameters. As such, conventional fine-tuning of the larger models could require hundreds of GPU hours. Another downside of this paradigm is that it requires storing as many parameters as the large-scale pre-trained model has, for each downstream task. This poses impediments to deployment in real-world resource-constrained environments.
4
+
5
+ One solution to address the extensive resource requirements of conventional fine-tuning is model pruning [@lecun1990optimal; @han2015deep; @ren2018admmnn; @he2017channel; @liu2017learning], where unnecessary weights are eliminated to shrink the model size. For example, [@chen2021earlybert] leverages $\ell_1$ regularization to remove insignificant attention heads and saves $35\%\sim45\%$ of training time with comparable performance. @guo2020parameter learns sparse task-specific "diff\" vectors for various downstream fine-tuning tasks, leading to great memory savings. All these studies indicate that sparsity arises naturally when fine-tuning a general-purpose pre-trained model towards some specialized downstream functionality. One potential interpretation of why sparsity arises is that different subsets of the parameters may be responsible for different downstream tasks and data domains [@sanh2020movement]. However, identifying appropriate sparse pruning masks requires burdensome training of models with full weights, which can still be unaffordable for many practitioners. For example, fine-tuning a large pre-trained language model like GPT-3 for just one step consumes at least $1.2$TB of VRAM and requires $96$ NVIDIA Tesla GPUs [@hu2021lora].
6
+
7
+ One parallel alternative is designing *parameter-efficient* fine-tuning algorithms, which aim to optimize only a small portion of weights while fixing most of the pre-trained weights during downstream task training. Pioneering works along this line, which utilize adapters [@houlsby2019parameter] or low-rank decomposition [@hu2021lora], can significantly reduce the number of trainable parameters while preserving good fine-tuning performance. Introducing only a small number of task-specific parameters for each new downstream task can substantially improve the memory and deployment efficiency of models, as it allows us to reuse the remaining unchanged/shared parameters. However, there are two major hurdles in current parameter-efficient fine-tuning: ($i$) it does not yield any inference efficiency gains, since the full pre-trained weights are still required to calculate outputs; and ($ii$) current methods assume the weight update to be either sparse [@guo2020parameter] or low-rank [@hu2021lora], yet those assumptions might be oversimplified and too restrictive to allow for effective updates.
8
+
9
+ To tackle both resource- and parameter-efficiency issues of large model fine-tuning, we explicitly draw on the prior of sparsity for *both weight updates and the final weights*, and establish a *dually sparsity-embedding efficient tuning* (**DSEE**) framework. From a pre-trained model, DSEE first adopts a sparsity-aware low-rank weight update to achieve *parameter efficiency* of the fine-tuning process; and then enforces a sparse weight structure by masking to achieve *resource efficiency* of the fine-tuned model at inference time. Our contributions can be summarized as follows:
10
+
11
+ - We propose the dually sparsity-embedding efficient tuning (DSEE), which unifies sparsity-aware weight update and sparse final weight in fine-tuning gigantic pre-trained models. It is the first attempt towards jointly optimizing both parameter efficiency of the fine-tuning process, and the resource efficiency of the fine-tuned model.
12
+
13
+ - We exploit both unstructured and structured sparse patterns in the DSEE framework. For weight updates, we find the injected sparsity prior to greatly enhance existing parameter-efficient update schemes, and to further trim down the needed amount of trainable parameters. For the final weights, we learn well-performed sparse masks from the pre-trained weights, leading to substantial computation and memory reductions at inference.
14
+
15
+ - Extensive experiments demonstrate the effectiveness of our proposal across various representative pre-trained language models, such as BERT, GPT-2, and DeBERTa, and on diverse evaluation benchmarks, such as E2E, DART, WebNLG, and GLUE. Specifically, on (E2E, DART, WebNLG), our methods achieve BLEU scores of $(69.75, 55.40, 46.66)$ with less than 1% of the total trainable parameters. On BERT, our method can save about 35% of FLOPs compared to traditional downstream fine-tuning.
16
+
17
+ # Method
18
+
19
+ In this section, we begin by describing our notations and definitions of sparsity generation and parameter-efficient fine-tuning in Section [3.1](#sec:notations){reference-type="ref" reference="sec:notations"}. Then, we introduce the (dually) sparsity-embedded efficient fine-tuning algorithms in Sections [3.2](#sec:sparsity_plus){reference-type="ref" reference="sec:sparsity_plus"} and [3.3](#sec:DSEE){reference-type="ref" reference="sec:DSEE"}.
20
+
21
+ <figure id="fig:methods" data-latex-placement="t">
22
+ <embed src="Figs/LoRA.pdf" style="width:100.0%" />
23
+ <figcaption>The overview of our proposals. The sparse masks can have unstructured or structured patterns, which leads to training and inference efficiency. During the fine-tuning, we only train decomposed matrices <span class="math inline">𝒰</span>, <span class="math inline">𝒱</span> and non-zero elements in <span class="math inline">𝒮<sub>2</sub></span>.</figcaption>
24
+ </figure>
25
+
26
+ We adopt both unstructured and structured pruning methods to produce sparsity. Both can lead to resource efficiency, including memory and computation savings.
27
+
28
+ Let $\mathcal{W}\in\mathbb{R}^{m\times n}$ denote a weight matrix. The goal of pruning is to find a binary mask $\mathcal{S}\in\{0,1\}^{\|\mathcal{W}
29
+ \|_0}$, where $\|\mathcal{W}\|_0$ is the number of parameters in $\mathcal{W}$. The mask $\mathcal{S}$ is applied to $\mathcal{W}$ and results in a sparse weight $\mathcal{W}\odot\mathcal{S}$. Unstructured pruning saves only memory cost; structured pruning also saves computational cost, since the sparse weights can be made smaller by wiping out all-zero columns or rows. However, the performance of networks after structured pruning is often inferior to that of their unstructured counterparts.
30
+
31
+ To leverage the knowledge in the pre-trained weights $\mathcal{W}$, downstream models learn a task-specific weight update $\Delta\mathcal{W}$ via fine-tuning and generate predictions with weights $\mathcal{W}+\Delta \mathcal{W}$. Since $\Delta\mathcal{W}$ has the same size as $\mathcal{W}$, learning the update matrices usually requires massive resources as the size of the pre-trained model increases. Parameter-efficient fine-tuning tries to solve this problem by using as few trainable parameters as possible to represent $\Delta\mathcal{W}$, while maintaining competitive downstream fine-tuning performance. Previous literature reaches this goal by either sparsifying the weight update matrices $\Delta\mathcal{W}$ [@guo2020parameter] or leveraging low-rank decomposed matrices to compute $\Delta\mathcal{W}$ [@hu2021lora].
32
+
33
+ ::::::: multicols
34
+ 2
35
+
36
+ :::: minipage
37
+ ::: algorithm
38
+ []{#alg:sparse_lora label="alg:sparse_lora"} Initialize $\mathcal{S}_2$ to be an empty set.\
39
+ :::
40
+ ::::
41
+
42
+ :::: minipage
43
+ ::: algorithm
44
+ []{#alg:DSEE label="alg:DSEE"} Decompose $\mathcal{W}$ into $\mathcal{U}$,$\mathcal{V}$ and $\mathcal{S}_2$\
45
+ (Re-)Initialization: $\mathcal{U}=0,\mathcal{V}\sim\mathcal{N}(0,0.02),\Omega = \textrm{indexes of non-zero elements in } \mathcal{S}_2, \mathcal{S} = 0$\
46
+ Train $\mathcal{U}, \mathcal{V}, \mathcal{S}$ with respect to $\mathcal{L}$ under the constraint of $P_{\Omega^C}
47
+ (\mathcal{S})=0$.\
48
+ Prune (1-s%) parameters in $\mathcal{W}$ globally by sorting the magnitude of $\mathcal{W}+\mathcal{U}\mathcal{V}+\mathcal{S}$, deriving the sparsity mask $\mathcal{S}_1$\
49
+ Tune $\mathcal{U}, \mathcal{V}, \mathcal{S}_2$ for $E$ epochs for recovering the performance.
50
+ :::
51
+ ::::
52
+ :::::::
53
+
54
+ A recent study [@hu2021lora] enforces a low-rank constraint on the weight update tensors $\Delta\mathcal{W}$ and obtains a satisfactory trade-off between parameter efficiency and model quality. However, as revealed experimentally by [@yu2017compressing], part of the important information in the trained weights also scatters outside the low-rank subspace, creating sparse "residuals\". Inspired by these observations, we investigate a new sparsity-aware low-rank subspace of $\Delta\mathcal{W}$, and introduce the first component of our proposal in Figure [1](#fig:methods){reference-type="ref" reference="fig:methods"}, i.e., sparsity-embedded parameter-efficient fine-tuning.
55
+
56
+ Specifically, we identify a sparse subspace $\Omega$ onto which we can project our update matrices $\Delta\mathcal{W}$. The update matrix is then decomposed into two components: a low-rank part, represented by the product of two low-rank matrices $\mathcal{U}\in \mathbb{R}^{m\times r}$ and $\mathcal{V}\in \mathbb{R}^{r\times n}$, and a sparse residual $\mathcal{S}_2=\mathcal{P}_{\Omega}(\Delta\mathcal{W})$, where $\mathcal{P}_{\Omega}(\Delta\mathcal{W}) =
57
+ \left\{
58
+ \begin{aligned}
59
+ s_{i,j}&, \quad(i,j)\in \Omega \\
60
+ 0&, \quad(i,j)\in \Omega^{\mathrm{C}}
61
+ \end{aligned}
62
+ \right.,\ i=1,2,\dots,m,\ j=1,2,\dots,n$. As illustrated in Figure [1](#fig:methods){reference-type="ref" reference="fig:methods"}, the update matrix $\Delta\mathcal{W}$ can be approximated by $\mathcal{U}\mathcal{V}+\mathcal{S}_2$, in which $\mathcal{U}$, $\mathcal{V}$, and the non-zero elements of $\mathcal{S}_2$ are learnable parameters, while $\Omega$ is fixed once determined. Compared to full fine-tuning, which has $m\times n$ individual trainable parameters, our method only has $(m+n)\times r + \mathrm{card}(\Omega)$. If $r$ is smaller than $\frac{m\times n-\mathrm{card}(\Omega)}{m + n} \lessapprox 0.5 \min\{m,n\}$, which is a loose bound, our method is capable of substantially reducing the number of trainable parameters for downstream fine-tuning. In practice, the value of $r$ is very small compared to $m$ and $n$, so the savings are considerable.
63
+
64
+ The next question is how to find an appropriate $\Omega$. Motivated by [@candes2011robust], we formulate our sparsity-aware decomposition as the following optimization problem: $$\begin{equation}
65
+ \label{eqn:decompose}
66
+ \begin{aligned}
67
+ \min_{\mathcal{U},\mathcal{V},\mathcal{S}_2} \;\; & \frac{1}{2} \| \mathcal{W} - \mathcal{U}\mathcal{V} - \mathcal{S}_2\|_{\mathbf{F}}^2 \\
68
+ \mathrm{s.t.} \;\; & \mathrm{rank}(\mathcal{U}) \leq r,\ \mathrm{rank}(\mathcal{V}) \leq r, \; \mathrm{card}(\mathcal{S}_2) \leq c,
69
+ \end{aligned}
70
+ \end{equation}$$ where $\mathrm{rank}(\cdot)$ indicates the rank and $\mathrm{card}(\cdot)$ indicates the cardinality of a matrix. Here we derive $\Omega$ from the pre-trained weights $\mathcal{W}$, which differs from previous low-rank techniques for model compression, since we perform the decomposition prior to fine-tuning and cannot access $\Delta\mathcal{W}$. In this way, we effectively assume that $\Delta\mathcal{W}$ shares a similar crucial subspace $\Omega$ with $\mathcal{W}$ [@sun2018subspace]. We use the GreBsmo algorithm [@zhou2013greedy] to solve this optimization problem. This method adopts a similar idea as random projection and can solve the optimization problem rapidly.
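+
+ For intuition, the optimization problem above can be approximated with a simple alternating scheme: a truncated SVD for the low-rank part and hard-thresholding for the sparse part. The NumPy sketch below is our own illustrative stand-in, not the GreBsmo algorithm itself.
+
+ ```python
+ import numpy as np
+
+ def sparse_lowrank_decompose(w, r=8, c=1000, iters=50):
+     """Approximate W ~ U V + S2 with rank <= r and card(S2) <= c (roughly)."""
+     s2 = np.zeros_like(w)
+     for _ in range(iters):
+         u_, sv, vt = np.linalg.svd(w - s2, full_matrices=False)
+         low = (u_[:, :r] * sv[:r]) @ vt[:r]              # best rank-r fit of W - S2
+         resid = w - low
+         thr = np.partition(np.abs(resid).ravel(), -c)[-c]
+         s2 = np.where(np.abs(resid) >= thr, resid, 0.0)  # keep ~c largest entries
+     return u_[:, :r] * sv[:r], vt[:r], s2                # U (m, r), V (r, n), S2
+ ```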
71
+
72
+ Algorithm [\[alg:sparse_lora\]](#alg:sparse_lora){reference-type="ref" reference="alg:sparse_lora"} summarizes the detailed procedure of our proposal. We first decompose the pretrained weights $\mathcal{W}$ into three matrices: $\mathcal{U}$, $\mathcal{V}$ and $\mathcal{S}_2$, where $\mathcal{U}$ and $\mathcal{V}$ are low-rank matrices and $\mathcal{S}_2$ is a sparse matrix. After the decomposition, we re-initialize $\mathcal{U}$ and $\mathcal{V}$ to be of shape $\mathbb{R}^{m\times r}$ and $\mathbb{R}^{r\times n}$. The initial values for $\mathcal{U}$ are set to 0, and the initial values for $\mathcal{V}$ follow a normal distribution, as in @hu2021lora. We do not use the decomposed values inside $\mathcal{S}_2$, but only the indexes of its non-zero elements. We collect the indexes of the non-zero elements into $\Omega$ and reset the values of $\mathcal{S}_2$ back to 0. During the update, we only update the elements of $\mathcal{S}_2$ whose indexes are in $\Omega$.
73
+
74
+ Besides parameter efficiency, resource efficiency is another major goal of our proposed framework. Sparsity-embedded low-rank weight updates alone do not necessarily lead to computational cost reductions, since they work together with the dense pre-trained weights, and the massive computation during the forward pass is inevitable. To achieve both resource and parameter efficiency, we also promote a sparse weight structure in the final fine-tuned model, as demonstrated by the sparse mask $\mathcal{S}_1$ in Figure [1](#fig:methods){reference-type="ref" reference="fig:methods"}. Our DSEE explores both unstructured and structured sparsity patterns, as follows.
75
+
76
+ $\rhd$ **Pruning with unstructured sparse masks.** The unstructured sparse mask is the most widely used mask since it usually produces almost undamaged performance compared to its dense counterpart [@han2015deep]. It applies fine-grained manipulation on each individual model weight, while the generated irregular sparse masks bring limited hardware speedup [@wen2016learning]. Specifically, based on the weight matrix $\mathcal{W}$, a sparse mask $\mathcal{S}_1$ can be calculated with various approaches, either heuristic or optimization-based. We adopt the one-shot magnitude pruning [@han2015deep] in DSEE, due to its simplicity and competitive performance.
77
+
78
+ In our context, we create the sparse pattern $\mathcal{S}_1$ from $\mathcal{W}+\mathcal{U}\mathcal{V}$: [first]{.underline}, we sort the magnitudes (i.e., absolute values) of individual weights and remove the parameters with the bottom $1-k\%$ of magnitudes. [Second]{.underline}, we further tune the values of the low-rank update matrices for a few epochs, which is important for preserving performance, as stated in @han2015learning. [Third]{.underline}, the sparse mask $\mathcal{S}_1$ that we derive is applied to the pretrained weights only and does not affect the output of the update matrices, as $\mathcal{W}\odot\mathcal{S}_1 + \mathcal{U}\mathcal{V}$. For an input sample $x$, the output is calculated as $(\mathcal{W}\odot\mathcal{S}_1 + \mathcal{U}\mathcal{V})x=(\mathcal{W}\odot\mathcal{S}_1)x+\mathcal{U}\mathcal{V}x$, where the first term $(\mathcal{W}\odot\mathcal{S}_1)x$ brings significant resource efficiency.
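+
+ A minimal PyTorch sketch of the resulting layer is shown below. The module name and shapes are our own illustration of the computation $(\mathcal{W}\odot\mathcal{S}_1)x + \mathcal{U}\mathcal{V}x + \mathcal{S}_2 x$, with $\mathcal{W}$ and $\mathcal{S}_1$ frozen and only $\mathcal{U}$, $\mathcal{V}$ and the non-zeros of $\mathcal{S}_2$ trainable; keeping $\mathcal{S}_2$ in the forward pass follows our reading of Algorithm [\[alg:DSEE\]].
+
+ ```python
+ import torch
+ import torch.nn as nn
+
+ class DSEELinear(nn.Module):
+     """Sketch: y = (W * S1) x + U V x + S2 x, with W and S1 frozen."""
+
+     def __init__(self, w, s1, omega, r=8):
+         super().__init__()
+         m, n = w.shape
+         self.register_buffer("w_pruned", w * s1)         # pruned pre-trained weight
+         self.register_buffer("omega", omega)             # fixed support of S2
+         self.u = nn.Parameter(torch.zeros(m, r))         # U initialized to 0
+         self.v = nn.Parameter(torch.randn(r, n) * 0.02)  # V ~ N(0, 0.02), std assumed
+         self.s2 = nn.Parameter(torch.zeros(m, n))
+
+     def forward(self, x):                                # x: (batch, n)
+         s2 = self.s2 * self.omega                        # constrain updates to Omega
+         return x @ (self.w_pruned + self.u @ self.v + s2).T
+ ```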
79
+
80
+ $\rhd$ **Pruning with structured sparse masks.** The second method exploits structured sparse masks, whose regular patterns are more hardware-friendly yet perform worse than unstructured masks. Our DSEE considers $\ell_1$ sparse regularization [@liu2017learning; @chen2021earlybert] to craft high-quality structurally pruned subnetworks. More precisely, motivated by @chen2021earlybert, we introduce learnable coefficients $\xi$ before each attention head module, and append an $\ell_1$ sparse penalty on $\xi$ to the original loss. Detailed formulations are as follows:
81
+
82
+ We add trainable coefficients $c$ (the $\xi$ above) before the attention heads. The parameters $c$ are optimized together with the decomposed matrices, *i.e.*, $\mathcal{U},\mathcal{V}$ and $\mathcal{S}_2$. An extra term $\lambda \|c\|_1$ is added to the training loss for sparse regularization; the value of $\lambda$ is set to 1e-4. After training, we prune the attention heads that have the lowest contribution to the model (*i.e.*, the lowest $c$). We use a layer-wise pruning scheme that prunes the same proportion of heads in each attention layer. After pruning, several epochs of tuning are run to recover the performance of the pruned model. The sizes of the update matrices change after structured pruning, so we also need to change the dimensions of $\mathcal{U}$ and $\mathcal{V}$. Specifically, we change the size of $\mathcal{V}$ from $\mathbb{R}^{r\times n}$ to $\mathbb{R}^{r\times [n\times s]}$, where $s$ is the pruning ratio of the corresponding self-attention layer and $[x]$ is the largest integer not greater than $x$. The size of $\mathcal{S}_2$ is shrunk accordingly.
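+
+ A short sketch of the head gating and penalty follows; the variable names and the 12x12 head layout are illustrative assumptions, not the paper's exact implementation.
+
+ ```python
+ import torch
+ import torch.nn as nn
+
+ num_layers, num_heads, lam = 12, 12, 1e-4
+ head_coeffs = nn.Parameter(torch.ones(num_layers, num_heads))  # the c (xi) above
+
+ def sparse_penalty():
+     return lam * head_coeffs.abs().sum()    # lambda * ||c||_1, added to task loss
+
+ # Layer-wise pruning: keep the same number of highest-coefficient heads per layer.
+ keep = num_heads // 2                       # e.g. prune 50% of heads in each layer
+ kept_idx = torch.topk(head_coeffs.abs(), keep, dim=1).indices
+ ```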
2112.00054/main_diagram/main_diagram.drawio ADDED
The diff for this file is too large to render. See raw diff
 
2112.00054/paper_text/intro_method.md ADDED
@@ -0,0 +1,73 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Introduction
2
+
3
+ Using large-scale labeled (like ImageNet [\[11\]](#page-8-0)) or weakly-labeled (like JFT-300M [\[6,](#page-8-1) [20\]](#page-8-2), Instagram-3.5B [\[37\]](#page-9-0)) datasets collected from the web has been the go-to approach for pre-training classifiers for *downstream* tasks with a relative scarcity of labeled data.
4
+
5
+ Project page : <https://samarth4149.github.io/projects/task2sim.html>
6
+
7
+ <span id="page-0-0"></span>![](_page_0_Figure_10.jpeg)
8
+
9
+ Figure 1. We explore how synthetic data can be effectively used for training models that can transfer to a wide range of downstream tasks from various domains. Is a universal pre-trained model for all downstream tasks, the best approach?
10
+
11
+ Prior works have demonstrated that as we move to bigger datasets for pre-training, downstream accuracy improves on average [\[37,](#page-9-0) [60\]](#page-10-0). However, large-scale real image datasets bear the additional cost of curating labels, in addition to other concerns like privacy or copyright. Furthermore, large datasets like JFT-300M and Instagram-3.5B are not publicly available, posing a bottleneck for reproducibility and fair comparison of algorithms.
12
+
13
+ Synthetic images generated via graphics engines provide an alternative, quelling a substantial portion of these concerns. With 3D models and scenes, potentially infinite images can be generated by varying various scene or image-capture parameters. Although synthetic data has been used for transfer learning in various specialized tasks [\[2,](#page-8-3) [52,](#page-9-1) [59,](#page-10-1) [63\]](#page-10-2), there has not been prior research dedicated to its transferability to a range of different recognition tasks from different domains (see Figure [1\)](#page-0-0). In conducting this first of its kind (to the best of our knowledge) study, we first ask the
14
+
15
+ <sup>†</sup>Work done as interns at MIT-IBM Watson AI Lab.
16
+
17
+ <sup>∗</sup>Now affiliated with JPMorgan Chase, FLARE. Work done when Chun-Fu was at MIT-IBM Watson AI Lab.
18
+
19
+ <span id="page-1-1"></span><span id="page-1-0"></span>
20
+
21
+ | Pretraining Data Variations | EuroSAT | SVHN | Sketch | DTD |
22
+ |-----------------------------|---------|-------|--------|-------|
23
+ | Pose | 87.01 | 28.49 | 37.89 | 37.39 |
24
+ | +Lighting | 88.57 | 32.36 | 38.81 | 40.32 |
25
+ | +Blur | 90.20 | 35.58 | 35.53 | 37.66 |
26
+ | +Materials | 84.54 | 44.84 | 30.81 | 38.51 |
27
+ | +Background | 80.44 | 29.93 | 14.60 | 32.39 |
28
29
+
30
+ Table 1. Downstream task accuracies using linear probing with a ResNet-50 backbone pretrained on synthetic datasets with different variation parameters (successively added). We see that different simulation parameters have different effects on downstream tasks.
31
+
32
+ question: in synthetic pretraining for different downstream classification tasks, does a one-size-fits-all solution (*i.e.*, a universal pre-trained model for all tasks) work well?
33
+
34
+ With graphics engines, we can control various *simulation* parameters (lighting, pose, materials, etc.). So, in an experiment, we successively introduced variations of different parameters into a pretraining dataset of 100k synthetic images from 237 different classes (as many categories as are available in Three-D-World [\[13\]](#page-8-4)). We pretrained a ResNet-50 [\[18\]](#page-8-5) on these, and evaluated this backbone with linear probing on different downstream tasks. The results are in Table [1.](#page-1-0) We see that some parameters, like random object materials, result in improved performance for some downstream tasks like SVHN and DTD, while hurting performance for other tasks like EuroSAT and Sketch. In general, different pre-training data properties seem to favor different downstream tasks.
35
+
36
+ To maximize the benefit of pre-training, different optimal simulation parameters can be found for each specific downstream task. Because of the combinatorially large set of different simulation parameter configurations, a brute-force search is out of the question. However, this might still suggest that some, presumably expensive, learning process is needed for each downstream task to find an optimal synthetic image set for pre-training. We show this is not the case.
37
+
38
+ We introduce Task2Sim, a unified model that maps a downstream task representation to optimal simulation parameters for pre-training data generation, so as to maximize downstream accuracy. Using vector representations for a set of downstream tasks (in the form of Task2Vec [\[1\]](#page-8-6)), we train Task2Sim to learn a mapping to the optimal parameters for each task in the set. Once trained on this set of "seen" tasks, Task2Sim can also use Task2Vec representations of novel "unseen" tasks to predict simulation parameters that would be best for their pre-training datasets. This efficient one-shot prediction for novel tasks is of significant practical value if developed as an end-user application that can automatically generate and provide pre-training data, given some downstream examples.
39
+
40
+ Our extensive experiments using 20 downstream classification datasets show that on seen tasks, given a number of images per category, Task2Sim's output parameters generate pre-training datasets that are much better for downstream performance than approaches like domain randomization [\[2,](#page-8-3) [27,](#page-9-2) [79\]](#page-10-3) that are not task-adaptive. Moreover, we show that Task2Sim also generalizes well to unseen tasks, maintaining an edge over non-adaptive approaches while being competitive with ImageNet pre-training.
41
+
42
+ In summary, (i) We address a novel, and very practical, problem—how to optimally leverage synthetic data to taskadaptively pre-train deep learning models for transfer to diverse downstream tasks. To the best of our knowledge, this is the first time such a problem is being addressed in transfer learning research. (ii) We propose Task2Sim, a unified parametric model that learns to map Task2Vec representations of downstream tasks to simulation parameters for optimal pre-training. (iii) Task2Sim can generalize to novel "unseen" tasks, not encountered during training, a feature of significant practical value as an application. (iv) We provide a thorough analysis of the behavior of downstream accuracy with different sizes of pre-training data (in number of classes, object-meshes or simply images) and with different downstream evaluation methods.
43
+
44
+ # Method
45
+
46
+ Our goal is to create a unified model that maps task representations (e.g., obtained using task2vec [\[1\]](#page-8-6)) to simulation parameters, which are in turn used to render synthetic pre-training datasets for not only tasks that are seen during training, but also novel tasks. This is a challenging problem, as the number of possible simulation parameter configurations is combinatorially large, making a brute-force approach infeasible when the number of parameters grows.
47
+
48
+ Figure [2](#page-2-0) shows an overview of our approach. During training, a batch of "seen" tasks is provided as input. Their task2vec vector representations are fed as input to Task2Sim, which is a parametric model (shared across all tasks) mapping these downstream task2vecs to simulation parameters, such as lighting direction, amount of blur, background variability, etc. These parameters are then used by a data generator (in our implementation, built using the Three-D-World platform [\[13\]](#page-8-4)) to generate a dataset of synthetic images. A classifier model then gets pre-trained on these synthetic images, and the backbone is subsequently used for evaluation on a specific downstream task. The classifier's accuracy on this task is used as a reward to update Task2Sim's parameters. Once trained, Task2Sim can also be used to efficiently predict simulation parameters in *one-shot* for "unseen" tasks that it has not encountered during training.
49
+
50
+ Let us denote Task2Sim's parameters with $\theta$. Given the task2vec representation of a downstream task $x \in \mathcal{X}$ as input, Task2Sim outputs simulation parameters $a \in \Omega$. The model consists of $M$ output heads, one for each simulation parameter. In the following discussion, just as in our experiments, each simulation parameter is discretized to a few levels to limit the space of possible outputs. Each head outputs a categorical distribution $\pi_i(x, \theta) \in \Delta_{k_i}$, where $k_i$ is the number of discrete values for parameter $i \in [M]$ and $\Delta_{k_i}$ is the standard $k_i$-simplex. The set of argmax outputs $\nu(x, \theta) = \{\nu_i \mid \nu_i = \arg\max_{j \in [k_i]} \pi_{i,j}\ \forall i \in [M]\}$ is the set of simulation parameter values used for synthetic data generation. Subsequently, we drop the explicit dependence of $\pi$ and $\nu$ on $\theta$ and $x$ when clear.
51
+
52
+ Since Task2Sim aims to maximize downstream accuracy after pre-training, we use this accuracy as the reward in our training optimization[1](#page-0-1). Note that this downstream accuracy is a non-differentiable function of the output simulation parameters (assuming any simulation engine can be used as a black box), and hence direct gradient-based optimization cannot be used to train Task2Sim. Instead, we use REINFORCE [\[73\]](#page-10-19) to approximate gradients of downstream task performance with respect to the model parameters $\theta$.
53
+
54
+ Task2Sim's outputs represent a distribution over "actions" corresponding to different values of the set of $M$ simulation parameters. $P(a) = \prod_{i \in [M]} \pi_i(a_i)$ is the probability of picking action $a = [a_i]_{i \in [M]}$ under policy $\pi = [\pi_i]_{i \in [M]}$. Remember that the output $\pi$ is a function of the parameters $\theta$ and the task representation $x$. To train the model, we maximize the expected reward under its policy, defined as
55
+
56
+ $$R = \underset{a \in \Omega}{\mathbb{E}}[R(a)] = \sum_{a \in \Omega} P(a)R(a) \tag{1}$$
57
+
58
+ where $\Omega$ is the space of all outputs $a$ and $R(a)$ is the reward when the parameter values corresponding to action $a$ are chosen. Since the reward is the downstream accuracy, $R(a) \in [0, 100]$. Using the REINFORCE rule, we have
59
+
60
+ $$\nabla_{\theta} R = \underset{a \in \Omega}{\mathbb{E}} \left[ (\nabla_{\theta} \log P(a)) R(a) \right] \tag{2}$$
61
+
62
+ $$= \underset{a \in \Omega}{\mathbb{E}} \left[ \left( \sum_{i \in [M]} \nabla_{\theta} \log \pi_i(a_i) \right) R(a) \right] \tag{3}$$
63
64
+
65
+ where the second step follows from the linearity of the derivative. In practice, we use a point estimate of the above expectation at a sample $a \sim (\pi + \epsilon)$ ($\epsilon$ being some exploration noise added to the Task2Sim output distribution) with a self-critical baseline following [\[50\]](#page-9-17):
66
+
67
+ <span id="page-3-0"></span>
68
+ $$\nabla_{\theta} R \approx \left(\sum_{i \in [M]} \nabla_{\theta} \log \pi_i(a_i)\right) (R(a) - R(\nu)) \tag{4}$$
69
70
+
71
+ where, as a reminder, $\nu$ is the set of argmax parameter values from the Task2Sim model heads.
72
+
73
+ Pseudo-code of our approach is shown in Algorithm [1.](#page-4-0) Specifically, we update the model parameters $\theta$ using minibatches of tasks sampled from a set of "seen" tasks. Similar to [\[44\]](#page-9-18), we also employ self-imitation learning biased towards actions found to have better rewards. This is done by keeping track of the best action encountered in the learning process and using it for additional updates to the model, besides the ones in Line [12](#page-4-1) of Algorithm [1.](#page-4-0) Furthermore, we use the test accuracy of a 5-nearest-neighbors classifier operating on features generated by the pretrained backbone as a proxy for downstream task performance, since it is computationally much faster than other common evaluation criteria used in transfer learning, e.g., linear probing or full-network finetuning. Our experiments demonstrate that this proxy evaluation measure indeed correlates with, and thus helps, final downstream performance with linear probing or full-network finetuning.
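+
+ A minimal PyTorch sketch of one update implementing Eq. (4) is given below; `reward_fn` stands in for the full pre-train-and-evaluate loop (the 5-NN proxy accuracy), and the uniform-mixture exploration noise is our own assumption.
+
+ ```python
+ import torch
+
+ def task2sim_step(logits_per_head, reward_fn, optimizer, eps=0.05):
+     """One REINFORCE update with the self-critical baseline of Eq. (4)."""
+     log_probs, sampled, greedy = [], [], []
+     for logits in logits_per_head:            # one head per simulation parameter
+         probs = torch.softmax(logits, dim=-1)
+         noisy = (1 - eps) * probs + eps / probs.numel()   # a ~ (pi + noise)
+         a = torch.multinomial(noisy, 1).item()
+         sampled.append(a)
+         greedy.append(int(probs.argmax()))                # nu: argmax action
+         log_probs.append(torch.log(probs[a] + 1e-12))
+     advantage = reward_fn(sampled) - reward_fn(greedy)    # R(a) - R(nu)
+     loss = -torch.stack(log_probs).sum() * advantage
+     optimizer.zero_grad()
+     loss.backward()
+     optimizer.step()
+ ```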
2112.07436/main_diagram/main_diagram.drawio ADDED
@@ -0,0 +1 @@
 
 
1
+ <mxfile host="app.diagrams.net" modified="2021-10-04T08:16:11.385Z" agent="5.0 (Macintosh; Intel Mac OS X 10_14_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/93.0.4577.82 Safari/537.36" etag="vEFm_dINJ1oHeESzOO8L" version="15.4.1"><diagram id="I6OEPFZdjC98mujyItlr" name="Page-1">7V1tc6M4Ev4183EoJIQEHydvc1u72UvdbN3u7TdiE5sdYrw23iTz608YZEDiRbYlRBwyVZkgywJ3P4/U3Wq1PznXz69fN8F6eZ/Mw/gTtOevn5ybTxD60KW/s4a3vMElXt6w2ETzvAmUDd+iH2HRaBetu2gebmsd0ySJ02hdb5wlq1U4S2ttwWaTvBTdiuGekrh+13WwCGs9soZvsyAOhW6/R/N0mbd6kJTt/wqjxZLdGWA/f+U5YJ2LgbfLYJ68VJqc20/O9SZJ0vyv59frMM5kx+SSv++u5dXDg23CVSrzhiv/z8df/725//H7wx/BG/jpz/Sn28/FKP8E8a74wMXDpm9MAnQUKmx6cfWyjNLw2zqYZa+8UHXTtmX6HNMrQP8MtutcA0/Ra0hvelWMHW7S8LX1ocFBFBRCYfIcpps32oXhh1gFgAr8AAaol1IbTMLLiiJYW1Dof3EYuhQR/aOQ0hESgyOXGBEkhg1LzBm5xAAYncjQyEUm0BLahiUGXEFCm2S3mmef+MamnzrZpMtkkayC+JckWRey+StM07di0Ql2aVKXXPgapX9kb7cIxMX1//bXNvaK65vXYvz9xRu7WNHPlL8V+uw6f6uPIWso37u/Kt88/5KtXPRylazCvOUuyuRxuNNDuImo4MJNdQS+MZdIOGcrXJuaqdSS3WYWdoi3WNTSYLMI0z7girDZhHGQRv/Un0M5BvyRs0aYmiEyzRqsgTUl9h0b8OD3FIC/4CSlfI2TCPdzUuBNA5cU0gZJ0sY5kzbFWx+SiD5hbWFDfuUH1dHnuJaL7fIH12+Qf7pizKphyd8GEouQym1I/TZsambj5tIQxt0D+PDhz8A00YppAjlIE0cZpAu+VJYZ591CGmqBtIcslwAbEw8ADxMbcIi2Lc/1fAcCzwOOcxqefdtC8DAI9B0ezpZtVx6BDItuTyu6oY85eLtAGbyxR+rwzm2jdngbMHSgJLxbVvqzZ+wa9pALOXwTy294+WiEC4YIHHiO9vXO0TZvdLtEGYoPoGUgRqMDsWMUxA3OIbKqZohPToNtDzuADy27XBxcBAfFNPM2utyPEuSg3wXZppvk+yHqmLWsGWSyN0WrRYGcRRxst8Xf2+9hOlsWF08Uv9dJnGz2d3fu9j+KgiaCM4MKJ6Xqy7iQdaq5M0hbcE4m1lQy/TFOZt/rQq9PQ6IKylkBHMFZUYwVKbkNHh9rO3c9QcSyPY4lfjaFnURA6sJShlWI3D+0btK5x5DOPoV0HIlu3FvvBqkhEXTpes7LUKQRAA0IIdo4hLWLdB5sl4dZUOMkBRGm8q14uNyC4RmXNWmQNY7TTGrrYFUTOv57l20VXT0lq/Tzdm8FfaEdqDxeyxfpX4vi/5h1pp2egll9kN/oOrKlL/wavtDf/0mes5u1DRKxhp9ZC/2wEd9ru3tkTVGlX6W50po/l9Ccf2jWrJXHPrmxCVGEs8wu78AZMY6zptBoDSFH4+xETH39mX4COw7eqN3bBgdO71QvaZNymTILe72q36IpiKPFil7OqB4zO/sq03I0C+IvxQvP0Xwet0WD63hrN7LosxdOyT4iqQRPdg1Avggft8GucnTBxxWDDpnN8624LJ2127L1SuH2SzXQy17p33hxaz6gvP+3twp/W0ark809GRet0FV/HI00Q2WYLRX2mF3GwLE7KZK86rfE0C24U8Q44HdatkTcp+my2tXvBzudBCymO2WMA0fwrSdsUtkMcuqEtGzYHznXHCeRDZQ455KwzZ+CXajzOThp9qUcrYkaJ7NexXpqd7K7yQ1oYDfQxu4GOWtkt30Euw/sxdy+gF2y+dSIqm5y46HIfd4KK+Fuv1PWdUeLjLMODcu606zYU43YRsqpZBeRZZdnlF1NAZbLYBfoZJcvWqyDssvxPqjgD4EdY5IX91bfh6+gcnaS9a4du1nXw8xOCBhTVbkcTcqSUxbuVtY4g2Bk1EEwlv468iAYkthkvIQgmOPye/rG415ifrJeGx379UR/H/clFcvHv6BX4yLLHOrio4HcISRr2OfZN8pjYo6X5XBytqxFf5U/XJ6y5rAYulgXwsFC+oFn2G1A3TtN6gkPkVMnvNNnF5UhMYI4Qjtnp4KbSOiWPdujjfDIsqvZgTz3neoZhmGzrd2B15+jDPQCiNwRm3PDsiaQhcyad1rPjRmd4L3RTfAsZ/cChU3EZL6GU7HDSvu97hurnIVkncy8o6lZyB12O2KksaB3oixsdyprnLGgUYeC3PcRCnLFUJBlWQIYBlu3BsukFo57mA4N4emoR92Nyo5dVVKQ6ydjs6MZ4MRjV4hwx8h7R9bslRGJeKzRQg9ofGWLiNY0ExUyG1/hIqI18qdAZu74KhcRrfv+WrhpvHYR0XEaeqpdxMTrye4vE6OpSuwxx8uc8dUv8sQ9+al+kULqsNm8nzp6EuTFBU5V0SI3K1pUj9YNXajIE/k9FSoygV2sBbuIq1XEwVhFpSLElyri8WyyUpEnxp6nSkUq8S17voGcGyptm5vr1VgQj3A1tYoa7PWBixV5YmR+KlakEMeyCUeacNzkEKqpV9TDEMP1ijyZKNoRAVT+HHz/hsihIrpEoPt9BWMRG+ewFNunQQgIKZnCUC0woXoL3ird1lmH7fGP3Dovc/0du7s/cL2u/vSP/InVYrwp6mmk7oVQS+VrVy2VqSSGkF3BVcRoOGQzaEkMr71yz8DI+m84S6mGof33Llil0Q86byWr04FkBCBVoM7Dp2AXp2pgQxdZi3ATKLPnqtjxSFOhOm3oAawAZtfiu6DCXLfKoPjCk+CRdbePlQ3yIb97YHuWGNFzGtZJ4OiSjC0hGXXb63HwGMZXwez7Yt+nWh8q3+rsAG5pqFTelgelGk0Ybnves7N/sjDvgpGoYbbGikAfMvvscBTy4nV5XKrFcbp8rWtpLKoFH0S1tzj7p1W1cGSqFb99pinHaXw2aAtKiru2AcTXqlu5DTJ9ypSp5qnd0HBd3tBgOffGzIzGypuXOH+pMDNgyw7JSCYsKJFzchG6VGFmtOlynGZGY+HSS1StCjOjR7UjMzMaSktOZsapujVtZjgS7oBuM8OF/ujMDAd+kPlLhZnhtOh3LBOWxImDi9ClCjOjTZfjNDMciS9cvQjVqjAzelQ7NjND7sTWZGbI6Na4mTGCaIYLgZAvIu5GDmxmTNGMIzDe4wGPa/qaYhvqNDsy1U6xDWWqHRlr0RTbUKdbMV1jWKMDjSG2YfO5Gk3ftzis0cHyVS9+/lJhdK
BxxzbQFNs4X5fjjG2gKbahTLUji200FCaezIxTdWs6tiFVhvADFbRxsVDcwveVnMvB6NiRNR/FAa7oLxjKJn9Ikjj7ZvHpIELbNOHy4T+AfYudbqxQw0fDppQTiXVet5uCRc4SUTLDuikylY8uwv5R4abkIGrV72fOtvUMG0AylcA+oG57SCqcKWz8guphFTltYChRZNO5uEEVKVPFaFJkryKbthQHVuR0wEOJIqFxRU6hPSWKNP29fsCbzuWcpElh54QVQjIW6lFdeuO9h3owK4VWCcecWi+I4P6xdIdzxlN24v6XhymU0zo1+HVvtiWQgxuAry+M432UrJBDTwWaJCz4NRo/yP8ofpBKq4tXo3kvyG8ynsdRQOh6KiB0FLa4CpwAi9hiqUbDzPR+U8CeU/J9l5K36wwo/QgE2MIYkzoMuaF2j6wJVG5Zaa627u/7bpBV+46UI32Ru+A5ijPENHG0hlOoaCnLqwJWtiLrZdMaKm4Pa5/47batYtQ6lud5viRq/5pQaxK1kPBetnmgtldoMzm9Pk9ANQlURwgH6QMqvdwkGR7K4AIV5vI+mYdZj/8D</diagram></mxfile>
2112.07436/main_diagram/main_diagram.pdf ADDED
Binary file (29.6 kB). View file
 
2112.07436/paper_text/intro_method.md ADDED
@@ -0,0 +1,192 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Introduction
2
+
3
+ In recent years, graph neural networks (GNNs) have gained increasing traction in the machine learning community. Graphs have long been used as a powerful abstraction for a wide variety of real-world data where structure plays a key role [\[1\]](#page-11-0), from collaborations [\[2\]](#page-11-1), [\[3\]](#page-11-2) to biological [\[4\]](#page-11-3), [\[5\]](#page-11-4) and physical [\[6\]](#page-11-5) data, to mention a few. Before the advent of GNNs, *graph kernels* provided a principled way to deal with graph data in the traditional machine learning setting [\[7\]](#page-11-6), [\[8\]](#page-11-7), [\[9\]](#page-11-8). However, with the attention of machine learning researchers steadily shifting away from hand-crafted features toward end-to-end models where both the features and the model are learned together, neural networks have quickly overtaken kernels as the framework of choice for dealing with graph data, leading to multiple popular architectures such as [\[3\]](#page-11-2), [\[4\]](#page-11-3), [\[10\]](#page-11-9).
4
+
5
+ The main obstacle that both traditional and deep learning methods have to overcome when dealing with graph data is also the source of interest for using graphs as data representations. The richness of graphs means that there is no obvious way to embed them into a vector space, a necessary step when the learning method expects vectors as input.
6
+
7
+ L. Cosmo, G. Minello, A. Bicciato, and A. Torsello are with Ca' Foscari University of Venice, Venice, Italy. M. M. Bronstein is with Oxford University, United Kingdom. E. Rodolà is with Sapienza University of Rome, Rome, Italy. L. Rossi is with The Hong Kong Polytechnic University, Hong Kong. Corresponding author: luca.rossi@polyu.edu.hk
8
+
9
+ One reason is the lack of a canonical ordering of the nodes in a graph, requiring either permutation-invariant operations or an alignment to a reference structure. Moreover, even if the order or correspondence can be established, the dimension of the embedding space may vary due to structural modifications, *i.e.*, changes in the number of nodes and edges.
10
+
11
+ In traditional machine learning, kernel methods, and particularly graph kernels, provide an elegant way to sidestep this issue by replacing explicit vector representations of the data points with a positive semi-definite matrix of their inner products. Thus, any algorithm that can be formulated in terms of scalar products between input vectors can be applied to a set of data (such as graphs) on which a kernel is defined.
12
+
13
+ GNNs apply a form of generalized convolution operation to graphs, which can be seen as a message-passing strategy where the node features are propagated over the graph to capture node interactions. Compared to standard convolutional neural networks (CNNs), the main drawback of these architectures is the lack of a straightforward way to interpret the dependency between the output and the presence of certain features and structural patterns in the input graph. In this paper, we propose a neural architecture that bridges the two worlds of graph kernels and GNNs. The key intuition underpinning our work is that there exists an analogy between the traditional convolution operator, which can be seen as performing an inner product between an input matrix and a filter, and graph kernels, which compute an inner product of graphs. As such, graph kernels provide the natural tool to generalize the concept of convolution to the graph domain, which we term *graph kernel convolution* (GKC). Given a graph kernel of choice, in each GKC layer, the input graph is compared against a series of structural masks (analogous to the convolutional masks in CNNs), which effectively represent learnable subgraphs. These, in turn, can offer better interpretability in the form of insights into what structural patterns in the input graphs are related to the corresponding output (*e*.*g*., the carcinogenicity of a chemical compound).
14
+
15
+ - We introduce a neural model that is fully structural, unlike existing approaches that require embedding the input graph into a larger, relaxed space. While the latter allows one to seek more complex and arbitrary decision boundaries, it also has the potential of overfitting the problem and creating more local optima, as evidenced by the need to use dropout strategies to avoid overfitting in Graph Convolutional Networks (GCNs) [\[11\]](#page-11-10), [\[12\]](#page-11-11).
16
+ - In order to cope with the non-differentiable nature of graphs, we let the learned structural masks encode
17
+
18
+ distributions over graphs, which in turn allows us to compute the gradient of the expected kernel response between the learned masks and the input subgraphs. This is, in turn, achieved through a computationally efficient importance sampling approach.
19
+
20
+ - Our architecture allows plugging in any type of graph kernel (not just differentiable kernels as in [\[13\]](#page-11-12), [\[14\]](#page-11-13)).
21
+ - As an added benefit, our model provides some interpretability regarding the structural masks learned during the training process, similarly to convolutional masks in traditional CNNs.
22
+ - Finally, we analyze the expressive power of our model, and we argue that it is greater than that of standard message-passing GNNs and the equivalent Weisfeiler–Lehman (WL) graph isomorphism test.
23
+
24
+ The fundamental goal of our network is to reconstruct the structural information needed for classification; it is thus particularly suited to problems where structure plays a pivotal role. These are often graph classification and regression problems, where structure provides the most relevant information for classification, while node/edge features take a secondary role. The situation is usually reversed for node classification problems, where the distribution of features across the immediate neighbors of a node holds most of the information needed to classify the node itself. For these reasons, in this paper we focus on graph classification and regression problems. However, we stress that our model can indeed be extended to tackle node-level tasks.
25
+
26
+ The remainder of this paper is organized as follows. In Section [II,](#page-1-0) we review the related work. In Section [III,](#page-2-0) we present our model, where graph kernels are used to redefine convolution on graph data. In Section [IV,](#page-7-0) we investigate the hyper-parameters of our architecture through an extensive ablation study, provide some insight into the interpretability of the structural masks, and evaluate our architecture on standard graph classification and regression benchmarks. Finally, Section [V](#page-11-14) concludes the paper.
27
+
28
+ # Method
29
+
30
+ *Kernel methods.* This class of algorithms is capable of learning when presented with a particular positive pairwise measure on the input data, known as a kernel. Consider a set $X$ and a positive semi-definite kernel function $\mathcal{K} : X \times X \to \mathbb{R}$ such that there exists a map $\phi : X \to \mathcal{H}$ into a Hilbert space $\mathcal{H}$ and $\mathcal{K}(x, y) = \phi(x)^\top \phi(y)$ for all $x, y \in X$. Crucially, $X$ can represent any set of data on which a kernel can be defined, from $\mathbb{R}^d$ to a finite set of graphs. Hence, the field of machine learning is ripe with examples of graph kernels (see Section [II\)](#page-1-0), which are nothing but positive semi-definite pairwise similarity measures[1](#page-2-1) on graphs. These can be either implicit (only $\mathcal{K}$ is computed) or explicit ($\phi$ is also computed).
31
+
32
+ *Weisfeiler-Lehman test.* The WL kernel [\[7\]](#page-11-6) is one of the most powerful and commonly used graph kernels, and it is based on the 1-dimensional Weisfeiler-Lehman (1-WL) graph isomorphism test. The idea underpinning the test is to partition the node set by iteratively propagating the node labels between adjacent nodes. With each iteration, the set of labels accumulated at each node of the graph is then mapped to a new label through a hash function. This procedure is repeated until the cardinality of the set of labels stops growing. Two graphs can then be compared in terms of their label sets at convergence, with two graphs being isomorphic only if their label sets coincide.
33
+
34
+ *Learning on graphs.* Let $\mathcal{G} = (V, X, E)$ be an undirected graph with $|V|$ nodes and $|E|$ edges, where each node $v$ is associated with a label $x(v)$ belonging to a dictionary $\mathbf{D}$. Note that, generally, graphs can be directed and have continuous and discrete attributes on both the nodes and edges; however, for simplicity, in this paper we restrict our attention to undirected node-labeled graphs. Indeed, the model described in this paper can easily be applied to directed, edge-labeled graphs as well. A common goal in graph machine learning problems is to produce a vector representation of $\mathcal{G}$ that is aware of both the node labels and the structural information of $\mathcal{G}$. Generalizing the convolution operator to graphs, GNNs learn the parameters $\Theta$ of a function $h$ that performs message passing [\[4\]](#page-11-3) in the 1-hop neighborhood of each node $v \in V$,
35
+
36
+ $$z(v) = \sum_{u \in \mathcal{N}_{\mathcal{G}}^1(v)} h_{\Theta}(x(v), x(u)), \qquad (1)$$
37
+
38
+ <span id="page-2-1"></span><sup>1</sup>Note that while several kernels can be interpreted as similarity measures, this is not always the case [\[47\]](#page-12-25).
39
+
40
+ ![](_page_2_Picture_9.jpeg)
41
+
42
+ Fig. 1. The proposed GKNN architecture. The input graph is fed into one or more GKC layers, where subgraphs centered at each node are compared to a series of structural masks through a kernel function. The output is a new set of real-valued feature vectors associated to the graph nodes. We obtain a graph-level feature vector through pooling on the nodes features, which is then fed to an MLP to output the final classification label.
43
+
44
+ <span id="page-2-2"></span>where N <sup>1</sup> G (v) denotes the 1-hop neighborhood of v. The convolution is usually repeated for L layers, and the final vectorial representation Zpool is obtained by applying a nodewise permutation invariant aggregation operator □ on the node features z(v).
45
+
46
+ The core feature of the proposed model is the definition of a new approach to perform the convolution operation on graphs. Unlike the diffusion process performed by message passing techniques, we design the *Graph Kernel Convolution* (GKC) operation in terms of the inner product between graphs computed through a graph kernel function.
47
+
48
+ Given a graph $\mathcal{G} = (V, X, E)$ with $|V|$ nodes, we extract $|V|$ subgraphs $\mathcal{N}_{\mathcal{G}}^r(v)$ of radius $r$, centered at each vertex $v \in V$. Each subgraph consists of the central node $v$, the nodes at distance at most $r$ from $v$, and the edges between them. Each such subgraph is then compared to a set of learnable structural masks $\{\mathcal{M}_1, \ldots, \mathcal{M}_m\}$ through
49
+
50
+ $$z_i(v) = \mathcal{K}(\mathcal{M}_i, \mathcal{N}_{\mathcal{G}}^r(v)), \tag{2}$$
51
+
52
+ where the resulting feature vector $z(v)$ is a non-negative real-valued $m$-dimensional vector collecting the kernel responses, and the $i$-th structural mask $\mathcal{M}_i = (V_{\mathcal{M}_i}, X_{\mathcal{M}_i}, E_{\mathcal{M}_i})$ is a first-order stochastic graph, *i.e.*, a distribution over graphs with node set $V_{\mathcal{M}_i}$, where each edge $e \in E_{\mathcal{M}_i}$ is sampled from an independent Bernoulli trial with its own parameter $\theta_e$, and each node label $x \in X_{\mathcal{M}_i}$ is sampled from a corresponding discrete probability distribution over a dictionary $\mathbf{D}$ with parameters $\phi_{xd}, \forall d \in \mathbf{D}$, each expressing the probability of $x$ assuming value $d$. The probability distributions $\theta$ and $\phi$ over edges and node labels defining each mask are effectively the learnable parameters of the GKC layer.
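+
+ To make the first-order stochastic graph concrete, the following is a minimal sketch (our own illustration, not the authors' code) of sampling one discrete graph from a structural mask; the array names `theta` and `phi` are our own choices for the edge and label parameters.
+
+ ```python
+ import numpy as np
+
+ def sample_mask_graph(theta, phi, rng=np.random.default_rng()):
+     """Draw one discrete graph from a first-order stochastic mask.
+
+     theta: (p, p) symmetric matrix of Bernoulli edge parameters.
+     phi:   (p, |D|) rows of categorical node-label probabilities.
+     """
+     p = theta.shape[0]
+     # Each edge is an independent Bernoulli trial with parameter theta_e.
+     upper = np.triu(rng.random((p, p)) < theta, k=1)
+     adjacency = (upper | upper.T).astype(int)
+     # Each node label is drawn from its categorical distribution over D.
+     labels = np.array([rng.choice(phi.shape[1], p=phi[v]) for v in range(p)])
+     return adjacency, labels
+
+ # Example: a 4-node mask over a 3-symbol dictionary.
+ theta = np.full((4, 4), 0.6); np.fill_diagonal(theta, 0.0)
+ phi = np.full((4, 3), 1.0 / 3.0)
+ A, x = sample_mask_graph(theta, phi)
+ ```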
53
+
54
+ *Multi-layer architecture.* The output node feature $z(v) \in \mathbb{R}^m_+$ of a GKC operation consists of a continuous vector containing the graph kernel responses between the structural masks and the subgraph centered on the node $v$. For graph kernel functions requiring discrete labels, the output feature vector thus needs to be discretized before being given as input to the next layer. In this case, we interpret $z(v)$ as an unnormalized probability distribution over a set of $m$ labels, from which we sample the new label $x(v)$ for the node $v$ at the next GKC layer.
55
+
56
+ Since in this paper we focus on graph-level tasks, the final graph embedding after $L$ layers is obtained as $\square(Z^1|\dots|Z^L)$ , where $Z^\ell$ are the output features of the $\ell$-th layer and $\cdot|\cdot$ indicates the concatenation of node-wise features. This is then fed to a multilayer perceptron (MLP) to obtain the final classification. In particular, in all our experiments we use sum pooling as the aggregation operator $\square$ . Figure 1 shows an overview of the proposed multilayer architecture.
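+
+ As an illustration only (our own sketch, assuming each layer's node features are stacked in a matrix), the readout could look like:
+
+ ```python
+ import numpy as np
+
+ def graph_readout(layer_features):
+     """Sum-pool each layer's node features, then concatenate them.
+
+     layer_features: list of (num_nodes, m) arrays, one per GKC layer.
+     Returns a graph-level vector of size L * m, to be fed to the MLP.
+     """
+     pooled = [Z.sum(axis=0) for Z in layer_features]  # sum pooling per layer
+     return np.concatenate(pooled)                     # Z^1 | ... | Z^L
+
+ # Example: two GKC layers, 5 nodes, 8 masks each -> embedding of size 16.
+ Z1, Z2 = np.random.rand(5, 8), np.random.rand(5, 8)
+ embedding = graph_readout([Z1, Z2])
+ ```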
57
+
58
+ *Cross-entropy loss.* Our learning problem can be formulated as follows: given a set $\{\mathcal{G}_1,\ldots,\mathcal{G}_B\}$ of $B$ training input graphs with node labels belonging to the dictionary $\mathbf{D}$ and associated class labels $y_1, \ldots, y_B$ , our goal is to find the optimal parameters $\Phi$ and the masks $\mathcal{M}_i$ , with corresponding edge observation probabilities $\Theta$ , of the model $g$ that minimize the cross-entropy loss
59
+
60
+ <span id="page-3-0"></span>
61
+ $$loss_{CE} = \sum_{i=1}^{B} cross-entropy(\mathbb{E}_{\Theta}[g_{\mathcal{M}_{1}...\mathcal{M}_{m},\Phi}(\mathcal{G}_{i})], y_{i}). \quad (3)$$
62
+
63
+ *Jensen-Shannon divergence loss.* Depending on the number of structural masks to learn, we experimentally observed that different structural masks can give a similar response over the same node. To mitigate this behavior and push the model to learn a more descriptive node feature vector, we propose to regularize the structural mask learning process by adding a Jensen-Shannon divergence (JSD) loss. The JSD is computed between the feature dimensions, considered as probability distributions over the nodes of the graph.
64
+
65
+ Let $P_i = \{\alpha z_i(v) \,|\, v \in V\}$ be the probability distribution induced by the *i*-th mask over the graph nodes, where $\alpha$ is a scaling factor ensuring that $\sum_v P_i(v) = 1$. We define the JSD loss as
66
+
67
+ $$loss_{JSD} = -H\left(\sum_{i=1}^{m} P_i\right) + \sum_{i=1}^{m} H\left(P_i\right), \qquad (4)$$
68
+
69
+ where $H(P)$ is the Shannon entropy. The final loss we optimize is the sum of 1) the cross-entropy loss of Eq. 3 and 2) $loss_{JSD}$ multiplied by a weighting factor $\lambda$ , i.e.,
70
+
71
+ $$loss = loss_{CE} + \lambda \, loss_{JSD}. \tag{5}$$
72
+
73
+ Note that while previous work in the literature used the JSD to generalize the widely used cross-entropy loss function [48], here we use it as a regularization term. That is, rather than minimizing the JSD or cross-entropy in order to limit the distance to a target class [49], we maximize the JSD to force the learned distributions to be as far away from one another as possible.
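+
+ A minimal sketch of this regularizer (our own illustration, assuming the kernel responses of a graph are collected in an $m \times |V|$ matrix) could be:
+
+ ```python
+ import numpy as np
+
+ def entropy(p, eps=1e-12):
+     """Shannon entropy of a non-negative vector, normalized to sum to one."""
+     p = p / (p.sum() + eps)
+     return -np.sum(p * np.log(p + eps))
+
+ def jsd_loss(Z, eps=1e-12):
+     """Eq. (4): loss_JSD = -H(sum_i P_i) + sum_i H(P_i).
+
+     Z: (m, num_nodes) non-negative kernel responses; row i holds the
+     responses of mask i over the graph nodes. The sum of the P_i is
+     renormalized inside entropy(), i.e., treated as the mixture.
+     """
+     P = Z / (Z.sum(axis=1, keepdims=True) + eps)
+     return -entropy(P.sum(axis=0)) + sum(entropy(P[i]) for i in range(len(P)))
+
+ # Adding lambda * jsd_loss(Z) to the loss pushes the mask distributions apart.
+ ```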
74
+
75
+ When optimizing the structural masks, we need to consider two main challenges. First, the number of nodes of the subgraphs is not fixed and can, in principle, vary from 1 to n. Second, since graph kernel functions are not in general differentiable, the automatic differentiation mechanism of common neural network optimization libraries cannot be directly applied to our model.
76
+
77
+ *Structural masks representation.* When defining the space of graphs on which we want to optimize the structural masks $\mathcal{M}$ , we have to consider the possible substructures present in the input graph that characterize it as belonging to a specific class. Assuming that this knowledge is not available *a priori*, we should allow the model to learn structures as large as the graph itself. Unfortunately, since the space of graphs grows exponentially with the number of nodes, this would be impractical. Moreover, under the assumption that localized characterizing substructures are present, graphs of a few nodes usually suffice to capture them. In our implementation, we fix a maximum number of nodes $p_{max}$ for each substructure and optimize for structural masks in the space
80
+
81
+ $$\mathcal{M} \in \bigcup_{p=1}^{p_{max}} \left( \tilde{\mathbf{G}}_p \times \mathbf{D}^p \right) , \tag{6}$$
82
+
83
+ where $\tilde{\mathbf{G}}_p$ indicates the set of all possible connected graphs of $p$ nodes, and $\mathbf{D}^p$ the labeling space of $p$ nodes. The impact of this hyper-parameter is studied in the ablation study of Section IV. With a slight abuse of notation, here $\mathcal{M}$ represents a discrete graph with discrete node labels, but we will relax it to its stochastic version in the subsequent paragraph.
84
+
85
+ *Structural masks optimization.* Graph kernels, as functions operating on discrete graph structures, are in general not differentiable. In order to be able to optimize the structural masks, we relax them to be distributions over graphs and take the expectation of the kernel over said distributions. Namely, let $\mathcal{M}_i = (V_{\mathcal{M}_i}, X_{\mathcal{M}_i}, E_{\mathcal{M}_i})$ be our *i*-th structural mask, where $V_{\mathcal{M}_i}$ is the set of $p$ nodes, $X_{\mathcal{M}_i}$ is the set of $p$ discrete probability distributions over the $|\mathbf{D}|$ labels, and $E_{\mathcal{M}_i}$ is the edge observation model consisting of $\binom{n}{2}$ independent Bernoulli trials with (learnable) parameters $\theta_1, \ldots, \theta_{\binom{n}{2}}$ .
86
+
87
+ A sample $G \sim \mathcal{M}_i$ is a graph $G = (V, X, E)$ whose edge set $E$ is the result of sampling from the Bernoulli trials to decide whether each corresponding edge is present, and whose labels $X$ are individually sampled from the corresponding node label distributions. With this model at hand, we take as the kernel response between the subgraph $\mathcal{N}_{\mathcal{G}}^r(v)$ and the mask $\mathcal{M}_i$ the expectation of the kernel over the distribution of graphs associated with $\mathcal{M}_i$ , *i.e.*,
88
+
89
+ $$z_i(v) = \mathbb{E}_{G \sim \mathcal{M}_i} \left[ \mathcal{K}(G, \mathcal{N}_{\mathcal{G}}^r(v)) \right]. \tag{7}$$
90
+
91
+ The gradient is then taken with respect to the edge observation parameters $\Theta$ .
92
+
93
+ Clearly, computing the full expectation for the kernel response and its gradient is computationally too demanding. For this reason, in the next section we discuss a sampling strategy for both.
94
+
95
+ In order to efficiently compute the kernel responses and their gradient with respect to the edge observation parameters $\Theta$ , we adopt an importance sampling strategy. Importance sampling is a Monte Carlo method for evaluating the expectation of a function over a particular distribution while only having samples generated from a distribution different from the one of interest. More formally, assume we have a function
96
+
97
+ ![](_page_4_Figure_0.jpeg)
98
+
99
+ <span id="page-4-1"></span>Fig. 2. Example of model training. Top: the matrix of edge observation probabilities associated with the mask $\mathcal{M}_i$ for successive epochs. Bottom: the corresponding mode graphs.
100
+
101
+ $f: X \rightarrow \mathbb{R}$ over a domain X and two distributions p and g over this domain. Then we have
102
+
103
+ $$\mathbb{E}_{p}[f] = \int_{X} f(x)\, p(x)\, dx = \int_{X} f(x) \frac{p(x)}{g(x)}\, g(x)\, dx = \mathbb{E}_{g}\left[ f(x) \frac{p(x)}{g(x)} \right]. \tag{8}$$
106
+
107
+ Eq. 8 shows that the expectation of f with respect to the distribution p can be obtained from the expectation with respect to the distribution g by weighting each sample with the correction term $\frac{p(x)}{g(x)}$ . Importance sampling is often used to reduce the sampling variance. Indeed, if $g(x) = \frac{1}{\mathbb{E}_p[f]} f(x) p(x)$ , then a single sample is sufficient to estimate $\mathbb{E}_p[f]$ . More generally, if the samples are concentrated where both the response f and the density p are high, then the sampling variance is greatly reduced. To this end, and observing that the first-order random graph model associated with each mask $\mathcal{M}_i$ is strongly unimodal, being the combination of independent Bernoulli trials, we concentrate our sampling around the mode of the distribution. In particular, from the distribution of graphs associated with the mask $\mathcal{M}_i$ we sample the mode $G_M$ and the graphs that can be obtained from the mode with a single edge edit operation, i.e., the neighboring graphs. Recall that in our setting the mode is the graph in which edge e is present if and only if $\theta_e > 0.5$ . Letting $p_e = \max(\theta_e, 1 - \theta_e)$ , we have that $p_e$ is the probability that edge e is sampled as present or absent as in the mode $G_M$ , and thus the probability of sampling $G_M$ is
108
+
109
+ $$P(G_M) = \prod_e p_e \tag{9}$$
110
+
111
+ and the density of any neighboring graph $G_e$ obtained by editing the edge e is
112
+
113
+ $$P(G_e) = P(G_M) \frac{1 - p_e}{p_e} \,. \tag{10}$$
114
+
115
+ Assuming that we sample uniformly from the set consisting of the mode and its neighboring graphs, our sampling density is $\frac{1}{\binom{n}{2}+1}$ over that set, and 0 otherwise.
116
+
117
+ With this setup, we can estimate the expected kernel response as
120
+
121
+ $$z_{i}(v) = \mathbb{E}_{G \sim \mathcal{M}_{i}} [\mathcal{K}(G, \mathcal{N}_{\mathcal{G}}^{r}(v))] \approx \frac{\binom{n}{2} + 1}{|\{G\}|} \sum_{G \sim \mathcal{M}_{i}} P(G) \mathcal{K}(G, \mathcal{N}_{\mathcal{G}}^{r}(v)) = \frac{P(G_{M}) \left(\binom{n}{2} + 1\right)}{|\{G\}|} \left(\mathcal{K}(G_{M}, \mathcal{N}_{\mathcal{G}}^{r}(v)) + \sum_{e} \mathcal{K}(G_{e}, \mathcal{N}_{\mathcal{G}}^{r}(v)) \frac{1 - p_{e}}{p_{e}}\right) \tag{11}$$
123
+
124
+ where $\{G\}$ is the set of samples, P(G) is the sampling probability, $G_M$ is the mode graph, and the summation over e refers to the summation over the sampled neighboring graphs $G_e$ , where e is the edge in which they differ from the mode. Note that the first term is due to the mode; if the mode is not among the samples, that term can be dropped.
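+
+ The following is a minimal sketch (our own illustration, not the authors' implementation) of this estimator, with a generic `kernel` callable standing in for any graph kernel; here we enumerate the mode and all of its one-edge-edit neighbors:
+
+ ```python
+ import numpy as np
+ from itertools import combinations
+
+ def expected_kernel_response(theta, subgraph, kernel):
+     """Importance-sampled estimate of Eq. (11) for one mask and one subgraph.
+
+     theta:    (n, n) symmetric Bernoulli edge parameters of the mask.
+     subgraph: the r-hop subgraph, in whatever form `kernel` expects.
+     kernel:   callable(adjacency, subgraph) -> non-negative kernel value.
+     """
+     n = theta.shape[0]
+     mode = (theta > 0.5).astype(int)                 # mode graph G_M
+     np.fill_diagonal(mode, 0)
+     edges = list(combinations(range(n), 2))
+     p = {e: max(theta[e], 1.0 - theta[e]) for e in edges}
+     P_mode = np.prod(list(p.values()))               # Eq. (9)
+
+     total = kernel(mode, subgraph)                   # mode term
+     for e in edges:                                  # neighbors G_e of the mode
+         neighbor = mode.copy()
+         neighbor[e] = neighbor[e[::-1]] = 1 - neighbor[e]
+         total += kernel(neighbor, subgraph) * (1.0 - p[e]) / p[e]  # Eq. (10)
+     # Enumerating the mode and all neighbors makes the
+     # (binom(n,2) + 1) / |{G}| factor of Eq. (11) equal to one.
+     return P_mode * total
+ ```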
125
+
126
+ <span id="page-4-0"></span>Similarly, we differentiate the expectation as follows:
127
+
128
+ $$\frac{\partial z_{i}(v)}{\partial \theta_{e'}} \approx \frac{\binom{n}{2} + 1}{|\{G\}|} \left( \mathcal{K}(G_{M}, \mathcal{N}_{\mathcal{G}}^{r}(v)) \frac{P(G_{M})}{p_{e'}} \frac{\partial p_{e'}}{\partial \theta_{e'}} + \sum_{e \neq e'} \mathcal{K}(G_{e}, \mathcal{N}_{\mathcal{G}}^{r}(v)) \frac{P(G_{e})}{p_{e'}} \frac{\partial p_{e'}}{\partial \theta_{e'}} + \mathcal{K}(G_{e'}, \mathcal{N}_{\mathcal{G}}^{r}(v)) \frac{P(G_{e'})}{1 - p_{e'}} \frac{\partial (1 - p_{e'})}{\partial \theta_{e'}} \right). \tag{12}$$
129
+
130
+ Here too, we have a term associated with the mode, plus one associated with the neighbor obtained by editing the edge e'. Either term can be dropped if the corresponding graph is not among the samples.
131
+
132
+ Taking out the factor $\frac{P(G_M)}{p_{e'}} \frac{\partial p_{e'}}{\partial \theta_{e'}}$ , we obtain
133
+
134
+ $$\frac{\partial z_{i}(v)}{\partial \theta_{e'}} \approx \frac{P(G_{M})}{p_{e'}} \frac{\left(\binom{n}{2} + 1\right)}{|\{G\}|} \frac{\partial p_{e'}}{\partial \theta_{e'}} \left( \mathcal{K}(G_{M}, \mathcal{N}_{\mathcal{G}}^{r}(v)) + \sum_{e \neq e'} \mathcal{K}(G_{e}, \mathcal{N}_{\mathcal{G}}^{r}(v)) \frac{1 - p_{e}}{p_{e}} - \mathcal{K}(G_{e'}, \mathcal{N}_{\mathcal{G}}^{r}(v)) \right). \tag{13}$$
135
+
136
+ Finally, we note that $\frac{\partial p_{e'}}{\partial \theta_{e'}} = \pm 1$ , with the sign being positive when the edge is observed in the mode, negative when it is not.
137
+
138
+ Recall that, in our setup, each node of the graphs and masks has an associated distribution over node labels. During inference, we sample from these distributions to obtain the labeled graphs to pass to the kernel. During the learning phase, we compute the gradient of the output $z_i(v)$ with respect to the feature distribution of the mask by differentiating its expectation with a similar Monte Carlo approach. However, in this case, the expectation is taken not only over the structures of the graphs sampled from the mask, but also over the labels sampled from the corresponding distribution $\ell$ associated with each node in the mask:
139
+
140
+ $$z_i(v) = \mathbb{E}_{G \sim \mathcal{M}_i,\, l \sim \ell^{\otimes n}} [\mathcal{K}(G^l, \mathcal{N}_{\mathcal{G}}^r(v))], \tag{14}$$
142
+
143
+ ![](_page_5_Figure_0.jpeg)
144
+
145
+ <span id="page-5-0"></span>Fig. 3. Assuming the set of masks $\mathbf{M}$ spans the space of graphs over n nodes, if the radius of each $\mathcal{N}_{\mathcal{G}_1}^r(v)$ is such that it spans the entire graph, the responses on the nodes of $\mathcal{G}_1$ will be identical and will peak for any mask fractionally equivalent to $\mathcal{G}_1$ . Here, we use blue and red to highlight the masks with the highest response on $\mathcal{G}_1$ and $\mathcal{G}_2$ , respectively.
146
+
147
+ where $G^l$ is the graph with attached node labels l and $\ell^{\otimes n}$ is the concatenated label distribution over all the nodes of the mask.
148
+
149
+ Assuming again that the distribution over the structure is concentrated around the mode $G_M$ , we can then estimate the gradient with respect to the label distribution $\ell^{\otimes n}$ as
150
+
151
+ $$\nabla_{\ell^{\otimes n}} z_{i}(v) \approx \frac{1}{|\{l\}|} \sum_{l \in \ell^{\otimes n}} (l - \bar{l}) \left( \mathcal{K}(G_{M}^{l}, \mathcal{N}_{\mathcal{G}}^{r}(v)) - \mathcal{K}(G_{M}^{\bar{l}}, \mathcal{N}_{\mathcal{G}}^{r}(v)) \right), \tag{15}$$
154
+
155
+ where $\bar{l}$ is the label set sampled in the forward pass, and l and $\bar{l}$ taken as vectors are the concatenated one-hot encodings of the sampled labels.
156
+
157
+ Figure 2 shows the matrix of edge observation probabilities associated with a sample mask $\mathcal{M}_i$ for successive training epochs (top) and the corresponding mode graphs (bottom). Note that, as the epoch number increases, the probability distribution converges to the mode. This allows us to consider only the mode during inference, which results in a faster, deterministic prediction, i.e., we do not need to estimate the expected value of the output of our model. Note that here, as well as in the experiments described in Section IV, we initialized the edge observation probabilities with random samples from a uniform distribution. Alternatively, one could initialize these probabilities to reflect the presence of frequent subgraphs mined from the training data or predefined patterns of interest, such as stars and cycles. We decided instead to limit the bias we introduce in the model, with an additional motivation being that random initialization appears to give the network sufficient flexibility to learn masks that resemble discriminative patterns found in the input graphs (see Section IV for further details).
158
+
159
+ In [33], Morris *et al.* show that the expressive power of standard GNNs (*i.e.*, GNNs that only consider the immediate neighbors of a node when updating the labels) is equivalent to that of the 1-WL test. In this subsection, we discuss our model's expressive power and argue that it is higher than that of the 1-WL test and thus of standard message-passing GNNs.
160
+
161
+ ![](_page_5_Figure_9.jpeg)
162
+
163
+ <span id="page-5-1"></span>Fig. 4. $\mathcal{G}_1$ and $\mathcal{G}_2$ cannot be distinguished by the 1-WL test; however, the GKNN correctly identifies them as non-isomorphic graphs. If $\mathbf{M}$ is the set of all structural masks representing stars of up to $n-1$ nodes, no star mask will achieve maximum similarity on the radius-1 subgraphs centered on the nodes of $\mathcal{G}_1$ , while the same maximal response will be achieved when considering the radius-1 subgraphs centered on the nodes of $\mathcal{G}_2$ .
164
+
165
+ For this to be the case, our model should be able both to distinguish every pair of graphs that can be distinguished by the 1-WL test and to distinguish between pairs of graphs where the 1-WL test fails.
166
+
167
+ *Graphs distinguished by the 1-WL test.* Consider two unlabelled graphs $\mathcal{G}_1 = (V_1, E_1)$ and $\mathcal{G}_2 = (V_2, E_2)$ with the same number of nodes $n = |V_1| = |V_2|$ , such that the 1-WL test can distinguish between them. Then we can also build a GKNN able to distinguish between them by adopting the WL kernel in the GKC layer, with a radius r such that the subgraphs $\mathcal{N}_{\mathcal{G}_1}^r(v)$ span the entire graph $\mathcal{G}_1$ (and similarly for $\mathcal{G}_2$ ). In fact, the set of labels computed against any one mask will be the same for every node in graph $\mathcal{G}_1$ ( $\mathcal{G}_2$ ), since all the subgraphs are identically equal to the graph itself. Assuming that we have a set of structural masks $\{\mathcal{M}_1, \cdots, \mathcal{M}_m\}$ that spans the space of graphs over n nodes, then, under the hypothesis that the 1-WL test can distinguish between $\mathcal{G}_1$ and $\mathcal{G}_2$ , the set of labels computed by the GKC layer (i.e., the responses of the masks), while identical on all the nodes of each graph, will differ between the two graphs (see Figure 3). In particular, the response will peak for any mask fractionally equivalent to the graph itself [50]. Therefore, under any reasonable pooling strategy, the GKNN will be able to distinguish between the two graphs.
168
+
169
+ We can easily see that the same holds for an arbitrary subgraph radius r under the assumption of a sufficient number of layers. For simplicity, we will assume $r = 1$ , but, as shown in the previous argument, increasing r increases the descriptive power of the GKC layer. Note that the full GKC layer can be seen as 1) a label propagation step followed by 2) a hashing step. The label propagation is given by the computation of the WL kernel against a structural mask, due to the 1-WL iterations on the subgraphs.
170
+
171
+ The hashing, on the other hand, comes from the implicit dot product against the structural masks. This is, in essence, a projection onto the space spanned by them and forces a hashing where the adopted masks form a representation space for the hashing itself. Note that, under the assumption of a sufficiently large (possibly complete) set of structural masks $\{\mathcal{M}_1,\cdots,\mathcal{M}_m\}$ spanning the space of graphs over n nodes,
172
+
173
+ <span id="page-6-0"></span>TABLE I BEST MODELS' AVERAGE TRAINING TIME PER EPOCH (OVER FOLDS) AND INFERENCE TIME FOR THE DATASETS CONSIDERED IN THE PRESENT WORK.
174
+
175
+ | | Per epoch training time (s) | | | | Per sample inference time (ms) | | | |
+ |---|---|---|---|---|---|---|---|---|
+ | | Ours (WL) | GIN | DiffPool | ECC | Ours (WL) | GIN | DiffPool | ECC |
+ | PROTEINS | 111.92 ± 59.85 | 1.55 ± 0.14 | 5.45 ± 0.20 | 2.68 ± 0.21 | 14.73 ± 5.48 | 4.09 ± 0.94 | 13.71 ± 0.16 | 9.02 ± 1.29 |
+ | MUTAG | 10.01 ± 1.92 | 0.21 ± 0.04 | 0.70 ± 0.03 | 0.32 ± 0.05 | 16.66 ± 1.12 | 3.29 ± 1.18 | 9.25 ± 0.18 | 7.28 ± 1.03 |
+ | PTC | 9.30 ± 3.67 | 0.40 ± 0.07 | 1.49 ± 0.10 | 0.59 ± 0.05 | 11.76 ± 4.68 | 3.24 ± 1.22 | 9.38 ± 0.19 | 8.18 ± 0.67 |
+ | NCI1 | 83.45 ± 1.80 | 5.86 ± 0.72 | 17.90 ± 0.52 | 9.07 ± 0.44 | 8.02 ± 0.36 | 4.43 ± 0.69 | 9.35 ± 0.16 | 8.91 ± 1.71 |
+ | IMDB | 36.80 ± 3.59 | 1.44 ± 0.22 | 4.25 ± 0.15 | 1.88 ± 0.12 | 8.90 ± 0.16 | 4.23 ± 0.72 | 9.25 ± 0.16 | 7.64 ± 1.15 |
+ | ZINC | 225.16 | 6.21 | 21.33 | 12.45 | 2.16 | 4.33 | 5.92 | 4.80 |
179
+
180
+ the dot product induced hashing will not introduce conflicts between nodes with different labels. In other words, distinct labels under the 1-WL iterations will eventually be mapped to distinct labels at some GKC layer. Note, however, that under these assumptions the expressive power is greater than or equal to that of 1-WL, and labels that are equivalent under the 1-WL iteration can indeed be mapped to distinct labels by some GKC layer. In fact, the propagation for 1-WL uses only the star of radius 1 around a particular node, while our scheme uses the full 1-hop subgraph, which includes connections between neighbors. Therefore, given a sufficient number of GKC layers and a reasonable pooling strategy, the GKNN will be able to distinguish between the two input graphs.
181
+
182
+ *Graphs where the 1-WL test fails.* Not only can the GKNN distinguish between pairs of graphs that are correctly distinguished by the 1-WL test, but it also succeeds where the 1-WL test fails. To see that this is the case, consider the standard example of two graphs with 6 nodes, where $\mathcal{G}_1$ is the dumbbell graph obtained by connecting two 3-node cycles with one edge, while $\mathcal{G}_2$ is a $2 \times 3$ lattice graph (see Figure [4](#page-5-1)). Both graphs have the same degree distribution, with 4 nodes of degree 2 and 2 nodes of degree 3. As explained in [\[51\]](#page-12-29), [\[52\]](#page-12-30), despite being non-isomorphic, the two graphs are considered identical by the 1-WL test (in other words, the WL relabeling procedure converges to the same set of labels for the two graphs). To see why this happens, consider that through the propagation phases each node is only made aware of what degree other nodes in the graph have, and not of how many such nodes can be reached. However, we can easily see that this is not an issue for the GKNN.
183
+
184
+ Assume we have a GKC layer with radius $r = 1$ that adopts a WL kernel and a set of structural masks $\mathbf{M} = \{\mathcal{M}_1, \cdots, \mathcal{M}_m\}$ representing all stars of up to $n - 1$ nodes, where n denotes the number of nodes of the input graph. The presence of a triangle in the graph induces the existence of at least three subgraphs of radius 1 with connections between neighbors, *i.e.*, subgraphs that are not stars. On these subgraphs, no star mask will achieve maximum similarity under the WL kernel. With a pooling process that normalizes the mask responses with respect to size and maximizes over the masks on the same node, each node will have the same maximal response if its subgraph contains no triangle and a lower response otherwise. A min-pooling process over the node features will then be able to discriminate between graphs with and without triangles. Now consider the graphs $\mathcal{G}_1$ and $\mathcal{G}_2$ introduced above, on which the 1-WL test fails. As shown in Figure [4](#page-5-1), the subgraphs built around the nodes of $\mathcal{G}_1$ and $\mathcal{G}_2$ are structurally different, *i.e.*, triangles in $\mathcal{G}_1$ and 3-node path graphs in $\mathcal{G}_2$ . As a consequence, the feature vectors $x(v)$ over the nodes of the two graphs will be different, allowing the GKNN to distinguish between them.
185
+
186
+ *Empirical evaluation.* To further strengthen the argument that our network is able to distinguish graphs where the 1-WL test fails, we empirically evaluate the ability of a GKNN to distinguish between the two graphs of Figure [4](#page-5-1). Specifically, we train a GKNN using the WL kernel with a single mask whose maximum number of nodes is set to either 3 or 4. In both cases, when we inspect the learned mask we find that its mode corresponds to the 3-node cycle (triangle). Indeed, the presence of triangles is what distinguishes $\mathcal{G}_2$ from $\mathcal{G}_1$ and allows the GKNN to discriminate between the two graphs successfully.
187
+
188
+ The computational complexity of our model is $O(N K(s))$ , where N is the number of nodes of the input graph and $K(s)$ is the complexity of the kernel computation, which in turn depends on the size s of the subgraphs. Note that $s \leq N$ , where equality is attained for every subgraph in the case of complete graphs. More precisely, if we limit ourselves to the case of graphs with bounded degree, we have $s = O(\min(N, d_{max}^r))$ , where $d_{max}$ denotes the maximum node degree.
189
+
190
+ In terms of runtime, for the datasets considered in this paper this translates to the results shown in Table [I](#page-6-0), where we report the average epoch training time and the average inference time per dataset obtained during the grid search (for the best model, for each dataset and fold). Both training and testing of each model were performed using an Intel(R) Xeon(R) Silver 4114 CPU @ 2.20GHz processor and an NVIDIA Tesla V100. Note that, in order to better exploit the computational power of GPUs, we implemented a GPU version of the WL kernel. Although the resulting inference runtime is higher than that of the three widely used baselines we compared our model to (ECC [\[53\]](#page-12-31), DiffPool [\[54\]](#page-12-32), and GIN [\[32\]](#page-12-10)), it is of a similar order of magnitude. When it comes to training, on the other hand, the computational bottleneck of our network is the computation of the numerical gradient, which in turn explains the subpar runtime.
191
+
192
+ The closest works to ours that we are aware of are [13], [14]. Nikolentzos *et al.* propose a GNN where the first layer consists of a series of hidden graphs that are compared against the input graph using a random walk kernel [13]. However, due to their optimization strategy, their model only works with a single differentiable kernel, whereas our model allows us to tap into the expressive power of any type of kernel. Moreover, in the architecture of [13], the learned structural masks are assumed to be complete weighted graphs, whereas we allow for graphs with arbitrary structure. Finally, note that [14] works in much the same way as [13], as it relies on the underlying graph kernel being differentiable and, more specifically, it makes use of the random walk graph kernel. Another similarity with [13] is that the model proposed in [14] is also not purely structural, as opposed to ours. Instead, it learns matrices of weights which, in order to be interpreted as learned structural patterns, need first to be thresholded using a ReLU function that prunes the edges associated with smaller weights.
2202.01651/main_diagram/main_diagram.drawio ADDED
@@ -0,0 +1 @@
 
 
1
+ <mxfile host="Electron" modified="2022-01-17T10:07:21.418Z" agent="5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) draw.io/16.1.2 Chrome/96.0.4664.55 Electron/16.0.5 Safari/537.36" etag="tH5OiZJ9857JSBiFI3xS" version="16.1.2" type="device"><diagram id="3YIRq13hLHG6MNtZM4JT" name="Seite-1">3Vldj5s4FP01eZwKQwjJ4yTpbFe7q/2YSm33pfKAA1YBR8YzSfbX7wVswI6BzEyTtCNFEb7Y1+bce861YeKtsv0vHG+TP1hE0onrRPuJt564bjBbwH9pONSGxdyvDTGnUW1CreGe/kek0ZHWRxqRQusoGEsF3erGkOU5CYVmw5yznd5tw1J91i2O5YxOa7gPcUqOun2ikUhq69wNWvsHQuNEzYzUA2dYdZYuigRHbNeZy3s/8VacMVFfZfsVSUvsFC71gu567jYL4yQXpwzAm9+ePv4b+Hznf6b3f/59uPn6+82s9vKE00f5wHKx4qAQiDl73MpuhAuyt+GOH1ITxnZdqHlayBLCMiL4AbpIR3M5QiYIUh52Ldz+TNqSDtSeMmIZ4rhx3aIAFxKIZ4CCxkEBTPKIlE6cibfcJVSQ+y0Oy7s74AHYEpHBpGsEl5iHMrEBhmUhOPvWJJMHll5wuyD2h68X2amO7NQCrGsD1j0XsO4bAdbTgfWuDqz3RoB1dWBtUnBZYKdvBFhkiOzVgfV/dmCvB918HDqSR7fl5gdaYYqLgoYVKJiLY3MHRB3xDcuFxBQtGghJpDZM3xFAC37KxkmKBX3S57RhKmf4i1FYTZP4vqHVN8gIS8EeeUjkqO72yXQU2EVf+QFwYyKO/FQRbp765UFfWII+LX/+CjJaTILl+pDjDCIarMFW/+QdGLdi+YbGjxygZDm0y17w76+2VPp5FfvMVNnQNF2xlPHKlze7XQR3d5erG8iib2huSbL52baQNpLq8fpYJQz0uU1jxqlIsioobyAYi/FoNBXoItFQ6TEQjVAShNTPj2FiwosuU0RCBO6Jj1VvTxZWsqfic3nvnS9bX2TP8nq97zYOqpEDMJ1BZfNL9147rGqpcceVMcUPJF3i8FtcrVJlSs5yUtbVlMZ5+VjghoB9WeYLhfPxrbzxwIRg2WvLA6BZ6fC4AtY6O0q93ux03jlInd7NmnCZkoR8oySZpeTUkuQ6hiOTQD01CVIVHzrdtmWHYmDBxu5x5gwuy1v4A93hop7fGKwWwzabgpylhLq2c72uArBFEqQhffEVTLCUJepXZivzI1wkFduRLgM6+dwjYRim4pFs5NEdTZXvjiAEmiKgEUVo1cftyg8alJ+rqYAqrKMysBhVAWeml6nLisDM4O70pSJgkPNmcR4R8KaeNo/aAPetC/n+UP8ryoDtLZQuA5zsMI8aHeAv1YHeHcDziH5cs+07hmHK9u0YxvTBpLpg25+L58qVIojtrdL5iD5apE8murltQCceQZ/NdKN+j9V7c3vw4xR822tRnelbzh5SkpWecij+edhWf1pe2cleDbZUdpPHnYOXsZ3mdZ4dkSujUVTOYj3jDZ8iCuhM8/gfmcHIOecRz8jF5sjXPeIFFlKZKfvdjnhBf6jLL3EAMUT7V4hspeUc07xfy18f3t7T0ivie1Ishz+1me+wLFGzvibxnx81aLafNmtCt9+Hvff/Aw==</diagram></mxfile>
2202.01651/main_diagram/main_diagram.pdf ADDED
Binary file (18.3 kB). View file
 
2202.01651/paper_text/intro_method.md ADDED
The diff for this file is too large to render. See raw diff
 
2202.03057/main_diagram/main_diagram.drawio ADDED
The diff for this file is too large to render. See raw diff
 
2202.03057/main_diagram/main_diagram.pdf ADDED
Binary file (90.7 kB). View file
 
2202.03057/paper_text/intro_method.md ADDED
@@ -0,0 +1,53 @@
1
+ # Introduction
2
+
3
+ Quality-Diversity (QD) Optimization algorithms [@cully2017quality; @chatzilygeroudis2021quality] have changed the classical paradigm of optimization: inspired by natural evolution, the essence of QD methods is to provide a large and diverse set of high-performing solutions rather than only the best one. This core idea is crucial to many applications, from robotics, where it allows robots to adapt to unexpected scenarios [@cully2015robots], to game design [@khalifa2018talakat] or investment strategies [@zhang2020autoalpha]. This becomes particularly interesting when the optimization process is done using a surrogate model instead of the real one, as the highest-performing solution according to the simulation might not transfer well to reality.
4
+
5
+ Quality Diversity algorithms tackle this issue by not only optimizing the fitness but also aiming to cover a variety of user-defined features of interest. Novelty Search [@lehman2011abandoning; @lehman2011evolving] was the first method to explicitly promote diversity by optimizing the novelty of a solution. Building on this paradigm, illumination algorithms [@mouret2015illuminating] have been developed with a core principle: \"illuminating\" the search space by creating local descriptor niches where the solution with the highest fitness is extracted. Namely, MAP-Elites [@mouret2015illuminating] explicitly divides the descriptor space into niches by defining a grid [@vassiliades2016scaling], and each niche stores its best solution. The method has been further extended to tackle Multi-Task Optimization [@mouret2020quality], to efficiently evolve large neural networks with Policy Gradients [@nilsson2021policy], and to be robust to noise [@flageat2020fast]. MAP-Elites has also been used in a multi-objective scenario where one grid per objective is maintained.
6
+
7
+ On the other hand, multi-objective optimization [@deb2014multi] is a very popular and useful field of research and application. Indeed, most applications involve multiple conflicting objectives, and it is key to have a view of the different possible trade-offs to make a decision. For instance, training a robot to maximize its forward speed while minimizing the energy it uses involves two conflicting terms. While it is possible to combine the objectives into one, multi-objective approaches look for a set of Pareto-optimal solutions, *i.e.*, solutions none of which can be considered better than another when every objective is of importance.
8
+
9
+ Traditional multi-objective optimization methods are designed to generate a set of solutions that approximate the set of all optimal trade-offs, called the Pareto Front. Evolutionary algorithms are a natural approach, and numerous methods have been proposed, differing in their underlying selection scheme: the Non-dominated Sorting Genetic Algorithm (NSGA-II) [@deb2002fast], the Strength Pareto Evolutionary Algorithm (SPEA2) [@zitzler2001spea2], and the multi-objective evolutionary algorithm based on decomposition (MOEA/D) [@zhang2007moea] are examples of such approaches. Those methods generally find high-performing solutions by ensuring diversity in the objective space. However, to the best of our knowledge, no method explicitly tackles the issue of Multi-Objective Quality Diversity. A method of interest was introduced with Pareto Gamuts [@makatura2021pareto], where the authors tackle multi-objective design problems under different contexts, with contexts defined as different values of a real-valued variable. In this work, we consider more general descriptors in our search for diversity.
10
+
11
+ In this work, we propose a novel method for Multi-Objective Quality Diversity (MOQD) optimization. Namely, we introduce Multi-Objective MAP-Elites (MOME), which divides the descriptor space into niches with a tessellation method and illuminates each cell of the tessellation by filling it with its set of Pareto-optimal solutions. As such, it allows us to find solutions that span the space of descriptors while being locally Pareto-efficient. We evaluate MOME on four different tasks: a traditional real-valued optimization problem taken from the literature and three robotic control tasks using a physical simulator. We compare MOME against standard baselines and show that our approach not only helps to find more diverse solutions but that diversity in the descriptor space sometimes helps to find better solutions in terms of Pareto efficiency. Our implementation is publicly available and fully written in Python using the high-performing Jax [@jax2018github] library, which has been shown to significantly accelerate QD applications [@Lim2022Accelerated]. We believe this will prove very useful to build upon for further advancements in MOQD optimization.
12
+
13
+ # Method
14
+
15
+ We now introduce our framework for Multi-Objective Quality Diversity (MOQD), adapted from a recent work [@fontaine2021differentiable]. The Quality Diversity (QD) problem assumes a single objective function $f: \mathcal{X} \rightarrow \mathbb{R}$, where $\mathcal{X}$ is called the search space, and $d$ descriptors $c_i: \mathcal{X} \rightarrow \mathbb{R}$, summarized as a single descriptor function $\mathbf{c}: \mathcal{X} \rightarrow \mathbb{R}^d$. We note $S = \mathbf{c}(\mathcal{X})$ the descriptor space formed by the range of $\mathbf{c}$.
16
+
17
+ QD algorithms of the MAP-Elites family discretize the descriptor space $S$ via a tessellation method. Let $\mathcal{T}$ be the tessellation of $S$ into $d$ cells $S_i$. The goal of QD methods is to find a set of solutions $\mathbf{x}_i \in \mathcal{X}$ so that each solution $\mathbf{x}_i$ occupies a different cell $S_i$ of $\mathcal{T}$ and maximizes the objective function within that cell. The QD objective can thus be formalized as follows:
18
+
19
+ $$\begin{align}
20
+ \max\limits_{\mathbf{x} \in \mathcal{X}} \sum\limits_{i=1}^d f(\mathbf{x}_i), \ \text{where} \ \forall i, \ \mathbf{c}(\mathbf{x}_i) \in S_i.
21
+ \end{align}$$
22
+
23
+ We consider an optimization problem over the search space $\mathcal{X}$ with $k$ associated objective functions $f_i : \mathcal{X} \rightarrow \mathbb{R}$, summarized as a single function $\mathbf{f}: \mathcal{X} \rightarrow \mathbb{R}^k$. In multi-objective optimization, the goal is to find a solution $\mathbf{x} \in \mathcal{X}$ that maximizes all the objectives at once. The multi-objective problem can be formalized as
24
+
25
+ $$\begin{align}
26
+ \max\limits_{\mathbf{x} \in \mathcal{X}}{\left(f_1(\mathbf{x}), ..., f_k(\mathbf{x})\right)}.
27
+ \end{align}$$
28
+
29
+ In most problems, there does not exist any solution that concurrently maximizes every objective. In fact, objectives are often antagonistic, which implies that maximizing one objective alone can result in minimizing the others. Comparing solutions in a multi-objective setting is challenging too. In the mono-objective MAP-Elites algorithm, a solution is replaced in its cell if another solution with a higher value of the single objective is found.
30
+
31
+ To compare solutions in the multi-objective setting, we rely on the notion of Pareto domination. Given two solution candidates $\mathbf{x}_1, \mathbf{x}_2 \in \mathcal{X}$, $\mathbf{x}_1$ Pareto dominates $\mathbf{x}_2$, noted $\mathbf{x}_1 \succ \mathbf{x}_2$, if $\forall\, 1 \leq i \leq k,\ f_i(\mathbf{x}_1) > f_i(\mathbf{x}_2)$.
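+
+ As a minimal illustration (our own sketch, following the strict definition above):
+
+ ```python
+ import numpy as np
+
+ def pareto_dominates(f1, f2):
+     """True if objective vector f1 Pareto dominates f2 (strict, as above)."""
+     return bool(np.all(f1 > f2))
+
+ def pareto_front(F):
+     """Indices of the non-dominated rows of an (n, k) objective matrix."""
+     return [i for i in range(len(F))
+             if not any(pareto_dominates(F[j], F[i])
+                        for j in range(len(F)) if j != i)]
+
+ F = np.array([[1.0, 2.0], [2.0, 1.0], [0.5, 0.5]])
+ print(pareto_front(F))  # [0, 1]: the third point is dominated by both others
+ ```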
32
+
33
+ Given a set of candidate solutions $\mathcal{S} \subseteq \mathcal{X}$, we define the Pareto front over this set, $\mathcal{P}(\mathcal{S})$, as the subset of solutions in $\mathcal{S}$ that are not Pareto dominated by any other solution in $\mathcal{S}$. In other words, the Pareto front over a set of solutions corresponds to all the best possible trade-offs with respect to the set of objectives: no solution in the Pareto front is better than all the others in the front on all the objectives at the same time. Canonical multi-objective optimization algorithms aim to find the Pareto front of solutions over the search space $\mathcal{X}$, which we call the Optimal Pareto front.
34
+
35
+ In the mono-objective optimization setting, optimization methods can be compared in terms of the best value found for the objective function. In the multi-objective setting, the optimization methods we consider return a Pareto front of solutions; thus, to compare them, we need metrics that compare Pareto fronts. One simple and robust way to compare two Pareto fronts is to compare their *hyper-volumes* [@hypervolume]. The hyper-volume $\Xi$ of a Pareto front $\mathcal{P}$ requires the user to define a reference point $\mathbf{r} \in \mathbb{R}^k$. It is defined as the Lebesgue measure of the set of points dominated by $\mathcal{P}$ [@zitzler1999multiobjective]:
36
+
37
+ $$\begin{align}
+ \Xi(\mathcal{P}) = \Gamma \left( \{ x \in \mathbb{R}^k \ | \ \exists\, s \in \mathcal{P},\ s \succ x \succ \mathbf{r} \} \right)
+ \end{align}$$
40
+
41
+ As described in Figure [1](#fig:hypervolume){reference-type="ref" reference="fig:hypervolume"}, in the case of two objectives this metric simply corresponds to the area between the front and the reference point. When comparing Pareto fronts for a given optimization problem, the reference point is fixed in advance and remains unchanged for all hyper-volume computations. The hypervolume indicator induces an order on Pareto fronts which is monotonic and invariant to scale [@knowles2002local; @zitzler1999evolutionary], its main drawbacks being its dependence on the definition of the reference point and its computational cost in high dimensions [@brockhoff2010optimal].
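+
+ For the two-objective case illustrated in Figure 1, a minimal sketch of this computation (our own illustration, assuming maximization and a mutually non-dominated front) is:
+
+ ```python
+ def hypervolume_2d(front, ref):
+     """Area dominated by a 2-objective Pareto front w.r.t. a reference point.
+
+     front: iterable of (f1, f2) mutually non-dominated points (maximization).
+     ref:   (r1, r2) reference point dominated by every front member.
+     """
+     pts = sorted(map(tuple, front))     # sort by f1; f2 then decreases
+     area, prev_f1 = 0.0, ref[0]
+     for f1, f2 in pts:                  # sum the disjoint staircase rectangles
+         area += (f1 - prev_f1) * (f2 - ref[1])
+         prev_f1 = f1
+     return area
+
+ print(hypervolume_2d([(1.0, 2.0), (2.0, 1.0)], ref=(0.0, 0.0)))  # 3.0
+ ```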
42
+
43
+ ![On the left panel, an example of a Pareto front hypervolume computation when there are $K=2$ objective functions. On the right panel, an example of two Pareto fronts when $K=2$, where the front in blue strictly dominates, in terms of hypervolume, the front in purple given the shown reference point.](images/pareto_front_hypervolume.png){#fig:hypervolume width="45%"}
44
+
45
+ In this work, we introduce the Multi-Objective Quality Diversity (MOQD) problem, which assumes $k$ objective functions $f_i: \mathcal{X} \rightarrow \mathbb{R}$, summarized as a single function $\mathbf{f}: \mathcal{X} \rightarrow \mathbb{R}^k$, and $d$ descriptors $c_i: \mathcal{X} \rightarrow \mathbb{R}$, summarized as a single descriptor function $\mathbf{c}: \mathcal{X} \rightarrow \mathbb{R}^d$.
46
+
47
+ As in the QD framework, we assume that the descriptor space $S$ is discretized via a tessellation method into $d$ cells $S_i$. In the mono-objective framework, a commonly accepted metric is the QD-score, defined as the sum of the fitnesses of the solutions in each cell. The goal of MOQD methods is to find, in each cell, a Pareto front of solutions $\mathcal{P}(S_i)$ that maximizes its hyper-volume. In other words, for each niche of the descriptor space we seek all the best possible trade-offs with respect to the objective functions $\mathbf{f}$. Following the QD-score [@pugh2015confronting], the MOQD objective can be formalized as follows:
48
+
49
+ $$\begin{align}
50
+ \max\limits_{\mathbf{x} \in \mathcal{X}} \sum\limits_{i=1}^d \Xi(\mathcal{P}_i), \ \text{where} \ \forall i, \ \mathcal{P}_i = \mathcal{P}(\{\mathbf{x} \ | \ \mathbf{c}(\mathbf{x}) \in S_i \}).
51
+ \end{align}$$
52
+
53
+ In the next sections, we will refer to this objective as the MOQD score. While our goal is to build methods that maximize the MOQD score, i.e. methods that find $d$ high-performing Pareto fronts, we may also, for comparison purposes, study the Pareto front over all the solutions found by the algorithm regardless of their descriptor, which we refer to as the global Pareto front. In practice, we found that in many cases the global Pareto front does not correspond exactly to the Pareto front of one of the tessellation cells but is rather constituted by solutions coming from several niches. Thus, the global Pareto front may have a hyper-volume larger than the largest hyper-volume over the cells' Pareto fronts.
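+
+ A minimal sketch of this score (our own illustration, reusing the `hypervolume_2d` helper sketched above for the two-objective case) could be:
+
+ ```python
+ def moqd_score(cell_fronts, ref):
+     """Sum of per-cell Pareto-front hyper-volumes; empty cells contribute 0."""
+     return sum(hypervolume_2d(front, ref) for front in cell_fronts if front)
+
+ # Example: three cells of the tessellation, one of them still empty.
+ cells = [[(1.0, 2.0), (2.0, 1.0)], [], [(3.0, 0.5)]]
+ print(moqd_score(cells, ref=(0.0, 0.0)))  # 3.0 + 0 + 1.5 = 4.5
+ ```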
2204.00559/main_diagram/main_diagram.drawio ADDED
The diff for this file is too large to render. See raw diff
 
2204.00559/paper_text/intro_method.md ADDED
@@ -0,0 +1,114 @@
1
+ # Introduction
2
+
3
+ Estimating the position and orientation of cameras from images is essential in many applications, including virtual reality, augmented reality, and autonomous driving. While the problem can be approached via a geometric pipeline consisting of image retrieval, feature extraction and matching, and a robust Perspective-n-Point (PnP) algorithm, many challenges remain, such as invariance to appearance changes or the selection of the best set of method hyperparameters.
4
+
5
+ Learning-based methods have been used in traditional pipelines to improve robustness and accuracy, e.g. by generating neural network (NN)-based feature descriptors [\[7,](#page-14-0)[16,](#page-14-1)[17,](#page-14-2)[25](#page-15-0)[,26\]](#page-15-1), combining feature extraction and matching into one network [\[30\]](#page-15-2), or incorporating differentiable outlier filtering modules [\[1,](#page-14-3)[2,](#page-14-4)[3\]](#page-14-5). Although deep 3D-based solutions have demonstrated favorable results, many prerequisites often remain, such as the need for an accurate 3D model of the scene and manual hyperparameter tuning of the remaining classical components.
6
+
7
+ The alternative end-to-end NN-based approach, termed absolute pose regression (APR), directly regresses the absolute pose of the camera from input images
8
+
9
+ <span id="page-0-0"></span><sup>1</sup> The code is available at [https://code.active.vision.](https://code.active.vision)
10
+
11
+ [\[14\]](#page-14-6) without requiring prior knowledge about the 3D structure of the surrounding environment. Compared with deep 3D-based methods, APR methods can achieve running speeds at least one order of magnitude faster, at the cost of inferior accuracy and longer training time. Although follow-up works such as MapNet [\[4\]](#page-14-7) and Kendall et al. [\[13\]](#page-14-8) attempt to improve APR methods by adding various constraints such as relative pose and scene geometry reprojection, a noticeable gap remains between APR and 3D-based methods.
12
+
13
+ Recently, Direct-PN [\[5\]](#page-14-9) achieved state-of-the-art (SOTA) accuracy in indoor localization tasks among existing single-frame APR methods. As well as being supervised by ground-truth poses, the network directly matches the input image and a NeRF-rendered image at the predicted pose. However, it has two major limitations: (a) direct matching is very sensitive to photometric inconsistency, as images with different exposures could produce a high photometric error even from the same camera pose, which reduces the viability of photometric direct matching in environments with large photometric distortions, such as outdoor scenes; (b) there is a domain gap between real and rendered images caused by poor rendering quality or changes in content and appearance of the query scene.
14
+
15
+ In order to address these limitations, we propose a novel relocalization pipeline that combines APR and direct feature matching. First, we introduce a histogram-assisted variant of NeRF, which learns to control synthetic appearance via histograms of luminance information. This significantly reduces the gap between real and synthetic image appearance. Second, we propose a network, DFNet, that extracts domain-invariant features and regresses camera poses, trained using a contrastive loss with a customized mining method. Matching these features instead of raw pixel colors further boosts the performance of direct dense matching. Third, we improve generalizability by (i) applying a cheap Random View Synthesis (RVS) strategy that efficiently generates a synthetic training set by rendering novel views from randomly generated pseudo training poses and (ii) allowing the use of unlabeled data. We show that our method outperforms existing single-frame APR methods by as much as 56% on both the indoor 7-Scenes and the outdoor Cambridge datasets. We summarize our main contributions as follows:
16
+
17
+ - 1. We introduce a direct feature matching method that offers better robustness than the prior photometric matching formulation, and devise a network DFNet that can effectively bridge the feature-level domain gap between real and synthetic images.
18
+ - 2. We introduce a histogram-assisted NeRF, which can scale the direct matching approach to scenes with large photometric distortions, e.g., outdoor environments, and provide more accurate rendering appearance to unseen real data.
19
+ - 3. We show that a simpler synthetic data generation strategy such as RVS can improve pose regression performance.
20
+
21
+ # Method
22
+
23
+ We illustrate our proposed direct feature matching pipeline in Fig. 1, which contains two primary components: 1) the DFNet network, which, given an input image I, uses a pose estimator $\mathcal{F}$ to predict a 6-DoF camera pose and a feature extractor $\mathcal{G}$ to compute a feature map M, and 2) a histogram-assisted NeRF $\mathcal{H}$ , which compensates for high exposure fluctuation by providing luminance control when rendering a novel view given an arbitrary pose.
24
+
25
+ Training the direct feature matching pipeline can be split into two stages: (i) DFNet and the histogram-assisted NeRF, and (ii) direct feature matching. In stage one, we train the NVS module $\mathcal{H}$ like a standard NeRF, and the DFNet with the loss term $\mathcal{L}_{DFNet}$ in Eq. (5). In stage two, fixing the histogram-assisted NeRF and the feature extractor $\mathcal{G}$ , we further optimize the main pose estimation module $\mathcal{F}$ via a direct feature matching signal between feature maps extracted from the real image I and its synthetic counterpart $\hat{I}$ , which is rendered from the predicted pose $\hat{P}$ of image I via the NVS module $\mathcal{H}$ . At test time, only the pose estimator $\mathcal{F}$ is required given the query image, which ensures rapid inference.
26
+
27
+ This section is organized as follows: the DFNet pipeline is detailed in Section 3.1, followed by a showcase of our histogram-assisted NeRF $\mathcal{H}$ in Section 3.2. To further boost the pose estimation accuracy, an efficient Random View Synthesis (RVS) training strategy is introduced in Section 3.3.
28
+
29
+ This section aims to introduce: 1) the design of our main network DFNet, 2) the direct feature matching formulation that boosts pose estimation performance in
30
+
31
+ ![](_page_4_Figure_2.jpeg)
32
+
33
+ <span id="page-4-0"></span>Fig. 2. (a) The training scheme for DFNet to close the domain gap between real images and rendered images. (b) The histogram-assisted NeRF architecture.
34
+
35
+ a semi-supervised training manner, and 3) the contrastive-training scheme that closes the domain gap between real images and synthetic images.
36
+
37
+ **DFNet Structure** The DFNet in our pipeline consists of two networks, a pose estimator $\mathcal{F}$ and a feature extractor $\mathcal{G}$ . The pose estimator $\mathcal{F}$ in our DFNet is similar to an ordinary PoseNet, which predicts a 6-DoF camera pose $\hat{P} = \mathcal{F}(I)$ for an input image I, and can be supervised by an $L_1$ or $L_2$ loss between the pose estimation $\hat{P}$ and its ground truth pose P.
38
+
39
+ The feature extractor $\mathcal{G}$ in our DFNet takes as input feature maps extracted from various convolutional blocks in the pose estimator and pushes them through a few convolutional blocks, producing the final feature maps $M = \mathcal{G}(I)$ , which are the key ingredients during feature-metric direct matching.
40
+
41
+ Two key properties that we seek to learn for the feature extractor $\mathcal{G}$ are 1) domain invariance, i.e., being invariant to the domain of real images and the domain of synthetic images, and 2) transformation sensitivity, i.e., being sensitive to image differences caused by geometric transformations. With these properties learned, our feature extractor can extract domain-invariant features during feature-metric direct matching while preserving geometry-sensitive information for pose learning. We detail the way we train the DFNet in the *Closing the Domain Gap* section.
42
+
43
+ **Direct Feature Matching** Direct matching in APR was first introduced by Direct-PN [5], which minimizes the photometric difference between a real image I and a synthetic image $\hat{I}$ rendered from the estimated pose $\hat{P}$ of the real image I. Ideally, if the predicted pose $\hat{P}$ is close to its ground truth pose P, and the novel view renderer produces realistic images, the rendered image $\hat{I}$ should be indistinguishable from the real image.
44
+
45
+ In practice, we found that the photometric-based supervision signal can be noisy in direct matching when part of the scene content changes. For example, random cars and pedestrians may appear over time, or the NeRF rendering quality may be imperfect. Therefore, we propose to measure the distance between images in feature space instead of in photometric space, given that deep features are usually more robust to appearance changes and imperfect renderings.
46
+
47
+ Specifically, for an input image I and its pose estimation $\hat{P} = \mathcal{F}(I)$ , a synthetic image $\hat{I} = \mathcal{H}(\hat{P}, \mathbf{y}_I)$ can be rendered using the pose estimation $\hat{P}$ and the histogram embedding $\mathbf{y}_I$ of the input image I. We then extract the feature map $M \in \mathbb{R}^{H_M \times W_M \times C_M}$ and $\tilde{M} \in \mathbb{R}^{H_M \times W_M \times C_M}$ for image I and $\hat{I}$ respectively, where $H_M$ and $W_M$ are the spatial dimensions and $C_M$ is the channel dimension of the feature maps. To measure the difference between two feature maps, we compute a cosine similarity between feature $m_i \in \mathbb{R}^{C_M}$ and $\tilde{m}_i \in \mathbb{R}^{C_M}$ for each feature location i:
48
+
49
+ $$\cos(m_i, \tilde{m}_i) = \frac{m_i \cdot \tilde{m}_i}{\|m_i\|_2 \cdot \|\tilde{m}_i\|_2}. \tag{1}$$
51
+
52
+ By minimizing the feature-metric direct matching loss $\mathcal{L}_{dm} = \sum_{i} (1 - \cos(m_i, \tilde{m}_i))$ , the pose estimator $\mathcal{F}$ can be trained in a semi-supervised manner (note that no ground-truth label is required for the input image I).
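+
+ A minimal PyTorch sketch of this loss (our own illustration; tensor shapes and names are hypothetical) could be:
+
+ ```python
+ import torch
+ import torch.nn.functional as F
+
+ def direct_matching_loss(M, M_hat, eps=1e-8):
+     """L_dm: sum over locations i of (1 - cos(m_i, m~_i)), with Eq. (1).
+
+     M, M_hat: (H_M, W_M, C_M) feature maps of the real and rendered images.
+     """
+     m = M.reshape(-1, M.shape[-1])          # one C_M-dim feature per location
+     m_hat = M_hat.reshape(-1, M_hat.shape[-1])
+     cos = F.cosine_similarity(m, m_hat, dim=-1, eps=eps)
+     return (1.0 - cos).sum()
+
+ # Example with random 15x20 feature maps of 128 channels.
+ loss = direct_matching_loss(torch.rand(15, 20, 128), torch.rand(15, 20, 128))
+ ```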
53
+
54
+ Our direct feature matching may optionally follow the semi-supervised training procedure proposed by MapNet+ [4] to improve pose estimation with unlabeled sequences captured in the same scene. Unlike [4], which requires sequential frames to enforce a relative geometric constraint using a VO algorithm, our feature matching can be trained on images from arbitrary viewpoints without ground-truth pose annotation. Our method can be used at train time with a batch of unlabeled images, or as a pose refiner for a single test image. In the latter case, our direct matching can also be regarded as a post-processing module. During this training stage, only the weights of the pose estimator are updated, whereas the feature extractor remains frozen to back-propagation.
55
+
56
+ **Closing the Domain Gap** We notice that synthetic images from NeRF are imperfect due to rendering artifacts or a lack of adaptation to the dynamic content of the scene, which leads to a domain gap between rendered and real images. This domain gap poses difficulties for our feature extractor (Fig. 3), which we expect to produce features that are far apart if two views are from different poses, and similar features for a rendered view and a real image from the same pose.
57
+
58
+ Intuitively, we could simply enforce the feature extractor to produce similar features for a rendered image $\hat{I}$ and a real image I via a distance function $d(\cdot)$ during training. However, this approach leads to model collapse [6], which motivates us to explore the original triplet loss:
59
+
60
+ <span id="page-5-0"></span>
61
+ $$\mathcal{L}_{triplet}^{ori} = \max \left\{ d(M_{real}^P, M_{syn}^P) - d(M_{real}^P, M_{syn}^{\bar{P}}) + \text{margin}, 0 \right\}, \tag{2}$$
62
+
63
+ where $M_{real}^{P}$ and $M_{syn}^{P}$ , the feature maps of a real image and a synthetic image at pose P, compose a positive pair, and $M_{syn}^{\bar{P}}$ is a feature map of a synthetic image rendered at an arbitrary pose $\bar{P}$ other than the pose P.
64
+
65
+ ![](_page_6_Figure_2.jpeg)
66
+
67
+ <span id="page-6-1"></span>Fig. 3. A visual comparison of features before and after closing the domain gap. Ideally, a robust feature extractor shall produce indistinguishable features between real and rendered images from the same pose. Columns 2 and 3 show features trained without and with our proposed $\mathcal{L}_{triplet}$ loss, respectively, where our method effectively produces similar features across the two domains.
68
+
69
+ With a closer look at the task of feature-metric direct matching, we implement a customized in-triplet mining which explores the minimum distances among negative pairs:
70
+
71
+ $$\mathcal{L}_{triplet} = \max \left\{ d(M_{real}^P, M_{syn}^P) - q_{\ominus} + \text{margin}, 0 \right\}, \tag{3}$$
72
+
73
+ where the positive pair is the same as in Eq. (2) and $q_{\ominus}$ is the minimum distance over four negative pairs:
74
+
75
+ $$q_{\ominus} = \min \left\{ d(M_{real}^{P}, M_{real}^{\bar{P}}), d(M_{real}^{P}, M_{syn}^{\bar{P}}), d(M_{syn}^{P}, M_{real}^{\bar{P}}), d(M_{syn}^{P}, M_{syn}^{\bar{P}}) \right\}, \tag{4}$$
76
+
77
+ which essentially selects the hardest negative pair among all pairs of synthetic and real images taken at different camera poses. The margin value is set to 1.0 in our implementation. Since finding the minimum over negative pairs is non-differentiable, we implement the in-triplet mining as a prior step, before $\mathcal{L}_{triplet}$ is computed.
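+
+ A minimal sketch of the mined triplet loss of Eqs. (3)-(4) (our own illustration; the mean L2 feature distance is one plausible choice of $d(\cdot)$):
+
+ ```python
+ import torch
+
+ def d(a, b):
+     """Mean L2 distance between two feature maps (one plausible d)."""
+     return torch.linalg.norm(a - b, dim=-1).mean()
+
+ def mined_triplet_loss(real_P, syn_P, real_Pbar, syn_Pbar, margin=1.0):
+     """Eqs. (3)-(4): positive pair at pose P, hardest of four negative pairs."""
+     pairs = [(real_P, real_Pbar), (real_P, syn_Pbar),
+              (syn_P, real_Pbar), (syn_P, syn_Pbar)]
+     with torch.no_grad():  # mining is a prior, non-differentiated step
+         hardest = int(torch.argmin(torch.stack([d(a, b) for a, b in pairs])))
+     q_neg = d(*pairs[hardest])  # recomputed with gradients for the chosen pair
+     return torch.clamp(d(real_P, syn_P) - q_neg + margin, min=0.0)
+ ```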
78
+
79
+ Overall, to train the pose estimator and to obtain the domain-invariance and transformation-sensitivity properties, we adopt a siamese-style training scheme, as illustrated in Fig. 2a. Given an input image I and its ground truth pose P, a synthetic image $\hat{I}$ can be rendered via the NVS module $\mathcal{H}$ (assumed pre-trained) using the ground truth pose P. We then present both the real image I and the synthetic image $\hat{I}$ to the pose estimator and the feature extractor, resulting in pose estimations $\hat{P}_{real}$ and $\hat{P}_{syn}$ and feature maps $M_{real}$ and $M_{syn}$ for the real image I and the synthetic image $\hat{I}$ , respectively. The training is then supervised via a combined loss function
80
+
81
+ <span id="page-6-0"></span>
82
+ $$\mathcal{L}_{DFNet} = \mathcal{L}_{triplet} + \mathcal{L}_{RVS} + \frac{1}{2} (\|P - \hat{P}_{real}\|_2 + \|P - \hat{P}_{syn}\|_2), \tag{5}$$
83
+
84
+ where $\|\cdot\|_2$ denotes an $L_2$ loss and $\mathcal{L}_{RVS}$ is a supervision signal from our RVS training strategy, which we explain in Section 3.3.
85
+
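+ As a small worked example, the combined objective of Eq. (5) amounts to the following (poses assumed flattened to vectors so that $\|\cdot\|_2$ is a plain Euclidean distance):
+
+ ```python
+ import torch
+
+ def dfnet_loss(P, P_hat_real, P_hat_syn, l_triplet, l_rvs):
+     # Eq. (5): triplet term + RVS term + averaged pose regression terms.
+     pose_term = 0.5 * (torch.linalg.vector_norm(P - P_hat_real)
+                        + torch.linalg.vector_norm(P - P_hat_syn))
+     return l_triplet + l_rvs + pose_term
+ ```
+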
86
+ ![](_page_7_Picture_2.jpeg)
87
+
88
+ <span id="page-7-1"></span>![](_page_7_Picture_3.jpeg)
89
+
90
+ ![](_page_7_Picture_4.jpeg)
91
+
92
+ Fig. 4. Typically, NeRF only renders views that reflect the appearance of its training sequences, as shown by NeRF-W's synthetic view (b). However, in relocalization tasks, the query set may differ in appearance or exposure from the train set. The proposed histogram-assisted NeRF (c) renders a more accurate appearance for the unseen query set (a) in both quantitative (PSNR) and visual comparisons. We refer to the supplementary for more examples.
93
+
94
+ The DFNet pipeline relies on an NVS module that renders a synthetic image, from which we extract a feature map and compare it with that of a real image. While in theory the NVS module in our pipeline can take any form as long as it provides high-quality novel view renderings, in practice we find that, due to auto-exposure during image capture, it is necessary to have a renderer that can render images under a compensated exposure condition. Although direct matching in feature space could mitigate the exposure issue to some extent, we find that decoupling the exposure issue from the domain adaptation issue leads to better pose estimation results.
95
+
96
+ One off-the-shelf option is a recent work NeRF-W [\[18\]](#page-14-14), which offers the ability to control rendered appearance via an appearance embedding that is based on frame indices. However, in the context of direct matching, since we aim to compare a real image with its synthetic version, we desire a more fine-grained exposure control to render an image that matches the exposure condition of the real image, as illustrated in Fig. [4.](#page-7-1)
97
+
98
+ To this end, we propose a novel view renderer, histogram-assisted NeRF (Fig. [2b](#page-4-0)), which renders an image $\hat{I} = \mathcal{H}(P, y_I)$ that matches the exposure level of a query real image $I$ via a histogram embedding $y_I$ of the query image $I$ at an arbitrary camera pose $P$. Specifically, our NeRF contains 3 components:
99
+
100
+ - 1. A base network $\mathcal{H}_b$ that provides a density estimation $\sigma_b$ and a hidden state $z$ for a coarse estimation: $[\sigma_b, z] = \mathcal{H}_b(\gamma(x))$.
101
+ - 2. A static network $\mathcal{H}_s$ to model density $\sigma_s$ and radiance $c_s$ for static structure and appearance: $[\sigma_s, c_s] = \mathcal{H}_s(z, \gamma(d), y_I)$.
102
+ - 3. A transient network $\mathcal{H}_t$ to model density $\sigma_t$, radiance $c_t$, and an uncertainty estimation $\beta$ for dynamic objects: $[\sigma_t, c_t, \beta] = \mathcal{H}_t(z, y_I)$.
103
+
104
+ As for the input, $x$ is a 3D point and $d$ is a viewing direction observing the 3D point, both encoded by a positional encoding [\[10,](#page-14-16)[35](#page-15-13)[,20\]](#page-14-17) operator $\gamma(\cdot)$ before being injected into each network.
105
+
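+ A schematic wiring of the three components is sketched below (layer counts, widths, and embedding sizes are placeholders, not the paper's exact architecture):
+
+ ```python
+ import torch
+ import torch.nn as nn
+
+ class HistAssistedNeRF(nn.Module):
+     """Sketch of H_b, H_s, H_t; two-layer MLPs stand in for the real nets."""
+     def __init__(self, d_gx=63, d_gd=27, c_y=48, w=256):
+         super().__init__()
+         self.w = w
+         self.H_b = nn.Sequential(nn.Linear(d_gx, w), nn.ReLU(), nn.Linear(w, 1 + w))
+         self.H_s = nn.Sequential(nn.Linear(w + d_gd + c_y, w), nn.ReLU(), nn.Linear(w, 1 + 3))
+         self.H_t = nn.Sequential(nn.Linear(w + c_y, w), nn.ReLU(), nn.Linear(w, 1 + 3 + 1))
+
+     def forward(self, g_x, g_d, y_I):     # gamma(x), gamma(d), histogram embedding
+         sigma_b, z = self.H_b(g_x).split([1, self.w], dim=-1)
+         sigma_s, c_s = self.H_s(torch.cat([z, g_d, y_I], dim=-1)).split([1, 3], dim=-1)
+         sigma_t, c_t, beta = self.H_t(torch.cat([z, y_I], dim=-1)).split([1, 3, 1], dim=-1)
+         return sigma_b, z, sigma_s, c_s, sigma_t, c_t, beta
+ ```
+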
106
+ During training, the coarse density estimation from the base network $\mathcal{H}_b$ provides a distribution from which the other two networks can sample more 3D points near non-empty space. Both the static and the transient networks are conditioned on a histogram-based embedding $y_I \in \mathbb{R}^{C_y}$, which is mapped from an $N_b$-bin histogram. The histogram is computed on the luma channel Y of a target image in YUV space. We found this approach works well in a direct matching context, not only in feature-metric space but also in photometric space.
107
+
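+ A minimal sketch of this histogram conditioning (the BT.601 luma coefficients, the bin count, and the small MLP mapping to $\mathbb{R}^{C_y}$ are assumptions; the excerpt fixes none of them):
+
+ ```python
+ import torch
+ import torch.nn as nn
+
+ def luma_histogram(img_rgb, n_bins=10):
+     # img_rgb: (3, H, W) in [0, 1]; Y channel of YUV via BT.601 weights.
+     y = 0.299 * img_rgb[0] + 0.587 * img_rgb[1] + 0.114 * img_rgb[2]
+     hist = torch.histc(y, bins=n_bins, min=0.0, max=1.0)
+     return hist / hist.sum()              # normalized N_b-bin histogram
+
+ # Hypothetical mapping from the histogram to the embedding y_I in R^{C_y}.
+ to_embedding = nn.Sequential(nn.Linear(10, 48), nn.ReLU(), nn.Linear(48, 48))
+ y_I = to_embedding(luma_histogram(torch.rand(3, 120, 160)))
+ ```
+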
108
+ We adopt a similar network structure and volumetric rendering method as in NeRF-W [\[18\]](#page-14-14), to which we refer readers for more details.
109
+
110
+ During the training of DFNet, we can generate additional training data by synthesizing more views from randomly perturbed training poses. We refer to this process as Random View Synthesis (RVS), and we use this data generation strategy to help DFNet generalize better to unseen views.
111
+
112
+ Specifically, given a training pose $P$, a perturbed pose $P'$ can be generated around the training pose with random translation noise of $\psi$ meters and random rotation noise of $\phi$ degrees. A synthetic image $I' = \mathcal{H}(P', y_{I_{nn}})$ is then rendered via the histogram-assisted NeRF $\mathcal{H}$, with $y_{I_{nn}}$ being the histogram embedding of the training image with the nearest training pose. The synthetic pose-image pair $(P', I')$ is used as a training sample for the pose estimator, providing an additional supervision signal $\mathcal{L}_{RVS} = \|P' - \hat{P}'\|_2$, where $\hat{P}' = \mathcal{F}(I')$ is the pose estimate for the rendered image.
113
+
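+ A lightweight sketch of the pose perturbation step (the magnitudes $\psi$ and $\phi$ are hyperparameters this excerpt does not fix, and uniform sampling with Euler-angle rotation noise is an assumption):
+
+ ```python
+ import numpy as np
+ from scipy.spatial.transform import Rotation
+
+ def perturb_pose(pose_c2w, psi=0.2, phi=10.0, rng=None):
+     """Return a perturbed 4x4 camera-to-world pose P' for RVS."""
+     rng = np.random.default_rng() if rng is None else rng
+     t_noise = rng.uniform(-psi, psi, size=3)          # translation noise (meters)
+     euler = rng.uniform(-phi, phi, size=3)            # rotation noise (degrees)
+     dR = Rotation.from_euler("xyz", euler, degrees=True).as_matrix()
+     P_prime = pose_c2w.copy()
+     P_prime[:3, :3] = dR @ pose_c2w[:3, :3]
+     P_prime[:3, 3] += t_noise
+     return P_prime
+ ```
+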
114
+ A key advantage of our method is its efficiency compared with prior training-sample generation methods. For example, LENS [\[22\]](#page-15-8) generates high-resolution synthetic data at up to 40 s per image and requires complicated parameter settings to find candidate poses within the scene volume. In contrast, our RVS is a lightweight strategy that seamlessly fits our DFNet training at a much lower cost (12.2 fps) and with fewer constraints on pose generation, while reaching similar performance. We refer to Section [4.5](#page-13-0) for more discussion.
2205.00274/main_diagram/main_diagram.drawio ADDED
@@ -0,0 +1 @@
 
 
1
+ <mxfile host="app.diagrams.net" modified="2022-01-15T18:13:46.822Z" agent="5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/97.0.4692.71 Safari/537.36" etag="7FGezdM1HX65QhznzEK_" version="14.6.0" type="google"><diagram id="qUIMWSlBZzvnUDNTMcoW" name="Page-2">7V1tk5s4Ev41rtr74CnxZvDH8YyTqa1sNsmkLpdPV7IRNhuMfKDJePLrTwIJgxAGPIDtib1bGZCEhNWP+ml1N3hk3G127yO4Xf+FXRSMdODuRsb9SNc1w5zQP6zkJS2xTD0tWEW+mxaBfcGj/wvxK0Xpk++imJelRQTjgPjbYuEShyFakkIZjCL8XGzm4cAtFGzhChVugxU8LmGASs2++S5Zp6WOlWv9gPzVWoysAV6zgaIx7yJeQxc/58Yy5iPjLsKYpEeb3R0K2OSJeRl/e/iqjaOXP7W/gvif8dzQXx7Hae/v2lySfYUIheTorqfu1rkN/x38+jX3ycfxYrd7+Da2nbTvnzB44hPGvyx5ETMY4afQRawXMDJmz2ufoMctXLLaZ4oZWrYmm4CeafQwgAsUzODyxyq57A4HOKJVIQ5p+5mHQ/IObvyAgekBBT8R8ZeQV3DsaGwUzw8C6VpMx/QJu9C06CkM/FVITwLk0TmZxSTCPzIJsztpOGl8cn+iiKBdTt58Et8jvEEkeqFNeK3BFwRfD6bAy/MeXbQwLVvnkDXhZZADepX1vBcaPeByayFDS1fIcBKwWVnQgxU7mIdrGC6pEHXwBUEXRaIFHTFrdEDy2iCSN+m5C+N1NiQ7+QQJQVGYlOhAk8Ax0g0XIsdbijv4hGOf+Ji1X1LJ029qzJhw6WjBB6kBwdsckkrNb3nFAhOCNxnIckNPlg5aeAVw6qBP7BmgAD7dUIBvqgBf1rBz9JnTevTdsSodvEchiiDBFw2+3kEmQUkCu+ehyXKpwqJrTxegT/DpR2JPm5g9YU+zS6hBLmV/foojssYrHMJgvi+dFels3+YDZoJKJP8PIuSF4wI+EVzEGtr55D/s8huLn33nnbHj+13+5EWchPT75i5ip99Ff+xkf1lyJq6TAYpC95YZRQwwAV7++Lr2w7T4nR9kNxgKO0wvMaPeGiAERitEDknBShuyqT+IowgFkPg/i4aZChT80k/YpzeY4W9qFfBnTOxiDzF+ipaIXyQhK7uLVyi6smU0INi0PNQy4NWBLQ+1HPIqwNaR7RZTxBCBUn5xUsYhCrrCsayKuSnYTBFHqYKq1sPtVkkKvkOrxGi4nIzOl9PrdKxRye9M8IUFMfnfExYV4zgR3S2jTWe721cKwv8awTD2cLQpGKNpn7UmQYOdiKw6ZSJ1lkhNpAvHMq0+idSUiNRSEKmlINLebDhtopCxyf637tg2mJq49uxhZN+PrPlnXnN5xMvEkqfeG2DrNRoxOfuEIp/OM7Pa+uPkw7q3pabb+K6bTHmdsssadq3srKa2g3FWyk6YuG+X48/AoOwQPc5ZoUfcd+dUOQ+X2D0NTWbOlWrfx0A0aWh6iSY1bVCanF4g69m2I7Ee3TWfFevliEvmqVpik4iwd15zLpTXxOIra6Z4C0OlZqKrlIz5PDPVJGRTVk+VtuKdsBUzvZWOVqG32GQlEi8ugKayjhDVo3CR9MYQu2WegGQmrdnIuldg+Gj4HbbWyqA7qE5KGjELhvEvM8rHm1SaEtwAY6oXtKWA9LE+F9EEe16MevGnnNMeQtTUO+9AtmX4nq9to0nr3Sxnb6B1bnepPX7UyLgx7Gn2cZyiRSB7AFO13JsHUFMFS7uw7+7Ryey7E8YTNOfcDDxd5evKcRulBEpuH1Jyo2WMImnB+/lHVqT2iyRtlHxWGQpqwWhMwcVcuiUhVmiU3uQpG+y2WZanMjjZmzxVGzKZc9T6tVVsUdLXB1VxUXEXl3L72DeyNBMA1fKd6QAkNft44hFOzVoNn5OtymUpyl5JBFlSjnCZOhJkKmI/tR0ZmtRRzxSiqzytbwqRAFgAeSpEAqDdz+7eCiJl8jL0IxFp1XTUMyKdk7o8h4ihd5QbMlhYc7887PbLo9Z217nNXOvCsKddG/mv43K7J+P7EXtkA3cnMb4t9p9KV06STx08cxeknx6tO8OWKNguRy01fVBrvTrzrOhmYnb553auqKFzXvu0y2WmErvmnOAMlVXe10oWg13lVhf/KArONBjxnVR05mkT7q6mQr9+PaNpyp+wUc/ENhD33Uij/P1f/zfWKcb56RStNOPXDchJ8yq71ip6U61yXpmPZrW7vzJomuw4PC5NtuegVXDDdARvsM4ELFW0CKvSo89/5zzOZxpdrQT2acOuZpXSbB12HYMboAuT6GLirmI5XrOUOtoXS48lOIp98aBRLPPq0XvjhNo0P1L4zM6EUI2+8iPvn2AwviWETi9jjFMoIAs5rqlSQI6+MBJPXm8KaCL5d0SMKa+AVA8k96aAjAZBrngNt+xw+RQFL7OIrmWG5DopFJ8J9QJ/+zC6iH2XpclRHkOx71K5T7XeouNGB+ksumo5ZoYrz494x574BdRgTbbegFb4Yfo3s2yV5uzB1VvOq1AnP5RfJNA8/0KFxqLW6IiJeoOdrBucMuhUqqE3zJlmJeboAJDZ4GsYxZzVBLCeiDd28jhrgEyTIlO9q6rZwi1TGSYpr6vFH3Qm6FcF4s+/0vFL+7x1TrqiXnE3vCIdlNWEONrAIFf3zMXAKk2efwUCxKLu45hFBsNV+Uo5VzeDtqjzKWZD3isQ95LUEPEYnuiV76XAM47c4ojZhYsM8GNptnTLSicqf8DnzPXjbQD5fPlh4IuRvABDIg3fajN813oz/Ga0R5+kZchZN2Z5azOs9lAZFi3T2BM/RwN4xRRTbT3FV1Q1cUFL75EwQNleHRRVVvW2pAVJ7HliTxX8XTBltsicfyxyzya77BYUNcVOJEop1pVYpVAtE4tezy2iSQW9FKplhilUqkhGNFDzjKg9QDXMP8vnWmdvoRHHVm7mVZyTzYpEO4eVQkXu75f57f2x7NN2y1m1cnuzHoWrMHtQXVdsWYbcWU5U5qPs67ro9MnfJKHX1CWvKZAg0zR9stSROB8ofXJyTTF/o4i0Jh0h0tGGRaQqUbIHRDYyIuugWgRnEbjtcdgA3FnIofAuEHBj17044VUY7+554WEWQ8mnLN7y13Yx6CXntN5oMVB0wpdcMx5trr5heVPBb7jqvuT2um1IazG9g25XZoOXrV42V/wmD3/oUhYVC/ActzzkjoR/dCCuEDncF8EVRaheJHBM6d3BmnljHYecaW1PfUNHv1joXG3ghvDUpzdW/vH7Zi/crMVqTbd9A1f1kPdlALc92zaA+lsErmncmNPcpxvcHu61b9hWR055tN6Dy6KT+qu/QdRmBh/RM/33C94kLsk2QVRw
bBC1eUfFVAEPR88wchMExnG9Y3WoXIG67Nj6qEzTV4u/Ppkg9wJxq0fvsAbkH0KwFCktmso/3Fsox67NMOtpkXSzQmqXB4uGbCMq4BVMM92AF+HNKEmuGWUZNa8KVVxX1PmsKM0+/YqqDrlfV1TxxT/XBXX2C0qZdTnwglI9tZ4+l0gav0LqgtNKpJ9x6kv0ppRwq/q1kkGzTJxL2nZKryA55CS+zP3iVEKHc6QbzpKCfaWO+n57j8rgZUqEDpu81ORtKxQwoEKRf7VB8btvKoXSX/6+M1A4qWONYtVplF7ivheppkraZSphqXFOgqzv5I6OVlP0dP8LmGnz/e+IGvP/Aw==</diagram></mxfile>
2205.00274/main_diagram/main_diagram.pdf ADDED
Binary file (71.9 kB). View file
 
2205.00274/paper_text/intro_method.md ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Introduction
2
+
3
+ Multiple-choice question answering (MCQA) aims at selecting the correct answer from a set of options given a question. This long-standing challenge in natural language processing (NLP) requires machines to have a wealth of knowledge, such as commonsense knowledge [\(Talmor et al.,](#page-10-0) [2019;](#page-10-0) [Mi](#page-10-1)[haylov et al.,](#page-10-1) [2018\)](#page-10-1) and scientific knowledge [\(Clark](#page-8-0) [et al.,](#page-8-0) [2018;](#page-8-0) [Khot et al.,](#page-9-0) [2020;](#page-9-0) [Huang et al.,](#page-8-1) [2019;](#page-8-1) [Li et al.,](#page-9-1) [2021\)](#page-9-1), and have reasoning skills such as multi-hop reasoning [\(Khot et al.,](#page-9-2) [2019\)](#page-9-2) and logical reasoning [\(Yu et al.,](#page-10-2) [2020;](#page-10-2) [Liu et al.,](#page-9-3) [2020b;](#page-9-3) [Li](#page-9-4) [et al.,](#page-9-4) [2022\)](#page-9-4).
4
+
5
+ MCQA has made great progress with the development of pre-trained language models (PLMs). Basically, there are two types of PLMs that are suitable for different tasks. BERT [\(Devlin et al.,](#page-8-2) [2019\)](#page-8-2) and its variants such as RoBERTa [\(Liu et al.,](#page-9-5) [2019\)](#page-9-5) and ALBERT [\(Lan et al.,](#page-9-6) [2020\)](#page-9-6) are encoder-only models, being more suitable for natural language understanding (NLU) tasks including MCQA and other classification and regression tasks. T5 [\(Raffel](#page-10-3) [et al.,](#page-10-3) [2020\)](#page-10-3) and BART [\(Lewis et al.,](#page-9-7) [2020\)](#page-9-7) are encoder-decoder models, being more suitable for natural language generation (NLG) tasks. However, encoder-decoder models can also be applied to MCQA [\(Khashabi et al.,](#page-9-8) [2020;](#page-9-8) [Zhou et al.,](#page-11-0) [2021\)](#page-11-0). This is enabled by the text-to-text framework, which transforms data in different tasks into a unified text-to-text format so that knowledge spanning many and various tasks can be learned, aggregated, and used by a single model.
8
+
9
+ Research Question To fit MCQA, existing implementations of the text-to-text framework take all the options as input and are trained to generate one of the options, i.e., to copy some tokens from the input. However, this is inconsistent with how encoder-decoder models are pre-trained, so their underlying knowledge may not be sufficiently exploited. Indeed, [Liu et al.](#page-9-9) [\(2021\)](#page-9-9) have found that in classification and regression tasks, the decoder layer is often under-utilized. One research question is *how to apply pre-trained encoder-decoder models in a more natural way to MCQA*, in particular, how to exploit their NLG capabilities.
10
+
11
+ Our Contribution Our idea is inspired by human behavior. When reading a question, humans are sometimes triggered to associate the question with their background knowledge to form some *clues* even before reading the options. For simple questions, a clue may be exactly the correct answer,
12
+
13
+ <span id="page-1-0"></span>![](_page_1_Figure_0.jpeg)
14
+
15
+ Figure 1: An example MCQA task and a generated clue. Bold underline indicates the correct answer.
16
+
17
+ while for complex questions, clues may play an auxiliary role to help humans connect the question with the correct answer. For example, for the question shown in Figure [1,](#page-1-0) the clue "paper" forms an intermediate concept between "notebook" in the question and "tree" in the correct answer.
18
+
19
+ With this idea, we propose to employ a pretrained encoder-decoder model to *generate* a clue from the question by exploiting its underlying knowledge, without seeing the options and thus without being strictly confined to them as in the text-to-text framework. The clue representation is then leveraged by an encoder-based model to read the options and make a prediction. We refer to this generation-enhanced MCQA model as GenMC. It significantly outperforms comparable models, in particular text-to-text models, on five MCQA datasets.
20
+
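+ To make the generate-then-read idea tangible, here is a much-simplified sketch: it decodes the clue to text and scores each option by its sequence likelihood under a vanilla T5, whereas GenMC itself reuses the clue's hidden representation inside an encoder-based reader; the model name and prompt format are placeholders.
+
+ ```python
+ import torch
+ from transformers import T5ForConditionalGeneration, T5Tokenizer
+
+ tok = T5Tokenizer.from_pretrained("t5-base")
+ model = T5ForConditionalGeneration.from_pretrained("t5-base").eval()
+
+ question = "What can a notebook be made from?"
+ options = ["tree", "stone", "glass", "river", "cloud"]
+
+ # Step 1: generate a clue from the question alone, before seeing any option.
+ clue_ids = model.generate(**tok(question, return_tensors="pt"), max_new_tokens=8)
+ clue = tok.decode(clue_ids[0], skip_special_tokens=True)
+
+ # Step 2: read each option with the question and clue; score it by the
+ # negated mean NLL of generating the option text.
+ scores = []
+ with torch.no_grad():
+     for opt in options:
+         enc = tok(f"question: {question} clue: {clue}", return_tensors="pt")
+         labels = tok(opt, return_tensors="pt").input_ids
+         scores.append(-model(**enc, labels=labels).loss.item())
+ print(options[max(range(len(options)), key=scores.__getitem__)])
+ ```
+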
21
+ Outline We discuss related work in Section [2,](#page-1-1) introduce GenMC in Section [3,](#page-2-0) describe the experimental setup in Section [4,](#page-4-0) report the results in Section [5,](#page-4-1) and conclude in Section [6.](#page-8-3)
22
+
23
+ Code Our code is available on GitHub[1](#page-1-2) under the Apache License 2.0.
2205.14393/main_diagram/main_diagram.drawio ADDED
@@ -0,0 +1 @@
 
 
1
+ <mxfile host="app.diagrams.net" modified="2022-05-02T15:09:01.890Z" agent="5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/100.0.4896.127 Safari/537.36" etag="j7Alr_nGyfcW7fiw3n3F" version="17.5.0" type="device"><diagram id="A-yXJqKsbNeCnKhkKmT9" name="Page-1">7V1tc6M4Ev41rrr9EBcSIMTHOMnu3t7M7Ozk6m7vvmxhGzvcEOPFZJLsrz8JkI1eiMEWICZOqhJbgAA9j7pb3S1pYt88vvyUBtuHj8kyjCfQWr5M7NsJhABaPvlHS16LEgRwUbBOo2VRZB0K7qO/wvJKVvoULcNdWVYUZUkSZ9GWL1wkm024yLiyIE2TZ/60VRIvuYJtsA65x6AF94sgDqXT/h0ts4eiFEPvUP5zGK0f2J0BKl/4MWAnl1XsHoJl8ly5l303sW/SJMmKT48vN2FMG49vlx9rju4fLA03WZMLZn8m2Pn2r8X8Nkvh7MPdb7/+9sdVWcu3IH4qX3gCUUzqm60SUi1psGBRHEB/PtEnnf0zeiR4QOtT+Ez+fkkeg83hIH3N7DXmr6A1Xe1yZK/JCQBtX6pXoHX5P7/tnBV8jNJgswzIBb9mWcIOk9ebHy6xIvok9AGs66ddlgZxlH8JFlka7nbTfaWpeBtST/GCfPEy+nb+KwzdckV7WaomK9qLdsfgiXA2JO1iJauixZKUHpyRTvOaN+pyOp0eaS5SnLcYX7rb5iDUvM4qeIzi1+KFyKHgcZtfatsO+Z+VTbTJmygtm4g/h16X/23VOnXIZuFLdkV4s94UtSxIb6LNUldP8XJFGd9A1SOnna1uzpqTvwuq3j+ENT17lwU5HwP6579hnEuC+y0RafmzWBH9k9HLyUNmqzh6OU5XpRjgWhxy7wYpOUj5Q/YYkwJAAc3S5Gt4k8RJSkqW4Sp4KhoyimNWukk25PpZSarbOFzRM76FaRYRvXJdFj9GyyW9z+z5IcrC+20BwTPRoqQsTZ42y5AKbsr1ZbB7yL+A/FAWZFFCq7jyrf1TV/UAE+rkjuFLpajUCz+FyWOY5b2cHcXu1C0uKtU0cMrvzwelBzEqyh4qCg+XajMo9ex6X/lBFZEPpTZqoZlshWYS4Ak3y2uq4g9tzrVUBTZa/jnISM/e5CXQsvdgMrUOuXYHJad/LMXVrapLFKeURgt8E4xwyeyMGigqDe1acjuzsjSMCfzfeOtE1fjlHT4nUd7FS6Qd4HA4e/YUunwlu+QpXYTldVV7on1VpA+vw0yqKufD/tVPpwgzwt7iCN+TVH2tQpN2gANfEghER3l31+jOEkQCKV+59LesoVJe/Ojpx66Hp8DnUPGdqafoygqGwc56MhwTTKsQLRaa4PCNhMMZERxLN8RLRw8cZOhpIhz+iOCYYfrbuxADtj04To5qqGwqTqvVCnYoxYzAo4GBaAweyyDEK014KMSYEXi4I8ID31w7s1tZjq3yn+7kmAH6xhmT0ayz36jkmAl4jEn/d2yOmYCH20DPm+V4yCGsBcQQxwOyLAlt74B2W+cDgo2q69gB4aqGUpxn9bijFL/lKK11pc+Dxdd1TpqrRdE9aWXpev436JI2IJiRN7KEzz8cdQSf+LgfCemoS5LQ83jEoN5Pbvx7hts03JF3zR2wtE/msZPCC83uEO1jLZV33hdeka4GrTvSXNnraU0lnS3IpmM+61JgveGwLkMg+lzWOv2oLfQ9AFPLr/x4nLhQm8xUAzG5UpWCTmfqBnUkQr7k8jrZXN1vw0W0ihbkzEM/vSYqiX3+FGbPSfr1QrCWBLN9a2pVfxBPMIyVDLP9fXGVYcjtimEqg/88hrUOGBZU+vtm+5Tt47mb+W5bkWsNYvRqIjYwhxnwUEXCSkBNFzEsPjJisyoqNHAUQgZ4XVFANcboxU4RlLa1V9OVTz9MuIi+7gSB4plobZskfQxi/vBz2f70uFM8VX4wDqnRfkVebBFt1srr89SBiMikTXm9RZuoejBLg81uRa5i12/C/QlE5C756quXn2r3lJcvo902DspGjDZxVLnxKk6CrPpADTSHusu2NeYuZBgjGQSbt51x2p95zzFUl13Ov+O5+gewyMkRW0VSURp0Eibjbxtzaom5UKqeFuTvg/ic+duVZkJjiqro9H75FpzaHBzYl+Ho0/GF4HuFAhoHxahiWzqhcIyDwsRwCVrgcL5SQKExXCILKADRwFjg94qFJKGGx8LEsFUvWEgianAs/DGpiz7yIH3LFjHyh8ZoTFl3vWAEzcNoTCksvWDkGIcRsAYeMboqkAr//0QV2VfEQdZpsIxCdeIRF9fQAKGLpCwLLAclsAJCYKNpVzEJYKnCXgKKlyyL1lkWCqEqTtBpmmGh6PtiVR1nVwCrwdjrQpITSOKIyLJElvYkOVpV5yRRDQr52BabVM68xuCEQJYYDnPfirnTE957Gkfu7NagwJBnCXMTXctVBdZVOowJDP0KjK1PYBjt4IV2emjnYTNpd3TBhkFo9/VCO00jLiDOxDaDdmPyXsycG2emaQSMJDHguMPPtQFgTJ4KnXh4lpl4NBjOfp94QDPxGFPU7gbe3N1pSj30fBmP4efUADCmyJ1OPDAwEw8To3d18korHraReEDVaNJUPG5n9HfScXRItION0CtQNfx61zg5ZuIELzjxOCEzcRrdKl+j8PBjLOaoOOj0hb7kTDBVbV37+WGDIW+FKos42O2iBU8QvstzwH94WkT5GoM3yWaX5A4jccrY27HdappRvalpOnM85tdWSIm2vPFk2xf6fU/R3idntVji9ucw/hZSJyI35+3ENS8rMy2b+UJP10uNOVxh5txBruVI7AYyu1fB0gt0rWHj8TPkHCJiFMrIsxXKyKnn9JnKqF24+SJh9EiYCvTnSxjbw/1LGIVz4w5OMJ74QCZQHEfbXV08osKkYLct1jNfRS+UUDyZDiKqQR+vyU4S+3hpXeno3r47xYAHxkFKc9OfsgXwuE5+KNbfz+12Rueln2vp51UCaOjnUJYaXfdze0xOfq0rNUlBMCJShh882iY6+fvAQw6CmYGHiU7+XvCQgmBm4NHAyX/Rc2f7WoD/RkShrabDULCObat3PeeoghGXEfOwI2YP2yLNLKUDt98xc5NVkS8ypgMZY53ulxNljO3175VzoMyb9z1mxpY7tcUxs2fKmLnJatuXfq6/n1cpcHY/h7LM6Lyfm5g42sc0cc8XGt8Cw48IjFyjvQ80sOiMMgINE/1JvaBhm4jGJfrTx9xDMjauz0NpnZngDh9fdo7OP7yMlnsfLfsASwMmAyLMTfZ6uMiYDmTMGTFmUcYMEWF2FR659z1a9h1LHi0bE2FuuYfIpZ9r6ufnxJilfj5AhNlVeMUGHxEMu0iUgXE197LcmjQJwECUTPQ8DYoSMhElEz1Sg27hjMSRiw3soUEy
0VHVywbO2EAw3mvWE7IMBGNMKU+9bN4siy9m1A4H0pgmO2vduVkWX4ODgcY001nrts2y+BoejDFNZ+5lz2ZJfDmD65hx7cuhc8NmSXwZAMaYxv7dWl8GgNFgiH+ZPd7ar+pKawjZbIeB1ts0yxN3xKq69qmigT0MbdYLr4mX9LpeuIumtlfZNZdHT51zjB3lrmGdriCO2q0g3iDGEr5E2e/lEfr5P7R86pbfbl8qp92+ll/qnA8S4kvPn9cgTsBKX39ntdMvlTvTr4db59/YvWvRLnpnk05RdL63zmTT502RTR4Q18cEp+ePIKk2R1Fb5xJKe+bRKUw+lYU6ekAvTGbTWgxhMgIi99AZOf3I9aZYiGAiIsqB44iivDdaN/CNXWh9Pq2xUbT2sERrdM5UFZnWHhiU1t7ADqyx7T/DxG6JHmTujOpe6Gw1GG7YiFyJNvqsR+/4OuqnLYr+kU/GPOxqX7+R8ne+MjrSRSR0lEjAUu1dDLrkkcofpHlx/c9pkiXZ6zas3337Qpo6rxV6Y3J2VSMqUgRt2N3o1dMdUj+nsTtWKRpwxBhP/fKR6A+f+elhCU98SNLlPBKuBmdk9mvwyz8+/fXlC/7g/P7pl/mn1cv6ink5q8Ytscjuy69Jmj0k62QTxHeHUsGVeDjnQ5Jsy8L/hVn2WgIVPGUJjzqziK2pg6pG8ds+C6Gf0YOfw5TQJO/y5/oW2M4UVdNV2WRskfajluu5syqFeZD7BL4jJiMZmQSvldO29IRd8/vA0vSpXdUIn3c+e48DbYsn1mvv4hHZuwb4T5EngOrDKZIDJfZhfWtO5WBpXVV9OgerbN4ekRwsdkXMQldYwVahMvqNXWE4JjA0RnUVYPiDgzGmqO7KX3n5dJ1uwNjv1TEcGqPK3O4YDXtwNEaVod0xGu7gaIwqFbtjNIZX4qPKLO0YDX+fIDQYHv7AY5VWeIT5T2d4wMH1uN/AqupzFqxizYEGs2D7DDrWMuGo14UNMY/HC9ko0JB4ISDjeIG56MRgIbFcrSnmqrIt4YG6jg76hu1S8l1z3m/MeWgW54Etcn6/NFdr0pP+Io0ZhAVMOie97MuYRXG0CYNUjoOS/8EjVbOb+Y7+u8m7wCoKU6mjlGGsBjq6ugtzA50tRbk0qGDfk8duQNLAqm2bWf5XBxrYsPVmvmdp5DfO2GFWqinSyAKSNPJPVsGWJI1wv9IIWlpJf+JW8G0iFocgHgI2x3Hf74TkmgN+DXlPVJVRvHd9PjMAOkIVTUnvWEJcjrkF+mI8C6QOE3vuJRuzVQ+EcsbL2z3wdPLvSX1c7FtNB16NeX2WcbB/8jYpcNSou2J9MU9eojFr0o8KEWIJqU0Q8KlN8lKIKiylXKhZsPg6p6Fc6qsQ7cnbZPHlTi7+mCzDuP2SiKCVvQnlGWKqhQ3ZlG45dxjNkasp1cmBYoRzH0quyNODk057WoyaZcodXlsmWirXvZzQZRqdiXszT+Ll7vWR/Jt4s8eJdztx76I/QHl8VGmWcTAPY8r2dV6uyldoaYt4esiFHHEenw+nitAUUjkY5d1QNNJL5W/pgV7wQi+t9BInDQCIVWmaKn7Z3cmuo1vHdkMu8vm3PyL67UIynSRzp9CvyyEFPlDpy64Ip0yIREOY8Y2zrrtO0xQ3jNyvDdd2POayZO79Oivd5Ht6NlI+cG3+pnD+sXxPVxhXtj3f7iM/FDbZNXn3EGzpx8VTGr/OUiIcwgaOXcEJE0fbn8vP3ciYDrNMdYgv7BPxxROOAX4kxAtAVxJLwyyqFgpyS1QiVY4pse7HqRx18MAnPOBo4GDlogxsW2rdwX4lEdpNYb+sxNJMJWIMBW+6jcD0VOe8bzWpTZ+zUkmUo7tldCQx4PuVGFhahMfB7uASQxWl6YMIX98vEcR1uk1QHMw06YkHIeVBREbZozYjjB1jY8eeVqZpCmNsrNygQ+kzhFrmQakZ16/RyjNurGrIXMa5SDCHAVTSjCXjcAMjv0OawQFpNlYlZy7NHDBFft2yeIBtwzSU5xAMPGVqfJN0RYuYrS7S2xzdN6fEd+5ME5y+9djp6Du2EDq0VZ52pePqBLOTfE0TKjAPo1bSGA9FPoB9938=</diagram></mxfile>
2205.14393/main_diagram/main_diagram.pdf ADDED
Binary file (45.5 kB). View file
 
2205.14393/paper_text/intro_method.md ADDED
@@ -0,0 +1,64 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Introduction
2
+
3
+ Relation extraction (RE) is an important task of information extraction, aiming to detect the relations among entities in plain text. Recently, many scholars have paid more attention to document-level RE [@sahu-etal-2019-inter; @yao-etal-2019-docred], which aims to identify the relations of all entity pairs in a document, since it is more in demand than sentence-level RE in various real scenarios. In general, one document contains multiple entities, and an entity may have multiple mentions across different sentences. Furthermore, one entity may be involved in multiple valid relations, and different relations are expressed by different mentions of the same entity. As a result, document-level RE is more challenging than sentence-level RE.
4
+
5
+ <figure id="fig:tsne" data-latex-placement="t">
6
+ <img src="tsne_final_big.png" style="width:2.4in" />
7
+ <figcaption>A <em>t</em>-SNE visualization example from DocRED. Points of the same color and marker are different mentions’ embeddings of an entity, which are encoded by BERT <span class="citation" data-cites="devlin-etal-2019-bert">(Devlin et al. 2019)</span>.</figcaption>
8
+ </figure>
9
+
10
+ A key first step of existing document-level RE methods is to aggregate the information of an entity's different mentions (mention-level features) to obtain the entity's representation (entity-level feature), since relation classification is generally performed at the entity level. To this end, previous RE models simply apply average pooling [@ye-etal-2020-coreferential; @xu2021entity], max pooling [@li-etal-2021-mrn], or logsumexp pooling [@zhou2021document; @zhang2021document]. Finally, a fixed representation is obtained for the given entity, which is then fed into the classifier for relation classification.
11
+
12
+ However, different mentions of an entity in a document may hold distinct semantics. A simple pooling operation that generates a fixed entity representation may confound the semantics of different mentions, and thus degrade the performance of relation classification when the entity is involved in multiple valid relations. We call this the *multi-mention* problem in this paper. In Fig. [1](#fig:tsne){reference-type="ref" reference="fig:tsne"}, we display the *t*-SNE [@van2008visualizing] visualization of a toy example's mention embedding space to validate this problem. As the figure shows, different mentions' embeddings of an entity (marked by the same color) in a document are scattered over the whole embedding space, indicating that different mentions of an entity are not semantically adjacent. We further illustrate this with the toy example in Fig. [2](#fig:casestudy){reference-type="ref" reference="fig:casestudy"}: the first mention *Samuel Herbert Cohen* of the person entity is more important for the classifier when identifying the relation *country of citizenship* between him and *Australian*, but for extracting the relation *place of birth*, the second mention *He* should be considered more. This implies that different mentions should play different roles when extracting the different relations involving the same entity; in other words, different mentions function differently in recognizing different relations.
13
+
14
+ <figure id="fig:casestudy" data-latex-placement="t">
15
+ <img src="casestudy.png" style="width:3in" />
16
+ <figcaption>A toy example of the multi-mention problem from the DocRED dataset. Based on coreference resolution, mentions belonging to the same entity are shown in the same color, and the relations are marked above the arrows.</figcaption>
17
+ </figure>
18
+
19
+ Inspired by this intuition, we propose a novel **R**elation-**S**pecific **M**ention **A**ttention **N**etwork (**RSMAN**) to improve model performance on document-level RE. In RSMAN, each relation's essential semantics is first encoded into a prototype representation. Then, the relevance weight (attention) between the prototype of a specific candidate relation and each mention's representation of the given entity is calculated. Based on these attentions, we take an attentive (weighted) sum of all mentions' representations as the entity's synthetic representation. In this manner, RSMAN enables the model to attend to the information of multiple mentions from different representation spaces when representing an entity, so that the entity's representation is flexible and relation-specific with respect to different candidate relations.
20
+
21
+ Our contributions in this paper can be summarized as follows:
22
+
23
+ 1. To the best of our knowledge, this is the first work to consider different mentions' significance with respect to candidate relations when representing an entity for document-level RE.
24
+
25
+ 2. We propose a novel RSMAN which can be used as a plug-in for a backbone RE model, learning a relation-specific representation for a given entity that further enhances the model's performance.
26
+
27
+ 3. Our empirical results show that RSMAN can significantly promote some backbone models to achieve state-of-the-art (SOTA) RE performance, especially when an entity has multiple mentions in the document.
28
+
29
+ The rest of this paper is organized as follows. In Section 2, we briefly review related work. We then present the proposed method in Section 3 and the experimental results in Section 4. Finally, we conclude our work in Section 5.
30
+
31
+ # Method
32
+
33
+ First, we formalize the task of document-level RE addressed in this paper as follows.
34
+
35
+ Suppose a document $\mathcal{D}$ mentions $P$ entities, denoted as $\mathcal{E}=\{{e_i}\}^{P}_{i=1}$, and the $i$-th entity $e_i$ has $Q_i$ mentions in $\mathcal{D}$, denoted as $\{{m^i_j}\}^{{Q_i}}_{j=1}$. The task of document-level RE is then to extract a set of relational triples $\{(e_s,r,e_o)|e_s,e_o \in \mathcal{E},r \in \mathcal{R}\}$, where $\mathcal{R}$ is a pre-defined relation set.
36
+
37
+ Suppose for each mention of $e_i$, its representation $\boldsymbol{m}^i_j$ is obtained by a model-specific method. Most existing backbone models apply a certain pooling operation over all $\boldsymbol{m}^i_j$s to obtain $e_i$'s representation $\boldsymbol{e}_i$, such as the following average pooling, $$\begin{equation}
38
+ \label{en_co}
40
+ \boldsymbol{e}_i=\frac{1}{Q_i}\sum_{j=1}^{Q_i}\boldsymbol{m}_j^i.
41
+ \end{equation}$$ As we claimed in Section [1](#intro){reference-type="ref" reference="intro"}, $\boldsymbol{e}_i$ is a fixed representation, which ignores the fact that different mentions of $e_i$ play distinct roles in identifying the different relations involving $e_i$.
42
+
43
+ Finally, given the subject entity's representation $\boldsymbol{e}_{s}$ and the object entity's representation $\boldsymbol{e}_{o}$, a bilinear classifier is often used to calculate the probability of relation $r$ holding between these two entities as follows $$\begin{equation}
44
+ \label{bilinear}
46
+ P(r|{e}_{s},{e}_{o}) = \sigma(\boldsymbol{e}_{s}^\top \boldsymbol{W}_r \boldsymbol{e}_{o}+b_r)
47
+ \end{equation}$$ where $\boldsymbol{W}_r \in \mathbb{R}^{d \times d}$ and $b_r \in \mathbb{R}$ are trainable model parameters specific to $r$, and $\sigma$ is the sigmoid activation.
48
+
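+ In code, the backbone's pooling-plus-bilinear baseline just described reduces to a few lines per entity pair (a sketch; `mentions` is a $(Q_i, d)$ tensor of mention representations from the backbone encoder):
+
+ ```python
+ import torch
+
+ def avg_entity_repr(mentions):            # average pooling over mentions
+     return mentions.mean(dim=0)
+
+ def relation_prob(e_s, e_o, W_r, b_r):    # bilinear classifier for relation r
+     return torch.sigmoid(e_s @ W_r @ e_o + b_r)
+ ```
+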
49
+ Our proposed RSMAN incorporates attentive mention-level features to generate flexible entity representations with respect to different candidate relations, and thus enhances the backbone model's performance. RSMAN acts as a plug-in to the backbone model; its framework is shown in Fig. [3](#overview){reference-type="ref" reference="overview"}.
50
+
51
+ For each candidate relation $r$, its prototype representation $\boldsymbol{p}_r$ is first randomly initialized and remains trainable during the training process. Then, we leverage $\boldsymbol{p}_r$ to calculate the semantic relevance between $r$ and each mention $m^i_j$ as follows, $$\begin{equation}
53
+ {s}^{r}_{ij} = g(\boldsymbol{p}_r,\boldsymbol{m}^i_j)
54
+ \end{equation}$$ where $g$ is a function that computes the similarity between two embeddings, which can be a simple dot product or a multi-layer perceptron (MLP) fed with the concatenation of the two embeddings. Then, we feed all $s^{r}_{ij}$s of $e_i$ into a softmax function to get the final attention weights $$\begin{equation}
55
+ {\alpha}^{r}_{ij} = \frac{\exp(s^{r}_{ij})}{{\sum^{Q_i}_{k=1}}\exp({s^{r}_{ik}})}.
56
+ \end{equation}$$
57
+
58
+ Since all the mention information of the entity needs to be considered, we use a weighted sum of all mention representations to obtain the relation-specific entity representation, instead of using only one specific mention representation. We get $e_i$'s representation specific to $r$ as $$\begin{equation}
59
+ \boldsymbol{e}_i^r = \sum_{j=1}^{Q_i}{\alpha}^{r}_{ij}\boldsymbol{m}^i_j.
60
+ \end{equation}$$ Different from the fixed representation computed by Eq. [\[en_co\]](#en_co){reference-type="ref" reference="en_co"}, such an $\boldsymbol{e}_i^r$ is a flexible embedding adaptive to each candidate relation $r$.
61
+
62
+ Finally, we use this relation-specific entity representation to achieve relation classification by modifying Eq. [\[bilinear\]](#bilinear){reference-type="ref" reference="bilinear"} as $$\begin{equation}
63
+ P(r|{e}_{s},{e}_{o}) = \sigma({\boldsymbol{e}_{s}^{r}}^\top \boldsymbol{W}_r \boldsymbol{e}_{o}^{r}+b_r).
64
+ \end{equation}$$
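+
+ Putting the pieces together, below is a minimal PyTorch sketch of RSMAN using a dot-product $g$ (one of the two options mentioned earlier); batching, the backbone encoder, and training details are omitted:
+
+ ```python
+ import torch
+ import torch.nn as nn
+
+ class RSMAN(nn.Module):
+     """Relation-specific mention attention with a dot-product g."""
+     def __init__(self, num_relations, dim):
+         super().__init__()
+         self.protos = nn.Parameter(torch.randn(num_relations, dim))   # p_r
+         self.W = nn.Parameter(torch.randn(num_relations, dim, dim))   # W_r
+         self.b = nn.Parameter(torch.zeros(num_relations))             # b_r
+
+     def entity_repr(self, mentions):
+         # mentions: (Q, d) -> relation-specific entity reps e^r: (R, d)
+         alpha = (self.protos @ mentions.t()).softmax(dim=-1)  # s^r_ij, then softmax
+         return alpha @ mentions
+
+     def forward(self, subj_mentions, obj_mentions):
+         e_s = self.entity_repr(subj_mentions)
+         e_o = self.entity_repr(obj_mentions)
+         logits = torch.einsum("rd,rde,re->r", e_s, self.W, e_o) + self.b
+         return torch.sigmoid(logits)      # P(r | e_s, e_o) for every relation r
+ ```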
2207.00046/main_diagram/main_diagram.drawio ADDED
@@ -0,0 +1 @@
 
 
1
+ <mxfile host="app.diagrams.net" modified="2022-05-17T16:17:56.076Z" agent="5.0 (Windows)" etag="-5tUTogp-P6et_uBLyLb" version="18.0.6" type="google"><diagram id="lZ5QqtgE2Tnz5kcziqN0" name="Page-1">7Z1db5swFIZ/DZeTbEw6uC1r2mnqpikXq3aHghtYSRxRt0n26wfBEFJn0poc7yBx0l6Eg82HH7/EeU/AnoiX29syWWf3KpWF57N064lPnu/zwPe9+p+luyYSRiawKPPUFDoEZvlvaYLMRF/yVD4fFdRKFTpfHwfnarWSc30US8pSbY6LParieK/rZGH2yA6B2TwppFXsR57qzJzFpFf6TuaLrN0zZ2bNMmkLm8BzlqRq0wuJG0/EpVK6ebfcxrKoG69tl6be9C9ruwMr5Ur/S4Uv6cOvb8X3dFvuFp93D5v7n1/jD/yq2cxrUryYM56Zw9W7tg02Wa7lbJ3M6+VNxdkT15leFtUSr94mz+um5R/zrax2dt2dKasXnqSeZ+2CLtVT15B15ce8KGJVqHK/K/EpuOHTSRVX1e5yXXeasK64lmW+lFqW9WHkq4Wp3WyvV3+6f9XbVSs9M6fA2+Wmd/kTs9yrx2Me7OvZrWoa+lWWWm57IdPKt1JVB1buqiLt2pa46fJ+2yU2hw4UmFDW6zttLDFddtFt+UC1emPAvgfyR4IMDvlqaJBDguxcyQE25MiCPArGLgSLzbLtWyRYh4INsSFzC/IoGLsQLDpLnwTrWrDCx4YsLMjTMUKuawnhRsn4kANSsnMlX2FDntBHL5Rg0VmSTeVcsAG2g+HbNtXdGCFPp6x6uVEyPmSyqdwrGd3asG0qUjK0krEhC/Kv3CsZ2/MQ5F+BCRabZUiCBRdsl/YbShowtAVLkC+FHAwNsu1Ek0kJrWTs8VVoO9GjYOxCsOgsbcOZWJ6pS/RhlO0r05dbaMGiQ7YN51EwdiBY9GxfaPvKxPI8XeKztO1jYnmmLrGTeiG5xM4/SNEhR7bpRJCBlYye1IvIJYYSLD5L21silmfqEtt2iMhCAtMlOkuykMB0ie0uRGQhOR/54kOm3yzu64Fm4sJjyOiZuIhurQWHLIZ2/3Rkm050uYZWMvr4im6tBRMsNsvuIRwE83JhYo+jOCN3CUyZ+DDJXoJSJnoyjjPbX6KBEbBkB0CZnCcwyWKn4ziju2LBlIkP07aY6HfAwJJFT8hxRr9fgpLsAGCSl+TcS0LP1XFGZhKYZNFhto+iJZiXKxPdf+C2mURDJmjJ4lO2XSZKy11KeXBPvOW2/USUL6QcDO2GZs7JfgKTLP5YiuwnMGXiwyT7CX4sNbSn3vITsweMArILyeLDpPvnoJSJn5g7MRsAXX+BJYtP+cQ8AUQZWsvoqTyaKABOsvgwbfuJkjzAksVP5Z2YKmAUkB1IdgAwbZeJJAstWXTHguYEgJMsPkx6RhOYMtGNiRMP/78dBc0311/G4hjq+mtJFp2yQL7+LsokzeWhwVdqJd+wNyELKNv/2cD+98yxnSHRJ9YF34GsWjxMPrxf15vCWdz8AQ==</diagram></mxfile>
2207.00046/main_diagram/main_diagram.pdf ADDED
Binary file (17.7 kB). View file