Eric03 committed
Commit fb7897e · verified · 1 Parent(s): 8a9c3a5

Add files using upload-large-folder tool
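For context, a commit like this is typically produced with the upload-large-folder utility from the huggingface_hub library, which uploads a big local directory in resumable chunks and may split the work across several commits. Below is a minimal sketch of how such an upload might be invoked; the repo id, repo type, and local path are hypothetical placeholders, not taken from this commit.

```python
# Minimal sketch (assumption): uploading a large local folder with
# huggingface_hub's upload_large_folder, the tool named in the commit
# message. All identifiers below are hypothetical.
from huggingface_hub import HfApi

api = HfApi()  # uses the locally stored Hugging Face auth token
api.upload_large_folder(
    repo_id="Eric03/example-dataset",    # hypothetical target repo
    repo_type="dataset",                 # set explicitly: "model", "dataset", or "space"
    folder_path="path/to/local/folder",  # directory holding the files to upload
)
```

The same operation is also exposed on the command line as `huggingface-cli upload-large-folder`.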

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. 2001.09215/main_diagram/main_diagram.drawio +1 -0
  2. 2001.09215/paper_text/intro_method.md +22 -0
  3. 2011.08067/main_diagram/main_diagram.drawio +1 -0
  4. 2011.08067/main_diagram/main_diagram.pdf +0 -0
  5. 2011.08067/paper_text/intro_method.md +45 -0
  6. 2102.06857/main_diagram/main_diagram.drawio +0 -0
  7. 2102.06857/paper_text/intro_method.md +126 -0
  8. 2102.12871/main_diagram/main_diagram.drawio +0 -0
  9. 2102.12871/main_diagram/main_diagram.pdf +0 -0
  10. 2102.12871/paper_text/intro_method.md +43 -0
  11. 2105.04030/main_diagram/main_diagram.drawio +1 -0
  12. 2105.04030/main_diagram/main_diagram.pdf +0 -0
  13. 2105.04030/paper_text/intro_method.md +132 -0
  14. 2106.14087/main_diagram/main_diagram.drawio +1 -0
  15. 2106.14087/main_diagram/main_diagram.pdf +0 -0
  16. 2106.14087/paper_text/intro_method.md +21 -0
  17. 2201.13100/main_diagram/main_diagram.drawio +1 -0
  18. 2201.13100/main_diagram/main_diagram.pdf +0 -0
  19. 2201.13100/paper_text/intro_method.md +139 -0
  20. 2203.11131/main_diagram/main_diagram.drawio +1 -0
  21. 2203.11131/main_diagram/main_diagram.pdf +0 -0
  22. 2203.11131/paper_text/intro_method.md +21 -0
  23. 2203.15205/main_diagram/main_diagram.drawio +1 -0
  24. 2203.15205/main_diagram/main_diagram.pdf +0 -0
  25. 2203.15205/paper_text/intro_method.md +103 -0
  26. 2205.05071/main_diagram/main_diagram.drawio +0 -0
  27. 2205.05071/paper_text/intro_method.md +53 -0
  28. 2205.11867/main_diagram/main_diagram.drawio +1 -0
  29. 2205.11867/main_diagram/main_diagram.pdf +0 -0
  30. 2205.11867/paper_text/intro_method.md +223 -0
  31. 2207.08012/main_diagram/main_diagram.drawio +1 -0
  32. 2207.08012/main_diagram/main_diagram.pdf +0 -0
  33. 2207.08012/paper_text/intro_method.md +306 -0
  34. 2210.02984/main_diagram/main_diagram.drawio +1 -0
  35. 2210.02984/main_diagram/main_diagram.pdf +0 -0
  36. 2210.02984/paper_text/intro_method.md +108 -0
  37. 2210.15230/main_diagram/main_diagram.drawio +1 -0
  38. 2210.15230/main_diagram/main_diagram.pdf +0 -0
  39. 2210.15230/paper_text/intro_method.md +38 -0
  40. 2305.07185/main_diagram/main_diagram.drawio +751 -0
  41. 2305.07185/main_diagram/main_diagram.pdf +0 -0
  42. 2305.07185/paper_text/intro_method.md +296 -0
  43. 2305.10051/main_diagram/main_diagram.drawio +1 -0
  44. 2305.10051/main_diagram/main_diagram.pdf +0 -0
  45. 2305.10051/paper_text/intro_method.md +283 -0
  46. 2305.10786/main_diagram/main_diagram.drawio +1 -0
  47. 2305.10786/main_diagram/main_diagram.pdf +0 -0
  48. 2305.10786/paper_text/intro_method.md +39 -0
  49. 2306.04403/main_diagram/main_diagram.drawio +1 -0
  50. 2306.04403/main_diagram/main_diagram.pdf +0 -0
2001.09215/main_diagram/main_diagram.drawio ADDED
@@ -0,0 +1 @@
+<mxfile host="www.draw.io" modified="2019-11-04T11:53:53.181Z" agent="Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.75 Safari/537.36" etag="8sa_o8aQq8Owa5PEGPQz" version="12.1.9" type="device" pages="1"><diagram id="xP_OuO_GIXHsNdlOnj9K" name="Page-1">[compressed base64 diagram payload omitted]</diagram></mxfile>
KVxJWqSg2cSxDBRKYpWtaq/UkwVht12q5nH2Y+h9jfDA8h27hFU5n6ojuOn4RCRQ9ZdGyibly5ks/nEVvsS46zIBNLlx8u6/3bpTa2310S5oECVqkxRXkY0rO9R10sxaO5glvvFF0Dcs9ePkv+wM76LaJdUdfF7aoX6hTuah1wbIAsai80d4ViuDt7ZPrif31ZZkMcjYxBJ/CAGi1HBvYFclrxXM1W3Fc9qWSVSu2HYvwohMH15IiMZ1GblGAawkNz/LzetrDU8mTJACYjgm3dCsu7w8Z4mvtz0E4OJase+l6rm63LuoTUp9DkDjuwY0YGDHGaJZIPFwmavpBVB79s+tqet8rB+ZlEWyjMOdDY5oOxIkYgkp9S2JGEN8PrNOLt8Lt65iFLMXJhpT7oflwXg+KNHbxnT7mJUuCC8Be15wLJ1+TcbdJCosJBdUJvGzk1vv1Bp4X1/HqgkaKmE+kvPGZEFyMh3EQ0hI2XCtedC5J0qZjwviVig96Rc9CPa51EGNgfD8o0M/LNpdhJ3YcScjbVFM2LzqMaIR6f3NL2e0LY28oKF6YaOF0ChhWY7wX6Ypo4AT+9n1sQVlwILwIxwqFjo28QDLLYpqPFCYMxGEaecXYw7HsVLpfEAv+CCr1YfGomuexlaXi/Xvjw0guHkpL4TJuyfRQAHZxGErRhCsOkdf9HC2wAZAjezDN9cwG/5fLaT4doRXg2AXLFI8Soa/o3rIgJV8jjQFhX5BC+y/svdmus0q3NnY1OQfTHxZ939gYA2emN2B60119qphr7f9LtqJISZRISZamlqbma0M1o8Z4ntGVBDSo5n/ewVt8jG7q5uvbs+7QxcrXI9PvA6lgx9O2L0abl5eAB19oy8F53Z0+x73ifDYBRao8/eP5/NcEeoliT20GVeXnusIM0sXqPpntu/TBCfX+2Hw/zaFdj7uDWv8apFaqjTBrvvNBBRTwYVGMrNRbLs4jQ00BkN8PmsYa/k0qDs4W7tgDeMjzyRsPJ8G8UpzDv2OOvDwp/fd4nm/teuGKTegEFpyZpF1+RaAvJF8qW0L9afyZuF8EDxLQxHHWht0MyI3J8E2ZQ3G5RjRgf+w4EyV40DA+1zOn2i5/ieVFCbRnmtcVoPBumXjtHFw1y4Iz0C9kT9FvrYZMuLzQPqigHOBwyT/6BiZD8TXv+rMQCdUkaaJWuSzpVOm1qvANirWRUCMitJpxRYMlf85Q+LOp1ndkygopanAvpRgzrjWBPxUDv+pdGtvKBuVXeddO9FDrA1+fxFLSTB9KoeB730z4d8zRs7qFcNg8XCxkdSrpv76le5Qg8RscQvjEZtUr/xmCIWvVGshwpObfWlo4sjAl+1/f5E99Xq07cFmZFBpa9K5iIGgtmcrwfHCZaWSHWpuvMOkaPW+B9mHEWenxGBS7iX5sV0wUfoms9afmudDoUJWDsvjQIpXSHNdQYakr073DxP9zEMMphQBiH7WDK7zwL3B0nnWdFnhOQ2tveEcs2N9tOuh7+V/Ph2tKT5Bpntsl9fyfb+wD7tTHBi7SPrK6TOOv+a8VzeGKrjapeZLKv+te/Hef1a+DD1sInN8Y1j3422SIHz+F0wCnKlzpfXYbDT94l7qT4nBDIKPX1DNvN5hVlRhlFAlikcIfuTL1cfnmuhCO/62zPNi5Srn0Sg4UF2Z/ier8YE51hsJI0zOGwrGjD69twb1ndGNe/WreyP/P+alnwxGVWqvxc0w+oXJKV+leGm/xurxvmvBsz7jbArUuAtR+uFNBEagnFQ1jwhG3BIcvruk1FHdM/1BrxuD6BX8Dq13NFtn2Yorfr+m5g+JZ+7nLWo3seXDz7VqD6l0vXVp2bt3j50JN3fOtgSc9cr8tPu74IUV33bruyhpGvZk/SpG6x5fhbNT2R29rKkV53TvcAxeAhM55poOZyRHcneqeqCB9aYjv9znhyQul0vJ1yd0Z/IdygHhil2YGTdPjoNkJhbNgjZ72QOmBvlEEfbuj8dNGZsdtmkKY5MkyXmfD+2lsL9XCMHMVE4+iNNfthvfCpSXBhOETGnk4veapHXNihbwD7uaUMPxb+EA+vRg+3bYVY6aB3y9sOK9RgWjYsUeDQlPcmcMlQYJp8hpNvoCLmeFY69TTYIJNvK6LY+hOMvF7a9AGiQiF/YrnWy4z90wgCoITxEsd5o/6ntMvT8gpwWCem3gY9Brcxg1SHhS6ex2zqddIXMrfQBBP8e97IHN/rQa463axe+BqfBiFPOGe+HuyXImVC8LO/eVYmavPX8u9oJK2NPsJjx/vNRrzg1IE35kbdAXFy3UD/ehklye4M1za82khcmJhBP6JMGiVIRVXeApvnwK6AIyvkmH4mON5Him2Pvn1MXP3oilrdunq34GgJJV9toZE3xMEb3LOr14q0fYMcLf7nekSCpR6F4iKdYb3VyXasZ+mdb/3moMsWct/fxDJTUTI6jL3oAanaEeM8/NsKxgrFpDBIqo0/ft0pev9CV5aVQJ2nwOMNRFoZ+QTj6vr18ZtmydqKsanJK25KG8ZCUyenncwIvtTFhZOB7tGUQpKKvA17nPBJA3NJId0lMZjnJDCA6kvBPrUd87cPP6n5SYtHg6Vvyb9mZ6Cg2msg2L6sRq6HUWitEmkIdGakTvbCI1VyhkfM3NXN+p21VAWwMeRcNPKVqHUCL50Yy0zPEBfd+BKAp0j7FffcvP9fpyVWufoa7fx8VOJ3vci1MNU9NTGSQ3AT79QdKKHi+Zxw5jgcpxqN7ISeQkORHxH463mJeRMKVFm2wNX3aoT1RhdDwtBosol7Tb9bgYC4RnEcqCSeA+qWvsBjwdhPIBwNv3Qwm1M1jH+SjO6hB3xoihF1rz6IP1rg7ewYxVw++LW2P7hPc8CW4ua60yCgehotL7+I4qgZZSA3tffbBIFzwTLxDq5c7o2wZ7fBFsfHlaoGAvhBnIi3y/XIn+RRVZW3XZyp/O0qeUfTVMt5O3bX9MEb5V6suD5FTaAwCKFyN/tZOsvxeNA4BCrwSRbqd9pcw1C0G5WDMj7G0xMC6jJXHLAVPSHv7yZ2S3LsJXfnedlKTScTd5of7gIomNRZutULZiIsjLKNZcfeteUFBtcC6ijgCHMtYMvfSla5M+IHsGQr8PkchPt6K/rYvWtMq99l+Kv8ngC/OoZ+wu2yOIKumBr3u2Zru7TXK8hAIPCC9kCTlFzAxSHNJT30VVXz9ul7N9uBnfNtbWtsi9koSjWwzO2rlw+6gRPXtm6NoupL5O770Uy3QlwbdgkQlThIoQ+6Pm3LlcBJeXymvtPRoeFqEB5vVwXLeb1EoApnh0pzRG1FPipf7bpJumbgVxl1guicsTMIAbT21Qwbe1UHtxK/eK1U6bPj4AyxsuAr9dVw5uEPDJQC8bnBl63oqPoH1a77oDu8JYz9D+cFWQgl6dm37NyAsKHBOhEMHzXOOJN6CPwEi323KaERyV5cmSt2HA/QXpWD4XM0Wbj1MeCh/TDSxC1b/ZgohuoKuDYQ2G6JC2UiBcxp5LEVqFLcKIuuKGWFxCoWlyfmy6qfP
uk3prmmBiszyUkaMmINKB+wpPWRijparbIT8wVvxAXL0W9SCHqWMx21NheGTlWQM2mP06c/nqOdPBAvO+nNkb2hw/Wg2aeBqEdMjIzWVY2x/LrdF74CoAnZbb7mAsK/TXYr+5Qj2U+SQpitR7YEBJ79EKO+YnOJ1bBobhl+YjkXjDx9gDEc+1LM+l9Kw8lz9JQ3Nu1DgUKu6Dw38JXjww1+JDZgyI9ukii2wjUW+tSDMLor5xVlg8VGfX3NSZtfzy+kz3ibPcYEV79IdRDfbVeMaBpIV53KwDZZZB53ZY9/MeX2cl+N/v2cls5H1a+QSsbqv1834MX/q62ANnrYHwRnX4Ofv7Armz2uVzYJEPtgeWso6vyXYiSj5pbyoQ/dk/JgCCQaJ5Xco3eC5bI2WZETdIHNwmUNYePUh8EO4d/StREUwTLCw8D/L1sbQmfc2RGYbC+W/9yeJbg55+yHCK3rukyPkUd8Aj/yX7MFM2t10qxDHPahVxx7mnV1sQwJL7/YOju+5ufx710s4fVBlQS34xAa7RFflFthvwsw6YjSJD/ERje5WuvevLgagNtjtplVWUwxP1bWXrLHv0EuSuXwm/CwPGcx8MShVY60Z7W2EcznnhMPbMgaEOEEdbVhKDYxRtbUb16i3pQivk9MUmsKc0N6qtZLJc5NMgvCmy4Dl7nmNOjzSOJOyN+HNQNjudvrekLBBKETilUGjkQt6I74gidGbPkoPjxL4zlv6zu2sgHwjVjsCRBtnq9S8fhXQxQzgXfOb24jCMm78WFBIgdM+UFIqrPYM3cc6xwWQ8Q0jVRM0aET6KhvHsi+NEH5NBif6WZN8YbcpEXb3+nm/tFOF4z8n0r7SFXTAz4qCJmneUnqId3xiaY8jZOYfz2V2fHkvueLscO73slQFJrU5I1Db4O/FKvplBUzVNAiAW5JrLpRaHT36DN5MdOHNhRJFrQNTLSqvoQWh7Dn95VsIsGu40kWmE1p4y7Jlj6T7EjV5r4MyHO0ORrxNqDL9W2wZgqUwNVY2b6+4zeMWlYKivjGltnfDuzR1J+IOHK4gZp3+Q+K+/t9kuQLLDGe9KgPR9/SPKDl+y9m5C4+duLwpCQko8jhuu3utZLO0hHuPdbFaGQ3ZHQm3Ct4KbTb76aXqJLfTdFwo3TPsp9xGJ5zQvyTpVffnFu69njr8MlQQptshE3n5lvANigzg7W1Bo2AR0llOwYnSgkbgl8QwXXVTIPFJhcRetXs3XuW+RJchNbMeT9/jou6BLZLS9sAhA2VdRmyJ3v/JkLzMnvtTe+S2O64+g0629SEgc6Fm/iq1qdl4EdPdMWFRSbmSzwJEDJi5VwID0I311BaeQ8X3lH1fQU5ShmXQRkmhSTddZ69Z1Z1uM258JMbX8N0vfl2siFVn6mh4WpN0MimvOuzujVJligVBYrJeoEaZqr3GsKCwSQijjryr7pHhPBQOp3Mgw9nuaZ3F+gte2u5q+uJkZe6DfU9AT8SlargGgPD4DE9e2tMRCyhx67TAygjf7p/trf1Ncwn7N4akdUV+OAy0MaGZthFUVjapeWeb0Vb4PoLea/szhPOPPxmncKZEjDijdHHT46O0r+CtbB0la2WZ9ILA339p2A9om+Qg+wKxmpHXgU/2IYMOjQuquZ7T4xCj5VaOob+ynnYt5XncAf4ejMnrZRmGDQi0OYmGICpFzz8yf3PagN745SCuXrbsTohZbTaIB5ZIV9RihKYyaSU71OCO2AJYo12KHBpj8gxasjkM+tSW4ou4GndzQuFsFqFDwmmiUIofrI3NJcLrFujL8Rr/zDAMrxBSlRnY/7GZR80xkd6iTkju/6SAqFcT5LYfrq/NQpLSCodikkIUUaFVM8b0EqMA5tyJQBsrD4wgNq4OMO5U8+U/WIsG/MdpOeNqHP5zzBMLuKQbpPyhhEyHP+EzFeker6m7q8atovoN1P6RA6vCd9PY3MTXYmfHcwJJVfUuUFi38qM5ly1QciRkEyw3qZPqwdlmZ0mFeTLuz00P4m70jRNqHACIgVRNdfneALgUrO5yL2Go5XnGzgtD9HSaO1EJOCpy3g+H7qvDcmqSgI1uSNZvlHeV0IR4ty/wSS6SFXBdLZEJR4yHTJhjJjs139SCEThJ5qK3dbgfF4MpI/8EBmsoJ8lMh7OJU7hMDJPVLeu5UR1s4SzGe6UcqXl+8jifhCL+2LeUPro2Bf/qEBhrPLeFVly7dkT4zAMa4Y3m7AibhyalwBIQeE1Lftdo2N+CWcB/WvBvCv/P2532S2Wct4+nIQI5aSxsCv59K9DmCCdOjQsvcFnVT9JxYlatwg905mJ0/sK3fsdgNwWIx5uq5VaIjFxVVKPjZBIRo/utb54aN7FuS5BqaEtV3Lfq8c8mZy8Jf3+hW7Vt5QCMB1KbMygNrbBCijaIYy7NPaBvz3AxFv3p68nuSTqdzIqTYNA8cmYBWGsV/puiNbPFo35H6tiNYH8C4nRQJl63cqEb85867LW4xsx3N53n5IPttN1dSzIMP4xzK+jRC4KIQqK6HLJOI7gyry/EgqNL9rj0ijElsQs/cfTuTjh1Oq4ay/UHalXZf1JjZYIH6buG5FoJKWgCOU8HAYTyE0N5cZ4SMwfEE90wXqseUcluIRsYc/nygkKus3BOtBNeEYjyE6UvIk3uiQpXeF2jv5z3L+admrHhhDMvUmU5d2gwTjswFg20btelLkl0585ErqSql0VilVvUmzINob+5Tfy8eSSat18V1A5wVorJtIJOjeAjiPtHMzzC8PQFH37IlPcCc8KOdIs6bD1UNqum3XefYO3l2EFKi2LlCrioJ8SkXSsV2mzBOyJqKuKGICYspy95NHr7JFJBk8pm8K+T7k4zX4JQTSjGMQhgzck8t1qt4ue8r5qS8EvQT52p14Y0bpkkVUPwL0LiQjdtSO7vXr7OvRFS5gH59nk6vrCfqkqv5WQ9BilXPewD2E/SudvSpefQkPv2434Ssv8ZBR2qO70rRRBDRvWe+H6ko8dAkKj8U92G1piydrdbj87iV+8zIj+hUSLyuXizDIV6JZ9cy9vCwoAvLnQtfeHjrJ38oNvyVv9S/V0j/WSjByZUtoA78c9XoZ2/ieUdDItKoBhf5u+F6w80vyNZRXdMbd4wt56qCoFsURjSjlatN/gM+DefLV85OK33JwfoLKVYxhu5KJ2vfzZzxFX2AtvSJEwn2wXaxzA2TtxuXwhs0s5OrNDF+ckzZBFR/Z1d6Z+uyT7wk3zIda8JinHX9WS873Q9P/nJuNDqrCna+idm+mIZbSEwB5qF83aLskqDgtbudZVnxbQ9EsOsdvENnbRaKu9BHask0ieMoyP5wQghLuKcDnmYU2Giuc7zpCVNjNVs2zm5j7aft0yMUInq9MQeqTy5ihnEJuqtWOFDdvRpKnDCbexbOx6U/s09kRw/IUMI06nKfBYuNZeuyMRBw+0nq+wfL55ZsBSJeYvtw+MTmCp5Wsrvb9FpJJoDqoieGFKLGPnCR2mQML4nouUHdGz5CNZ1zURI1/p2j29lDfZQvaRZ7vJ
KRGPA/VY8o50afJ86PhBYfeTupLvjRjziXrsCkvZe2ZjlUfSN6mdaf07EaQyGkItR76G/KtACZWvQDwef9U0j0uBixkbVZ7498n59zj5+jg6zSiOiU+TlApNT/sQnClfbD2yMtisLGKKKDiibfGQfmbQFpN0uvhd9TS3d7yH49iYm7ftOvGtpK4Gm1TRJgI+HSyKFC6qoUQRGiv6rZO4a5h2AeeugIxDR73xLO+PXr1kRm13tkE3mfGYwk/uHNC+8zW2bljliUZmwag3U7QGIpqlLBNEcP074iY0+LFMT3YgoldbHnEM+aDGr1P4qLsLp+wG02m4vgPq0MeD29Cuy8Vv0QohQSILfakASGCsbwXU5KNxswimTloUr1w+jNgBolCe6cbAWOu0AiF1QnekBsDkVzvrddvPBQh0YmV2xs58vgbXNOSHqHNPZFerYInHnkc+41Af9sgwOmNA3yVvMKuXAIK/V8cX8OW5I4uo8gJmtvoejrUWGiWo7Z30jImA7VGA6OTxf2V9vG7wV/lMrA9+yS0g6b7d63oT5ORD07nv29sRfFBuU16qL4/98qk2TC2/haO9/Y4+9X41Z/Hvu/RqgfgDrk++Eml1POpTb4QEvEJ9yuqGfWshZAFJLJXOSJjtFMyl3AIwMv7Buo3zTt/795/zsIWC23BLi9pjKAmEcpd64FopFV3lVch/2L/zHXl/DkWOgNAb3+gglCbqApdP3KIXO4xw2ZiwH+gHPOSwwoQf9oAeX+ES8Gy7jCOCTN9fA09QTOr6K4A2n/Az9bagaNhxs9vGlPlkTH0vffjjZQADs/jJt/n5MGWNVDAD8OegFokjwGkdn7NL0dcTgrkAFmX88XuPZkhqUpcBDHl1xT8OqmyfDyR6x2/U3nRuY9znWg8uD9j2Xg0w+PFZL/gvcDPmv7Q4L5Hpo5eXEUxRQ4x1uPWaNmOBatQUiXFYw66xoc/D/suHit1r7n2qB545rSPNn4FcnE6ddBb2dVHcwr+CZZCxXi73QjNQ0HRWnxWCjjRcwmp1Kncy8R/ZN1d36MWP1Fkq0BR0BQdSvfUGCNAvyXLjXuRaW5d7vN+XjuIsaa3VxNz3Ovlm2nwgtDfq6g9AwkKYZrnTZk5odawv5TC7XxaXms4VlJ9//wlUSJ0+PwAfhkEN79fDYQWM78HpfMjUTypr3RLw3InsDmOw4mnTzjzNFIvhiGI7jcly8EKKMAd200PJiGoOkiyCPfA0aY8BOlGsM7nAZHc41Gyf5JsgNF83HAuqLsQTih/GQcq27QnPeAV0Im5nC+TNm58aN3MwRfvJWY4Bm8r68uUIVWhM2t0zjmxOQNscCEiQZtlC8I9Fp6BK9GhTvj6mzgQVuOMw2j+YE7oAkCUzPLN32H4PfkOpw3HvXza2qNY4T5Xt1JDlyz/njvJ9C+N55S3/fo9x0fg6rz2kCrkyhWF45Dupey9t2rp+IQ3e9clr5sPWnpBzsbiAWFA535PWfnidOsPnN9jb+ty225NPgoS+Iq5ZHPfohvrVAu8NONMW0GeYJmTwOUCec2l7wqQJYgZRZIkdkW1yYfs84YAqhU4n6BBF29V7/p0JxQmQK8usVSx1e1XEOjKxWLS8eeNiZeUe8ZgK8FN+vDw7QN8+7J+g7a0FZZFQbHl/RPL7S653Ykz3KfsoKFeV3K7sgqcEcnHE6vcImp0Sr7pA3lYHPHjXmWUWrVaH65bvKP0O2au2j6IUBRFFOouher5kISnlIm5DmEnBQAGwatCDAkl5PTxoBg674Zj/uKRJQmX990Wem8D7Fmn8QrIjbrcMHVqr3Uz9YdgYwOeyffoZXzIRQR8LWoSajT0Ajv3zY/0Dn5Hwd5zJ1sJMcuyglJHhenjaA1JElgqxX8MFpJRT3t32cv8pcrNNJ8JEUrItT4TFMp2wbOf4X/XzWkmqObArM4PzZrBluZ8J294x96Efo5qBvnmm2i0WXrA0l/++37rnUL5I5KlHZCNCSXCDd53+HVyxbWIevMau0BbrYYrxzd39ABldQp9+b0fzS4/hVn1hyfkFJr2sEu7NBSLrGyGQ6wrtgIwKDGdcPW0o812F/o16w32jvXjdQu+McpsF+SZT/fKs8Xv1CupuvN5MrRbd8Or+ebr0i9U0tW4b3B/bioWSVYKuSsr9fNjZaq9fnHVX+NNmSCm6gGG67YbBaXUuhpJ6hjGqz72+IlTrewvwa9SIOuPZ7sNQIXC01zWoRu2GgRVjwCOIC0mgizLQ8SdRlyeQPHjjl+nnkTaAIkJ+OYT5O2KUmpC9OReUztsldnwPuQgwLMUwyxeAc5xqFCbtyhFikTcBp7E4kMZEf2r2bwMnuP6TlLY5IkzMWEJPGpa50K5RSnBvgSRsxO9ZHqCrCEfl1iIjC8kCmyh7lP1Aj2WI00AysKh4EgeSgnphxdVYzzR+RBEgTHlTtqn4A4ZlPNOEuK9AOdMf3cLCu0nHaD2YKH2EGQ4vfF365/qYvp0K4+vo4l5HXx4FdjZ9AjtGlokQkTrqe8U1N3+2ys1SNixKTmN9xrYYaWs/LbHfMBfhWIg+LC5iNLhAYmsnzpGlcZrAiZXuOhhI1V48eK/J88lZJKyFOb+AEhf/2QNUPMRlxtaNUaE79qhRZaepfAOuiBbn1NgTQDRD/pxJfvlZb7iHtwFT08YjmuXUJhB8ScNlQonWkK7KAMtQeu5gFf1ttbSVsU9IgrV244BmZLlotjQcOP6UdRQIqXcyDKO2gdPTPPfb7kmArYQRJ9WS8EPvMorxPXW6wClQqiqYip1MHyGaSPAOvZJ+hprjlsb5kqqpYnvhK0GVPBXxsZ3vHrJq7LoIwxPvdwiuQvw+9xvGRE7GhW1ZlpJ5Jdbn0wdgfjd1ELawmmbWO8kR4RFbb433IG2RPu0JdTtJMbrJdR5WFVe9ThVtT57s6rec7Us9FJlJBcu3E6s+7TTxIL/bKZtyJwvybxfSwshNDegSjKyA5bj2OTZ8ZKonSmOtx6YX+aVZqPVLQnfUdwlBzxBVGtS3SzIz8v/ARpa6nhH8P2T2lsPMugmRSzPC9178UPoX+dl/6E4fABedYOznPOk2eyhKjhk1LeF4XAfzgvbtq46WQGI8cPIusKAv9igIkNPkB4Wa87yDKKvXQfhQP8+s0Qa0G6gWzQ1q3RMIxFm/jr/nldatSswfAI0X1WJC5nkElryF1NumgS/Ynn8dQ/iUBrbx4t4XhAPdtMtCfAVwVBcXb1Bi1P0UahVvQmgfu2jUYrmG/VwRMFr2WThSiT1n8it8x/7UmvkHSK61+3KBZJE3/xuEzBOD9Je4d7tWQlRp6ac2Q/F630do/F7+PeG5HwEADze/H+8oQH38oi12jjm4Qjcf+rwf99uKlHElwf8rBvKALQDzhrwH/dgAZyEx5dMNm6jeXDE3S07/fOmKXTPnwxKlOxoDAD1U92AILktBd4RZAJi36MgGwKk60D3LELC1mlDREOyEBL+jTE4YzhGGvlvh0FGqRT1vG7wfZKT2N9bhgrDUPtYvggmdD9dNpXLP+P0putWBoKVSguuDty1
c9rAcwsgwAimdhyn8by6NJsQLhG31ciuKqASDths9Kdq8TcpEeEbvVvYQJFfAIkn/IBBgPUxWXT064o/A4yWRThYJEXvffKf/Os/vjWUP5C6KzwXP2wR78mGZCMY29cYXlfgTVcqPxpt1LYTqKHNKCVJM80TDBCSmEyokJYyvkKzHb9nzHJo0yOhoCiKQd0erzVK99n3hC8mIgG438KeRzjo3jxsdfz8T/8kMPKShtviXbLdul1SN/2FblBCeXF5FGNDdSa77r94QXD+YwRfoBHSzuYTJAFERdyoSG7jbzCyuanQbBol7j1EE48e8j6BFZhKGaGz4AiEMALR+yDto+e5I8kLnIPcBll4J55ZjArrZsJsH8PLaxyBrkPMAyoA662y4UZhre85SH6/e1FmQO4/1Yn8fqzn+3RJEMTum8HftAg1v5X6dhj2bMp7Go7U4ymbQb1vNJ4sGM3liqrSUxh2iw2VWVG8B2iK6VzsMAkou2TV9Lfy+EiahIQv4VFV4cyXrUHgrnTciIA65rutgD9RohkcUVVsKCcLzfrwCuTr820eoLzMGv+56wrVEJAXqq+Rb54UwZWbKABj7/KuFLEepYuijEpLCDil8oGw6Yfh3FEGoQdEEOQejjKp1hdE+xApPq8sQmBQnxJqKkvPFZRThj4dnGqobkr0kKCdRfmBmjB9nY8LLP/xz7cAKQwoTQiyyXMY3ko5e8DNDiIMwzFT6juNcgJPD/HL87eqEN9qmiyQZ4sHWREETuevwaL7OzpgKgiU/WdeT0WhuwcdY4wMeBOUzAMyY/DTUoaZKtGP0cfDN/5e0pG4YNyV0v0yswGjUjYmzl7UthzJP3fO+NsGKDW8TDz61sL3CanI2Dw8hhb1pazPnf5BrYu/42/cGUwAGQ7yxDN9ABwUMp/CaYciqIIfs6UQ0dg9VKmgU1uBoP7LR6IU+LYh6DZqb4X/hbLxaQa2eC1p7iooVxa5GWOIU/RN9zBF0ISzUa/Qa9FRn3Wk1rX+CPszlMkUlHZOYV+WnwWv7LBI5C0QI608EsmCG7cpg0Q6bN/Le349Y0eVEKmdmoUKT8brAHV60FSJXw0Fz2nqUl0TPY5jUyG/CT95/dMdiW5dUbT5TWx5DRHKkMNJ+TyhBNwn+hiD+hV1uofrSUcVcEALsV6N3G8o2ew07P+aOPVPkWiyDGqjSauIYRhbw28EPPoGRKyy7+Ua4L+Qo4TUi3rS1qxcFoxNoYInNKrJjRzwqfAoLQzTBFRkJvg3k6Goan042I5zvz3mihUpxJ2EFDA/f8nNYrAShKPGQyi6XUHLCafcd/RmRvrb43HaP+FH06P5VRBTakqFhN9FaQiycx+zMw1tVnlAIfjpwr33KmBjGFdDNFiaN0RisVZ7xsp0j+aX5oml/Xkwvw1CKz2SGEODT1O2mnyaMmV6z7h0NZB+0ee0LsZb58+owfdaSgV33/0haGV1Odf6Daqx7Q4KQu+f9o3JuwA834M2X7yx9t33KdV6vsdjrs2Cb2YNmB/JyyWv5CvbijAdpXLfkvbU2Sbdo+lm9tD2S5525EfUgBA0vRDYApyWLbesI/JdTTxwmn541BPpPx7RTULPOlYmgfSWoV6mBSPBZfE+ZeYDcR/CAiIJUWw39IpSPb3SqXckLU1a3nCnvveoolE0HrhPiHopvHjnqCNP4j88DZU49qkD5FIVN8gmwWc7OXQLiQfixdlQPUmNi++2REIjfzgoeQJoqCYTPIX8RBEcDk/jtv9srOiGK3lRRBEodefKMdCsVGw5hURPQWxxKY6Bef9UqYan2xneB0ciTSDKJdQYbeKj7Ou7Tt417O3tYhxEX+2DPhmHz/J7p3tgglSC8zfQhSlh7u9iEpApOk78xhVonSIQls8Gs2tv0zzlodck231et5FyUAGDkH4U/xzukg+kWcCXK738nx9emR+M5wJOAQ7d9Z509YlFP7WDKnXfAB6NuB+Vav77B0jcNlA2Dcrj0X/gshYKX6Lrtq/cSWBpGr697tQMtK30IsQH/p4pCoKQIVpBv4Wqz65EcfQdi3/p2o+5SxicaEfW2n98xxSCkzRtNgCQzG22CHcG5YSjWlPjYraSJpi288Tc0vjLAEc/ZlQq4X7HLykDQfjvm64UO4gvns9eudMj0M4P+Px951oOBZiIL29PJMN6BRnv8y8PH1XtjoIZKzcnAXL5sDTLLL1/1hCOx5MfsXQ1GKgwC+TfbvzxKOf93zHxEp92aThFZik8ofQ15X+9E1VpaCFOpvUvBR1QBFL7m+Hfz/OFMgL1bf28ww+p/fN3+FyJl5r3xhF7CbUvpCJVqvznaD+iNoVmk/ml2k+vriT/c548FDfFZpQjBZDGsKcXCH9rc6061Axbds6bs2xium3/q39b9nou+bQXukb7SP/xTLiL8rxaUBZlcRYvf9k/848c3yk4pBno9+NDvsA/2foSHzVPYo7MDZ5NaGbKv+ddFS3RS2rmeoCHWwiGgTar8p9CEe0hxU4b1JUEHpv5Sn/Sn1zCOTdymYzx4QxQpsgUar5/v+KrUurp5eMJPvJ2dUK+KnA+812Lu0fDR4KNoqp/NSpAtFdaX0pRMyJ1CCVP+sMjcO62KWb5WJjoWSlfh6h05t9/42kzEylIb6F4SyP5vbYWIRG+eCjxLU02MIvY9u/cIxQSuq1EZHhiZlv3NAB/XeTAmy/040vDeUHtMDDt9tH+WRVxYb8HltbfN1gEuSe1+78zv65QYJlco0ECGlJqfha4uruh3N7Rx6Kzoa0F8NmYTp32lxULz7HYanbL9K9S2XSUT197wt+K8ZvYc9LVFgSeMNH5NtvPLq8noioOILKddI5kiPSJNNX/VGRccoDg9e/UommWet361tHvqnX+kxFKnSfBjzcfaX89VEup/Ee2WLH0mXSoHHj47pNTfEj2n4oOXm28w2LjDbx4zbtvGfi3nqZn/fibKjehBy14rwN/7Tccpfqx8DDppq0RMHnx/df/+I6G1V+ueLPXKdZnp/T+rXHJo+Rp3r5wtUM/pDLQX7VMkNjBBb9nweaXIfhGvvU/nkU9t5seZrbUi6dtroP67/vN8nkk9QlX7qW/wKZ63v+oi1k+zSkGBsK6UMQ7z/un0kd5zbaArz60Z5CxBhlUrP++v/z6b9n7gGI3N5nHlH+rkBALFZ6foJ3GUkgnrhWH/6rwEauH/s7qDUh6KXN2EmDqhZWRFjyMWK6KJ+G+XgDuPNT80CL8O0LVmoumFoMMCMxzsAX9r6QLDoP/PMI7xJWGVCjZDmLkav6rJqpsu+Jwg514C2IjKUCFUv+86xS0T9aMuCX15vytIUP8t+KHfFav7JmC7TPziPxW0b8VP4pFkkiXPsbokhSAZwL5z2oAK45uvrUEA1h4/4l5nbf9s4IiaL5ED3GvEpSijNn/jgHsdW27B65KQiR14Ed4t+ukv4Bl9Xeiiz+tX73A5wz1TOz/qWD73U2zj0D0+fGuH3ZGeDpBCgxteFyI2DtHhvPS7I2k0txEHkPN/FHvmJmsmi6ma59/QNtKtc4HDltGmvz5PvQ
t4mc5FfJva9AatPvSs4Jva3XVph0fRfrVBwt1si9hh3QH2iCgCrzdnCsHxyqE6ExD9qFufUKeBiq5+7TLjjUbj+EP/+7cJfCB+EyTdG3v7jKwjmdpzHgc3h+8JnyQzrMobQmYebBwrEXPq+0nBL7Nv++yHsbbyoIl/zUktMZQ3qNMhqfXABHYrOxnv/L2BWKRA37yeMP3jetrHOnFRxEt9+rpN0C9+9Yd20fiuTJHA+2TX1ozvjsfVvCENgiCV8uu7hkE+qNZRiZDEbj3fCtcJ0VeFsSUOQTc+5N9eWhRkTBocU7ZjywzNnFEdMYSxl8tQf0Q0uwRowDkhKInC5jYmkK4ViNoiCQB51P5HaVU1NiKZuwbUa1tfD5y0/vVhfPNhDSSYSm2+rJaA4BrDc3l5azp7cIPcqZE0ibMLYr3Pl21PqMoKQtHFcEdQ3zRr2kT3zSttLKI1IKZ03qQ2cncy1YNZIq8gsD1lZuEXPsB4gD2mYWdP+CPAbva1SJ3S4EhrxjKMmhXdmJb/biy+ruitYimheqT5K2vcZYf8Dv8Mm0wpbvPfKoseoPegPjMsnXrP16x2/gz64hhWSov9Gbctarj8KcqlNKmBdq9PX9tcGI1QnCdYA/D3YF6nJzuKKf5AbFo9q7U+P1qB69/oLiwfj7xJQ27A4WZvQ6DkitQ0vyT3ubt9Rmuk8pX38nGstIHfhBEL7v8TMijhGLhPGEc88S/CwOtgwkVTlp9fcFC8LXt81TadANABQPnCBY9cTHKR9PAKGRRCrfaXiY94FwRjhhc+SkgBwjtkRaOn+cRVTH9LMEshJ+737PgZ6gHu+rvxBn8ZyDdF+QbC1H3VNm2kef0IUZWj+ogV6cOVPcxI3swa6cDNSa55n5SLPbWLh1LPhjqNNDueERkQxMDZEa0jr3rM0/xqEhOfuArIKkteK6NISO4oQa38uP+NZq14zooWLU7/LbpV6RRiemfD54jbndIIIEkLcOwh425myWs6frpR+M77oHAPfDw0Ptin4+BfuQkvevHvTmRVP+44jxXtGsLnzzZypbux8E89nZfkbfvlfTheFUXmPLV2/MbuIdPqDfewqwlIfR19kojelgS7+8zdnfEzVhG9rBOCxH3FR3FKWwAO1lIBeMMZ7mRVEXnDtiS5Q2AC/fPZ8zhpL3z9kny9YsmT0rsxylI5VWoyVmSWp9j089J33mpK3vhVXG1CSiti3gbI/Uqtq6TVsfQSfDkbyGhOpbrwZegI7/E/AYxMCsRxmug3sleM8ZhNXGDagsjMBiP5u7s3fFKT54VrKb7OXOarbGA2iJ8brsILfC9dGeMyzaWAUGZzh3vpjhBR8vTzrjlqZK3GKWR7DwpFgfKeuRV6qtAo14D7/vmv/Sn/ilPR53H1289eyYsgBMW9quwWG+Of7urWaVXylA7Hj3xtN6Z291iqMjaJmDo7xYffrO7t+6QVOFESQcyHNzVsiOP2HP/0G2jAMTAW7KMf0ORlc99oh9F8U2K83d1LF3FIA9bKt60tyzqa/qOoD1YGxn7+5oYoNadkjWpzW4yX5RA4GVQMXnXJZUim6HlwQllO9UYd3h5v5qXpsEUQ6hbAn9TZnDqsSt8O84U2fRYo8Oq4dk8OCtXV9b/8/1Om1PyEKwlyDDpxCrqR9os24r0rM373oji+1Ae3WaFAlaeWMvtcS0i9zlE+/AN5ftwRhD4A0bvz2pBVt6twJY6C6iXP8+K9eZXptAw1FH56iJxpT7dTzrBz/PWZntSi/JyrnG3Vawguc+2/G3+puugp0eoUEw5X29f5C/P2urJ64+oWxWC8a9crYO4Pb7IGWMF/JMgZEpEJ3FoVxr/sXQxhKRWKreET+Q6Eq3GF2rmUZocixSMF1PskYpe86+Gv3qlzGby7RR5eRO5AnEB6QH7R4h5MgNAatY4NO3K7CSbMqJb3G7mQqDqQvb8uvKaEOrmUwvy2DAOZ256eQJZ6AQz4uWmO8cZWDipD8b7y1dIRbJdRl6pSUqPrADV7pX/Gs5xZ1FVCLR7+337hmno5DkWAWJnbEP9SiYmfcbNeDN75/3M+HLNF5R1nj8Uz0oHEdkHfqEFSyOwO5CtNtKEb6OvGKpM8aw8fkOVkR0+1IFQw+JW4dKHSYiKqjnWFDmaMntAt9reTL+f+BcMkfSUX8dOGaxsjclN/1oeEDLNUwtb/jLv7mgWM38lehNSbIoXTBUqtep4c+Ixt0gDjtQIzvf3Sq8O2bW+FSdG87Id0EeCVdxW1bvfaYo9xr4kQTLYCyah+d0574395GU1gKaQ4CuTfa3SR4HM5ddovP6d3BQT2VV6PXTlGaICQnT6jK6ACCNyog61AUp1oIq2EaaEk/8kRdSVPAyaiXF9l339R98faMyz3ppq/YWRHqWD+PjMbRHf9GLEUAY3b+ExeAqkEuk/+ThLeCT0V9DG6eb28sl1CAfRjpvRI/uJcrGJS48n4ygE4MvQyyc1jgYMFTdumc4f7waae5+ci7jRi9/tKK87cVBd+/0FtUjf872L6QhBGORiHmwoB8/ALZ37IK/PSW+E+gutb0XR0frQ6WB9IPUWrJ+Ba/uWUx96Dc/EASnSNpc+hu97Z6FehYdchx17XHi57A1zsecYQWeo9CMOb2sXgn13m4GvvG9xryCXdCpINl6V4UZ9XCCJk1YLr4h88psWoviERrW1lIJesfSf3vu8p4L7o6DgakeILyIPqekFiSNtngVU7bCiAIsxlNgukRJZP1tR8AAp8M2pDZ0In6Spnk4bYXwaiBdSYB+HvLbhuLQ70abghLIBCcm7INfUQvVh2x0FyPZG1UURAnle1BPnEIHEY7rDf85kthzUQ0zUXmF6RhAHgUwKytp4a8ArQSKJSlcr1o7yH4QUkoBgUrQNaNqkfW3HRw3IgGhLkb59a1LSgWBotajWimrdgci7YBomuVNylOUWoausZCX40RYXfYAi3ATrPjyrDwZnYh0QeezcB1XjnXBkwnnfbdOzIgDu/OS/lND6RB/Ugg17ldMtcsu9ARDNW0qIxR7ycJqk1Bkc9WhdfoMwnQn4qr6HnUXAdzngJyWVMFM6L5b820vfirGXE/IcLluJmsZ9N2CB9/FP0ziNJ3QHsOfrbrulVZZCoS/sG7A7pCSeuA1lheHp+QpEqwTdR3j/fjnD9hHqW/oBNYaP+zcF980ODZe7JRbKurotg/QWWVGPhOlmcV8XdUYQ2YdkOPEotTWJPq90r/V+ijeAoieQ6g4YJsSDxbcauFPyQCS/TLGgko14724IJ1XJtllaU2mrzWvNxpiNUIugknBS0zWaITagZoOMhuaohiCuPn4fbeFllj6+X00KN/ltTOFvF26Wj17o39V+GL6V8ZJKvi9HPXtzOHnbUarR7P4gcHHe0XZacDNbIplQ4q4VuciNk12RSzceJP47glnXeyL4pViZIhflCjALe58hLtok8AJ5JZ62qMhXH8UO0t5/+yhi/72Pok1WQLytDwl51rMhPz4F8UCnRhFRfxwN+6SaipurPfmeI7rdJ+V/S96o6wfMkYaiZ48j9c
6voXqF2Nfd7XQgS5DgoLgnz2/32s4bLIP26zekzEjenOj0WJQ3YCp51a5e0QJB3zBWYrExY9KIFVFm7ezVWpM+aluYMSryNEfbj7+wCu+Qnyq/Ll8ccni8/NqOucEoy9DSPWMkOSNkblq+px7QxC05Z9ZLbOWJhT2UwucrxHewrCdAOVEiFv+k+7ne3tnQR1v5qe4IGT4pOvyFqcAKAJ7badvTsWvBjA/+van4r3D1wxwSbH7+KAr1uxR5n6+HIVlMJOf1dkha2jt5EUQF1PLPTzibiOxTW8ujxmy/5IYNtpVLShLwM3dXB+O8URl6kioW7Kzz3+H7DuP9Rbudz3VxXLwkD6HOJ9xPcWsrHsPI87kafnyKuoijf/Jvtz17M92XSEoV7WtpJCe/xePwRZ5mlPDkQftGtgQzKk8an45uYa+kDwEMP1k7HwVyFFKj8G07gNftNN2mfQ3f80RD0IFcsz/tVc5H/LLwf7u/ptr/6P6agam57gr4RMivWrZ7jWHgDF1DpTwTVee0JY/dWjyKipeCjUn2E9fyjVQ/GWchb0J19P10r9q12+/39k5Q74DVGz/E7Ssk+CM6J/9tzNxovYoGoDs2UKnxwR+iFPfS+fi9qXiwvyv+8uSI5uyFWdYnk5goXjng+UwQisT563njSdQfCIjNnfcbf44TYm2bomLjUbUzC2fPuzndu5U+n4kdzLiD5hqlUDjfFCcbLT4YpNPPBs2wwUtlv3jRMzlNuDdl4KhumuLjwItfjNEcFDwU4kyiEbFa7Sgq4H+u+vfieOzM6mfPHu46cgPp9yclTY1eluXVgfv/oibcFIX9L5pwkxj235pwM7f/3oT737/9X9+Em/3/m3D/bzbhdnb06dnj28fsKNv/gSbcfND9NeH+pExGFAnO4R3i6vt11QybXFeifGJ8LqnbXZ8Un7/v5Se2vGLe1ppf0nbStDHH5CSa+uUz/7SPsuDCPKaB08jvUXo0zzYw2oe5Ay14a2YczvgDX4SmUs3HALWfcmU7e8EuQQanfuImmlGrYN7+MQ+bkwwNyQ0rdsWKVPnVjYTHU58pWM7fj2dz3f3yf/a/Vn4E6iwQzP5PJ0hX7KTsJQ5hCnDZPZEPi+spdLe83K8Pg8GHkPYksKmZi3yyHoqo8ahZruA1aDs1FJoBjY52VZCuFtICahQooe5ZvID2W6q0f/4qABRX/+9fvR4IPAlFRf6++t8e+J9fvR74/+2xKONTbVXxYAxuDb/ty6MDopAL56TwDMf3oR1IPtM+AfXl6vz1pThw9Xx4NCJpZtkZcpkdI/EP1Em97jlck6Xy8tL0vAe5hhpzUic5uZG9fn2W/3/b6v3vj0UfW2gMi+uerAK1rhcnHJkqZbIYZ8/c9qD9hugcOu9aiHj5KvmnA6fGRbY45AVodw00ls/u0bpfN9BwVwImI295fXsdr65zxZEewqjzCddXqV139gGBhJLjD7KCZINHpZMoW6b7orScEfkR9I7CueYcjFeDfVnUeILXXILkHrvGqMT3ldWRZdvDI+OvC2fBBm6/2+tUCoIYCKajZOK619NtSZJ9htE3oH6F90T30pMgIQoOudfv8Ubno4/Vtu+cw2GNnss80AiADNcm6UCh+kOUl9rdConecxodkzT4JvzHzf6OVY72sV21pl45uqET2Gdpoi64dKD2gpQo7ML75oP8tTELAgiHfqmy0jj1zj3Jx7g1kHvm10F5Jb1SBGJOqxBEBF3DSwnPZEHXCwvcvlRzEzYVCwgP+Q9fJt1RciCA5NOs+eTOZHJ3tdUw8jam77eDphnyF2U/bHJLPoJLq18r95d446gUUrYk87Bw813Y68LmFUKj6NPKSsoiaDCn/qEuFTKGctTZsVhRgKSo6L9HuEhvylX6p1OZExv4JUdeRA4QbrFpjMywo9q/188whFTaxrwU4FlIuGHpMfJ0otwz93Z3VRLPuvhr79FbxSZ+cbIbt05IBt4Kv6hLcw9bMp9xPQWl8hjHvi8Su3m73OdY4D72Ak8J5f97td7/XWPpn8kAwo0L2dPeM2XYTonIz/4l77FxbsoPWwgfOQBnhb+5E1fd9qITeo4/yWdplN5xOiebMhPDvlWGIRLkI9UfOcB3NUiwR8+s6ydtuq5HZdSy57QUC3BeAurX5EgruGrFp45MlPeX4/3SuPU3VGWk7v/0/lTHl61GP/Y82QRUw2L4pPYiSrUHKJn96g9iJC7xQDUdfOHu+7yuFZgO9tcOKL8n4M87nTo72Q01lHEgJxVjnfpXHDekojwL6YP3l6FJTX1IJLh9RaYxZWsFo/iNGzY1T4x/a66/o1MUdCVYeEm0zfR0W4oEi6UeWKvN5nF7daUcCprwfnEobrKsntSDq7gLJSh+sAz38zJONY8fk8VZIIB3gT2ULA3Jq4lppd4nClKC2jhCJrjxr/drZ3v4lL8RUn0u3G/M0MyuZ0dVsNyI+5NguCrIyzcKw2ekpaoc+74h5yO7uJ5L8nVSZN2MtMPV4hur4SGvSZkACq30OJmgoKc/v9Dp7UJ1Y/yi3qb2HB4pj3Nq6O4iPNJQ3eoUa5j9u/iOg4pnbUV/RmNOFP1mov1DfcGk2xMleX6S5pwJlcLG8DGTwpS5xW1fx6pOoDxJC5aFnfboML0ElB8ET0tETFvCXF6jHw3IxP8nzjSyfwxA4unKhLylnd+nKeN0ESFXfeGl1zT82w6twbZ8mULKZ7VC+vLBtkCcJHLd2dV1aZTPrlb/FNwrxzwYtN+BLPAD41SQHFB0VDim/rJR+OCp9k4oWSDTROqzF/WS58WH5aDMQblI6E88WfEVstiKYaOzj39zV3sWlYMcC695O1JWAmZD5XC/ohUrZtiu7EmWzTVzTgw2VHsDWQ87zDC6E9KvS/cc2EsZTU07BsRAF/MkwQtJOpTD3BAvSR88ljy4h4ZOxW13/06FoxLs6x/rh9sngu/qOQhPkJgCi25ve0S/Bgr3E4xmg5bytZj+Jr57M1/NHa7fO+g2/in0TnSDSmLhwAOVGLnOvD4/m9D/EnX/vD/juHpWpIfK/sngSf8bW9XHUAPk+ZFpZym/yDjLsrRwnBA+0wMzjwEpL9R9TcyVIRb4GYUr2duN2GkbGvOZAzRTPVmQ9cZ6/1jFzb3jCbewMY+iV7x1QPWAZ7QEt6ooMDFF8RexZv3fEeUf5GxSunYd9iB8o5P5SdTts/zdSCh7+slxuhM5p5W0B5cZNWb+pIH4n9t702ZHlZx/8BP1BPvykh2M2czOO3Ybs6+GT/8nqVNVd6lnZp6Y2z3d0bci6pwDdkKmpPylpFRKCbB1JpnuQgnKkYeYB0/icOuN7zTR8z4Zv9nMplf4iZNvRn4T6d+o/zfq/436f6P+36j/N+r/jfp/o/7fqP836v+N+v/NqD9iPCNw90dbVON+fznQlepLxSCPHI1axy1QLKPTAnRdc/oAAVsQmJOpWc8kO1NL8f9tzlTAX7gFDPD0hihAT4yqVhLGHBAKdK4ZS0D2bl5i2XhfUJ0OzSTcf/iFz6+UUGKDIFhWAueLDMSv8eQFvLrIl
UTZCL5RVVuxsbjvRNCmjInnXiDKzjc3Y5wf+OV/BLE1E4muME3wrQUxLpvcbWz6GKBXNHBNVvBdAll+N+b7ivLYiuIF30A0GygOKhLH3QSH9YNgtMHRUq5a3IzRx+Grvk777VfVHGS2kNMHvFOvv/ydxSPho7uDScbNYrsG/8r/Fa3YlXaY2GnlsMHhNVEuwGdUIcgknufMA74zOoNAWGrUPc5oAwYw3wgFRoIOgB03jLDeb+BJHbEpUxwQvHT2qqFOlAGfgG5wCQpkh/32rMJfg+tZzerz0c/7DRSjdXTe72c52c954Aw+o7IzuR5RLltwIQ4cSZEgekxwei7jPixzigk582BpBVv+X+kGNprZ7ctDC5jFWwjq9PG5lkF3RmTAWU10AolvyAoxiYM6Z+miyTQ6V+ARytfh1ymvI4iVKXL352EY1JVkV3Qbns03uFQ2kkIe62cg9RZwQ3yx653O8vX1ARiI+caRap/a8yI/Zz6wzpjcvBi6bGJvslhtx+DnQ1siAzDtbs7ey7t2N/FzqmQV8GUrXS48dBTvhE1mZmTcnvj8opT8lEiKORE+Bxv//67+2H9SX/6SfygJeM/vAEvb96DSrVtcHwi16LztxWo47q8JWSDh34cs/LJuOP6LuuH4Pytkgf7vClkAkVVsnj4+IBqBYaWMddxTju6SCej3eV6HGjm2ssWbfn5O3MpL1BjG/HbgEEQ6ZM75w+jBM4nm/CGiXLrUAnWJTHO7P6Dz++qgKwdImcDcUuEZpdsV/HBekgITuSP/nMBncibU+sMqKm6z7qzt52+168/7hSbUguU9MGRBszzaPDhCT+2FQnnr1SnKoHSEar+kF7dArcWyhXtwVYWxCux8GFS/9Z/cqStNiEXi9uyoZwlrbvThkM6td4twxP2lgbCp6p1aXdyobmrpD1d8bFYWS+Ta8pSw8YeDtMObyAxwDJ81QXA8GVxVnoyhGNB1zNEDQYnadkLBP+Y5tMgYJNPfkzhkicF1E9vsQ3Asxb/hzSvHl6e3xmXUDCgyVMvduA0DWbIqTChrnLykOHoSgJNgLrL40IC1f/qYsX9+ezIVcEw5N95AQYXlHcbRnboCjzpN9te80piI9zSwBC8VqoAt3w+VApa6H9IbIZwxNygm7IZ1Wsg4iBxKxGEIRofRHfthZ4O3cnxBB5Uewo+U9hNOZQ8Q8vsSoRLEfHjhN5AnDG7QPybTlAnI5DQrYNew9s0H6dFwNN3Dm7dIQ1pDFlhDs9EbX3bUrXGHShgHrTBxy1fuFP59KuI1zzQOuXWos787YG+hMTxASDC8V2/ZuG3oqXxYjhKs9H44TMiAlgjzANmXH6gGChmAFjEG9PLA9BdG3XDdVLeAQGXtCT/oHGPufO2PkVvQT6hQ0YcNJ5MwSWqKlEo6Kwtk3TXY2xborvEx0j6Q8/NTlRHw1ERwiy2ArgC0H5S+v3IImwkvMKBJM4FIIA3DNpNMxuITeVUoAkL0K5tYc8NlK2MnA0AgWnUyP5IWG6gpTTIPZU2YTepRCjKAfFKitTOkS027jD8+p3oL7TNp34thEI0w5GmCJsRN0uOh0MCwQeKRYTG9e5CRRXCldmgeE+Vq+hh0KBMinUOF9wKoWpZvuYCmdUAQUxoVqIihwTpoFjhlcN6HkmLzaWD2+dTCadHnyL3ipOFz1NPH29jYAnqm3Ayi/dsxr2ieh6JgTRbmUd0UGZ2K4p530V7TxI1+fjrpKmd4nHz7dLJC43L7Kog7Y+e0NHEkENTnaWUTNSTpSleBkjMssWRDRd+xtHP9fH3UHu1jvDSJRD6jY3lb+tKTiYksWhgXgyXWGFf/4EFXJIkEJaNb8CZnlggcrMZwjim/wdMdbbOoXw1zxgKXh95OvORrytI+Qn0wCVmVOzicUIBsvPX6WU5em4NUZHSSyKGX6HPxDPISvosVFQgOPz5AjCJP6qlHxwjZrmaHRq8h20VrxMfrnEPK6kPBkfSbzN+fO10fnvGROo/SKLMBSqrXvE2gQCZdmFfOuxSqYtmohT2FRCBa+YkH6mnSETOkwKrW4JTs9T0ykoMjGYxe3p6myWTtTDg7RA4L8cLVkzYWs8BFZsefu027WcxXeE8/UoiWjVuM6d+qDguofwuxZiVIrxwD2KLz8ESK4YDE7gY/1NnVz4lFs2EzrAuj8AM4Lx9aFhOy5ak/PqUT6SWQrfn/9T1vJ6Q5UwvvA4ufe/CEVtdN6TimCF0jzIF8eWuYojQM9zD96fRgE5grgyXNZs+ChW168gRr8L0BHjwv2yUVqK1e1yrJi02ZrQpXX3z23oSLKJTfQiblW9Fnw5VhuxHtHSbegQqlKecqs20ls/GMxFgyPzA/rjmG57RT+hmN635c/2gD+pLxy6aWxgpnd59UutdM68hJJd7Cl4Uaxsn6iNUctG5g9p7BCIeHSz36eHCkYanliMsjUpijt5qRc9gfosSkCYgIfS67b9rR9m5DhwffV4y2iRzkyC8X0M1iSkk4DQcmPJfmsAa105RyaV2RUUE18tYawLr9Us77zOd+O/v60NKQkdjQeIJ4YVYAFU2fT+F8jtwfTGFJjLwFnZ6OOPBmsJ6HdvqpMENsOKGeCnsQ8BqYgvd9/PaenoreV3/yGjwUHG44+fvyHXDCWsE+J9G2U40BmQiUqmS+NdVMG+iqkHNqstd3vfNx/OOSh/wtnHLVSYzG8MLSXtWmjFODBc8BJS9lUIqA8cBhiRL7LR3OcZWg6XjK1gk5w/tKCPCt6Q6OzFuB051KigvoUCon3USLLX2+YxmOIzJGZTjQx+qUrzvnxxzinbIGxXgpxQRHsHm9Xtl6e6mAH4kdTC8Kgjx+7lUEeR7Zw3uvNmvOTJK/3ZV4vmc1lkX4hNps7pX1irBcmuEpNs0TPb/l1/Vpz0LQ47aMD5yFZ3/Io+XF8yb18fl5eENsAg0R3pwm4lbkFmPl/ZpHlTVIMEDW8yX5p1NSuprfmOLfZuhKVDVIl/EIIo7qWxCXmY2WYr50TBADNIH1JZvH+VzUhFX34oLLjiEbe73dHhfDxxMJyRcUxE41K/cto5wk6LVgJzBvD0lkfG1P/TorXKaEvtYkm8X0umADlrMFnZTkLhWBPPYsd6jgEF13lwZhKU+c5AFyvlB/SSLq/aGDYxLitpkyYo3kg/7AKkW3Sha9Hxy26IzMbNIGL8YLleYF7rRaXsk5not3WQeMzSP5QCMn2jlbMdOxSeuykYEl9I1hvSzwxd2s4SWuJQRCCOBxcPwX+nlL53rJfPakkOVFD6nLj+EtHrlc69SMAvO7uZ0QaVzKkUivNdIMjn8q/eD6qd2fGmf4NUmAsCdVz5Ioz9YXNEAony5UUm7C1o2gUCArLQJjVmvAo+MYL9DU8FeCtCJVT45b7sQnZnMswNXUrBi5LeP+qTp28NNMedCV3JFtMW0hHwLbSr6tdHKswSIvkpjUOflskgUJiFgg+MwrUW3yUgNSP6VPE83LORVPHauLdb0iaF85QUM+Poogu30ALPIY126lCZJ1CPnzeBcglwgXZ+FVW5SC+FlfXiMC
jpdUW01oYjZlKn4DflofyiYSC8grq3fV+rh7jaANUFWC0BhPOzKyCS5uFWnjnsUiwohT8dMMw8uOqXoHcjnFVEJrXlBdiuFSHp7+QRKioiCORtjMAJ6kCOuMtmhU2MV7SO04Cu1QcIQVbXMbIToW9dztFh+y0PGn0vlEiZKcgY3HvvM6rZzXVxbQMXl5j5F7gnl3X2g5HlKIsB4MWo4giadb0QUVHYtHEEBHnISkeFfk7qEQIBJcnGzuhLbKbL9N5MIaR9pFQ/91dhVg4HAPgEcLeBb8BH3N3gdF44VmxmKWDjghSpkO0pbE3E/mtsqdzq1+9WpYxAaghB8So3eICd3Xw4xy0n9p3EzvS04iPkHk8uz5glkIgLbuInhLz1dylu85aMr07AKOwkI1MCegtn0h6dAb3Hyg4kAd07BO0GKca6I2mJnwetfzDXl/iGEpV7F/aMwjPLzyeQ+KPjDoelxeZLfn+roSjr3iAbFV/WXNz9zbJx/wrEd2FgOC+IEbWO+yZZROu5+LAWFfPruOIlwpwwO7SgAVU0H+wh4fTCF4WR9QL2XCM1EfZPfElKAmgll8GO9m9oK0egpxBJLXsUh1Cr48LfSoLTRo2hjtnCbtNCFEFmkMys15DfCrtwu4fkbdq+hBgTqPcYZtg4DzQUX9ZB0M1MxiuAuIPAXTeYZMnbBjenxHcddvSTfRHYx9M0eQgLbAKttKwEMHV46no/2cbfkt2Od3S4wieqdRVKXHOIXW7EHoQw+l49Ed0NLGwHFlAueWiOTvbw888R4FKDcxJBIUsOwTbk5fVTsmuRnep365xLrokLFszDAg1Mjk1CFzpCrwyNwM94RWSn9cA42ueq4j/YzGu+ZcemjP58w15m7BNM13zmVOLHwLc6xKXYgfLTiTISKPvMbFHAd/m8Dgk6exIHOg+XcrFAaDPDpz541PpIjAqaDU+RiQBFaMd2vmEXUzSVCSLgT5dtZpAAeZIPIzSnmLFkQApwDymlmD9LQ0CEz0hfUjQHuTqwHRjx1weN+jrFMQqu6XBbSeXndrTeeZjhLagF4UaZG38T4bJ4DF4THrz7FyM2nZML73mzdz443sfrDm4Q6E0hDQHMYEGaQW3QfjFs3+Y3ht5A4mMwHgpxtp4p7Utkn7edCDFQ+siQtZJnmjQUFjYeBAlRhRCqyLsjGJY25EcfKQNoAUGV4VZDGEJpymrG8egYF9vAk6Uth5Tfto+ivyXh3zFpLaWmrlcK7ec9E39S3RwD5jxr047UGiuSbDUEiyB7LO2PEIocDOXc1Yl2c12ou7k86sw02WUsE1GwzGx22sLfqkcL1oeL4/Wtw3uAJQaWDMHUHHKCTefOaqJd3hDjiQl+7pEdgUOsP6O+961IiAie8CLnFBH2IotDC3PePatRPgUwEmfSmUZJuWbp9yYT7OaBCkidglpEI9SYIyWuypid5TS1zQZQZ6fwgWphuA43kkn4XuSvkbNdvUbZbaPnSBHFkPW43Zwxib1cGGQn9LA5RWJt4HCfiFD019xPx1D+fWh8rCILaPS+iZDhy3h0t39ZICgfvQG4cOw+e8MYXi0rmKyTu+YjCDiPaZdTfA98d6s7H0a9MUki9rtKxbFypYxAmLfZW20axbLq+rwUdAumMLHvY1DXodR3vqlO9s6457QQytXo9j6CmE6LLKjtpTfYJJBrdPCeGwiKaigO6ufZzoMO80V3+8JSTwJXvtnxRj+R4vDFhaCE++ztABV0xhMvC4vMM73HYTdHG0sKBu5T34FkEMx6/OIeu5AOdTDy9pS6inDkKyDUHIJYIHD3TC2vYk26ZCaWdW5xhtLMLP17nAhMjGqCG0pcCbGCWSkAVdEy2UjPmVYP1MaHY9i9kHA/HE2p8TQerQ1jPKKkQyF+WpUXPUD3r5mD4w6/ct8DmJLvBB7I83Gth7MJ/g22ckH6SSDJNz709JGCtmgEE4+2Jv6uFIxEGQAvlIyCpJ191wwePaAkblULzX8yfv8tTZi2RX7bHqfSCfDWHVRnvQOl4m0SfLTbgi/EmH5P40Ek6tmBHl96kAhAg+Xvsmn1WmuD0wpDxV2yPzF5rYXjzlkkLbOrPwTrylC7AZfggtOUNN4B9vVj2pq0oGgtgr1GiZDdYjFyht3oE+g+bzlOD4nFOAolHOzY7DU8FeBNIENLwJJ4N28WjTGG4y1FDaYez+ODFS5VAemEYChjoiqs5A/HynBt4Zmr3fk80UqUEDFKDxPjWtep3f5INshwXmIw6s/EQ8aKHBSE0D72whQuhuqDWSxJjjTQOVhM6ojrV5GtHv18zoNn2oc4LUE2lnt2dIbGkr17WEDtW95xnVj2Ilp8TnkKqkk3mfdauUNfLhwwRTYTGIY3zS6n60egggfiI8eu2HiTnU1jHwY7fZme/EkHfH3qDwq85bUMxNsKYv4D7ScfKDjgpNtlOzQJeTD8/FVgdinEL2LQOZJRLF2YaPQnr2RFk60wzTsRaJcERvU5qLa1vX998xZgNauXdwSEAM1Ktwo2+RPjond10oHWV0oJVSs/BmQdwmlGgck2YUxo1c59ZcUBZMneRRfA/2D+uUx2zKOdOum7dNOl7VodsHsoFz6cXTGxoR9UaEG1+jrbPrKgXMlEiHUjWGlwVXFxG/gcEMXi+m6TvrZWsz4BkoZ1UOEl/iQBe8RWPXEpVGeizhw9C9ucyjpHLMT+YfnWfd2xym6nyHkbVyjCmLkkNC2hCxUtJf7LQ2p6IcHWc/3oh72tmzNLzm2e0ZoPQY9N1Lw8Oi82HybDtl9pGcaaCsjB7wFNfsUg28MU/ZI1a7w99mDt7bmD2SG+s/qmuKojyskJO31B6fKWMQRCLXEDKdealIeonglKzSo6suBTu09tGzjYleIyd/mV+dry+nGcAx3oRv5p2aqHtR6zMyy4E/BeVCwJVP93QR5NU+A62+V4qEFKht1sE6umOiq2oG6WvubZV5YCXkB8olIwuKTbkEWC3RW0y+DtJO70C7GZ5HBXA/vXbmiKT72FJu6m/04bxlhHdSiYoeBvvgeQXGUDarQxhdVsqRW0l+3cmaL9wT6ppVo54c5RWqiVJ3kPYnIJ4j/wr6wIV6zJrEGGibGqiExp3ohKOVrsiGUq1ZGhDAtEGTVqtHYJHXKpyMM4XXZENBpwZytcECYPiB6Vuc9tox6nB5mkZuqnzqjrmbaq7RT5TSiNavnhq6z3VMf5xsTTw0qlvEW+scePDwNDrhY9NXiAjjkW4IXTdWLa10/+QIwIBOrQgXgQUkN4PRjj/8VChUv96Nq+CGvnc0Cg/wcRAAak69cVkN7L6DCANaiReuBX29tdbQniTDqraIKRLYQMnR8nfSDvF3g+Y8Paf3Il1yt41u0JsllUC0TSimqPsrO6dW4MQVQWdKlEM7hOOfhbuhk1feH1pIO8OU7TUy4WORFCMLPPH1srybII/w21VLtUbnTL4baztrwZoVUeAVfQRMjZsLfjo5JqERGmT6qUfG74G1s7UMeYHppZy+n1b54NY5i+wJKLizDsA2iYHmHdvoxwhR/3bM3hNsxrI
skKVaAuuAj8DNiWjGuuOsSHhNhq2P/k4HFD2TixI9kRcwQ/wN6HV4fZPw8QPO7xVhHm905r96Ekhi45wKIoqiUfECzz9SYrnpJQHqQ4jwrGV1vQwvD3tOV9FguiFPFIILYMmO8/mO9/AAMDAX9OMIyNoAfuWSXrRbBdQOtjEYqjRsjGnYHTcxDfJVU6eh/YBmQJc3VkRtdxy0927jmdcFpcruAJw6Jofmm1Hj7Qsu6rip6xUuNdJOnsBgkpDKthTUBtDp+f3qU0RctluZMaftA48KCQ9TpzAgVf8HrzAwFMgu74HfZXPUoI2Zv45tgReksYuBA1TYthR9EP1T/jDngoNB6vMoXJLMCYowsqHKZAN9ISihrsTwcnrZRm2hQcpL11o+cu1vQgsQ3EAmO+ktk3b0YNAwAo2Ntb9q4eJRN/akXUR9sSVJsMRQ2gRZ2yUBCo/ecM/do4cBFuBzI8fQVSQwS/M7kIL3zfYAb8gun4+V9ARMnIzuA6FiENWp6Br7x9apMRfyPuIGJr9HUCKrD5XkEOg9nUvufiKZMYo0BI+RgxQBphhk+2ZQEv5QU56etquaQAFN2x2ddnopxzBBi9y0C62wcTrFzcwdgvpUnob5E+lUP0YR/jZSPBnzx7FM6UdAveQl16f9+YTyUFYWGHW1pCvGCiTrF88HLiYNx2G+srqm3iwjFYC5eK6sJy1WtJvpq/DkED5uIKuc2M6ZfcuH6r0WXO89T9XWVr0dQ4lpwdHidbj6u2sfetscTzjGWmuFMlDpk83YLIJA/Xfx0eTzRpOPxuAc3JyzHeyGpU+0aTmpXORYp6O+QdPUMeoXSUw4d2nLp+2zgYxi8z6oqfHRJa+gWI0/dQ7fSHIZM2y0GBtFqWBp3dvgcUxuMlREsDwfLqEVHMWDydbHMuybtGy35jcdvArCNQtnMGnR46o9rjS9ifGbQUNofqdWJEnHVN9XQiAx1EgoAAA6jGK10+S0rxrUWKFKPCSnNp3BjxR/MAnFIuN9cqV2ZDIwDySBizIARb0qLFeSMorQ9CdN4FdWxgeH7Y2qQ5VfJyY9kTEI0NnvHDVsPXApFQ2a3UOJGJTUxT5rra6wa/YC8PfQHsWuLHJtptULbTZYNsu8lY10nsY+CF1x58oCi/S7CF3ytMmEXk5Hao0PYOgEuYTM6QGs6e5JAxt3zsJ0O8GzWAN2NgsqXdvVaYNzfREN7VMtRV+TdXMcs/8qgZ+GbQvTV+a163w+uZyKxvGQzSKxJ7weA9PTXxmOvDwSvva5mjcezHLu5RDeE8jt1h6McY/gu3wDHg4vpz7aI1jGAjeOpJ5O5s1JBblKjb8rXR2PLEm6mv5ABkkgH3ROs9lPg8IDT6aXqkBlj9L91T+Yan29PsXLi1qN4QnDCYH2R3/2HKlrEOojQspSzJM/4tOT3JoOxg7XfD81pW1Wx/WD7jinxArzaDy7/NgS9Cddw48MGFP0N5lYY8MkcALCgTbtFusLjcaSCyFIsDSGeWtYT4fewqEcsftsdtUdoXx8XzpMz/rZ00FmjsZskgJnINR9a7e5QKZzqaOe2zFIBCXC9LnENk8RD9I3i8nmHQncj8RwicFHuD1IMDhJnGDXiWJxyvjRE7cCDXzrNPrR+J3oENmVkHZgw2eZwDz3Ke+D7BbQ2mwhaRbglTq0wbu2Qco5iCwClGlA/drLmx1HCgXuIJe+g23SZ7LauUbgB8voVTilLb5CNB3XazaYDQt0rgQsfY1wmy+11OP0cw0hHxGPdHEx0PL8yJZ6aGOgr9oRdc8eQtF90J4ZkVaU6pXoItIeiy44TTPquHfPZQGkPsh9s2oNNo2MYJd5OIo1yZ/JGC3oiwaOHzI2+hiBR2hyHVQ0SdrE38xLTnELiISsZw4akduRIFk8CwED3KLPao1jBFqzu54M40wnwaNXfeKp075512d07gezH9cmdFvB2F5HEvT1qfYeJn3MtIkQYISdXBDchDrtZmpPno/SB8rmIuSeK7xB2aHw8iUxamofjpCOo1ZonAkdnoeXJab9qNtJXplHQcIBNdt1FgM9ytks7s1OP/YNgSeTUW93HOl/sde1Vdf+z/PNgz0isMcjMQIjCeH5lfDDlkz4KN3v16Aq5ZOwLKY0Kluy7DdTci+bSDnlilj5vv/HTK0jMIwuPhjrfL+nsQJ4OC/wnRzqIDYFil3n7NsL7IkNoC1b8gyzV+cjvvbJ7ODU4y3G4h+aFYL3i9/HM7uPaSA+rKUqvBe3ZJnDEPPi19D5sNiuLwzNP/YEyCLxwtxtZ8rw5NRve85Z3176Kc/xE+XPHTbQK/F8qWxdO4U/d8zkngUNdOXGcN2PXTNipffwltf+M+qcFI07FahF/simNqbOA1p4Eq1hUNkwU7+36qbOcbDQnmniPemZN28Jn5MGD3lynAbBPt2jlCQcnRaa8lysHk0qEm38gJgBzK91U6HYk70gfloD341PLWCD7u15Pq0iD0C9o/JO8k+yxYFtRTDWH0PRwv/7HTz+pMcvdgzBTunnddJPLAnR+u34u9/v8t5ZCNRByYz3yWj3a1ezkhTuep8HGkDl+5K+a4dR2X7udOpSLWw/GpWSpJ30Ft1f8QsIWCWI1/vOZ/2JX+H2fWf1N5IF+szwj/5hMTCoZmm8NeYJxvm43qFaLlNqT5srr6/vP2VCj5Ux8o7HEyT7Oi34WY11dQEWaAeXK2IsTCkqaikrQ6nMcBo7MwKZd4X0zzUmnQ1vqICC4tXFyMBZ2Po41kFFyQsQYS9ekLLvyRkxhWatkxI1E7KMa/vuJfPct73dcGMZJfo2h0Dxae52V62L/AqYZBpok+r2UIJqFJHNiCAPtnDNUS2mpIx7t4w4wFoNLMZlzqi1Ru/23AAdaObF1EJze3SlGJnoy+txL8nAnTNh5QSyTxd2r3TNK7MYWr1rh7vYC1rx1Lli9AdS70eerdDUssgw0k49dHxowJmSG/wYL7ff7Htjx/ZDwmogYZLVn6MEEhb9lLCnCiRsPynK/BjiNwmbbgCvuIuDPxpdcRFfEiZcEqa4ZXdJmPYbCauBhN2kWvwmlu9LwgC+3RJb4c73ATHStLMBKJJxCpbqfi5s5PaaO18Rce/fNP3CU8f6agp4oHwLbGA+FX+Bafd1/Q1X3WfHA2zcRXUAmQXrWmHOpzHvh/BjH97ujPOx6oV9P+Mq+AthfsRV3Dn/JcGesPFyHNNMTI8MVXzaTKB6H2aKuWUoc7/H6PJ+TKpLA5eOj2unwDGCBuv6HaGz4Pn2huFBz10DlfNcMMBzLGmW8H0m/z+M5afMGd336+kGYp1BLU3AVQ/k5m9WaseuPcWakv3Z8JehbahDh/JXnbhX0TmgqO7xlchiVTEVpgNejz7FCEnM3MYZXRLQaIdn34EnuXu77IjMwIgVtYL1h16WjiVvP/pMEpuG6OrtXfRpE109AbWEBduHTpkqtVf3s99XXEMNcp0ySjn1VvdnOXyei93/IIdX/AbytoRTELzfxfC8fOhsB+JDTt6KJ7Lu9XdkZb7Hb+i2VH41/Yo/eZxNnpUgAZliv1
8/hR8xKr+P1fjW5uofW/7+GYMLqlC8QN/eJ78uub0CSJXqJ48cCcjeH9fx6pTDZy2w36/vDzkZXiGzDh7PEAgwTVkTVwpgEPT+hy3yqs1NworlpY0ozn++nSFFPJvr2J3X1dLm4LkzJC+RRFpSmXO047Ata3knPNa6nwO6CV/6xI+x9tUJ3C9lU38XIwQr6jXfz3lmg/iTE9vYq/zoN2z7hthleM5YDfC83jF5Y1MVXaqk1C0hPwBY4YxbuAUDVM6uDauE4S33wnbwdC28nsAyltZtCsMLU/+72en85Ay4901iOFDR5HX18FOeZPuurYDQ8Z/I/VNbefkgIvj1JRAnev+cVeDeKTO++xVh86VtcScQXlwCaH6y8ESj89vKOac0DVCI+4ZQZ1fZyC7PV4CopG+SdEUGuWfbs9GJFAjONBzED/R+6gQPWh0IhlhOKOMzOEHVwJLDN62EQLyvyKFLSn+i4LXGXoj9dQ0kWfrZh5+SrV2RRieNN/kx0tnYIpO7X5HN3pVyLZJEKHL6Km28Zya5jFYxH8OGPpoNbbpofTSn23SnO/R92jS+2zQRnL9ThCudX50JXphJD9CSSHz6HfsRzvDKpjnvWXOEWeMf7vmb0PjTKj/SUTvl9vy/hHh1PUAKHqIvP5wEiaDshJzIYtEMTZfYx8cU0Z+p5C4JemvB9jRGSVfs9r8qhyBM/zmHIAH9OSD/+72/PCAf+XMOQUf8hwLKlf8hLP8c9NkZ9jk35wt4+PxzmsfunX8Pt2+7FkTrF6+6/sOt+CvoPj2JlI+/iMZvXlkGXsNuz9ec232cgnduYwyC9K+YfhCdf8XjF107268DfA7Dfw1P0D/wBAU5sv7AE+wXPEH+WTzB/nxIws5PCpyK5XOMp3z6d+bNV6fgP/AK/Yt4RWO/5xWB/olXFP0v5NV32fnt/Bnjduo7ULSdiBtApjaZ+mv8RA3YlIy/4x4xLN38Ra5/TBe9mPMLMNp/vjX6+vz8qwS/H3mdrzHYH4CcLc/n/2JxwFH4/8J/n5T1F4AKQ/9SiYD/JBH6P6Qxbv6L+UTQf4BYnP7/edoif2IS3zXxq/1r5ygIqIPstBvz/17e4wj+hzmK/IL7/+I5iv55hV2Sf9jz+AIH+/5XIgD9DyKgxTMoi/Xvy/YvNn+//s2xSej699ewH0N+P/Wx7yc+f3sE9RdZs/95vP9fZM2eTnGoc2Ycu+2kRvYa8/Tr8OnULaD3v6T1/8jck7QUkqAE8QcOn/czPKcy7BczcOzm+OudGP7jif4X8cA7+nx8naTJR9ANIL/feHyOIQPdvi7SrnmlX3+XY5y98p8M/5Kv77f534wyj6f5LxID8vdigPxCyUZ/oWTj/zQx+F+cRP73EgMY/Q+WA4L4N5MD4s+awH+IHPzjP1sQvk/A7yoh/ed14V8rCH/WCf5TBAGl/3PlACOoP8gB+c+SA7Ab0QEd7cdnp1nWP7Uuy8E3/g8=</diagram></mxfile>
2001.09215/paper_text/intro_method.md ADDED
@@ -0,0 +1,22 @@
+ # Method
+
+ We aimed to mimic a sparse and noisy content distribution, which required curating a novel dataset via specific lexicons. We scraped $500$ random posts from a recognized transport forum[^4]. A pool of $50$ uni/bi-grams was created from tf-idf representations extracted from the posts, which was further pruned by annotators. Querying *Twitter* with the extracted lexicons yielded a collection of $19,300$ tweets. To increase lexical diversity, we added $2500$ randomly sampled tweets to our dataset. Despite the sparse nature of these posts, their lexical characteristics act as **information cues**.
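+
+ As a rough illustration of this lexicon-extraction step (not the authors' code; the `load_forum_posts` loader and the exact ranking rule are our assumptions), scikit-learn's `TfidfVectorizer` can surface the candidate uni/bi-grams:
+
+ ```python
+ from sklearn.feature_extraction.text import TfidfVectorizer
+
+ posts = load_forum_posts()  # hypothetical loader for the 500 scraped posts
+
+ # Rank uni/bi-grams by their maximum tf-idf score across posts and keep
+ # the 50 best as candidate lexicons, to be pruned by annotators.
+ vec = TfidfVectorizer(ngram_range=(1, 2), stop_words="english")
+ X = vec.fit_transform(posts)
+ scores = X.max(axis=0).toarray().ravel()
+ terms = vec.get_feature_names_out()
+ seed_lexicons = [terms[i] for i in scores.argsort()[::-1][:50]]
+ ```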
+
+ Figure [1](#fig:pipeline){reference-type="ref" reference="fig:pipeline"} pictorially represents our methodology. Our approach required an initial set of ***informative tweets***, for which two human annotators labeled a random sub-sample of the original dataset. Of the $1500$ samples, $326$ were marked as *informative* and $1174$ as *non-informative* ($\kappa=0.81$), discriminated on this criterion: ***Is the tweet addressing any complaint or raising grievances about modes of transport or services/events associated with transportation, such as traffic, or public or private transport?*** An example tweet marked as *informative*: .
+
+ We utilized tf-idf to identify the initial lexicons from the curated set of ***informative tweets***. The $50$ terms with the highest tf-idf scores were matched against the complete dataset, and tweets containing them (by sub-string match) were identified as ***transport relevant tweets***. Redundant tweets were filtered based on cosine similarity scores. **Implicit information indicators** were identified based on a ***domain relevance score***, a metric that gauges the coverage of an n-gram ($n = 1, 2, 3$) when evaluated against a randomly created pool of posts.
+
+ We collected a pool of $5000$ randomly sampled tweets drawn from outside the data collection period. The rationale behind this metric was to discard commonly occurring n-grams, normalized against random noise, and retain those of lexical importance. We used terms with a high domain relevance score (threshold determined experimentally) as lexicons for the next set of iterations; the growing dictionary augments the collection process. The process ran for $4$ iterations, at which point no new lexicons were identified, providing us $7200$ ***transport relevant tweets***. To identify linguistic signals associated with *complaint* posts, we randomly sampled a set of $2000$ tweets as a training set, manually annotated with two labels: *complaint relevant* ($702$) and *complaint non-relevant* ($1298$) ($\kappa=0.79$). We employed the features described below on our dataset.
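+
+ A minimal sketch of how such a domain relevance score and the bootstrapping loop could look (the exact form of the score, the `THRESHOLD`, and the `relevant_tweets`/`random_pool`/`lexicons` variables are our assumptions, not the paper's specification):
+
+ ```python
+ from collections import Counter
+
+ def ngram_counts(texts, orders=(1, 2, 3)):
+     c = Counter()
+     for t in texts:
+         toks = t.lower().split()
+         for n in orders:
+             c.update(" ".join(toks[i:i + n]) for i in range(len(toks) - n + 1))
+     return c
+
+ def domain_relevance(g, rel, rnd):
+     # Coverage among transport-relevant tweets, normalized by coverage
+     # in the random background pool of 5000 tweets (our guess at the form).
+     return (rel[g] / sum(rel.values())) / (rnd[g] / sum(rnd.values()) + 1e-9)
+
+ rel, rnd = ngram_counts(relevant_tweets), ngram_counts(random_pool)
+ lexicons |= {g for g in rel if domain_relevance(g, rel, rnd) > THRESHOLD}
+ # Re-query the corpus with the grown `lexicons` and repeat until no
+ # new terms are added (four iterations in the paper).
+ ```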
+
+ <figure id="fig:pipeline" data-latex-placement="!t">
+ <img src="pipeline.png" />
+ <figcaption>Pictorial representation of the proposed pipeline.</figcaption>
+ </figure>
+
+ **Linguistic markers**. To capture linguistic aspects of complaints, we utilized Bag of Words, counts of POS tags, and Word2vec clusters.
+
+ **Sentiment markers**. We used a quantified score based on the ratio of tokens found in the following lexicons: MPQA, NRC, VADER and Stanford.
+
+ **Information specific markers**. These account for a set of handcrafted features associated with *complaints*; we used the stated markers: (a) Text-Meta Data, the counts of URLs, hashtags, user mentions and special symbols, used to enhance retweet impact; (b) Request Identification, we employed the model presented in [@danescu2013no] to identify whether a specific tweet assertion is a request; (c) Intensifiers, a feature set derived from the number of words starting with capital letters and the repetition of special symbols (exclamation and question marks) within the same post; (d) Politeness Markers, the politeness score of the tweet extracted from the model presented in [@danescu2013no]; (e) Pronoun Variation, pronouns can reveal or intensify personal involvement; we utilize the frequency of pronoun types $\{\textit{first, second, third, demonstrative and indefinite}\}$ using pre-defined dictionaries.
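+
+ For concreteness, here is an illustrative (not the authors') take on markers (a) and (c); the regexes are assumptions about what counts as a URL or a repeated symbol:
+
+ ```python
+ import re
+
+ def text_meta_features(tweet):
+     return {
+         "n_urls": len(re.findall(r"https?://\S+", tweet)),
+         "n_hashtags": tweet.count("#"),
+         "n_mentions": tweet.count("@"),
+     }
+
+ def intensifier_features(tweet):
+     return {
+         "n_capitalized": sum(t[:1].isupper() for t in tweet.split()),
+         "n_repeated_symbols": len(re.findall(r"([!?])\1+", tweet)),
+     }
+ ```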
+
+ From the pool of $7200$ transport relevant tweets, we sampled $3500$ tweets as the testing set. The results are reported in Table [1](#tab:res){reference-type="ref" reference="tab:res"} with $10$-fold cross-validation. **As the number of iterations increases, the pool of lexicons gets refined, augmenting the selection of *transport relevant tweets***. The proposed pipeline is tailored to identify complaint relevant tweets in a noisy scenario.
2011.08067/main_diagram/main_diagram.drawio ADDED
@@ -0,0 +1 @@
+ <mxfile host="app.diagrams.net" modified="2020-06-03T15:29:25.826Z" agent="5.0 (Macintosh; Intel Mac OS X 10_15_3) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/13.0.5 Safari/605.1.15" etag="5JFvOW7V5qlRpI3zq0f0" version="13.1.14" type="device"><diagram id="w_b0Qjeg2eZhJgkEHY1Y" name="Page-1">7V1be9o6Fv01PIbPknx9JCS0M21OckIzc/o0n4Md8MRYHOM0ob/+SGCBbQkwwRfZOH2oLd/X3kv7oi3RQ8P5x5fQXszusOP6Pag4Hz1004MQAl0h/9GW1abFQsamYRp6zqYJ7BrG3m83boyvm755jrtMnRhh7EfeIt04wUHgTqJUmx2G+D192gv2009d2FOXaxhPbJ9v/a/nRLNNq6kpu/avrjedsScDJT4yt9nJccNyZjv4PdGEbntoGGIcbbbmH0PXp+AxXDbXjfYc3b5Y6AZRngvugfXXUEFahBc4ePhqfJ065hUwN7f5Zftv8RfHbxutGASuQxCJd3EYzfAUB7Z/u2u9DvFb4Lj0OQrZ253zHeMFaQSk8f9uFK1i8dpvESZNs2jux0f5b4k/jz470RB/2RcXz90oXJETQte3I+9XWlp2LPTp9rztpQ/YI4+ASqygKhNXrJ6apaRvEdnh1I3iq5LoZm6EjCM3WuK3cOJyNyIbie/ZNa2Fd4ogLU6QN57t4ynZg8qjuwjdJQGXYIUDTsI7+VFhTEPb8ci5Q+zjcH0Celn/Udku7IkX0W/UqKiXUYhft8SAhyT5yw0j9+OgLPfJhEH7viMgYAScJciHlP3iT+F9Krisz6qWFO6HF/1FL+9r8d7P+GZ0++YjubNiOwH53sRFdPcnux/d2V223mPX7RXbRm0PYAP1uFdeE+UQiGrRlD5Lpuy9E4QhTNH9iKr12zPZnNJNwNrIQxLNnD4Q3Y7S4ttwI0EiZf1Hjrx4vs/aAxxQZbF9bxqQ3QnB3yXt15QuHjFDg/jA3HOcjFoBno8FsE8H2lH2qQLywbLIp1pNId8ZJDKqMXdGRrhqTnNXlJVi33mEdPDiSGdJRjqN7x2l8guPMgqhnGZJ06QyS+y9jzAEXRpDjKx/XTtDNE5Q/9oHf+mYK5VgrtaNOTLb7woU7yaLXQEdZVwBUK0rwL4zGbC6E+wQlT8cniYpQ0NTc+JOJiKSPZuauqbCoYC2COOtZJDUeZroVYarGuAgbB9NrGpownnMVdNEkNdpKE1M2Wgiee7zKAeY7S2dA1bGD6iYA+w7m88BQ88gWTcHVN4K8xwInAEdSaE+qW8vl96kvjylYy9nW/F+njksW15c0jIhQFFqmrWdyUSipSn90bKu9p5RBv5GmYTD1rWvitJ58gtH9C6lC81IlufXu7zuTad3p9l83pRI3d99XtXyj8t0qlaOqsFmqVrVpjW391qN3nHjXson9U7XM3qnVax3fAb5dL0rurtqnDagPQMyjdOGhjn4Zxg8o6Gq1sCOR3kOf//9+1UbBerN+PXp2/uf17+vWDS807QVn4hs9ohIVlbIrG5ERIg572OseH43HHNLMsx5+7pCLcM8O/JXJeYjAw00TfsxcqLgfw//fp1evfznqjEDfwm7ByBKWb6+ohtHrN9678ENPYIa1YD8JlGImsD5Ep5XePXRWbLm7cj4ftwygmUNiQlqJlhjiuxKdSyF0MBGsoj3DIZ20BvC3kDhRN1wLlmScQnwCMtPJlNN06mvIHSEUgVbK0HFXgN4xnuDbS8Dq51fghrJygh3FstAkmEJ+7WXX4K8DGkcef5JxQNCENWcbNOlYhufzRoMf1TMt/eZF7ljQjP6tPfQXlTAQQ2gvpE2c4bG07CsWVFiGmrNM3MSEVAwPif2JqBUDOQH7kejPzhFSNfh2OEklid9caID8TxEdGNl2UhY6gJHcw0Rfy3dQLbeK71MR814lLoG+xpHNtNgjUm6seHLwoHnp87czp9dx/GCKWn+bq+OlkPtF4OUUsjW1Yo6vK2TUUmPBySaBwpy9nh9gPRkr3elfNazT/SGsc3cdYVn5qkamqjiM1Xv/W4SaY9Pbxl1hwzMiB6ZVmVfuqC2Y4i1Car1ichSnUqQd9ojlCuuA3xIv+g6UgE/GT9q4yc/7ZFDfzmzF3TTm68X10mKIQtkRAm6bf1uP7v+A156sVv6jKMIz8kJPj1wbU9ep2vARQ7n+mGD5WKzCBCF32Y7L94HFdF1/D43syiiqwcNKBJwNHECte9NcPDiEVGG/Ql5Ihw5dkSMwYi2L+n/OGCbGrUl6wNXC/9tSUyL2V8QHzyrDPmWUdGLUhSYURSTVxSRr1yeouSo8O4URQJFUWG9isJufNSLhhfX+VuSedGQD3g6L1ogqNq9aEFf23nRJ4z4o6KdY3HFZnZycd7V5z5RsXnwO0Ve9wtev+mSrcio//2GmQseb+6Inb+v3rZu7n9xPUPd/jvMUQ/euWXVu2WGIpn/DrtArxmKUrv/rsvjazRoUKRq38LIjorlnVZiALVvQWX7B9K3Va2+knm5sr0WfiRUHDBe/CJp9QeM/BIrXcAoEFTtASPqAsazAsbc6zoUHlmex09+dSnRsMvFd6S1h22IT5J23rgM3riZUZS6wzbEp2g7RZFRUeoO22T6TYMubNu71t7nwzbTqi1sexq/DpTx8EW5/7FED8+2dz0Jr9Qcyy2m9UtUaJ/QpeLNPqsvZOl4QfgExGYfns9SMWi1lkOBBEd3jC22xj6H6SjKdRcizIqKk6678EQWolXvuh987+Q06Rn5foe03QafWB3T0VzTUUU+uwmfkV5BwbeZZh80BEX3AIj4V0DNvRhkfgGTjn7F0g/mpB+LyGShH2w7/XTRnJeK6cevq9LRr1j6CaaBiuknmfXjh7FbRj9LAutX6wTPi6CfYBKo8ESzth/PO/jeraWfrktAv1ozNBdBPzMn/TTJnE8+lfKUc6nFONsl9YIjV3omD4OqG8YW491lYUpmop43C8PiLkmYqAuyMG1mYoV1CmK8u4RM2UzMm5DRaptBevC9kzYxai8T9Qp/i1GMd5ebKZuJeXMzmlzBoc7nZp5gi5mo1M3EHJX6Jw2snh6bFwArMFHa1YCi/DNbcyy13E9ZAbieYwrzp4GN9ZrDFAzov2IwJaBKhylfvF0+ptuiwyIw1eTT06IrKyrH1JRPT/nK2AZ2qghKp6xGjkpWqTtVzlBpolRxtZjmKPqUX1k5ayUBsHxQ2Sxl5ayVBJjmWL1Zbkyz1koCTHPM6pa/A+CslQTAlhlb1WKtrPo9gNaFVRJgWmZYVZ0HkLVWEgBbR2xVqrWSANN2xlb1A2u2LbbShTXg1WJaZmxVh7WSAdO2hVUyYFpmWFWdB2DKp6xlxla1dKrCyv5qMW3FkFXWA5BBWdsWW8mgrHUMWZVqrSTAtG1hlQyYtjKskgBY66QQABwH9qRVcbLz
HuLmIhwDVk0ZQ62yNHJyTQrhlAe1rFoWi48M7gbjb7dkQ/nxOPhjPLp/vLt95OCvqJqILlrLKtxAIRJI9yKq4HfWgVVSMdHs0Yw+nu7uo7f5nysT2j+/Pn4T/Ph2m2YWAFNhnQnL2lRYvSUEnFf4NhWQc6nyuuHmo+Q21SYSfGXTb0ExaIvKslF1+k12Q0zXXd8e+0Ici9kddlx6xj8=</diagram></mxfile>
2011.08067/main_diagram/main_diagram.pdf ADDED
Binary file (36.1 kB).
 
2011.08067/paper_text/intro_method.md ADDED
@@ -0,0 +1,45 @@
+ # Method
+
+ In this section, we show how the two-step process of hierarchical encoding can be achieved using a single standard transformer encoder. If we want an $M$ layer utterance encoder followed by an $N$ layer context encoder, we start with an $(M+N)$ layer standard encoder. Then, by applying two separate masks as designed below, we convert the standard encoder into an HT-encoder. First, we need to encode the utterances independently. Within the self-attention mechanism of a transformer encoder, which token gets to attend to which other tokens is controlled by the attention mask. If we apply a block-diagonal mask, with each block matching the length of the corresponding utterance (as shown in Figure [2](#fig:masks){reference-type="ref" reference="fig:masks"} bottom-left), to the concatenated sequence of tokenized utterances, we effectively achieve the same process of utterance encoding. We call this block-diagonal mask for utterance encoding the **UT-Mask**.
+
+ Similarly, another attention mask (**CT-Mask**) captures the context encoding phase, allowing tokens to attend beyond their respective utterance boundaries. See the two matrices on the right of Figure [2](#fig:masks){reference-type="ref" reference="fig:masks"} for examples of such CT-Masks. From here, it follows that if we apply the UT-Mask for the first few layers of the encoder and the CT-Mask in the remaining layers, we effectively have a hierarchical encoder. The CT-Mask also gives us more freedom over what kind of global attention we allow during context encoding. Positional encoding is applied once before the utterance encoder (local PE) and once more before the context encoder (global PE).
+
+ <figure id="fig:masks" data-latex-placement="h!">
+ <embed src="figures/masks.pdf" style="width:75.0%" />
+ <figcaption>Example of UT-Mask (<span class="math inline"><em>A</em></span> for the given <span class="math inline"><em>C</em><sub><em>I</em></sub></span>) and CT-Masks. Blue cells: 1, White cells: 0. Bottom left is the UT-Mask and on the right are CT-Masks for HIER-CLS (top) and HIER (bottom). In this example, the context comprises three utterances of lengths <span class="math inline">0, 1</span> and <span class="math inline">2</span>, respectively. <span class="math inline"><em>C</em><sub><em>I</em></sub></span> indicates which utterance each token belongs to. The entries in <span class="math inline"><em>P</em><sub><em>I</em></sub></span> denote the relative position of each token within its own utterance.</figcaption>
+ </figure>
+
+ The steps for obtaining the UT-Mask and the positional encoding for the utterance encoder are given below, accompanied by Figure [2](#fig:masks){reference-type="ref" reference="fig:masks"}. $C$ is the dialog context to be encoded. $w_{ij}$ is the $j$-th token of the $i$-th utterance. In $C_{I}$, each index $i$ is repeated $|u_i|$ (length of $u_i$) times, and $C_{IR}$ is a square matrix created by repeating $C_I$. $P_I$ has the same dimensions as $C_I$, and it stores the position of each token $w_{ij}$ in context $C$, relative to utterance $u_i$. $\mathcal{P}:I \mapsto R^d$ is the positional encoding function that takes an index (or indices) and returns their $d$-dim positional embedding. $A$ is the UT-Mask for the given context $C$ and its utterance indices $C_I$. An example instance of this process is given in Figure [2](#fig:masks){reference-type="ref" reference="fig:masks"}. $\mathbf{1}(.)$ is an indicator function that returns true when the input logic holds, applied to a matrix or vector element-wise. $$\begin{align*}
+ C &= [w_{11}, w_{12}, ..., w_{Tl_T}]\\
+ C_{I} &= [0, \dots, 0, 1, \dots, 1, \dots, T] \\
+ P_{I} &= [0, 1, \dots, l_1 - 1, 0, \dots, l_2 - 1, \dots, l_T - 1] \\
+ C_{IR} &= repeat(C_I, len(C_I), 0)\\
+ A &= \mathbf{1}\big(2C_{IR} == C_{IR}^T + C_{IR}\big)\\
+ P_c &= \mathcal{P}[P_I, :]
+ \end{align*}$$
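+
+ The construction is easy to check numerically; the following NumPy transcription (ours, shapes only, not the paper's implementation) mirrors the equations above:
+
+ ```python
+ import numpy as np
+
+ def ut_mask(utt_lens):
+     # C_I repeats each utterance index once per token; P_I stores each
+     # token's position relative to its own utterance.
+     C_I = np.repeat(np.arange(len(utt_lens)), utt_lens)
+     P_I = np.concatenate([np.arange(l) for l in utt_lens])
+     C_IR = np.tile(C_I, (len(C_I), 1))           # repeat(C_I, len(C_I), 0)
+     A = (2 * C_IR == C_IR.T + C_IR).astype(int)  # 1 iff same utterance
+     return A, C_I, P_I
+
+ A, C_I, P_I = ut_mask([1, 2])  # two utterances, of lengths 1 and 2
+ # A == [[1, 0, 0],
+ #       [0, 1, 1],
+ #       [0, 1, 1]]  -- block-diagonal, one block per utterance
+ ```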
+
+ The attention masks for context encoding depend on the choice of model architecture. We provide the details of the architectures and their attention masks in the subsequent section. Other masks are possible, but these are the ones we found to work best in their respective settings.
+
+ <figure id="fig:models-part2" data-latex-placement="t!">
+ <figure id="fig:hier">
+ <embed src="figures/hier.pdf" />
+ <figcaption><strong>Model: HIER</strong></figcaption>
+ </figure>
+ <figure id="fig:hier++">
+ <embed src="figures/hier++.pdf" />
+ <figcaption><strong>Model: HIER++</strong></figcaption>
+ </figure>
+ <figcaption>The proposed architecture for the hierarchical transformer: (a) HIER: when the belief states are not available and (b) HIER++: when the belief states are available.</figcaption>
+ </figure>
+
+ We propose several model architectures to test the effectiveness of the proposed HT-Encoder in various experimental settings. These architectures are designed to fit, in terms of input and output, the four experimental settings (see Section [3.1](#sec:four-eval){reference-type="ref" reference="sec:four-eval"}) of the response generation task on the MultiWOZ dataset.
+
+ The tested model architectures are as follows. Using the HIER encoding scheme described in Section [2.1](#sec:hte){reference-type="ref" reference="sec:hte"}, we test two model architectures for response generation, namely HIER and HIER++.
+
+ HIER is the most straightforward model architecture, with an HT-Encoder replacing the encoder in a Transformer Seq2Seq. The working of the model is shown in Figure [3](#fig:hier){reference-type="ref" reference="fig:hier"}. First, in the utterance encoding phase, each utterance is encoded independently with the help of the UT-Mask. In the second half of the encoder, we apply a CT-Mask as depicted by the figure's block attention matrix. Block $B_{ij}$ is a matrix which, if all ones, means that utterance $i$ can attend to utterance $j$'s contextual token embeddings. The local and global positional encodings are applied as explained in Section [2.2](#sec:conversion-hte){reference-type="ref" reference="sec:conversion-hte"}. A standard transformer decoder follows the HT-Encoder for generating the response.
+
+ The CT-Mask for HIER was obtained experimentally after trying a few other variants. The intuition behind this mask is that the model should reply to the last user utterance in the context. Hence, we design the attention mask to apply cross attention between all the utterances and the last utterance (see Figure [3](#fig:hier){reference-type="ref" reference="fig:hier"}).
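+
+ One plausible transcription of this CT-Mask (the authoritative block pattern is the one drawn in Figure [3](#fig:hier){reference-type="ref" reference="fig:hier"}; this sketch assumes attention is kept within each utterance and allowed to and from the last one):
+
+ ```python
+ import numpy as np
+
+ def hier_ct_mask(utt_lens):
+     C_I = np.repeat(np.arange(len(utt_lens)), utt_lens)
+     last = len(utt_lens) - 1
+     same_utt = C_I[:, None] == C_I[None, :]  # keep local attention
+     reads_last = C_I[None, :] == last        # every utterance attends to the last
+     last_reads = C_I[:, None] == last        # the last utterance attends to all
+     return (same_utt | reads_last | last_reads).astype(int)
+ ```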
+
+ HIER++ is the extended version of the HIER model, as shown in Figure [4](#fig:hier++){reference-type="ref" reference="fig:hier++"}, that also takes the dialog act label as input. The dialog act representation proposed in @chen-etal-2019-semantically consists of the domain, act, and slot values. A linear feed-forward layer (FFN) acts as the embedding layer for converting their 44-dimensional multi-hot dialog act representation. The output embedding is added to the input token embeddings of the decoder in the HIER++ model. Similar to HDSA, we also use ground truth dialog acts during training, and predictions from a fine-tuned BERT model during validation and testing. HIER++ is applied to the Context-to-Response generation task of the MultiWOZ dataset.
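+
+ The act-conditioning step can be pictured as follows (a PyTorch sketch of one plausible implementation; the class name and layer sizes are ours, only the 44-bit multi-hot input comes from the paper):
+
+ ```python
+ import torch.nn as nn
+
+ class ActConditionedDecoderEmbedding(nn.Module):
+     def __init__(self, vocab_size, d_model, n_act_bits=44):
+         super().__init__()
+         self.tok = nn.Embedding(vocab_size, d_model)
+         self.act_ffn = nn.Linear(n_act_bits, d_model)  # multi-hot act -> d_model
+
+     def forward(self, token_ids, act_multi_hot):
+         # token_ids: (B, L); act_multi_hot: float tensor of shape (B, 44).
+         # The act embedding is broadcast over every decoder position.
+         return self.tok(token_ids) + self.act_ffn(act_multi_hot).unsqueeze(1)
+ ```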
+
+ As described in Section [2.1](#sec:hte){reference-type="ref" reference="sec:hte"}, the encoding scheme of HIER-CLS is more akin to the HRED [@chen-etal-2019-semantically] and HIBERT [@zhang-etal-2019-hibert] models. It differs from HIER++ only with respect to the CT-Mask.
2102.06857/main_diagram/main_diagram.drawio ADDED
The diff for this file is too large to render. See raw diff
 
2102.06857/paper_text/intro_method.md ADDED
@@ -0,0 +1,126 @@
+ # Introduction
+
+ In this section, we review optimal transport and its unbalanced formulation, and then derive from them formulations for robust optimal transport. For any $P$ and $Q$ in $\mathcal{P}(\mathcal{X})$ for a space $\mathcal{X}$, the OT distance between $P$ and $Q$ takes the following form $$\begin{align}
+ \text{OT}(P, Q) : = \min_{\pi \in \Pi(P, Q)} \int c(x, y) d \pi(x, y), \label{eq:OT_formulation}
+ \end{align}$$ where $\Pi(P, Q)$ is the set of joint probability distributions on $\mathcal{X} \times \mathcal{X}$ whose marginal distributions are $P$ and $Q$, and $c: \mathcal{X}\times \mathcal{X}\to [0,\infty)$ is a cost function.
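+
+ For discrete measures, this is a linear program; assuming the POT library is installed, a toy instance of equation [\[eq:OT_formulation\]](#eq:OT_formulation){reference-type="eqref" reference="eq:OT_formulation"} reads:
+
+ ```python
+ import numpy as np
+ import ot  # POT: Python Optimal Transport
+
+ a = np.array([0.5, 0.5])             # masses of P
+ b = np.array([0.25, 0.5, 0.25])      # masses of Q
+ x = np.array([[0.0], [1.0]])         # supports of P
+ y = np.array([[0.0], [0.5], [1.0]])  # supports of Q
+
+ C = ot.dist(x, y)     # pairwise cost c(x, y) (squared Euclidean by default)
+ pi = ot.emd(a, b, C)  # optimal plan; OT(P, Q) = (pi * C).sum()
+ ```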
+
+ **Unbalanced Optimal Transport:** When $P$ or $Q$ is not a probability distribution, the OT formulation between $P$ and $Q$ in equation [\[eq:OT_formulation\]](#eq:OT_formulation){reference-type="eqref" reference="eq:OT_formulation"} is no longer valid. One solution to this issue is to use unbalanced optimal transport (UOT) [@chizat2018scaling], which is given by: $$\begin{align}
+ \text{UOT}(P, Q) : = \min_{\pi \in \mathcal{M}_+(\mathcal{X} \times \mathcal{X})} \int c(x, y) d \pi(x, y) + \tau_{1} \mathbf{KL}(\pi_{1}\| P) + \tau_{2} \mathbf{KL}(\pi_{2}\| Q), \label{eq:UOT_formulation}
+ \end{align}$$ where $\mathcal{M}_+(\mathcal{X} \times \mathcal{X})$ denotes the set of joint non-negative measures on the space $\mathcal{X} \times \mathcal{X}$; $\pi_{1}, \pi_{2}$ are the marginal distributions of $\pi$, corresponding to $P$ and $Q$ respectively; and $\tau_{1}, \tau_{2}$ are positive regularization parameters. Note that we can replace the KL divergence in equation [\[eq:UOT_formulation\]](#eq:UOT_formulation){reference-type="eqref" reference="eq:UOT_formulation"} by any Csiszár divergence [@Csiszar-67]. However, we only consider the case of KL divergence in this work.
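+
+ An entropic-regularized version of equation [\[eq:UOT_formulation\]](#eq:UOT_formulation){reference-type="eqref" reference="eq:UOT_formulation"} can be solved by the generalized Sinkhorn scaling iterations of @chizat2018scaling; a minimal NumPy sketch (the regularization $\varepsilon$ and the fixed iteration count are placeholder choices):
+
+ ```python
+ import numpy as np
+
+ def uot_sinkhorn(a, b, C, eps=0.05, tau1=1.0, tau2=1.0, n_iter=1000):
+     # Generalized Sinkhorn for entropic UOT with KL marginal penalties.
+     K = np.exp(-C / eps)
+     u, v = np.ones_like(a), np.ones_like(b)
+     for _ in range(n_iter):
+         u = (a / (K @ v)) ** (tau1 / (tau1 + eps))
+         v = (b / (K.T @ u)) ** (tau2 / (tau2 + eps))
+     return u[:, None] * K * v[None, :]  # (unnormalized) transport plan pi
+ ```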
+
+ **Robust Optimal Transport:** Optimal transport is well known for not being robust in the presence of outliers. One way to deal with this issue is the approach of unbalanced optimal transport, which has demonstrated favorable practical performance in generative models and domain adaptation [@Balaji_Robust]. More specifically, when $P$ and $Q$ are probability distributions on $\mathcal{X}$, the ***R**obust Unconstrained **O**ptimal **T**ransport (ROT)* problem admits the following form $$\begin{align}
+ \text{ROT}(P, Q) : = \inf_{P_{1}, Q_{1} \in \mathcal{P}(\mathcal{X})} \min_{\pi \in \Pi(P_{1}, Q_{1})} \int c(x, y) d \pi(x, y) + \tau_{1} \mathbf{KL}(P_{1}\| P) + \tau_{2} \mathbf{KL}(Q_{1}\| Q), \label{eq:uncons_robust_OT}
+ \end{align}$$ where $\tau_{1}, \tau_{2} > 0$ are given regularization parameters. The name robust unconstrained optimal transport reflects that, instead of looking for an optimal plan moving mass from $P$ to $Q$, we seek a plan that optimally transports mass between their approximations under the KL divergence, the probability measures $P_1$ and $Q_1$. This formulation is closely related to the ones studied in [@Balaji_Robust] and [@pmlr-v139-mukherjee21a]: the former used the $\chi^2$-divergence for the relaxation and the latter used the total variation distance (note that all three divergences belong to the family of $f$-divergences).
14
+
15
+ By relaxing only the marginal constraint on $P$ (presumed to be the contaminated measure), we obtain another version of ROT, named ***R**obust **S**emi-constrained **O**ptimal **T**ransport (RSOT)*, which is given by $$\begin{align}
16
+ \text{RSOT}(P, Q) : = \inf_{P_{1} \in \mathcal{P}(\mathcal{X})} \min_{\pi \in \Pi(P_{1}, Q)} \int c(x, y) d \pi(x, y) + \tau \mathbf{KL}(P_{1}\| P), \label{eq:semicons_robust_OT}
17
+ \end{align}$$ where $\tau > 0$ is a regularization parameter. We can define $\textnormal{RSOT}(Q,P)$ similarly; although $\text{RSOT}(P, Q)$ can be different from $\text{RSOT}(Q, P)$, the techniques for obtaining the computational complexity of both are similar.
18
+
19
+ **UOT vs ROT/RSOT:** Though the formulations of ROT/RSOT and UOT look similar, they serve different purposes. The goal of UOT is to deal with unbalanced measures, so there is no condition on the "transport plan", and the meaning of the optimal plan $\pi$ of the UOT problem depends on the interpreter. For example, in applications such as [@schiebinger2017reconstruction], UOT is used to infer the developmental trajectories of cells. Meanwhile, ROT/RSOT aim to seek an accurate transport plan between two possibly corrupted probability distributions. The toy example in Figure [1](#figure:rot_and_uot){reference-type="ref" reference="figure:rot_and_uot"} illustrates this difference. In particular, the marginals of the "transport plan" obtained by the latter (see plots $(b), (d)$) are very different from the two original probability measures $\mathbf{a}, \mathbf{b}$. On the other hand, the solution of the former leads to good approximations of $\mathbf{a}$ and $\mathbf{b}$ (see plots $(a), (c)$) while removing some bumps in both tails, which are presumably outliers.
20
+
21
+ <figure id="figure:rot_and_uot" data-latex-placement="!t">
22
+ <embed src="rot_and_uot.pdf" />
23
+ <figcaption>Comparison on two marginals induced by ROT/RSOT solutions and UOT solutions. Here <span class="math inline"><strong>a</strong>, <strong>b</strong></span> are two (possibly corrupted) 1-D Gaussian distributions on which we compute the optimal transport, and <span class="math inline"><em>a</em><sub>[<em>p</em><em>r</em><em>o</em><em>b</em><em>l</em><em>e</em><em>m</em>]</sub>, <em>b</em><sub>[<em>p</em><em>r</em><em>o</em><em>b</em><em>l</em><em>e</em><em>m</em>]</sub></span> represent two marginals (with respect to <span class="math inline"><em>a</em></span> and <span class="math inline"><em>b</em></span> respectively) of the optimal solution for the corresponding <span class="math inline">[<em>p</em><em>r</em><em>o</em><em>b</em><em>l</em><em>e</em><em>m</em>]</span>. In plots <span class="math inline">(<em>a</em>), (<em>b</em>)</span>, we compare ROT and UOT where both <span class="math inline"><strong>a</strong></span> and <span class="math inline"><strong>b</strong></span> contain (<span class="math inline">10%</span>) outliers from other Gaussians, while in plots <span class="math inline">(<em>c</em>), (<em>d</em>)</span> we investigate RSOT and UOT where only <span class="math inline"><strong>a</strong></span> is corrupted.</figcaption>
24
+ </figure>
25
+
26
+ When $P$ and $Q$ are discrete measures, the KL penalties in equations [\[eq:uncons_robust_OT\]](#eq:uncons_robust_OT){reference-type="eqref" reference="eq:uncons_robust_OT"} and [\[eq:semicons_robust_OT\]](#eq:semicons_robust_OT){reference-type="eqref" reference="eq:semicons_robust_OT"} suggest that the probability distributions $P_{1}$ and $Q_{1}$ must share the same support sets as $P$ and $Q$, respectively. Therefore, throughout this section, we implicitly require this condition in our formulations of RSOT and ROT, and we denote the masses of $P$ and $Q$ by $\mathbf{a}$ and $\mathbf{b}$, respectively.
27
+
28
+ Assuming that the marginal constraint associated with $Q$ is kept, that of $P$ is relaxed, and $P_1$ and $P$ share the same support set, the formulation of RSOT in equation [\[eq:semicons_robust_OT\]](#eq:semicons_robust_OT){reference-type="eqref" reference="eq:semicons_robust_OT"} can be rewritten as follows $$\begin{align}
29
+ \min_{X \in \mathbb{R}_{+}^{n \times n}, X^{\top}\mathbf{1}_{n} = \mathbf{b}} f_{\text{rsot}}(X):= \left\langle C, X\right\rangle + \tau \mathbf{KL}(X \mathbf{1}_{n} || \mathbf{a}),
30
+ \label{eq:semi_robust_OT}
31
+ \end{align}$$ where $\mathbf{a}, \mathbf{b}$ are the masses of $P$ and $Q$, respectively, and $C$ is the cost matrix whose entries are distances between the supports of these distributions. Solving problem [\[eq:semi_robust_OT\]](#eq:semi_robust_OT){reference-type="eqref" reference="eq:semi_robust_OT"} directly with traditional linear programming solvers can be expensive and does not scale in $n$. Therefore, we apply the entropic regularization approach proposed by [@Cuturi-2013-Sinkhorn] to the objective function of RSOT, leading to $$\begin{align}
32
+ \min_{\substack{X \in \mathbb{R}_{+}^{n \times n}, X^{\top}\mathbf{1}_{n} = \mathbf{b}}} g_{\text{rsot}}(X) : = f_{\text{rsot}}(X) - \eta H(X). \label{eq:semi_robust_OT_entropic}
33
+ \end{align}$$ Here, $\eta>0$ is a given regularization parameter, and we refer to problem [\[eq:semi_robust_OT_entropic\]](#eq:semi_robust_OT_entropic){reference-type="eqref" reference="eq:semi_robust_OT_entropic"} as *entropic RSOT*. The dual problem of entropic RSOT is $$\begin{align}
34
+ \min_{u, v \in \mathbb{R}^{n}}h_{\text{rsot}}(u,v):= \eta \|B(u, v)\|_{1}
35
+ + \tau \big \langle e^{-u/ \tau}, \mathbf{a}\big \rangle - \big \langle v, \mathbf{b}\big \rangle,
36
+ \label{eq:semi_robust_ot_entropic_dual}
37
+ \end{align}$$ where $B(u, v)$ is defined as a matrix of size $n\times n$ with entries $[B(u, v)]_{ij} := e^{(u_{i} + v_{j} - C_{ij})/ \eta}$. Since equation [\[eq:semi_robust_ot_entropic_dual\]](#eq:semi_robust_ot_entropic_dual){reference-type="eqref" reference="eq:semi_robust_ot_entropic_dual"} is an unconstrained convex optimization problem, we can perform alternating minimization for $u$ and $v$ by setting $\partial h(u, v) / \partial u = 0$ and $\partial h(u, v) / \partial v = 0$, resulting in closed-form updates of a Sinkhorn-like procedure (see [@Cuturi-2013-Sinkhorn]) in Algorithm [\[algorithm:rsot\]](#algorithm:rsot){reference-type="ref" reference="algorithm:rsot"}. This procedure is known to converge to the optimal solution $(u^*, v^*) := \mathop{\mathrm{arg\,min}}h_{\textnormal{rsot}}(u, v)$. As strong duality holds for the convex optimization problem [\[eq:semi_robust_OT_entropic\]](#eq:semi_robust_OT_entropic){reference-type="eqref" reference="eq:semi_robust_OT_entropic"}, the optimal transport plan of the entropic RSOT is exactly $B(u^*,v^*)$.
38
+
39
+ :::: algorithm
40
+ ::: algorithmic
41
+ **Input:** $C, \mathbf{a}, \mathbf{b}, \eta, \tau, n_{\textnormal{iter}}$
+ **Initialization:** $u^0 = v^0 = 0$, $k = 0$
+ **while** $k < n_{\textnormal{iter}}$ **do**
+ $\quad a^k \gets B(u^k,v^k) \mathbf{1}_n, \quad b^k \gets \big(B(u^k,v^k)\big)^{\top} \mathbf{1}_n$
+ $\quad$ **if** updating $u$ at this step (the two updates alternate) **then** $u^{k+1} \leftarrow \frac{\eta \tau}{\eta + \tau} \big[\frac{u^k}{\eta} + \log(\mathbf{a}) - \log(a^k)\big]$, $\quad v^{k+1} \leftarrow v^k$
+ $\quad$ **else** $u^{k+1} \leftarrow u^k$, $\quad v^{k+1} \leftarrow \eta \big[\frac{v^k}{\eta} + \log(\mathbf{b}) - \log(b^k)\big]$
+ $\quad k \gets k + 1$
+ **return** $B(u^k,v^k)$
42
+ :::
43
+ ::::
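+ For concreteness, below is a minimal NumPy sketch (ours) of the updates in the algorithm above. The flattened listing does not show the control flow explicitly, so the even/odd alternation between the $u$- and $v$-steps is our assumption, consistent with the alternating minimization described in the text.
+
+ ```python
+ import numpy as np
+
+ def robust_semi_sinkhorn(C, a, b, eta, tau, n_iter):
+     """Sketch of Robust-SemiSinkhorn: alternating dual updates for entropic RSOT."""
+     n = C.shape[0]
+     u, v = np.zeros(n), np.zeros(n)
+     for k in range(n_iter):
+         B = np.exp((u[:, None] + v[None, :] - C) / eta)  # B(u, v)
+         if k % 2 == 0:  # u-step (v held fixed); a^k = B 1_n is the row-sum vector
+             u = (eta * tau / (eta + tau)) * (u / eta + np.log(a) - np.log(B.sum(axis=1)))
+         else:           # v-step (u held fixed); b^k = B^T 1_n is the column-sum vector
+             v = eta * (v / eta + np.log(b) - np.log(B.sum(axis=0)))
+     return np.exp((u[:, None] + v[None, :] - C) / eta)   # approximate transport plan
+ ```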
44
+
45
+ Since no assumptions are made on the cost matrix, except that its entries are non-negative, closed-form solutions of OT and UOT generally do not exist. Therefore, we introduce the definition of an *$\varepsilon$-approximation* of the solution of an optimization problem, which will be used in all the subsequent complexity analyses.
46
+
47
+ ::: definition
48
+ **Definition 1** (**$\varepsilon$-approximation**). *For any $\varepsilon>0$, a transportation plan $X$ is called an $\varepsilon$-approximation of the minimizer $\widehat{X}$ of some objective function $f$ if $f(X) \le f(\widehat{X}) + \varepsilon$.*
49
+ :::
50
+
51
+ Based on this concept, we then state our main theorem on the runtime complexity of Algorithm [\[algorithm:rsot\]](#algorithm:rsot){reference-type="ref" reference="algorithm:rsot"} in solving the RSOT problem [\[eq:semi_robust_OT\]](#eq:semi_robust_OT){reference-type="eqref" reference="eq:semi_robust_OT"}.
52
+
53
+ ::: {#theorem:rsot .theorem}
54
+ **Theorem 1**. *For $U_{\textnormal{rsot}}:=\max \{3\log(n), \varepsilon / \tau\}$ and $\eta=\varepsilon/ U_{\textnormal{rsot}}$, Algorithm [\[algorithm:rsot\]](#algorithm:rsot){reference-type="ref" reference="algorithm:rsot"} returns an $\varepsilon$-approximation of the optimal solution $\widehat{X}_{\textnormal{rsot}}$ of the problem [\[eq:semi_robust_OT\]](#eq:semi_robust_OT){reference-type="eqref" reference="eq:semi_robust_OT"} in time $$\begin{align*}
55
+ \mathcal{O}\left(\frac{\tau n^2}{\varepsilon}\log(n)\left[\log\left(\frac{\tau\|{C}\|_{\scriptscriptstyle \infty}}{\varepsilon}\right)+\log(\log(n))\right]\right).
56
+ \end{align*}$$*
57
+ :::
58
+
59
+ ::: proof
60
+ *Proof Sketch.* The full proof of Theorem [1](#theorem:rsot){reference-type="ref" reference="theorem:rsot"} is in Appendix [8](#appendix:rsot){reference-type="ref" reference="appendix:rsot"}. Note that this result is not obtained by directly applying Theorem 2 in [@pham2020unbalanced] with $\tau_2 \to \infty$, as the nature of the dual function changes in that limit, invalidating many previous results. Let $X_{\textnormal{rsot}}^k$ be the output of Algorithm [\[algorithm:rsot\]](#algorithm:rsot){reference-type="ref" reference="algorithm:rsot"} at the $k$-th step, while $\widehat{X}_{\textnormal{rsot}}$ and $X_{\textnormal{rsot}}^*$ denote the minimizers of equations [\[eq:semi_robust_OT\]](#eq:semi_robust_OT){reference-type="eqref" reference="eq:semi_robust_OT"} and [\[eq:semi_robust_OT_entropic\]](#eq:semi_robust_OT_entropic){reference-type="eqref" reference="eq:semi_robust_OT_entropic"}, respectively. The goal is to find $k$ that guarantees $f_{\textnormal{rsot}}(X_{\textnormal{rsot}}^k) - f_{\textnormal{rsot}}(\widehat{X}_{\textnormal{rsot}}) \le \varepsilon = \eta U_{\textnormal{rsot}}$. We start by decomposing $$\begin{align*}
61
+ \underbrace{f_{\textnormal{rsot}}(X_{\textnormal{rsot}}^k)}_{g_{\textnormal{rsot}}(X_{\textnormal{rsot}}^k) + \eta H(X_{\textnormal{rsot}}^k)} - \underbrace{f_{\textnormal{rsot}}(\widehat{X}_{\textnormal{rsot}})}_{g_{\textnormal{rsot}}(\widehat{X}_{\textnormal{rsot}}) + \eta H(\widehat{X}_{\textnormal{rsot}})} \le \left[g_{\textnormal{rsot}}(X_{\textnormal{rsot}}^k)-g_{\textnormal{rsot}}(X_{\textnormal{rsot}}^*)\right] + \eta \left[H(X_{\textnormal{rsot}}^k)-H(\widehat{X}_{\textnormal{rsot}})\right],
62
+ \end{align*}$$ and try to bound each term by a linear function of $\eta$. Dealing with the entropy term is simple since the $\eta$ factor is already present, and the entropy difference can be bounded by a constant since $1 \le H(X) \le 2\log(n) + 1$ for all $X \in \mathbb{R}_+^{n \times n}, \|X\|_1 = 1$. The non-trivial part is bounding the difference between $g_{\textnormal{rsot}}$ values, which hinges upon two results. The first one is the value of $g_{\textnormal{rsot}}$ at optimality: $$\begin{align}
63
+ \label{equation:main_text:grsot_optimal}
64
+ g_{\textnormal{rsot}}(X_{\textnormal{rsot}}^*) &= -\eta-\tau(1-\alpha) + \langle v_{\textnormal{rsot}}^*, b_{\textnormal{rsot}}^*\rangle.
65
+ \end{align}$$ The second result is the geometric convergence rate of the updates on $u$ and $v$ (Lemma [6](#lemma:rsot:uv_dual:convergence_rate){reference-type="ref" reference="lemma:rsot:uv_dual:convergence_rate"} in Appendix [8](#appendix:rsot){reference-type="ref" reference="appendix:rsot"}): $$\begin{align*}
66
+ \max\Big\{\|u^{k+1} - u^*\|_{\infty}, \|v^{k+1} - v^*\|_{\infty} \Big\}\le (\text{const}) \Big(\dfrac{\tau}{\tau+\eta}\Big)^{k/2} =: \Delta^k.
67
+ \end{align*}$$ The final step is to use equation [\[equation:main_text:grsot_optimal\]](#equation:main_text:grsot_optimal){reference-type="eqref" reference="equation:main_text:grsot_optimal"} to bound the $g_{\textnormal{rsot}}$ difference by a linear function of $\Delta^k$, which is an exponential function of $k$, and then to solve for the minimum $k$ at which this exponential function is small enough compared to $\eta$. The main technical difficulty here is dealing with the unknown term $\langle v_{\textnormal{rsot}}^*, b_{\textnormal{rsot}}^*\rangle$ in equation [\[equation:main_text:grsot_optimal\]](#equation:main_text:grsot_optimal){reference-type="eqref" reference="equation:main_text:grsot_optimal"}, which is what forces the deviation from the previous techniques. ◻
68
+ :::
69
+
70
+ ::: remark
71
+ **Remark 1**. *The result of Theorem [1](#theorem:rsot){reference-type="ref" reference="theorem:rsot"} indicates that the complexity of the [Robust-SemiSinkhorn]{.smallcaps} algorithm for computing RSOT is of order $\widetilde{\mathcal{O}}(\frac{n^2}{\varepsilon})$. This complexity is near-optimal and better than that of the standard Sinkhorn algorithm for the optimal transport problem [@Dvurechensky-2018-Computational; @lin2019efficient], which is of order $\widetilde{\mathcal{O}}(\frac{n^2}{\varepsilon^2})$.*
72
+ :::
73
+
74
+ In this section, we briefly present another version of robust optimal transport, abbreviated as ROT, for the case when both distributions are contaminated. We first show that the approach based on the duality of the entropic-regularized ROT objective does not produce a Sinkhorn algorithm as in the cases of RSOT and UOT. However, a closer look at the problem reveals an interesting link between the optimal solutions of ROT and UOT, which results in a simple algorithm for ROT. We also discuss some technical difficulties in analyzing the complexity of the ROT problem. At the end of this section, we show that the result can be extended to the case of a low-rank cost matrix, which significantly reduces the computation.
75
+
76
+ Recalling that the masses of $P$ and $Q$ are $\mathbf{a}$ and $\mathbf{b}$, respectively, the ROT problem [\[eq:uncons_robust_OT\]](#eq:uncons_robust_OT){reference-type="eqref" reference="eq:uncons_robust_OT"} becomes $$\begin{align}
77
+ \label{eq:robust_OT}
78
+ \min_{X \in \mathbb{R}_{+}^{n \times n}, \|X\|_{1} = 1}f_{\text{rot}}(X):= \langle C, X\rangle + \tau\mathbf{KL}(X\mathbf{1}_n||\mathbf{a}) + \tau\mathbf{KL}(X^{\top}\mathbf{1}_n||\mathbf{b}).
79
+ \end{align}$$ Here we set $\tau_1 = \tau_2 = \tau$ for the sake of simplicity, since no additional technical difficulties arise when working with finite $\tau_1\neq \tau_2$. As noted in Section [2](#sec:preliminary){reference-type="ref" reference="sec:preliminary"}, the formulation [\[eq:robust_OT\]](#eq:robust_OT){reference-type="eqref" reference="eq:robust_OT"} bears some resemblance to the unbalanced optimal transport problem studied in [@pham2020unbalanced], except for the additional norm condition forcing $X$ to be a transportation plan (i.e., a joint probability distribution), which shows the different nature of the two problems. Following the Sinkhorn approach used for UOT, the dual of formulation [\[eq:robust_OT\]](#eq:robust_OT){reference-type="eqref" reference="eq:robust_OT"} has the form $$\begin{align*}
80
+ \eta \log \|B(u,v)\|_1 + \tau \big\{\langle e^{u/\tau},\mathbf{a}\rangle + \langle e^{v/\tau},\mathbf{b}\rangle \big\}.
81
+ \end{align*}$$ By taking derivatives of the above function with respect to $u$ and $v$ and setting them to zero, we obtain $$\begin{align*}
82
+ \frac{B(u,v) \mathbf{1}_n}{\|B(u,v)\|_1} = e^{-u/\tau} \odot \mathbf{a},\quad \frac{B(u,v)^{\top}\mathbf{1}_n}{\|B(u,v)\|_1} = e^{-v/\tau} \odot \mathbf{b},
83
+ \end{align*}$$ where $\odot$ denotes element-wise multiplication. Unfortunately, the above equations do not admit closed-form solutions that would produce updates as Sinkhorn algorithms do, because of the term $\|B(u,v)\|_1$ in the denominators. However, although the objective function of UOT is not homogeneous with respect to $X$, it can be written as a linear function of the ROT objective plus another function of $\|X\|_1$, thanks to some special properties of the KL divergence. This observation leads to the interesting result summarized in the lemma below.
84
+
85
+ ::: {#lemma:rot:Xrots .lemma}
86
+ **Lemma 1** (**Connection with UOT**). *The optimal solution of problem [\[eq:robust_OT\]](#eq:robust_OT){reference-type="eqref" reference="eq:robust_OT"}, denoted $X_{\textnormal{rot}}^*$, is the normalized version of $X_{\textnormal{uot}}^*$, the minimizer of UOT in its entropic formulation. More specifically, we have $X_{\textnormal{rot}}^*= \frac{X_{\textnormal{uot}}^*}{\|X_{\textnormal{uot}}^*\|_{1}}$.*
87
+ :::
88
+
89
+ The proof of Lemma [1](#lemma:rot:Xrots){reference-type="ref" reference="lemma:rot:Xrots"} is in Appendix [10](#appendix:rot:proofs){reference-type="ref" reference="appendix:rot:proofs"}. Based on this result, we can use the Sinkhorn algorithm that solves UOT (see [@pham2020unbalanced]) with a normalizing step at the end to produce a solution for ROT. Although the normalizing step is convenient for finding ROT's solution, it introduces a new challenge in the proof compared to that of UOT, since the normalizing constant does not have a lower bound. Even so, we are still able to obtain an $\varepsilon$-approximation of the solution of ROT in $\widetilde{\mathcal{O}}(n^2/\varepsilon)$ time without any additional constraints on the setting. For more technical details, please refer to Appendix [10](#appendix:rot:proofs){reference-type="ref" reference="appendix:rot:proofs"}.
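+ In code, Lemma 1 reduces solving ROT to a one-line post-processing step on top of any entropic UOT solver. In the sketch below, `uot_sinkhorn` is a hypothetical placeholder for such a solver (e.g., an implementation of the algorithm in [@pham2020unbalanced]), not a real API:
+
+ ```python
+ # Hypothetical call to an entropic UOT Sinkhorn solver, then the Lemma 1 step.
+ X_uot = uot_sinkhorn(C, a, b, eta, tau)  # placeholder: entropic UOT minimizer
+ X_rot = X_uot / X_uot.sum()              # Lemma 1: X_rot = X_uot / ||X_uot||_1
+ ```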
90
+
91
+ **Further Improving Complexities by Low-Rank Approximation:** As a consequence of our complexity analysis, we can show that by applying the low-rank approximation method studied in [@Altschuler-2018-Massively] to the kernel matrix $K:= \exp (-C /\eta)$, we can further reduce the complexities of both the robust semi-constrained and unconstrained optimal transport problems to $\widetilde{O}(nr^2 + nr / \varepsilon)$ time, for the same $\varepsilon$-approximation and approximation rank $r$. This result is essentially different from the complexity studied in [@Altschuler-2018-Massively], where the $\varepsilon$-approximation is considered with respect to the optimal value of the entropic-regularized problem, rather than the original one as in our analysis. For a more detailed discussion, please refer to Appendix [11](#section:nystrom:proofs){reference-type="ref" reference="section:nystrom:proofs"}.
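+ The saving comes from replacing dense kernel products with factored ones. A sketch, assuming a rank-$r$ factorization $K \approx V_1 V_2$ has already been computed (e.g., via the Nyström method); all names here are illustrative:
+
+ ```python
+ # K: (n, n) kernel; V1: (n, r), V2: (r, n) with K ~= V1 @ V2; x: (n,) vector.
+ Kx_dense   = K @ x            # O(n^2) per matrix-vector product
+ Kx_lowrank = V1 @ (V2 @ x)    # O(nr) per product under the factorization
+ ```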
92
+
93
+ In this section, we consider the problem of computing the barycenter of a set of possibly corrupted probability measures. The semi-constrained formulation arises as a natural candidate for this goal when potential outliers only appear in the given probability measures and the desired barycenter is that of the uncontaminated probability measures. In particular, assume that we have $m \geq 2$ discrete probability measures $P_{1}, \ldots, P_{m}$: each has at most $n$ fixed support points, and the associated positive weights are given by $\omega_{1}, \ldots, \omega_{m}$ ($\sum_{i = 1}^{m} \omega_{i} = 1$). The barycenter problem then aims to find the probability measure that minimizes $\sum_{i=1}^m \omega_i \text{RSOT}(P_i, P)$, a weighted combination of the RSOT divergences from the barycenter to all given probability measures. We refer to it as the ***R**obust **S**emi-constrained **B**arycenter **P**roblem (RSBP)*. In this work, we consider the fixed-support setting where all the probability measures $P_i$ share the same set of support points. This setting has been widely used in previous works to study the computational complexity of the Wasserstein barycenter problem [@Kroshnin-2019-Complexity; @Lin-2020-Fast]. Letting $\mathbf{p}_i$ be the mass of probability measure $P_{i}$ for $i \in [m]$, the discrete RSBP reads $$\begin{align*}
94
+ \min_{\mathbf{p}\in \mathbb{R}_+^n, \|\mathbf{p}\|_{1} = 1} \quad \sum_{i = 1}^m \omega_i \Big[ \min_{X_i \in \mathbb{R}_+^{n \times n} , X_i^\top \mathbf{1}_n = \mathbf{p}} \langle C_i, X_i \rangle + \tau \mathbf{KL}(X_i \mathbf{1}_n \| \mathbf{p}_i) \Big],
95
+ \end{align*}$$ which is equivalent to $$\begin{align}
96
+ \label{problem:rsbp}
97
+ \min_{\mathbf{X} \in \mathcal{D}_1(\mathbf{X})} \quad f_{\textnormal{rsbp}}(\mathbf{X}):=\sum_{i=1}^m\omega_i\big[\langle C_i, X_i\rangle + \tau \mathbf{KL}(X_i\mathbf{1}_n\|\mathbf{p}_i)\big],
98
+ \end{align}$$ where $\mathcal{D}_1(\mathbf{X}) := \big\{(X_1,\ldots,X_m) : \ X_i \in \mathbb{R}_+^{n \times n} \ \text{and} \ \|X_{i}\|_{1} = 1 \ \forall i \in [m]; \ X_{i}^{\top} \mathbf{1}_{n} = X_{i + 1}^{\top} \mathbf{1}_{n} \ \forall i \in [m - 1] \big\}$. Note that the objective function of RSBP is different from that of Wasserstein barycenter [@Kroshnin-2019-Complexity]: here we relax the marginal constraints $X_{i} \mathbf{1}_{n} = \mathbf{p}_{i}$ by using the KL divergence to deal with the contaminated $P_i$. Finally, the constraints $X_{i}^{\top} \mathbf{1}_{n} = X_{i + 1}^{\top} \mathbf{1}_{n} = \mathbf{p}$ are to guarantee that the transportation plans $X_i$ have one common marginal which turns out to be a feasible barycenter $\mathbf{p}$. Similar to RSOT, we consider an entropic-regularized formulation of [\[problem:rsbp\]](#problem:rsbp){reference-type="eqref" reference="problem:rsbp"}, named *entropic RSBP*: $$\begin{align}
99
+ \label{problem:entropic_RSBP}
100
+ \min_{\mathbf{X} \in \mathcal{D}_1(\mathbf{X})} g_{\text{rsbp}}(\mathbf{X}) := \sum_{i = 1}^{m} \omega_{i} g_{\text{rsot}}(X_i; \mathbf{p}_i,C_i).
101
+ \end{align}$$ Since some functions, such as $g_{\textnormal{rsot}}(X)$, depend on parameters such as $C_i$ and $\mathbf{p}_i$, we sometimes abuse notation by including these parameters next to the variables, e.g., $g_{\text{rsot}}(X_i; C_i, \mathbf{p}_i)$. A general approach to deal with [\[problem:entropic_RSBP\]](#problem:entropic_RSBP){reference-type="eqref" reference="problem:entropic_RSBP"} is to consider its dual function, which admits the following form: $$\begin{align}
102
+ \min_{\substack{\mathbf{u}= (u_{1}, \ldots, u_{m}), \mathbf{v}= (v_{1}, \ldots, v_{m}) \\ \sum_{i = 1}^m \omega_i v_i = 0}} h_{\text{rsbp}}(\mathbf{u}, \mathbf{v}):= \sum_{i = 1}^{m} \omega_{i} \big[ \eta \log \|B(u_i, v_i;C_i)\|_{1} + \tau \big \langle e^{-u_{i}/ \tau}, \mathbf{p}_{i} \big \rangle \big].
103
+ \label{problem:dual_entropic_rsbp}
104
+ \end{align}$$ We could use the alternating minimization method to find the minimizer of [\[problem:dual_entropic_rsbp\]](#problem:dual_entropic_rsbp){reference-type="eqref" reference="problem:dual_entropic_rsbp"}. In particular, starting at an initialization $\mathbf{u}^0$ and $\mathbf{v}^0$, we update them alternatively as follows: $$\begin{align}
105
+ \label{equation:dual_update}
106
+ \mathbf{u}^{k + 1} = \mathop{\mathrm{arg\,min}}_{\mathbf{u}} h_{\textnormal{rsbp}}(\mathbf{u}, \mathbf{v}^{k}), \quad \mathbf{v}^{k + 1} = \mathop{\mathrm{arg\,min}}_{\mathbf{v}: \sum_{i = 1}^m \omega_i v_i = 0} h_{\textnormal{rsbp}}(\mathbf{u}^{k + 1},\mathbf{v}).
107
+ \end{align}$$ In some problems (e.g., RSOT), closed-form updates can be obtained if the system of equations $\partial h_{\textnormal{rsbp}}(\mathbf{u}, \mathbf{v}^{k})/\partial \mathbf{u}= \mathbf{0}$ and $\partial h_{\textnormal{rsbp}}(\mathbf{u}^{k},\mathbf{v})/\partial \mathbf{v}= \mathbf{0}$ can be solved exactly by simple formulas. However, this is not the case for the formulation of $h_{\textnormal{rsbp}}$ in equation [\[problem:dual_entropic_rsbp\]](#problem:dual_entropic_rsbp){reference-type="eqref" reference="problem:dual_entropic_rsbp"}, because the logarithmic term leads to an intractable system of equations for the partial derivatives of $h_{\textnormal{rsbp}}$. Instead, we propose to solve the optimization problem [\[problem:entropic_RSBP\]](#problem:entropic_RSBP){reference-type="eqref" reference="problem:entropic_RSBP"} via another objective function, whose dual form can be solved effectively by alternating minimization.
108
+
109
+ :::: algorithm
110
+ ::: algorithmic
111
+ **Input:** $\{C_{i}\}_{i = 1}^{m}, \{\mathbf{p}_{i}\}_{i = 1}^{m}, \tau, \eta, n_{\textnormal{iter}}$
+ **Initialization:** $u_{i}^0 = v_{i}^0 = \mathbf{0}_n$ for $i \in [m]$, $k = 0$
+ **while** $k < n_{\textnormal{iter}}$ **do**
+ $\quad a_{i}^k \gets B(u_{i}^k,v_{i}^k;C_i) \mathbf{1}_n; \quad b_{i}^k \gets \big(B(u_{i}^k,v_{i}^k;C_i)\big)^{\top} \mathbf{1}_n \quad \forall i \in [m]$
+ $\quad$ **if** updating $\mathbf{u}$ at this step (the two updates alternate) **then** $u_{i}^{k+1} \leftarrow \frac{\eta \tau}{\eta + \tau} \big[\frac{u^k_i}{\eta} + \log(\mathbf{p}_{i}) - \log(a_{i}^k)\big]$, $\quad v_{i}^{k+1} \leftarrow v_{i}^k \quad \forall i \in [m]$
+ $\quad$ **else** $u_{i}^{k+1} \leftarrow u_{i}^k$, $\quad v_{i}^{k+1} \leftarrow \eta \left[ \frac{v^k_i}{\eta} - \log(b_{i}^{k}) - \sum_{t = 1}^{m} \omega_{t} \big(\frac{v^k_t}{\eta} - \log(b_{t}^{k}) \big) \right] \quad \forall i \in [m]$
+ $\quad k \gets k + 1$
+ $X_{i}^k \leftarrow B(u_{i}^k,v_{i}^k;C_i) \quad \forall i \in [m]$
+ **return** $(X_1^k, \dots, X_m^k)$ for equation [\[problem:entropic_rsbp_no_norm\]](#problem:entropic_rsbp_no_norm){reference-type="eqref" reference="problem:entropic_rsbp_no_norm"} $\quad$ or $\quad$ $\big(\frac{X^k_{1}}{\|X^k_{1}\|_{1}}, \ldots, \frac{X^k_{m}}{\|X^k_{m}\|_{1}}\big)$ for equation [\[problem:entropic_RSBP\]](#problem:entropic_RSBP){reference-type="eqref" reference="problem:entropic_RSBP"}.
112
+ :::
113
+ ::::
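+ Below is a minimal NumPy sketch (ours) of the RobustIBP updates above; as with the previous algorithm, the even/odd alternation is our reading of the flattened listing. Note how the $v$-step subtracts the $\omega$-weighted average, which maintains $\sum_i \omega_i v_i = \mathbf{0}$ and couples the $m$ transport problems through the common barycenter marginal.
+
+ ```python
+ import numpy as np
+
+ def robust_ibp(Cs, ps, w, eta, tau, n_iter):
+     """Sketch of RobustIBP for m cost matrices Cs, masses ps, and weights w."""
+     m, n = len(Cs), Cs[0].shape[0]
+     U, V = np.zeros((m, n)), np.zeros((m, n))
+     for k in range(n_iter):
+         B = [np.exp((U[i][:, None] + V[i][None, :] - Cs[i]) / eta) for i in range(m)]
+         if k % 2 == 0:  # u-step, applied independently to each measure (as in RSOT)
+             for i in range(m):
+                 U[i] = (eta * tau / (eta + tau)) * (U[i] / eta + np.log(ps[i]) - np.log(B[i].sum(axis=1)))
+         else:           # v-step, coupled across measures by the weighted average
+             S = V / eta - np.log(np.stack([Bi.sum(axis=0) for Bi in B]))
+             V = eta * (S - np.average(S, axis=0, weights=w))
+     return [np.exp((U[i][:, None] + V[i][None, :] - Cs[i]) / eta) for i in range(m)]
+ ```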
114
+
115
+ # Method
116
+
117
+ We consider a similar problem to the entropic RSBP in [\[problem:entropic_RSBP\]](#problem:entropic_RSBP){reference-type="eqref" reference="problem:entropic_RSBP"}, with its feasible set $\mathcal{D}(\mathbf{X}) := \{(X_1,\ldots,X_m) : X_i \in \mathbb{R}_+^{n \times n}, \forall i \in [m]; X_{i}^{\top} \mathbf{1}_{n} = X_{i + 1}^{\top} \mathbf{1}_{n} \forall i \in [m - 1] \}$ which does not have the norm constraint. The primal objective function and its dual are as follows: $$\begin{align}
118
+ &\textbf{Primal:} \quad \min_{\mathbf{X} \in \mathcal{D}(\mathbf{X})} g_{\text{rsbp}}(\mathbf{X}) := \sum_{i = 1}^{m} \omega_{i} g_{\text{rsot}}(X_i; \mathbf{p}_i,C_i), \label{problem:entropic_rsbp_no_norm} \\
119
+ &\textbf{Dual:} \quad \min_{\mathbf{u}, \mathbf{v}:\sum_{i = 1}^m \omega_i v_i = \mathbf{0}} \bar{h}_{\text{rsbp}}(\mathbf{u}, \mathbf{v}):= \sum_{i = 1}^{m} \omega_{i} \big[ \eta \|B(u_i, v_i; C_i)\|_{1} +\tau \big \langle e^{-u_{i}/ \tau}, \mathbf{p}_{i} \big \rangle \big]. \label{problem:dual_entropic_rsbp_no_norm}
120
+ \end{align}$$ The dual formulation [\[problem:dual_entropic_rsbp_no_norm\]](#problem:dual_entropic_rsbp_no_norm){reference-type="eqref" reference="problem:dual_entropic_rsbp_no_norm"} admits closed-form updates for $\mathbf{u}$ and $\mathbf{v}$. Based on these, we develop Algorithm [\[algorithm:barycenter_semiOT\]](#algorithm:barycenter_semiOT){reference-type="ref" reference="algorithm:barycenter_semiOT"}, named [RobustIBP]{.smallcaps}, since this procedure resembles the iterative Bregman projections studied in [@Benamou-2015-Iterative] and [@Kroshnin-2019-Complexity]. The updates of $\mathbf{u}$ and $\mathbf{v}$ are known to converge to the optimal solution $(\mathbf{u}^{*}, \mathbf{v}^{*})$ of the problem [\[problem:dual_entropic_rsbp_no_norm\]](#problem:dual_entropic_rsbp_no_norm){reference-type="eqref" reference="problem:dual_entropic_rsbp_no_norm"}, and strong duality implies that $\mathbf{X}^* = (B(u_{i}^{*}, v_{i}^{*}; C_i) )_{i = 1}^m$ is the optimal solution of the problem [\[problem:entropic_rsbp_no_norm\]](#problem:entropic_rsbp_no_norm){reference-type="eqref" reference="problem:entropic_rsbp_no_norm"}. Furthermore, there is an intriguing relation between the optimal solution of the problem [\[problem:entropic_rsbp_no_norm\]](#problem:entropic_rsbp_no_norm){reference-type="eqref" reference="problem:entropic_rsbp_no_norm"} and that of the problem [\[problem:entropic_RSBP\]](#problem:entropic_RSBP){reference-type="eqref" reference="problem:entropic_RSBP"}, presented in the following lemma.
121
+
122
+ ::: {#lemma:barycenter_normalize .lemma}
123
+ **Lemma 2**. *Let $\bar{\mathbf{X}}^{*}=(\bar{X}^{*}_1,\ldots,\bar{X}^{*}_m)$ and $\mathbf{X}^* = (X_1^*, \dots, X_m^*)$ be the optimizers of $g_{\textnormal{rsbp}}$ over the feasible set $\mathcal{D}(\mathbf{X})$ and over the feasible set $\mathcal{D}_1(\mathbf{X})$, respectively. Then, $X^*_i= \dfrac{\bar{X}^{*}_i}{\|\bar{X}^{*}_i\|_1}$ for all $i\in [m]$.*
124
+ :::
125
+
126
+ The proof of Lemma [2](#lemma:barycenter_normalize){reference-type="ref" reference="lemma:barycenter_normalize"} is in Appendix [\[sec:proofs:robust_barycenter\]](#sec:proofs:robust_barycenter){reference-type="ref" reference="sec:proofs:robust_barycenter"}. This result indicates that we can approximate the solution of equation [\[problem:entropic_RSBP\]](#problem:entropic_RSBP){reference-type="eqref" reference="problem:entropic_RSBP"} by the solution of equation [\[problem:entropic_rsbp_no_norm\]](#problem:entropic_rsbp_no_norm){reference-type="eqref" reference="problem:entropic_rsbp_no_norm"}, using the same Algorithm [\[algorithm:barycenter_semiOT\]](#algorithm:barycenter_semiOT){reference-type="ref" reference="algorithm:barycenter_semiOT"} with an additional normalizing step at the end.
2102.12871/main_diagram/main_diagram.drawio ADDED
The diff for this file is too large to render. See raw diff
 
2102.12871/main_diagram/main_diagram.pdf ADDED
Binary file (88.1 kB). View file
 
2102.12871/paper_text/intro_method.md ADDED
@@ -0,0 +1,43 @@
1
+ # Introduction
2
+
3
+ The Transformer (Vaswani et al., 2017) has been commonly used in various natural language processing (NLP) tasks such as text classification (Wang et al., 2018a), text translation (Ott et al., 2018), and question answering (Rajpurkar et al., 2016). The recent use of the Transformer for image classification (Dosovitskiy et al., 2021) and object detection (Carion et al., 2020) also demonstrates its potential in computer vision. Two notable descendants of the Transformer include BERT (Devlin et al., 2019), which achieves state-of-the-art performance on a wide range of NLP tasks, and GPT-3 (Brown et al., 2020), which applies the Transformer's decoder to generative downstream tasks.
8
+
9
+ Self-attention is a core component in Transformer-based architectures. Recently, its interpretation has aroused a lot of interest. Visualization has been commonly used to understand the attention map during inference (Park et al., 2019; Gong et al., 2019; Kovaleva et al., 2019). For example, Park et al. (2019) and Gong et al. (2019) randomly select a sentence from the corpus and visualize the attention maps of different heads in a pre-trained Transformer model. Kovaleva et al. (2019) summarizes five attention patterns and estimates their ratios in different tasks. A common observation from these studies is that local attention and global attention are both important for token understanding.
10
+
11
+ While self-attention is powerful, a main concern is its efficiency bottleneck. As each token has to attend to all n tokens in the sequence, the complexity scales as $\mathcal{O}(n^2)$. This can be expensive on long sequences. To alleviate this problem, sparse attention allows each token to attend to only a token subset. A series of Transformer variants have been proposed along this direction (Guo et al., 2019; Child et al., 2019; Li et al., 2019; Beltagy et al., 2020). However, these sparse attention schemes are designed manually. It remains an open issue how to find a suitable attention scheme.
12
+
13
+ Recently, there is a growing interest in understanding self-attention mechanism from a theoretical perspective. Results show that the Transformer and its variants are universal approximators of arbitrary continuous sequence-to-sequence functions (Yun et al., 2019; 2020; Zaheer et al., 2020). A key part of their proofs is that self-attention layers implement contextual mappings of the input sequences. Yun et al. (2019) constructs the self-attention model as a selective shift operation such that contextual mapping can be implemented. Zaheer et al. (2020) shows that universal approximation holds for their sparse Transformer BigBird if its attention structure contains the star graph. Yun et al. (2020) provides a unifying framework for the universal approximation of sparse Transformers. Note that they all emphasize the importance of diagonal elements in the attention map.
14
+
15
+ To guide the design of an efficient Transformer, it is useful to investigate the importance of different positions in self-attention.
+
+ <span id="page-1-0"></span>![](_page_1_Figure_1.jpeg)
+
+ Figure 1. Examples of existing attention masks (with n = 16).
+
+ In this paper, we study this using differentiable search (Liu et al., 2019; Xie et al., 2018). A learnable attention distribution is constructed and the score of each position is learned in an end-to-end manner during pre-training. While existing theoretical and empirical findings suggest the importance of diagonal elements in the self-attention matrix, we observe that they are in fact the least important compared to other entries. Furthermore, neighborhood tokens and special tokens (such as the first token [CLS] and the last token [SEP]) are also prominent, which is consistent with previous observations in (Park et al., 2019; Gong et al., 2019; Kovaleva et al., 2019; Clark et al., 2019). Besides, using the Gumbel-sigmoid function (Maddison et al., 2017), we propose the Differentiable Attention Mask (DAM) algorithm to learn the attention mask in an end-to-end manner. Extensive experiments using masks with different sparsity ratios on various NLP tasks demonstrate the effectiveness of the proposed algorithm. Specifically, highly sparse structured attention masks (with a 91.3% sparsity ratio) can already achieve an 80.9% average score on the GLUE development set (Wang et al., 2018a). The code is available at https://github.com/han-shi/SparseBERT.
24
+
25
+ # Method
26
+
27
+ - 1: initialize model parameter w and attention mask parameter $\alpha$.
28
+ - 2: repeat
29
+ - 3: generate mask $M_{i,j} \leftarrow \text{gumbel-sigmoid}(\alpha_{i,j})$;
30
+ - 4: obtain the loss with attention mask $\mathcal{L}$;
31
+ - 5: update parameter w and $\alpha$ simultaneously;
32
+ - 6: until convergence.
33
+ - 7: **return** attention mask M.
34
+
35
+ We learn the attention mask in an end-to-end manner with the Gumbel-sigmoid variant. To balance mask sparsity with performance, we add the sum of the absolute values of the attention mask to the loss, as:
36
+
37
+ $$\mathcal{L} = l(\mathrm{BERT}(\boldsymbol{X}, \boldsymbol{A}(\boldsymbol{X}) \odot \boldsymbol{M}(\boldsymbol{\alpha}); \boldsymbol{w})) + \lambda \|\boldsymbol{M}(\boldsymbol{\alpha})\|_{1}, \tag{7}$$
38
+
39
+ where $l(\mathrm{BERT}(\boldsymbol{X}, \boldsymbol{A}(\boldsymbol{X}); \boldsymbol{w}))$ is the pre-training loss, and $\lambda$ is a trade-off hyperparameter. When $\lambda$ is large, we place more emphasis on efficiency and the learned attention mask $\boldsymbol{M}$ is sparser, and vice versa. We also apply the renormalization trick to normalize the attention distributions in Eq. (7). The obtained one-hot Gumbel-sigmoid output can then be directly used as the attention mask. The whole algorithm is shown in Algorithm 1. We optimize both parameter sets simultaneously in one-level optimization until convergence, and the generated mask is finally returned.
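+ To make the training step concrete, here is a hedged PyTorch sketch of the Gumbel-sigmoid relaxation and the loss in Eq. (7). The noise construction below (a difference of two Gumbel samples) is one standard binary relaxation; the paper's exact temperature schedule and discretization may differ, and `bert`, `pretraining_loss`, `X`, and `optimizer` are placeholders.
+
+ ```python
+ import torch
+
+ def gumbel_sigmoid(alpha, temperature=1.0, eps=1e-10):
+     """Relaxed binary sample M = sigmoid((alpha + g1 - g2) / T), g ~ Gumbel(0, 1)."""
+     u1, u2 = torch.rand_like(alpha), torch.rand_like(alpha)
+     g1 = -torch.log(-torch.log(u1 + eps) + eps)
+     g2 = -torch.log(-torch.log(u2 + eps) + eps)
+     return torch.sigmoid((alpha + g1 - g2) / temperature)
+
+ # One step of Algorithm 1 / Eq. (7), with lam the trade-off hyperparameter:
+ # M = gumbel_sigmoid(alpha)
+ # loss = pretraining_loss(bert(X, attention_mask=M)) + lam * M.abs().sum()
+ # loss.backward(); optimizer.step()   # w and alpha are updated simultaneously
+ ```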
40
+
41
+ Note that the attention mask obtained in Algorithm 1 is unstructured, as the $\alpha_{i,j}$'s are all independent of each other. This irregular structure may affect the efficiency of the final CUDA implementation. To alleviate this problem, we use the observed sparsity patterns in Figure 2 to constrain the structure of the attention mask. First, as the special tokens are important, we require the first and last row/column of the attention mask to be active. Second, for all positions on each line parallel to the diagonal, we share their mask parameters, including their two Gumbel noises, such that the generated mask satisfies $M_{i,j} = M_{i+k,j+k}$ for integer k (a sketch of this construction follows below). Among the previously proposed attention masks, the Sparse Transformer (strided) (Child et al., 2019) and LogSparse Transformer (Li et al., 2019) conform to our definition of structured attention masks, and they can be implemented efficiently by custom CUDA kernels. With the constrained structure, there are now only n-2 attention mask parameters. Therefore, the search space is reduced to $2^{n-2}$ in structured attention mask search. Since the sparsity ratio directly affects the efficiency of SparseBERT, we report the performance of each attention mask together with its sparsity ratio.
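+ As a sketch of how the structured mask could be parameterized: to match the stated count of n-2 parameters, we assume here that offsets +k and -k share one parameter (i.e., the interior of the mask depends only on $|i-j|$); this sharing is our reading rather than an explicit statement in the text. `gumbel_sigmoid` is the helper sketched above.
+
+ ```python
+ import torch
+
+ def structured_mask(diag_alpha, n, temperature=1.0):
+     """diag_alpha: (n - 2,) logits, one per diagonal offset |i - j| = 0 .. n-3."""
+     idx = torch.arange(n)
+     offsets = (idx[:, None] - idx[None, :]).abs().clamp(max=n - 3)
+     # One Gumbel-sigmoid sample per diagonal, broadcast along that diagonal,
+     # so positions on the same diagonal share both parameters and noise.
+     M = gumbel_sigmoid(diag_alpha, temperature)[offsets]
+     M[0, :] = M[-1, :] = M[:, 0] = M[:, -1] = 1.0  # special-token rows/columns stay active
+     return M
+ ```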
2105.04030/main_diagram/main_diagram.drawio ADDED
@@ -0,0 +1 @@
1
+ <mxfile host="app.diagrams.net" modified="2021-02-10T10:15:56.307Z" agent="5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.135 Safari/537.36" etag="wzCnYpPxP-1ff7V_fGdo" version="14.3.0" type="device"><diagram id="ej0NGq9KK8pHftG9yovV" name="Page-1">7V1bc5s4FP41nmkfmkESF/sxTdJuZ9tuZtPOtvvSIYBtGmK8QBo7v36FkTAXgWUbLFGrl7ERIIvz6Tvn6OhIjNDV4+p9ZC/nn0LXC0ZQc1cjdD2CEBimjj/SknVWYplmVjCLfJdctC248188UqiR0iff9eLShUkYBom/LBc64WLhOUmpzI6i8Ll82TQMyr+6tGdereDOsYN66T++m8zpc2na9sQfnj+bk5+G9MSjnV+cFcRz2w2fs6LNNehmhK6iMEyyb4+rKy9IhUflklX0ruFs3rDIWyQ8N9xGPx68+3d/v4Q/P8Uvt0+f9fGfbyDKqvllB0/kiUlrkzUVQRQ+LVwvrQWM0NvnuZ94d0vbSc8+Y9Bx2Tx5DMjpqR8EV2EQRpt70dRI/+LyOInCB69wxtz8Se8IF0mhPPuDy+vPRx75lxcl3qpQRJ73vRc+ekm0xpeQs9AgYJDOB00jO34uQAloV5sXYMwLbdJ/ZnnlWxHjL0TKe0i8b4FPplZ6a13gyEQTlJ6xI4fwzOpHzDrSOMWMJh2I+e4FPIQv+q355cPXxfsb+HU1+/oG6GOGoPX0n3GVchNTfGS9/Tiyrn/gzw/48xU5cz/FBfj7MvZx6WtyVxUkLJukjERZ4otw4VXgIUV24M8W+NDBEvdw+dtU0j5uzyU58ei7bvozTOi3nUMj5CFgQtANmiDXvQROg9ZRgFO3GGD2hmXnnNlKDUzwsWvH8/zeKqGmU+g4LEK55r1pmN0IHcGKzMdWTeYmS09ZvQmdhz7XmCBD4ccG6U74UdF2OryomxWdARbsCyudB6u7M8SqapkMq26Z+kKKaf2BWRO+52KHkxyGUTIPZ+HCDm62pRUxba/5GIZLgtdPL0nWRHj2UxKW0cQCjNbfyP2bg+/pwYVBD69XxZPXa3LUiEAcPkWO19IjCSESO5p5SYs8iJZLZdCKZ+QFduL/KjvlnfPI4OHRF8UjiMDpeMREytztEeDx1jL96jxFwfptZDsPaU/c5RqU/YiqKOPse8acLmyJVZErrMvVYsmVDmQ6FyzLax6gYOGkYqSNuolGLMEavflTYyGaf+Un3wrfC3ofH23VfnpAtT63/thpBSZ1K8CWjVxmYNJgBjYjxeQM1T9Cu9l0UvXPEL6kZDrU9eqOhNRU72bhRCoW0nazaQgUDaExqQcJTktDMaOZYdKQzl7soiE05KJhUyBoQ0OoaAhNJJqGE0VDbhpCXhqO5aIhbKPhOcb2qjS0oGAaAlYY1gxS6S9LmJj/PaWTvhuhvMlGvpf4AgiWq41o6Hn8bZZ+XuCT5H9WHW7ekp6UH3CYD/hJI9PjwL73gtsw9hM/ZP7mx8oF+W9HYWKTsklrkHKfrmSWNfoYiu1KyDgjjd4+cblbo1ucGp3qC1k0utWg0YvT0Kt0fjo5N9Wu06ijLPEGNJiBToe84o3eISAXr5rCd1lWR+BNE/yRHuDuUqHayLjxfwBcGm06VXrZdVaML/hMckX8DSSSO1zllIb+KcpKWBgzKKr3RlFrKBQVbzIhY3qWLVQoFbVh4wQtJ7Whovb+1DYZA6uTUhuyPCU1sBrAwEqnWlmagdVgZmElsBJjXiuB5LISjZm/nFbiTlmJ/a2EBQRbCf2c5oSPpDbinRNGcs0Jo/qc8N9e/NlLGNk1ZWj3zAifRbbre6WFKfebvxXi8i1x6YBuRi0Fg7HGQmPwrbesfF0NuPj5xjv5q8sVo6TtVnwzJnXP9cR8U64rP98gL9/kil3Sdiu+mUg4384pq+JYviFevskVUET1lYVnyjcLiuYbUukTA43yGaZkUT6gDWa+VgLdzb1KQxPmLLHXUgoJ+JTiat1KnfmUkgVgGifXjav7MHDj9SP+IPsnzP3mIOlR1rRiMR3HM6ZTlsXM97zoQMuZtSVloiMwQFMhGG7C6bwhT6DJ5aPq9ZjnuVJOfBAGaCoKw0853qgn0OSaQaQNV5STIA4DNBWI4accb+ATaHItM6MNV5STIBSjszbcUqGYAYRi8kVQ0oRioBqj7LFUn9tjAsJiMQ1r3wYI8xbZ7yVgJYJZtlWoWotnnCfSvZzloqVxdRdU0YuWwHC2Z+iSW9weMBA26Hz4/G/g39z95Uym78BM/7S2rRndcFN+sAoKFJY0aL7VYOt2ghVx9w5zpkKPQHVz62UU2evCBcvQXyRxoebbtGCrDSZ0QEMdMRKr3faQrMZtf8mbdox6bhxFtac+v6i1b0doeuFr3wBU68X3UB28ySoACIuRMC0EHArInSp6brSOVvRHusasoMk+ulctTjxA9wpfnAg0lbc00GDZmO6iK0+wTK1h2sMy8K5iB+L2l2XacTQ8kC8scSM9bphFOwAH7k7wotadHuEACF93mr/YaEh8Fqa0ubdBBeK2I2Znm4oZYjdMVuxSwB1lqTLhYotHroWLoHHbWlY+QXyifILp1HHY75LqMp9gMq5ORAhPVKW7lyoVycM5/oiHbKRrDIGcHekkSFUVsz5iqKTjH2VIlnkBWt5adWakkyBZVdAL3gZKOt6tewGUK0EctG3ee2akE5+uCgBrxy8Vgh9ACB5oQLaE1SHu+yAsk5F7k+j8fdyyaHBDpUz0AbMhm6FuWbCcR+DXZ5mwiofrsmWsGgMMpe+ZBNkhKxvCP/za+NikyFrWI8D/0qnCUiTEguVqstaRO7c9Zt8MS/xbsOw6nCTFkgp931m+tUqxPEY3ic+xNIaYoC1MNbGDZEJVE0IXh2mm7nRH46QUp+5QKYKH6A7xOYLUUqkAxeACFECXLUnQgAM0RPvlj3VpidgDUqGWSDcuoGhTdGC2+lolqx1jisRnqzW/HSNDGQsSo/eRoPgBf74q4p9vv/FaamC5Fcge8x7VEQmtomgIrDqU/U17GC3pFnUor25kZ2MPoAHIsWtfX6ixE+XEbJRwYAyZy/Ayn5Mxtcu8Tti0QFurGX562jm5XHUdu+o1P539ZlfKxtx1z35kKN57X68SQoDB0Z5cbDZHf/tN/g7nNeOlYHLtW9vW6nPc5x2UuaXV53h6y9Zgk0vM/PrJicK7wbO4zPm2Zqt9+CSgSt7XTkqVw7t89125ceqvAlXVFegufNL6oCr9TwKWMFcdVGmzcC+jKHxOneDAjmPfYfteW38rT6TqfJqtqzcIMDY0P2qEVQDQYOBHy47kbm1Pb7PSL7IHr1G3VpGlVyoCp9UBtJf1N1ZM/MD1MiVSjr8eP3LkHxFGHm6vfb+pKu29JC0D12vghl2ndWELEhO11FXY1
KwoGcaI8LRDQqq7e0A7q8WnBUlk+4sCsn71yt8NbVjZhsOa1ME2T4o1a0VNP1h7cXJWUFtlXkM67bXD9hwANT6MwlT6W52PHar5p9D10iv+Bw==</diagram></mxfile>
2105.04030/main_diagram/main_diagram.pdf ADDED
Binary file (71.1 kB). View file
 
2105.04030/paper_text/intro_method.md ADDED
@@ -0,0 +1,132 @@
1
+ # Method
2
+
3
+ In domain generalization, we have $\mathcal{D}{=}{\left\{ D_i \right\}}^{|\mathcal{D}|}_{i=1}{=}\mathcal{S} \cup \mathcal{T}$ as a set of domains, where $\mathcal{S}$ and $\mathcal{T}$ denote the source and target domains, respectively. $\mathcal{S}$ and $\mathcal{T}$ do not have any overlap besides sharing the same label space. Data from the target domains $\mathcal{T}$ is never seen during training. For each domain $D_{i} \in \mathcal{D}$, we define a joint distribution $p(\mathbf{x}, \mathbf{y})$ on $\mathcal{X} \times \mathcal{Y}$, where $\mathcal{X}$ and $\mathcal{Y}$ denote the input space and output space, respectively. We aim to learn a model $f: \mathcal{X}{\rightarrow} \mathcal{Y}$ in the source domains $\mathcal{S}$ that generalizes well to the target domains $\mathcal{T}$.
4
+
5
+ We address domain generalization in a probabilistic framework of Bayesian inference by introducing weight uncertainty into neural networks. To be specific, we adopt variational Bayesian inference to learn a neural network that is assumed to be parameterized by weights $\boldsymbol{\theta}$ with a prior distribution $p(\boldsymbol{\theta})$ and the posterior distribution $p(\boldsymbol{\theta}|\mathbf{x}, \mathbf{y})$, where $(\mathbf{x}, \mathbf{y})$ are samples from the source domain $\mathcal{S}$. To learn the model, we seek a variational distribution $q(\boldsymbol{\theta})$ to approximate $p(\boldsymbol{\theta}| \mathbf{x}, \mathbf{y})$ by minimizing the Kullback-Leibler divergence $\mathbb{D}_{\rm{KL}} \big[ q(\boldsymbol{\theta})||p(\boldsymbol{\theta}|\mathbf{x}, \mathbf{y})\big]$ between them. This amounts to minimizing the following objective: $$\begin{equation}
6
+ \label{elbo1}
7
+ \begin{aligned}
8
+ \mathcal{L}_{\rm{Bayes}} %& = \dkl[q(\btheta)||p(\btheta | \x, \y)] \\ &
9
+ = -\mathbb{E}_{q(\boldsymbol{\theta})}[\log p(\mathbf{y}|\mathbf{x}, \boldsymbol{\theta})] + \mathbb{D}_{\rm{KL}}[q(\boldsymbol{\theta})||p(\boldsymbol{\theta})],
10
+ \end{aligned}
11
+ \end{equation}$$ which is also known as the negative value of the evidence lower bound (ELBO) [@blei2017variational]. We learn the model on the source domain $\mathcal{S}$ in the hope that it will generalize well to the target domain $\mathcal{T}$.
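+ As a concrete reference point, the following PyTorch sketch (ours, not the paper's implementation) evaluates this negative ELBO for a single mean-field Gaussian layer with a standard-normal prior, estimating the expected log-likelihood with one reparameterized Monte Carlo sample.
+
+ ```python
+ import torch
+ import torch.nn.functional as F
+
+ def neg_elbo(x, y, mu, log_sigma, forward):
+     """forward(x, theta) -> class logits; q(theta) = N(mu, sigma^2), p(theta) = N(0, I)."""
+     theta = mu + torch.randn_like(mu) * log_sigma.exp()   # theta ~ q(theta), reparameterized
+     nll = F.cross_entropy(forward(x, theta), y)           # approx. -E_q[log p(y|x, theta)]
+     # Closed-form KL[N(mu, sigma^2) || N(0, 1)], summed over all weights.
+     kl = 0.5 * (log_sigma.exp().pow(2) + mu.pow(2) - 1.0 - 2.0 * log_sigma).sum()
+     return nll + kl
+ ```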
12
+
13
+ @kristiadi2020being show that applying Bayesian approximation to the last layer of a neural network effectively captures model uncertainty. This is also appealing when dealing with uncertainty in domain generalization since applying Monte Carlo sampling to the full Bayesian neural network can be computationally infeasible. In this paper, to obtain a model that generalizes well across domains in an efficient way, we incorporate variational Bayesian approximation into the last two network layers to jointly establish domain-invariant feature representations and classifiers. More analyses are provided in the experiments and the supplementary material. To achieve better domain invariance, we introduce a domain-invariant principle in a probabilistic form under the Bayesian framework. We use $\boldsymbol{\phi}$ and $\boldsymbol{\psi}$ to denote the parameters of the feature extractor and classifier. We incorporate the domain-invariant principle into the inference of posteriors over $\boldsymbol{\psi}$ and $\boldsymbol{\phi}$, which results in our two-layer Bayesian network. Next we detail the Bayesian domain-invariant learning of the network and the objective for optimization.
14
+
15
+ <figure id="fig:invariant" data-latex-placement="t">
16
+ <embed src="fig1.pdf" />
17
+ <figcaption>Illustrative contrast between deterministic invariance (top) and probabilistic invariance (bottom), where colors indicate domains. The deterministic invariance tends to minimize the distance between two deterministic samples (<span class="math inline"><em>a</em> → <em>b</em></span>). The samples come from their respective distributions, but with limited distributional awareness. This means that the samples cannot represent the complete distributions. Thus, the deterministic invariance can lead to small distances between samples, but large gaps between distributions, as shown in figure (b). In contrast, the probabilistic invariance directly minimizes the distance between different distributions (<span class="math inline"><em>c</em> → <em>d</em></span>), which yields a better domain invariance as most of the samples in the distributions are taken into account.</figcaption>
18
+ </figure>
19
+
20
+ Existing domain generalization methods try to achieve domain invariance by minimizing the distance between intra-class samples from different domains in the hope of reducing the domain gaps [@motiian2017unified]. However, individual samples cannot be assumed to be representative of the distributions of samples from a certain domain. Therefore, it is preferable to minimize the distributional distance of intra-class samples from different domains, which can directly narrow the domain gap. A contrastive illustration is provided in Figure [1](#fig:invariant){reference-type="ref" reference="fig:invariant"}. Motivated by this, we propose to address domain generalization under probabilistic modeling by incorporating weight uncertainty into invariant learning in the variational Bayesian inference framework.
21
+
22
+ We first introduce the definition of domain invariance in a probabilistic form, which we adopt to learn domain-invariant feature representations and classifiers in a unified way. We define a continuous domain space $\mathfrak{D}$ containing a set of domains $\left\{ D_i \right\}^{|\mathcal{D}|}_{i=1}$ in $\mathcal{D}$ and a domain-transform function $g_{\zeta}(\cdot)$ with parameters $\zeta$ in the domain space. $\left\{ D_i \right\}^{|\mathcal{D}|}_{i=1}$ are discrete samples in $\mathfrak{D}$. Function $g_{\zeta}(\cdot)$ transforms samples $\mathbf{x}$ from a reference domain to a different domain $D_{\zeta}$ with respect to $\zeta$, where $\zeta {\thicksim} q(\zeta)$, and different samples $\zeta$ lead to different sample domains $D_{\zeta} {\thicksim}{\mathfrak{D}}$.
23
+
24
+ As a concrete example, consider the rotation domains in Rotated MNIST [@ghifary2015domain]. $\mathfrak{D}$ is the rotation domain space, containing images rotated by continuous angles from $0$ to $2\pi$. A sample $\mathbf{x}$ from any domain in $\mathfrak{D}$ can be transformed into a different domain $D_{\zeta}$ by: $$\begin{equation}
25
+ g_{\zeta}(\mathbf{x}) = \left[
26
+ \begin{matrix}
27
+ \cos(\zeta) & -\sin(\zeta) \\
28
+ \sin(\zeta) & \cos(\zeta)
29
+ \end{matrix}
30
+ \right]
31
+ \left[
32
+ \begin{matrix}
33
+ x_{1} \\
34
+ x_{2}
35
+ \end{matrix}
36
+ \right],
37
+ \end{equation}$$ where $\zeta \thicksim$ Uniform$(0, 2\pi)$. $\zeta$ is the parameter of $g_\zeta(\cdot)$ and different $\zeta$ lead to different rotation angles of the original sample $\mathbf{x}$, which form another domain $D_\zeta$ in $\mathfrak{D}$. In practice, transformations between domains are more complicated and the exact forms of $g_{\zeta}(\cdot)$ and $q(\zeta)$ are not explicitly known.
38
+
39
+ Based on the above assumptions about $\mathfrak{D}$, $g_\zeta(\cdot)$ and $q(\zeta)$, we introduce our definition of the probabilistic form of domain invariance as follows.
40
+
41
+ ::: {#def1 .define}
42
+ **Definition 1** (Domain Invariance). *Let $\mathbf{x}_i$ be a given sample from domain $D_i$ in the domain space $\mathfrak{D}$, and $\mathbf{x}_{\zeta} {=} g_{\zeta}(\mathbf{x}_{i})$ be a transformation of $\mathbf{x}_{i}$ in another domain $D_\zeta$ from the same domain space, where $\zeta {\thicksim} q(\zeta)$. $p_{\boldsymbol{\theta}}(\mathbf{y}|\mathbf{x})$ denotes the output distribution of input $\mathbf{x}$ with model $\boldsymbol{\theta}$. Model $\boldsymbol{\theta}$ is domain-invariant in $\mathfrak{D}$ if $$\begin{equation}
43
+ \label{nai_invariant}
44
+ p_{\boldsymbol{\theta}}(\mathbf{y}_{i}|\mathbf{x}_{i}) = p_{\boldsymbol{\theta}}(\mathbf{y}_{\zeta}|\mathbf{x}_{\zeta}), \qquad \forall \zeta \thicksim q(\zeta).
45
+ \end{equation}$$*
46
+ :::
47
+
48
+ Here, we use $\mathbf{y}$ to represent the output from a neural layer with input $\mathbf{x}$, which can either be the prediction vector from the last layer or the feature vector from the last convolutional layer of a deep neural network.
49
+
50
+ To adopt the domain-invariant principle in the variational Bayesian inference framework, we reformulate ([\[nai_invariant\]](#nai_invariant){reference-type="ref" reference="nai_invariant"}) to an expectation form with respect to $q_{\zeta}$, following @learningprior: $$\begin{equation}
51
+ \label{ep_invariant}
52
+ p_{\boldsymbol{\theta}}(\mathbf{y}_{i}|\mathbf{x}_{i}) = \mathbb{E}_{q_{\zeta}} [p_{\boldsymbol{\theta}}(\mathbf{y}_{\zeta}|\mathbf{x}_{\zeta})].
53
+ \end{equation}$$
54
+
55
+ According to Definition [1](#def1){reference-type="ref" reference="def1"}, we use the Kullback-Leibler divergence between the two terms in ([\[ep_invariant\]](#ep_invariant){reference-type="ref" reference="ep_invariant"}), $\mathbb{D}_{\rm{KL}}\big[ p_{\boldsymbol{\theta}}(\mathbf{y}_{i}|\mathbf{x}_{i})|| \mathbb{E}_{q_\zeta} [p_{\boldsymbol{\theta}}(\mathbf{y}_{\zeta}|\mathbf{x}_{\zeta})] \big]$, to quantify the domain invariance of the model, which will be zero when the model is domain invariant in the domain space $\mathfrak{D}$. To further facilitate the computation, we derive the upper bound of the KL divergence: $$\begin{equation}
56
+ \label{kl_inequal}
57
+ \begin{aligned}
58
+ \mathbb{D}_{\rm{KL}}& \big[ p_{\boldsymbol{\theta}}(\mathbf{y}_{i}|\mathbf{x}_{i})|| \mathbb{E}_{q_{\zeta}} [p_{\boldsymbol{\theta}}(\mathbf{y}_{\zeta}|\mathbf{x}_{\zeta})] \big] \\ &\leq \mathbb{E}_{q_{\zeta}} \Big[\mathbb{D}_{\rm{KL}}\big[ p_{\boldsymbol{\theta}}(\mathbf{y}_{i}|\mathbf{x}_{i})||p_{\boldsymbol{\theta}}(\mathbf{y}_{\zeta}|\mathbf{x}_{\zeta}) \big]\Big],
59
+ \end{aligned}
60
+ \end{equation}$$ which can be approximated by Monte Carlo sampling as we usually have access to samples from different domains. The complete derivation of ([\[kl_inequal\]](#kl_inequal){reference-type="ref" reference="kl_inequal"}) is provided in the supplementary material.
61
+
62
+ We now adopt the probabilistic domain invariance principle in the variational Bayesian approximation of the last two layers, with parameters $\boldsymbol{\phi}$ and $\boldsymbol{\psi}$, respectively. The introduced weight uncertainty results in distributional representations $p_{\boldsymbol{\phi}}(\mathbf{z}|\mathbf{x})$ and predictions $p_{\boldsymbol{\psi}}(\mathbf{y}|\mathbf{z})$, based on which we derive domain-invariant learning.
63
+
64
+ For the classifier with parameters $\boldsymbol{\psi}$ and input features $\mathbf{z}$, the probabilistic domain-invariant property in ([\[kl_inequal\]](#kl_inequal){reference-type="ref" reference="kl_inequal"}) is represented as $\mathbb{E}_{q_{\zeta}} \big[\mathbb{D}_{\rm{KL}}[ p_{\boldsymbol{\psi}}(\mathbf{y}_{i}|\mathbf{z}_{i})||p_{\boldsymbol{\psi}}(\mathbf{y}_{\zeta}|\mathbf{z}_{\zeta})]\big]$. Under the Bayesian framework, the predictive distribution $p_{\boldsymbol{\psi}}(\mathbf{y}|\mathbf{z})$ is obtained by taking the expectation over the distribution of parameter $\boldsymbol{\psi}$, i.e., $p_{\boldsymbol{\psi}}(\mathbf{y}|\mathbf{z}){=}\mathbb{E}_{q(\boldsymbol{\psi})}[p(\mathbf{y}|\mathbf{z}, \boldsymbol{\psi})]$. As the KL divergence is a convex function [@learningprior], we can further bound it from above: $$\begin{equation}
65
+ \label{invariant_final}
66
+ \begin{aligned}
67
+ \mathbb{E}_{q_{\zeta}} & \Big[\mathbb{D}_{\rm{KL}}\big[ p_{\boldsymbol{\psi}}(\mathbf{y}_{i}|\mathbf{z}_{i}) ||p_{\boldsymbol{\psi}}(\mathbf{y}_{\zeta}|\mathbf{z}_{\zeta}) \big] \Big]
68
+ \\ & = \mathbb{E}_{q_{\zeta}}\Big[ \mathbb{D}_{\rm{KL}}\big[ \mathbb{E}_{q(\boldsymbol{\psi})} [p(\mathbf{y}_{i}|\mathbf{z}_{i}, \boldsymbol{\psi})] || \mathbb{E}_{q(\boldsymbol{\psi})} [p(\mathbf{y}_{\zeta}|\mathbf{z}_{\zeta}, \boldsymbol{\psi})] \big] \Big]
69
+ \\ & \leq \mathbb{E}_{q_{\zeta}} \Big[\mathbb{E}_{q(\boldsymbol{\psi})} \mathbb{D}_{\rm{KL}}\big[ p(\mathbf{y}_{i}|\mathbf{z}_{i}, \boldsymbol{\psi}) || p(\mathbf{y}_{\zeta}|\mathbf{z}_{\zeta}, \boldsymbol{\psi}) \big]\Big], %\\ & = \mathcal{L}_{Invar}.
70
+ \end{aligned}
71
+ \end{equation}$$ which admits a tractable, unbiased approximation via Monte Carlo sampling.
72
+
73
+ In practice, we estimate the expectation over $q(\zeta)$ empirically. Specifically, in each iteration, we choose one domain from the source domains $\mathcal{S}$ as the meta-target domain $D_t$ and use the rest as the meta-source domains ${\left\{D_s \right\}}^{S}_{s=1}$, where $S{=}|\mathcal{S}|-1$. Then we use a batch of samples $\mathbf{x}_s$ from each meta-source domain, in the same category as $\mathbf{x}_t$, to approximate $\mathbf{x}_\zeta {=} g_{\zeta}(\mathbf{x}_{t})$.
74
+
75
+ Thereby, the domain-invariant classifier is established by minimizing: $$\begin{equation}
76
+ \begin{aligned}
77
+ \label{Li_y}
78
+ \frac{1}{SN}\sum_{s=1}^S\sum_{i=1}^N \mathbb{E}_{q(\boldsymbol{\psi})}\Big[ \mathbb{D}_{\rm{KL}}\big[ p(\mathbf{y}_{t}|\mathbf{z}_{t}, \boldsymbol{\psi}) || p(\mathbf{y}_{s}^{i}|\mathbf{z}_{s}^{i}, \boldsymbol{\psi}) \big]\Big],
79
+ \end{aligned}
83
+ \end{equation}$$ where ${\left\{\mathbf{z}_{s}^{i} \right \}}^{N}_{i=1}$ are representations of samples $\mathbf{x}_s$ from $D_{s}$, which are in the same category as $\mathbf{x}_{t}$.
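+ In code, each summand of this loss is a KL divergence between two categorical predictive distributions under a shared weight sample; a small PyTorch sketch (the logits and the Monte Carlo loop over $\boldsymbol{\psi}$ samples are left as placeholders):
+
+ ```python
+ import torch.nn.functional as F
+
+ def pred_kl(logits_t, logits_s):
+     """KL[ p(y_t | z_t, psi) || p(y_s | z_s, psi) ] from (batch, classes) logits.
+     F.kl_div(input, target) computes KL(target || input), with input in log space."""
+     return F.kl_div(F.log_softmax(logits_s, dim=-1),
+                     F.softmax(logits_t, dim=-1),
+                     reduction="batchmean")
+ ```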
84
+
85
+ The variational Bayesian inference enables us to flexibly incorporate our domain-invariant principle into different layers of the neural network. To enhance the domain invariance, we obtain domain-invariant representations by adopting the principle in the penultimate layer.
86
+
87
+ To obtain domain-invariant representations $p(\mathbf{z}|\mathbf{x})$ with feature extractor $\boldsymbol{\phi}$, we extend the quantification of our probabilistic domain invariance in ([\[kl_inequal\]](#kl_inequal){reference-type="ref" reference="kl_inequal"}) to $\mathbb{E}_{q_{\zeta}} \big[\mathbb{D}_{\rm{KL}}[ p_{\boldsymbol{\phi}}(\mathbf{z}_{i}|\mathbf{x}_{i})||p_{\boldsymbol{\phi}}(\mathbf{z}_{\zeta}|\mathbf{x}_{\zeta})]\big]$. Based on the Bayesian layer, the probabilistic distribution of features $p_{\boldsymbol{\phi}}(\mathbf{z}|\mathbf{x})$ will be a factorized Gaussian distribution if the posterior of $\boldsymbol{\phi}$ is as well. We illustrate this as follows. Let $\boldsymbol{\phi}$ be the last Bayesian layer in the feature extractor with a factorized Gaussian posterior and $\mathbf{x}$ be the input feature of $\boldsymbol{\phi}$. The posterior of the activation $\mathbf{z}$ is also a factorized Gaussian [@kingma2015variational]: $$\begin{equation}
88
+ \begin{aligned}
89
+ \label{distributionz}
90
+ &q(\phi_{i,j}) \thicksim \mathcal{N}(\mu_{i,j}, \sigma^2_{i,j}) ~~~\forall \phi_{i,j} \in \boldsymbol{\phi}
91
+ \\&
92
+ \Rightarrow
93
+ p(z_{j}|\mathbf{x}, \boldsymbol{\phi}) \thicksim{\mathcal{N}(\gamma_{j}, \delta_{j}^2)}, \\
94
+ &\gamma_{j}=\sum_{i=1}^{N}x_{i}\mu_{i,j}, ~~~~ \text{and} ~~~~\delta_{j}^2=\sum_{i=1}^{N}x_{i}^2\sigma_{i,j}^2,
95
+ \end{aligned}
96
+ \end{equation}$$ where $z_{j}$ denotes the $j$-th element in $\mathbf{z}$, likewise for $x_{i}$, and $\phi_{i,j}$ denotes the element at position $(i,j)$ in $\boldsymbol{\phi}$. Thus, we assume the posterior of the last Bayesian layer in the feature extractor has a factorized Gaussian distribution. Then, it is easy to obtain the Bayesian domain invariance of the feature extractor.
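+ In code, the moments in the equation above are just two matrix products; a minimal sketch, with `mu` and `sigma` the posterior parameters of the layer:
+
+ ```python
+ import torch
+
+ def bayesian_linear_moments(x, mu, sigma):
+     """x: (n_in,); mu, sigma: (n_in, n_out). Returns moments of z | x."""
+     gamma = x @ mu                      # gamma_j = sum_i x_i mu_{i,j}
+     delta2 = (x ** 2) @ sigma ** 2      # delta_j^2 = sum_i x_i^2 sigma_{i,j}^2
+     return gamma, delta2                # z_j | x ~ N(gamma_j, delta_j^2)
+ ```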
+
+ In a similar way to ([\[Li_y\]](#Li_y){reference-type="ref" reference="Li_y"}), we estimate the expectation over $q(\zeta)$ empirically. Therefore, the domain-invariant representations are established by minimizing: $$\begin{equation}
+ \label{Li_z}
+ \frac{1}{SN}\sum_{s=1}^S \sum_{i=1}^N \Big[ \mathbb{D}_{\rm{KL}}\big[ p(\mathbf{z}_{t}|\mathbf{x}_{t}, \boldsymbol{\phi}) || p(\mathbf{z}_{s}^{i}|\mathbf{x}_{s}^{i}, \boldsymbol{\phi}) \big]\Big],
+ \end{equation}$$ where ${\left\{\mathbf{x}_{s}^{i} \right \}}^{N}_{i=1}$ are samples from $D_{s}$ in the same category as $\mathbf{x}_{t}$. More details and an illustration of the Bayesian domain-invariant learning are provided in the supplementary material.
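+
+ Because both distributions in ([\[Li_z\]](#Li_z){reference-type="ref" reference="Li_z"}) are factorized Gaussians with the moments from ([\[distributionz\]](#distributionz){reference-type="ref" reference="distributionz"}), the KL divergence is available in closed form and no sampling over $\mathbf{z}$ is needed. A hedged sketch with illustrative names:
+
+ ```python
+ import torch
+
+ def diag_gaussian_kl(gamma_t, delta_t, gamma_s, delta_s, eps=1e-8):
+     """KL[N(gamma_t, diag(delta_t^2)) || N(gamma_s, diag(delta_s^2))]."""
+     var_t = delta_t ** 2 + eps
+     var_s = delta_s ** 2 + eps
+     kl = 0.5 * (torch.log(var_s / var_t)
+                 + (var_t + (gamma_t - gamma_s) ** 2) / var_s
+                 - 1.0)
+     return kl.sum(dim=-1)  # sum over the feature dimension
+ ```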
+
+ With the Bayesian treatment of the last two layers, parameterized by $\boldsymbol{\phi}$ and $\boldsymbol{\psi}$, the $\mathcal{L}_{\rm{Bayes}}$ in ([\[elbo1\]](#elbo1){reference-type="ref" reference="elbo1"}) is instantiated as: $$\begin{equation}
+ \begin{aligned}
+ {\mathcal{L}}_{\rm{Bayes}}
+ & = -\mathbb{E}_{q(\boldsymbol{\psi})}\big[ \mathbb{E}_{q(\boldsymbol{\phi})} [\log p(\mathbf{y}|\mathbf{x}, \boldsymbol{\psi}, \boldsymbol{\phi})] \big]
+ \\ & + \mathbb{D}_{\rm{KL}}[q(\boldsymbol{\psi})||p(\boldsymbol{\psi})] + \mathbb{D}_{\rm{KL}}[q(\boldsymbol{\phi})||p(\boldsymbol{\phi})].
+ \label{bayes}
+ \end{aligned}
+ \end{equation}$$ The detailed derivation of ([\[bayes\]](#bayes){reference-type="ref" reference="bayes"}) is provided in the supplementary material.
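+
+ For orientation, a minimal sketch of how this objective could be assembled, assuming the two KL terms are computed elsewhere (e.g., by Monte Carlo against the scale-mixture prior introduced below); all names are illustrative:
+
+ ```python
+ import torch.nn.functional as F
+
+ def bayes_loss(logits_mc, targets, kl_q_psi, kl_q_phi):
+     """logits_mc: [K, B, C] class logits under K Monte Carlo draws of (psi, phi);
+     targets: [B] labels. Returns an estimate of L_Bayes."""
+     k, b, c = logits_mc.shape
+     # Expected negative log-likelihood, averaged over draws and the batch.
+     nll = F.cross_entropy(logits_mc.reshape(k * b, c), targets.repeat(k))
+     return nll + kl_q_psi + kl_q_phi
+ ```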
+
+ By integrating ([\[Li_y\]](#Li_y){reference-type="ref" reference="Li_y"}) and ([\[Li_z\]](#Li_z){reference-type="ref" reference="Li_z"}) into ([\[bayes\]](#bayes){reference-type="ref" reference="bayes"}), we obtain the Bayesian domain-invariant learning objective: $$\begin{equation}
+ \begin{aligned}
+ {\mathcal{L}}_{\rm{BIL}} & = \frac{1}{L}\sum_{\ell=1}^L \Big[ \frac{1}{M}\sum_{m=1}^M [-\log p(\mathbf{y}_t|\mathbf{x}_t, \boldsymbol{\psi}^{(\ell)}, \boldsymbol{\phi}^{(m)})] \\ &
+ + \frac{1}{SN}\sum_{s=1}^S \sum_{i=1}^N \big[\lambda_{\boldsymbol{\psi}} \mathbb{D}_{\rm{KL}}[p(\mathbf{y}_t|\mathbf{z}_t, \boldsymbol{\psi}^{(\ell)})||p(\mathbf{y}^i_{s}|\mathbf{z}^i_{s}, \boldsymbol{\psi}^{(\ell)})] \\ &
+ + \lambda_{\boldsymbol{\phi}} \mathbb{D}_{\rm{KL}}[p(\mathbf{z}_t|\mathbf{x}_t, \boldsymbol{\phi})||p(\mathbf{z}^i_{s}|\mathbf{x}^i_{s}, \boldsymbol{\phi})] \big] \\&
+ + \mathbb{D}_{\rm{KL}}[q(\boldsymbol{\psi})||p(\boldsymbol{\psi})] + \mathbb{D}_{\rm{KL}}[q(\boldsymbol{\phi})||p(\boldsymbol{\phi})] \Big],
+ \end{aligned}
+ \end{equation}$$ where $\lambda_{\boldsymbol{\psi}}$ and $\lambda_{\boldsymbol{\phi}}$ are hyperparameters that control the strength of the domain-invariant terms. $\mathbf{x}_t$ and $\mathbf{z}_t$ denote the input and its feature from $D_t$, and $\mathbf{x}^i_s$ and $\mathbf{z}^i_s$ are from $D_s$. The posteriors are set to factorized Gaussian distributions, i.e., $q(\boldsymbol{\psi}){=}{\mathcal{N}(\boldsymbol{\mu}_{\boldsymbol{\psi}}, \boldsymbol{\sigma}_{\boldsymbol{\psi}}^{2})}$ and $q(\boldsymbol{\phi}){=}{\mathcal{N}(\boldsymbol{\mu}_{\boldsymbol{\phi}}, \boldsymbol{\sigma}_{\boldsymbol{\phi}}^{2})}$. We adopt the reparameterization trick [@kingma2013auto] to draw Monte Carlo samples $\boldsymbol{\psi}^{(\ell)} {=} \boldsymbol{\mu}_{\boldsymbol{\psi}} + \epsilon^{(\ell)} * \boldsymbol{\sigma}_{\boldsymbol{\psi}}$, where $\epsilon^{(\ell)}\thicksim{\mathcal{N}(0, I)}$, and likewise draw the Monte Carlo samples $\boldsymbol{\phi}^{(m)}$ from $q(\boldsymbol{\phi})$.
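+
+ A minimal sketch of these reparameterized draws; keeping $\boldsymbol{\sigma}$ positive (e.g., via a softplus of an unconstrained parameter) is a common implementation choice that the text does not prescribe:
+
+ ```python
+ import torch
+
+ def sample_weights(mu, sigma, n_samples):
+     """Draw psi^(l) = mu + eps^(l) * sigma with eps^(l) ~ N(0, I), l = 1..n_samples."""
+     eps = torch.randn(n_samples, *mu.shape, device=mu.device)
+     return mu + eps * sigma
+ ```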
+
+ When implementing our Bayesian invariant learning, we increase the flexibility of the prior distribution in the Bayesian layers by placing a scale mixture of two Gaussian distributions as the priors $p(\boldsymbol{\psi})$ and $p(\boldsymbol{\phi})$ [@Blundell2015WeightUI]: $$\begin{equation}
+ \label{scalemixture}
+ {\pi\mathcal{N}(0, \boldsymbol{\sigma}_{1}^{2})}+{(1-\pi)\mathcal{N}(0, \boldsymbol{\sigma}_{2}^{2})},
+ \end{equation}$$ where $\boldsymbol{\sigma}_1$ and $\boldsymbol{\sigma}_2$ are set according to [@Blundell2015WeightUI] and $\pi$ is chosen by cross-validation. More experiments and analyses on the hyperparameters $\lambda_{\boldsymbol{\psi}}$, $\lambda_{\boldsymbol{\phi}}$ and $\pi$ are provided in the supplementary material.
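+
+ A hedged sketch of the log-density of this prior; with a mixture prior, $\mathbb{D}_{\rm{KL}}[q||p]$ has no closed form and is typically estimated as $\mathbb{E}_q[\log q(w) - \log p(w)]$ from the same Monte Carlo draws. The default values of $\pi$, $\sigma_1$, $\sigma_2$ below are illustrative choices in the spirit of [@Blundell2015WeightUI], not the paper's settings:
+
+ ```python
+ import math
+ import torch
+
+ def log_scale_mixture_prior(w, pi=0.5, sigma1=1.0, sigma2=math.exp(-6)):
+     """log p(w) under pi * N(0, sigma1^2) + (1 - pi) * N(0, sigma2^2), summed over w."""
+     def log_normal(w, sigma):
+         return -0.5 * (w / sigma) ** 2 - math.log(sigma * math.sqrt(2.0 * math.pi))
+     return torch.logaddexp(math.log(pi) + log_normal(w, sigma1),
+                            math.log(1.0 - pi) + log_normal(w, sigma2)).sum()
+ ```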
2106.14087/main_diagram/main_diagram.drawio ADDED
@@ -0,0 +1 @@
 
 
+ <mxfile host="app.diagrams.net" modified="2021-04-06T19:24:58.348Z" agent="5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.114 Safari/537.36" etag="Hmm40l37J1mNRsrC6KSo" version="14.5.8" type="google" pages="3"><diagram id="2tDZ_5V49md1tW71myuI" name="Page-1">7V1bc6M2FP41fqwG3cVjN93uTjudtrPT2enTDjFy7CkxLiabpL++EAMGCdsYJC4O7E4mlvGRfL5P5yaJLPDd48unyNutfwt9GSyQ478s8E8LhCBBaJH+d/zXQwtj7qHhIdr42U3Hhi+b/2TW6GStTxtf7is3xmEYxJtdtXEZbrdyGVfavCgKn6u3rcKg2uvOe5Baw5elF+itXzd+vD60CsSP7Z/l5mGd9wzz7/fo5Tdn32S/9vzwudSEPy7wXRSG8eG3x5c7GaTKy/Vy+NzPJ94tBhbJbdzkA78Hz/7Xz2T357+/fP1bfiOvv/7l/QBhhs93L3jKvnI23Pg114Hc+j+mqkxebcNt0vhhHT8GySuY/BruvOUmTtGlTvJSH1U2UOlXFJ2N8ZMMH2UcvSY3ZJQRmbpeqy+fj8qnWdO6pPe8zcvgfijkFl39EW6SIRX9IMYBrXQFmSJkHz5FS5l9rqxZRRTG9WPO5cRe9CBjTU7yS0kBx6Y33K7BUEwOQuwChyGq44gpQC6BkDPCHSoEYtfiqnSOUUtQFTkaOWyD6s6gHjtP/IgyWdU5NhFYczbOsBq2wUPjCmdcz+FqygxrcmzjOr3waBq4Npj6tqHFM7TNY9imuF721LZhpTOsp2FN+3LLFzc1fXu3zGzG2T7OA6e2eZnlOpB9b7+WfgZzLeJ4+ojrhtYU5kOHXK0y33eKuTk3fYk9llHP7cyMem+2fXQcgA65TIKHKHzanQT0u4xi+VJpylYGvPtcgrM4BzVEFGCIlNzEZcApX1gjQBIQQcFrCMAYYOVL0WkZ/4pCr9Ue65qq6ko9j1LTyQMdLdWDEMDyRZqrk1JAy9cZdSq0f22Ex8XZk88VBFDlor3OFdY1e711tPPhMMBdVsieGsoNDOKMcrr8B7Cb+t/MPMOqW7sA4lTI0LWyMZPBnPm/IMY2FboWP2YqGKCCEeNjmymtKijvjykVyVdnUvVBhm1ou+77mKGd+uTuuknkPTIgGxAhgJQvK/wYNkrgXTebzPToSo9a22O7jAab1AaCYLPbp3g/rzex/LLzluk7z5G3q3LA2+8O+2pXm5e01PphtQmCuzAIozdBeCWWcrlM2vdxFP4jS+/cC0pOLK1dKP+pRbyTREIuSN3usS5HquYZV8uZAumsAgQJymHxU+fXiVuMF/AgaTJfTeHme1KsanFjSyHvV3Zxg0CbcCpWbMxYCfOx15VLEpgj3WpBl4N8nQFT5OZmq6RE7pTK2xUzKkD2ocNiBTytuhN7AuhhNlZHlDjIYjSp4JaGtF64gwGtrK70amiF+fBr+iTgHCBGmbKA0nadcoywu+bDqsnDnohNLK8+pgtITZoGXbf8viMaWHUCF4TbpgGaadAzDVoaG9tEML8iO30iGNpV3DCqsI2w+QW4m0MYIg6c8iRvu/G0nQmxzQBL+1HN7EAeHztOzFtTJBllPNCqMjBTxFTC2I5xtklhaU/zrZLCllcZJzsgaZJKGivYr1aovmDvs/tEN+cYdU1l3gEuVjjlYoArl0apEZV4E9G9oiLZCVS4e++cnefXoCKAOs+RskY56ro7JIMX3gkigLiatRSJBoaylsgFlNe4UMBb1tswhMDVvqNDAHZK+bVyksG6lRy+3C5cwJmqFo4BL0OvWzWrjhI4ju683yZ1SXRbHtSL50nYXSZCvxF2PpgBeYBoYgJYRSdY3etRe0REOPrpGoUQFQeZ74O4Ip6GvDqu1if0FUGIK967X9DN73i9JdAxBi5V7L/2UKyJIm9+B+stIZ/2QjkpemHXQDdNRojhF2DHzAhBgFC2qba2BI3sim24h19ofS9wdzImtmkw/PraxGjg1g/talY0YJht7Fudg+ytfjpmXtRacFPMuJRr2KZFq0xwpoXxhKENyWxzo1XCOHPDrisZIVEgaWJFTNX5JfSp5HV1fpdx7JlafSEMCJw/PSDXs9CL0hRQ7nK3KNuTGkLV32K+tE+HT+s4q52VGJfYWXsyqY9EX1THBV3Q9sSyKisN7B1nqMAe0uETPEcASqrAEzwSc/yWlJV6UfjZob6bgI6KZE+I83Kts6BVjG87vyMQj4MFqVMu96L60A4P8kwMe3GEDqokqAxcfXiZdU40COKi8Gnrv8VszmX3PMheCOXpcZTpj49D+dm/MmvUGWjQ1TZYRe+m1z52M6h6rXksX996bbBE3UmvNP1Xe2jz7Uo/EW7jUnvmz23omzmD65s1CBk76bvN4eaueiWuHqf3rdcGEVkXvbY6fNzZPsDh9Yos65VK4ZM6vQp0j5klf4aH12uDtYEueu0na1ftAB7evjaIyW/GnxE6vL4blKJuR998eH03KRIbe+JGL8a54FDxDBRdy71s6E1eHv+44SEjPP6JSPzxfw==</diagram><diagram id="vpblW-uTyff9IbwNRn8F" 
name="Page-2">7V1bd9pIEv41OWf3wX36fnlMnMvsmckkGe/JzDzKIIMmMmKFSGz/+q0GCRDI0LoiHDFzHNRq1NJXX1dVV5WkV+z6/uFD7M2nH6OxH76iePzwir19RSnhlL6y/+Px47pFSrNumMTBOO20bbgJnvy0Eaety2DsL3IdkygKk2CebxxFs5k/SnJtXhxHP/Ld7qIwP+rcm/gHDTcjLzxs/TMYJ9N1q6Zq2/6LH0ym2cgku757L+ucXsli6o2jHztN7N0rdh1HUbL+dv9w7YcWvAyXX399eHpi/2EyeKJfYhbIf77+dbU+2PsyP9lcQuzPkuqHHr/+M/k9mr6ffojfikmoP76Or3QKw3cvXKaApRebPGYI+mMANN2M4mQaTaKZF77btr6Jo+Vs7NtxMGxt+/wWRXNoJND4j58kjyk7vGUSQdM0uQ/TvdHcGwWJZZiwR3C83hSXRbSMR/6Ri2Q85Z0XT/zkGBppR3vFO+xJ8fzgR/d+Ej9Ch9gPvST4nqeYlzJ1sum3lQZ8SQVSQjjZeR8TzhZ6i+OPaZD4NwCm3fsDJnQe5nK4fvfjxH84ikO6l1G9/kmqI4hMp8yPnRmXNk13JxvG9aHzrj/Gf0fed8W/fPrwnk+nX27kFcHiNHYTAG9eE5ON4vJus8Pi41jhPFSCHEBFeBFWDCPGWoJLMAc1MBu/tvoYtmbRzC85gY9L6eSES8G7IogqYBvD8EcYJrjwrwjLIYoRKHEuFah4zoQwRh4ATBUyxiiDMSZCCZxd7S7e0AcrzKlRRGo4iH4e+vR0P0cBXO3mXAnmdhRKqMDCYMnyp8kZEpQTIZjiHM5By/wAa62WHnNXre8PwwwMo42Ai1FacaOd0MiGWevEg2FWLNrgX4NYDirsJRGLIBCBZnAErRjmmUHJTWSDCNaCaUmZhH6GOjMr0xmgCgg3AgaQWgiOdUnqOFKwbW44qOiBGyW1DlspHcVAnASUllK8lDq4FOrIgTqNU6dtg9UT6riseAbqNEqdpgzWqeO0TR09UOfiqNOMcmubWWZgVvPMqrcycmVWTY+rZWZlkZiBWd0xS0ukmdScGhve4ZnH0bDKOrMxlGQg1qUSq9chI0mrEGvsLaabMHghy9jAMjeLVZdlPdFPlULaA436E2pqhK1ts6xSfHtgWd9MYr9JVj2XeSI5WjubSQWCFbPGgBnIVoGQc7hJhjDhWnKlsOQ0qzbYIRFTSFKuiNLANiI5O+QQkwgbBg4w1YYYsw9+Y0lQWTeo7Ai/LDldJdhRuGyuBWZ2Ipk9jGG3kAJWCxq6GZYJtyTIGMH0okJQjuEoUhxJNRfb+xNicnYbKMIMfAcKnGIw8bPQSFdzrW54+CclQXqaBnEsiTJcMCk4qRzoPTMJ6gZ6f24SUA3OgzX6TIL8KMtq7RyFeykkqRuzHUhyhCQNWZMTh2mZIqpu8HWgSIsUaUZbtU2humHWn5tCzwxSliMn3Jq2OUAHDpyfAz3XE3UreAeOFAySniXlCKyMxJwYqoxRpGIRQj2D1jaD6pbqDgxqn0EndFnbFHGp2A3DYL7wT9+M4i3m6/u97oIHGwx/cxeE4XUURvHqQOxOj/zRCNoXSRx983f23GrBy+SM5XPhzmc5R6SFWSsBGp0zQQ5UPRBNW4FyQohmBWFNxDUwkmmKFeeUF4TGn+nSeFhTuYQ1mxLa2PP1XaHQ5Ej7t3etCo0iyo3BmlAhtcGZ65xNQYY0JQZmjsAaZtDhfTd9ElpHYcjSuSMiGUwNphQFMBVmJq+MGQLzJUB/KgZyMOwQY2MTBsrmhEEL2ilWADJBmisB2s2Owog4gvJzpTPYTmAK/jrRRGKa18ZMI05XSkRxGIDtHd+9QOfoKFZNCMwNE0rCxWguulXWHQUxXyqFCEZcKkMJN1hobUTuNJvKbvecQx3FOF8qh2wJDPiUCjx+wXlWLOAo25dBId1RDPSnpFA3huzEKG0TqKMI6ECgGgSqpejaJhAdCFSLQJ3cRlHT32qbQx2FV39ODlGBYHgB5wcrd8OxqRiir6Xr2mZQS5Wird48cSHsOq46GmNXr72kSg9TGOjVo0hAPRa3za9KxbEDv3pkHPvNL5eAd2Oppbs7WpxaGstbWKJUoGSJNAXRiFAOOgNLQzjneXErxBmmICINfowQhB6ysj9pCu0SY25Oar58RmrK3OIqiqRMcomi3eSSyOsSmD0c1AyGbwbUBC24XaZPYuttWNdQZIgi4GGBWhRE5hOvkiNGreaiSkopRAHInWlsJWG1QZQBN1DmKxLAS2QMThAuAsMFEVO2vnlDOWPBkEA4ojRWWRnY5v5KuFQK+pxqe48Nl92Wi5jexnUVTCOpwYwaDdipPekIjAwAiqmt8gCqFaSBu+EQzz8IcqVBNBZgnTUVWpKq2QCOu+VBb8OzDEypkiAfY5dsLF8NQATgTQSoFK2wMSwTcr5syCj4naAYEyZxUdkQQQJcS6kxZpgbSY7cm1YsLiWRIgZ0vcTc0mh/jjfz9JsTo2COtGbKSCwl+CUZVF0xiA4MqsEgLWEBCq4i4yBbbljeZIJBBZVnwGrCeZqqKcZ+86e3sdmL4A9cli2whAUJxpyDv8FKCfdlUKij2teXSiHCkTRUMQPLFy72HiXQjAqqo+baZk/dB9kO7GmbPTWVXNsE6ujJAz8lgQhRSBApJaMSToVnEbXy8dPqNG2bP5Wqhc8dnL8Ibh03O01xq94qsG12VSokHtjVk7VbPQa3za1KkeiBW72xir1m1+b9Qp3kp3wyFr4qyk8ZuHiv3ayiYYiDGdEMtAZQbl/YBAHVoBeIQlGeva4pl52ydy5zLTS2B2CmKOj9TJ/G81ME9zaoLCXCTArCFZUEBHs4920+CMOs5VrS7LTPEdOxqSFY12BhU1D7BbkGTgJrKWBickqrP6H4+DCwqlIYNA8B5hlNdbclBQTTvrLIvoBKc5AvAQ4ZQ/IpKgKTmRGmpeSKS8EKClM6IxHGmlgrZoji+ZR3k7kJijVYUsM0XJEuNUrrHLrQ4LLGSEsCmNmCArVxHvvnhEijCAMlopXevIet6ehOwavzWueNS0S5sbuuha/HvMjz0PTWlie06XlwcPGMkZyBgyGV4jhvBDQBxwQcRGnvvGabmux+FsYQXC4SNwq9xSIYlVyFHHvB5MnpvwOcKHhNZNbm/IbSZ2aU2itCxAapipkfik8fq7npWPgmVeKwzDzL20Ap2TN3yu3NqbqBF6cWI+WwYsq/7veE7mqi9PK4TE9rqqz0B9zR1VOcQctRycihTSyEGm1/oLThDTx+uPh6HNY89ZCvUD7ZEPKSoRS8Nf79At5hmVALeGH/K3wmyupjfxHNkp329adNgSjw7HbkgQ+t8lkl4uB015JIlUcLNYQ8LP1yOqggPHhW6B381jrQV3pAUEPQM4F2OS8OgzNnRd6hiqAW8lUWCQ0hzzkqwrAvyDs4/XWQrxQYrot8X7B1SE2/NOP6DLZ9kYhDOvelSYRIpPPo9koktMj1l2GSQgV77tbgQ+v/lpFt/29w7y9gx+/+D/j7R3Tvz
bY74dvE/vv1/bvsOHBe60Ot9xyRODkt8QI5lVklP7saZnt3aBJ1KCdZIKdjccl6gimyyYNgKMWwfjizaIqM9iAaewPieQXDXHK344l/k25GcTKNJtHMC99tW9/kLdC2z29RNE8R/cdPkseb4MkexFsmUYNR2HVo08GtWcctHVT7ybiuc8C2nnAcYh2XLxzuKhzXnFtHwnHJNV68cIircGSvhON0q/ulC0e7zpyslKMvwnGIWi2m3tx+HS1v/dPm+nYtqN9uNw3e6NtkJb5PyyQMbJp/1T724m+fNkLBCIt8I121rnoGsT9KgmgGLSAJa5xb8wr43osYieQFDhshBY6BaSu15PLUgbMk4YTaA0sVgpVVpuyCpVoDy8EWZIxOgL3+U2SP92bux+DlJn682/5523h6Mf/gj1PtU4K0R+VdKljFKZK5T0E5QHuoNx2DbR6qbIILpHY/e2lkg6g0Utr31TKuGTksIbPBjdynuP6HSywVJ/YRSYy1tWRwebhU12SfWePcpgSVQiz3ORDRWadC4fM9cuvrrXCyNbTdcbVY4fkaOhA8f9hdYFdckV+vimzugpG3Ek3pxXm1adqIGVb7ZXlSFlgWVWCFaWtyPRlr7Equf/iT2AfJXrhMabamPptEe7mmB9Tjx7/Sw602/l55wiLbfPuwu/PtY7pVTmynVzSpaTm5osn0XU9WNE4PtbgYofoPQfLXtids/Z0OYL9vf2Q3zk6ExiN2xWWRiuQLZa3TxrWAZrr+W/GmDMX3VjN6s5rpqNqy8Gka5UyMyVuYtT0AXR8vfNj7NXrww6sPMZyFo+UA3Z7kmZ3PTqZ1+7upzLTJC4OJ9QdHQMOVT2ktBfgi4et0x30wHofPRRTyE9Ce4nvvPgitaIpsZHtWa7/QlDoWmrZntU6m1iqSJPFG3wDxjCXvfS9Zxv7Vu9koGoMAB8I45pWyPFJvCHMy4VeNMB/X4qD4Opp9j8KlXWR49mzehNHoWzbEbZz1/5cdAN8sbwH/4C4KLdU2mmnnGIt/H/52YJ9z/DJ/I7G9jaRghUyw6ZKBRbVMDTDw0zKZL5MSDKRvB6o1RrX96C8tqMgtDJS3x7OiVFMDPPsc++NgHd+i+BffGy8G2lQPA9B92rRmH2Ezjqwkt046gDH9CP6M7fF/</diagram><diagram id="Wzl0Dyec37FNAtXyLP3y" name="Page-3">7V1Zc9s4Ev41rpp9MAv38Rg7x8zuZJOa7GZn9mWKkWiZa1nUUFRs59dvQyIlkYTEm6IS6sG2QBgEu78+0N0Ar+jt4/O70F3evw+m3vyKoOnzFX19RYjCFH6ahpdtA1Vk2zAL/em2Ce8bPvnfvLgRxa1rf+qtUh2jIJhH/jLdOAkWC28SpdrcMAye0t3ugnn6rkt35uUaPk3ceb71P/40uo8fi6N9+8+eP7tP7oxRfOXRTTrHDat7dxo8HTTRN1f0NgyCaPvX4/OtNze0S+gyE574gNU/buQXb/p3D6//++3+ejvY2yr/snuE0FtEtYfm89WNe/fpYXr9l/fX+i58//D0SzL0V3e+jukVP2v0khAwDNaLqWcGwVf05unej7xPS3dirj4BYqDtPnqcx5dLzjS5rRdG3vMBn+KZv/OCRy8KX6BLfJUStf2XGIVYxEx5OuBp3HR/yM6Em24Mo9lu6D2p4I+YWhUoR4opNwPSLRtSZCcY7pdkWHSSUkSnCaXyhMLMRimKHEo7IhYtJpa3mL4y4g7fFsHCS8MqAMD5kXkkkFsbyk6xyJumNMRRyl1jh0gAGkXwg2vKGfeuE/UXkxM5oCGYkIpIRjnXWuSoS6SjtZYaIYS55Ch51kNiQx8kESNaYqFgEHWc7vF0PwY+POturhgxcxeCCUdcI0HT02TU4YRhzqlkDOagRPoGq2AdTrx4zEOdkb0N1XAbpTk8jFSSaVWKGsltIjeceVHuNhsI7ehfH1XsR0IVdoD+isIISlLEFLOIsHYwUpwqQaiAfpqUhlWiLUAJYKY53EAozhlSFXFTEn8dA4OPwGgLGIn93agbSYGXGNSVlKySIrgQ3IgRNy3jpms7NQzcyBE3/eKmLTtVNE7HuFEjbi4MN+2otY5hpUdYtQ2rZuugsrBq6GV1DKskojfiqi9cKeEoKhQjoGk4Zlx2oq3ObARxiRjkCKshwmrQwSFM6sBq6q7ud7FuK8boiLEytqopxgaim2oFrkcQtQSinvyqAqx2jbFaYewRY8MyhsOGWImA+JFk5cncZ/NkJXdgiawQUAw4K4HFKaoJ6iDMlGBSIsGIkjkIUekIwiSWCrCGBaN5BFHhIE3B7SVKY62zpG8tx4mbRo9LEj9hZllhFWBB4bGZ4ogaMdIZGsNlLjisERR00xTxWkRGDggX4ZwwBKMIfiKTbLf0BWwq7TAQB1HwGghgioLYc92vpDUNBf+gIIinqR2GBJaacSo4w7XDumcGQdO47o8NAqLAdTAmnwrgH6FEpqZZxNxLAUnTKO0IkhMgacmaFAzTMURI04DrCJEOIdKOtuoaQk2Dqz82hI7cpCpGCtyarjFARgycHwMD1xNNK3RHjFhuEs+SMAesjEAMayK1lrhm1UEzg9Y1gppW444I6h5BBbqsa4iUqcudz/3lyiveaeKultvtQnf+swmE39z58/ltMA/CzUAUIcFvXkP7KgqDB892pSLiyu9WwcKQWUkOGp1RjnOqHoCmDEMZxlhRS1jTYQoQSRVBkjHCLIHxI13a3+dSJqzZFtO0uhVvXtmYtrvSHdOIQ5jWSGHChdJI4rQIUkcRrEFyOFIgQXjQTOspDEmq6WIsKAgGlZIAKSWi6a1KcBGMFwftKSlwQdM8hbVJF0iTCwYdaATMQmLsKCY56DZzF4r5CRofK5dBRnwJeOtYYYFIWhdT5TCiOOPAQrgBzYxfvijn5F2MkuCIacqlgIdRjPerqnsKYX6fAMLIYUJqgplGXCnNU9NsK6s9cAT1FN/8PhFkyl7Am5Tg63PGBKvE2e8DQLSn6OcPCKB+TFjBXbqGT0+RzxE+teHTSMl1DR8ywqcBfHrZLtHQz+oaQT2FVH9EBBHuwO05zA/W6pohXTMo30jPdY2fjupCO9wkcSHYOq02WsPWoL2jWkckjOAazNq/GYa7RletUtgRXYMxi8NGV5ngdlsZiTf0Rihly0jsrlQDZIWEBFYOJgz0BRIaM8bSzJYOo4gAgxT4L5wn+92GmZCgZeLJ7fFMEkNYG8/iK53xjBDnMInE03oEJIeBikHmmDZQEcSyKWZITBtoCFcTR2OJwa8ChcixSKdXBXMoMTqLSCEE5xYS96arpYAVBpYanD+RrjsA35BSmCA8BIIHwrpqFfMOcNoQQwDcsFQIwJW6DabwqAQ0OVFmJw0T/RaFsIHGcCWIkFBgPrUCyskMbzhyNJATEVPJAUCzpHr7QVBCvkPtoRAHq6wIVwLXjfsz1C8KBhqKpWBCpQDuaLNIo+l8P+ZAbcxBnSiJtKbSWhikJfwfJwhhKpCtMAg7HBxKoRCiiGmBT+w+szNLCkdiDVpeI
GZAlJXvdg60KbgLYo5SVGqBhAB/JCFVX/ghI35q40cJWHKCg0gZcJZpmjaWYEpB3WmwlzBPXTeROGz0DDQOexHowYib8klmjsVmDPwMWom13weAeqps/T4BhJkjNJFUw5KF8cwhAe2onyYqrmvsND2KdsROt9hpqOC6hk9PZwr8gPDBWDocCyEoETAVlsTPqsdK64O0a/TUqgI+bxj+IpB12uC0haxmK7+usVWrQHjE1iDWa83w2zWyakWdR2QNxB4OGlu8TDy6vR1onGhq34EWX6kGx/JpKE0dBvZDUVAYgLcsp7EDOINewAdJWLLXI5WEMhuRmeIKmQGotsW3j/RpPQ3FBxo+FsJBVHDMJBGYgkTmpN5kfRDIK1OCJJM+RwTHJIBgJYO4STRly2w1TAIpwUEkGVCm9hnDp28D6yiJQOdggJ1WRPVbMsDJMCFk3hWlGDAXA4C0xuk8FAYxppgqIZhkglNL1UlvCEJIYWO8NJaJge4gBUGQAgOqqYInUpXu0jWALjKIrJCjBAaCmXIBuXMXh+d4CC0xBfWhpNq9L63tSI7l/XZdg6ZM4Lgtb+N287F5G7srXXkbDHw6rQUDvwa8OgmOX4ryCoMzAh6hMJun6a7Iepg1L7xawHYyd1crf5Jm1ZYDydtGq78Ws1D6D0jHLa9yTNpCb+5G/tfDsSqJrcyU/WXLBkqfW5KReHAbHQlSuvuUq30GmrsvB92WpsPqxPyzZYsMnZxmrj9O9Yc/tjNoV0eUiPBuj+Zt/HLV6qfzknT9CZblXrqqunrnKi8R0Ny/rhYVK9VjhYMlNeWOecWqMiktAjd4cw40WCsiKM5bZCtBnf0/SKXZCQvcjL4lgnrN6FutZLY6fQV1YhJtqTws8paIbFUib2dklODzHVAR5U32OekoSkRxmsC06gFB1ekL676UFrCEBM9K4EqvBa9O4KqH+VQnMOXOIX6T9f9Q6FsiHNGEvlU9/ur0ZcyxUWoo9C2xWm+mIKrFbyvQdygULLF0HYapOkKRodCxxHJyGHTEwlFpmgyLkLZ1kphHRjiDzSrubksyaP1rHZj2f/mPHiwP0T+9J/j5W/DoLvYX4a+Z+f357ZtkHJjXdqjtlRN8wnX4VGmtdnRNRjM7E7HM80lY+HQqateMMbY12cgYWDuDD31m1tiWcyNrzOa7MzOmTInDdOZ9ir8GYXQfzIKFO3+zb71J2419n1+DYBlT9H9eFL188r+ZQdx1FDSh9zbaV8Jn2IbvSujywjhn6QBmI27IMkUBl8cNXJYbcljcKJPtvzhuJJG1Ym6oYXGjxEr18rghaUluJHGZoXCjxLp2de8uzZ+T9Rev2OR+2XLm1y+7BnfyMNvw68M6mvsmjb1pn7rhw4ddLR1yEE83kk3rpqcfepPIDxbQApwwBrYzy854ZocsymcoMLaYdt1VikKWWDgfedVit8kcrjOkwhZSJUUXh6SSnZGqxNo4QXMEyPW+BWa8m6UXgpcaeeFh+8d9Y3Ew59mbxqqmPGBPc7tSYIcRR6Q+lnx3d1QvkXDsIiJRgVQ74Xbk4SdTEKQdIrQQ5o2qlCmK89VRJjiR+tirW5hAQjJsjvahtCuXX5ZIXfYN9oWxxF1yUEqHpj45Fp1XFArXx3vmJGtgc+F6taHnK+iA0fL5cIFcc0V9uykjufMn7oY1lRfXtcS0HRMsWaZqApEcl6XFApPOuGpbXJ+Fq795s9ADvl46Ryl3cP4UnV6Zisu8C7ES9SxF+eaTSfbEhZyz0J36QOZMc1ccyBbeAK0di/JUuFcOtJ0UvigO6CFwoMRi/LvlAMVD4IBtAb5V7VP/aynTgtOmJTEEFQ0UbsVA/QSEAkqg5NffjtqpXfPmMY9YL+BsdApgMWIs2HLn/sx4pBMA0carNTgBb2j+Kr7w6E+n82PxjDTmOwNkprr+iFK2JRI6BKQt3HDBgCRbJJIRkGUAiZyMp6axI7jef/LFED3D0xbiaR+ezcAWaz8ygq2C9jtijnuGV7XjUaybNaoRbLhbM5wkqd10dwZFGX51tx0jmXPZHRnJ1DrdkYErvjzRiioAU/jyu5FJE4JMGv7YNGjNk4bXz7HYbr+9HH47CPLVkOzi1BYdFKJ3Gzr36YnaiGYsvY82t3GpJUTvds7n53x0atl/Yb0gutphLXZEP/vR7wd/b7HM4297IJsvKRxvpUAompUC3b0UFGaCd1GNi5MXnAGSebWEOnD9ar5YIpdR5R3JDs0s79OqvbA/ietbOhacwcSSX+/yNhcdSrbsIOw3jlzxrZIl9/cmynGvEP84uFKkHNOKscg5aFkDJonJYgVYtjKpHwUoswqQZjBT+twJnvEWEHLQ4afcoQFVFaDMKrRYVI5OM9ufnlaYuf7xUUvdKsyKb90cxSuXthyKeJG0FJCMV1D3pHCeMQgJPtqWrmz+pki6sq9aSPfvSlpIJ9JSE/l1pOxc0rLzzIYjLhkBqW2NehKQpuZHtbtwDd7eT+bv/v35T6S//vnAPv5+Lz9f23y1at63tkWPwQ8OVx5c/Rw8e/PrdyFMoqRXPYwgr5niW/fRnxtm2JYPHcaBce70r6usR2/bJ9iGR2/FiM3haAMjkTt5AIInIHnrudE69K7fLCbBFPg34qXk3iuSxUu5faWd4cVmclvAS2aQqRu519v/Cyepwe6jaLnaDPQ2Bs/KmQXBbO65S3/lTIJHaJ6soMvbu4RlH11jnBbBFbn5FX5FL0svPwNzE/Rp/QX46d8FcwPdnaK7DRZfg/naxA9WBVmuL6El7zVi/BTGWXLQ286W5qvlFOkR48erVBph/MM6Wq6jNJxcM5mbeTB5yAPoJ/I6g72/jSirjTKePVJVkxzKrHtdOoPZ8dqTRjD7GHpTP451op89d7oaUdMgAps5w02d2V9rXBFixcwvi61mSlYtt/NgfejUD79w44wYERkXjeddNN0nRI4f9ZFiYwuqJvbqi3yiywOQ8RgXW5cR7VzGHleF/Mxa5vihJE20TCuYuw2CcOov3Og7hN15YxGEE4fS/Lmy5wJh8+1lVgC93zKnwA2/QM+oZ6XFWGZHqcU12u07aggY+BoGhov78CiQ4/59MPVMj/8D</diagram></mxfile>
2106.14087/main_diagram/main_diagram.pdf ADDED
Binary file (6.2 kB).
 
2106.14087/paper_text/intro_method.md ADDED
@@ -0,0 +1,21 @@
+ # Introduction
+
+ In the current state of the art, researchers focus on 3D object detection in the field of perception. Three-dimensional object detection is most reliably performed with lidar sensor data [1-3], as its higher resolution compared to radar sensors and its direct depth measurement compared to camera sensors provide the most relevant features for object detection algorithms. However, for redundancy and safety reasons in autonomous driving applications, additional sensor modalities are required, because lidar sensors cannot detect all relevant objects at all times. Cameras are well-understood, cheap, and reliable sensors for applications such as traffic-sign recognition. Despite their high resolution, their capabilities for 3D perception are limited, as the sensor only provides 2D information. Furthermore, the sensor data quality deteriorates strongly in bad weather conditions such as snow or heavy rain. Radar sensors are least affected by inclement weather, e.g., fog, and are therefore a vital asset to make autonomous driving more reliable. However, due to their low resolution and clutter noise for static vehicles, current radar sensors cannot perform general object detection without the addition of further modalities. This work therefore combines the advantages of the camera, lidar, and radar sensor modalities to produce an improved detection result.
+
+ Several strategies exist to fuse the information of different sensors. These systems can be categorized as early fusion, if all input data are first combined and then processed, or late fusion, if all data are first processed independently and the outputs of the data-specific algorithms are fused afterwards. Partly independent and partly joint processing is called middle or feature fusion.
+
+ Late fusion schemes based on a Bayes filter, e.g., the Unscented Kalman Filter (UKF) [\[4\]](#page-15-2), combined with a matching algorithm for object tracking, are the current state of the art due to their simplicity and their effectiveness during operation in constrained environments and good weather.
+
+ Early and feature fusion networks possess the advantage of using all available sensor information at once; they are therefore able to learn from interdependencies of the sensor data and to compensate for imperfect sensor data, yielding a robust detection result similar to gradient boosting [\[5\]](#page-15-3).
+
+ This paper presents an approach to fuse the sensors in an early fusion scheme. Similar to Wang et al. [\[6\]](#page-15-4), we color the lidar point cloud with camera RGB information. These colored lidar points are then fused with the radar points and their radar cross-section (RCS) and velocity features. The network processes the points jointly in a voxel structure and outputs the predicted bounding boxes. The paper evaluates several parameterizations and presents the RadarVoxelFusionNet (RVF-Net), which proved most reliable in our studies.
+
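+ A minimal sketch of the point-coloring step under simplified assumptions (points already transformed into the camera frame, a single `[3, 4]` projection matrix); the names are illustrative and this is not the released implementation:
+
+ ```python
+ import numpy as np
+
+ def color_lidar_points(points_xyz, image, P):
+     """points_xyz: [N, 3] lidar points in the camera frame; image: [H, W, 3] RGB;
+     P: [3, 4] camera projection matrix. Returns [N, 6] (x, y, z, r, g, b) and a mask."""
+     n = len(points_xyz)
+     homog = np.hstack([points_xyz, np.ones((n, 1))])        # homogeneous coords, [N, 4]
+     uvw = homog @ P.T                                       # project to image plane, [N, 3]
+     uv = uvw[:, :2] / np.maximum(uvw[:, 2:3], 1e-6)         # perspective divide
+     u = uv[:, 0].astype(int)
+     v = uv[:, 1].astype(int)
+     h, w = image.shape[:2]
+     valid = (uvw[:, 2] > 0) & (u >= 0) & (u < w) & (v >= 0) & (v < h)
+     rgb = np.zeros((n, 3))
+     rgb[valid] = image[v[valid], u[valid]]                  # sample the pixel each point hits
+     return np.hstack([points_xyz, rgb]), valid
+ ```
+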
+ The contribution of the paper is threefold:
+
+ - The paper develops an early fusion network for radar, lidar, and camera data for 3D object detection. The network outperforms the lidar baseline and a Kalman Filter late fusion approach.
+ - The paper provides a novel loss function to replace the simple discontinuous yaw parameterization during network training.
+ - The code for this research has been released to the public to make it adaptable to further use cases.
+
+ Section [2](#page-1-0) discusses related work for object detection and sensor fusion networks. The proposed model is described in Section [3.](#page-3-0) The results are shown in Section [4](#page-9-0) and discussed in Section [5.](#page-12-0) Section [6](#page-14-0) presents our conclusions from the work.
2201.13100/main_diagram/main_diagram.drawio ADDED
@@ -0,0 +1 @@
 
 
+ <mxfile host="app.diagrams.net" modified="2022-01-12T08:56:14.127Z" agent="5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/97.0.4692.71 Safari/537.36" version="16.2.4" etag="OLDHxTda1095SkiYSY2f" type="google"><diagram id="4VBnQSJjx9kBoXR9Z8uw">7LzXkuxIkiX4NfW4JeDkEZxzONgbOOdwsK8fIG5mVWZW9/bMTrXsisy6XInrYQAMgJoe1XPUzOJvMNOfwhJPlTZmefc3CMjOv8Hs3yAIRaDn59tw/WqASOxXQ7nU2a8m4J8NTn3nvxrB31u/dZavv7X9atrGsdvq6c+N6TgMebr9qS1elvH482nF2GV/apjiMv/TY7wNThp3+b+c5tfZVv1qJdA/nC3mdVn9fmcQ+O1IH/9+8m8NaxVn4/GHJpj7G8ws47j9+tafTN69tvuzXfj/5Og/HmzJh+1/5oLfBmKPu+9v7/Y3COueS+lifHp4bBOnvw5g8/d9KJoZv0udL88hPT/+2fy+y3Z1fz737eP/Wn+Gj3pOAKHp/OMVWPn+/x5J6iV7/jvqxzzPEL2d9fHPg/7lcdKxG5c/3eNvEEz+fP615yqPs9+vf0zwq4tfh/4GMU9v12OMx/rP/Z8v13vnIfsZlHF5b7bF9Y+t1p+Xe47VQ/l8HYffnjFZ4iGt/v6f3gL6k1GgLT9/nmrru6cBfL6u2zK2OfPbOw3jkL9vWnfdX5riri6H59f0GdXH9DC958tWP/5I/Xagr7PsvQ19VPWWO9OvQTse8D1ty/gdsvwdcOAfj/V2kJ//qdOA/3DFB8L52Ofb8prntwtgHPo7/uui3wAMA+hvvRz/xAOIAX+Hf7VWf0AD9pvrx7+BsPxH///00+fLb676H7st/P91t/3lev+/j/0/9jEQx/4OIOQ/P8T/qw6H/AcO9xfT/9ME4H9ipD+Myh8H4IlgLI2TIP/vsRxCAH8H/motDPsXayHA3zHwX60FIv/71kL/K3j+L2PuXwHt1n2+/g5nwB77ePhXkP5+2+T3Bpqz3T9gKfnrif/n4AsG/uIi+L8CCgH+1T+gfwOasP9eNPEQB/3bch2B/h2H/hCG0P8aWTD4d+I/iEP/DmTh/72WI0mKQql/UxzC4L+YioD+1VTA339/hX+3qYj/5pAN0jAE/ptM9SQ7mPyLtX7PeH/EI/bfZi3yv7ZWPmTUK57eINbF61qnfzbQrwvy7Hft9D9vhj+8IvofhJzf25a8i7d6/3P3/9FL/3YHc6x/ss3vUEaQv2N/YBTYX93zLz2uD2VM8986+aOG+mu/2P9av1u8lPn2L/3+jNA/jPI/NWi/6+H/u1F7FOb0fq37H1H7j7Sjxg8hNce13urxTT/JuG1j/5zQvQfoOG3LH3j87vJZXsTfnxz518S1jS9S4nX6JbWL+nwhRf/ckPq9Ffi95e0q3uInzf/6FeKnl/EytUcb9gEoQjlSz0d3PhX3KZ9vwvsrXTKU9P7/mIz+0vvzda7bjrM8Gxk0iCxgOwWj4KEANC6ACu4t2Oa6Sxw4vu77ByN9LYlKbZXqs1Eqsl57YhPNQanmDxLHsi1/CVamldzUGkOLoIW154zMxJZ4PSjkY1NirbK2DsrBkjQ89Id48F9IavWaoSl9kV259mFUCZx53gLvcQDa+HT7w2AeUNC6z9irBJhgjahfYlNmY35aMRQFdxxXTRPe99V4WorUrB3amE6W/0BVWc9zLbMyNZcIBXqUpl6y48m2TIEzQ9WSH66V1I+9xMVX1eicLkGIJ8j54Lg1t0l9tpyy91Apugf4ZP3KDmEXHduaQ+AOe+2QOTnhOiXp96GG9A3tfaLiwlE9duGj6wM8zIg2XVEILkY48dLRz0uIPo4LoY/Uoa3FL0JxXb+viwan7KDXBq/Qqj7w45G0ij5PPOFNMLiC6H7DE43EtO+PFX6gBi2P0XViKYoy+FwwppqIw0xBTbPv3/vjmM+VD0BKLnWQIfhKz8WT3kd+YeiUZzXaiqIqgYH9RDiUN3G2R1uWzW+Fxse13tK0S+1qLjUfU9sk1tf7BugwMIFmv68ZfvFO8dRhXssgP5/g8el82JB98+8Sv9Fgt285Ja3218AFvOBcZ+SqIkA1a+Gnz+k8XlLIHm2dB1AyP5imizfT5yEc93Ms39/LQu9bkq+fWp6JY+GNMGviiyk4oSSjO4p7lO1AEmWWnSgwx/UQPaeSKI75uyXhjQ4BuGQ5V1RFQtWWDBc/zqUHJ+FvPB9/rcyevD1kMJm7VIapHb7jtdZ33DASEaGSVF6TNarhztVvXTLj2lIsuURLGi7VmISZKhrwoESHCRAht/UFgUOWlGqc7bXvJ7k2L4QQdxvr78uF7wVRuid+8dLXBQidsxLWQGdNDBY2d7QJLs6mkqs2Qb9lj36/E2YcM78ss5ci2+Xh+qDTGFli9khVVbuHEaoR0qnumT/fME6ejVSxXfwdHkDwGFiRq2nYJFQfVg2TxOHHnb1QcbdKWnLXIonF0zZyw/CEQJrAqWP/XLN9oCsQTeEhhy2E7bo2tbfmyPOEpHS9rUGj+e/A3NTRyp3TE5Fo16NV3cc0iS849m3rexqtabjGIdbgmIJrvBVqG3XVPe82iBokN5Tk7ScU1dRdykYjETmT81RtNE8HzLJ404VF/KAGr51gUP3AnwpnPbIDjBdN9+MK/Dn3AgwDWaZPU5qyciFvmxmAd+SDKp4TVfqcY1/Mp753ww8AJig/Q1s5Ec8QY+MNleUbRjjjNf/cMs56sBjNfnCbUVe9+rzm4Gnu8AXqmqBnmEn8aE4kuwyVQw3Tgb3qAxO1Uh9jlWK6pzZcCzmp1t2sSxZ5rTH7ha3eLr6aCnqCnWWIEN+Q5aObwq4ne0D0dTmERqyDSYy6cG7O+sQ23gGoudjDM61iIWMp8IXAYxO+3h5jZCgZ5aVXvDjAXkxbgXNkk+fIQQMOTEpkh1HPGpHbRLC/EFhD3JYEDsVko3YCTcCI6x4f9POPYONB3Sdz5Wohqu3ZdKBNKqn7zdjmKKk/QVsMyTkCOe6sgMFyxI439VfcYXNbvDBwLkfne17BTh5kFd+IiDnUdk7Isjnco1EOjaixbOnYzdw+EZdU3oGkARj6VPvQniS3miQuOKlaa2DjnEcWSBIu0suwDGvCo60mZsYnEn0fOJUPvMJzmj3jwRsFvJp7uiE5XVQG2FmH8ImUW3dG15XCR0bwZRBvIWnjfpAfhkmvuqkFsfLcu1xjUVBZaDcrIu542DS0WZr3c9Ru8L5TUbxff/kKrwPDxCkll/bRPC256FHqBY4ZPlgdXqabYF/c8MkH0vwuPzKZHwZjxVHLNaUoj2nW27T3OXxoF6yGaa1dbz8UA+wLa0Hh62mkElaVtxtMmKHz58a71DM6ZCWJytI6c1gzET+0OdHJktuD9WS/30gXM5y/WwPk4TJ8cj8bOECeD91He+
4kX/qLOgMIBpVhjyg0OLowj9VfzXLVFpiArVDYFqGtkBR+3pH2sGQfe518R4Sqw6zPni/++yquD2HPL/xmmR6sYRmaS+4gn8PpLy+pqAqiOwFuumvbax6s6GbBPZHZaJYxCGwCdhKY/5iU8XEtVpL7TYEaDLXxF8FdGSsVAfqfhcdt8w2DM5dG59B+SE2y8g0so63kSK2CvUI7OFtEB7E/y9gbQU6oXA6EjIqPNIg7NueGg5HF9wPGv3yAIA+nECjrYNL2yEQwMzwESlmKzVd0ETNamx21xYQXcPyQAf4b5gZ4fLjHyw5uuEvdM+wWLqbuJyQdozD06ptn8AZL7zMdlJQrPAiR6OOxN5+0LaiD/Z6sVUdUwfYFsDcpg8/A8mHum3GAdIRV4rrusQ/1YoWzLn0WilKIHN9IZn0xkMvDE5vCJpkiz/xmDuA1kv2THHnyyxlwEj6pkicuB1cSV3ux/+2YlBnSspsGRb4c6kQKx7rgSt2CaTl9hXz9KiQRYXf3b0Te0RpNoyILkRCZRdCyDZmjykM4IVrb8rahdMm7nZdlsJIi1r4KiTClUSIznLiE7+Jl+asPaIA7B1L62c9jHb2MfQNzZG9qV/ECbD7EtsEppMOV8T5Ii33oCC/+yvD9QQzyRYXCgQK4TBRMJLkfrIzTfXR42o02cgOzCHXFcufsXSZi9o1UumqnxX1Vtt1GP/ZPyDdpJvAL++70k26GUuYrX+o+eHrR45HED1Y6EPt2O50vRVwT2kcgatfAluqHY7RLGajS6LA5BvTX9BPISiXzvkUZ9cCXUjnPpI/bEd7Be/5BUoBD1It/lrjQPglnWn6DL8Y6t0S8CFOfHx+GviucSXvjSl7W1guHFAqYfIhFuqI2Aa1Cy94EW25ExRqOE0QQJZrkTsvOdqziR2XwBSH2j+hgPuX0q7pEVK8BwBEUiTm+8FTf8IHjX4HSpwvsgUS68X1WByBciQ4l8YgC+IZag42zmdLjvgafnEsOFYV3y/AngMZYHA5jacb0m8In2lAf2oXInngSdElfsqpbFVULrpSZdBoGQ44YCY1MA67yHeiUF3HR/RqGvoZSkStw8S5Tvpm5wXGbj/ThqXew0ivfVlWlpiy/tpEwebqM0bFgKYP8MB17DUb8OoSgKnx2kA8zor1DRHxJ5OHgHqPlCQm0UlCrny6/bO+iWF6Vu5SuYY0ylJnqFQPyKzQsjPIpxcSJN84ANmCBEpU8VkT5gorqsPa0wzfbDZh724LNHSokzVJr1UaXQOybb6jeeMkzsr/OTwTUPFf4BzdPvX+NjKLWS6NH8UT1HIyUOuAupgwzjsuNmUrWjh4IoPoGMH+umeTX1qrjCCBpH453LtWNvi0lwNNswamvA6j48oiHIjWZfXGaV5qvoabbFO/2PQKALjwhChssZbBvjUHn2ZvbT6ZpBimaE/ZbuDfEK+n++uNKNTfIR2caLL7oUnDTvY3w2lisw4ZKmLXyXMxrFuhy4/XmprSxatNCQm+XNLA1z31gzfALnnhUx+o/efmWm65xBvj7m7PniZAE4OolzJHACFqv0ZMk+TE1RXFMYBHRa3yhPmRPdy5XKTKr6HA6G2nNIHjRvCKYJsi2xcOP3deaVA5iDE4C37XeonwS7b0LeX8fFAPZoGPim2pw9VFnjc+FtM4Ep7N27xDxyhuRRSVpHl/5aClzPkahl+A+uGWDK3S7CIh6Q/fSSyXcwAhnP9gmLFcOtjzgCbxXjdvSiQjWkROBzY84vbGwCK2PFF0MP0jIsa0eEKqNpiKKlC8f6VOblNAMj9OmxU6KOa+ptSPZwjQl2rRe60mAwStGEf+lmk+MOxoJsPWsQwXownvrES1gnMeTlx7Sw2EFPPQg94BUipXMmB0uFLQ5F69TymrtsGUz8+V1ay9BxIoQPNuMjU98CXV7Mwe9+OwlYdjHM40TLRc0Z/j+KvP0jaT8laMJsM4yq4+lYimtIuzN2YwPpOK0DW0RVwQosllHibj9Z0TeMFXAXAALzj0iye3BJp/xQ+y+riZEMThYjTU1dP35TCslk+D2hnWX8CHx87BKPt3WMdctULA1WwPr8hxswaKnlbSq4Nar/WuWmPVRguiKwk0X+JkoPYIyObi6hHisaBSdV0pPTWDaqUIH8idbccPFE7ZQu81yv2PrCgVfkuN1vVxoNw6A39qbZ3ExjWnLR7bUVbSDrrQOaET6yx3sGlHSPfNDc9qnIiparIlbj7fzmyW3fL8kvuJtLj20Xe7KXlk8tNT7fp2tGzWVH83NzVrDqCO+nDEoXIMNf/AweEkPw1Tfk7OrpjrPXhaY3VlDm3jHBSFfhtwpIE/22kJPz9vvbTFdci5ne3F7kX9nrvg1lDu6Euc9tTUlpL8vEeENEjd14I2JyJR6+zCCIIleC1U6b/FCFzfiBmuudTxXoWdNQ+VDvQkmrYNS/0jW+gaLZUTdlUOmDw3TSGsQTevzavMI/Tu7l96AwNCtSrptxYQLrc7COk85Dgs1dk0ciamNqLyBRgvGCU0WUd4TN1hryW30Tjy/tOKyufhbP7eBcsFiIBZWX2/Dq0bCiTVddxA2ZzckPkaqwaYLw4TbQIo2cb5Fy7LGPbJUmznvm6EXJibp8QEo6fNdp5yhXxKpOspFcd+jRYJOHCa/iPPzOm61kdNIYfM9NQBXfunYEWqtVUlvkg2Ren1FclrhxGPLsBUE+PPpSwFKcbOMNGpuBsrd8VXrhIexpKvDZremb+Cpb6maE74ucYY3TOL0saBj9HcNDIXYJoSM8Ts133ydodYnH6MudVCCVY4MUV7qR9L0ynLEorSaTuq1xuamj5Rh1ad3aa0ZmaodbXhWY1K64X7+7A1yOhWX1iVHeF+xD/eEgWrw8lcwPD3DpTjyJlP3WDF29nGKTXJxuT8WWEe7wrAks6oT4u65RxmTY5T0w1lLOarfMtXOlBGISfF4eWlkHnPpDHm+89K6OgWXExf7kRCpghkj3X2rBs5SmBXpM34uV2OjVAiDkDCAw3KkR7d+Zth91XyUnq7arRT0kI+7HjogFZrjBQ0Oj/vLKmsf4rLGxj+tTRt53HTHJuQVvYv1QlKGuI1VxSBjiWhMztR2OdknxFTBD106AFlkFRdhJOw4g96u9GN19VI5LcxuqAQXKiX14oH1lQWFhFOjAvXniRBf+eQ/ueztxIlvhCbMEkmINVfgmApwrJ0ekqh90poNc6MUnPwt/C3MmSnVbcOJwbkWbcM6//jcGnCjqCAH34hfLaCidMSOwSnrRadotxO7nVKLWJ1duoiQUjfL97ZsB5GNu6sa0LMcE8He/dZ76TBR4+ANhZ9XtYOqNiXCO5VZ1rasUMZmtwN9pv4qCiPgUrEFVDTjo8vVIoQemBY7DGO0IhMq51dMd2T9SAyJehgJs+SIDfa1kHUibi4VRUZDn+aBOguy7Ji1QkggwzDwgEC4XwJFT3k0SIpZSZ0tWB1h1V4WcH82U/uAKGdOlvSY2hIdyzlYThqPAx+rgUNod6EsG6lnyRBrlatLwtAaQ/N4DXAYtjeXS2YqnC0oM/Ry+
/henpQig4uQNg/z7cuJe9QnWeNkOnvPL0DDDZpxKtl/Q6GunT302eVvI+nHA++gNhUqsoVwakOTmm5d3xG2S0MCUSAu6cxFUuCEwb62LLAJKe5vPdBImmQLjdWQOc3SbNzmMzY6znrN39y44Jelr5/XJ/JR2PFYhTF2X4J0UcwGxYTAxKgYw1s8fYU8shG4oH73n2LoyCQCzHdzv1M2d376Ji4Jd4GFWBqKJpYjjy0JZ4A+RgaUq7Ss3PA8h75unkKwLCkD2d08I4fW3RsOeW3bYMB4grtDresAwGjY5NQ3h6SbYnHEhUqKefRgVxzEyw/csyxOp39LevF0jkAbW0OxDGl2qG/B12GUsMwy8hHd7nw49gzZZ99IZ/kaxLlQamuZ+4JrGGIzCrscgZoWhfmRqSP5aAEUxYkAbBx9rhSNwizsa8pO+TAr+k6PbzXiRlE6MpwIPKq+iYVoVV2E3IzzDKe/3YM9dIw+O2ANCPNwu/D7U3QimyhRXySIemKxKnMfr8eLoyNLsdXzfhLWttR/qmhNq4D56EL9dXDEOY1KKr4hSRyMCjY33+q5p2aNWEJLX4CaadMi7rBWaXVEN/jDWwRlSVjEDTXmTcOu7ovF6W+7aiyjEb3qvpyumQrrpYBEAuyb5q6HsQtNq3lOim39XdxumEehV25qHrdKFTpVWDpNx6VV6QJtvVlVmkFaQqnDSMWqkaxgfixr0RdQEtMHXrCfk2Gst1o/yYENr5AFFqkUJpJHcyLNeELIFSWodo/DBShZ7dfIUYOdrwOYx2+UNP1tH2qy1VqZCjbVQyPc5uA2EzzDJ6bsOnucYmyPzTdrGSDWpQguwnKglEjVHKMq9Ma0/r55rlnzLf6OSh4QEfNNKpmcxGXULSy1gjZ4gjAjjgIsjFsVWMRLrflHOuNn8111DM8YKNJiOaYZIHYSHRdisBitU82mGZ5nmwTGr4V7yvKJ0VN8cCty0XkBXQt5R0kVFVJs4To9XK+KKP50R5Wks9BcDaamDxBPpbQ+c/uT08/rVBn9UOWy7UPqEF4xcAHH6yjMVXlEvq6JnXZ2rMEGQDCoq3wV4GL1Go1QieFnsFvuHXmOqLjiyAGUVUhqs0b6pa3UCjFvhitAA78qJuAQrJmyGZ8hRYLWaALx3ZFsEbHrp0bI+hUhj6K+PKVwjzFIXn+vuOa4D0VcZh5g620gOWVIWJjMi4dTVKBoEJfbXct3sHrUpidX5UKpBkEN5K0EpGYtUZLs0bFoQHWLNt/Q02XzPd0jg52HqejWaWpYn6byeXc3nwZ8+ZW+HAozshHinLTNYBh+4bcuJspJlaRXquhf9sBtcHDuDsK/4hHB4G7+yPa371jtaMt9aJ4nTaBsbJ6fXd/ji3MV8opCuA15d8AckTCECPkwpaGdSc3GWVtZwaFTNnp3u7LB+RE/YNTKVfQLuIqqWdcSK3ISfugZVkR6r4Wafl3Yo3gSINzyHwKXfHzbBBiX4BK5AmnTSLcDIw/04iCNVYxqhTuih1x7nkLQL0GzvIGLShHI1Uw8aRngVc6oOpiyZly/dCfUcEX+JvrMLD+Sr8KEzJZx6IPVBkAO8lQWCFQaT4SDL/15456moujC2QMLN5PC9zdGI/pkfHiaE5IUQTmgGYayS+RZLBt4vHnfGSzuy4Ge6dsp4Cp4LWDE6WDCWw4b4pQMhRR01FqPmAbkECng33m+Zh3d04gmJqPwWld5TKHVypClovWDtIaSHb1cxoQ1BTNH+QtvgTmCe8jYqefrjT6t0D4/rPY86RYCbM/LfPDbnvFgG0v4MII+caNGbO9b2qwY+/hG7K040zOAnqsvLtSDk7SuHNla4AXdarImShXJn6AvVU1la+y1JXUjgwJ8QCO68FqBTgl0QCwGTr7JQl1HNInp9Aour8o2UsgjxzCZb0Dk27bAb03kPI8vw9kxyA769tCLz4PhB3Oj0HwPTTUlhUvd0ZZRq9crsSJttdJRbaCovDXWs9dsVtZU5S0LXiP91rVvrR8oulxVmhYSnRS8M83yzAK99hug2EPB6exF2kNI3hq6xGdbeE4Xnl34QxBhe+AvnlIDpW69jkDHGQpZMLYRHPjEBaSBkYhgRBXKXs993ErnBPHwTRo+3f6bStRqN/BgiEYxcRX6PhRfSwFHo4PKMkdnSlX/qBpN+ubDclJdXzMtJ76pip24badHyn8UQZcfyoVuAcmO2RPbPhC6PXJ2CL6qUS1qYRp77hKk+Ak+i2AhEmKRsjtzAD1PgdtKZsgf/f70G4yfaoyOZoYSLN+eDOGojrQYdHgEDwHmHn77wjyz9NDIvvrKTzqjvDEQ95Ph88nFOiIkqEOpqC9d7wO2MGd0gO8ZMZjJJA846B5lGsyyvIDSKbrK5yRKlkGVWDPdImXg51ceapx3pA+PR9bypY6Zprh3OjaIiC7k6KzVC5znWC2dG9M4T4AS96D1TPEgd3Sd1bz4gsl5nG5xjiByYd+AzwdrrzfOug9xNW/d0k0E5A/qSAUmFXOneVTuJX7iR/289f7pJPoXyXNoCXQ/rMd6Sy35kdfy9J8InHbAx/leWvxTydVebmyFJHUzzqqisovrr4JlGapTk6s83bhO5gPVEfs0PhEzxd9z1YTjGf5UENDElgLG4vD7FK6K5efxYEexEvAtmCSV5wr7TL+A1PAf63hC/fhJP6YYjIxaPQbnKGLj5CQKao46q3YmvhT54sXpA4BAKiJ+ZMDm2kjZ98gdqqx/f6jmamRvrNyFezRGWLwlD7wKHsH6UqbboT5FaNM+p30cWwyMh8xPdmfX983ilFt64PzkgLqghlqyBzVn+oeHv2X5sSiJve7MurXjEV26LDYfDWKqOHd59HgipFv2l3tRDGU86ebV0VA99XbHhp1FaOomHENVdpiwREKZp9zstJ9rNF/+M1LBFy6a661OP/zu/HgBb27XfL5JonrrYhyjqcyxAdnEj/KAyDLDep2iK4Ne9AKZb7qTNWdQxtZWAhfFH4Tago9AXpY2Jvi9LhEyellyE76zCE3GNOgTMXlldybGrJnl0xPioAkyvajzZ2Ddus5hnT2/XFMci6k8MrkVlavD4/Wwj8OZUp3Ew7f+e/AqeqgkEL00eO9P0cwWez2Nhh2o+ZQBSlMkVrIsr+yWjoUy3C45PHC5u9MshxNAIY575Qn5rnTvqsp+EfAEzDJj9oHDUWtUV/CEJXfV06vikkN6Xidx+g9MovHnCW9ZpXWYE37bUCLqKz+tN76t8qrhoq1jSEK1haZHy2qIDRyvo+2BkFmdLTKn7PVIus8B+H5m6hLn0zQSlW8yfN369XgmL1LCTTxCQOSDqBrpcuMrVY+L8viS0RVGjG/ZyeWPH3cSTLeI9nlYwEvoOYBbxTx0IaKktFTFgi+KcAR8mkThsA4f0/HubhR96ZL04LsqGdp3bB/orfaesYl852TVFGFqhmJn7MlSLMSyw5InE3VcwaHqF5NsMDEc9+vN/PKqU5x73xyGJT8a7orbFkxhS64NqwMJ61sze2JE3N2WSqmwp73hWiYqJKdOMztGTf6rKuIL
kR68jn7xVEw7b+yToxP9LqGis2kriOlHna2VegqRh6blwwwxy2fSsxuYVW33VzQZ7OqK374aVjf7ppSJJ2d0p2eO0OmI7jCgV5w7Gd/PEe2PorrvDHyLlCHMfvi76bHoTThSYdLzBuiDlhs9RREgOnEvS1u0jnR5Bv16bxBW4BvT2qVpIoNRbZY6tmZFW7iyhsN+a4GvvK9AXdzwY7YKHkCC0lI446u4XtcLY47LSk9NE0+z2iE++hyXNNWraSY584EPKow/DLZxtDm5ZqJT6AD88JtGERIdpwikkxqCHFuVd70/pnlrP4r0ecIEnI+yrH5BOAfTlMXQ0tz349xZCSjANHhsga7OAszW0dytp/yoYUbPUuhnHYMQxrHwTpFO0aAx6UU9Cuo1+gTn4opvcnU/GiS/MBvB0v3O0LOM0NzujY2NAA5mY7FhCU/xnku0kTuzrqKit8Yjwb7g4Q6eWiioinFifZ6LDcZmR85XtHfauJA9rdm46wC8j5FTKCWkG4If4KDjKkQ1xR5R7aBHRnVbUAURSPC99ZzzGV5fLJ3pjrF+qOZ732vQO50BHrGliWucHI3aA0/6SXCTqs0G1wpyqH2erBANXaH0uz5CcO/RDk6zdwp2sDDMXdH40kxiEL/vopUfTdiX3ZTE3LenRvmRYBQ7JUBB4jzD/poqldrRZsMnIIJdpxertX5xxexvnL2swMD9FyWBiinhxOHFqk6FfTEBu9+IJ47cKwUkjE8QbKtWsRrBLe9qhs/98mhB/9NtFdv28y3xHTShbtYd2wDELSXhQsVygis92Vvz/Tx/R7A6im95mvI368n+kUU104mcHMDpGpr3dxnwgsMNHb6T3s1ESEuL3U7utHT5HKS67hmecrZvYbbjwd881ruEuF2mb5+czDaVQomkoQV5/Z20sv4WVyttZU3aPo+iI1IXCB66i114BQoXrtLm2XBOiti3Xz/qljCNmC0uaLRKB5TAKY/LdNJKElSzzkfJiRueqHovHww9bn2rUztLbPB6yUEDbOT6HQnZ4aRBkA4AJUdt0HU9QrFKtC+KLqhWlZxcneId2TF9UPvwvKMrmh4xZOiCopYcBuckyqP5S6hu7JE65bfINIrL1JZjqE3hFWXu4248IIhqmSt2cOLrxkP6zRVzaC6y4we/SDcDuZoxmwRK4N2OCUsvDFVTZUX3kGvc4Aiz81ApwJ3jMLTmVMaUgfe1Q2OsQnZBEBYcBpKNeKsAUj0qnwxEpJqHAS72XMlwnCyb8YWK/dXauEGYk6gPhG8JAFKsQwHdqTnQ4R8+I+UiC+4ROlcDdwvomB+PD6dSeryKLmiTz2OWEUnwwaRJyGBGvR6aH2p/JCXMkR73QnoOlvP80qskU49dwneqN4RYSGkvd77YHpTpGaRzAu0egvPol8aOtBD1soBjYmX+JpDDuJ8bBpWtyy8Z94u3T91c1Hse3klrKvpoNR3fM/ukh6qscMEpD2MVJV4mhIlgQyv0a8mlJn1MFwmjN+GaBGl8Y32gIJbNpY7IGmHN9lBj+qXsb8Z3yYIuYCLLdUf9VH1FXrRvqnf+7U5n4qAjAyooOw0Blk2cDMNlDSuEqvcHPu3FKyfQmY6wqvWBLjUBhVy5FmZnjjE8G2lvW9K+wTsvm74uYrENny6NyNZUARtIoOiHNr5zeSLgcW6ItXK29u3qTPUjQh4xK08dYLXCBKoToFM1t40r1tlxzul9OlBfqQfjzwbk8+PrzhykZAIVZnIm73wQv4/NazFV8g4WTvo+JSKE6YhHkm7bSWirkqNofdkSBH9MIwwjV6fInaqF2WPilV3sqHAwyoLpyZbBRQ79mYMAL41kZYZ23VDyaJNKN7Z5+lMqzFzPdcQqdJveUsYB2ew56LkOryjMl+1CIxL5FdEKgtSK1c5iRKwQscF92yf2YKka4czX4034LzEe1hBvlc+HEehz2FP94Om9OaKcfHQi8Vl6pVSDhyoEzDqPLerEn8+9dIFHP3Zgo1MRWtudt3F2O0/n0MXRK/40urTjiy8dSJHUpkq8/yrPV0j63aGgqYCdbMIwNJdcXcxL0VK+03IafZeXPXrc93HCxzFAbFe7s1qm0C3kVckAhqCKWRALd6KxfqlHCUGJd3H2PPoy1ZMP0h3cxXq5paZsYqNb/fLJMGjf1vP81nbsXsb62d2MqNO+yCbqe+pjX2WSakfnZOPDpeKe0MiGez6KkydfewqMc29mY11m2KCUXyjW36xcaZgSX8foLMMJrLoM5e1WQuBnaBD9pV0oP5zxrZ0iPMdkXui2Gs2KMukv+aeyc9k+6O0nyYf+eG3slH07z/GoxZ9IaOdmDtzGCft5UwIWNTePOz7y7N2HNB4VXPws90Rr4+uH9svnjbWh+HZkA9O2xW2XG5dsXTn+Bu8iF2X/isM1G+JtsjlvwtPBGrsBCGGeCnIeH7aQRGd6iMwm8CVX2rQ8eZ/Mr9VJfxeWwPzCTLIX5/GtuM74xnF5BcxaUENpHBjuJm6ahzCCjMXFLBetIvL43HnD13NaNpoRkcVsEPGJvq47FwfTv4QIcSBRimXoTN4HRIJP7wglaNJxJNLytY9hiKwu7rjiLSQneiXH0UWXxU+C8KRKh6teP7iTGphat1VDONybSM3chFypOAOoj/m6GTncGlpEXY8Pb/G7sN9NJjwuSM/oji/BbNpGtnCiHLJ0Jev7kqulsaRKULpAfINoMdT0eVSWf9pd+/BBXVUQ6glNvu5qHgpOvM1rhpOG0cpINTArk0ED0BJNkBAXD8F4MoAbzT0BYb6lUTyzVMBLi5FL7U74kdyZcHcttIfi8lb1v+56G3vw5W4Sw0+qCLgk4Dq4dWcCgiPlDpMgEAHk48Ar4cqbHpJIupdRnz5AIZy4HssOFGaynqf5Y/O3bZF+d/ptP30wdxy0rnLnFCmdOL0Wz6Vn5VpevyE37ICK9EPKOF+RsftGr5cNjMUrWhBOg2GzeWesaXhX95chCQ8WuAiDILcBsfdUfmfr8Kj0oEilY1rhOp2VXhyEyRIq2/OcVovBj8z0sxL4jjPPj4hdE+yE7Kr5EAaKYsRt7+5vNLtnrxoHvhhmNPUcggDZAA8PlbEzLqCH+pzfLPic9e0BHp28ElGo9cNRH0UUqhpSWIQR+46iwAHYj1XjOR5TxNO+BLwEcrTQfmLwnYCZsbbuOi/xvrFlVcc4N2kTLFQaOxqLAK9Gjk112UW0CDN6zxFMsoqJGH4QnSdL7T5DJD9inPtAGQs679IDaI+xUHyXsvHhngy/4u4HUPx+YvGpcK3YDSGhJZFCaiYu06p5jMtZfjKLBBzOOE2f9EMrLYCDB5S+NJtsPPJ847cpr9Sb8SISBZjnWkd7W9+gHSDCnvZUIoMIwgiw+rh2SYOxFRX4avvJAJ4bA1BQYgS7WILvqoRlI72EudIMYxz5y9MjUbJzn9iTUs+tHHMGLdrbQ55sznHWQHxSVx48XIQpZ9QVgrOnWnQW9+0idIoNVD+3IbyHfsX06SG1VVe+Y5KzVxnfectWxLmj4/duiPRzvfVrgcZ0gN7gIMD
0WgZOrZr2LQlmNo43vAar9PZtUHaUWtQnFL2kmomyc+7Ud6lVplix+OHCTpiLVzw21Q4/9NliGjDCa+gKjxUcIY3ew33CjOQWjz45EkvoOwPMR/fYA3Skc6Zihmx9uJaGSsoa+W48bjWhV6T+a4nh0ga857l+lPqd5UpTp/mVWQfaMgVcTwDbh700KYVU5ia/1aYTGGQeY7CwQ0PMJBGWza9u2jzN7qDYlw24bjIHtjV5NKnhobMQBe+twPCW3ozSX7t9ujeDvK+ImcySPJoisDUjao/+ZJI52rwRK/0q0F11As95Hp/YDHQ9EtW78E6ikvG2i18z/in6Jm0ODeQEn+z9s6VD+Nnh8TICLIWgevikYmla4PBrzE6Epd8V3+8aKvMWUVjvpNZm6azAdDNuzk8YsPCjhImOD/a9KPr7Q4pA4abV1q/NA8gUQEAlU8N4ARhup97B1z7RT+d0xwtDctRpkVTWHgx6Er0zVrdlSOaj0xYM1ygoXSV7DogdWOfcLsUKPD97vdNEenxpd5yMGL9zCqybbZLaZzyeuMP5oYMAzv4LYNQS/whO/MUsmj7RiP5edhO8piAIeh0MS9zI82ftUo/wLBx1+ea/r1xktN2EVkbgDZaQ5CkF5p3/FoMklXvr2xEk25UvHML9BPeJZz3vqzCFE0kj+iPQFffzpPPPck+aLXS6HakByUJwYL71BQwP6SSFgXZrxvuQrUkW31mcWmFKoWp3Uvbf0izcKF8AfHyAq/yMt1PvcJPOtjz0xAv4LmTV4YACIrbI48F3Uzu9uwiZk0pDkphwBGaE44evJE0hnXgGaMdpi2iEUAeb6fUHUeQ8XHuCykg8ZiMrXouJRSrSqZ48xE54xMAWEXNXbA1arbI5CinaGEv2V2akWBJuxHdRGCZe3L6vizh2OX5bpmk1OQ3VaRGyDp1rB3zuJhYa9ul79S1WyAg3JJ/7amvpyTpgwoQB3Cu7bbO/nM7gqSEQswQmIy3rBKRVWBAFCfJlNKm95746CCScwmtzJSI/3LPOabkx29s1aD1KfFhBGmx1yjvAZV6SJUisIhquItPao2I07DJkky2xC+aJeLifMN5v37oRcSxHL6FNM2m7v6fCPBIWdKahqTDmwnGvIVqVqSR7hVorwaJ+bju9dNg4FWoiG+52LTfmogG73u81e0NenH+zzSMxQuwqPPtZC8a93DSAW/YGJIV67E6bUoJUZiGw2iG926Jal8sGzAT5uZ2ZKBDILtFVSfTrD7t0jXN8f1YR86Nc81PrxR6I31kmxu+KUgKmwj601V4PAHM56Vrrpumrxv35uWzj1TlPjiS5FptRyOTiyMhjiVPLS2OKO+h048lvBppvt5lvL5KVd8042OJ+f4vbfI62vexvJcz1alRoQefRaVDoJpVqK1HSohD3AdR+olvxA7R+6Q0xD1P+fOeODl00Hy2Uy63aCFpbQNPZd0nA53M8voqp0OZkxoqL1/yu4iZCkZHc1IiVNx8RUZfRqhwth9nJXLxhQ/1OGGmEpqT15t0/D3Q9pty3GyYwkFyLPTAtazTKt5IvxvktTJ2K9C3oDrfUh+mTLFuV2KSB58fcbHeBNTvXiTrDouvcoXsoxUpjequbGiW7UTczGRzy1+t8mgq3r0PlAHc4RcSR/Riq5rDMZUDbODIf3uLbT5wr23gMGm5Kp1dinrO0S7AWJaUfvfGnCeq7e1d3Qq+mevQLTm2G2BS3r96Mcr9FJn5KUjmpOaD8WSDUvPOOuhM/iPIYFeyrkOi5iq1dn2CQFuFr4aSlXoJKq3V9q3XeYUJ/lniqqPuzojsE9vYOYsr9ipK4KZiZJ24wO23D8of8LaTxISL4AYJiT+AagR0g4hHwFjTNDfalMxSD4zz6aYejdxcZ7Qz3B4x/rbTVKyiuQCequLE/zglw7MzxPH2sawhwP6kVR8VOnbrTni69mNzWyT3LOFyKvIuyWnsAbxx51zACPI0TKJdCp7URynVr7dt7ScxrYMd9V74J6OTOXZOwcgIg9WzfRZwGoJn63aHIpkWL5Dd3sxWcW72pssILbsHuE6fTQgD4Hp7Kq39EX6e3rS3yqFPyBe1y1TqN8upTS9BRZ00clmPPZxHb0rsaoI0ICZfqN64WQe6QlRIO73ZgPntiRkSE3Z5sE7EqbzZpJJ+8CNXqoNdEAa2f8lhHB7oe5TmBH03XPqNUHru1ionRCsVPRe39UWzFPvXkWxyAM0wn34Ls+m4M46OePRwmqrqnGy/MlFBjEk5uLnHlzJ4FO5JqvpztvqcSoWNPAwIQjyDUe4Nx0TquixlZxzKaSqRor5k6L87xzWOjnQ43Bfi1OjrcXSXzDW8Tk67lAbbGRTeex5FieACEvd1NXHpFGLWb4TDIiMJJb9nVhfd3MyxPoSy6wSv7uqwhPgjqfeVNtF4Gaw/P15BvSvkfk0JKAvxsof55S2Ef1dGinZB7l2WSpt+asyTJ2n3DjTLOEcS653hWo6/U9vzEofhEaNfX6jlt+CLc5Kt0ufHoidF5NZEIwjxLSaNogsa7r5jiOt5tna/VM8y/7Q9r/B0AIALFQeTn5593g4MQ/HeS/MdBBP6XTf6/7x//4w7439v+d3b4g+D/wZvF3/nk37eLG1BWBHQKBgFVoKpxT1i2uxvmY6AQ9A7NeytbPqEztrwmAo6UvRmO6xMkFVIs4sZYGJMPO3Asmw9cC4VY8v32Vc3dUlJnnDaLDgAovJIRNFPy73wdS3/Ys+HOrZkNUGqUczS9CRxwOQBtMTHhpDM2yVHfyI1ba4I2nwibYXgfnlQBB3ux588HL6a19yRW/CDVNceIhuO1RpEwSG5gr0i1PMqXvoAs4Fsp4c38HlqMvH+ppUaGPBdGsnJVdFELyJ0377HDR1ElZlQUprLpoPezgUdM7Hsl6pdfsEtXlCivNcuyYpao/aNea8Zby8vmfrY8jO4QNhA4XB8V+qUYuLzTIm4AR7ujNI0riQ4QOSGfp7ZvwMys4oc2ou07kewO1WJeCDXm9BB6EhmbUQdepTR0A6KwEYc0gDNeNuWPJ6KzgplcLwm6JfnlwMPyVvK6xQ9o3BuayZ6tvddsRrQVIE1Kme1T6o7GK7LVG/y6V4yuDz1iYKAIl+661Pjzs5Jf7TCaYebRW+4bMSNB8b4w8rxftVVcQDe8vxTvOpQRFJR4dW+76isZOWz5LW8wBDpP7Nx8+ka8XSqJMednMQoluBXLN4oupvEAP4pS44l86KQ57KovmYeYNBvcqXFWJMi3/m4wP9qe4vtozp0SoGjNKA74Ec7l3aLRhmo3R/VaGDy0bF64Qz3TljhJHudoSTisdGJYgazLXvL9Wt/PNfcREhpkzamoC1XXa1GR+qbCrUnNuID01w2nrRpHEF+K0gbhdQfwRTsdRaZkgz0kgIJ3UZ7cdylLlCkv+YTqK8gi1kyz+qt+O7Gaf+go05tlpaXxOwntTNZmpPH8pQ3pZLnmbA+Nt33K6kkhlC0USXpPx9Fee+z7mWJ8QQZZYV+aJqpvjWP4hrnfsU+yaBIWjd+FoWBtE5sO+egcZY
lHUtjPqoCiKlbIo5cdh+AWN0P0YYA173xJtoCOluAruVNaasAXcJjjzE0ANTV5SIVM79w9DKvlmfc0yeJoyMcNVabyRd5oDF4pdmUZdUqAdH+8v+0Ne3OGXuO4B8jSMiECauEOvWaUyUbM0tEmYguSv36E0SrRJ7cpJgcsWG9aylBV2F5eYWojm4sx0qmANANRnDpSF2eS+G7mc6i1PctbOsHZdT6XIn2YE5T39HKDFoe83v/1+fL+xHuIkJR+wNRXpeixr8VpItNTk87gw9q6xJ+3d9rZ+4eH+Hb2CwSBtpdyVN74LnjIv0orUB1GNOI7XvobNeuowoY86iAJmykGF9j2k0kKHFCom2fGm4IL8kc9zo/0vXnH5jmOlXS40K4t4Y9lF9wsP6N81XISLU/UfOueQfIje/PV0x39M4kOrXwK4jtewfeuCwxbs/WHCQ1tsUEJqgtndr+12UxjvvoJPLE3C/Iil5Vguznk3SNDR6Lsz9+Jjq1vZ0ZvkbAn1bcEZkqYOA8+PXn5ZNouvEB6k0NriW89eG5hBoodYii58mvLGB2W2vu3QHjmakpMedJ+StxMZ1+c6NtqxvKOPsQJfSkxivWjvw2vzATNBLK+JXXBdMQi9Od7dl/ByQSrwFdsrLHokS7Wu9p2D/f0m1OGAmj5PG86Ti1A221mdheq6tsJWEadsw+A6p94t4UaZSVzPPd4bubPUxaAnaVQi6WqNwNeHO6vKUO/3E2zW+HUDco7eTckcgEBHBjFTzXnfpvltL/wa2+H/p7NYfJ1sXV9pL0r1CxVlxhX/pnYz2z+cwMovfSToNOeAtU8CbGIQicXTKGYtxg9W7UlNbW+B3lGKo5eDfobUhFTAN5KW6Exkkqrc4YXM6/pz580UJ90o5GLCbMpGun0fjIlHvDvsul8jdnllrtdRzdd3AlafiQ6w/oaDOL0Na5hlbAUdq90UJYyg10oZZopYQMrD/pANqlfWQei5CaW9m6X+M3RaR4u7zuZpqTTquKQX3Qbv+Rx3esYG4WUNuxYoW8Nyr0P9r6hiHPlq4iMz8d31K+fxrD/dV5XaqE4LrVvixrhbgIVk8Mpyt6wbnSlkU8oOsQOOURwB+mXSNx+TKBvJsMkYIL6FNXI9Sp3x5Qyyqb9e8rkOJvgMthQVL3VDhqEqvBsTUIbd319Wtu3AJ2ge7mS/8HeWyxbrnbZYk/jboUYmmJmVk9LzIxPf6V9fpehc21H2VURrmxkZGxQan0w5xhjEvnyx/L4Gb1qISho+9wNzX6yREkOlbgkQe3LgQ3f431la68YDthBmSapVuTW923/nu+q2UkxsRjArZFS1i/ea8l4nfUStnehLutf9bi1tYP243Xaub/2Az4Nh0PS/CoouwsRhg/jpjI9xjrddr3EI8eC9Ng0k1E5QfgManZTCr5Jib5C4XrvvjgFKi/YvOM+TR8mEiiv2xnsoLPUbB3yuqhjYKzv0PI7Jgl7TdBjcD591XN8S7GOHLAG+LPOSpTRQp2IGKUkTaUo54RNpwSnUhXyY4g8qJkCwXzhWSQ19Tmb82AuwsZ5nOekVnalVkaM6/xENAqLZ0aubiWlEmlAaYYknnRZPRjDBn6nZP1QznMuweA4BSwexzFXFIPYLmG5PGR9NLrW2KnvYzKTL99J5qZciXdldm3+zixUqZOx+85I2oJsXAiKo/i1L3TF0bWoV9GJWCK00LFW5RDpZxxdVBB1r0QTOf4ABNqpGIsK8xfxkcxT6pWZq1SWgYd5lmc08Bz0l8+bW8+zwYwQmyy2HboC05mSOSswI1G0yzZuyJ4gjwinJhQhMBgEOe7EETqhBdhczES16kgSo3YFNHdI36JPo+lmIq4K6/rfVYd5iSIqQawki6rFM+U6B+pMjmASBxaz3NAyBKCyLCUpm3Mbr+U6cFZmqYY7f+5SIohz/703fRK5xUCOx0y3wG6Jd6Qz4oTtSDLqH3UzMUpcTWDJpTPVCbKSINO+PhN76+9elsP43HiPvkbiSzBKSowf8PGsZyqqu5oJZ8GyOmwbG0+ADhjyJriotOGCdDffK78E0PcWP5DFHEp2CNkAPI/pnFKlf/ndgnMpCLKmZOx7Xys5ntTkHLZeLN+0rhO2H++lKIQ4FwAvt/TTglAU33n+IqVaJxlWYvhpr759HrdPc8w/yozQvwvvI5ZE1G4wkmdFubldMv0ZVRBmXRKHdPIfH+JV1rTZwlXvfiVwilRGaUJgqVccNgl2j+GnW9wbgfkb737QyEE50+P8K99HcRLLUpfPcc9aoxks1xdt2r/EusElYpeyGYHXA4S7IqvV9azI+wauP8zc1K/zBXPj0TZPjKkvdgxdY1aQ37cR8uTz4tV5At8XmsMrcQYNFIHALtTy97kTJrLYUGtmvIKi7Yyw79eFqA/Er9kiLUUTHrALXScFStaXV/DJXnggL2gfKQZTSZmnZ7ySBTrRTTZC4fot0FT1WCgFJKtaDw6YEcaco5Tj4smZBsaE0/YXSzK/6O0FwcxFrU4HWmW/fBxBg/KUVObEmdjSUe34i3bTaAqb+/4Ay8tHQokEFiNPP31yxYd2PJDvPm2xNLon2Io7fFZKGU/45JrWeKRabvV6+jOtWRj0zAM6hauBud94CgAti84B+FRg+3GfWCjxsbGmOfoyaXJR+2c7fQC5MBki+4e/vIpnIUHu4xJmtIBwN8dj+N3Vqxww9kKTFUhIJESVtg1Jy6U8U8q+cWWsSPA7RyIvxc1GOmDvNTHm4j8ovlJYaF3hmvQAitVYoM/AqYUim5QkQy/dL9EQlCai0QG5bpWbZbmXb+qq2n2CTtOwtKhIJ8c5wa/J1Zft6Cjhyz9l8HV2k72EmEG6GXdZ5Bu+sc8LcWmusMrRSiuIAIZuE0gWJYa4KBmz1aqkvDLQWmv4eA0my12L0yuZ+GQFs03VvTjWX0JH4uRk8S2OzkUw5v+SHf3Qzf4eRmZHbDNbzAsx7UffbwG2iTVqfnGfS3zlDaf73lHwfZmLeRdzxnnZD9JaeoxzTFyFNihJg/7QigB+FbP8cxRZkAcvfExPLVnPNIFQeSNC91cIpf/+aYH3JxUZI1KWNMQKrV6muooXGltT+zT0Y3dKZ3uOFdQp/RIUDlS7gJtLDMgSN7wc/3CMNYg+sDKsaY72dEjG7nn1PGaLkBiDcmNtot+LS7QwiARtO0Mu0Z9Yyt1ituTna29oAyWHGQFg8tBeC2yg1z7M5Zhgj0MkTe1zIOpSAfo68ziUqwDxhc16t/oK+xvjxmD48jobWR2kKVvJTYBcNsYmSJCiiHGN7X4dvjROIWxGrOvY/pBMszEPtf6Jr/CH0QMom7D7t+kBl+eM3vZT0h/K5sfo+2m95Gl/50tisS7k8GwybOe1+HeRN8j8h5NF3YcFy29kDqmk5YFUcycf/OpFHtCWn9PPKv9SCJOoaDVeLGLUCzLRi6GJB0H4gBcj/GqWf8mXJfwcInxxLDu3uwR0bq07FSNoARq83Ou1GemvRnrv44IR4HkIlvSumaiIjawgp1lzM7K4nZWL9gmiYB0thTLJFap9U
nYZ31ZC+i5JUvDypPW8h9IKoODF9A7aj1D4kRCO9zDnqn9DXiJJRYOufm68e3KC0mHdVcaWe32lCp4DIhQsfMI3zrtRraErK7R2zEyvkRXaWDwRVjseK8O12JS9lGYqRGBubQ3DW7QmmUiFx3ZeLrJ9zoUulfDbQO4qmWUS/LqJbkfVbsGwieJ29L+eMRXgJr4z+9aX+ZlcntSsZGl+7zyn6uqyMA8VPwgVCa3ZTHAOepfV9/zdXqyJPvNXa8GHFXf+wq8H/cPPxeYO4Bgj2VUQW+fhWgBByR6YhuI1WVt9gUXUHMG8aBAw2TEjaP5SKe5i+SpvcASk+2IK+ifbckepHivKVDkySjAJEhW6i/BTXZ9TxZ/HUMpZGdvT+T7tpX2loF8UM5CLiFDHKS5Q7Stg4/f1p5Pglz3xs1rTNFnixIfnL8+lUPnyr8GFdlIVKl4/UiuMnItHmnPsAF9D84KtiiGJp1pvgZFyQnzkAgAi9W646ndLbFRIy+fKF/H5ZaDPghuVP+mDfU/NSBpGtQtFKnnxfn9ZsaXd1KkdEaSzU1PQqRcJTfp7TWVZMEmHRB0gXVfFClJMHF/XvsjJQm3ow2rCyJ9/2drK+Dn9Qe0Nv3h/OA8I3BwW4p5YwCOdLyhbJsKT3nHpLndhPF76o138uRvNXo4vqkNTisQknO8mFVhF+XPQWcHWHa7C/cIdPxwbk1/aBoWCjAhHuBriQZEtAfIn4HOfgD/UgJWmKa08aghxdTm2sJZ0G3GeBzuJ1N1lE+g6+kguE5EGDJKmvgbfJULig/VojRJJKIf6MZtWfZuinnIlTNmLCqgogDAjzKO5glYMlIrDjvxCP0aGYsFHNa6OSnGEK9YOURO77dgYAiSWuP93RG8IJv6PojfxX0b1hv5/rHor1L9r3sGvCCkPteEva4BmfbFJjA5q75KxY+0RCJmC45YVLJij6Ma2Wnpyas6GJI0L1cTYPpywMdPTUD69d+UETe5VcXwMV/bwAiMh4KXdjkdqk6o9Lu0krNGfRNcO3TjCLqxfDBYocrxNc3g79u9m5mHxOgqywslhgNDQLOB/MjBQPDTV+UttAHau7XJ3DKonCny4xYTBaJoa2onReu/tb8ZLeJvzl+i3LwOqH13Nh/z1uB1fU2zGO/Gli3ZPHzujmLrI5yura5UiYbW+TwobiF+Iu245RS0Q6TFZCk9D48r1jIM8a2S1S86wH8jFcw5hOWHwAbV0EqtV0zT01JD+HL2mVUWXkURodBrc6Zb6VFHAliBeaFTYXufRiSbIuEr7bvfPu3zSp4tDFhI6WNBoru2JfdQfntEKDClYWBpnc1NJnSS/UHWsy06G+ypbBoQC9OtqqE0GnLOjXl6KmBkUad2ZTp5lcHtQIozlqEtZt7qqW25U+VAcUaO27HpZRQvgdu+H4HjtYxJc6Yfsi1cXGvy4wd0aXoVEW+TV07JyxAZM2zow7iDVTbvosgWQY79UJb4PQkXxKzzOg/sScoOPJsXd+DJ+VsvuqFLEQ1WBlBLFvqw+e3IiZ2ldLWq8DZ9mFu9gPy1VY3LWh8EV+eMN8SJsp2yA+ekyaChjuSO2vN3tZQv75wA95ndwpgH5CbOtUgUAnVn7BQIARGRpaRuN2cQXGFy3YACdF3OQ3wnKHKKVjw2Nix9saXnYERV5kh9xX+roc8CeI7EcOot9HeqtTzb10RGT/0H7D0kdaeL67078FRyx3wp9buDjNI7NCdVFkXMinQaoI3jS12YxpIdWQhjJxddPfb4tvl8/aJeM4WleFlUDqorVl8I10xTbYyseCoW55nayDKTndoBjxgdseGrDo+VaefCovAiqMvGOH0xHhX4zYv6Sz1Vj93WkWbOTPfY56oDh2OILysrlD/wYzq8qxPchMlheVKacPat7xNfPl56TZzKC4Pep+4/L5gVOCc/pJQMBwsONKPt9EoL218zrBcFSNHubtOZJCa+wH6HkDqgWZVq5115I/62QkOPFIq5ZoX4Mi/qdeHWRZv5gIPjBigf3W5A/CRvmrVTMkxCl2B0sv9/8C/b6iuFBcz7hWkgo6AWROXKh6ZM0dyy7N97tuD7newNIoehIFJOZcXATrFdlM0Fd9paiBDlTtMsD/bsSq7l8j/9WHY+/eHFWGEi3F50N+aHpY3n5r9S0eBZvCW8y54WG1/YcPf2Ag14SZKemoBIxg1t/qQ8PCfvjXOsclqmlhedTjmtUf02xENje3YDh9HpS1E0CfEDkH+IFSKKsgrINiAa4zRCMk4VOKSsuLeGfxtbMnYFl3GsGdkswjb+MmAY1skeG69CBLLwfqGllcbEmC9ajwmnJY4JzGLeGXFhs2y74odzpIeCX4udKDLWUaBctQgH8uH4K7oqmr9mr7rjjsxP31xvEywNcj2y6ynT8RBmz4e6baNtfQwJ0SRcH/1t+vd/MJtH4de0SoyCXmKepCm3rKjvh9iFiPf8e87MV6LU6NZaVQF6Z8F95/5oH0k2SXcP8RRxkCS1wLXh+1jU5oTTuDFKbQ/LY9dpMpN25UgYIJh9mD+9Mbe9SZm9GbzlhR/jdDHWOCgdBtc939sBHM5rbcnUio6Xm4qIeBG4b6kuB+HbOTfOrriiacem37bdQK5G7fRzy1MsUFDPg55D5WuDyJ5dzmW//jI3ou7/G0TwxHdsGd/641xbP0cF0YXTGNYqKrf3fvR63QoojBB/xSlHtznNE06W/nsFidm38gqbcD8mrSKa2YQ1ZjADDLmMK02SK3vPM5zfKJ3d7+8VPh+d8iTcthgaNjJrqkcXpONtiIPnwHSZzq3gYfbYFNT/IFISRBBFGXVvrqRVqEHgMx+xfSAfvNhocOnYNlIskfszkPnT1yHJto7N+BhY2jl4eDwZchFM0/IV3BhT5q28kVP/7SCS2uPcpFWMiEd1tCfoKi11lsFBYBEDrW+kscaDO68xvv4gnjSC7M0oMk3X13s9IpK3x68lk/rU9e88vAhaBXXZLic/9TKXm6iqm0c2bjMyD5YheU8WCIA2/TmBMACMkBdYI/AC/ncrxDMuUgqDolG0uO6DlmWfrxZrPBw5BeQUvrKlMy5nOrde4swJZZUIUg0NrOnyN854D29fzhdZ+HZOLxxFbohZRdQpyROR/cYIy6vI5rTYfD7P3hMK7NKeWOoc3hd+aUKmyvH/CoMRMuiIPvshj9Ic+6BwbzlP3vRlxvpiZb8xYPHjR6HssE+7iyQ1mfQCHzk5BMpmE/VkWqKrdhOWmfRlLU4IVheeQ/mWWyu4RCAjMtzJllKLZo1NT0khJNSrgH/8KsyXxWo9ybUvyeO97H3oOq73JA4Cs1kGtd1qIrs/TyOpuusuoN5n3y6/tRGrwXEDxPiLCHCRVgj8nNUm7aBV8j5i+FJAFnnnA0RDALwFjWe9W8rN2wIYdj4/fQ83n9cv61JuiMv/DJwS6BVoRuNiqwM6vTrcXyvgGoC92vraVzp813Qkc9hBsOjc3PKAvmavxLdycPWBzAVgz4L1LpPGkEtA/nL0Hn3xAGsSTkA7BC3TZqjb426fmkBgs1smXBWV5
pdytHmfMP7Y4m1J82C27yg7cEJgu335I4CJUCsyW6B3NBgHLUaBBUe+r0rt8zNjuChyQSRkCntsk8gf7kdjDBECyhV5rI8/oyxdb/VWzEa4kD1PFgKlBgB5rn9iVf+nthV3J1H1q5WxxhxO4twjDoOTZw685OSTKI8+dcLYDSHcnRLZfU0+9zddm/owpbXmCMUY3yVp5TEVIjKRInbPiu3XgSpT79sWkCoE3SdHD5Iux7zv0Vv21YxBIvmTzzkPtqLvAn2/UOFDBp34N0mMWs3x3DQF2ZwwMSK5KA6PCo+oxw2yg6FHRRTdvbRm9bgv+WbxFGwS/rc3djE4pPn7DS7xZSeUexyX7aUl1+1JskeIjf00CFxjTT7uYRWk9S3kjGZY2apVynakWSYjoDcTUrbrMtzA3BKKBB/D1pgFStAF6qLUHqIbhXOp8j5eU/6TXJe4n5lRd40YbkjNJfK2xz8Ves3wOCu6uDS6xwNTxyGNrCzqhjdM74wDD4PNF6QmBwJ9aRVAsEZ9GXMIhXn0aoatKFRm3ox6YbrXdKAatIfVJBUKrXslKZfPTitd3/BFphCD55qJLcqImX/cqCFx9v6cTHiubbIklg8qXtWDu1Rk2sDJs4ZZaEpZfygGCFCa0rezXqB0YSnk0AVsYO4TltEOCeX/4QynpZxN4yFwCp2H2trF4JCp5kHxBb+eccYp+SQz8H1+hUb0qvwaFvLkv57CT+u+HihtalCllbtGmwQlv165l01FQj/5UsBggDo7IJuSxa6jRSFb7o7biSwnsiJb3VOa9a8Wdp+woE7J20U/X+2qUJ7YN6iP3YLcmWc71wnvXveFJgyTWuOItpjlknFXYeQmFO90j4z/sNNK6WNn1wSfKcdgWpoxK/YHADm8l1sRMIGLVFt/04R9C+R+vCyAo9G8Y+b/pAtB/GVngfzpB8D9xRFn93yPK/r6Lkv+G/5+EpH+fHfb/wZSy/3Ws0v+NKZP/z3Zco7j/3vBv0zD031DsP3HD/6dzC/+DNtzYl/W/d/zbSQL6Nxj/383OQv4Td///wlTC/yil+H+B4OLvz38drZjAvH9pxcpLKuA69eN/KSf3jF74MBjQV8Gc5z8FFaa+ByhWDGrK5+UJ3JIuJOeuC5Nwb0OavamSqpWSl3wfbP2O90FrrqlaqiJOpUopDPjtxS9boRsgaEc/mWOoTgZroIWxIW1BF0OIPY/bFPjASvGcTxIdK9Z/nw4lRhBETZRITxSHzYLEP0nl+MJrcRBkkGQNHmLfc0KNvs/zfMW9lFnz4plSGMQDtXYc68SnWYtjNaE8bc/VaC2W/YER6iR1Om4xBoeRdskD6wt914zgOGgssyFWYUUlTO3rOGAnw6fAZashBuXKnTUIU/FjPzpJNmH7YUN93Y8Fyh8Q1GEQ3fNoYaELCL1raK8cWAMC48NbwHWzeWmArxEJKEI/KJu+QT7yOY7GMSA/KIzyPGIfv5emqZRznF2rTvJ/+orlEI7At5kSxOX6XJ9lBa98Rb7wV26YKSfSXdpBhNlXfpQeGnYQ0SlW3NUMx/USdeLSaksyRoYaZ0VRQBYsWc2dS8pIOis8uWRB6EmZPs063h3vG5/l9TVD/43P8mFu3SA/jWHEGVYEyKCnJR80HKSr/4S4xg1DDT4laQ09+yQqDe0+mbKx3ZF2oa7DaOthpArFe4EsBOj3wGL6aOphFQOLP3CHvhCZhsHFrPIVQT1XkB5pGtjvAlsXnuQa040j9h6djOwVhDEiPAYGfWUEV1elU2stq3ZowO4JVQRfTOzX4qLTC0zts6MxbT1yVWKzPN8OAWfnMu4wl8XomnxpLie3sGdDGoVUwplEpMhS+1lHVsbpAwrrP/Mg6YZCzMgjrmX69hlBB7BN4aehofprl3ShbYiE6qcWhxo74dN0Dli25dZ+pEG6/tQd/m6VJAj2MiRMtqCkdkN8SWaZC/eWBhYnVCH7HkldclLRyDpS7qq0cqPgIP1TGE1v4O9GLCRQeazZp4uE/sRhEU5UmlHlr/T2FJLWXqS50zTt9+wCmSTz97o0+avRaCdZpeKannA86Knem6ezHAhBLnNJd9vbo6Fy8c+k8QddYc0SZCq2pUKIQEkwlbk1k+9UzIu4NQ+viw5E+N9YNHVlxEbcoZ8Wd7swZhR+oKCF5eIZSRKrUBxGdSep/RTbioQ1MT9bIt7tFR2/oiFYh8yK++i44kSCC//L7cLy8IQt4rYzPLCwO+WpPjif0Ad1CQg3rP6kFdKLmNF/0jz9zZ8cxM2GLLS3XCFW0k2VletaNF/1R88048lxQMSZbgJEviYL5MtZ48/Yhbsm7pl74rTRu+xe8oiM4ZTME1YkSWKqjaLGo3XbPOMyabi5R5lQb2J/LbtAN8L3Jicg2zxHAKBay6v8IhEsXexI+uWzc0yQPLisxtviHVo/KZx9DOW1W8tXnkqXBWKcBRLjTRtueRBK/Ed5R1qFTysJxM3pnXQks3MzRcEdF/YwyxZFQRNs+FSmHtGkFcBJ0BIPhz6rRKyA0CV8rHvW60tuQvl0TwHZl9lvMpVIauIrJo+OQKUHQv9MzbHxXei9C03+Zc+1yY9r15fvxpU1p/XUMJ7inu41OrKO1nUd93hv0b1ZRS2V6rgId1rOfgNMQLRdOfrr5fcUiiu2iMXX772w81Dl0vRbZ/e6BQPsiZdX942qPY9GMFQIp/jM/WU5yF9aKsnJoAr9kh9BmZqaM3cjW/HcsNF17gVak8F6yGwo1vR3E8gZNFfnr3C63BKWx11sPSov4V9jLHi7fB2dH8LN3Pcw/3Wm4FfsH8b+unjuZZyGoPQyQgGcqzu8M3NYfBuuRBikEZAqeSI5jMbPYhx89ABCKJQa+HOVAtzHq5XOilOfmH79agVGSf+8NqxyOU3ZsNOlD9DqC2Kp5TNXzUIKyRJ+LrrGUEHuavsm3k9tEblwNp+VIHv/IvB6OUPTcMoZNQKTBhYshJjnW5Ea5dD7PCDyMfZuRnc0BTtAmDrjWO4crpAgw+lNAYv8Uj/veOFpYdKJ2633n55gAulJftWN/TjMS5ftxMIf6WOu8An+1Q2WBW64CGcNQN8IC2tgCaHUVruqg7Ub/U+5itSzUgqxdckohQCcE/e4iC+KCJBHHi74hFzZ0ztuCibVswU55XJESc/v4nwJHJT7pV8K3oLOr/v/biECANqLPnWbfi/bl0TEW/Xau04UTDo6Al8jGxgJSYTGiZRuK4aiUtpgNLMZeDNAsIK01HSdxpCsqLEm94gFDgOFM/4YvGKjqSTE5zgtW7Prg9bZif3pJgMH9q+RgNMsv97+pb/IydSwKge6jPhpS90I1Ltwu4Pfydv4b/yULPKx/mJGlbgt7RcnkKzc1LpvUBYt2H+DsuwUIEcQDs3f35gsIrmUTu+9wofVKvh6FX15pL8owP0+LSD/yt7D23/CEnBvKGXDhlz719cBXzMaeNQ21GkyJFmzZu/F9guUGXO7wFNBBokwB3Rt93av4i/8RXMlFL2/EVZty8Sada4pRsVIRn85+feVPxNdVEj5W+EVyzh1Zxn
dszqlDYxwlEK/voer4A/TuXKnqi4cBAjyclSjbk0Clr7cwOJMD3nVLiKqvzsDqJ/vABLh7F3OVzJtZ7wrPZdVCJYfRP4CGpGdVkfMh0uPasLVBg8UXhCqHhQf9HhPKEYQ/RSvoTz9Ff+TbVEYc/LetNp+AgSXUR83FT+qWkf/Er7TW0kNirfs1/PPK0tg9voLqwOPFAtTKLOehbiSX5/BuZRntldt6HABELgx5H/ZpPvX7CtcYOL1lL18DPJVnISlbGLkdv2i7oJCZVTVTBNMsc0NV1+LmiNeJzQcDWInOsq483CC08HMDuVC3Qr5NdvXiptv+sdKBfrU5sYCAGtaNFnjW+hLdaSPOshdj7yfajLC1WEma/0MW06b0hUjC45injCqAp0cGFXzVK2m6ZRdPkFuOopbYV8mgliyP/X09vW8UFvs6eUmOs9nH0q+5Rfa0DYn9dYd8yOyH7oXwzr6C806UW3IlBrNjKr4l7Ac26eRJ6+cmmQCG9oQoa30gbznPoksn3YwbFj8rFCvL+nq/P56/1uk5BXK+qre5GZdj5F/PhX0MfBW/waIBLpryBA+7GTOfGaJ+wDdF5MzgY2nE5uO77bmaPWEODayrgTbZPSmMp1ugbKxMSBMvsvA+NAZEjnrKXjE2igLPU9X3gFsJ7R2q700Sm1UG50JPMYp9ob6rhicToPUCCxUQc+UbH/tpm/5IsNj6RO0vn0rdSWFrgHLRpORhm5NC4Mx+2JGyCzXk9M6/LXkwsXXZTkHNvPTgVL8vAGpYmR97za3/rChulhQlLQroA4Cd/teD8XvoG5fOHt3T44bTAotfnmSruTgPTjThFw0h2Zlh0Mte2hyj2gahrMCNoUZrIY3Hao8KRgpUkabWNyJSW4wABay4g2zrEPUnUBQ18x7edraGAhGskeSAb0Kd+DFad1A6Lqfyrlhl64ZGTMhVUaVunxe+q+1cE5bRsZNSW31Jwzk4+83xuaarpfNVAPkG5rdCZyJS17lL8vdO+Av6F87ezhSIlUVBy6gVw4sttenl8xf/OZ94d3OikkNqSE+UKg3iUUrn69HTsXCLE4T9ZevkSOmRl8p1+JfQ6KqZLS4DKPBhFNjF5v5KcyoBlW+UcEiu80vxbGQvfL6CppovQH45gCaRJJQinsogTuZ1pcxh8i34s7o+q/pczbunOSZrMu7wqz+dZCleFo04qYBkPP8vcQFXb+iDsSgESJSc4HDX4L4lBhc5XguzGR0quDKjvbDUCVdRHzBwWIyt3Tb8w+lClJa82wNlUBY/TMCNsHBMpytGZKryt5mM4ceDUJ39fX8nd0mUkTPFEnI5AZqxl6kedUebUA2Nkv+VRUjo4QrrQp+vqzK6Aqp39XeQymDk72yNoKaX4RYcwp9RlXBun9DMwJ/QE5p36WH7cuizjnFiyqRwz9uObU1KOLoLY3TyMqkQk0CdzcToUkNqxgmbaG/E/x6wJ5pgejOM59w6DA0mkPAFsqItYYr+aP0O4rBdkjvrV2xkpqk2Rqdw7xNChLAk3gfWhBSHPuSL8nrX2V/Af5eUzSGRJOyXbKhM4/3ema8pypc+mw37lkTqLvWKN6ABAdEP/jY0z92tu4rvcj4N1O3q9o0Nmy7IagbpnG5h0zRt8Ty2r2QCkCZ/MpViT8r/qtU5HP3CwjSYB5hrB5eMxQNUeTFgNAyiSqGlg9AEfJUpzPyjIDlPaQqsf16Jr6gWFpG0+oQPi/9vb4PUPwuzslTil1icqjM/FAmieqxy36Egre2iCCHKq3FRnYRxgD3NH8nmO2/eo2vTjI1tG9EohBRVk8Dg8juHMW2MSWBM99847w0UTHOnzzbr7ekqHHMswckTcftV7GY7gPWnvAHsTa1JwW9sfnzJepuhZENpdeeCh8iYPjzdmNJ4B+B4rxne7aoseodR3I9+3lKxdOitJ+O4d6b8DLbRjrYxMBXZ7PXGdhAA8pgLGag+eaurqoY4uJZ3pkE7+MZRQSH8CH7mLprvUG7xTzn0eF192oXpCjHN97vCQCgeFVrZAZ7JTJ1ZFMbVhNmi2oCoFIeKFn4S7WJWVGkqRM2kyUHQ9VKN8DzX5yz26LD/7sx6ZQ/2e0Yn9/L4p5fsgfGNI8mCX74Vbr1/NQYJykY+izAx/RsFem3RK5tb1qf189Ua8GYd1Umo1VFSqYGru/Shsq+ZPTlTzkWKaY4Tl8KFs37TTcogfC+BidwSVVKTuW+lsAAVCwFj5s2f4c4jneITVUMjHkj8H8DohOwNVNY2VaQUFqyRfUl0vn6sZ8D241YMIf2aWFWfMK/qcVDaiyQZvsPQFG2jeaUWeyy9y7IhdJ/xeAP3y+MY/bbYnzWB3yh8kn+uBWNXD8NFZw8gUURYfgp6AI9mWCkZJnj1tNR5uj50kKE6zBCW1xGShh1WiBr9UQEbqpz72TUTcp1rIgJL0w6fg+UCLi1qnzcA2Y78Rb/Bk2TvCbdzNfIlyZG2TzUvODLeIi3ykV5e5QK9LUS9PmwGhPSAX+jNI7tnoT+lcxzPRJdcMP7yAH+vi4zYT/9M8+SNn5YDvdtu8VE+V3qqG5DWFskeoyC9UVDaBUiO3qciHKUe95O8OLr5wb8sNBZw6NWGpqYrIQBRxXKHddRK5ZwgUm9l6p1uZbK+56zXmTHehVjZeDLrHsbCUBW2m6a2dNG4mTO8aOy8aVFC71JtUyyrNR8NJphO4LfdNcmyxtizBhsQi2pSbDz+nOMEP05PkkXr7fKxIX4sbAaJ8ffFPYnImEKPTmPZJNrUv5UQ97UrpNJpF/eEWUv0FOZyIwxhQ8q9hEvv9v35d58SUTOWAklJ45IHmz2KbfxIL3AooTkl6V/A9GCyQlK3iLmU47rl0mxZewC1IvXEuIuqorqiojIEw+5hbWCt5yoRZmDJPLptNTc15KTa0ERlMuqOg4RPXSLIIeSq5In/JonmmrLC3SjeW3/m+uI96VoYMpdUegN9p/JIoY036c8S8rm7Oiw7i5xzabpr89iFmeD/tf70d5GtXjvhGa/5+YrfKNT+gMzsmheT/tXaqvEldAwGogYogbGM/eN06CZFSiRrGcUTr93pqRoRHA8Ucn1dWxAQyF0xjnvj9NRtbF69XgvKIgy9s4YjOhoVVbEmesyvIf8nLCAUmqk2OxAvbuKKuujElF+rPnPpVRiIjJk/bYr+sIdHBCq/rDD2n1nK5pd5J5f8DGFVAWGTytx77EJfoHzMu0TXL9f+Cve+cTOQjKHpp3ttag5VRFaPq07uG8WCx4kKtTJqN5oTS8BStUByldHCkwkiq3wkBec8hZzkYqDaZsSpwib3R7vzAyslo99twScUznyiHGRrDgfCwZAn1TKq2MBXTWkk3j8f7oy0uGnkr9eI3o/dT8wH1nCagwwtXlXS0/400StYXM52IH3PJVsFM8ujCHhYTgKcS3aA+rEw2ZS2gOZLBvkMiy5xYqDBLgqhinjygJBKNpCzXLNdozBWEbl4hyVLmOAGkY4zuaUMX8hR88I+esLwY3iirV+zJ9ajWzaY8+TvEzAqzHj6p1vmPZ38YtKYT
e3yqEgEv0jJurJLD7b5sOQ+TfaaQhe5rYS2lhiTQ224G3wyYGCAZaHX372x+evstRsww50sS4uztUDgAR+YWthHUUzvFs4FhNQY/Wi0WHjNNkdrSCVGZFRfpRF2FL7Q+iqnRDaCx6aUw2ESO3R8xHgZN+dbfWCudQ1Jl1Ez0XSuamCXRdjH7LGmeFCroZs+V5CptrFOIbBHGndeIbhO2hP8ixyVlHCdk/lu8u9pX++npZPjW1kO8pempwnxT8RjiRlMvNrAKZH2pZLxPgazLjunRK/Qw2A4DL/HcpihNanBPjyWi7bqn5GnWsT8OC+KEWNxtC2P+0eg1JVMzWvcwGgL+8iPIgX4IlDT/XXvwvzqO/Uxm6e22a+pec47q8vulZ8pH9bI6Wp8rNdFKTiFQLKqklpFjI1P07A+mwmXN8+AFl2GUC1KKt6DmKf8acZptvQrDAKIJ+D5p5aoNjFdNHeB5gmqNOy37TmkuyMWv1d3aL0djGKobvAEaEQ+Mxe3WP52qQMJaMClZp5EMkfi2LQ6cW6Fxiq/af6lD6+TnZv4RhznXFj4E77sDRGobUKEOb247sOFaJfbAlrtKHfJ12Ab8bbxmY7/m4CJQ/lVz11WiXJ8tn55eSMO4iS+omfn5k60sZpLpn4B5bRBlszUdk+6L4BXn8ugqcdvSboI0OvfXf7hcQfZZPBK4YZYHN6jANRKJyCngiZKoc+ZORXboaNwmg5VrKLyruKe0RmkNVmi46DvPg69LVFXkhQuBU5hC7othC4oXTEhjm0k4GaCKUPrQGnhhV0mKGKeZ9y5lEqc5w/ESDzEu/POlSd3PsSiM9fbgpX+ffPb8VgCp9g/ybO78p8sg5DjxfNqorIFVhtS4xXx4FnmQCColpARioTIfgR6wT7Xt8HEIRvKZgglFpoGm7VROWuNHuM456MEgKXvYMsyd1gfVggJjJ3bexCPYPvqMHw6i9F/kwfUS9NNRgHSqa4v26871cTXZQ+9l+RXWmkyEpJZY4dYPr62e+ewHzGU8QPDAoTbT9XhG49OZh/T364m13hCq1OMM5wzbuPR5DiF67LxrX6zFjR9lkJCoUylDtI/LULPFy4e7WAVuUF6TFNZAk9JO3CS15sdE51DGeU5RY3UFYlx9SuUZnnJXs6KSd78T/TMKM9xH41viDz9OWS5auB7kX8VRr9GbMWzc4jyGLZOkIiNYfeNSmOsVC3pJYPP4WLvcblEneOudrI8omiA8ZX4nDcx7l9PAJZe05hnpaAni9b8K8nyFYSNnH3ZWSl6/brh4yzcpuIAbdeuoO1xFVSeE5Ea9RDLjTO6ebnQh7PjPLKb7hRMpwPmUT0MgN1S5sRU6IA2GJzlDafnvDP9Eh5mJlU3hDMmcb3zTDPjhwxuQ2d1+3dzwHaWOXS9jkI+smvYUYza8+PbYQt7WRK+ASu9/TpJECI0gFE2RSm0ydd0veft60+PbWRduT11cf93AUpeT/v6gepFKxY74H15GbQEI3MqJs83kxTzu/Ai0QsSyb3kYRnRYu6E9VbcKcvezScIPHX3lfuuhGdYpzODFaYfCzujO70XYqmVceXYoM/x9tZsr7affQC6GRqzfq2iR/AH8IoV/TsOURgOm6okE5ipNDoBYblR+UWzSTRTy8q734uSSZKDCphjhfD66gK2WXgByRWSxf7UAquuJO5S4oXeNg2XxahV2mS0N3ufVvvQcbd2ZAAh9bweszh6jWEgek3rVWwzaMBjqg9X5ljcQU9sXkleC98JdMTsgDLLn5923jzb8YfHe81H0/z8akY1y8VeyNcwb8uevWsR+XtzT/rPap77AvVNl14Qhf9qQ3Za+DGynFeetDwNLYF6fZTgunjQ/DXRXWETxQe+s2VhOn5pNvs8fGqavNJIL6uMHR6R0SSSK8bu1HDGw6b/OothEJmPSTktU/ih95nesUQjOiUuEh/04C70XgGi5f2sRYM6cfFj2RHUFzd9gTimzxhiTWaug4mCoQr5L9AJFtcA0jVeNEWLVpat02W5KA31s5mcw7Uj/ke9SuJ65bpn7WwfnGovZ6Qc7yjPNrDPJq913mXOzEbTNQbUIRDyAOdxU01lmHF0/NdqCKMigfWH/b2tvET11hIRgYhD7jwNDW7Q67HbYr+j8tu5AtlwbV7fsWFtWr3PmeqPfv4ZZ0JFChrGaDcAow10V4rNG4LG0QJzckoYL6rzG4ye5dgS7fuUwYxDPYtTUONfoPdeYaliBd784K1PFWnnCcPYsLXN5me5ewm2xsmWTl4rUoxf65pBYT5swMKqtabJTCitAdtP/S/IOShgCKOCVqRnQdzxRGXOVgDrUU/XDCs5xI8SUSD3b8cmTthSFPxzQXqxPRFcmluBvi4JG+Oe60CRhE3Z8Kr0D4ddjVQvux/xdAfA35yFjeazsDTLSjCafh2w5MGNMyAZtm5khXiwrMrzvYdd9NWDLX3LFU2Y1RnaJ5HYEcAF+Q46on+NU5x/hun2OzaTZkrYbSNGnajGkgxI0Ncoa+BAsaNtaPrFBEbVEjBoyJ+zw194kIOYQvrEqD4N1XxyptDBSsKiAC5HdTRY8EdIq6v7RMPxUGWMQmaBiQtgClJhftr2H/VMBT+tiSQH7ZE/3FTva2RIUWMcqK34xvH1GQ7gn1zhmglo9MGXLHXkWVFNXpfDTm+Dx/emiUJ0F16gOroJ8tnkLy+xuYXz5KvZ+w7LnTirRZNI3HnX5qPyRBqbbsiKP/Yir16v9SlghVUSimVl9kYGxSpPbOAAPTuAc9ZrrXP20aIpg7l1spCakJT6u5nQx6D2VighyqEPDUalVSC+LfDo2pz57iiEabGPBlMVzzA4QUp6gR5xHSe+r1eGN0MrFhv7wb84c5WGz35T1qgOOvUvwsPSUdxLGVHJTHdweHTJwt6U+gfy+C7cj8Q7vfXCnZCXzxoT739+AqaJBrfMm2tGizvu45Wc5XZqFrM+5zv6U4/z7H99IO1A7zt+Lzvh4mpTrqHrk633t35zZDc4PJ5pM76zV5sKPZ+8J44YqvlQ1lswjdNW6LAzdxcDTF3R54N1kvECD1jJdAfFdDf61DuiEkvHnIgz9PH+UdDsx397a9lwZr07Is9tkjhLk0AlYfLVxhW2Q90cfzMBP41Zvv6OrsYgfeZgf7X4nXyIpa90TSwkIU+fWlaoOObPhoQPMEfao0tbJ5Kx4keogamkRM4k6ub3+dqaqXuQaFvEn7yvbvPrEp1EPlyfQ8AZ3by+aCz0Md1UflSk5/S+xuPDBrLPIzSiBIkOLslN2GHFpyDuoYrnyI+sio3q4B1WgzWmtqTusdJEQDxMpsqlKQnAjz9J9wgCup+bU7FcAg4hCvR5r4G5QOD3sn8ul78GxT7N+kAYMnt89T6sTCGqDP/dHvn+zQvEOkrOnF3vL2flxCDABFCkjeWYs+TDcAbeera7Akn5PNERDAM7635vNmed96NxXM0a3r9zKXLeYA8tsjPTi1+fbQpc5CNmqXa+Trhl/Mod773MvmXVfHCYyXz7fcPc241duvOSnbdh1n7GrxSfL1Ul
nOO5ANl1JdtFGtC5Zcvz2RfBDmPF7I3YyQKoi0NEv3JS15zLqcLfW6WGjZYSzCj4T5aYcnJaJ0qaEY/pjkaBz5ViYmbK4u5IKhgUkDZL09GGyIdOMe86mmfSgqk1Xj6BeEEwRQAWQhZAPZ41Z3PYXs5For4veaKoTk/WonlzWrp9udIO3c3NbsMvmxM76FLMg6xQd92bMGgvUoIHceY57oPwfZuPt9M2jYExX9NbBISNVzPc326BIfKZBOJAZmE9Wf/RBn3ZlJVvV6wi/F/YDJF8H7LxKBSoomXuXAJuYkfqq6N5Rr6zZEW6Wm+rawMMvPfoM5v9EQAsTbLyLS1/PRNiU3L0E6uIXRPFZjrDh6wIvfGnw1DDSF5L33djDBWsvMj2hD+NjIlGaXFppRxbkua0xNg57huvmbfG5LYh6N747z+RbWO78uCZjTONE+LbEiFes03no8r3c8X1ss4cBgnutnDICT449jwukCFaNcT0jNpudM2y0eSh+5D0ZwTbSk9w+hdgE4+8uwVxhTKdas6NAhATv8jinwkwgqDYd2hxb9Q6suWQo2XDeLLvfgbgpjpVlY2QIavUzXgX5D2PfkVKXzM8qqW20gwrGHS4tp9U6N6+1qaW/g61HCToLKfUDGn+2z1oRaB1xGH3OADOtMmCbNMA63LdTOHTrLiyv7z52RlabE5yrydTuB9hgA16ycHAEf7kSCa1ShXjei6H+U+2Zc+guWKiZSKS66tt2VsdGW6yelbvVXfFgQ7tN93WsKeEUUo1/zKlpRshcbRlBwrRRUO3kVJJsnMFVIyNF4icgEmjOQCuSo1xagblP6pQriBV62VIfnInkz4lw/iVYbWWT7LpIZvcy4Xwf5XBM057YhutfIdaDkNWtuesL7da7Zr68nw7g6cOd01pMzlBbs22YivWrhBCLl8XA1jKPR+afLHPqFQOpvak1iaUMOJV7oqtT6oYydAXE6SJKH9SwI78XQ5Xj3/5vKarE0FQixWFjUCnxlrsTSLiiYellycyCi/ny1L6xF65pi4Q1Xh0htlPPC66Hw7pMmoVkQ/+KsM8OHUYF10zR3cvNclVlYiZ6cjJcwcA/rsLHPfzmM9zrXmG4hzXV6L5xExYxulKaWjWGViSNXYCLWXnEgKxmtWuY4oSST1WUs+rIOXlwtzGXWawJ4sagaMsEY0HSh8Gim+sa99ib4Iq7WtjKXwU6ahb/sXHtSVp0IWr+RmNGG/6JSvsypE6gc+M/iG4G0LRBVlnGoGpUf+5c2KX3AyPcMND56fOzHDlNKLhQgoFOu1wk2OnNHeCMWeL1+jGPetUefTxM7ixIVu187qC85cNxj7pZjzXM2V+1I5CIxK3vvHG0YBjuyPZesSUcQs4vUH19H9LmsSRZUmh3U0Vu//Pr5VYftzqAYOERbuZ5lC2pXAN76VNgGKMT9t5acKT798lGbDdRgkjGgPvG1Rlw5Ncrwm3tWwA07e67U4kGDHhQxXth1cIeCKcwEtm769e7thOaL6RqxX0qQbQut7E8PbX0zhrpNBkH3e5uwYlEukrHFQEJlJfr/vpbGhVsetTx/TzvTP6fX4z3gyhTHYQL0Dy1LasE+z6ny4/nVmUaU3MfFTEYFWRcAskb8JNZ75WyEPkGiL2tPswZkbJxS2gKj5up+/FNFouoLI5BlvTz95maHD8DPr/V+QUzTXz4R8q8FG4IuwguNGYLM/oxbo43E8FV9vonqq9TS1ZIarWUe60gfu19VRplvS1986er2GGwgMo6Td2pAYiFiEwxtP2LxdzR1KsLXZX3Os3t6wqSsM6tfOZQ4Mtu/tIscXpcUAEJ9DOcgBki339cBl9j/a+44d2Zkmu6fRdkBvlvTeFD25o/e26IpPL2Z/P4QRRoOBAEkrNdBAo/veqmJmxolzIiIjTpkZbZa46c8Q6ooPrkKJmA8ozhcCbIhYvAq1gOTrqCVxtFNHgpygu1T5bDbVFuBAin6+TV0emcrji+P4SzuYbRc/kJ2w5dJBXDzX84RoUr/ve4gu8bLDdNR+BGNBzkzkyRccOlFi6N6I+/glpZPMvbzITh9D+njGnleKz4rKFbW0dyNsQPTdqxte1RgUD7pPX56cnk8+B/gY4lhwI1KZfFpOP9Cr/bRwKdUGnTfKhzoTmTRJDrs0FSG3i9cmB/mOCjz5O/b9KwzFCpzpv6QpJH1nDMfgu6655AiEzkfqAAkzuw3ya2ZXcRV79SvZsrTJ3ulPIv8NKwRACtD9GzUgu9o1HK9P46vXoylBRlQANt80yfb9fW6D5S/SCnsIx9Y1ozzGVG8n3ptfFK+8gk1pqX90Qy84F/8YgfFdqaUCdwlFsi1pcN+ayZ1sEkpJJ9CGMjCKfoQcpfjKnOxcrYy21c8MbOcHv5XNNMeva2MdBHE9KfwQaUk0ZBs5ZyUa0KpopdaGD0XYEe+isjHsI/Bqj+Wfpt/68TtxjNCKh8/t7tyFY6E8s1AX30bEvn+zyg3byE8MYiIQpavBSnxf3iop5wKKMFnrig57H/c+MjRUDoVz4Zw6iXeVpQ8S4EIHX/YZfee/ah0nWbCNAiQUZf7oECtEyNnxeWxmcxopafhjAjct3G/wm9eJiZXNL1ROcwVV6uJ9TH55PRG9AgGH+z6J/W1Cwjw/sc/Ue90HBUc0PhkM6F2tzxMZylzXaozgk38ZX26Fsx81YSJLgWiRihenGNjmRE82QEcU/s2ehW4J9N3xMnCUyz4i6hu+KstUo9qqT9UKmyywq1LnbRuRTiSj+UUQr5dUUnOSXj0uCBL08dNhE7/OBZvY6Dd5tfrw0xkEOGM2K2cOc7FtMNeJFJdBjd5tqCiMUDag5BmaNdcKrlpbcFjSv/TjOB7w6jc5bVK2x8P2RLaj2ntSsEjD8wny/mgE59TfK4UN7sAtuXRNUcSGLEyXbl2TO6qG55kuEaV0f8FzezIhP+eZyuVSxf8SE8wrhOYqbeFaRap17JUKh7wry1oFOA4uJVz0N2eZPllzSFuyOeERpuQNETtvrJSRLIlb42XeUn7GMpYplN3LYDKdeG/pnjJDtXSP8c/kKrFVUb+Ezdi85S9Z3K9exM+SzD6Ijx1fpsWTiyQUl45ZXHXIEy3nDAS9s4tTXpazmoe/6MNyb7zkIIrw8soF0ifmcwyRu/KTAFkyx4Xq52rJIrkXI+UgsuUpV7ahSFyV/d01rc6m/PUphAkD69pKnfiKBEe4gp6cOBSkDLt1r57OxOwsnUL52llXQ1JZFt7TgUJGMfoI/a791HlDZIU+0OwhRig/L7L25FqqhQHQG5D0frbM3Qq1+sbQGL8aiQNRnNnjK190sRaoMjTP6Ib6Y7Hv949tji4Z7E4ZLKOPy9sanVQlZDFzE508JPnlcz2WPENr8sn97cVox80MpveWWozxQlE0b8MteIjUJy0faaZpTxBuqJCP0Xk8qk8r6aPZPYyveTjTHNIdieNQpnYV2XWEUE8TS/pfUV6t0uNP4dP7WRV6wmpaZyMjDR183TTmY5fz7x8VpsV97zMUgii3sNv4XBQ7BUEzx1kSmgvzg+O3
qY+816NNF1utQj61lLye6vjruh9Zbg57F/2oRGExbVoWf3n210fnfHTkzOlcO1jt/Kit9aFBKEOUaLkEoNcHVv7u9zrx9bSuyqrMbcxZnPjzHRrkGO0GFMrQXc06o/Ir8CJ0tlGnS5RAEvbYmhNlZ2dN2Dn9ZjB0TyxSIgKeYcQZfMv4F8r/uobdp2KD79Xy0Qreoi7xpQuK9tBs+pjnNdPNDzeYL7zN4VdzQaa/k9/DLUHticPwvg5X1Zz9qScETS2ZjXfRrZlSO6ve4r8yBFJrKTM6a8zMNQOedo1pONzyqSv+BhaCD3BQrD8wkPJi1MImhFOAYkUWZuVLkfjm3I6L7WVkZdjfyZkI1fUShz2+4AI+nZB0bi5usKLNIOBSs3VTpKpX3hn7p07C3ZaLIvshNdrvMmMzBbR9icMymSrB953idS8IH/8G7mMOkyg386MZJXCbUQR3NcbEeh90SSzNSGdT+YA7eytUNHOZSGEqd9ZVBQ1Q6xLG+OPOQFrEtfqBwKBMUsuRHuM7QYz3B3spQtQRSH8hVgGeWUBUgBUC/JAKNyL09EOyo4oXmvlrt/SaWondnrJH+otS2UA4e78wtEb+JfANGKYYhmQm50xo9jMcfH5kH5onb4nWdZom7E1Tnd9eogbhpdgtka2R44vrpWPfi05haUADoq264iuUjhVFw84iwRXN1+ycpcL678Zx9xlNDNw/ylS0+9nlYUMn7m3rXn+QFqMf/fEtjrqOHMmF/JCv4X6phZ1fS+T8hIg3H0weZlDkBwZy4zhbKR8+MFbsQSa9LEA7R1GPBEM728WVy/KjGmbNXyjXFxFNrlN211hnAgV270+iuguu+moZCGldzCP3GeNcpxoFkQOPwuVpfF7n+rsP9wChEztz6IdEP6I7KpAEwYOYxvWjav3I4epQIHpSZeYlrZBmsB/y1zA5PvBG58qPLWOy97wi8HdvCUWHQXsNa8OKlfbTGzT9rQssPwgoSpcP6v7xjZWG1QdCsZQi92RXXv8ODhDZ4v3k2HzKgITh605mzGAe+q+nC9rURYn+2vv72ts5qehPTzVWXRAvSHJ4uBc4PMuyHqg2NpXDaN1am6l1Rv4KyUDORoMNg30ce+drS4kDoR/yGKuJ5H0gP5ukwecHRHit8rUopMY9jwFyGDmhVlxWo4qbl4l69g+zKeERrLB6XlGIkfB1nGpDliibOIcoO8lTy5bh1EigKemKJdsn8cN4Gb041FP1UVzoQh8JHZMc8etti15ZBRQKK55Ee68TpiM4Sfjp9/GiQe1SsfP19BUJwEuElVwqDS1iBKiqMbq57PzAkQTfq5ZrVjYtReGTVi5WBR2J5lci7VCaluzvcYmZF69n6aH1F+aD1VscqL7HiJe6x8xdMphVR19DY9wWmtHhBfSEih9kA+pFzz1UPu3oefwzg3PKSJ+GRUhO23zOT2/sNsQ1shgvuFRbB3dpXkdgIXYOnJ29KtjKjjIWNeSdSshGYJkH+DI/VKnSya+KF5zy7uui4eXzSbWexu3y3p4XMl3aCQABk2LEbkoR002oCawFwjvQtoplxoEZhsAxXSiGhyg9oE/IJqa6l5FuvmrLBoGv68bLHwfPUXWebNvNoioxmhgMXNz02QzqtfRuyi6lJTWB0sIAVwITVGd4sOGxxpyzBYrYwQvPZYZ3zYp/VseyhORVHs1o1Pn32gxwF4//kQamvDxxSDtBITpGQiN7L8jyxfoaZHV4Dh92djOXmI7RTu7M0xhnpAHpciRUPCplXQBv3yXGjeOevz5gC2kr/T2CZFx/E8ce9/VOP9fbFqmIZe9u2bmfby/Lf9jdM1PjOcd7CtQM6c/0Oo8GMEuosvSD/lSVKRIyqKbaQmtW8Zx9JXxHRyNjtJUVEyHQyi1Lfll9eDVqxDfS3NkA0kUMFFEIzRIrliD3H+qAXnUUkeC37aezZJnyJkmBf6GjPQZYu4AW4qIxpW8Y/d38wUdre9fQC53ZS0yn99iAfXneHzwOnMchmqEyy5jpCyvLynRrCqoJcPLLGE+1TA7xTbfI40S21lcBvBQIe3nTBDQG1uOE2o3RWS5IqMsShgSwDaZgsi7QIwVtFuMTU9T4c4B44vpym4nNsCvOKiCD1xTJPcLvX4c7ufuAJEKF9l/4IzYRxFsyr/nsoL+Oyrs7VC5QHrkrpy9lgWR7czCZs0usTY1/88WLiBwHThMsWVTsj/Md24JGDSqn+FrRQy9bM9b4+YjqYXc6ppzLaN3H/STpx6JASa3aWeUc3j9RNVaSXUCHa6EefQEi+PwDt2366BOfOIMQjLurLt/0g6SqiahNFJFOR81nFNEtTJruDtP3r0EpMdkrvltenUJGu2doQGQFT+JEY/ypC6a3Ih0mxckgcWGNNVCUHAaUozIDNXudpH62gdMo3yXulJMmPY+XvwyLIwuckCtryLGlo8WGs6vMesUaoTEhIGIunZc26k3wQHkct31AzRokBTWNhYZnhGj5GJfH5R/ZjkeluBXNFLbWjhmsFnvzcUtFlYVVaed7c7e9nll/Ru0lYxklzSFB79ut3bW5dWOBczN/cZEGbfoflN4/o0Yb+gyrE7npyYxxJnH45ZLznXV5EGj4xtvJZC+Nyb0R737Iq0RU4Rb2oB3oF7c+QCAdxgc+wsOFBLedxc8WuBbBtNPPjiJtp5yGjLzxZFoHuVh+rXl+BC2DWODQxPRkfz+AJdf3IQnw1IZEiDZW0wKGGCXoRy0KRNn2yXB9HlMerZwYDAJbUgn/kWv91y6I+UTs4uD3os7l8srFMF9MfZU620rdeMfGNehxl5m1VdPUwYi5z2MA9PstwdaB3fmQOX3I3jIF/0T0IIGqMPnVk5Jd61XjSzhSlJbUyF88+RN81vLN41SwfJ8T2YhXLOfXRFJV+hWHFLtGcymYtJhT7uVF3aCqU7o1YdXUqKopuaBmi8RJpCzrEGp4bv4MBFF0TpYaIEl33rUqIEGgjgtXhY9dWKmgbRbPmmejJIR9i2KaSBB9JxOSQ+jtHvHK7Ll6wLnyW1tvUBfHsw6/8EXEj81JmFNHtFakbazAh7xxV0YhEolBmFeznY8P4Shxjv6Nzn0Q/JUc9lSpEYXqlDIGlZ2zV91Gd+yimE3uurH8CGARz54clyhKiWKw8p6hrtq7l/5gIfNhbIVmOMnhXy1tevkwcLe7j4OxK63lXzk/9CY+WNZ+4s/fhAqs/qv9/ZakTm7QaH/Lfwhgdaer0dcLq5iVBscmkXUvhLycK5N92ORusx2FGg6Yeza6fYWPa0hxRFfTThqE8VN88UU7XpaIF36eaMpKluwxufQwOHbxK656/noDJJsCDBl3LtsbP92galt8F8lBdas0TO9HT95RYTWfVxxpkyuDdUwz4uq87Kzaj8Vu2QuVye1zSghvZxOe8RiUYpZH1ItmM/cmJsgSiFrVfw/nef4KiVA+/fHzC/3A9R3Vl1yxbifrwLczuEl5fTecv6KWX3YL0QTB1ET6jPT1LT8JzMREPuC+bbqac0sO0oAgfnJvhUA8iBtDejePTdYp209r16p
LFm0mBoj0oWRchrUL+foH4kTfYWIcr5kd6l8p60zg/ePmtgqGsqO4AhJQpN/nJC09lJ+XDNJZ+RJ5UJ/mVdp1VSZlYn4W9mQx4k32N4z0lZw95mZlN5ALWEbYBm6GTI7QmKpix5VcZflGvc0v7r2Q0MePtCmKkm2vs7uuzXwyD6Vg5jyQ9CaAy0EnYA0OZgmTdCXa/QODsthknqRlX/WnvrufXNtB/HtVvnH30l/kS/q4bhDIxVxrXs43L0Ed7iQYUnf1IXj0x8mhKe/b662kB3oWdppnoPY1BOdZlankXeqDKQHxm/ooMltOlZoFK3p6knXn/iFPKk5ynvAGpRKokqpradDxBHhu0tYXeUyous7YUOwEQbgNQZSjCaNAghO0pKvRTb9YCi+MEqR2FivKS6ZYZoCpTP1tzcTyQNv17d/FsbwbTBL5ZWUJTOMku685pHScaA8XXj/LrXvbDG6WmdC/wTlj/kdxcupviCn4L9st7bDQt7o7w0ghQcvg5taIGoeQmK1fI3jj4p6NzN7CEpwZa1nGAd/PUZV9JgbGmQnckt0IQoIuEqD6I1OMd5z17r+7Zb7krIN07iDmUMmtlfq7GQ790jFqdS4Wrt4DAkRf1DvGPyhZdBgd0tXfSPKOQq3boXOJyVwEokU2xbEaM3zaNgvexm5GyvH0t9N0lzYUGe90BacLe347Eol8BBIplVNaz2/rfk8MhcN2ZKzbflDvbRAF0wo/UpCmLe/MWk8qSxB5+mH6ErJSGoEUmZd6SQ0xvtYKr3T9b6D8Zbg7svDT17GkdHoB7u+Z5pOM73n/YmDz8BLkSCNdC/avi18L9kQQrjzJ60YFE+NJbGiYmTYuq/UbSU3pcPQS9zgIntzqoayALTToX0CKJz9tEcmI24rhGYpfzwi21MqGV3QDGQ54q4gadsPakwyTaX5Ets5hvBSGPOsG7rNTHruY84D/bfc6GqH8xWZUcAJzNU1MDOwgEX95EIhwGkmpm07eYjVO3yuF977iZ9HD1nnPqrEF25FZX/51AjSLS4BJY21wVzQpiRQm2qisb1UBYBgJSEBLxW+J6ayBQL/lucByQHfb7S2kG8lLiOxw9tQiYC/EY/MThCNCPbn9fwb0/M15qyL9n59efwnh69lNodEe3JPYeu/M9cIwglVuBD7dlVaIME7HgPjXpMxdv/jgOe+5V5+K79VJKvU3r36Raw2TBqwfQOmnxw7T6jtpCUVVhV9AdKgASYhEs3ATH4ntxA8YRII3eixGjJeRo1tiJDJ3O6U/8fXYTACTOH5dUR8MRvENHP0Jn8RCdrh/2KZeW1X8eUgCRUHD9HeOafg9Ch32kL+jAFJw2ab9UY6ziTZXny+PccjqPfNz9jLlcLJ8DWkLk2e47i9OB6ylyCb4r+kzoSF7hIqWa84hH+iOLrvjPLQdOIggEDJ4np9jQ/AxnE+4Tu8mLh/XH9dCmsvms9yaxHGTErsuEIaewAEtAlO2c1XYz2Uz82Bovd6av6rmHiDy2Q8zTOUugPglSKw9WrnfsUVNz3Wj08alaJ7CMmDkL6WVFuQODTgJxy4t+DHSvQMHmJDznazOO1FZmoxgLUvrt2D2aA6b+eMPuMDQE6iUrF5DHp98TydiC3wrehnNYoBj1WFh01rsA+zrNdZPqvFBDYuBJ956Azd9wweLMvP+skDrup3rp7fMajMe9ZGH8he3WfQqwIIh/VY9WIiv0K9YLQ9tO6DbSNZQkB6cDMrgsbcliD+8NATwSE+nmEyA7fFbNRgErxzYju2EtmbU320OTvF5BhKlQCUXyaPobKxZ89rMN/kneKkZ7HVD34OyxcVWJjEu9q4eViQ/T3vRgKF/i8bPuJdMb4SjliYzUzO3ht4n/IIP5vbvUSfCIfBeDq/y4bD205Rz40fq5qdx3SXH1Na/NWjyXzdLmBfz/cjJ68XwdjyOeZj+JjkeNkjywAXGDtcJ3pQLu4whRpSnrbOct4emNMEIs/Hx76+wsHF/6J14WKfYyOyoxJIQy8lS8TY8PMTfs0726TYcsraJHfKCwnzarwVdt+vz8P4SfCYGY7rzjjaz4nVXU1wfcnPYXSZHuQQ96aCXgpyG6qLFUPKNCRxR2+H7xYpwmrpFEeYuOHt7a63RKhNUGdbqru7LxGSfkJ2jU6ipdabiWKQWfojgFl6gq4729xVpK4Y8nZGUwlEenRrkoblO1mvEVumSwUQ0MaFp56cUNqfyalEE2s8KTwvYl6bKtC/kBHDOITHYH6Yz6Wg5N9NJPcpnHtjBUh2BEyobkxbq+jsRkzErK95bbqq0/CEKEiMdHM9v+u/Ym65sLclVqnGfOrDUB3Woq27Nc5+65si/sEdKrtS9H5MzPRTKW+McAk7Nnn/FyzzOwi3cbd9Kzp7u75hupHKpRDVoaCZ9CbgKWv7u7qZG+pDhlEWYY8ccBSjp98yu1E1WEfsr9vSioJBayvkfX//SSfm3gcCVRV271XYeux1dakW9WC81wsn0k7GHDXtNdJ6ljn6MQ+xh6fYzedMy/qetg4gFxjqA0RHiIWLQlmVSJxnV/je7nCh17WP7P6vWoO4Pxfvqhg6RK2C9eiSBpNQRsA5Q1PZN4TZMS/KKXBhFFxV9LdMqKiDqwutIUdSYdVVLlUXCzKut6/Eb98vcjq2WfD8jxbQGTGcfy3KHZhjjQdb4ke2mw60z+AYHfQDrScejt7KVsPnkh3OFX/7d8mO3QZ0bwQHU7H/i2UEkcEOmE2zo+0m+qH0PYg+9a1yEoPMSd5aUJ/O6qRCUZz6SXXlwHM1B1dVXB/yWQ447sDb6HOfggfEnR6bBqDCQ42eiDK1ICW3w+5Ls6JRdn+ggJHaCoMrp+sqe5PXSkt3Z9d/NuaYTkPfQleOVNITdYpqvRPMFOjKAv2oPiPN04j3i9S6SpxzH2mCw5Ldfj6jJXq+02TQZe4TazYu+JvK+OGZ8ZKnMuALyyRyF4JAdhCEM4gw66ajqHhbl5vwrM/zrAvU8C2Hnd2V4aon944fnqZQiFBwbOSuqqsue8ZiUbWsuMyz3uiCKU7ZBmGowLkoCIwlExhFCxTWl0YwbIYKdEDZag8nk04RHLuu+aHej5yUUDXdjARtNKGrPZP6dckmxBnoR9N8kBUvD1KBkmPFZxrTdEZt9ogrrNRlSfX11KsxcUU6QHWO+8Aw+upE20bRyGGaewQHCfCU2lGUB4k4r0DL2gv69IF5cNrqVaRvCRI1HUPyvEs2GpAsjV5vfx7kgD1tMWCEiejU5XMtHl208tWUVpvfHKvs9jMPA/SlB455Q0Qwxxqu8M8zpVgK8BbWagHgVQgby9uxuPXhIosLzbmT3hWCewV4NVL9IiKQyHn2Wjods/erJuFtEx/rMbo5+0hmvl9S3zkosIEYm/tmNqBnLo0No7D2s8p2sOgfBFaCMG0aCZ+ZMMdypAj1MPJjQqtS/hh3k5eHszGmPNwLi23ytL6s7qCp/uR1VvzyhsWCOG6XJeH9mBPm8/Q8pQN9ya4knESbygZKd4ARxWxf/XSieI6thG2hV5HNzAuk+pc7NOW
+TxajR/jED1SDA8fFzpztO+mO6+Z/Z2UZK7vn3sNtSQCXB30GJQkn9SThzJDDqTLNaj7aj9XPSI1HxWDOxdgX0Olsj/0wnX45xMF8Sb+kgvCsAI3PWRf2scYimY6wWyqwUxuqyXTyAYAErzWojJr2fwjCJoqW8gkmVyfVIsZ0aTZagsi9QUYz6/nB4RkRXJ184G2xYW7Y3zMpyTWv0mozl2kYS21YdOpZEUXSiGFlUVmWD6G8JNoA+n8e+9woFiacMC/Pp8bvPIlUGJRrI6DI/8LBcg6Tdu22FsqgJKuaO4sN+b8X+KMwIZxCxY0i0x7XarObMKzJeJ6k0i2XeGFZFVUctEVopDMyCqLsXF4A1oxK5N+dJWN9JgJn8Hgp1LrAx7kJnPfOl0f8iy2QFNEPSEzy7vnQcADa6IxTAvn14yAQ4CfTZDpKEuiXldkrGzuLzvaCPFpGBurSpYTg1LGH+x0aG9ptXzfz7zGu7LN9rUOrbVGs6BUlCl/V8k2ZaH4qJ77Tr1BiAqqe+ob5DRjSPArkGCVLp9jSBZNTEsiAqoF3WiPOzbzLK4J+SGmGjTaZzCzrus53vfdscwwVeJmwXOCCb2M8WPa3jZ/5rRHQHboA1HqUOS4+lALBe0TnmvmtayQ6Lh9K5moe3VnatkW8qyOJYM6xH7WUxa2OvY9hKX0doStwRnF1UldFo3AEo7Yf8Cyf8IvIvATzl+KJ9xeIMePrrfU6kR/B+0K7OKUMbEtZqDR91qAyt28HH0or+BftXp71alrzZyCCC60fTXK4+u9N8GPsuS1RKzMAeAQW5iNnBsweDOAIi2NFUGjMg3OZzL6nnis0QT9YcjPTgx5K6QiMXwxGuJ95Sw0IjWwAIFyWh87+m4JFVL/aL3ckwdXT6vm5YgdQtfnwSJ4WGUqtImbUVLTXa/l+96PWXqj7xhcaXzMjmY+kwD/2zvNQX2Ucau0kNAm7o50DE8VcH5KHvkQv+4b1UkJGMm5i/zLjHe73Gy5jPuQFRqu4umdDepJlTGV7kfsvYO33r5tYl6bFE0XYtJ0IEvBON9FXujl/yjLa9o2m9RQ96R6K1ntGzOQ1iiMtOD8BbHm1Xe7fxNB64++4Onwu63brlOZy5fYPgxc/58Lb7rwGwRHyeJIlPBe+j6wPk2OmSO+MZ84sneSgbEh+8fCemWVLi8T+mBr8GKyh/OCVpYp8ttGkLo9olew3SPPE4/11Olj6OAV2FXScBo9C8qrz8ilDqBOSXJjcDFvh9kejFw8/t5wRRyUGem7Y2ZtiHK2n+ZzzKD+Esx6DrFewDbnEN9wJ/QZLk72IBeM4nONNdtWcRwFaOYowVBytrhiHMKAr202o1tH+jBD5tYXDhmLJ/MSdmzcyv+BVAz4/7e2nwA36rCWthYgvdzlbaOGtWjgRNsLxrtumnBW0LWXTUHOPD/9KL/a7USbHLcPCwcQgdmNr7frl+YDkazsWKAnrg/h/pIIxi2L/R9H/eQRhGsP93TeXJ/0f9o7n31UALaAQSpnwuyu3/N5P+Owr0vxHwf91AmqL/DaX/L/WQpv7rHtL/83P/r1bm323Fv1/1/4ag0N/Xf7ZF/yfWkML+Qwt2mID+wxqi6H9cQBT5315AwERncNb/x9+kdwka4z3T4F/8dw==</diagram></mxfile>
2201.13100/main_diagram/main_diagram.pdf ADDED
Binary file (56.6 kB). View file
 
2201.13100/paper_text/intro_method.md ADDED
@@ -0,0 +1,139 @@
1
+ # Introduction
2
+
3
+ The goal of masked image modeling (MIM) is to learn image representations, in a self-supervised fashion, by occluding parts of the input images. MIM is inspired by significant advances in natural language modelling such as BERT [\(Devlin et al.,](#page-9-0) [2019\)](#page-9-0), where the model is trained to fill in words randomly removed from a sentence [\(Fig. 1,](#page-0-0) top row). Recent work, including MAE [\(He et al.,](#page-10-0) [2021\)](#page-10-0)
4
+
7
+ <span id="page-0-0"></span>![](_page_0_Figure_9.jpeg)
8
+
9
+ Figure 1. Self-supervised language, and vision, models learn representations by imputing data removed by masking. *BERT*: random word masks; *Context encoder*: random, fix-shaped mask; *BEiT*: random 'blockwise' masking; *MAE*: randomly mask out 75% of the image; *ADIOS*: multiple masks (N=3) generated by an adversarially trained masking model, post-processed with fully connected conditional random fields [\(Krähenbühl & Koltun,](#page-10-1) [2011\)](#page-10-1).
10
+
11
+ and BEiT [\(Bao et al.,](#page-9-1) [2021\)](#page-9-1), show that these gains are at least partially transferable to vision. The task of a MIM model is therefore similar to BERT's, e.g., given an image of a bird in [Fig. 1,](#page-0-0) it needs to reason about what the bird might be sitting on or what colour the bird's belly is given the visible context (bottom row). However, while missing words describe *whole* semantic entities (e.g. "head"), the masks used for the context encoder [\(Pathak et al.](#page-10-2) [\(2016\)](#page-10-2), which pioneered MIM), BEiT and MAE typically have no such constraint [\(Fig. 1](#page-0-0) bottom, left to right). Imputation under such schemes is conceptually simpler, as random masking only *partially* obscures meaningful visual entities, which allows easier inference of missing values by leveraging strong correlations at the local-pixel level[1](#page-0-1).
12
+
13
+ To narrow the gap between pixel masking and word masking, we posit that one needs to occlude *whole entities in the image*. This encourages the model to perform imputation by complex semantic reasoning using the unmasked context (e.g. given a bird with a yellow body, it is likely to have a yellow head) rather than leveraging simple local correlations, which can benefit representation learning. Interestingly, [He et al.](#page-10-0) [\(2021\)](#page-10-0) are motivated by a similar hypothesis and propose to occlude a large fraction (up to 75%) of the image, removing complete entities with higher probability, which they find is essential for good performance.
14
+
15
+ <sup>1</sup>University of Oxford <sup>2</sup>The University of Edinburgh & The Alan Turing Institute <sup>3</sup>DeepMind. Correspondence to: Yuge Shi <yshi@robots.ox.ac.uk>, Adam Kosiorek <adamrk@deepmind.com>.
16
+
17
+ <span id="page-0-1"></span><sup>1</sup>Akin to randomly masking letters in a sentence for NLP.
18
+
19
+ Here, we suggest that it is actually *what is masked*, not so much *how much is masked*, that is crucial for effective self-supervised representation learning.
20
+
21
+ To this end, we investigate learning to mask with an adversarial objective, where an occlusion model is asked to make reasoning about missing parts of the scene more difficult. This novel representation-learning algorithm, called Adversarial Inference-Occlusion Self-supervision (ADIOS), can identify and mask out regions of correlated pixels within an image [\(Fig. 1,](#page-0-0) bottom right), which brings it closer to the word-masking regime in natural language. As we show in Section [3,](#page-3-0) it consistently improves the performance of state-of-the-art self-supervised learning (SSL) algorithms.
22
+
23
+ Some MIM methods employ a generative component for representation learning, by learning to reconstruct the masked image. However, it has been shown [\(Bao et al.,](#page-9-1) [2021;](#page-9-1) [Ramesh et al.,](#page-10-3) [2021\)](#page-10-3) that pixel-level reconstruction tasks waste modelling capacity on high-frequency details over low-frequency structure, leading to subpar performance. We hence frame ADIOS as an encoder-only framework that minimises the distance between the *representations* of the original image and the masked image. The occlusion model, which is trained adversarially against the encoder, tries to maximise this same distance. We further discuss in Section [2.1](#page-1-0) that, compared to the generative setup, the encoder-only setup optimises a functionally superior objective for representation learning. Note that the encoder objective is compatible with many recent augmentation-based Siamese self-supervised learning (SSL; [Chen et al.](#page-9-2) [\(2020\)](#page-9-2); [Chen & He](#page-9-3) [\(2021\)](#page-9-3)) methods. We show that ADIOS consistently improves performance of these SSL objectives, showcasing the generality of our approach.
24
+
25
+ Our main contributions are as follows:
26
+
27
+ - 1. A novel adversarial Siamese-style MIM framework that, unlike other MIM methods, is not limited to using ViT as a backbone—advantageous given recent discoveries of modernised-convnet superiority over ViTs [\(Liu et al.,](#page-10-4) [2022;](#page-10-4) [Touvron et al.,](#page-11-1) [2021\)](#page-11-1);
28
+ - 2. Qualitative and quantitative analyses showing that masks generated by ADIOS are semantically meaningful;
29
+ - 3. Analysis of how different masking schemes affect the representation-learning performance of SSL models. We find that models trained with ADIOS and with ground-truth object masks significantly outperform those trained under other masking schemes or with no mask, demonstrating the efficacy of semantically meaningful masks for representation learning.
30
+
31
+ # Method
32
+
33
+ **Setup** ADIOS consists of two components, an inference model $\mathcal{I}$ and an occlusion model $\mathcal{M}$ (see [Fig. 2\)](#page-1-0). Given an RGB image $x$, the occlusion model produces an image-sized mask $m = \mathcal{M}(x)$ with values in $[0, 1]$. The inference model $\mathcal{I}$ takes the original image $x$ and an occluded image $x^m = x \odot m$ ($\odot$ is the Hadamard product) as inputs, generating representations for both, which we denote as $z$ and $z^m$. The two models are learnt by solving for
36
+
37
+ $$\mathcal{I}^{\star}, \mathcal{M}^{\star} = \arg\min_{\mathcal{I}} \max_{\mathcal{M}} \mathcal{L}(x; \mathcal{I}, \mathcal{M}).$$
38
+ (1)
39
+
40
+ We will now discuss different choices for I and M.
41
+
42
+ As discussed in Section [1,](#page-0-2) the inference model should minimise some distance between the original and masked images. Here, we discuss potential forms of this objective, arriving at our final framework using augmentation-based SSL methods.
43
+
44
+ <span id="page-1-3"></span><span id="page-1-0"></span>![](_page_1_Picture_15.jpeg)
45
+
46
+ Figure 2. ADIOS Architecture.
47
+
48
49
+
50
+ **Distance in pixel space** One option would be to inpaint the masked image with the inference model, and train $\mathcal{I}$ by minimising the distance between the inpainted image and the original image in pixel space. More specifically, we can define $\mathcal{I}$ as an auto-encoder consisting of an encoder and a decoder, which takes the masked image $x^m$ as input and produces an inpainted image $\hat{x}$ (see [Fig. 3\)](#page-1-1). The model can be trained using the following reconstruction loss
51
+
52
+ <span id="page-1-2"></span><span id="page-1-1"></span>![](_page_1_Picture_19.jpeg)
53
+
54
+ Figure 3. Inpainting.
55
+
56
+ $$\mathcal{L}_{AE}(\boldsymbol{x}; \mathcal{I}, \mathcal{M}) = \mathcal{D}(\boldsymbol{x}, \hat{\boldsymbol{x}}) = \mathcal{D}(\boldsymbol{x}, \mathcal{I}(\boldsymbol{x} \odot \mathcal{M}(\boldsymbol{x}))),$$
57
+ (2)
58
+
59
+ where D denotes some distance metric defined in pixel space. Minimising [\(2\)](#page-1-2) encourages the auto-encoder to impute the missing part of the image as accurately as possible. LAE can then be used in [\(1\)](#page-1-3) to train the inference-occlusion model.
60
+
61
+ **Distance in representation space** An interesting question for an auto-encoding $\mathcal{I}$ is: where does the imputation happen? Multiple hypotheses exist: (1) *Encoder only:* the encoder $q$ completely recovers the missing information in the masked image, so that in the ideal case $q(x^m) = q(x)$, and the decoder faithfully reconstructs these representations; (2) *Decoder only:* the encoder faithfully extracts all information from the masked image, and the decoder reasons to inpaint missing pixels from the representations; (3) *Both:* the encoder and decoder both inpaint parts of the image.
62
+
63
+ Given these scenarios, (1) is clearly best suited for representation learning, as it requires the encoder to reason about the missing parts based on observed context, beyond just extracting image features. With representation learning, rather than inpainting, being our end goal, the key challenge lies in designing an objective targeting scenario (1), such that we learn the most expressive version of the encoder $q$.
64
+
65
66
+
67
+ A key feature of scenario (1) is that when the encoder recovers all information of the original image, $q(x^m) = q(x)$, the features extracted from the partially observed image $x^m$ should in principle be the same as those extracted from the unmasked image $x$. We thus propose an inference model $\mathcal{I}$ consisting of only the encoder, which extracts representation $z = \mathcal{I}(x), z \in \mathbb{R}^d$. Our objective can thus be written as
68
+
69
+ $$\mathcal{L}_{\text{ENC}}(\boldsymbol{x}; \mathcal{I}, \mathcal{M}) = \mathcal{D}(\boldsymbol{z}, \boldsymbol{z}^{m}) = \mathcal{D}(\mathcal{I}(\boldsymbol{x}), \mathcal{I}(\boldsymbol{x} \odot \mathcal{M}(\boldsymbol{x})))$$
70
+ (3)
71
+
72
+ where $\mathcal{D}$ is some distance metric defined in $\mathbb{R}^d$. Not only does $\mathcal{L}_{\text{ENC}}$ encourage the learning of a more expressive encoder that can infer missing information; optimising this objective also does not involve a generative component $p$, which is redundant for representation learning.
73
+
74
+ Our encoder-only objective resembles a Siamese network framework (Bromley et al., 1993). However, the objective can be trivially minimised when the representations for all inputs "collapse" to a constant.
77
+
78
+ <span id="page-2-1"></span>![](_page_2_Picture_7.jpeg)
79
+
80
+ Figure 4. Left: SimCLR. Right: SimCLR + ADIOS.
81
+
82
+ inputs "collapse" to a constant. This phenomenon, known as latent collapse, has been addressed in many ways in augmentation-based SSL.
83
+
84
+ Let us take SimCLR (Chen et al., 2020) as an example (see Fig. 4, left); given a minibatch of M input images $x = \{x_i\}_{i=1}^M$ , two sets of random augmentations A and B are applied to each image in x, yielding $x^A$ and $x^B$ . The same encoding function $\mathcal{I}$ is used to extract representations from both sets of augmented views, yielding $z^A = \mathcal{I}(x^A)$ and $z^B = \mathcal{I}(x^B)$ . The objective of SimCLR is defined as
85
+
86
+ $$\mathcal{L}_{\text{SimCLR}}(\boldsymbol{x}; \mathcal{I}) = \log \frac{\exp(\mathcal{D}(\boldsymbol{z}_i^A, \boldsymbol{z}_i^B))}{\sum_{i \neq j} \exp(\mathcal{D}(\boldsymbol{z}_i^A, \boldsymbol{z}_j^B))}, \quad (4)$$
87
+
88
+ where $\mathcal{D}$ denotes the negative cosine similarity<sup>2</sup>. Intuitively, the objective minimises the distance between representations of the two augmented views of the same image (i.e. $z_i^A, z_i^B$), while pushing apart the representations of different images (i.e. $z_i^A, z_j^B$). This effectively prevents the representations of different images from collapsing to the same constant, while optimising an objective similar to (3).
89
+
90
+ We can use the SimCLR objective for our model by masking one of the augmented images and then following the exact same pipeline (see Fig. 4, right). More specifically, we replace $z^A$ by $z^{A,m} = \mathcal{I}(x^A \odot m^A)$, where $m^A = \mathcal{M}(x^A)$ is a mask generated by the occlusion model given $x^A$. Following (4), we can write the SimCLR-ADIOS objective as
91
+
92
+ <span id="page-2-4"></span>
93
+ $$\mathcal{L}_{\text{SimCLR}}^{\text{ADIOS}}(\boldsymbol{x}; \mathcal{I}, \mathcal{M}) = \log \frac{\exp(\mathcal{D}(\boldsymbol{z}_{i}^{A, \boldsymbol{m}}, \boldsymbol{z}_{i}^{B}))}{\sum_{i \neq j} \exp(\mathcal{D}(\boldsymbol{z}_{i}^{A, \boldsymbol{m}}, \boldsymbol{z}_{j}^{B}))}$$
94
+
95
+ $$= \log \frac{\exp(\mathcal{D}(\mathcal{I}(\boldsymbol{x}_{i}^{A} \odot \mathcal{M}(\boldsymbol{x}_{i}^{A})), \mathcal{I}(\boldsymbol{x}_{i}^{B})))}{\sum_{i \neq j} \exp(\mathcal{D}(\mathcal{I}(\boldsymbol{x}_{i}^{A} \odot \mathcal{M}(\boldsymbol{x}_{i}^{A})), \mathcal{I}(\boldsymbol{x}_{j}^{B})))}. \quad (5)$$
96
+
97
+ <span id="page-2-0"></span>Again, we can use (5) in (1) to train the inference-occlusion model. Crucially, any SSL method that compares two augmented image views includes a term like (3), and can be plugged in to our framework. We conduct experiments using SimCLR, BYOL (Grill et al., 2020), and SimSiam (Chen & He, 2021) objectives and show significant improvement on downstream task performance with each method. Refer to Appendix A for the ADIOS objective used for BYOL and SimSiam, as well as more details on the SimCLR objective.
98
+
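+ To make the min-max recipe concrete, below is a minimal PyTorch-style sketch of one training step for the single-mask SimCLR-ADIOS objective (5). The module names `encoder` and `masker`, the temperature value, and the sign-flip on the masker's gradients (realising the $\max_{\mathcal{M}}$ in (1)) are our illustrative assumptions, not the paper's released implementation.
+
+ ```python
+ import torch
+ import torch.nn.functional as F
+
+ def simclr_adios_loss(z_masked, z_b, temperature=0.5):
+     # InfoNCE over a batch: positives are matching rows, negatives the rest.
+     z1 = F.normalize(z_masked, dim=1)
+     z2 = F.normalize(z_b, dim=1)
+     logits = z1 @ z2.t() / temperature              # (M, M) cosine similarities
+     labels = torch.arange(z1.size(0), device=z1.device)
+     return F.cross_entropy(logits, labels)
+
+ def adios_step(encoder, masker, x_a, x_b, opt_i, opt_m):
+     m = masker(x_a)                                 # image-sized mask in [0, 1]
+     loss = simclr_adios_loss(encoder(x_a * m), encoder(x_b))
+     opt_i.zero_grad(); opt_m.zero_grad()
+     loss.backward()
+     opt_i.step()                                    # inference model: descend
+     for p in masker.parameters():                   # occlusion model: ascend,
+         if p.grad is not None:                      # by flipping its gradients
+             p.grad.neg_()
+     opt_m.step()
+     return loss.item()
+ ```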
99
+ For simplicity, we only consider the single-mask-generating case in the discussion above. In practice, since an image typically contains multiple components, we generate $N > 1$ masks to challenge the model to reason about relations between different components—our empirical results confirm the benefits of doing so.
100
+
101
+ There are many parametric forms $\mathcal{M}$ could employ. For instance, one could consider generating multiple masks sequentially in an auto-regressive manner as seen in Engelcke et al. (2021). However, we find that the simplest setup suffices, where $\mathcal{M}: \mathbb{R}^{c \times w \times h} \mapsto \mathbb{R}^{N \times w \times h}$ consists of a learnable neural network and a pixelwise softmax layer $\sigma$ applied across N masks to ensure that the sum of a given pixel across all the masks equals 1. We use U-Net as the backbone of our occlusion model—see Appendix B for more details. Note that we experimented with binarising the masks during training, but found that this did not yield improvements, and hence used real-valued masks directly.
102
+
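+ As a sketch of how the pixelwise softmax across the $N$ masks could be realised, consider the hypothetical head below, which would sit on top of the U-Net backbone; the layer shapes and names are our illustrative assumptions.
+
+ ```python
+ import torch
+ import torch.nn as nn
+
+ class OcclusionHead(nn.Module):
+     """Turns backbone features into N soft masks that sum to 1 at every pixel."""
+     def __init__(self, in_channels: int, n_masks: int):
+         super().__init__()
+         self.to_masks = nn.Conv2d(in_channels, n_masks, kernel_size=1)
+
+     def forward(self, feats: torch.Tensor) -> torch.Tensor:
+         logits = self.to_masks(feats)     # (B, N, H, W)
+         return logits.softmax(dim=1)      # pixelwise softmax across the N masks
+
+ # e.g. U-Net decoder features (B, 64, H, W) -> N = 4 real-valued masks
+ masks = OcclusionHead(64, 4)(torch.randn(2, 64, 32, 32))
+ assert torch.allclose(masks.sum(dim=1), torch.ones(2, 32, 32))
+ ```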
103
+ We now present ADIOS in its complete form. This includes the $N$-mask occlusion model, which generates masks $\{\boldsymbol{m}^{(n)}\}_{n=1}^N$ from the RGB image $x$.
104
+
105
+ ![](_page_2_Figure_23.jpeg)
106
+
107
+ Figure 5. ADIOS, N > 1.
108
+
109
+ The inference model computes a loss $\mathcal{L}^{(n)}(x;\mathcal{I},\mathcal{M})$ for each $m^{(n)}$, and the final loss is computed by averaging across the $N$ masks:
110
+
111
+ <span id="page-2-2"></span><sup>&</sup>lt;sup>2</sup>We omit the SimCLR temperature parameter for simplicity.
112
+
113
114
+
115
+ $$\mathcal{I}^{\star}, \mathcal{M}^{\star} = \arg\min_{\mathcal{I}} \max_{\mathcal{M}} \frac{1}{N} \sum_{n=1}^{N} \mathcal{L}^{(n)}(\boldsymbol{x}; \mathcal{I}, \mathcal{M}).$$
116
+ (6)
117
+
118
+ **Sparsity penalty** A trivial solution exists for this objective (6), where, for masks $\{m^{(1)},\ldots,m^{(N)}\}$, some mask $m^{(n)}$ occludes everything, with the other $\{N\}_{\backslash n}$ masks not occluding anything. To avoid such degenerate solutions, we introduce a sparsity penalty $p_n$ in the form of $1/\sin(\cdot)$ that discourages the occlusion model from generating all-one or all-zero masks; specifically,
119
+
120
+ <span id="page-3-2"></span>![](_page_3_Picture_4.jpeg)
121
+
122
+ Figure 6. Penalty.
123
+
124
125
+
126
+ $$p_n = \sin\left(\frac{\pi}{hw}\sum_{i=1}^h \sum_{j=1}^w m_{ij}^{(n)}\right)^{-1}$$
127
+ (7)
128
+
129
+ Note that $p_n$ goes to infinity as $m^{(n)}$ approaches all-one or all-zero (see Fig. 6). Minimising $p_n$ with respect to $\mathcal{M}$ encourages the occlusion model to generate semantically meaningful masks, while avoiding degenerate solutions.
130
+
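+ As a quick sanity check on (7), consider a worked example of ours, writing $\bar{m}^{(n)}$ for the mean mask value:
+
+ $$\bar{m}^{(n)} = \tfrac{1}{2} \Rightarrow p_n = \sin(\pi/2)^{-1} = 1, \qquad \bar{m}^{(n)} = 0.05 \Rightarrow p_n = \sin(0.05\,\pi)^{-1} \approx 6.4,$$
+
+ so the penalty is smallest when a mask covers about half the image and diverges as the mask tends towards all-zero or all-one.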
131
+ **Final objective** Let $\lambda$ be the scaling of the penalty term. Our complete objective reads as
132
+
133
+ $$\mathcal{I}^{\star}, \mathcal{M}^{\star} = \arg\min_{\mathcal{I}} \max_{\mathcal{M}} \frac{1}{N} \sum_{n=1}^{N} \left( \mathcal{L}^{(n)}(\boldsymbol{x}; \mathcal{I}, \mathcal{M}) - \lambda p_n \right).$$
134
+ (8)
135
+
136
+ **Lightweight ADIOS** Despite its strong empirical performance, we note that the training objective in (8) requires $N$ forward passes, which can be computationally expensive as we increase $N$ for more complex data. We therefore develop a lightweight version of ADIOS, where we randomly sample one of the $N$ generated masks to be applied to the input image. Doing so decouples the computational cost of the model from the number of generated masks, and the only cost increase comes from applying the mask generation model once, which is inexpensive (about 10% of the size of ResNet18). We name this single-forward-pass version of our model ADIOS-s, and write the objective as
137
+
138
+ $$\mathcal{I}^{\star}, \mathcal{M}^{\star} = \arg\min_{\mathcal{I}} \max_{\mathcal{M}} \left( \mathcal{L}^{(k)}(\boldsymbol{x}; \mathcal{I}, \mathcal{M}) - \lambda \frac{1}{N} \sum_{n=1}^{N} p_n \right),$$
139
+ where $k \sim \text{Uniform}(\{1, 2..., N\})$ . (9)
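+
+ A minimal sketch of the ADIOS-s sampling step (our illustration; `masks` is assumed to be the stacked $(B, N, H, W)$ output of $\mathcal{M}$):
+
+ ```python
+ import torch
+
+ def sample_adios_s_mask(masks: torch.Tensor) -> torch.Tensor:
+     # masks: (B, N, H, W); pick one of the N masks uniformly at random,
+     # so the encoder's forward cost no longer scales with N.
+     k = torch.randint(masks.size(1), (1,)).item()
+     return masks[:, k]
+ ```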
2203.11131/main_diagram/main_diagram.drawio ADDED
@@ -0,0 +1 @@
1
+ <mxfile host="Electron" modified="2021-09-27T07:55:53.007Z" agent="5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) draw.io/14.6.13 Chrome/89.0.4389.128 Electron/12.0.7 Safari/537.36" version="14.6.13" etag="1ldCXqmGdWEpgXHCbdtH" type="device"><diagram id="3RX6gxh2AAn5v3rtvkH4">3VlLl5owGP01brrwJEFeS+ujs6jn9IyLTpcpREkHCSfEEfvrm0giUNBxHBVn3Egu+fAj936PxJ41WuXfOE6jGQtJ3EMgzHvWuIcQBGggvxSyLRDbRwWw5DQsIFACc/qXGEuNrmlIMo0VkGAsFjStgwFLEhKIGoY5Z5v6tAWLwxqQ4iWpuaGAeYBj0pj2k4YiKlAPuSX+QOgyMr8MHb+4s8Jmsn5wFuGQbSqQNelZI86YKK5W+YjEavHq6zI9cHfvGCeJOMVAr3smtubdSChfVQ8ZFxFbsgTHkxL9ytk6CYl6AJCjcs53xlIJQgn+IUJsNW94LZiEIrGK9V2SU/GkzdX1L3Xdd209HOeVe+OtGSSCb5+qg8LMNsPSbDcydsULqreqLVHG1jzQkKUlhPmSGMrc5krCPT9S2IStiPwZOYWTGAv6Un881gpb7udp0yHneFuZkDKaiKzy5B8KkBN0sPie9k6HCnRQndBX5iMHHp0P4cA9ZiAvCpfNqPLuJbRTVbvCtDsvOF7rJZo/KqPHybShvbqyNhEVZJ7iHU0bmUnqKlrQOB6xmPGdrYURAEiJIROcPZPKHWcK5EdZsERU8MXus5fIC+GC5L1DcXSAfbPs/qBOkxHQpswP0AR9VMkNDjgsmAoHR5d40GUQwwsE8ftD2LlNCJ/KiN0Q/YwmdKXWEoExXSyI9En63mXy7SO7Qh08kbcy357CXIizaOcs3AUm5mKoyq8EghhnGQ0MPKWxcfBMtu3u2HYabD9s04smt9ADwLXaktvQBmBw6+TmW7dLbu79JLfOGhTT99YEb3UneK/bxHX9vHUuJx2WHP9+OAFXiZNaLTmXIL87gow/1aZAPkPWwAZxcSx3suT1KoGztNjeLmiulub/suEA7EtNtpQNNHada/fEja0FHKBG3fBayoZ3gbIBYXNZu6ob6HjhOLPhASfzcXkpd3pscIGifJ0zgy4ZadlmB4w3dxglC/DNXSj8jSFBbekEAGcynF45nSDHqacTu5lOrtaGwk432ZWN9RuaHljVPHxF828ur/ZtIuCsUzMIPFjTiqW1c/AYDA2c9xmYl7/UuRlsniHIXeWXIwH90beVyLdvGM9Ot/H83nC+zL7SbYnhDg9SjD8VzQ9DqbEMc4rVxKEQOHgmvMndJ22Z0S1bZq+x+jOct51ajpSw+su+/JrNJx8ukO7jENPsRe/lUMf4czD4Pn1DecV+Ug7LP3WLbqD8a9ya/AM=</diagram></mxfile>
2203.11131/main_diagram/main_diagram.pdf ADDED
Binary file (21.6 kB). View file
 
2203.11131/paper_text/intro_method.md ADDED
@@ -0,0 +1,21 @@
1
+ # Introduction
2
+
3
+ The field of evaluation metrics for Natural Language Generation (NLG) is currently in a deep crisis: While multiple high-quality evaluation metrics (Zhao et al. 2019; Zhang et al. 2020; Rei et al. 2020; Sellam, Das, and Parikh 2020; Yuan, Neubig, and Liu 2021) have been developed in the last few years, the Natural Language Processing (NLP) community seems reluctant to adopt them to assess NLG systems (Marie, Fujita, and Rubino 2021; Gehrmann, Clark, and Sellam 2022). In fact, the empirical investigation of Marie, Fujita, and Rubino (2021) shows that the vast majority of machine translation (MT) papers (exclusively) relies on surface-level evaluation metrics like BLEU and ROUGE (Papineni et al. 2002; Lin 2004) for evaluation, which were invented two decades ago, and the situation has allegedly even worsened recently. These surface-level metrics cannot measure the semantic similarity of their inputs and are thus fundamentally flawed, particularly when it comes to assessing the quality of recent state-of-the-art NLG systems (Peyrard 2019), calling the credibility of a whole scientific field into question.
4
+
5
+ We argue that the potential reasons for this neglect of recent high-quality metrics include: (i) non-enforcement by reviewers; (ii) easier comparison to previous research, e.g., by copying BLEU-based results from tables of related work (potentially a pitfall in itself); (iii) computational inefficiency to run expensive new metrics at large scale; (iv) lack of trust in and transparency of high-quality black box metrics.
6
+
7
+ In this work, we concern ourselves with the last-named reason, explainability. In recent years, explainability in Artificial Intelligence (AI) has been developed and studied extensively due to several needs (Samek, Wiegand, and Müller 2018; Vaughan and Wallach 2020).
8
+
9
+ For users of AI systems, explanations help them make more informed decisions (especially in high-stake domains) (Sachan et al. 2020; Lertvittayakumjorn et al. 2021), better understand and hence gain trust in the AI systems (Pu and Chen 2006; Toreini et al. 2020), and even learn from the AI systems to accomplish the tasks more successfully (Mac Aodha et al. 2018; Lai, Liu, and Tan 2020). For AI system designers and developers, explanations allow them to identify the problems and weaknesses of the system (Krause, Perer, and Ng 2016; Han, Wallace, and Tsvetkov 2020), calibrate the confidence of the system (Zhang, Liao, and Bellamy 2020), and improve the system accordingly (Kulesza et al. 2015; Lertvittayakumjorn and Toni 2021).
10
+
11
+ Explainability is particularly desirable for evaluation metrics. Sai, Mohankumar, and Khapra (2020) suggest that explainable NLG metrics should focus on providing more information than just a single score (such as fluency or adequacy). Celikyilmaz, Clark, and Gao (2020) stress the need for explainable evaluation metrics to spot system quality issues and to achieve higher trust in the evaluation of NLG systems. Explanations indeed play a vital role in building trust for new evaluation metrics.<sup>1</sup> For instance, if the explanations for the scores align well with human reasoning, the metric will likely be better accepted by the research community. By contrast, if the explanations are counter-intuitive, users and developers will lower their trust and be alerted to take additional actions, such as trying to improve the metrics using insights from the explanations or looking for alternative metrics that are more trustworthy. Furthermore, explainable metrics can be used for other purposes: e.g., when a metric produces a low score for a given input, the highlighted words (a widely used method for explanation, see Section 6) in the input are natural candidates for manual post-editing.
12
+
13
+ This concept paper aims at providing a systematic overview of the existing efforts in explainable NLG evaluation metrics and an outlook on promising future research directions. We first provide background on evaluation metrics (Section 2) and explainability (Section 3), and discuss the goals and properties of explainable evaluation metrics (Section 4). Then we review and discuss existing datasets (Section 5) and methods (Section 6) for explainable metrics, covering both local and global methods for providing explanations; see Figure 1 for an overview. To better understand the properties (e.g. faithfulness and plausibility) of different explainable evaluation metrics, we test and compare multiple representative methods under three newly proposed experimental setups and present the results in Section 7. Our focus in this context is particularly on the relation between explainability and adversarial techniques, where we examine whether the latter can automatically identify limitations of existing evaluation metrics, thereby promoting a better understanding of their weaknesses. Finally, we discuss promising ideas for future work (Section 8) and conclude the paper (Section 9).
14
+
15
+ We mainly focus on explainable evaluation metrics for MT in this work, as it is one of the most representative NLG tasks. Most of our observations and discussions can easily be adapted to other NLG tasks, however. Source code for our experiments is publicly available at <https://github.com/Gringham/explainable-metrics-machine-translation>.
16
+
17
+ <sup>1</sup>As one illustrative example, we mention the recent paper of Moosavi et al. (2021) (also personal communication with the authors). In the paper, the authors express their distrust of the metric BERTScore applied to a novel text generation task. In particular, they point out that the BERTScore of a non-sensical output is 0.74, which could be taken as an unreasonably high value. While BERTScore may indeed be unsuitable for their novel task, the score of 0.74 is meaningless here, as evaluation metrics may have arbitrary ranges. In fact, BERTScore typically has a particularly narrow range, so a score of 0.74 even for bad output may not be surprising. Explainability techniques would be very helpful in preventing such misunderstandings.
18
+
19
+ ![](_page_2_Picture_10.jpeg)
20
+
21
+ Figure 1: Current explanations for NLG evaluation metrics fall into three categories. Explanation through error highlights explains a metric score by highlighting word-level errors in a translation. Explanation through adversarial samples checks whether the metrics perform reasonably on adversarial samples. Explanation through disentanglement splits the metric score into different components.
2203.15205/main_diagram/main_diagram.drawio ADDED
@@ -0,0 +1 @@
1
+ <mxfile host="app.diagrams.net" modified="2022-03-22T00:42:37.196Z" agent="5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.102 Safari/537.36" version="15.8.6" etag="tajRhvIgfVjiOi4jU7AO" type="device"><diagram id="0OgLPGUI8zAxwZYgXrfP">7V1rd6o80/41Xet5PmwWEI4fPbTWPuruydr6DYEqiuKreOLXvxMOgoCVKlbcjfe9VyGEMJlcmSSTycwNqozXtZkyHTQtTTdvWFpb36DqDcsyNCvDH5yy8VL+sJzgpfRnhubnChNeDEcPXvVTF4amz3cy2pZl2sZ0N1G1JhNdtXfSlNnMWu1m+7TM3a9Olb7/RTpMeFEVU09k6xiaPQiog1/45F43+gM78WisBPn9hPlA0axVJAnd3qDKzLJs72q8rugmZmDAGu+9uz1Pt7TN9Imd5QXWJ8PeBNXTNaitf2vN7IHVtyaKeRumlmfWYqLpuAAa7sI8DcuaQiIDiUPdtjd+0ykL24KkgT02/af62rDfI9cfuCiK9++qa79k92YT3Ezs2eY9zIhvP6LPwtfcu+C9T2ti+4QwnH9fsUxrBgma/qksTOBTeW4rM7uEwQHJE2uiB2l3hmluy9WCHKqpzOeG6iX6WRj3lZk10oPib1hEuz944rEZ83anoebWYqbG8A2f7esBdPhkgzJbmEAf062xDtWFLDPdVGxjuVu+4mO9v80XYgEufDikQwN5RSwVc+EXmsDKUp/ZBvSMhtLTzUdrbtiGNYFHY0PTXKgEGUqm0d95EEEDdIEpLs8GeaE7Fv58earPDCBYn0XTH8PE8mpg2PrLVHGZt4IcuCCvmWmK5XhGQAKLWIYTaJlB8FCZqQEMXFiYxvTNJ8CMUa8Cq92PKD7V24SZZSt+pj+yWwy0faS5NUWXPtU0IAiqpPc+d/HIcltgYD7p65t9fXhPk4cyFVEcL29/kleCL2AZJHIUL9Hbn9+Oq4jwYmhK8JMHEcnFSZSQA5K4SwiZdEYnemCePct/9dEyoMRI8/D0boNwwXAQFOJ1ef+9qLSOFSVLh0ryBEqiJLeFtlXK1Gh8SvcXPGE5VSY7zSn83wKPVy7L/3i9sAQZWG66Dh/CVR//bU81xdbhThnjTjvpzafeU9PLdecV72UOko14Qi9I+NxStegdT1TpK3oWvfjXITUtLUHlaZwCRMOMwZgYY5wxL4Y1cmHYaxEYFOeJMtEgb1NZF5FjhYBUZp7E5DUMTnZs1N4Z4fwpU3Q49JMSY+jeKUHakL47Cpxl7ERxkSpwFEoZIkU6OT4yLEMJ4ukDpHAmWVufTBd4+HiDFRMWJmXFVgff7xfHfv4/ia70/hXcT+hcufRQI58eegLD/pte7m/pjBLHxfoiJ1OIy9YXefr0fijm0w/dBo3PMFR34cDS7mrjbH0w7dP/+X4HSytmc0wnSyvox/tZGhG/vKt5a0Zht7uxHPq5viYR9cIX6oUiaRckRCEx1C7IBdMuyJfQLhAVZmYVppy/oiUrNAJ6UgZ0zMQsAwizbwB5MfpjLE3CYcQrcs9Isou4NPkS1V3tdvxPHv+Hs1gzw4GvKEGDJiWB+4uBBNK9XwqY8pAQIA0pPjZ1Qxwso8RQLDAJscCmLagYhgpWQyc1PHN4dAkGBmPsbviU9402Pcu2rXEgyMuKOuq7jRltIPeXMiDZlquXmE+9jahPY40hUHY/WQpS6SAF92fFVgB43i1Mj5b9G7a8BmCwlcf7FtvdlLleZ71QHdpQ7p9ptWotG0hD2oZHzQ2/VMfqsjksrZoV2dHGqlG/16bd+2fr8aW+bjklo14bmEpHs7QqbTRfn1Z1o+xo9+a8+0obH+hto7Ly5q9Rtnuoa/7d1Pva2DQ1+mGpu/lv+ValxLSqzX7zVV3Vq7d0s1IH+h5ral+7fxj0Jq1xDz3Yjy8PtDq+W6jseqDV2lazsrKbL6O+yrYGPXz/MjIeh+vVx/uzVa+15h/vLQfemSsdfgYU2R/vD7Pue93udvgRUA5UP8n1Eb2GmtGNaptpVVZAQR3/Zf6+PkEtW7TeWZtQBtTmYamwbVtjzZFW6wv1amnRqqoizqOO5Vn3hVmqtbuh8v7M/zVK67+bcvCOUL9/mHwYfQ7o7f+tlub1an/1v9e50XA4Cd6hlUp52KvdOZj7vZq5UIBDvc7dRmHfNo3O26ILXAL6geaHFtC38umDcm43Lfjbeh3RUY6r6HnTY22z8e61EtCzagyfUIQmaIny4IM1af3VgjrP+8E70EqbLlu3AjoblRLQPRLrhoRb5L4MnO/3P6AFVNSU6+P1tDeeAyeZgTq2TdVgzN4YuGrU+5jq19fbTbMKFA6bNJQEnG5jTkN71/EXeWg1uT7ZchApnWdaqQJFFY5uOSFVjY68+ui0ptr9CDh/G3IdqFfHmvG/LaUuh6eAseXxSPhA14SEVhQJwOeCIwGwGiLhwyk4EpwIEtimU2gksC3nWpAQSC0XCU6p4DIhkFkYCc1VwZEQIBUjwQl6XMGREIwOTqnYMmGLVBcJjC99C4qEZnSe4BR+nuDsjA5OwUeHJBKw9C0yErYywUNCu+hI2MqEgs8Yneg8oVnweUIKEobqTyGh1pqq49a8+1Le4HUjrPEiqOCX3VobuGpi7q7e3TVli3PzwUwMKBzqQF0ct92xOe9VgxaFse8FWmHYZuDLMLK0pymcZJqQp/V6CxS218DJfqw2LmfhfgO0mfX7ulwf3uL1bMjlsTzqvtT7SgcQMCx/NuH6A/VxPjzDxXTzQHd83TBWOut5wKEIjUAHpnuUgtD2GvIE9OK6xFDiIpbuvg/oCGK9e2hxzK/GsMl0O4CAe5e+dWs4gusRXKvrnR417i57kydbZU2h6/LhYaVXj+CFkcqLeI/IgRdu3YEXD0BLu68OsR7BxUzAh0WPfTaB1inuAc07yXEx5Oob8GwZ04n5gNd6O3yY9mqrACsRmjxc/S8F7QGWPPqOQPt6F+3tCNpHeaPdbVGt9sZpEbnh3YP8xcgwOKfVebPhuz49tzxc41ZEzRiieyxPN95bU33cxjJ2o973j6r9bXrtYyuYvGqfHc8Pny7dHp6d8/btb+LZlNYhnptRPDtnwvP9s6nfP4WtOHHvgcJnzJFF03kze8A5v4exf/E1bkXnNtaK8kKrMI7yPjWxFGtOyty3a89Iq0jtmbD2bebivdnZwbNz/t6cHc+tqHx23JVLUeQzE5XP0RY9m3z+Fp6b6+/jebXsQut0a29jFf4GczIPJQ8DdfKw1OKa8ErJbr1AGXjeWwnncW/evBGP2uG8eRRq3Ft4zocepjA3pXHZ2vvzso5r3+l+QF0Cbbo/E/dm2aV54/WtCW0yb96uVjDDTpux7pt99gPsAr4WXR+HDdanbIivn83upIm/6u0YwFVe28YCQ3GixPOiIIisyDGx/SGep0QmaW8gC5Qs0
AwSZYmTZFkUk5tEokxJnIBkjhEkRuJRDgZ4zGXOwVz3hnCwBx7ue2+J378HDjcRQ4tq1o1llNxYPpsFP3BE2UQyTLE1/TxSctwqP25CynKxQ1fxEwFc3NAt9gZceEQca7zPZDi8c2APOtgeHa/7+CAf1VMAHZQxt9RFD0MJrkqTvluYkOgM22Nt9P590xCpWfEcBWLk7aTxFVCsGXpYyLdtsnIQgJIcQwUSQLRJiVM6EdknpeyIw0s0ykHAcYcBgZtnurfm/nFKpRdkp7/LEYYVRYqOjQMcpCHuKzsBAVESCjPQKSOCzFMCG+EsmwPD0s6/XIl9miggASEkwvjKwzjLn8E+LfX42+fnJ6umGqhpQk/ghZtjDdS4L3H1h6UpQYggJGj/TWBoQgnR03F80qJYZrY2/3nbOTJpxv2/2xSFw0sFpfY27bID2jVNqZai09qh9v7g9FBdgEn0Uhu3YSlQZpXOG3oay1tVY2P4wbaGH7C0GKHWsM3CsgKWNE8OTH3pS5im3AYURf6prgLf20ZXufTnBd7q2TRfnxatONWbiFGL09/3vKCKdcBahYu1wgeKGBCgv6nPC7yxsQ95Q/UfRF7ra+QVeEtnH/KCzcl9yCvw5upe5DW/Rl6hTV72IC802rkw8r61zVOJbPOk1uuMCtlaDIVHbC5GNxxSsVbsTTxnlMrxs6nb76RNZPMsVd4UZ9MG0LnJis7ibf6lSrZzbzt9B32bVrX5s+h7i251pY9m50Zfg1XdVt0qtbdy3qMRRrJND+G1SHfafdcqPRd9pX7jtQ2jSqnv1c9/x9yuV1A4Cj7Uux1zotw/+Up+eQPrkoXmlv1G94C/6vht3MHKfljThKOqO2LC6EQzr9XbVaNaboJsZ9JGwb0jWs1HfY1f9tw1keSp7YPx2bsbdTtdB29qqKg7eezjRSH+/ywLb4GiI/osmYn5neEoAUWW3WJS2Y9ESmJSdF6IovNQ4aQd5917LIc5fCwnP61m9ETOF2d49hwDyq7HtKAuho0bhKPz0rYwiJIiyjY5ODUd+HPjKMRE9HkpzS6KlCCkaPFkSuZyaPYMJ0t/QtUZ0/BjtxJykhUshVCUmSlswRrgSBY5BxbJRLn5bd9evC5pXFp/ldgeEo5XbkpXrNxk0w5YEuUmUW5esYqJKDevQcVElJvXgDyi3LwK5BHlJlFuEuUmUW4S5SZRbv4e5ebXC++CKzfZpIebrYvBZ121+hPDv2tY8/nWA+Asi8+jw54ET/O9l48/wqPc8qYVdKIv4//mqFSOA3m/8XV2q9dsneFrdSbixB01U8wH3B7lJiOjFHUmS/E5wP8yBvupFu/Bk3SL94IY699kMLL3W/DcRvaZ2ziDETvZv/n2zgQMQExic4KmuJTNibPu07BpJuknu1DOwZn0Sy7epO9O9XJ74UEl7AhnHFe2Hhqj3tfp5FyKSzkZgQRKzGMkSRr6t17/vOuuL8c8Jk0VyDdT5q5oDGZj9L85kyqfhvnfMZPig/lPeAQmeRQILzD4FNFLbyXySZjfH3DgsqI3n2guRQlOspUe/ogwsxx9kkzf6/w91hn+XWfkLM8kDoZ5B4STa4pg8Zz7VvZ+5/8JtBjpeM0qBBNI20Uk/ekD5Re1v5hwIMwL0lbWRc9ISlTaqjIXBEiXXFMST+LRtSgSkmtR7pJr0Su2mPpl0RBFXi50NMTAW8BvDYcoSrGwI0eHQ2RE5lBR+cVDRGku5fOYKG6nhUULiFiEKTSJVrcj2yQabWXQrgeFlIkylxRhAk8JUg4ijD1TXyAB674ZsK7M5NNRSdC6o/sknwjK+8NR61DaPgkJW0fC1v2Dnc2d3F8ybB3K4EjpFy8EC7QOlBi50HHrEH+JdSBRN2VWN3H5L4kzY2P/Bg0JXJdf4DpJlgsWuA5lOLP8yw7UkcB1JHAdCVxHAteRwHWXRgIJXEcC15HAdSRwHQlcRwLXkcB1JHAdCVxHAteRwHUkcB0JXEcC15HAdSRwHQlcRwLXXX/gOlEWrydwHbrMmYXr3hD+wcB1gVeEnY1lIf+N5aMC1zGCkDCl+TpyHbZA/fKNkyPXoQwHLUjkurNGrmMENgaLi4au4zL4mD23P2cWBPvVhK7j0k4qXImJ2r8Wus4D75V6d+bSzPx/tzEK8e585T52iXfnAm1tEO/OxLtzwZBHvDsT787EuzPx7ky8OxPvzsS78/V6dz6w8C64d2cug+vTn9B5xXS9RYphxl3xQcx/LYaZB9dr1XIlHYASLRfRcl21roFoua5B10C0XNeAPKLlugrkES0X0XIRLRfRchEtF9Fy/R4t19cL76JruZJebkgMs+MKIjHMDqszJUQl7F1/PHAZJyZYTQKXnei9K1AFn9vIOnMbS4c1iiRw2bfPZYgMxcYPY7CICgI4RfuvIFGinOzCDJdPzCguzUi9CPFzSOiyHwtdJsfdkP586DI+aRlPQpeR0GXnm0Axlw9dxp8rDEM+pSymmmLrB/xif4VULxl/wLBhuIXlsjd+0hkrQV8yhFpaZfOOombjuABjY2KM8avYrOFQkK5dGZG1fgeCdJEgbokBMX400D01lrK4EqVtct57+XzaiRUiHfKWDscNltcjHQ7Xj0iHU6UDL9M/KxrQtShewtP1N9Gz9eFR+2Kfrs+isGGSChs+u4Fi7gqbwOCMYOPy2GD4JDYCFc9FsJHBPDDCE59rmjIfbBd/kTbH6Y+KDSJ64qawdKiJ6/iCkP1KIH/RenuYm4FpERHMfxE35dTIf4lISSITc4GyJ1xfhqK2YRUPBBE8wiUFn9ymerH16R/Gm5ocnmKkRQk+HGDvUJTgYDL5a6YQtIyoADBBswtBgJaoAkISKOlMMYL5ZHwGFwvsKVg4ebpZOfrTJ6+Dfx8MJYaNq8HSUIhoIa+5LNzOLLzwCQUYdmzTtDQd5/h/</diagram></mxfile>
2203.15205/main_diagram/main_diagram.pdf ADDED
Binary file (50.4 kB). View file
 
2203.15205/paper_text/intro_method.md ADDED
@@ -0,0 +1,103 @@
1
+ # Introduction
2
+
3
+ <figure id="fig:teaser" data-latex-placement="h">
4
+ <div class="center">
5
+ <embed src="figures/teaser_try11arxiv.drawio.pdf" style="width:83.5%" />
6
+ </div>
7
+ <figcaption>Overview of the existing privacy preserving action recognition approaches. The main goals of a framework include removing privacy information and maintaining action recognition performance at low cost of annotations.</figcaption>
8
+ </figure>
9
+
10
+ Recent advances in action recognition have enabled a wide range of real-world applications, such as video surveillance [@cmu2020; @rizve2021gabriella; @gabv2], smart shopping systems like *Amazon Go*, and elderly-monitoring systems [@buzzelli2020vision; @zhang2012privacy; @liu2020privacy]. Most of these video understanding applications involve extensive computation, for which a user needs to share the video data with a cloud computation server. While sharing the videos with the cloud server for the utility action recognition task, the user also ends up sharing private visual information such as gender, skin color, clothing, and background objects in the videos, as shown in Fig. [1](#fig:teaser){reference-type="ref" reference="fig:teaser"}. Therefore, there is a pressing need for solutions to privacy preserving action recognition.
11
+
12
+ A simple-yet-effective solution for privacy preservation in action recognition is to utilize very low resolution videos (Fig. [1](#fig:teaser){reference-type="ref" reference="fig:teaser"}a)  [@ryoo; @ishwar; @liu2020indoor]. Although this downsampling method does not require any specialized training to remove privacy features, it does not provide a good trade-off between action recognition performance and privacy preservation.
13
+
14
+ Another set of methods use pretrained object-detectors to detect the privacy regions and then remove or modify the detected regions using synthesis [@ren2018learning] or blurring [@zhang2021multi], as shown in Fig. [1](#fig:teaser){reference-type="ref" reference="fig:teaser"}b. The detection-based approaches require bounding-box level annotations for the privacy attributes, and removing the privacy features without an end-to-end learning framework may result in a performance drop on the action recognition task.
15
+
16
+ Wu et al. [@wu_tpami] propose a novel approach to remove the privacy features via learning an *anonymization function* through an adversarial training framework, which requires both *action and privacy labels* from the video. Although the method is able to achieve a good trade-off between action recognition and privacy preservation, it has two main problems. First, it is not feasible to annotate a video dataset for privacy attributes. For instance, Wu et al. [@wu_tpami] acknowledge the issue of privacy annotation time, where it takes immense effort for them to annotate privacy attributes for even a small-scale (515 videos) video dataset, *PA-HMDB*. Second, the anonymization function learned from the known privacy attributes may not generalize in anonymizing *novel privacy attributes*. For example, in Fig. [1](#fig:teaser){reference-type="ref" reference="fig:teaser"} the anonymization function learned for human-related privacy attributes (gender, skin color, clothing) may still leave other privacy information, like the scene or background objects, un-anonymized.
17
+
18
+ The performance of the action recognition task depends on the spatio-temporal cues of the input video. Wu et al. [@wu_tpami] show that anonymizing privacy features like face, gender, etc. in the input video does not lead to any reduction in action recognition performance. Instead of just focusing on the cues based on the privacy annotations, our goal is twofold: 1) learning an anonymization function that can remove all spatial cues in all frames without significantly degrading action recognition performance; and 2) learning the anonymization function without any privacy annotations.
19
+
20
+ Recently, self-supervised learning (SSL) methods have been successfully used to learn the representative features which are suitable for numerous downstream tasks including classification, segmentation, detection, etc. Towards our goal, we propose a novel frame-level SSL method to remove the semantic information from the input video, while maintaining the information that is useful for the action recognition task. We show that our proposed ***S**elf-supervised **P**rivacy-preserving **Act**ion recognition (SPAct)* framework is able to anonymize the video without requiring any privacy annotations in the training.
21
+
22
+ The learned anonymization function should provide a model-agnostic privacy preservation, hence, we first adopt the protocol from [@wu_tpami] to show the transferability of the anonymization function across different models. *However, there are two aspects in terms of evaluating the generalization ability of the anonymization function, which are overlooked in previous works.*
23
+
24
+ First, in the real-world scenario, the anonymization function is expected to have *generalization capability under domain shift in action and privacy classes*. To evaluate the generalization capabilities of the anonymization function across novel action and privacy attributes, we propose new protocols. In our experiments, we show that, since our model does not depend on predefined privacy features like existing supervised methods, it achieves state-of-the-art generalization across novel privacy attributes.
25
+
26
+ Second, prior privacy-preserving action recognition works have solely focused on privacy attributes of humans. In practical scenarios, privacy leakage can happen in terms of scene and background objects as well, which could reveal personally identifiable information. Therefore, *the generalization ability of the anonymization function to preserve privacy attributes beyond humans (scene and object privacy)* is of paramount importance as well. To evaluate such ability, we propose the *P-HVU* dataset, a subset of the LSHVU dataset [@hvu], which has multi-label annotations for actions, objects and scenes. Compared to the existing same-dataset privacy-action evaluation protocol on PA-HMDB [@wu_tpami], which consists of only 515 test videos, the proposed P-HVU dataset has about 16,000 test videos for robust evaluation of privacy-preserving action recognition.
27
+
28
+ - We introduce a novel *self-supervised* learning framework for privacy preserving action recognition without requiring any privacy attribute labels.
29
+
30
+ - On the existing UCF101-VISPR and PA-HMDB evaluation protocols, our framework achieves a competitive performance compared to the state-of-the-art *supervised* methods which require privacy labels.
31
+
32
+ - We propose new evaluation protocols for the learned anonymization function to evaluate its generalization capability across novel action and novel privacy attributes. For these protocols, we also show that our method outperforms state-of-the-art supervised methods. Finally, we propose a new dataset split, *P-HVU*, to resolve the issue of the small evaluation set and extend the privacy evaluation to non-human attributes like action scene and objects.
33
+
34
+ # Method
35
+
36
+ The key idea of our proposed framework is to learn an anonymization function such that it deteriorates the privacy attributes without requiring any privacy labels in the training, and maintains the performance of action recognition task. We build our self-supervised framework upon the existing supervised adversarial training framework of [@wu_tpami]. A schematic of our framework is depicted in Fig. [2](#fig:framework){reference-type="ref" reference="fig:framework"}. In Sec [3.1](#sec:formulation){reference-type="ref" reference="sec:formulation"}, we first formulate the problem by explaining our objective. In Sec [3.2](#sec:framework){reference-type="ref" reference="sec:framework"} we present details of each component of our framework, and in Sec [3.3](#sec:minimax){reference-type="ref" reference="sec:minimax"} we explain the optimization algorithm employed in our framework.
37
+
38
+ Let us consider a video dataset $X$ with action recognition as a utility task, $T$, and privacy attribute classification as a budget task, $B$. The goal of a privacy preserving action recognition system is to maintain the performance of $T$, while cutting the budget $B$. This goal is achieved by learning an anonymization function, $f_{A}$, which transforms (anonymizes) the original raw data $X$. Assume that the final system has *any* action classification target model $f'_{T}$ and *any* privacy target model $f'_{B}$. The goal of a privacy preserving training is to find an optimal point of $f_{A}$ called $f^{*}_{A}$, which is achieved by the following two criteria:
39
+
40
+ **C1:** $f^{*}_{A}$ should minimally affect the cost function of target model, $f'_{T}$, on raw data i.e. $$\begin{equation}
41
+ L_{T}(f'_{T}(f^{*}_{A}(X)), Y_{T}) \approx L_{T}(f'_{T}(X), Y_{T}),
42
+ \label{eq:ft_criterion}
43
+ \end{equation}$$ where $T$ denotes the utility task, and $L_{T}$ is the loss function: standard cross-entropy in the case of a single action label $Y_{T}$, or binary cross-entropy in the case of multi-label actions $Y_{T}$.
44
+
45
+ **C2:** Cost of privacy target model, $f'_{B}$, should increase on the transformed (anonymized) data compared to raw data i.e. $$\begin{equation}
46
+ L_{B}(f'_{B}(f^{*}_{A}(X))) \gg L_{B}(f'_{B}(X)),
47
+ \label{eq:fb_criterion}
48
+ \end{equation}$$ where $B$ denotes the privacy budget, and $L_{B}$ is the self-supervised loss in our framework (it is binary cross-entropy in the case of a supervised framework, which requires privacy label annotations $Y_{B}$).
49
+
50
+ Increasing a self-supervised loss $L_{B}$ results in deteriorating *all* useful information, regardless of whether it concerns privacy attributes or not. However, the information useful for action recognition is preserved via criterion **C1**. Combining criteria **C1** and **C2**, we can mathematically write the privacy preserving optimization equation as follows, where the negative sign before $L_{B}$ indicates that it is optimized by maximizing it: $$\begin{equation}
51
+ f^{*}_{A} = \operatorname*{argmin}_{f_{A}}[L_{T}(f'_{T}(f_{A}(X)), Y_{T}) - L_{B}(f'_{B}(f_{A}(X)))].
52
+ \label{eq:overall}
53
+ \end{equation}$$
54
+
55
+ The proposed framework mainly consists of three components as shown in Fig [2](#fig:framework){reference-type="ref" reference="fig:framework"}: (1) Anonymization function ($f_{A}$); (2) Self-supervised privacy removal branch; and (3) Action recognition or utility branch.
56
+
57
+ The anonymization function is a learnable transformation function, which transforms the video in such a way that the transformed information remains useful for learning action classification with *any* target model, $f'_{T}$, and is not useful for learning *any* privacy target model, $f'_{B}$. We utilize an encoder-decoder neural network as the anonymization function. $f_{A}$ is initialized as an identity function by training it with the $\mathcal{L}_{L1}$ reconstruction loss given below: $$\begin{equation}
58
+ \mathcal{L}_{L1} = \sum_{c=1}^{C}\sum_{h=1}^{H}\sum_{w=1}^{W}|x_{c,h,w}- \hat{x}_{c,h,w}|,
59
+ \label{eq:l1loss}
60
+ \end{equation}$$ where $x$ is the input image, $\hat{x}$ is the sigmoid output of the $f_{A}$ logits, $C$ is the number of input channels, $H$ the input height, and $W$ the input width.
61
+
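+ A minimal sketch of this initialisation step, with a small convolutional stack standing in for the actual encoder-decoder $f_{A}$ (the architecture and hyperparameters here are our assumptions):
+
+ ```python
+ import torch
+ import torch.nn as nn
+
+ def l1_reconstruction_loss(x: torch.Tensor, logits: torch.Tensor) -> torch.Tensor:
+     # L1 distance between the input frame and sigmoid(f_A logits),
+     # summed over channels and pixels, averaged over the batch.
+     return (x - torch.sigmoid(logits)).abs().sum(dim=(1, 2, 3)).mean()
+
+ f_a = nn.Sequential(nn.Conv2d(3, 16, 3, padding=1), nn.ReLU(),
+                     nn.Conv2d(16, 3, 3, padding=1))   # stand-in for f_A
+ opt = torch.optim.Adam(f_a.parameters(), lr=1e-3)
+
+ x = torch.rand(4, 3, 64, 64)                           # frames with pixels in [0, 1]
+ loss = l1_reconstruction_loss(x, f_a(x))
+ opt.zero_grad(); loss.backward(); opt.step()           # push f_A towards identity
+ ```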
62
+ []{#sec:sslbranch label="sec:sslbranch"} A schematic of the self-supervised privacy removal branch is shown in Fig. [3](#fig:sslloss){reference-type="ref" reference="fig:sslloss"}. First the video $x_{i}$ is passed through $f_{A}$ to get the anonymized video $f_{A}(x_{i})$, which is further passed through a temporal frame sampler $S_{F}$. $S_{F}$ samples 2 frames from the video using various strategies, which are studied in Section [\[sec:ablation\]](#sec:ablation){reference-type="ref" reference="sec:ablation"}. The sampled pair of frames ($S_{F}$($f_{A}$($x_{i}$))) is projected into the representation space through a 2D-CNN backbone $f_{B}$ and a non-linear projection head $g(\cdot)$. The pair of frames of video $x_{i}$ corresponds to projections $Z_{i}$ and $Z'_{i}$ in the representation space. The goal of the contrastive loss is to maximize the agreement between the projection pair ($Z_{i}$, $Z'_{i}$) of the same video $x_{i}$, while maximizing the disagreement between projection pairs of different videos ($Z_{i}$, $Z_{j}$), where $j \neq i$. The NT-Xent contrastive loss [@simclr] for a batch of $N$ videos is given as follows: $$\begin{equation}
63
+ \label{eq:ntxent}
64
+ L_{B}^{i}=-\log \frac{h\left(Z_{i}, Z'_{i}\right)}{\sum_{j=1}^{N}[\mathbb{1}_{[j\neq i]} h(Z_{i}, Z_{j}) + h(Z_{i}, Z'_{j})]},
65
+ \end{equation}$$ where $h(u, v)=\exp \left(u^{T}v/(\|u\| \|v\| \tau) \right)$ computes the similarity between vectors $u$ and $v$ with an adjustable temperature parameter $\tau$, and $\mathbb{1}_{[j\neq i]} \in \{0, 1\}$ is an indicator function which equals 1 iff $j \neq i$.
66
+
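+ A compact PyTorch sketch of this loss is shown below; the batch layout and the in-place masking trick are our implementation choices (see @simclr for the reference formulation):
+
+ ```python
+ import torch
+ import torch.nn.functional as F
+
+ def nt_xent(z, z_prime, tau=0.1):
+     """NT-Xent for a batch of N videos: z[i] and z_prime[i] are the
+     projections of the two sampled frames of video i."""
+     n = z.size(0)
+     reps = F.normalize(torch.cat([z, z_prime], dim=0), dim=1)  # (2N, d)
+     sim = reps @ reps.t() / tau            # log h(u, v): cosine similarity / tau
+     sim.fill_diagonal_(float("-inf"))      # exclude self-similarity h(Z_i, Z_i)
+     # The positive for row i is row i + N (and vice versa).
+     targets = (torch.arange(2 * n, device=z.device) + n) % (2 * n)
+     return F.cross_entropy(sim, targets)   # mean of the L_B^i terms
+
+ z, z_prime = torch.randn(4, 128), torch.randn(4, 128)
+ loss = nt_xent(z, z_prime)
+ ```
+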
67
+ <figure id="fig:sslloss" data-latex-placement="h">
68
+ <div class="center">
69
+ <embed src="figures/ssl_privacy_removal.drawio.pdf" style="width:80.0%" />
70
+ </div>
71
+ <figcaption><strong>Contrastive Self-supervised Loss</strong> is used to maximize the agreement between two frames of a video and maximize disagreement between frames of different videos. Please refer to Sec <a href="#sec:sslbranch" data-reference-type="ref" data-reference="sec:sslbranch">[sec:sslbranch]</a> for more details.</figcaption>
72
+ </figure>
73
+
74
+ In order to optimize the proposed self-supervised framework with the objective of Eq. [\[eq:overall\]](#eq:overall){reference-type="ref" reference="eq:overall"}, let us consider the anonymization function $f_{A}$ parameterized by $\theta_{A}$, and the auxiliary models $f_{B}$ and $f_{T}$ parameterized by $\theta_{B}$ and $\theta_{T}$, respectively. Let $\alpha_A$, $\alpha_B$, $\alpha_T$ be the learning rates for $\theta_A$, $\theta_B$, $\theta_T$, respectively. First, $\theta_{A}$ is updated as given below (Eq. [\[eq:fa_init\]](#eq:fa_init){reference-type="ref" reference="eq:fa_init"}) until $f_{A}$ reaches a threshold reconstruction performance $th_{A0}$ (Eq. [\[eq:l1loss\]](#eq:l1loss){reference-type="ref" reference="eq:l1loss"}) on the validation set: $$\begin{equation}
75
+ \theta_A \gets \theta_A - \alpha_A \nabla_{\theta_A} (\mathcal{L}_{L1}(\theta_A)).
76
+ \label{eq:fa_init}
77
+ \vspace{-2mm}
78
+ \end{equation}$$ Once $\theta_{A}$ is initialized, it is used to initialize $\theta_{T}$ and $\theta_{B}$ as shown in the following equations, until their losses reach the values $th_{T0}$ and $th_{B0}$, respectively: $$\begin{equation}
79
+ \theta_T \gets \theta_T - \alpha_T \nabla_{\theta_T} (L_{T}(\theta_T, \theta_A)),
80
+ \label{eq:ft_init}
81
+ \end{equation}$$ $$\begin{equation}
82
+ \theta_B \gets \theta_B - \alpha_B \nabla_{\theta_B} (L_{B}(\theta_B, \theta_A)).
83
+ \label{eq:fb_init}
84
+ \end{equation}$$ After the initialization, a two-step iterative optimization process takes place. The first step is depicted on the left side of Fig. [2](#fig:framework){reference-type="ref" reference="fig:framework"}, where $\theta_{A}$ is updated using the following equation: $$\begin{equation}
85
+ \theta_A \gets \theta_A - \alpha_A \nabla_{\theta_A} (L_T(\theta_A,\theta_T) - \omega L_B(\theta_A, \theta_B)),
86
+ \label{eq:faupdate}
87
+ \end{equation}$$
88
+
89
+ where $\omega\in(0,1)$ is the relative weight of the SSL loss, $L_{B}$, with respect to the supervised action classification loss, $L_{T}$. Here the negative sign before $L_{B}$ indicates that we want to maximize it; in implementation, this can be achieved simply by using negative (reversed) gradients [@domainadaptation].
90
+
91
+ In the second step, as shown in the right part of Fig. [2](#fig:framework){reference-type="ref" reference="fig:framework"}, $\theta_{T}$ and $\theta_{B}$ are updated using Eq. [\[eq:ft_init\]](#eq:ft_init){reference-type="ref" reference="eq:ft_init"} and [\[eq:fb_init\]](#eq:fb_init){reference-type="ref" reference="eq:fb_init"}, respectively. We update $\theta_{B}$ to obtain powerful negative gradients in step 1 of the next iteration. Note the similarity with GAN training here: we can think of $f_{A}$ as a generator that tries to fool $f_{B}$ in the first step, while in the second step $f_{B}$ tries to get stronger through the update of Eq. [\[eq:fb_init\]](#eq:fb_init){reference-type="ref" reference="eq:fb_init"}. This two-step iterative optimization process continues until $L_{B}$ reaches a maximum value $th_{Bmax}$.
92
+
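+ For concreteness, one iteration of this two-step process can be sketched as follows. In this sketch, $f_{B}$ is assumed to subsume the frame sampler $S_{F}$ and the projection head $g(\cdot)$, loss_B is the NT-Xent loss, and we maximize $L_{B}$ w.r.t. $\theta_{A}$ by negating it directly, which is equivalent to a gradient reversal layer when each optimizer updates only its own parameters:
+
+ ```python
+ def train_iteration(f_A, f_T, f_B, loss_T, loss_B,
+                     opt_A, opt_T, opt_B, x, y_t, omega=0.5):
+     # Note: no privacy labels y_B are needed anywhere in this loop.
+     # Step 1: update theta_A to minimize L_T while maximizing L_B.
+     anon = f_A(x)
+     l_a = loss_T(f_T(anon), y_t) - omega * loss_B(f_B(anon))
+     opt_A.zero_grad()
+     l_a.backward()
+     opt_A.step()
+
+     # Step 2: update theta_T and theta_B on the re-anonymized input, so
+     # that f_B grows stronger and yields useful gradients next iteration.
+     anon = f_A(x).detach()  # theta_A is frozen in this step
+     l_t = loss_T(f_T(anon), y_t)
+     opt_T.zero_grad(); l_t.backward(); opt_T.step()
+     l_b = loss_B(f_B(anon))
+     opt_B.zero_grad(); l_b.backward(); opt_B.step()
+     return l_t.item(), l_b.item()
+ ```
+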
93
+ Consider a model $f_{B}$ *initialized* with self-supervised contrastive (SSL) *pretraining*. Keeping $f_{B}$ *frozen*, maximizing the contrastive loss changes the *input* to $f_{B}$ in such a way that it decreases the *agreement between frames* of the same video. *Since frames of the same video share a great deal of semantic information*, minimizing the agreement between them destroys (i.e., unlearns) most of the semantic information of the input video. In simple terms, maximizing the contrastive loss destroys all the highlighted parts of the *attention maps* in the middle rows of Supp. Figs. 7 and 8. Since this unlearned *generic semantic information* contains privacy attributes related to humans, scenes, and objects, we end up removing *private information* from the input. In this process, the action recognition branch ensures that the semantics relevant to action recognition remain in the video.
94
+
95
+ The existing training and evaluation protocols are discussed in Secs. [\[sec:knownprotocols\]](#sec:knownprotocols){reference-type="ref" reference="sec:knownprotocols"} and [4.2](#sec:knowncrossdomain){reference-type="ref" reference="sec:knowncrossdomain"}, and a newly proposed generalization protocol is introduced in Sec. [\[sec:novelprotocols\]](#sec:novelprotocols){reference-type="ref" reference="sec:novelprotocols"}.
96
+
97
+ []{#sec:knownprotocols label="sec:knownprotocols"}
98
+
99
+ Training a supervised privacy-preserving action recognition method requires a video dataset $X^{t}$ with action labels $Y^{t}_{T}$ and privacy labels $Y^{t}_{B}$, where $t$ denotes the training set. Since our self-supervised privacy removal framework does not require any privacy labels, we do not utilize $Y^{t}_{B}$. Once training is finished, the anonymization function is frozen, denoted $f^{*}_{A}$, and the auxiliary models $f_{T}$ and $f_{B}$ are discarded. To evaluate the quality of the learned anonymization, $f^{*}_{A}$ is utilized to train: (1) a new action classifier $f'_{T}$ on the train set ($f^{*}_{A}(X^{t}), Y^{t}_{T}$); and (2) a new privacy classifier $f'_{B}$ on ($f^{*}_{A}(X^{t}), Y^{t}_{B}$). To clarify, we do not utilize privacy labels for training $f_{A}$ in any protocol; privacy labels are used only for evaluation purposes, to train the target model $f'_{B}$. Once the target models $f'_{T}$ and $f'_{B}$ finish training on the anonymized version of the train set, they are evaluated on the test sets ($f^{*}_{A}(X^{e}), Y^{e}_{T}$) and ($f^{*}_{A}(X^{e}), Y^{e}_{B}$), respectively, where $e$ denotes the evaluation/test set. Test-set performance of the action classifier is denoted as $A^{1}_{T}$ (Top-1 accuracy) or $A^{2}_{T}$ (classwise-mAP), and the performance of the privacy classifier is denoted as $A^{1}_{B}$ (classwise-mAP) or $A^{2}_{B}$ (classwise-$F_{1}$). Detailed figures explaining the different training and evaluation protocols are provided in Supp. Sec. G.
100
+
101
+ In practice, a video dataset of trainable scale with both action and privacy labels does not exist. The authors of [@wu_tpami] remedy the supervised training process with a cross-dataset training and/or evaluation protocol. Two different datasets were utilized in [@wu_tpami]: an action-annotated dataset ($X^{t}_{action}, Y^{t}_{T}$) to optimize $f_{A}$ and $f_{T}$, and a privacy-annotated dataset ($X^{t}_{privacy}, Y^{t}_{B}$) to optimize $f_{A}$ and $f_{B}$. Again, note that in this protocol our self-supervised framework does not utilize $Y^{t}_{B}$. After learning $f_{A}$ through the different train sets, it is frozen and called $f^{*}_{A}$. A new action classifier $f'_{T}$ is trained on the anonymized version of the action-annotated dataset ($f^{*}_{A}(X^{t}_{action}), Y^{t}_{T}$), and a new privacy classifier $f'_{B}$ is trained on the anonymized version of the privacy-annotated dataset ($f^{*}_{A}(X^{t}_{privacy}), Y^{t}_{B}$). Once the target models $f'_{T}$ and $f'_{B}$ finish training on the anonymized versions of the train sets, they are evaluated on the test sets ($f^{*}_{A}(X^{e}_{action}), Y^{e}_{T}$) and ($f^{*}_{A}(X^{e}_{privacy}), Y^{e}_{B}$), respectively.
102
+
103
+ []{#sec:novelprotocols label="sec:novelprotocols"} In the two prior protocols discussed above, the same training set $X^{t}$ ($X^{t}_{action}$ and $X^{t}_{privacy}$) is used both for the target models $f'_{T}$, $f'_{B}$ and for learning the anonymization function $f_{A}$. However, a learned anonymization function $f^{*}_{A}$ is expected to generalize to any action or privacy attributes. To evaluate generalization to novel actions, an anonymized version of a novel action set, $f^{*}_{A}(X^{nt}_{action})$ (where $nt$ denotes novel training), such that $Y^{nt}_{T}\cap Y^{t}_{T}= \phi$, is used to train the target action model $f'_{T}$, and its performance is measured on the anonymized test set of the novel action set, $f^{*}_{A}(X^{ne}_{action})$ (where $ne$ denotes novel evaluation). For privacy generalization evaluation, a novel privacy set $f^{*}_{A}(X^{nt}_{privacy})$ (s.t. $Y^{nt}_{B}\cap Y^{t}_{B}= \phi$) is used to train the privacy target model $f'_{B}$, and its performance is measured on the novel privacy test set $f^{*}_{A}(X^{ne}_{privacy})$. Please note that the novel privacy attribute protocol may not be referred to as a *transfer protocol* for methods which do not use privacy attributes $Y^{t}_{B}$ in learning $f_{A}$.
2205.05071/main_diagram/main_diagram.drawio ADDED
The diff for this file is too large to render. See raw diff
 
2205.05071/paper_text/intro_method.md ADDED
@@ -0,0 +1,53 @@
1
+ # Introduction
2
+
3
+ As Artificial Intelligence (AI), and specifically Natural Language Processing (NLP), scale up to require more computational resources and thereby more energy, there is an increasing focus on efficiency and sustainability [@strubell-etal-2019-energy; @Schwartz:2020]. For example, training a single BERT base model [@devlin-etal-2019-bert] requires as much energy as a trans-American flight [@strubell-etal-2019-energy]. While newer models are arguably more efficient [@fedus2021switch; @borgeaud2022improving; @so2022primer], they are also an order of magnitude larger, raising environmental concerns [@bender2021dangers]. The problem will only worsen with time, as compute requirements double every 10 months [@sevilla2022compute].
4
+
5
+ ![Example usage of our proposed climate performance model card (§[5](#sec:model_cards){reference-type="ref" reference="sec:model_cards"}), on the Hugging Face Hub.](model_card_climatebert){#fig:example_model_card width="\\columnwidth"}
6
+
7
+ This problem has been recognized by the NLP community, and a group of NLP researchers has recently proposed a policy document[^2] of recommendations for efficient NLP, aiming to minimize the greenhouse gas (GHG) emissions[^3] resulting from experiments done as part of the research. This proposal is part of a research stream aiming towards *Green NLP* and *Green AI* [@Schwartz:2020], which refers to "AI research that yields novel results while taking into account the computational cost, encouraging a reduction in resources spent."
8
+
9
+ While the branding of NLP and AI research as *green* has raised some awareness of the environmental impact, the large majority of NLP researchers are still not aware of the environmental impact resulting from training and running large computational models. This also explains why a research stream in a similar direction (see §[2.1](#sec:automating){reference-type="ref" reference="sec:automating"}), in which software tools are proposed to measure the carbon footprint of model training [@lacoste2019quantifying; @henderson2020towards; @anthony2020carbontracker; @lottick2019energy], has not been adopted by the community to a large extent (see §[3](#sec:survey){reference-type="ref" reference="sec:survey"}). However, we claim that climate awareness is essential enough to be promoted in mainstream NLP (rather than only as a niche field) and that positive impact must be an inherent part of the discussion [@rolnick2019tackling; @stede-patz-2021-climate]. Ideally, environmental impact should always be taken into consideration when deciding which experiments to carry out.
10
+
11
+ We aim to simplify climate performance reporting in NLP while at the same time increasing awareness to its intricacies. Our contributions are:
12
+
13
+ - We conduct a survey of environmental impact statements in NLP literature published in the past six years (§[3](#sec:survey){reference-type="ref" reference="sec:survey"}). This survey is conducted across five dimensions that directly influence the environmental impact.
14
+
15
+ - We delineate the different notions of "efficiency" common in the literature, proposing a taxonomy to facilitate transparent reporting, and identify ten simple dimensions across which researchers can describe the environmental impact resulting from their research (§[4](#sec:best_practices){reference-type="ref" reference="sec:best_practices"}).
16
+
17
+ - We propose a climate performance model card (§[5](#sec:model_cards){reference-type="ref" reference="sec:model_cards"}) with the main purpose of being practically usable with only limited information about experiments and the underlying computer hardware (see Figure [1](#fig:example_model_card){reference-type="ref" reference="fig:example_model_card"}).
18
+
19
+ Several tools automate measurement and reporting of energy usage and emissions in ML. @lacoste2019quantifying introduced a simple online calculator[^4] to estimate the amount of carbon emissions produced by training ML models. It can estimate the carbon footprint of GPU compute by manually specifying hardware type, hours used, cloud provider, and region. @henderson2020towards presented a Python package[^5] for consistent, easy, and more accurate reporting of energy, compute, and carbon impacts of ML systems by estimating them and generating standardized "Carbon Impact Statements." @anthony2020carbontracker proposed a Python package[^6] that also has predictive capabilities, and allows proactive and intervention-driven reduction of carbon emissions. Model training can be stopped, at the user's discretion, if the predicted environmental cost is exceeded. @victor_schmidt_2022_6369324 actively maintain a Python package[^7] that, besides estimating impact and generating reports, shows developers how they can lessen emissions by optimizing their code or by using cloud infrastructure in geographical regions with renewable energy sources. @bannour-etal-2021-evaluating surveyed and evaluated these tools and others for an NLP task, finding substantial variation in the reported measures due to different assumptions they make. In summary, automated tools facilitate reporting, but they do not substitute awareness and should not be trusted blindly.
20
+
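+ As an illustration of how lightweight such instrumentation can be, the package of @victor_schmidt_2022_6369324 (codecarbon) can wrap an experiment in a few lines; the project name and the training routine below are placeholders:
+
+ ```python
+ from codecarbon import EmissionsTracker
+
+ def train_model():
+     sum(i * i for i in range(10_000_000))  # stand-in for a real training loop
+
+ tracker = EmissionsTracker(project_name="my-nlp-experiment")
+ tracker.start()
+ try:
+     train_model()
+ finally:
+     emissions_kg = tracker.stop()  # estimated emissions in kg CO2-equivalent
+ print(f"Estimated emissions: {emissions_kg:.6f} kg CO2eq")
+ ```
+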
21
+ While branding NLP and AI research as *green* increases awareness of the environmental impact, there is a risk that the current framing, which exclusively addresses efficiency, will be perceived as the solution to the problem. Of course, we attribute benevolent motives to the authors of the proposed policy document. Nevertheless, we would like to avoid a situation analogous to a common phenomenon in the financial field, where companies brand themselves as *green* or *sustainable* for branding or financial reasons, without implementing proportional measures in practice to mitigate the negative impact on the environment [@delmas2011drivers]. This malpractice is analogous to *greenwashing*. While this is a general term, one aspect of greenwashing is "a claim suggesting that a product is green based on a narrow set of attributes without attention to other important environmental issues" [@choice2010sins].[^8] Our motivation is in line with the EU Commission's initiative to "require companies to substantiate claims they make about the environmental footprint of their products/services by using standard methods for quantifying them."[^9] While @Schwartz:2020 certainly do not argue that efficiency is *sufficient* for sustainability, this notion, which is potentially implied by the *green* branding, is misleading and even harmful: regardless of the extent of reduction, resources are still consumed, and GHGs are still emitted, among other negative effects. The efficiency mindset aims, at best, to prolong the duration of this situation. However, scaling up the performance of AI to satisfy the increasing demands from consumers risks ignoring the externalities incurred. Concepts such as reciprocity with the environment, which are central in some indigenous worldviews [@kimmerer2013braiding], are absent from the discourse.
22
+
23
+ A common perception is that *carbon neutrality* can be achieved by compensating for emissions through financial contributions, a practice referred to as *carbon offsets*. This approach is problematic and controversial: the level of carbon prices required to achieve climate goals is highly debated [@https://doi.org/10.1002/wcc.207]. The Intergovernmental Panel on Climate Change (IPCC) and various international organizations like the International Energy Agency (IEA) clearly state that mitigation activities are essential. Compensation activities will be necessary for hard-to-abate sectors, once all other technological solutions have been implemented, and where mitigation is not (yet) feasible.[^10] Moreover, economic dynamic efficiency requires investments in decarbonization technologies to keep the climate targets within reach. Compensation activities, especially in the afforestation area, delay the needed investments. This delay might exacerbate the likelihood of crossing climate tipping points and/or lead to a disorderly transition to a decarbonized economy [@esrb2016].
24
+
25
+ The issue of environmental impact is more general and not limited to NLP, but relevant to the entire field of AI: @Schwartz:2020 surveyed papers from ACL, NeurIPS, and CVPR. They noted whether authors claim their main contribution to be an improvement to accuracy or some related measure, an improvement to efficiency, both, or other. In all the conferences they considered, a large majority of the papers target accuracy. However, we claim that the issue is more complex, and it is not sufficient to consider only the "main contribution." Every paper should ideally have a positive impact or provide sufficient information to discuss meaningful options to reduce and mitigate negative impacts.
26
+
27
+ <figure id="fig:survey_proportions" data-latex-placement="t">
28
+ <img src="survey_proportions" />
29
+ <figcaption>Development of proportions of deep-learning-related *ACL papers discussing <em>public</em> model weights, <em>duration</em> of model training or optimization, <em>energy</em> consumption, <em>location</em> where computations were performed, and <em>emission</em> of GHG. While climate awareness is on the rise, it is still low overall. The numbers were calculated by counting pattern matches for papers in the ACL Anthology.</figcaption>
30
+ </figure>
31
+
32
+ We analyze the statistics of papers in \*ACL venues from 2016--2022 by downloading them from the ACL Anthology.[^11] However, instead of focusing on the main contribution, we look for any discussions on climate-related issues. We identify five dimensions in our study sample and create a regular expression pattern to match text for each (see Appendix [9](#sec:patterns){reference-type="ref" reference="sec:patterns"}). These dimensions are public model weights, duration of model training or optimization, energy consumption, the location where computations are performed, and GHG emissions. If the pattern matches the text of a paper at least once,[^12] we consider that paper as discussing the corresponding category. We derive the proportions of papers by dividing the number of papers discussing a category by the number of deep learning-related papers. We only consider deep learning-related papers, as for these papers, climate-related issues are of much higher relevance than for those using other approaches [@strubell-etal-2019-energy].
33
+
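+ The counting procedure itself is straightforward; the sketch below illustrates it with simplified stand-in patterns (the actual patterns are listed in Appendix [9](#sec:patterns){reference-type="ref" reference="sec:patterns"}):
+
+ ```python
+ import re
+
+ # Illustrative, simplified patterns -- not the ones used in our survey.
+ PATTERNS = {
+     "public": re.compile(r"(model weights|checkpoints?).{0,40}(released|available)", re.I),
+     "duration": re.compile(r"train(ed|ing).{0,30}(hours?|days?|GPU[- ]hours?)", re.I),
+     "energy": re.compile(r"energy consumption|kWh", re.I),
+     "location": re.compile(r"data ?cent(er|re)|cloud region", re.I),
+     "emission": re.compile(r"(CO2|carbon|GHG) emissions?", re.I),
+ }
+
+ def categories(paper_text):
+     """Dimensions a paper is counted as discussing (>= 1 match each)."""
+     return {name for name, pat in PATTERNS.items() if pat.search(paper_text)}
+
+ def proportions(papers):
+     """Fraction of (deep-learning-related) papers discussing each dimension."""
+     counts = {name: 0 for name in PATTERNS}
+     for text in papers:
+         for name in categories(text):
+             counts[name] += 1
+     return {name: c / max(len(papers), 1) for name, c in counts.items()}
+ ```
+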
34
+ Figure [2](#fig:survey_proportions){reference-type="ref" reference="fig:survey_proportions"} shows our findings. In general, researchers discuss climate-related issues more and more in their work. For instance, the proportion of papers that publish their model weights has almost quadrupled from about 1% in 2017 to more than 4% in 2022. We also find an increase in the proportion of papers that provide information on emissions or energy consumption. Nevertheless, the proportion for these categories remains low.
35
+
36
+ ::: {#tab:sample_proportions}
37
+ ---------------- -------- ---------- -------- ---------- ----------
38
+ Dimension public duration energy location emission
39
+ Proportion (%) 13 28 0 3 0
40
+ ---------------- -------- ---------- -------- ---------- ----------
41
+
42
+ : Proportions along dimensions from Figure [2](#fig:survey_proportions){reference-type="ref" reference="fig:survey_proportions"} in a manual annotated sample from EMNLP 2021.
43
+ :::
44
+
45
+ To complement our automatic pattern-based search approach, we also manually annotate a random sample of 100 papers from EMNLP 2021[^13] for the same five dimensions as before. Table [1](#tab:sample_proportions){reference-type="ref" reference="tab:sample_proportions"} shows the proportions. Borderline cases are counted as "reported" for an optimistic estimate. This leads to the proportions of papers publishing model weights (13%) and the duration of model training or optimization (28%) being much higher than with our automatic approach, which cannot judge borderline cases and is thus more restrictive. Still, these proportions are at a low level. The proportions of papers reporting on the location, energy consumed, and GHG emitted are in line with the results from our pattern-based search.
46
+
47
+ We examine article contents to ensure precision and to elaborate on existing practices. We review papers reporting on at least one dimension according to our pattern-based search or our manual analysis. Interestingly, many papers provide information in the context of reproducibility, publishing code but not necessarily model weights and reporting computation time only for specific steps.
48
+
49
+ As examples for specific papers that go beyond what is usually expected in terms of reporting, @anderson-gomez-rodriguez-2021-modest evaluate both accuracy and efficiency in dependency parsers, finding that different approaches are preferable depending on whether accuracy, training time or inference time are prioritized. @lakim-etal-2022-holistic provide a detailed holistic assessment of the carbon footprint of an Arabic language model, considering the entire project, including data storage, researcher travel, training and deployment.
50
+
51
+ Our findings highlight the need to raise awareness of climate-related issues further and find a simple but effective way to report them transparently. Besides awareness and facilitation, incentives to address these issues could be a complementary approach. However, in the rest of this paper we focus on the former "intrinsic" motivation factors, leaving "extrinsic" motivation factors to future work.
52
+
53
+ Efficiency (alongside accuracy) has been one of the main objectives in NLP (and computer science in general) since long before its environmental aspects were widely considered. In general, it refers to the amount of resources consumed (input) in order to achieve a given goal, such as a specific computation or accuracy in a task (output). Different definitions of efficiency correspond to different concepts of input and output. It is crucial to (1) understand the different concepts, (2) be aware of their differences and consequently their climate impact, and (3) converge towards a set of efficiency measures that will be applied for comparable climate performance evaluation in NLP research.
2205.11867/main_diagram/main_diagram.drawio ADDED
@@ -0,0 +1 @@
1
+ <mxfile host="app.diagrams.net" modified="2021-11-15T11:33:39.817Z" agent="5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/95.0.4638.69 Safari/537.36" etag="iQJwuw8Wym8cZA02EZ1p" version="15.7.3" type="device"><diagram id="VimwRw-csCSIeoiek9rE" name="Page-1">7Vttc6o4FP41Tj9tB0JB/Vis7e29uzO70525n6MckRaJDeDL/vpNICgksb5UxE6xM0UOcELyPE9ezokdazBbPVE8n/5FPAg7yPBWHeuhg1DP6rL/3LDODbZj5gafBl5uKhlegv9AGA1hTQMP4sqNCSFhEsyrxjGJIhgnFRumlCyrt01IWC11jn1QDC9jHKrW34GXTEW1UHdr/wGBPy1KNp1+fmWGi5tFTeIp9siyZLKGHWtACUnyb7PVAELedkW75M897ri6eTEKUXLIAz+6z+6/70+JNZz96XffJw//dPEftpO7WeAwFTUWb5usiyaAyLvnLcnOIhIxoztNZiE7M9lXStLIA16Ewc4mJEoEhmZxPiAhoZkny8k+3B6EYck+sfkfs8cJJW+ge0KtrWgA8CoAiro/AZlBQtfshuUWNltAMS0hVtgohDgJFlXYsWCPv3G3KeFvErA3QYYgel+4ETRHPaPqISYpHYN4qIzSx34cyU2CqQ+J4oZ9KdV5a8oocAwdetdAB8+Gnneno0MPjawvQQfLPhMfZEcXJsSdpSGEEyYCzKzjLdBx3lPel7kcwW5/ZBhlk+Pz43A1B3pbeBjRwl5Y2Dvmbqvmj4pyxj0YTbRF0R1ubxVOV2m7nAYJvMzxmF9dsmGtyvEyWz2Y4DQro8pVoYxMAKIYSxVAD7l76LwAmsDqQ0IXREFVopjFCFoivKMhvGXs5naFTUdTR9eXfBnq3O70+w2o022aOv0aqfMNAUUHAorqArQYxEqACpFdR+M7g97QfTxP4zvVtrebbnpTafq2Gz4fcfauT04mjnnXcC9so5Y5X5I5G0o0xRx10XDPGu+emZ5vZjwYRINFEPlZLIf9+7Um2TGCFYd6CfDG0DN+w01W1Gsac7PHb4nZG/IDr0QMwJzsG0PKiAlkdsKlgyfEIwhdPH7zM7/SU/lVQj2g0pV9RCqxQxPKOZoCpqNfJ5YoYCINB+T15PnmcAoH3JwDDp7xtohGMT88s3vC4A0KGpxxSrAP7W3z1zoZk5ExNerUQlPflECVJyrkmT0N0SvhNQiSC8KhDQzVrBLzUJnUhoUaf3U3WCwg6xRZ9dhxBBDxDnMKlJkHWV+46VB9iNIggpBX9RVwSNL4GwipfyB4dl3gdVUhWbo+LsMoTumcBjEDgNWApPwd8QKim+vSmOX6FHsBVO6dZJ96UNQtkLQo1jZbOS7mTeZMhx/FvKVYNobeZKxgsp28Wi5z/hjwV86e39nEVxLZNpF+onFsYFvmQVfyU3NgW40w3UdJMA7mDamxCEDoUiaFOtE55GfsnSdeNDxhqpORz6jPw/E0O5GxyHoxcMZaKYolqyxFCaMHx3Xsr5B82iSb5FTA0cknOTAs06BmkZrHZafbrnnH7PazfbN54c7ZVCdWV9MpXDsJFPHL2J0sfjmFXTcJ2nnZZ8SvrHpOFf+ltxyYuuRfK/6TxK/Mqk8Vv7J/pWYSFMW34j8Id6WvlhfLh+KujB53F8Zdl61sxX/YCLArgXj0CCCzSR5K6iYBasX/GfH3zyR+ZFwYd91mw1b8J4lfCZeeKn506RFATRw2Gpl76PZdY9dm5m1kbl+ieCd/Dg/dKWNzIc+mYndIk0jcQCVv8/hJ1o3g11hkVZdbvOguDKRJT10dPNcjLxk/1PQGaqQGQFqUFJUduNepvk5QDVe0KClDVdNpJludVbyIfPz3GpVq3I/MTre/Zsyng9ufhFrD/wE=</diagram></mxfile>
2205.11867/main_diagram/main_diagram.pdf ADDED
Binary file (43 kB). View file
 
2205.11867/paper_text/intro_method.md ADDED
@@ -0,0 +1,223 @@
1
+ # Introduction
2
+
3
+ Text-based communication has become indispensable as society accelerates online. In natural language processing, communication between humans and machines has attracted attention, and the development of dialogue systems has been a hot topic. Through the invention of Transformer [@NIPS2017_3f5ee243] and the success of transfer learning (e.g., @radford2018improving [@devlin-etal-2019-bert]), the performance of natural language understanding models and dialogue systems continues to improve. In recent years, there have been studies toward building open-domain neural chatbots that can generate a human-like response [@zhou-etal-2020-design; @adiwardana2020humanlike; @roller-etal-2021-recipes].
4
+
5
+ One of the keys to building more human-like chatbots is to generate a response that takes into account the emotion of the interlocutor. A human would recognize the emotion of the interlocutor and respond with an appropriate emotion, such as empathy and comfort, or give a response that promotes a positive emotion in the interlocutor. Accordingly, developing a chatbot with such a human-like ability [@rashkin-etal-2019-towards; @Lubis_Sakti_Yoshino_Nakamura_2018; @8649596] is essential. Although several dialogue corpora with emotion annotation have been proposed, an utterance is annotated only with a speaker's emotion [@li-etal-2017-dailydialog; @hsu-etal-2018-emotionlines] or a dialogue as a whole is annotated [@rashkin-etal-2019-towards], none of which is appropriate for enabling the above ability.
6
+
7
+ In this paper, we propose a method to build an emotion-annotated multi-turn dialogue corpus, which is necessary for developing a dialogue system that can recognize the emotion of an interlocutor and generate a response with an appropriate emotion. We annotate each utterance in a dialogue with an **expressed emotion**, which a speaker put into the utterance, and an **experienced emotion**, which a listener felt when listening to the utterance.
8
+
9
+ <figure id="fig:ex" data-latex-placement="t">
10
+ <img src="ex.png" />
11
+ <figcaption>An example dialogue with expressed and experienced emotions.</figcaption>
12
+ </figure>
13
+
14
+ To construct a multi-turn dialogue corpus annotated with these emotions, we collect dialogues from Twitter and crowdsource their emotion annotation. As a dialogue corpus, we extract tweet sequences where two people speak alternately. For the emotion annotation, we adopt Plutchik's wheel of emotions [@plutchik1980general] as emotion labels and ask crowdworkers whether an utterance indicates each emotion label for the expressed and experienced emotion categories. Each utterance is allowed to have multiple emotion labels, and each label has an intensity, strong or weak, according to the number of crowdworkers' votes. We build a Japanese dialogue corpus as a testbed in this paper, but our proposed method can be applied to any language.
15
+
16
+ Using the above method, we constructed a Japanese emotion-tagged dialogue corpus consisting of 3,828 dialogues and 13,806 utterances.[^1] Statistical analysis of the constructed corpus revealed the characteristics of words for each emotion and the relationship between expressed and experienced emotions. We further conducted experiments to recognize expressed and experienced emotions using BERT [@devlin-etal-2019-bert]. We defined the task of emotion recognition as regression and evaluated BERT's performance using correlation coefficients. The experimental results showed that it was more difficult to infer experienced emotions than expressed emotions, and that multi-task learning of both emotion categories improved the overall performance of emotion recognition. From these results, we can see that expressed and experienced emotions are different, and that it is meaningful to annotate both. We expect that the constructed corpus will facilitate the study on emotion recognition in dialogue and emotion-aware response generation.
17
+
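+ A sketch of the multi-task regression model used in this setting could look as follows; the hypothetical class name, the pretrained checkpoint, and the head design are illustrative assumptions, not necessarily our exact configuration:
+
+ ```python
+ import torch.nn as nn
+ from transformers import AutoModel
+
+ class EmotionRegressor(nn.Module):
+     """Predicts intensities for 8 emotions x 2 categories (expressed and
+     experienced) from an utterance encoding -- a multi-task setup."""
+     def __init__(self, name="cl-tohoku/bert-base-japanese-whole-word-masking"):
+         super().__init__()
+         self.bert = AutoModel.from_pretrained(name)
+         hidden = self.bert.config.hidden_size
+         self.expressed = nn.Linear(hidden, 8)
+         self.experienced = nn.Linear(hidden, 8)
+
+     def forward(self, input_ids, attention_mask):
+         h = self.bert(input_ids=input_ids,
+                       attention_mask=attention_mask).last_hidden_state[:, 0]
+         return self.expressed(h), self.experienced(h)
+
+ # Training would minimize a regression loss (e.g. MSE) against the vote-based
+ # intensities, summing the two heads' losses for multi-task learning.
+ ```
+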
18
+ # Method
19
+
20
+ ::: {#tab:stats_len}
21
+ **Length** **\# Dialogues** **\# Utterances**
22
+ ------------ ------------------ -------------------
23
+ 2 1,330 2,660
24
+ 3 1,071 3,213
25
+ 4 509 2,036
26
+ 5 310 1,550
27
+ 6 225 1,350
28
+ 7 158 1,106
29
+ 8 134 1,072
30
+ 9 91 819
31
+ 2-9 3,828 13,806
32
+
33
+ : The statistics of dialogues and utterances.
34
+ :::
35
+
36
+ ::: {#tab:stats_emo}
37
+ +:-------------+-----------:+---------:+-----------:+---------:+
38
+ | **Label** | **Expressed** | **Experienced** |
39
+ | +------------+----------+------------+----------+
40
+ | | **Strong** | **Weak** | **Strong** | **Weak** |
41
+ +--------------+------------+----------+------------+----------+
42
+ | Anger | 430 | 1,349 | 124 | 870 |
43
+ +--------------+------------+----------+------------+----------+
44
+ | Anticipation | 1,906 | 4,229 | 1,215 | 4,068 |
45
+ +--------------+------------+----------+------------+----------+
46
+ | Joy | 1,629 | 3,672 | 1,553 | 4,549 |
47
+ +--------------+------------+----------+------------+----------+
48
+ | Trust | 247 | 1,732 | 520 | 3,455 |
49
+ +--------------+------------+----------+------------+----------+
50
+ | Fear | 252 | 942 | 123 | 846 |
51
+ +--------------+------------+----------+------------+----------+
52
+ | Surprise | 602 | 2,018 | 434 | 2,798 |
53
+ +--------------+------------+----------+------------+----------+
54
+ | Sadness | 1,227 | 2,936 | 889 | 3,037 |
55
+ +--------------+------------+----------+------------+----------+
56
+ | Disgust | 476 | 1,979 | 186 | 1,535 |
57
+ +--------------+------------+----------+------------+----------+
58
+ | Any | 6,371 | 12,215 | 4,705 | 12,515 |
59
+ +--------------+------------+----------+------------+----------+
60
+
61
+ : The statistics of utterances for each emotion label.
62
+ :::
63
+
64
+ <figure id="fig:yahoo" data-latex-placement="t">
65
+ <img src="yahoo_en.png" />
66
+ <figcaption>An example of the crowdsourced task. Checkboxes allow crowdworkers to select multiple emotions for an utterance.</figcaption>
67
+ </figure>
68
+
69
+ ::: table*
70
+ +-------------------------------------------------------------------+-----------------------------+-----------------------------+
71
+ | **Utterance** | **Expressed** | **Experienced** |
72
+ +:==================================================================+:============================+:============================+
73
+ | A1: | {**Anticipation**, Joy} | {**Anticipation**} |
74
+ | | | |
75
+ | ::: CJK | | |
76
+ | UTF8ipxm来週、車で京都行く 普通に観光してきます | | |
77
+ | ::: | | |
78
+ | | | |
79
+ | (I'm driving to Kyoto next week. We'll just do some sightseeing.) | | |
80
+ +-------------------------------------------------------------------+-----------------------------+-----------------------------+
81
+ | B1: | {Anticipation} | {Anticipation, **Joy**} |
82
+ | | | |
83
+ | ::: CJK | | |
84
+ | UTF8ipxmいいなぁ、京都 | | |
85
+ | ::: | | |
86
+ | | | |
87
+ | (I like Kyoto.) | | |
88
+ +-------------------------------------------------------------------+-----------------------------+-----------------------------+
89
+ | A2: | {**Anticipation**, **Joy**} | {Anticipation, **Joy**} |
90
+ | | | |
91
+ | ::: CJK | | |
92
+ | UTF8ipxm楽しんできます | | |
93
+ | ::: | | |
94
+ | | | |
95
+ | (I'll enjoy it.) | | |
96
+ +-------------------------------------------------------------------+-----------------------------+-----------------------------+
97
+ | B2: | {Anticipation} | {**Anticipation**, **Joy**} |
98
+ | | | |
99
+ | ::: CJK | | |
100
+ | UTF8ipxm行ったことないから純粋に羨ましい | | |
101
+ | ::: | | |
102
+ | | | |
103
+ | (I've never been there, so I'm genuinely jealous.) | | |
104
+ +-------------------------------------------------------------------+-----------------------------+-----------------------------+
105
+ | A3: | {**Surprise**} | {Joy, Surprise} |
106
+ | | | |
107
+ | ::: CJK | | |
108
+ | UTF8ipxmないんや意外 | | |
109
+ | ::: | | |
110
+ | | | |
111
+ | (I'm surprised you haven't.) | | |
112
+ +-------------------------------------------------------------------+-----------------------------+-----------------------------+
113
+ :::
114
+
115
+ ::: table*
116
+ +--------------+----------------------------------+------------------------------------+
117
+ | **Label** | **Expressed** | **Experienced** |
118
+ +:=============+:=================================+:===================================+
119
+ | Anger | ::: CJK | ::: CJK |
120
+ | | UTF8ipxm糞, せる, マジだ | UTF8ipxm糞, うるさい, 居る |
121
+ | | ::: | ::: |
122
+ | | | |
123
+ | | (shit, force, serious) | (shit, noisy, exist) |
124
+ +--------------+----------------------------------+------------------------------------+
125
+ | Anticipation | ::: CJK | ::: CJK |
126
+ | | UTF8ipxm教える, 願う, 待つ | UTF8ipxm待つ, 楽しみだ, 強い |
127
+ | | ::: | ::: |
128
+ | | | |
129
+ | | (teach, hope, wait) | (wait, looking forward to, strong) |
130
+ +--------------+----------------------------------+------------------------------------+
131
+ | Joy | ::: CJK | ::: CJK |
132
+ | | UTF8ipxm楽しい, 嬉しい, おもろい | UTF8ipxm楽しい, 嬉しい, おもろい |
133
+ | | ::: | ::: |
134
+ | | | |
135
+ | | (joyful, glad, funny) | (joyful, glad, funny) |
136
+ +--------------+----------------------------------+------------------------------------+
137
+ | Trust | ::: CJK | ::: CJK |
138
+ | | UTF8ipxm全然, 大丈夫だ, ちゃんと | UTF8ipxmやすみ, 教える, 大事だ |
139
+ | | ::: | ::: |
140
+ | | | |
141
+ | | (at all, all right, properly) | (rest, teach, important) |
142
+ +--------------+----------------------------------+------------------------------------+
143
+ | Fear | ::: CJK | ::: CJK |
144
+ | | UTF8ipxm怖い, やばい, どう | UTF8ipxm怖い, やばい, 危険だ |
145
+ | | ::: | ::: |
146
+ | | | |
147
+ | | (afraid, serious, how) | (afraid, serious, dangerous) |
148
+ +--------------+----------------------------------+------------------------------------+
149
+ | Surprise | ::: CJK | ::: CJK |
150
+ | | UTF8ipxmやばい, なんで, ? | UTF8ipxm居る, ビックリ, 年 |
151
+ | | ::: | ::: |
152
+ | | | |
153
+ | | (serious, why, ?) | (exist, surprise, year) |
154
+ +--------------+----------------------------------+------------------------------------+
155
+ | Sadness | ::: CJK | ::: CJK |
156
+ | | UTF8ipxm泣く, 痛い, 悲しい | UTF8ipxm泣く, 辛い, 痛い |
157
+ | | ::: | ::: |
158
+ | | | |
159
+ | | (cry, hurt, sad) | (cry, hard, hurt) |
160
+ +--------------+----------------------------------+------------------------------------+
161
+ | Disgust | ::: CJK | ::: CJK |
162
+ | | UTF8ipxm悪い, 嫌いだ, 嫌だ | UTF8ipxm悪い, 気持ち, 嫌だ |
163
+ | | ::: | ::: |
164
+ | | | |
165
+ | | (bad, hate, dislike) | (bad, feeling, dislike) |
166
+ +--------------+----------------------------------+------------------------------------+
167
+ :::
168
+
169
+ <figure id="fig:mat" data-latex-placement="t">
170
+ <figure id="fig:mat_a">
171
+ <img src="mat_a.png" />
172
+ <figcaption>Expressed and experienced emotions (for a certain utterance).</figcaption>
173
+ </figure>
174
+ <figure id="fig:mat_b">
175
+ <img src="mat_b.png" />
176
+ <figcaption>Experienced and next expressed emotions (for a certain person).</figcaption>
177
+ </figure>
178
+ <figcaption>The confusion matrices of the relationship between expressed and experienced emotions. In this analysis, we focus on only the strong labels. Note that the matrices’ elements are normalized in the row direction.</figcaption>
179
+ </figure>
180
+
181
+ <figure id="fig:mat_end" data-latex-placement="t">
182
+ <figure id="fig:mat_end_a">
183
+ <img src="mat_end_a.png" />
184
+ <figcaption>Expressed emotions at the beginning and end of dialogue.</figcaption>
185
+ </figure>
186
+ <figure id="fig:mat_end_b">
187
+ <img src="mat_end_b.png" />
188
+ <figcaption>Expressed emotions at the beginning and experienced emotions at the end of dialogue.</figcaption>
189
+ </figure>
190
+ <figcaption>The confusion matrices for the emotion labels at the beginning and end of dialogue. In this analysis, we consider only the emotions of a person who begins a dialogue. Note that the targets are limited to the dialogues containing six to nine utterances, and the elements are normalized in the row direction.</figcaption>
191
+ </figure>
192
+
193
+ We collect dialogue texts from Twitter by considering the interaction between tweets and their replies by two users as a dialogue. To improve the text quality, we exclude tweets that contain images or hashtags and set the maximum number of utterances included in a dialogue to nine. We also apply several filters: excluding dialogues that contain special symbols, emojis, repeated characters, and utterances that are too short. Note that we exclude emojis because they are relatively explicit emotional markers, and we intend to analyze emotions implied by ordinary textual expressions.
194
+
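+ A sketch of such filtering is given below; the regular expressions and length thresholds are illustrative, not our exact criteria:
+
+ ```python
+ import re
+
+ HASHTAG = re.compile(r"#\S+")
+ EMOJI = re.compile(r"[\U0001F300-\U0001FAFF\u2600-\u27BF]")  # rough emoji ranges
+ REPEAT = re.compile(r"(.)\1{4,}")  # the same character repeated 5+ times
+
+ def keep_dialogue(utterances, min_len=5, max_turns=9):
+     """Return True if a tweet-reply chain passes all text-quality filters."""
+     if not 2 <= len(utterances) <= max_turns:
+         return False
+     for u in utterances:
+         if len(u) < min_len:  # utterance too short
+             return False
+         if HASHTAG.search(u) or EMOJI.search(u) or REPEAT.search(u):
+             return False
+     return True
+ ```
+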
195
+ We collected Japanese dialogues using this method. The numbers of dialogues and utterances are shown in Table [1](#tab:stats_len){reference-type="ref" reference="tab:stats_len"}. We obtained 3,828 dialogues that correspond to 13,806 utterances in total. Regarding the length of dialogues, the number of dialogues tends to decrease as that of utterances per dialogue increases.
196
+
197
+ We adopt Plutchik's wheel of emotions [@plutchik1980general] as annotation labels.[^2] Specifically, our annotation labels consist of eight emotions: anger, anticipation, joy, trust, fear, surprise, sadness, and disgust. We annotate each utterance with two emotion categories: an expressed emotion, which is expressed by a speaker of the utterance, and an experienced emotion, which is experienced by a listener of the utterance. In other words, an utterance is annotated with both subjective and objective emotions, which is similar to EmoBank [@buechel-hahn-2017-emobank] for non-dialogue texts. By annotating expressed and experienced emotions, we can trace the changes in the emotion surrounding both an utterance and a participant in a dialogue.
198
+
199
+ As a crowdsourcing platform, we use Yahoo! Crowdsourcing.[^3] By showing the target utterance and its context, we ask seven workers whether the target utterance conveys each emotion label, for both the expressed and experienced emotion categories. For the expressed emotions, we ask which emotion a speaker expressed when saying the utterance. For the experienced emotions, we ask which emotion a listener experienced when hearing the utterance. Workers are allowed to select multiple emotion labels or none of them. The interface of the crowdsourcing task for expressed emotions is shown in Figure [2](#fig:yahoo){reference-type="ref" reference="fig:yahoo"}.
200
+
201
+ Because views of expressed and experienced emotions can vary among annotators, we employ multiple workers per utterance and aggregate their votes to obtain highly reliable annotations.[^4] We assign an intensity to each emotion according to the number of workers' votes: emotions selected by more than half of the workers are regarded as strong, and ones selected by more than a quarter are regarded as weak. Note that the set of strong emotions is a subset of the set of weak ones. We expect that providing the emotions with intensity enables us to handle their granularity.
202
+
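+ With seven workers per utterance, this aggregation reduces to simple vote thresholds, as the following sketch shows:
+
+ ```python
+ EMOTIONS = ["anger", "anticipation", "joy", "trust",
+             "fear", "surprise", "sadness", "disgust"]
+ N_WORKERS = 7
+
+ def aggregate(votes):
+     """Map per-emotion vote counts to 'strong' / 'weak' / None.
+     strong: more than half of the workers; weak: more than a quarter.
+     Every strong label is also a weak label by construction."""
+     out = {}
+     for emo in EMOTIONS:
+         v = votes.get(emo, 0)
+         if v > N_WORKERS / 2:    # > 3.5, i.e. 4+ of 7 votes
+             out[emo] = "strong"
+         elif v > N_WORKERS / 4:  # > 1.75, i.e. 2+ of 7 votes
+             out[emo] = "weak"
+         else:
+             out[emo] = None
+     return out
+
+ # aggregate({"joy": 5, "anticipation": 2}) -> joy: strong, anticipation: weak
+ ```
+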
203
+ We applied the above emotion annotation method to our dialogue corpus. The number of utterances for each emotion is shown in Table [2](#tab:stats_emo){reference-type="ref" reference="tab:stats_emo"}. For the expressed emotion, 46.15% and 88.48% of the utterances are tagged with at least one strong and weak emotion, respectively. For the experienced emotion, the percentages are 34.08% and 90.65%, respectively. Approximately 90% of the utterances are accompanied by one or more emotion labels, and thus our corpus is suitable for recognizing emotions in dialogues and analyzing their changes. In contrast, in DailyDialog [@li-etal-2017-dailydialog], for example, less than 20% of utterances are tagged with a specific emotion; hence it is difficult to analyze emotion changes using such corpora with a small amount of emotion annotation. In addition, we can see a bias among the emotion labels for both expressed and experienced emotions, with more instances of anticipation and joy and fewer instances of trust and fear. An example of a dialogue with the annotation is shown in Table [\[tab:example\]](#tab:example){reference-type="ref" reference="tab:example"}.
204
+
205
+ To investigate the characteristics of utterances with different emotions, we count words for each strong emotion label in our corpus. In this analysis, we identify words with the Japanese morphological analyzer Juman++ [@tolmachev-etal-2018-juman]. To exclude common words likely to appear for all emotions, we apply IDF filtering: words with an IDF less than half of the maximum are ignored.
206
+
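+ The filter can be sketched as follows (tokenization with Juman++ is abstracted away; the top-k extraction is done per strong emotion label):
+
+ ```python
+ import math
+ from collections import Counter
+
+ def top_words(corpus_tokens, label_tokens, k=3):
+     """corpus_tokens: tokenized utterances of the whole corpus;
+     label_tokens: tokens pooled from utterances with a given strong label."""
+     n_docs = len(corpus_tokens)
+     df = Counter()
+     for doc in corpus_tokens:
+         df.update(set(doc))
+     idf = {w: math.log(n_docs / df[w]) for w in df}
+     max_idf = max(idf.values())
+     # Ignore common words whose IDF is below half of the maximum IDF.
+     kept = Counter(t for t in label_tokens if idf.get(t, 0.0) >= max_idf / 2)
+     return [w for w, _ in kept.most_common(k)]
+ ```
+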
207
+ The top-3 words for each strong emotion label are shown in Table [\[tab:stats_word\]](#tab:stats_word){reference-type="ref" reference="tab:stats_word"}. The same words tend to appear in the two emotion categories for joy and sadness. In contrast, the frequent words in the two categories are different for anticipation, trust, and surprise.
208
+
209
+ We annotated utterances with the expressed and experienced emotions. Here, we focus on the relationship between these two emotion categories. Specifically, we investigate the following two relationships:
210
+
211
+ a. The expressed emotion and the experienced emotion for the same utterance (different persons).
212
+
213
+ b. The experienced emotion for an utterance and the expressed emotion for the next utterance (the same person).
214
+
215
+ The confusion matrices for the strong emotion labels are shown in Figure [5](#fig:mat){reference-type="ref" reference="fig:mat"}, where the elements are normalized in the row direction. First, the diagonal components of the two confusion matrices have large values, indicating that the same emotions are likely to occur both for the same utterance and for the same person. Figure [3](#fig:mat_a){reference-type="ref" reference="fig:mat_a"} shows that people are likely to experience joy for an utterance of anticipation, trust, and surprise in addition to the same emotion. People also tend to experience disgust and sadness for anger and disgust, respectively. Figure [4](#fig:mat_b){reference-type="ref" reference="fig:mat_b"} shows that after experiencing trust, people are more likely to express joy than trust. For an anger experience, people are more likely to express disgust than anger. Figures [3](#fig:mat_a){reference-type="ref" reference="fig:mat_a"} and [4](#fig:mat_b){reference-type="ref" reference="fig:mat_b"} reveal that the relationship of sadness is particularly different. For a certain utterance, sadness makes the other person feel sad in most cases, but for a certain person, anticipation in addition to sadness can be expressed after experiencing sadness. We speculate that when a person experiences sadness from the interlocutor, they respond with an utterance of anticipation to comfort them.
216
+
217
+ To analyze the emotion changes through a dialogue, we compare emotions at the beginning and end of a dialogue. In other words, we see how the emotions of a person who starts the dialogue change through the dialogue. In this analysis, we focus on the following two relationships:
218
+
219
+ a. The emotion expressed first and the emotion *expressed* last by the same person.
220
+
221
+ b. The emotion expressed first and the emotion *experienced* last by the same person.
222
+
223
+ The confusion matrices for the strong emotion labels are shown in Figure [8](#fig:mat_end){reference-type="ref" reference="fig:mat_end"}. The targets are limited to dialogues containing six to nine utterances to analyze the emotion changes in long dialogues. Figure [6](#fig:mat_end_a){reference-type="ref" reference="fig:mat_end_a"} shows that the speaker of the first utterance is likely to express anticipation and joy at the end, regardless of the initial emotion. A speaker who first expresses surprise can express sadness through the dialogue. Figure [7](#fig:mat_end_b){reference-type="ref" reference="fig:mat_end_b"} also shows that the first speaker can experience anticipation at the end of a dialogue. A person who first expresses anger or disgust tends to finally experience trust. From these two figures, we can see that a dialogue can lead a person who first expresses fear to end up with either a positive or a negative emotion.
2207.08012/main_diagram/main_diagram.drawio ADDED
@@ -0,0 +1 @@
1
+ <mxfile host="app.diagrams.net" modified="2021-05-27T22:06:51.994Z" agent="5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.105 Safari/537.36" etag="hq6_wHqbUxor7lS_Bi_r" version="14.6.11" type="google" pages="2"><diagram id="y2vWKO9JKKZSE6A0iRKv" name="Discriminative">7Vxde5u4Ev41uYQHffFx2SRNevakbfZkn7Npb/pgkG1aDF7AidNffySMMBIyxjYk3p71RWIGWYDmndHMOxIX6Gqxvs385fxjGtL4Alrh+gJdX0BoE8z+csHLRgCQU0lmWRRWsq3gIfpJK6FVSVdRSHOpYZGmcREtZWGQJgkNCknmZ1n6LDebprF81aU/oy3BQ+DHbemfUVjMN1KXWFv5BxrN5uLKwKrOLHzRuBLkcz9Mnxsi9P4CXWVpWmy+LdZXNOaDJ8Zl87ubHWfrG8toUvT5wfWHb/FqvXK+3MHn+9+//j57ij4ZCG66efLjVfXE1d0WL2IIaBK+4yPJjoLYz/MouECX82IRMwFgX9n1s5dHdmCZRBx+4Yfi4HotHb00j+5pFi1oQTMhXEfFo+iYff+y7ZcdbXviB6Kj9khUg5OnqyyoniJ8/Ovx8vlrQT/FyTrPF8albxikQpSfzWjRNUxVQxpKaKkG+pam7BGyF9bgeYsRAZF5Ax5CltHYL6InGWN+BdVZ3V19hfs0Yo8GrcqsoF31I6zKknvYPHf1oyYklH4QlPtBltLRZmBaHTE4+C+NZkveIN99v9gm0nUMIEGVfdn0KI4aQ7oVlXA+ANrEPgjaSZow4WXo53MaVgDUgvwgiFdoNizTshwJ0p7rdoL6+2qxrNwh4Md5kaU/ah+Eq+aq+fQyBf1woZ62IBqeiS1gomDYO9IY1I6AZ3rND0a9bGMw+JJR4AuOgW/TGYMTUXsCQnFfhOLzQiiQgQVVL9sboarbJyO5ayxfx30Nb433wz1LV0lYoptD63keFfRh6ZeYeWbRp4z4OtzibYN0waOW6y0kr9I4zbZ208bpNIpj0egCogkIwyn/ecquGBV8ZAg/nGV+GDEoX0cZCz+jNOE3Q/OicarRy7T8dFnBE80Kuu5ErZi8sT4IaIC6DkabqAa2gpomgiUvdrAO0Znr0KKI2v10SP3X0CH2vP06BK+pQ+wdokMwmA7ZwFrUtcqgqL8io0UjL4NYSOqMrBQp2h5AbQRiSW2EmAA0Y4WWFm2NIWJnLDvUqNCOOZwn7MuMf/ljM4dC66GIFqt4lYsW7IJ1o5bi96jaz5ebFHwarTk8dvrQluqB5SCPNtHC+5N9hYwKOIpi2ZRvukjSLbZrSVOfjilymqZKG+LBtdojnWnogj9zFPjxnT+h8X2aR5Vfm6RFkS64suJoxgUBGy2ePlzGvOWlH/yYleOu83ii03fVb4t0qVWMpMU6JLU2Z5b8ZhfrGeeLzCjNHTMK0iQ3Q7/wdyi9cS9s2sNEhy0/cEM0DAqAYEVE/OOa0GlBQOSXkkl7Zkd4eJL6xdTQpX8xuqUTlG1Tr7q9KOkBivJi74TpWzo/UN3P9bwoOG33jo8EvAnCBJe6n0ZM45nJJgcmLWEAb0pMsP859zaGn4SGz7GQpIsXgw3xjctDm5ssZXdqAOiay2TWBk8Thp1Tj11+tFhWcFY/dldkUD7vZZqFtBGeDDHvqJkENok079gtlLIJtYVSIRseo+5BPqrlDJJpWn6LAnMa+1yhJZHMHhNx0y5JZJaWk6OmlpbD05vENE0KkcFicVzdfiue4eiMktkf/JfXpAXAJuaGiPg9V1K/gU0Pe51xB7A10aOaNg6mf9hD/3vZ5Ipz2JK+G77M6uaAdWRYTXqU9BuSmA+TEHsf+6Hjp3eqcS99Udlmk73QctJVf2dCXrSyTNcxHSj30pe/AK7cl8HmVcUVDcRgAIUpwVDB9ggMBhL2PAD4D6l+NMovVumxGiC3ofe6IO9bUCHnhXK1oAKdIyk66ADZXOCpFN3QvDLqQdL0r/gpPvUYsI1W8csff96iT7cfbsl7+HF+8zX9/D0w+lY5zq3i56rFCaWL3iU/S+7o9JLf4ADtUfjYm7foY7uO2HMap8/B3M8KM18tFiym+zZdJRUZeHRa4VPMc8k+aYUfeiEZiFRUC1k2ZuFP49OmM0TC20Su6rkGixLRYYXZ4SbKrhDRMrHTdGbA3HCRrzh3or6Tp3Nmk6c30ORp7yuUDRQdEiLTt6BakzRqeIh1/I3CygoBzzkle7D/WqXihJGX2ek71gCQ5Xp7UvSyoHnO+RZoLRqs7qZP+TqdZC/zNoXC7+7LoBdRGPKfX2aU3aQ/KbviRlFphPVLLi/INe9rVaR5lWYP5PYgafk1bQFsNMdGelB0I9VOtgR670LmG9VOkGJ8xDEBPqPaSY8s7h+a9demWVXOQ+NYXpVWddp+5WFJ/R/ssd/UacvU6PCuAVj4GN8wmn93dL5BmcLvorygCdfMLzjFqnVvYEHThW+sFLjfYe+fZqVccOKzfMMM4nQVfmNDz/4XktMbfVnDCCsWXJmXMlB7pYnj6Xya6YxV0HbaTFS9LOEC3fxavo14PD2VVIBNjzTTcvzGhqRbgbcjQZlkv3QqAR3Fz4E2ZQJH0s3D9Ot/7ozbR+/nnz+S3358gP7V1Ng98YTR0/GZ4kgJ6F2a5wdlnrWsfBpZepYPKLrJV3XP1zQPsmhZ8ijbB2o0OGJEtGYmjZFKm8VxtMzp/syylTscO7vV63H2z25ihc7ws5sNgIll70pQvRKrGbZjk2iWbTXEg5tuj/BktA1WdSndtaQl/KaF0cWhVOlOPe0lNh1N0aXLzZ0Jr+kAS9oBIi++AK5+7c2hpKcD5cgM7+l3IA7UVnItefPgyRSoVr1DVB6H5/0lkwJ7TOp4I9Cx+38DI8AWlHCCoGOCRtAK8HGwt4G8zBY4SDI2RyH4RoL90NT/v9arG3r573v8/NlA8LePmXf3347obRs9+IWf88znF4ymAVLqkRjILs5tzdNjJT7aJQGgNcLc9kTyl2bFPJ2liR+/30oVBnTb5i4tlwLykftOi+KlWknIB1XvyORlbt2b6453PsNvnevtVk5SjW7GUC2nGXdb1xGLQ/2gSPsRcCduaJi6AQ0CXVg8cQkmLXL8DTY0YMfESuCCXLPN2Qlus2lxzlhq/Ydp2LWuB4rdXq/ANGhVM8TajGNXh4nYzrQsW47vcPde+WHTFc1C3S7ndCaBGlQWT6Dj1+lCrCxiZE7EAaPHZuoTAIDFI+y8U/UnA4dz+nihM547llOS57G2+3vwF8t4W1c6Z7+o3Z07gLPEwJa17bYpc+2KDzyAt+xaq/9/vifPhRNUlvWV8Cgk1A1xrfuWojVw2J1EiDrjjrdzaLY+6RY1Im8kJPSIVLfFrZYNl2FrNFmVgPgbWPgQClVWqRpA3c3UDlJd
jW2jsWy7xwspXiESQoKoFRs6Due6pHfC8IN7v2Bnk1ICra0+lUjmuFdqiZe07c003zZYciTwHbtilRC56gA80Iq6hoqOFBZF3tK0t7kLlFjq5NhIq/3zSB8cRzGaw9OHNzea3e+rezOj4dQvIo08QEIYgSbfTVJ/jk0+bLlbj+mvuUxCmcDHsi7cfZNInbxewbY6X+uxK+2Y+osoftkkHuwUSyJK5CLE47I5jZ8oDwlaZ9r5yefJdxoUxhU3OmayrSDlTd4oUkeYu2PS0wm4k8Ic/koIIpc6DIxMr12btt1aKr1SZCs+IMQpF+2KN4tuALh9Pyt6/z8=</diagram><diagram id="HzgYrlfDMHPCUnRDlXic" name="Generative">7Vxbc6M2FP41fgyDxM08JnGcdmbbZtab2e1TRwHZVoORCzhx9tdXAoGRkA3xGpzdJC9BB0kIne8cnRseWder7W2C1ss/aIijETTD7ciajCAEwLbZP055KSgOgAVhkZBQdNoRZuQ7FkRTUDckxKnUMaM0yshaJgY0jnGQSTSUJPRZ7jankfzUNVrgBmEWoKhJ/UrCbFlQx465o/+GyWJZPhmY4s4KlZ0FIV2ikD7XSNbNyLpOKM2Kq9X2Gkd888p9mT2uv3jZxdYP1r9PVt+mMLWvLorJpq8ZUr1CguPs6KnXyf1VFP41ub9Pt+bq643lYCCGmE8o2oj9Eu+avZQbiOPwkvOBtWIaM+JViNIl5rMC1lhmq0hcstUlL99Ywywbf9cbk63UeilbW5LxQRemYZqeIOQDDX88Fu3dWN4oh/67Wa0F4gBvp1lCHys226L7HU7ICmc4EaOKN8RhAzct2yv6pXSTBPjAnloC5ShZ4Kytn+DVLaZsickLG/e8Q2kJ0mUNoCUtwRHKyJP8DkgIy6KarnrCHSXs7aApBNsu5xFibfmmPEXxmmJUHVYtEwHf8Ot/tiXPW2xLY152UduFHSkH8SsADXoBNDgG0KCGZfCDOO4bs7AjZuFZMQtkqEHzWMy6CmYdZaKeQQrbQZrQTRzmmOSAeF6SDM/WKOfhMzuoZZxWJxPvG9AVCcR1AaRrGtFkh/YmuuYkispOI2g9gDCc8+GUPZFkfJMc3lwkKCQMbxOSsJOa0JgvBqdZ7VZtlnn+V2H3CScZ3h6B3ibaxCzQU7gomjUwAk+DxmqcDnkSq1/LV+uN89XEFna78RWjs/HV9v03xlf7NXwFJ+Mr22wTj83cCOrOXLKqmbrQLimVkZuTFAT0xUrHsxRWuobb4KarYabt9cRLr8HLdATdiIM93TywywW/NEsae0aN3GB7C6NRui58mjnZcnDs1bQNxgPTs3xcxwqfT9YeMibgcGwFyvk5bkroWMPTcV/yOW6XzxpT+H4Q5iB+Qg84uqMpESrvgWYZXXGuRWTBCQHbI+41XEW85xUKHhc5A3TKsJz0UozN6FrLIYmdld1pFnfWfLGr7YJ74gahqWcQ5hanRogytIf7tbVAy7YdHchQMA6tHuGgWlMaODiuRsR944AJ+EOA8NsBUW53rhxlqdXzshU2HVCSP+yyVAqmTkOI9UyWWcYjJJd8I+A0CGM7B8OcMAgkBjs0GDXHBZzmIOGKiuuhCxSHF4iDI6arF8656ZjbydOEspVeADg21vGiiaY6Lg8eSW7+pwW3ArzqtQ9ZEfn7XtEkxDVTprfzyJFNC695GI3HGsU17gmnoEP8RbYbZBURz2l+RQJjHiHO1TxwB23D4gKfx+yYQ+4cdfI01KBeLuY0zkrn1S7bYvkNY4dDlMSLL3zkxGmgsA68vjBgeTIGLI15CQ84sacHQYeYxQcI+gUB0DkZw6KgQ1DgKBRUEAAFHvb5Dft9jzNjIGyGG/qChW3LpozDxaYTLuzecNEhqPCBi4FxAeyO6qI/WHSISdQC3UGE0pTvXId8TX5SdAlwm4bvu6N6wsZxygSOPtB9jnSMsN1aQ9ueHgHDhLZVH6oRy+oa2gZKOgZa3fIvDCjopdZtzTuk+xcMFImApnd4XaoEQSlbyS6KFZw2GeSeQEg02RwhIvthvkewvDbJ6lsWvI6yAN1fQhigB7SgO7UwQDUHKoO7tb8PBpCFZuBzYFnYnStVRvQnEAV7j2Xwk4mCp+ZOrZ5E4bCeP7noWC3njucd7N+PrEGdR1/kEx4kiXP/29CcWkUNL4LC0L7k2ExQnArAV13LVMRql7VYl7SZlLVY77IW3fMbFXV/zoPZ1pmsF1odgRUJQz78KsEp+Y4e8qm4lAs4sXmdq5Ez4XNtMpoKb6FPix60RoFsjVirUnMye77UxnrI7Gf55w+Wd2b5WFYFrs65H5bpOt/+INNPozv2gAa8XywAx9IkpnVgUE3I04FB59G/qUMDvHsNAtqjxsMqEEeDGWXne6pM2RUodC4ne0uVKZbpSKx0HLls1W8wdtAqFdghcvGRwX6HGewyVlD5cU0TZtAMNnxdWKGl4LrJEN7zDmXshIjzPtC081BB+aUJZ2OtfNoqbk4JfwvBfZRkyuNzWq2PrCKFS12GqWdFRmJUK9UWr7bItadWZe5OoSMjGGersXZdSRGOZbSpJ1n3iutD0wL144O+669fVzDWOacGDZ/nP4q0mm2YurTaR8q9xNLZc2hQVyb2Rizu0zz9zw+LXQGd5Z3XYreaFvsMc4PovPsvK4+BbO7u1eC9Od2Wzs5WIjCfcYDZgZq8H6lxXPl8dprekPYDjN64dFBRF5z4UiRyoDnLyGoTbaqq/ofk/ak9tQqgKtivn7XmkCy096dEQvJUcuMTTdMal2p3dJ01IapbHOOksH8PnnxaUEiPU32YKCLrFLdHUBrusOJcdA6wVOX87QGWssB/oNiJZ6nosg1H44cO+QmIvT96qkHRyQwsdjbQmDFoE2SMxVr90hHNJ1/aXoXYZXm/olJ0HTV665QfoLSpxd7MQfsUFUp7KiyO+U69rZpj9CMxjTdfbeEp1RZHF+GpXyb3VGyhFjd4A9TU2R2+repaR8R/4cOX0OfClmqiMxSQ+ueEpGu7hmkBNS9RAUuZsCtCXT1yTg1QV/000Dpc3eM6wwPaaVqmicayHOiL4DkK/dDR2YMI28BqfEL6Rr4Idl0lztL0GPsyB/17uHVnyfckvAGzf+7o7XTyqP1dI8VhnKAMpdxAei9OoaPE8qGvMX9OFH3Js3XlD2MVkrr7eTHr5n8=</diagram></mxfile>
2207.08012/main_diagram/main_diagram.pdf ADDED
Binary file (92.8 kB). View file
 
2207.08012/paper_text/intro_method.md ADDED
@@ -0,0 +1,306 @@
1
+ # Introduction
2
+
3
+ If we define compositional behaviours (CBs) as "the ability to generalise from combinations of trained-on atomic components to novel re-combinations of those very same components", then we can define compositional learning behaviours (CLBs) as "the ability to generalise in an online fashion from a few combinations of never-before-seen atomic components to novel re-combinations of those very same components". We employ the term online here to imply a few-shot learning context [Vinyals et al., 2016, Mishra et al., 2018] that demands that artificial agents both learn from and then leverage some novel information over the course of a single lifespan, or episode, in our case of few-shot meta-RL (see Beck et al. [2023] for a review of meta-RL). Thus, CLBs involve a few-shot learning aspect that is not present in CBs. In this paper, we investigate artificial agents' abilities for CLBs.
4
+
5
+ Compositional Learning Behaviours as Symbolic Behaviours. Santoro et al. [2021] state that a symbolic entity does not exist in an objective sense but solely in relation to an "*interpreter who treats it as such*", and thus the authors argue that there exists a set of behaviours, i.e. *symbolic behaviours*, that are consequences of agents engaging with symbols. Thus, in order to evaluate artificial agents in terms of their ability to collaborate with humans, we can use the presence or absence of symbolic behaviours. The authors propose to characterise symbolic behaviours as receptive, constructive, embedded, malleable, separable, meaningful, and graded. In this work, we will primarily focus on the receptivity and constructivity aspects. Receptivity aspects amount to the fact that agents should be able to receive new symbolic conventions in an online fashion.
6
+
7
+ For instance, when a child introduces an adult to their toys' names, the adult is able to discriminate between those new names upon their next usage. Constructivity aspects amount to the fact that agents should be able to form new symbolic conventions in an online fashion. For instance, when facing novel situations that require collaboration, two human teammates can come up with novel referring expressions to easily discriminate between the different events occurring. Both aspects reflect abilities that support collaboration. Thus, we develop a benchmark that tests the abilities of artificial agents to perform receptive and constructive behaviours, and the current paper primarily focuses on testing for CLBs.
8
+
9
+ Binding Problem & Meta-Learning. Following Greff et al. [2020], we refer to the binding problem (BP) as the "inability to dynamically and flexibly bind[/re-use] information that is distributed throughout the [architecture]" of some artificial agents (modelled with artificial neural networks here). We note that there is an inherent BP that must be solved for agents to exhibit CLBs. Indeed, over the course of a single episode (as opposed to a whole training process, as in the case of CBs), agents must dynamically identify/segregate the component values from the observation of multiple stimuli, timestep after timestep, and then (re-)bind/(re-)use/(re-)combine this information (hopefully stored in some memory component of their architecture) in order to respond correctly when novel stimuli are presented to them. Solving the BP instantiated in such a context, i.e. re-using previously-acquired information in ways that serve the current situation, is another feat of intelligence that human beings perform with ease, in contrast to current state-of-the-art artificial agents. Thus, our benchmark must emphasise testing agents' abilities to exhibit CLBs by solving a version of the BP. Moreover, we argue for a domain-agnostic BP, i.e. one not grounded in a specific modality such as vision or audio, as doing so would limit the external validity of the test. We aim for as few assumptions as possible to be made about the nature of the BP we instantiate [Chollet, 2019]. This is crucial to motivating the form of the stimuli we employ, which we further detail in Section 3.1.
10
+
11
+ Language Grounding & Emergence. In order to test the quality of some symbolic behaviours, our proposed benchmark needs to query the semantics that agents (*the interpreters*) may extract from their experience, and it must be able to do so in a referential fashion (e.g. being able to query to what extent a given experience is referred to as, for instance, 'the sight of a red tomato'), similarly to most language grounding benchmarks. Subsequently, acknowledging that the simplest form of collaboration is perhaps the exchange of information, i.e. communication, via a given code, or language, we argue that the benchmark must therefore also allow agents to manipulate the code/language that they use to communicate. This property is known as the metalinguistic/reflexive function of language [Jakobson, 1960]. In the current deep learning era, it is mainly investigated within the field of language emergence (Lazaridou and Baroni [2020]; see Brandizzi [2023] and Denamganaï and Walker [2020a] for further reviews), via the use of variants of referential games (RGs) [Lewis, 1969]. Thus, we take inspiration from the language emergence and grounding framework of RGs, where (i) the language domain represents a semantic domain that can be probed and queried, and (ii) the reflexive function of language is indeed addressed. Then, in order to instantiate a different BP at each episode (preventing the agent from 'cheating' by combining experiences from one episode to another), we propose a meta-learning extension to RGs, entitled Meta-Referential Games, and use this framework to build our benchmark. The resulting multi-agent/single-agent benchmark, entitled Symbolic Behaviour Benchmark (S2B), has the potential to test for more symbolic behaviours pertaining to language grounding and emergence than solely CLBs. In the present paper, though, we focus solely on the language grounding task of learning CLBs. After a review of the background (Section 2), we will present our contributions as follows:
12
+
13
+ - We propose the Symbolic Behaviour Benchmark to enable the evaluation of symbolic behaviours (Section 3), and provide a detailed investigation in the case of CLBs.
14
+ - We present a novel stimulus representation scheme, entitled the Symbolic Continuous Stimulus (SCS) representation, which is able to instantiate a BP, unlike more common symbolic representations such as the one-/multi-hot encoded scheme (Section 3.1);
15
+ - We propose the Meta-Referential Game framework, a meta-learning extension to common RGs, that is built over SCS-represented stimuli (Section 3.2);
16
+ - We provide baseline results for both the multi-agent and single-agent contexts, along with evaluations of the performance of the agents in terms of CLBs and linguistic compositionality of the emerging languages, showing that our benchmark is a compelling challenge that we hope will spur the research community (Section 4).
17
+
18
+ The first instance of an environment that demonstrated a primary focus on the objective of communicating efficiently is the *signaling game* or *referential game* (RG) by Lewis [1969], where a speaker agent is asked to send a message to the listener agent, based on the *state/stimulus* of the world that it observed. The listener agent then acts upon the observation of the message by choosing one of the *actions* available to it. Both players' goals are aligned (it features *pure*
19
+
20
+ ![](_page_2_Figure_2.jpeg)
21
+
22
+ Figure 1: Illustration of a discriminative 2-players / L-signal / N-round variant of a RG.
23
+
24
+ coordination/common interests), with the aim of performing the 'best' action given the observed state. In the recent deep learning era, many variants of the RG have appeared. Following the nomenclature proposed in Denamganaï and Walker [2020b], Figure 1 illustrates, in the general case, a discriminative 2-players / L-signal / N-round / K-distractors / descriptive / object-centric variant. In short, the speaker receives a stimulus and communicates with the listener (up to N back-and-forth rounds using messages of at most L tokens each), who additionally receives a set of K+1 stimuli (potentially including a stimulus semantically similar to the speaker's, referred to as an object-centric stimulus). The task is for the listener to determine, given communication with the speaker, whether any of its observed stimuli match the speaker's. Among the many possible features found in the literature, we highlight here those that will be relevant to how S2B is built, and then provide formalism that we will abide by throughout the paper. The **number of communication rounds** N characterises (i) whether the listener agent can send messages back to the speaker agent and (ii) how many communication rounds can be expected before the listener agent is finally tasked to decide on an action. The basic (discriminative) RG is **stimulus-centric**: it assumes that both agents are somehow embodied in the same body and are tasked to discriminate between given stimuli that are the results of one single perception 'system'. On the other hand, Choi et al. [2018] introduced an object-centric variant which incorporates the issues that stem from differences of embodiment (later re-introduced under the name Concept game by Mu and Goodman [2021]). The agents must discriminate between objects (or scenes) independently of the viewpoint from which they may experience them. In the object-centric variant, the game is more about bridging the gap between each other's cognition than (just) finding a common language. The adjective 'object-centric' thus qualifies a stimulus that is different from another but actually presents the same meaning (e.g. the same object, seen from a different viewpoint).
25
+
26
+ Following the last communication round, the listener outputs a decision ( $D_i^L$ in Figure 2) about whether any of the stimulus it is observing matches the one (or a semantically similar one, in object-centric RGs) experienced by the speaker, and if so its action index must represent the index of the stimulus it identifies as being the same. In the case of a **descriptive** variant, it may as well be possible that none of the stimuli are the same as the target one, therefore the action of index 0 is required for success. The agent's ability to make the correct decision over multiple RGs is referred to as accuracy.
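+
+ To make the game flow concrete, the following is a minimal, runnable Python sketch of a single discriminative RG with placeholder random agents; all names (`RandomAgent`, `play_rg`) are illustrative rather than part of the benchmark's API, and the descriptive 'target absent' case is omitted for brevity:
+
+ ```
+ import random
+
+ class RandomAgent:
+     """Placeholder speaker/listener; no learning, purely illustrative."""
+     def message(self, stimulus, history=None):
+         return [random.randrange(1, 6) for _ in range(4)]   # L = 4 tokens
+
+     def decide(self, candidates, history):
+         return random.randrange(len(candidates) + 1)        # action 0 = 'not present'
+
+ def play_rg(speaker, listener, target, distractors):
+     candidates = distractors + [target]
+     random.shuffle(candidates)                              # listener's K+1 stimuli
+     history = [speaker.message(target)]                     # one message, no back-and-forth
+     decision = listener.decide(candidates, history)
+     return decision == candidates.index(target) + 1         # actions 1..K+1 index stimuli
+
+ speaker, listener = RandomAgent(), RandomAgent()
+ stimuli = [[0, 1], [1, 0], [1, 1], [0, 0]]
+ games = [play_rg(speaker, listener, stimuli[0], stimuli[1:]) for _ in range(1000)]
+ print(sum(games) / len(games))   # ~0.2, i.e. chance level over K + 2 = 5 actions
+ ```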
27
+
28
+ Compositionality, Disentanglement & Systematicity. Compositionality is a phenomenon that human beings are able to identify and leverage thanks to the assumption that reality can be decomposed over a set of "disentangle[d,] underlying factors of variations" [Bengio, 2012], our experience being a noisy, entangled translation of this factorised reality. This assumption is critical to the field of unsupervised learning of disentangled representations [Locatello et al., 2020], which aims to find "manifold learning algorithms" [Bengio, 2012], such as variational autoencoders (VAEs) [Kingma and Welling, 2013], with the particularity that the latent encoding space consists of disentangled latent variables (see Higgins et al. [2018] for a formal definition). As a concept, compositionality has been the focus of many definition attempts. For instance, it can be defined as "the algebraic capacity to understand and produce novel combinations from known components" (Loula et al. [2018], referring to Montague [1970]) or as the property according to which "the meaning of a complex expression is a function of the meaning of its immediate syntactic parts and the way in which they are combined" [Krifka, 2001]. Although difficult to define, the community seems to agree that it would enable learning agents to exhibit systematic generalisation abilities (also referred to as combinatorial generalisation [Battaglia et al., 2018]). Some of the ambiguities that come with those loose definitions are starting to be better understood and explained, as in the work of Hupkes et al. [2019]. While often studied in relation to languages, it is usually defined with a focus on behaviours. In this paper, we will refer to linguistic compositionality when considering languages, and use interchangeably
29
+
30
+ compositional behaviours and systematicity to refer to "the ability to entertain a given thought implies the ability to entertain thoughts with semantically related contents" [Fodor et al., 1988].
31
+
32
+ Linguistic compositionality can be difficult to measure. Brighton and Kirby [2006]'s *topographic similarity* (topsim) is acknowledged by the research community as the main quantitative metric [Lazaridou et al., 2018, Guo et al., 2019, Słowik et al., 2020, Chaabouni et al., 2020, Ren et al., 2020]. Recently, taking inspiration from disentanglement metrics, Chaabouni et al. [2020] proposed two new metrics entitled posdis (positional disentanglement) and bosdis (bag-of-symbols disentanglement), which have been shown to be differently 'opinionated' when it comes to what kind of linguistic compositionality they capture. As hinted at by Choi et al. [2018], Chaabouni et al. [2020] and Dessi et al. [2021], linguistic compositionality and disentanglement appear to be two sides of the same coin, inasmuch as emerging languages are discrete, sequentially-constrained, unsupervisedly-learned representations. In Section 3.1, we further develop this bridge between (compositional) language emergence and unsupervised learning of disentangled representations by asking *what would an ideally-disentangled latent space look like?* in order to build our proposed benchmark.
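+
+ As a concrete reference, topsim is typically computed as the Spearman correlation between pairwise distances in meaning space and in message space. The following sketch is one common instantiation (assuming fixed-length meanings and messages and using Hamming distance for both), not the exact code of the cited works:
+
+ ```
+ from itertools import combinations
+ from scipy.stats import spearmanr
+
+ def hamming(a, b):
+     # assumes equal-length sequences
+     return sum(x != y for x, y in zip(a, b))
+
+ def topsim(meanings, messages):
+     """Spearman correlation between pairwise meaning and message distances."""
+     pairs = list(combinations(range(len(meanings)), 2))
+     d_meaning = [hamming(meanings[i], meanings[j]) for i, j in pairs]
+     d_message = [hamming(messages[i], messages[j]) for i, j in pairs]
+     return spearmanr(d_meaning, d_message).correlation
+
+ # a perfectly compositional toy language: one token per factor value
+ meanings = [(0, 0), (0, 1), (1, 0), (1, 1)]
+ messages = [("a", "x"), ("a", "y"), ("b", "x"), ("b", "y")]
+ print(topsim(meanings, messages))   # 1.0
+ ```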
33
+
34
+ Richness of the Stimuli & Systematicity. Chaabouni et al. [2020] found that linguistic compositionality is not necessary to bring about systematicity, as shown by the fact that non-compositional languages wielded by symbolic (generative) RG players were enough to support success in zero-shot compositional tests (ZSCTs). They also found that the emergence of a (posdis-)compositional language was a sufficient condition for systematicity to emerge. Finally, they identified a necessary condition to foster systematicity, which we will refer to as the richness of stimuli condition (Chaa-RSC). It was framed as (i) having a large stimulus space $|I| = i_{val}^{i_{attr}}$, where $i_{attr}$ is the number of attributes/factor dimensions and $i_{val}$ is the number of possible values on each attribute/factor dimension (e.g. $i_{attr} = 3$ attributes with $i_{val} = 5$ values each yields $|I| = 5^3 = 125$ stimuli), and (ii) making sure that it is densely sampled during training, in order to guarantee that different values on different factor dimensions have been experienced together. In a similar fashion, Hill et al. [2019] also propose a richness of stimuli condition (Hill-RSC), framed as a data augmentation-like regulariser caused by the egocentric viewpoint of the studied embodied agent. In effect, the diversity of viewpoints, allowing the embodied agent to observe the same, unique semantic meaning from many perspectives, enables a form of contrastive learning that promotes the agent's systematicity.
35
+
36
+ The version of the S2B<sup>1</sup> that we present in this paper is focused on evaluating receptive and constructive behaviour traits via a single task built around 2-player multi-agent RL (MARL) episodes where players engage in a series of RGs (cf. lines 11 and 17 in Alg. 5 calling Alg. 3). We denote one such episode as a meta-RG and detail it in Section 3.2. Each RG within an episode consists of N + 2 RL steps, where N is the *number of communication rounds* available to the agents (cf. Section 2). At each RL step, both agents observe similar or different *object-centric* stimuli and act simultaneously from different action spaces, depending on their role as the speaker or the listener of the game. Stimuli are presented to the agents using the Symbolic Continuous Stimulus (SCS) representation that we present in Section 3.1. Each RG in a meta-RG follows the formalism laid out in Section 2, with the exception that the speaker and listener agents speak simultaneously and observe each other's messages at the next RL step. Thus, at step N + 1, the speaker's action space consists solely of a *no-operation* (NO-OP) action, while the listener's action space consists solely of the decision-related action space. In practice, the environment simply ignores actions that are not allowed at a given RL step. Next, step N + 2 is intended to provide feedback to the listener agent, as its observation is replaced with the speaker's observation (cf. lines 12 and 18 in Alg. 5). Note that this is the exact stimulus that the speaker has been observing, rather than a possible object-centric sample. In Figure 3, we present SCS-represented stimuli observed by a speaker over the course of a typical episode.
37
+
38
+ Building on the successes of the field of unsupervised learning of disentangled representations, and in answer to the question *what would an ideally-disentangled latent space look like?*, we propose the Symbolic Continuous Stimulus (SCS) representation, and we provide numerical evidence for it in Appendix D.2. It is continuous, relies on Gaussian kernels, and has the particularity of enabling the representation of stimuli sampled from differently semantically structured symbolic spaces while maintaining the
39
+
40
+ <sup>1</sup> <https://github.com/Near32/SymbolicBehaviourBenchmark>
41
+
42
+ ![](_page_4_Figure_0.jpeg)
43
+
44
+ Figure 2: Left: Sampling of the necessary components to create the i-th RG $(RG_i)$ of a meta-RG. The target stimulus (red) and the object-centric target stimulus (purple) are both sampled from the Target Distribution $TD_i$, a set of O different stimuli representing the same latent semantic meaning. The latter set and a set of K distractor stimuli (orange) are both sampled from a dataset of SCS-represented stimuli (**Dataset**), which is instantiated from the current episode's symbolic space, whose semantic structure is sampled out of the meta-distribution of available semantic structures over $N_{dim}$-dimensioned symbolic spaces. Right: Illustration of the resulting meta-RG with a focus on the i-th RG $RG_i$. The speaker agent receives at each step the target stimulus $s_0^i$ and distractor stimuli $(s_k^i)_{k \in [1;K]}$, while the listener agent receives an object-centric version of the target stimulus $s_0^i$ or a (randomly sampled) distractor stimulus, and other distractor stimuli $(s_k^i)_{k \in [1;K]}$, with the exception of the **Listener Feedback step**, where the listener agent receives feedback in the form of the exact target stimulus $s_0^i$. The Listener Feedback step takes place after the listener agent has provided a decision $D_i^L$ about whether the target meaning is observed or not, and in which stimulus it is instantiated, guided by the vocabulary-permuted message $M_i^S$ from the speaker agent.
45
+
46
+ same representation shape (later referred to as the *shape invariance property*), as opposed to the one-/multi-hot encoded (OHE/MHE) vector representation. While the SCS representation is inspired by vectors sampled from a VAE's latent space, this representation is not learned and is not aimed at helping the agent perform its task. It is solely meant to make it possible to define a distribution over infinitely many semantic/symbolic spaces, while instantiating a BP for the agent to resolve. Indeed, contrary to the OHE/MHE representation, observing one stimulus is not sufficient to derive the nature of the underlying semantic space that the current episode instantiates. Rather, it is only via a kernel density estimation over multiple samples (over multiple timesteps) that the semantic space's nature can be inferred, thus requiring the agent to segregate and (re-)combine information that is distributed over multiple observations. In other words, the benchmark instantiates a domain-agnostic BP. We provide in Appendix D.1 some numerical evidence for the fact that the SCS representation differentiates itself from the OHE/MHE representation because it instantiates a BP. Deriving the SCS representation from an idealised VAE's latent encoding of stimuli of any domain makes it a domain-agnostic representation, which is an advantage compared to previous benchmarks because domain-specific information can therefore not be leveraged to solve the benchmark.
47
+
48
+ In detail, the semantic structure of an $N_{dim}$-dimensioned symbolic space is the tuple $(d(i))_{i \in [1;N_{dim}]}$, where $N_{dim}$ is the number of latent/factor dimensions and d(i) is the **number of possible symbolic values** for each latent/factor dimension i. Stimuli in the SCS representation are vectors sampled from the continuous space $[-1,+1]^{N_{dim}}$. In comparison, stimuli in the OHE/MHE representation are vectors from the discrete space $\{0,1\}^{d_{OHE}}$, where $d_{OHE} = \sum_{i=1}^{N_{dim}} d(i)$ depends on the d(i)'s. Note that SCS-represented stimuli have a shape that does not depend on the d(i)'s values; this is the shape invariance property of the SCS representation (see Figure 4 (bottom) for an illustration).
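+
+ A quick illustration of this shape contrast, with two arbitrarily chosen semantic structures:
+
+ ```
+ for d in ([5, 5, 3], [3, 3, 3]):     # two semantic structures with N_dim = 3
+     print("OHE/MHE size:", sum(d), "| SCS size:", len(d))
+ # OHE/MHE size: 13 | SCS size: 3
+ # OHE/MHE size: 9 | SCS size: 3
+ ```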
49
+
50
+ In the SCS representation, the d(i)'s do not shape the stimuli but only the semantic structure, i.e. representation and semantics are disentangled from each other. The d(i)'s shape the semantics by enforcing, for each factor dimension i, a partitioning of the [-1, +1] range into d(i) value sections. Each partition corresponds to one of the d(i) symbolic values available on the i-th factor dimension. Having explained how to build the SCS representation sampling space, we now describe how to sample stimuli from it. Sampling starts by instantiating a specific latent meaning/symbol, embodied by latent values l(i) on each factor dimension i, such that $l(i) \in [1; d(i)]$. Then, the i-th entry of the stimulus is populated with a sample from a corresponding Gaussian distribution over the l(i)-th partition of the [-1, +1] range. It is denoted as $g_{l(i)} \sim \mathcal{N}(\mu_{l(i)}, \sigma_{l(i)})$, where $\mu_{l(i)}$ is the mean of the Gaussian distribution, uniformly sampled to fall within the range of the l(i)-th partition, and $\sigma_{l(i)}$ is the standard deviation of the Gaussian distribution, uniformly sampled over the range $\left[\frac{2}{12d(i)}, \frac{2}{6d(i)}\right]$. The $\mu_{l(i)}$'s are sampled in order to guarantee (i) that the scale of the Gaussian distribution is large enough, but (ii) not larger than the size of the partition section it should fit in. Figure 3 shows an example of such an instantiation of the different Gaussian distributions over each factor dimension's [-1, +1] range.
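+
+ The sampling procedure can be sketched in Python as follows. The exact margin keeping each Gaussian inside its partition is our own reading of the 'large enough but fitting' constraint, so treat this as one plausible choice rather than the benchmark's exact code; latent values are 0-indexed here, whereas the text uses $l(i) \in [1; d(i)]$:
+
+ ```
+ import numpy as np
+
+ rng = np.random.default_rng(0)
+
+ def sample_scs_space(d):
+     """One Gaussian kernel per latent value, confined to its partition of [-1, 1]."""
+     kernels = []
+     for d_i in d:
+         width = 2.0 / d_i                                  # size of one partition
+         dim_kernels = []
+         for l in range(d_i):
+             lo, hi = -1.0 + l * width, -1.0 + (l + 1) * width
+             sigma = rng.uniform(2 / (12 * d_i), 2 / (6 * d_i))
+             mu = rng.uniform(lo + sigma, hi - sigma)       # keep the bulk inside
+             dim_kernels.append((mu, sigma))
+         kernels.append(dim_kernels)
+     return kernels
+
+ def sample_stimulus(kernels, latent):
+     """SCS-represented stimulus for latent values (l(0), ..., l(N_dim - 1))."""
+     return np.array([rng.normal(*kernels[i][l]) for i, l in enumerate(latent)])
+
+ kernels = sample_scs_space([5, 5, 3])                      # the structure of Figure 3
+ print(sample_stimulus(kernels, latent=[0, 3, 2]).shape)    # (3,): shape-invariant
+ ```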
53
+
54
+ Thanks to the shape invariance property of the SCS representation, once the number of latent/factor dimensions $N_{dim}$ is chosen, we can synthetically generate many differently semantically structured symbolic spaces while maintaining a consistent stimulus shape. This is critical, since agents must be able to deal with stimuli coming from differently semantically structured $N_{dim}$-dimensioned symbolic spaces. In meta-learning terms, we can define a distribution over many kinds of tasks, where each task instantiates a different semantic structure for the symbolic space our agent should learn to adapt to. Figure 2 highlights the structure of an episode, and its reliance on differently semantically structured $N_{dim}$-dimensioned symbolic spaces. Agents aim to coordinate efficiently towards scoring a high accuracy during the ZSCTs at the end of each RL episode. Indeed, a meta-RG is composed of two phases: a supporting phase, where supporting stimuli are presented, and a querying/ZSCT phase, where ZSCT-purposed RGs are played. During the querying phase, the presented target stimuli are novel combinations of the component values of the target stimuli presented during the supporting phase. Algorithms 4 and 5 contrast how a common RG differs from a meta-RG (in Appendix A). We emphasise that the supporting phase of a meta-RG does not involve updating the parameters/weights of the learning agents, since this is a meta-learning framework of the few-shot learning kind (compare the positions and dependencies of line 21 in Alg. 5 and line 6 in Alg. 4). During the supporting phase, each RG involves a different target stimulus until all the possible component values on each latent/factor dimension have been shown at least S times (cf. lines 3-7 in Alg. 5). While this amounts to at least S different target stimuli being shown, the number of supporting-phase RGs played remains far smaller than the number of possible training-purposed stimuli in the current episode's symbolic space/dataset. Then, the querying phase sees all the testing-purposed stimuli being presented. We emphasise further that, during one single RL episode, both supporting/training-purposed and ZSCT/querying-purposed RGs are played, without the agents' parameters changing in between the two phases, since learning CLBs involves agents adapting in an online/few-shot learning setting. The semantic structure of the symbolic space is randomly sampled at the beginning of each episode (cf. lines 2-3 in Alg. 5). The reward proposed to both agents is null at all steps except the N+1-th step, where it is +1 if the listener agent decided correctly and, during the querying phase only, -2 if it decided incorrectly (cf. line 21 in Alg. 5).
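+
+ Restated as a minimal sketch (function name illustrative), the per-step reward is:
+
+ ```
+ def rg_reward(step, n_rounds, correct, querying_phase):
+     """Null everywhere except the decision step (N + 1), as in Alg. 5."""
+     if step != n_rounds + 1:
+         return 0.0
+     if correct:
+         return 1.0
+     return -2.0 if querying_phase else 0.0
+ ```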
55
+
56
+ ![](_page_5_Figure_3.jpeg)
57
+
58
+ Figure 3: Visualisation of the SCS-represented stimuli (columns) observed by the speaker agent at each RG over the course of one meta-RG, with $N_{dim} = 3$ and d(0) = 5, d(1) = 5, d(2) = 3. The supporting phase lasted for 19 RGs. For each factor dimension $i \in [0; 2]$, we present on the right side of each plot the kernel density estimations of the Gaussian kernels $\mathcal{N}(\mu_{l(i)}, \sigma_{l(i)})$ of each latent value available on that factor dimension, $l(i) \in [1; d(i)]$. The colours of the dots, used to represent the sampled values $g_{l(i)}$, indicate the latent value l(i)'s Gaussian kernel from which each continuous value was sampled. By construction, for each factor dimension, there is no overlap between the different latent values' Gaussian kernels.
59
+
60
+ **Vocabulary Permutation.** We draw the reader's attention to the fact that simply changing the semantic structure of the symbolic space is not sufficient to force MARL agents to adapt specifically to the instantiated symbolic space at each episode. Indeed, they can learn to cheat by relying on an episode-invariant (and therefore semantic-structure-independent) emergent language that would encode the continuous values of the SCS representation like an analog-to-digital converter would. This cheating language would consist of mapping a fine-enough partition of the [-1, +1] range onto a fixed vocabulary in a bijective fashion (see Appendix C for more details).
61
+
62
+ Therefore, in order to prevent the MARL agents from making such a cheating language emerge, we employ a vocabulary permutation scheme [Cope and Schoots, 2021] that samples, at the beginning of each episode/task, a random permutation of the vocabulary symbols (cf. line 1 in Alg. 2).
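+
+ A sketch of the scheme; keeping token 0 fixed (since it later serves as the EoS token in Table 3) is our assumption:
+
+ ```
+ import numpy as np
+
+ rng = np.random.default_rng()
+ V = list(range(1, 11))                            # 10 usable tokens; 0 reserved for EoS
+ perm = dict(zip(V, rng.permutation(V).tolist()))  # resampled at each episode start
+
+ def permute_message(message):
+     # applied to every message before the other agent observes it
+     return [perm.get(token, token) for token in message]
+ ```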
63
+
64
+ Richness of the Stimulus. We further bridge the gap between Hill-RSC and Chaa-RSC by allowing the number of object-centric samples O and the number of shots S to be parameterised in the benchmark. S represents the minimum number of times any given component value is observed over the course of an episode. Intuitively, throughout their lifespan, an embodied observer may only observe a given component value (e.g. the value 'blue' on the latent/factor dimension 'color') a limited number of times (e.g. once within a 'blue car' stimulus, and another time within a 'blue cup' stimulus). These parameters allow experimenters to account for both Chaa-RSC's sampling density of the different stimulus components and Hill-RSC's diversity of viewpoints.
65
+
66
+ # Method
67
+
68
+ In this section, we detail algorithmically how Meta-Referential Games differ from common RGs. We start by presenting in Algorithm 4 an overview of the common RG, which takes place inside a standard supervised learning loop, and we highlight the following:
69
+
70
+ 1. preparation of the data on which the referential game is played (highlighted in green),
+ 2. elements pertaining to playing a RG (highlighted in blue),
+ 3. elements pertaining to the **supervised learning loop** (highlighted in purple).
73
+
74
+ Helper functions are detailed in Algorithms 1, 2 and 3. Next, we show in greater, contrastive detail the Meta-Referential Game algorithm in Algorithm 5, where we highlight the following:
75
+
76
+ 1. preparation of the data on which the referential game is played (highlighted in green),
+ 2. elements pertaining to playing a RG (highlighted in blue),
+ 3. elements pertaining to the **meta-learning loop** (highlighted in purple),
+ 4. elements pertaining to the setup of a Meta-Referential Game (highlighted in red).
80
+
81
+ **Algorithm 1** (DataPrep) takes as input:
+
+ - a target stimulus $s_0$,
+ - a dataset of stimuli Dataset,
+ - O : Number of Object-Centric samples in each Target Distribution over stimuli $TD(\cdot)$,
+ - K : Number of distractor stimuli to provide to the listener agent,
+ - FullObs : Boolean defining whether the speaker agent has full (or partial) observation,
+ - DescrRatio : Descriptive ratio in the range [0, 1] defining how often the listener agent observes the same semantics as the speaker agent.
87
+
88
+ ```
+ 1  s_0', D^Target ← s_0, 0;
+ 2  if random(0,1) > DescrRatio then
+ 3      s_0' ~ Dataset - TD(s_0);   /* Exclude target stimulus from listener's observation ... */
+ 4      D^Target ← K + 1;           /* ... and expect it to decide accordingly. */
+ 5  end
+ 6  else if O > 1 then
+ 7      Sample an object-centric distractor s_0' ~ TD(s_0);
+ 8  end
+ 9  Sample K distractor stimuli from Dataset - TD(s_0): (s_i)_{i ∈ [1,K]} ~ Dataset - TD(s_0);
+ 10 Obs_Speaker ← {s_0};
+ 11 if FullObs then Obs_Speaker ← {s_0} ∪ {s_i | ∀ i ∈ [1,K]};
+ 12 end
+ 13 Obs_Listener ← {s_0'} ∪ {s_i | ∀ i ∈ [1,K]};
+    /* Shuffle listener observations and update index of target decision: */
+ 14 Obs_Listener, D^Target ← Shuffle(Obs_Listener, D^Target);
+ Output: Obs_Speaker, Obs_Listener, D^Target;
+ ```
109
+
110
+ **Algorithm 2** (MetaRGDatasetPreparation) takes as input:
+
+ - V : Vocabulary (finite set of tokens available),
+ - $N_{dim}$ : Number of attribute/factor dimensions in the symbolic spaces,
+ - $V_{min}$ : Minimum number of possible values on each attribute/factor dimension in the symbolic spaces,
+ - $V_{max}$ : Maximum number of possible values on each attribute/factor dimension in the symbolic spaces.
+
+ ```
+ 1 Initialise random permutation of vocabulary: V' ← RandomPerm(V);
+ 2 Sample semantic structure: (d(i))_{i ∈ [1,N_dim]} ~ U(V_min; V_max)^{N_dim};
+ 3 Generate symbolic space/dataset D((d(i))_{i ∈ [1,N_dim]});
+ 4 Split dataset into supporting set D^support and querying set D^query
+   (the argument (d(i))_{i ∈ [1,N_dim]} is omitted for readability);
+ Output: V', D((d(i))_{i ∈ [1,N_dim]}), D^support, D^query;
+ ```
120
+
121
+ **Algorithm 3** (PlayRG) takes as input:
+
+ - Speaker and Listener agents,
+ - Set of speaker observations Obs_Speaker,
+ - Set of listener observations Obs_Listener,
+ - N : Number of communication rounds to play,
+ - L : Maximum length of each message,
+ - V : Vocabulary (finite set of tokens available).
+
+ ```
+ 1 Compute message M^S = Speaker(Obs_Speaker | ∅);
+ 2 Initialise Communication Channel History: CommH ← [M^S];
+ 3 for round = 0, N do
+ 4     Compute Listener's reply M^L_round = Listener(Obs_Listener | CommH);
+ 5     CommH ← CommH + [M^L_round];
+ 6     Compute Speaker's reply M^S_round = Speaker(Obs_Speaker | CommH);
+ 7     CommH ← CommH + [M^S_round];
+ 8 end
+ 9 Compute listener decision _, D^L = Listener(Obs_Listener | CommH);
+ Output: Listener's decision D^L, Communication Channel History CommH;
+ ```
136
+
137
+ **Algorithm 4** (common RG within a supervised learning loop) takes as input:
+
+ - a dataset of stimuli Dataset,
+ - a set of hyperparameters defining the RG:
+   - O : Number of Object-Centric samples in each Target Distribution over stimuli TD(·),
+   - N : Number of communication rounds to play,
+   - L : Maximum length of each message,
+   - V : Vocabulary (finite set of tokens available),
+   - K : Number of distractor stimuli to provide to the listener agent,
+   - FullObs : Boolean defining whether the speaker agent has full (or partial) observation,
+   - DescrRatio : Descriptive ratio in the range [0, 1] defining how often the listener agent observes the same semantics as the speaker agent,
+ - $\mathcal{L}$ : Loss function to use in the agents' update,
+ - Speaker(·) and Listener(·) agents.
149
+
150
+ ```
+ 1  Systematically split Dataset into training and testing datasets, D_train and D_test;
+ 2  for epoch = 1, N_epoch do
+ 3      for target stimulus s_0 ∈ D_train do
+            /* Preparation of observations and target decision: */
+ 4          Obs_Speaker, Obs_Listener, D^Target ← DataPrep(Dataset, s_0, O, K, FullObs, DescrRatio);
+            /* Play Referential Game: */
+ 5          D^L, _ = PlayRG(Speaker, Listener, Obs_Speaker, Obs_Listener, N, L, V);
+            /* Supervised Learning Parameters Update on Training Stimulus Only: */
+ 6          Update both speaker and listener agents' parameters using the loss L(D^Target, D^L);
+ 7      end
+ 8      Initialise ZSCT accuracy: Acc_ZSCT ← 0;
+ 9      for target stimulus s_0 ∈ D_test do
+            /* Preparation of observations and target decision: */
+ 10         Obs_Speaker, Obs_Listener, D^Target ← DataPrep(Dataset, s_0, O, K, FullObs, DescrRatio);
+            /* Play Referential Game: */
+ 11         D^L, _ = PlayRG(Speaker, Listener, Obs_Speaker, Obs_Listener, N, L, V);
+            /* Update ZSCT Accuracy: */
+ 12         Acc_ZSCT ← Update(Acc_ZSCT, D^Target, D^L);
+ 13     end
+ 14 end
+ ```
173
+
174
+ **Algorithm 5** (Meta-Referential Game) takes as input:
+
+ - $N_{episode}$, $N_{dim}$ : Number of episodes, and number of attribute/factor dimensions,
+ - S : Minimum number of shots over which each possible value on each attribute/factor dimension ought to be observed by the agents (as part of a target stimulus),
+ - $V_{min}$, $V_{max}$ : Minimum and maximum number of possible values on each attribute/factor dimension in the symbolic spaces,
+ - $TSS(\mathcal{D}, \mathcal{S}, S)$ : Target stimulus sampling function, which samples from dataset $\mathcal{D}$, given a set of previously sampled stimuli $\mathcal{S}$, while maximising the likelihood that each possible value on each attribute/factor dimension is sampled at least S times,
+ - a set of hyperparameters defining the RG:
+   - O : Number of Object-Centric samples in each Target Distribution over stimuli $TD(\cdot)$,
+   - N : Number of communication rounds to play,
+   - L : Maximum length of each message,
+   - V : Vocabulary (finite set of tokens available),
+   - K : Number of distractor stimuli to provide to the listener agent,
+   - FullObs : Boolean defining whether the speaker agent has full (or partial) observation,
+   - DescrRatio : Descriptive ratio in the range [0, 1] defining how often the listener agent observes the same semantics as the speaker agent,
+ - Speaker(·) and Listener(·) agents.
188
+
189
+ ```
+ 1  for episode = 1, N_episode do
+        /* Preparation of the symbolic space/dataset: */
+ 2      V', D_episode, D^support_episode, D^query_episode ← MetaRGDatasetPreparation(V, N_dim, V_min, V_max);
+ 3      Initialise set of sampled supporting stimuli: S^support ← ∅;
+ 4      repeat
+ 5          Sample training-purposed target stimulus s_0^i ~ TSS(D^support_episode, S^support, S);
+ 6          S^support ← S^support ∪ {s_0^i}; i ← i + 1;
+ 7      until all values on each attribute/factor dimension have been instantiated at least S times;
+ 8      Initialise RG index: i ← 0;
+        /* Supporting Phase: */
+ 9      for target stimulus s_0^i ∈ S^support do
+ 10         Obs^i_Speaker, Obs^i_Listener, D^Target_i ← DataPrep(D_episode, s_0^i, O, K, FullObs, DescrRatio);
+ 11         D^L_i, CommH_i = PlayRG(Speaker, Listener, Obs^i_Speaker, Obs^i_Listener, N, L, V');
+ 12         _, _ = Listener(Obs^i_Speaker | CommH_i);    /* Listener-Feedback Step */
+ 13     end
+        /* Querying/ZSCT Phase: */
+ 14     Initialise ZSCT accuracy: Acc_ZSCT ← 0;
+ 15     for target stimulus s_0^i ∈ D^query_episode do
+ 16         Obs^i_Speaker, Obs^i_Listener, D^Target_i ← DataPrep(D_episode, s_0^i, O, K, FullObs, DescrRatio);
+ 17         D^L_i, CommH_i = PlayRG(Speaker, Listener, Obs^i_Speaker, Obs^i_Listener, N, L, V');
+ 18         _, _ = Listener(Obs^i_Speaker | CommH_i);    /* Listener-Feedback Step */
+            /* Update ZSCT Accuracy: */
+ 19         Acc_ZSCT ← Update(Acc_ZSCT, D^Target_i, D^L_i); i ← i + 1;
+ 20     end
+        /* Meta-Learning Parameters Update on Whole Episode: */
+ 21     Update both agents using rewards R_i = +1 if D^Target_i == D^L_i,
+            0 otherwise during the supporting phase, -2 otherwise during the querying phase;
+ 22 end
+ /* Observation preparation (lines 10 and 16) follows Alg. 4's usage of DataPrep. */
+ ```
235
+
236
+ ![](_page_16_Figure_0.jpeg)
237
+
238
+ Figure 4: Top: visualisation, in each column, of the messages sent by the posdis-compositional rule-based speaker agent over the course of the episode presented in Figure 3. Colours encode the token index, as a visual cue. Bottom: OHE/MHE and SCS representations of example latent stimuli for two differently-structured symbolic spaces with $N_{dim} = 3$, i.e. on the left for d(0) = 4, d(1) = 2, d(2) = 3, and on the right for d(0) = 3, d(1) = 3, d(2) = 3. Note the shape invariance property of the SCS representation: its shape remains unchanged by the change in semantic structure of the symbolic space, unlike the OHE/MHE representations.
239
+
240
+ The baseline RL agents that we consider use a 3-layer fully-connected network with 512, 256, and 128 hidden units and ReLU activations, with the stimulus fed as input. The output is then concatenated with the message coming from the other agent (in an OHE/MHE representation), as well as all other information necessary for the agent to identify the current step: the previous reward value (either +1 or 0 during the training phase, and +1 or −2 during the testing phase), its previous action in one-hot encoding, an OHE/MHE-represented index of the communication round (out of N possible values), an OHE/MHE-represented index of the agent's role (speaker or listener) in the current game, an OHE/MHE-represented index of the current phase (either 'training' or 'testing'), an OHE/MHE representation of the previous RG's result (either success or failure), the previous RG's reward, and an OHE/MHE mask over the action space, clarifying which actions are available to the agent at the current step. The resulting concatenated vector is processed by another 3-layer fully-connected network with 512, 256, and 256 hidden units and ReLU activations, and then fed to the core memory module, here a 2-layer LSTM [Hochreiter and Schmidhuber, 1997] with 256 and 128 hidden units, which feeds into the advantage and value heads of a 1-layer dueling network [Wang et al., 2016].
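+
+ For concreteness, a PyTorch sketch of this pipeline follows; the input sizes (`stim_dim`, `extra_dim`) and the simplified handling of the recurrent state are placeholders, not the exact open-source implementation:
+
+ ```
+ import torch
+ import torch.nn as nn
+
+ class BaselineAgent(nn.Module):
+     def __init__(self, stim_dim, extra_dim, n_actions):
+         super().__init__()
+         self.stim_enc = nn.Sequential(             # stimulus encoder: 512-256-128
+             nn.Linear(stim_dim, 512), nn.ReLU(),
+             nn.Linear(512, 256), nn.ReLU(),
+             nn.Linear(256, 128), nn.ReLU(),
+         )
+         self.fuse = nn.Sequential(                 # fusion network: 512-256-256
+             nn.Linear(128 + extra_dim, 512), nn.ReLU(),
+             nn.Linear(512, 256), nn.ReLU(),
+             nn.Linear(256, 256), nn.ReLU(),
+         )
+         # '2-layer LSTM with 256 and 128 hidden units' -> two stacked modules
+         self.lstm1 = nn.LSTM(256, 256, batch_first=True)
+         self.lstm2 = nn.LSTM(256, 128, batch_first=True)
+         self.advantage = nn.Linear(128, n_actions) # dueling heads
+         self.value = nn.Linear(128, 1)
+
+     def forward(self, stim, extras):
+         x = torch.cat([self.stim_enc(stim), extras], dim=-1)
+         x = self.fuse(x).unsqueeze(1)              # (batch, time=1, 256)
+         x, _ = self.lstm1(x)
+         x, _ = self.lstm2(x)
+         x = x.squeeze(1)
+         adv, val = self.advantage(x), self.value(x)
+         return val + adv - adv.mean(dim=-1, keepdim=True)   # dueling Q-values
+
+ agent = BaselineAgent(stim_dim=3, extra_dim=32, n_actions=6)
+ q_values = agent(torch.randn(4, 3), torch.randn(4, 32))    # -> shape (4, 6)
+ ```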
241
+
242
+ Table 5 highlights the hyperparameters used for the learning agent architecture and the learning algorithm, R2D2 [Kapturowski et al., 2018]. More details can be found, for reproducibility purposes, in our open-source implementation at <https://github.com/Near32/Regym/tree/develop/benchmark/R2D2/SymbolicBehaviourBenchmark>.
243
+
244
+ Training was performed for each run on 1 NVIDIA GTX 1080 Ti, and the average training time for a run is 18 hours for LSTM-based models, 40 hours for ESBN-based models, and 52 hours for DCEM-based models.
245
+
246
+ The ESBN-based and DCEM-based models that we consider have the same architectures and parameters as in their respective original works by Webb et al. [2020] and Hill et al. [2020], with the exception of the stimulus-encoding networks, which are similar to the LSTM-based model's.
247
+
248
+ The rule-based speaker agents used in the single-agent task, where only the listener agent is a learning agent, speak a compositional language in the sense of the posdis metric [Chaabouni et al., 2020], as presented in Table 3 for $N_{dim}=3$, a maximum sentence length of L=4, and a vocabulary size $|V| \geq \max_i d(i) = 5$, assuming a semantic space such that $\forall i\in [1,3], d(i)=5$.
249
+
250
+ The agents can develop a cheating language, cheating in the sense that it could be episode/task-invariant (and thus semantic-structure-invariant). This emerging cheating language would encode the continuous values of the SCS representation like an analog-to-digital converter would, by mapping a fine-enough partition of the [-1,+1] range onto the vocabulary in a bijective fashion.
251
+
252
+ For instance, for a vocabulary size $|V|=10$, each symbol can be unequivocally mapped onto one of the $\frac{2}{10}$-wide increments over [-1,+1], and, by communicating $N_{dim}$ symbols (assuming $N_{dim} \leq L$), the speaker agent can communicate to the listener the (digitized) continuous value on each dimension i of the SCS-represented stimulus. If $\max_j d(j) \leq |V|$, then the cheating language is expressive enough for the speaker agent to digitize
253
+
254
+ Table 3: Examples of the latent stimulus to language utterance mapping of the posdis-compositional rule-based speaker agent. Note that token 0 is the EoS token.
255
+
256
+ | Latent dim #1 | Latent dim #2 | Latent dim #3 | Comp. Language Tokens |
+ |---|---|---|---|
+ | 0 | 1 | 2 | 1, 2, 3, 0 |
+ | 1 | 3 | 4 | 2, 4, 5, 0 |
+ | 2 | 5 | 0 | 3, 6, 1, 0 |
+ | 3 | 1 | 2 | 4, 2, 3, 0 |
+ | 4 | 3 | 4 | 5, 4, 5, 0 |
261
+
262
+ all possible stimuli without solving the binding problem, i.e. without inferring the semantic structure. Similarly, it is expressive enough for the listener agent to convert the spoken utterances back into continuous/analog-like values over the [-1,+1] range, thus enabling the listener agent to skirt the binding problem when trying to discriminate the target stimulus from the different stimuli it observes.
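+
+ The analog-to-digital mapping described above is easy to write down explicitly; a sketch with illustrative function names, for $|V| = 10$ bins over [-1, +1]:
+
+ ```
+ def digitize(stimulus, vocab_size=10):
+     # speaker side: map each continuous value in [-1, 1] onto one of |V| bins
+     return [min(int((v + 1) / 2 * vocab_size), vocab_size - 1) for v in stimulus]
+
+ def undigitize(tokens, vocab_size=10):
+     # listener side: decode each token back to the centre of its bin
+     return [-1 + (t + 0.5) * 2 / vocab_size for t in tokens]
+
+ print(digitize([-0.93, 0.41, 0.55]))   # [0, 7, 7]
+ ```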
263
+
264
+ **Hypothesis.** The SCS representation differs from the OHE/MHE one primarily in terms of the binding problem [Greff et al., 2020] that the former instantiates while the latter does not. Indeed, the semantic structure can only be inferred after observing multiple SCS-represented stimuli. We hypothesised that it is via the *dynamic binding of information* extracted from each observation that an estimation of a density distribution over each dimension i's [-1, +1] range can be performed. And estimating such a density distribution is tantamount to estimating the number of likely Gaussian distributions that partition each [-1, +1] range.
265
+
266
+ ![](_page_17_Figure_13.jpeg)
267
+
268
+ Figure 5: 5-ways 2-shots accuracies on the Recall task with different stimulus representations (OHE: blue; SCS: orange).
269
+
270
+ **Evaluation.** Towards highlighting that there is a binding problem taking place, we show results of baseline RL agents (similar to the main experiments in Section 4) evaluated on a simple single-agent Recall task. The Recall task structure borrows from few-shot learning tasks, as it presents over 2 shots all the stimuli of the instantiated symbolic space (not to be confused with Meta-RGs, where all the latent/factor dimensions' values are presented over S shots: Meta-RGs do not necessarily sample the whole instantiated symbolic space in each episode, but the Recall task does). Each shot consists of a series of recall games, one for each stimulus that can be sampled from an $N_{dim} = 3$-dimensioned symbolic space. The semantic structure $(d(i))_{i \in [1;N_{dim}]}$ of the symbolic space is randomly sampled at the beginning of each episode, i.e. $d(i) \sim \mathcal{U}(2;5)$, where $\mathcal{U}(2;5)$ is the uniform discrete distribution over the integers in [2; 5], and the number of object-centric samples is O = 1, in order to remove any confounder from object-centrism.
275
+
276
+ Each recall game consists of two steps. In the first step, a stimulus is presented to the RL agent, and only a *no-operation* (NO-OP) action is made available. In the second step, the agent is asked to infer/recall the discrete latent value l(i) (as opposed to the representation of it that it observed, either in the SCS or OHE/MHE form) that the previously-presented stimulus had instantiated on a given i-th dimension, where the value of i for the current game is uniformly sampled from $\mathcal{U}(1; N_{dim})$ at the beginning of each game. The value of i is communicated to the agent via a second-step observation that differs from the first step's: it is a zeroed-out stimulus, with the exception of a 1 on the i-th dimension on which the inference/recall must be performed when using the SCS representation, or over all the OHE/MHE dimensions that can encode a value for the i-th latent factor/attribute when using the OHE/MHE representation. On the second step, the agent's available action space consists of discrete actions over the range $[1; \max_j d(j)]$, where $\max_j d(j)$ is a hyperparameter of the task representing the maximum number of latent values on any latent/factor dimension. In our experiments, $\max_j d(j) = 5$. While the agent is rewarded at each game for recalling correctly, we only focus on the performance over the games of the second shot, i.e. on the games where the agent has theoretically received enough information to infer the density distribution over each dimension i's [−1, +1] range. Indeed, observing the whole symbolic space once (on the first shot) is sufficient (albeit not necessary, specifically in the case of the OHE/MHE representation).
277
+
278
+ Results. Figure 5 details the recall accuracy over all the games of the second shot for our baseline RL agent throughout learning. There is a large gap in asymptotic performance depending on whether the Recall task is evaluated using the OHE/MHE or SCS representation. We attribute the poor performance in the SCS context to the instantiation of a BP. We note again that during those experiments the number of object-centric samples was kept at O = 1, thus emphasising that the BP depends solely on the use of the SCS representation and does not require object-centrism.
279
+
280
+ In this section, we verify our hypothesis that the SCS representation yields ideally-disentangled stimuli. We report the FactorVAE Score [Kim and Mnih, 2018], the Mutual Information Gap (MIG) [Chen et al., 2018], and the Modularity Score [Ridgeway and Mozer, 2018], as they have been shown to be among the metrics that correlate the least with each other [Locatello et al., 2020], thus representing different desiderata/definitions of disentanglement. We report on $N_{dim} = 3$-dimensioned symbolic spaces with $\forall j, d(j) = 5$. The measurements are 100.0%, 94.8%, and 98.9% for, respectively, the FactorVAE Score, the MIG, and the Modularity Score, thus validating our design hypothesis about the SCS representation. We remark that, while it is not the case for the FactorVAE Score (possibly due to its reliance on a deterministic classifier), the MIG and Modularity Score are sensitive to the number of object-centric samples, decreasing to as low as 64.4% and 66.6% for O = 1.
281
+
282
+ Hypothesis. Seeing the discrepancies around the impact of S and O, and given both (i) the fact that increasing S inevitably increases the length of the episode (thus making it difficult for LSTMs, as they notoriously fail to integrate information from past to present inputs over long dependencies) and (ii) our results suggesting that increasing S when O > 1 confuses LSTM-based agents rather than helping them, we hypothesise that using agents with more efficient memory-addressing mechanisms (e.g. attention) and greater algorithm-learning abilities (e.g. with explicit memory) would help agents get past the identified hurdles. While we leave it to subsequent work to investigate the impact of more sophisticated core memory modules when O > 1, we here propose off-the-shelf baseline results in the simplest setting and with a greater sampling budget. Indeed, we hypothesise that a denser sampling of the space of all symbolic spaces ought to be beneficial, as we extrapolate Chaa-RSC (ii) to its meta-learning version and avoid the issues pertaining to long dependencies.
+
+ Table 4: ZSCT accuracies (%) for S = 1 and O = 1, with a 10M observation budget.
+
+ | | LSTM | ESBN | DCEM |
+ |---------|--------------|--------------|--------------|
+ | Acc.(%) | 86.00 ± 0.14 | 89.35 ± 2.77 | 81.90 ± 0.59 |
293
+
294
+ Evaluation. We report on architectures based on the Emergent Symbol Binding Network (ESBN) [Webb et al., 2020] and the Dual-Coding Episodic Memory (DCEM) [Hill et al., 2020] (resp. 3 and 2 random seeds), in the setting of $N_{dim} = 3$, $V_{min} = 2$ and $V_{max} = 3$, with a sampling budget of 10M observations, as opposed to 5M previously.
295
+
296
+ Results. Table 4 presents the results, which are slightly in favour of the ESBN-based model. Since the ESBN is built on top of the LSTM-based model, its memory scheme can be bypassed until it provides advantages, thanks to a simple-enough mechanism, as opposed to the case of the DCEM-based model.
297
+
298
+ In the following, we investigate and compare the performance when using an LSTM [Hochreiter and Schmidhuber, 1997] or a Differentiable Neural Computer (DNC) [Graves et al., 2016] as core memory module.
299
+
300
+ Regarding the auxiliary reconstruction loss: in the case of the LSTM, the hidden states are used as input to the prediction network, while in the case of the DNC, the memory is used. Figure 6b shows the stimulus reconstruction accuracies for both architectures, highlighting the greater data-efficiency (and resulting asymptotic performance within the current observation budget) of the LSTM-based architecture compared to the DNC-based one.
301
+
302
+ Figure 6a shows the 4-ways (3 distractors, descriptive RG variant) ZSCT accuracies of the different agents throughout learning. The ZSCT accuracy is the accuracy over testing-purposed stimuli only, after the agent has observed the supporting training-purposed stimuli for the current episode two consecutive times (i.e. S = 2). The DNC-based architecture has difficulty learning how to use its memory, even with the auxiliary reconstruction loss, and therefore it utterly fails to reach better-than-chance ZSCT accuracies. On the other hand, the LSTM-based architecture is fairly successful on the auxiliary reconstruction task, but this is not sufficient for training on the main task to really take off. As expected from the fact that the benchmark instantiates a binding problem that requires relational responding, our results hint at the fact that the ability to use memory towards deriving valuable relations between stimuli seen at different time-steps is essential. Indeed, only the agent that has the ability to use its memory element towards recalling stimuli starts to perform at a better-than-chance level. Thus, the auxiliary reconstruction loss is an important element in driving some success on the task, but it is clearly not sufficient, and the rather poor results that we achieve using these baseline agents indicate that new inductive biases must be investigated in order to solve the problem posed by this benchmark.
303
+
304
+ ![](_page_19_Figure_7.jpeg)
305
+
306
+ Figure 6: (a): 4-ways (3 distractors) zero-shot compositional test accuracies of the different architectures: 5 seeds for architectures with DNC and LSTM, and 2 seeds for runs with DNC+Rec and LSTM+Rec, where the auxiliary reconstruction loss is used. (b): Stimulus reconstruction accuracies for the architectures augmented with the auxiliary reconstruction task. Accuracies are computed on binary values corresponding to each stimulus's latent dimension's reconstructed value being close enough to the ground-truth value, with a threshold of 0.05.
2210.02984/main_diagram/main_diagram.drawio ADDED
@@ -0,0 +1 @@
1
+ <mxfile host="app.diagrams.net" modified="2022-09-28T02:03:55.263Z" agent="5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/104.0.0.0 Safari/537.36" etag="MoSEjejxCT5bOYAoAwcT" version="20.3.6" type="device"><diagram id="CyyifapiEuXWhiBdPv7S" name="Page-1">7V1bk6O2Ev41U3XOgyndL48zk2xSpzZVJ7unksp5SRGb8VCLzSxmbvvr01xtCWwDNmBn8YuNEC3Q96nV3WrhG3q/evspcp8efwkXXnBD0OLthv5wQwgmWsFXUvKelSiRFywjf5FX2hZ89r95eSHKS5/9hbcxKsZhGMT+k1k4D9drbx4bZW4Uha9mtYcwMFt9cpd5i2hb8HnuBl6l2u/+In7Mn4Lv1P7Z85ePRcsY5WdWblE5F7F5dBfh605b9Mcbeh+FYZz9Wr3de0HSeUW/ZII+7Dlb3ljkreMmFyw/fvr2y39ecbz+7fYLmX/9FH+dz2gm5cUNnvMHzm82fi96wFsvbpOOhKN1uIbCu8d4FcARhp9R+LxeeEkLCI42cRR+KfsJlyX3YRBGqTSK4CMEnFm4m8f0wqRa9Vnyx/MWy10g8if7yQtXXhy9Q4XXLSgFJo87eBRlkRe4sf9igurm3FiW4soW/hv6cCcEFTzmLLvkveA1dTQzpWzC52ju5Rfu4mDJYqYoQquiYjdaenFFFPzYefZtUQp0C9D1BHoT0IUFFO+OOZYWf9jgoGM5od4EdWWhLk9AXVuoi+FRVxPqTVAnFur4BNSphToaHvVJwzeb1pEFu+4OO+h0E3Y1OOyFCTvB3mqw1yHVdbDXMahv1PmEehPU2fHJuDHqljtQZyT0jbqYUO9gxNcZ3l2N+DqHoG/UJyO+EeqWEX+Ku24b8SP462Qy4huhbptzdZZ3V3OuziPoGXZKRoP90pFm2ACHnhKaUcKURfngQI/nrl080MQER5ETFLklS1Rl9Qw0G89Bu3igzVHIkD4BaGtEq6qsvoHGE9B70OGm6mZUdJ+jkSULV2X1DfQ0R+8Fmpvg1Kjb5kBbstjwqnu8ldJLB1pSExwpOwNtcYbSqqi+cWYTznvdK2QBzToDrSwDnlZF9Q30eGHTiweaaAto1N0YQ9pCuiqrb6THC5VePNJcWpN0d92NqSmLkuGV93jh0YtHWtnm2AkhE2FN0zXh1b6RbhcymQfuZuPPj4NtQUtpLQ0uHmxsKV19gjeNLbAlHhps3i5sMoHdHWymLFmDB0N5u9DJ9wY2MW1wrNUJYFuyZFVW32C3C598b2BbI/sksKkpC9AePCutNgUV7oE9ZF8V6GPvLTbBNuHNDbgHPwisIjfwl+uEMQCtB+V3L14U+3M3uM1PrPzFImnm7vXRj73PT+48afM1cp+OmIEkaTBcx/lWBcLz4x3KffiQkO4QwZK78d4OUqwMZpvRlVIB7FBQ1FCQov1sM4Btj2LdarQI4rwfDATF1+ewODHbpD12CxUIf3pLu6c4D7+W2XfKA3l3w+/naYfKuzBy1zAcJYhDD+lXxpWsSXiGrNVCwBVSiFQphFB/FGJ8OAq9/LH6k/+8evl1heffbtn/P31b3s4aRF6W0H9ZN8agQcOkuzU6MNJ2uhP42abbyq1D7l9F4+hgd1Jr8Zlo5Gg5bo/uU6zRn9lYSkeBvJslwweOHr3Y3RlJVzhkDmndhR8BnBlpXr1NXEOjKj0OErXxUDNzRmZ45IHWJCF4/hy9lBk+J9paJQTnnDIPIXMhhtrMdJFntLsHZoliyEGikZUGuLnvO9Wekgqb/fdMzYaSdtDOh5A823HLv6yFs5qEsp39f5aAnRCHJ9YL4RTl2CH2sid1BEOSKsI5ODVF8LYtx5JlNWlFdZIVVaG5lhRzRgVpRrqz8WCEDMdr4QE4ajYNpKMop0JjIjkuNlS3ZkGSHbn7sQghHSxYslKnpMaaD8oHPUKi1JXwgUgr6QUxh2BwCxVKKUE7soFTi2YU7FqpFFWaCCzZsATgI6zO5gS4qpTnZL+RpcpBPzgIYVVSgndnhCUaS+EoJWUpXA5LivFmiesihUAV5JRyBCVCMS6o5MpGrjEpSEU02CKOZEJKxWHWUHrgqaLIJJlIccSOQO1ga8wI0Y5pfdNhPI/iqugAZoTtXgB0YFcyVGAnOhKCViRj8GUk1poSpbgGR3NgFTFC7uZVckIRywYE78NhOLH/maBISdstbMwJZksG38IBD6TwZG1Xtm9GjJDleZWMKMKVBR+QdDSRWgmeWYGdrQiTDAw5CheeBtNiYP0wORtd54xjHsEJk8YxP6ZvUoyQNXqVpFC0QgrBHczS2FRmXnY1LVlFNKgOKNoal3pYToyQX3qVnKhGJY6ZgCdEJY7YrX1zYgpKNOIE+J8VTiTrZBKme8EY0ZyiruaEqIhODBWkEp7lvBjWvhTjeRyXzgNSBYsK6RBd6nTVORTBlD1fUM5A7Wwl02FpMJ6bcfk0sDKNFXM05uWI7Tw9IHsZA4xWplPnlSlKCBmUAbKdIuia4FqTdHEVcwLGlUUn5VBaYFWmo7df21QVGoBuQGw72QxLg3aKYKLBmWhAZGVC0KBnFJgaCNNkdbtI2RqKCO1CDWcmAkj+4AfB1dCi6llq6SiBtsTovIzF7Hik4g6RQpexrWFp0S7Y8J3TohJLPhsreHVNhDiYFGFJmDoGnjfGCzhcPA2sN68yxh2K1daC7JiAmaxwswNpUcwRTJZ2xMBpciO8KOxa6ICw7VYCSA7HmUIQHCPUUS0QBaYj288I5ShWpl+RYScONSXK7Q9MW68U48RBdCfFuWOmHJHkkILg2KFclXpo4HXNSUHsX7yyzQYYtOAKlBnPxau82occ7JR96XBRJOdKOewShR4vHeryGcBtBiSJ8GKbg9B1hqDcDGYxcC8oy1iFBUayNyezdnNMXczprBs0y21l1kayf86WzO77y2Z7Npi13rZJzRwJcE7LrUBj7SfDddGLDPSF/1JLrATsWY7bbVrDjeI91GrHUSwPbyLm98lfsAE5gKsfE5ImshAUB/7K4G9aGCXdl/5hHByjbLcxFD9E7hzq4qTAojz6l7lPOQJqpeVHNl1mF/kRyP3hwTiqDKtU3Cy5k4M7ov99ZPyVxSlI1zsq0529+zZKB+5fXgBds/Ci4vTCe3Cf02fPTrrzL8u0uWqFDdyUv17+L3zKm80LPnoPsVlyF8ZxuDLLPuVjb6cIjig5jyLAyjQxy5fn7aoBOqQaaBCVuPhtpZdlm2Blh8CxsuBr7J+Io6LOtIUUJNst5a+UOdem0Xr6Nci4OC/91D+fftYrIkhn8h0RdDbqWe2IIYjXwLue9F474lnvlKW0Y4a5LYjwfrbN2wnReQS4I+/gcPsHwFn17d8o0x//Bg==</diagram></mxfile>
2210.02984/main_diagram/main_diagram.pdf ADDED
Binary file (21.4 kB). View file
 
2210.02984/paper_text/intro_method.md ADDED
@@ -0,0 +1,108 @@
1
+ # Introduction
2
+
3
+ Symmetries allow machine learning models to generalize properties of one data point to the properties of an entire class of data points. A model that captures translational symmetry, for example, will have the same output for an image and a version of the same image shifted a half pixel to the left or right. If a classification model produces dramatically different predictions as a result of translation by half a pixel or rotation by a few degrees it is likely misaligned with physical reality. Equivariance provides a formal notion of consistency under transformation. A function is equivariant if symmetries in the input space are preserved in the output space.
4
+
5
+ Baking equivariance into models through architecture design has led to breakthrough performance across many data modalities, including images [@kipf2016semi; @veeling2018rotation], proteins [@jumper2021highly] and atomic force fields [@batzner20223; @frey2022neural]. In computer vision, translation equivariance has historically been regarded as a particularly compelling property of convolutional neural networks (CNNs) [@lecun1995convolutional]. Imposing equivariance restricts the size of the hypothesis space, reducing the complexity of the learning problem and improving generalization [@goodfellow2016deep].
6
+
7
+ In most neural network classifiers, however, true equivariance has been challenging to achieve, and many works have shown that model outputs can change dramatically for small changes in the input space [@azulay2018deep; @engstrom2018rotation; @vasconcelos2021impact; @ribeiro2021convolutional]. Several authors have significantly improved the equivariance properties of CNNs with architectural changes inspired by careful signal processing [@zhang2019making; @karras2021alias], but non-architectural mechanisms for encouraging equivariance, such as data augmentation, continue to be necessary for good generalization performance [@wightman2021resnet].
8
+
9
+ The increased prominence of non-convolutional architectures, such as vision transformers (ViTs) and mixer models, simultaneously demonstrates that explicitly encoding architectural biases for equivariance is not necessary for good generalization in image classification, as ViT models perform on par with or better than their convolutional counterparts given sufficient data and well-chosen augmentations [@dosovitskiy2020image; @tolstikhin2021mlp]. Given the success of large flexible architectures and data augmentations, it is unclear what practical advantages explicit architectural constraints provide over learning equivariances from the data and augmentations. Resolving these questions systematically requires a unified equivariance metric and a large-scale evaluation.
10
+
11
+ In what follows, we introduce the Lie derivative as a tool for measuring the equivariance of neural networks under continuous transformations. The *local equivariance error* (LEE), constructed with the Lie derivative, makes it possible to compare equivariance across models and to analyze the contribution of each layer of a model to its overall equivariance. Using LEE, we conduct a large-scale analysis of hundreds of image classification models. The breadth of this study allows us to uncover a novel connection between equivariance and model generalization, and the surprising result that ViTs are often more equivariant than their convolutional counterparts after training. To explain this result, we use the layer-wise decomposition of LEE to demonstrate how common building block layers shared across ViTs and CNNs, such as pointwise non-linearities, frequently give rise to aliasing and violations of equivariance.
12
+
13
+ We make our code publicly available at <https://github.com/ngruver/lie-deriv>.
14
+
15
+ Equivariance provides a formal notion of consistency under transformation. A function $f:V_1 \to V_2$ is equivariant to transformations from a symmetry group $G$ if applying the symmetry to the input of $f$ is the same as applying it to the output $$\begin{equation}
16
+ \label{eq:equivariance}
17
+ \forall g\in G: \quad f(\rho_1(g)x) = \rho_2(g)f(x),
18
+ \end{equation}$$ where $\rho(g)$ is the *representation* of the group element, which is a linear map $V\to V$.
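+
+ To make the definition concrete, the following minimal sketch (ours, not taken from the paper) checks the equivariance condition for a convolutional layer, with $\rho_1 = \rho_2$ given by integer translation; the circular padding and `torch.roll` shift are assumptions chosen so that the identity holds exactly:
+
+ ``` python
+ import torch
+ import torch.nn as nn
+
+ torch.manual_seed(0)
+ # f: a conv layer; circular padding makes it exactly shift-equivariant
+ f = nn.Conv2d(3, 8, kernel_size=3, padding=1, padding_mode="circular")
+ x = torch.randn(1, 3, 32, 32)
+
+ shift = (5, -3)                                    # group element g
+ lhs = f(torch.roll(x, shifts=shift, dims=(2, 3)))  # f(rho_1(g) x)
+ rhs = torch.roll(f(x), shifts=shift, dims=(2, 3))  # rho_2(g) f(x)
+ print(torch.allclose(lhs, rhs, atol=1e-5))         # True
+ ```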
19
+
20
+ The most common example of equivariance in deep learning is the translation equivariance of convolutional layers: if we translate the input image by an integer number of pixels in $x$ and $y$, the output is also translated by the same amount, ignoring the regions close to the boundary of the image. Here $x\in V_1=V_2$ is an image and the representation $\rho_1=\rho_2$ expresses translations of the image. The translation *invariance* of certain neural networks is also an expression of the equivariance property, but where the output vector space $V_2$ has the trivial $\rho_2(g)=I$ representation, such that model outputs are unaffected by translations of the inputs. Equivariance is therefore a much richer framework, in which we can reason about complex representations at the input and the output of a function.
21
+
22
+ The inputs to classification models are discrete images sampled from a *continuous* reality. Although discrete representations are necessary for computers, the goal of classification models should be learning functions that generalize in the real world. It is therefore useful to consider an image as a function $h: \R^2 \mapsto \R^3$ rather than a discrete set of pixel values and broaden the symmetry groups we might consider, such as translations of an image by vector $\vb \in \R^2$, rather than an integer number of pixels.
23
+
24
+ Fourier analysis is a powerful tool for understanding the relationship between continuous signals and discrete samples by way of frequency decompositions. Any $M \times M$ image, $h(\vx)$, can be constructed from its frequency components, $H_{nm}$, using a $2d$ Fourier series, $h(\vx)= \frac{1}{2\pi} \sum_{n,m} H_{nm} e^{2\pi i \vx \cdot [n,m]}$ where $\vx \in [0,1]^2$ and $n,m \in [\text{-}M/2,\text{-}M/2+1, ...,M/2]$, the bandlimit defined by the size of the image.
25
+
26
+ Aliasing occurs when sampling at a limited frequency $f_s$, for example the size of an image in pixels, causes high frequency content (above the Nyquist frequency $f_s/2$) to be converted into spurious low frequency content. Content with frequency $n$ is observed as a lower frequency contribution at frequency $$\begin{equation}
27
+ \mathrm{Alias}(n) = \left\{
28
+ \begin{array}{ll}
29
+ n \bmod f_s &\text{if} \ (n \bmod f_s)<f_s/2 \\
30
+ (n \bmod f_s) - f_s &\text{if} \ (n \bmod f_s)>f_s/2 \\
31
+ \end{array}
32
+ \right\}.
33
+ \label{eq:alias}
34
+ \end{equation}$$ If a discretely sampled signal such as an image is assumed to have no frequency content higher than $f_s$, then the continuous signal can be uniquely reconstructed using the Fourier series and have a consistent continuous representation. But if the signal contains higher frequency content which gets aliased down by the sampling, then there is an ambiguity and exact reconstruction is not possible.
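+
+ As a quick numerical illustration (our sketch, not part of the paper), sampling a pure tone above the Nyquist frequency and taking a discrete Fourier transform shows the tone reappearing at $\mathrm{Alias}(n)$:
+
+ ``` python
+ import numpy as np
+
+ def alias(n, fs):
+     """Observed frequency of content at true frequency n (cf. the equation above)."""
+     m = n % fs
+     return m if m < fs / 2 else m - fs
+
+ fs = 16                                    # samples per unit length
+ t = np.arange(fs) / fs
+ n = 11                                     # above the Nyquist frequency fs/2 = 8
+ x = np.exp(2j * np.pi * n * t)
+ k = np.fft.fftfreq(fs, d=1 / fs)
+ peak = k[np.argmax(np.abs(np.fft.fft(x)))]
+ print(alias(n, fs), peak)                  # -5 -5.0: the tone lands at Alias(11)
+ ```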
35
+
36
+ Aliasing is critically important to our study because it breaks equivariance to continuous transformations like translation and rotation. When a continuous image is translated, its Fourier components pick up a phase: $$h(\vx) \mapsto h(\vx - \vb) \implies H_{nm} \mapsto H_{nm} e^{-2\pi i \vb \cdot [n,m]}.$$ However, when an aliased signal is translated, the phases are altered by the aliasing operation: $$H_{nm} \mapsto H_{nm} e^{-2\pi i (b_0 \mathrm{Alias}(n) + b_1 \mathrm{Alias}(m))}.$$ In other words, aliasing causes a translation *by the wrong amount*: the frequency component $H_{nm}$ will effectively be translated by $[(\mathrm{Alias}(n)/n ) b_0, (\mathrm{Alias}(m) / m) b_1]$, which may point in a different direction than $\vb$, potentially even the opposite one. Applying shifts to an aliased image yields the correct shifts for true frequencies below the Nyquist frequency but incorrect shifts for frequencies above it. Other continuous transformations, like rotation, suffer similar inconsistencies.
37
+
38
+ Many common operations in CNNs can lead to aliasing in subtle ways, breaking equivariance in turn. @zhang2019making, for example, demonstrates that downsampling layers cause CNNs to have inconsistent outputs for translated images. The underlying cause of the inconsistency is aliasing, which occurs when downsampling alters the high frequency content of the network activations. The $M \times M$ activations at a given layer of a convolutional network have spatial Nyquist frequencies $f_s = M/2$. Downsampling halves the size of the activations and the corresponding Nyquist frequencies. The result is aliasing of all nonzero content with $n \in [M/4,M/2]$. To prevent this aliasing, @zhang2019making uses a local low-pass filter (Blur-Pool) to directly remove the problematic frequency regions from the spectrum.
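+
+ For intuition, a simplified anti-aliased downsampling step in the spirit of Blur-Pool can be sketched as follows (our illustration; the published operator differs in filter size and placement):
+
+ ``` python
+ import torch
+ import torch.nn.functional as F
+
+ def blur_pool(x):
+     # 3x3 binomial low-pass filter applied depthwise, then stride-2 subsampling
+     k = torch.tensor([[1.0, 2.0, 1.0]])
+     k = (k.t() @ k / 16.0).view(1, 1, 3, 3).repeat(x.size(1), 1, 1, 1)
+     x = F.conv2d(x, k, padding=1, groups=x.size(1))
+     return x[..., ::2, ::2]
+
+ print(blur_pool(torch.randn(1, 8, 32, 32)).shape)  # torch.Size([1, 8, 16, 16])
+ ```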
39
+
40
+ While studying generative image models, @karras2021alias unearth a similar phenomenon in
41
+
42
+ ::: wrapfigure
43
+ r0.4 ![image](figures/spectrum.pdf){width="40%"}
44
+
45
+ []{#fig:harmonics label="fig:harmonics"}
46
+ :::
47
+
48
+ the pointwise nonlinearities of CNNs. Imagine an image at a single frequency $h(\vx) = \sin(2\pi \vx \cdot [n,m])$. Applying a nonlinear transformation to $h$ creates new high frequencies in the Fourier series, as illustrated in [\[fig:harmonics\]](#fig:harmonics){reference-type="ref+label" reference="fig:harmonics"}. These high frequencies may fall outside of the bandlimit, leading to aliasing. To counteract this effect, @karras2021alias opt for smoother non-linearities and perform upsampling before calculating the activations.
49
+
50
+ # Method
51
+
52
+ We evaluate the Lie derivative of many popular classification models under transformations including $2d$ translation, rotation, and shearing. We define continuous transformations on images using bilinear interpolation with reflection padding. In total, we evaluate 410 classification models, a collection comprising popular CNNs, vision transformers, and MLP-based architectures [@rw2019timm]. Beyond diversity in architectures, there is also substantial diversity in model size, training recipe, and the amount of data used for training or pretraining. This collection of models therefore covers many of the relevant axes of variance one is likely to consider in designing a system for classification. We include an exhaustive list of models in the Appendix [3.2](#sec:model-list){reference-type="ref" reference="sec:model-list"}.
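+
+ The exact Lie derivative computation uses automatic differentiation and is part of the released code; purely as a rough illustration, the translation Lie derivative of a model's (invariant) output can be approximated with a finite difference over a sub-pixel bilinear shift. All details below (the backbone, step size, and random weights) are illustrative assumptions:
+
+ ``` python
+ import torch
+ import torch.nn.functional as F
+ from torchvision.models import resnet18
+
+ def translate(x, tx):
+     # continuous horizontal shift by tx (in [-1, 1] grid units), bilinear + reflection
+     theta = torch.tensor([[[1.0, 0.0, -tx], [0.0, 1.0, 0.0]]]).expand(x.size(0), -1, -1)
+     grid = F.affine_grid(theta, list(x.shape), align_corners=False)
+     return F.grid_sample(x, grid, mode="bilinear",
+                          padding_mode="reflection", align_corners=False)
+
+ model = resnet18(weights=None).eval()  # random weights; any classifier works here
+ x = torch.randn(1, 3, 224, 224)
+ t = 1e-3                               # small sub-pixel shift
+ with torch.no_grad():
+     lie_deriv = (model(translate(x, t)) - model(x)) / t
+ print(lie_deriv.norm().item())         # local translation equivariance error
+ ```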
53
+
54
+ <figure id="fig:lie_deriv" data-latex-placement="t!">
55
+ <table>
56
+ <tbody>
57
+ <tr>
58
+ <td style="text-align: center;"><embed src="figures/other_metrics.pdf" style="height:14.0%" /></td>
59
+ <td style="text-align: center;"></td>
60
+ <td style="text-align: center;"><embed src="figures/other_transforms.pdf" style="height:14.0%" /></td>
61
+ </tr>
62
+ </tbody>
63
+ </table>
64
+ <p><embed src="figures/bottom_legend.pdf" style="height:3.0%" /> <embed src="figures/bottom_label.pdf" style="height:3.0%" /></p>
65
+ <figcaption> Equivariance metrics evaluated on the ImageNet test set. <strong>Left:</strong> Non-LEE equivariance metrics display similar trends to <a href="#fig:title-fig" data-reference-type="ref+label" data-reference="fig:title-fig">1</a>, despite using larger, multi-pixel transformations. <strong>Right:</strong> Norm of rotation and shear Lie derivatives. Across all architectures, models with strong generalization become more equivariant to many common affine transformations. Marker size indicates model size. Error bars show one standard error over test set images used in the equivariance calculation. </figcaption>
66
+ </figure>
67
+
68
+ As shown in [1](#fig:title-fig){reference-type="ref+label" reference="fig:title-fig"} (right), the translation equivariance error (Lie derivative norm) is strongly correlated with the ultimate test accuracy that the model achieves. Surprisingly, despite convolutional architectures being motivated and designed for their translation equivariance, we find no significant difference between the equivariance achieved by convolutional architectures and that of their more flexible ViT and Mixer counterparts when conditioning on test accuracy. This trend also extends to rotation and shearing transformations, which are common in data augmentation pipelines [@cubuk2020randaugment] (see [4](#fig:lie_deriv){reference-type="ref+label" reference="fig:lie_deriv"}, right). Additional transformation results are included in Appendix [3.4](#sec:more-transforms){reference-type="ref" reference="sec:more-transforms"}.
69
+
70
+ For comparison, in [4](#fig:lie_deriv){reference-type="ref+label" reference="fig:lie_deriv"} (left) we also evaluate the same set of models using two alternative equivariance metrics: prediction consistency under discrete translation [@zhang2019making] and expected equivariance under group samples [@finzi2020generalizing; @hutchinson2021lietransformer], which is similar in spirit to $\text{EQ-T}_{\text{frac}}$ [@karras2021alias] (exact calculations in Appendix [3.3](#sec:alt-equivariance-metrics){reference-type="ref" reference="sec:alt-equivariance-metrics"}). Crucially, these metrics are slightly less *local* than LEE, as they evaluate equivariance under transformations of up to 10 pixels at a time. The fact that we obtain similar trends highlights LEE's relevance beyond subpixel transformations.
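+
+ For reference, the discrete consistency baseline can be sketched as below (our simplification; the published metric's exact shift distribution and padding choices differ):
+
+ ``` python
+ import torch
+
+ def shift_consistency(model, images, max_shift=10):
+     """Fraction of images whose top-1 prediction survives a random
+     integer translation of up to max_shift pixels (circular shift here)."""
+     model.eval()
+     with torch.no_grad():
+         base = model(images).argmax(dim=1)
+         dy, dx = torch.randint(-max_shift, max_shift + 1, (2,)).tolist()
+         shifted = torch.roll(images, shifts=(dy, dx), dims=(2, 3))
+         moved = model(shifted).argmax(dim=1)
+     return (base == moved).float().mean().item()
+ ```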
71
+
72
+ In [3](#sec:related-work){reference-type="ref+label" reference="sec:related-work"} we described many architectural design choices that have been used to improve the equivariance of vision models, for example @zhang2019making's Blur-Pool low-pass filter. We now investigate how equivariance error can be reduced with non-architectural design decisions, such as increasing model size or dataset size, or changing the training method. Surprisingly, we show that equivariance error can often be significantly reduced without any changes in architecture.
73
+
74
+ In [5](#fig:scale-comparison){reference-type="ref+label" reference="fig:scale-comparison"}, we show slices of the data from [1](#fig:title-fig){reference-type="ref+label" reference="fig:title-fig"} along a shared axis for equivariance error. As a point of comparison, in [5](#fig:scale-comparison){reference-type="ref+label" reference="fig:scale-comparison"} (left), we show the impact of the Blur-Pool operation discussed above on a ResNet-50 [@zhang2019making]. In the accompanying four plots, we show the effects of increasing model scale (for both ViTs and CNNs), increasing dataset size, and finally different training procedures. Although @zhang2019making's architectural adjustment does have a noticeable effect, factors such as dataset size, model scale, and use of modern training methods have a much greater impact on learned equivariance.
75
+
76
+ As a prime example, in [5](#fig:scale-comparison){reference-type="ref+label" reference="fig:scale-comparison"} (right), we show a comparison of three training strategies for ResNeXt-50 -- an architecture almost identical to ResNet-50. We use @wightman2021resnet's pretrained model to illustrate the role of an improved training recipe and @wslimageseccv2018's semi-supervised model as an example of scaling training data. Notably, for a fixed architecture and model size, these changes lead to decreases in equivariance error on par with architectural interventions (BlurPool). This result is surprising when we consider that @wightman2021resnet's improved training recipe benefits significantly from Mixup [@zhang2017mixup] and CutMix [@yun2019cutmix], which have no obvious connection to equivariance. Similarly, @wslimageseccv2018's semi-supervised method has no explicit incentive for equivariance.
77
+
78
+ <figure id="fig:scale-comparison" data-latex-placement="t!">
79
+ <embed src="figures/case_studies.pdf" />
80
+ <figcaption>Case studies in decreasing translational equivariance error, numbered left-to-right. <strong>1</strong>: Blur-Pool <span class="citation" data-cites="zhang2019making"></span>, an architectural change to improve equivariance, decreases the equivariance error but by less than can be accomplished by improving the training recipe or increasing the scale of model or dataset. <strong>2-3</strong>: Increasing the number of parameters for a fixed model family (here ViTs <span class="citation" data-cites="el2021xcit"></span> and EfficientNets <span class="citation" data-cites="tan2019efficientnet"></span>). <strong>4</strong>: Increasing the training dataset size for a ResMLP Big <span class="citation" data-cites="touvron2021resmlp"></span> model. <strong>5</strong>: Changing the training recipe for ResNeXt-50 <span class="citation" data-cites="xie2017aggregated"></span> with improved augmentations <span class="citation" data-cites="wightman2021resnet"></span> or SSL pretraining <span class="citation" data-cites="DBLP:journals/corr/abs-1905-00546"></span>. Error bars show one standard error over images in the Lie derivative calculation.</figcaption>
81
+ </figure>
82
+
83
+ From our analysis above, large models appear to learn equivariances that rival architecture engineering in the classification setting. When learning equivariances through data augmentation, however, there is no guarantee that the equivariance will generalize to data that is far from the training distribution. Indeed, @engstrom2019exploring shows that carefully chosen translations or rotations can be as devastating to model performance as adversarial examples. We find that vision models do indeed have an *equivariance gap*: models are less equivariant on test data than on training data, and this gap grows for OOD inputs as shown in Figure [6](#fig:equivariance_gap){reference-type="ref" reference="fig:equivariance_gap"}. Notably, however, architectural biases do not have a strong effect on the equivariance gap, as both CNN and ViT models have comparable gaps for OOD inputs.
84
+
85
+ Given the deep historical connection between CNNs and equivariance, the results in [4](#fig:lie_deriv){reference-type="ref+label" reference="fig:lie_deriv"} and [6](#fig:equivariance_gap){reference-type="ref+label" reference="fig:equivariance_gap"} might appear counterintuitive. ViTs, CNNs, and Mixer models have quite different inductive biases and therefore often learn very different representations of data [@raghu2021vision]. Despite their differences, however, all of these architectures are fundamentally constructed from similar building blocks (convolutions, normalization layers, and non-linear activations) that can all contribute to aliasing and equivariance error. Given this shared foundation, vision models with high capacity and effective training recipes are more capable of fitting equivariances already present in augmented training data.
86
+
87
+ ::: wraptable
88
+ r0.39
89
+
90
+ Model                            Test Error (%)
91
+ -------------------------------- ----------------
92
+ G-CNN [@cohen2016group]          2.28
93
+ H-NET [@worrall2017harmonic]     1.69
94
+ ORN [@zhou2017oriented]          1.54
95
+ TI-Pooling [@laptev2016ti]       1.2
96
+ Finetuned MAE                    1.14
97
+ RotEqNet [@marcos2017rotation]   1.09
98
+ E(2)-CNN [@e2cnn]                0.68
99
+
100
+ []{#tab:rotmnist label="tab:rotmnist"}
101
+ :::
102
+
103
+ We finally consider the extent to which large-scale pretraining can match strong architectural priors in a case where equivariance is obviously desirable. We fine-tune a state-of-the-art vision transformer model pretrained with masked autoencoding [@MaskedAutoencoders2021] for 100 epochs on rotated MNIST [@e2cnn] (details in Appendix [3.5](#sec:mae-finetune){reference-type="ref" reference="sec:mae-finetune"}). This dataset, which contains MNIST digits rotated uniformly between -180 and 180 degrees, is a common benchmark for papers that design equivariant architectures. In [\[tab:rotmnist\]](#tab:rotmnist){reference-type="ref+label" reference="tab:rotmnist"} we show the test errors for many popular architectures with strict equivariance constraints alongside the error for our finetuned model. Surprisingly, the finetuned model achieves competitive test accuracy, in this case a strong proxy for rotation invariance. Despite having relatively weak architectural biases, transformers are capable of learning and generalizing well on symmetric data.
104
+
105
+ <figure id="fig:equivariance_gap" data-latex-placement="t!">
106
+ <embed src="figures/equivariance_gap.pdf" />
107
+ <figcaption>Models are less equivariant on test data and become decreasingly equivariant as the data moves away from the training manifold. As examples of data with similar distributions, we show equivariance error on the ImageNet train and test sets as well as CIFAR-100. As examples of out-of-distribution data, we use two medical datasets (which often use Imagenet pretraining), one for Histology <span class="citation" data-cites="kather2016multi"></span> and one for Retinopathy <span class="citation" data-cites="kaggle-diabetic-retinopathy"></span>.</figcaption>
108
+ </figure>
2210.15230/main_diagram/main_diagram.drawio ADDED
@@ -0,0 +1 @@
1
+ <mxfile host="app.diagrams.net" modified="2022-10-03T20:06:50.444Z" agent="5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/105.0.0.0 Safari/537.36" etag="4e1HsDR0WolnpjrOTPy5" version="20.0.3"><diagram id="XvBtL9slT5JeYgird1u2" name="Page-3">7Vzbdps4FP0aPzoLEGB4tHNrupqZZpKZtI+ykW0ajFwhx06/fiQQFyHZsVNwnRZ3rcY6SEKcs89FGyU9cL7YXBO4nN/iAEU9ywg2PXDRsyzTAB77wSUvmcQBfiaYkTDIREYpuA9/oHykkK7CACVClokoxhENl7JwguMYTagkg4TgtdxtiqNAEizhDEnL4IL7CYyQ0u0xDOg8k3pOpfcHFM7m+Z1NQ1xZwLyzECRzGOB1RQQue+CcYEyzb4vNOYq48mS9XG25WiyMoJjuM+A+/vLw4+HeDm/J7O+7j9825Cro22Jt9CV/YBSw5xdNTOgcz3AMo8tSOiJ4FQeIz2qwVtnnE8ZLJjSZ8Bui9EUYE64oZqI5XUTiKtqE9Evl+1c+1ZkjWhcbMXPaeMkbMSUvX6qNyijeLIelrXxc9nz8obaqTYgSvCIT0Qv4d5/n4OPSs/81/7u5BfY6GPZz+EEyQ2Loj/4C3H1fPV9cxU/ruwfr6es/j1k/YYNrhBeIrYeNIyiCNHyW1wEFPGdFPzF0SAh8qXRY4jCmSWXmz1zAOghPs10BM+Fnlu/LaHilvyd1Z1+yBeStypOUohRgerDtUuAzjFZCCz3LjZgqR2P2ZUZTc2WCKU4XW+LS/b7C+YV+kiJryDqY3nKTDRPX84ke0Ib2Ke7fLLiDW8Y1ihHJ9G8ZIkxlt2KPkt1NXgETV1ZVcxLZBdbzkKL7JUzRs2ZxUIb7NIyicxxhko4F0+nUmkyYPKEEP6HKlcAdu45b3E+D0WdEKNpURCrMxFW/BgdXhN11GcSAM8hk80oAy2GhA2YFHYcb3zrY+JNCM6VxgZF+VHtDrrA5ZqHGMvCUP0CPx8OdJt51HwSZMVz1Pj1ntCR4ipIkxHHPuWgGRebhKHL4P/EQFXn20aHLTT8NocuU0WWYCrqKRFhFl9MWusAfia63PAr7CRccW/E4WcpI1d1ipY3C2mnfvKQw1Whm5DgIn8NgBaOEzwJj9v8Y5Qp3XlvtW3R+bIWHhKBkyWrVLBelcKLMScjPQcr3DcMOtJCaIRZm2PRXyVMYF+P3iV2r48ez7XFrW6RrIJ4VAawIaJYS0FjJdMSANvgFAW2JSII5QNYIkjCeZeKDwsUbgx4ef+Obty6dZp1tFX3HTadehz5p+iIwn0A65Oo5Jd0IXJBGHr3LjSefG00baHKjZivZWnTKuUEpPNVprDgYcvqPtSYRZOXdRFaxvIOvqU/As2SqSnbqa4W30jNVW1VeI6FUhVcU6uyI9ocRSQrzU2SXvNZxalNkRJgYtYNCUiZyPXmijClTJmqMTtLxSR0M9oQBMAc1GFhvg4EyUU41HQsG9tZiZW8W0VZYxIP3eHJ19DcJZ2EMox1JWqmklFRTwzKL11RGr5wPYhyjWvIQIhiFs5i7AMMiS3ZgxKN/OIHRUFxYhEGQcvu6xKT6iGD2TU/1GXcwHPlXzWQe05QzT85NVhzE1TgIaC3vOCeNNKYXphbLuKRzblv27Yab+5kZgJMbHfIOQJ7nychT+U3Lbgl6298pbduQNbYjsQNeKatjzz/dfOZPFbMa2viwWqT7kEu+GFiDVsub9goC7B35ugEE2IPa+zE19jChigDTawsBuqL3HVLcYxIG6A+leWqQGiiQao3l0UPq8Bey7xBSR+VupgQv2JiA1eUkQQ1SNNsojMkqoiuCko6vKPmKmpt5mtzdEl2hdTOg26D89D41z4Oafevl0B3tjlknsge1vcGZIxnL9IA8yb67UM1URm2q5vahejO7nZlbN7Pjeq9N1baZdW/mtgbPfY4H1a0sB9MAIm+qPS7kTjw0njZU7vqKiSyV5jV9FSNNHBjSKtpWC95bvvE4+b3pLlclmGZbJnDR95vaqnju2aBmPLsUVexn27mRpS2LlUubt6F1ZGdxkBfYOmfxrDFoqo53LCUIgf2cpbUaw1YPQD3iRecuOuuZTt16jnka3qIWitfi9aQxCmHSqusc51iqLlb5qu8MNMWI1Zrv6GjdrnBLc7txZjVUuenmOnKFbncV+jEM7ZmvztW2oX/LGt01VSP98iJdPSn2Kb1tV3codYfvnblANh8rMTT01JbCwy2s37wZ/SM7zFHqdNdWI1F+WuA1h/Fb0rSj7movIHnq/EW1Ht9lyUdLTGZQ9aUJcxcTaPzFas9f8iNTFSveZ+cHM+38NvW64/v1zZKuXNe8CG2tXM/PKXVVnEqOWoUnCGuBeom9P9GqzGVZx63iHN0vdXWGbtjQrg1enattQ+/xguz9levOQPWgPWnCQWuxU6U6HlHCyoSOKNQY0DXO8kRfnG09kYLd2YPKeIcFu6HEIsvR/LaBewY0FUdr5Lqj0gl/4bjfec72qlG1I4uGp8CxOyphcZ4eoUkPzP4uVbtrFh6S53SzNZadNcu/YpPVBOXfAgKX/wM=</diagram></mxfile>
2210.15230/main_diagram/main_diagram.pdf ADDED
Binary file (49.6 kB).
 
2210.15230/paper_text/intro_method.md ADDED
@@ -0,0 +1,38 @@
1
+ # Introduction
2
+
3
+ Recent Text-to-Image generative models [@ramesh2021zero; @ramesh2022hierarchical; @ding2021cogview; @saharia2022photorealistic; @nichol2021glide; @rombach2022high] can synthesize high-quality photo-realistic images conditional on natural language text descriptions in a zero-shot fashion.
4
+
5
+ <figure id="fig:intro" data-latex-placement="ht">
6
+ <embed src="EMNLP 2022/intro-image.pdf" />
7
+ <figcaption> We study the change in the text-to-image model generations across various groups (man/woman, light-skinned/dark-skinned, Western/Non-Western) before and after adding ethical interventions (in <span>purple</span>) during text-to-image generation. To analyze the bias in model outputs, we use CLIP and human annotators to annotate the social groups of the model generations. We present a few generated results in Appendix Fig. <a href="#fig:sd-doctor" data-reference-type="ref" data-reference="fig:sd-doctor">4</a>-<a href="#fig:minD-bride" data-reference-type="ref" data-reference="fig:minD-bride">8</a>.</figcaption>
8
+ </figure>
9
+
10
+ For instance, they can generate an image of 'an armchair in the shape of an avocado', which rarely appears in the real world. However, despite the unprecedented zero-shot abilities of the text-to-image generative models, recent experiments with small-scale instantiations (such as minDALL$\cdot$E) have shown that prompting the model with neutral texts ('a photo of a lawyer'), devoid of any cues towards a social group, still generates images that are biased towards *white males* [@cho2022dall].
11
+
12
+ In our work, we consider three bias axes -- 1) a {man, woman} grouping across the gender axis, 2) a {light-skinned, dark-skinned} grouping across the skin color axis, and 3) a {Western, Non-Western} grouping across the cultural axis.[^2] The existence of any gender[^3] and skin color bias[^4] (see Ethical Statements for more discussion) causes potential harm to underrepresented groups by amplifying bias present in the dataset [@Birhane2021MultimodalDM; @barocas2018fairness]. Hence, it is essential for a text-to-image system to generate a *diverse* set of images.
13
+
14
+ To this end, we study *if the presence of additional knowledge that supports equitable judgment helps in diversifying model generations*. As part of the text input, this knowledge acts as an *ethical intervention* augmented to the original prompt [@zhao-etal-2021-ethical][^5]. Ethical interventions provide models with ethical advice and do not give away any visual cues towards a specific social group. For instance, in the context of generating 'a photo of a lawyer', which tends to be biased towards 'light-skinned man', we wish to study if prompting the model with an ethically intervened prompt (e.g., 'a photo of a lawyer *if all individuals can be a lawyer irrespective of their gender*') can diversify the outputs.
15
+
16
+ We introduce an **E**thical **N**a**T**ural Language **I**nterventions in Text-to-Image **GEN**eration (ENTIGEN) benchmark dataset to study the change in the perceived societal bias of the text-to-image generative models in the presence of ethical interventions. ENTIGEN covers prompts to study the bias across three axes -- gender, skin color and culture. The neutral prompts in the ENTIGEN dataset are intervened with corresponding ethical knowledge as illustrated in Figure [1](#fig:intro){reference-type="ref" reference="fig:intro"}. We evaluate ENTIGEN on publicly available models -- minDALL$\cdot$E [@kakaobrain2021minDALLE], DALL$\cdot$E-mini [@Dayma2021], and Stable Diffusion [@rombach2022high] -- automatically with the CLIP model [@radford2019language] and manually with human annotators from MTurk.
17
+
18
+ Through our experiments, (1) we show that a few ethical interventions lead to the diversification of the image generations across different groups while preserving the image generation quality. Interestingly, in some cases, we observe that the bias can be flipped towards the originally underrepresented groups with ethical interventions (Appendix Figure [6](#fig:dmini-makeup){reference-type="ref" reference="fig:dmini-makeup"}). (2) Moreover, we find that interventions containing keywords such as 'irrespective of gender' and 'culture' tend to trigger a large change in model generations. We further analyze the pre-training data to examine the context in which these keywords are used, to better understand how they may affect diversity in generation.
19
+
20
+ # Method
21
+
22
+ In this section, we introduce the process of building an ethical intervention benchmark ENTIGEN and evaluating the images generated by text-to-image generative models.
23
+
24
+ Initially, we determine three axes of societal bias to be studied: gender, skin color, and culture. Specifically, to investigate gender and skin color bias, ENTIGEN consists of prompts belonging to the categories of *professions* and *objects*. For assessing cultural bias, it consists of prompts surrounding *wedding ceremonies*, as they are ubiquitous and diverse across different regions [@bell1997ritual; @xu2018comparison; @Acharya2020AnAO].
25
+
26
+ Based on the selected axes, we design neutral prompts without any ethical interventions as the *original* prompts. Subsequently, we append ethical interventions to the original prompts, which may amend the model's behaviour towards more diverse generations. For instance,
27
+
28
+ 1. **Original Prompt:** a photo of a person wearing a \[*object*\]
29
+
30
+ 2. **With Ethical Intervention:** a photo of a person wearing a \[*object*\] [if all individuals can wear a \[*object*\] irrespective of their gender]{style="color: blue"}
31
+
32
+ 'If all individuals can wear a \[*object*\] irrespective of their gender' is an ethical intervention that guides diverse outputs in terms of gender. We require the ethical interventions to not give away any visual cues to eliminate the effect of any explicit guidance.
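+
+ A minimal sketch of this templating (the attribute values and function names below are ours, purely for illustration):
+
+ ``` python
+ OBJECTS = ["suit", "earring"]  # illustrative stand-ins for the attribute set
+
+ def entigen_prompts(obj):
+     original = f"a photo of a person wearing a {obj}"
+     intervention = (f"{original} if all individuals can wear a {obj} "
+                     "irrespective of their gender")
+     return original, intervention
+
+ for obj in OBJECTS:
+     print(*entigen_prompts(obj), sep="\n")
+ ```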
33
+
34
+ We further include *irrelevant* interventions in ENTIGEN. These interventions also provide ethical advice, but do not correspond to any of the social axes we study in ENTIGEN. For example, 'if honesty is the best policy' is an irrelevant intervention since it is unrelated to gender, skin color and culture. Ideally, these interventions should not diversify image generations along any of the studied social axes.
35
+
36
+ In total, we create 246 prompts based on an attribute set containing diverse professions, objects, and cultural scenarios.[^6]
37
+
38
+ Each prompt in ENTIGEN is used to generate 9 images from each text-to-image generation model. We choose the publicly available models minDALL$\cdot$E, DALL$\cdot$E-mini, and Stable Diffusion for analysis, mainly because these three models can generate high-quality images efficiently. We provide more details in Appendix [7](#detailsig){reference-type="ref" reference="detailsig"}.
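+
+ As an illustration of the generation step (not the paper's exact settings or seeds), 9 images per prompt can be sampled from Stable Diffusion with the `diffusers` library:
+
+ ``` python
+ import torch
+ from diffusers import StableDiffusionPipeline
+
+ pipe = StableDiffusionPipeline.from_pretrained(
+     "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
+ ).to("cuda")
+
+ prompt = ("a photo of a lawyer if all individuals can be "
+           "a lawyer irrespective of their gender")
+ images = pipe(prompt, num_images_per_prompt=9).images  # list of 9 PIL images
+ for i, img in enumerate(images):
+     img.save(f"lawyer_intervened_{i}.png")
+ ```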
2305.07185/main_diagram/main_diagram.drawio ADDED
@@ -0,0 +1,751 @@
1
+ <mxfile host="app.diagrams.net" modified="2023-04-25T01:22:05.586Z" agent="Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36" etag="x2Kn2I6si_dV_5QVJppC" version="21.1.5" type="google">
2
+ <diagram name="Page-1" id="NfWfCpEs24WzQ-06m8vV">
3
+ <mxGraphModel dx="1141" dy="825" grid="1" gridSize="10" guides="1" tooltips="1" connect="1" arrows="1" fold="1" page="1" pageScale="1" pageWidth="1700" pageHeight="1100" math="0" shadow="0">
4
+ <root>
5
+ <mxCell id="0" />
6
+ <mxCell id="1" parent="0" />
7
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-519" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#dae8fc;strokeColor=#808080;container=0;movable=1;resizable=1;rotatable=1;deletable=1;editable=1;locked=0;connectable=1;" parent="1" vertex="1">
8
+ <mxGeometry x="280" y="880" width="40" height="40" as="geometry" />
9
+ </mxCell>
10
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-520" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#dae8fc;strokeColor=#808080;container=0;movable=1;resizable=1;rotatable=1;deletable=1;editable=1;locked=0;connectable=1;" parent="1" vertex="1">
11
+ <mxGeometry x="320" y="880" width="40" height="40" as="geometry" />
12
+ </mxCell>
13
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-521" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#E6D0DE;strokeColor=#808080;container=0;movable=1;resizable=1;rotatable=1;deletable=1;editable=1;locked=0;connectable=1;" parent="1" vertex="1">
14
+ <mxGeometry x="360" y="880" width="40" height="40" as="geometry" />
15
+ </mxCell>
16
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-522" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#E6E6E6;strokeColor=#808080;container=0;dashed=1;movable=1;resizable=1;rotatable=1;deletable=1;editable=1;locked=0;connectable=1;dashPattern=8 8;" parent="1" vertex="1">
17
+ <mxGeometry x="400" y="880" width="40" height="40" as="geometry" />
18
+ </mxCell>
19
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-513" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#dae8fc;strokeColor=#6c8ebf;container=0;movable=1;resizable=1;rotatable=1;deletable=1;editable=1;locked=0;connectable=1;" parent="1" vertex="1">
20
+ <mxGeometry x="280" y="880" width="40" height="40" as="geometry" />
21
+ </mxCell>
22
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-514" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#dae8fc;strokeColor=#6c8ebf;container=0;movable=1;resizable=1;rotatable=1;deletable=1;editable=1;locked=0;connectable=1;" parent="1" vertex="1">
23
+ <mxGeometry x="320" y="880" width="40" height="40" as="geometry" />
24
+ </mxCell>
25
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-515" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#e1d5e7;strokeColor=#9673a6;container=0;movable=1;resizable=1;rotatable=1;deletable=1;editable=1;locked=0;connectable=1;" parent="1" vertex="1">
26
+ <mxGeometry x="360" y="880" width="40" height="40" as="geometry" />
27
+ </mxCell>
28
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-516" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#e1d5e7;strokeColor=#9673a6;container=0;movable=1;resizable=1;rotatable=1;deletable=1;editable=1;locked=0;connectable=1;" parent="1" vertex="1">
29
+ <mxGeometry x="400" y="880" width="40" height="40" as="geometry" />
30
+ </mxCell>
31
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-517" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#E6E6E6;strokeColor=default;container=0;movable=1;resizable=1;rotatable=1;deletable=1;editable=1;locked=0;connectable=1;dashed=1;dashPattern=8 8;" parent="1" vertex="1">
32
+ <mxGeometry x="440" y="880" width="40" height="40" as="geometry" />
33
+ </mxCell>
34
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-518" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#E6E6E6;strokeColor=default;container=0;movable=1;resizable=1;rotatable=1;deletable=1;editable=1;locked=0;connectable=1;dashed=1;dashPattern=8 8;" parent="1" vertex="1">
35
+ <mxGeometry x="480" y="880" width="40" height="40" as="geometry" />
36
+ </mxCell>
37
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-523" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#E6E6E6;strokeColor=#808080;container=0;movable=1;resizable=1;rotatable=1;deletable=1;editable=1;locked=0;connectable=1;dashed=1;dashPattern=8 8;" parent="1" vertex="1">
38
+ <mxGeometry x="440" y="880" width="40" height="40" as="geometry" />
39
+ </mxCell>
40
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-524" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#E6E6E6;strokeColor=#808080;container=0;movable=1;resizable=1;rotatable=1;deletable=1;editable=1;locked=0;connectable=1;dashed=1;dashPattern=8 8;" parent="1" vertex="1">
41
+ <mxGeometry x="480" y="880" width="40" height="40" as="geometry" />
42
+ </mxCell>
43
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-570" value="" style="endArrow=classic;html=1;rounded=0;fontSize=14;strokeColor=default;" parent="1" edge="1">
44
+ <mxGeometry width="50" height="50" relative="1" as="geometry">
45
+ <mxPoint x="299" y="901.5" as="sourcePoint" />
46
+ <mxPoint x="427" y="902" as="targetPoint" />
47
+ </mxGeometry>
48
+ </mxCell>
49
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-495" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#f8cecc;strokeColor=#808080;container=0;movable=1;resizable=1;rotatable=1;deletable=1;editable=1;locked=0;connectable=1;" parent="1" vertex="1">
50
+ <mxGeometry x="280" y="760" width="40" height="40" as="geometry" />
51
+ </mxCell>
52
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-496" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#f8cecc;strokeColor=#808080;container=0;movable=1;resizable=1;rotatable=1;deletable=1;editable=1;locked=0;connectable=1;" parent="1" vertex="1">
53
+ <mxGeometry x="320" y="760" width="40" height="40" as="geometry" />
54
+ </mxCell>
55
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-497" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#f8cecc;strokeColor=#808080;container=0;movable=1;resizable=1;rotatable=1;deletable=1;editable=1;locked=0;connectable=1;" parent="1" vertex="1">
56
+ <mxGeometry x="360" y="760" width="40" height="40" as="geometry" />
57
+ </mxCell>
58
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-498" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#f8cecc;strokeColor=#808080;container=0;movable=1;resizable=1;rotatable=1;deletable=1;editable=1;locked=0;connectable=1;" parent="1" vertex="1">
59
+ <mxGeometry x="400" y="760" width="40" height="40" as="geometry" />
60
+ </mxCell>
61
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-499" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#ffe6cc;strokeColor=#808080;container=0;movable=1;resizable=1;rotatable=1;deletable=1;editable=1;locked=0;connectable=1;" parent="1" vertex="1">
62
+ <mxGeometry x="440" y="760" width="40" height="40" as="geometry" />
63
+ </mxCell>
64
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-500" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#ffe6cc;strokeColor=#808080;container=0;movable=1;resizable=1;rotatable=1;deletable=1;editable=1;locked=0;connectable=1;" parent="1" vertex="1">
65
+ <mxGeometry x="480" y="760" width="40" height="40" as="geometry" />
66
+ </mxCell>
67
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-501" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#ffe6cc;strokeColor=#808080;container=0;movable=1;resizable=1;rotatable=1;deletable=1;editable=1;locked=0;connectable=1;" parent="1" vertex="1">
68
+ <mxGeometry x="280" y="800" width="40" height="40" as="geometry" />
69
+ </mxCell>
70
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-502" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#ffe6cc;strokeColor=#808080;container=0;movable=1;resizable=1;rotatable=1;deletable=1;editable=1;locked=0;connectable=1;" parent="1" vertex="1">
71
+ <mxGeometry x="320" y="800" width="40" height="40" as="geometry" />
72
+ </mxCell>
73
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-503" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#FFF4C3;strokeColor=#808080;container=0;movable=1;resizable=1;rotatable=1;deletable=1;editable=1;locked=0;connectable=1;" parent="1" vertex="1">
74
+ <mxGeometry x="360" y="800" width="40" height="40" as="geometry" />
75
+ </mxCell>
76
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-504" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#FFF4C3;strokeColor=#808080;container=0;movable=1;resizable=1;rotatable=1;deletable=1;editable=1;locked=0;connectable=1;" parent="1" vertex="1">
77
+ <mxGeometry x="400" y="800" width="40" height="40" as="geometry" />
78
+ </mxCell>
79
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-505" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#FFF4C3;strokeColor=#808080;container=0;movable=1;resizable=1;rotatable=1;deletable=1;editable=1;locked=0;connectable=1;" parent="1" vertex="1">
80
+ <mxGeometry x="440" y="800" width="40" height="40" as="geometry" />
81
+ </mxCell>
82
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-506" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#FFF4C3;strokeColor=#808080;container=0;movable=1;resizable=1;rotatable=1;deletable=1;editable=1;locked=0;connectable=1;" parent="1" vertex="1">
83
+ <mxGeometry x="480" y="800" width="40" height="40" as="geometry" />
84
+ </mxCell>
85
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-507" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#d5e8d4;strokeColor=#808080;container=0;movable=1;resizable=1;rotatable=1;deletable=1;editable=1;locked=0;connectable=1;" parent="1" vertex="1">
86
+ <mxGeometry x="280" y="840" width="40" height="40" as="geometry" />
87
+ </mxCell>
88
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-508" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#d5e8d4;strokeColor=#808080;container=0;movable=1;resizable=1;rotatable=1;deletable=1;editable=1;locked=0;connectable=1;" parent="1" vertex="1">
89
+ <mxGeometry x="320" y="840" width="40" height="40" as="geometry" />
90
+ </mxCell>
91
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-509" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#d5e8d4;strokeColor=#808080;container=0;movable=1;resizable=1;rotatable=1;deletable=1;editable=1;locked=0;connectable=1;" parent="1" vertex="1">
92
+ <mxGeometry x="360" y="840" width="40" height="40" as="geometry" />
93
+ </mxCell>
94
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-510" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#d5e8d4;strokeColor=#808080;container=0;movable=1;resizable=1;rotatable=1;deletable=1;editable=1;locked=0;connectable=1;" parent="1" vertex="1">
95
+ <mxGeometry x="400" y="840" width="40" height="40" as="geometry" />
96
+ </mxCell>
97
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-511" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#dae8fc;strokeColor=#808080;container=0;movable=1;resizable=1;rotatable=1;deletable=1;editable=1;locked=0;connectable=1;" parent="1" vertex="1">
98
+ <mxGeometry x="440" y="840" width="40" height="40" as="geometry" />
99
+ </mxCell>
100
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-512" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#dae8fc;strokeColor=#808080;container=0;movable=1;resizable=1;rotatable=1;deletable=1;editable=1;locked=0;connectable=1;" parent="1" vertex="1">
101
+ <mxGeometry x="480" y="840" width="40" height="40" as="geometry" />
102
+ </mxCell>
103
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-494" value="" style="group;strokeColor=#808080;container=0;" parent="1" connectable="0" vertex="1">
104
+ <mxGeometry x="280" y="760" width="240" height="240" as="geometry" />
105
+ </mxCell>
106
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-108" value="" style="group;fillColor=#CFE6B8;strokeColor=#808080;container=0;" parent="1" connectable="0" vertex="1">
107
+ <mxGeometry x="280" y="320" width="320" height="40" as="geometry" />
108
+ </mxCell>
109
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-110" value="" style="group;fillColor=#CFE6B8;strokeColor=#808080;container=0;" parent="1" connectable="0" vertex="1">
110
+ <mxGeometry x="280" y="360" width="320" height="40" as="geometry" />
111
+ </mxCell>
112
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-119" value="" style="group;fillColor=#CFE6B8;strokeColor=#808080;container=0;" parent="1" connectable="0" vertex="1">
113
+ <mxGeometry x="280" y="400" width="320" height="40" as="geometry" />
114
+ </mxCell>
115
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-128" value="" style="group;fillColor=#CFE6B8;strokeColor=#808080;container=0;" parent="1" connectable="0" vertex="1">
116
+ <mxGeometry x="280" y="440" width="320" height="40" as="geometry" />
117
+ </mxCell>
118
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-137" value="" style="group;fillColor=#CFE6B8;strokeColor=#808080;container=0;" parent="1" connectable="0" vertex="1">
119
+ <mxGeometry x="280" y="440" width="320" height="40" as="geometry" />
120
+ </mxCell>
121
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-146" value="" style="group;fillColor=#E6E6E6;strokeColor=#808080;container=0;" parent="1" connectable="0" vertex="1">
122
+ <mxGeometry x="280" y="480" width="320" height="40" as="geometry" />
123
+ </mxCell>
124
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-155" value="" style="group;fillColor=#E6E6E6;strokeColor=#808080;container=0;" parent="1" connectable="0" vertex="1">
125
+ <mxGeometry x="280" y="520" width="320" height="40" as="geometry" />
126
+ </mxCell>
127
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-164" value="" style="group;fillColor=#E6E6E6;strokeColor=#808080;container=0;" parent="1" connectable="0" vertex="1">
128
+ <mxGeometry x="280" y="560" width="320" height="40" as="geometry" />
129
+ </mxCell>
130
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-1" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#FFD4D2;strokeColor=#808080;container=0;" parent="1" vertex="1">
131
+ <mxGeometry x="280" y="320" width="40" height="40" as="geometry" />
132
+ </mxCell>
133
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-3" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#FFD4D2;strokeColor=#808080;container=0;" parent="1" vertex="1">
134
+ <mxGeometry x="320" y="320" width="40" height="40" as="geometry" />
135
+ </mxCell>
136
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-4" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#FFE6CC;strokeColor=#808080;container=0;" parent="1" vertex="1">
137
+ <mxGeometry x="360" y="320" width="40" height="40" as="geometry" />
138
+ </mxCell>
139
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-5" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#FFE6CC;strokeColor=#808080;container=0;" parent="1" vertex="1">
140
+ <mxGeometry x="400" y="320" width="40" height="40" as="geometry" />
141
+ </mxCell>
142
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-6" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#FFFFCC;strokeColor=#808080;container=0;" parent="1" vertex="1">
143
+ <mxGeometry x="440" y="320" width="40" height="40" as="geometry" />
144
+ </mxCell>
145
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-7" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#FFFFCC;strokeColor=#808080;container=0;" parent="1" vertex="1">
146
+ <mxGeometry x="480" y="320" width="40" height="40" as="geometry" />
147
+ </mxCell>
148
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-8" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#D5E8D4;strokeColor=#808080;container=0;" parent="1" vertex="1">
149
+ <mxGeometry x="520" y="320" width="40" height="40" as="geometry" />
150
+ </mxCell>
151
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-9" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#D5E8D4;strokeColor=#808080;container=0;" parent="1" vertex="1">
152
+ <mxGeometry x="560" y="320" width="40" height="40" as="geometry" />
153
+ </mxCell>
154
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-111" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#FFD4D2;strokeColor=#808080;container=0;" parent="1" vertex="1">
155
+ <mxGeometry x="280" y="360" width="40" height="40" as="geometry" />
156
+ </mxCell>
157
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-112" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#FFD4D2;strokeColor=#808080;container=0;" parent="1" vertex="1">
158
+ <mxGeometry x="320" y="360" width="40" height="40" as="geometry" />
159
+ </mxCell>
160
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-113" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#FFE6CC;strokeColor=#808080;container=0;" parent="1" vertex="1">
161
+ <mxGeometry x="360" y="360" width="40" height="40" as="geometry" />
162
+ </mxCell>
163
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-114" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#FFE6CC;strokeColor=#808080;container=0;" parent="1" vertex="1">
164
+ <mxGeometry x="400" y="360" width="40" height="40" as="geometry" />
165
+ </mxCell>
166
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-115" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#FFFFCC;strokeColor=#808080;container=0;" parent="1" vertex="1">
167
+ <mxGeometry x="440" y="360" width="40" height="40" as="geometry" />
168
+ </mxCell>
169
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-116" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#FFFFCC;strokeColor=#808080;container=0;" parent="1" vertex="1">
170
+ <mxGeometry x="480" y="360" width="40" height="40" as="geometry" />
171
+ </mxCell>
172
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-117" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#D5E8D4;strokeColor=#808080;container=0;" parent="1" vertex="1">
173
+ <mxGeometry x="520" y="360" width="40" height="40" as="geometry" />
174
+ </mxCell>
175
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-118" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#D5E8D4;strokeColor=#808080;container=0;" parent="1" vertex="1">
176
+ <mxGeometry x="560" y="360" width="40" height="40" as="geometry" />
177
+ </mxCell>
178
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-120" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#CCFFE6;strokeColor=#808080;container=0;" parent="1" vertex="1">
179
+ <mxGeometry x="280" y="400" width="40" height="40" as="geometry" />
180
+ </mxCell>
181
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-121" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#CCFFE6;strokeColor=#808080;container=0;" parent="1" vertex="1">
182
+ <mxGeometry x="320" y="400" width="40" height="40" as="geometry" />
183
+ </mxCell>
184
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-122" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#D4E1F5;strokeColor=#808080;container=0;dashed=1;dashPattern=12 12;" parent="1" vertex="1">
185
+ <mxGeometry x="360" y="400" width="40" height="40" as="geometry" />
186
+ </mxCell>
187
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-123" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#D4E1F5;strokeColor=#808080;container=0;dashed=1;dashPattern=12 12;" parent="1" vertex="1">
188
+ <mxGeometry x="400" y="400" width="40" height="40" as="geometry" />
189
+ </mxCell>
190
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-124" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#E6E6E6;strokeColor=#808080;container=0;" parent="1" vertex="1">
191
+ <mxGeometry x="440" y="400" width="40" height="40" as="geometry" />
192
+ </mxCell>
193
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-125" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#E6E6E6;strokeColor=#808080;container=0;" parent="1" vertex="1">
194
+ <mxGeometry x="480" y="400" width="40" height="40" as="geometry" />
195
+ </mxCell>
196
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-126" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#E6E6E6;strokeColor=#808080;container=0;" parent="1" vertex="1">
197
+ <mxGeometry x="520" y="400" width="40" height="40" as="geometry" />
198
+ </mxCell>
199
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-127" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#E6E6E6;strokeColor=#808080;container=0;" parent="1" vertex="1">
200
+ <mxGeometry x="560" y="400" width="40" height="40" as="geometry" />
201
+ </mxCell>
202
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-129" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#CCFFE6;strokeColor=#808080;container=0;" parent="1" vertex="1">
203
+ <mxGeometry x="280" y="440" width="40" height="40" as="geometry" />
204
+ </mxCell>
205
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-130" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#CCFFE6;strokeColor=#808080;container=0;" parent="1" vertex="1">
206
+ <mxGeometry x="320" y="440" width="40" height="40" as="geometry" />
207
+ </mxCell>
208
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-131" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#CFE6B8;strokeColor=#808080;container=0;" parent="1" vertex="1">
209
+ <mxGeometry x="360" y="440" width="40" height="40" as="geometry" />
210
+ </mxCell>
211
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-132" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#CFE6B8;strokeColor=#808080;container=0;" parent="1" vertex="1">
212
+ <mxGeometry x="400" y="440" width="40" height="40" as="geometry" />
213
+ </mxCell>
214
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-133" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#E6E6E6;strokeColor=#808080;container=0;" parent="1" vertex="1">
215
+ <mxGeometry x="440" y="440" width="40" height="40" as="geometry" />
216
+ </mxCell>
217
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-134" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#E6E6E6;strokeColor=#808080;container=0;" parent="1" vertex="1">
218
+ <mxGeometry x="480" y="440" width="40" height="40" as="geometry" />
219
+ </mxCell>
220
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-135" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#E6E6E6;strokeColor=#808080;container=0;" parent="1" vertex="1">
221
+ <mxGeometry x="520" y="440" width="40" height="40" as="geometry" />
222
+ </mxCell>
223
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-136" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#E6E6E6;strokeColor=#808080;container=0;" parent="1" vertex="1">
224
+ <mxGeometry x="560" y="440" width="40" height="40" as="geometry" />
225
+ </mxCell>
226
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-138" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#CCFFE6;strokeColor=#808080;container=0;" parent="1" vertex="1">
227
+ <mxGeometry x="280" y="440" width="40" height="40" as="geometry" />
228
+ </mxCell>
229
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-139" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#CCFFE6;strokeColor=#808080;container=0;" parent="1" vertex="1">
230
+ <mxGeometry x="320" y="440" width="40" height="40" as="geometry" />
231
+ </mxCell>
232
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-140" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#D4E1F5;strokeColor=#808080;container=0;dashed=1;dashPattern=12 12;" parent="1" vertex="1">
233
+ <mxGeometry x="360" y="440" width="40" height="40" as="geometry" />
234
+ </mxCell>
235
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-141" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#E6E6E6;strokeColor=#808080;container=0;dashed=1;dashPattern=12 12;" parent="1" vertex="1">
236
+ <mxGeometry x="400" y="440" width="40" height="40" as="geometry" />
237
+ </mxCell>
238
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-142" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#E6E6E6;strokeColor=#808080;container=0;" parent="1" vertex="1">
239
+ <mxGeometry x="440" y="440" width="40" height="40" as="geometry" />
240
+ </mxCell>
241
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-143" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#E6E6E6;strokeColor=#808080;container=0;" parent="1" vertex="1">
242
+ <mxGeometry x="480" y="440" width="40" height="40" as="geometry" />
243
+ </mxCell>
244
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-144" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#E6E6E6;strokeColor=#808080;container=0;" parent="1" vertex="1">
245
+ <mxGeometry x="520" y="440" width="40" height="40" as="geometry" />
246
+ </mxCell>
247
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-145" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#E6E6E6;strokeColor=#808080;container=0;" parent="1" vertex="1">
248
+ <mxGeometry x="560" y="440" width="40" height="40" as="geometry" />
249
+ </mxCell>
250
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-147" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#E6E6E6;strokeColor=#808080;container=0;" parent="1" vertex="1">
251
+ <mxGeometry x="280" y="480" width="40" height="40" as="geometry" />
252
+ </mxCell>
253
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-148" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#E6E6E6;strokeColor=#808080;container=0;" parent="1" vertex="1">
254
+ <mxGeometry x="320" y="480" width="40" height="40" as="geometry" />
255
+ </mxCell>
256
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-149" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#E6E6E6;strokeColor=#808080;container=0;" parent="1" vertex="1">
257
+ <mxGeometry x="360" y="480" width="40" height="40" as="geometry" />
258
+ </mxCell>
259
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-150" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#E6E6E6;strokeColor=#808080;container=0;" parent="1" vertex="1">
260
+ <mxGeometry x="400" y="480" width="40" height="40" as="geometry" />
261
+ </mxCell>
262
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-151" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#E6E6E6;strokeColor=#808080;container=0;" parent="1" vertex="1">
263
+ <mxGeometry x="440" y="480" width="40" height="40" as="geometry" />
264
+ </mxCell>
265
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-152" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#E6E6E6;strokeColor=#808080;container=0;" parent="1" vertex="1">
266
+ <mxGeometry x="480" y="480" width="40" height="40" as="geometry" />
267
+ </mxCell>
268
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-153" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#E6E6E6;strokeColor=#808080;container=0;" parent="1" vertex="1">
269
+ <mxGeometry x="520" y="480" width="40" height="40" as="geometry" />
270
+ </mxCell>
271
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-154" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#E6E6E6;strokeColor=#808080;container=0;" parent="1" vertex="1">
272
+ <mxGeometry x="560" y="480" width="40" height="40" as="geometry" />
273
+ </mxCell>
274
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-156" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#E6E6E6;strokeColor=#808080;container=0;" parent="1" vertex="1">
275
+ <mxGeometry x="280" y="520" width="40" height="40" as="geometry" />
276
+ </mxCell>
277
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-157" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#E6E6E6;strokeColor=#808080;container=0;" parent="1" vertex="1">
278
+ <mxGeometry x="320" y="520" width="40" height="40" as="geometry" />
279
+ </mxCell>
280
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-158" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#E6E6E6;strokeColor=#808080;container=0;" parent="1" vertex="1">
281
+ <mxGeometry x="360" y="520" width="40" height="40" as="geometry" />
282
+ </mxCell>
283
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-159" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#E6E6E6;strokeColor=#808080;container=0;" parent="1" vertex="1">
284
+ <mxGeometry x="400" y="520" width="40" height="40" as="geometry" />
285
+ </mxCell>
286
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-160" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#E6E6E6;strokeColor=#808080;container=0;" parent="1" vertex="1">
287
+ <mxGeometry x="440" y="520" width="40" height="40" as="geometry" />
288
+ </mxCell>
289
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-161" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#E6E6E6;strokeColor=#808080;container=0;" parent="1" vertex="1">
290
+ <mxGeometry x="480" y="520" width="40" height="40" as="geometry" />
291
+ </mxCell>
292
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-162" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#E6E6E6;strokeColor=#808080;container=0;" parent="1" vertex="1">
293
+ <mxGeometry x="520" y="520" width="40" height="40" as="geometry" />
294
+ </mxCell>
295
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-163" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#E6E6E6;strokeColor=#808080;container=0;" parent="1" vertex="1">
296
+ <mxGeometry x="560" y="520" width="40" height="40" as="geometry" />
297
+ </mxCell>
298
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-165" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#E6E6E6;strokeColor=#808080;container=0;" parent="1" vertex="1">
299
+ <mxGeometry x="280" y="560" width="40" height="40" as="geometry" />
300
+ </mxCell>
301
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-166" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#E6E6E6;strokeColor=#808080;container=0;" parent="1" vertex="1">
302
+ <mxGeometry x="320" y="560" width="40" height="40" as="geometry" />
303
+ </mxCell>
304
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-167" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#E6E6E6;strokeColor=#808080;container=0;" parent="1" vertex="1">
305
+ <mxGeometry x="360" y="560" width="40" height="40" as="geometry" />
306
+ </mxCell>
307
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-168" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#E6E6E6;strokeColor=#808080;container=0;" parent="1" vertex="1">
308
+ <mxGeometry x="400" y="560" width="40" height="40" as="geometry" />
309
+ </mxCell>
310
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-169" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#E6E6E6;strokeColor=#808080;container=0;" parent="1" vertex="1">
311
+ <mxGeometry x="440" y="560" width="40" height="40" as="geometry" />
312
+ </mxCell>
313
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-170" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#E6E6E6;strokeColor=#808080;container=0;" parent="1" vertex="1">
314
+ <mxGeometry x="480" y="560" width="40" height="40" as="geometry" />
315
+ </mxCell>
316
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-171" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#E6E6E6;strokeColor=#808080;container=0;" parent="1" vertex="1">
317
+ <mxGeometry x="520" y="560" width="40" height="40" as="geometry" />
318
+ </mxCell>
319
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-172" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#E6E6E6;strokeColor=#808080;container=0;" parent="1" vertex="1">
320
+ <mxGeometry x="560" y="560" width="40" height="40" as="geometry" />
321
+ </mxCell>
322
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-443" value="" style="group" parent="1" connectable="0" vertex="1">
323
+ <mxGeometry x="680" y="360" width="240" height="240" as="geometry" />
324
+ </mxCell>
325
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-401" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#FFD4D2;strokeColor=#808080;container=0;movable=1;resizable=1;rotatable=1;deletable=1;editable=1;locked=0;connectable=1;" parent="M_GBHP_B9P7K8caY_ZvZ-443" vertex="1">
326
+ <mxGeometry width="40" height="40" as="geometry" />
327
+ </mxCell>
328
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-402" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#FFD4D2;strokeColor=#808080;container=0;movable=1;resizable=1;rotatable=1;deletable=1;editable=1;locked=0;connectable=1;" parent="M_GBHP_B9P7K8caY_ZvZ-443" vertex="1">
329
+ <mxGeometry x="40" width="40" height="40" as="geometry" />
330
+ </mxCell>
331
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-403" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#FFE6CC;strokeColor=#808080;container=0;movable=1;resizable=1;rotatable=1;deletable=1;editable=1;locked=0;connectable=1;" parent="M_GBHP_B9P7K8caY_ZvZ-443" vertex="1">
332
+ <mxGeometry x="80" width="40" height="40" as="geometry" />
333
+ </mxCell>
334
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-404" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#FFE6CC;strokeColor=#808080;container=0;movable=1;resizable=1;rotatable=1;deletable=1;editable=1;locked=0;connectable=1;" parent="M_GBHP_B9P7K8caY_ZvZ-443" vertex="1">
335
+ <mxGeometry x="120" width="40" height="40" as="geometry" />
336
+ </mxCell>
337
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-405" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#FFFFCC;strokeColor=#808080;container=0;movable=1;resizable=1;rotatable=1;deletable=1;editable=1;locked=0;connectable=1;" parent="M_GBHP_B9P7K8caY_ZvZ-443" vertex="1">
338
+ <mxGeometry x="160" width="40" height="40" as="geometry" />
339
+ </mxCell>
340
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-406" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#FFFFCC;strokeColor=#808080;container=0;movable=1;resizable=1;rotatable=1;deletable=1;editable=1;locked=0;connectable=1;" parent="M_GBHP_B9P7K8caY_ZvZ-443" vertex="1">
341
+ <mxGeometry x="200" width="40" height="40" as="geometry" />
342
+ </mxCell>
343
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-407" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#FFD4D2;strokeColor=#808080;container=0;movable=1;resizable=1;rotatable=1;deletable=1;editable=1;locked=0;connectable=1;" parent="M_GBHP_B9P7K8caY_ZvZ-443" vertex="1">
344
+ <mxGeometry y="40" width="40" height="40" as="geometry" />
345
+ </mxCell>
346
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-408" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#FFD4D2;strokeColor=#808080;container=0;movable=1;resizable=1;rotatable=1;deletable=1;editable=1;locked=0;connectable=1;" parent="M_GBHP_B9P7K8caY_ZvZ-443" vertex="1">
347
+ <mxGeometry x="40" y="40" width="40" height="40" as="geometry" />
348
+ </mxCell>
349
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-409" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#FFE6CC;strokeColor=#808080;container=0;movable=1;resizable=1;rotatable=1;deletable=1;editable=1;locked=0;connectable=1;" parent="M_GBHP_B9P7K8caY_ZvZ-443" vertex="1">
350
+ <mxGeometry x="80" y="40" width="40" height="40" as="geometry" />
351
+ </mxCell>
352
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-410" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#FFE6CC;strokeColor=#808080;container=0;movable=1;resizable=1;rotatable=1;deletable=1;editable=1;locked=0;connectable=1;" parent="M_GBHP_B9P7K8caY_ZvZ-443" vertex="1">
353
+ <mxGeometry x="120" y="40" width="40" height="40" as="geometry" />
354
+ </mxCell>
355
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-411" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#FFFFCC;strokeColor=#808080;container=0;movable=1;resizable=1;rotatable=1;deletable=1;editable=1;locked=0;connectable=1;" parent="M_GBHP_B9P7K8caY_ZvZ-443" vertex="1">
356
+ <mxGeometry x="160" y="40" width="40" height="40" as="geometry" />
357
+ </mxCell>
358
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-412" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#FFFFCC;strokeColor=#808080;container=0;movable=1;resizable=1;rotatable=1;deletable=1;editable=1;locked=0;connectable=1;" parent="M_GBHP_B9P7K8caY_ZvZ-443" vertex="1">
359
+ <mxGeometry x="200" y="40" width="40" height="40" as="geometry" />
360
+ </mxCell>
361
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-413" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#D5E8D4;strokeColor=#808080;container=0;movable=1;resizable=1;rotatable=1;deletable=1;editable=1;locked=0;connectable=1;" parent="M_GBHP_B9P7K8caY_ZvZ-443" vertex="1">
362
+ <mxGeometry y="80" width="40" height="40" as="geometry" />
363
+ </mxCell>
364
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-414" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#D5E8D4;strokeColor=#808080;container=0;movable=1;resizable=1;rotatable=1;deletable=1;editable=1;locked=0;connectable=1;" parent="M_GBHP_B9P7K8caY_ZvZ-443" vertex="1">
365
+ <mxGeometry x="40" y="80" width="40" height="40" as="geometry" />
366
+ </mxCell>
367
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-415" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#D4E1F5;strokeColor=#808080;container=0;dashed=1;dashPattern=12 12;movable=1;resizable=1;rotatable=1;deletable=1;editable=1;locked=0;connectable=1;" parent="M_GBHP_B9P7K8caY_ZvZ-443" vertex="1">
368
+ <mxGeometry x="80" y="80" width="40" height="40" as="geometry" />
369
+ </mxCell>
370
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-416" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#D4E1F5;strokeColor=#808080;container=0;dashed=1;dashPattern=12 12;movable=1;resizable=1;rotatable=1;deletable=1;editable=1;locked=0;connectable=1;" parent="M_GBHP_B9P7K8caY_ZvZ-443" vertex="1">
371
+ <mxGeometry x="120" y="80" width="40" height="40" as="geometry" />
372
+ </mxCell>
373
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-417" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#E6E6E6;strokeColor=#808080;container=0;movable=1;resizable=1;rotatable=1;deletable=1;editable=1;locked=0;connectable=1;" parent="M_GBHP_B9P7K8caY_ZvZ-443" vertex="1">
374
+ <mxGeometry x="160" y="80" width="40" height="40" as="geometry" />
375
+ </mxCell>
376
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-418" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#E6E6E6;strokeColor=#808080;container=0;movable=1;resizable=1;rotatable=1;deletable=1;editable=1;locked=0;connectable=1;" parent="M_GBHP_B9P7K8caY_ZvZ-443" vertex="1">
377
+ <mxGeometry x="200" y="80" width="40" height="40" as="geometry" />
378
+ </mxCell>
379
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-419" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#D5E8D4;strokeColor=#808080;container=0;movable=1;resizable=1;rotatable=1;deletable=1;editable=1;locked=0;connectable=1;" parent="M_GBHP_B9P7K8caY_ZvZ-443" vertex="1">
380
+ <mxGeometry y="120" width="40" height="40" as="geometry" />
381
+ </mxCell>
382
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-420" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#D5E8D4;strokeColor=#808080;container=0;movable=1;resizable=1;rotatable=1;deletable=1;editable=1;locked=0;connectable=1;" parent="M_GBHP_B9P7K8caY_ZvZ-443" vertex="1">
383
+ <mxGeometry x="40" y="120" width="40" height="40" as="geometry" />
384
+ </mxCell>
385
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-421" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#CFE6B8;strokeColor=#808080;container=0;movable=1;resizable=1;rotatable=1;deletable=1;editable=1;locked=0;connectable=1;" parent="M_GBHP_B9P7K8caY_ZvZ-443" vertex="1">
386
+ <mxGeometry x="80" y="120" width="40" height="40" as="geometry" />
387
+ </mxCell>
388
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-422" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#CFE6B8;strokeColor=#808080;container=0;movable=1;resizable=1;rotatable=1;deletable=1;editable=1;locked=0;connectable=1;" parent="M_GBHP_B9P7K8caY_ZvZ-443" vertex="1">
389
+ <mxGeometry x="120" y="120" width="40" height="40" as="geometry" />
390
+ </mxCell>
391
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-423" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#E6E6E6;strokeColor=#808080;container=0;movable=1;resizable=1;rotatable=1;deletable=1;editable=1;locked=0;connectable=1;" parent="M_GBHP_B9P7K8caY_ZvZ-443" vertex="1">
392
+ <mxGeometry x="160" y="120" width="40" height="40" as="geometry" />
393
+ </mxCell>
394
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-424" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#E6E6E6;strokeColor=#808080;container=0;movable=1;resizable=1;rotatable=1;deletable=1;editable=1;locked=0;connectable=1;" parent="M_GBHP_B9P7K8caY_ZvZ-443" vertex="1">
395
+ <mxGeometry x="200" y="120" width="40" height="40" as="geometry" />
396
+ </mxCell>
397
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-425" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#D5E8D4;strokeColor=#808080;container=0;movable=1;resizable=1;rotatable=1;deletable=1;editable=1;locked=0;connectable=1;" parent="M_GBHP_B9P7K8caY_ZvZ-443" vertex="1">
398
+ <mxGeometry y="120" width="40" height="40" as="geometry" />
399
+ </mxCell>
400
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-426" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#D5E8D4;strokeColor=#808080;container=0;movable=1;resizable=1;rotatable=1;deletable=1;editable=1;locked=0;connectable=1;" parent="M_GBHP_B9P7K8caY_ZvZ-443" vertex="1">
401
+ <mxGeometry x="40" y="120" width="40" height="40" as="geometry" />
402
+ </mxCell>
403
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-427" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#D4E1F5;strokeColor=#808080;container=0;dashed=1;dashPattern=12 12;movable=1;resizable=1;rotatable=1;deletable=1;editable=1;locked=0;connectable=1;" parent="M_GBHP_B9P7K8caY_ZvZ-443" vertex="1">
404
+ <mxGeometry x="80" y="120" width="40" height="40" as="geometry" />
405
+ </mxCell>
406
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-428" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#E6E6E6;strokeColor=#808080;container=0;dashed=1;dashPattern=12 12;movable=1;resizable=1;rotatable=1;deletable=1;editable=1;locked=0;connectable=1;" parent="M_GBHP_B9P7K8caY_ZvZ-443" vertex="1">
407
+ <mxGeometry x="120" y="120" width="40" height="40" as="geometry" />
408
+ </mxCell>
409
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-429" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#E6E6E6;strokeColor=#808080;container=0;movable=1;resizable=1;rotatable=1;deletable=1;editable=1;locked=0;connectable=1;" parent="M_GBHP_B9P7K8caY_ZvZ-443" vertex="1">
410
+ <mxGeometry x="160" y="120" width="40" height="40" as="geometry" />
411
+ </mxCell>
412
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-430" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#E6E6E6;strokeColor=#808080;container=0;movable=1;resizable=1;rotatable=1;deletable=1;editable=1;locked=0;connectable=1;" parent="M_GBHP_B9P7K8caY_ZvZ-443" vertex="1">
413
+ <mxGeometry x="200" y="120" width="40" height="40" as="geometry" />
414
+ </mxCell>
415
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-431" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#E6E6E6;strokeColor=#808080;container=0;movable=1;resizable=1;rotatable=1;deletable=1;editable=1;locked=0;connectable=1;" parent="M_GBHP_B9P7K8caY_ZvZ-443" vertex="1">
416
+ <mxGeometry y="160" width="40" height="40" as="geometry" />
417
+ </mxCell>
418
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-432" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#E6E6E6;strokeColor=#808080;container=0;movable=1;resizable=1;rotatable=1;deletable=1;editable=1;locked=0;connectable=1;" parent="M_GBHP_B9P7K8caY_ZvZ-443" vertex="1">
419
+ <mxGeometry x="40" y="160" width="40" height="40" as="geometry" />
420
+ </mxCell>
421
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-433" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#E6E6E6;strokeColor=#808080;container=0;movable=1;resizable=1;rotatable=1;deletable=1;editable=1;locked=0;connectable=1;" parent="M_GBHP_B9P7K8caY_ZvZ-443" vertex="1">
422
+ <mxGeometry x="80" y="160" width="40" height="40" as="geometry" />
423
+ </mxCell>
424
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-434" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#E6E6E6;strokeColor=#808080;container=0;movable=1;resizable=1;rotatable=1;deletable=1;editable=1;locked=0;connectable=1;" parent="M_GBHP_B9P7K8caY_ZvZ-443" vertex="1">
425
+ <mxGeometry x="120" y="160" width="40" height="40" as="geometry" />
426
+ </mxCell>
427
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-435" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#E6E6E6;strokeColor=#808080;container=0;movable=1;resizable=1;rotatable=1;deletable=1;editable=1;locked=0;connectable=1;" parent="M_GBHP_B9P7K8caY_ZvZ-443" vertex="1">
428
+ <mxGeometry x="160" y="160" width="40" height="40" as="geometry" />
429
+ </mxCell>
430
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-436" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#E6E6E6;strokeColor=#808080;container=0;movable=1;resizable=1;rotatable=1;deletable=1;editable=1;locked=0;connectable=1;" parent="M_GBHP_B9P7K8caY_ZvZ-443" vertex="1">
431
+ <mxGeometry x="200" y="160" width="40" height="40" as="geometry" />
432
+ </mxCell>
433
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-437" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#E6E6E6;strokeColor=#808080;container=0;movable=1;resizable=1;rotatable=1;deletable=1;editable=1;locked=0;connectable=1;" parent="M_GBHP_B9P7K8caY_ZvZ-443" vertex="1">
434
+ <mxGeometry y="200" width="40" height="40" as="geometry" />
435
+ </mxCell>
436
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-438" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#E6E6E6;strokeColor=#808080;container=0;movable=1;resizable=1;rotatable=1;deletable=1;editable=1;locked=0;connectable=1;" parent="M_GBHP_B9P7K8caY_ZvZ-443" vertex="1">
437
+ <mxGeometry x="40" y="200" width="40" height="40" as="geometry" />
438
+ </mxCell>
439
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-439" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#E6E6E6;strokeColor=#808080;container=0;movable=1;resizable=1;rotatable=1;deletable=1;editable=1;locked=0;connectable=1;" parent="M_GBHP_B9P7K8caY_ZvZ-443" vertex="1">
440
+ <mxGeometry x="80" y="200" width="40" height="40" as="geometry" />
441
+ </mxCell>
442
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-440" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#E6E6E6;strokeColor=#808080;container=0;movable=1;resizable=1;rotatable=1;deletable=1;editable=1;locked=0;connectable=1;" parent="M_GBHP_B9P7K8caY_ZvZ-443" vertex="1">
443
+ <mxGeometry x="120" y="200" width="40" height="40" as="geometry" />
444
+ </mxCell>
445
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-441" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#E6E6E6;strokeColor=#808080;container=0;movable=1;resizable=1;rotatable=1;deletable=1;editable=1;locked=0;connectable=1;" parent="M_GBHP_B9P7K8caY_ZvZ-443" vertex="1">
446
+ <mxGeometry x="160" y="200" width="40" height="40" as="geometry" />
447
+ </mxCell>
448
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-442" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#E6E6E6;strokeColor=#808080;container=0;movable=1;resizable=1;rotatable=1;deletable=1;editable=1;locked=0;connectable=1;" parent="M_GBHP_B9P7K8caY_ZvZ-443" vertex="1">
449
+ <mxGeometry x="200" y="200" width="40" height="40" as="geometry" />
450
+ </mxCell>
451
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-489" value="&lt;span style=&quot;background-color: rgb(255, 255, 255); font-size: 14px;&quot;&gt;patch 1&lt;/span&gt;" style="text;strokeColor=none;align=center;fillColor=none;html=1;verticalAlign=middle;whiteSpace=wrap;rounded=0;fontSize=14;" parent="M_GBHP_B9P7K8caY_ZvZ-443" vertex="1">
452
+ <mxGeometry x="10" y="19" width="69" height="41" as="geometry" />
453
+ </mxCell>
454
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-446" value="" style="group" parent="1" connectable="0" vertex="1">
455
+ <mxGeometry x="600" y="760" width="240" height="240" as="geometry" />
456
+ </mxCell>
457
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-447" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#FFD4D2;strokeColor=#808080;container=0;movable=1;resizable=1;rotatable=1;deletable=1;editable=1;locked=0;connectable=1;" parent="M_GBHP_B9P7K8caY_ZvZ-446" vertex="1">
458
+ <mxGeometry width="40" height="40" as="geometry" />
459
+ </mxCell>
460
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-448" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#FFD4D2;strokeColor=#808080;container=0;movable=1;resizable=1;rotatable=1;deletable=1;editable=1;locked=0;connectable=1;" parent="M_GBHP_B9P7K8caY_ZvZ-446" vertex="1">
461
+ <mxGeometry x="40" width="40" height="40" as="geometry" />
462
+ </mxCell>
463
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-449" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#FFE6CC;strokeColor=#808080;container=0;movable=1;resizable=1;rotatable=1;deletable=1;editable=1;locked=0;connectable=1;" parent="M_GBHP_B9P7K8caY_ZvZ-446" vertex="1">
464
+ <mxGeometry x="80" width="40" height="40" as="geometry" />
465
+ </mxCell>
466
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-450" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#FFE6CC;strokeColor=#808080;container=0;movable=1;resizable=1;rotatable=1;deletable=1;editable=1;locked=0;connectable=1;" parent="M_GBHP_B9P7K8caY_ZvZ-446" vertex="1">
467
+ <mxGeometry x="120" width="40" height="40" as="geometry" />
468
+ </mxCell>
469
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-451" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#FFFFCC;strokeColor=#808080;container=0;movable=1;resizable=1;rotatable=1;deletable=1;editable=1;locked=0;connectable=1;" parent="M_GBHP_B9P7K8caY_ZvZ-446" vertex="1">
470
+ <mxGeometry x="160" width="40" height="40" as="geometry" />
471
+ </mxCell>
472
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-452" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#FFFFCC;strokeColor=#808080;container=0;movable=1;resizable=1;rotatable=1;deletable=1;editable=1;locked=0;connectable=1;" parent="M_GBHP_B9P7K8caY_ZvZ-446" vertex="1">
473
+ <mxGeometry x="200" width="40" height="40" as="geometry" />
474
+ </mxCell>
475
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-453" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#FFD4D2;strokeColor=#808080;container=0;movable=1;resizable=1;rotatable=1;deletable=1;editable=1;locked=0;connectable=1;" parent="M_GBHP_B9P7K8caY_ZvZ-446" vertex="1">
476
+ <mxGeometry y="40" width="40" height="40" as="geometry" />
477
+ </mxCell>
478
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-454" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#FFD4D2;strokeColor=#808080;container=0;movable=1;resizable=1;rotatable=1;deletable=1;editable=1;locked=0;connectable=1;" parent="M_GBHP_B9P7K8caY_ZvZ-446" vertex="1">
479
+ <mxGeometry x="40" y="40" width="40" height="40" as="geometry" />
480
+ </mxCell>
481
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-455" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#FFE6CC;strokeColor=#808080;container=0;movable=1;resizable=1;rotatable=1;deletable=1;editable=1;locked=0;connectable=1;" parent="M_GBHP_B9P7K8caY_ZvZ-446" vertex="1">
482
+ <mxGeometry x="80" y="40" width="40" height="40" as="geometry" />
483
+ </mxCell>
484
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-456" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#FFE6CC;strokeColor=#808080;container=0;movable=1;resizable=1;rotatable=1;deletable=1;editable=1;locked=0;connectable=1;" parent="M_GBHP_B9P7K8caY_ZvZ-446" vertex="1">
485
+ <mxGeometry x="120" y="40" width="40" height="40" as="geometry" />
486
+ </mxCell>
487
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-457" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#FFFFCC;strokeColor=#808080;container=0;movable=1;resizable=1;rotatable=1;deletable=1;editable=1;locked=0;connectable=1;" parent="M_GBHP_B9P7K8caY_ZvZ-446" vertex="1">
488
+ <mxGeometry x="160" y="40" width="40" height="40" as="geometry" />
489
+ </mxCell>
490
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-458" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#FFFFCC;strokeColor=#808080;container=0;movable=1;resizable=1;rotatable=1;deletable=1;editable=1;locked=0;connectable=1;" parent="M_GBHP_B9P7K8caY_ZvZ-446" vertex="1">
491
+ <mxGeometry x="200" y="40" width="40" height="40" as="geometry" />
492
+ </mxCell>
493
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-459" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#D5E8D4;strokeColor=#808080;container=0;movable=1;resizable=1;rotatable=1;deletable=1;editable=1;locked=0;connectable=1;" parent="M_GBHP_B9P7K8caY_ZvZ-446" vertex="1">
494
+ <mxGeometry y="80" width="40" height="40" as="geometry" />
495
+ </mxCell>
496
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-460" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#D5E8D4;strokeColor=#808080;container=0;movable=1;resizable=1;rotatable=1;deletable=1;editable=1;locked=0;connectable=1;" parent="M_GBHP_B9P7K8caY_ZvZ-446" vertex="1">
497
+ <mxGeometry x="40" y="80" width="40" height="40" as="geometry" />
498
+ </mxCell>
499
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-461" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#D4E1F5;strokeColor=#808080;container=0;movable=1;resizable=1;rotatable=1;deletable=1;editable=1;locked=0;connectable=1;" parent="M_GBHP_B9P7K8caY_ZvZ-446" vertex="1">
500
+ <mxGeometry x="80" y="80" width="40" height="40" as="geometry" />
501
+ </mxCell>
502
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-462" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#D4E1F5;strokeColor=#808080;container=0;movable=1;resizable=1;rotatable=1;deletable=1;editable=1;locked=0;connectable=1;" parent="M_GBHP_B9P7K8caY_ZvZ-446" vertex="1">
503
+ <mxGeometry x="120" y="80" width="40" height="40" as="geometry" />
504
+ </mxCell>
505
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-463" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#E6E6E6;strokeColor=#808080;container=0;movable=1;resizable=1;rotatable=1;deletable=1;editable=1;locked=0;connectable=1;dashed=1;dashPattern=8 8;" parent="M_GBHP_B9P7K8caY_ZvZ-446" vertex="1">
506
+ <mxGeometry x="160" y="80" width="40" height="40" as="geometry" />
507
+ </mxCell>
508
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-464" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#E6E6E6;strokeColor=#808080;container=0;movable=1;resizable=1;rotatable=1;deletable=1;editable=1;locked=0;connectable=1;dashed=1;dashPattern=8 8;" parent="M_GBHP_B9P7K8caY_ZvZ-446" vertex="1">
509
+ <mxGeometry x="200" y="80" width="40" height="40" as="geometry" />
510
+ </mxCell>
511
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-465" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#D5E8D4;strokeColor=#808080;container=0;movable=1;resizable=1;rotatable=1;deletable=1;editable=1;locked=0;connectable=1;" parent="M_GBHP_B9P7K8caY_ZvZ-446" vertex="1">
512
+ <mxGeometry y="120" width="40" height="40" as="geometry" />
513
+ </mxCell>
514
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-466" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#D5E8D4;strokeColor=#808080;container=0;movable=1;resizable=1;rotatable=1;deletable=1;editable=1;locked=0;connectable=1;" parent="M_GBHP_B9P7K8caY_ZvZ-446" vertex="1">
515
+ <mxGeometry x="40" y="120" width="40" height="40" as="geometry" />
516
+ </mxCell>
517
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-467" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#CFE6B8;strokeColor=#808080;container=0;movable=1;resizable=1;rotatable=1;deletable=1;editable=1;locked=0;connectable=1;" parent="M_GBHP_B9P7K8caY_ZvZ-446" vertex="1">
518
+ <mxGeometry x="80" y="120" width="40" height="40" as="geometry" />
519
+ </mxCell>
520
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-468" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#CFE6B8;strokeColor=#808080;container=0;movable=1;resizable=1;rotatable=1;deletable=1;editable=1;locked=0;connectable=1;" parent="M_GBHP_B9P7K8caY_ZvZ-446" vertex="1">
521
+ <mxGeometry x="120" y="120" width="40" height="40" as="geometry" />
522
+ </mxCell>
523
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-469" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#E6E6E6;strokeColor=#808080;container=0;movable=1;resizable=1;rotatable=1;deletable=1;editable=1;locked=0;connectable=1;dashed=1;dashPattern=8 8;" parent="M_GBHP_B9P7K8caY_ZvZ-446" vertex="1">
524
+ <mxGeometry x="160" y="120" width="40" height="40" as="geometry" />
525
+ </mxCell>
526
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-470" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#E6E6E6;strokeColor=#808080;container=0;movable=1;resizable=1;rotatable=1;deletable=1;editable=1;locked=0;connectable=1;dashed=1;dashPattern=8 8;" parent="M_GBHP_B9P7K8caY_ZvZ-446" vertex="1">
527
+ <mxGeometry x="200" y="120" width="40" height="40" as="geometry" />
528
+ </mxCell>
529
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-471" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#D5E8D4;strokeColor=#808080;container=0;movable=1;resizable=1;rotatable=1;deletable=1;editable=1;locked=0;connectable=1;" parent="M_GBHP_B9P7K8caY_ZvZ-446" vertex="1">
530
+ <mxGeometry y="120" width="40" height="40" as="geometry" />
531
+ </mxCell>
532
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-472" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#D5E8D4;strokeColor=#808080;container=0;movable=1;resizable=1;rotatable=1;deletable=1;editable=1;locked=0;connectable=1;" parent="M_GBHP_B9P7K8caY_ZvZ-446" vertex="1">
533
+ <mxGeometry x="40" y="120" width="40" height="40" as="geometry" />
534
+ </mxCell>
535
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-473" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#D4E1F5;strokeColor=#808080;container=0;movable=1;resizable=1;rotatable=1;deletable=1;editable=1;locked=0;connectable=1;" parent="M_GBHP_B9P7K8caY_ZvZ-446" vertex="1">
536
+ <mxGeometry x="80" y="120" width="40" height="40" as="geometry" />
537
+ </mxCell>
538
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-474" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#E6E6E6;strokeColor=#808080;container=0;dashed=1;dashPattern=12 12;movable=1;resizable=1;rotatable=1;deletable=1;editable=1;locked=0;connectable=1;" parent="M_GBHP_B9P7K8caY_ZvZ-446" vertex="1">
539
+ <mxGeometry x="120" y="120" width="40" height="40" as="geometry" />
540
+ </mxCell>
541
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-475" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#E6E6E6;strokeColor=#808080;container=0;movable=1;resizable=1;rotatable=1;deletable=1;editable=1;locked=0;connectable=1;dashed=1;dashPattern=8 8;" parent="M_GBHP_B9P7K8caY_ZvZ-446" vertex="1">
542
+ <mxGeometry x="160" y="120" width="40" height="40" as="geometry" />
543
+ </mxCell>
544
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-476" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#E6E6E6;strokeColor=#808080;container=0;movable=1;resizable=1;rotatable=1;deletable=1;editable=1;locked=0;connectable=1;dashed=1;dashPattern=8 8;" parent="M_GBHP_B9P7K8caY_ZvZ-446" vertex="1">
545
+ <mxGeometry x="200" y="120" width="40" height="40" as="geometry" />
546
+ </mxCell>
547
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-477" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#E6E6E6;strokeColor=#808080;container=0;movable=1;resizable=1;rotatable=1;deletable=1;editable=1;locked=0;connectable=1;dashed=1;dashPattern=8 8;" parent="M_GBHP_B9P7K8caY_ZvZ-446" vertex="1">
548
+ <mxGeometry y="160" width="40" height="40" as="geometry" />
549
+ </mxCell>
550
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-478" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#E6E6E6;strokeColor=#808080;container=0;movable=1;resizable=1;rotatable=1;deletable=1;editable=1;locked=0;connectable=1;dashed=1;dashPattern=8 8;" parent="M_GBHP_B9P7K8caY_ZvZ-446" vertex="1">
551
+ <mxGeometry x="40" y="160" width="40" height="40" as="geometry" />
552
+ </mxCell>
553
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-479" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#E6E6E6;strokeColor=#808080;container=0;movable=1;resizable=1;rotatable=1;deletable=1;editable=1;locked=0;connectable=1;dashed=1;dashPattern=8 8;" parent="M_GBHP_B9P7K8caY_ZvZ-446" vertex="1">
554
+ <mxGeometry x="80" y="160" width="40" height="40" as="geometry" />
555
+ </mxCell>
556
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-480" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#E6E6E6;strokeColor=#808080;container=0;movable=1;resizable=1;rotatable=1;deletable=1;editable=1;locked=0;connectable=1;dashed=1;dashPattern=8 8;" parent="M_GBHP_B9P7K8caY_ZvZ-446" vertex="1">
557
+ <mxGeometry x="120" y="160" width="40" height="40" as="geometry" />
558
+ </mxCell>
559
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-481" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#E6E6E6;strokeColor=#808080;container=0;movable=1;resizable=1;rotatable=1;deletable=1;editable=1;locked=0;connectable=1;dashed=1;dashPattern=8 8;" parent="M_GBHP_B9P7K8caY_ZvZ-446" vertex="1">
560
+ <mxGeometry x="160" y="160" width="40" height="40" as="geometry" />
561
+ </mxCell>
562
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-482" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#E6E6E6;strokeColor=#808080;container=0;movable=1;resizable=1;rotatable=1;deletable=1;editable=1;locked=0;connectable=1;dashed=1;dashPattern=8 8;" parent="M_GBHP_B9P7K8caY_ZvZ-446" vertex="1">
563
+ <mxGeometry x="200" y="160" width="40" height="40" as="geometry" />
564
+ </mxCell>
565
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-483" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#E6E6E6;strokeColor=#808080;container=0;movable=1;resizable=1;rotatable=1;deletable=1;editable=1;locked=0;connectable=1;dashed=1;dashPattern=8 8;" parent="M_GBHP_B9P7K8caY_ZvZ-446" vertex="1">
566
+ <mxGeometry y="200" width="40" height="40" as="geometry" />
567
+ </mxCell>
568
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-484" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#E6E6E6;strokeColor=#808080;container=0;movable=1;resizable=1;rotatable=1;deletable=1;editable=1;locked=0;connectable=1;dashed=1;dashPattern=8 8;" parent="M_GBHP_B9P7K8caY_ZvZ-446" vertex="1">
569
+ <mxGeometry x="40" y="200" width="40" height="40" as="geometry" />
570
+ </mxCell>
571
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-485" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#E6E6E6;strokeColor=#808080;container=0;movable=1;resizable=1;rotatable=1;deletable=1;editable=1;locked=0;connectable=1;dashed=1;dashPattern=8 8;" parent="M_GBHP_B9P7K8caY_ZvZ-446" vertex="1">
572
+ <mxGeometry x="80" y="200" width="40" height="40" as="geometry" />
573
+ </mxCell>
574
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-486" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#E6E6E6;strokeColor=#808080;container=0;movable=1;resizable=1;rotatable=1;deletable=1;editable=1;locked=0;connectable=1;dashed=1;dashPattern=8 8;" parent="M_GBHP_B9P7K8caY_ZvZ-446" vertex="1">
575
+ <mxGeometry x="120" y="200" width="40" height="40" as="geometry" />
576
+ </mxCell>
577
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-487" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#E6E6E6;strokeColor=#808080;container=0;movable=1;resizable=1;rotatable=1;deletable=1;editable=1;locked=0;connectable=1;dashed=1;dashPattern=8 8;" parent="M_GBHP_B9P7K8caY_ZvZ-446" vertex="1">
578
+ <mxGeometry x="160" y="200" width="40" height="40" as="geometry" />
579
+ </mxCell>
580
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-488" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#E6E6E6;strokeColor=#808080;container=0;movable=1;resizable=1;rotatable=1;deletable=1;editable=1;locked=0;connectable=1;dashed=1;dashPattern=8 8;" parent="M_GBHP_B9P7K8caY_ZvZ-446" vertex="1">
581
+ <mxGeometry x="200" y="200" width="40" height="40" as="geometry" />
582
+ </mxCell>
583
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-562" value="" style="endArrow=classic;html=1;rounded=0;fontSize=14;" parent="M_GBHP_B9P7K8caY_ZvZ-446" edge="1">
584
+ <mxGeometry width="50" height="50" relative="1" as="geometry">
585
+ <mxPoint x="95.5" y="99.75" as="sourcePoint" />
586
+ <mxPoint x="143.5" y="99.75" as="targetPoint" />
587
+ </mxGeometry>
588
+ </mxCell>
589
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-564" value="" style="endArrow=classic;html=1;rounded=0;fontSize=14;" parent="M_GBHP_B9P7K8caY_ZvZ-446" edge="1">
590
+ <mxGeometry width="50" height="50" relative="1" as="geometry">
591
+ <mxPoint x="135.5" y="101.75" as="sourcePoint" />
592
+ <mxPoint x="96.5" y="138.75" as="targetPoint" />
593
+ </mxGeometry>
594
+ </mxCell>
595
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-490" value="&lt;span style=&quot;background-color: rgb(255, 255, 255); font-size: 14px;&quot;&gt;patch 2&lt;/span&gt;" style="text;strokeColor=none;align=center;fillColor=none;html=1;verticalAlign=middle;whiteSpace=wrap;rounded=0;fontSize=14;" parent="1" vertex="1">
596
+ <mxGeometry x="766" y="379" width="69" height="41" as="geometry" />
597
+ </mxCell>
598
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-491" value="&lt;span style=&quot;background-color: rgb(255, 255, 255); font-size: 14px;&quot;&gt;patch 3&lt;/span&gt;" style="text;strokeColor=none;align=center;fillColor=none;html=1;verticalAlign=middle;whiteSpace=wrap;rounded=0;fontSize=14;" parent="1" vertex="1">
599
+ <mxGeometry x="849" y="379" width="69" height="41" as="geometry" />
600
+ </mxCell>
601
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-492" value="&lt;span style=&quot;background-color: rgb(255, 255, 255); font-size: 14px;&quot;&gt;patch 4&lt;/span&gt;" style="text;strokeColor=none;align=center;fillColor=none;html=1;verticalAlign=middle;whiteSpace=wrap;rounded=0;fontSize=14;" parent="1" vertex="1">
602
+ <mxGeometry x="686" y="461" width="69" height="41" as="geometry" />
603
+ </mxCell>
604
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-543" value="" style="endArrow=classic;html=1;rounded=0;fontSize=14;" parent="1" edge="1">
605
+ <mxGeometry width="50" height="50" relative="1" as="geometry">
606
+ <mxPoint x="40" y="780" as="sourcePoint" />
607
+ <mxPoint x="240" y="780" as="targetPoint" />
608
+ </mxGeometry>
609
+ </mxCell>
610
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-544" value="" style="endArrow=classic;html=1;rounded=0;fontSize=14;" parent="1" edge="1">
611
+ <mxGeometry width="50" height="50" relative="1" as="geometry">
612
+ <mxPoint x="40" y="820" as="sourcePoint" />
613
+ <mxPoint x="240" y="820" as="targetPoint" />
614
+ </mxGeometry>
615
+ </mxCell>
616
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-545" value="" style="endArrow=classic;html=1;rounded=0;fontSize=14;" parent="1" edge="1">
617
+ <mxGeometry width="50" height="50" relative="1" as="geometry">
618
+ <mxPoint x="40" y="860" as="sourcePoint" />
619
+ <mxPoint x="240" y="860" as="targetPoint" />
620
+ </mxGeometry>
621
+ </mxCell>
622
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-546" value="" style="endArrow=classic;html=1;rounded=0;fontSize=14;" parent="1" edge="1">
623
+ <mxGeometry width="50" height="50" relative="1" as="geometry">
624
+ <mxPoint x="40" y="899.5" as="sourcePoint" />
625
+ <mxPoint x="160" y="900" as="targetPoint" />
626
+ </mxGeometry>
627
+ </mxCell>
628
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-549" value="" style="endArrow=classic;html=1;rounded=0;fontSize=14;" parent="1" edge="1">
629
+ <mxGeometry width="50" height="50" relative="1" as="geometry">
630
+ <mxPoint x="239" y="780" as="sourcePoint" />
631
+ <mxPoint x="39" y="820" as="targetPoint" />
632
+ </mxGeometry>
633
+ </mxCell>
634
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-550" value="" style="endArrow=classic;html=1;rounded=0;fontSize=14;" parent="1" edge="1">
635
+ <mxGeometry width="50" height="50" relative="1" as="geometry">
636
+ <mxPoint x="239" y="820" as="sourcePoint" />
637
+ <mxPoint x="39" y="860" as="targetPoint" />
638
+ </mxGeometry>
639
+ </mxCell>
640
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-551" value="" style="endArrow=classic;html=1;rounded=0;fontSize=14;" parent="1" edge="1">
641
+ <mxGeometry width="50" height="50" relative="1" as="geometry">
642
+ <mxPoint x="239" y="860" as="sourcePoint" />
643
+ <mxPoint x="39" y="900" as="targetPoint" />
644
+ </mxGeometry>
645
+ </mxCell>
646
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-552" value="" style="endArrow=classic;html=1;rounded=0;fontSize=14;strokeColor=default;" parent="1" edge="1">
647
+ <mxGeometry width="50" height="50" relative="1" as="geometry">
648
+ <mxPoint x="301" y="781" as="sourcePoint" />
649
+ <mxPoint x="501" y="781" as="targetPoint" />
650
+ </mxGeometry>
651
+ </mxCell>
652
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-553" value="" style="endArrow=classic;html=1;rounded=0;fontSize=14;strokeColor=default;" parent="1" edge="1">
653
+ <mxGeometry width="50" height="50" relative="1" as="geometry">
654
+ <mxPoint x="301" y="821" as="sourcePoint" />
655
+ <mxPoint x="501" y="821" as="targetPoint" />
656
+ </mxGeometry>
657
+ </mxCell>
658
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-554" value="" style="endArrow=classic;html=1;rounded=0;fontSize=14;strokeColor=default;" parent="1" edge="1">
659
+ <mxGeometry width="50" height="50" relative="1" as="geometry">
660
+ <mxPoint x="301" y="861" as="sourcePoint" />
661
+ <mxPoint x="501" y="861" as="targetPoint" />
662
+ </mxGeometry>
663
+ </mxCell>
664
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-556" value="" style="endArrow=classic;html=1;rounded=0;fontSize=14;strokeColor=default;" parent="1" edge="1">
665
+ <mxGeometry width="50" height="50" relative="1" as="geometry">
666
+ <mxPoint x="500" y="781" as="sourcePoint" />
667
+ <mxPoint x="300" y="821" as="targetPoint" />
668
+ </mxGeometry>
669
+ </mxCell>
670
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-557" value="" style="endArrow=classic;html=1;rounded=0;fontSize=14;strokeColor=default;" parent="1" edge="1">
671
+ <mxGeometry width="50" height="50" relative="1" as="geometry">
672
+ <mxPoint x="500" y="821" as="sourcePoint" />
673
+ <mxPoint x="300" y="861" as="targetPoint" />
674
+ </mxGeometry>
675
+ </mxCell>
676
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-558" value="" style="endArrow=classic;html=1;rounded=0;fontSize=14;strokeColor=default;" parent="1" edge="1">
677
+ <mxGeometry width="50" height="50" relative="1" as="geometry">
678
+ <mxPoint x="500" y="861" as="sourcePoint" />
679
+ <mxPoint x="300" y="901" as="targetPoint" />
680
+ </mxGeometry>
681
+ </mxCell>
682
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-559" value="" style="endArrow=classic;html=1;rounded=0;fontSize=14;" parent="1" edge="1">
683
+ <mxGeometry width="50" height="50" relative="1" as="geometry">
684
+ <mxPoint x="62" y="987" as="sourcePoint" />
685
+ <mxPoint x="110" y="987" as="targetPoint" />
686
+ </mxGeometry>
687
+ </mxCell>
688
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-560" value="" style="endArrow=classic;html=1;rounded=0;fontSize=14;" parent="1" edge="1">
689
+ <mxGeometry width="50" height="50" relative="1" as="geometry">
690
+ <mxPoint x="62" y="1026.5" as="sourcePoint" />
691
+ <mxPoint x="110" y="1027" as="targetPoint" />
692
+ </mxGeometry>
693
+ </mxCell>
694
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-561" value="" style="endArrow=classic;html=1;rounded=0;fontSize=14;" parent="1" edge="1">
695
+ <mxGeometry width="50" height="50" relative="1" as="geometry">
696
+ <mxPoint x="100" y="990" as="sourcePoint" />
697
+ <mxPoint x="61" y="1027" as="targetPoint" />
698
+ </mxGeometry>
699
+ </mxCell>
700
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-571" value="" style="endArrow=classic;html=1;rounded=0;fontSize=14;" parent="1" edge="1">
701
+ <mxGeometry width="50" height="50" relative="1" as="geometry">
702
+ <mxPoint x="72" y="1036.5" as="sourcePoint" />
703
+ <mxPoint x="120" y="1037" as="targetPoint" />
704
+ </mxGeometry>
705
+ </mxCell>
706
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-572" value="" style="endArrow=classic;html=1;rounded=0;fontSize=14;" parent="1" edge="1">
707
+ <mxGeometry width="50" height="50" relative="1" as="geometry">
708
+ <mxPoint x="697" y="898.5" as="sourcePoint" />
709
+ <mxPoint x="745" y="899" as="targetPoint" />
710
+ </mxGeometry>
711
+ </mxCell>
712
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-525" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#E6E6E6;strokeColor=#808080;container=0;movable=1;resizable=1;rotatable=1;deletable=1;editable=1;locked=0;connectable=1;dashed=1;dashPattern=8 8;" parent="1" vertex="1">
713
+ <mxGeometry x="280" y="920" width="40" height="40" as="geometry" />
714
+ </mxCell>
715
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-526" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#E6E6E6;strokeColor=#808080;container=0;movable=1;resizable=1;rotatable=1;deletable=1;editable=1;locked=0;connectable=1;dashed=1;dashPattern=8 8;" parent="1" vertex="1">
716
+ <mxGeometry x="320" y="920" width="40" height="40" as="geometry" />
717
+ </mxCell>
718
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-527" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#E6E6E6;strokeColor=#808080;container=0;movable=1;resizable=1;rotatable=1;deletable=1;editable=1;locked=0;connectable=1;dashed=1;dashPattern=8 8;" parent="1" vertex="1">
719
+ <mxGeometry x="360" y="920" width="40" height="40" as="geometry" />
720
+ </mxCell>
721
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-528" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#E6E6E6;strokeColor=#808080;container=0;movable=1;resizable=1;rotatable=1;deletable=1;editable=1;locked=0;connectable=1;dashed=1;dashPattern=8 8;" parent="1" vertex="1">
722
+ <mxGeometry x="400" y="920" width="40" height="40" as="geometry" />
723
+ </mxCell>
724
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-529" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#E6E6E6;strokeColor=#808080;container=0;movable=1;resizable=1;rotatable=1;deletable=1;editable=1;locked=0;connectable=1;dashed=1;dashPattern=8 8;" parent="1" vertex="1">
725
+ <mxGeometry x="440" y="920" width="40" height="40" as="geometry" />
726
+ </mxCell>
727
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-530" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#E6E6E6;strokeColor=#808080;container=0;movable=1;resizable=1;rotatable=1;deletable=1;editable=1;locked=0;connectable=1;dashed=1;dashPattern=8 8;" parent="1" vertex="1">
728
+ <mxGeometry x="480" y="920" width="40" height="40" as="geometry" />
729
+ </mxCell>
730
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-531" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#E6E6E6;strokeColor=#808080;container=0;movable=1;resizable=1;rotatable=1;deletable=1;editable=1;locked=0;connectable=1;dashed=1;dashPattern=8 8;" parent="1" vertex="1">
731
+ <mxGeometry x="280" y="960" width="40" height="40" as="geometry" />
732
+ </mxCell>
733
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-532" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#E6E6E6;strokeColor=#808080;container=0;movable=1;resizable=1;rotatable=1;deletable=1;editable=1;locked=0;connectable=1;dashed=1;dashPattern=8 8;" parent="1" vertex="1">
734
+ <mxGeometry x="320" y="960" width="40" height="40" as="geometry" />
735
+ </mxCell>
736
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-533" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#E6E6E6;strokeColor=#808080;container=0;movable=1;resizable=1;rotatable=1;deletable=1;editable=1;locked=0;connectable=1;dashed=1;dashPattern=8 8;" parent="1" vertex="1">
737
+ <mxGeometry x="360" y="960" width="40" height="40" as="geometry" />
738
+ </mxCell>
739
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-534" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#E6E6E6;strokeColor=#808080;container=0;movable=1;resizable=1;rotatable=1;deletable=1;editable=1;locked=0;connectable=1;dashed=1;dashPattern=8 8;" parent="1" vertex="1">
740
+ <mxGeometry x="400" y="960" width="40" height="40" as="geometry" />
741
+ </mxCell>
742
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-535" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#E6E6E6;strokeColor=#808080;container=0;movable=1;resizable=1;rotatable=1;deletable=1;editable=1;locked=0;connectable=1;dashed=1;dashPattern=8 8;" parent="1" vertex="1">
743
+ <mxGeometry x="440" y="960" width="40" height="40" as="geometry" />
744
+ </mxCell>
745
+ <mxCell id="M_GBHP_B9P7K8caY_ZvZ-536" value="" style="whiteSpace=wrap;html=1;aspect=fixed;fillColor=#E6E6E6;strokeColor=#808080;container=0;movable=1;resizable=1;rotatable=1;deletable=1;editable=1;locked=0;connectable=1;dashed=1;dashPattern=8 8;" parent="1" vertex="1">
746
+ <mxGeometry x="480" y="960" width="40" height="40" as="geometry" />
747
+ </mxCell>
748
+ </root>
749
+ </mxGraphModel>
750
+ </diagram>
751
+ </mxfile>
2305.07185/main_diagram/main_diagram.pdf ADDED
Binary file (13.8 kB). View file
 
2305.07185/paper_text/intro_method.md ADDED
@@ -0,0 +1,296 @@
1
+ # Introduction
2
+
3
+ Sequences of millions of bytes are ubiquitous; for example, music, image, or video files typically consist of multiple megabytes. However, large transformer decoders (LLMs) typically use only several thousand tokens of context [@gpt3; @Zhang2022OPT:Models]---both because of the quadratic cost of self-attention and, more importantly, because of the per-position cost of large feedforward networks. This severely limits the set of tasks where LLMs can be applied.
4
+
5
+ ![Overview of [MegaByte]{.smallcaps} with patch size $P=4$. A small *local* model autoregressively predicts each patch byte-by-byte, using the output of a larger *global* model to condition on previous patches. Global and Local inputs are padded by $P$ and $1$ token respectively to avoid leaking information about future tokens. []{#fig:framework label="fig:framework"} ](figs/fastslow3.pdf){#fig:framework width="\\linewidth"}
6
+
7
+ We introduce [MegaByte]{.smallcaps}, a new approach to modeling long byte sequences. First, byte sequences are segmented into fixed-sized patches, loosely analogous to tokens. Our model then consists of three parts: (1) a *patch embedder*, which simply encodes a patch by losslessly concatenating embeddings of each byte, (2) a *global* module, a large autoregressive transformer that inputs and outputs patch representations and (3) a *local* module, a small autoregressive model that predicts bytes within a patch. Crucially, we observe that for many tasks, most byte predictions are relatively easy (for example, completing a word given the first few characters), meaning that large networks per-byte are unnecessary, and a much smaller model can be used for intra-patch modelling.
8
+
9
+ The [MegaByte]{.smallcaps} architecture gives three major improvements over Transformers for long sequence modelling:
10
+
11
+ 1. **Sub-quadratic self-attention** Most work on long sequence models has focused on mitigating the quadratic cost of self-attention. [MegaByte]{.smallcaps} decomposes long sequences into two shorter sequences, and an optimal patch size reduces the self-attention cost to $O(N^{\frac{4}{3}})$, which remains tractable even for very long sequences (see the cost sketch after this list).
12
+
13
+ 2. **Per-patch feedforward layers** In GPT3-size models, more than 98% of FLOPS are used in computing position-wise feedforward layers. [MegaByte]{.smallcaps} uses large feedforward layers per-patch rather than per-position, enabling much larger and more expressive models for the same cost. With patch size $P$, where a baseline transformer would use the same feedforward layer with $m$ parameters $P$ times, [MegaByte]{.smallcaps} can use a layer with $mP$ parameters once for the same cost.
14
+
15
+ 3. **Parallelism in Decoding** Transformers must perform all computations serially during generation because the input to each timestep is the output from the previous timestep. By generating representations for patches in parallel, [MegaByte]{.smallcaps} allows greater parallelism during generation. For example, a [MegaByte]{.smallcaps} model with 1.5B parameters can generate sequences 40% *faster* than a standard 350M Transformer, whilst also improving perplexity when trained with the same compute.
16
+
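+ As a rough back-of-the-envelope illustration of points 1 and 2 (our own sketch, not from the paper), the following compares the number of attention score computations for a vanilla byte-level transformer against MegaByte's two-level decomposition, using the $P \propto T^{1/3}$ patch size that yields the $O(T^{4/3})$ bound:
+
+ ```python
+ # Compare attention "score" computations: vanilla transformer vs. MegaByte.
+ def vanilla_attention_cost(T):
+     return T ** 2
+
+ def megabyte_attention_cost(T, P):
+     K = T // P                        # number of patches
+     return K ** 2 + K * P ** 2        # global model over K patches + K local models over P bytes
+
+ T = 1_000_000
+ P = round(T ** (1 / 3))               # patch size ~ T^(1/3) gives O(T^(4/3)) overall
+ print(vanilla_attention_cost(T))      # 1,000,000,000,000
+ print(megabyte_attention_cost(T, P))  # 200,000,000 -- about 5,000x fewer
+ ```
+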
17
+ Together, these improvements allow us to train much larger and better-performing models for the same compute budget, scale to very long sequences, and improve generation speed during deployment.
18
+
19
+ [MegaByte]{.smallcaps} also provides a strong contrast to existing autoregressive models that typically use some form of tokenization, where sequences of bytes are mapped to larger discrete tokens [@bpe; @dalle; @hubert]. Tokenization complicates pre-processing, multi-modal modelling, and transfer to new domains, while hiding useful structure from the model. It also means that most state-of-the-art models are not truly end-to-end. The most widely used approaches to tokenization require language-specific heuristics [@gpt2] or lose information [@dalle]. Replacing tokenization with efficient and performant byte models would therefore have many advantages.
20
+
21
+ We conduct extensive experiments for both [MegaByte]{.smallcaps} and strong baselines. We use a fixed compute and data budget across all models to focus our comparisons solely on the model architecture rather than training resources, which are known to benefit all models. We find that [MegaByte]{.smallcaps} allows byte-level models to perform competitively with subword models on long context language modeling, achieve state-of-the-art perplexities for density estimation on ImageNet, and allow audio modelling from raw audio files. Together, these results establish the viability of tokenization-free autoregressive sequence modeling at scale.
22
+
23
+ $$\begin{align*}
+ h^\text{embed}_{t} &= E^\text{global-embed}_{x_t}+E^\text{pos}_t & t\in [0..T),\ E^{\text{global-embed}}\in\mathbb{R}^{V\times D_G},\ E^{\text{pos}}\in\mathbb{R}^{T\times D_G} \\
+ h^\text{global-in}_{k} &=
+ \begin{cases}
+ E^{\text{global-pad}}, & \text{if } k = 0 \\
+ h^\text{embed}_{((k-1)\cdot P):(k \cdot P)}, & k\in [1,..,K)
+ \end{cases} & E^{\text{global-pad}}\in\mathbb{R}^{P\times D_G},\ K=\tfrac{T}{P} \\
+ h^\text{global-out}_{0:K} &= \text{transformer}^{\text{global}}(h^\text{global-in}_{0:K}) & h^\text{global-in}, h^\text{global-out}\in\mathbb{R}^{K\times P\cdot D_G} \\
+ h^\text{local-in}_{k,p} &= w^{\text{GL}}h^\text{global-out}_{k,(p\cdot D_G):((p+1)\cdot D_G)} +
+ \begin{cases}
+ E^\text{local-pad}, & \text{if } p=0 \\
+ E^\text{local-embed}_{x_{(k\cdot P + p - 1)}}, & p\in [1,..,P)
+ \end{cases} & E^{\text{local-pad}}\in\mathbb{R}^{D_L},\ w^{\text{GL}}\in\mathbb{R}^{D_G\times D_L},\ E^{\text{local-embed}}\in\mathbb{R}^{V\times D_L} \\
+ h^\text{local-out}_{k,0:P} &= \text{transformer}^{\text{local}}(h^\text{local-in}_{k,0:P}) & h^\text{local-in}_{k,p}\in\mathbb{R}^{D_L},\ h^\text{local-out}\in\mathbb{R}^{K\times P\cdot D_L} \\
+ p(x_{t}|x_{0:t}) &= \text{softmax}{(E^\text{local-embed}h^\text{local-out}_{k,p})}_{x_t} & t=k\cdot P+p
+ \end{align*}$$
+
+ Summary of [MegaByte]{.smallcaps} with vocabulary $V$, sequence length $T$, global and local dimensions $D_G$ and $D_L$, and $K$ patches of size $P$. Transformer layers use masked self-attention so that no position observes information from future timesteps.
54
+
55
+ [MegaByte]{.smallcaps} is an autoregressive model for efficiently modeling long input sequences. It comprises three components: (1) a *patch embedder* that inputs a discrete sequence, embeds each element, and chunks it into patches of length $P$, (2) a large *global* Transformer that contextualizes patch representations by performing self-attention over previous patches, and (3) a smaller *local* Transformer that inputs a contextualized patch representation from the global model and autoregressively predicts the *next* patch.
56
+
57
+ The $\textbf{Patch Embedder}$ with patch size $P$ maps a byte sequence $x_{0..T}$ to a sequence of patch embeddings of length $K=\frac{T}{P}$ and dimension $P\cdot D_G$.
58
+
59
+ First, each byte is embedded with a lookup table $E^{\text{global-embed}}\in\mathbb{R}^{V \times D_G}$ to an embedding of size $D_G$ and positional embeddings are added.
60
+
61
+ $$\begin{align}
62
+ h^\text{embed}_{t}&= E^\text{global-embed}_{x_t}+E^\text{pos}_t & t\in [0..T)
63
+ \end{align}$$
64
+
65
+ Then, byte embeddings are reshaped into a sequence of $K$ patch embeddings with dimension $P\cdot D_G$. To allow autoregressive modelling, the patch sequence is padded to start with a trainable patch-sized padding embedding ($E^{\text{global-pad}} \in\mathbb{R}^{P\times D_G}$), and the last patch is removed from the input. This sequence is the input to the global model, and is denoted $h^\text{global-in}\in\mathbb{R}^{K\times (P\cdot D_G)}$. $$\begin{align}
66
+ h^\text{global-in}_{k} =
67
+ \begin{cases}
68
+ E^{\text{global-pad}} , & \text{if } k = 0, \\
69
+ h^\text{embed}_{((k-1)\cdot P):(k \cdot P)}, & k\in [1,..,K),
70
+ \end{cases}
71
+ \end{align}$$
72
+
73
+ $\textbf{Global Model}$ is a decoder-only Transformer with dimension $P\cdot D_G$ that operates on a sequence of $K$ patches. It incorporates a self-attention mechanism and causal masking to capture dependencies between patches. It inputs a sequence of $K$ patch representations $h^\text{global-in}_{0:K}$, and outputs an updated representation $h^\text{global-out}_{0:K}$ by performing self-attention over previous patches.
74
+
75
+ $$\begin{align}
76
+ h^\text{global-out}_{0:K} &= \text{transformer}^{\text{global}}(h^\text{global-in}_{0:K}) %& \h{global-out}\inR{K\times P\cdot D} \\
77
+ \end{align}$$
78
+
79
+ The output of the final global layer $h^\text{global}_{0:K}$ contains $K$ patch representations of dimension $P\cdot D_G$. We reshape each of these into a sequence of length $P$ and dimension $D_G$, where position $p$ uses dimensions $p \cdot D_G$ to $(p+1) \cdot D_G$. Each position is then projected to the dimension of the local model with a matrix $w^{\text{GL}}\in\mathbb{R}^{D_G\times D_L}$, where $D_L$ is the local model dimension. We then combine these with byte embeddings of size $D_L$ for the tokens in the *next* patch $E^\text{local-embed}_{x_{(k\cdot P + p - 1)}}$. The local byte embeddings are offset by one with a trainable local padding embedding ($E^{\text{local-pad}} \in\mathbb{R}^{D_L}$) to allow autoregressive modelling within a patch. This results in a tensor $h^\text{local-in}\in\mathbb{R}^{K\times P \times D_L}$. $$\begin{align}
80
+ %\h{local-in}_{k,0} &= \h{global-out}_{k,0:D}\\% & \h{local-in}\inR{K\times P \times D} \\
81
+ h^\text{local-in}_{k,p} &= w^{\text{GL}}h^\text{global-out}_{k,(p\cdot D_G):((p+1)\cdot D_G)} + E^\text{local-embed}_{x_{(k\cdot P + p - 1)}}% & p\in [1,..,P ], d\in [0,..,D] \\
82
+ \end{align}$$
83
+
84
+ $\textbf{Local Model}$ is a smaller decoder-only Transformer of dimension $D_L$ that operates on a single patch $k$ containing $P$ elements, each of which is the sum of an output from the global model and an embedding of the previous byte in the sequence. $K$ copies of the Local model are run on the patches independently (and in parallel during training), computing a representation $h^\text{local-out}\in\mathbb{R}^{K\times P\cdot D_L}$.
85
+
86
+ $$\begin{align}
87
+ h^\text{local-out}_{k,0:P} &= \text{transformer}^{\text{local}}(h^\text{local-in}_{k,0:P}) %& \h{local-out}\inR{K\times P\cdot D}
88
+ \end{align}$$
89
+
90
+ Finally, we can compute the probability distribution over the vocabulary at each position. The $p$th element of the $k$th patch corresponds to element $t$ of the complete sequence, where $t=k\cdot P+p$: $$\begin{align}
91
+ p(x_{t}|x_{0:t}) &= \text{softmax}{(E^\text{local-embed}h^\text{local-out}_{k,p})}_{x_t}
92
+ \end{align}$$
93
+
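+ To make the data flow concrete, the following is a minimal, untested PyTorch sketch of this forward pass (our own reconstruction from the equations above, not the authors' code). It uses `nn.TransformerEncoder` with a causal mask as a stand-in for the decoder-only global and local transformers, and the layer counts and dimensions are placeholder values.
+
+ ```python
+ import torch
+ import torch.nn as nn
+
+ class MegaByteSketch(nn.Module):
+     def __init__(self, V=256, T=8192, P=8, D_G=512, D_L=256,
+                  n_heads=8, layers_g=4, layers_l=2):
+         super().__init__()
+         self.P, self.D_G, self.D_L = P, D_G, D_L
+         self.global_embed = nn.Embedding(V, D_G)             # E^global-embed
+         self.pos_embed = nn.Embedding(T, D_G)                # E^pos
+         self.local_embed = nn.Embedding(V, D_L)              # E^local-embed
+         self.global_pad = nn.Parameter(torch.zeros(P, D_G))  # E^global-pad
+         self.local_pad = nn.Parameter(torch.zeros(D_L))      # E^local-pad
+         self.w_gl = nn.Linear(D_G, D_L, bias=False)          # w^GL
+         g = nn.TransformerEncoderLayer(P * D_G, n_heads, batch_first=True)
+         l = nn.TransformerEncoderLayer(D_L, n_heads, batch_first=True)
+         self.global_xf = nn.TransformerEncoder(g, layers_g)
+         self.local_xf = nn.TransformerEncoder(l, layers_l)
+
+     def forward(self, x):                   # x: (B, T) byte ids, T % P == 0
+         B, T = x.shape
+         P, K = self.P, T // self.P
+         pos = torch.arange(T, device=x.device)
+         h = self.global_embed(x) + self.pos_embed(pos)        # (B, T, D_G)
+         # Patchify, then shift right by one patch using the trainable pad.
+         patches = h.reshape(B, K, P * self.D_G)
+         g_in = torch.cat([self.global_pad.flatten().expand(B, 1, -1),
+                           patches[:, :-1]], dim=1)            # (B, K, P*D_G)
+         g_mask = nn.Transformer.generate_square_subsequent_mask(K).to(x.device)
+         g_out = self.global_xf(g_in, mask=g_mask)             # (B, K, P*D_G)
+         # Project each intra-patch position to D_L, add shifted byte embeds.
+         g_out = self.w_gl(g_out.reshape(B, K, P, self.D_G))   # (B, K, P, D_L)
+         l_tok = self.local_embed(x).reshape(B, K, P, self.D_L)
+         l_tok = torch.cat([self.local_pad.expand(B, K, 1, -1),
+                            l_tok[:, :, :-1]], dim=2)          # shift in patch
+         l_in = (g_out + l_tok).reshape(B * K, P, self.D_L)
+         l_mask = nn.Transformer.generate_square_subsequent_mask(P).to(x.device)
+         l_out = self.local_xf(l_in, mask=l_mask).reshape(B, K, P, self.D_L)
+         return l_out @ self.local_embed.weight.t()    # logits: (B, K, P, V)
+ ```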
94
+ We experiment with several extensions of [MegaByte]{.smallcaps}.
95
+
96
+ One limitation of chunking sequences into patches is that patching is not translation invariant: byte sequences may receive a different representation depending on their position within a patch. This may mean, for example, that a model has to relearn the meaning of a word at different offsets. To mitigate this issue, we experimented with augmenting the Patch Embedder with causal convolutional layers, which compute translation-invariant contextual representations of the bytes before they are chunked into patches. We use a stack of convolutional layers with filter sizes of 3, 5, and 7.
97
+
98
+ The Local model uses short sequences for efficiency, and relies on the Global model for long-range information. However, we can increase the context of the Local model with little overhead by allowing it to condition on $r$ elements from the previous patch. This approach allows the Global model to focus on a longer-range context. Specifically, when computing self-attention in each layer, we concatenate the keys and values with the last $r$ keys and values from the previous patch. We use rotary embeddings [@rotary] to model relative positions between elements in the sequence. This approach is reminiscent of TransformerXL [@xfmxl] but differs by being fully differentiable.
99
+
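+ As a rough sketch (our own, with the attention computation and rotary embeddings omitted), the keys and values per patch could be extended as follows; padding the first patch with zeros is a simplifying assumption:
+
+ ```python
+ import torch
+
+ def add_cross_patch_context(k, v, r):
+     # k, v: (B, K, P, d) per-patch keys/values. Prepend the last r positions
+     # of the previous patch to each patch; the first patch gets zeros.
+     prev_k = torch.cat([torch.zeros_like(k[:, :1, -r:]), k[:, :-1, -r:]], dim=1)
+     prev_v = torch.cat([torch.zeros_like(v[:, :1, -r:]), v[:, :-1, -r:]], dim=1)
+     return torch.cat([prev_k, k], dim=2), torch.cat([prev_v, v], dim=2)  # (B, K, P+r, d)
+ ```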
100
+ We observed empirically that the per-token loss within each patch would increase towards the end of the patch, as the prediction relies more on the weaker Local model. To alleviate this issue, we propose *strided inference*, in which we predict the sequence with two forward passes of the full model, whose inputs are offset by $P/2$ positions from each other. We then combine the predictions from the first $P/2$ positions of each patch across the two passes to cover the complete sequence. Similarly to sliding window techniques [@shortformer], this approach doubles the cost of inference but improves results.
101
+
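+ A hypothetical sketch of how the two offset passes could be combined (`log_probs_fn` is an assumed interface returning per-token log-probabilities, and boundary effects at the sequence ends are glossed over):
+
+ ```python
+ import torch
+
+ def strided_log_probs(log_probs_fn, x, P):
+     half = P // 2
+     lp1 = log_probs_fn(x)              # pass 1: patches aligned at offset 0
+     lp2 = log_probs_fn(x[:, half:])    # pass 2: patches aligned at offset P/2
+     out = lp1.clone()
+     pos = torch.arange(x.shape[1])
+     # A position in the second half of a pass-1 patch falls in the first
+     # half of a pass-2 patch, where predictions are stronger; use pass 2.
+     take2 = (pos % P) >= half
+     out[:, take2] = lp2[:, pos[take2] - half]
+     return out
+ ```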
102
+ Having described the model, we briefly discuss the motivation behind some of the architectural choices.
103
+
104
+ **Why is the local model needed?** Many of the efficiency advantages of the [MegaByte]{.smallcaps} design could be realized with the Global model alone, which would resemble a decoder version of ViT [@vit]. However, the joint distribution over the patch $p(x_{t+1},..,x_{t+P}|x_{0..t})$ has an output space of size $256^{P}$ so direct modeling is only tractable for very small patches. We could instead factor the joint distribution into conditionally independent distributions $p(x_{t+1}|x_{0..t})..p(x_{t+P}|x_{0..t})$, but this would greatly limit the model's expressive power. For example, it would be unable to express a patch distribution such as 50% *cat* and 50% *dog*, and would instead have to assign probability mass to strings such as *cag* and *dot*. Instead, our autoregressive Local model conditions on previous characters within the patch, allowing it to only assign probability to the desired strings.
105
+
106
+ **Increasing Parameters for Fixed Compute** Transformer models have shown consistent improvements with increasing parameter counts [@kaplan2020scaling]. However, the size of models is limited by their increasing computational cost. [MegaByte]{.smallcaps} allows larger models for the same cost, both by making self-attention sub-quadratic, and by using large feedforward layers across patches rather than individual tokens.
107
+
108
+ **Re-use of Established Components** [MegaByte]{.smallcaps} consists of two transformer models interleaved with shifting, reshaping and a linear projection. This re-use increases the likelihood that the architecture will inherit the desirable scaling properties of transformers.
109
+
110
+ We analyze the cost of different architectures when scaling both the sequence length and size of the models.
111
+
112
+ Self-attention in a transformer architecture has $O(T^2)$ cost for a sequence of length $T$. Much work has explored reducing this; for example, Sparse Transformers [@sparsetransformer] and Routing Transformers [@routing] show strong results with a complexity of $O(T^\frac{3}{2})$. Numerous linear attention mechanisms have also been proposed [@lineartransformer; @linear; @performer], although we are not aware of competitive results on large-scale language modeling tasks. As a function of sequence length $T$ and patch size $P$, the Global model operates on a sequence of length $\frac{T}{P}$ so uses $O(\frac{T^2}{P^2})$ operations, and the Local model runs on $\frac{T}{P}$ sequences of length $P$ so uses $O(\frac{TP^2}{P})=O(TP)$ operations. The overall cost of [MegaByte]{.smallcaps} is therefore in $O(\frac{T^2}{P^2}+TP)$. $P$ is a hyperparameter that is chosen to create an architecture for sequences of size $T$. Setting $P=T^\frac{1}{3}$ gives a complexity of $O(T^\frac{4}{3})$, whereas using much shorter patches of $P=T^\frac{1}{5}$ would give $O(T^\frac{8}{5})$. The cost is less than that of the transformer for all non-trivial values of $P$ with $1<P<T$.
113
+
114
+ However, attention is not the main cost in large transformers. Instead of increasing the sequence length, transformers are more commonly scaled by increasing the dimension of their latent state $d$, and the feedforward network cost dominates the model's overall cost [@kaplan2020scaling]. For example, in the GPT3 architecture, the quadratic self-attention computation accounts for only 1.4% of FLOPS. Following the approximation of [@kaplan2020scaling], a forward pass with a large transformer with $m$ non-embedding parameters on a sequence of length $T$ uses roughly $2mT$ FLOPS. [MegaByte]{.smallcaps} contains two transformers: the Global model uses $m_g$ parameters on a sequence of length $\frac{T}{P}$, and a Local model with $m_l$ parameters that sees $\frac{T}{P}$ sequences of length $P$, giving an estimate of $2T(\frac{m_g}{P} + m_l)$ FLOPS. When $m_g\gg m_l$, the FLOPS used by [MegaByte]{.smallcaps} is approximately $\frac{2Tm_g}{P}$, allowing a model $P$ times larger than a transformer with equivalent FLOPS. This analysis holds irrespective of any efficient attention mechanisms used in the transformer.
115
+
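+ As a back-of-envelope illustration of this accounting (our own arithmetic, using the PG19 generation model sizes reported later in Table 5):
+
+ ```python
+ def transformer_flops_per_token(m):
+     return 2 * m                          # 2m FLOPS/token approximation
+
+ def megabyte_flops_per_token(m_global, m_local, P):
+     return 2 * (m_global / P + m_local)   # Global model runs once per P tokens
+
+ print(transformer_flops_per_token(350e6))           # 7.0e+08
+ print(megabyte_flops_per_token(1.3e9, 218e6, P=8))  # ~7.6e+08, with ~4x the parameters
+ ```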
116
+ <figure id="figure:flops" data-latex-placement="t">
117
+ <img src="figs/flops.png" />
118
+ <figcaption><span id="figure:flops" data-label="figure:flops"></span>Computational cost (FLOPS/token) for different model architectures at different scales. <span class="smallcaps">MegaByte</span> architectures (here with <span class="math inline"><em>P</em> = 8</span>) use less FLOPS than equivalently sized Transformers and Linear Transformers <span class="citation" data-cites="lineartransformer"></span> across a wide range of model sizes and sequence lengths, allowing larger models to be used for the same computational cost.</figcaption>
119
+ </figure>
120
+
121
+ To understand efficiency at different sequence lengths and model sizes, we calculate the total FLOPS used by transformers, Linear Transformers and [MegaByte]{.smallcaps}. For each operation, we use FLOP estimates from [@kaplan2020scaling], except for attention in Linear Transformers, which we estimate as $9D$ FLOPS/token[^1], where $D$ is the model embedding dimension. Figure [2](#figure:flops){reference-type="ref" reference="figure:flops"} shows that for models of size 660M to 173B and sequence lengths of up to 1M tokens, [MegaByte]{.smallcaps} with $P=8$ uses less FLOPS than either transformers or Linear Transformers. Baseline model architectures are based on GPT3, and Megabyte global/local model sizes are 452M/151M, 5.8B/604M, 170B/3.2B respectively.
122
+
123
+ Generating long sequences with transformers is slow, because the input to each timestep is the output from the previous timestep, meaning each layer must be computed for each token serially. As running a layer on a single token typically does not saturate the parallelism available within a GPU, for this analysis we model each layer as having constant cost, independent of its size. Consider a [MegaByte]{.smallcaps} model with $L_{\text{global}}$ layers in the Global model, $L_{\text{local}}$ layers in the Local model, and patch size $P$, compared with a Transformer architecture with $L_{\text{local}}+L_{\text{global}}$ layers. Generating each patch with [MegaByte]{.smallcaps} requires a sequence of $O(L_{\text{global}}+P\cdot L_{\text{local}})$ serial operations, whereas the Transformer requires $O(P\cdot L_{\text{global}}+P\cdot L_{\text{local}})$ serial operations. When $L_{\text{global}}\gg L_{\text{local}}$ (i.e. the Global model has many more layers than the Local model), [MegaByte]{.smallcaps} can reduce inference costs by a factor close to $P$.
124
+
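+ A toy count makes the gap visible (layer counts here are made up for illustration):
+
+ ```python
+ def serial_steps_per_patch(L_global, L_local, P):
+     megabyte = L_global + P * L_local        # Global once, Local for each byte
+     transformer = P * (L_global + L_local)   # every layer for every byte
+     return megabyte, transformer
+
+ print(serial_steps_per_patch(L_global=24, L_local=4, P=8))  # (56, 224)
+ ```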
125
+ # Method
126
+
127
+ Several techniques have been proposed for trading off speed for performance during inference with language models, including sliding windows [@shortformer] and our strided inference (Section [2.3.3](#strided){reference-type="ref" reference="strided"}). We only use these methods when comparing with prior published work (Tables [\[tab:text_sota\]](#tab:text_sota){reference-type="ref" reference="tab:text_sota"} and [3](#tab:img_sota){reference-type="ref" reference="tab:img_sota"}).
128
+
129
+ We evaluated the performance of [MegaByte]{.smallcaps} on language modeling on a set of 5 diverse datasets emphasizing long-range dependencies: Project Gutenberg (PG-19), Books, Stories, arXiv, and Code.
130
+
131
+ ::: {#tab:document_lengths}
132
+ Dataset Total Bytes Mean document size (bytes)
133
+ --------- ------------- ----------------------------
134
+ PG-19 10.1GB 411,404
135
+ Stories 21.3GB 35,265
136
+ Books 79.7GB 509,526
137
+ arXiv 91.5GB 58,518
138
+ Code 353.7GB 7,461
139
+
140
+ : Text dataset sizes and mean document lengths.
141
+ :::
142
+
143
+ **Datasets** We experiment on a range of long form text datasets. The PG-19 dataset [@pg19] consists of English-language books written before 1919 and is extracted from the [Project Gutenberg online library](https://www.gutenberg.org/). The Stories dataset [@stories] is a subset of CommonCrawl data meant to emulate Winograd schemas. Books [@pile] is another collection of English-language books. The arXiv dataset is a collection of technical publications written in LaTeX from the [arXiv online archive](arxiv.org). Finally, the Code dataset is a large publicly available dataset of open source code, under Apache, BSD or MIT licenses. More details on dataset sizes and document lengths are shared in Table [1](#tab:document_lengths){reference-type="ref" reference="tab:document_lengths"}.
144
+
145
+ ::: {#tab:text_exps}
146
+ PG-19 Stories Books arXiv Code
147
+ ------------------------ ----------- ----------- ----------- ----------- -----------
148
+ Transformer 1.057 1.064 1.097 0.816 0.575
149
+ PerceiverAR 1.104 1.070 1.104 0.791 0.546
150
+ [MegaByte]{.smallcaps} **1.000** **0.978** **1.007** **0.678** **0.411**
151
+
152
+ : Performance (bits-per-byte) of compute and data controlled [MegaByte]{.smallcaps}, PerceiverAR, and Transformer models on various text modalities.
153
+ :::
154
+
155
+ **Controlled Experiments** Table [2](#tab:text_exps){reference-type="ref" reference="tab:text_exps"} lists bpb on each dataset. Each model is trained for 80 billion bytes, and models are scaled to use the same compute budget. We carefully tune hyperparameters for all architectures to best utilize the available compute budget. [MegaByte]{.smallcaps} consistently outperforms both baseline transformers and PerceiverAR across all datasets. We use the same set of parameters on all datasets. In all experiments presented in Table [2](#tab:text_exps){reference-type="ref" reference="tab:text_exps"}, the transformer has a size of 320M with context length 1024, PerceiverAR has a size of 248M with context size 8192 and latent size 1024, and [MegaByte]{.smallcaps} global/local model sizes are 758M/262M with context length 8192 and patch size 8.
156
+
157
+ ::: table*
158
+ Tokenizer Vocab Size Context Length Validation Test
159
+ --------------------------------------- --------------- ------------ ---------------------------- ------------ ----------
160
+ TransformerXL [@compressive] SentencePiece 32k 512+1024 (subwords) 45.5 36.3
161
+ CompressiveTransformer [@compressive] SentencePiece 32k 512+512+2x512 (subwords) 43.4 33.6
162
+ PerceiverAR [@perceiverar] SentencePiece 32k 2048 (subwords) 45.9 28.9
163
+ BlockRecurrent [@blockrecurrent] SentencePiece 32k 1024+recurrence (subwords) \- **26.5**
164
+ Transformer byte-level (ours) Bytes 256 2048 (bytes) 81.6 69.4
165
+ PerceiverAR byte-level (ours) Bytes 256 8192 (bytes) 119.1 88.8
166
+ [MegaByte]{.smallcaps} Bytes 256 8192 (bytes) **42.8** 36.4
167
+ :::
168
+
169
+ []{#sec:results label="sec:results"}
170
+
171
+ **Scaling Experiment** We scale up our training data on PG-19 (Table [\[tab:pg19_summary\]](#tab:pg19_summary){reference-type="ref" reference="tab:pg19_summary"}), comparing [MegaByte]{.smallcaps} with byte-level baselines, and convert all results to word-level perplexities to benchmark against state-of-the-art token-based models.
172
+
173
+ We train byte-level Transformer, PerceiverAR, and [MegaByte]{.smallcaps} models for 400B bytes with the same compute budget, using the same model parameters as in the controlled experiments. We find that [MegaByte]{.smallcaps} outperforms other byte-level models by a wide margin at this scale.[^3]
174
+
175
+ We also compare with the best previously reported numbers for sub-word models. These results may be confounded by differing amounts of compute and tuning used, but show that [MegaByte]{.smallcaps} gives results competitive with state-of-the-art models trained on subwords. These results suggest that [MegaByte]{.smallcaps} may allow future large language models to be tokenization-free.
176
+
177
+ We test [MegaByte]{.smallcaps} on variants of the autoregressive image generation task on ImageNet [@Oord2016PixelNetworks], to measure its ability to efficiently use long context. We test on three different resolutions of images, ranging from 64 to 640 pixels -- the latter requiring the effective modeling of sequences with over 1.2M tokens. This generation task becomes increasingly challenging as the image's resolution grows: doing well on this task requires the modeling of local patterns (textures, lines, etc.) and long-range context that provides information about the high level structure of the image. Inspired by recent works in Vision Transformers [@vit], we model image data patch by patch (more details can be found in Appendix [14.1](#sec:scan_type){reference-type="ref" reference="sec:scan_type"}).
178
+
179
+ We train a large [MegaByte]{.smallcaps} model on ImageNet 64x64 with Global and Local models sized 2.7B and 350M parameters, respectively, for 1.4T tokens. We estimate that training this model consumed less than half the GPU hours we would have needed to reproduce the best PerceiverAR model described by [@perceiverar]. As shown in Table [3](#tab:img_sota){reference-type="ref" reference="tab:img_sota"}, [MegaByte]{.smallcaps} matches the state-of-the-art performance of PerceiverAR whilst using only half the compute.
180
+
181
+ ::: {#tab:img_sota}
182
+ ImageNet64 bpb
183
+ -------------------------------- ----------
184
+ Routing Transformer [@routing] 3.43
185
+ Combiner [@combiner] 3.42
186
+ Perceiver AR [@perceiverar] **3.40**
187
+ [MegaByte]{.smallcaps} **3.40**
188
+
189
+ : Bits per byte (bpb) on ImageNet 64. [MegaByte]{.smallcaps} matches the current state-of-the-art while only using half the amount of GPU hours to train.
190
+ :::
191
+
192
+ We compare three transformer variants (vanilla, PerceiverAR, [MegaByte]{.smallcaps}) to test scalability to long sequences on increasingly large image resolutions. We use our own implementations of these in the same framework and budget the same amount of GPU hours and data to train each of these model variants.
193
+
194
+ [MegaByte]{.smallcaps} is able to handle all sequence lengths with a single forward pass of up to 1.2M tokens. We found neither the standard Transformer nor PerceiverAR could model such long sequences at a reasonable model size, so we instead split images into segments of size 1024 and 12000 respectively. For [MegaByte]{.smallcaps}, we set the patch size to 12 for Image64 and to 192 for the Image256 and Image640 datasets. Model sizes are adjusted to match overall training speeds across models, and we do not use any form of sliding window evaluation in this experiment. As seen in Table [4](#tab:resolution_scale){reference-type="ref" reference="tab:resolution_scale"}, [MegaByte]{.smallcaps} outperforms baselines across all resolutions in this compute-controlled setting. The precise settings used for each of the baseline models, such as context length and number of latents, are summarized in Table [10](#table:opt_model_desc){reference-type="ref" reference="table:opt_model_desc"}.
195
+
196
+ Results show that [MegaByte]{.smallcaps} outperforms the other systems at all resolutions, demonstrating an effective model of sequences of over 1M bytes.
197
+
198
+ ::: {#tab:resolution_scale}
199
+ Context Image64 Image256 Image640
200
+ ------------------------ --------- ---------- ----------- -----------
201
+ Total len 12288 196608 1228800
202
+ Transformer 1024 3.62 3.801 2.847
203
+ Perceiver AR 12000 3.55 3.373 2.345
204
+ [MegaByte]{.smallcaps} Full **3.52** **3.158** **2.282**
205
+
206
+ : Bits per byte (bpb) on ImageNet with different resolutions. All models use the same compute and data. MEGABYTE scales well to sequences of over 1M tokens.
207
+ :::
208
+
209
+ Audio has aspects of both the sequential structure of text and the continuous nature of images, so is an interesting application for [MegaByte]{.smallcaps}.
210
+
211
+ Raw audio is typically stored as a sequence of 16-bit integer values (one per timestep); a softmax layer would need to output 65,536 probabilities per timestep to model all possible values. To address this issue, various techniques have been developed to reduce the memory and computational requirements of the softmax layer. For instance, @wavenet applies a $\mu$-law companding transformation and quantizes the input into 256 possible values. Alternatively, @parawavenet model the samples using the discretized mixture of logistics distribution introduced by @{pixcelcnn++}. Finally, @wavernn use a dual softmax technique to produce 8 coarse and 8 fine bits. In our approach, we simplify audio modeling by directly reading the bytes (256 possible values) from the audio file and training an autoregressive language model on top of them. This greatly streamlines the modeling process.
212
+
213
+ Our audio modeling approach focuses on 16 kHz, 16-bit audio, which equates to 32k bytes per one-second clip. We use an extensive audio dataset consisting of 2 terabytes (roughly 18,000 hours) of audio. We use a sequence length of 524,288, a patch size of 32, and a batch size of 32. These settings let us train the model effectively on large volumes of audio data.
214
+
215
+ Our model obtains bpb of 3.477, much lower than the results with perceiverAR (3.543) and vanilla transformer model (3.567). More ablation results are presented in Table [6](#tab:ablations){reference-type="ref" reference="tab:ablations"}.
216
+
217
+ We also compare the text generation speed of [MegaByte]{.smallcaps} and a transformer. We compare a 350M parameter baseline transformer and a [MegaByte]{.smallcaps} model with a 1.3B parameter Global model and a 218M parameter Local model, trained on PG19 with equal compute. As shown in Table [5](#tab:generation){reference-type="ref" reference="tab:generation"}, the [MegaByte]{.smallcaps} model achieves much lower perplexity, as expected. However, [MegaByte]{.smallcaps} also generates a sequence of 8192 tokens 40% *faster* than the transformer, despite having over 4 times the parameters. This speed-up is due to the bulk of the parameters being in the Global model, which only needs to be computed once for every 8 tokens, whereas all the parameters in the baseline model are used on every token.
218
+
219
+ ::: {#tab:generation}
220
+ ------------------------ ------------- ------------ ------- ----------
+                          Global Size   Local Size   bpb     Time (s)
+ ------------------------ ------------- ------------ ------- ----------
+ Transformer              \-            350M         1.064   132
+ [MegaByte]{.smallcaps}   1.3B          218M         0.991   93
+ ------------------------ ------------- ------------ ------- ----------
229
+
230
+ : Comparison of bits per byte (bpb) and generation speed of 8192 bytes of transformer model (with context length 1024) and [MegaByte]{.smallcaps} with context length 8192 and patch size 8.
231
+ :::
232
+
233
+ In Table [6](#tab:ablations){reference-type="ref" reference="tab:ablations"}, we analyze the significance of different components in the [MegaByte]{.smallcaps} architecture by studying the arXiv, Librilight-L, and ImageNet256 datasets. Removing the Local (*w/o local model*) or Global (*w/o global model*) model leads to a substantial increase in bpb on all datasets, showing that both parts are crucial. The performance of the model without cross-patch attention (*w/o cross-patch attention*) is competitive, indicating that the architecture is robust to this modification. We observe a slight improvement on the Librilight-L and ImageNet256 datasets by augmenting the [MegaByte]{.smallcaps} model with a CNN encoder (*w/ CNN encoder*). This suggests that the [MegaByte]{.smallcaps} architecture can benefit from integrating alternative encoding mechanisms.
234
+
235
+ ::: {#tab:ablations}
236
+ Arxiv Audio ImageNet256
237
+ ----------------------------- ------------ ----------- -------------
238
+ [MegaByte]{.smallcaps} 0.6871 **3.477** **3.158**
239
+ *w/o local model* 1.263 5.955 4.768
240
+ *w/o global model* 1.373 3.659 3.181
241
+ *w/o cross-patch attention* **0.6781** 3.481 3.259
242
+ *w/ CNN encoder* 0.6871 **3.475** **3.155**
243
+
244
+ : Ablation of [MegaByte]{.smallcaps} model components, showing that both Local and Global models are critical to strong performance, but the architecture is robust to other modifications. We report bits-per-byte on text, audio, and image prediction tasks. All models within a column are trained using the same compute and data. The hyperparameters are listed in Table [10](#table:opt_model_desc){reference-type="ref" reference="table:opt_model_desc"}.
245
+ :::
246
+
247
+ Long-context models often struggle to benefit from the full context [@sun2021long]. Figure [3](#fig:prob_within_context){reference-type="ref" reference="fig:prob_within_context"} shows that later tokens within each context window consistently have a higher likelihood, indicating that [MegaByte]{.smallcaps} can effectively use at least 8k bytes of context on the PG19 dataset.
248
+
249
+ <figure id="fig:prob_within_context" data-latex-placement="t">
250
+ <img src="figs/probs_within_context_baseline_sliding_window.png" />
251
+ <figcaption>Average log probability assigned to the token at different positions within the context length by <span class="smallcaps">MegaByte</span> model with 8192 context size and by a vanilla transformer model trained using the same compute (PG19 test set). <span class="smallcaps">MegaByte</span> likelihoods rise throughout its context window, demonstrating that it can use tokens from 8k bytes previously to improve its predictions. </figcaption>
252
+ </figure>
253
+
254
+ We find that, on average, [MegaByte]{.smallcaps} performs worse on later tokens within a patch (see Figure [4](#fig:strided_inference){reference-type="ref" reference="fig:strided_inference"}). Section [2.3.3](#strided){reference-type="ref" reference="strided"} proposes *strided inference* as a solution, where two forward passes are performed offset by $\frac{P}{2}$ tokens, and results from the first half of each patch are combined. Table [7](#tab:inference_options){reference-type="ref" reference="tab:inference_options"} shows performance improvements from strided inference, which are additive with the standard sliding window.
255
+
256
+ <figure id="fig:strided_inference" data-latex-placement="t">
257
+ <img src="figs/strided7.png" />
258
+ <figcaption>An illustration of strided inference with patch size 8. Lines below the text represent the patches used in the two rounds of inference, the plot above it represents the average probability assigned to the token at a given position within a patch. By considering only the first half of each patch from the two rounds of inference and combining them (bold lines on top), we achieve a better overall bpb.</figcaption>
259
+ </figure>
260
+
261
+ ::: {#tab:inference_options}
262
+ Method Inference Cost bpb
263
+ ------------------------ ---------------- ------------
264
+ Basic Inference 1X 0.9079
265
+ *w/ Sliding Window* 2X 0.8918
266
+ *w/ Strided Inference* 2X 0.8926
267
+ *w/ Sliding & Strided* 4X **0.8751**
268
+
269
+ : Performance of various inference techniques on the PG19 test set using our best [MegaByte]{.smallcaps} model.
270
+ :::
271
+
272
+ [MegaByte]{.smallcaps} introduces several additional hyperparameters. We tuned these parameters independently for different modalities and reported performance based on the best setting we found. All experiments in the same group use the same compute.
273
+
274
+ **Patch Size.** We experimented with various patch sizes on the Image256 dataset and found that there is a wide range of values where [MegaByte]{.smallcaps} performs similarly. We found similar robustness against the choice of this hyperparameter across all modalities, although the optimal patch size itself can differ across modalities.
275
+
276
+ ::: {#tab:hyperparams_ablation}
277
+ Patch Size Global Size Local Size bpb
278
+ ------------ ------------- -------------------- -------
279
+ 48 125M 114M (D=768, L=11) 3.178
280
+ 192 125M 125M (D=768, L=12) 3.158
281
+ 768 125M 83M (D=768, L=8) 3.186
282
+
283
+ : Effects of patch size on performance on the Image256 dataset. All versions use the same amount of GPU hours and data.
284
+ :::
285
+
286
+ **Local to Global model Size Ratio.** We experimented with different Local/Global model size ratios on the PG19 dataset. By grouping bytes into patches, [MegaByte]{.smallcaps} effectively uses $P$ times fewer tokens in the Global model than in the Local model---enabling us to increase the size of the Global model without increased cost. We find that a given compute budget is spent optimally when the Global model has more parameters than the Local model. This trend was consistent across all modalities and various patch sizes.
287
+
288
+ ::: {#tab:size_inference}
289
+ Global Size Local Size bpb
290
+ -------------------- -------------------- -------
291
+ 350M (D=1024,L=24) 290M (D=1024,L=20) 1.014
292
+ 760M (D=1536,L=24) 262M (D=1024,L=18) 1.002
293
+ 1.3B (D=2048,L=24) 218M (D=1024,L=15) 0.991
294
+
295
+ : Effects of Local / Global model size on performance on the PG19 dataset. Increasing the capacity of the Global model improves performance. Models are compute and data matched.
296
+ :::
2305.10051/main_diagram/main_diagram.drawio ADDED
@@ -0,0 +1 @@
 
 
1
+ <mxfile host="app.diagrams.net" modified="2022-12-06T15:59:40.359Z" agent="5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/108.0.0.0 Safari/537.36" etag="B6ZQbDb8hNGigVEkAjiw" version="20.6.0" type="google"><diagram id="-rYwSFd57W1ithcO8tzu" name="Page-1">7V1bk5s4Fv41rtrZqnYhAQY/9iXJbu1kJjPJ7Gb2ZYq21TYV2zhAp+3+9SOBhJGQjWQjwHb3Q2IEFnDOp3M/8sC+X24+xMF6/jGaosUAWtPNwH4YQDh2RvhfMrDNB1zXywdmcTjNh8Bu4HP4iuigRUefwylKuAvTKFqk4ZofnESrFZqk3FgQx9ELf9lTtODvug5mqDLweRIsqqP/C6fpPB/1XWs3/i8UzubszsCiZx6DybdZHD2v6P0G0H7K/vLTy4DNRe+QzINp9JIPZVPY7wb2fRxFaf5publHC0JaRrb8Pu/3nC2eO0arVOULm/+vfwP2L1/+E//2+sfXX52t8+7XG8fOp/kRLJ4Re4/sadMtoxB5yXX1bjxfgkd2OSXPDxSnaFO6lD7VBxQtURpv8SX0rEcBQwEEHDrDy44dENDnnJdZ4foUBhQCs2LqHRnwB0oJHapIiILHnHX+X4VC+EXxM93N0yW+zQPAH4NFOFvhzxNMLhTjAUKOEKPulp5YhtMp+fpdjJLwlVLPwsfrKFyl2Qu5dwP3gcz1nEZJvm7I1EkaR9/QfbSI8LwPq2hFZnkKFwthqMqxwwhQ5hdwbZ5h0K8wbGRV+WVbp7Prz0//nj6lX6ZfPwbP8R/BNvyv495ALQxXXnQ/hhWJV6VUAeUKYYDrSJDsOIaQ7NWTJhNhaErh9zIPU/R5HUzI2Rcs8XlcV+lykCPqmLJ4TLluhXKOBFJOA5CS0g3U0w2tprdE/5BVvgiSJJzwpOLpqkc4NOWUVpVsJbq4ErqwsRgtgjT8was6GbHoHT4R4bPjigOFhT7iZ0ii53iC6JfKqufwPMIsaRDPUFqZJeNb8crHs1JBOlwBK22BleBIVkJhoVYmMsxNBXsFG1rrTHoF20yZEp35DaWTOeUf0ZWf6eVJtCC3u6P6dcQUMIrf/UC5HgZSpSxRuKImj6M0SMuaHcUhfn1iDlC74NNu5C5ZBesvUU63lmXtqFtZ60pYSsys78rWlqpJpG6VydTgKVJAgTuUG3aFGaZsKbnisyTcGC0IwZ+iTBDsODH6/hyxEzf5CrrFF/jrTUYfdhp/muX/5+y0Bu79UxxMBh5eS/donYSLaDXwHvLj5Huc4k8wG3hg38mfAL9R/hBswl4igzwi9XNBIQfCaGUKOMwTFyT8WGlVQ88UkPT0LuXDNEjmGSHBIQ0sUliPpLmGqtcxPdHcANZoXFXVbXs15pxh1Q1kbrUhRGAgxNuv5OTQ9Vw28Gc+UBw/bOjX86Nt+aikm49YtmeGMcFA984VYftMifVfVq+Nia5UhiNIlmq8ol3jY7TPFDwz/pkyDkfV0Gi7DPK1RPjOmU7wyk81fOyqPGepA8hmKx3rkbsnMhewKCPlrSNEs1WFLnDsg/OYlrnjN0g0BgkLNAMJ6HQJCSjzId8gcRwkXF5KHGuaifOwAHtbkDg2oP4GiVpWjr1mIOHZ7UJCFsrVizQBeDDU9PtfeUiJpOKxXYcP3l1EVMk36BIIuV2HorOr1K7MoRPYc3pqt6Y8weKXSZH+5uoT3CpRCsOseTOcJZEP0aUmkxsk67yc5incoCwFwupTLKWESbYEWMUM4AE6xoezOJiGmMQPYYzvk/uyqygml6uC193D1f3+LG8+2dUQqCcBr2fMW4Iyf1ZF652ae3R7pbAcIcwABdtDOY3My6aWY0tQoZZCj5mlIKUPPD5Iadm2wSglZUBtlBLAca+QZIsBqyORBPyaiYyDSeY1X0CJWTNyHDDvpGBP11lRW+bSXkySWp1BPUlSs2qWHiUX+6VyCx17sqQUJvKESkXTgtKWWbrXk9LRN4FHfZOce8t7zoyBpjgGOYa5VZ+lZX4d67P0IlLXNynMr0amtrSF8AgenMe4ED7W9TGLCbQJU+o+jT16TL0nyI53zhM52JYOTnOdeoIw3x/60Cr+gIg37FjuzlrH5g5Yg0dH8HMVooCXUvTLR+E4tTfAUj/7Uwm/mdJWwOLtwc4bNWyFivCzrypTjte4/RJPQuTv6OSl6ISoRWswz4Nt6TK6oA+oayi7zQ6e+YTHCjcpZ6XlE81nweBFp8EkeR+1JbVXzAnAhVUp126HY6tV+fB6y/KbR5IQaYZK6rKJqnzpsyo4d6bzqY4YopCk6aT93lC0XxtsetHLM9dCYnSYWArmVnfN7yNZjec5hrprWK1s8t7wK1jSWNNqhMaTGbyXEVA7mWNCkLorda1QnWFoQwJKrL1YFtqAnSqhgA8k0oZ1BjSPZr1a0ZPcNy4apZfCZGsqc1gOvI7fr74eoSDGP9L/EqZx2k2Wj/T6xvQKLwQ3vsaH10OLAAIF/z3XvZ2Fq4W60mO9dX6aG3FzE9NwUbDlzqVOx1HFjdcPsXOinBErksV5TCNHlua41KIcJlTVI79QUCYdNwuyXtMrrQzQ5p+QQ+yafb5C3J4R+efgES0+4cVAyfQYpWm0lHAhjYQqZ5YWWm5mZPfC4WOAZf0wziqfd3yGO0ZXEyt0CULCmxCzMeOKW0nVsGQTEJJNkzhKkpt5kJ2tZnTuIM3olFEBBRDi65ALnOy6Q3XVZI7Sd+h2hAYhxYflIRxKskFeFVSiAdlgIEehz6j1VGE5cdduvvD9e5V8IdAosNDHiD8UUNJxxtDTa0U7z4Sh3APpzA3lAOAc64UK/dItl2xbem5oGzWn/eKy7w/LxSaW3N7Q5frhWVv2LYErcxGMNC2WEnDnnWzT6FbU90na23l2z5ZGsmADh4enYMLD4QuW5wk+8Qt6wf/+Hi2DVRkLJwMIYyfDgEcmwx+wTCIhhDnKVviMWIwQv61FkLWDGTFv8S3J8rL+OciywfTUkL3QYyzmjM8JkWrWulJ7o469NNKFtKDlAHNzO4O4K4vQl4A2yXjmYccEIVovkMsweMeKBSi2CKaW63SboLTXDnqDUNEoxdNGii+0eLjtIUX6ArWVTLqiMJcn/5j8dF6SZg8fVSGwl9+s6bvgd8e9+UDvtwOMFJMAmy9NLlJV5Ywmy9i3QpVaA/HIVRD8RO6MwZnHCq9kRYA9G+yX2Wu5LMLQDuz36cOw10qNwOR9sAwXZKXIIGeCTUxydVwy45xn675q1tjpWZM+4PdpqGywobzfw6hmIsPe/l7L99wq4VjEU70n0ePValFt2NUKlrYAXUbRmz5zfD4RK/uZmXYNQQXxatoQtIVdsou3lQm45hVM4xv9mC5RAvKKEQVtkxOxq9ISkYW1uoRVIY1b1h3X0JbGQFRvofj9sFC0NzR
nUmQP+gyDyJGFVC6gIElb5TnCxk9w1PFPzqlshG1c5QmxD+C7kvqLIizId484wwZqMKSkkVGmiQDI49WEAcGeKmB+GsZ1S8b1lrNg++SUTorAvV+h74O3LIEKNnRkp/jLb93jxVEQnoeKtEQav4az12B2swjzCr4TS7WKmiljpVrkgJXxsRtP0VPwnEm3YlqCvnA1G2j9Gpw+Phy+HRM6siK+Ngu0mMLqUaENo+rZGLqs+I7nrV/R+sqGMOCTjePKTKb9KVn11fVUex8h9vlf1fZltbntFnwrLOu3gu9WC76PUBaCH1bNtrZa7+0r9HpdAqaYFSli5hA2JMXg5M8kOlwhMG0pGRLAmMRRiEtfAjpqJI5ljccqEqe4TrRNs3P397XSyByu+CicTOpIcDU2BKvxlcDqXISO0BYwlgXB2oSHgnf7Bo/W4OHxLpBjdQ0PWazsAuFxbUqpe7mjkIN+A9YZAKsisTqFFbAUMtlvLbUntIicHrLteg9eVyZ63rriBs10xWngo7N0eDUOk7edlQrWSc8ZJhuZ63Yxi+IwnS/xZzjsMbPMFUwfTvHeCDuuAziSWRd+q3vQKvTNG/8tRtcZesJ2An61uBzA8RBKegKcAwmU0/Avo00TRQ9TUvQQLAk6V4/JOjt1fV0gCuKu9S4QWTYj7wJhdQ2g93kpY+Ktbk+Q3fLsbEftqlVL9kaH1qefb8+XY3IL1AAHHVfc1aXjZkRYXY/Xys+ajsmRJ7DOBsZ4hw/jiGi04twHEjX4GE0RueJv</diagram></mxfile>
2305.10051/main_diagram/main_diagram.pdf ADDED
Binary file (51 kB). View file
 
2305.10051/paper_text/intro_method.md ADDED
@@ -0,0 +1,283 @@
1
+ # Introduction
2
+
3
+ **Bayesian networks.** Bayesian networks (BNs) [\[Pearl,](#page-8-0) [1988\]](#page-8-0) are probabilistic graphical models that enable succinct knowledge representation and facilitate probabilistic reasoning [\[Darwiche, 2009\]](#page-8-1). Parametric Bayesian networks (pBNs) [\[Castillo](#page-8-2) *et al.*, 1997] extend BNs by allowing polynomials in conditional probability tables (CPTs) rather than constants.
4
+
5
+ **Parameter synthesis on Markov models.** Parameter synthesis is to find the right values for the unknown parameters with respect to a given constraint. Various synthesis techniques have been developed for parametric Markov chains (pMCs), ranging over, e.g., gradient-based methods [\[Heck](#page-8-3) *et al.*[, 2022\]](#page-8-3), convex optimization [\[Cubuktepe](#page-8-4) *et al.*, 2018; [Cubuktepe](#page-8-5) *et al.*, 2022], and region verification [\[Quatmann](#page-8-6) *et al.*[, 2016\]](#page-8-6). Recently, Salmani and Katoen [\[2021a\]](#page-8-7) have proposed a translation from pBNs to pMCs that facilitates using pMC algorithms to analyze pBNs. Proceeding from this study, *we tackle a different problem* [\[Kwisthout and van der](#page-8-8) [Gaag, 2008\]](#page-8-8) for Bayesian networks.
6
+
7
+ **Minimal-change parameter tuning.** Given a Bayesian network B, a hypothesis H, evidence E, $\lambda \in [0, 1]$, and a constraint of the form $\Pr(H|E) \leq \lambda$ (or $\geq \lambda$),
8
+
9
+ what is a minimal change—with respect to a given measure of distance—in the probability values of (a subset of) CPT rows, such that the constraint holds?
10
+
11
+ We illustrate the problem with an example of COVID-19 testing that is adopted from several medical studies [\[Barreiro](#page-8-9) *et al.*[, 2021;](#page-8-9) [Nishiura](#page-8-10) *et al.*, 2020; Dinnes *et al.*[, 2022\]](#page-8-11). The outcome of PCR tests only depends on whether the person is infected, while antigen tests are less likely to correctly identify COVID-19 if the infection is asymptomatic (or presymptomatic). Figure [1a](#page-1-0) depicts a Bayesian network that models such probabilistic dependencies. In the original network, the probability of no COVID-19 given that both tests are positive is 0.011089. Assume that in an application domain, the result of such a query is now required not to exceed 0.009: the aim is to make this constraint hold while imposing the least change in the original network with respect to a distance measure. This is an instance of the *minimal-change parameter tuning* problem. We consider the $\epsilon$-bounded variant of the problem: for a subset of *modifiable* CPT rows, are there new values within distance $\epsilon$ of the original probability values that make the constraint hold?
12
+
13
+ **Main contributions.** Based on existing *region verification* and *region partitioning* techniques for pMCs,
14
+
15
+ - We propose a practical algorithm for $\epsilon$-bounded tuning. More precisely, we find instantiations that (i) satisfy the constraint (if the constraint is satisfiable), (ii) are $\epsilon$-close to the original values, and (iii) approach the minimum-distance instantiation depending on a coverage factor $0 \leq \eta \leq 1$.
16
+ - We propose two region expansion schemes to realize closeness of the results, both for the Euclidean distance and for the CD distance [\[Chan and Darwiche, 2005\]](#page-8-12).
17
+ - Contrary to existing techniques that are restricted to hyperplane solution spaces, we handle pBNs with multiple parameters occurring in multiple distributions and multiple CPTs.
18
+
19
+ Our experiments with our prototypical implementation [1](#page-0-0) indicate that $\epsilon$-bounded tuning of up to 8 parameters for large networks with 100 variables is feasible.
20
+
21
+ Paper organization. Section 2 includes the basic notations and Sec. 3 the preliminaries on parametric Bayesian networks. Section 4 introduces parametric Markov chains and the region verification techniques thereof. Section 5 details our main contributions and Sec. 6 our experimental results. Section 7 concludes the paper with an overview of the related studies that were not mentioned before.
22
+
23
+ <span id="page-0-0"></span><sup>1</sup> <https://github.com/baharslmn/pbn-epsilon-tuning>
24
+
25
+ <span id="page-1-0"></span>![](_page_1_Figure_0.jpeg)
26
+
27
+ Figure 1: Our running example: (a) the parametric BN COVID-19, (b) the parametric MC of COVID-19, for the topological order C; S; A; P, (c) the parametric MC of COVID-19 tailored to the evidence $Antigen\ Test = pos$ , and $PCR\ Test = pos$ , abstracted from variable valuations.
28
+
29
+ **Variables.** Let V be a set of m random variables $v_1, \ldots, v_m$ and $D_{v_i}$ the domain of variable $v_i$ . For $A \subseteq V$ , Eval(A) denotes the set of joint configurations for the variables in A.
30
+
31
+ **Parameters.** Let $X = \{x_1, \dots, x_n\}$ be a set of n real-valued parameters. A parameter instantiation is a function $u: X \to \mathbb{R}$ that maps each parameter to a value. All parameters are bounded; i.e., $lb_{x_i} \le u(x_i) \le ub_{x_i}$ for $x_i$ . Let $I_i = [lb_{x_i}, ub_{x_i}]$ . The parameter space $\mathcal{U} \subseteq \mathbb{R}^n$ of X is the set of all possible values of X, i.e., the hyper-rectangle spanned by the intervals $I_i$ for all i.
32
+
33
+ **Substitution.** Polynomials f over X are functions f: $\mathbb{R}^n \to \mathbb{R}$ where f(u) is obtained by replacing each occurrence of $x_i$ in f by $u(x_i)$ ; e.g., for $f = 2x_1^2 + x_2$ , $u(x_1) = 3$ and $u(x_2) = 2$ , f(u) = 20. Let f[u] denote such substitution.
34
+
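+ As a one-line check of the running example:
+
+ ```python
+ f = lambda x1, x2: 2 * x1 ** 2 + x2   # f = 2*x1^2 + x2
+ print(f(3, 2))                        # 20, i.e., f[u] for u(x1)=3, u(x2)=2
+ ```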
35
+ **Parametric distributions.** Let Distr(D) denote the set of probability distributions over D. Let $\mathbb{Q}[X]$ be the set of multivariate polynomials with rational coefficients over X. A parametric probability distribution is the function $\mu:D\to\mathbb{Q}[X]$ with $\sum_{d\in D}\mu(d)=1$ . Let pDistr(D) denote the set of parametric probability distributions over D with parameters in X. Instantiation u is well-formed for $\mu\in pDistr(D)$ iff $0\leq \mu(d)[u]\leq 1$ and $\sum_{d\in D}\mu(d)[u]=1$ .
36
+
37
+ **Co-variation scheme.** A co-variation scheme cov: $Distr(D) \times D \rightarrow pDistr(D)$ maps a probability distribution $\mu$ to a parametric distribution $\mu'$ based on a given $d \in D$ .
38
+
39
+ **Distance measure.** The function $d: \mathcal{U} \times \mathcal{U} \to \mathbb{R}^{\geq 0}$ is a *distance measure* if for all $u, u', u'' \in \mathcal{U}$ , it satisfies: (I) positiveness: $d(u, u') \geq 0$ , (II) symmetry: d(u, u') = d(u', u), and (III) triangle inequality: $d(u, u'') \leq d(u, u') + d(u', u'')$ .
40
+
41
+ A parametric Bayesian network (pBN) is a BN in which a subset of entries in the conditional probability tables (CPTs) are polynomials over the parameters in X. Let $par_{v_i}$ denote the set of parents for the node $v_i$ in the graph G.
42
+
43
+ **Definition 1.** The tuple $\mathcal{B}=(G,X,\Theta)$ is a parametric Bayesian network (pBN) with directed acyclic graph G=(V,W) over random variables $V=\{v_1,\ldots,v_m\}$ , set of parameters $X=\{x_1,\ldots,x_n\}$ , and parametric CPTs $\Theta=\{\Theta_{v_i}\mid v_i\in V\}$ where $\Theta_{v_i}: Eval(par_{v_i})\to pDistr(D_{v_i})$ .
44
+
45
+ <span id="page-1-2"></span><span id="page-1-1"></span>The CPT row $\Theta_v(\overline{par})$ is a parametric distribution over $D_v$ given the parent evaluation $\overline{par}$ . The CPT entry $\Theta_v(\overline{par})(d)$ , short $\theta_{(v,d,\overline{par})}$ , is the probability that $v{=}d$ given $\overline{par}$ . A pBN without parameters, i.e., $X=\emptyset$ , is an ordinary BN. A pBN $\mathcal B$ defines the parametric distribution function $\Pr_{\mathcal B}$ over $\operatorname{Eval}(V)$ . For well-formed instantiation u, $\operatorname{BN} \mathcal B[u] = (G,\Theta[u])$ is obtained by replacing the parameter $x_i$ in the parametric functions in the CPTs of $\mathcal B$ by $u(x_i)$ .
46
+
47
+ **Example 1.** Fig. 1a shows a pBN over variables $V = \{C, S, A, P\}$ (initial letters of node names) and parameters $X = \{p, q\}$ . Instantiating with $u_0(p) = 0.72$ and $u_0(q) = 0.95$ yields the BN $\mathcal{B}[u_0]$ as indicated using dashed boxes.
48
+
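+ A small sketch of such a substitution using `sympy` (our own illustration; the row shown is the parametric antigen-test row from Fig. 1a):
+
+ ```python
+ from sympy import symbols
+
+ p, q = symbols("p q")
+ row = {"pos": p, "neg": 1 - p}   # a parametric CPT row of B
+ u0 = {p: 0.72, q: 0.95}          # the instantiation from Example 1
+ print({d: f.subs(u0) for d, f in row.items()})   # {'pos': 0.72, 'neg': 0.28}
+ ```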
49
+ **pBN constraints.** Constraints $\Pr_B(H|E) \sim \lambda$ involve a hypothesis H, an evidence E, $\sim \in \{\leq, \geq\}$ and threshold $0 \leq \lambda \leq 1$ . For the instantiation $u: X \to \mathbb{R}$ ,
50
+
51
+ $$\mathcal{B}[u] \models \phi \quad \text{ if and only if } \quad \Pr_{\mathcal{B}[u]}(H \,|\, E) \sim \lambda.$$
52
+
53
+ **Sensitivity functions.** Sensitivity functions are rational functions that relate the result of a pBN query to the parameters [Castillo *et al.*, 1997; Coupé and van der Gaag, 2002].
54
+
55
+ <span id="page-1-4"></span>**Example 2.** For the pBN $\mathcal{B}_1$ in Fig. 1a, let $\phi_1$ be the constraint:
56
+
57
+ $$\Pr_{\mathcal{B}_1}(C = no \mid A = pos \land P = pos) \le 0.009.$$
58
+
59
+ This reduces to the sensitivity function
60
+
61
+ $$f_{\mathcal{B}_1,\phi_1} = \frac{361}{34900 \cdot p \cdot q + 8758 \cdot q + 361}.$$
63
+
64
+ Using instantiation u with u(p) = 0.92075 and u(q) = 0.97475, $f_{\mathcal{B}_1,\phi_1}[u] = 0.008798 \le 0.009$ , i.e., $\mathcal{B}_1[u] \models \phi_1$ .
65
+
66
+ **Higher degree sensitivity functions.** Contrary to the existing literature that is often restricted to multi-linear sensitivity functions, we are interested in analyzing pBNs with sensitivity functions of higher degrees.
67
+
68
+ <span id="page-1-3"></span>**Example 3.** Consider a variant of the COVID-19 example, where the person only takes the antigen test twice rather than taking both the antigen and PCR tests to diagnose COVID-19; see Fig. 2 for the parametric CPTs. Then,
69
+
70
+ $$\begin{split} f_{\mathcal{B}_2,\phi_2} &= \Pr_{\mathcal{B}_2}(C = \textit{no} \,|\, A1 = \textit{pos} \land A2 = \textit{pos}) \\ &= \frac{950 \cdot 9 \cdot t^2 \cdot s^2}{8850 \cdot t^2 + 349 \cdot p^2 + 950 \cdot s^2 + 151 \cdot r^2}. \end{split}$$
71
+
72
+ <span id="page-2-0"></span>
73
+
74
+ | C   | S   | Antigen Test 1: pos | Antigen Test 1: neg | Antigen Test 2: pos | Antigen Test 2: neg |
+ |-----|-----|---------------------|---------------------|---------------------|---------------------|
+ | yes | yes | [0.72] p            | [0.28] 1-p          | [0.72] p            | [0.28] 1-p          |
+ | yes | no  | [0.58] r            | [0.42] 1-r          | [0.58] r            | [0.42] 1-r          |
+ | no  | yes | [0.005] s           | [0.995] 1-s         | [0.005] s           | [0.995] 1-s         |
+ | no  | no  | [0.01] t            | [0.99] 1-t          | [0.01] t            | [0.99] 1-t          |
81
+
82
+ Figure 2: Parametric CPTs for a variant of the COVID-19 example with two antigen tests; note the inter-CPT parameter dependencies (original CPT values are shown in brackets).
83
+
84
+ **3.1. Parametrization.** Let $\Theta_{modif}$ denote the CPT entries in BN $B=(G,\Theta)$ that are explicitly changed.
85
+
86
+ **Definition 2** (BN parametrization). pBN $\mathcal{B} = (G, X, \Theta')$ is a parametrization of BN $B = (G, \Theta)$ over X w.r.t. $\Theta_{modif}$ if
87
+
88
+ $$\theta'_{(v,d,\overline{par})} = f \text{ for some } f \in \mathbb{Q}[X] \quad \text{if} \quad \theta_{(v,d,\overline{par})} \in \Theta_{modif}.$$
89
+
90
+ The parametrization $\mathcal{B}$ is *monotone* iff for each parametric entry $\theta'_{(v,d,\overline{par})} = f$ , f is a monotonic polynomial. The parametrization $\mathcal{B}$ is *valid* iff $\mathcal{B}$ is a pBN, i.e., iff $\Theta'_v(\overline{par}) \in pDistr(D_v)$ for each random variable v and its parent evaluation $\overline{par}$ . To ensure validity, upon making a CPT entry parametric, the complementary entries in the row should be parametrized, too. This is ensured by *covariation* schemes. The most established co-variation scheme in the literature is the *linear proportional* [Coupé and van der Gaag, 2002] scheme that has several beneficial characteristics [Renooij, 2014], e.g., it preserves the ratio of CPT entries.
91
+
92
+ <span id="page-2-1"></span>**Definition 3** (Linear proportional co-variation). A linear proportional co-variation over X maps the $CPT \Theta_v$ onto the parametric $CPT \Theta_v'$ based on $d_k \in D_v$ , where
93
+
94
+ $$\left\{ \begin{array}{ll} \theta'_{(v,d_k,\overline{par})} = x & \textit{for some } x \in X \\ \theta'_{(v,d_j,\overline{par})} = \frac{1 - x}{1 - \theta_{(v,d_k,\overline{par})}} \cdot \theta_{(v,d_j,\overline{par})} & \textit{for } d_j \neq d_k \in D_v. \end{array} \right.$$
95
+
96
+ Note that we allow the repetition of parameters in multiple distributions and multiple CPTs, see e.g., Fig. 2.
97
+
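+ A minimal sketch of Def. 3 in Python (our own illustration), applied to the first row of the antigen-test CPT from Fig. 2:
+
+ ```python
+ def covary_row(row, d_k):
+     """row: dict mapping each value d to its probability; returns the
+     parametric row as functions of the fresh parameter x."""
+     theta_k = row[d_k]
+     prow = {d_k: lambda x: x}
+     for d, theta in row.items():
+         if d != d_k:
+             # Scale the remaining entries so the row sums to 1 for every x.
+             prow[d] = lambda x, th=theta: (1 - x) / (1 - theta_k) * th
+     return prow
+
+ prow = covary_row({"pos": 0.72, "neg": 0.28}, "pos")
+ print(prow["pos"](0.9), prow["neg"](0.9))   # 0.9 and ~0.1 -- still sums to 1
+ ```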
98
+ <span id="page-2-2"></span>**3.2. Formal Problem Statement.** Consider BN $B=(G,\Theta)$ , its valid parametrization $\mathcal{B}=(G,X,\Theta')$ over X with constraint $\phi$ . Let $u_0$ be the original value of the parameters X in B and $d:\mathcal{U}\times\mathcal{U}\to\mathbb{R}^{\geq 0}$ a distance measure. Let $\hat{d}_0$ denote an upperbound for $d_{\{u\in\mathcal{U}\}}(u_0,u)$ . The minimum-distance parameter tuning problem is to find $u_{\min}= \underset{\{u\in\mathcal{U}\mid\mathcal{B}[u]\models\phi\}}{\operatorname{d}(u,u_0)}$ . Its generalized variant for $0\leq\epsilon\leq\hat{d}_0$ and $0\leq\eta\leq1$ is:
99
+
100
+ The $(\epsilon, \eta)$-parameter tuning problem is to find $u \in \mathcal{U}$ s.t. $\mathcal{B}[u] \models \phi$, $d(u, u_0) \leq \epsilon$, and $d(u, u_{\min}) \leq (1 - \eta) \cdot \hat{d}_0$.
103
+
104
+ $(\hat{d_0},1)$-tuning gives the minimum-distance tuning. We call $\eta$ the coverage factor that determines, intuitively speaking, the minimality of the result; we discuss this in Sec. 5. We consider two distance measures.
105
+
106
+ **Euclidean distance (EC-distance).** The EC-distance between u and u' (both in $\mathcal{U}$ ) is defined as:
107
+
108
+ $$EC(u, u') = \sqrt{\sum_{x_i \in X} \left(u(x_i) - u'(x_i)\right)^2}.$$
109
+
110
+ **Corollary 1.** For n = |X|, $\hat{d}_0 = \sqrt{n}$ is an upperbound for the EC distance of any $u_0$ from any instantiation $u \in \mathcal{U}$ .
111
+
112
+ **Corollary 2.** Let $0 \le \epsilon \le \sqrt{n}$ and $u_0(x_i) - \frac{\epsilon}{n} \le u(x_i) \le u_0(x_i) + \frac{\epsilon}{n}$ . Then, $EC(u, u_0) \le \epsilon$ .
113
+
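+ A quick numeric check of Corollary 2 (our own illustration):
+
+ ```python
+ import math
+
+ def ec_distance(u, u0):
+     return math.sqrt(sum((u[x] - u0[x]) ** 2 for x in u0))
+
+ u0 = {"p": 0.72, "q": 0.95}
+ eps, n = 0.1, len(u0)
+ u = {x: v + eps / n for x, v in u0.items()}   # move each coordinate by eps/n
+ print(ec_distance(u, u0) <= eps)              # True (distance = eps/sqrt(n))
+ ```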
114
+ **Chan-Darwiche distance** (**CD distance**) [Chan and Darwiche, 2005] is a distance measure to quantify the distance between the probability distributions of two BNs, defined as:
115
+
116
+ $$\mathit{CD}(u,u') = \ln \max_{w \in \operatorname{Eval}(V)} \frac{\Pr_{\mathcal{B}[u']}(w)}{\Pr_{\mathcal{B}[u]}(w)} - \ln \min_{w \in \operatorname{Eval}(V)} \frac{\Pr_{\mathcal{B}[u']}(w)}{\Pr_{\mathcal{B}[u]}(w)},$$
117
+
118
+ where both 0/0=1 and $\infty/\infty=1$ by definition. Whereas the EC-distance can be computed in O(n), computing CD-distances is NP-complete [Kwisthout and van der Gaag, 2008]. It has known closed forms only for limited cases, e.g., single-parameter and single-CPT pBNs [Chan and Darwiche, 2004]. Let $\Theta_v$ be the CPT that is parametrized to $\Theta_v'$ by a monotone parametrization. Let the minimum (maximum) probability entry of $\Theta_v$ be $\theta_{\min}$ ($\theta_{\max}$), parametrized to $\theta_{\min}'$ with parameter x ($\theta_{\max}'$ with y), where x ranges over $[lb_x, ub_x]$ (y over $[lb_y, ub_y]$).
119
+
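+ Computing the CD distance directly from its definition requires enumerating all joint configurations, which the following naive sketch (ours; exponential in |V|, so only usable for tiny networks) makes explicit:
+
+ ```python
+ import math
+
+ def cd_distance(pr_u0, pr_u):
+     # pr_u0, pr_u: dicts mapping each joint configuration w to Pr(w).
+     def ratio(w):
+         a, b = pr_u[w], pr_u0[w]
+         if b == 0:
+             return 1.0 if a == 0 else math.inf   # 0/0 := 1 by definition
+         return a / b
+     ratios = [ratio(w) for w in pr_u0]
+     lo, hi = min(ratios), max(ratios)
+     if lo == 0 or hi == math.inf:                # a zero was flipped
+         return math.inf
+     return math.log(hi) - math.log(lo)
+ ```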
120
+ **Corollary 3.** An upperbound for the CD distance of $u_0$ from any instantiation $u \in \mathcal{U}$ is:
121
+
122
+ $$\hat{d_0} = \ln \frac{\max\left\{\theta_{\min}'[lb_x], \theta_{\min}'[ub_x]\right\}}{\theta_{\min}} - \ln \frac{\min\left\{\theta_{\max}'[lb_y], \theta_{\max}'[ub_y]\right\}}{\theta_{\max}}$$
123
+
124
+ as derived from the single CPT closed-form [Chan, 2005].
125
+
126
+ **Corollary 4.** Let $0 \le \epsilon \le \hat{d}_0$ and $\alpha = e^{\epsilon/2}$. Let for each $\theta \in \Theta_v$, $\alpha^{-1} \cdot \theta'[u_0] \le \theta'[u] \le \alpha \cdot \theta'[u_0]$. Then, $CD(u, u_0) \le \epsilon$.
129
+
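+ The interval check behind Corollary 4 is then straightforward (our own sketch):
+
+ ```python
+ import math
+
+ def cd_bound_interval(theta_u0, eps):
+     # Admissible range for a modified entry theta'[u] so that CD(u, u0) <= eps.
+     alpha = math.exp(eps / 2)
+     return theta_u0 / alpha, theta_u0 * alpha
+
+ print(cd_bound_interval(0.72, eps=0.1))   # ~(0.6849, 0.7569)
+ ```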
130
+ Note that zero probabilities are often considered to be logically impossible cases in the BN, see e.g., [Kisa *et al.*, 2014]. Changing parameter values from (and to) 0 yields CD distance $\infty$. We consider $\epsilon$-bounded parameter tuning: we (i) forbid zero probabilities in $\Theta_{modif}$ (the CPT entries that are explicitly modified), and (ii) use the linear proportional co-variation scheme (Def. 3), which is zero-preserving [Renooij, 2014], i.e., it forbids changing the co-varied parameters from non-zero to zero. This co-variation scheme, in general, optimizes the CD-distance for single-CPT pBNs, see [Renooij, 2014].
131
+
132
+ Parametric Markov chains are an extension of Markov chains (MCs) that allow parametric polynomials as transition labels:
133
+
134
+ **Definition 4.** A parametric Markov chain (pMC) $\mathcal{M}$ is a tuple $(S, s_I, X, \mathcal{P})$ where S is a finite set of states, $s_I \in S$ is the initial state, X is a finite set of real-valued parameters, and $\mathcal{P}: S \to pDistr(S)$ is a transition function over the states.
135
+
136
+ **Example 4.** Figures 1b and 1c are examples of pMCs.
137
+
138
+ **Reachability constraints.** A reachability constraint $\varphi = (T, \sim, \lambda)$ is defined over pMC $\mathcal{M} = (S, s_I, X, \mathcal{P})$ with targets $T \subseteq S$, threshold $0 \le \lambda \le 1$, and $\sim\, \in \{\le, \ge\}$. For pMC $\mathcal{M}$ and instantiation $u: X \to \mathbb{R}$:
139
+
140
+ $$\mathcal{M}[u] \models \varphi \quad \text{if and only if} \quad \Pr_{\mathcal{M}[u]}(\lozenge T) \sim \lambda,$$
141
+
142
+ where $\Pr_{\mathcal{M}[u]}(\lozenge T)$ denotes the probability that MC $\mathcal{M}[u]$ reaches a state in T.
143
+
144
+ <span id="page-3-3"></span><span id="page-3-1"></span>![](_page_3_Figure_0.jpeg)
145
+
146
+ Figure 3: The parameter lifting steps for the COVID-19 example with parameter dependencies (Fig. 2): (a) the original (sub-)pMC $\xrightarrow{\text{Relaxation}}$ (b) pMC without parameter dependencies $\xrightarrow{\text{Substitution}}$ (c) non-parametric MDP. Note that $0.9875 \le 1-t, 1-t', 1-t'' \le 0.9925$ .
147
+
148
+ **Example 5.** Consider the pMC $\mathcal{M}_{\mathcal{B},E}$ in Fig. 1c. Let $T = \{s_{10}\}$. Then, $\Pr_{\mathcal{M}_{\mathcal{B},E}}(\lozenge s_{10}) = \frac{361}{34900 \cdot p \cdot q + 8758 \cdot q + 361}$.
+
+ Let $R \subseteq \mathbb{R}^n$ with $n = |X|$ and let
+
+ $$\mathcal{M}, R \models \varphi \quad \text{if and only if} \quad \forall u \in R.\ \mathcal{M}[u] \models \varphi.$$
154
+
155
+ We now define the parameter synthesis problems for pMC $\mathcal{M}$ and reachability constraint $\varphi$ that are relevant to our setting.
156
+
157
+ <span id="page-3-5"></span>**Definition** 5 (Region partitioning). *Partition region* R *into* $R_+$ , $R_-$ , and $R_?$ such that:
158
+
159
+ $$R_{+} \subseteq \underbrace{\{u \in R \mid \mathcal{M}[u] \models \varphi\}}_{satisfying instantiations} \qquad R_{-} \subseteq \underbrace{\{u \in R \mid \mathcal{M}[u] \models \neg \varphi\}}_{refuting instantiations}$$
160
+
161
+ and $R_? = R \setminus (R_+ \cup R_-)$ with $||R_?|| \le (1-\eta) \cdot ||R||$ for some given coverage factor $0 \le \eta \le 1$ .
162
+
163
+ The sub-region $R_?$ denotes the fragment of R that is *inconclusive* for $\varphi$ . This fragment should cover at most fraction $1-\eta$ of R's volume. Region partitioning is exact if $R_? = \emptyset$ .
164
+
165
+ **Definition 6** (Region verification). For the region R and the specification $\phi$ , the problem is to check whether:
166
+
167
+ $$\underbrace{\mathcal{M}, R \models \varphi}_{\textit{R is accepting}} \ \ \textit{or} \ \ \underbrace{\mathcal{M}, R \models \neg \varphi}_{\textit{R is rejecting}} \ \ \textit{or} \ \ \underbrace{\mathcal{M}, R \not\models \varphi \land \mathcal{M}, R \not\models \neg \varphi}_{\textit{R is inconclusive}}.$$
168
+
169
+ **4.1. Parameter Lifting.** The parameter lifting algorithm (PLA) [Quatmann *et al.*, 2016] is an abstraction technique that reduces region verification to a probabilistic model checking problem for which efficient algorithms exist [Katoen, 2016]. It first removes parameter dependencies by making all parameters unique. Then it considers for each parameter $x_i$ only its bounds $lb_{x_i}$ and $ub_{x_i}$ within the region R. This yields a non-parametric Markov Decision Process (MDP).
170
+
171
+ **Definition 7.** A Markov decision process (MDP) is a tuple $\mathcal{M} = (S, s_I, Act, \mathcal{P})$ with a finite set $S$ of states, an initial state $s_I \in S$ , a finite set $Act$ of actions, and a (partial) transition probability function $\mathcal{P} : S \times Act \rightarrow Distr(S)$ .
172
+
173
+ While resolving the non-determinism in the obtained MDP, a trade-off between maximizing and minimizing $x_i$ may occur. This happens in particular when parameter $x_i$ repeats in the outgoing transitions of multiple states, see, e.g., parameter $t$ in Fig. 3a. PLA handles this issue by a relaxation step that introduces intermediate parameters; see, e.g., Fig. 3b, where parameter $t$ in the outgoing edges of state C=no, A=pos is replaced by $t'$. The relaxation step yields an over-approximation of the region verification problem.
174
+
175
+ <span id="page-3-4"></span><span id="page-3-2"></span>**Example 6.** Consider the pMC in Fig. 3(a) and region $t \in [0.0075, 0.0125]$ . Fig. 3b shows the pMC after relaxation, e.g., parameter t in the outgoing transitions of state C=no, A=pos is replaced by t'. Fig. 3c shows the MDP obtained by substituting the parameters with their extremal values. Consider e.g., state C=no, S=no. Its outgoing dotted (blue) edges have probability 0.0075 and 1-0.0075 obtained from lb(t). The probabilities 0.0125 and 1-0.0125 of the dashed (purple) edges stem from ub(t).
176
+
177
+ After *substitution*, region verification reduces to a simple model-checking query that over-approximates the original higher-order polynomial, e.g., Example 3. The region is accepting for $\varphi$ if the resulting MDP satisfies $\varphi$ for all schedulers. If all schedulers satisfy $\neg \varphi$, the region is rejecting. If the region is neither accepting nor rejecting (i.e., inconclusive), it is partitioned into smaller subregions that are more likely to be either accepting or rejecting. Partitioning ends when at least a fraction $\eta$ of the region is conclusive.
178
+
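+ PLA itself performs the substitution on the MDP level, but for intuition the following sketch (ours, not part of the paper's toolchain) mimics region verification on the closed-form solution function of Example 5: since that function is monotonically decreasing in both $p$ and $q$, its extrema over a rectangular region lie at two corners, so one evaluation per corner classifies the region.
+
+ ```python
+ def reach_prob(p: float, q: float) -> float:
+     # Solution function of Example 5 (monotonically decreasing in p and q).
+     return 361.0 / (34900.0 * p * q + 8758.0 * q + 361.0)
+
+ def classify_region(p_lo: float, p_hi: float,
+                     q_lo: float, q_hi: float, lam: float = 0.009) -> str:
+     """Verify a rectangular region against phi: Pr(<> s10) <= lam."""
+     f_max = reach_prob(p_lo, q_lo)  # largest value over the box (corner)
+     f_min = reach_prob(p_hi, q_hi)  # smallest value over the box (corner)
+     if f_max <= lam:
+         return "accepting"          # M, R |= phi
+     if f_min > lam:
+         return "rejecting"          # M, R |= not phi
+     return "inconclusive"           # split R and recurse
+
+ print(classify_region(0.92075, 0.960375, 0.97475, 1.0))  # "accepting"
+ ```
+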
179
+ **4.2. Parametric Markov Chain for pBNs.** To enable the use of PLA for parameter tuning of pBNs, we map pBNs onto pMCs as proposed in [Salmani and Katoen, 2021a; Salmani and Katoen, 2021b]; see the next example.
180
+
181
+ **Example 7.** Consider the acyclic pMC in Fig. 1b, obtained from the pBN in Fig. 1a for the topological ordering $\varrho: C < S < A < P$. Initially, all variables are "don't care". The initial state can evolve into C=yes and C=no; its transition probabilities 0.05 and 0.95 come from the CPT entries of C. Generally, at "level" $i$ of the pMC, the outgoing transitions are determined by the CPT of the $(i{+}1)$-st variable in $\varrho$.
182
+
183
+ pBN constraints relate to reachability constraints on the corresponding pMC.
184
+
185
+ **Example 8.** Consider the pBN $\mathcal{B}_1$ from Fig. 1a and the query $\Pr_{\mathcal{B}}(C = no \mid A = pos \land P = pos)$ from Example 2. Let $\mathcal{M}_{\mathcal{B}}^{\varrho}$ be the pMC in Fig. 1b. The query coincides with
186
+
187
+ $$\frac{1-\operatorname{Pr}_{\mathcal{M}_{\mathcal{B}}^{\varrho}}(\lozenge\, C{=}\textit{yes} \lor A{=}\textit{neg} \lor P{=}\textit{neg})}{1-\operatorname{Pr}_{\mathcal{M}_{\mathcal{B}}^{\varrho}}(\lozenge\, A{=}\textit{neg} \lor P{=}\textit{neg})} = \frac{361}{34900 \cdot p \cdot q + 8758 \cdot q + 361} = f_{\mathcal{B},\phi}; \ \text{see Ex. 2.}$$
188
+
189
+ Our approach to the parameter tuning problem for a pBN and constraint $\phi$ consists of two phases. The $\epsilon$ -close partitioning exploits parameter lifting on the pMC for reachability constraint $\varphi$ of $\phi$ . We start with a rectangular region enclosing the
190
+
191
+ <span id="page-4-2"></span>![](_page_4_Figure_0.jpeg)
192
+
193
+ Figure 4: The iterative procedure of region-based parameter tuning for n=2 and $\gamma=1/2$ .
194
+
195
+ original instantiation $u_0$ . This region is iteratively expanded if it rejects $\varphi$ . Inconclusive regions are partitioned. Any iteration that finds some accepting subregion ends the first phase. The second phase (Alg. 2) extracts a minimal-change instantiation from the set of accepting sub-regions.
196
+
197
+ # Method
198
+
199
+ ```
+  1  \mathcal{M}_{\mathcal{B}} \leftarrow \text{computeMC}(\mathcal{B})
+  2  \varphi \leftarrow \text{reachSpec}(\phi)
+  3  \epsilon_0 \leftarrow \hat{d}_0 \cdot \gamma^{K-1}
+  4  \epsilon \leftarrow \epsilon_0
+  5  function minChgTuning(\mathcal{M}_{\mathcal{B}}, u_0, \varphi, \eta):
+  6      while \epsilon \le \hat{d}_0 do
+  7          R_+ \leftarrow \epsilon\text{-partitioning}(\mathcal{M}_{\mathcal{B}}, \varphi, \epsilon, \eta)
+  8          if R_+ = \emptyset then
+  9              \epsilon \leftarrow \epsilon \cdot \gamma^{-1}
+ 10          else
+ 11              return getMinDistInst(R_+, u_0)
+ 12          end if
+ 13      end while
+ 14      return Infeasible
+ 15  function \epsilon\text{-partitioning}(\mathcal{M}_{\mathcal{B}}, \varphi, \epsilon, \eta):
+ 16      R_\epsilon \leftarrow \text{makeRegion-d}(u_0, \epsilon)
+ 17      R_+, R_-, R_? \leftarrow \text{PLA}(\mathcal{M}_{\mathcal{B}}, R_\epsilon, \varphi, \eta)
+ 18      return R_+
+ 19  function makeRegion-EC(u_0, \epsilon):
+ 20      R_\epsilon \leftarrow \times_{x_j \in X} [u_0(x_j) - \epsilon/n,\; u_0(x_j) + \epsilon/n]
+ 21      return R_\epsilon
+ 22  function makeRegion-CD(u_0, \epsilon):
+ 23      \alpha \leftarrow e^{\epsilon/2}
+ 24      R_\epsilon \leftarrow \times_{x_j \in X} [u_0(x_j) \cdot \alpha^{-1},\; u_0(x_j) \cdot \alpha]
+ 25      return R_\epsilon
+ ```
233
+
234
+ <span id="page-4-0"></span>**5.1.** Obtaining Epsilon-Close Subregions. Alg. the main thread of our approach. Its inputs are the pBN $\mathcal{B} = (V, W, X, \Theta)$ , the initial instantiation $u_0 : X \to \mathbb{R}$ , and the constraint $\phi$ . It starts with obtaining the pMC $\mathcal{M}_{\mathcal{B}}$ of the pBN and reachability constraint $\varphi$ of $\phi$ (lines 1–2). The output is either (i) $u_0$ if $\mathcal{M}[u_0] \models \varphi$ , or (ii) the instantiation u with a minimal distance from $u_0$ or (iii) no instantiation if $\varphi$ is infeasible<sup>2</sup>. The hyper-parameters of the algorithm are the coverage factor $0 < \eta < 1$ , the region expansion factor $0 < \gamma < 1$ , and the maximum number of iterations $K \in \mathbb{N}$ . The hyper-parameters $\gamma$ and K steer the iterative procedure. They initially determine the distance bound $\epsilon$ , see line 3. The bound $\epsilon$ determines region $R_{\epsilon}$ , i.e., how far the region bounds deviate from $u_0$ . PLA then verifies $R_{\epsilon}$ . If $R_{\epsilon}$ is rejecting, $\epsilon$ is extended by the factor $\gamma^{-1}$ (l. 9). Figure 4 visualizes the procedure for $\gamma=1/2$ and n=2. At iteration i, (a) PLA is invoked on $R_{\epsilon}$ and either (b) $R_{+} \neq \emptyset$ or (c) $R_{+} = \emptyset$ . For case (b), the iterative procedure ends and Alg. 2 is called to obtain an instantiation in $R_{+}$ that is closest to $u_{0}$ . For case (c), the region is expanded by factor $\gamma^{-1}$ and passed to PLA, see (d). Note that the loop terminates when the distance bound $\epsilon$ reaches its upper bound $\hat{d}_{0}$ (l. 2). We refer to Sec. 3 Corollary 1 and 3 for computing $\hat{d}_{0}$ .
235
+
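+ The loop of Alg. 1 is compact enough to render directly in Python. The sketch below is illustrative only: `pla`, `make_region`, and `get_min_dist_inst` are passed in as callables and are hypothetical names, not a real API (in practice, `pla` would be the parameter-lifting engine of a probabilistic model checker such as Storm).
+
+ ```python
+ def min_chg_tuning(pla, make_region, get_min_dist_inst,
+                    u0, d0_hat, gamma=0.5, K=6, eta=0.99):
+     """Iterative epsilon-close tuning loop (sketch of Alg. 1).
+
+     pla(region, eta) -> (R_plus, R_minus, R_unknown) is an assumed
+     region-partitioning oracle.
+     """
+     eps = d0_hat * gamma ** (K - 1)          # initial distance bound (l. 3)
+     while eps <= d0_hat:                     # until eps reaches d0_hat (l. 6)
+         region = make_region(u0, eps)        # R_eps around u0 (l. 16)
+         r_plus, _, _ = pla(region, eta)      # partition R_eps (l. 17)
+         if r_plus:                           # accepting subregions found
+             return get_min_dist_inst(r_plus, u0)  # second phase, Alg. 2
+         eps /= gamma                         # expand by gamma^{-1} (l. 9)
+     return None                              # "Infeasible" (l. 14)
+ ```
+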
236
+ **Region expansion schemes.** Region $R_{\epsilon}$ is determined by the factor $\epsilon$, see line 16. The methods makeRegion-EC and makeRegion-CD detail this for each of our distance measures. For the EC distance (line 20), the parameters have an absolute deviation from $u_0$; we refer to Corollary 2, Sec. 3. For the CD distance (lines 23 and 24), the deviation is relative to the initial value of the parameter; we refer to Corollary 4, Sec. 3. Such deviation schemes ensure the $\epsilon$-closeness of $R_{\epsilon}$ both for the EC and the CD distance, i.e., all instantiations in $R_{\epsilon}$ have at most distance $\epsilon$ from $u_0$.
237
+
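+ A minimal sketch of the two constructors follows; the per-parameter deviation $\epsilon/n$ for EC is taken verbatim from line 20, and intersecting the result with $[0,1]$ is left implicit, as in the pseudocode.
+
+ ```python
+ import math
+
+ def make_region_ec(u0: dict, eps: float) -> dict:
+     """EC scheme: absolute deviation eps/n per parameter (l. 20)."""
+     n = len(u0)
+     return {x: (v - eps / n, v + eps / n) for x, v in u0.items()}
+
+ def make_region_cd(u0: dict, eps: float) -> dict:
+     """CD scheme: relative deviation by alpha = e^{eps/2} (l. 23-24)."""
+     alpha = math.exp(eps / 2.0)
+     return {x: (v / alpha, v * alpha) for x, v in u0.items()}
+
+ print(make_region_cd({"p": 0.72, "q": 0.95}, 0.1))
+ ```
+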
238
+ **Remark.** For pBNs with a single parameter, already checked regions can be excluded in the next iterations. Applying such a scheme to multiple parameters may, however, not yield a minimal-change instantiation. To see this, let $u_0(x) = 0.15$ and $u_0(y) = 0.15$ and take $\epsilon_0 = 0.05$. Assume $R_{\epsilon_0} = [0.1, 0.2] \times [0.1, 0.2]$ is rejecting. Limiting the search in the next iteration to $R_{\epsilon_1} = [0.2, 0.4] \times [0.2, 0.4]$ may omit accepting sub-regions that are at a closer distance from $u_0$, e.g., if the region $[0.1, 0.2] \times [0.2, 0.4]$ includes some satisfying sub-regions. This is why we include the already-analyzed intervals starting from $u_0$ in the next iterations. As noted in our experimental results, this does not deteriorate performance: the unsuccessful intermediate iterations are normally very fast, as they are often settled by a single model-checking call in which the model checker determines that the entire region is rejecting, so no partitioning is needed.
239
+
240
+ **5.2. Obtaining a Minimal-Distance Instantiation.** Once a non-empty $R_+ \subseteq R_\epsilon$ is found, it remains to find the instantiation $u_+$ in $R_+$ that is closest to $u_0$. Region $R_+$ includes infinitely many instantiations, yet a minimal-distance instantiation is computable in $O(k \cdot n)$ for $n = |X|$ and the regions $R_+ = \{R_{+,1}, \cdots, R_{+,k}\}$: (i) PLA ensures that the subregions $R_{+,1}$ to $R_{+,k}$ are rectangular and mutually disjoint. (ii) Due to the triangle inequality of the distance measure, a minimum-distance instantiation can be picked from each bounded subregion independently. Alg. 2 details finding $u_+$. In the first step, we pick the $u_{+,i}$ from $R_{+,i}$ that has minimal distance from $u_0$ (l. 3–18). The idea is simple: for every $x_j \in X$, three cases are possible, see Fig. 5. Let $lb_{x_j}$ be the lower bound for
241
+
242
+ <span id="page-4-1"></span> $<sup>^2 {\</sup>rm That}$ is, in the $\eta=100\%$ of the checked parameter space, no satisfying instantiation has been found.
243
+
244
+ ```
+  1  function getMinDistInst(R_+, u_0):
+  2      // R_+ = \{R_{+,1}, \cdots, R_{+,k}\}
+  3      U_+ \leftarrow \emptyset
+  4      for i \leftarrow 1 to k do
+  5          // R_{+,i} = \times_{x_j \in X} [lb_{x_j}, ub_{x_j}]
+  6          for x_j \in X do
+  7              if lb_{x_j} \le u_0(x_j) \le ub_{x_j} then
+  8                  u_{+,i}(x_j) \leftarrow u_0(x_j)
+  9              end if
+ 10              if u_0(x_j) < lb_{x_j} then
+ 11                  u_{+,i}(x_j) \leftarrow lb_{x_j}
+ 12              end if
+ 13              if ub_{x_j} < u_0(x_j) then
+ 14                  u_{+,i}(x_j) \leftarrow ub_{x_j}
+ 15              end if
+ 16          end for
+ 17          U_+ \leftarrow U_+ \cup \{u_{+,i}\}
+ 18      end for
+ 19      return argmin_{u_+ \in U_+} d(u_+, u_0)
+ ```
267
+
268
+ <span id="page-5-0"></span>parameter $x_j$ in the region $R_{+,i}$ and $ub_{x_j}$ be its upper bound. (1) If $u_0(x_j) \in [lb_{x_j}, ub_{x_j}]$ , we set $u_{+,i}(x_j) = u_0(x_j)$ . This yields the least distance, i.e., 0 in the dimension of $x_j$ , see Fig. 5 (left). (2) $u_0(x_j) < lb_{x_j}$ , see Fig. 5 (middle). Then $lb_{x_j}$ has the least distance from $u_0(x_j)$ in the dimension $x_j$ , i.e., $|lb_{x_j}-u_0(x_j)|<|a-u_0(x_j)|$ for every value $a\in [lb_x,ub_x]$ and similarly for CD-distance, $\ln\frac{lb_{x_j}}{u_0(x_j)}<\ln\frac{a}{u_0(x_j)}$ . In this case, we set $u_{+,i}(x_j)=lb_{x_j}$ to minimize the distance in dimension x. (3) By symmetry, for $u_0(x_j)>ub_{x_j}$ , we set $u_{+,i}(x_j)=ub_{x_j}$ ; see Fig. 5 (right). It remains to compute the distance for each candidate in $U_+=\{u_{+,1},\cdots,u_{+,k}\}$ and pick $u_+$ closest to $u_0$ (1. 19).
269
+
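+ The three cases amount to clamping each coordinate of $u_0$ into the box. A sketch (ours): the EC distance is taken here to be the sum of squared per-parameter deviations, an assumption that reproduces the distance 0.040913125 reported in the worked example below.
+
+ ```python
+ def get_min_dist_inst(boxes: list, u0: dict) -> dict:
+     """Closest instantiation to u0 among rectangular subregions (Alg. 2)."""
+     def clamp(v, lb, ub):                 # cases (1)-(3) in one step
+         return min(max(v, lb), ub)
+
+     def ec_dist(u, v):                    # sum of squared deviations
+         return sum((u[x] - v[x]) ** 2 for x in u)
+
+     candidates = [{x: clamp(u0[x], lb, ub) for x, (lb, ub) in box.items()}
+                   for box in boxes]       # one candidate u_{+,i} per box
+     return min(candidates, key=lambda u: ec_dist(u, u0))
+
+ u0 = {"p": 0.72, "q": 0.95}
+ box1 = {"p": (0.92075, 0.960375), "q": (0.97475, 1.0)}
+ print(get_min_dist_inst([box1], u0))      # {'p': 0.92075, 'q': 0.97475}
+ ```
+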
270
+ <span id="page-5-1"></span>![](_page_5_Figure_3.jpeg)
271
+
272
+ Figure 5: Obtaining the minimal-change value for parameter $x \in X$ .
273
+
274
+ **Example.** We apply the entire algorithm to the pBN in Fig. 1a, with constraint $\phi = \Pr(\text{C=no} \mid \text{A=pos} \land \text{P=pos}) \leq 0.009$, $u_0 : p \mapsto 0.72,\ q \mapsto 0.95$, and $\eta = 0.99$. The pMC $\mathcal{M}_{\mathcal{B},E}$ is given in Fig. 1c and the reachability constraint is $\varphi : \Pr(\lozenge s_{10}) \leq 0.009$. We take $\hat{d}_0 = \sqrt{n}$, $\gamma = 1/2$, and $\epsilon_0 = 1/16$.
275
+
276
+ - 1. The initial region $R_{\epsilon}$ is obtained by $\epsilon = 1/8$: $[0.72 - \tfrac{1}{8}\sqrt{2},\ 0.72 + \tfrac{1}{8}\sqrt{2}] \times [0.95 - \tfrac{1}{8}\sqrt{2},\ 0.95 + \tfrac{1}{8}\sqrt{2}]$.
277
+ - 2. Running PLA on $R_{\epsilon}$ gives no sub-region accepting $\varphi$ .
278
+ - 3. Expanding the region by the factor $\gamma^{-1}=2$ yields: $[0.72 - \tfrac{1}{4}\sqrt{2},\ 0.72 + \tfrac{1}{4}\sqrt{2}] \times [0.95 - \tfrac{1}{4}\sqrt{2},\ 0.95 + \tfrac{1}{4}\sqrt{2}]$.
279
+ - 4. Running PLA gives 12 regions accepting $\varphi$ , e.g., $R_{+,1}:[0.92075,0.960375]\times[0.97475,1]$ and $R_{+,2}:[0.960375,0.97028175]\times[0.9431875,0.9495].$
280
+ - 5. Running Alg. 2 on $R_{+,1}$ through $R_{+,12}$ gives minimal distance candidates $u_{+,1}$ through $u_{+,12}$ ; e.g., $u_{+,2}(p) = lb_p = 0.960375$ as $u_0(p) < lb_p$ and $u_{+,2}(q) = ub_q = 0.9495$ as $ub_q < u_0(q)$ ; see Fig. 5.
281
+ - 6. Running Alg. 2 using the distance measure of choice—here: EC-distance—for the candidates $U_+$ returns $u_+$ closest to $u_0$ : $u_+(p)=0.92075$ and $u_+(q)=0.97475$ ; distance 0.040913125.
282
+
283
+ Let us briefly discuss the hyper-parameters $\eta$ , $\gamma$ , and $K$. The hyper-parameter $\eta$ is the coverage factor for the region partitioning. Our region partitioning algorithm, i.e., parameter lifting (l. 17, Alg. 1), works based on this factor: the procedure stops when $R_?$ covers at most $(1 - \eta) \cdot ||R||$ of the entire region $R$, e.g., for $\eta = 0.99$, it continues until at least 99% of $R$ is either rejecting or accepting and at most 1% is unknown. This relates $\eta$ to the approximation bounds of our algorithm, see Problem statement 3. Intuitively, the coverage factor $\eta$ means that if the algorithm returns a minimal-distance value $D(u, u_0) = d$, then a smaller distance than $d$ that also works may still exist within the unknown fraction (at most $1-\eta$ ) of the region and has not been found. One can thus consider $\eta$ as a confidence factor for the minimality of the results. The hyper-parameter $\gamma$ specifies the factor by which the region is expanded at each iteration, see l. 9, Alg. 1. The hyper-parameters $\gamma$ and $K$ specify the size of the initial region, see lines 3 and 16 of Alg. 1. Our experiments (not detailed in the paper) with $\gamma \in \{0.2, 0.5, 0.8\}$ and $K \in \{2, \dots, 12\}$ reveal that $\gamma = 1/2$ and $K = 6$ give the best balance. Large values of $\gamma$ (0.8) and small $K$ lead to unnecessary computations in the initial iterations for simple cases, i.e., when small perturbations of the parameters suffice to satisfy the constraint. Small values of $\gamma$ (0.2) lead to overly large regions in later iterations due to the expansion by $\gamma^{-1}$ .
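+
+ For concreteness, the $\epsilon$ -schedule induced by the recommended setting ( $\gamma = 1/2$ , $K = 6$ , and $\hat{d}_0 = \sqrt{n}$ with $n = 2$ as in the example above) can be computed as follows; this is a sketch of ours showing that $K$ bounds the number of iterations.
+
+ ```python
+ import math
+
+ n, gamma, K = 2, 0.5, 6
+ d0_hat = math.sqrt(n)                 # upper bound on the EC distance
+ eps = d0_hat * gamma ** (K - 1)       # eps_0 (l. 3 of Alg. 1)
+ schedule = []
+ while eps <= d0_hat:
+     schedule.append(round(eps, 6))
+     eps /= gamma                      # expansion by gamma^{-1} (l. 9)
+ print(schedule)
+ # [0.044194, 0.088388, 0.176777, 0.353553, 0.707107, 1.414214]
+ ```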
2305.10786/main_diagram/main_diagram.drawio ADDED
@@ -0,0 +1 @@
 
 
1
+ <mxfile host="app.diagrams.net" modified="2023-03-03T08:00:21.747Z" agent="5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/109.0.0.0 Safari/537.36" etag="2hhc2oNmr2znMEPCuvbx" version="20.8.23" type="google"><diagram id="OwgkZXBl_3l1am2_Top0" name="第 4 页">7V3dk6I4EP9rrNp9wOJbfdz52nuYvdq62arbfbqKEDU7QDyIo3N//SUkKBBUdFCi4lbNSudD6F9302m6Q8+6D1dfYzCffcM+DHqm7q961kPPNIcjm/5lhHdOMEcWJ0xj5HOSsSG8oP+gIOqCukA+TAodCcYBQfMi0cNRBD1SoIE4xstitwkOir86B1Pxi/qG8OKBAErd/kY+mXGqk+v8B0TTGSnSQ5D1FOOTGfDxMvdD1mPPuo8xJvxbuLqHAWNcxhQ+0dOW1vVZxTAidQbMfz4GcOT5z7MR9r/+GIf/mKYmZnkDwUJcrThZ8p5dfowXkQ/ZJEbPulvOEIEvc+Cx1iXFm9JmJAxE8wQFwT0OcJyOtSYO+8foOCI5Ov9QekJi/ApzLW76ESOeQIgCJjU/UEglwNT/hEv69y8cgoh2EecOYwJXW5lirFlN5RPiEJL4nXbJBpgDPkTIpjUS8C03SJu2oM1yMA8EDQjhmq6n3kBAvwgUDkBEBgD6VBzFIY7JDE9xBILHDfVuA5FOjzZ9njGeC2B+Q0LehW6BBcFF2OAKkZ9seN8RR7/EZOz7wyp/8C4OSpDq6SeFFMTkC9M72hDhCGa0J8Q4wSeK/KyHF4AkQd6PGYp4g+hmHCIDjEW7JYByFC9iD+7gvLBK9FSnkOzo51ZLVAwDQNBb8TwaFw9LUtgfVIGinumCkOki//sdJ4ggXCa/wGnI2GLqj+EY+j6KpskH9f08WmoVtdQeVGipWaGl5qm01L4ULVVS09yamjZsU9OcNiE28gCv4d5uiM+M3QcwEUO/Y5QaIqHhtlHScGtUnILLihhVQnZ9GseD7Upm9QUGEw0QQhmUGtIGvSIfwOHEq/R+vCEcT9qzq+aoZbs6uBy7Si83N4gd/srmYwebYemR0vb4o3a2WqfpbbroW7slqTmxTg8lnX6CVExM/QnHSxD7l+j5WKNhyxo6unYNvbp1TRZH2etuWW26W9lZFkIR7r8LFh65QyQV+XSWMWRBHET5v26WRJIqHClKUPFWK+DL35cFCQRoGjH4KJshpd8x9UUeCL6IhhD5firUVfahKOhnMhGDYd90ClbC0V3JSlBL0ndkO2GYp8KzKrTkBgwWH70V4MpgZKhpAoAvtEcAJxzdDc7uVPyfTpTMKROrZhoD73WagqF5HF82XzwdfzIdygR6RXr+y+edv8JgrPwV1qAlqc1i0xvGfFU9kXP/6Zn+IakIOPefs5kpY/nkxR+kZH5lRfI4ruiY8jKj3rIWmFbJ2ZBVYGDL8m+d6j6ZKVa90Kp++CLCgUPfrlpEDM2xdcYQql1286pCqOf0UAw5RvZBzp+Hj65qfGw3yHWEo3eom7cHVNZFnKdhNOsBNuneOTXdO3uL/J3Jv3MuUyuHqmmlHKq6BD46ump8HFwkH8t3W9tom49ymOVK/RxXNc6PLlOCh4rx0ayKPKjPx7JFbZ+PByWTKMNHyaJabfOx8ZVjO/axdT42vg5U9c5Utqitc96+SAmWLGrrfLzQNVPZojpt87HpNZOPYuileQHWA12gs4tqy8q2ztsLXUeVbWbrfLyVdZRkZVvnvLyOQkTi/S09TLEdswhRRb6lUQHRyR6mWPISjT8NvmWQBqqBJK//2LP6W4ZopBpE8tKSZ1LcMEiOoRpI8rr11u9H5SqdoVMLomz91DxE8gK3ux+VQLL1eiCdTo/k1fOt34/KEJltQyQvzG/+fiSBZFck850VpKoV/u5MvjFescQ4FE15atwiSiBPUdNLSXNvIP6kab/nmod9qK0beUaeHlBh0LKLlLvnmsWAXLZfrmvEqjj45GkHQ/Qe49iHoruQlx6r3I2nSKQg6iKlj+KXlsuVqJgiPQlYtgcji2QWBhWdWBvHELzylvSrBhjf88M0LmabHmxcnk2csfz04hAEuTZ6eQjQ/6kYA7KIWYH0zn4emG/rsswx2E5znRnrIaEKoyVUIdbXnR+J4/kMRGJKk9O2JnDyFkR1KSIlFqYtJKZTTejsZSyozKf127kfSXlbOK31XPRKxq+ITsfm5BZCE1pT6HdUeig/Ux96OAYs9KeRGfJeI5iI00MRIijjTrlvDsmd/XKnk+9XzigNwDuVW1M3tmWCdurZqWennm2p5wwCn2mnfliithKBcWtQTtSSfUSnwv2wT+Z+VAXGj7ZvLZosjeB5SdJFwxgTgsOWTdrB1RcfrovQ+8b+SohdenMVz0Ok/Hm75echWem1ApXvvZoFfqNChV+u4G9rFS49+g5jimm6KryujPBMgPbvZDKqlszzZITb8kMdvS/XHFyHkpeLO9pWclt+WJOa46tkfjmHv3Xmy49h9L51ncyXEv9bZ36NtEC2+pgfwiKx9xwYZzPoDbDOcJ1yZa+VnX2Oe1sKe0/17MM+KLmvxk4KARjD4G695ivHdA+/K4PYE4eOXqEZ+3dfqYXdDuGSEW0Rrho5hHQaNE+2BcFzUIFkzndYnKAVg7d57JrbQLB5ELNp+oOiUpr9eo9RjJOZtBr5jR3GB2FsqQdyjUTLDuRDQDYd9UCukQWqhnNi6gMVnZODauJuzTnZss9Xe3A5cgigs2nHgaisc+LUKAvsMD4IY/WckyxhvAO5KZAVdE6ci4mcmPZIQefE6SIn+4RLKeeki5w0BaK6zkkXOWkaYwWdky5y0jDIKjonlxM5GRkqOidd5GSfcKnknLhd5KQpEJV1TtwuctI0xuo5J24XOWkYZAWdE/dyIiemtJv8et/sFu92lx452Z+Ndbw6uMpFTtwuctIUiOo6J13kpGmMFXROushJwyCr6JwMZUzPUO/xgdczXUWVhisCLvurNOxqiTpPlYZbIy6UgzXbXuGZ6fb6NbTWQ1biJm/IsMUKMP1NPz15z4a0kE5ob/ZKbnlTiMLLu7KXb7MDHySztejSljm7jHA1ZW8v7yOcDPqI+tZJH/j+cQLXhB+e1QhkEcKK3Z3PaiWyXZHObSXO/UK1D+iZGLrvNYjlSlluKaTXIO6fqFwicfT7FOnh5j30vDvThW/Yh6zH/w==</diagram></mxfile>
2305.10786/main_diagram/main_diagram.pdf ADDED
Binary file (62.5 kB). View file
 
2305.10786/paper_text/intro_method.md ADDED
@@ -0,0 +1,39 @@


1
+ # Introduction
2
+
3
+ Pre-trained language models (PLM) such as BERT [@DBLP:conf/naacl/DevlinCLT19], RoBERTa [@DBLP:journals/corr/abs-1907-11692], and ELECTRA [@DBLP:conf/iclr/ClarkLLM20] have achieved great success in a wide variety of natural language processing tasks. However, @DBLP:conf/emnlp/ReimersG19 finds that sentence embeddings from the original BERT underperform traditional methods such as GloVe [@DBLP:conf/emnlp/PenningtonSM14]. Typically, an input sentence is first embedded by the BERT embedding layer, which consists of token embeddings, segment embeddings, and position embeddings. The output is then encoded by the Transformer encoder and the hidden states at the last layer are averaged to obtain the sentence embeddings. Prior studies identify the anisotropy problem as a critical factor that harms BERT-based sentence embeddings, as sentence embeddings from the original BERT yield a high similarity between any sentence pair due to the narrow cone of learned embeddings [@DBLP:conf/emnlp/LiZHWYL20].
4
+
5
+ Prior approaches for improving sentence embeddings from PLMs fall into three categories. The first category of approaches *does not require any learning (that is, learning-free)*. @DBLP:conf/emnlp/JiangJHZWZWHDZ22 argues that the anisotropy problem may be mainly due to the static token embedding bias, such as token frequency and case sensitivity. To address these biases, they propose the *static remove biases avg.* method which removes top-frequency tokens, subword tokens, uppercase tokens, and punctuations, and uses the average of the remaining token embeddings as sentence representation. However, this approach does not use the contextualized representations of BERT and may not be effective for short sentences as it may exclude informative words. The prompt-based method (*last manual prompt*) [@DBLP:conf/emnlp/JiangJHZWZWHDZ22] uses a template to generate sentence embeddings. An example template is *This sentence: "\[X\]" means \[MASK\] .*, where \[X\] denotes the original sentence and the last hidden states in the \[MASK\] position are taken as sentence embeddings. However, this method has several drawbacks. (1) It increases the input lengths, which raises the computation cost. (2) It relies on using the \[MASK\] token to obtain the sentence representation, hence unsuitable for PLMs not using \[MASK\] tokens (e.g., ELECTRA). (3) The performance heavily depends on the quality of manual prompts which relies on human expertise (alternatively, OptiPrompt [@DBLP:conf/naacl/ZhongFC21] requires additional unsupervised contrastive learning).
6
+
7
+ The second category of approaches *fixes the parameters of the PLM* and improves sentence embeddings through post-processing methods that *require extra learning*. BERT-flow [@DBLP:conf/emnlp/LiZHWYL20] addresses the anisotropy problem by introducing a flow-based generative model that transforms the BERT sentence embedding distribution into a smooth and isotropic Gaussian distribution. BERT-whitening [@DBLP:journals/corr/abs-2103-15316] uses a whitening operation to enhance the isotropy of sentence representations. Both BERT-flow and BERT-whitening require Natural Language Inference (NLI)/Semantic Textual Similarity (STS) datasets to train the flow network or estimate the mean values and covariance matrices as the whitening parameters.
8
+
9
+ The third category *updates parameters of the PLM by fine-tuning or continually pre-training the PLM using supervised or unsupervised learning*, which is computationally intensive. For example, Sentence-BERT (SBERT) [@DBLP:conf/emnlp/ReimersG19] fine-tunes BERT using a siamese/triplet network on NLI and STS datasets. SimCSE [@DBLP:conf/emnlp/GaoYC21] explores contrastive learning. Unsupervised SimCSE uses the same sentences with different dropouts as positives and other sentences as negatives, and supervised SimCSE explores NLI datasets and treats entailment pairs as positives and contradiction pairs as hard negatives.
10
+
11
+ In this work, we first analyze BERT sentence embeddings. (1) We use a parameter-free probing method to analyze BERT and Sentence-BERT (SBERT) [@DBLP:conf/emnlp/ReimersG19] and find that the compositionality of informative words is crucial for generating high-quality sentence embeddings as from SBERT. (2) Visualization of BERT attention weights reveals that certain self-attention heads in BERT are related to informative words, specifically self-attention from a word to itself (that is, the diagonal values of the attention matrix). Based on these findings, we propose a simple and efficient approach, **Di**agonal A**tt**ention Po**o**ling (**Ditto**), to improve sentence embeddings from PLM without requiring any learning (that is, *Ditto is a learning-free method*). We find that Ditto improves various PLMs and strong sentence embedding methods on STS benchmarks.
12
+
13
+ <figure id="fig:both_figures" data-latex-placement="!ht">
14
+ <figure id="fig:figure1">
15
+ <embed src="heatmap_bert.pdf" />
16
+ <figcaption>BERT</figcaption>
17
+ </figure>
18
+ <figure id="fig:figure2">
19
+ <embed src="heatmap_sbert.pdf" />
20
+ <figcaption>SBERT</figcaption>
21
+ </figure>
22
+ <figcaption>The heatmap shows the impact matrix for the sentence “For those who follow social media transitions on Capitol Hill, this will be a little different.”. The impact matrices are computed using BERT (bert-base-uncased) and SBERT (bert-base-nli-stsb-mean-tokens) on Hugging Face, respectively.</figcaption>
23
+ </figure>
24
+
25
+ **Observation 1: The compositionality of informative words is crucial for high-quality sentence embeddings.** Perturbed masking [@DBLP:conf/acl/WuCKL20] is a parameter-free probing technique for analyzing PLMs (e.g., BERT). Given a sentence $\mathbf{x} = [x_1, x_2, \ldots, x_N]$, perturbed masking applies a two-stage perturbation process on each pair of tokens $(x_i, x_j)$ to measure the impact that a token $x_j$ has on predicting the other token $x_i$. Details of perturbed masking can be found in Appendix [6.1](#subsec:pertuberd masking){reference-type="ref" reference="subsec:pertuberd masking"}. Prior works use perturbed masking to recover syntactic trees from BERT [@DBLP:conf/acl/WuCKL20]. Different from prior works, we use perturbed masking to analyze the original BERT and a strong sentence embedding model, supervised Sentence-BERT (SBERT) [@DBLP:conf/emnlp/ReimersG19]. Figure [3](#fig:both_figures){reference-type="ref" reference="fig:both_figures"} shows the heatmap representing the impact matrix $\mathcal{F}$ for an example sentence in the English PUD treebank [@DBLP:conf/conll/ZemanPSHNGLPPPT17]. The y-axis represents $x_i$ and the x-axis represents $x_j$. A higher value in $\mathcal{F}_{ij}$ indicates that a word $x_j$ has a greater impact on predicting another word $x_i$. Comparing the impact matrices of BERT and SBERT, we observe that the impact matrix of SBERT exhibits prominent vertical lines on informative tokens such as "social media", "Capitol Hill", and "different", which implies that informative tokens have a high impact on predicting other tokens, hence masking informative tokens could severely affect predictions of other tokens in the sentence. In contrast, BERT does not show this pattern. This observation implies that the compositionality of informative tokens could be a strong indicator of high-quality sentence embeddings of SBERT. Furthermore, we compute the correlations between the impact matrix and TF-IDF [@sparck1972statistical] which measures the importance of a word, and report results in Table [\[tab:perturbed\]](#tab:perturbed){reference-type="ref" reference="tab:perturbed"}. We find that the impact matrix of SBERT has a much higher correlation with TF-IDF than the impact matrix of BERT, which is consistent with the observation above. Notably, ELECTRA performs poorly on STS tasks and shows a weak correlation with TF-IDF. Consequently, we hypothesize that sentence embeddings of the original BERT and ELECTRA may be biased towards uninformative words, hence limiting their performance on STS tasks.
26
+
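+ For readers who want to reproduce the impact matrices, a minimal sketch of the two-stage perturbation follows; it tracks the formulation of @DBLP:conf/acl/WuCKL20 (exact details are in Appendix [6.1](#subsec:pertuberd masking){reference-type="ref" reference="subsec:pertuberd masking"}, and this version may differ in minor respects such as subword handling).
+
+ ```python
+ import torch
+ from transformers import AutoModel, AutoTokenizer
+
+ def impact_matrix(sentence: str, model_name: str = "bert-base-uncased"):
+     """Two-stage perturbed masking: F[i, j] is the impact of token x_j
+     on the representation of a masked token x_i."""
+     tok = AutoTokenizer.from_pretrained(model_name)
+     model = AutoModel.from_pretrained(model_name).eval()
+     ids = tok(sentence, return_tensors="pt")["input_ids"][0]
+     n = ids.size(0)
+     F = torch.zeros(n, n)
+     with torch.no_grad():
+         for i in range(1, n - 1):                  # skip [CLS]/[SEP]
+             x_i = ids.clone()
+             x_i[i] = tok.mask_token_id             # stage 1: mask x_i
+             h_i = model(x_i.unsqueeze(0)).last_hidden_state[0, i]
+             for j in range(1, n - 1):
+                 x_ij = x_i.clone()
+                 x_ij[j] = tok.mask_token_id        # stage 2: also mask x_j
+                 h_ij = model(x_ij.unsqueeze(0)).last_hidden_state[0, i]
+                 F[i, j] = torch.dist(h_i, h_ij)    # Euclidean distance
+     return F
+ ```
+
+ Each sentence requires $O(N^2)$ forward passes, which is why the impact matrix is far more expensive to compute than the attention weights used by Ditto below.
+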
27
+ **Observation 2: Certain self-attention heads of BERT correspond to word importance.** Although SBERT has a higher correlation with TF-IDF than BERT as verified in Observation 1, BERT still shows a moderate correlation. Thus, we hypothesize that the semantic information of informative words is already encoded in BERT but has yet to be fully exploited. Prior research [@DBLP:conf/blackboxnlp/ClarkKLM19] analyzes the attention mechanisms of BERT by treating each attention head as a simple, no-training-required classifier that, given a word as input, outputs the other word that it most attends to. Certain attention heads are found to correspond well to linguistic notions of syntax and coreference. For instance, heads that attend to the direct objects of verbs, determiners of nouns, objects of prepositions, and coreferent mentions are found to have remarkably high accuracy. We believe that the attention information in BERT needs to be further exploited. We denote a particular attention head by $<$layer$>$-$<$head number$>$ ($l$-$h$), where for a BERT-base-sized model, *layer* ranges from 1 to 12 and *head number* ranges from 1 to 12. We visualize the attention weights of each head in each layer of BERT and focus on informative words. We then discover that self-attention from a word to itself (that is, the diagonal value of the attention matrix, named **diagonal attention**) of certain heads may be related to the importance of the word. As shown in Figure [4](#fig:attention){reference-type="ref" reference="fig:attention"}, the informative words "social media transitions", "hill", and "little" have high diagonal values of the attention matrix of head 1-10. Section [4](#subsec:analysis){reference-type="ref" reference="subsec:analysis"} will demonstrate that diagonal attentions indeed have a strong correlation with TF-IDF weights.
28
+
29
+ <figure id="fig:attention" data-latex-placement="t">
30
+ <embed src="attention.pdf" style="width:90.0%" />
31
+ <figcaption>Illustration of attention weights for BERT head 1-10 (on the left) and head 11-11 (on the right). The darkness of a line represents the value of the attention weights. The top-5 diagonal values of the attention matrix are colored blue.</figcaption>
32
+ </figure>
33
+
34
+ <figure id="fig:method" data-latex-placement="t">
35
+ <embed src="method.pdf" style="width:90.0%" />
36
+ <figcaption>The diagram of our proposed diagonal attention pooling (Ditto) method.</figcaption>
37
+ </figure>
38
+
39
+ Inspired by the two observations in Section [2](#sec:observation){reference-type="ref" reference="sec:observation"}, we propose a novel learning-free method called **Di**agonal A**tt**ention Po**o**ling (**Ditto**) to improve sentence embeddings for PLMs, illustrated in Figure [5](#fig:method){reference-type="ref" reference="fig:method"}. Taking BERT as an example, the input to the Transformer encoder is denoted as $\mathbf{h}^{0} = [h^{0}_1, \ldots, h^{0}_N]$, and the hidden states at each Transformer encoder layer are denoted as $\mathbf{h}^l=[h^l_1, \ldots, h^l_N], l \in \{1,\ldots, L\}$. Typically, the hidden states of the last layer of the PLM are averaged to obtain the fixed-size sentence embeddings, as $\frac{1}{N}{\sum_{i=1}^N{h^{L}_i}}$ (denoted as *last avg.*). Alternatively, we can also average the static word embeddings $\frac{1}{N}{\sum_{i=1}^N{h^{0}_i}}$ (denoted as *static avg.*), or average the hidden states from the first and last layers $\frac{1}{2N}{\sum_{i=1}^N{(h^{0}_i + h^{L}_i)}}$ (denoted as *first-last*) to obtain the sentence embeddings. Ditto weights the hidden states with diagonal attention of a certain head. For example, to obtain the sentence embeddings from the first-last hidden states of BERT using Ditto, we first obtain the diagonal values $[\mathcal{A}_{11}, \ldots, \mathcal{A}_{NN}]$ of the attention matrix $\mathcal{A}$ for head $l$-$h$ of BERT, where $l$ and $h$ are treated as hyperparameters and are optimized on a development set based on the STS performance. Then, we compute $\frac{1}{2}{\sum_{i=1}^N{\mathcal{A}_{ii}(h^{0}_i + h^{L}_i)}}$ as the sentence embeddings[^2]. Note that the impact matrix (Section [2](#sec:observation){reference-type="ref" reference="sec:observation"}) correlates well with TF-IDF (as shown in Table [\[tab:perturbed\]](#tab:perturbed){reference-type="ref" reference="tab:perturbed"}) and hence may also improve sentence embeddings. However, the learning-free Ditto is much more efficient than computing the impact matrix, which is computationally expensive.
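+
+ A minimal PyTorch sketch of Ditto with Hugging Face `transformers` follows (ours, for illustration only); it omits special-token and attention-mask handling as well as any normalization discussed in the footnote, and the default head 1-10 is merely an example of a hyperparameter choice.
+
+ ```python
+ import torch
+ from transformers import AutoModel, AutoTokenizer
+
+ def ditto_embedding(sentence: str, layer: int = 1, head: int = 10,
+                     model_name: str = "bert-base-uncased"):
+     """Ditto: weight first-last hidden states by the diagonal
+     self-attention A_ii of head <layer>-<head> (1-indexed)."""
+     tok = AutoTokenizer.from_pretrained(model_name)
+     model = AutoModel.from_pretrained(
+         model_name, output_hidden_states=True, output_attentions=True
+     ).eval()
+     enc = tok(sentence, return_tensors="pt")
+     with torch.no_grad():
+         out = model(**enc)
+     h0 = out.hidden_states[0][0]                   # h^0: embeddings, [N, D]
+     hL = out.hidden_states[-1][0]                  # h^L: last layer, [N, D]
+     attn = out.attentions[layer - 1][0, head - 1]  # attention matrix, [N, N]
+     w = torch.diagonal(attn)                       # diagonal attention A_ii
+     return 0.5 * (w.unsqueeze(-1) * (h0 + hL)).sum(dim=0)
+
+ print(ditto_embedding("This is a test sentence.").shape)  # torch.Size([768])
+ ```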
2306.04403/main_diagram/main_diagram.drawio ADDED
@@ -0,0 +1 @@
 
 
1
+ <mxfile host="Electron" modified="2022-09-19T14:56:33.277Z" agent="5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) draw.io/20.2.3 Chrome/102.0.5005.167 Electron/19.0.11 Safari/537.36" etag="X93poftWhFskQyDVkcy5" version="20.2.3" type="device"><diagram id="zopdTJojhskT9D_KdMwu" name="Seite-1">7V1tk9o4Ev41fBxKr7b8cWcmuc3d7tVWzdXe7n1JEfAM3BFMgSdh9tefDJbHlgSWwbJlxVSqAhpsoJ+nW92tVmuCH74e/rabbZe/Jot4PUFgcZjgxwlCOASI/5eNvJ1GIAbkNPKyWy3ysfeBp9VfcT4I8tHX1SLeV96YJsk6XW2rg/Nks4nnaWVsttsl36tve07W1U/dzl7yTwTvA0/z2TpW3vbv1SJd5qMBKL3953j1skzlv3ydFe8+DeyXs0XyvfRh+MMEP+ySJD09+3p4iNeZ+IRgTjf6eOavxTfbxZvU5ILDdv/yEi8P+/ifn8CnD/Onpx2+Ez/722z9mv/kCeIQkf1nNKEfJuE9nISPp6H8Z6RvQjhpfOCffL9Mv675AORPZ+vVy4Y/n/PvFO/4wLd4l664OH/K//B1tVhkl9/v4v3qr9mX460Af71NVpv0CBa9n9DH7F6vabI/ESK79T7dJf+LH5J1wu/7uEk22V2eV+u1PJRs0pxHJBKv8+8sfiz/UvHhrBhhAQ7ndZx8jdPdG3+LuACE9HRNzukoOr38/s4PFOWftCxRI8zHZjklX4pbv4PGn+S46TH8/SN++Ptqu/tH+tN/fln/tv22vf/5jnPuDIgT+rBdfeYg8ifpMk5nHNL74eKJAz2eCngaiM/iSRiowIkFTiU8MdLgGVjDEyJjPEcsy1jeQTytKidmKpqIdYsmPoPm7585jJvXEcCSccW4Hj7QJXyUadCToPq+XKXx03Y2z15/556QhNt+e3JNnleHeCEJdoLwM5vH83kGW5LO0lWSQXkXgQIW4XhgBSh+8RdGCc3eu5jtl9ndjx/ZBhRhUDWMfGAqpr4SHJBq4CgG28dDN9e1i8diFrPna/EI5iz+8twSBIUz6RQEkEaKyOMFd5vzl7mt2CWvm8WRkZnokl26TF6SzWz9S5JscyT+G6fpW25PMotUxSk+rNI/8suz539mz7l9P716PJT+9PgmXmz4LzxeNA0QFgPZlXdgCgAUI+9XH19VLv8t3q24pDITexzUGb0S4uD4OF68+CkLOd5FwEc+rjLhPgIdV4orq7zi0/8F+uyT1908vgAPEXHSbPcSp5dwJLmxzcC7yMddvOaK8K0aE7XOK/HFfzhTG2EX1TzU+S2y2iuUL4FRNQDnLWVV9OSCQb5eKcRcVkv1krx14hZjxhqRf8JvmQdWsuwsrDo6KHiHXNzm9KPyK8uBvXIzbpYqXhMIlZudrIFysyMzip9/A1kM3CRXyFJrGAPkGFlgKJEFX0+WkFUjJBRFnZMlGg5Zai2LcIacIQtjVXwpvcGywKplQQHqnCy6ZMhgyeKYZQlhi9NQEPY+DQXBcMjiCgcCKuk4BDdwAEaS9YGdGwyDMMIVDtQbjMAtsnhnMOhwyOIMB4Ck4+gGD8PE+ljmABlQ7OIKBwAlkureMGkAIC3adG8HoP00vzNpZaJklfPwsr9cEx6Qk++IClIka6B0C1P1o4o5D7pVPZNswHq92u4zzG9UwiLlrihhC4pFUVWxUMQUxQrFAnWlNGRKz8N/k2Jp63uGKVwgOQoI9y5ck0hjEMIlVHLrQf/MNZkShiFcOYlG1IqxroVrsrYzDOHKzBUrPT0Kl3gjXJF8LXwMdVWyY+HCAKrCdKT4YGCFAijHqL3lf72DGeAzNXkdOZjIIMfjZV0BVSq4eg/0kEmCfhC2MWDuOfuhN8IN3HP2TbKEwxCunMN3wNn3JwcQOufsY29yAApz+3f2sTc5gED8FIecfVF976CzX6o0xnzqL1Uav5cV+1tkLPZpGBQZ5/rgSJExNsh8+LLQE4ay/+9CXTE2SY+Maz3lCJpF8gLp9autTFRfXli9txyQY4OAfBhzJouqcyaKQp2Gde2TeBNkM9mbRsgF+XoTZzMkh4JO8NebUDtkcjTIXJCvN9G2wl8aOSBf4k3AzYT3XjgexAX5Il/kCwF10YEg3iwPQ4Bd9CCIN0vEEXPRgyDeRBgRcdGDIN5EGAp/3fAgvIkwIBA94NxyIQa0BdWRJBRkrMWafyYXhXRf8y9cr5EDDfb+KFXnN3DAZCORbQ74E0qEyMVQgvoTSoRKBboLoQT1JpSAgVKF7gSDvYklYKBUorsQTFBvggmVwU5EE6FJPtIRR6d+R3wupdrKA9e2zgdReJYbzbdNS6W9Oj/MtuvkT4waKPscXIhRqUmMOgwBM+aib6rtmz1QASsF0y74poFJhD0QAStF0y4wOBxQLZVxj8b6/nyOJUWY1AyDm7tbWq5JbgLqvD+f6H/ogdqGSjm+CxFPaBJSDkZtoanaIsfUVm7XAMkNuUxlHanzXGbgT6pNmW2diKMDf1JtTHTccSriMQh4vNyoC1l1zndgoy70JjbCQGqI2/9GXehNXIQiyVD3v1FXe3rSMIVLJAcFqGaha+F642MgIDGX9G8WvPEvFOb2v1EXerOMh0Ri2KGNuvWy9WVTJXLOVfNmvoPEOU/Nm+lOPlnPAUfNm9lO7gTUfz8Vb+Y6IjtpvXdT8aZYRWZt/y6aNwUFUXUec8BBi0ychPcVh/l6tt+v5o0WHQqJVh2y4pjY/LTZi31HHFkawFTSDeGYNF0WwFHNjSwvCUQm/os12F2HOZQXgK6FWT4qt3OYm5UB/GAwQ1IHj/EqHwU9A91s4fhHA5q2Zbch7dtwq27mdDpVsHb9kHiKxev8O8OzHGqSJpDBgVC7eKlhmtx6uD0HS+e8crNM9p/BhH6YhPd3cBI+nsYGhyMPu6o4gjZwhFUYIzW2w1CDYWgNQ13fmRHDi/G5VPbSP4a6GoIThvCE4QihtD6OayFEUZcQQuFkndfDEcMqhncETEWbe3dg1K0KVVRxNKdyTo46Zk6h6N4w2lPjnm/AOXuqywKdMPzXqIhntoQxXMQU7iijLs9TAXLEUcKRIlYLIugURJPuoS+75HVr/vMRmCebTTxPc4j4GLhsozCeVsmNNXUMmAS6VQoofH4LsjHp/GmhAbxMw7a6rOuzZpd5Yfs4JhRSGfvqLWyf92nUfnQE+bZUKOwZZKMeqOZbnGTsbjzm4EZwOkpnQ9lI30kgmqazub2vuZN1OrTba+taOlhR1Y7YEClT9pVs0NypYzKciykn9GG7+szdWP4kXcbpbLjebHGIywVvtsY/rHdyq/CVA0ymIWJgzaXV9gvOYPv9M0dy8zpieB7D90VqvVKWUdUFKhZRbb2qoGy2xcpgyWhHx4fOnAfHh
2fmvB1jLu2ULV53ZsxbL0kYWdI6S7gD2DNLTMpmu2GJ9BcfWKIECgq+xpVsyp1o10wxqQIemdI7UzgxeqeKSbZwpMp1VNGYgmupgpRsc/dUadbvfaRKT1TBqoHqmiq0WebSJlUejg+fqCIa1p0D15QnRKGJvChlnSbNMpojTfqgCQT98wSNPLHGEygV/F9PFBj1TpTW92qNRLFAlLB/oriTl/WPKAYOqHHYI9XGd08Ud1KzI1EuBT2kb6K4k531jygIyyt711sU6QSi7oniTnJ2JMologQ9E6XlI2cu1/ss4ufZ6zpVuHAs/trzn5qqhDwO58V++i6ug+CMWstTLBkOriwoMMjR2i6MpgxNaVV3CIZTkc4sN3mDYMoiFdhQbSLeXkmGSYvvqubUtMmRq10UM/vx42OkWVNHUvFM+Z2mqlTg3aDb7DRgEeYoERTwD9OgQqYYMcJw1vQhCIloqdkdQgbBmccIkWgKAIn4P8JAlPWqrOgSB1Ddf3HEDDFKAwCjiBLIVMwImTJ7mBnESR5jFrApD2be1UreGDLFmuYKJopWvtQCagZBi8eosXAaoABhGlEKaCCfGkOmEQxKD3IVglydGS4/rIFpsJTvMZiiIfzbRbE3BO8cByzsSzQJCwe0o+dyk7raE3Fy+K1v5BIH6ghLLTe3se2wA882ct0Iu2kzrFtDPtAz7EZHg/S2tcuxlmRSwzgcSkbYPKNTcyPrqLuTI25U5+0YH5TlQwLBdYyIlDtFXRsCd7LBIyWORoL2zglBAQc40ajQ0jFOSNOGAqMpI6RZg+Ku6eBOUctIhyxs7JsP7tSuDJgPslpfzQeE+uaDOyUqIx9KrZB644M7UUajAgO3+BAAyQ1UcDQnRPU+8m5z63xwJ8QY+TB5b1zYGx/c2fI3YD5ApdLoakJAqSSte0a4s7NvZMTxVkHPjBBffGTETfXNda6geeZaTkp1z4iB7tXzlBEY9c8INDKihbgT4XYYASPUOyPcyVSOjMiWMnpnBGw2a7hYw+4WOaB05CiVm2wbmwvpRp2XQITnjraYHU8ogR43PGx+psUZ0PtrbBjqJv8TenBET0IPBc7B12ymHu1ybXoQsmkkB+3RNLxyI1EAo+q95HIG69ZZt8j4bp0Hft6JFTUnyuYvraKTbhXdZDHIm/pS63WjDNMpid4f0j4PdO1ZqTX3ZdI3ta79P1QxunXWBKEd1tTct3PWNFtY+rGLmgOl4Z+yz6uB9ai9l3Xsmy0hjQ5lfUmr6FEknAlwbaQfKm1mOzcNIoBV3Mm/+ODxNRhDxvJmtkCuOgWqKwltnVBx2O5fXuLlYR//8xP49GH+9LTDd7p0zXkFN8jtNj9wRlJNjYx701ZQd9h9g8MIau7Unq5qcT6X2FEPlqEf7v0LATX9LwxYJi5AQiuKeVhVW4y6VFuDRE/NNuDZfhvPs6/5vDpkyivv7V3MYvY8P6p3ynUoyaC8i1Q9xjqdD+Ys/vJcaPht0i9ORRJ9XDQHMkKdFSgGWxd/s1rtOrfovPiqoiYXUHLelMqNlzCQ7mFsSpFoMCxMqUjEdmRKTQqz1+vVdp+BfqMWFpOnooWt2DVpnzSKmKJZoe6g02Kwdc0yCTaHIVxIq+frItFpvz/hmkRzAxGulBUpZtr+hGu0+XsY0kWwGqRioLa56Vq6UBWmhVRlfFilf+SXZ8//zJ7zH3V69Xgo/enxbaJkNQr3UweVhbRn/Xxf7c6gFy3Ss+Hmwtmw2nnHYi3Chd9l1Ul+ZvN4fq2T/IVRQrP3Lmb75ZG0sCX9xVIjDAc8ZmjiMg/DOhIgTeyaAym7to7++KSYSjM76N0nNWrwMRDpQuecUqNeGQORrsxdB7xSnc+fpfb2x4qugVd8HFeRLKT7qjBGqo5gqJk95aLN1kAsVhrM+v8aN/tt6FQEbjQA1otoOP1/a8h5gQFnwcFVq040q0pZW0QVEmwTEgOXz19IaDjF1Twl1aOCVVSQTVSG09LXAiphMA2kI0WMUQlsojKclr0WUImUmQUbo0IsotJwv+y4zpOhhJR1HgUg46UeJYUEmHIz2wvnXZa/tVcC5TBDMIvaLKqHWG69Ya+qXs+QcyHWzIcQy1JFBY8Feqyq14fKJrtnhpGIIOKru7OwC/1Z2cVh1YV0YHms4R7t0VM51pHIBbQZQlf7KgjIvaT4WNfeij+Z6qJA0p1VFn8S1Ur5hM1FFv5ylyRpmehcHMtfk0WcveP/</diagram></mxfile>
2306.04403/main_diagram/main_diagram.pdf ADDED
Binary file (37.9 kB). View file