diff --git a/.gitattributes b/.gitattributes index 172b9c40a3504cf2a4fa24e4943372c02936d4a7..6329c8d6989da606b3153e530493257e0c22d1ae 100644 --- a/.gitattributes +++ b/.gitattributes @@ -314,3 +314,259 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text 2302.00695/paper.pdf filter=lfs diff=lfs merge=lfs -text 2306.01669/paper.pdf filter=lfs diff=lfs merge=lfs -text 2007.02863/paper.pdf filter=lfs diff=lfs merge=lfs -text +2312.05551/paper.pdf filter=lfs diff=lfs merge=lfs -text +2112.06170/paper.pdf filter=lfs diff=lfs merge=lfs -text +2304.04015/paper.pdf filter=lfs diff=lfs merge=lfs -text +2309.09582/paper.pdf filter=lfs diff=lfs merge=lfs -text +2506.11045/paper.pdf filter=lfs diff=lfs merge=lfs -text +2111.04239/paper.pdf filter=lfs diff=lfs merge=lfs -text +2310.14170/paper.pdf filter=lfs diff=lfs merge=lfs -text +2305.12498/paper.pdf filter=lfs diff=lfs merge=lfs -text +2107.08829/paper.pdf filter=lfs diff=lfs merge=lfs -text +2210.16613/paper.pdf filter=lfs diff=lfs merge=lfs -text +2310.12086/paper.pdf filter=lfs diff=lfs merge=lfs -text +2404.12886/paper.pdf filter=lfs diff=lfs merge=lfs -text +2402.07891/paper.pdf filter=lfs diff=lfs merge=lfs -text +2210.14128/paper.pdf filter=lfs diff=lfs merge=lfs -text +2110.08851/paper.pdf filter=lfs diff=lfs merge=lfs -text +2404.01588/paper.pdf filter=lfs diff=lfs merge=lfs -text +2310.17569/paper.pdf filter=lfs diff=lfs merge=lfs -text +2211.12254/paper.pdf filter=lfs diff=lfs merge=lfs -text +2404.01992/paper.pdf filter=lfs diff=lfs merge=lfs -text +2306.13596/paper.pdf filter=lfs diff=lfs merge=lfs -text +2110.14633/paper.pdf filter=lfs diff=lfs merge=lfs -text +2302.11002/paper.pdf filter=lfs diff=lfs merge=lfs -text +2208.01838/paper.pdf filter=lfs diff=lfs merge=lfs -text +2302.05527/paper.pdf filter=lfs diff=lfs merge=lfs -text +2205.15544/paper.pdf filter=lfs diff=lfs merge=lfs -text +2302.14442/paper.pdf filter=lfs diff=lfs merge=lfs -text +2403.11310/paper.pdf filter=lfs diff=lfs merge=lfs -text 
+2001.04753/paper.pdf filter=lfs diff=lfs merge=lfs -text +2110.11945/paper.pdf filter=lfs diff=lfs merge=lfs -text +2106.03921/paper.pdf filter=lfs diff=lfs merge=lfs -text +2408.04962/paper.pdf filter=lfs diff=lfs merge=lfs -text +2308.11488/paper.pdf filter=lfs diff=lfs merge=lfs -text +2205.13662/paper.pdf filter=lfs diff=lfs merge=lfs -text +2307.16368/paper.pdf filter=lfs diff=lfs merge=lfs -text +2209.01814/paper.pdf filter=lfs diff=lfs merge=lfs -text +2407.09271/paper.pdf filter=lfs diff=lfs merge=lfs -text +2208.09170/paper.pdf filter=lfs diff=lfs merge=lfs -text +2312.01397/paper.pdf filter=lfs diff=lfs merge=lfs -text +2310.13862/paper.pdf filter=lfs diff=lfs merge=lfs -text +2204.12516/paper.pdf filter=lfs diff=lfs merge=lfs -text +2111.12082/paper.pdf filter=lfs diff=lfs merge=lfs -text +2509.10833/paper.pdf filter=lfs diff=lfs merge=lfs -text +2305.13948/paper.pdf filter=lfs diff=lfs merge=lfs -text +2409.11256/paper.pdf filter=lfs diff=lfs merge=lfs -text +2312.00834/paper.pdf filter=lfs diff=lfs merge=lfs -text +2401.06838/paper.pdf filter=lfs diff=lfs merge=lfs -text +2207.01583/paper.pdf filter=lfs diff=lfs merge=lfs -text +2207.12389/paper.pdf filter=lfs diff=lfs merge=lfs -text +2307.16399/paper.pdf filter=lfs diff=lfs merge=lfs -text +2307.14392/paper.pdf filter=lfs diff=lfs merge=lfs -text +2502.14096/paper.pdf filter=lfs diff=lfs merge=lfs -text +2401.17992/paper.pdf filter=lfs diff=lfs merge=lfs -text +2207.10667/paper.pdf filter=lfs diff=lfs merge=lfs -text +2503.05238/paper.pdf filter=lfs diff=lfs merge=lfs -text +2112.08544/paper.pdf filter=lfs diff=lfs merge=lfs -text +2403.18423/paper.pdf filter=lfs diff=lfs merge=lfs -text +2109.13016/paper.pdf filter=lfs diff=lfs merge=lfs -text +2211.02527/paper.pdf filter=lfs diff=lfs merge=lfs -text +2311.17081/paper.pdf filter=lfs diff=lfs merge=lfs -text +2006.04166/paper.pdf filter=lfs diff=lfs merge=lfs -text +2205.14794/paper.pdf filter=lfs diff=lfs merge=lfs -text +2208.10024/paper.pdf 
filter=lfs diff=lfs merge=lfs -text +2105.14573/paper.pdf filter=lfs diff=lfs merge=lfs -text +2406.10180/paper.pdf filter=lfs diff=lfs merge=lfs -text +2501.13484/paper.pdf filter=lfs diff=lfs merge=lfs -text +2405.19946/paper.pdf filter=lfs diff=lfs merge=lfs -text +2212.03131/paper.pdf filter=lfs diff=lfs merge=lfs -text +2310.13236/paper.pdf filter=lfs diff=lfs merge=lfs -text +2111.15362/paper.pdf filter=lfs diff=lfs merge=lfs -text +2006.03465/paper.pdf filter=lfs diff=lfs merge=lfs -text +2403.01092/paper.pdf filter=lfs diff=lfs merge=lfs -text +2405.13740/paper.pdf filter=lfs diff=lfs merge=lfs -text +2403.03077/paper.pdf filter=lfs diff=lfs merge=lfs -text +2403.07262/paper.pdf filter=lfs diff=lfs merge=lfs -text +2309.14859/paper.pdf filter=lfs diff=lfs merge=lfs -text +2204.03688/paper.pdf filter=lfs diff=lfs merge=lfs -text +2303.05952/paper.pdf filter=lfs diff=lfs merge=lfs -text +2401.11824/paper.pdf filter=lfs diff=lfs merge=lfs -text +2405.16148/paper.pdf filter=lfs diff=lfs merge=lfs -text +2205.11028/paper.pdf filter=lfs diff=lfs merge=lfs -text +2503.21730/paper.pdf filter=lfs diff=lfs merge=lfs -text +2503.14337/paper.pdf filter=lfs diff=lfs merge=lfs -text +2505.20728/paper.pdf filter=lfs diff=lfs merge=lfs -text +2401.08295/paper.pdf filter=lfs diff=lfs merge=lfs -text +2510.12697/paper.pdf filter=lfs diff=lfs merge=lfs -text +2111.09858/paper.pdf filter=lfs diff=lfs merge=lfs -text +2212.01448/paper.pdf filter=lfs diff=lfs merge=lfs -text +2402.06894/paper.pdf filter=lfs diff=lfs merge=lfs -text +2104.08225/paper.pdf filter=lfs diff=lfs merge=lfs -text +2308.16182/paper.pdf filter=lfs diff=lfs merge=lfs -text +2108.08421/paper.pdf filter=lfs diff=lfs merge=lfs -text +2303.17859/paper.pdf filter=lfs diff=lfs merge=lfs -text +2503.01723/paper.pdf filter=lfs diff=lfs merge=lfs -text +2404.10150/paper.pdf filter=lfs diff=lfs merge=lfs -text +2203.08788/paper.pdf filter=lfs diff=lfs merge=lfs -text +2406.04866/paper.pdf filter=lfs diff=lfs 
merge=lfs -text +2503.16997/paper.pdf filter=lfs diff=lfs merge=lfs -text +2306.08984/paper.pdf filter=lfs diff=lfs merge=lfs -text +2005.08465/paper.pdf filter=lfs diff=lfs merge=lfs -text +2304.12654/paper.pdf filter=lfs diff=lfs merge=lfs -text +2502.07244/paper.pdf filter=lfs diff=lfs merge=lfs -text +2209.15486/paper.pdf filter=lfs diff=lfs merge=lfs -text +2403.01273/paper.pdf filter=lfs diff=lfs merge=lfs -text +2203.11654/paper.pdf filter=lfs diff=lfs merge=lfs -text +2006.04325/paper.pdf filter=lfs diff=lfs merge=lfs -text +2407.18414/paper.pdf filter=lfs diff=lfs merge=lfs -text +2109.00602/paper.pdf filter=lfs diff=lfs merge=lfs -text +2308.11991/paper.pdf filter=lfs diff=lfs merge=lfs -text +2306.05584/paper.pdf filter=lfs diff=lfs merge=lfs -text +2310.02751/paper.pdf filter=lfs diff=lfs merge=lfs -text +2304.06140/paper.pdf filter=lfs diff=lfs merge=lfs -text +2311.15303/paper.pdf filter=lfs diff=lfs merge=lfs -text +2305.17262/paper.pdf filter=lfs diff=lfs merge=lfs -text +2212.12735/paper.pdf filter=lfs diff=lfs merge=lfs -text +2310.00164/paper.pdf filter=lfs diff=lfs merge=lfs -text +2304.09704/paper.pdf filter=lfs diff=lfs merge=lfs -text +2210.12288/paper.pdf filter=lfs diff=lfs merge=lfs -text +2211.12759/paper.pdf filter=lfs diff=lfs merge=lfs -text +2209.10732/paper.pdf filter=lfs diff=lfs merge=lfs -text +2102.12301/paper.pdf filter=lfs diff=lfs merge=lfs -text +2308.13250/paper.pdf filter=lfs diff=lfs merge=lfs -text +2009.09099/paper.pdf filter=lfs diff=lfs merge=lfs -text +2311.14294/paper.pdf filter=lfs diff=lfs merge=lfs -text +2402.02429/paper.pdf filter=lfs diff=lfs merge=lfs -text +2003.11562/paper.pdf filter=lfs diff=lfs merge=lfs -text +2212.03241/paper.pdf filter=lfs diff=lfs merge=lfs -text +2502.09977/paper.pdf filter=lfs diff=lfs merge=lfs -text +2406.11820/paper.pdf filter=lfs diff=lfs merge=lfs -text +2212.00921/paper.pdf filter=lfs diff=lfs merge=lfs -text +2509.16060/paper.pdf filter=lfs diff=lfs merge=lfs -text 
+2303.09914/paper.pdf filter=lfs diff=lfs merge=lfs -text +2405.08969/paper.pdf filter=lfs diff=lfs merge=lfs -text +2010.00784/paper.pdf filter=lfs diff=lfs merge=lfs -text +2302.03985/paper.pdf filter=lfs diff=lfs merge=lfs -text +2312.08200/paper.pdf filter=lfs diff=lfs merge=lfs -text +2308.11272/paper.pdf filter=lfs diff=lfs merge=lfs -text +2410.06846/paper.pdf filter=lfs diff=lfs merge=lfs -text +2509.23898/paper.pdf filter=lfs diff=lfs merge=lfs -text +2203.12560/paper.pdf filter=lfs diff=lfs merge=lfs -text +2210.10664/paper.pdf filter=lfs diff=lfs merge=lfs -text +2403.11131/paper.pdf filter=lfs diff=lfs merge=lfs -text +2405.16877/paper.pdf filter=lfs diff=lfs merge=lfs -text +2211.00164/paper.pdf filter=lfs diff=lfs merge=lfs -text +2403.07691/paper.pdf filter=lfs diff=lfs merge=lfs -text +2204.01172/paper.pdf filter=lfs diff=lfs merge=lfs -text +2205.14962/paper.pdf filter=lfs diff=lfs merge=lfs -text +2407.00911/paper.pdf filter=lfs diff=lfs merge=lfs -text +2303.10482/paper.pdf filter=lfs diff=lfs merge=lfs -text +2404.11889/paper.pdf filter=lfs diff=lfs merge=lfs -text +2503.06442/paper.pdf filter=lfs diff=lfs merge=lfs -text +2409.17986/paper.pdf filter=lfs diff=lfs merge=lfs -text +2504.17261/paper.pdf filter=lfs diff=lfs merge=lfs -text +2412.14510/paper.pdf filter=lfs diff=lfs merge=lfs -text +2205.09963/paper.pdf filter=lfs diff=lfs merge=lfs -text +2205.12006/paper.pdf filter=lfs diff=lfs merge=lfs -text +2302.07672/paper.pdf filter=lfs diff=lfs merge=lfs -text +2011.06782/paper.pdf filter=lfs diff=lfs merge=lfs -text +2303.14679/paper.pdf filter=lfs diff=lfs merge=lfs -text +2210.16114/paper.pdf filter=lfs diff=lfs merge=lfs -text +2110.00280/paper.pdf filter=lfs diff=lfs merge=lfs -text +2206.05852/paper.pdf filter=lfs diff=lfs merge=lfs -text +2111.05011/paper.pdf filter=lfs diff=lfs merge=lfs -text +2407.15487/paper.pdf filter=lfs diff=lfs merge=lfs -text +2407.08221/paper.pdf filter=lfs diff=lfs merge=lfs -text +2310.07535/paper.pdf 
filter=lfs diff=lfs merge=lfs -text +2405.00200/paper.pdf filter=lfs diff=lfs merge=lfs -text +2207.08605/paper.pdf filter=lfs diff=lfs merge=lfs -text +2107.06325/paper.pdf filter=lfs diff=lfs merge=lfs -text +2211.03041/paper.pdf filter=lfs diff=lfs merge=lfs -text +2509.07908/paper.pdf filter=lfs diff=lfs merge=lfs -text +2102.07762/paper.pdf filter=lfs diff=lfs merge=lfs -text +2212.05301/paper.pdf filter=lfs diff=lfs merge=lfs -text +2511.03107/paper.pdf filter=lfs diff=lfs merge=lfs -text +2311.17922/paper.pdf filter=lfs diff=lfs merge=lfs -text +2308.06838/paper.pdf filter=lfs diff=lfs merge=lfs -text +2006.00080/paper.pdf filter=lfs diff=lfs merge=lfs -text +2405.12744/paper.pdf filter=lfs diff=lfs merge=lfs -text +2501.08659/paper.pdf filter=lfs diff=lfs merge=lfs -text +2405.08674/paper.pdf filter=lfs diff=lfs merge=lfs -text +2202.05420/paper.pdf filter=lfs diff=lfs merge=lfs -text +2210.00036/paper.pdf filter=lfs diff=lfs merge=lfs -text +2407.04538/paper.pdf filter=lfs diff=lfs merge=lfs -text +2206.06719/paper.pdf filter=lfs diff=lfs merge=lfs -text +2409.01445/paper.pdf filter=lfs diff=lfs merge=lfs -text +2402.01145/paper.pdf filter=lfs diff=lfs merge=lfs -text +2309.09531/paper.pdf filter=lfs diff=lfs merge=lfs -text +2111.12701/paper.pdf filter=lfs diff=lfs merge=lfs -text +2307.07812/paper.pdf filter=lfs diff=lfs merge=lfs -text +2310.13545/paper.pdf filter=lfs diff=lfs merge=lfs -text +2309.15848/paper.pdf filter=lfs diff=lfs merge=lfs -text +2310.12956/paper.pdf filter=lfs diff=lfs merge=lfs -text +2203.16001/paper.pdf filter=lfs diff=lfs merge=lfs -text +2403.08733/paper.pdf filter=lfs diff=lfs merge=lfs -text +2304.06668/paper.pdf filter=lfs diff=lfs merge=lfs -text +2207.07697/paper.pdf filter=lfs diff=lfs merge=lfs -text +2301.12217/paper.pdf filter=lfs diff=lfs merge=lfs -text +2103.14024/paper.pdf filter=lfs diff=lfs merge=lfs -text +2304.05939/paper.pdf filter=lfs diff=lfs merge=lfs -text +2305.13072/paper.pdf filter=lfs diff=lfs 
merge=lfs -text +2401.10695/paper.pdf filter=lfs diff=lfs merge=lfs -text +2211.01910/paper.pdf filter=lfs diff=lfs merge=lfs -text +2310.11748/paper.pdf filter=lfs diff=lfs merge=lfs -text +2307.14680/paper.pdf filter=lfs diff=lfs merge=lfs -text +2503.15420/paper.pdf filter=lfs diff=lfs merge=lfs -text +2312.11927/paper.pdf filter=lfs diff=lfs merge=lfs -text +2412.10912/paper.pdf filter=lfs diff=lfs merge=lfs -text +2310.07235/paper.pdf filter=lfs diff=lfs merge=lfs -text +2110.00966/paper.pdf filter=lfs diff=lfs merge=lfs -text +2212.07634/paper.pdf filter=lfs diff=lfs merge=lfs -text +2204.10356/paper.pdf filter=lfs diff=lfs merge=lfs -text +2410.14429/paper.pdf filter=lfs diff=lfs merge=lfs -text +2206.13464/paper.pdf filter=lfs diff=lfs merge=lfs -text +2406.19931/paper.pdf filter=lfs diff=lfs merge=lfs -text +2112.05787/paper.pdf filter=lfs diff=lfs merge=lfs -text +2310.11964/paper.pdf filter=lfs diff=lfs merge=lfs -text +2102.07631/paper.pdf filter=lfs diff=lfs merge=lfs -text +2505.10518/paper.pdf filter=lfs diff=lfs merge=lfs -text +2210.12524/paper.pdf filter=lfs diff=lfs merge=lfs -text +2210.02412/paper.pdf filter=lfs diff=lfs merge=lfs -text +2308.15827/paper.pdf filter=lfs diff=lfs merge=lfs -text +2308.09517/paper.pdf filter=lfs diff=lfs merge=lfs -text +2408.09384/paper.pdf filter=lfs diff=lfs merge=lfs -text +2205.07177/paper.pdf filter=lfs diff=lfs merge=lfs -text +2403.10658/paper.pdf filter=lfs diff=lfs merge=lfs -text +2106.02796/paper.pdf filter=lfs diff=lfs merge=lfs -text +2205.00320/paper.pdf filter=lfs diff=lfs merge=lfs -text +2305.17331/paper.pdf filter=lfs diff=lfs merge=lfs -text +2509.17430/paper.pdf filter=lfs diff=lfs merge=lfs -text +2312.16176/paper.pdf filter=lfs diff=lfs merge=lfs -text +2207.10040/paper.pdf filter=lfs diff=lfs merge=lfs -text +2405.10812/paper.pdf filter=lfs diff=lfs merge=lfs -text +2305.18484/paper.pdf filter=lfs diff=lfs merge=lfs -text +2106.06499/paper.pdf filter=lfs diff=lfs merge=lfs -text 
+2311.17921/paper.pdf filter=lfs diff=lfs merge=lfs -text +2407.12511/paper.pdf filter=lfs diff=lfs merge=lfs -text +2303.16268/paper.pdf filter=lfs diff=lfs merge=lfs -text +2203.08734/paper.pdf filter=lfs diff=lfs merge=lfs -text +2204.06260/paper.pdf filter=lfs diff=lfs merge=lfs -text +2301.05434/paper.pdf filter=lfs diff=lfs merge=lfs -text +2304.05634/paper.pdf filter=lfs diff=lfs merge=lfs -text +2503.03663/paper.pdf filter=lfs diff=lfs merge=lfs -text +2402.14966/paper.pdf filter=lfs diff=lfs merge=lfs -text +2104.05670/paper.pdf filter=lfs diff=lfs merge=lfs -text +2309.10765/paper.pdf filter=lfs diff=lfs merge=lfs -text +2203.03079/paper.pdf filter=lfs diff=lfs merge=lfs -text +2312.14572/paper.pdf filter=lfs diff=lfs merge=lfs -text +2308.16021/paper.pdf filter=lfs diff=lfs merge=lfs -text +2308.06548/paper.pdf filter=lfs diff=lfs merge=lfs -text +2305.05189/paper.pdf filter=lfs diff=lfs merge=lfs -text +2301.12232/paper.pdf filter=lfs diff=lfs merge=lfs -text +2301.09209/paper.pdf filter=lfs diff=lfs merge=lfs -text +2205.04012/paper.pdf filter=lfs diff=lfs merge=lfs -text +2310.08731/paper.pdf filter=lfs diff=lfs merge=lfs -text +2503.09958/paper.pdf filter=lfs diff=lfs merge=lfs -text +2206.09016/paper.pdf filter=lfs diff=lfs merge=lfs -text +2503.02463/paper.pdf filter=lfs diff=lfs merge=lfs -text diff --git a/2001.04753/paper.pdf b/2001.04753/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..6ae10cb087aef80e0bd7845456dffcccc92e7996 --- /dev/null +++ b/2001.04753/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:09b56ea00e4b90a21949509c3eaf2460a8d8c075b2a53129b47c7a4142427a86 +size 29077144 diff --git a/2003.11562/paper.pdf b/2003.11562/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..4c5bb2e278610e92a4ea30d618909deb97e94b83 --- /dev/null +++ b/2003.11562/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:0bb5f61bceeb86193efe6ce85e3e84e1a6f4980ec6f803fabff91f75e043bcd3 +size 185899 diff --git a/2005.08465/paper.pdf b/2005.08465/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..3c2fe3d8421ae32f1dfb40f11ae3f2c9c62ba4d3 --- /dev/null +++ b/2005.08465/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7e75b7e897dceb061a6985ed156ab35f3d92c9bf9d94a6fbcca35003ef73b797 +size 1556106 diff --git a/2006.00080/paper.pdf b/2006.00080/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..26f31373572c1d9c372c10af3c3646f5e4780235 --- /dev/null +++ b/2006.00080/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6f0df03e08db9feb5504f0c453f28b55f33227780d6266f5693b32d2689b4770 +size 4643823 diff --git a/2006.03465/paper.pdf b/2006.03465/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..d142e3984451125cbe3d79ba99e157b77cbafbde --- /dev/null +++ b/2006.03465/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:44f71efeb708f43637d5c43e8b695b4969bc35e3895aaf83020f8e29bb7f99c8 +size 1569514 diff --git a/2006.04166/paper.pdf b/2006.04166/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..60c7fb4fbc61a6090de2071995420bfdd3667eb9 --- /dev/null +++ b/2006.04166/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d0f7e0640fe7110ac95b184467dc6f2c25260a83c3f77366f096c8e583c1701c +size 569271 diff --git a/2006.04325/paper.pdf b/2006.04325/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..9dd20b22a6c116dc3ebc44db7a43dff5713cd2ed --- /dev/null +++ b/2006.04325/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1c7a9c2c80f690c6c79d8c50dbc268089e306aba5a674a78331d85a0c9b19ae3 +size 10644382 diff --git a/2006.08228/main_diagram/main_diagram.drawio b/2006.08228/main_diagram/main_diagram.drawio new file mode 
100644 index 0000000000000000000000000000000000000000..a67b84fdfa3f2067f20abab3d80ff308342fac63 --- /dev/null +++ b/2006.08228/main_diagram/main_diagram.drawio @@ -0,0 +1 @@ +5LzXsuTIlS34NTSbeSANWjwGZAR0BDTeoGVA66+/8DiZlZVVRTbJIZt9e5K0OoADcLhvsfba2x3xJ5R97+IY9oXaJWnzJwRK9j+h3J8QhEaw67+g4fhqwBHoqyEfy+SrCf7RYJZn+q3x+21LmaTTV9u3prnrmrnsp5+ejru2TeP5pxvDcey2n5/Nuubnt/Zhnv6uwYzD5vetbpnMxVcrikE/2u9pmRff3vzLuN/h93u/9TAVYdJtvxoMyv8JZceum7+O3jubNkB038Xy1ZHwV67+Mq4xbee/54EClZRbI4cFxTEvbf9zxWLTn7+Pdg2b5duEv412Pr5LIG2TGxDkdRY34TSVsVWU7Z9QppjfzdUIX4fTPHb1LwK65sZcTwll8/367wf7XSrdMsbp3xghSX7dmCY/6enbHMW0e6fzeFw3bD+0g3+bVfErxXxvG9MmnMv1Z+2G34wk/6W7X95gdOU1ZAT6Zs8ITv6Fwr+e+mbRKAT93MvXnL49+GuN/KYvjIb+dkdzOObp/LuOroNfzfxH00fh/4jysf+w8v+n6BSDLp1CP/4h/xr9Ihj8U0f036feS97h8avbenDD9E8PHyZ/woXr4OsF/1pTwv9rU4qXcU2Tbzbxe7v6WzaFgif2cva+P30d+7865oAooO8nx7eTrz7YrunGzwhQigT/u64k4VT8aijX3EHPf4b+ApG/tID+ob+gCP294cdLPmfHr8+MdCwvGabjt8a/avJfOv8bkiTo/1G+geLIX1AKx1GUhnAaRcifbYum/oKjP64S+D+JhMjPnoJQ9L/FVVDiZ8T9LqW/Oi7oN+Mi6X+/LyH/GCy3XZv+V77zPx6BUexvQhj2TyPwP9bvvzngIn8HSv4v0ywG/44vITTxT6LE1ReO0z/+/ZaHEf+92iT+QJtEc8mPybrPuH+olRiW7vuFP0+fJOd23QCT/f7j4nWUf/5eKIUzl6ivcV3cA+dAw7eer5F+df7t1t+az5zu8882EzZl3oIwe1kIiE/Mmo5zeSU4t28X3mWSgMeZMb1GFkafrkAQ+4amV784A0Zx9bXM3dfo/zaxA69I979pft8R+bccGEa/zn9lnsQfmCcC/XVL/EnR/7BWyX+rVqEvrcL/u7UKk+jPQIuQf6Gp/6xiUfgPFPuvpai/I5wCfAOVgl9xTOgv8JV2/UwxcfJfSjH/65Qa+R8VItDv4/mFVJL/fHyA0V/Fh5/DDgX9BSd+dRX7u6LFP04xiZ9fi/5tiolS/yAlhX9zP/HfkN5h9F8FxakP2z8ExfjLDQAgjnkU/j/XMKHvIe0Pjv5fcAisGvqgaRa+y+b4evzdtd31msumf7rlB+BCf4i3CP7bqiR+TRm0fopuv5x9FwH+EcLVwoFjMDAcTBO/RPtf3Qv/cu93h/ynukF+dPMl6V+u/LjwJehfLlzdlP30aWC2opxT80tQ1/Xtmvmn/YNZoAX+nIZTDwqloCEr9/Tz/p+gC1z5cE9wISub5kczeOflYSwrfGbD/GrAX8Hh7xHJL5d+kcYPzMGBof8QCEH+6Ob4VQ/kD7F+Q6kf13516Rta/eG1y6F/ac9/9f7f6upz+ovCft34sxl9u+939vZLhP/ylP8lER6h8N/xehj5XYRH/jsjPPFH1O236dU3f/nJW775ys/i//KS7z7y2+D+LTf74R0/F5j+FcwYI34rYfh7xvMrCcN/IGH43ybhP4oD/9dKGIN+rmYhFPmfFe93IvSfE28Gh1+E9V9hwL8ptsH07/Hhv1e8f1R++Zendv/LE/bfpnboH6D+f2teR/7HUf+rtPiHC2H/ikiLkn+rhon8Q
V5N/l785P936a+21j2jN/3Spe4x5MYL2Ys//z2I9bcqmj+txPz1BZv/+wqfOPob9EN+k2/+vVkt/lsY/W1H/7oy5x9qGP03gSaXtiBvgdp03rqx/t+Klzj+myWz7+Ht11EQ/TcB5h8q9F+zvPRHVa5/AP/+p3op/k/Wnn7npb/t6N/spf8uanOFyvH/F276W/1hxH/YTf+e3TlTEfbgsHx/drH9IlIljNLGuEQ0lx0QbdTNc/e+bmjABSaM63zsljb5NfP//PsDtcxd/0fE6PPK2/dW6HvLdZyEc3iZ1NcpIvRtfhHj0mH01wbJYt7drn+aaRe8nV9Hn3OGZm/+9ZfjUnsSroMjqRv+6bwwb0GSKrKpBfamqzNLucnW3vpykqvdzX7c1DhvebG+Kfomvz5t5O1MO6Tz75Y1XDYBzaHnvi1thN3r+eRPCHNSZwfDZkKsznW29NnVrj+8Luuug9zbbcIA4/76P7JuwfVHAlVaZux9QmmQshfZZJv6V6bQgqnaWO6Wo6jKOZflxnVbTlTGPdv8Lj6vMzHbQEeqcVqnZgZRbV+NBnbJnGFX7HVd8vCa0BvuvuIyo+lsT8ZX42VAQhXhWDYGz+ulRNm7crypjZmNMG9Sn5f2Im/kyJJq2Eq31/2CREf4LXvgT+9OBqZxeagq6N4wSjG7HkcelPp1V0RxSXYYzrtBimvKTLp17a0+m1XiTai4L3XVLV4o176EskcjUCvkPHarZEATuhgB7lN5xCaBhT8vzQqvsrymKWyr3+HcAH2JrUavkVfQ4OCOktNOejOeY3838vW6NqAYOt/lKiYPDHMvOQhnnEdA1maG9teAVt4YlnspNV2/38pEuZoy21ReXsXIcuNXFpNktFJztFRfAMT05Dnitq0E8StHRzgLYqOCL1LMqK8hkmJu3A+CrYF46GK7mnkUaUMDrvtGysbLJrXm4fMTauDtZ+xM1u7K0FZY6YTy5EstezYwtFxC2KxSCmXfV8a2g3ij8t4a5oMJ6bRO6TqvllvEIiY0hZwc5+vmVr5rit10l2ElKCNsihXG6RA+QxswS+FvzrK5ZhlSIXtZJzOs1SpVuyVTC30DfsCY6HnBg0D2/HAg6A3UZRggSW3LM2w1ttLWdK0ehFsRFdJZ7+edytRDuu4AgvnS0cvYqgoO7LY2bh7u7Bm3EuOaVeJlVwKuN6sy4qG2D54cd86CkbPNbTiwlzuUKGmAFo6AUF19BQ0B9KgQzWIB/2LCl73UaO5kDXrqql93YkJKgnwzkds0KcdDqYBH8cKla+GR3trDrXHPDx+lD3sbCqxtlbYJTzJl3hU9hQAUM1u2KdhNy2DyJHzx9qUq4NKpi55+KqK+6sgPwdVV9AlkadatagFzH2CflEPqbNmxaTJvWTGZQhfXz86GsRm+bzzMo4QRbF0S7Bewp9t0O+dAfvtxIxlkvObB4U1ePMgECXtpj4nXnQamUL6kp8CubJJnIWejVsluCfp+wT0Tk2DITaDqeZIjKKbCcK8vmOR8xielWYkq/HWLJERIZcbjyMmJnHrIjVptqucduU8obdUjIO6I4LVyb2w2U4rbeZahSMWCVABto2d4hTWBRemywzSGc9M7BcKOOqqwhLebfD1/jN4LB4JwEpLqfy3dKZbxs8c4b1rx2Rq4kYlje7GxJ2xsXkoEz1I5/LrB21SwLwkaturH9IR490BxiwZoublM73oFEB1qpYWI6hKmEq4p1U59qc3s+OyRVZnojdk9lLp4IIHsKtzF2azVTQPY2/vX9mZVhSHG3JbMJKRqk0I8hn1v1C2kicejxxaxkT6Wx75Xj2Anx4Gn0WKlhge2xN4L7fqzGqeEmHB7OztJOB+9MLLPlQRQHLgDxcDus5U7bK2dEbZSL2aVcPKOK8thqE05+NYtB7G5hnP1dO/wV1UCG3PgaMPyJuKPbbvQmWmjzLfjtiznLWXRc26eODJgEm9orluVGXCypEjcVDToPWhRkhEa+ExJAgbd4
hkOYkqxmem0NRsINkuj7neuOlzg1UgNj1MkBx7AMIORv0Eq/pIDshefm0QILD6bQk2UaWyUauNwc4K9A/QJO+ilkhf5lsrWMbN3ekUNCdOdVh7x+BKtHmloA1vmqD6ewNSbFkH3Zx0RzVPIiLGY7+PjYTf4+JRAVNwqWc2QWC2oJuKq6Wim5R1QZspOGdqVobKHvfqGUEoelwzWTk808xsIa4JNkYTlvEgeYEMwPtPS0KuXiPvqEyBwTnPvy0EEvhXjVbJ6Azjs5ga+rsR4RFQuCzNpS7xGHBKokd6x1+AY7v2tO71/4PnLpGTvLUXWcOtdMIuDZGp8RJ1YAshyLwrBLC9HzbaiOPANeT4o2RQfspRugl1ZBjsUI/vGy6nbyJSzhMZgZ2d/obtLUNgEYHSY4+32NGBn6UEsvoK2H5xUY9TxSDL4VGDRvhhZ28JHsN5HdPxA6hUMGfRkrYj2CDMNn3goBMomKZ3cIpOmDQf+JLxpZ0xgalTyDFnjej/WHvd+8WiDDyLyGEwQSxkmt1G2HXE0QG9gSjI5Nbwlt4VjeNlyPe+1MqVD+ZO0W+6QydHg0CtVedgFuQnREiW+gQ2hm6TnOc8hmMLLHNc7chBFpoWGiXgXGpnV0/LqN0dZl0mUPS9VleIyTMZUxDimS8c2EeENCyeI5TCgXmFXtwyrnLFoovJiYwQjUARAPwb3FKc2BO+KswKWqHT8LDiJkqn80W64zRT60fsAldwjgdKIrdCxQzhRaQVoPATSVV9xAFGNXlMryfT1hI3oFLLmK+EglWatFouSobAGJ4OqOXvGYYjx3nZhSTt/JLrgLIdjHdHCkCLtIB472MAUGaYk1A2RZvchnXeM8M8blA2NeIZ5XnrGjYhRgjaZ9yVMwTV5H54bvFBAQHlYYXsLzPuyR1ollagq3akihkW4EpPsF2sN/b7VdPqSIRteBsOXIAS3OHZzwCcm9KEeQZbnnoo0SpYuNduAiOAIlHiD+XZ822HtK8ctuZcU/A6WjfcE2YcELRK2nVEEnpIHsXqE94dkFQh0E8O3gZbpENr300UIbQPy3PidxVbu3kV3K4UlXQVYbDZmmtG5v40tzedV7r/slS/v2l4666C0RXleHnBrdiFGsV4knrK34va6bXzARmmOrg9R0ORFYzKPRmP2nvYxW7rkQ7m0HefyoPcBB9BNtLFkAc5umcxLdNVUSuTe10gXmpw4caN1yee4fz8rHa17qAKw52uaY5XZa58kVGAe+ysXQ4+PEsbh+EyqDbXgSgEZByj6xa+BL+S0fSInMPrRlR61GAUH3p/7bd0x3oDl2jJ6OH+ngkuzbUrVViGqGh2mbx3JVxZHSj2hVPbAX+PLvN+kybFHMJbCpbbwg45l8AIIS6U3Cq7hooT9mx/DqCS6WqSlU7YUmHnnHc9jUHM63DuJE71oBplO1EoAo/VzCbxYuo8F7qFv6tCslHm4tndQaz5RCYhH8aN/r0rBAPqVeqRzUSWPpKoHi8taQRXapWdAZhS7Le9Z7PH162KBnEm8QKZQUWyrrm6NiBpkqBmRuETprlM7vyb46VaGPw2ZlT5ArMDvdaQR3HYXxw1meCOu4U1GMZ6ndz5YRgKl0rF+eE1GoofuX07YSKKPPGBZIFzHpwSczBuuXw/jZVci9eIHoLXDAFp2jmh3sGoVpiB0emN/jImXzK5n3YYmwqJUYRZYnbKQYbjIk+1SHLrlZhuzycMnRmTmNKwNtuYx6QHu5EtFiBzqqZyq+XLfzXFnEgKNCmRpnGxcLxJsc4BFXQzjDe63Sm9MYSrEzNXg5RCeYLGbpYBJpSCJxS64bUYpglGCJF9wIji8uUJQqYSC7gfKjwRsvhNY06AWp73HK5NJZIXiDZWGTFV03ma+jeGAsxb0U0Atfe8Gp0wfb9u8chZm3ryaQmbYaIaNel50jXvaTzhEE0YRr6DI2OqVcO5KskDagrj8pRQo7rqJPySv7JTXWxP12bsyt8eWJl/xn
JmFDnP2Ik1Ks7k0EDoRJWxRBe3V8xjp5HiFchd6ptxC6B7GO47jrGtJ56vtq5p4S8L6ls2bvjoNvIVF3Qr8c4WdN8CFMPbDrUymZuoQRy4BeRh5v/efIUelV+564sBa4G0Ww715JDxca/JThqAFxZQJaSWJeMEWQUIl5Ny2iycpl01AWHXCHDetb8bkZsiq5UCdFKxg74HrOUyom7EdMJk4L9scFqIv+87lsoJIssM1epHTu6DGa9EFDNlhHUqCQKCTpwLxFSmGm9fbb/34igQJa52eOyW4QI9SOrWGb7/k3qPk/UK0KXLWMnaPw9Eey2kX7ihQeJWU8L1Xg524wUZ9MLqMamMVGS2hvbVITN3Xm733YQncjzxA5gVHtgrdnafQkt6hpfFtKLkoj9+H48zvbuwqn0hqVPJuMQJrCYm/dqohjpgugE9k23hnl2QPIeL1WIZEaqRHhpqnRCo2j9n+BXt4P81ea9Lc446PObkEbPpcHm443rxxxLmVP/FsB6TZI+b6nX2NxJlZdReHbTjul+my+GG/gypZHU70njXVoI2M6/p5TYKZGgOLX1/8UH5tfavqc09iYR0tT6+zoHBfFjfe9PD0G78M3mmTASSxHjw2xITp1RWCZ7rqRgUWK+dAXujVPCo9BAM6zPd6VD4Yj5iNLvrKvYCdjl5Zu+NG6qf69i6AZ9bIl556HCBTK2f9fmVRGdbf2TTeTsiqDs+QJF9SnV4ZWc9oD+oFDRT9iD+5J4NBbqLQUJ6NOPsy6goktMayIYq6dVoZ+tcs45fBJTIVlSm62mm3MS8KLkYbdnlMcJ+jKifLUFLBZiRvydXXW0/gOHoEEqwsHHqpCpQRWt58O9ecyjqW8JNrhwSZ3uqSa6M6qO2JQwtPi2ujWavlSdCKmmBsRmUOn8wKwWWCEtiLrgrNsRVZ31Mtnc2AHbJXVgrueT8Ug1hkBofjxa5F5NZqg8JR6P0URVnUV6yv76+o5ZD4ElKAi71gET3K2rFBEOlu3Ki66R5v3ztxaq39EcW1qHBUy/BQE6SVKoqkSiUgD9djQchd0VLhLRJK0wRJdVGIssbYQsMLD0MzmaSWK7lt9KjdITrs3hO7ziaBZ9egLnHQbR3WjjG0sfQCU2p7kkcj9XaFxyhJMKngl168KLwyz/oLLhtKp48ZoL8ZYPDDiwxek+f39iyMYjrTV/22iisBZcIbKCCFGLLjEQQf7c0SBV1oI4PcTWi2VGElTRPAjw9sFbLTp96n5Ja1F1k/pNnwhzcTlL20HCUgYmx7zAZ0cDNSYRYPKICWPNf92EKVaquIe14te3isYlztBvkW2EwmNjsiKJzW3M1N4CLhTplaavVNsRdrcE7Sn53q4twgkT4o9Y4iBjp0aOp8G0dko60XCigHQbLqGCKwGzbCluwBVf4I99Ix4io/7yy98sN09cxNxv5KhU/K69sSRb9vFzgB1s68xRHT7kPuTIQl50e0SdwVtTgLr+O6UYElajiYE0PTsHQjGnIFBmcu67nhBNwnBosKg0oLkSKASlTVmjN8BndF1k5QQb4rHrCKJo4XvXmOPNmC8FzqqVYPvNkWcjrNE6BJ6mnkwRlk/qwO8TUMgR2vNPb1eiWakzFjlJVHFrav10439pIcwlmghjNVEjqATFQOBV1xYdJEXJgTHjAxRsfYyPdLBmAQj2TNJAyBCK7UYHgd2kzLc+cyQORNEjSfqBUApVOz4ZwwIKdBc1M876i4me1ot/qQA6sKEbJNgxZjqUlK+YNYsdAnT5Y25sbaKQWwMQ2nLyrPPKw7id4oTTxLPpHKjIse9rLbe3dq68mDCOMhhkYoc30O3gIxwTzgVokTLX7UxasiFBd9b66hLGz4imRS74quUUqZdseyzGYK0j4m9bZACdiC3HtvsyA2R6+aEl1IjYqYcnvdak7UH6ZLlo/tyvIPi55o3VTFcU6tGm5B0jBhILw4JUERQWI8SmPrJyxa9UmswIx3vKHHW5LkG3U/x
5VGG27w3gRHjSwJSWWvbcHzKYHqWiyhkF+A6rD6UOFMqISjW2T8crxB8qMjTabDoKwlc1AsFNW38yhWLhDVZqaRCwzgqH81b3jcb4pOKqC+nS37fLkokhasLiwKJQwTJ9EVaj02K8vuPUp6kZeLDRI9QRGPxO4vpn580n9Qb8exsovJWjre8Jt/GxdUCnIpaevFwij3YRq7qWkE5Ht0xlX8Io+nF9onvSAPKJrQKepD4S2yqzhj5054b8gzhCtd5rxTkLgNlrR7binpmRimU4oLbI1REmimRNyUVCeCNDkUudDKMbcYnLSTXTDVexOhtgRNeBRfYzreGGDYpMPZza7WUT0N95eXezotQ21lVtF0Wx4RBldH3/YTbmbjzJAaTh4uNKpbVbonfAdVf9V+DJ65bqyxZmRVPF7kjjuEzSaXwCY0gQXW01cppRMMaGiRsJuQIva4rFaztVdOowv1Pr3vjBqtLtN0zag/V9wHQcJ92SGEntC946l5tYF/UpaMJFsLcLGGn04LJ/ndIxikbV9K2eiZeCnKOOvynFsKsODAI6qn7vHAL9DCIbMuFEAVZd+QVK7cJh2vtFkwCft7wjEwpS1Y8co3ka+EJg44Q1PjNlwXOraQbl5ErUeBzOTt3L0OZ+2QgXE8ZRi8ivfHy3gnigYqVN2Dmpoc8VDYqUDFFV4c0ad3zQJiwGgshstnNb1HinafF0f0ux5vigIXpBTBCmtu6GHc/Ze839Kb0L9miFKEgy1ootiBaJ62wU5Kngv34SCC4fUS2bez5s8UwNBxJ94KeyvD6Z08KTtIF9zf7FXoLNFLlsIclSXPsEiD4wJAMailBmsbwS7ecLtbLACs4ILR38tUYIe3GY4PwWsrlNVJMbmgN6s0UuXWOh5GV/ensmA2FgB+YlEXF8pulNAzbXB30YKvTtt5h072SMVtQRXgpHcmHfNQTEK54fvs1oHPVYVag1WXLyzd217KJbLwUeVNMtpTpQvPTyFWWG8d7TgHWMmK0dDDFG8xHKXuRr674go9TEkZ+brdBJR3BZ5PDVIIQJTIXi5cOeJlcMmzP49WENKwAw5bm5IWkvn7SrBiySvyE5lTr3Le6TViFMGeC7qQseGRbcxyc5pxh6o5gIfwbTVRoAQ4lFgdqtqN4L0LEvsZpgwmfKOvezLgWK078w2kbG7lOYQh0f1NsJcH/moGXdn2KjoY5ShBziHVckQ7NofRV+rpOFInRqXgpiH+MHaPlSi1sMK1rIJdJfYH1eiOg9Hak7jd0q4YRJBjgLIYu++gENspZTBsPDNiiz6PO58I2R4CFdPIuOge7kAcOmw7ITF7Jvuco7RQ1wXLUVQTWaCLBEq2Fpsy38d0vo9D0yYpfhYHHETjYSOGUHZpNy65q0wC5zoYfCU63C3c/OeyidSFlzN+Hx/CY8gu9k2vuQHHDs+9is4IYR6s8sEC0T++FlOEEUQ9Mp1fZHgD1aFVKjCE51l+dhZ/eJUfRplkW5tEVFkMU8A+gG5fMDvDtwXuj8irCiax7GGsRv3SEBdOGvueg+XKg+DcXlkbgyOpc6xShpGK2Pap03N5igN3zmTlQq5XbhpAD8iY99btKU6xHvWKKsgerd+9dO1C1A0gPXvZqbpLaKgWz3aIV+Je0ho3LZOGvnrg1WaFaYwp2iU90CS55NWolUe8DLWMwecRHkrLSKk87qFZQqldgEcWBW9CvcnYNPW2tyPXucQpJlGGOsmwGmy+QGVszM4Yqdx3LifhdoBUQ/XjR+gBaRXd6DyhS8btJKGnrpkCSt1tE7W5yLUB6egPUZ2Je3vfSPYZDUibQ5bLxBpwcklMu2qK2FIFBQPARg+wYoWZAjax4eMOyqceVxDArppYSJ8ik+LYrQOowT5QRwLmDW56tigwm4WTxFZ5yOTj+bBRHYXZ3W2fcxYr6CoavZxddDjbkpeSPLUROjGI5u/uLJyUxfqmkOBpzRiNv6+3TbliVZLzuoBoIN3vXA5+LGjYJ
PGcahdd2b2aeb4WcmG7rEqkXiDNAiwMjlhk1F6WpJiahBh8eXH3fBCezAm7u76SVGY/ibXX+ABHohbTsXzpnF2Ul9qy4ySEMnVTS7MOPTeIyd3bs9uh3AtsxyLWBGK6A8u5d4/7sH2qyVJmbqkGaAzR5crBbJeeq7KGoMfqmtp6UW5oXtD9VjMoPqT0bWwPgUWqM3DUzfUiGCwzSThqE5yd7nJfglInRzCKjFurYR3c4Ar0zWnsoAGLF85IDh7DqS+PO0gu910LzONCg9BQn4aQk8cL1JVj80Zd4XRl1wd7G+O3WgeL2csPmF9DSlzhWt79AYRTMXt3qGPZbSD0Dw12VHkIhASi1HVeyM2RdLSZXsMrWQuabxoeXU1MrU/EdE0KEKSuw1HTpRL1uCR2useifpdY7odmQTVnLUvn0x7F+Y1jAXnhRQU2jgvyKsUXu7jBiJ9OiMwAMHhCTDaAL1gEOuQHF2E/CrXOVR0xWwnhOrCFGfIEEh+N2AaLVqtFEXbCygw9rpxcnK179K6o6oK/tqsUQeM6Dtg1HpDQ6vDNU/JnMhIKX6c97mtYmQCqmA4YnpwupopmS8ze+miqRBYdo3ewywq9Ly73NB31gcya6XF3/BM7tJtHzA41pBfB6VbbYDLNOyuJRQCLiChIyf0VsFfyNZ7VZ/sEyiKeHU65lYSEpQA3pl+vY/Qb7bMoPXiw2mJawC2WgGz3bzYX2WptG48oTraqT+QrCFLz15CGu9X3LCQR7Xrh3BTBZlbr16CWp0ugUuTLHjLBEOK74qwgJ47FNGYlLr03dCbNaEFvTSMIa3ORbph6DzMKDDHnKtcsskZD0voFtVdSJkIBCHQsHnNcT/BPYDWWpy8EvboHWMX08jh1AeOFOQmCisFw3k9FGqRjbbTthbVLgyBlgGFZUaeplmozL4YgO1HVT/Z7bneqqXjcmnawMM3c8eSdebgSm8fQoyKfJgRi4W0pqnb8nGwPP1+dj/R+HJrcUrJo129NqlmQXoyp5V3oQ3D5leiBJJB+Hon2cMtRRkdgrMr9ELcH5e6foqTSlygcXaCSAegXEwxjw4IUJqAM/0nAZDwxdhjj7VyK3Rm7snkX64K8HNTPV8bByHDGd4HsZXHr8pRwqPea2CRyubfS8KAMwlYH6FmfMv1dy09nTqBwSS+ikfE6h0Pw7fmccRLfEPsO2CnAEuHKhRaKDpY+Bniq8BVXzhdHdlDK6nKsguyb0Wd7J99MBlknl9I7DG8vK3HMkZZXBvN4jbmnZeykxuN4Uw9KPOhaLuEw4SK2vfJNANZFt5cFqdR4U3VvKe1jDnS/SRdCzm8ZBBUJynohHUqGFrMhL6GHd4IiGOe7SH3lYoKNrxa81N99xIAt83V7MOMrfkk+TirQQ0kNDBJuXQFCh0cxgo2UM+kmhwnq0tmzUbuP3KvqSqhZ7OE/QB54a1jg/vVOly6vj1CENESRozlwOAcSABcP4DA7kpJ+3CNjTWXmU5JcMFUnIJMMUHkqGg9zUvHEOdyGapu5hdSD192BxGASrDJeWCk/XhSgEKrlaHVHLmVafa1j7Gb4XNt2R45HsEtKYVVpZrWP6e3dTL/a769ODif3Qa1TIub9HeWMHKP1SmWZ/tGJtzG6CwgdBynGred+CM+pQPUknI863AOvSZ7hZ1ViSHL27a1e6LCLvuZxKegkzT78OjWK07s/yKG4cHUH67/FU8frYT0NbuMkRMGqBIFZrNuy49YeIAt5HYLJ1xb7fE1r6q0lJ1J2ypGXdXXF/fC8pBwi5oVyHCyYAyijsEPwpCsD7DWZFhdJfTP21gAOOJzLIcFxqH5F40s0x0I9tupekgXgmqJ0g+rXTTQXHHv3m7iCdSDByAey4I5RgjVHu4zUeMH8j7cA7apakbqRawki7+k8nmHdrr1xnKSIvb7USjveAeoQKlGqwbJnPPmGuotAoHuyrt1+60P/cZFMbJmncIVF2mBIEN32mFAX7EkNW580Nip58
XoHOaixOZZs0BnYWSVjdHQleDRFkiqLam/s5TctaUunaQs3lEk99OxClvbNKKZ6ua5pzrc5k+HFDe8ED2y1mvRNWR1bLOAqQpJazMDEwlHuF3O5D85Fbl/G9rJkgH1W4zYUVcP5e5fKwCDFRsMEVpGXgleQoOVG0UoZDo49maRO4GFX3PYKLlraOzBEnqSzKoPsrAIgHJ1jF94GRtEJnM/xYqjMizOYCtJyC+qY5MVpBqaIngEIpMORc5e14EeAXk+p9YGYLFj3WCEOTw5akrHST11MVr0erVgTnwmUc/0ctfdkulmkHTtRo3PG+lKjhYnEWYQznRAwA/KGmt2pc40HCdlM4wywLUgKxXlWLlH5E6jYDB0tpt84BQk3o0m0V1LnvzcQVkN03sCaqp7GhHu4fiSpc5PXZU8DtLupaR7cmxeP3hjtEUfA/XMCnw8kerL2MyL8BXlEWz/1e7DKTaVNit+s59pKonTS90swS8abFKDQKrK52hl10EHK9sCIC72MfszBRn/ubN+QVoEGPQahQ1kGoTw90XqGg9fw1tMJPQDaH5EU93VulyOtoDf1NWPTm1F3q9kcCBRqEx0XqCig4hLPzyTmEUL2vkZWE8JnZGPqQoVmWHAI3ZeWLYGCUZDWDpiVMd2YBDjNbv0GEzUvF22miDEnez+0SjeNiwLLfgjdsFfhE6U74YJ2/ByDHVddE7ckBmSewHRu2CAa3mOR+9Uc6gh2ynJ5jaCmEfEl1wDywHlTr2HNEz+no6/vAvl095EnF74/4tHtaAiLvQNBZxplA2zmfDk9T16mtLRn2E63WxDkEyeWVyj4YrFHZCFKQ28LWPWdQDmjcahdfxj6RepOcJ4HSqPWVzJqdCvGlZ1Px6VfSINjgwQOfs3Bs/7s8bglN1R6Ez6zbnZH9AMqx64JlWDvTsgiMkQnWNE+3vqvJDlTAgFK3Nx+QmwCWIK5P4n4IHlbzp3OqpOnNCRy9nCujE3i74n/qEfRzcB+rWG6XSz/ht38uYrkjUIlAMHZe3bqZcAk3F88yCACdwkEyr2At57e22WkCbDvW+qZ6SKaj0WRvGffp2Qw9UMWNQ2oe2F0jPl5jWXS8zLILr4i03koX5EpTXOcJBTYZJI0zOlHb6Q+elyy1v2b4ZBmv+1kfQ0erl+i9grfUE8Vdp9B9TMbd4MAVjkhFtg0CiqqrFT5egiKSji9cFOwkvw7Htqq7YoU8FOs6QV0PIMeULW3IzUTxH9WHjHmILy3KZN3nbxsrzaCLtpkPInrGGvXMR8ge72shP8I152iCxEudk31WNKKGlxqCZYiCi1BelRo1PA2wLuMt2PLrTofWNyV5FTDDhG8SICNy1siXb3RUlYQiHujuVkXdOPqIoYkrwgfYAd5KZqE6yLRzIRNA6gAPidcWfwaGVqM+5RkGgk6epCe8BbTxQJHh5ztvhKVdTAiaW/h2ojuYH+SNzZHlFBywCIKv1LWQ4RWR838Q9J5ckpNbdgzjtUsS2+e3kldUfSCang4vdFeIEVnGvzxfknUUCFEcwd1d9LJQs+00GLReKjw3MmMvBQsOQwudRY3sEqnyEw6OlBZQl19C6M4VRnrVWVSsfmG4z+IiSV8XjlRTp+nVrSuqGOL+1Anm2swZeRBn6C7R3dXXVFfuctAoMlSFGInYfQC2x40fRvnbBhysqj0tNXnIkrQ6Ex8Qo+ju7+Il7EGFaw9UjREFtZJT78bT+2KvMAWbtPwnukJBfa7C15dWRC+bFjVRUK7+8CwmEcF62yDk06ozevx2INUYKdwyQ0WJqZ7z758x4ErBYbYnfjsCctGvygZt5on2MVswMLWz8L4exNrEyWpXPwA/jE+O8qO+mVYi5Q4mlsPXI0KEtWGBkKLCJoywRZErLTNlWKPKQ3fsv24o/3tPaip08DVhj28EHe8XqX7zf/sy54LE6y2dqkJ+c909K3yWAib2BuCYHwabAEfZ1j199sYZj7SOedUgipjDfjHaKYnU
jlR7kdPNM8Ynoi6jopQdw9RU6KxVhG0x0yH0aQMoISWyQ+X29oA+EMJPMjDuLad4CWuHy3BijS6wCS/H6VrOAT1vjMA29z63tSedQPbqd0cpSTXsJuiPCmBOU7lxjFggcQdO79S6MDc6UIBd972owV4qHT+68qZaluZX2C3dIO+52p4D6AMQPH36qxWa38tPdZRyvdxk81bsAvLXVUy4t+p5D6NYrqvmujiMYI/LjqEw5+xG1BCtJOU4A8TNbT5MTq7jDGgGAgTh3goef4yFHdBXRQkrY/d2QU/ntMYcGGhpNVSa3rsEjVCMj4OphE1kPrA2ctMtKGJT7BnN3CuNHML5ACzoHx+Z/H7knMYRSkVIOlhx0OWyjMuz+x79LQnoWToGpc9SP72tQm5yHg5yLcKfIxYZkgXx0t+wku1naMkmJvpYUMDlv5oZncE7PnImsES+5hyzK5whmnRXWJM0HLQLm17HwDUpyBdcjfzMtzhWWp9DhfYmATpDqs87nGPq96wDW+tJK4AR+aPyVv0HJMdd9QUAYTtfVRgI2VnTQ5f7WkVGV1a7Mzfx8Pcdqh1B2ohwJiEG6YuaU0TFwmtizYR23c1O7wyCdFFhnKSdrU3cs8Pd8zTJXCWx8sfrdoCC6VofvFmK7AD0dserhyAGb6MQscNPMtOSyp3toMdJdEz4rObdng9W2Iy7QN+Y5qcYaHCa2tki5V+pxg0bULYvVf3pqRZdw4mInE4bjBWUFYpkpYgD/LtXNlgDmcCoT9IYsFal9P48K6jwTwsT7CgonlD3qPpQohtT8Zht6GEbwpiuHti3TWwil0+QG5J61HO1/4JxqsmGtBfYoOatODZ+Wi13kFvBZ6kimBIVZuW8OQRIDrkmwfWwtUhN621twiwqnwjMVS3SkoSvf6mN37lZ20t7eL5AsCLgyRDWQK72kLOAz9hwhgdOlK3FXEazZLvezUxcKcplFCS+AJYf3+L3+JaHuPiNU8kfPiZ94h62kZMA5v6aK9WP37ZYB8K/1kpQb5KKS1rCof4pMOHNLUGMqzK2JtjPURRb+swD4oHrfFQPax4UcqVH3BPwBVBDEqgpN1ZsNEKpYL0Wcx27kUExZ4CIb1vfTr2LychaO52gR0xB8didO4a3lEMQvg2WL3Fd1d5IPJ7u5EFcghGMdDpoNIGprHu+4pFBHIGqs97e7AhbrWGLqi5ZLmagOETIAVs9ZfAyk89VUmTXBj2AOzNG/pbO8DuhL91wNKfz/epOBu+cnot9jOIDrlO+94l3HgPKYIaAmkjeveBvhR3vfFhhifzUzYCNwPC9GOhGp+St194xT7H7I7UNXVfX8txZSNMdHGDacHTFDliyKVEG+wkq9MFNvmCdCagaZhkXAJZRBzJnqzcQCN9azFvfYNSjJdR5yE9Lxh82/RK9sovM1gJIiyifAPllNe75pUZNrhSGpgNBTp5o+5wj299OSqST0upQloKBY+KMxuMvhu8SvZihj2DkLlSz9zGUYuoquN9z5KbIdK1/1bIzwZxgooGodojPArJgHhGqaKGoPSIaJuLXTg76bAjRiA1c1kNJtGhOx4Bmk8f9gKo08vNYeAQIF7h+7Fnz4FzR71NJIw7CMq1D3FmlRR+ETH9FFIZneuo4WEuvWeEqVVktfPuyK/vsAd7EYUIdapm5UTfixzMM0Rrn+8gzWSk1tZC7ri/D43RuyUOa9ErdhbGFJcm8ExQ3vzHoEFV5LFpRyDgirW2j6N8MwaNv1Ov0arxvW4whm1InXQ7lt3Xt+DZUpjxTN9GM6WD1RhnsjWWxjErzI9HT+AOfiF0RN0qOuBpDVj+AbagsU+Zc3nKWFjEjWolC/mChn2YSZuNNwxrgqbqflPqHLlrJYZpOknHjzZe6nkD8Qbrpdtwezn3Lamgw9Dv275CXRa2sq5a6Qy2hczVJMeOWo3Gq8/7T1pPmg85vq+Hca8r009AmMfRREyebZ4WJpIm4MuK/DxswALIy8FGo9o96
kzIW+C19eOGjDgpok9Lkt86u17ODlYz1CGxP3tCbhYxbtAJEg8c7P0uHkw+OnGQ4bNkhhqLDlcmvNi8tjiwnHFPfWvAwomT2Dg+5ioaGVMq1hCA+IJWWFogeNtQ2KIb95DUd7PdIwp8Apaa/KDs4pZbInllNVfeNCYPsDjRjIZCtRw8W9jNg0Df3fkUSv/KgQKsm6oNNB1EFrilVuAOY12z2OmVga78Um6y2MMUww2Jl4s7QIL8TWZhm2B3Hdt3BGY/a38AV0u7Ibzb/im7vbEERAqwXnR1+XDoOF/nOJyLvElTrj4OnQCsT+R2EkYo0zEVqCVoMhlk1mISAEK4Ckn87eBfcgA8U6MKS79SNA9YSQRc4z5dGBaS6L2wSWeghKrxMxuhtru1kInMHSL7IhYdPRtpPUKHBruXcygYsu7h6wooIvpMx0+Auph6c7xmIZN8EcU6F19l4ozy4kyXi4h8SunJ9HjsblzWKX6Q26tqzitRz7jLYfQ7KRnGo687pGzUGGoMvbsZ92yVpRiK4PABy4amixkOCgvoLa4O32n4BgJ5bc897jROIkC19xdEvknKXu+K7rpQl4+3vU6o/CJ8eQ7Jo4RqdwXVH5hif5zxm3BfuI2CTP1mnREEW2K3TjBI6zxeZuQlMLVtr910rNVsCFSydXAZQOk1kE+gykfjiTfTFqnmuuNSk0TiicXMmepbctvTp5LfCyt4euyinEWv5RRgbEVWv8HnuYKIE5f7Og16uI8bvUIw83gwNKXr3I55ly9FPFWtO6p3F7mDI6S/VNmDDi4UBYE1FWZ4y0Q2oH3G6XRmUsj8ypD6u8LldMQewsG3L4OJ3ji9HePUORqwslylw46Akejz0U57U5hRxhpqQN0nugiZ3Fml84rQ7vuOzK8vuSBmP78Wh8174tTsy5RX2Cqll5zh1fQZSRS/MkNjDm3kJN1FmWJ6ZRwXpBlh8aqIHpWK4145rBc5AjR78GttoN0M39n8eUqnPKDIni845yFzkaKL260SzoIKi4pvA57Zwh0sU6/CkxsozPle1QQ8FexGsO6ePipr6T4L0g915vm4TwkSrLd7l9gokaWShllAz5yE9l5yZe9xGH+2EkfZhXr2JC+UAfwUQ2iNfVbOA+b5WRmitM2Ara8B6w1TVFrADZ7Dm2opBXDt64yxwHK5t8E97bXMC6Hyu76kYBc10W74lVD3tEhxO3Eij/tpSk3vbG4CVoxAaS2SglJGpEr5BIlXwGbvNYmR+ujUkZSDbcKzJMpFPRPnB12joMqRFkaRKtmQN2IPNlLa33b7XMQehEja8sBKxWuw8onsQ1DFeTuKKzHuONF1gkuWSBW4krlW4BJ3wUuBWIRaBp+s3vNPedA5n/TqTtnqN9sKdBtWIG/0HjLIwNBN1p2ovqiGDLVW2GCKjuwAcVwzuAxQnEqMKo9DiV9UCbesBS45LdjsVwSfPSAMZlTtmtYGGVLzmHdnbFZjc2USuwy2ETEtz37tdEVz986bQYsboPlMBaIUhPt5+aOLKJXTPh2wf2Ya9smKWOO9rK7OLNt7WAGZYg8HIGS1Zffs/upxPlklgTw7wfcqfkgUIn5+WXjSsuoibeSKUw3ME2KCqq9MC1JOKehnCSkQPj50TonoEOFXeWpZl9S7DPTNfn3nakHp/W7otzsdg6y7Pzf7eldDok/NFjpYaY+juewVZCf311EhM8aBwGm/68d66ShCN7RYby3JI1GjqnfnFSuNAh8zSVnoWPl8RZ6PNjq8ej19JNHPMZjuwPYMZoqJ7FBwNQTmZLSdU7XIrBRlklyRrYH1x9eXhPKZI7P9VoFY/HfNZEyAQCb2eStuZ6/JDWXstXsplvv+YDEyXJGLh2D2YbPUS8/qOR662DoE4PVbLJ+n78ekA5hiRysoJchvCoSV23NEp83d047+LJWA7xotj5nCEZIkBKuyXaD29fnGbpl7ZeMmPu2PK/uJWpS7LzssRwXAnwjgcWJ6G1EcxgNU9
S1u80BBSOwDq4XTa1ZjdoDSzetYJxXsYRZDhtTg/PMxjMCDpb+B9WFltFJGBgVjFeyNTwtFdLi1YMjwOavOApxH2BypeheAukOPzjztUVOBLE9w0Qp/mfUzfpwnmjBi01wp8OCteJg8cA8RAfMSCeb8/dul6IhjPKFixRibMWjfaR3dvVZX70Mxq9R6Sq+sVKsn7LwVF7VT9M3KPBVWC9RGVbsc490GdZmKsu8ghq6mv+eBIVkAE72WkB0NfOLNMIqe02/iKX+t8lxoHSLX2++jZefGCl1vfy9XCmqcfsZySP+QXt71wovhQg97gJS1lUpT5+OenzNGJwXniXv5x88LTKhAGhW/Ue7dO8SPOQOoEqi3XCufPdjyYMNHhwlDh34w9iD3bU4fCA4iJXNfY1XvuZ6j1jfuhAwa46+nNnReQM/pdY/5431MdZSpmantRgjy8FkzyTZ7iE4jZu4669JJpz0+hqU3K99vnDiS0mJ5lCKyJ9y1uTjhgYj01aJ4JyCbNHunjfclRSYaTy+b5qpQ5Fdrk+kcmCP+HPaPjwBUZZjTypQQCvDV+92bkd+9WbMAoHGUjZ6zS8k9MKDZhOEY/JIoowtrib0GV4GkMNdfozffkwNkwwuf3tRGUqKps+LgZfURyKE1qwTFhuDGpbtNjXRMO/E4yvZXvGc4QcW02TtmJ3xfSe6330swieLSlgEWqYfDOS9QvzUpHC5gXBYKzPd+sQbnETilxkQXtcRm7LbkzMK83YbOiYzwaxy8B6GkymV2i4SPxCZcqq1lsGZwXqzzfMkIWysIZhGXROyXAn44oW+498qmQ/H5OikI6TK91RdigrwNKQU8yDPpRUmNsNlKmWrvSHdcUAbnSvW0xQ2U2nQlWv3LlHAEEoL4dUyGnAN+ARGxvHiwEt93kNvFzansAdk8+/EXtOYErzBEgXZnIh8f8QK+6vz8ugN9s4/mnGXHilBIESjmUaaCrbCI7wnWZipT9kCUByyd5xYTF7928guHl0ZtKV7mDDh8k+uZOrYc0ivr4wlaT5BO3FGAYf3+qTWrfQs2sA7yPUcqdjFnWpkFU+VPVpiagONmf1u6sQiccXuv+x3lZ0AW6uRh3aAiFluhj8UrgW7ZgF8xmPfZ3gKIzLE5OT/PO7R11Ecfl9/RUzZ9NLgyoC5HFdIObCV9Dl9f0alQ4Wmi4jaWRT6HDNSKXnYl6B072FM6vkITahxOpKzLJuKvBLTnuj5Ms/LDXxSXzSvkPrJtZ+6PDSBupmvH00Q+X+EILPhuzIbecZocS533l5Fc+QF4tYKJWiHV+xabvd3gH4D+1AKI4q7dE/C4xyHQb8YU0Q13fqqr5Hsaij7ujQx6k0Stv1v4aUabzEaa0/WBI2tFp4vonEHQQFEpA3a1ahipO2fhHMsaUdYOXiGEMJ4/6Bf13U9obkZvehOo0Ufo8mfBFfzSR327EqAJOe/FStDN+lS9DStCqlkx/HWM5dO0wW8PULlc4QaQORc8nHa8aOmB2GtVp9Sgl2i6WWzhnU0+uyTgzWlLUm3oybEfGuAnJSjf0KJYglGiUxqN/jBjlJoFWVBh8k7eVWU8kO2Vws4SAbSJtxpUJihf0WxdLYccfH8iRB+OBH6zYIOyyl48bEsDF4sGlMiVht6fZZ9c4baU7T7o14ROYlAa8DxGIJ0sr0etGmxQ/NvzCd4wRB5YXYSsYRcveZ9Fh15/1z2PyXlDk2y4LR4qgMrPmh1SYBbuWzhw7EXhUSyv8pVhDzPy/GQJIMSx58uquqFTLqIv4FdAxM+Vey3CE8/GG/4cq8VxoG3B1tEsv/rk1WFBGfkW78rijS6YLz/mc3I+q0TlnBE7nhm55xTsYs9nJ+FgA0SzvevF+blvs3aSjZn5dEb3EOg6pRrtpRywN0+54aiesM14FrPrBe22ikWVdGCDxPwybs/KL0V7Tn/Qd+U3Oovjccjb/8PdeyxLrmzZgV/DIWnQYggRUAEZ0JhBawS0+vqGx
3mPXVV8NNKsqydl9w4i85wMOFzsvdbawu2mFsbqBfWAU+agJjauZGR/EePjnAogAXDJM1w1EIVOBhP8rEvoY6FT9uyYABhk7iBhYKpOsMfFKp1AaxbhcYVjBKOMQvVOsPnF6+q9fSxciB/PGCz2lR/goG1OW+SnjdB2QuvPkndet6uFgIPWQlrwEXzps4TBp8sZtn8LZ50gZ5ui3Rr29JKjQNRgU/A5knQ88ZU9kdY0E6sue+ykzJXH22bGXy4polngO3Ho41VQ2nv3801wJnZ7UjK4xvO4PH4T8Bu89Htq8/x8T0sG0+8SNfjH03/bDPxcGH4/dxIkgrLnPSKL/RshrxxJn24hQq926Y8c6InEsMrHxV9zq5RlCdozgf//U3pJ4v+hyxX5L5rRQf+qyxX6P/7Ze/g/v9HV/8WtOP8FGl3V6b9pdMXiyKJ3z4eb/TeNroCrCe5NCl6DRdjCKBtLjYZSaq2PHwiVrNkOkWQRTETZZONH2cfKy7objuUxg8VViLWgZrgNci+C09kx7vW4quXPmdGDWeyR2d86//4SgujM4G8zdC/2Hwdj188AF0f3fYwfmlvUDCXDVXv8GaWVvzP7uW2svUdBOt1dpFvOTD38QjgAYKWLxpkD2ytwHzdu4e/7g0l54P0JjArYFS8g/dpSZ7L+NYvJerLXnm3FW3HNrjpeKEnugkQBWUCIF0XNRudMDKvraD2T+gg0jSklkEZeSJYa5F4R5c+jFO4dIqI4bJn0KUrlZJAH2eYFwP7yKvgaStZ8l9+gqsO1v4WqlvKOormoKXq4f/7Nc4aOBuiJMYEwUpEm4RToSu0q+hXsG51vBVWQSD6T12EhFxDBmLWEETTVzHY1z535X9+pPDhgrsjiSE7oWiLwuEh6nAoct66ApfV3V5u7IAuU/j1HL8AATsCN3s/DQvkTSOW8WoR4gY4WbC6LF3AsxyudinLPH5iAYAdu70PRvIjgejjmQanrcGAfc9pFHSsKzNaCqMmQzhR2hdt8H914ZfgsX/ACyHzriCJ6yvnMyrtKEdUFl5M+BBGgJ3cWEgokGn1WtDXftIhT386i/OrrFLs/gRGZ0rmE0y2Sq5onkS1o827i6qXwIPZYqBeNSX4EdoQwX11YHaVFVkT10QXDLhry4tPL/Gu3JejrfoDwikIi2E9YhIIvWVVUBUZir6Z1nUhbCBtW4s4OXF5quDMRgEJCliYaRJ+PIH9sEZi64Nm5O8Rdo4SOc5VeQFl/SAWXYaaLyiKRdubDkHf6e3jbIGr1K8BU6ANvHHUndaM/xIRt+50JQP6ClJgHaZs9kjLpCBQt2mhhfn92y69vHGeWKqXq6/wQNYDEu7+36U3m2oK9xSnhZL/SsbN5pyjUe4APZd9Tp+j4FejUDsEu++l4avziaR/No1oEdBTbMCpS7p36Pv/5u+lS+qkZUNnXWpoTxcmj38V+faPXygZgptlBqF5AU+P2r+EmB1gvlSZ7G5zuBTsTj4Sw0WXNhPlgtCTMz4gYZMBeqROog7tSQkSRHYKX+Gfn0gmCS6ZDjjGPblgDikQncTRG3F+gWguX+i7Zqj+BmeLTTDoTrT6I61wn85Jv8nk52ae5Q4XydgpK5/eeSXSw11CQFK5Mxr7I7jB4XVNuBdmkxffzzmi1NGjIIcx0TdmEKDoUfxDMXrtf7yCzsci/mWjQieXvg5wwLNYDmAxLrXTBWH0EchgdGJUiUSnqICXb6JwLmIsDcWJOQiNlPLNAn5S2L+O/twTWbUe3B4cTd5soMUcrHCJdMTiALkhZGJgZ/coX/FB9LcAkcZZp595Lbm7g0KrO7JgEz/P9gMm9PDmv5VpZEGqjCjygmgsc4VgTPJt7NkocKsqf/RUaOKCUu+lhiNFBshlkmivNUawSD/1jf2gSpizw3mhqpTO6UTrxax6YZMCcGa/91FO9dszwShstrwY6zoWKfJyG6FIN5OXCx4SKEyW7uXwvTkU7b2O3jelnVjgQOfeE1
juSn1xopy2tRKuK/s7QB8T/hFXkepzB0Lm656IqMJbxhiPQ2RYZaWBA32vqkvzG8kHMR5KNOtHGPOcQW9GX9KbhDfiQCjbH9/qRFLKKaPHUgxkJcT6PDjAHRCtOmSyggv/28N1cy3oOJqqBj/d2PSD5noLFx88hs2wQPtRmSCe5pVV1YjJyYHAkLE+BSRBcpXmNN2+bSZ06tMkfr8rwuYGonaGH5Ay3tQyzYRE1QJKdlmqFn3i8pQPQ+ca6laJC4KxVz9vE6z6UQyhGf+R5J2yEzntN93Uc+6EvETdjfTFvHPeTKvgCHhzn1xiSS19USXzlCM0AeD2xU8foAtRCjDj3hSr9k1VHNJAeJDimsxsZlMkStuXiBMkBWTtzB2ERZlMeZsDcjqFdQzRpdJyrAhOEYyj47dE27s6yqY9aR2EBA2jk4zg0Xa+VBxbDIq+jJFLyUpHHcM+gAvZ2QLbBww/J0YpN3YYKHE1ZkbqoKVDPSvhWXqrcSH6w99SLANqSR/+V+dwMoJ3ILVkAgoNn9wppJAJKBw9mce9bnZsH7b/wU2/9b3Pa5PHzpTS+csPjg6ScW515FUA+o2CHj3FsNdonQSnB/FlA+7VejXQMeNV3l3HoG+MKIaa+GI34l+xIbaOmZKIdhRI/ZtslyRoVr2XiFdwK0IBbSDV302kYVj5mkoMAV6oI5vWq1jgFuntuMii8S8vYRR2J6i9S8MBRw9fuS+ueAs5PUIh+vb+Qr0DM2/RLqsKkqv9l9QLYakIk6t0x7IE0tvmDVMkiU892MPoDcjRqP8VmNYrh4TJL7u4dUfL1JFysFUTMyS34On2MgCFosC0e4/7qlMmKih7kXjn99LaO5cLH2lE3GcXfxiKhr9MfCjinRv9NsXHjELUiwIDR2L4ZdhhYxAItnGlnZlOEL9UzCehxddfGNCfUIH7Ynh1Uzy/CbsmbgEpPBVE+Kz6hlrSZ81qFZeOpeFDHIYxQ07x3xbZLfW7XFb1UFEFXI+mWvezq6RzOqQLqQ+3Od/gCY1JTaXXGQDNeE7A67XVAEo60GVK/5oCMe2pqYHBWPmE9UtNJol9aqHX/JpbxdmfzDV+53H22vlenktmygKSx6mFdmsOAoKdZPKOJ6l13uOU6Ztwir7ZkX+iKD2m9QgVJMBCPJi+ApmZXoNXG02xzAjiKB7Qdv975++7QOqGofY64+EPCAbNevexhW1iwY6rntV4QofmAIoLG7DyYOaweQY9GtneOleH1gEyNLr0+ZAnnv8av++sKeRyEHfAaN05KGkbssZ/ygBdnk1iK9vGdq569kvqiJIB4Q/GHcbEX7XQoRKEsSu7SQcnV8N5hpcpDbe4cQnN3bPqCmMYtaid3S5UMrGBz7OYL4Rwhyck7Fef3arUHzKKqICmHpRNYg3OEE6K/zo9zQHS/voSsUvnAcM81t0rugyRPVbeOBPO9oUT8TcuEs8ksIdf+MMjbWsTMEDpQZJ1tsuGS+A9RDqwxXkmJCxzwV7RcklWhwmrYZeu5N1A9lvluJy9VszWFkGwgvz4MnPhaCwI25lWounYKDm1ja5xZNb8sA8hXS77mhyGvCR1/JYRgAGZ38E48HtNcsTmaMNf+FSfMRN9BBVRPNiN0A2pw9NtG7iAYXx4POpjq8xc2mfLnhruEGOMjRoaO+LiX+fyr+tW/ex1GCUrCpvn4njM+7lDtiA5/LL9ghBLOwvAmoBlnMmo2FfIH6W+dchLsD5yx1EcpfRldzyjgYjxGGnDyyrfZGIHDBmbp/vs1XjB10jbcs6DOBL/5qlDegGfEGJrrBzU2yaEcNQt8k3/7YaU040uvNdhmPe5D4buRJzq3n4t/dQLyPsPYmS7a8ayOZCICQ9f0H9NlaN2h8hPz/c5VViyPdy8sQTqGXwelHZmo/ldhCbCv9vhcHA2/z7Rx+fck++hnO6jt2fVYcmfNxzjCXR2wj3HCvG802kVqVnsCK7HY4CgdUzX600vawQ6zIxagzpHZ9
YME7Au7aMRxQenI3aR9CWkfQn5jQXB7J6EpjGcaOaEKxW++nR1NHoIFjmbWcy8PP3g4F+2cK6Tt8WkvEMYMtZf18WmRuWZUuR03TEBh8y195ApeoImt/LpV0znPE1euk3x30ohwp1Pb4XWFa9zCqWafggI+jMYI7Rc3vSI8z5/vK4F43hiJdaHplxIeIpQ3f4gzI368hu8484NFdzR8FIl642AH52lr3LkdYhPuBfPYL+MBqVrGVd1Zj6JbOXXSoEUlqump5dPHOtmZ3559zZGktNW7YgDseUOvlvTODwgYKPH1bdRXw6YInhBKLHbfscH+cJzWymNzeMtUVe4IG8YnKL9XeKuFHF1kRroPXEfC9xIetSLNYTGRxoAhDbviHDCWh4babyk1wzakbFBEajlWfkKC38GGpkRjcBGnu8PYgwQKEFaIudtt+/dF09pwE27odaU75hrm2dqmnrCV/nBvD1dV93ymH5QUn/L3Qw549/Yw8cMJqfl+RgagIOuRfBOwbuqGbCNcOl50QWHIfULG5nouYMuVrnnEpw9nWJD0M02tx8B/+8PboVZab9qvmyQeSo0sIhTxnUFZOuedYdnkj8csl0omWzHILQRTGAILlRzLPRpKSIEGy7nbwobkC58QxGJZZBaF0MCKgzYoS4QZzwatA7XM1iv7LF7fKUxdbUTXrK06vD4vFkn6FTsjd4+/0FCoKRCpi8RPPfJ1t0gvVa0fcCR286g2K2QNcUQ4kZDXXic5UDKjUMI+LxuZ+ZQjpAUnxR/Kx8tKSB+4A1vaM8mc5JP47n7zEWbOmT25Ro+DgvzBI+m7CL4mDzyFvoKoTU7trtm9wI1fwkB50aeMs6RCZhVHJ32ohqlvbLHVLwtG3vIemy3gMWHrER0eqiYqm3wUb8f0fePE2sAYzOxT+r5SVqbwY6ojt5bc4bUAHL5ibgQ+569dWEy5hg3xg5atrbbyc+o9vtZFQjV94aSzZvpUPGZeeE/NnXXPD+rU3J/CLowgYR5xNw4YOTt8Ubmh3BwEjIJp/s1iDcXxp5vsrv01HDoNb6DSZaTLeY0EUkXHxv4u3JUrOvOJKyJzJQYwU2UNw+j5DPQL8RlgQBj0OTOtis7hWOERJyOp0xCk4DaccBRDxzMentB3zvfSY48DtB4mCeRncBRwTntyO1ZTnzeeVGNOZdERLO7Oh9paFBkz59yxij+BiplGXRPihNf3GHn+5HmlH5TgMI8RUcIzHHvsoWY4fyW+KILPaCefM38Bp9HscXyqJgFWWmeTZFUyIQCDaMP5w6d4Js5juJcHVAOBxvCiGsTPzdPszeRlY4OKj5t1vnZn3R6PMGIJFWj6tmIiyvncDd/mWN2xm8YB02vTcmgQOaN4s38q0Oa0P3c02NttyDoAAHxe+GlNQ6WeCXtxZhBA/XIVNvU52SpDmutOIzVBEAcvWPOxS+s7nqq2WuXwYc6BuAvUCwEzArzZyJKq9clygFblb13kcKaTNP+cQtUkxYk4r9fuLs+4PAOW7GhEpZWM80E45CUfqiA3XTawNzAkECwT1b0CbSuvWvLRX4YRTfK1UGjNhZWmNOXyWLwrET+TIDNlodB1rH5lepDVv9H9aubxt/dA9ebyhnt7YWUBckVEOjMw1S6uln7f8eY6h2/MmflG/+bMNeAHty/up+3AlD08kNTAThUftF6ej/U9e/zhWv6Rto2Lw7FPij5jsDjvtK9JImcmT+BN4bAyms83qMRwmVtgYuzgT8CS8iTngPmgj+HVXKkV3I8f+/C47ffRzG6YY3Jj8TVbWN2mhqP4alYzj6F7XvZk9YjzX7srG6L3N3D0qN4t34cuP9Y3SHj/mkQhS+bWRysGWgmeFYZI0LDvhJ2/HAzikDoVJsMH6qzHaqUJm3TZxy++gpyN7DWagtH91lMQcDxhGOxhHkD7UGni8zHV67vLMzq/u7PPHxz0337ZaO1qsoLoUSJvBLnP1uFiRXHeSIdg3r2yAy95B4czy
j5tkbe1+JuPp2lho+WEgSwpHBgxokRCVAvpkoRnxbQi/VCXaHjMGi55GYSON6bfOEshEYFkmCwFv91qT2u6OgZ1fuQrF6i6m4THCkn3l9mKZxssG+g6wLblp308m0Qs0Fqzebc/x/zcfIqlahvVFQTSh+/+ui8/b6oHZ3Ae5QdMo81lz3YkCS07W5I/OabltgcnaAfCFRAh4srFU5I9XdNXFK8Hai4HLH5njqabktAHqWwE/IHD0DpQQkK0oTKCyTqIteQ07AzC8jbZ1v+xov6WVz/VcJukGP84DA5o6Y//BomGWn2Gq7LlO7laCfky6LhfOWzc2/vwJZDsAqebHfbP9CtXqoOk3VSfl2ZibBfTkBn+Pi/wWbBKBeatQa+WBCl8nHxJGP6FldnB7Shz+RxO7KSTR+mVmmaI2BZ7mXxsdsAqHTqw/posUAt0vYLscfLnpX5I37aNSHjfFdAsdiLudTFYU7ivWvxsTRKcYUyfTqmdTQDvEFmg31m8wujSdeVS2EWhditRazz2ctLjHXJzCXQhoR9bE7drNJFERPvkHKH36UpJBB+2mjN0c2tccwGfGwMU+bkV8ya9exnpTuWcRcZQOxCdZpM0p6Pj3j/xyyiUuTKbyJu0nalGEu4NckaESlGvRPSnFmQUdNI++7y7eeQVMCHRBVTuZc5FF6vMOClCsNfMe7NEOuEvVp20FJ9+C5w/iuMaI5zM2x7nPdIrGveaMOIuzSp/DBhQ9h2kHu98CH8yYxSL0gy3soxlNMl8UeIzeMP2XmFbS1w2X2wSNHdQ+9LaQrhjJCEw5gSsnWFwKZG45V2ZJo19cHtKhEveLHokpOYT4CN/Jr1Ntk4Rs1ggyE46o9rG3Dnge0oz7HRb+enAp49rZLGWKCvtztgipl4hSIQt3Ev42NKqVj2l73rkO6dgpDXaaw7+WEB8iO1fCw8goauvAgjE8AsuVFGnzim9zy7JGUJPKnRVz6wUcdZU/dFjObJTMF7133LlEFu0HQHa8RoRBSW4i5h9/EKGe1qNvwK5QFYY+3g83rwXPBitnXCpWezcOKPUQ8vt3xwcXxOg5HAV+9J1VrtYiGR9G98+QrPRW2308KPtK+wppjNDMYHqdPZkDxTw2AvVRXGKc8xoNo3qMuWt6Rj16fE38M3rJn4c4pv6S6kjaRd+L9awMY0Aq72LflMPWxW/TBM4cAevh0gCu6kZkxOqA9VnLKlHtA6VB1qBV9997HiXs83lF7Ne9WZAfWxTL8veWxyB3Mv7q4oRQuTdCtMDKCJjuVN/SPyNxQFtiCSpCzYeL1/dq2fakqRFT1vHYL04bKPe30v14zUcDkSy8OFlK+hhmSJrawqiFd1xVlodylJp+sJh1arwZTcmM2pzGaIqMtl1nRDd6NqdszgVqt56PBbAajyo3VRiv8VyCTO1oHEshe2+EzHhzUJ0U5+gM2n+yti5BXYk9dbwSQCpLQ40bPaXHptSNGP/fNH9u4FO+U57YPl8jKc4l0JTp4VK1GwVw9DLrhq3Iin3L0TdU7+xtlBkHQErtN7gXN8WSYErowPOJ9GCJ3aCTa28z3f5ORyIKR+/Cqu6tRPWkryifDhuBrOU7D1mn6LEXQny1BPjjn4tg5Zf7OGP2TdoRA0/7WTTvGjMDyHzkHZ8fHkQMtvHfFieTJG2546DknNa6fQ9gAts1M/65wNjb0slVoJ9hS2EmO07/tDAFbd9ZypLlEzWh/DpCp5Efgz2KGUbIkgTqZrNAD5YXnfEOnqMNIB/7XnMVLdFc6L2mqji791BYC429WPu3iZh+VNlpKl7m7i+hWiXvif1Opa3H6oQEJ6alBe/X/X21dPKklz6WhBrUzJuo8EnPfuTmqH1OclzHkMpO1IPy0g9XHhAeG+qD7jYH5weYar0j+uIevWNcSzUMRyXIauGOld4gFMDwKGZ++TlyGGcv77Uqaa6dacqbneIpcdj1vF1H10HiWcL9nJ/hUDSRtpTSWSH34GQTn2kO
mnv3GB2ofOAlLBEEx+9qXwhxT4HEUkpEnaHdk4OqOzh94tikqTl6T5bjBhP6BfH7oxzZnyxADsfC2spqC7gg+atx1E8wFNcBXqsd8pMOG6EViiUht98Sn7zZemERrs28s2HVDAvcZzaW14kNqdLYsxY0+xQakLhjcuEPR9vwwEsH30Bstw5YT4bfgXssryzIbiXAPhvg9VE53t8LRAc5HkFovwPSIsdiKaZDzu3q/RNn5mERVfzQt5JMMYyBkWF3RSX1saecMYP59H6y15NT0XIspNNLGGPYAz6kkqroAPzuB6fcSVPM3rY/Wi8sZI8Jjp7vqxSnt3qfpkvBf+t3qRqkFCStT3zPKI0vwIb4WHLmFq4iDDT4bVwJuGf+0husJhUuyiMyUUD1EFfMkm61wJKxY2rXFTkGHCskYpRtqcHuVtij2UvkM3Zq8/5rsdCpBRoJpuC1S7rmgyDumbFoOt4zS5wzh9XDZqSAghih3KgLJpI0nYhR0BME2oWpd/X9Ibz5gCoYivACabNcREWYlVXa9Vl1MTIytbRSWltHOk+qb9W5hXlEfUi1AlF35YnuHlpS9X+DnDP1ujMdkeU3tjMoAgUc7RWnht+f0+r/nDq79f4340rz4Hm0AjNGYCpeiWJS6rm3Q49S6jHqB9DJynEgphIUCOKmPeZlFWwl/t6ZdzK0s7kiA3vsO1APDiTlXg6CdQdVk6qTMMXhyTfKG36VyOjFWC1xwALRlF2yzSPf22iBAZ2q9SAFZ0mvrbmH1g9Y6qLgqnS9mZ578I18utjmgeN4vD8wjUtRbPtLpHuwj2ptwNteoxReYVVcURDqvPddp0lX429veyKP5F8umF6wOfuAO1OymR/SlLgK0BWAyqixt1Gi83179qnaUwzeX0YL82yyKwr3slBbhVrK5bNR1DKY3V6weGj3+B+EXpLX1azL1tviemGunXwGgPnIAwPRl5TcyLIjafjkjN+WjeQ/7raYSuc+u62Y3/9+jlrV7spyMjljbDwOIXS51IPeovHBG3Y26gdfzuu3G0YIQOTDXefelhN/LZWzge1CULNYfk7TD2PEvY3LsBbNbeJoQx+F453h8dqlzdIBbRFN+mqtEjUZsPH1gczxl+X843Wyzf/RuQXxPuicL/CwoeKxMZzKKpVSkW6loFe84Kq+yb5rZ0nR3tpU9M8vAhWsgl6DorwIHN13KUGOa65Fr4VB0Vy3QYBmO1nygIDzpEvvzjKHXZz7L8rfWC/In4RK899z12TFRTxtLsUagsLumOf+60QvRWxCF6pX7uqLP0ri5WqnVEKKqtpDJ65M7q9SC69hFMRVz102QGmcTheN4MldWKKLKvOTrOTLUZffP2apRrc4DUPHVfI4ARc2R4/C9h3I6b2EAevire58iQRMbcK+eM6i2P7NoWVTigbGRZN+lJ1vtcWrz0SCa0P7c64nUkXtHeRAebsYZShilLvHvWYw7Elcy4U0+/c42q+r8gdpNSKCG8GFTMwHOXBc0rVlsoBP5k4zb/qt/XRnHfeTlwV8PW0gwgd8DU2v47WCoOYg9BVElAhUVYxHJoSCfKr+oH7LCM7CDF+pqO/5cyVPfNi4jt2WVxcxGkCqYrBRL2teSeLDkgSnZA7WkcbvQ2D+IZedwg34Q8l/eqmh1sDyMd4o4RGhaJpP1UnfbW4iX6roW/pECGvIb3rt+Efv06s9Rrhn1tB+J/6qTNSmh+NB3QW7VXuFS54ol33Xbuxfx5HzLsiCaYS0YMLx1QGqnKEDn025Nb5yvQKRj22J4WCoTBMgeJLyr6nNu9OPlFjOE1kCkrpAFuMv5y9qmSZCr0SBrsappTg7NNSfjY8PsyaNN9Nq+gaxK1qsmAICGgjy60+SIu0yGvWLaxPVOX7fgCVAzw4GUihWAsluIJjG+pvo0PAQZkY4lRyhcYG2RjYxSux8542lCn6nRgYhxiOqPq6WDdmMv1YdjxZ3uFwJApQMy10VWAtjgZAzr+kbXY/z
zogpDOcr1L1z9n0cpV/I2+EIc+hudXUhxqIyAgRBq4knaZEITicR7/ENj9OwLg0nZO07XABN5jUB7VwvdrOpa/9aIqeEDZKBxBhZoUMokRZWeD0Ht3OGCqMv5AP7h843D/9QypQKlLk6UPSbwRR00UBJQrPDDM5oD83P9ruUJFcaIvG1GgrLT7mED0DKzp37LDK2YFhIiomiR022QgSKY+S9sFO4FsbzGYFM3BnXkHAJLLkuWLiegbAer3J27Erz+sp9tfK0DYUoNGXkKhBscBB69KMv+fc6Ro+g1m3zFA8DToqmcBuXNksw53JzErAxX2Cgw029nlYghnBJsa/ewNlZnwgmRH3L0knQUaGoHT0JwTUEqWYB7ShEP+NuvJQNDeWslcY7+mICqnzDqz9i+FarNxm3+PM34qhA/qcGozTxEknbukbd+7ZY8L+IKk04pv1wccoCiQALg7eRA0Dp1dlv1LwzMKv3VJT+0je4LS40y9P5ppkz5/S1SCeU+u1fRsxfl9o+iWADKSV31UjBdklOLD+yM4Dlv+Gvo8PaF/vGxH3CFbtYM3PClNJl8lIqtysdZRGeIdWg0YoWZucamcKUEQCo0Z91SMQKpeTON/NGrxXqqWxgaI46wU0r251eXzdmHrkzU3xsu61arf0qn9RbWZzsoZ98FPyFR6gX9C6pE6MEj1DeZhGsVw1TuPBEua/rtHcBC+ZAG2jVEBc9DAb0wwO5hoNkWKvmkFxyH7c1g2xTVXD7ERdTRqhPozZco1S3VQWwnq8MjnSadbtUwcyRxYokBib1mSX0+e0vtb8DcT1FsKEpeaEUPXWRljBtZCCbVENpO1K4cc0XbVTnrQCqRasEe3HzwbiHMyIWxy8pvmsx40sysJAE534/qYK6UemkvPkHWPSY/cEwRZzKiTDqaN+DUp/Wlwp/NKU7PcqjCrq6BAov2eAvCfRqSukDWdbjUs1Nk0tyHnt6UErRItbbkrdApfX6O34gAWLY1kJz+62+P7L4dA048/0nyFWQFv2+BcQ7gBWX8348NmonzS3YWuvEYwzek2CPt5mfcS1Gw5wBSaHA7syPo4m2KghJsB1OBVBpuvpeCKrKtfL0rw1XoYGEjx7/+bg1xG14aiMahQ2B7Yb67WB0kFeKHXHpaTbgjN6wa3bys0DZeCi9seFAij5ol+8Balpl6Ucnm2c71bNVn9vNcpDdG/7U1im8+8cPPvxSqnQpnSqEslnMw/FAl7tFtlAPtpW9pRdW4f51467Rw7enjLGi4MVaA91zukO+k1/qXCYRK0A879insRIA8DWX3sQ/6jF1ESQioLSc1v7pN41oL+JwTSiwavqOSPdHIfTbQdOghPDVe0DQsflO4cPtUMwJbIFOg6e837Kp1Y+RLAeXmoLqE5HP2dwdy6485+xg79g6b3OphfjO+27RHonhHMgfHU+pTqiORI+2bWC72ByG7QRSM8XZoScTrIQI3h/LWOBx5CQLzOM7Ig4HLR8Bm0CVDlhit27C6hfoeEXvn+GMW71BoSaBcHrE8yovgE2EZhwzamw4EDKo4Vv0UG7BnWey8ZtuEJ8S3x0wJhEm2Aj+jSDoUrDUsOZiYOzmMrhx522Dv0WdXGoT0/izeNoe9M8BI+vuwwP1cY+Dchejli6EO9CSrMNxya0lyqgLS3vvF7zjBwa+HzoYJHBI2XhSObXfIWukiuFl72VlT6Xxn+OECaMMHCitDN6jKEa3lDoJgYul+4hEcUE0wMToKZ9ywhpabB+h/XrMFI3u/aZONjoUr+sh0Hz6uWedevdTppL/E5sOxzpZGFruv6lyUmLfGaWPdol8XRargLr2ykQQbzYNvZe2x6zwJXi1lYqnG/2dnnOi5ptZY+VK98+t+DgVwfb7+iZhN9p8jKdVq8XhC3b4evgfm+hGdph8S4GesVrB0PDfs+ZeyzQ2mCXrqqem4JUT/6zTR7k6IaWVwfQJNskiWOYAu0bm5W4UMz+G6tPuyVEeD52k
+RZDJLRwn4CU24pF9ewyxcxqPMXsQm/FWLPSMolgN9iHAjo/V6Nw6BAKRQ0fQcvxy8LtFaOvSBJ8UniNktkRg9ZbD3IXgJ+UMc2u2WMxJ/3+WlFqU2KnlZdbsnO0cnabua26TGmQJQugEeoYTNx3n6ZI+CFXu0wTd1VoxJIBExlPEf3alz9JtgeNx9xwq6f+InU0dDF1roqD906jRzU6Cq/cc68mwN9qW4zrMvFk+iFnEVfXImsJqFXqceQ7XAvvNzAkiBaYf4ZRt3WtdD8YglQ1FlXOlWTI++3PMbaCDELXSdest4JF5GO7I5e8lglYJaRudzNhyNWtmFet5NT8+46OdORcEJEbd6NLNKNQb6hUAkEpqhbYVYaI6SCEfo8hvHVXH72zWh+QCZLxErXgJRY3jafM6RWSRbVRCXZge00jNkKl878mcVP2DSfBVDLZc3YV3TcjZZPy8watmKdzA3ORgKB8ly+/91HQExmayEHGAB0kYHBQu/1Vl+bOzaE06Ui9IBoQQfWH4AG6plL0p7ijo3tGSdhbaMO9Ot/iKzMcF+YXU10jQBzf7uUSbvbnJg3zoTclF5qmlB4kDLV+HbH49w6rLxY5G3ellvBp0WAGLct5xgqjMftmKN+UVxjw6io8OUvGMbBn+5LVEimHn5OWZHy/mB3Ejfbw40Kjh6TAFDo6p08G8AsnJSgPo5WQ6v8QnB0wjA0FDJ8XC2tQcjyvrWgL1zqQVmv4STMpUQJpA1Lb+4N7/5cu9m/dMUmpnkqvNFKw4ghdQlXaicICsxt6mGlhYKn3bdOUR7/muCRcF9vkJkYH8G25ZL5esDBJLKfgg0BQs71BCUFbijmd97N/SigEkJ3ObPun+T1a89dlc9cj+8OzjFV7dDvADwghs0yDPGzXcE/DbEy6GckdLaEUtyYK9ozp8rXL3p0+3OPAFR78BYvm9EXIywIl0L5hD+F4/EsabdvHBpR+Us62By8PGYeSBQZXL3WQGQV4yIsEeGFdsh1kO5GUmFYN5AVcecDIBpss9fHBeEARb9qc4fwQeHfLZbUjGn/bsWE+9YGDFgx+ZBYwRiHeOzuXWN7X3ophD3qdRBh2wfil9YXM6QyDfnzs0v1s4W0zeid2sxbj7PlfbhdZqrX7i2Kiq3ZF8Z98GTFsMEMdDxo7mYjz5BOz8itinK2bBPEaf6uT3cXepR5aEUg9zWP9eL7b5ANgPZypgxLDNHLiD9vbB2iMRdoISj93Jx/UuPFmi8XGm3Sd2h09ze3bjQN2zMguPO9siXzm9UKiK7C762sD9w/cAm6VBpDS0WzvoldueG+BVKqNESKVJqD3nkEdgWVoHnVDhs+xxnGA4wuLJ8xV9IcO4x2RLKB0KFeBTvFqjdNpGYGRIj0lsgMaBVcjWt+dx+gHz9xNaEh52fzG8w4vluIJD5D8fXf2nHWK0YOKdQd6+M9+8cRSNLef5nTdBcP3eKpFcRdBFrrqNS8RqyaL/bhWB1ecCC8EYyPAYfzlwc8TAo5UpUSwXQOOfNY2bglgl7agnA5aNLzkq3LsciV+NyqyB5UEY1EFiZIF7bNF1BAMfu7o5zdwjcbx6lxJhTr2EIxftbAsQm+frZpuBZqWalxP57f9MRiwwYAr/po02Oc55FoUPZX+CrMWS7+CAJAx1BkWNNtpwKm6Mvp8sy+lcA++WdVqPvbZaVtsqZv/h5HgkItx0wToouz8cHrWjLKEm84CB30NoruMshcTC2T8KqtItct+KFYpsJBqxl1mft3uao0vMJzLAiXsFKWS459cQMdnTXUQJ+LGiSxx5hq5rz0VjOtu6H+JxSzIBvZphfrbfaur1BFncLk4mNhVAJVJeJr3a71wRGZ9bAuzDso2T381WQuUMAgHHaxFFIMXmyfvY9JBQyQG2SQ6mc/uwLRHEvdzbdorY1z5gsxnwYNRKZGpF6iTB04ZuYC31LptHXCT3fBImF4t7Ry41wEVPYfRgixP1m7k04stSbeppho3
D1xFoW45/pBJYPCXu6tpR6QJHUjkvYuXxmmvDP1aaW/lGv4GQxwIgBoCfteMGr/CrMRYViwnrlkvYJVtvddOiVOK+gZVAhBK4fq8uGdEUXbgZmVQB3Oqcl5/uSCDYWh8/A4jlMJ+YdB8JDTWzYtWwihkLFrRlhjfkpJrZ7l6aqgIrFcv3BRXJuCZW/oA5dZzpIHmiomvpydKRdoJG2mJMePz7QPiHOI3/UPKyOUWOUbiAitkzcjStjbU6+63ewYu+3nQcIGzbdPed8awo3hyeYFF7NUltMOz2MQz6s61LICccgzzz+89tDs3hTnuKI4njEzumK9N3ChAmF3K69WS4XxpxB1kzY2Onn98jGK3xi1AlaK0tr8AyUex3QguVewiXJwmjDNQIbxx0t+9snoyj7SGVaVtUc0MCDhZRk2KFMhsmJjcsS/c4PVYiVxAJjFMxfg6Xcg38Vv43l5THAvdOIPEl6NRv7sQAxBFOoyDpvQWvsdkwKw1hpYEcSPlvoNE1bn46ScSCpgXmcEJrUW2z34FEQArRU2Nl97xEgz3pmu//Y2KIenNYF+h8TdTUn+moD9Ip8J3LMY0pC3f7a8AIeUAxl8wg+fFQ09CRtzzmwWjfKbSHEpzc81NOKrIwTBb5zS28kMeKBW6tV9PnR9jtji1wBud5BRtiHt16BLwFPt9NwWuhcOGovZSM5lP81XNPfdTj0LNO8cSsJoEEnPa/RSaybpvCA0An8sUPc4bdyNkNQ7egutPPDjKTBH8HBLvaTyirbb4kXx+1p9McZXMZpdZs0K1J1IBBgk/JBDeJ9Ji300dkvuyBeVFE09gjna340cV1DhC2g6KYyh3lZYBdyB4BhMW0r5UYVCf0lCF2DN/NdCT1CLRLOP2RwSRIXSyBklS5ju+jBd6lNctafiC7tJrkBf1O2J6P5esK7+1g3moKAXoPAeOYE23rOo4zr0bbvX1hrr+wPQbICjZit//55TSfnmvav4r0Yv9HhjGV5cts4z8TE6qn+h/CRup5lbvY5VkjsLC7gDlkUBLe5yVtXZV+rIC8hA7q26F7o64n1UVYNuemV48Sv7s2pgzl96dvC0Z84HCZRd07rfRkesgmo63h7/ii83Spg/EzYUg80tMthdDUaboIZIMTELDi7OAsaOzMTo4a4vKc49q04nvtvwFESghB7UeorH6IFQ9k2Fvx713T9yQP/yQXnMuNbuUL1d8sZWpi8HPfp/RPkw++MNOMlECJl7fntbHVX5YocY25CJgSh6yjHKKoJf4Rvm4iB3m9fyV8sqqN/85gcWBPl0uyn+RiL8KuiUjs+/dM55wQV/PSBMLOWqSd9ikQW/4DfmRq7IOqEC40pBNSjug06+oxFO7TDSeLuIOKHBkLe7sPDsr0aXDD9Xc1HQSoNOgVKnnKp+sg8zVzqz+K4+4wUI9IWB30z3rm1eiDDYmT+AZzcaWXgOcgWRdzQyanPey3lfAOEB3a55/MvcaUpK6IuSvL+x8Ks0FUGcbqN450CbX9q4ml7PapsV/4uj3jGvfQhMAJanmiwCOMJU9WqUKxVSfd7jfz6b/tRb4VE+87Ae5oRxItN+Tw3VySnERuGRX0YiC/Qy/1wU2MJ34a1vZPSVzgTkqp+UnI3SlWA+ldMC9lvHqH2DvMjHqetQye3Ozt1jPDnEvW+fwlh3C4CvcmRtXK8+eKTfI/fWTr8N8kBFaf/kyasLu/pXu3sMEdc/SLCVUMoueuQXd/q6PzW2TGgSkrTV17ygPwZYnlP0EkmSfQ/EqU55DntAHRRsA0T2n0OsLIt2fSDlg5Dp7+o9tZwlKtnfnNFl48d4nug93vmX6bQeqw5JIcgtrD7d46+e148+YGeylrtugv/PzPRflnqHlslJQgGz+lTmidaAyk7oZA9mehkrSAdmtVOvRCQDp8c1H4KGe9U0zm0harGLLhgItOuQEe11IDOkpxciljdz5jYcpVreIFGkxDV5aRhTIFxAQ2UfZLAB20K+R
JXQ0dv65+yUHuApfiA7xMsgJapAONIEXmcezRIiioAFbyEnn9J38SN7dsEdk69VJJBvo5bdJRdpIiFwp4MqUoCjVCYB9MO4DZDQjP80fKQ9sNcvR9phg4cd4840EpJhBMlR/kZHjb4SatYOywYG1znlrR93A8DpBQ6JdoN84t1eNU0OfpetsrZdItrinEy6ipr9bZJyv6wFBrh/XDC424Hil1ALK7qpDt3PxBwZrcg++4+K8iAupbgKxOCkihVq8LUBpxP7ZSF6y8mZYTncS4HWHVxk/3McF31sEdgKfn9TTOCddoUXhtsWSjRj8kVpjuhfv8HcMvmxo703f85CxPfrEPHPYhJpkC1DYdnwN/07EQr0/eBHMxJENMc/uT7496NpwRWwbJzT/CYSaeGnI6uYD6feYMLe20MpLg723N5KxL1c9OuAjDVtbTF4za6YTypcQivmJFTw0xRlqxPRNMYVgwe6Ch/OVP6Pin7FBWDIfSHRTGLDv8/JVuoXBRo2zZUKnA+pRQ4lTiOgPA8nXy1ifBnXgjnCEjsFJLSTjGAm1UZ684WTc8WJuqA73q6cu6vNDiP6lh62r17vLIimd1WorJYCP6CSGu2xELZ/VOJnLs/tvDX3810qMJrV4OJYgYvA/r+/YD9P2u9OFOTsvAMiaocyi/RdwXjxztzPeuS4DMUPNUeNnQe1XMKxxxAXndtOwb6NGFcDJ6L/e88L+xDfMEL9b8IDVRbYcN/kfOAXfa5C17hpf4qFfGa98hsHMJl8tn6CByp3FSlAWLndYTQQnV4lZw/65LBr+8qmyXO/tuIGBigXyGi///pXIy0gePW7v0T/dNpZNFcYs9ukIVPc5N1fpVa3Vqbif88y/x70+11Tv1u7d6lD1v1lx131HeJmGFrJ+eJoN8vv22sKKkbwlUxBFLqz45NXtOtOEdr9FV4VHPeVkW+Hgs4vE8Jkv14DL3iR8fRURkNYcUe8s6/i5c5LK17keZ/GQFJ65RzN8FEZeCCwuraQsShMPAAnrnN6asXZQCJ0hRuK11SATf+L7dmd+wU6ty353Zw8aP3XCZK1HLSknWCr7IMmAKLYP3rCGf4ijJ8HDRruaCq1KNUZNlKDs4GHLIxXDDLP35gYw0I3EcA4HnT7zk/gISY+vwjEJcs/rzZ7trB8V/JA5qyvZ7E7KIF2XR1MX76xwa/XhtMdezdhEQAcTTgTz66pL6ns4ofqA6HfZDLn4JWcGqrvr2hmj4nIWn+X94lc0coywPgu1YjA8LDH9qsGwf3aFZPzdXyCMLMQyZPPfTiOidenCXt5G9SYrjHi+sMgry6Ah99tZuDSKNx1UYodweYmEqw5zC5oKwF57Zw0+hCCSwdJ6si8L6H8uyJJhHcZgZGg1eEHP+TeLLA+5RyO3p7x2a3eY/QehDIbzQUMyPk4kOMDgaJmGvnYmHs5Uf5Z79vKX033cYpDbCNz3CuzJF9dp+HslYritZ0ZcePvS6MOheUvRNKvxc5+VSVjXIFYR/DCg3wBDY4w6hsE2IP/Eum7bvJzvO/16j0uJ5xDpyde60susTcP9J4TmFgnuJ+vo4SoNgTrhpdJs2nFP0bI+2yXJjwYJvSADbQuttiC6Z1/zgDZWaII19Zk9KIIcfA+HkDAn52jUY7EMWeitV/D0yr+GupSAb+oI19u8790maiktfVhV56c99MMGFoVk6IENRQ1+E1RsC7xevBG3pEGQTXR++SlGzY8tQPH+fWrvucZ4Oly2mO4LOUEP31AvFeTr+jz5imYSSIohNNzZg41GHaah5af0sesQmq8f7kxV7U8jv+6Zc5PrZOinc4Gl5QL/8BkqnX80qL3dHV5AbGAlliT7Sd4OzGqzfrDvKxjtjA1AB1eBN5dwvVXfWTIzzP0NznJD8Xzj6aXqgmcuM97oNR/5mQA70x6a84gCP4l7SUIebt3PfJ7/gr5vqt+/74fjdXgeig4IJuvETv/vDToUBLm7ujJPFRYHj2DaWQLg
OBnkpDew7WizGPlKD9QPil3l1JqFAHB6r93SD/qu3YKJNfaxyFOawtlHs+tWdnl6r96jidMTDOadU1fpOfyzPof1xwh3nPpVW36XpLPQYFKtuWzDN/wrptPR3q7/nmQsKdWw79Yh/eVV9bAUc2F+q0Lbx5YMRQxPLNL/urjti1/+EYzIwP1orzk/NVrL2fbfpU7/CKFcVgEhRKRCjg9DgwJT+dV4tPY1pnMf05jrMoFjbEoQvWj/1ptsah/3xYL+ZdtseB/0RYL+R80/P9XWyz6/9wW6/maelxAQ6yjqtfcHuMU/OSYY9DKqlr754k8/K+6Wi3r/G3zf7bFGr4D+JKi7rp/2ykLjlEM+s+ZYgzG/sMUU/9iiv/XGf7n3/1/md7wdbwLcSZTE/5e59ua4XH57/D/eXb/CzQd+x0yNvprOvbQxcBvnw8l8/prOsbkMM2RZW97Q+4Y2Olm9ovjwlbpiPrLVi/FcvzbINKdpIh5Y7qgku0vwZQvx1U+L4NptWEj5jfaZ8ANTRRVPtY30Ac1Ebyj4SwLmrkj8ZP3QapcoBE5eZNyVS8bgWGqemtEehW/pgE0oFhZEMBzMk03gRoUfg0JHBQ0HQDBKyu66ZcFjb14jDvKF8uAmwbYw2YNQNoParLdBc86rljLYWX9YWbUNaCNAfriQZRlRV6Oas/jtCIMBf3qI5J6m93AmcN8XTrwLGW5EaUbPd/LWC+mlFnG7i2DL9kvnbd1sWDZxMSf8aVl1UlP2Wit+XbDeZFwa7ktcd6n+TzAzTWaATkPcy7Quw22pssxpcj+jZg5ROs9OK1T9G/iROHFCL+RF1Fa+e5ifeCJYP4Fa4Dmi0r8AI8BIe37ruIBHg6CkSrGVeDReOcg92Cl4/BzMaH4zwFbCBu+murAKbG4eo5UjWDl+P6tYnS7fAF7NGC68Xz32B3EREK6jXeHbH5RZ/fXZWZOL2mgtWyUVARJLO/hSxXNW1FSxH5yZtLnHxf3MiXI/yFVl3mN/+/bWS+jVg8WvXH0VPqZL9yyYBtVdww/303J02k+jwn+xOD8mF0Io3D0I87xlkdHR5rGczSQSKGNSe8FIr2zwYxeHbQHonfRw+i4INyAPtwcZSjwsn/zqnGG5fNAOVyHICp78v8h7zu2ZFd27P5Ii94Mk0mb9CbpZvTee369GFn3td5St5ZG6olm91adyiQjAOy9AQQivfsB8vRYzdGE0/WU1mbAYNgF72nU3KrP+s23d0pFtY+mbaVY9fFC3v/r89jMEli6jlajJEaXHfG36+/LMOXRWkZsksyhh350/PK1udM96KXGCTyduNVPjGn+x14HosQNkqLpBsS4AznmRhS9jkQXRbM8X89azESZcF5p4SZDyyfR9zzh4iOH3nQzZut7bV71Py/5W1unZIPqd+6HMLnMSJKPGqASeR44recXd+2GPRouT5w3vR5QCMyDm+qETLZij74+G37/LORfn/fvVvPf87MykEaWl3GHJz9M+zDW9e/n9eK9LORvFAYjDMAVw9yIDRIoH2eFKIZYHWrsaufDXqZlP/bJ4RxLNVjuPT62D/4H4ZLuX9+UUBKpAYKsOAtIAx4pb73zyKw+Nwq387jFgOrHrmGimIVte63ulneu7e4sKp68mr+H6jELUVEaITLd2JMdP/oUCLYrJhwkN3Zl+U3q4xHNoXolYKfANF1SWkccd8ri2WW+ykFrF7EaMg9SRoFsplXFob2sJdPLB48av5JHjwBnsjEKkOdigIsDvxYco5KtAm0qy+eOeqJGRmFbvLR7xxCYlclckKhaB88JuR4udZq+BqZCXPA12ezAgQhc0+R2sJKR+ZtcN35HjP/HReWiLCCQYaZ6unhf5QdJ8QhIcDzpab7Kvh/83ipdQULvF6k8uYGOxM0I4PqbkYDUUDRyEsZJpzG/QEnldavGDXbwCCnWHZswBOCscPRoMxAJuKjbcOFJqoOsvz15vLfJaEDhvLsFOgFxaZ70aOPPf0wHk
yRYSxZ8A8eINnMur18foX0aUv9XGracg0zA8CIyEbjc3bxFpZX9wzuw6wpiqpJdOOW2Gdd7vOliPtuOeKqOenTM+504U/xRlBnlD6C7NM9SLkYnNtOcAtAhV6vfsGrrhNknc4fK/STYZbEDdwL2fLD4G+NNKoMJb5F+7UtJdgH1uQSWBPR4vaJVclLGjMjgtwX0yIdX6+iZqGAyTArgUCbf+PJw9sgDeBRXLqCwYakufdu6v21sTZi4Biq2H7Dk3BB2RIJO+rsRReKW+3mYspJrcaltHD/EbzqKXHXpJ6rJ6hdz/q64bD0+Xa5N5IqK/0VLBxHMD7pvd5ifON3QI0roaXjURHBmp6RPWS4/uHtbE1IrqhF8/OH5M3D+bc6egHimUm1LvUoOwrv2WSI92/e77agnXhc97S0NeyV3TTCyT4tuBAZMGs6vXrhBd0evwk0Fc+Q9+IFe3CfkZtjKMXKuNUMMw+Y8UUtHjwPlk4btaIMJva86TM/GC+wuYS/XTcjRJJVcZY4c3VjrN/dYCiS3kfnHMYx+YWaUQoQdeJEL/HSEtrtkiGENevAc9tn1dLh6no3XioHOFAW8K5tO8qvREcvczkhNkZ1DJc0k28SsQ83BBq6qWEuc5NVcx7RMNZScC6QyVMh15Ofgxn5q6G/8NoDgepVX3ZIqYStTfo+4MoAytLeIw8D/w1TOFyN8TKkgXgJtH93vgFO+Z9auNYKT7FMe/s44QckbcV2dHvKUphF6FA2EFH8jCEYTDhLPqIY3/i44L7P4/MvAGvHhNon1EH/yj8wiFld36jR8zz2xkmAjpPMKUQPhkKk+nTbf3a9njbaK7MYVpqK2jPeqZvnS2iM9MekSBS7xs3MNF1/GSurR40xOMuH3SUwCejCc8kTnqG6dse2vAIrQDDdX3v85/wzjtxGvbJS8iAmr45AkSdDnDOoY3of+3T/Gf7E+pZMBFK9zF3jIvtxXicB+9Cnglw+P9NWg2nwFjqPMLrOplE18/AvEBDBWgm/pdOT6X4AR0vvk0mzN3hWIx3VEKJGl0OYHxLm0DN7f19r1AioZDWgFOxqMbbyNGJmAreLw+iLd+vAt6riwyE9omgYN5vE7X/ZpvitDDvw06aBMxMAROObw6XwYQe4axkDdeNJOJ+5IY/1uj2qawFr77dTjrvW7G/H2LRT+xvLvMpdN0UwzKvSF/Kh9yySSF56N7UJB0xn+t8ATc7mG7/gKeKoqNKrTt07LjIfpJ2fWPVRM7e03gMN6yRCBebu01OOoD8ssi4j8XecwuoLMpHnfKWBCM3iRZOHN70Tnfm8QY4xD3TLjxEoXrrvLspv/xiP6fErwsQLumwB/3wxER28SjY7GfJoUtvQ7Oq/1WPe/HZHnyy9AZqY3aaJ8vYXIuRFvve6VsCT89a6RwDXLwGfPX2A18gdcoNU4LD083+WBfb4TI5mg+3JQ9Ff+/UsGkrqurAYwh2XKoe36no8lGAE2LDbISAO44ZQ9HgFk5eQns2Yo87rHcEkhykiMFU0XIxIIakP3prt3iuYtRoy9/EcoWB++77XJp/xTwR5cL5LON80o9qKGdERDg0El5MgvmUljJJ6QePOWoDKM8s6lRk81Pc+4R72/DiVjr3GyX8I9vaiqDCXmsB6/l2L4n24lvhpQLRuVOdOdHWgSXbo+ZBB6COg7GZ2bqCfspUBqT5ddjwIDFpo4KvDZpznxhmO4AXUBO03D/gFldQoBQt6sXiOD5aHj4GKWKkvEJec3IeQiJFyz/kPdbxk5hG/IyLOzrxHXLe0tlCyy7XMJ3yM5Vy5UCCcbtGZRsLWHaPYUCTX/bBh4hhqxZtqYfnexHAWsuIcFvYrio4lEslhDQbB2EtUiJgryw9CBZY07m2U8ApfQ7xqY4mUTL+U4O+KjEQJtzawUslnQurm6cQGi6TKkjcN8ARZeoOchanpOXAgoAfysdSFUCNiLRSQCfUZxGpUohkYjVMQ+gRo+DhuHvJ+wcykYv
icjjKEWATSMSwn83R47R2Dsa1/cl8V/wAnuMLxpO3PRY23XL5/CXlhFiboaM3vMHThNy89lX1Caq1licMJpM9KUg3yT9cU8Ehg8JsjjJ3O+FSbw5bQC+J4b7v4YPFkkM/qV68G46JTdN8S7ELF4DYQT3ZCGEb/hMwiMasx9Gjco1DP0q7rfXweA+W96tEoWXGGlGRsw6Rq69I/3jtemL7yaNTX9ZYNi4RPreA22fLDwijVmR+DU833h2ItgPjs/F40dQuSXTEToC2kKVLuzyOh2wJvI1z+rLDEi0flOlLUldQf6HZ7HR7yHR6DyitQkg5yNq+/N21+/4B2X6nfE/NH6gex/MXTqz7QHXySz9zzam+qp96XiMhaP6yc2oNmAjLY6YGjlqN/qW4V96XtLwIF6/GYV/rLHeiL6mcoMB162ss7lyQJ+N1Hk7Q3+O9x/t2wCVy33cZNZ3MyyFaL17bvQiTZu25hjuvXlEfe+CIz6bCIV+llaJhOMIYE/3fb5NSViQ8rOCFbjAhllJX6fFSdzC0fTZfwFJy54qsDen9q+9u/7qJ+46Fzlm9jml1P0KAq0HRGvtLDsCqXIv7HHDJS6M3BsxY7ElUtUqh1ly+76bDY/FpYu32rQILTxAPjTyKN6B+JHkFWi3SXH+FKN1sLq3vcaXMWAkbnlo6XbE77ybBjyL6IpYNnpoKg0yfv2uJnccQyfQpSjLCMGw3YyaCgdGGl1pp1+32a9CKYtt1lq12b8sLKv2v0FHkGaJcMEX/67EvU1BURl57mxDgPFOOr8fQISNNhPUNwyadc0CQh9Aj17O5w8UjF++ReEihK0e9swHGuzEpQ2HBkC0ouk5E56uN/SrxLfbfy8f1a1Oy+H4owu+WgfncftDmXqnuKIr0hbAaGGdeiOlUYHb1BCYhNh3LOYmEWy3X4TF/FjRgQCk7cEd/s6FGoB83m8lsxNWtwygAeRpH73V4PKO6O8nydr2RP+pvlbWxcPbw1EmUeeUsIrEZ3si2nKlQJMxlZA90p6WdBJzYylx0ge1c1IQqvFCjNjtOGKuaoAH2/9+IyY8oAACGjGAXAAA+2EEiig2Xp0KuqZI9GBrG8PNogB4fKXoyO1ppOlRuQQkvIIKqhI8HB9LOdF4rA3WooO30pKVbXz1oGER75xcTipCEhhvY4grAylec3Pzkjch+8PfsEcun/Yz3nG+UlpVZsoo3u+8i2jXpX+CgjdPvYjyE/5nvb+QQFOnfaw2AoQL+vGiKrjlOcnoGRL2B0M7qnGlhYfyVgLxyp33NcQ8DrNSfcPYHzIapaCj/GmLxAEc/6NN3EGR6wetWE33g5gxdcc30aIskYTV5DooesHOracHfInVkZQ8zv/7m4Z9NaJXCmRILc/KJF9tAkQA9b75QFtgFzZPa+xgMIivsvbXPc1QxFm4MDF4M5c8G22RspUCujZL0J/k+ChqMl6UfWpESxdzYi1gGYfl4aJHjFo7uTR5tUmpUhMt5S8cFe9pTQQom2dFykU0EwlyuJaG+cYW6LoocWNiRM/9/OqszEKmil2P8exvEVdbcHbM4wQl+KSle1qT31uMfk4xdMVJK9Kc+F6p1NpwDiDqNjFDw31C6L0PjEcjmb8Ha4EGofl0Irb2X9QgaYJdF7Wy7dF8lT2PtVkAaekwgKFBSZ07hxTjF3kFDDi5pfnsL6xs94YQjNkBJ9WIIIdPaUMQtvAJ9s3Rp6l5nhWjvYG50QeFumlTJs+sgVecawDh9cPIacMefm0U0AqtPdjAkF+He7mOklm2KbGK5Luw8rzf1tbe+lJbDkltLi2R+lkfH1+lWAQMcMu1gpHuG+CvQbNAt3SH12DG6T6DCNvsHUcIFluKnKLJMHtKbwH3kJf5symOnYdok0RTdznz7qGOd01qm4ZOUG48KapzcMMxV3BthrFfKVEDwQ0vYjdqL0rRBNBUhdqzBwHonilgBc20W/WEPjPmSQ03OiL0clP0+wrQs2b3fkGVDwmk
BHF1zC7hhBJbi0QMdItFbnpmeZ3KeL1QTaCj2hR9poN1YSTU4oWlgdMnka3I+mu7FG19JdpZM3lEj+Q3WwkTSObxQHHWS1Y3W+KnH1C9HbugN6O17vkQV7tW5kbvEgSrk/ciXCw5WIO+7ju86vhkTwNDcK9VduxkwDrdmNk76kt1OtPYH68yYByobevEj0f4TlqISPAwt1lu6wdUAcwh099em5Ig902CjT7vO/sSrezhrVAXhX+jNPd7Rt3WTnvaggmOlUDdoe7TjtDC/143tcQDDxhXPIU8+NjXZroD98Sx92PlrEfbq/ozm2auG5itS+7gsuNcHcckmw0GHAAkQ0GxykHYS/K7bVcdEw+ULuQZ2SOrNgQkp2/jY07bD7MA1GCJWZhHZ4MknjcILg4vq2vRFkmYSzcKZUZKfdoYRwTv4ojTzZ5t06n51a9p+xuQ1phd5ho0u0LdB9wDYl1bsSc6bAMHBzYFwtnofJmqxIekRGHa7Uy4rJS34265KAJhpk8tGX5gGZcDtZc+lGC3+Po9ggv49GkvhoAO3MJuf3Xrf6RfMROJjuFTh2LLQ9WzTi7bsIwJxg/rh6ASLXuGbmIM21WnIEG2g40Ylqfv6p74xF9qthwGm8x/2ir1r8664sOqTFESLtUv5taCXOuIdVnWZLU6KqzlXayjjS2pLpyZZ08+HNCkTv/ZPMY5PHrDa98gFQInPsywK+7FBxcQpFhnYwZag+zbh5GGH+IUoUlTYWmrBfIhZjqqMFS323mnmYfMg4iDXw5a8nDV9DRewFFic7U5Zr6kJ7h5eSH+r2mw199wbrGZp8b931/d8td5FM3DIHLL0vzBOe8S8WZh/zzuDO00enEM5Euf3+H6hWp236tTgFEuDQBwSlbNcoY3uAsO8MofHeRnS7Tdt3LEv7pwBj2PgbNiZiPgJ4t5M2ZPIvOTsPGBAli8aUb82dZQbkRxq6v0Wgfd6WwllYFMQ+n4cZ5jF7f3NuUzodzdfaDtaD2tXbCm3hHwuCQ2fezQ0BNvMfR/Qg4soW/C1Nkq747Pkr16UHgwM8Ka9uGhwdunWwMlbp4Kjm6tEmAdtSOUp9YzdTgmMsGCf4M7anXI2gq/g7wfS6+Cfc30AZLMfW2RX5AznUGP4BEGWAenc+TltpDVm/99okZNOKqHpNzxaShu3+RnsAgtY2kUu4WbgSCH9gNiBDJ6dEjADs7MDqJseEzoWXRFJsWMKiSj7+QkfVEt1yRFL0P/IcMuyHdikedB/jqiiTTcVQ7Y/KoHsGTXHlEAWzOCACWWbi88yJhwG3lqdJprlgTozUvvNbzjETpxRfdFzEnPXzAikq+ZVUHjiMxEMKKiLMJExeJQrxo3umBR8ZuIcUVM8aLg/JbY7U+HIKiV9LwMVnlTKWxMEl/GxG7RUenJ6rfkn35Nj6t/k5awfGXLt2A/TwEC12f3aGQDLunuwo/jR/2EjGG7pX9ppz176ZQX+fksw5rFU0HzLwyXvnYjeinJK3sU4SOz8rWZs9FJyjRpI69sQCrK66jYQWXj+gQV1dzy2uMnI6NP6u1qHvv1XKFZgVRu0X3JCBkLudwuptaUn78rZbK41YxXlWUCQI57s+9Qbp/5hGsBwxP3CyQvktuuSgVaGKP+p44pnSAxiATNZIUemg6FpXN4QHGZC3vjdrzwico6oBm4OXze9U3PEreVq6MyTWPA3Z3FH+ZdPa1H+aZbY7VATUpO4TNBtx5NMkGKNFUhEx60k+o9SP5JnH0eu3DUge8O0WLoDLG6QI0BR7nDRXeP94ZQ0q4tCknuzydEmg4yTB9MYY3Oa+pN/CXakXemRTxjMQu2hipfRLzLHxk44XQSMMnYVbJXwXoNKZScGwjpxUmQsmfUr2xsX1bVutXfwjxfn6Lv7kfVvbGr2D5TPkGqqDST/dVPuEtL0pvNfhXHgn1rbfw1EMDU2l5Kg4+SwE8m/A3Y/ml/HELn0bWa/yUxDRnMDMKY6rWK9iO1YlQY
029md5k8m3NNHGwEJG6oWM/nmdIeTsiwNbi0HBYarnGTdPvORtwLPsqVTOhLSroQnXP9ip0qLsXtk9pQ95zluUR6k7rFECkdX5Be9Ou+ANawCFic66ucoLvg6NjA6zdH6EjrlJ7LHlwY0u237fGxX5SJ5b6KMGsX4/aq0rHtyJl2yN4VdfXThy0DKoNGZkj28H5r6uI4yrgi3MVPluG+rlLkQRW1347ztJG9g+BBktMZI0RPPuPSn7hsN4J2lL4WiBVTcM9dKsIkOm8lgHQcfzDErIjsijNO9+IF0cCZtJVxAnFP0nSckqMKoCbb0JNn/rvMuxSAoo20tJXq7G0eSUzJqBXD2qfeguhUeQ19SWc9ZBSNRJeZUhegUfkcmVVQ5t1KF1A3zBKH1Xf9CKirW9WWKGmJFbQmyWjUkprCPxGiz8QklDBAqUd5nOyPv32P8uMk6Az6vqIE2x+sVNOwfPMkNG0nR+/mg0TygVkjb+31aK4QO2PK6jS3tP67ICEYnWFue4mmjrX2lrgnOZDpmYG5bsUtvvyYqt949IZrfNXCuQE00QF40GXta4y+hUl2zf9Wq8XRWYs+XWLUhx2ZSGjfHd5hbEpx0uDg9ZbojfSkDgi6IZgcdv0AcgsamJVadtkwnj2o0pkFL51bXWQTiVSXFqTUF+PxtYvFhzNY5jGluQutl+zb7UJXdDe/bYVRCyM6frdYVaTvwriBModAvomLnx5t5bqvow4B4sylscXNroiJhSZSzpjlzMeJbrdGbl65NXcgnmft2gtzE0PRc+FEJSH8tEJd/d7C9Ox7iD+N/XrcD0W5wsRrHPZacH/0gGs9vzS9gJLTNraLWong38Xkj9s4TL11RBdIneRzVftuG3jbuFYH/cZHLWzl/mwaNIJqNOeLAjtnZVgMxO2r0dB/quiAcmTRhi9DcfCHJ/jo06h8e9unfpCkAq8Mc+mRG1buaTtwa1E4DwE5ejitAjjLcfVp+0mYj+ss+O2dxWRSnZ3fQPtYdtGI+6gutMnfNC/xhsiSjfWFrgK3+qLcLhgE67uuKL3foqT2Jnhi28DchRZHi6pgVzr461BZXgOYUEQ+2axUTqksZLVa7uIwiL0F7FdvDAtdk94NYhi81sKOz95wynWx9+T3o10J90xavCUv2Eth2l2pjcohoz69QiZh3MBLrQ7oTGTuJPbBFnvPUjdRBctFRshm0nIOntUnFmxy49fUUGGhykdhUTdM+fWX3BE4gQBOxfJAI3z/vge/xlNrNDdLUAo20i6yDFzwgtiZC3zWI7wjrlPxF6XCMSF8WEiaytsSwEikDTxen5rpvBWwDI/7jL8PMlPCvp8n5vshrmrCfbhxN1w7pEbR02qgIpPve8bujYvcd36BBaz2tlOyHmiT6LyM+cKC9XDhPh9K28hBqVRTqQnSWcJQtjEan7UAMAJL7WwBA0VUAEAIegiv/z3Ilu6u0ZS2TRtt8g0lT3bT9CgH8/KQ1lTfgnNSVX59T3Tl7byJ2pwn9Z0J1v2qOJOOwrULjHGfEhvWUn4PH1OwKRcWbvpFqQEw14h4AHhKJ0U8OHmd6lcIx3KO5K97pgp9L+4fmd29aH3UirPCQP9BI9Nt743p79xZq4ewoB86fewgpkwVxRu30NMs3Ajf4e35Tl0qcFXBgokmnhvw8OHlX2q/DuTUZLZWZvZcIfhK7serdGOAE1ici0I3rcTw/wGY/+BowwOs0x55+/7bCDFnvZYAjToocxG5joaAaMcln7Xm/4iV69yOogOsVU2NhQn1jfMkNcRctq0eJ9X5GesmWLRWAdQo8ciisZXTCwlLNEP0QPrMgPNKf2yOaIoQGMMp5Ganezm3eiR9Lmh6DL8gdStxqFQCvQXB0zhk6RjVIdUBGoYa1nPk39OyyrXdE0lai6vKQpzXlXUi3vlZ5GiKyuZ5Efz8y2/Pu5YoG38IE3YVvgcVV1dI4YO1r/fdHFvPWM0lzh/gGu+EOqXX+7gokVFpIi5a
6U9MQhayiKimkzsvoaCd9zQ6zJ/aYoGL2WaHsjUA6xTHSsa1yUPcbcpl+UvhZoob2O2RpTI1WZUw4h/6HVtGru7XHfdu/T6PU26qn1SdJdXSUfMJv8mG2CFW1YOwpU/wy1Nyo+z/prSXxM+ROLRmgYbmEvLq4hgTHc94GScO8BU6Bs7QZH4MQzWZ0Uo8xZ/1b03pSoC3ZrWEbQX25LlBWmrhXurD0zjKmXCp2xGmRRoV5/Xjnr7nDupTe5RPTwBCncYv4idIsz1wdyAS/13JMYnjwtfvQi78nducFgNJJdAgn7Oz619cTpJ2fyn0gxE2+b95EfgPK/uFQRSNCmfOnrBcVjpWRYl/skd/cPwGKr3hgXpEn+SlDGfXtaKhx8I4RDiw0Nyc+UfGpSXurPCCdnllhg+QSuK3ubkwxDx7apESfudc/vC4q9fGcZSD3u2mLqE6y0ythUrQwfiW3DE7iOBZJy/qShrGlqB8Ov2joEiHDHOP6HWaduCfNbhysFxK95geGqHexBeAoeaCUMiZNF2SWGgQEI5wzUiabp8Hd9c0J1k/gRsq4Aqpn2J3heJVqZCjMcH2yUotPB+ZFV6pxmSAAI4v/b8PFRxC5VVp+Lhy9MhNR+MSTAnb7PV0E+v6gjBoYoOiskvOr3Pb5/Kh1xTuMN+xinpOg7bHt9QVbmVgvqa3l0wfO6PjCBc/OvamTKgis9GSH+Xg/HOlELQCneZIAxYAN1sRdtg5WqinTXXK+Rfslyur4W4EJAVWSX3M9gb8alnRVdFJTYVQBNtYW2JNoNueFFQW6x6+35iSbrf/ooh4ZkqNlg9H90JTDw+WRi8DLuF3FgnFEMlkgjROz3Ts9xvtYcXBLTRU+E7tN28Zu/IKs2T+qo9ojh+4bGbk4Zkb5Joz2Io/IKKvxp6aK3nLdYQ8YRKAPMSvcnZEEdV+phwvcHKjIAsNHdmjvlE4ePFc+IRTlQZT3M7DDoJvY0wO+/sEEpAh53m/at8ODsmSPz2IUw7jMjhLSdvPznKbNElJDfzS6ziswKlXSF8OBzwP+GK229HwoTwIS/PLuuHx2lvM6h4L0wy0slTCTcOe5tr/yV1rgFRj+OiAETOifRC/0HR9Gxtb7XdM2doZ/3mhYFW5qrY6LRAa5jixecB+bQIr3WF+AiMPgFBw2VzHF/t63b4LCKgwRM9D9pK2GX0hw50O38xCRpH6Vlrqi9PVGD3l+rYWvYqHw/I6e6JNNhptHm254mBI9ts0ITvuwkOIPR3lyok1yOZcO+B25B1y7T3N7C1a70lP6PM2sNxUs3jUJvvh+flrcq9SKGLgvnAUuU9DXTx2VaJx0HV84j3A8yBHIL3Cxqr8o3YwI3PNVvgtPdyemmurD6NW1RKrxJy+kbkPFvAZvdDgBr2ihFj4qGRwAFBqtxTDiy8ypMQi5DTsrtltGDIF6JH5u/YWGVfagOLXBExUrzyx6AMTeHnqc5Bl1Sgy1gyhi1upGMfvmKbPlULKWWvmibQUiOKD9JbfWdAWCZEYwiJ26PVnl/gGjVw8oYDaNStBtOuoLZh3WSK8w0useGD4J76nlbadQUvSNi6kqC4GWSDdilN/CZ+V91/pfALM+CGUUZNOJLq+NiP8OF0FN91ASZHuAfw0yC+366fQpJA6CT3h5VEuitbz++6NRmzEN+FTj8a6PgxS1dFLAnrkB7dOyR2HmVzYKQP/fGPy0loX1gvSLSWVR8tmRmVYoeq1OWGL1vjt6M8kUetl9frtR/213ooTg9z4pt6gmR9jZ6OglnZLIexoQcL+6RjkwjHyVwjFIXn35EGuzz+TOd3M0xkQCx3JnD8OwOaaLV59RMJUsRQm71ZbXiPhPWZlr0RPEHcvx8DZd/7zCsHlDmv0J7fXNbR9dy6l8OB3BgvB7iJqHkKO0TBw6kVniHxbW6kMLpOTl/7e6whngxSBAOdrVFifLb9tkksKvIehOv2VbIhlF4Lm8oj+pvo3/pz0aZkymlSb4WYx
mz7b7n40FjZJ+xQMo8RyCu1isPrIkq6NE6z+dk/9ODOPiCPAbwEN5A0UW3Ssj739Xl0ZJUgQunE6fJqweUvciXChk4+UFWR8B1mbtJB9W9OgxwOon/Sf13v8Xkb884U6hrDrMTeA4y8oLp4VLoQwhzdz6EhHKqE4XYu9LTDpBgywljvVRBPi9Z73ddBL9bFlp8oAdL/57HaVJa4aksjXXtr6YGdD/VJ8wN6j1qves3hER5IqsRphMQHsVxBwCJYNBXXYB8hjLCcKS2T6BN9ZaQdPWQQ2a730qsNbxP2L/OiDKOZibb/LI98vobJrUFvmH+tECzN6+dOXXKDPOnVp9WDxSABVu1Rq1ft5Lp5lR6w1lDW1zimWrJIzdeFBhis6O5ES6l6c88yaUjtZHkTaWzflq7o33Hy4dcPkW/e5fTkQE0kJd90qg4GsbxlyiiKh1CjrCvlqvFNak8D3WPEs51dV001hTV+qqeI8Wl+eCdUYWTkHRXdkUd1hdCebbjXZ8S8N7GDmMdiu6x82Eu65v2n9b7KqK+m2X8hdgCYYU24K6ARw619H4bEbEfeWY0tEdBhJOTmY4jphUVobCHDkswr/8X7tdPsido2soQq/aV5bj+EKJ0HSCvmfub9rpZl6LcQmHvc2u7eaIH3OshAIGPpZ6IN4kPhncWf2gI4LUvddJfW2ceE9FDQzXukUQ9y0bw4GttDrnDSvP2B5ZRZh/h4X6Hr48EwJCxKdZdMAOLQEMFm2S9BfmglYizlE9H798UKIWkzSFsc+uyQ+swgaKjXad2mR3hX12de0zbxGw3x/PjRH+BhPLnNhfYN37HVGrmj+8itsL9nFr7mp0Evh/JunPZp1mtnefcwdIpUCM2VtdKxNx+P2diN8+LvydnBOCxiEIT+Omm+rPJdPIuk5iQV9BB1yUKVBhAmSesa/ckjKxBy0/NuSIUkVqrJ868UB32GFkIYFcQE6gaMXRf5nOA/grTQHLuQeOvjybW9Dz/H+2CNHFeG29hNbmLJOAq3f7047w1KIrHwwSALhhtKLqIZ2ucYuVGtdSJEYa6k93dCcfxKlZoiGDdv/PKIvWnPNm8nYfzOpjhPcd+fSYUeJvSrBnCVdD0frx/RsdMNOWTJxkCNICJUAww2YgzoL8Pl9+vnS1bcOst3Pl0e+8hlWCdd1qkjvn/Ua9g5bOnrzH3PSZuMmXqUiAph+UQiW6lmjLkj2mnqqxY+0fWJnM/PIQOxfnX4cG8koOAVRhcILL1DpVlbKM15yOVS9TqEWNciG2/VE57frxOjf32hI2AZHz44y29ZD82GDAmhV6mnxqPxUKeK9p2kCNO7YeGa8Ch8VuPHIemcI3Z50Gj3CIXYet2GqqsQ56mwY4eWPxQ4/8mOMN1SzPOrL+4g8/pm1iHibqQ3zAaKPR6OPvJI3mgzbWfrxTCWw2wsjVbt/0v7F52bTPIgPpZgmHFf+NKjPPE+zGX8tlRZBi3fpwjR+7SUgPTqsPvjBdWKDrq8j/A9YnP0hWH6V00hPOTFXjnhkCitytIZBf2lu/BU1ROHG9IvCwdIqIyh1ihdR6P5MZYrEbiqj+kyOdqcuBJfL2b2Xs0jSlklnZu8QpGl/NLFGacG/hAYh9kTlWXvMxqDWTtIBtEIVsDCuUeiU8GmEU1K6Wg9xmX6KyuFubh2ZCXCfUzqRTeWKuYeFBcikG+mXWOceSjPoCt10M55mQeMzx4WpBbPy4RkzzJr0WG2DCnJVE5Cjsb7uvKIyPhpo1IcpCVYC/jWmfJGuhQgDeHpQ8Tw2ddPLFBdcjN2GowXZayuZJDkSqkYQ33TiohKyHh8F015Jv6o+QKIXymlQL+a62+MJkwHKKNCYr9ZorGvvslUGdobOhs1425+SNP4Es1qm+/1INOrI17kjKHuWTfBm3mXMLvsX9eX2Lx6Uam3T/yOApG4zIn0ekfbV5HarI4r8Mx8VX4WodAjjynd4hFCkQWivn2dr9MTPvayRJuVaLcrl
pKgK9yDMtF+ovvDBnilnGeCNg6J3YLeHQk+hUkMjrCMB1loUecfPVUh9UicrepxxdCQicoIoJ7ZvsykV5WC6ak5M2h50b7HJMYMZDbyoGQpFDmTkZdx/MpfCu9O/fNZ/Ce+leNVIS9gSy6QL/11mKN/Sko34TtsAKk9mYEOq88fHVfIgdYv49JH1PAnQ24qf4nHHLScgTKk1WpbkQrwKvCpKIBs+P1pe41ciw3PxpFrJypTalzMVjyPhXREMIkx02CDQnXTWaKvz2nCymjAxjJ3ybeE9V8TmrE3LRw2sgCix0alLGQKyxevlLnXJilc7oAS5v0yyAe45tCxXQVREPeVmtUEf9EW+X5A0uKEimHRM4XZsfbDacgDJjL2qsiKiuESb8p9ynBdXuU4i74kmp3GCi4W4wGL62/4Cgpv3P6IGp9whMxFidcD39+btnoIkDycdMQI0z49jOnVuK/WwaFu+7Vfol/O3GkEe70gTxh+tf8vuW8N4XXkputJr22M61os8nYKcndBAl1hu7655lc2mYVGsptoj1dCXy6cK5nphft62NbCBNub/NL5A3HXGg8B+dLL67uzrG3m0yGVHLsfaKRdNqPFsrBYYVqzqLUPZErP6Q4QJl3tgIzNoWeQCrQMompY5O02SUPaQ9LJer26a6knWFuLbvIVF0mNk5b3fTlZwfj4Hn52giQsjPg+xuqxfhZ3I9qI3qcQX+ZvJNZ1T46DdHSW12kF6c7C7/XnSOHKdjqQavoyBUDFdC/zVIS9+jNCPiSJeGAmMvqODeMEIypq0QjM9Lt7oa0kuEM8j+KE56NqSyKQxihsB43aQM0cQArV4b1glQrGo3Vv6igZmi2l/YboMBFLBNuBtRem+ifHaEZPss536T7ue0+ESWL9YVmsWLv8jlKQgeTuT1r/eiCXdXHgfE/93jtnDnQR+BwenbaRfGSX8a6x+UgIl7v4vZe/hNcKV2lAc4SZQUNofezWcnHWk2H9cvbjXRQcox8WAvrEwch4mJgNZQkeFSXrCv0FUkEFhgLGXctTF3Qa9WUdPYegGns77t0NOo4ZMgeyT4mlrZ7md11464P2eujS15yQl8kxv95pkDKA4ph1mu5t67kYsQ59iBSxSMCXT/SQ+fzdDob2UF+dcjFbHQawd02UEfEkSbAkfMXUld+O76ODbBZGJSa6dAofzykEcEZkO3RAGFKQQFXcT9+OcVbp6JKAMlBagXyj83oP9Za8zBG6ve0sCy5Lrw9Kb9n38qcMfmsfIVV0mls6kMtlOoJ5E/mNnH6QETIYLcIUDqIcIjmCMGC+aMSoiz6lOT6mYInvMougPfcx+0XhqBmXfD+s24sF7YvuyVmbuNztZ7tH8nULCImnI6Cq3dRln3KIiWQp/PR5E6aoKZBVJTTKdCJy2TGfkWi4qeESdTwQ0GDPEWmmqtoHB4S2qBQiiD4FfXJipuil9RtULXPiyO0vM+RVmR3LXtlbl3Go5V19p7hC+mVlbUh+64fBGL+bO7DY7ITBJjAw1o9B7Z37pHtm/dZTjvwLEjpdU1apihpb63m7S2dBzxdtUp3TOL/U8ybvGIFj+ozaix42pP/kX5rr03Yl0Te5mDv1OiSSDwQaUegyATvyKVQnJHKvpl9nTlKvjwE562Tab3e4Pqpcv2HPA9y22D1XY8DJBWDzS6iPwbqoZdfT5CjVZOggD0a+i3qxDgfxDErQMSRuX36KfFGd7VcKQAiJmY4XU1kpVs1Rp6xqV9mHUFCb6MI4EapRqy+qbV7Y3xnGUS0XPn82AcKI8TJmfp5svIt6anw4LVQjo0Qsx0+UxfvGmxHzvB1TmAGX9r+B8LsS1nhOq0gKCK0SYX045geyx36c8/lEq7y61SUageML0d+JT9asVWYjf+P6AOYRkJuJi6bLm/bQAbBeOnCXAPGzR5ybwlBw6XrXdvj5KAjblTj3Np9PMgLsNW9nfuqf2zJklSIzA/pmXpgDfQwYw2eeGOm72blp/
l8GQ/ynEQX/xSCD/+PUAvR/GwwB/1eDIUjsP48tQNH/gVL/jyYXIP9fTC7I/n1yAduFe5b858kFxyOy+9xRL7Z7A85bCUwVbFJdDUMlDPIRrhU+As5HDF3x9S9glCYncDYjq+XwthM/hZE0V8mtn3SsuqDv9yEzS/EuGk6VLzIWrDNI21JE822KSYVZuuuQCFapVTKh8hz4yfzVU9+FAWhOXxpFe6WFcxCzSQLO3ecHMHmTIFXdkiS9ADpUcQwnveWjPN8YRrpzjdMH3cnWkuetkKGN4KxA52Q5tJhdP1FOy+BY8BonQ0TnCEfom173QroV5PB0sd8v1KTfmvaW3lwgSCZnFtL8eiLjnWNLo1DYQftMe8SBejDN/hexbSj0YtAQ+cvKsOGxLELWTPks4v09qjCpoHsHeFpt/7W8vspnaf95brUoEgMfKQixAbomLcUp9pJ1ixDE7YTnTpgLBpiLwoCTzG9lq2NibracTlzc6rpscXQZxDP2iZX3ble6Vx0lx/zz/Ac3VIlemzv4J9aIWta9tnVUvhMRCACl1ogsJlnF+g57FznLSH2F/RsCrQIiA1EI/VcPwXEAZjkaxyXTVZLofAT2wQw6vKWSF/kKyFCDjHREo8389syvXj1LFxSS9TKrVws9YIdSNKiDJ+GDMQ+b+CZi8OtXP9yToAHnaJ5QaSabgKJ8yRiGHj6hGNSIeqnTjWch6dj+KP7cuVCxc50CJ6HawHD7RcIrQ/QcxsSYxozr3753S7RXu/u1gnuhfoFsUsR8PmQKGsaqr48R+af9VS+c3+zUbQ5PK72O2ESzovrd3EdNJkGeuB5z5X9YQw/QWJpZfNu/fUCsARvjF2ii6SUGHEi6YhsSqKPtd3y99Wkd1hLmrT3da6/XWiRb+FN4NsfkpOcxi9BlX/wuFavaL6Kc1ly2Iw8VBowCM+sYbMKlDSgv4xwttOuLJdb+WPUYb5sr1xJ3HcpQWqDfZwXly3q/qoUxpVh2Z/Q3C87Byay82d3wnQII8VCEcYy4fiX3FNfvEDptur2WbfHKFzDiuQDG1gjWvz3lP5/MMf/NP4sRruBq35ad5eDxKUHO+feGbGIi0k4L+GerDHEAXQNYDJFUgFXQ7/A8I2m7r78eSZo0GCr1soYZNn7n4m8iSJdGnvkvOxFM4bX/CtvVpZdxhZXNyhplyvu5F2UCXucvnFb3Cmmx7XduZxGFN2iJ5IcW0Rux/HvesuGL+acbxHunakDH4xB10P13R2aK3hSkgZzE/msLAYnvwF8ZWGKCCJsaCNe+6oUyv3N9hEoNEAon4+tSpEh7Y+0xMl+retULX8hnfm1mD580pe4wy9cS6d+PAYDSJkqnpBDOubkhQay8+0q6M3A20ywQTlengggvrcnHWTIDuHWexQBpZvM3CrR5keh+FVrutiqFDtifB5Vfbnn49A1GGg4L5whkKYV/ubjnGXENt/peJ/zIWprNECFAkydXPvHz4RvXwuYqZdGt71jM3Ve7wO9RbpxMyVLXhj6MzXXn7r2ASpOh0rPFPIES5Fca7ndH52vpVWv7oi8iD0IFInJknidMS+FP+VKs4nm6fvFenjs0qIZcxGFECVi8DeLyazYa9DgY7zcEmXwCHIi+ONvYYV5jvaDudH0oe30CfyU9CMWM8X2S5wu8G7TD9/h2dNcSHmmeD3kfhgqbAaltSIVN2dSgNUkdVK/sETEfWWv69W9JsrcvteunX3lNJV4XXRVR8Mi2xDJeMj1cmtUa+7KxjnzYwPZIluPqKatDbFJzl50BMQxpT32Iv2HdpA5AMA0ZTIxEYr0QEJCYR+71MSP1r7BJxX0VEd27enrQiM4OJ2qbUF0CpTURX5Npwg866SeRHZSC1thzwuXdEryGZj5hMqAFKdNyGCvwBG+f9CqKzPjcn3gYN9JiV7dopMchEVWVAtHj86bfdeq8m1hEq9BMu+Ee0dPN+DxOjUQMM5KRSdPQvwYUz5IPK+52z8mKO3EKH
5bJ0l/go06X35rSHfaN5mpj3BFu3eoU+LcFWtl7U/hoBjA19/tEt2xXcbLwepZYrr3IPohBPLEplPPRvLnMl2AKCkhEOjT89frEnB6lsRvTpe4Q1Bkau5qKL2IZ7bUtTooW4HBvQecjEtNbIkaXmz9R2wjRm84g6ndNa+6soMO165yFRQ09XrA77ZHS7xz/QpfnDSuLByolR1cctIXCWkP/Mvqn5N3CYffrUeqPDcW0hlEDvXAhvGmaNJxYA6eqauqxSo4BNmXXb4Tur10VtmHktxIT7p2Qsdsf2GFHKhVh7XeZnfa60yXEjBWthgSOjhLKAi5AseczBA/WQ4zur0fWTMyk/Py1X76vrPDGr+jm2muMnbYOHmEaWRDF/Y4FZ3TcC6xdJNM24yhY9Z8e8UCmwiI6+qicHMeaCqQ4WBQG1yHz8IjBARLeKeqbDmMzgdNnCGiKdrI4/uhqPn3nuO+iWV8gRk71Ak0B4+oifMg78MG6BwvKg9DWsoKw2RMIbJWWMI3Gh6ZX0XbkLb1v0J/zxcmmikIWh4MPQPK0iN/qiwXonDS0SNSpSjgMvgg+8WrOSc/l1XtcJr3cPw/Uv1wlgFMQzCGpk+baG2VxlnBzacYKOCwIVzuASI8OXhPD73wkpDSJBL59bLVFKPcY0tRLOxCdeb8t2olCBT2pbJgY39Un6Yw28UHf04WL3dTrq+XtD9CC/pJne8p1o+UPbquAedGF+UY+nw0xPKEuplKMvTpJs+7e60rsWy7ODP++AwTO6VYABRkYJwxi3dqxpn8orfSl3Z0nfEMuCO0QvS/3JP6mUPhubGvr1G0lt82ze6d7BAH3Orj24cDVAHX53LQgygIkp7G5+R2k0eAoJUjBHzPpz/74bADbo9aj7VZ1mVQDod3ediclo6EfRlCAftZeChZHcn1iY9vMSu/U5YOztpFvAQZartD5yuuCSSsb3CIslB9KkMb3h+hRIYvxtAHMCOa/HctaoIiQNvAILHuxp2zzgSc8MLndKB3/bj+6Y3mmdWW9862LodXRIvjbS/QXhhGcbAUHJq1xd7w0Key+NrLqkCBGGQmw4yj1CASjBhmKZjwQ7NpVVkUGl677HjvF+cGr7eJF6UVkNTdxItCLm4EqP9X+DXUCoSdelzhdkz5lZLz1B3Q1Jjy4oEKRt/+myC12Z/7XQexdWlVXrPwNqaYlmMP3UnT5rPWDh/IZjcYqs3X6O9yizbXS9RIZEvr78drqx2itBf8qaEyzIl62pKV2ih1GWvygdycXoze6dTdfkHzCse7XuvwZgz2d5VWDnpga3neN43/mXo8+XXrAp3IcFBuSMVSX1FUrTRPDwyX0qRnIl6Hq4mJroaw970Q8ivN8q+ADJMxU3rsteO9u+w42/v7+PpWZLghr9K0yRZb43q7pAveDCgSe0DPJNxAkWXIJVashUOWePlzZ0/TZ0HD2LD+ssQqAw47EDUA3YS1aPjUiuSaneEIOC15Sf7SclkV3TEgwgkQIriq4N/jcPj/WfC8zpOof1qis5ZvC+7glRqQ+/5xvLOHdct9Xkk5lZ88rv/S6s09HzjE71BUtqBk+lqUBGqKDOss5MR8VS//UisB5qv369PvRpNPVeNhsQVa05c8O5uYTNA2TD14LDTLzlrXH1HHfQrDtsqUfhfCp53R2hHVLP+BAk0MSGoq+SnFmrIug66YG8yV4D5l6TaUM6zK2B43AtFoaOztatdBzCDiC0mAvFVnub/fkFo787OGrTYnIqRuQI08OEsmvwmiYtujWP2r0SLr9C7RM/VpLufdZ5MLke79q2q0rkknXh1RMf2UAvY6+lBfks1udquOvJxdGfHu28rSXA2+nCx7dkryRXv19fcEm5RU3/W7x3pP+22FwUZsVv5oFaq0BgiRFvQOFPXWacJcf5Jox9GNoj5eUGKz4MbRgXRT/7os5OEL23z5wYU2h6Yq1quz89fWAfMEWfMrYwGs+JaD4gr7c3SfcIklur8AgG
4pAlSVf+4Z1H54+Q4zVJrZ5O7X8P1u7jl1XoSv6RxG9DMH0bjrMqAZM7/D14fi9SImUSaRMfa9tfM4ua+2KZ7m4u7gWDkbpKHFdLBim3O6ryvA4EY0H38Sr5swnj18QuHzoQR8Wj3ihVO1Wo415PEM73mRE4NN37+N5YBQSPOAgbLZ6QEwCG0KB2OfaHIjxu6RXW2vi3aw4xiVe05GQvQFYlGMoXyrStIfBF17zu41PfczwxggTVmd1gFioTUb5u+biBet9UBXjBCesjZZvKCqs0v3UrB8i1z0ROCAfpg2bJnkGA6ltpCpxU5yhsCy5kY9fn0bpvfGjd0BwAeb4GF4HZlgR70NQsrJQ5bnqZ3yO7WIQm6rWv9G6N7BeSxbQYL8vTunNRQ8s53ugCDxfkaodKy0usdaDXFsLbeIOTIEUXCukOGu5NjGg+2Q5OcRFfSiQUDQJpcR3MPztcs7gDDHt6CJEYJDvtt9sNUZEx4VuPyU/JWc9BI9X7ReWfDJGdkTyWgVvBjm830DET90ZsndDRYKbzUVaFXCx46z+Zm43LRqMBGx0noQdq5ulOLLB8CwCnwZBs7sI44BYefA66szAak89KRzBOoonB/EzHOyrlrUhPJoE4JlDiR5uoXCQlbSft/C+fIPEJWcGUA1duXK2Vtfx3cZ57OA5sn9UsVybnt4UxW0odFeNAmBo057ohfKtUo/eO6XZ3yBLpHjrfh3JvSsUmOrKeHL2hGiiGv/a1mIAuJB73UvVftIIHdcEcooqYeIkxaGqUEbTG+2pS2qa/kyBhkcN9ZJHe+99OhzyRXou85gPhh7tlbrI9PVYbOPoRoQcIOZNtMyooHJp1SoOMpt+VNv3torKW+dAZIUjkqhB5nJ5o2/eLJSA9gxzMkXVzrvVln6RCTFn3CvKgynaTfISnWCNorz/Yth7A+GbA6ALgf5QfsD36Wekultl3XnhaZe9kvlTVr/Fr/H6eSw9DyqC4drN5IWa7gWLyfc91kdDv97HPFZfmlwOUBNXf0RLRQfaZ69ji2wpacdIT1vruCCHlthEra3agRJXtr5KKC9aJLfNmrewDpnMrDYaEpMK/cu5QVHaqtj+Fk+m3M2BqQ9mIJztE9Z0DSt28ZvlIhdeJ3Y5K1GhjybNF7fm8Y1tCE9GBz0EaWge+kdWP/i3rISG7jWAZFt44GA+YA5gE8TykWhjERYVNiCqZ55TWwoIWAHIT3qAWPJj9tXIYGGa9PKFHLNOOj/zljorQPaYYV56HZB0kqrAN6tnHhPLnMAyMaGOBIGc9kuL2UgWGAMduQEPTbJZifXr6/m7MJrvS4+s3vnU48Kpm1y/PtAHAx3TwodkaC+iRmn7dj4EzkROmAdTeehn3ku9BXE6cIdtIYaFAUV0EHrrHnjGai7091O+NZEVPg8wgGZmkTRYwFoM865qOzMQF4QqYGwYFik0QMNkioAhMt7hVXz56wId0VJWWctJ/C/60f+CZnkm5NSmQMnzX2L74d07jXraVfw+K/Pw4O68gDNcIscEfrtziZqptT94H+pI7W2HBsKsNAWTU3XNgdArQOUV6rezTuTrcM9pwip+pe41GRbNDgoChE4Rrd8WunMf7llap0XrHekBcrB5JZ2pVV0Qip5xAneE2q350lCVH6J4BgfmXqZHFZol7UW0zkRHnsng1sBe8zC77mJ8n7jHtjiugHK5OZHLRDX9GYDEr0sstJRTbxzW5wewVp6wkw+20kWfhdCh5j3EThYHJJFTyibPgUbWvr+Ie8x7UYetDG6R5mC7VTzdVBlO1IcjarCxaiD8yi9A2EPwhypp5MB2DI5jPWeYRm1MYSv3oAK+NPna8TGNc4BBNcfo2rCo47iUTyspzLEnSyPqGUBykYRACKLLP6WP8hCISyFXwt2oOTbNS6feTv8bFPkYFbkXf+urSW0636QCe6tP1lqYjG8R0VcrmtQgRIlie107YHXtF0IfA+vOvdE6URiN2QcIXQXCI1nR9
4gxi852JaARotmV70o5SSk3iCHv61ftEXpI0K1hUuWzIDc6/jqLthKyd/M4Rp5HRoGpHiexTBMcaddL5eaAhcMTFs4dc/zx+PVYMW3eGpfVj3HoJUHgQuuL25xvvP3Q6wctAQkI6+vGZzCFsNtrbN1691oI2LHWTY0VAP6BrufyHjsZwANm49J20SmNQSyZrqPLq6bnPTwLWKG632oea4qiTdmHqysyq0gu8RaAwaEj3yHhzPA4eZcpa/zNdAHhPQ/kGYW0zTV1fi2n9BAGimL4CmFineyCThBtzcnbm03KfUER5rM+FCDUCPOtMdopp3CZNMeHGxgIT6Wkl5bFx/HSto+8s5AuvT3CtLwKV0Uonfr2UB5zLWz1HQmrAH3VUDm3dIZ/46haGBUz6JJ9az1sBji89e1ZtzICeAczfcRy4lvd3eM1qF/8TfymRGfk6lKevsK87ZTWG2AF2kSAdckXKkJKCMc0N0Z4KDCz9OUidjjj1/0Fc65Yx9iKJXdBFZplCa5mo+Wv9mKZ93M2xxm5bP2BDMj68OIGXaItyF0HSGSmo+dqyzvJpd2vycEAQGGtIwq8/37nLmzwvS8m6fsvNCoDf56QQqyj52lBPWdp0GgN1jVOWQHNokagSnIrwSQyEbS/pRCGFGm0a+TmMNTNAySXmEuLtx8gFhvrHhsiU/Y6rEFZ690Uk9tC5PtXD8+I3fJrZI4saIScfkqRBRfOhoyUhJ48RDvUiLI62dib8aVL8U15Ca6431t+d1ZcxaT87dYuG9DR9NGJ5JPYUg3vCoCx5VBUlnHLBF510rG2JXG94xaO/sFPGgnHoYlYjyJKbyPtbXosqX0557wNzJRy4mksy4aQ/kpHmBC19EUvv+m/3VstLRR766AAfSAoEbc7OtwKgSJHH3LkK+L9LzXwZyuiPsgJPGI08jMEHRtUrOoetFqgbRl07pC5rEhYNVmSLC9rnp+/COtG1jqfox4cILXOal7B5VY5MhPZF4Td7S9iBlL7tdZBMvIPfs6x3FSW6pP82imyJeP34oEYkyvYtCCIxtXRd+jpdClcAkd3lKbQaT39vWrfHVffYiOa6Pvht4qQSvV1njk2y1li4zLsLsYS/7VLJO+wbJWI0PfVz1RPQBHXN1MFntsaQ9eZ1FVeOGAajIMkSVjK0TMpNkmKxPuhmjePtVxgXs039nsD/SxzOMf1PayNZtPJHdCnPGwtk69zSm6j1yIdNi/QjbYcpttjXZWk0ZkPEIjR1foAyWlo8dFYHg0B27J3t7zTvvXz0YxcKAhB0RahrOvwjUvVTZENJXch//EZP7CQmkDgMtL28EMQ/V2pxOHcSKh/Q4cqO6kNdUeb+iBine+FfMgKJdWW6UU7TX4ODHp7wanvk7i9sD00A9LNVKd9s7GcJgUKzAOWIRS+WqvToSLCjaYqfDK2jZiidHoWzlA/x4UItosZDkaKAbEjO5+p48UbGUleSFHyFPOdve9sDg2Fo3DJQCS3FspZcXRwwYNkk8zV9nSzHI2/ZLljnhMf1h/fdLes1DIAkn45iMRDJzNjGGs1Qpp7RDZfP1rf3cF104xfrMMBHTwpQc0l4QVWs91JA2P7rQfCJG7tgxR+gkwmkmMvoKT3nudK85LrzW3uNehWKM0i0lxL/yHsrfaRAtJ5PSQmANiY40GG9LAJts9pFE+CZfcqamov18WTa523/bZE3fqNZalp/55qU4Qxq7Fgslhp+dwfN7SUEX7RNBBMMxtdNSVBgWhLQfiINaS66+BO80kVMnYsBDH9RSj6nCVvRtT95KNIlbIYpAuorC1ldf8uMha+yIiDNO5NaKFnTvCWkq1owcSsEsvjPGZPHXSUWvSpdSRKSbNYT8C37iqEuHkYsOC+TgQtTQYkn1lmK4krVoIN/VD6YdkI14smeTSU7SVhVTD9nIFOFhV27Mfnbpi6ZvImTwqLbzYsBw4CpSxcTKDmn60km+huGPhRIjZviHBLNIMD9qotYp0D2
LgRnhrJ3LKwvAUFtHHYIiQSArvEKDm5qRk1k9+HUpblq5s5T/o8ju6YqZRBHpepI+HOUa1lj6gTz9r7Iue/ea9l1rsZotRj9N4eoczgtnMx1az985Ihd+2W3iSKI59hmNxlIqcZehOpz7UtNHx89MIAKFLpnBOQVgnJyQ4mJMVEEvIsJbsoKUgNw+5Gt3MmEKvx8dIpVlxH3ueHnGpYnxhhyQYHq4Buq6FJ4M7iUDE6fHLTX3WaAcOCRFaCWR8fBgMCX4PXqhX4cSjHzVqlwR6EOIhO0L7Blxbt9LOGHhdVQoDKxdohBbmGBDK7zlk/DqL+GwJXCxNHKYNsFLXrJJGMCYDVDsfz2yOdWIEbQQtLeYlG9EEYu/NcfBXTyeTkhkXS7aSj+CPUQO7oL1mUtyft4l0OKGnB0Od7049KVmhaxJTrnoYUmnswyfvkEf5udHm69ulI40xx14l23SgvH7HxYBFa2/3s6qPzkJVyE6Fw+ixloNzzZjTmN7rq14XQNwusOWnR7m0hVoCsy4xub40Iab+wgqBrFdSN6eC2BoKmNGtUTxGfZEpUlQl5qFKL/NBq1lbJes3nsa8QBCuRVlg2aoCUrhA3adSU4ZrsvvMaGxy9aXDqIVQ0SRyHzvY+ssK/8hQoe9GbAMtqKXXb0Jp4I7GnoVxHBDU7EowxY3VV80N7Mf1zbwsi9fDVZs34sxAk9JgesMIdMOFyA99dxMywTuRrJ35xOHbemZw4vy2UmyC+HvZDRpNYkGlbVPsR0lR5OBEI5WVvLkf5ES5AijjDR+RRGOeYoww3Vya8CN7c4oSNF+fxWFtcRWpWx3OvJrcuwwbFcUb1+GKZ+vTukIt3X7/69Y1utvlpqCGw2dBchUQK8+XrISpczpmvb7pN0oLpAPY0D3sOBYNYvZHcy0sYN0blwf1XGXgo8i0j6oEgK/X+JFRwnSFo9SPGfbE1tjHMflts1LP8vLlB+G9ubBVPvO0+mr7CS3fFqGkdSqM5GALBlWNDa9aTi65U/HnfCSS07ROGPw0q0HnBP8wrTenybfPXNxZADAeXARXq31VPwP0WQ5/FlwBJMavd++Qpt8ybHcFfRbH7ZHKHbAALlYUCpZhXBn03D30RW+ng0ep3y4ek9+3xbbp8TWHa9ykKRlmxZEa5PkYkv0LsObkAY6jK0oux09bfM9gOydLcwgRlTZYZ4S1ryxnsxiWwj/TvuWnZZkpev5WYaUeMxto0ThxR86WWVKJW5knrw8uvYbVzUeiz0ZWAq4QXrqa0deiLboWmKJXSTWg6Wxp89UP/Oy3Z+Ou/0NKvJ4Geueq1OGFW1TyhzUgBSHerc5kUv4BYEWExesunjdFxnfeuOWx/DOKwLyrF37MBb8HsFwH0/QvQjnZ6W91m7/2stUpT11H2/vwewcFt1BwrOcHBv/DHZ74B5MzOfGteF1HoEuB2QSsZyJ4A63R/i0cuP7cTG+F9v45cbq4uFr1h2jNdiefxgdhk0uZE0l56f9Vxkkv45SGPPAMSobASmkKVzZ4ToE9jmRI6YRMqY+fb8O5nJIlcf131F3NdpQDgMNJJ+CvyBbRSNv9P0j4TUFBKNf2mI+Hb1YCqCsJ9n/lOaBq+O7HVefL9pQ9kqqgPB6HMh6k+vCqkGBsXTnNkly6EsA9yNLNrcb/ISamtCovx09Q3+opJ7yQdxb3V1gQkSQW5LTZQ3IwbCEJMWgAFpDeL1oDhPZEsuHn4Na1K9cQp9v3IIEHKhIkybs8h/ddv52kr17MCfNcjkm27xEMMVvcxBM8r1J7sEHwXeIGkRojuyCkuIhRh5Df4whfyKoR7e0ynZptZCAVj81vpyc5o6FOvjU5Zf2wJyuwUHCV/7QHgS1SJJDtRLx5b4fScvqrgpwKrBQJeyVB0BfJ3eh0KQb8F1Whz2TcZHQqqKttOSRuU7deMO7/KLvlx+gMb4fFd8IOc+0Z+Szb/aJPfDHbi/BbitNsUD9fzStWz2ETxe5Fw2OP24
d4tu3RM1lR5n4uIUPGZrG3El9h1R4QLAibhGtLza0LIZB9L0Xf65WNIHYTucSz0R/n1mbI0QhV1GPWBLkZEMGIim86900VB5ikGxw11kHjTq89L0qKRWzaFEtl/HK/21z70j4ROHww5KtwdPnw1l4/iUu9oQH+/nTj174DVHcwl1TJkycmPXXEZuLgqX7oLx5pMwfThLOK/fRlaCdxJj2Mw3ja05z12WOHEFbtd3z0cou+TsnePPBcbo1MGXpK3pq3iV3L8I0iz5fpNxo8wJUxNoFml/2vJAIwVN2dZdExtFbTTuiwOKKf1foHa/VbmXRQDW3XhlT80oztzePRzzAKZvIq3+LzqEIV47V3P1W7YSQ7axuptLthgCADlHzV/xLnl8jS0ITiYTQnKYyyuO8eMYqGKSBFrf0Rhh+HrFW+U7kjvHbjd9Jcm0bVbm48IVjnqO5oaKmisNQULBSefnRSj0VK5+Y7ZDvo81FXVhq4PO520WmZIQZnxUYBUQCcFyTzxSfKmb7GLBQsG+AVe7YyiAXjZLA48qtrfkCRhbhbTobDSaNPm8JHtEHObQP9SkviEg403b5r25D7BV2OLUshL+oki7cc/jMXOVHDbWusnnjHLjv0zM4+83Qn24RP7VXx70296ZofW1TaLHLr6okVAm5pQ5cGUQFEiGtLwQSoB/apS4H37Oszrc7c2DwpenkJPnyHe41/Hcm13nRUU5nMOPOPTxSNKH7DcWiDZ9U1rjPw+s0mIWzQG833MGYdpikL/cHqe9kmPAuKPBgVHQdYnvwLM3GSONpsvseKAywow9YERENpSx8Yx99n4rl13AA/5jes6TnfH8/Ii0HrHyX2x8E9WIKJ6A33vwSQ9Hnv70/4uhGAuEn6H8X171TrFC+Ib0gfGf9U32VBIy/mc8ZeExHcavB179lykyAHSj7/GIN58r/bd4kO97PKQudD8vKM7iM9QGAqHpOHR6eznf76rU6u346wOXOYhtCM16RilnIKG5ddBNjU0+1QmEYjKWRG1fSdI1dpUScbnQkDMqD9Ui7yCT+440eescOoKg7kvIqUgL9EVF8r7sgh2ao3tBUmHuri5V5TobE3pLb71WD3q0SQQZwdijltzh8CV/4XRq4SQOs0T5y51dI49UDfQXd6aH0ceUalhcG2Ahy659Wenx8UJXUx3zz78Np1KT/ly8dZmQgqSLRdWjNDEgghkM+EFsbaxAIlxJMU24bq5PjfFbdZLiaGnQZsDb3jEFISsfmtstofI/ER5tgbOFclB5glV14B3FRpUdN04IcgwpBnSaftgvVC88d2ifxAagRS0eaSE78uYFiHUgKCZxYB0kLS0tIeMsSN8qU8ei92vDTiIU++3URdy6YFD6ZLcv2F8qlkJVtcd0G/GAUcUCri22PYI2mUIvRXvYIDO8sco9s+Pz4LiPmGi7ReEXrcLYEqoQwCHuFOKSQZ4AQcBBz5Vy6DCfFlCUdQkAovYaO7RwSEjv7KFgEAqiOGaI9yUpvQ8GJcssw0+6CEI23jsALb3pGKI3bGHvf2b3QD5ZOoQ0Nx2s7k95PBUvRAX3hsyP3Z0RrM6yU+sz6DVddEqJLYXR8wXPp7fe3btSkVc/E3RJG1JLQF+Urw1AD1QSe3AkLk2r+MecXL2YGobDisj5uesO/+1Ke5vxCrZjr6oqZ/pF/zY5xIfHmMOPmAOwxeYrA28KISQeQTlQ+dGqzhpWpWki/SBROGXCy7XRiVzEP7ZjYUaKvR6LaAg60Az/wQDEoTNZwaXupuKlDGkaF9jkjx686GWvUbKy4dS9CbJ9U4LvNdpZJwULJmTAUyutjYIer+Kd9mPVFD+TLua+rRBYT6y36H92ZuR/62I8qhwIm/JpDQOlVJXcuO5iPDg8Yg6D2q+4z7Urdd0G3cby2aRxumadQhhmNUVxdx+0D7fHiBVyNIPJz3c4Tk9yPJK1Z4heu2tstes13cEI6XAlGeBsobx3fROH07pfr7enftLk
c5W1by5pCTpFHb5bNVyRAIpV02KJhGiYLPwFHjsVlZt30hvXF/qTnRo2zRletH677jZuQwfrl23BFo/iGEFlaPXsNE4/fh0Czn7rqKaYSE+A+rTW7QBVRfMWxu11MOLpjVVgI5l00cewm4Qb8q5iBETJc+jaOH6kOOuP9jWo8VMAkt8c+mGfuX5lhlBSe2qjtFdlXQhc5LHy/xN8BeXho7b9p1R6sOPYEgLOjktIJqbQqSC0+17I9TrN7dOoPtQXovIA21B7myrlOUN9yASyKoYi0do3qt06FlT3dN7HJogenuYdcdEl71qcRqWHH3ZKWiGFb4iOkwofX9DYQq0Ma4Oi3CqnLhgJniHlk7v7mhVG5h/YLtBxPESfiYSW51Bo/KJxn3txGonDkJE0uHi+p18z+PhTB2n6aZn7ezUqJHhWGuHfX1NJNe0mSCZShc8SPF5zaMpLURc35yEnB3q+gIw44ckgRHbvgpq821Fn05t38p6YImGwM/JFhFjb8SUQuJp6SZuojc00zN2Mh6XXz8v7aZewYjb4XRiIG4jiExDYf+d+Bw5EEYy71+lYhQFcubiG8LLPivS8Noq62pROr14ND2EA1qW/l73JJe5lNi8fxVCIMyPrOp8pFT21Yk3GwxG9rbJOOp7tPM+LSgdYmMJp5DDBMNpfiPmT8CJ3i2IMPqP53fidvd7nGI7lJJVUbKdSlEsFRGD+LPBphia19H4KH8Q4QP5cBMBYYcl5xT+Y6ETJnIDefd5ooETwaWweU0uVs3XTVBcDWOgywLJmr17ngNSj7YMoKi0gCPmoJEGUxB0qdb3BUxBmIK39OXhI68u4iGYQPRyW/AbWw+mEuvNPuXnU7G33gQGoA+IeUvINj4mhjgWlSNgm+gXmhlpArm8Ah1ATzPLse4HgBu5lprMc79L2Jg4rzB3nU+LOWIlFouxnlXDllnDQFGO0uSUhMi4fc660kKB3mLv6jG3lxe3Fj1Y97dehkvXHlDjZgppaTrJMMKIFhgsUpMK5xtF92TKxAAMndIFpQEh3bbDLdgLDza3zWciqPqB+1qCgOPfgXrrD8wJPgE+H51kx6g/qscSJ2JhEzxqOIU9ssTu2oUtzetowhvq5li+xti7CSO7r7Xxcu7bXVsGN0levgfsIr3iAYTat1CyXvllCBc+eOBR+YV6/MZzmk9ihWC3SXAzosCihjuWR86BHVk2BGmNuURz+hGas1SNLq90NeNbTvwU4OIOj8EFBIp6FUgXUWhEdZfsyqVS/rmDi6kUCed0Y4p2kV5mTyLdL0z0d8sCzTccDOqktvEdHOX8QcruQWnxv+zNEL/p2LsBztgvdXFSvRtfwgqMFc/qZZzyDWlJwohgVwELG0EifMx3v41JZNzeu+PoJKFM6dIeVd3mog0ZwrtX/SX5AfdRESyiuBe52bPDQ8OgN0svrizl02kEYqfPzUXYEgdEjsPVEbFLoh0ihYJD4Z6zob8lI0E23PhAiwaEZhZLjrIr6hrNZs/oChR45/my24i6fL8CDNPcPlRggCS+0bEbNR6d8qM3+kjduu0U2gvgg/UwtVw5YjI8KXHnN33f98xvZoIew6oiIHg5pCjd6pkJYCjK/CFo+ldDFmu/rbfIGRXPG723ffIvtL1/0BiklZG9hLhwV1D3sXzc6/Vtv5ez2MY5iGn+MR+VIVdJVEOVD/2p43BNFfo2S3DxOq4rj6n5fBfNW/8K8uORxf1I9xj0jHXEmYa/eNJjMQDh6911Cpu9+hVXg/RmeNE40xGQicNBfeUckY96/6uEPxhd4SMI1QlQNFi9eT7WMMF8cCzbh5x+kt7roi/5yugu6hs9VkhK9t8p9dmCzcen5TiNSiydBAAPf98jk1aYo8NocaYtXuMxUCJhv3eTO/iBZlI5o8rBnfi0tyx7TUcd5KTBe3VQ68t+q4kvWJFE51I8y9TQMyZtfY3WVVCY+i3HE3JgYsuDHNr1+fFvWVOLNSpcr4cfaW/zLktFn
OHB3LbbDgrR6SZYXffTzHqoPZBB2qf6S0imoA0vuaODPZRaynLQ48Uqb3BSpCykUbWPJ8UbSzm71AffM8n72BEbXpXQP/dUTYL5mr+Tg+tl50RkobsY6SL8hztzwnBtUKFTai/9NzU8uiTy9JGHI+6CA6Y8C6+YItLQf1G4JTZiVHxv061vrqEgqL2zxk8DdrWLEqRYTMZT0qY4Jc4PjnuYJfbSqUBiPGKDtN4GcR1JcT3dVwBCXJj6+wY9aK9taTvoVfpvdlfOnGHnS+6uC79J40FvkHVLgIiOWsU+yIc215h46TcVxddLTA3EcoTZFcd43/6ym1hcr/v7sGMkQ2sh9UdSVNwo9rchNwNThAku/aqzR17NKL2ZSHbMroYXFz7BBAQ3vyAzXOL9gx+d37lNV1Re9PlFKDTZHzGS5Jz0i5T+49RAZPL1qkIqjQHscCU6iwlBMH4jp3F5Q3Bp+aIfXakzIX582YsRPdjDMa5JP/5r4ie0fIxdxqg6pRMsWiqK+PISfpDGyDqpWWqqq86ArSR8S7kX9rgmXF1iOhWFXWFza9nD3Y40Qg7C7CJPgODJNELAAs7XTiDy6z1Hq52q5ApKT4VXHRd8R+FYlHTzQrTL57iisn2Ff9AZx++RkleElKAg9pcSOs2Fj7BPM1aa+JwKKx9JNxtJ0a/kiL6SdkbVsovOe/3RHIm4VBZUQrF81ZiGPjqf2qq/oj48gJD3Dbt3a9q9FbffjBMN2d/kv+X1fq/ycuoY2ufMzBLrOp8s6H5rGL5mAEO8C6s9mFS4g+5y9DIsVnbsHyFCMfndQo5brV/etl9ZDOJ5lTHclXim7KbPyiO6bpNGHvqWirtWTgK6qgwQrrLQLgbwKF2/JyU/sOa3HcEt2Miumc9iHupZogb5pi1vc61cg5SV7rod0699aYvHS8nTd3zpxugCplKOTOPWe9oyun+PD9hUcv/r3GJEkl5vvThrWvlqNWM0a3QLd4bTFOqjXhjm+RZAKIElmVIQI5nNo6uFmrjzM//Rj4usbE4gFRH3/KXgFHC8Xy5YyHGroyrft7jRZw6xobtzW5LjLAmN4xl4m16jJMt92bGOS3naL4BngA6mjM9+8izkOm/wp/GNZAY2CQ/Nk33yqU1GjbgMsOp2Hc813iF4Xy0LVINOg42LpwVvguYjXy47wMPMNsG4qkgkPFiTLZhcvvsz4XRnZIiTt3Vruaz165JfBbhuFtEoeEdNvlogEKgjQEmMcIo3srtTkqQfun9XZa227VWx9WXlymbW12yTKRdhB8WObXQivEkHDcbPej/eMNkY6c7sJdK1NBR6A+wTFbVZqfI+5Jkh/9i1O8JIoSoe8JFkff6hXIMeNBLFUIejeK6bBL7KocNvPDmZoXomajhBbjCc+Syk0vIGWTVdzHJN7/U1/d/vUCWBxx7eIlvx3oaZap4ZHVLSmGaY6X8nYTtFUyKTILL9PLa2qsNZWTSKGe06TS+RaV80Ke/A6k/hZbb/6n0uDrAeROgQcw/ETz4lj/Gr7nTfES6DLZyEHRxroPGNvn4tHgLFT/ReVwvoAYVklvntO0HIeD+g7E9gm+0bU9r2FOvbofw8Dv32CseHUH6wmwKHejAuGxYk9c8TaO9mYYvfjHygeb3SkZQVP0x6TfhS3ykXuD7raMNUTRwTajF63KDHFxHKr0LYFzPwORh3ipH6C/4AMcuLgtO+BmxkVrrJpfIGdFlDYcJ43OPtgUyC5YF+fIZvBffrbO/u9fr/TFjA0P+csIDg+D9w4r8MWYD+y5AF+B848j8PWQA6PAzrv/1NnJOx0oe8AP/xTw== \ No newline at end of file diff --git a/2006.08228/main_diagram/main_diagram.pdf b/2006.08228/main_diagram/main_diagram.pdf new file mode 100644 index 
0000000000000000000000000000000000000000..805b22b221a321d559efa911e7859d7e1b40584a Binary files /dev/null and b/2006.08228/main_diagram/main_diagram.pdf differ diff --git a/2009.09099/paper.pdf b/2009.09099/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..39235716cb659dd655576f86e11a83125ccf2cd9 --- /dev/null +++ b/2009.09099/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7e39e21cd89ed05022fefa03eaf8dac97605b3da4c4d6be49f28cde13bc7154f +size 457655 diff --git a/2010.00784/paper.pdf b/2010.00784/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..2edc70c4735da661c365b7e5f650b6318109538a --- /dev/null +++ b/2010.00784/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:236d77cdf003186b99022c1ef03e6c29139401ffbd05ac2f80102fccab10438a +size 1205378 diff --git a/2011.06782/paper.pdf b/2011.06782/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..60d288ab45a3e8593ed006bcc6bb049f71ded5b1 --- /dev/null +++ b/2011.06782/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7109b3410f8239d09160d886897164e25b28573ad46a868271cda4a4fb3e17d8 +size 2206334 diff --git a/2102.07631/paper.pdf b/2102.07631/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..e9d8a92191869866eb06cd959e5b407367e2319c --- /dev/null +++ b/2102.07631/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4329784f2ac58a503bbfefe5b5209bf98fa090ed6af8f15ca3f22720bf9c599a +size 856226 diff --git a/2102.07762/paper.pdf b/2102.07762/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..183aca6b4c308cc9bc4d6303ba750ed7e3b44f40 --- /dev/null +++ b/2102.07762/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:429d5d1ce890b64d211c076ad1904d6154adae083c6f853fd43a4045fee8f4e9 +size 2707368 diff --git a/2102.12301/paper.pdf 
b/2102.12301/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..8f92b70329b6d48e51d4a2320663965484e7289f --- /dev/null +++ b/2102.12301/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d76403ef8b6d2fcb6e50895d005da2d210f14ef1b866f7f8d579fc539f031e2c +size 975610 diff --git a/2103.14024/paper.pdf b/2103.14024/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..9f0f67f4b4a5969e531030d3c5509d8afe36a5d5 --- /dev/null +++ b/2103.14024/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7e0e44c4c00fe4f6cd4fe64a65579a1a5b671b10ea439e7ec2c2ac318e6878d5 +size 41141495 diff --git a/2104.05670/paper.pdf b/2104.05670/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..a82bbc8cf4f9a837ab6587505bf7b8420b21a1e5 --- /dev/null +++ b/2104.05670/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:33613983a89f3ce731236ca82205a9a1c43084cc1e7959d08b7128fbb9457ec9 +size 40800600 diff --git a/2104.08225/paper.pdf b/2104.08225/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..946861baadfb8d4c955ee17d8110a6328274d60a --- /dev/null +++ b/2104.08225/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5e8f791d20bbfa5b1c46b4b00bc9cc226d3b873b70f0e5b7cf4099d61d7aba10 +size 2483838 diff --git a/2105.14573/paper.pdf b/2105.14573/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..e06c8d02cae2c4070668d1cb14c64f66bf5d6570 --- /dev/null +++ b/2105.14573/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2b3d2ef9dedcdf2bbec033002c27c6d0dea25acfd3c91cca3f9f0e4430be5b62 +size 2000962 diff --git a/2106.02796/paper.pdf b/2106.02796/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..c49827db64795db23292513729c9a0dc30f91d3b --- /dev/null +++ b/2106.02796/paper.pdf @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:999eb2a6872402d66cf2aeb3a16bd92257476d97cea5c21a07765d7668335320 +size 1014280 diff --git a/2106.03921/paper.pdf b/2106.03921/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..40b6370deb95a45d5b06b6fd6a3fde400b219429 --- /dev/null +++ b/2106.03921/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f4b9173113abcda3358e2b8a9591189805c7b4f7beb54ac7fe96619ced4fc9a6 +size 1597384 diff --git a/2106.06499/paper.pdf b/2106.06499/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..c971961996f1f007599a30f1bc381e17a4b4dc71 --- /dev/null +++ b/2106.06499/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:071d8dc29d08a9cf4fc9c3aabd487878323c978a181518c4c2b81b0cafe818ea +size 1355625 diff --git a/2107.06325/paper.pdf b/2107.06325/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..dfe76d4eb25c55eb52a49565a76bcf00b1a8243f --- /dev/null +++ b/2107.06325/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:90ce02e4fc1f86bdc134281c62b34356aa033b5fd80ec928bf8ee41815ed6055 +size 2195032 diff --git a/2107.08829/paper.pdf b/2107.08829/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..6a514b2d458ba75b0ec6d68711f6fddb71476299 --- /dev/null +++ b/2107.08829/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a9498652c07b11a94a7ad292b451779a0375e3bb70b84cf383eadd5a0b3baa2b +size 2380230 diff --git a/2108.08421/paper.pdf b/2108.08421/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..48113fee5db430e6b3fc09bd7b4efb03941fe201 --- /dev/null +++ b/2108.08421/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7286d58573456c16415c01c04cda78c903da7d8d4e92d4397a5c80eb2a400a47 +size 3938244 diff --git a/2109.00602/paper.pdf b/2109.00602/paper.pdf new file mode 100644 
index 0000000000000000000000000000000000000000..011752a099b40fccaef00c3ee50f43e35866a1c5 --- /dev/null +++ b/2109.00602/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fdb08126e80646fb93a80415e91173f98d5e43346c4fd1f5dbfecc6e5fced694 +size 3321973 diff --git a/2109.13016/paper.pdf b/2109.13016/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..6094c6b39edf22dbfff7fc488b362d431b4344a8 --- /dev/null +++ b/2109.13016/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:17e4930cb6f5ec25fc1e24808bcb7af9204eafdff0acbe2c6503d55382e6e439 +size 2785408 diff --git a/2110.00280/paper.pdf b/2110.00280/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..35932f7cf8edc059f57b9b2037d3ca52d5ff5dd5 --- /dev/null +++ b/2110.00280/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cbce8495c85daabb25f65c0f281847ffa164bbeb5e5ab9b8a5e3559a948e9128 +size 16324505 diff --git a/2110.00966/paper.pdf b/2110.00966/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..bb9e814242c63b43780d7141ff52ffd6162610dc --- /dev/null +++ b/2110.00966/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:23386ec2539387c78fd97c2e19c0723d280ccb7110fc7601f61a4c644d40f576 +size 1029536 diff --git a/2110.08851/paper.pdf b/2110.08851/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..e773df84bdb7569cab01028f9fa01e9a159c76ed --- /dev/null +++ b/2110.08851/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:031c13adbf5671d6e36f0cdb052ece076fc4418068e813ac8be585cb53da9ce9 +size 720783 diff --git a/2110.11945/paper.pdf b/2110.11945/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..8de0942cda292b264e323fe22b8aca89670e6b33 --- /dev/null +++ b/2110.11945/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:3fee528ac814789069677db456eab82d3a6155c53668cf9b09a23a7daf8ee155 +size 10885561 diff --git a/2110.14633/paper.pdf b/2110.14633/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..0e7dbcc0c8701064c1293da11c709e07d9a15934 --- /dev/null +++ b/2110.14633/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6e3723d9973247b52ed17b90e54dc1a45b49630af2492ee1b1cf4bfd1b876437 +size 8322234 diff --git a/2111.04239/paper.pdf b/2111.04239/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..a7a62e18dd4c811089d1dc72b923e10ea14c5646 --- /dev/null +++ b/2111.04239/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9581334a5c4a61986b57faa3c3f5ef79f605e95abffdbed25a180e2303e426ea +size 823047 diff --git a/2111.05011/paper.pdf b/2111.05011/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..cba1101718d200bb308c3ab0ddab0dfb025add10 --- /dev/null +++ b/2111.05011/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4d5d7d436827ad86e654fdb8e335258df8e9ba91b3a838ab8d5b488160f8684a +size 1359630 diff --git a/2111.09858/paper.pdf b/2111.09858/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..cdb711ee91dd468fdc8d28915f0e86afe4819e48 --- /dev/null +++ b/2111.09858/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:722471dc0a0722f387398535b55324b05936817e81a92aef916b9b29c04345e2 +size 4781623 diff --git a/2111.12082/paper.pdf b/2111.12082/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..748205a816d442bafbbb6c3694139f06ca409e6e --- /dev/null +++ b/2111.12082/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a79e7c992795a4966f0c8557973504d8c0f38ddf01b86d65abd853b1ffd047a9 +size 1808046 diff --git a/2111.12701/paper.pdf b/2111.12701/paper.pdf new file mode 100644 index 
0000000000000000000000000000000000000000..c799676fd35da1efb555807df0e1c57c844b7c8e --- /dev/null +++ b/2111.12701/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:300840b7d092e416501bf011571f49a060051d0cb37410e953e51cf1a7c7d818 +size 18656575 diff --git a/2111.15362/paper.pdf b/2111.15362/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..7fa1bc796bbc553f50c3a27062f175ab30eb561a --- /dev/null +++ b/2111.15362/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ad832843c39cc8576595d578b10db53d076abd9478d4123d05902eb7fe9824e5 +size 6867372 diff --git a/2112.03002/main_diagram/main_diagram.drawio b/2112.03002/main_diagram/main_diagram.drawio new file mode 100644 index 0000000000000000000000000000000000000000..f85b6366321912986ece14fc644ffffb2fd4d74c --- /dev/null +++ b/2112.03002/main_diagram/main_diagram.drawio @@ -0,0 +1 @@ +7Vxbk6I4FP41Vu0+aHEHH7u1Z6a2Zna7arZ2Z5620hAx20icGFvdX78JJHIJtmjbgA5OTUtOSICT71w4HzgwJ4vtRwKW8y84gNHA0ILtwJwODEN3LI19cckulVimEIQEBakoJ/iK/oNipJSuUQBXQpaKKMYRRcui0MdxDH1akAFC8Ka42wxHQUGwBCFUBF99EKnSv1FA56nUM9xM/gmicC6PrDvjtOcJ+M8hwetYHG9gmLPkk3YvgJxLHGE1BwHe5DRiPgzMCcGYpluL7QRGXLdFtX040Ls/bwJjWmeA89v3739txuHv209kOnEW0z/cH0PdMtN5XkC0hvJCktOlO6kiduZLvsmOBaIIRjgkYDEw75eQoAWkkJT7HrOO+80cUfh1CXw+w4ZhiMnmdBGxls42Z2gLJSrSdhRNcIRJcmjzQ/Jh8pXYRdNYQ710oY0XSCjc5kRCFR8hZudDdmwX0Tv0JCwFbsd22txkILA8fSSk8zwEbEvAT0Av3M+eqZ9tiBU4aTUsRfkwYHAVTUzoHIc4BtFDJr1PMAj5vFw32T6fMV4Krf4LKd0JLYM1xcU1gFtEv/Hh7GrT1vdcz3QrZk4au1wjt8zTV5dlhdfEh69euCNMH5AQ0jp7cr28utAERoCil6KZv8OS2Te5ZJdTrxj6iBE7RGZ/lr43LmGBuowccpYUNmJg3rOdMVcKLGWuZM33V/UWGDjH/Sj3TohFns/gCUaPeIUowjHresKUYu5Q5Q53EQp5B8Ulbyk98WIb8mg8mkV4488BoSMcD3mo+ofAGWSLya2t7EvH7lRz3Qu5T2fsFhRuaKbiP6WHLThP7TBS3miI7vEVgHFwx3MG1vIjsFohv6jfZjyYV9PEcqq0K1QpZW+1RMcoW4+hnW2JlmKJRtOW6HXPEi9hcbZeTFiYj2vb4sa3qWmnpOm9a2lN09LUb03Tlq13TdP6cU13I4qMOxZF1Bys7PlrRxHbKc/lNhxEbOPtMJA5up7L0LN8vX6OzlBEdt/yjdxMvJlNlbR2+dYl79HGdbEpMdUVbFoKns7HpnqvURObDCtgl9ttyXdYv
Q96axR1rjFclFMgXTfaDhfWbWracYu3d7rRuqbtVgOzXOgazs/slvNztLLD8s6Oy3Z5qqbLLHaNMksnUNC59ExBwfhyKLCaRkFVqceJmMruA/RSQIPzY80JlvsZjukwJRHu2A66vdxmnWwrFN+R3PmcWbQZ8IsDHgFfthizvs/si+6YH88P8PfluUxoah7/d/j0VksQV55eRkkNxcT8LCkB8UrA/fQ500tOxXy2GJMF4Eskrph3bwRgeb/Fa0eiM4KUZZ5DNreP4rByPAtIdIjiILFF3q9xleY7k9OfsVFyfAz3O2wwCYrT54cHaLWMWM6VdKA4QrmRLIQCmp+xrJjJVNcZyrVf5usFiH+VenoiZc0xCKfKqytO0aWIE+RKacmlcUWUUgJK8DOU1d3kEooFXyECIqfwmYITPq6cbCxQECScRBVRV+IpuKIpt3n7UiycU2LhTEdJM3S9wi2W0/cL5hk1yoe5CCPUXGA0C1X32Qw6vl+4jcvu3PakzSu3cSWtZ8yPZ+e5H23EqagGKbsTMiK3Y7FQuR10zo6FSlgtZ8DvHgtrFGHbxOsbECYr+VeIMJUGMc2Ra5+JstyDCHuGplmUSQO5RZTVL2t1DGW24ZRRZl8xxE6rvF8TxGRJ4PogZo0VR6ZftyMzbhdl2tWiTKkouNcNshr195ZA5mnKzWOtR4MuSV12DX456vJ0sCkOsnGwVVEQHSiGyVnY6Vl/DuwHPd3sCyHVjyNrpcK6rlZCrEYLIU4V4dJBXGk9rk7AlTFuHVdVFE4HcWX0uDrFX8k6VHu4OkwKNcbnFLHxVE2yCJl9/2VgT4/TBblZNLTiqmP/n1EcsC88Y38EU/EKP1EE7sUhGsEZvShA+fnKFyi0S6DVdY5H18rnGd4RrlU8wy3CFXFuj+MiUTfT9h6xZW6tR6qK1Kp4bXhWo0itYhhuEakHHOuEa3+O/Pmh/dQjVmUKLemqrSU6Hod6oz8YnmzV6E230fDkVhE+3TJ6cR6Vj9CIgpY6WS1TrRnGFsAneDnnL5gb2mg0uomI9i5BzFFvDkyt0SDmVrFL3cLz+wSxBJB8RQmmEMX9fcJJjrjieaSG7xPcKsLqpwRun4f1eVjD5u+2n4dVUYkdrJb2VfiTqvAVCVGz1VK3xotLr1PUbHlzDLKWfAYK1bx/XHVQ92dKDuq7K3SxSqkYxTnqk8fylA7N9N7csdtRju/MoAfXz4DsMDP8E/3Q9V5yKa1vy+92P7Zb0uPJ59gcNbbLsktDLvgwEdowiCZT3eP1+D4XzD9cayvvjBtq0G62iNEZilMips7bUT89lFxrrLwNoP7Oia55jWLpMP94jXFQVAv6qN9H/cL7hVrpB4baj/qdYVP750ZqJAFm8QbNqCAyxk3CxzvMy12jIytQaJzJi3fDFUWLdTI2/GkcOhtFmYkY3F1pBPpwmTaP8Ym9o5c51tgrP4eve4qtGvplfD1rZj/cnVZjsl9HNx/+Bw== \ No newline at end of file diff --git a/2112.03002/main_diagram/main_diagram.pdf b/2112.03002/main_diagram/main_diagram.pdf new file mode 100644 index 0000000000000000000000000000000000000000..a8d516c76c3584371e4ce9ecb69ccbec64ccd08d Binary files /dev/null and b/2112.03002/main_diagram/main_diagram.pdf differ diff --git a/2112.03002/paper_text/intro_method.md b/2112.03002/paper_text/intro_method.md new file mode 100644 index 
0000000000000000000000000000000000000000..f2b18f7f302e502d3043d9676995a795418be143 --- /dev/null +++ b/2112.03002/paper_text/intro_method.md @@ -0,0 +1,11 @@ +# Introduction + +Mining biomedical text data, such as scientific literature and clinical notes, to generate hypotheses and validate discovery has led to many impactful clinical applications [@zhao2021recent; @lever2019cancermine]. One fundamental problem in biomedical text mining is entity normalization, which aims to map a phrase to a concept in the controlled vocabulary [@sung2020biomedical]. Accurate entity normalization enables us to summarize and compare biomedical insights across studies and obtain a holistic view of biomedical knowledge. Current approaches [@wright2019normco; @ji2020bert; @sung2020biomedical] to biomedical entity normalization often focus on normalizing more standardized entities such as diseases [@dougan2014ncbi; @li2016biocreative], drugs [@kuhn2007stitch; @pradhan2013task], genes [@szklarczyk2016string] and adverse drug reactions [@roberts2017overview]. Despite their encouraging performance, these approaches have not yet been applied to the more ambiguous entities, such as processes, pathways, cellular components, and functions [@smith2007obo], which lie at the center of life sciences. As scientists rely on these entities to describe disease and drug mechanisms [@yu2016translation], the inconsistent terminology used across different labs inevitably hampers the scientific communication and collaboration, necessitating the normalization of these entities. + +![Illustration of GraphPrompt. 
GraphPrompt classifies a test synonym (CD115 (human)) to an entity in the graph by converting the graph into prompt templates based on the zeroth-order neighbor ($\mathcal{T}^0$), first-order neighbors ($\mathcal{T}^1$), and second-order neighbors ($\mathcal{T}^2$).](imgpdf/intro.pdf){#figIntro width="50%"} + +The first immediate bottleneck to achieve the normalization of these under-explored entities is the lack of a high-quality and large-scale dataset, which is the prerequisite for existing entity normalization approaches [@wright2019normco; @ji2020bert; @sung2020biomedical]. To tackle this problem, we collected 70 types of biomedical entities from OBO Foundry [@smith2007obo], spanning a wide variety of biomedical areas and containing more than 2 million entity-synonym pairs. These pairs are all curated by domain experts and together form a high-quality and comprehensive controlled vocabulary for biomedical sciences, greatly augmenting existing biomedical entity normalization datasets [@dougan2014ncbi; @li2016biocreative; @roberts2017overview]. The tedious and onerous curation of this high-quality dataset further confirms the necessity of developing data-driven approaches to automating this process and motivates us to introduce this dataset to the NLP community. + +In addition to being the first large-scale dataset encompassing many under-explored entity types, this OBO-syn dataset presents a novel setting of graph-based entity normalization. Specifically, entities of the same type form a relational directed acyclic graph (DAG), where each edge represents a relationship (e.g., $\mathrm{is\_a}$) between two entities. Intuitively, this DAG could assist the entity normalization since nearby entities are biologically related, and thus more likely to be semantically and morphologically similar. 
Existing entity normalization and synonym prediction methods are incapable of considering the topological similarity from this rich graph structure [@wright2019normco; @ji2020bert; @sung2020biomedical], limiting their performance, especially in the few-shot and zero-shot settings. Recently, prompt-based learning has demonstrated many successful NLP applications [@radford2019language; @schick2020exploiting; @jiang2020can]. The key idea of using prompt is to circumvent the requirement of a large number of labeled data by creating masked templates and then converting supervised learning tasks to a masked-language model task [@liu2021pre]. However, it remains unknown how to convert a large graph into text templates for prompt-based learning. Representing graphs as prompt templates might effectively integrate the topological similarity and textural similarity by alleviating the over-smoothing caused by propagating textual features on the graph. + +In this paper, we propose GraphPrompt, a prompt-based learning method for entity normalization with the consideration of graph structures. The key idea of our method is to convert the graph structural information into prompt templates and solve a masked-language model task, rather than incorporating textual features into a graph-based framework. Our graph-based templates explicitly model the high-order neighbors (e.g., neighbors of neighbors) in the graph, which enables us to correctly classify synonyms that have relatively lower morphological similarity with the ground-truth entity (**Figure [1](#figIntro){reference-type="ref" reference="figIntro"}**). Experiments on the novel OBO-syn dataset demonstrate the superior performance of our method against existing entity normalization approaches, indicating the advantage of considering the graph structure. 
Case studies and the comparison to the conventional graph approach further reassure the effectiveness of our prompt templates, implicating opportunities on other graph-based NLP applications. Collectively, we introduce a novel biomedical entity normalization task, a large-scale and high-quality dataset, and a novel prompt-based solution to advance biomedical entity normalization. diff --git a/2112.05787/paper.pdf b/2112.05787/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..3417abc22bc63bb03263df9efc778468f4068847 --- /dev/null +++ b/2112.05787/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:102c86c2ca8fa5bf36bb64c947071885520d5ec9e50411eb08b508b16955d5fd +size 972162 diff --git a/2112.06170/paper.pdf b/2112.06170/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..d37e8cf2ea85df4a8393c36aedad3ce34d3c9d08 --- /dev/null +++ b/2112.06170/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:01b107289d06e49b0df64cf16a2d6b90d25ccb53a08be91a847bb9331d104d6b +size 2837862 diff --git a/2112.08544/paper.pdf b/2112.08544/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..e3df6ac98fbd1267d6d1c5bef224766abae1ef03 --- /dev/null +++ b/2112.08544/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c91a5da27703735fe60766eaf5c82a35b7bb41cdce6328642fa6737f0b6bd410 +size 1817419 diff --git a/2202.05420/paper.pdf b/2202.05420/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..cd5ffc387f4db2ac6d7036f1d79d985b6e93e2f5 --- /dev/null +++ b/2202.05420/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bdfd8e4f8c6bd92f428b50c88bd12addc4d15b65e24ba06ed7599076adc26b25 +size 1041215 diff --git a/2203.03079/paper.pdf b/2203.03079/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..86cb3aef24318531ed709990885159655050c30b --- /dev/null 
+++ b/2203.03079/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:08637f71d5236f51b20d991698fa028c207bb1043348492b89979d4d47682fff +size 12876096 diff --git a/2203.08734/paper.pdf b/2203.08734/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..2c65f43fe5f313bbd7ac79a26a0a2fce95a89381 --- /dev/null +++ b/2203.08734/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cb51252d241695a4d10a779c9f3f4c69e36aeeca0259daeb08f63bbe0337c25e +size 1969200 diff --git a/2203.08788/paper.pdf b/2203.08788/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..979689be0630ce6c36d81fc57e204e898852600d --- /dev/null +++ b/2203.08788/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a322efbff57b38ba40797d6ad59a97911c6649a5455a8354d778789f51149218 +size 11515829 diff --git a/2203.11654/paper.pdf b/2203.11654/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..258b70d6103b7ab680a9474d051e049ecd703235 --- /dev/null +++ b/2203.11654/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:aca5f716e9458f85dbf7322f7fc637bbb1d06568e44e7a2d2ee0c3afbb489f92 +size 3412066 diff --git a/2203.12560/paper.pdf b/2203.12560/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..343912a3cb16444d2dabd5a2a329926acee314bc --- /dev/null +++ b/2203.12560/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7405317d04db9f9e41cb3cfe0021e867586473d7e92d0f1fb373d19411dc51a5 +size 15414322 diff --git a/2203.16001/paper.pdf b/2203.16001/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..aa7c2013150c5ec82887fec5dd52b43ee6e4c562 --- /dev/null +++ b/2203.16001/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:adccf6288729d191f90ac62aef8f3bd3865e3b3c3d5c1cf83038e9d0137a9d29 +size 33434410 diff --git 
a/2204.01172/paper.pdf b/2204.01172/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..6f86bba6f51dbafa15a85a6f59cbde9fe59c2451 --- /dev/null +++ b/2204.01172/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:46ecc815c416401417b0ab08c2ba02cd818b246e5f7bc066f939420ac2103a53 +size 606150 diff --git a/2204.03688/paper.pdf b/2204.03688/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..e9656fdbef3642c4c95ba4a63acdf89842ba557f --- /dev/null +++ b/2204.03688/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:813aed2a00936462ee54b92e77bfbc9a4b6005ceb19954a1a2d86e5f3c04e1e3 +size 18171462 diff --git a/2204.06260/paper.pdf b/2204.06260/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..5910c2dde805e59dc89cf9de4089e561b51135ee --- /dev/null +++ b/2204.06260/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:17103b68596bd563dc8f26ad59ad6038e9563873131452d7745c0fb45c378fc6 +size 255352 diff --git a/2204.10356/paper.pdf b/2204.10356/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..9b37afd1d9f924ce7d1e6e1845a2e8b00998b252 --- /dev/null +++ b/2204.10356/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:65aeaacd9511abfaac9cbf8878e990500944d3f741c9b86180421900ed522d07 +size 18789234 diff --git a/2204.12516/paper.pdf b/2204.12516/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..5e65cbe5196280e316d04458817bf9652beb4eda --- /dev/null +++ b/2204.12516/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2b7f39d04554b4118e5d16deb52e2cac3f223a17afe8d67014e1beb50f2e5605 +size 13794680 diff --git a/2205.00320/paper.pdf b/2205.00320/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..a037d8bdab51fcef4b6d7831b5bd2fd76a01fa42 --- /dev/null +++ b/2205.00320/paper.pdf @@ 
-0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4bdcdee7cb4294503c3e58d011b187f6989147a5f1179b03854ed8e722ae8b87 +size 732055 diff --git a/2205.04012/paper.pdf b/2205.04012/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..b07a5116a0e9485319c79379de72a3d2a71199d1 --- /dev/null +++ b/2205.04012/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:edd1f5d9e6548f51485a070a5441a9f34b545aa6b9ad524bae4595ae3ab375f9 +size 186202 diff --git a/2205.07177/paper.pdf b/2205.07177/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..c3dde4f43ea177e9edf3db78637f8b5d71703fe9 --- /dev/null +++ b/2205.07177/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7e3f9fedb49852c4825ca2771bd9d0b01da77c366de36f2e6275b4cc4f523f67 +size 497070 diff --git a/2205.09963/paper.pdf b/2205.09963/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..1071ec26fb2e633a62ddcf49744b3555dde54afe --- /dev/null +++ b/2205.09963/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:146a740ca905cce9d3d2e5c67de2915e4a0b100e41321b1101b5eed81d3567f3 +size 740336 diff --git a/2205.11028/paper.pdf b/2205.11028/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..af1f87b3c5b5284e22099d218804b1b1f642f566 --- /dev/null +++ b/2205.11028/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7fb6e5ef849df3efba5b002ebafa5b2fc2669b9a969cfd46250c08a21e9ddfa3 +size 3520333 diff --git a/2205.12006/paper.pdf b/2205.12006/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..4677ff5f82561d59da8a23fc95b430fa60bde856 --- /dev/null +++ b/2205.12006/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fc5dc37540bb8d962e8769de1a405e78d137a55f36023780fea7cd73470d3878 +size 1493784 diff --git a/2205.13662/paper.pdf b/2205.13662/paper.pdf new 
file mode 100644 index 0000000000000000000000000000000000000000..32740b9778021d9afae0593adde3968bf1987c95 --- /dev/null +++ b/2205.13662/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4f8e06a310bcf9fc33e3e15d2d7e8f944320703d7b1c74f88f72a90eeb1446cc +size 3803684 diff --git a/2205.14794/paper.pdf b/2205.14794/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..b8375c23b573160ff2378231dda0be776256333c --- /dev/null +++ b/2205.14794/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ecdb117950d311f68ec8023d84c46b90871e7d3abc63d0825f9327d8378c6387 +size 886042 diff --git a/2205.14962/paper.pdf b/2205.14962/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..f2edbdc4f6c8912d55283279caef7b228b2e420b --- /dev/null +++ b/2205.14962/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8168ccda20a106d1523d47304992de71820c3c2d6f368308eff544f6543ea96a +size 2101529 diff --git a/2205.15544/paper.pdf b/2205.15544/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..1be8f0023b4cd06dad0cc28da9b3ab5b7c4f09d5 --- /dev/null +++ b/2205.15544/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5c3469e94ff5337a562eff25f4fcc14551c05ed96ee37222ef70628c97eebb12 +size 576817 diff --git a/2206.05852/paper.pdf b/2206.05852/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..e6c6e9f7530d38214556b4868da076f573799bbc --- /dev/null +++ b/2206.05852/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:26c8dbafbd6398af89e03e1d1acdd2d0de79b5e2d592f8c512803bfcff078053 +size 1049958 diff --git a/2206.06719/paper.pdf b/2206.06719/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..94046c6c55b83e45ecb1e7e4e7e68cd95caee264 --- /dev/null +++ b/2206.06719/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 
+oid sha256:de573a9d62213c80204c15441af27674c38f7341a7cc47a9c2ba89748b6f8d4f +size 10088258 diff --git a/2206.09016/paper.pdf b/2206.09016/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..106265a966b78776e41fd64af3cae9bece9f2970 --- /dev/null +++ b/2206.09016/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3e5249c024bfc65c18ae9fae1750e19754000b7605e73232986b454afb6ce9fc +size 703735 diff --git a/2206.13464/paper.pdf b/2206.13464/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..e86d1166c04f38ea307ee494140e9e72284fbe88 --- /dev/null +++ b/2206.13464/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ee1317c97471e67a454f7713fa86ed17cf7f56b982064f27233beefc07ec27ea +size 4702438 diff --git a/2207.01583/paper.pdf b/2207.01583/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..b239fbff1499b77b57b07f9281c8e800204e7233 --- /dev/null +++ b/2207.01583/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a654734b8dea47b8aec6eb6b88332312d9a54d01477c4dcba90179baa931c7cf +size 9269021 diff --git a/2207.03411/paper_text/intro_method.md b/2207.03411/paper_text/intro_method.md new file mode 100644 index 0000000000000000000000000000000000000000..011feff8f225e37938303d4dc08d362013ce7f6c --- /dev/null +++ b/2207.03411/paper_text/intro_method.md @@ -0,0 +1,115 @@ +# Introduction + +There has been a significant progress in image-to-image translation methods [15,26,39,8,23,41,22,25] especially for facial attribute editing [7,27,37,42,21] powered with generative adversarial networks (GANs). A main challenge of facial attribute editing methods is to be able to change only one attribute of an image without affecting others such as global lighting parameters of the images, identity of the persons, background, or their other attributes. 
The other challenge is the interpretability of the style codes so that one can control the attribute intensity of the edit, e.g. increase the intensity of smile or aging. + +To achieve the targeted attribute editing while preserving the others, many works set a separate style encoder and an image editing network where modified styles are injected into it [7,21]. During image-to-image translation, a style encoded from another image or a newly sampled style latent code can be used to output diverse images. To disentangle attributes, works focus on style encoding + +![](_page_1_Figure_2.jpeg) + +Fig. 1: Attribute editing results of VecGAN. The first column shows the source images, and other columns show the results of editing a specific attribute. Each edited image has an attribute value opposite to that of the source one. For hair color, sources are translated to brown, black, and blonde hair, respectively. + +and progress from a shared style code, SDIT [34], to mixed style codes, Star-GANv2 [7], to hierarchical disentangled styles, HiSD [21]. Among these works, HiSD independently learn styles of each attribute, bangs, hair color, glasses and introduces a local translator which uses attention masks to avoid global manipulations. HiSD showcases successes on those three local attribute editing task and is not tested for global attribute editing, e.g. age, smile. Furthermore, one limitation of these works is the uninterpretablity of style codes as one cannot control the intensity of attribute (e.g. blondness) in a straight-forward manner. + +To overcome the challenges of facial attribute editing task, we propose a novel framework, VecGAN, and image-to-image translation framework with interpretable latent directions. Our framework does not require a separate style encoder as in the previous works since we achieve the translation in the encoded latent space directly. 
The attribute editing directions are learned in the latent space and regularized to be orthogonal to each other for style disentanglement. The other component of our framework is the controllable strength of the change, a scalar value. This scalar can be either sampled from a distribution or encoded from a reference image by projection in the latent space. Our framework not only achieves significant improvements over state-of-the-arts for both local and global edits but also provides a knob to control the editing attribute intensity via its design. + +VecGAN is encouraged by the findings that well-trained generative models organize their latent space as disentangled representations with meaningful directions in a completely unsupervised way. Exploring these interpretable directions in latent codes has emerged as an important research endeavor on the fixed pretrained GANs [28,31,11,29,36]. These works show that images can be mapped to the GANs latent space and edits can be achieved by manipulations in the latent space. However, since these models are not trained end-to-end, the results are sub-optimal as will also be shown in our experiments. + +To enable VecGAN, different than previous works of image-to-image translation networks, we use a deeper neural network architecture. Image-to-image translation methods, such as state-of-the-art HiSD [21] uses a network with small receptive fields that decreases the image resolution only by four times in the encoder. However, we want an organization in a latent space such that we can take meaningful linear directions. Therefore, images should be encoded to a spatially smaller feature space and a network should have a full understanding of an image. For that reason, we set a deep encoder and decoder network architecture but then this network faces the challenges of reconstructing all the details from the input image. 
To solve this problem, we use a skip connection between the encoder and decoder, but only at a low resolution, to find the optimal equilibrium of information flow with and without the dimensionality-reduction bottleneck.
We perform the translation in the encoded latent space, e, which is obtained by e = E(x) where E refers to the encoder. The encoded features go through a transformation T which is discussed in the next section. The transformed features are then decoded by G to reconstruct the translated images. The image generation pipeline following feature encoding is described in Eq. 1. + +$$e' = T(e, \alpha, i)$$ + + +$$x' = G(e')$$ + (1) + +Previous image-to-image translation networks [21,38,7] set a shallow encoder decoder architecture to translate an image and a separate deep network for style + +![](_page_4_Figure_2.jpeg) + +Fig. 2: **VecGAN pipeline.** Our translator is built on the idea of interpretable latent directions. We encode images with an Encoder to a latent representation from which we change a selected tag (i), e.g. hair color with a learnable direction $A_i$ and a scale $\alpha$ . To calculate the scale, we subtract the target style scale from the source style. This operation corresponds to removing an attribute and adding an attribute. To remove the image's attribute, source style is encoded and projected from the source image. To add the target attribute, target style scale is sampled from a distribution mapped for the given attribute (j), e.g. blonde, brown or encoded and projected from a reference image. + +encoding. In most cases, the style encoder includes separate branches for each tag. The shallow architecture that is used to translate images prevents the model from making drastic changes in the images and this helps preserving the identity of the persons. Our framework is different as we do not employ a separate style encoder and instead have a deep encoder-decoder architecture for translation. That is because to be able to organize the latent space in an interpretable way, our framework requires a full understanding of the image and therefore a larger receptive field; deeper network architecture. 
A deep architecture with decreasing size of feature size, on the other hand, faces the challenges of reconstructing all the fine details from the input image. + +With the motivation of helping the network to preserve tag independent features such as the fine details from background, we use skip connections between our encoder and decoder. However, we observe that the flow of information should be limited to force the encoder-decoder architecture learn facial attributes and well-organized latent representations. Because of that reason, we only allow skip connection at low resolution. This design is extensively justified in our Ablation Studies. + +To achieve a style transformation, we perform the tag-based feature manipulation in a linear fashion in the latent space. First, we set a feature direction matrix A which contains learnable feature directions for each tag. In our formulation $A_i$ denotes the learned feature direction for tag i. Direction matrix A is randomly initialized and learned during the training process. + +Our translation module is formulated in Eq. 2, which adds the desired shift on top of the encoded features e similar to [31]. + +$$T(e, \alpha, i) = e + \alpha \times A_i \tag{2}$$ + +We compute the shift by subtracting target style from the source style as given in Eq 3. + +$$\alpha = \alpha_t - \alpha_s \tag{3}$$ + +Since the attributes are designed as linear steps in the learnable directions, we find the style shift by subtracting the target attribute scale from source attribute scale. This way the same target attribute αt can have the same impact on the translated images no matter what the attributes were of the original images. For example, if our target scale corresponds to brown hair, the source scale can be coming from an image with blonde or back hair but since we take a step for difference of the scales, they can be both translated to an image with the same shade of brown hair. 
To extract the target shifting scale $\alpha_t$ for feature (tag) $i$, there are two alternative pathways. The first pathway, named the latent-guided path, samples $z \in U[0, 1)$ and applies a linear transformation $\alpha_t = w_{i,j} \cdot z + b_{i,j}$, where $\alpha_t$ denotes the sampled shifting scale for tag $i$ and attribute $j$. Here, tag $i$ can be hair color and attribute $j$ can be blonde, brown, or black hair. For each attribute we learn a different transformation module, denoted as $M_{i,j}(z)$. Since we learn a single direction for every tag, e.g. for hair color, this transformation module can put the initially sampled $z$'s onto the correct scale along the linear direction based on the target hair color attribute. As the other alternative pathway, we encode the scalar value $\alpha_t$ in a reference-guided manner. We extract $\alpha_t$ for tag $i$ from a provided reference image by first encoding it into the latent space, $e_r$, and projecting $e_r$ via $A_i$ as given in Eq. 4.
For each iteration to optimize our model, we sample a tag i for shift direction, a source attribute j as the current attribute and a target attribute ˆj. + +Non-translation path. To ensure that the encoder-decoder structure preserves details of the images, we perform a reconstruction of the input image without applying any style shifts. The resulting image is denoted as xn as given in Eq. 5. + +$$x_n = G(E(x)) \tag{5}$$ + +Cycle-translation path. We apply a cyclic translation to ensure that we get a reversible translation from a latent guided scale. In this path, as shown in Fig. 3, we first apply a style shift by sampling z ∈ U[0, 1) and obtaining target αt with Mi,ˆj (z) for target attribute ˆj. The translation uses α that is obtained by subtracting αt from the source style. Decoder generates an image, xt as given in Eq. 6 where e is encoded features from input image x, e = E(x). xt refers to the image without glasses in Fig. 3. + +$$x_t = G(T(e, M_{i,j}(z) - P(e, i), i))$$ +(6) + +Then by using the original image, x, as a reference image, we aim to reconstruct the original image by translating xt. Overall, this path attempts to reverse a latent-guided style shift with a reference-guided shift. The second translation is given in Eq. 7 where et = E(xt). + +$$x_c = G(T(e_t, P(e, i) - P(e_t, i), i))$$ +(7) + +In our learning objectives, we use xn and xc for reconstruction and xt and xc for adversarial losses, and Mi,j (z) for the shift reconstruction loss. Details about the learning objectives are given in the next section. + +Given an input image xi,j ∈ Xi,j , where i is the tag to manipulate and j is the current attribute of the image, we optimize our model with the following objectives. In our equations, xi,j is shown as x. + +Adversarial Objective. During training, our generator performs a styleshift either in a latent-guided way or a reference-guided way, which results in a translated image. 
In our adversarial loss, we receive feedback from the two steps of the cycle-translation path. As the first component of the adversarial loss, we feed a real image x with tag i and attribute j to the discriminator as the real example. To give adversarial feedback to the latent-guided path, we use the intermediate image generated in the cycle-translation path, xt. Finally, to provide adversarial feedback to the reference-guided path, we use the final outcome of the cycle-translation path, xc. Only x acts as a real image; both xt and xc are translated images, and they are treated as fake images with different attributes. The discriminator aims at classifying whether an image, given its tag and attribute, is real or not. The objective is given in Eq. 8. + +$$\mathcal{L}_{adv} = 2log(D_{i,j}(x)) + log(1 - D_{i,\hat{j}}(x_t)) + log(1 - D_{i,j}(x_c))$$ +(8) + +Shift Reconstruction Objective. As the cycle-consistency loss performs latent-guided generation followed by reference-guided generation, we utilize a loss function to make these two methods consistent with each other [19,14,20,21]. Specifically, we would like to obtain the same target scale, αt, both from the mapping and from the encoded reference image generated by the mapped αt. The loss function is given in Eq. 9. + +$$\mathcal{L}_{shift} = ||M_{i,\hat{j}}(z) - P(e_t, i)||_1 \tag{9}$$ + +Those parameters, Mi,ˆj (z) and P(et, i), are calculated for the cycle-translation path as given in Eq. 6 and 7. + +Image Reconstruction Objective. In all of our training paths, the purpose is to be able to re-generate the original image again. To supervise this desired behavior, we use the L1 loss as the reconstruction loss. In our formulation, xn and xc are outputs of the non-translation path and cycle-translation path, respectively. The formulation of this objective is provided in Eq. 10. + +$$\mathcal{L}_{rec} = ||x_n - x||_1 + ||x_c - x||_1 \tag{10}$$ + +Orthogonality Objective.
To encourage the orthogonality between directions, we use soft orthogonality regularization based on Frobenius norm, which is given in Eq. 11. This orthogonality further encourages a disentanglement in the learned style directions. + +$$\mathcal{L}_{ortho} = ||A^T A - I||_F \tag{11}$$ + +Full Objective. Combining all of the loss components described, we reach to the overall objective for optimization as given in Eq. 12. We additionally add L1 loss on the matrix A parameters to encourage its sparsity. + +$$\min_{E,G,M} \max_{A,D} \lambda_a \mathcal{L}_{adv} + \lambda_s \mathcal{L}_{shift} + \lambda_r \mathcal{L}_{rec} + \lambda_o \mathcal{L}_{ortho} + \lambda_{sp} \mathcal{L}_{sparse}$$ +(12) + +To control the dominance of each loss component, we use λa, λs, λr, λo, and λsp hyperparameters. These hyperparameter values and training details are given in Supplementary. diff --git a/2207.07697/paper.pdf b/2207.07697/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..03333bf1090e13407f63ab544807b429608a1f23 --- /dev/null +++ b/2207.07697/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:323ab7eb90fa3be0e74a094969daf5dafa698071302d808425bd92dc22252678 +size 598691 diff --git a/2207.08605/paper.pdf b/2207.08605/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..dbec8028f16ee627a6f77c57370b186867fd32c5 --- /dev/null +++ b/2207.08605/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:931b60acfc9ce4a83ca1248a36d7cbc111fe626dc8853eceab00e4bfafbeaa38 +size 2952688 diff --git a/2207.10040/paper.pdf b/2207.10040/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..0873e50137572ad962f0f28c357a7e19bac60e85 --- /dev/null +++ b/2207.10040/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fbe80d163d9281c4152edcf0121b3514a6179526372b81dc227430ffb3117368 +size 10896264 diff --git a/2207.10667/paper.pdf 
b/2207.10667/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..b54ecc445d07e10b2c047b2d6f169d36a0c1b7d0 --- /dev/null +++ b/2207.10667/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b65ec81ded990c029efa81482825fd34b020af2b508e92dd2d5755dd87e282dc +size 7638081 diff --git a/2207.12389/paper.pdf b/2207.12389/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..27bd30fd74f33264663ae3c6e9bf11763c87b230 --- /dev/null +++ b/2207.12389/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7e2975617568a852f637fa942910aa9d86a1594e7b8f30751815eea809755d50 +size 4526435 diff --git a/2208.01838/paper.pdf b/2208.01838/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..bde1fae1469f1b498f54863ef6386e4f5d18d292 --- /dev/null +++ b/2208.01838/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7cade2d85f4f9f61ed6915087236fa8efcf6f7e15d346bb3cb6e61a91765f5dd +size 2859752 diff --git a/2208.09170/paper.pdf b/2208.09170/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..c489137d53b99b0caf427380bb31c1806b8e5f27 --- /dev/null +++ b/2208.09170/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e021d3c9cb87668d6ee81fb915984ded1ed7ee6d364920ed227d7c08b36d0f6a +size 2194181 diff --git a/2208.10024/paper.pdf b/2208.10024/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..f4bc8564914ac2aefd0b8a9a510190f93d25f372 --- /dev/null +++ b/2208.10024/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:49db4f1c7af07de79ff8e556f4e14f435f7038c8a13cb3dc1607eefa1054b0aa +size 6133468 diff --git a/2209.01814/paper.pdf b/2209.01814/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..f936050afc3fad44cd515fb5d09b0b8a7d389ec5 --- /dev/null +++ b/2209.01814/paper.pdf @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:989ca9012cbc096cee49d5085eca7f859c5070d0564b210819c1eb9df882a42e +size 4024918 diff --git a/2209.10732/paper.pdf b/2209.10732/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..ea47abf51d7c0b5cbc7c6db4057fcb2b0c62762b --- /dev/null +++ b/2209.10732/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5f8288872d56a5beca387cef5e99dd3934c74b028243a38c0b7d9e96857dc249 +size 1422490 diff --git a/2209.15486/paper.pdf b/2209.15486/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..6a273c712940fd9ed248880a643085ce56a10a76 --- /dev/null +++ b/2209.15486/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2e9bcdaab59798fa52b28441415332c6269a3251da95a5246293e016973098be +size 1204751 diff --git a/2210.00036/paper.pdf b/2210.00036/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..8b6f3f949f24a3f1574482ea1f68ba6f4c7298ad --- /dev/null +++ b/2210.00036/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d44f5e92edad7e69fdb37f41bf88ad3702fa54ca2c0cab4f8c333fba3aa32212 +size 860231 diff --git a/2210.02412/paper.pdf b/2210.02412/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..f2b82e581f1f06271e782d4d1582391bb9f283eb --- /dev/null +++ b/2210.02412/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d1270ceca74eccdc916d513d8e9334e348bea66d4995285c37a0ca8edf86ff54 +size 1240834 diff --git a/2210.10664/paper.pdf b/2210.10664/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..a8ffdd5ae8f7c1a670f7a809bcc351768598fb2e --- /dev/null +++ b/2210.10664/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1417ac4612711508050ef87a1dd96c70e3ae8b4203ace8ed3ef87ad11637b573 +size 813799 diff --git a/2210.12288/paper.pdf b/2210.12288/paper.pdf new file mode 100644 
index 0000000000000000000000000000000000000000..93dfc22fe9a7311807e9f632fa834728da3bf3c5 --- /dev/null +++ b/2210.12288/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:adde05272522b94e846c1e45c5843128c7dc0c3d5ec5b76713b87f9c18e7bed5 +size 2314212 diff --git a/2210.12524/paper.pdf b/2210.12524/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..0ee07432d22bf8d93e1c42c51ef48c6478f10f47 --- /dev/null +++ b/2210.12524/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:327bdd80e837f10ef48a048363d76c03e7c85e3fb6cea40fdf65e70f8e158824 +size 17339554 diff --git a/2210.14128/paper.pdf b/2210.14128/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..49277d30bf3c3c7b1767718a396eb6cf92cd9c4b --- /dev/null +++ b/2210.14128/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d93102fbfc4f941c7f7a81709b8a1968c5941e7542df7202fc4fefa7e70315a9 +size 758794 diff --git a/2210.16114/paper.pdf b/2210.16114/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..9725538f0622eb21008385ea19299e87b3800d23 --- /dev/null +++ b/2210.16114/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bd56c96b211a278dcfec0de1f1e40c1a867f7817e851d73e9d7d08176c805794 +size 834987 diff --git a/2210.16613/paper.pdf b/2210.16613/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..9e51a172387f6513d80504a12288017d613e4b15 --- /dev/null +++ b/2210.16613/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7a410770d3d35f8dce847da0ada51f207fa30a84a1888b1ca1e032333691ee8a +size 666546 diff --git a/2211.00164/paper.pdf b/2211.00164/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..d312970e01f7f69cda895d1ca0185003829869cf --- /dev/null +++ b/2211.00164/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:e7be388a618948bd55fa2133ee2fd3823d97668326d17307c77241461f15ab68 +size 7323097 diff --git a/2211.01910/paper.pdf b/2211.01910/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..9420c68e81389674dfe633d6dd30c1da46867ac4 --- /dev/null +++ b/2211.01910/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b4162f38101db7511e6292a2577c66514c757824ddd21f8426a6276485355e81 +size 4085477 diff --git a/2211.02527/paper.pdf b/2211.02527/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..83e187dda0173342d8432e34e7bb4c462e2d767b --- /dev/null +++ b/2211.02527/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:94cdd5017c6267aa0de45b19886e9a8dfbd702722c920b2c04d768529255aab0 +size 336304 diff --git a/2211.03041/paper.pdf b/2211.03041/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..c32b380ec96b1cdd846525ecc6aa21e033346030 --- /dev/null +++ b/2211.03041/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cd0cf9b0bf02983c382553a43baeb4a595299b34b0595b7d7e2329cbca1b2be1 +size 395878 diff --git a/2211.12254/paper.pdf b/2211.12254/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..6165423a67870c64d8079b60edfbe9784c2b2e1b --- /dev/null +++ b/2211.12254/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5bc2190a7aed7171cbc4a09196e6d78d7125070b11487fdfea77d7ecf2358633 +size 12348986 diff --git a/2211.12759/paper.pdf b/2211.12759/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..a1b856c6ff18d1f1c675ec63e019d960a24f11b0 --- /dev/null +++ b/2211.12759/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dc44644ae1404827dd11aacdcb34f0d9d83e1c37152ffe9fc57503ce46979007 +size 478791 diff --git a/2212.00921/paper.pdf b/2212.00921/paper.pdf new file mode 100644 index 
0000000000000000000000000000000000000000..dc6fd76cdda4a257e0233984e5dc6d1cc86cc070 --- /dev/null +++ b/2212.00921/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:25e014dd7239be4c7cb78fb9ca5c0a6be1be63052b93a73ad00f41d7e6552aec +size 3733895 diff --git a/2212.01448/paper.pdf b/2212.01448/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..8a6bb9b8036133ce800345f6e2bea62b32ed8300 --- /dev/null +++ b/2212.01448/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fd59fcad6cd14f4da6a4fbda1fe3866f606b6a62bd6cb23ce57f152d4b204857 +size 6709563 diff --git a/2212.03131/paper.pdf b/2212.03131/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..a74b9800d9afc1f0331f4b04a4c7dc762d96f977 --- /dev/null +++ b/2212.03131/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:16b355f021e21f6eb8642b032caa0d1e2b7ac3b506e272a13e107cc44e77a28d +size 1648854 diff --git a/2212.03241/paper.pdf b/2212.03241/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..89870b96c8fd82b3627acf538dd666d51c29a4bb --- /dev/null +++ b/2212.03241/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d8db6b66ae7d258837b2cb6a5bff1ed91cfb9d9ec0b480329685bcf1d5c274b4 +size 20316750 diff --git a/2212.05301/paper.pdf b/2212.05301/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..dd9be617723c60155487caaa88e41f8210f67fb4 --- /dev/null +++ b/2212.05301/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9e887958648681a67f7e30bc66f9d28df1e84d0dd5284f0247e094956d3eacff +size 1209861 diff --git a/2212.07634/paper.pdf b/2212.07634/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..8c949f43c54e071716cb89e87bd1692c9402a401 --- /dev/null +++ b/2212.07634/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:6d2aa36ce50cd147d58f2ea1f0c9e7662ce102c0947a14dd371ebe71185337e3 +size 748768 diff --git a/2212.12735/paper.pdf b/2212.12735/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..ab11192600bd6a4a01ce4b3ce713a131211a09a5 --- /dev/null +++ b/2212.12735/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5dccae86aed53595b2d195366103b783abe273deb8c8260d6e64b1d6674f6de1 +size 4719878 diff --git a/2301.05434/paper.pdf b/2301.05434/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..db9f89059038e97f40eda544e93b6ebdbf641926 --- /dev/null +++ b/2301.05434/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d12d6159e186ef2b2df4f48091180fc20d712cc0b1528507f6df60a7c253e9da +size 8077148 diff --git a/2301.09209/paper.pdf b/2301.09209/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..2d42307dcbce7010027b21315bc8d077fec22c2b --- /dev/null +++ b/2301.09209/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a6764f4da82fee8ded27275830a1fc9130e00d270c510d831c69f27d05a37506 +size 12573905 diff --git a/2301.12217/paper.pdf b/2301.12217/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..a65de304cba5cb12b575719b6ebcb2f4caa32884 --- /dev/null +++ b/2301.12217/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8ee65002bf0354ffa5633c38f59572db979da85400f383aff179f9f9374d1e7e +size 540709 diff --git a/2301.12232/paper.pdf b/2301.12232/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..baa456fe95f267f2e1dc8ba94647f094d960726f --- /dev/null +++ b/2301.12232/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1fbf2caa21650a44ffe109665557343d257cd7b3eeb41d7247a80e1e08cecf04 +size 934933 diff --git a/2302.03985/paper.pdf b/2302.03985/paper.pdf new file mode 100644 index 
0000000000000000000000000000000000000000..5067c768b71e6f5086116cc962edc1d19566ea65 --- /dev/null +++ b/2302.03985/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b16b96a562adf91c41e1b6f9ddcf9014b8a81749f99949fbc683cbc96f91dd9d +size 6147549 diff --git a/2302.05527/paper.pdf b/2302.05527/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..cd397d9aad5802d68a9a2e9a3d2b3b76e1069e8f --- /dev/null +++ b/2302.05527/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5d366ca782190b5254a5555d99aa465026b56740e6ceb7abfc05601a4a982d31 +size 609425 diff --git a/2302.07672/paper.pdf b/2302.07672/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..7732f6ae47b4148e4fb99bf1ccd121ac7e6a6dd2 --- /dev/null +++ b/2302.07672/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4ac563a301ca17ee010587b136e30acac0a2d716583ede25cfab71d5e81a7a6d +size 36225694 diff --git a/2302.11002/paper.pdf b/2302.11002/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..8f20bf965bec215cac083da519738d3652387923 --- /dev/null +++ b/2302.11002/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:942ce82d8fd0ec9b0a0944391002c7f5c8282ac866c544e4e2bd233586303c85 +size 2562315 diff --git a/2302.14442/paper.pdf b/2302.14442/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..6212d94420d7b03cc2b5579d6896e69774966a4e --- /dev/null +++ b/2302.14442/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:18ebdd46589a36481d89f1fc79e79cdbf5694d65c0bc97f00a95fdcb4b30d0c9 +size 13399360 diff --git a/2303.05952/paper.pdf b/2303.05952/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..8b06c6a466dcaadadc19efa37958f20f8ce5e5af --- /dev/null +++ b/2303.05952/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:aa1b0df847b94a7e985754dfd89a9e1ca97b0ececc86bc7c5e2cd7c0ead45d73 +size 1199192 diff --git a/2303.09914/paper.pdf b/2303.09914/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..1bff9e27a0942096a7bcaabae6e248bc0d738b3e --- /dev/null +++ b/2303.09914/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:661517fbf1cf63337992e6dce82bcc1c4f2d65b020fde425b1839c9c6496a232 +size 8425375 diff --git a/2303.10482/paper.pdf b/2303.10482/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..c8dffe4c25a5cdae3853d59bbf8f14a14a064d5a --- /dev/null +++ b/2303.10482/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d3698b091ed191cdb278db80f43e14c0215caf38c910e2d994acc30214a0f881 +size 6089306 diff --git a/2303.14679/paper.pdf b/2303.14679/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..ef9469334aaa2899e9e0f2664e02bffae1a2b60f --- /dev/null +++ b/2303.14679/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4d0bfeb8c31a9e5d031d4440b737988ce8a3cd25dc5db30b5b3ee5ab1208651e +size 4852635 diff --git a/2303.16268/paper.pdf b/2303.16268/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..97d20805ad6044799cc2987775f3950e322242b7 --- /dev/null +++ b/2303.16268/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:89ad18db4945a6f60a41f2a48159eea0108aaeccd7c0a0107bcc59a07ee1a559 +size 2481357 diff --git a/2303.17859/paper.pdf b/2303.17859/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..b86bf0dba6c7ded0e1e9f04286fdad1d6b656ec7 --- /dev/null +++ b/2303.17859/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dee0d0395c5bc8fcaaf97952fd3aea8d1b934f8e3d0df2a3ec4a984814843535 +size 2768255 diff --git a/2304.04015/paper.pdf b/2304.04015/paper.pdf new file mode 100644 index 
0000000000000000000000000000000000000000..e3ce8b9e1cf009956ab19b5b11872567292731ec --- /dev/null +++ b/2304.04015/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0c91439e109ca21bba6081acc728e77a295dea57d09637bedc90730f4e7d8063 +size 964300 diff --git a/2304.05634/paper.pdf b/2304.05634/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..f459e8425c769f5edee7fc14b834af1e939e1eb5 --- /dev/null +++ b/2304.05634/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e6229a85fd4b08144aa7183dd1392f3d8d73f285abe80ebfc0fe6b184492748e +size 3894288 diff --git a/2304.05939/paper.pdf b/2304.05939/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..900876fac094f0e34dbdb48aff3463f4c6ac6061 --- /dev/null +++ b/2304.05939/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:995db6f05a32990d2c325b3db33fb1466882507c3e4e4ffe8f80d45e6e3dbdc7 +size 17152842 diff --git a/2304.06140/paper.pdf b/2304.06140/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..43c84de49df074acd74cfd7074f2475e936eb8a5 --- /dev/null +++ b/2304.06140/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fa011f6230d5b50e6eaea566281aacd8075bb38fafa678967c622e8e4e233b79 +size 16169298 diff --git a/2304.06668/paper.pdf b/2304.06668/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..dc74f1fca1eb9dd765bb006acc3fac96ded34584 --- /dev/null +++ b/2304.06668/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ee2945d9ce95c9251736e95f418b41015ab0446178af1c4b74c1caa8a1ffc30c +size 13586050 diff --git a/2304.09704/paper.pdf b/2304.09704/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..b06b5e3eb1601086c0ca82dbc52ae4f0774c9353 --- /dev/null +++ b/2304.09704/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:8fa64e42cde54889292f3177e21373d4f3642550b08f90e459616482ed21275b +size 15114276 diff --git a/2304.12654/paper.pdf b/2304.12654/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..8c447beab9726520955d6a84241904af3c34baa6 --- /dev/null +++ b/2304.12654/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b06ede4bb30eeef67b3415ee56a4da3ea070c3b488d8ba3febd1a2a48a80927e +size 1245418 diff --git a/2305.05189/paper.pdf b/2305.05189/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..7a63ea9de6aa91ce88410b097845618c824165ee --- /dev/null +++ b/2305.05189/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:65ac20382280f2e6b48bc70fd339bad76e1a537d75d996df076b518fcef3f483 +size 11980707 diff --git a/2305.12498/paper.pdf b/2305.12498/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..d5da13e791ccd3d1dfe1f6f74e809711e52a759e --- /dev/null +++ b/2305.12498/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2f7de7a3893f04949de560cd0c4e8ad0091bed005c883ea353799ab2309af329 +size 651560 diff --git a/2305.13072/paper.pdf b/2305.13072/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..ba260d5f3659334039880c4099a337f68884ae44 --- /dev/null +++ b/2305.13072/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0d307b2a602bc28be87939c35c08942425b5be5ba3b99ed5cbda270e5880da17 +size 1086659 diff --git a/2305.13948/paper.pdf b/2305.13948/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..8a7c4965cb452c5ebce7ee24d991ddd20deeb2a6 --- /dev/null +++ b/2305.13948/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b6823f88ba52ba0f910cf5725128dbd285d184160c2615d86f4f0f99f0daa067 +size 1671509 diff --git a/2305.15523/main_diagram/main_diagram.drawio b/2305.15523/main_diagram/main_diagram.drawio new file mode 
100644 index 0000000000000000000000000000000000000000..09b19934b5e771e03e65b73c013ea5bc3184273c --- /dev/null +++ b/2305.15523/main_diagram/main_diagram.drawio @@ -0,0 +1,49 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/2305.15523/main_diagram/main_diagram.pdf b/2305.15523/main_diagram/main_diagram.pdf new file mode 100644 index 0000000000000000000000000000000000000000..d467cf1d11077921d647b415d5e326b390ae1b80 Binary files /dev/null and b/2305.15523/main_diagram/main_diagram.pdf differ diff --git a/2305.15523/paper_text/intro_method.md b/2305.15523/paper_text/intro_method.md new file mode 100644 index 0000000000000000000000000000000000000000..20c2dd80452604ea7c041ebb26eb6746596a8001 --- /dev/null +++ b/2305.15523/paper_text/intro_method.md @@ -0,0 +1,124 @@ +# Introduction + +Efficient data compression plays a pivotal role in multi-sensor networks to minimize communication overload. Due to the limited communication bandwidth of such networks, it is often impractical to transmit all sensor data to a central server, and compression of the data is necessary. It is important for the sensors to compress the respective data independently, to avoid communication overload in the network. Information theory literature refers to this setting as distributed source coding [1], where the goal is to recover the original data with minimal distortion. In many cases, the data collected by the sensors is only processed by a downstream task model, *e.g.*, an object detection model, but not by humans, and hence the original distributed source coding goal of minimizing reconstruction error is no longer applicable. Instead, the goal should be to maximize the performance of the downstream task model. Additionally, in practice, data collected by multi-sensor networks is often correlated *e.g.* stereo cameras with overlapping fields of view. 
To improve communication efficiency, it is + +Equal contribution; order decided randomly. Correspondence to {pohanli, sravan.ankireddy}@utexas.edu. + +[https://github.com/UTAustin-SwarmLab/Task-aware-Distributed-Source-Coding.](https://github.com/UTAustin-SwarmLab/Task-aware-Distributed-Source-Coding) + +![](_page_1_Figure_0.jpeg) + +Figure 1: Task-aware distributed source coding with NDPCA. $X_1, \ldots, X_k$ are correlated data sources. Encoders $E_1, \ldots, E_k$ independently compress data to latent representations $Z_1, \ldots, Z_k$ . Using linear matrices, the DPCA module projects the representations to any lower dimension at the encoder and projects them back to the original data space at the decoder, which allocates the bandwidth of sources based on the importance of the task $\Phi$ . The goal is to find the optimal encoders and decoder that minimize the final task loss. + +important for the compression framework to take advantage of the correlation and avoid transmission of redundant data. Combining both objectives, the final goal of the distributed compression framework is to learn relevant features that maximize the task performance, while avoiding the transmission of redundant features by exploiting the correlation between sources. Together, we refer to the distributed compression of task-relevant features as *task-aware distributed source coding*. + +However, existing compression methods fail to combine the following three aspects: 1. Existing distributed compression methods perform poorly in the presence of a task model. Although neural networks have been shown to be capable of compressing stereo images [2, 3] and correlated images [4], existing methods focus on reconstructing image data, but not for downstream tasks. 2. Existing task-aware compression methods cannot take advantage of the correlation of sources. Previous works only consider compressing task-relevant features of single source [5–9], but not multiple correlated sources. 3. 
All existing methods for 1 & 2, especially those based on neural networks, only compress data to a fixed level of compression but not to multiple levels. Thus, they cannot operate in environments with different demands of compression levels and require a separate model trained for each compression level. Here, we note that we use the term bandwidth to indicate the information bottleneck in the dimension of transmitted data. Based on the choice of quantization, it is straightforward to convert the latent dimension to other popular metrics such as bits per pixel (bpp) in the case of image sources. Additionally, we consider the scenario of total bandwidth constraint for the uplink, which is typical for wireless networks [10]. + +We design neural distributed principal component analysis (NDPCA)—a distributed compression framework that can transmit task-relevant features at multiple compression levels. We consider the case where a task model at the central node requires data from all sources and the bandwidth in the network is not consistent over time, as shown in Fig. 1. In NDPCA, neural encoders $E_1, E_2, \ldots, E_K$ first independently compress correlated data $X_1, X_2, \ldots, X_K$ to latent representations $Z_1, Z_2, \ldots, Z_K$ . A proposed module called distributed principal component analysis (DPCA) further compresses these representations to any lower dimension according to the current bandwidth and decompresses the data at the central node. Finally, a neural decoder at the central node decodes the representations $\hat{Z}_1, \hat{Z}_2, \ldots, \hat{Z}_k$ to $\hat{X}_1, \hat{X}_2, \ldots, \hat{X}_K$ and feeds them into a task. Task-aware compression aims to minimize task loss, defined as the difference in task outputs with and without compression, such as the difference in object detection results. Due to the significant training cost involved, we avoid training the task model, which is usually a large pre-trained neural network. 
+ +To highlight our proposed method, NDPCA learns task-relevant representations with a single model at multiple compression levels. It includes a neural autoencoder to generate uncorrelated task-relevant representations in a fixed dimension. It is desirable to learn uncorrelated representations to prevent the transmission of redundant information. It also includes a module for linear projection, DPCA, to allocate the available bandwidth among sources based on the importance of the task, by observing the respective principal components, and then further compressing the representations to any desired dimension. By harmoniously combining the neural autoencoder and the linear DPCA module, NDPCA generates representations that are more compressible in limited bandwidths, providing a graceful trade-off between performance and bandwidth. + +Contributions: Our contributions are three-fold: First, we formulate a task-aware distributed source coding problem that optimizes the compression for a given task instead of reconstructing the sources (Sec. 2). Second, we provide a theoretical justification for the framework by analyzing the case of a linear compressor and a linear task (Sec. 3). Finally, we propose a task-aware distributed source coding framework, NDPCA, that learns a single model for different levels of compression to handle any type of source and task(Sec. 4). We validate NDPCA with tasks of CIFAR-10 image denoising, multi-view robotic arm manipulation, and object detection of satellite imagery (Sec. 5). NDPCA results in a 1.2dB increase in PSNR, a 9% increase in success rate, and a 14% increase in accuracy compared to an autoencoder with uniform bandwidth allocation, for the respective experiments mentioned above. + +# Method + +We now define the problem statement more formally. Consider a set of K correlated sources. Let $x_i \in \mathbb{R}^{n_i}$ denote the sample from source i where $i \in \{1,2,\ldots,K\}$ . 
Samples from each source i are compressed independently by encoder $E_i$ to a latent representation $z_i \in \mathbb{R}^{m_i}$ such that $\sum_{i=1}^K m_i = m$ , where m is the total bandwidth available. A joint decoder D receives the representations $\{z_1, z_2, \ldots, z_k\}$ and reconstructs the sources $\{\hat{x}_1, \hat{x}_2, \ldots, \hat{x}_k\}$ . In the setting without a task, the goal is to find a set of encoders and a decoder to recover the inputs $\{x_1, x_2, \ldots, x_k\}$ with minimal loss: + +$$\underset{E_1, E_2, \dots, E_k, D}{\operatorname{argmin}} \sum_{i=1}^{K} \mathcal{L}_{\text{rec}}(x_i, \hat{x}_i) \quad (\textit{Task-agnostic} \text{ distributed source coding}), \tag{1}$$ + +where $\mathcal{L}_{rec}$ is the reconstruction loss, e.g., the mean-squared error loss. + +In the presence of a task $\Phi$ , it takes the reconstructed inputs to compute the final output $\Phi(\hat{x}_1, \hat{x}_2, \dots, \hat{x}_k)$ . The goal is to find a set of encoders and a decoder such that the task loss $\mathcal{L}_{\text{task}}$ is minimized, where $\Phi(x_1, x_2, \dots, x_k)$ is the task output computed without compression. We refer to this problem as *task-aware* distributed source coding, which is the main focus of this paper: + +$$\underset{E_1,E_2,...,E_k,D}{\operatorname{argmin}} \mathcal{L}_{task}(\Phi(x_1,x_2,\ldots,x_k),\Phi(D(E_1(x_1),E_2(x_2),\ldots,E_k(x_k))))$$ +(2) +(Task-aware distributed source coding), + +where $\mathcal{L}_{task}$ is the task loss, e.g., the difference of bounding boxes when the task is object detection. + +**Bandwidth allocation:** In the previous formulations, we assume that the output dimensions of encoders are known a priori. However, the dimensions are related to the compression quality of each encoder, which is also a design factor. That is, given the total available bandwidth m, we first need to obtain the optimal $m_i$ for each source i, then, we can design the optimal encoders and decoder accordingly. 
Finding the optimal set of bandwidths for a given task is a long-standing open problem in information theory [11], even for the simple task of a modulo-two sum of two binary sources [12]. Also, existing works [4, 13, 14] largely assume a fixed latent dimension for sources and train different models for different total available bandwidth m, which is, of course, suboptimal. In this paper, our framework provides heuristics to the underlying key challenge of optimally allocating available bandwidth, i.e., deciding $m_i$ , while adapting to different total bandwidths m with a single model. + +We start with a motivating example of task-aware distributed source coding under the constraint of linear encoders, a decoder, and a linear task. We first solve the linear setting using our proposed method, distributed principal component analysis (DPCA). We then describe how DPCA compresses data to different bandwidths and analyze the performance of DPCA. In this way, we gain insights into combining DPCA with neural autoencoders in later Sec. 4. + +**DPCA Formulation:** We consider a linear task for two sources, defined by the task matrix $\Phi \in \mathbb{R}^{p \times (n_1 + n_2)}$ , where the sources $x_1 \in \mathbb{R}^{n_1}$ and $x_2 \in \mathbb{R}^{n_2}$ are of dimensions $n_1$ and $n_2$ , respectively, and the task output is given by $y = \Phi x \in \mathbb{R}^p$ , where $x = [x_1^\top, x_2^\top]^\top$ . Without loss of generality, we assume the sources to be zero-mean. Now, we have N observations of two sources $X_1 \in \mathbb{R}^{n_1 \times N}$ and $X_2 \in \mathbb{R}^{n_2 \times N}$ and their corresponding task outputs $Y = \Phi(X) \in \mathbb{R}^{p \times N}$ , where $X = [X_1^\top X_2^\top]^\top$ . 
+ +We aim to design the optimal linear encoding matrices (encoders) $E_1 \in \mathbb{R}^{m_1 \times n_1}$ , $E_2 \in \mathbb{R}^{m_2 \times n_2}$ , and the decoding matrix (decoder) $D \in \mathbb{R}^{(n_1+n_2) \times (m_1+m_2)}$ that minimize the task loss defined as the Frobenius norm of $\Phi(X) - \Phi(\hat{X})$ , where $\hat{X}$ is the reconstructed X. We only consider the non-trivial case where the total bandwidth is less than the task dimension, $m = m_1 + m_2 < p$ , i.e., the encoders cannot directly calculate the task output locally and transmit it to the decoder. For now, we assume that $m_1$ and $m_2$ are given, and we discuss the optimal allocation later in this section. + +Letting $Z_1 = E_1 X_1 \in \mathbb{R}^{m_1 \times N}$ and $Z_2 = E_2 \tilde{X}_2 \in \mathbb{R}^{m_2 \times N}$ denote the encoded representations, where $\tilde{X}_2$ is a decorrelated version of $X_2$ defined later in (4), and $M = \Phi D$ denote the product of the task and decoder matrices, we solve the optimization problem: + +$$E_1^*, E_2^*, M^* = \underset{E_1, E_2, M}{\operatorname{argmin}} \|Y - MZ\|_2^2$$ + (3a) + +s.t. +$$Z = \begin{bmatrix} Z_1 \\ Z_2 \end{bmatrix} = \begin{bmatrix} E_1 X_1 \\ E_2 \tilde{X}_2 \end{bmatrix}$$ +, (3b) + +$$ZZ^{\top} = \mathbb{I}_m,$$ + (3c) + +$$\hat{Y} = \Phi DZ = MZ, \quad Y = \Phi \begin{bmatrix} X_1 \\ X_2 \end{bmatrix}.$$ + (3d) + +Note that solving M is identical to solving the decoder D since we can always convert M to D by the generalized inverse of task $\Phi$ . The encoders $E_1$ and $E_2$ project the data to representations $Z_1$ and $Z_2$ in (3b). We constrain the representations to be orthonormal vectors in (3c) as in the normalization in principal component analysis (PCA) for the compression of a single source [15]. This constraint lets us decouple the problem into subproblems later in (5). Finally, in (3d), the decoder D decodes $Z_1$ and $Z_2$ to $\hat{X}_1$ and $\hat{X}_2$ and passes the reconstructed data to task $\Phi$ . + +**Solution:** We now solve the optimization problem in (3).
For any given $E_1$ , $E_2$ (thus, a given Z), we can optimally obtain $M^* = YZ^\top (ZZ^\top)^{-1} = YZ^\top$ by linear regression, where the second equality follows from the orthonormality constraint (3c). Now, we are left to find the optimal encoders $E_1$ , $E_2$ . First, a preprocessing step removes the correlation part of $X_1$ from $X_2$ by subtracting the least-square estimator $\hat{X}_2(X_1)$ : + +$$\tilde{X}_2 = X_2 - \hat{X}_2(X_1) = X_2 - X_2 X_1^{\top} (X_1 X_1^{\top})^{-1} X_1.$$ +(4) + +The orthogonality principle of least-square estimators [16, p.386] ensures that $X_1 \tilde{X}_2^{\top} = \mathbf{0}_{n_1 \times n_2}$ . We decouple the objective in (3a) with respect to $E_1, E_2$ by the orthogonality principle and (3c): + +$$\min_{E_1,E_2} \|Y - M^* Z\|_2^2 = \|Y\|_2^2 - \max_{E_1,E_2} \|M^*\|_2^2 = \|Y\|_2^2 - \max_{E_1} \|Y_1 X_1^\top E_1^\top\|_2^2 - \max_{E_2} \|Y_2 \tilde{X}_2^\top E_2^\top\|_2^2,$$ +(5) + +where $Y=\Phi X=[\Phi_1\ \Phi_2]\left[X_1^{\top}X_2^{\top}\right]^{\top}=Y_1+Y_2.$ We then have two subproblems from (3): + +$$E_{1}^{*} = \underset{E_{1}}{\operatorname{argmax}} \quad \|\Phi_{1}X_{1}X_{1}^{\top}E_{1}^{\top}\|_{2}^{2} \qquad \qquad E_{2}^{*} = \underset{E_{2}}{\operatorname{argmax}} \quad \|\Phi_{2}\tilde{X}_{2}\tilde{X}_{2}^{\top}E_{2}^{\top}\|_{2}^{2}$$ +s.t. $E_{1}X_{1}X_{1}^{\top}E_{1}^{\top} = \mathbb{I}_{m_{1}}, \qquad \qquad \text{s.t.} \quad E_{2}\tilde{X}_{2}\tilde{X}_{2}^{\top}E_{2}^{\top} = \mathbb{I}_{m_{2}}.$ + +$$(6) \qquad \qquad (7)$$ + +The two subproblems are the canonical correlation analysis [17], which can be solved by whitening $E_1X_1, E_2\tilde{X}_2$ and singular value decomposition (see [17] for details). + +**Dynamic bandwidth:** So far, we solved the case for fixed bandwidths $m_1$ and $m_2$ . We now describe ways to determine the optimal bandwidth allocation given a current total bandwidth m. To do so, DPCA solves (6) and (7) with $m_1 = n_1$ and $m_2 = n_2$ and obtains $E_1^*$ , $E_2^*$ and all pairs of canonical directions and correlations.
Canonical directions and correlations can be analogized to a more general case of singular vectors and values. Similar to PCA, the sums of squares of canonical correlations are the optimal values of (6) and (7), so DPCA sorts all the canonical correlations in descending order and chooses the first m pairs of canonical correlations and directions. These canonical correlations determine the optimal encoders $E_1^*$ , $E_2^*$ and decoder $D^*$ , which indirectly solves $m_1$ and $m_2$ . Intuitively, the canonical correlations indicate the importance of a direction to the task, and we prioritize the transmission of directions by importance. For simplicity, we only consider the case of 2 sources. DPCA can easily compress more sources simply by constraining all Zs to be independent and thus decoupling the original problem (3) into more subproblems. + +**Performance analysis of DPCA:** When DPCA compresses new data matrices with encoders $E_1^*$ and $E_2^*$ , the preprocessing step is invalid as the encoders cannot communicate with each other. So for DPCA to perform optimally while skipping the step, the two data matrices need to be uncorrelated, namely, $\hat{X}_2(X_1)=0$ , because in such a case the preprocessing step removes nothing from the data sources. Given that correlated sources lead to suboptimality of DPCA, we characterize the performance gap between the joint compression, PCA, and the distributed compression, DPCA, under the same bandwidth in Lemma 3.1 with the simplest case of reconstruction, namely, $\Phi=\mathbb{I}_p$ . In this setting, the canonical correlation analysis is relaxed to the singular value decomposition, which is later used for NDPCA in Sec. 4. + +**Lemma 3.1** (Bounds of DPCA Reconstruction).
Given a zero-mean data matrix and its covariance, + +$$X = \begin{bmatrix} X_1 \\ X_2 \end{bmatrix} \in \mathbb{R}^{(n_1 + n_2) \times N}, XX^\top = \underbrace{\begin{bmatrix} \operatorname{Cov}_{11} & \mathbf{0} \\ \mathbf{0} & \operatorname{Cov}_{22} \end{bmatrix}}_{X_{\operatorname{diag}}} + \underbrace{\begin{bmatrix} \mathbf{0} & \operatorname{Cov}_{12} \\ \operatorname{Cov}_{21} & \mathbf{0} \end{bmatrix}}_{\Delta X},$$ + +assume that $\Delta X$ is relatively smaller than $XX^{\top}$ , and $XX^{\top}$ is positive definite with distinct eigenvalues. For PCA's encoding and decoding matrices $E_{\mathrm{PCA}}$ , $D_{\mathrm{PCA}}$ and DPCA's encoding and decoding matrices $E_{\mathrm{DPCA}}$ , $D_{\mathrm{DPCA}}$ , the difference of the reconstruction losses is bounded by + +$$0 \le \|X - D_{\text{DPCA}} E_{\text{DPCA}}(X)\|_{2}^{2} - \|X - D_{\text{PCA}} E_{\text{PCA}}(X)\|_{2}^{2} = -\sum_{i=m+1}^{n_{1}+n_{2}} \lambda_{i} e_{i}^{\top} \Delta X e_{i}.$$ + +where $\lambda_i$ and $e_i$ are the *i*-th largest eigenvalue and eigenvector of $XX^{\top}$ , Tr is the trace function, and m is the dimension of the compression bottleneck. + +The proof of Lemma 3.1 is in Appendix A.1. Note that $\Delta X$ is the correlation of sources, so as $\|\Delta X\|_F$ gets smaller, the difference of PCA and DPCA is closer to 0. That is, as the covariance decreases, DPCA performs more closely to PCA, which is the optimal joint compression. + +To summarize, uncorrelated data matrices $X_1, \ldots, X_K$ are desired for DPCA. If so, DPCA optimally decides the bandwidths of all sources based on the canonical correlations, representing their importance for the task. One application of DPCA is that encoders can use the remaining unselected canonical directions to improve compression when the available bandwidth is higher later. 
+ +The theoretical analysis in the previous section indicates that DPCA has two drawbacks: it only compresses data optimally if sources are uncorrelated, and it only works for linear tasks. However, DPCA dynamically allocates bandwidth to sources based on their importance. On the other hand, neural autoencoders are shown to be powerful tools for compressing data to a fixed dimension but cannot dynamically allocate bandwidth. This contrast motivates us to harmoniously combine a neural autoencoder to generate representations and then pass them through DPCA to compress and find the bandwidth allocation. We refer to the combination of a neural autoencoder and DPCA as neural distributed principal component analysis (NDPCA). With a single neural autoencoder and a matrix at each encoder and decoder, NDPCA adapts to any available bandwidth and flexibly allocates bandwidth to sources according to their importance to the task. + +**Outline:** NDPCA has two encoding stages, as shown in Fig. 1: First, the neural encoder at each k-th source encodes data $X_k$ to a fixed-dimensional representation $Z_k$ for $k \in [K]$ . Then the DPCA linear encoder adapts the dimension of $Z_k$ via linear projection according to the available bandwidth and the correlation among the sources as per (6). Similarly, the decoding of NDPCA is also performed in two stages. First, the DPCA linear decoder reconstructs the K fixed-dimensional representations $\hat{Z}_1, \hat{Z}_2, \ldots, \hat{Z}_K$ , based on which the joint neural decoder generates the estimate of data $\hat{X}_1, \hat{X}_2, \ldots, \hat{X}_K$ . These estimates are then passed to the neural task model $\Phi$ to obtain the final task output $\hat{Y}$ . 
Note that since we have a non-linear task model here, we envision that the neural encoders generate non-linear embeddings of the sources, while the DPCA mainly adapts the dimension appropriately as needed; the role of the DPCA here is to reliably reconstruct the embeddings $\hat{Z}$ , which corresponds to the case described in Lemma 3.1 with the task matrix $\Phi$ as identity. + +**Training procedure:** During the training of NDPCA, the weights of the task are always frozen because it is usually a large-scale pre-trained model that is expensive to re-train. We aim to learn the K neural encoders and the joint neural decoder which minimize the loss function: + +$$\mathcal{L}_{\text{tot}} = \lambda_{\text{task}} \underbrace{\|\hat{Y} - Y\|_F^2}_{\text{task loss}} + \lambda_{\text{rec}} \underbrace{\left(\|\hat{X}_1 - X_1\|_F^2 + \|\hat{X}_2 - X_2\|_F^2 + \dots + \|\hat{X}_K - X_K\|_F^2\right)}_{\text{reconstruction loss}}.$$ + (8) + +In the task-aware setting when $\lambda_{\text{rec}} = 0$, the neural autoencoder fully restores task-relevant features, which is the main focus of this paper. When $\lambda_{\text{task}} = 0$, the neural autoencoder learns to reconstruct the data X, which is the task-agnostic setting later compared in Sec. 5. + +We now discuss how to encourage NDPCA to work well under *various available bandwidths* with DPCA during the training phase. We begin by making observations on the desired properties of the neural embeddings arising from the limitations of the DPCA: (1) uncorrelatedness: Lemma 3.1 shows that DPCA is more efficient when the correlation among the intermediate representations is less. (2) linear compressibility: we encourage the neural autoencoder to generate low-rank representations, which can be compressed by only a few singular vectors, making them more bandwidth efficient. + +We tried to explicitly encourage the desired properties with additional terms in (8), but they all adversely affect the task performance.
To obtain uncorrelated representations, we tried penalizing the cosine similarity between the representations. We also tried similar losses that penalize correlation, as per [18–21], but none improves the task performance. We observed that the autoencoder automatically learns representations with small correlation, and any explicit imposition of complete uncorrelatedness is too strong. For linear compressibility, we tried penalizing the convex low-rank approximation–the nuclear norm–of the representations, as per [22, 23]. However, we observe a similar trend in the final task performance as the network tends to minimize the nuclear norm while harming the task performance. For the comparison of the resulting performance, see Appendix F.1. + +In this regard, we propose a novel linear compression module that allows us to adapt to DPCA during training rather than using additional terms in the loss. We introduce a *random-dimension* DPCA projection module to improve performance in lower bandwidths. It projects representations Z to a low dimension randomly chosen, simulating projections in various available bandwidths during inference. It can be interpreted as a differentiable singular value decomposition with a random dimension, described in Alg. 1. For encoding, it first normalizes the representations and performs singular value decomposition on all sources. Then, it sorts the vectors by the singular values and randomly selects the number of vectors to use for projection. For decoding, it decodes with the selected singular vectors again and denormalizes the data. Note that during training, we only run Alg. 1 on a batch. This module helps to improve the overall performance over a range of bandwidths, and we show the ablation study of this module in Appendix F.2. + +Inference: With the training data, the DPCA projection module first saves the mean of representations Z and the encoder and decoder matrices in the maximum bandwidth. 
It only needs to save for the maximum bandwidth because its rows and columns are already sorted by the singular values, which represent the importance of each corresponding vector. During inference, when the current bandwidth is m, it chooses the top m rows and columns of the saved encoders and decoder matrices to encode and decode representations. No retraining is needed for different bandwidths. Only the storage of a neural autoencoder and a linear matrix at each encoder and decoder is needed. + +Robust task model: We pre-train the task model with randomly cropped and augmented images to make the model less sensitive to noise in the input image space, namely, the model has a smaller Lipschitz constant. This augmentation trick is based on [8]. A robust task model has a smaller Lipschitz constant, so it is less sensitive to the input noise injected by decompression when we concatenate it with the neural autoencoder. For a detailed analysis of the performance bounds between robust task and task-aware autoencoders, see Appendix A.2. 
diff --git a/2305.17262/paper.pdf b/2305.17262/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..5cc31f1fd7e6ece718cca1a6a4c1621341ebef85 --- /dev/null +++ b/2305.17262/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2dc77f05122e29c23e79399616b220b1fb689600f7eb8b0a06266299ccdfc70b +size 23212537 diff --git a/2305.17331/paper.pdf b/2305.17331/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..75d9c1362b1001351f19c700d43776222a087e7f --- /dev/null +++ b/2305.17331/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a43b020edcfd3a720e8dad37ad3df366fb2aeb89391b36d6393d0e0f0ad8dd26 +size 1026060 diff --git a/2305.18484/paper.pdf b/2305.18484/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..bb22738205fa52fc36a75f7ea2e8744c8c403663 --- /dev/null +++ b/2305.18484/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:71b97cabc06149340a70ddf28a651a2cfb36c9e90b0dfe1334fa2340abd33a91 +size 19425578 diff --git a/2306.05584/paper.pdf b/2306.05584/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..28b96378b6c04ebaf88ce669698a6ebd49169179 --- /dev/null +++ b/2306.05584/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5a84fde990ef60ec11a11df6bd0a2eff5f39ccbfbc53002fab407f63b8eddf07 +size 5046234 diff --git a/2306.08984/paper.pdf b/2306.08984/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..035015e5c99f40bc74ed83f4e7ae39a5eab20f80 --- /dev/null +++ b/2306.08984/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b0f3bed2cc5f3c22671dc7ff37a38c0f96aafa95bf7e25cc2ac855b7e9c1274a +size 6030516 diff --git a/2306.13596/paper.pdf b/2306.13596/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..d85e9ebb935429ba128ac1af19556427933e38a0 --- /dev/null +++ 
b/2306.13596/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:40515f770a158297e7f48dfb1c4d84d66ea72afd80ab9befd41d19aee0a89027 +size 980281 diff --git a/2307.07812/paper.pdf b/2307.07812/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..c2db79b6265861297f4e3315bbebbdf17506ab84 --- /dev/null +++ b/2307.07812/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:759ae0c44cc125d2e0618a727ead9310bc6ec12dd32fee3139addcd83dbe1cbb +size 12734882 diff --git a/2307.14392/paper.pdf b/2307.14392/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..c5578d25953ca13b1c90c0176cb538a994812b2f --- /dev/null +++ b/2307.14392/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:267dcc83ae6882bf2d722842871d85e245198619c37406819ccd1dfb9c6b26bd +size 8808443 diff --git a/2307.14680/paper.pdf b/2307.14680/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..22dee631f2e9dbe592504517979172555e53950c --- /dev/null +++ b/2307.14680/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:883f0d796535b6ba830c754d699f5ceb19840a75ae5795fd9cb53652daf48224 +size 607648 diff --git a/2307.16368/paper.pdf b/2307.16368/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..58291c662b373920b4b486487326882888342fea --- /dev/null +++ b/2307.16368/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f08bd1128fe5b109e58a1e098601217ea9d08d22566d074e2150c7f5a3ab720a +size 17453783 diff --git a/2307.16399/paper.pdf b/2307.16399/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..1fa126cb7f9bbb3d5b621e6198a32519aab902f0 --- /dev/null +++ b/2307.16399/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:26a0ac328272c5a9e7d9668a71f491b694d319e82789cd7b7f7d5eeae1b4f900 +size 2768075 diff --git 
a/2308.06548/paper.pdf b/2308.06548/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..2600619e84d2cbb1c7b222f84505199e011d614e --- /dev/null +++ b/2308.06548/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f3de9b5c91b4faf7f9af11dcd0e56fa0d7bc10d751035ac120c8829c319d9f03 +size 4140388 diff --git a/2308.06838/paper.pdf b/2308.06838/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..3a92466462b1cbe0268cf0f13ab84b7334648c13 --- /dev/null +++ b/2308.06838/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:eb5c80f97f35029351dbafcf2506cdf3156512b6d99695acf466013e0747f952 +size 973295 diff --git a/2308.09517/paper.pdf b/2308.09517/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..887ef43982b7fe43a92c734e52ec1a1057470ac9 --- /dev/null +++ b/2308.09517/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f62c8cb5a23bcc06b21077a21c0736c8db1ae35bcc387ab391df1cd7abc0e6b2 +size 921073 diff --git a/2308.11272/paper.pdf b/2308.11272/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..2d35105f8df3f864ecec4acf6644ad60c726bebf --- /dev/null +++ b/2308.11272/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dc354b03649c2648450ff0be485845dba99fd89a33a3e73364939157653798b2 +size 6726668 diff --git a/2308.11488/paper.pdf b/2308.11488/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..8f6df7470d29eabe214e5cb4e4862d09c2b445c5 --- /dev/null +++ b/2308.11488/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8845425b6c4d670d6c8642d688d39ef68a05b7aea3dcc6d68cceff198f66ed86 +size 4901328 diff --git a/2308.11991/paper.pdf b/2308.11991/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..c57d57e908b06d7051580ea722cfc1042e9dcace --- /dev/null +++ b/2308.11991/paper.pdf @@ -0,0 
+1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f115409bbd1b8ecc46f2d3853f6c9f2542c1c2c83e694705dd5c0730fe8dcea4 +size 692548 diff --git a/2308.13250/paper.pdf b/2308.13250/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..052a858cab2ef12fe987e18e2d99d3aad0b1a151 --- /dev/null +++ b/2308.13250/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:eff4c0dea3a55381294cda1cefd8adf898018eb308b680b056386c90abbf6fc6 +size 698610 diff --git a/2308.15827/paper.pdf b/2308.15827/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..4824fbd7458bf9933327793b37471eef88f52bec --- /dev/null +++ b/2308.15827/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:02180fe590e1f53a9e13ba1993ec8526d59512fbd656607d41e24e51eab358ba +size 2275258 diff --git a/2308.16021/paper.pdf b/2308.16021/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..c7b9a79ce4e7b938fa64b3b579cebf30866ad684 --- /dev/null +++ b/2308.16021/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d0e6993c40c62825745767dd465d7a4ca47d574359fcb37cbab5a7047034349a +size 1120381 diff --git a/2308.16182/paper.pdf b/2308.16182/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..3e86cf867202fce286a12f0abf8ba575c34fe2f4 --- /dev/null +++ b/2308.16182/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8822edd5633eb3b8e14ff8abd8e0748f153342b9ea8de50ce15eb7335ad7fbc9 +size 3046027 diff --git a/2309.09531/paper.pdf b/2309.09531/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..4402605300b0ef1746b31a3c9a1e2747d0e1c4c7 --- /dev/null +++ b/2309.09531/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6f64ff1a0d43214887a1385f19d09dd646bb313ff42746157c38f09c02304dd2 +size 4061210 diff --git a/2309.09582/paper.pdf b/2309.09582/paper.pdf new 
file mode 100644 index 0000000000000000000000000000000000000000..731a2e5f71ad6fa6cd76704a33891d0bbe59e27b --- /dev/null +++ b/2309.09582/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5e9f91fdfd40eb57eac464b4b89b3517ebfdd18b4bf01d6a6a209b4c1a505ab5 +size 458731 diff --git a/2309.10765/paper.pdf b/2309.10765/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..b06b060a44653b5689b4fffe6d884c3338b6d17f --- /dev/null +++ b/2309.10765/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:17e3b559be91978a3c4a99d8982dc3a0b3e48d513faffd31dacb7b2d8891812e +size 737286 diff --git a/2309.14859/paper.pdf b/2309.14859/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..688e624386a9b2bd3baa133a1dc0d0a2f0a62d20 --- /dev/null +++ b/2309.14859/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5be5d211cd03413e7e1cd65bcb6da9109c1fdbe2223fbcbf8b882a17e409d930 +size 89004242 diff --git a/2309.15848/paper.pdf b/2309.15848/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..b67b21c3afdb2422687c9ece606b196d32516935 --- /dev/null +++ b/2309.15848/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f05d04c2c1179e0dfb588295fddb2dac6014eb727a43c6392484018350da907a +size 24486434 diff --git a/2309.17249/main_diagram/main_diagram.drawio b/2309.17249/main_diagram/main_diagram.drawio new file mode 100644 index 0000000000000000000000000000000000000000..e425f71a1d36a05827c73b058c3a649fa70d32e7 --- /dev/null +++ b/2309.17249/main_diagram/main_diagram.drawio @@ -0,0 +1,5330 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + diff --git a/2309.17249/paper_text/intro_method.md b/2309.17249/paper_text/intro_method.md new file mode 100644 index 0000000000000000000000000000000000000000..93e0b32b8b8edd8629dd55b18c060cb51ad0156d --- /dev/null +++ b/2309.17249/paper_text/intro_method.md @@ -0,0 +1,105 @@ +# Introduction + +:::: wrapfigure +r0.48 + +::: center +![image](figures/mainv2.pdf){width="48%"} +::: +:::: + +Prompting large language models (LLMs) [@chowdhery2022palm; @anil2023palm] has become an efficient learning paradigm for adapting LLMs to a new task by conditioning on human-designed instructions. The remarkable in-context learning (ICL) ability of LLMs also leads to efficient few-shot learners that can generalize from few-shot input-label pairs [@brown2020language; @liu2023pre]. However, the predictions of LLMs are highly sensitive and even biased to the choice of templates [@min-etal-2022-rethinking], verbalizers [@holtzman-etal-2021-surface], and demonstrations [@liu-etal-2022-makes], resulting in barriers for pursuing efficiently adaptable and robust LLM applications. + +Extensive research has been devoted to mitigating these biases, which we explicitly refer to the a-priori propensity of LLMs to predict certain classes over others unfairly. @lu-etal-2022-fantastically provide an analysis of the impacts of the order of ICL examples to LLMs and have explored the order selection mechanisms for ICL. On the other hand, @zhao2021calibrate reveal the bias of language models toward certain answers and propose to calibrate the LLM given content-free tokens. More recently, @fei-etal-2023-mitigating detect the domain-label bias, and @han2023prototypical treat the calibration of LLMs as learning a robust decision boundary. Though multiple calibration solutions have been provided, the field currently lacks a unified analysis that systematically distinguishes and explains the unique characteristics, merits, and downsides of each approach. 
+ +In this work, we first conduct a comprehensive analysis across existing calibration methods for LLMs. We approach the calibration problem from a distinctive point of view by interpreting the decision boundaries for each calibration method together with the ICL decision boundary. We start observing fatal failure cases for each method by extending them to more challenging and under-explored evaluation tasks. We then conclude the current limitation for each method with a novel interpretation from the decision boundary perspective, pointing to the need for a unified and widely applicable solution for conquering diverse bias sources in the field of LLM efficient learning. + +Inspired by these findings, we propose *Batch Calibration* (BC), a zero-shot and inference-only calibration method for prompting and ICL. The central objective of BC is to accurately model the bias from the prompt context (referred to as *contextual bias* in this paper) by marginalizing the LLM scores in the batched input. The simplicity of the design of BC only brings negligible computation overhead at the output of the LLM. We further extend BC to the black-box few-shot learning (BCL), a practical case where labeled data is available, by introducing a *single* learnable parameter into BC, which enables it to adapt and *learn* the contextual bias from the available data resources. + +We conducted extensive experiments on more than 10 natural language understanding tasks together with image classification tasks. BC stands as the most widely applicable calibration method while achieving state-of-the-art results. With the proposed black-box few-shot BCL framework, we show that further slight gains can be achieved by leveraging more labeled data. We provide further analysis with BC on robustness with templates, ICL choices and orders, and verbalizers, validating that BC can effectively alleviate prompt brittleness and make prompt engineering easier. 
To summarize, we provide the following contributions: + +- We provide a unified and systematic analysis of existing calibration methods through their decision boundaries, investigate the common use of content-free tokens as an estimator of contextual bias, and identify their deficiency with individual case studies. + +- We propose Batch Calibration (BC), a zero-shot and inference-only calibration method for ICL, that mitigates the bias from the batch. We further extend BC to learn from few-shot data. + +- We show that while conceptually simple, BC attains state-of-the-art performance in both zero-shot and few-shot learning setups over widely selected tasks with PaLM-2 and CLIP models. + +Prompting is an efficient learning paradigm that allows LLMs to perform zero-shot inference by conditioning on a human-designed instruction. Formally, denoting a test query-target pair $\{x_i, y_i\}$ and instruction as the context $C$ for a classification task, LLMs make predictions by computing: $\operatorname*{arg\,max}_{y\in \mathcal{Y}}\mathbf{p}(y|x_i, C)$, where $\mathbf{p} \in \mathbb{R}^J$ is the logits, and $\mathcal{Y}$ denotes the verbalizers that define the label set for $J$ classes. ICL further enables LLMs to learn from $k$ input-label pairs (i.e., few-shot setup), $s^{(i)} = \texttt{Template}(x^{(i)}, y^{(i)}) \, \forall i \in\{1,...,k\}$, by concatenating few-shot demonstrations in a pre-defined template as the context, $C= \texttt{Concat}(s^{(1)},...,s^{(k)})$. Though ICL has demonstrated strong performance with easy implementations, the prediction of LLMs is shown to be biased towards certain answers due to different elements of $\mathbf{p}(y|x_i, C)$ [@lu-etal-2022-fantastically]. In the ICL context $C$, majority label bias and recency label bias [@zhao2021calibrate] can bias the prediction of LLMs toward the most frequent label and the label towards the end of the demonstration, respectively.
Among verbalizer tokens $y_j\in \mathcal{Y}$, LLMs are shown to be inherently biased towards predicting the label-tokens that appear more frequently from pretraining term statistics [@shin-etal-2022-effect; @razeghi-etal-2022-impact]. These bias factors significantly degrade the performance of LLMs for robust ICL applications. + +# Method + +Various *calibration* methods have been proposed to mitigate the issue of bias identified above. In this section, we provide an overview of the state-of-the-art calibration methods. + +Motivated by a common calibration technique that applies affine transformation on the model outputs [@platt1999probabilistic; @guo2017calibration], @zhao2021calibrate propose to calibrate the LLM prediction by first measuring the entire test-time distribution $\hat{\mathbf{p}}$ by a content-free input. Using "`N/A`" as a content-free example, the model score distribution is generated by $\hat{\mathbf{p}}_{\text{cf}} := \mathbf{p}(y|[\texttt{N/A}], C)$. CC then generates the calibrated output by transforming the uncalibrated scores $\mathbf{p}(y|x, C)$ with $\mathbf{W} \in \mathbb{R}^{J \times J}$ via $\mathbf{W}\mathbf{p}(y|x, C)$, where $\mathbf{W}=\operatorname*{diag}(\hat{\mathbf{p}}_{\text{cf}})^{-1}$ offsets the uncalibrated scores with the model score (a contextual prior) triggered by the content-free sample. + +Instead of using a single content-free token, @fei-etal-2023-mitigating propose DC that estimates a contextual prior $\hat{\mathbf{p}}(y|C)$ by using a random in-domain sequence. It randomly sampled $L$ tokens at an average sentence length from an unlabeled text set. Then, it estimates the content-free prediction prior by averaging the model score $T$ times, such that: $\hat{\mathbf{p}}_{\text{random}}=\frac{1}{T}\sum_{t=1}^{T} \mathbf{p}(y|[\textsc{Random Text}]_{t}, C)$. 
The final test-time prediction is then calibrated by dividing by the estimated prior prediction, or equivalently in logits space, $\mathbf{p}(y|x_i, C)-\hat{\mathbf{p}}_{\text{random}}$. + +PC learns a decision boundary with Gaussian mixture models (GMMs). It estimates $J$ prototypical clusters for the model output $\mathbf{p}$ for $J$ classes: $P_{\text{GMM}}(\mathbf{p}) =\sum_{j=0}^{J-1}\alpha_{j} P_{G}(\mathbf{p}|\mathbf{\boldsymbol\mu_{j}},\mathbf{\boldsymbol\Sigma_{j}})$, where $P_{G}$ denotes a multi-variate Gaussian distribution, and the parameters: mixing coefficient $\alpha$, mean vector $\mathbf{\boldsymbol\mu}$, covariance matrix $\mathbf{\boldsymbol\Sigma}$ are estimated by the Expectation-Maximization [@moon1996expectation]. Followed by an automatic label assignment strategy, the predicted label is then computed by $\operatorname*{arg\,max}_{j}P_{G}(\mathbf{p}|\boldsymbol\mu_{j}^{*},\boldsymbol\Sigma_{j}^{*})$ at inference time. This EM-GMM process can require up to $T$ repetitions to stabilize its estimation of clusters, where $T$ is a hyperparameter of the algorithm. + +Summarizing the calibration methods with distinctive design principles discussed so far, in Table [\[tab:analysis\]](#tab:analysis){reference-type="ref" reference="tab:analysis"}, we present a unified view of the characteristics of each method with their mathematical formulation, computation cost, and strengths & weaknesses. Though each approach demonstrates a clear motivation for calibrating ICL, it is still unclear which method surpasses others in what scenarios. We proceed with an in-depth analysis of existing methods in representative tasks. We provide a novel view of calibration methods from a multi-variate decision boundary perspective. In pursuit of practical guidelines for ICL calibration, we set out two important research questions behind their design principles: **1)** What constitutes a better decision boundary for calibrations? 
**2)** Is content-free prior a good estimator of contextual bias? + +To address these research questions, we first derive the decision boundary for each category of calibration methods. We recall that the classification by an LLM is based on $\operatorname*{arg\,max}_{j \in \{0,...,J-1\}}p_j$ where $p_j$ denotes the $j$-th element of output vector $\mathbf{p}$. Consider a binary classification problem for simplicity: the decision boundary $h(\mathbf{p})$ for ICL is given by the line $p_0 - p_1 = 0$: the model predicts class 0, $y_0$, if $p_0 - p_1 \geq 0$, and class 1 otherwise. Consequently, CC and DC, which apply an affine transformation to $\mathbf{p}$, are equivalent to a linear transformation of the decision boundary. In CC with $\mathbf{W}=\operatorname*{diag}(\hat{\mathbf{p}})^{-1}$, $\mathbf{b}=\mathbf{0}$, the decision boundary can then be derived as: $$\begin{align} + p_0 \times \frac{1}{\hat{p}_{0}} = p_1 \times \frac{1}{\hat{p}_{1}} \rightarrow p_0 - p_1 \times \frac{\hat{p}_{0}}{\hat{p}_{1}} = 0, +\end{align}$$ which is a *rotation* of the ICL's linear decision boundary around the origin. Similarly, DC with $\mathbf{W}=\mathbf{I}$, $\mathbf{b}=-\frac{1}{T}\sum_{t} \mathbf{p}(y|[\textsc{Random Text}]_{t}, C)=-\hat{\mathbf{p}}$ is equivalent to a *shift* of ICL's linear decision boundary away from the origin, such that $p_0-p_1 = (\hat{p}_{0}-\hat{p}_{1})$. It is worth noting that both calibration choices lead to a linear decision boundary, indicating that the calibration problem can be framed as an unsupervised decision boundary learning problem. Based on this intuition, we further derive the decision boundary for PC as $P_\text{G}(\mathbf{p}|\mu_{0},\Sigma_{0})-P_\text{G}(\mathbf{p}|\mu_{1},\Sigma_{1})=0$, which delivers a non-linear boundary between the estimated Gaussian mixtures. We conduct a preliminary experiment to visualize the derived decision bounds from existing calibration methods alongside the ICL baseline. In Fig. 
[1](#fig:db){reference-type="ref" reference="fig:db"}, we observe that uncalibrated ICL is biased towards predicting `negative` in the SST-2 task. This biased prediction is then mitigated by each calibration method, where we observe a rotated decision boundary from CC, a shifted boundary from DC, and a non-linear boundary between the GMMs by PC. However, in the QNLI task (bottom row of Fig. [1](#fig:db){reference-type="ref" reference="fig:db"}), we observe failure cases in the calibration baselines, in particular, PC (third figure from the left), where it fails to capture the correct distribution for each class. From Fig. [1](#fig:db){reference-type="ref" reference="fig:db"} and the additional results in Fig. [5](#fig:dba){reference-type="ref" reference="fig:dba"} in Appendix §[10](#app:aexp){reference-type="ref" reference="app:aexp"}, we find that while theoretically more flexible, the non-linear decision boundaries learned by PC tend to be susceptible to overfitting and may suffer from instability in EM-GMM. We hypothesize that the PC boundary is even more vulnerable to instability for more challenging multi-class tasks due to the increased difficulties of learning clusters and assigning classes correctly. Conversely, we find that linear decision boundaries, as evidenced by CC and DC, can be more robust and generalizable across tasks. We validate this hypothesis by proposing BC with extensive experiments in Sec. [4.2](#sec:mainexp){reference-type="ref" reference="sec:mainexp"}. + +
+
+ + +
+
+ + +
+
Visualization of the decision boundaries of uncalibrated ICL, and after applying existing calibration methods and the proposed BC (to be introduced in Sec 2) in representative binary classification tasks of SST-2 (top row) and QNLI (bottom row) on 1-shot PaLM 2-S. We show success and failure cases for each baseline method (CC, DC, and PC), whereas BC is consistently effective. Refer to Appendix §10 for more examples.
+
+ +:::: wrapfigure +r0.48 + +::: center +![image](figures/casev2.pdf){width="48%"} +::: +:::: + +CC and DC both use a linear decision boundary but differ from each other by leveraging different formats of a content-free input to estimate the contextual prior. However, as we observed in Fig. [1](#fig:db){reference-type="ref" reference="fig:db"}, they both exhibit failure cases in QNLI, a question-answering NLI task. We hypothesize that contrary to the proposals made by CC and DC, relying on content-free tokens for calibration is *not* always optimal and may even introduce additional bias, depending on the task type. For example, in a textual entailment task involving question-sentence pairs, we empirically observe that an ICL template employed with a content-free token '`N/A`' such as '`Question: N/A, Sentence: N/A, Answer:`' will result in a biased prediction towards '`entailment`', because although '`N/A`' is intended to be content-free, the LLM may nevertheless construe '`N/A`' in the sentence to be substantively entailed to the '`N/A`' in the question due to surface text equivalency. This phenomenon holds true for other multi-text classification tasks, such as paraphrasing and word disambiguation tasks. Consequently, the prior estimated via a single content-free token can lead to further bias. DC introduces multiple randomly sampled tokens to form a content-free input, e.g. '`Question: that What old rubisco’s the did Which?`'. We suspect a possible reason is that random sequences, when used in conjunction with in-context demonstrations, can be susceptible to spurious relations among them that often lead to unfair priors further skewing the predictions, which is also reflected in Fig. [\[fig.case\]](#fig.case){reference-type="ref" reference="fig.case"}, where CC and DC fail to mitigate the contextual bias in the QNLI task. 
In sum, the empirical observation shows that content-free inputs can be inappropriate prior estimators, especially for multi-sentence classification tasks. + +![Illustration of Batch Calibration (BC). Batches of demonstrations with in-context examples and test samples are passed into the LLM. Due to implicit bias sources in the context, the score distribution from the LLM becomes highly biased. BC is a modular and adaptable layer option appended to the output of the LLM/VLM. BC generates calibrated scores according to Eq. [\[eq:mean\]](#eq:mean){reference-type="ref" reference="eq:mean"} & [\[eq:overprior\]](#eq:overprior){reference-type="ref" reference="eq:overprior"}. Highlighted symbols indicate the distribution means (visualized *for illustration only*). ](figures/bc.v3.2.pdf){#fig:method width="\\linewidth"} + +Inspired by the previous discussions, we now propose Batch Calibration (BC), a zero-shot, inference-only, and generalizable calibration technique with negligible computation cost. We further discuss how BC can be extended to *vision*-language models as well as the black-box *few-shot* learning setup where some labeled samples are available. + +Following the discussion in Sec. [2.3](#sec:analysis){reference-type="ref" reference="sec:analysis"}, we argue that the most critical component for calibration is to accurately estimate the contextual bias term $\mathbf{p}(y|C)$. Both CC and DC, which use content-free and in-domain random tokens as trigger signals, respectively, have failure cases in multi-sentence classification when the estimation of the contextual bias is inaccurate. On the other hand, PC is vulnerable to overfitting and may incorrectly model the mixtures, especially in high-dimensional space. 
We, therefore, opt for a linear decision boundary for its robustness, and instead of relying on content-free tokens, we propose to estimate the contextual bias for each class $\mathbf{p}(y=y_j|C)$ from a batch with $M$ samples, $\{x^1, ..., x^M\}$, in a *content-based* manner by marginalizing the output score over all samples $x\sim P(x)$ within the batch: $$\begin{align} + \mathbf{p}(y=y_j|C) =\operatorname*{\mathbb{E}}_{ +x\sim P(x)}\Big[\mathbf{p}(y=y_j|x, C) \Big] \approx \frac{1}{M}\sum_{i=1}^{M} \mathbf{p}(y=y_j|x^{(i)}, C) \, \forall \, y_j \in \mathcal{Y}. +\label{eq:mean} +\end{align}$$ We then obtain the calibrated probability by dividing the output probability by the contextual prior, which is equivalent to shifting the log-probability by the estimated mean of each class: $$\begin{align} + \hat{y}_i = \operatorname*{arg\,max}_{y\in \mathcal{Y}} \mathbf{p}_{\text{BC}}(y|x_i, C)=\operatorname*{arg\,max}_{y\in \mathcal{Y}}\big[\mathbf{p}(y|x_i, C)-\hat{\mathbf{p}}(y|C)\big]. +\label{eq:overprior} +\end{align}$$ It is noteworthy that this BC procedure is zero-shot and only involves unlabeled test samples. BC incurs negligible computation costs. We may either compute the correction term $\hat{\mathbf{p}}(y|C)$ once all test samples are seen or, alternatively, in an on-the-fly manner that dynamically processes the outputs. To do so, we may use a running estimate of the contextual bias for BC. At the $(n+1)$-th mini-batch, the bias term is given by: $$\begin{align} + \mathbf{p}_{\text{r}}^{n+1}(y|C) =\frac{n}{n+1}\mathbf{p}_{\text{r}}^{n}(y|C) + \frac{1}{n+1} \hat{\mathbf{p}}^{n+1}(y|C), +\end{align}$$ thereby allowing BC's calibration term to be estimated from a small number of mini-batches that is subsequently stabilized when more mini-batches arrive. 
+ +:::: wrapfigure +r0.48 + +::: center +![image](figures/BCLv4.pdf){width="\\linewidth"} +::: +:::: + +While BC is designed to be zero-shot and inference-only, it is also common that some *labeled* data are available. In this section, we describe a simple, adapted variant of BC that may further refine the calibration and mitigate any estimation errors from the unlabeled data, which we term *BCL*. Specifically, instead of deducting the bias term $\hat{\mathbf{p}}$ from the test data only, we introduce a single additional hyperparameter *strength* $\gamma\in \mathbb{R}$: $$\begin{align} + \mathbf{p}_{\text{BCL}}(y|x_i, C)=\mathbf{p}(y|x_i, C)-\gamma\hat{\mathbf{p}}(y|C), +\label{eq:BC+} +\end{align}$$ where $\gamma$ controls the strength of BC. To select the appropriate $\gamma$, we simply perform a grid search by uniformly sampling $T$ different $\gamma$ values in $[a, b]$ (we set $[a, b]:= [-5, 5]$, but any reasonable range may be used). The strength $\gamma$ is then learned by $\gamma^* = \operatorname*{arg\,max}_{\gamma\in[a, b]}R(\mathbf{p}_{\text{BC} +}, \gamma)$, where $R(\cdot,\cdot)$ is the evaluation function (e.g., accuracy) on the set of *labeled* data, allowing the amount of calibration to be adjusted from evaluation metrics directly. + +We give concrete examples in Fig. [\[fig:bcl\]](#fig:bcl){reference-type="ref" reference="fig:bcl"}, which illustrates the effect of BCL where we plot the accuracy in SST-2 and CB [@de2019commitmentbank] tasks over a range of $\gamma$. We observe that $\gamma=1$, which corresponds to BC without adjustment (purple line), leads to a strong but not optimal performance. By using the $\gamma$ learned from the labeled data (a 128-shot randomly sampled set in this case), BCL estimates the contextual bias more precisely by leveraging the labeled data and achieves a performance that is very close to the optimal. We refer readers to Table [\[tab:bcl\]](#tab:bcl){reference-type="ref" reference="tab:bcl"} for more results. 
+ +Recently, vision-language models (VLM) [@radford2021learning], which simultaneously encode visual and textual information, have demonstrated strong zero-shot generalization capability by rewriting class labels. However, the sources of bias as LLMs have also been observed in prompting VLMs [@alayrac2022flamingo] but have not been adequately addressed. In this work, we propose to apply BC to Zero-Shot (ZS) CLIP [@radford2021learning] and mitigate the biases in its zero-shot classifications. We follow the same notation from Sec. [2.1](#sec:notation){reference-type="ref" reference="sec:notation"}, where the test image is now $x$, and the prompt template becomes the context, $C$. Similarly, we append the BC layer at the output of the ZS CLIP and calibrate for each class following Eq. [\[eq:mean\]](#eq:mean){reference-type="ref" reference="eq:mean"} & [\[eq:overprior\]](#eq:overprior){reference-type="ref" reference="eq:overprior"}. + +
+ +
The ICL performance on various calibration techniques over the number of ICL shots on PaLM 2-S. Each shot indicates 1 example per class in the demonstration. Lines and shades denote the mean and standard deviation over 5 random seeds, respectively.
+
diff --git a/2310.00164/paper.pdf b/2310.00164/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..89e5a5fcdf30437d08a0f6972b3a3490c2ea1c41 --- /dev/null +++ b/2310.00164/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dc403069e1eb28c07154fb767c941954914acdb0eb13de532b23e67692c6d8dd +size 6930837 diff --git a/2310.02751/paper.pdf b/2310.02751/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..778e792508d5ef82a69000d441d359cfe2d6a9a5 --- /dev/null +++ b/2310.02751/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:04d88472a545e035fc74bb59c8fe0cb08e99f33c9e014d54b343542efddca5f3 +size 1956627 diff --git a/2310.07235/paper.pdf b/2310.07235/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..da2f8125bb949da8765da422872b2341a4adf790 --- /dev/null +++ b/2310.07235/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b743f8862a1110af288cce5d751543d2ad7093b3ea7393172d397401f3f20318 +size 1801810 diff --git a/2310.07535/paper.pdf b/2310.07535/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..8f4f9cd8d9ad917d83692ae0b8c71cc583bc040c --- /dev/null +++ b/2310.07535/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:920ff1f57876b63b35b42f15a8090cdc2e58fbaab6f1d269e92486082664ee1f +size 1187439 diff --git a/2310.08731/paper.pdf b/2310.08731/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..f8cb73eb0b1df66646cc32f4110a96e53cedb7c4 --- /dev/null +++ b/2310.08731/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:73a871b0de7888f0109a2ea67a14df5009a99543b57bfd7980d6e91cd0cc1847 +size 2474990 diff --git a/2310.11748/paper.pdf b/2310.11748/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..69f94c630be2b57cbbbbd28242c9dc0cb6a5d4ad --- /dev/null +++ 
b/2310.11748/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:207b80cf4bcf6bfd48a1af71fffa1433286dafef8c69763890013f3721c6ae67 +size 4634631 diff --git a/2310.11964/paper.pdf b/2310.11964/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..06d57d7d97bd8ec89584be7eb3f907ff1143a9ce --- /dev/null +++ b/2310.11964/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5e6386114b1b3cb1b9d7919afac9153bd83c30e0a730abebb8a2ae43aedb4209 +size 709828 diff --git a/2310.12086/paper.pdf b/2310.12086/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..a2d6849b8c646c0897872537e82b048ca80b65bb --- /dev/null +++ b/2310.12086/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f013fd67572bcb3c842f1ad47ec8ade18c971ddec60bdc8f20ebfa60f88023a4 +size 2058080 diff --git a/2310.12956/paper.pdf b/2310.12956/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..99d73d848c4f6ad5a3bec083ece88c448c435647 --- /dev/null +++ b/2310.12956/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:efe49fe2c8a7b41f52648892280e8b5a37a94a0d5b0682256a0723d6ea4e77e0 +size 8036521 diff --git a/2310.13236/paper.pdf b/2310.13236/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..8eef1d9e6c0ead76ba78a0074a72d4264055fe70 --- /dev/null +++ b/2310.13236/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:653b3600d10bcb6a201c3ce8e165d3107f2b0efe439aaa16e32d86bc1b6d8dd0 +size 556490 diff --git a/2310.13545/paper.pdf b/2310.13545/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..d2fd846168459918145b44f9448b4a860f007a57 --- /dev/null +++ b/2310.13545/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:60e389490ebd3172e97daac73f0f5d778401cdbdbfc0b6e737fc31e15d883af7 +size 12778578 diff --git 
a/2310.13862/paper.pdf b/2310.13862/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..02b63875ca6fc53a67ecf7d34c45e96d72a63ac7 --- /dev/null +++ b/2310.13862/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5d3889eb59283264d6364b53ef4be9e7b81980ea7667ae91af7942eff0d7b976 +size 781356 diff --git a/2310.14170/paper.pdf b/2310.14170/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..510ec1a514a237f1d29e32dea6cb26e70cfa24de --- /dev/null +++ b/2310.14170/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cc39badf2bc8d67991440cb6414ed7e70c2eb0377e6708cb1656c2aa6beeccf2 +size 1597372 diff --git a/2310.17569/paper.pdf b/2310.17569/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..d326aa7697345166e479e2445c0da2e8c86de1ce --- /dev/null +++ b/2310.17569/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0035bf547a3499d43ee1a1d75ac86630fd48d7e826636c1937cd2a841236de08 +size 11251548 diff --git a/2311.14294/paper.pdf b/2311.14294/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..ee9f47296c8ddc8d193910eafcc2f09dc3a5ef01 --- /dev/null +++ b/2311.14294/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cd95024257c72f97dfb0a045cbedf3d53b4ecbf65c301cb3282d8104fa7a54d8 +size 1762256 diff --git a/2311.15303/paper.pdf b/2311.15303/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..ed76324036b77749d127ede125e314bc97701b07 --- /dev/null +++ b/2311.15303/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8de604dde846cf9893bdf3cac49d9db37d74b98d0cfe2499ceaed7401a921000 +size 6582397 diff --git a/2311.17081/paper.pdf b/2311.17081/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..93d1f4020ddd1672add2923b94d65f8573476f40 --- /dev/null +++ b/2311.17081/paper.pdf @@ -0,0 
+1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8dba560fb8a441bdae09d1fbc9355f6a4a3b95f6f731b4a0b2adbc0aeb3a6731 +size 3849886 diff --git a/2311.17921/paper.pdf b/2311.17921/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..672068f31c02598d226abac53935e58a4cff60de --- /dev/null +++ b/2311.17921/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dab3b587aa0fe4cc019d0bd06a1bd3dcd19888318426627d911cb83f9fad71b2 +size 3950682 diff --git a/2311.17922/paper.pdf b/2311.17922/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..2bc7f418695da2cb62cd6971bef89c7bf777fa7e --- /dev/null +++ b/2311.17922/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4332bd0dd50b1a0d73b74400fb8c14c78c86260cd5bae95090254fb726d35f41 +size 14643417 diff --git a/2312.00834/paper.pdf b/2312.00834/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..437827134f9e6366b36a7543b52245e1f3c8289f --- /dev/null +++ b/2312.00834/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:24717c586a47e68b67a2c18604746a899fc11d26440bb54058985808f9e95c6f +size 10660166 diff --git a/2312.01397/paper.pdf b/2312.01397/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..f932221e038077c1a87e87008bdec462e3868dd4 --- /dev/null +++ b/2312.01397/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e24d88259b7ede6d9c01204b525df8a8e5c31b2b7e0bd4cc2ce42eac100c8f50 +size 4767713 diff --git a/2312.05551/paper.pdf b/2312.05551/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..d790591520081b32e3457fcdbf416af35de70ba6 --- /dev/null +++ b/2312.05551/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3e79920e07450abd8acf79ad47fffeb2e3d0f19629c965d2955bf300c7233266 +size 529753 diff --git a/2312.08200/paper.pdf b/2312.08200/paper.pdf new 
file mode 100644 index 0000000000000000000000000000000000000000..4457f94ad37c6f6dbffa2bad12a44007dd577391 --- /dev/null +++ b/2312.08200/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b5508cebf3ce6562ade7fe980aa9773ed697d1dbc2d973c7189c797e5d580695 +size 1058636 diff --git a/2312.11927/paper.pdf b/2312.11927/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..0568c9bdf6c3d0bab328ce7faad4c17c3d727865 --- /dev/null +++ b/2312.11927/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b37a077a2f060b20f1ceb6b1cbeb3fb450be504ae0bd01ea9a31d52c740d4d3b +size 2105437 diff --git a/2312.14572/paper.pdf b/2312.14572/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..1bca6aeac01c2cc481da7ca4ee22b4a0c4795bac --- /dev/null +++ b/2312.14572/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:75f7327ef2ccc83f3aee158a77fdbcec945538c76f08fc4446a0f82a153e7a6d +size 890159 diff --git a/2312.16176/paper.pdf b/2312.16176/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..63a40e4108078a97c99dbe86dfb4fd931e910bf0 --- /dev/null +++ b/2312.16176/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:023e1a67018798db7b25bfddbc932cc9c5c3267efda03f584fe46d43bc6524a1 +size 1076280 diff --git a/2401.06838/paper.pdf b/2401.06838/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..74ef1485b6ed437411a30d52bffa7b012d3891f5 --- /dev/null +++ b/2401.06838/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f7d317da15cf95b70a2508c71e665298aa5241498ddd314c7c8340691fd74d41 +size 2838101 diff --git a/2401.08295/paper.pdf b/2401.08295/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..21ad8bc660e7d2ce7d4505d0ed3eac70c8f8eb9d --- /dev/null +++ b/2401.08295/paper.pdf @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:98d131f80b3b661e85678d170a1f8d450ce86cb8b22b676ee150fcb5cedd720c +size 1212000 diff --git a/2401.10695/paper.pdf b/2401.10695/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..e975d958eaa6d363dc17553e6becee0d239dd4cf --- /dev/null +++ b/2401.10695/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f3a37cc76a8c16af6416587f914c46bf35dc644e66b3b0233b2c9564a2d31dcb +size 1109565 diff --git a/2401.11824/paper.pdf b/2401.11824/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..4889f0c4c71caa78cbc7fa11ff3c30df96cbe0e3 --- /dev/null +++ b/2401.11824/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2064b2f667da1f06911989f8dc5f034e0c4023bb627423ec228d844483eb335b +size 7738250 diff --git a/2401.17992/paper.pdf b/2401.17992/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..1ff338c4e96b54e7adb5546873fea79f00a788a2 --- /dev/null +++ b/2401.17992/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:42cffef36c3ec0dce39e28393e4f7f454689ace3edd7b19562afeb6ec9a1c806 +size 6633501 diff --git a/2402.01145/paper.pdf b/2402.01145/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..eaa49ddab79da6f6207180488a5cfc8eb4124162 --- /dev/null +++ b/2402.01145/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ef3d2e39e1d4fe26e93842b2da8e0a915594cba6d2309ef5ba5df1fb06064968 +size 1091889 diff --git a/2402.02429/paper.pdf b/2402.02429/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..167f9da0a341a7c98bd0226808e63dc05d4418f3 --- /dev/null +++ b/2402.02429/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:90ae551da609621baaf1c3dd8ac338c85a523a1c8a8ca74fe1be9a4a0389f8d9 +size 9967711 diff --git a/2402.06894/paper.pdf b/2402.06894/paper.pdf new file mode 100644 
index 0000000000000000000000000000000000000000..95f644859af1a4197787defb771fc3b538d7a40c --- /dev/null +++ b/2402.06894/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:278efd93bbbcf618add96a71b866e42ab72c5d0bb2d64cf528e30a80880efe05 +size 891618 diff --git a/2402.07891/paper.pdf b/2402.07891/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..5db7434c00115b29366cdd62d13bdde5c2dd0d19 --- /dev/null +++ b/2402.07891/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7fdb6e81620806c7ca971bb6a3984e009e8eb7cc2cd807ddbf2013031145cfa7 +size 3095761 diff --git a/2402.14966/paper.pdf b/2402.14966/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..c622cedb14cd8009de8e96007a8f90dba80b0c05 --- /dev/null +++ b/2402.14966/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0145425f893923cb4c67c622c4856a427d7fc59d41f36fc81da05b682e84435e +size 919732 diff --git a/2403.01092/paper.pdf b/2403.01092/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..fdb2a04094dccfc224eca5bc93b4c6c8311f297f --- /dev/null +++ b/2403.01092/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:887ddf1c837b7c8ca1e65fd9bcb3b7a369b08af3f42c1893bbd86f6733bfb527 +size 850882 diff --git a/2403.01273/paper.pdf b/2403.01273/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..7007bf5383ebd46eda90d9875557a8e9da03585e --- /dev/null +++ b/2403.01273/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e6c612954120dd8d991586f3a9f81c272e562a9a9b0961502dd5a82ec966805f +size 1254321 diff --git a/2403.03077/paper.pdf b/2403.03077/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..69c78a0381f0f4e644883fd880fcc5357189ca52 --- /dev/null +++ b/2403.03077/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:8c1f512dbcca3f1d31c51d97b0f71d370e795024b085f1d11038dd5e32acfc1d +size 10850716 diff --git a/2403.07262/paper.pdf b/2403.07262/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..e47eb6518387e94204ce8785a93d7ab9ef28414a --- /dev/null +++ b/2403.07262/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8b3ce7e1ba8d3b82d7797dc43fffe1e4755c895ec42239f8b37dc14a0dbafc8a +size 1829011 diff --git a/2403.07691/paper.pdf b/2403.07691/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..700d1746f0570fcee2d92e2752942cd2483f18b1 --- /dev/null +++ b/2403.07691/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b7bea325e50e4f6c4efa45d76b6823b72f4c391e9808f5c581729408bb6e91f3 +size 1395389 diff --git a/2403.08733/paper.pdf b/2403.08733/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..cee09b04dca5fbe1e8f35910a5e6b88f66a0c54f --- /dev/null +++ b/2403.08733/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1085c3fedd6fc6685e9e6f7b7ed68ab6fd091b6b3c3200586e3b455b2229a457 +size 15192317 diff --git a/2403.10658/paper.pdf b/2403.10658/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..9409eb4e0e4c69f861a5f245c659902413cf8b53 --- /dev/null +++ b/2403.10658/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6becc613636cfcd092a0045a2fda6d9a03b075f98b14fb21246356155272a1b6 +size 698563 diff --git a/2403.11131/paper.pdf b/2403.11131/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..237a515e336b840b3694a14f3d97376e64473610 --- /dev/null +++ b/2403.11131/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c294188fefe1d91703e3d6d505b755025e5bfaba503b0c5a4827488879a97cf3 +size 23789072 diff --git a/2403.11310/paper.pdf b/2403.11310/paper.pdf new file mode 100644 index 
0000000000000000000000000000000000000000..5ece9abe7c4bf93cda256df56333909a1deb6ea8 --- /dev/null +++ b/2403.11310/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8171a8be138e0ccb3cdfab4cb30f659f1c57208a51ff1f9ed2793c69ee7bdb9f +size 4418189 diff --git a/2403.18423/paper.pdf b/2403.18423/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..10cbec3e3814dd42f6f1d9b27e6cba2a26ee139c --- /dev/null +++ b/2403.18423/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8716eb1bbdbbda4f3de6095b3f6e3ff21e4d69b1f876c07e00aa3fdf70380efc +size 1753755 diff --git a/2404.01588/paper.pdf b/2404.01588/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..9ee2eebb43274ce528ddf5e33637b35428dd0eea --- /dev/null +++ b/2404.01588/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:be4111834b2d35710381458fec447061af35e7c0a6939e5eced810005d66a36e +size 738150 diff --git a/2404.01992/paper.pdf b/2404.01992/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..e31cf42e0bf28bb56807fd561d78b69bf434de3b --- /dev/null +++ b/2404.01992/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:65ebf7fdf302b524a02980ad6c4bed52a27e921d4f895efabbc79e36aca13ff5 +size 610409 diff --git a/2404.10150/paper.pdf b/2404.10150/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..faeee943e437752a4a88238cd0d12bd5f0ed5308 --- /dev/null +++ b/2404.10150/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:18907c5afdfb59c42a406ee1c11b0d14dc40c2ee05a2cd6f66d6533884a53e80 +size 481158 diff --git a/2404.11889/paper.pdf b/2404.11889/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..741e9d72ce303436c8853317e8712cd68c97b7a2 --- /dev/null +++ b/2404.11889/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:a8e8dcb90ff3124e8142733d66fe47753b5394e764d73f50b2f2bf752941eff9 +size 3348115 diff --git a/2404.12886/paper.pdf b/2404.12886/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..373aaf899c5b9a8716579580018caa1a43eb2a45 --- /dev/null +++ b/2404.12886/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:74cc70dca99788775cf46cb1c1500c5fc3545012b810100ee575423b78269422 +size 3291300 diff --git a/2405.00200/paper.pdf b/2405.00200/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..124d5d631786be40afbe46cde47782e3e2eee39b --- /dev/null +++ b/2405.00200/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a1370fedd954f6cefe9dd8e162ad1e0b671b293c16c124953560ff009f3a3dbd +size 940820 diff --git a/2405.08674/paper.pdf b/2405.08674/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..6cd2c66c15a14615f1ffc39be7a26690e5fd8908 --- /dev/null +++ b/2405.08674/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a63009ee4c90bf5ce8606cea694203b387687771e8db857d3834e69b3971365f +size 5187609 diff --git a/2405.08969/paper.pdf b/2405.08969/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..daa24009f7d26126a171e6615149f94eb4fedf34 --- /dev/null +++ b/2405.08969/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e09a0827c69e9f3a5a88d386e3e24e0ac1f81fa4491ef277032f769bfeb4c142 +size 2610448 diff --git a/2405.10812/paper.pdf b/2405.10812/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..d4edfadc01c6262d4fa882087898ae69ce9afa0f --- /dev/null +++ b/2405.10812/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:941b33aad1ee2610417d2815ff86c4ff2958c77e9aec29cb7d9c534e1bf40ca1 +size 968816 diff --git a/2405.12744/paper.pdf b/2405.12744/paper.pdf new file mode 100644 index 
0000000000000000000000000000000000000000..124fd603423deb74d4ca4a4ec3b67e5a604242ae --- /dev/null +++ b/2405.12744/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:314fa93b3a2e4e9d17530eb5ef3e2b9f46d9b07ed77bdae020ac10519ff5c6da +size 1023708 diff --git a/2405.13740/paper.pdf b/2405.13740/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..0daad2d4eed5de0b850f02dc843a71d5ffbf56a0 --- /dev/null +++ b/2405.13740/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0a133ec66fd37d3a3f560e4a8ce51b6664df5395739d5f7d459c178ee278a208 +size 3711886 diff --git a/2405.16148/paper.pdf b/2405.16148/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..b17b6376cfe524683484de4105aae752fb9b137e --- /dev/null +++ b/2405.16148/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:920f0ec5f0b1e1b9e43c878c61a2875aad5161cd1a400a0bd99fd0aa38aff184 +size 13198370 diff --git a/2405.16877/paper.pdf b/2405.16877/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..01d8319f4f1c14f41b5406510cfaad12689754f5 --- /dev/null +++ b/2405.16877/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e64b46f4ff266d1b8d29a7780739f58edee743e2a50b0c71b4ef86dabb78f881 +size 2335773 diff --git a/2405.19946/paper.pdf b/2405.19946/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..7e23bda779340f3178896dd4af6d0a5a48059bc7 --- /dev/null +++ b/2405.19946/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d8e37798441b6b476930ba189dfa4d0aa01c596a2aa783617501b74dcad15420 +size 876552 diff --git a/2406.04866/paper.pdf b/2406.04866/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..ed81537538fa755ab6d56514c9a3647f95613fc0 --- /dev/null +++ b/2406.04866/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:79f3f5a4e1b0ae60bc3db11f9cdfd7569ce0c2d2948cfafa6512d631deb207b9 +size 1184314 diff --git a/2406.10180/paper.pdf b/2406.10180/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..e3f9423fc6949090627517dd3e68fb7cd7817729 --- /dev/null +++ b/2406.10180/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:225667f98b3478b390bf547c7ef48db6e840a812e1b09ea1e36addb82113d0cc +size 20496826 diff --git a/2406.11820/paper.pdf b/2406.11820/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..b74d3e7230c5792126fbcc16c9e3c6d6b445d1e9 --- /dev/null +++ b/2406.11820/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:58931b8b095dd9028c3d2e06014ad34cfecc4cb6e1261205d211150ec7ae9a37 +size 8240916 diff --git a/2406.19931/paper.pdf b/2406.19931/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..3197971bc36fbbd059e91f5d0861c0a13e7f9d03 --- /dev/null +++ b/2406.19931/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9b86f465285ccea6f99b0b09d3dd2cc4fc33ab604de1fafddc72335bbeb6f25f +size 2078151 diff --git a/2407.00911/paper.pdf b/2407.00911/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..9cd010b5ff35cf8d68195031404eea0668e895d6 --- /dev/null +++ b/2407.00911/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d6b6a32fe432a16a1f79f308ec995a128981a6ce58535b23a1b15e7cfc6284e1 +size 1275923 diff --git a/2407.04538/paper.pdf b/2407.04538/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..13574fe3b10ff6f7f21720164f3ffa784ba35e28 --- /dev/null +++ b/2407.04538/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f91a973a56a105997f3cd8f9ed2f68bce84b33328184c77a03b30d6a6151a0db +size 10037515 diff --git a/2407.08221/paper.pdf b/2407.08221/paper.pdf new file mode 100644 index 
0000000000000000000000000000000000000000..85d7079302eeeca5194af3af3216eff7e64b010a --- /dev/null +++ b/2407.08221/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b1651868498a4515f9b57ef7141c4ba1dfd1ab8e31d1e174cb0ea06184e31f9b +size 40814565 diff --git a/2407.09271/paper.pdf b/2407.09271/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..c2ed33a9e86a92c0fc5c9df809a5b4ecc8fe1e8c --- /dev/null +++ b/2407.09271/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:21c4b7153976ca04c0fbdaa2d8d018f847ba863b78b2c73a727ff46978305a0f +size 4503163 diff --git a/2407.12511/paper.pdf b/2407.12511/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..9b1cd8c2c2c5f5723b61faea43417a9ea9651bb0 --- /dev/null +++ b/2407.12511/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:45539620b49139618ee227b1ba00d6b8ecdbca59509c50e79425cebef383f5a0 +size 10171584 diff --git a/2407.15487/paper.pdf b/2407.15487/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..b396a4ddd940e217501a2ef674a8eee95a51316f --- /dev/null +++ b/2407.15487/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:75cf1382670d0cc309a69e0f4d36deca9a0205888e1d3e7cc5c4c702cbc2730d +size 2527267 diff --git a/2407.18414/paper.pdf b/2407.18414/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..a4f4bbc61b1971d833ec8732f1f9d0c3cbf02e1e --- /dev/null +++ b/2407.18414/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7abba521b79a70448c01c59800fc202ac03c161c1b26d751da1b06ccd884b0ec +size 1037744 diff --git a/2408.04962/paper.pdf b/2408.04962/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..8358e9201fc43c20181526521fd7a102cac09421 --- /dev/null +++ b/2408.04962/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:6835341ceb5abc8da3f1971fa240696f2cdd587296049e1f61f48f8eca23aa69 +size 6688090 diff --git a/2408.09384/paper.pdf b/2408.09384/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..547a76f59552ac9a5e89779c5b63c00aa5abd526 --- /dev/null +++ b/2408.09384/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a05d3f7f988408c9f588aaad1684cbde2d2b7687d6fd99ec10b250b3711a6856 +size 18489090 diff --git a/2409.01445/paper.pdf b/2409.01445/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..e44b03e3537a8ead4ea6bafe76c3122c08be7bda --- /dev/null +++ b/2409.01445/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:251af6d88116331df975241c67c1d004cc884e5baf0c09aebf3fdf69bd59bece +size 6852568 diff --git a/2409.11256/paper.pdf b/2409.11256/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..c985ab4ef7baba84e378c54b379e27afebdbedf8 --- /dev/null +++ b/2409.11256/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:afb85c9acee469943b2cb0738e945d42eecd0ea68ad7492463b2dc17db525292 +size 4287248 diff --git a/2409.17986/paper.pdf b/2409.17986/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..198bbdef2aea8f101986368b13d5939fa12e4b26 --- /dev/null +++ b/2409.17986/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:87016006c61730dcbf06d83c9c0b8c7204920bd9e9cae6f0f6c2c148efc32d7f +size 12928960 diff --git a/2410.06846/paper.pdf b/2410.06846/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..2168db77af2825f2b95a5e9e7eaf37b4d4a80e8c --- /dev/null +++ b/2410.06846/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:25eea12eb6de9b1c44f85fb099b2b831368372ab02de2f497db6b121ef1dd4d2 +size 762600 diff --git a/2410.14429/paper.pdf b/2410.14429/paper.pdf new file mode 100644 index 
0000000000000000000000000000000000000000..8c7c221a5b601a547ae47b4d5e8303800f60d6aa --- /dev/null +++ b/2410.14429/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ee4943237947a55688ed73cbb16937db371638b946efa7f46ba29bd2b2b9beac +size 24253487 diff --git a/2412.03512/main_diagram/main_diagram.drawio b/2412.03512/main_diagram/main_diagram.drawio new file mode 100644 index 0000000000000000000000000000000000000000..2579b225be5bc67ba839c54733edc360bdc52318 --- /dev/null +++ b/2412.03512/main_diagram/main_diagram.drawio @@ -0,0 +1,115 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/2412.03512/main_diagram/main_diagram.pdf b/2412.03512/main_diagram/main_diagram.pdf new file mode 100644 index 0000000000000000000000000000000000000000..ad5a39b8258b2e84edabf01d50bcdd36bab234a0 Binary files /dev/null and b/2412.03512/main_diagram/main_diagram.pdf differ diff --git a/2412.03512/paper_text/intro_method.md b/2412.03512/paper_text/intro_method.md new file mode 100644 index 0000000000000000000000000000000000000000..4aab8fc5eb0950f8201bee2cae3517d05a9378f8 --- /dev/null +++ b/2412.03512/paper_text/intro_method.md @@ -0,0 +1,80 @@ +# Introduction + +Semantic correspondence involves finding relationships between different regions across two images, enabling a wide range of applications such as 3D reconstruction [47], image-to-image translation [91], object tracking [22], video segmentation [95], pose estimation [37, 85], explainable AI [87] and visual place recognition [42]. Traditional approaches often rely on handcrafted features like SIFT [58] and SURF [5], which, despite their robustness, have limitations in capturing semantic meaning. With the advent of deep learning, Convolutional Neural Networks [31, 67, 73, + +![](_page_0_Figure_8.jpeg) + +Figure 1. 
Our method achieves better performance and throughput with less parameters on the SPair-71k dataset. The circle size represents number of parameters. For more details, see Tab. 1. + +101] and Vision Transformers [11, 12, 40, 43, 45] have revolutionized the field, providing powerful methods to extract semantically rich features from images in various ways. Since finding semantic correspondences is challenging due to the need for extensive world knowledge, the transition to large-scale supervised deep learning was hindered by the limited availability of ground-truth annotations. To address this issue, recent efforts have focused on weakly-supervised [94] and self-supervised methods to extract per-pixel descriptors, most notably DINO [1, 29]. Diffusion Models (DMs) have shown great capabilities in generating highquality images, due to their large-scale training paradigm which has also led them to learn rich world representations, useful for downstream tasks such as semantic correspondence [32, 90]. + +Since a previous work "A Tale of Two Features" [102] demonstrated that those two models are complementary in their features, current state-of-the-art approaches [63, 103] all rely on combining two large vision foundation models. This comes with several limitations. First, diffusion models often need to be run multiple times to incorporate different timesteps, leading to increased computational complexity and longer inference times. Second, these combined models are typically very large networks with a substantial number of parameters, which require significant computational resources and memory. Third, managing these combined models introduces numerous additional hyperparameters, such as timestep(s), layer(s) and prompt, and mixing weights, complicating further training. This becomes particularly challenging when calculating correspondences in videos or in other real-time applications such as semantic video correspondence. 
Additionally, the increased number of parameters reduces computational efficiency, posing challenges for server-less applications. + +Distillation [34] is a well-known method broadly applied in various tasks like classification [55], segmentation [60], self-supervised learning [25] and generative models' sampling acceleration [78]. This naturally raises a question: can we also apply distillation on top of the "Tale of Two Features" [102], and ideally squeeze the parameter burden further to boost inference speed in the task of semantic correspondence? + +We propose to leverage knowledge distillation for semantic correspondence to reduce the computational load of existing off-the-shelf methods, without retraining a full model. In summary, we make the following contributions: + +- We propose a parameter-efficient approach to distill the semantic correspondence capabilities of two complementary large vision foundation models into a more compact and efficient model. +- We additionally propose a novel fine-tuning protocol to further boost the model's performance by incorporating 3D data augmentation. This approach not only achieves new state-of-the-art results but also allows the model to improve without the need for labeled data. +- We validate the effectiveness of our method on three canonical benchmark datasets, demonstrating state-ofthe-art performance on semantic correspondence with significantly reduced inference time. + +# Method + +**Problem Formulation.** Semantic correspondence can be defined as follows. For a given pair of images $I_1, I_2$ and a query keypoint $p_1 \in I_1$ , we want to find the corresponding target point $p_2 \in I_2$ with the highest semantic similarity i.e. points of different objects that share semantic meaning. 
By extracting feature maps $\mathcal{F}_1, \mathcal{F}_2$ from both images, the corresponding target point $p_2$ can be obtained with: + +$$p_2 = \arg\max_{p} d(\mathcal{F}_1(p_1), \mathcal{F}_2(p)), \tag{1}$$ + +where the similarity measure $d(\cdot, \cdot)$ is defined as cosine similarity $sim(\cdot, \cdot)$ . Current state-of-the-art methods [63, 102, 103] rely on combining two large vision foundation models, specifically a Vision Transformer and a Diffusion Model. This introduces numerous hyperparameters (e.g., timesteps, layers, prompts, mixing weights), increasing the complexity, and number of parameters. As a result, fine-tuning is difficult, VRAM requirements are higher, and computational efficiency is reduced, challenging server-less or realtime deployment. In this work, we address these issues by distilling information from these two large vision foundation models into a single model. This approach significantly reduces complexity and decreases the number of parameters, thereby improving efficiency without compromising performance. Furthermore, we show that by incorporating 3D data from a multi-view image dataset, we can fine-tune our model without the expense of human-annotated data. + +We leverage two major models that have shown strong performance in semantic correspondence: Stable Diffusion [76] is a text-conditioned Latent Diffusion Model (LDM) known for its ability to synthesize realistic images, leading to highly informative internal feature representations [90]. DINOv2 [70] is a self-supervised Vision Transformer that excels in capturing useful features through self-supervised contrastive learning. Combining these two models offers complementary strengths, which are rooted in their distinct learning paradigms [102]. While LDMs learn to generate images with a holistic understanding of the world, capturing both global and local information, DINO focuses on extracting meaningful features with invariance to spatial information, i.e.
local position and global orientation, due to its input augmentations during training. + +**Multi-Teacher Distillation.** Our method aims to distill information from two large vision foundation models + +![](_page_3_Figure_0.jpeg) + +Figure 2. Illustration of our multi-teacher distillation framework (a) and 3D data augmentation method (b). We distill two complementary models, DINOv2 and SDXL Turbo, into one single and more efficient model. Using unsupervised 3D data augmentation we further refine our distilled model to achieve new state-of-the-art in both throughput and performance. + +into a single student model that provides fast inference with accurate predictions. Therefore, we employ multi-teacher distillation with two teacher models $T_1, T_2$ and one student model S, where $T_1$ is a ViT and $T_2$ is a diffusion model. To achieve our goal of parameter-efficient training, we initialize the student model with a pre-trained vision model and incorporate Low-Rank Adaptation (LoRA) [35]. LoRA uses two smaller matrices, $A \in \mathbb{R}^{r \times k}$ and $B \in \mathbb{R}^{d \times r}$ , whose dot product $\Delta W = BA$ matches the size of the model's initial weight matrix $W \in \mathbb{R}^{d \times k}$ , with its size controlled by $r \ll \min(d,k)$ . Following [3,16,106], we apply LoRA only to the projection layers of the queries and values and keep all other parameters fixed: + +$$Q = W_Q x + B_Q A_Q x,$$ + +$$V = W_V x + B_V A_V x,$$ + +$$K = W_K x,$$ +(2) + +where $W_Q, W_K$ and $W_V$ are the frozen projection layers and $B_Q$ , $A_Q$ and $B_V$ , $A_V$ are the trainable LoRA layers. The weights of $B_Q$ and $B_V$ are initialized with zeros. Besides parameter efficiency, this also mitigates the problem of catastrophic forgetting [7]. We utilize the predicted feature maps of an image pair to obtain the similarity maps of the teacher and student. The objective of the distillation is to align these similarity distributions. 
Thus, our approach is fully unsupervised by leveraging pseudo-labeling instead of human-annotated correspondences. Specifically, we pass an image $I_1$ to $T_1$ while we pass multiple noised versions $I_1 + \epsilon_t$ to $T_2$ . We average the extracted feature maps of $T_2$ and concatenate them with the extracted feature map of $T_1$ to form a combined feature map $\mathcal{F}_1$ . We input the same image $I_1$ to the student S and extract the feature map $\mathcal{F}'_1$ from it. + +However, manually selected image pairs are scarce. To exclude any supervision while still having image pairs that are beneficial for the distillation, we propose to use image retrieval during training. The goal is to find a set of semantically similar images $\mathcal{R}(I_1)$ for a given image $I_1$ . After finding such a set, we sample and process a second image $I_2 \sim \mathcal{R}(I_1)$ in the same way as $I_1$ , to obtain feature maps $\mathcal{F}_2$ and $\mathcal{F}_2'$ . + +We compute the cosine similarity of *all vectors* of the teacher feature maps $\mathcal{F}_1, \mathcal{F}_2$ and the student feature maps $\mathcal{F}'_1, \mathcal{F}'_2$ to obtain the similarity maps $\mathcal{S}$ and $\mathcal{S}'$ with shape $(H \times W) \times (H' \times W')$ using: + +$$sim(\mathcal{F}_1, \mathcal{F}_2) = \mathcal{F}_1 \cdot \mathcal{F}_2^T = \mathcal{S}$$ + (3) + +We apply softmax with temperature $\sigma_{\tau}(\cdot)$ to ensure that each descriptor has a sharp but distributed similarity over all other descriptors. We use cross-entropy as a dense objective over all $(W \times H)^2$ correspondences simultaneously. 
Hence, we define the final training objective as: + +$$\mathcal{L}_{dist} = \text{CE}\left(\sigma_{\tau}(\mathcal{F}_{1} \cdot \mathcal{F}_{1}^{\prime T}), \ \sigma_{\tau}(\mathcal{F}_{2} \cdot \mathcal{F}_{2}^{\prime T})\right)$$ +$$= \text{CE}\left(\mathcal{S}, \mathcal{S}^{\prime}\right)$$ +(4) + +Here, cross-entropy loss CE is defined as $CE(P,T) = -\mathbb{E}_P[\log T]$, i.e., the cross-entropy of the distribution T relative to a distribution P. As a result, the student learns the approximate goal of dense image similarity instead of directly imitating the teacher's features and lowering the feature dimension. + +**3D Data Augmentation.** Because large and diverse annotated datasets for semantic correspondence are scarce, we utilize a large multi-view image dataset with corresponding depth maps, namely CO3D [72], to further enhance + +our distilled model in an unsupervised manner. Specifically, with two images of different viewing angles $I_1$ , $I_2$ , their corresponding depth maps $D_1$ , $D_2$ , and camera parameters $K_1$ , $K_2$ , we project all points of $I_1$ from screen view into world view using $K_1$, and project them back into the screen view of $I_2$ using $K_2$. + +$$T = T_{s \to w}(K_1) \cdot T_{w \to s}(K_2),$$ + +$$\hat{p} = \begin{pmatrix} p_x \\ p_y \\ D_1(p_x, p_y) \end{pmatrix} \cdot T,$$ +(5) + +where $T_{i\to j}(\cdot)$ is the transformation from coordinate system i to j. Using the difference in z-axis of the projected depth $\hat{D}_2$ to $D_2$ , we exclude points that are not visible after the transformation: + +$$M(x,y) = \begin{cases} 1 & \text{if } |D_2(x,y) - \hat{D}_2(x,y)| < \epsilon, \\ 0 & \text{otherwise} \end{cases}$$ + (6) + +where M is the mask of mutually visible pixels and $\epsilon$ is the threshold parameter. Thus, we obtain *locally-dense* correspondences to fine-tune our model without any human annotations.
Following [53], we apply a $k \times k$ Gaussian kernel $g_k(\cdot)$ to the correspondence points, resulting in a $(N \times H \times W)$ sized correspondence map. Furthermore, we apply softmax with temperature to the predicted similarity map $\mathcal{S}$ . This yields two distributions, where we utilize cross-entropy as the learning objective, which leads to the final fine-tuning loss: + +$$\mathcal{L}_{fine} = \text{CE}\left(\sigma_{\tau}\left(\mathcal{F}_{1}\cdot\mathcal{F}_{2}^{T}\right),\ g_{k}\left(G\right)\right)$$ + (7) diff --git a/2412.10912/paper.pdf b/2412.10912/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..6d1be4c3e3e4f6f97ba635c1004c1e0fad444f14 --- /dev/null +++ b/2412.10912/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:94fae89f4b9670d1e23ec0c62f8671736eefd7e86803e6c9cab7049629de90dd +size 1367561 diff --git a/2412.14510/paper.pdf b/2412.14510/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..7cd106ef7a9c8226e0c2ef96c0029b0783221456 --- /dev/null +++ b/2412.14510/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c9cb233f5f3cc3c8556b4860106ce969a6d16b3b00e7b3b48a38251ae71a970b +size 488737 diff --git a/2501.08659/paper.pdf b/2501.08659/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..c50e4773af86389134f67e3a3fca10e4488fa5db --- /dev/null +++ b/2501.08659/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:06f3b0ef0c194b80007a8c88feac92028898b7dc273050c946a6f7f4c0a51e91 +size 1218710 diff --git a/2501.13484/paper.pdf b/2501.13484/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..a365de24bcb1c63b55cdb59dd9aebb1dc18065c5 --- /dev/null +++ b/2501.13484/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:379dac7269a82ea849d0583fcee4f3c20be2fc6ef75917f6e3cd3a782845526b +size 13872397 diff --git 
a/2502.07244/paper.pdf b/2502.07244/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..839c2f2ce14d2c10d42ed5124dca8b272853c599 --- /dev/null +++ b/2502.07244/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a98ac13a8cb5c2f280f99e93489afba3af9afe96116844b97b552715114070cb +size 4370721 diff --git a/2502.09977/paper.pdf b/2502.09977/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..667fa997a1161df0adcac2d17b3b36e1f20d6cf7 --- /dev/null +++ b/2502.09977/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a40d65f934c6532bc3f2b3204770f10eb43037114fb258af43429c3c70f399fc +size 609393 diff --git a/2502.14096/paper.pdf b/2502.14096/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..ee449b53a19b4174c1514337686fd75f872fe5ee --- /dev/null +++ b/2502.14096/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:785d328b2dd2e7e2c5f3ae7d90e45520359f63a808f236e94b1df38cae60328e +size 1392901 diff --git a/2502.15215/main_diagram/main_diagram.drawio b/2502.15215/main_diagram/main_diagram.drawio new file mode 100644 index 0000000000000000000000000000000000000000..2c12374244e8cd9827694b839f20876fd03f08c6 --- /dev/null +++ b/2502.15215/main_diagram/main_diagram.drawio @@ -0,0 +1,49 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/2502.15215/paper_text/intro_method.md b/2502.15215/paper_text/intro_method.md new file mode 100644 index 0000000000000000000000000000000000000000..cd9143f8250b8e636bd9e2956c2e209a781929f8 --- /dev/null +++ b/2502.15215/paper_text/intro_method.md @@ -0,0 +1,242 @@ +# Introduction + +Interpretability has become more important as artificial intelligence (AI) models have become more sophisticated and complicated in recent years. 
Various methods such as LIME [@ribeiro2016should] and SHAP [@lundberg2017unified] have been suggested to interpret complex black-box AI models. However, these methods explain a given black-box model through a locally approximated interpretable models and thus often fail to provide a faithful global view of the model [@AttackSHAP&2020&Slack]. + +The functional ANOVA model, which approximates a given complex high-dimensional function by the sum of low-dimensional (e.g., one or two dimensional) functions, is a well known transparent-box AI model. One of the most representative examples of the functional ANOVA model is the generalized additive model (GAM, [@gam]), which consists of the sum of one-dimensional functions, each corresponding to each input feature. Low-dimensional functions are easier to understand, and thus the functional ANOVA model is popularly used for interpretable AI [@func_puri; @neural_decom]. + +Recently, several specially designed neural networks for the functional ANOVA model have been proposed, including NAM [@nam] and NBM [@nbm]. These neural networks can be learned by standard stochastic gradient descent algorithms and thus can be applied to large sized data compared to traditional learning algorithms based on basis expansions [@wood2006low] and regularizations [@SSANOVA]. However, existing neural networks are not good at estimating each component (each low-dimensional function in the functional ANOVA model) mainly due to unidentifiability. Here, 'unidentifiability' means that there exist multiple different functional ANOVA decompositions of a given function and we do not know which decomposition a gradient descent algorithm converges on. Note that poor estimation of the components would result in poor interpretation. + +In this paper, we develop new neural networks for the functional ANOVA model such that each component is identifiable but they are learnable by standard stochastic gradient descent algorithms. 
Identifiability makes our proposed neural networks be good at estimating the components and thus provide reliable interpretation. In addition, it is robust to outliers and easy to reflect monotone constraints. + +To develop the proposed neural networks, we begin with the tensor product basis expansion [@wood2006low] and replace each basis function by specially designed neural networks so that each component becomes identifiable and robust to outliers. We call our proposed neural networks *Tensor Product Neural Networks* (TPNN). Finally, we propose ANOVA-TPNN, which estimates each component in the functional ANOVA model using TPNNs. Theoretically, we prove that ANOVA-TPNN has the universal approximation property in the sense that it approximates any Lipschitz function well. + +Overall, our contributions are summarized as follows. + +- We propose novel neural networks (TPNN) for the functional ANOVA model with which we can estimate each component stably and accurately by use of a standard stochastic gradient descent algorithm. + +- We prove the universal approximation property in the sense that TPNN can approximate any Lipschitz function up to an arbitrary precision. + +- By analyzing multiple benchmark datasets, we demonstrate that TPNN provides more accurate and stable estimation and interpretation of each component compared to the baseline models, including NAM [@nam], NBM [@nbm], NODE-GAM [@nodegam] and XGB [@chen2016xgboost] without losing prediction accuracy. + +Let $\textbf{x}=(x_{1},...,x_{p})^\top \in \mathcal{X} = \mathcal{X}_{1}\times ... \times \mathcal{X}_{p}$ be a vector of input features, where we assume $\mathcal{X} \subseteq [-a,a]^p$ for some $a>0$. We denote $[p] = \{1,\dots,p\}$, and denote its power set as $\mathcal{P}([p])$. For $\textbf{x} \in \mathcal{X}$ and $S \subseteq [p]$, let $\textbf{x}_{S}=(x_j,j\in S)^\top.$ We denote $f_{S}$ as a function of $\textbf{x}_{S}$. 
For a real-valued function $f : \mathcal{X} \to \mathbb{R}$, we denote $||f||_{\infty} = \operatorname{sup}_{\textbf{x} \in \mathcal{X}}|f(\textbf{x})|.$ + +The functional ANOVA model [@func_first] decomposes a high-dimensional function $f$ into the sum of low-dimensional functions $$\begin{align*} + f(\textbf{x}) &= \beta_0+ \sum_{j=1}^p f_j(x_j)+ \sum_{j<k} f_{jk}(x_j,x_k)+ \cdots + f_{[p]}(x_1,\ldots,x_p). +\end{align*}$$ *\[NOTE(review): the "<" in this display equation was parsed as an HTML tag during text extraction, and the passage from here up to the basis definition was lost. Judging from the surrounding text, the missing passage introduced the main-effect expansion $f_j(x_j) \approx \sum_{k=1}^{K_j} \beta_{j,k} \phi_j(x_j|\theta_{j,k})$ (eq. \[eq:j_main\]) and defined the proposed basis $\phi_j(x|\theta)$ in terms of $\sigma\left(\frac{x-b}{\gamma}\right)$ and $c_j(b,\gamma)$ — recover the exact formulas from the original paper.\]* for $\gamma>0,$ where $\theta=(b,\gamma),$ $\sigma(\cdot)$ is an activation function and $c_j(b,\gamma)= -(1- \eta_j(b,\gamma)) /\eta_j(b,\gamma)$ with $\eta_j(b,\gamma)=\int_{x\in \mathcal{X}_j} \sigma\left(\frac{x-b}{\gamma}\right) \mu_j(dx).$ We introduce the term $c_j(b,\gamma)$ to ensure $\phi_j(x|\theta)$ satisfies the sum-to-zero condition. For $\sigma(\cdot)$ function, any differentiable function can be used but in this paper we use the sigmoid function for $\sigma(x)$ (i.e. $\sigma(x)=1/(1+\exp(-x))$), since it is robust to input outliers and provides a nice theoretical property: an universal approximation theorem given in Section [3.3](#sec:uni_thm){reference-type="ref" reference="sec:uni_thm"}. + +The standard basis function approach can be understood as a method of fixing the parameters $\theta_{jk}$s a priori and only learning $\beta_{jk}$s in ([\[eq:j_main\]](#eq:j_main){reference-type="ref" reference="eq:j_main"}). In contrast, we learn $\theta_{jk}$ as well as $\beta_{jk}.$ That is, the terms $\phi_j(x|\theta_{j,k})$ can be considered as data-adaptive basis functions. Since there is no constraint (except the nonnegative constraint on $\gamma$) on the parameters, a gradient descent algorithm can be used for learning. The number of basis functions $K_j$ is fixed in advance as is done in the basis expansion approaches.
+ +For $f_S,$ we consider the following tensor product neural network (TPNN) $$\begin{align} +f_S(\textbf{x}_S) \approx \sum_{k=1}^{K_S} \beta_{S,k} \phi_S(\textbf{x}_{S}|\theta_{S,k}) +\label{eq:high_order_tpnm} +\end{align}$$ where $\phi_S(\textbf{x}_S|\theta_{S,k})=\prod_{j\in S} \phi_j(x_j|\theta_{j,k}^{S} )$. Since $\phi_j(x_j|\theta_{j,k}^{S})$ satisfies the sum-to-zero condition, $\phi_S(\textbf{x}_S|\theta_{S,k})$ does and so does $f_S.$ As is done for the main effect, we learn $\beta_{S,k}$s and $\theta_{S,k}$s while we treat $K_S$ as a hyper-parameter. + +As the traditional tensor product basis expansion approaches [@wood2006low; @rugamer2024scalable] do, we may consider $$f_S(\textbf{x}_S)\approx (\otimes_{j\in S} \Phi_j(x_j)) \bm{\beta}_S,$$ where $\Phi_j(x_j)=(\phi_j(x_j|\theta_{j,k}), k\in [K_j]),$ $\bm{\beta}_S$ is a $\prod_{j \in S}K_{j}$-dimensional tensor, and $\theta_{j,k}$s are those used for the main effect. This expansion shares the parameters between the components and thus the number of parameters $\theta_{jk}$s is smaller than the expansion in ([\[eq:high_order_tpnm\]](#eq:high_order_tpnm){reference-type="ref" reference="eq:high_order_tpnm"}). However, the number of parameters in $\bm{\beta}_S$ grows exponentially in $|S|$ and thus the total number of learnable parameters would be much larger than that in the expansion ([\[eq:high_order_tpnm\]](#eq:high_order_tpnm){reference-type="ref" reference="eq:high_order_tpnm"}). This parameter efficiency is an important advantage of the proposed TPNNs compared to the traditional tensor product basis expansions. + +Finally, we propose ANOVA-T$^{d}$PNN that estimates each component of the functional ANOVA model using TPNN as follows.
$$f(\textbf{x}) = \beta_0+ \sum_{S:|S|\le d} \sum_{k=1}^{K_S} \beta_{S,k} \phi_S(\textbf{x}_S|\theta_{S,k}).$$ In ANOVA-T$^{d}$PNN, the learnable parameters are $\beta_0$ and $(\beta_{S,k},\theta_{S,k}), k \in [K_{S}], S\subseteq [p], |S|\le d.$ Unless there is any confusion, we use ANOVA-TPNN and ANOVA-T$^{d}$PNN for general $d$ interchangeably. + +For given training data $(\textbf{x}_1,y_1),\ldots, (\textbf{x}_n,y_n)$ and a given loss function $\ell,$ we learn the parameters by minimizing the empirical risk $\sum_{i=1}^n \ell (y_i, f(\textbf{x}_i))$ by a gradient descent algorithm. Overfitting can be avoided by selecting the number of epochs and learning rate carefully. + +The term $c_j(b,\gamma)$ could be too large when $\eta_j(b,\gamma)$ is close to 0, which can happen when $\mu_j$ is the empirical distribution and there exist outliers. To avoid this problem, we transform each input feature based on the marginal ranks to make $\mu_j$ of the transformed data be similar to the uniform distribution. Since ANOVA-TPNN is nonparametric, this data transformation does not much affect the final prediction model. + +An interesting theoretical property of ANOVA-TPNN is the universal approximation property as the standard neural network has [@hornik1989multilayer]. That is, ANOVA-TPNN can approximate any G$\text{A}^{d}$M function to a desired level of accuracy, as stated in the following theorems. + +::: {#thm_approx .theorem} +**Theorem 3**. *Suppose that $\mu_j, j\in[p]$ have lower and upper bounded densities with respect to the Lebesgue measure.
Then, for any $L$-Lipschitz continuous function[^1] $g_{0,S}:\prod_{j\in S} \mathcal{X}_j \rightarrow \mathbb{R}$ satisfying the sum-to-zero condition, there exists a TPNN with $K_S$ many basis neural networks such that $$\begin{align*} + \left \| g_{0,S}(\cdot) - \sum_{k=1}^{K_{S}}\beta_{S,k}\phi_{S}(\cdot|\theta_{S,k}) \right \|_{\infty} + < C_S {|S| \over K_{S}^{1\over |S|} + 1} +\end{align*}$$ for some constant $C_S>0$ and $S \subseteq [p].$* +::: + +Theorem [3](#thm_approx){reference-type="ref" reference="thm_approx"} shows that TPNN can approximate any Lipschitz continuous function satisfying the sum-to-zero condition with arbitrary precision by choosing $K_S$ sufficiently large, and the required $K_S$ for a given precision should increase as $|S|$ increases. An obvious corollary of Theorem [3](#thm_approx){reference-type="ref" reference="thm_approx"} is that ANOVA-TPNN can approximate any GA$^{d}$M model where each component is Lipschitz continuous and satisfies the sum-to-zero condition. + +::: {#cor_approx .corollary} +**Corollary 4**. *Let $g_0(\textbf{x}):= \sum_{S\subseteq [p], |S|\le d} g_{0,S} (\textbf{x}_S)$ be a given GA$^d$M function satisfying the sum-to-zero condition. If $\mu_j, j\in[p]$ are probability measures having bounded densities with respect to the Lebesgue measure and each $g_{0,S}$ is $L$-Lipschitz continuous, then there exists $f_{\text{ANOVA-T$^d$PNN}}$ such that $$\begin{align*} + \left \| g_{0}(\cdot) - f_{\text{ANOVA-T$^d$PNN}}(\cdot) \right \|_{\infty} + < C \sum_{S \subseteq [p],|S|\leq d} {|S| \over K_{S}^{1\over |S|} + 1} +\end{align*}$$ for some constant $C>0,$ where $K_{S}$ is the number of basis neural networks for component $S$.* +::: + +Similarly to NBM [@nbm], we extend ANOVA-TPNN to NBM-TPNN, which estimates each component as a linear combination of common basis neural networks. + +Let $\mathcal{X}_j, j\in [p]$ be all equal to $\mathcal{X}_0$ (e.g. $\mathcal{X}_0=[-1,1]$) and let $\mu_j, j\in [p]$ be also all equal to $\mu_0$ (e.g. the uniform distribution on $[-1,1]$).
Let $\phi(x|\theta)$ be a basis neural network on $\mathcal{X}_0$ satisfying the sum-to-zero condition with respect to $\mu_0.$ Then, NBM-TPNN approximates $f_S$ by $$f_S(\textbf{x}_S)\approx \sum_{k=1}^{K} \beta_{S,k} \prod_{j\in S} \phi(x_j|\theta_k).$$ That is, NBM-TPNN shares basis neural networks for each component, which reduces the number of learnable parameters much. The experimental results for NBM-TPNN are provided in Section [4.6](#sec:nbm-tpnm){reference-type="ref" reference="sec:nbm-tpnm"}. + +::: table* ++----------------------+------------------------------------------------+------------------------------------------------+------------------------------------------------+ +| True model | $f^{(1)}$ | $f^{(2)}$ | $f^{(3)}$ | ++:====================:+:====================:+:==========:+:==========:+:====================:+:==========:+:==========:+:====================:+:==========:+:==========:+ +| -------- | ------------ | NA$^{2}$M | NB$^{2}$M | ------------ | NA$^{2}$M | NB$^{2}$M | ------------ | NA$^{2}$M | NB$^{2}$M | +| Models | ANOVA | | | ANOVA | | | ANOVA | | | +| -------- | T$^{2}$PNN | | | T$^{2}$PNN | | | T$^{2}$PNN | | | +| | ------------ | | | ------------ | | | ------------ | | | ++----------------------+----------------------+------------+------------+----------------------+------------+------------+----------------------+------------+------------+ +| ------------------ | ------------------ | -------- | -------- | ------------------ | -------- | -------- | ------------------ | -------- | -------- | +| AUROC $\uparrow$ | $\textbf{1.000}$ | 0.330 | 0.522 | $\textbf{0.943}$ | 0.311 | 0.481 | $\textbf{0.956}$ | 0.381 | 0.477 | +| ------------------ | (0.00) | (0.08) | (0.16) | (0.01) | (0.08) | (0.09) | (0.02) | (0.13) | (0.07) | +| | ------------------ | -------- | -------- | ------------------ | -------- | -------- | ------------------ | -------- | -------- | 
++----------------------+----------------------+------------+------------+----------------------+------------+------------+----------------------+------------+------------+ +::: + +::: table* ++--------------------------+-----------------------+-----------------------------------------------------------------------------------------------------------+-----------------------------------------------------------------------------------------------------------+----------------------------------------------------------+ +| | | GA$^{1}$M | G$\text{A}^{2}$M | Black box | ++:========================:+:=====================:+:========================+:===============================+:=====================+:========================+:========================+:=====================+:========================+:===============================+:========================+:===============================+ +| Dataset | Measure | ------------ | ----------- | ----------- | ----------- | ------------ | ------------------ | ------------------ | ------------------ | ----- | ----- | +| | | ANOVA | NODE | NA$^{1}$M | NB$^{1}$M | ANOVA | NODE | N$\text{A}^{2}$M | N$\text{B}^{2}$M | XGB | DNN | +| | | T$^{1}$PNN | GA$^{1}$M | ----------- | ----------- | T$^{2}$PNN | G$\text{A}^{2}$M | ------------------ | ------------------ | ----- | ----- | +| | | ------------ | ----------- | | | ------------ | ------------------ | | | | | ++--------------------------+-----------------------+-------------------------+--------------------------------+----------------------+-------------------------+-------------------------+----------------------+-------------------------+--------------------------------+-------------------------+--------------------------------+ +| [Calhousing]{.smallcaps} | RMSE $\downarrow$ | -------- | -------- | -------- | -------- | -------- | -------- | -------- | ---------------------------- | ------------------ | -------- | +| | | 0.614 | 0.581 | 0.659 | 0.594 | 0.512 | 0.515 | 0.525 
| $\underline{\text{0.502}}$ | $\textbf{0.452}$ | 0.518 | +| | | (0.01) | (0.01) | (0.01) | (0.08) | (0.01) | (0.01) | (0.02) | (0.03) | (0.01) | (0.01) | +| | | -------- | -------- | -------- | -------- | -------- | -------- | -------- | ---------------------------- | ------------------ | -------- | ++--------------------------+-----------------------+-------------------------+--------------------------------+----------------------+-------------------------+-------------------------+----------------------+-------------------------+--------------------------------+-------------------------+--------------------------------+ +| [Wine]{.smallcaps} | RMSE $\downarrow$ | -------- | -------- | -------- | -------- | -------- | -------- | -------- | -------- | ------------------ | ---------------------------- | +| | | 0.725 | 0.723 | 0.733 | 0.724 | 0.704 | 0.730 | 0.720 | 0.702 | $\textbf{0.635}$ | $\underline{\text{0.696}}$ | +| | | (0.02) | (0.02) | (0.02) | (0.02) | (0.02) | (0.02) | (0.02) | (0.03) | (0.03) | (0.01) | +| | | -------- | -------- | -------- | -------- | -------- | -------- | -------- | -------- | ------------------ | ---------------------------- | ++--------------------------+-----------------------+-------------------------+--------------------------------+----------------------+-------------------------+-------------------------+----------------------+-------------------------+--------------------------------+-------------------------+--------------------------------+ +| [Online]{.smallcaps} | RMSE $\downarrow$ | ------------------ | ---------------------------- | -------- | -------- | ------------------ | -------- | -------- | -------- | -------- | -------- | +| | | $\textbf{1.111}$ | $\underline{\text{1.121}}$ | 1.350 | 1.187 | $\textbf{1.111}$ | 1.137 | 1.313 | 1.179 | 1.122 | 1.123 | +| | | (0.25) | (0.27) | (0.57) | (0.25) | (0.25) | (0.26) | (0.46) | (0.21) | (0.26) | (0.26) | +| | | ------------------ | ---------------------------- | -------- | 
-------- | ------------------ | -------- | -------- | -------- | -------- | -------- | ++--------------------------+-----------------------+-------------------------+--------------------------------+----------------------+-------------------------+-------------------------+----------------------+-------------------------+--------------------------------+-------------------------+--------------------------------+ +| [Abalone]{.smallcaps} | RMSE $\downarrow$ | -------- | -------- | -------- | -------- | --------------------- | -------- | -------- | -------- | -------- | ------------------ | +| | | 2.135 | 2.141 | 2.171 | 2.167 | $\underline{2.087}$ | 2.100 | 2.088 | 2.088 | 2.164 | $\textbf{2.071}$ | +| | | (0.09) | (0.09) | (0.08) | (0.09) | (0.08) | (0.10) | (0.08) | (0.08) | (0.09) | (0.10) | +| | | -------- | -------- | -------- | -------- | --------------------- | -------- | -------- | -------- | -------- | ------------------ | ++--------------------------+-----------------------+-------------------------+--------------------------------+----------------------+-------------------------+-------------------------+----------------------+-------------------------+--------------------------------+-------------------------+--------------------------------+ +| [FICO]{.smallcaps} | AUROC $\uparrow$ | --------------------- | --------- | --------- | --------- | ------------------ | --------- | --------------------- | --------------------- | --------- | --------- | +| | | $\underline{0.799}$ | 0.795 | 0.788 | 0.797 | $\textbf{0.800}$ | 0.793 | $\underline{0.799}$ | $\underline{0.799}$ | 0.796 | 0.793 | +| | | (0.007) | (0.009) | (0.006) | (0.006) | (0.007) | (0.007) | (0.007) | (0.008) | (0.008) | (0.008) | +| | | --------------------- | --------- | --------- | --------- | ------------------ | --------- | --------------------- | --------------------- | --------- | --------- | 
++--------------------------+-----------------------+-------------------------+--------------------------------+----------------------+-------------------------+-------------------------+----------------------+-------------------------+--------------------------------+-------------------------+--------------------------------+ +| [Churn]{.smallcaps} | AUROC $\uparrow$ | --------- | --------- | ------------------ | --------------------- | --------- | --------- | --------- | --------- | ------------------ | --------- | +| | | 0.839 | 0.824 | $\textbf{0.846}$ | $\underline{0.845}$ | 0.842 | 0.830 | 0.844 | 0.844 | $\textbf{0.846}$ | 0.842 | +| | | (0.012) | (0.012) | (0.011) | (0.012) | (0.012) | (0.011) | (0.011) | (0.011) | (0.012) | (0.013) | +| | | --------- | --------- | ------------------ | --------------------- | --------- | --------- | --------- | --------- | ------------------ | --------- | ++--------------------------+-----------------------+-------------------------+--------------------------------+----------------------+-------------------------+-------------------------+----------------------+-------------------------+--------------------------------+-------------------------+--------------------------------+ +| [Credit]{.smallcaps} | AUROC $\uparrow$ | --------- | --------- | --------- | --------- | --------------------- | ------------------ | --------- | ------------------ | --------- | --------- | +| | | 0.983 | 0.983 | 0.976 | 0.972 | $\underline{0.984}$ | $\textbf{0.985}$ | 0.980 | $\textbf{0.985}$ | 0.983 | 0.980 | +| | | (0.005) | (0.005) | (0.012) | (0.011) | (0.006) | (0.006) | (0.007) | (0.004) | (0.004) | (0.006) | +| | | --------- | --------- | --------- | --------- | --------------------- | ------------------ | --------- | ------------------ | --------- | --------- | 
++--------------------------+-----------------------+-------------------------+--------------------------------+----------------------+-------------------------+-------------------------+----------------------+-------------------------+--------------------------------+-------------------------+--------------------------------+ +| [Letter]{.smallcaps} | AUROC $\uparrow$ | --------- | --------- | --------- | --------- | --------- | --------- | --------- | --------- | ------------------ | ---------------------------- | +| | | 0.900 | 0.910 | 0.904 | 0.910 | 0.984 | 0.988 | 0.986 | 0.990 | $\textbf{0.997}$ | $\underline{\text{0.996}}$ | +| | | (0.003) | (0.002) | (0.001) | (0.001) | (0.001) | (0.001) | (0.001) | (0.001) | (0.001) | (0.001) | +| | | --------- | --------- | --------- | --------- | --------- | --------- | --------- | --------- | ------------------ | ---------------------------- | ++--------------------------+-----------------------+-------------------------+--------------------------------+----------------------+-------------------------+-------------------------+----------------------+-------------------------+--------------------------------+-------------------------+--------------------------------+ +| [Drybean]{.smallcaps} | AUROC $\uparrow$ | --------- | --------- | --------- | --------- | ------------------ | --------- | --------- | --------- | --------------------- | ---------------------------- | +| | | 0.995 | 0.996 | 0.996 | 0.994 | $\textbf{0.998}$ | 0.996 | 0.995 | 0.995 | $\underline{0.997}$ | $\underline{\text{0.997}}$ | +| | | (0.001) | (0.001) | (0.001) | (0.001) | (0.001) | (0.001) | (0.001) | (0.001) | (0.001) | (0.001) | +| | | --------- | --------- | --------- | --------- | ------------------ | --------- | --------- | --------- | --------------------- | ---------------------------- | 
++--------------------------+-----------------------+-------------------------+--------------------------------+----------------------+-------------------------+-------------------------+----------------------+-------------------------+--------------------------------+-------------------------+--------------------------------+ +| | Rank avg $\downarrow$ | -- | -- | -- | -- | ----------------- | -- | -- | -- | ----------------- | --------------------------- | +| | | -- | -- | -- | -- | $\textbf{3.11}$ | -- | -- | -- | $\textbf{3.11}$ | $\underline{\text{4.33}}$ | +| | | | | | | ----------------- | | | | ----------------- | --------------------------- | ++--------------------------+-----------------------+-------------------------+--------------------------------+----------------------+-------------------------+-------------------------+----------------------+-------------------------+--------------------------------+-------------------------+--------------------------------+ +::: + +# Method + +In this section, we conduct experiments to compare ANOVA-TPNN and Spline-GAM which estimates each component by using cubic B-spline basis functions. We evaluate the prediction performance and stability of the component estimation in ANOVA-TPNN and Spline-GAM on [Calhousing]{.smallcaps} dataset. We implement Spline-GAM using pygam python package [@serven2018pygam]. + +Table [1](#table:splinegam){reference-type="ref" reference="table:splinegam"} presents the results of the prediction performance and stability scores of ANOVA-TPNN and Spline-GAM on [Calhousing]{.smallcaps} dataset. A surprising result is that ANOVA-TPNN is superior to Spline-GAM in both of prediction performance and stability of component estimation. By investigating details of the empirical results, we find that Spline-GAM is vulnerable to input outliers. That is, when there is an outlier input (i.e. 
an input vector in the test data locating outside the range of input vectors in the training data), the prediction at the outlier could be inaccurate since the B-splie basis uses the linear extrapolation outside the domain of the training data. In contrast, the basis neural networks in TPNN use the sigmoid activation function which is bounded outside the range of input vectors and so robust to input outliers. The details of experimental results for Spline-GAM are presented in Appendix [20](#app:splin_gam){reference-type="ref" reference="app:splin_gam"}. diff --git a/2503.01723/paper.pdf b/2503.01723/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..7facab2a74dd8cb8780374b1bc4d2a8e1fc7950d --- /dev/null +++ b/2503.01723/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:026ff1f5ee2bebd00cbf72f320d25acce2aaa8561e3b533d7ddb5fdf2781f913 +size 5098692 diff --git a/2503.02463/paper.pdf b/2503.02463/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..678bcafd928aca654c18d34ad8913db6402feaac --- /dev/null +++ b/2503.02463/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:02f2511edce528c76ec49d2e4453dbe8e434b722a03019f2c836ee82f97c94ec +size 872967 diff --git a/2503.03663/paper.pdf b/2503.03663/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..e3af8114385efe4bb8d366576c291f0fe579ad8e --- /dev/null +++ b/2503.03663/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:16333d55840938bc41d3df31f189d87b9d829a298475dd0281c146c78fd424b8 +size 28747500 diff --git a/2503.05238/paper.pdf b/2503.05238/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..de6d332660daf6098b1ba4d7cefd73a9abf201f3 --- /dev/null +++ b/2503.05238/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4b966edbc11f7c9d98751578210ab67666f3010386234647e919fdbca2cb8d36 +size 299135 diff 
--git a/2503.06442/paper.pdf b/2503.06442/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..eaf19ffcd72b38796cb8f8ce26f2c452af282752 --- /dev/null +++ b/2503.06442/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8151f7c86b1d672578c016c776ee863e0c511835f812380b5e66c4e1efb629ff +size 11174807 diff --git a/2503.09958/paper.pdf b/2503.09958/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..b70f8d0ff2fc9040426840b18b9c03c2ec17a89b --- /dev/null +++ b/2503.09958/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:17cd872f55f12922988a473efce982e666da01c90d4aa24bd8361e1c2898342c +size 1171557 diff --git a/2503.14337/paper.pdf b/2503.14337/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..04918bf04256ca42ee0c5787bf2c414846991c00 --- /dev/null +++ b/2503.14337/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7a9fc5bfd0730773fddc5a7cf7593b51872a9fac54e0e1c138c83a272475a922 +size 2725628 diff --git a/2503.15420/paper.pdf b/2503.15420/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..c4b36e9018336304b23c3552337f2c34c757465e --- /dev/null +++ b/2503.15420/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d2339c2126db9e05010d2ceb3912baa84b69ed954839f4398af9f11def1e62ff +size 29902905 diff --git a/2503.16997/paper.pdf b/2503.16997/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..14928f1cfc6039c4e8927c4a4cb0ea70b82f7b43 --- /dev/null +++ b/2503.16997/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4f2f88c5658eca8ffa3893118f2a18ba8291239dad93e5126ab477ebb1bdda7f +size 8107868 diff --git a/2503.21730/paper.pdf b/2503.21730/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..31e8b31c468fc50f4a690429bd02c5694f6e32f6 --- /dev/null +++ b/2503.21730/paper.pdf 
@@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:687689f3fab47a76bd2df922ce7e32efb2000857eccd8646b1c7c278b1dec81c +size 12285194 diff --git a/2504.17261/paper.pdf b/2504.17261/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..cf87d9f3dd1c2a67fa237f6dd07b113f70742442 --- /dev/null +++ b/2504.17261/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:914571d192ec8967c32c86c976b19792e211b565a818fc87a35ea0a5013eb25c +size 9362075 diff --git a/2505.10518/paper.pdf b/2505.10518/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..d10bdbfbcf7ecf701c5aae1a228087773924ecc7 --- /dev/null +++ b/2505.10518/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:babeca693cb76977aa121f2bd289327a58193c9056968aa3240ee563556db04a +size 472222 diff --git a/2505.20728/paper.pdf b/2505.20728/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..3403bf0f3d28b055c07aaa68f26a2cdce557c661 --- /dev/null +++ b/2505.20728/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:16a114bef79bd99ac0f7d3682fe0ce9c2a9ad4f79e43686a5088ce128dbc9564 +size 4783137 diff --git a/2506.11045/paper.pdf b/2506.11045/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..83588fb51c3516573cf52ec14e3405f9027b515b --- /dev/null +++ b/2506.11045/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:16c0fb16c0826aa9181b83d998f083967a49b0033692b344502d849417769cb5 +size 826367 diff --git a/2509.07908/paper.pdf b/2509.07908/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..3cfea6e44b1ef8961b8ed80b772abeb8ec130b91 --- /dev/null +++ b/2509.07908/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9ea5a7de15674e9c8b032a32951d65bec9894893530b8e269ca82aeaf8bb68b0 +size 4643817 diff --git a/2509.10833/paper.pdf 
b/2509.10833/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..e331390f6bac06ef7b6b124dad7948bb43b0fe7b --- /dev/null +++ b/2509.10833/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:77be4aae4f3f180a40a7be68dc259c0983afa922c6f51e9edebe7cb2ac2096d5 +size 3256842 diff --git a/2509.16060/paper.pdf b/2509.16060/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..81667f2a3e439678cecdd1b768db2bed7dff1eb2 --- /dev/null +++ b/2509.16060/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fda02acc0d76cf2e71fefd627d8e3c90663a6c65e621032a6d6a3621be34c475 +size 617634 diff --git a/2509.17430/paper.pdf b/2509.17430/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..cdc9d698a205270f7b59591f201a4080477e049d --- /dev/null +++ b/2509.17430/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8d26968a60a876e74641203a11757f112f5b8a7f75cfe2b68f45a42a53ba3699 +size 10972242 diff --git a/2509.23898/paper.pdf b/2509.23898/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..8e6238a70c5d0b9253302fca73c956fb4b11450f --- /dev/null +++ b/2509.23898/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c3c143c27d41541d9f3edfc74ec56b8644511c80c718a82154a41f76c0c79584 +size 2265924 diff --git a/2510.12697/paper.pdf b/2510.12697/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..1a75015d92ad816d189a4cb983119c115f85629e --- /dev/null +++ b/2510.12697/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:680c97c7829aa2ab219112d19100fc21f3ad9c5eaded9f0141987597f43039e2 +size 4843497 diff --git a/2511.03107/paper.pdf b/2511.03107/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..420f8454b363ffdf5b026fe8976e112bfa038b5d --- /dev/null +++ b/2511.03107/paper.pdf @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:e0cb1c21ea00e4351b2028961ec964be7b09a8b44287926d234f4a4b4d156dc2 +size 368084