Add files using upload-large-folder tool
This view is limited to 50 files because it contains too many changes. See raw diff.
- .gitattributes +172 -0
- 2006.08085/paper.pdf +3 -0
- 2006.08228/paper.pdf +3 -0
- 2006.11132/paper.pdf +3 -0
- 2009.11340/main_diagram/main_diagram.drawio +1 -0
- 2009.11340/main_diagram/main_diagram.pdf +0 -0
- 2009.11340/paper_text/intro_method.md +50 -0
- 2009.11355/main_diagram/main_diagram.drawio +1 -0
- 2009.11355/main_diagram/main_diagram.pdf +0 -0
- 2009.11355/paper_text/intro_method.md +71 -0
- 2010.05315/main_diagram/main_diagram.drawio +1 -0
- 2010.05315/main_diagram/main_diagram.pdf +0 -0
- 2010.05315/paper_text/intro_method.md +86 -0
- 2011.00105/paper.pdf +3 -0
- 2011.07387/main_diagram/main_diagram.drawio +1 -0
- 2011.07387/main_diagram/main_diagram.pdf +0 -0
- 2011.07387/paper_text/intro_method.md +36 -0
- 2012.07489/paper.pdf +3 -0
- 2101.08779/main_diagram/main_diagram.drawio +0 -0
- 2101.08779/paper_text/intro_method.md +25 -0
- 2103.00180/paper.pdf +3 -0
- 2103.03501/paper.pdf +3 -0
- 2104.03962/main_diagram/main_diagram.drawio +0 -0
- 2104.03962/paper_text/intro_method.md +56 -0
- 2104.09379/main_diagram/main_diagram.drawio +1 -0
- 2104.09379/main_diagram/main_diagram.pdf +0 -0
- 2104.09379/paper_text/intro_method.md +71 -0
- 2105.01294/main_diagram/main_diagram.drawio +1 -0
- 2105.01294/main_diagram/main_diagram.pdf +0 -0
- 2105.01294/paper_text/intro_method.md +25 -0
- 2106.01425/paper.pdf +3 -0
- 2106.02740/paper.pdf +3 -0
- 2106.13948/paper.pdf +3 -0
- 2107.05757/main_diagram/main_diagram.drawio +1 -0
- 2107.05757/main_diagram/main_diagram.pdf +0 -0
- 2107.05757/paper_text/intro_method.md +14 -0
- 2108.02180/paper.pdf +3 -0
- 2108.08815/paper.pdf +3 -0
- 2108.12296/main_diagram/main_diagram.drawio +1 -0
- 2108.12296/main_diagram/main_diagram.pdf +0 -0
- 2108.12296/paper_text/intro_method.md +77 -0
- 2109.10637/paper.pdf +3 -0
- 2110.03753/paper.pdf +3 -0
- 2110.13903/paper_text/intro_method.md +93 -0
- 2110.14468/main_diagram/main_diagram.drawio +1 -0
- 2110.14468/main_diagram/main_diagram.pdf +0 -0
- 2110.14468/paper_text/intro_method.md +74 -0
- 2111.08919/paper.pdf +3 -0
- 2111.13415/main_diagram/main_diagram.drawio +1 -0
- 2111.13415/main_diagram/main_diagram.pdf +0 -0
.gitattributes
CHANGED
@@ -826,3 +826,175 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 2405.02842/paper.pdf filter=lfs diff=lfs merge=lfs -text
 2304.04970/paper.pdf filter=lfs diff=lfs merge=lfs -text
 2412.01007/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2303.16342/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2310.15066/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2106.13948/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2503.18933/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2106.01425/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2202.08205/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2409.10362/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2408.11194/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2211.02580/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2201.12426/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2203.00048/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2212.04088/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2304.12477/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2305.04091/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2205.14571/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2403.01058/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2409.03424/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2405.18065/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2201.07745/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2502.17258/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2405.16738/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2312.08870/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2207.03411/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2502.15215/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2305.15523/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2112.03002/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2006.08228/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2309.17249/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2505.12531/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2503.17709/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2405.20935/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2006.08085/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2412.14925/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2307.12935/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2403.01944/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2305.10135/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2311.09214/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2410.07383/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2112.08907/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2212.09955/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2109.10637/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2302.11864/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2410.21157/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2402.07398/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2310.08540/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2303.09093/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2202.11781/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2502.14573/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2205.02455/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2310.18659/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2407.07778/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2406.17617/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2211.09231/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2412.12614/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2211.13854/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2406.03818/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2312.02647/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2505.21755/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2303.10752/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2210.07920/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2103.00180/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2304.07039/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2312.10325/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2409.01073/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2410.11473/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2505.15304/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2310.19188/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2410.13805/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2211.10435/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2404.12130/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2406.08723/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2410.13032/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2011.00105/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2503.19604/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2312.03431/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2405.08205/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2305.14331/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2103.03501/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2304.02152/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2110.03753/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2403.09930/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2106.02740/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2412.03512/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2206.11474/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2506.00129/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2401.16265/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2309.13775/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2305.00477/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2405.15194/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2406.12203/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2302.05446/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2404.11225/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2303.12564/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2308.15466/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2303.16445/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2305.02219/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2203.16421/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2501.13987/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2309.12075/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2211.03524/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2402.11291/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2501.05687/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2402.11435/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2108.08815/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2310.17075/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2506.17046/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2209.10492/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2505.06333/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2403.09193/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2201.11192/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2307.03591/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2407.12568/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2205.11775/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2207.01769/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2309.16292/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2207.09090/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2202.01079/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2404.18655/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2510.08669/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2210.06041/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2211.10282/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2012.07489/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2111.08919/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2308.09694/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2311.16511/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2408.04102/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2305.05402/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2405.06884/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2506.18520/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2203.15996/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2503.02875/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2405.20003/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2403.13788/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2504.09623/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2203.13685/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2510.05903/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2407.09115/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2312.05784/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2305.19742/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2205.11048/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2305.19162/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2203.13560/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2406.19226/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2410.18935/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2410.10834/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2207.13440/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2402.19215/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2506.21046/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2311.17042/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2308.03654/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2205.15730/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2205.07872/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2209.00465/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2108.02180/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2302.10145/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2312.13068/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2308.04662/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2509.20549/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2205.08714/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2112.01565/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2509.23659/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2203.15235/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2402.14361/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2305.14286/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2402.07834/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2006.11132/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2209.13430/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2112.05364/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2407.02685/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2308.02898/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2309.06891/paper.pdf filter=lfs diff=lfs merge=lfs -text
+2402.16105/paper.pdf filter=lfs diff=lfs merge=lfs -text
2006.08085/paper.pdf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:720a7e94166b0472cb105059d7edde5f49706d6a5fed16d23748ea5cfce6a363
size 1679641
2006.08228/paper.pdf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:fec9f371bde51b0c0c034a51ea4292a6842b0aa33db71a0363a3f39ff1464b73
size 948358
2006.11132/paper.pdf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:faf6f922f2a85091a61bf03c135eaf326f774a87a504d5270b133f5c00f08389
size 5322400
2009.11340/main_diagram/main_diagram.drawio
ADDED
@@ -0,0 +1 @@
<mxfile host="www.draw.io" modified="2019-12-06T14:17:11.578Z" agent="Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.70 Safari/537.36" etag="icjpLd0yamO5-mmvDcXq" version="12.3.7" type="device" pages="1"><diagram id="fNQ16oQlGKagVx_b5vSC" name="Page-1">7Zxbb5swGIZ/DdJ2sSpgjpdNelTXaVovql1NDphgzeAInOOvn2lMAjGbmiUUIzk3wZ+NwX4fyMcbJwaYpOv7HM6TZxohYlijaG2AG8OyTMfz+FsZ2ewinu/vArMcR6LRIfCCt0gERyK6wBEqGg0ZpYTheTMY0ixDIWvEYJ7TVbNZTEnzqHM4Q1LgJYREjr7iiCW7qG95h/gDwrOkOrLpBruaFFaNxUiKBEZ0VQuBWwNMckrZbitdTxApJ6+al91+d3+p3Z9YjjL2nh2Cwls+/Xwt/O0P+O2pQA83v+Ivtr3rZgnJQozYcMbP1y9PhnMjzpttqsnI6SKLUNnfyADjVYIZepnDsKxdcfl5LGEp4SWTb8Y0Y0JPUDaPMSETSmj+1heI49gKQx4vWE5/o1pN5E5dx616EEeveryDKSYlSw+ILBHDIeQVBE4R+U4LzDDNeF3IpwTx3sZLlJdNyNejBimOorLbMSR41rrHtajYt0zpEk73p5KjAm/rZcogq5X5ZYDqZRThelHQWovIagqByxNC61pIqHuPaIpYvuFN1hWUYhdxqVnA2ZVXB3DNisakBq0rYlBcK7N91wec+IYg6gS6TAmuT4s0/XwmV02OIgf5kd3GkW9Ngeu2kai5OoUrywsaXPl9Y2VJWD1qpAaFlG0CtZCSPwZZQhflcTVYQwLLsX21wHJbwMKFpmpQVLm+rRZVvkQVn0SMNFaDwsqvmFEFq+qhu8bVCuqb1bCoChxXMarkfD1HkJCNBmtQYJmjwFKMLDltn8JIYzUsrDggimElJ+1XGqphQQVcxRwGS86tznWt/m2wI5Nj5rVBFrgegNpgP9sINStY+oNKdtgv41tptPo1RBVAC7SgdbZzpbnq1w9VgCtH4uoS3pUGq19LVAGwPAms880rjVW/lqgCWAUSVhdxrzRZ/dqi/ZMF5OT9fPdKY9WzKaoAV3Lmfq59panq2RNVgCo5vXo0LBemJSvZtJjvR94RZRFEfty6YNQNfTSNNWWnUmb5zWSr/wWjQF7Y8BF+lkbr8n6WrRhatuy/d+5naa4u72cFqnFlSVx172dpsC7vZzmqgSV/C921n6Wxurzr4KmGlZxifYCfpcnqwHgAiqHlyClW14aW5qoD60G1W5Yjp1jdGlqaqg7WY6n2QBjIVJkyVnx8rMnO+2U5FmJOccbehuGMy59X85nl3S0Y136LWsi0Wjg7kloBabmSV6Paq/kj5ECW2W5R2e5MZXmJcMvNQ6t8qsp283HJdP2edZaXlQCt83/obB95wrYnCVvduuvCVs7tCcLy4uHfL97qav8hAm7/AA==</diagram></mxfile>
2009.11340/main_diagram/main_diagram.pdf
ADDED
Binary file (11.6 kB)
2009.11340/paper_text/intro_method.md
ADDED
@@ -0,0 +1,50 @@
# Introduction
Disfluencies are interruptions in the regular flow of speech, such as pausing silently, repeating words, or interrupting oneself to correct something said previously (Fraundorf et al., 2018). They commonly occur in spoken language, as spoken language is rarely fluent. Fillers are a type of disfluency that can be a sound ("um" or "uh") filling a pause in an utterance or conversation.
Recent work has shown that contextualised embeddings pre-trained on large written corpora can be fine-tuned on smaller spoken language corpora to learn structures of spoken language (Tran et al., 2019). However, for NLP tasks, fillers and all disfluencies are typically removed in pre-processing, as NLP models achieve highest accuracy on syntactically correct utterances. This contradicts linguistic studies, which show that fillers are an essential and informative part of spoken language (Clark and Fox Tree, 2002; Yoshida and Lickley, 2010; Brennan and Williams, 1995; Corley et al., 2007; Stolcke and Shriberg, 1996).
So far, the information carried by fillers has only been studied using hand-crafted features, for example in Le Grezause (2017); Saini (2017); Dinkar et al. (2020). Moreover, Barriere et al. (2017) show that pre-trained word embeddings such as Word2vec (Mikolov et al., 2013) have a poor representation of spontaneous speech words such as "uh", as they are trained on written text and do not carry the same meaning as when used in speech. We address the matter of representing fillers with deep contextualised word representations (Devlin et al., 2019), and investigate their usefulness in NLP tasks for spoken language, without handcrafting features.
Hence, the present work is motivated by the following observations: (1) Fillers play an important role in spoken language. For example, a speaker can use fillers to inform the listener about the linguistic structure of their utterance, such as in their (difficulties of) selection of appropriate vocabulary while informing the listener about a pause in their upcoming speech stream (Clark and Fox Tree, 2002). (2) Fillers and prosodic cues have also been linked to a speaker's Feeling of Knowing (FOK) or expressed confidence, that is, a speaker's certainty or commitment to a statement (Smith and Clark, 1993). Brennan and Williams (1995) observed that fillers and prosodic cues contribute to the listener's perception of the speaker's expressed confidence in their utterance, which they refer to as the Feeling of Another's Knowing (FOAK), also observed by (Wollermann et al., 2013). (3) Recent work has shown that fillers have been successful in stance prediction (stance referring to the subjective spoken attitude towards something) (Le Grezause, 2017).
**Aim of this work:** We want to verify that these observations are still valid when we represent fillers in an automatic and efficient way. Hence, our contributions are as follows: (1) Fillers contain useful information that can be leveraged by deep contextualised embeddings to better model spoken language and thus should not be removed. In addition, we study which filler representation strategies are best suited to our task of Spoken Language Modelling (SLM) and investigate the learnt positional distribution of fillers. (2) We show that in a spontaneous speech corpus of spoken monologues, fillers are a discriminative feature in predicting the perception of expressed confidence of the speaker, and the perception of a speaker's stance (which we measure by sentiment).

<sup>\*</sup>Equal contribution
For our work, we consider the two fillers "uh" and "um" (see subsection 2.2). To obtain contextualised word embeddings for fillers, we use bidirectional encoder representations from transformers (BERT) (Devlin et al., 2019), as it has achieved SOTA performance on several NLP benchmarks and is better than Word2Vec for word sense disambiguation by integrating context (Bartunov et al., 2015).
For SLM, we use the masked language modelling objective (MLM). It consists of masking some words of the input tokens at random, and then predicting these masked tokens. The MLM objective is classically used to pretrain and then fine-tune BERT. Here, we use this MLM objective to fine-tune a pretrained BERT on a spoken language corpus (see subsection 2.2). Each experiment requires a token representation strategy $\mathcal{T}_i$ and a pre-processing strategy $\mathcal{P}_{Si}$ (additional details are given in the algorithm 1 in Supplementary).
The **token representation strategies** are particularly important for our task, for BERT to learn the distribution of fillers. The three token representation strategies $(\mathcal{T}_1, \mathcal{T}_2, \mathcal{T}_3)$ are described as follows: In $\mathcal{T}_1$, no special treatment is applied to the fillers<sup>1</sup>, i.e. BERT will use its a priori knowledge of the fillers "uh" or "um" to model the language. In $\mathcal{T}_2$, "uh" and "um" are distinguished from other tokens by a special filler tag, and are represented as two different tokens respectively; this strategy aims at forcing BERT to learn a new embedding that focuses both on the position and the context of the fillers. In $\mathcal{T}_3$, both fillers are represented as the same token, suggesting that they have the same pragmatic meaning and are interchangeable. A concrete example is given in Table 1.
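As an illustration only, the following minimal Python sketch applies the three strategies to a whitespace-tokenised utterance; the helper and the `[FILLER_*]` strings are hypothetical, and in practice such strings would have to be registered as additional special tokens of the BERT tokenizer.

```python
# Hypothetical helper, not the paper's code: maps fillers according to T1/T2/T3.
FILLERS = {"uh", "uhh", "um", "umm"}

def apply_strategy(tokens, strategy):
    out = []
    for tok in tokens:
        word = tok.lower().strip("()")
        if word in FILLERS:
            if strategy == "T1":              # keep the filler as an ordinary word
                out.append(word)
            elif strategy == "T2":            # one special token per filler type
                tag = "UMM" if word.startswith("um") else "UHH"
                out.append(f"[FILLER_{tag}]")
            elif strategy == "T3":            # one shared special token for all fillers
                out.append("[FILLER]")
        else:
            out.append(tok)
    return out

sentence = "(umm) Things that (uhh) you usually wouldn't find funny were in this movie ."
print(apply_strategy(sentence.split(), "T2"))
```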
Pre-processing strategies, $(\mathcal{P}_{S1}, \mathcal{P}_{S2}, \mathcal{P}_{S3})$ , are as follows: In $\mathcal{P}_{S1}$ , the sentences have all fillers removed, both during training and inference. In $\mathcal{P}_{S2}$ , the sentences have the fillers kept during training, but are removed at inference. In $\mathcal{P}_{S3}$ , the fillers are kept both during training and inference. For each pre-processing and token representation strategy, we optionally fine-tune BERT using the same Masked Language Model (MLM) objective as in the original paper (Devlin et al., 2019). Note, if we do not fine-tune, the training dataset $(\mathcal{D}_{train})$ is not used and therefore $\mathcal{P}_{S1}$ and $\mathcal{P}_{S2}$ are equivalent. For language modelling we report the perplexity (ppl) measure to evaluate the quality of the model.
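A hedged sketch of what this MLM fine-tuning step could look like with the HuggingFace `transformers` library is shown below; the toy corpus, output path, and hyperparameters are placeholders rather than the configuration used in this work.

```python
from transformers import (BertForMaskedLM, BertTokenizerFast,
                          DataCollatorForLanguageModeling, Trainer, TrainingArguments)
from datasets import Dataset

tokenizer = BertTokenizerFast.from_pretrained("bert-base-uncased")
# Strategy T3 needs a shared filler token registered with the tokenizer (assumption).
tokenizer.add_special_tokens({"additional_special_tokens": ["[FILLER]"]})

corpus = Dataset.from_dict(
    {"text": ["[FILLER] things that [FILLER] you usually would not find funny"]})
tokenized = corpus.map(lambda ex: tokenizer(ex["text"], truncation=True),
                       remove_columns=["text"])

model = BertForMaskedLM.from_pretrained("bert-base-uncased")
model.resize_token_embeddings(len(tokenizer))   # account for the added special token

trainer = Trainer(
    model=model,
    args=TrainingArguments(output_dir="mlm-fillers", num_train_epochs=1),
    train_dataset=tokenized,
    # The MLM objective: mask a fraction of input tokens and predict them.
    data_collator=DataCollatorForLanguageModeling(tokenizer, mlm_probability=0.15),
)
trainer.train()
```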
In both our confidence prediction and sentiment analysis task, our goal is to predict a label of confidence/sentiment using our BERT text representations that include fillers. Formally, our confidence/sentiment predictor is obtained by adding a Multi-Layer Perceptron (MLP) on top of a BERT, which has been optionally fine-tuned using the MLM. The MLP is trained by minimising the mean squared error (MSE) loss (additional details are given in algorithm 2 in Supplementary). We keep the same token representation and pre-processing strategies from Section 2.1.1.
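A minimal sketch of such a predictor, assuming a PyTorch/`transformers` setup with illustrative layer sizes (not the exact architecture or training loop behind the reported results), could look as follows:

```python
import torch
import torch.nn as nn
from transformers import BertModel, BertTokenizerFast

class BertRegressor(nn.Module):
    """BERT encoder (optionally MLM fine-tuned) with an MLP regression head."""
    def __init__(self, encoder_name="bert-base-uncased", hidden=128):
        super().__init__()
        self.bert = BertModel.from_pretrained(encoder_name)
        self.mlp = nn.Sequential(
            nn.Linear(self.bert.config.hidden_size, hidden),
            nn.ReLU(),
            nn.Linear(hidden, 1),
        )

    def forward(self, input_ids, attention_mask):
        out = self.bert(input_ids=input_ids, attention_mask=attention_mask)
        cls = out.last_hidden_state[:, 0]        # [CLS] representation of the utterance
        return self.mlp(cls).squeeze(-1)

tokenizer = BertTokenizerFast.from_pretrained("bert-base-uncased")
model = BertRegressor()
batch = tokenizer(["um things that uh you usually would not find funny"],
                  return_tensors="pt", padding=True)
pred = model(batch["input_ids"], batch["attention_mask"])
loss = nn.MSELoss()(pred, torch.tensor([5.1]))   # confidence/sentiment label on a 1-7 scale
loss.backward()
```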
We use the Persuasive Opinion Mining (POM) dataset (Park et al., 2014), a dataset of 1000 English monologue videos. Speakers recorded themselves giving a movie review, freely available on ExpoTV.com. The movies were rated from 1 star (most negative) to 5 stars (most
<span id="page-1-1"></span><sup>&</sup>lt;sup>1</sup>It is interesting to note that BERT provides embedding for "uh" or "um" despite being trained on written text (Wikipedia, BooksCorpus (Zhu et al., 2015), Word Benchmark (Chelba et al., 2014).
<span id="page-2-0"></span>
| Token. | Output Tokenizer |
|-----------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| Raw | (umm) Things that (uhh) you usually wouldn't find funny were in this movie. |
| $\mathcal{T}_1$ | ['umm', 'things', 'that', 'uh', 'you', 'usually', 'wouldn', "'", 't', 'find', 'funny', 'were', 'in', 'this', 'movie', '.'] |
| $\mathcal{T}_2$ | ['[FILLER<sub>UMM</sub>]', 'things', 'that', '[FILLER<sub>UHH</sub>]', 'you', 'usually', 'wouldn', "'", 't', 'find', 'funny', 'were', 'in', 'this', 'movie', '.'] |
| $\mathcal{T}_3$ | ['[FILLER]', 'things', 'that', '[FILLER]', 'you', 'usually', 'wouldn', "'", 't', 'find', 'funny', 'were', 'in', 'this', 'movie', '.'] |
Table 1: Filler representation using different token representation strategies
positive). Annotators were asked to label the video for high-level attributes. For confidence, annotators (3 per video) were asked "How confident was the reviewer?", and had to each give a label respectively; from 1 (not confident) to 7 (very confident), after watching the entire review. Similarly for sentiment, the annotators were asked "How would you rate the sentiment expressed by the reviewer towards this movie?", and were asked to give a label from 1 (strongly negative) to 7 (strongly positive).
We choose this dataset for the following reasons: (1) The corpus has been manually transcribed with fillers "uh" and "um", where $\approx 4\%$ of the speech consists of fillers (for comparison, the Switchboard (Godfrey et al., 1992) dataset of human-human dialogues, consists of $\approx 1.6\%$ of fillers (Shriberg, 2001)). Sentence markers have been manually transcribed, with the practice of the filler being annotated sentence-initially, if the filler occurs between sentences. (2) The dataset consists of monologues, where the speaker is conscious of an unseen listener, but dialogue-related disfluencies (such as backchannels) are not present, allowing us to concentrate on fillers of the narratives of the speaker (Swerts, 1998). (3) Only reviews with a 1-2 star or a 5 star rating were chosen for annotation, to clearly demarcate sentiment/stance polarity. (4) FOAK, which we measure by the given label of confidence, has been annotated with high inter-annotator agreement (Krippendorffs alpha = 0.73).
Details can be found in the supplementary material and in Park et al. (2014). Confidence labels are obtained by taking the root mean square (RMS) value of the labels given by the 3 annotators<sup>2</sup>. Sentiment labels are calculated by taking the mean of the 3 labels, which were
obtained from Zadeh (2018a)<sup>3</sup>.
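For concreteness, the label aggregation can be sketched as follows (the annotator values are made up for illustration):

```python
import math

# Confidence label = root mean square of the three annotator labels;
# sentiment label = mean of the three annotator labels.
annotator_confidence = [5, 6, 4]
annotator_sentiment = [6, 7, 6]

confidence_label = math.sqrt(sum(x ** 2 for x in annotator_confidence) / 3)
sentiment_label = sum(annotator_sentiment) / 3
print(round(confidence_label, 3), round(sentiment_label, 3))
```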
2009.11355/main_diagram/main_diagram.drawio
ADDED
@@ -0,0 +1 @@
<mxfile host="app.diagrams.net" modified="2020-06-01T17:36:08.042Z" agent="5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/13.1 Safari/605.1.15" etag="7qpNHCjvN8bAJKpLWiQr" version="13.1.12" type="google"><diagram id="X05EIip6syMM4KoI7kOn" name="Page-1">7V3ddtpGEH4aX1pn/38uYztJfZqkbt00ae8EyKAGEBEixn367oIE0moNC0hCELhIYIFFnvnm25nZ2dEVvh3N38f+ZPAx6gXDKwR68yt8d4UQhIKp//TIy3KEC74c6MdhL/3QeuAx/C9IB0E6Ogt7wbTwwSSKhkk4KQ52o/E46CaFMT+Oo+fix56iYfFXJ34/KA08dv1hefRL2EsGy1FBwXr8lyDsD7JfhiB9Z+RnH04HpgO/Fz3nhvDbK3wbR1GyfDaa3wZDLbxMLsvvvXvl3dWFxcE4cfmC7HzgnV96HUz7f0wfog/Jlw67Xl3uD384S//kK8SGasabp0hNrK47eUmFwb7PouyN6+lCVW/UB5CYzNdvqmd9/f+3bBp1RcuZluOpMFaToiSY6/FBMhqqAaie+sOwP1bPu+ovC2I18COIk1Dp5E36xijs9fTXb+JAXYbfWUwF1OtJFI6ThcbpzRW903PNkmh5qYup9ZWkIEM4ff3OH4VDDc8/w5GCGgKfgmf17x/RyB+vLjgv51T0+qqCeW4olfv7IBoFSfyiPpK+ixj1IJHrx3KC1CRwhvXnNcBwOjTIYYukY34K6f7qd9ZaV09Sxe8EAmwBgaGmYNx7o+1JvRpH46CoMSWa+OWrVoIHaPb678VrjnE2cDdP1bR89ZJ/9RDESvoLdS8Gp0kcfVuZHNqkh6BXMOGyFnJypRa5ZmNxMPST8EfR8G3CTn/hQYNtrWTGSVGtgniZprNZptEs7gbpF/O2as4lRXEuDj1GwephXFzix/0gKU27AMNKDofgg27Hh6K3iX46COZ+P9KGM8mpNB1daVkr9HkQJsHjxO/qrz2r5aOIqThKlDr0THfXeEdIuJsmFYbWOFppLYcbacGNqM8ehYM9DodqDQy2y9GfTpYL41M4D3qa88Lh8DYaRvFiItwTAHC8km/unTcKcQSkLJkbf1o8qpE/o9ywGmCTP7LIH9Umf8iq40Np8iHdRofzMPmae/537vn6K/rFmREox9yTSEiMoBAQExMYyjBh6d1dmZVnus0xq8nSddMptvlcu8CrqFtqs913i8cmrS8lll7nR0G/w66cfP32GX7+J/o+v7+/zuyrJfCgGQenmiOYeuZC6IqC0lwAlOaqHQXwNFCA2oUCIQ0UHOBlIQMFFDXNBYjt5np3h/50GnZtQMipfekkliDiGPDkYiQoTmDdIMplwEV/mRCwNyooK08Hy9PVDgxyAcbhwCiqUTKPiL1RUZiLItY8JGxh2G65GgxsuZpsltCWvFkNXg+iiZpirBXVieJBFPUuOZ412ECRM5Bk1kCGNZnZQcwWyVQBmYmS+MI0EUjicDIMlEpaDoYFZzUDBsppkS0y0y2EtBYk4PqQwB3Wk/PIKaDMlU+lz1g5wdpoQgHxw5kbMZsZoiIpz+LWW2GTlAxxEQfy6FbID+djOxBSpSkkRL3ggoE1F6BivofRMgYgaRYDNiauAgPwQgY7kIEFCE2TgawJCOQChE07DkXfrAWrQhanVg4EfAHCDkBokhEe3g4/3ox699wfffo8+gwoxOIaHQwDe7A2Dvr+JVh7DQcii4SyLSEArZE7bBIKLjuQ5xGtMVrcgacYephbLLEs/SoCNqv0ocvezJmIXxbRT2QLxG8jQlP8G/ffsz10j17ttIue+1K2Zb/az39lv36HjfZlgtgBdy1JoHNQxMbCNFmpjm33PfiixS82cIz9/Opy6XZB77a7UtPma0vULGExX4Yl33v3TBKDTiCtcZ/ErluX6ikn9tidO2BNzJHfpt/kL7QEURwVEUUhKVm48y49lh7gEDAudDk5xcbMwiNk/S5qFmq2CP51qF02aS1QMaIwTPfcoRW6Hij3MDiNljmtZnAQF3CchxMpCfOoUdVuL6Rt1I+kldVxFng95yA6M3vPnw603hYz7U/zGaZaYrzKc/CMesqsnnpn85Xlqeosx7IDZjfX4cLnNj43CmWgJt71Y292t03bLDgui/3hfME8UlyYiXLg6L7BhlHMTcwjGTUjgtny9gYi4mg27q2Yf8sqX8FSLAj3DN+HCLyqaMxveyqHKTPHvK5Z2bKqS2vaEmvLTPR0oiC9d4q7r4+faqCPOkGvF477V9qrWM7cibNPjZbnU1ep7+Vv/sSp7yzTlrEqt1TK2M4istry3mi7SSVx6I/7QwevOX+GDJad5kDFpTnyzb1zg1JartFptlgqBdTmNEPp0bIOssqG6nXgUCq2lw6EbJsOpBI4KgaghMs26MDBE93TDgQoawEA4du1AIAihbq1wLBHiuFjSyzBweU7Gy0I41CUrTCiYflzBwfrXFYDST1q1K3S8j5c0wqwLcdVVKaE48ksaXsRQoPFKJJsL0/jTRYgcNvBtCo0H82Si+qLqjcL0nBJ9aJR1V+Onh2a6IAAGPkqBKtIg0EgbPMWJ6s567HhMMNhMXwS++E46F3pXlDLaL4cwefie/Mt9b8/0kv+uDOdXIL9Es9AwM1Ih1vzQjayqaLZiR1NtVa6VULPRo4EWcsDbdWBsDahNXqYyw/EU9fG6awrgk5VMSAyxAxbIOaDK0kqqAs5bMMwM6+tpWcZpFqygEpuZMbw/if61aJpFpw1vmiKWitKq7A/s9avBTQnbNFnXTQXwB4NuI3mJOPYZxWF+cIQcwtoTlTW/u9oNJeZ11aay8TfEpoTskqaA8enuWrral+rxszG7dWYB6yW3BVG7Qo3pUAehxRhDKRuQVvcFqcEeIgzILFy2QjZt2mWBEaKHtcJr8lfd++xeP/PJzr6/jx57OOXH2+vD0XXpU8pQhhRDxdZhyhNkj1ZBxPpKdStH0aMCYUHm+hcasWLQ2x5on1LES5rEcqVFhtoXWoV+M9T8oqBKCkAcZsC6ip5tSogu6S8wBXvPKYvozgZRArQ/vDtevRmXaOkhbb+zIcomqTK+DdIkpc0Z6WzWVcO5602r9JlolYj70L951bOrIdSpuQbOY4cQJ9bpmZ804mrmukTuqSZLn1wq16gpXLX8LrTLTO8eiQ8cngjXMw2A0+fq2kWbeiEqMt+VBRsg3R1lJePUOzSdD0pcOhOlxSbcKRPfjlyY2U4cklnHDPVBgkruQ6cHN11QJVFV9dAn9gz+J7vlg9CrxF62+kbAe6pkBtxKBdmYTSgootW01wwAoiOizDdj7+VA+oJkdtQLrZLJ4J7Eh4r7kIHH7xq5wH+lkAM042cSwDf3x/dPDUW5anrxlKtMWUVdC5KkSDNOsodLQysNg+LciYJ
nQ2y7WakGNjDInd23DAjw9HOyrJ2NSmCqJcVEq+mru2gkh0Oh/KxGxycvWYASdE7wGiv7OurCMtn+jc6iW2BYhkj1MSI8z6Q4iOa8wyMBj/aAZG5jCygdUHR2uxO1uqdF45ZW2oQGQNm3X0K9pqytErWxfAd0/LSABn2oOUkXn78oK6Dtht7uNzkqPLkuJHCZcCX0Lrnju44q7vk3uwNSxCzBmFyZZqVd4u2KsYh63bmimHUI4ZuJGqFbhwqws5bN7orQ2nfA7dCNw6BwpnrhmBPFHWj07wt0M3qbzlqv7V80vRyy7Nab3lmle6hdWbn1HSv0huZQeN+VbS28HLjPeJ+XuZlRnd5QmgraNfh/Ox5K8a8Ew/mwtqWrGnFHOOOyq1SDDcaOmFuv19W04r56aNioZsxGE7kK738mtaNQ/B1YE0ZABQET/bcEby7ua07tuIepLnMnbGnx6x9nGq7lZVVBw5B1knrACHDl+L24LZJobvcMPqkhU7MrRdgX6UbFbqDU3vSQme6gsDoONLknfKsQnfYtzzv5RcL6hWNgQOLUupbeh/++zC5Htz8PuGzX39//Osh/jj7du2QvokH0agzm+4s7gB3xSttd4Dep6o71UzUkmuQD7GRj60NXiUtOGzydsiUnKa8sd5zN9LH9hRlo/J2oJ0TlTdBnthUKnR8rDuEwqcpewaRuamvy6uOjXWHCPdE5W3cG4Iga9jaqLAruxfJpRDSkvjmgHvm7SPk3ml0y2y4PFuFJ6htgPnpNzFX7c8yHUBrP9bGPWCXXt+nSZtGgZi+Vcqx1yiXG/CdprSx0QDE1uSzWVE75DlOU9TMOBNBEbPd2rBZaZ+xtyvNdBKRjXq7hL+D91+ew1//7A/B49f7+86331wyF1uknC23o/miLaHX8adh15soSaWrb0ncmANwe2tZWH0AFuLerIaSzC2aeVUNUJQPl0nsQcsKiq0rKCy7PJUpw4FqzkoZTHkphjKs2bwqNaFexpHuq7n2SrWkPi66YeK3/wM=</diagram></mxfile>
2009.11355/main_diagram/main_diagram.pdf
ADDED
Binary file (28.2 kB)
2009.11355/paper_text/intro_method.md
ADDED
@@ -0,0 +1,71 @@
# Introduction
Knowledge Graphs (KGs) are repositories of information organized as factual triples (h, r, t), where head and tail entities are connected via a particular relation (r). Indeed, KGs have seen wide application in a variety of domains such as question answering [\(Yao and Van Durme,](#page-5-0) [2014;](#page-5-0) [Hao et al.,](#page-5-1) [2017;](#page-5-1) [Moldovan and Rus,](#page-5-2) [2001\)](#page-5-2) and machine reading [\(Weissenborn et al.,](#page-5-3) [2018;](#page-5-3) [Yang and Mitchell,](#page-5-4) [2017\)](#page-5-4) to name a few and have a rich history within the natural language processing (NLP) community [\(Berant et al.,](#page-4-0) [2013;](#page-4-0) [Yu and Dredze,](#page-5-5) [2014;](#page-5-5) [Col](#page-4-1)[lobert and Weston,](#page-4-1) [2008;](#page-4-1) [Peters et al.,](#page-5-6) [2019\)](#page-5-6). While
often large, real-world KGs such as FreeBase [\(Bol](#page-4-2)[lacker et al.,](#page-4-2) [2008\)](#page-4-2) and WordNet [\(Miller,](#page-5-7) [1995\)](#page-5-7) are known to be incomplete. Consequently, KG completion via link prediction constitutes a fundamental research topic ameliorating the practice of important NLP tasks [\(Sun et al.,](#page-5-8) [2019;](#page-5-8) [Angeli and](#page-4-3) [Manning,](#page-4-3) [2013\)](#page-4-3).
In recent years, there has been a surge of methods employing graph embedding techniques that encode KGs into a lower-dimensional vector space facilitating easier data manipulation [\(Zhang et al.,](#page-5-9) [2019\)](#page-5-9) while being an attractive framework for handling data sparsity and incompleteness [\(Wang et al.,](#page-5-10) [2018\)](#page-5-10). To learn such embeddings, contrastive learning has emerged as the de facto gold standard. Indeed, contrastive learning approaches enjoy significant computational benefits over methods that require computing an exact softmax over a large candidate set, such as over all possible tail entities given a head and relation. Another important consideration is modeling needs, as certain assumptions are best expressed as some score or energy in margin-based or un-normalized probability models [\(Smith and Eisner,](#page-5-11) [2005\)](#page-5-11). For example, modeling entity relations as translations or rotations in a vector space naturally leads to a distance-based score to be minimized for observed entity-relation-entity triplets [\(Bordes et al.,](#page-4-4) [2013\)](#page-4-4).
Leveraging contrastive estimation to train KG embedding models involves optimizing the model by pushing up the energy with respect to observed positive triplets while simultaneously pushing down energy on negative triplets. Consequently, the choice of negative sampling distribution plays a crucial role in shaping the energy landscape as simple random sampling—e.g. Noise Contrastive Estimation (NCE) [\(Gutmann and Hyvärinen,](#page-5-12) [2010\)](#page-5-12)—produces negatives that are easily classified and provide little information in the form of a gradient signal. This is easily remedied if the corruption process selects a hard negative example through a more complex negative sampling distribution, such as adversarial samplers [\(Cai and Wang,](#page-4-5) [2018;](#page-4-5) [Bose et al.,](#page-4-6) [2018;](#page-4-6) [Sun et al.,](#page-5-8) [2019\)](#page-5-8). However, adversarial negative sampling methods are computationally expensive, while more tractable approaches—e.g. cache-based methods [\(Zhang](#page-5-9) [et al.,](#page-5-9) [2019\)](#page-5-9)—are not tailored to the KG setting as they fail to incorporate known graph structure as part of the sampling process. This raises the important question of whether we can obtain a computationally inexpensive negative sampling strategy while benefiting from the rich graph structure of KGs.

<sup>∗</sup>Equal contribution, names ordered alphabetically.

<span id="page-1-0"></span>

Figure 1: Our proposed approach for training a graph embedding model. In this illustration, k is set to 2.
Present Work. In this work, we introduce *Structure Aware Negative Sampling* (SANS), an algorithm that utilizes the graph structure of a KG to find hard negative examples. Specifically, SANS constructs negative samples using a subset of entities restricted to either the head or tail entity's *k*-hop neighborhood. We hypothesize that entities that are within each other's neighborhood but share no direct relation have higher chances of being related to one another and thus are good candidates for negative sampling. We also experiment with a dynamic sampling scheme based on random walks to approximate a node's local neighborhood. Empirically, we find that negative sampling using SANS consistently leads to improvements upon uniform sampling and sophisticated Generative Adversarial Network [\(Goodfellow et al.,](#page-5-13) [2014\)](#page-5-13) (GAN) based approaches at a fraction of the computational cost, and is competitive with other SOTA approaches with no added parameters.
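A minimal sketch of the structure-aware sampling idea, assuming a small dense adjacency matrix and hypothetical helper names (not the authors' implementation), is given below:

```python
import numpy as np

def k_hop_candidates(adj, k):
    """adj: dense 0/1 adjacency matrix of the entity graph (numpy int array)."""
    reach = adj.astype(bool)
    power = adj.copy()
    for _ in range(k - 1):
        power = (power @ adj > 0).astype(int)   # nodes reachable in one more hop
        reach |= power.astype(bool)
    np.fill_diagonal(reach, False)              # a node is not its own negative
    return reach & ~adj.astype(bool)            # drop directly related (1-hop) entities

adj = np.array([[0, 1, 0, 0, 1],
                [1, 0, 1, 0, 0],
                [0, 1, 0, 1, 0],
                [0, 0, 1, 0, 1],
                [1, 0, 0, 1, 0]])
cands = k_hop_candidates(adj, k=2)
rng = np.random.default_rng(0)
head = 0
negatives = rng.choice(np.flatnonzero(cands[head]), size=2, replace=True)
print(negatives)   # tail corruptions drawn from the head's 2-hop neighbourhood
```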
# Method
Results for the Uniform and Self-Adversarial algorithms in Table [1](#page-3-0) and Table [2](#page-3-1) respectively were achieved by re-running the code provided by [\(Sun](#page-5-8) [et al.,](#page-5-8) [2019\)](#page-5-8) using the hyperparameters they reported for the best performance on the validation set of different datasets. Additionally, the results for KBGAN and NSCaching in Table [1](#page-3-0) are the *scratch* results directly taken from [\(Zhang et al.,](#page-5-9) [2019\)](#page-5-9).
Table [9](#page-7-1) and Table [10](#page-8-0) report the performance of the graph embedding models fused with our negative sampling techniques on the validation and test sets with respect to the evaluation metrics. Additionally, they list the hyperparameter values corresponding to Uniform/Self-Adv. SANS and Uniform/Self-Adv. RW-SANS that resulted in the best performance on the validation sets.
<span id="page-8-0"></span>
| Dataset | Score Function | SANS Algorithm | k | ω | H@10 (Validation) | H@10 (Test) | MRR (Validation) | MRR (Test) |
|---------|----------------|----------------|---|------|-------------------|-------------|------------------|--------|
| FB15K-237 | TransE | Uniform | 5 | 4000 | 49.12 | 48.50 | 0.3023 | 0.2981 |
| FB15K-237 | TransE | Self-Adversarial | 4 | 4000 | 50.59 | 50.04 | 0.3129 | 0.3060 |
| FB15K-237 | DistMult | Uniform | 4 | 3000 | 41.66 | 41.46 | 0.2628 | 0.2621 |
| FB15K-237 | DistMult | Self-Adversarial | 5 | 3000 | 48.67 | 48.17 | 0.3142 | 0.3071 |
| FB15K-237 | RotatE | Uniform | 2 | 4000 | 49.05 | 48.47 | 0.3034 | 0.3003 |
| FB15K-237 | RotatE | Self-Adversarial | 2 | 4000 | 51.41 | 51.07 | 0.3205 | 0.3161 |
| WN18 | TransE | Uniform | 2 | 1000 | 95.23 | 95.22 | 0.8194 | 0.8195 |
| WN18 | TransE | Self-Adversarial | 3 | 4000 | 88.65 | 88.51 | 0.7480 | 0.7429 |
| WN18 | DistMult | Uniform | 2 | 1000 | 89.38 | 89.80 | 0.6205 | 0.6235 |
| WN18 | DistMult | Self-Adversarial | 2 | 1000 | 90.55 | 91.08 | 0.6601 | 0.6634 |
| WN18 | RotatE | Uniform | 2 | 3000 | 95.92 | 96.07 | 0.9492 | 0.9489 |
| WN18 | RotatE | Self-Adversarial | 2 | 4500 | 95.83 | 96.09 | 0.9493 | 0.9496 |
| WN18RR | TransE | Uniform | 2 | 1000 | 52.67 | 53.41 | 0.2282 | 0.2317 |
| WN18RR | TransE | Self-Adversarial | 5 | 2000 | 53.05 | 53.81 | 0.2229 | 0.2273 |
| WN18RR | DistMult | Uniform | 2 | 3000 | 49.01 | 49.09 | 0.4111 | 0.4071 |
| WN18RR | DistMult | Self-Adversarial | 4 | 1000 | 43.70 | 42.74 | 0.3883 | 0.3836 |
| WN18RR | RotatE | Uniform | 2 | 1000 | 57.20 | 57.12 | 0.4860 | 0.4796 |
| WN18RR | RotatE | Self-Adversarial | 2 | 1000 | 57.09 | 56.94 | 0.4882 | 0.4805 |
Table 10: The hyperparameter values associated with the best performance on the validation sets, used for obtaining the test results. The different variations of SANS in this table approximate the *k*-hop adjacency tensor by random walks (RW-SANS) using Alg. [1.](#page-2-1)
<span id="page-8-1"></span>
| Negative Sampling Algorithm | Preprocessing Complexity | Runtime Complexity | Space Complexity |
|-----------------------------|--------------------------|--------------------|------------------|
| Uniform (Bordes et al., 2013) | O(1) | O(bn) | O(1) |
| KBGAN (Cai and Wang, 2018) | O(t) | O(bn + bd + bt) | O(t) |
| NSCaching (Zhang et al., 2019) | O(1) | O(bn + be) | O(c\|R\|\|V\|) |
| Self-Adv. (Sun et al., 2019) | O(\|E\|) | O(bn + bd) | O(\|E\|) |
| Uniform SANS (ours) | O(\|V\|^3 log k) | O(bn) | O(\|V\|^2) |
| Self-Adv. SANS (ours) | O(\|V\|^3 log k) | O(bn + bd) | O(\|V\|^2) |
| Uniform RW-SANS (ours) | O(rk\|V\|) | O(bn) | O(r\|V\|) |
| Self-Adv. RW-SANS (ours) | O(rk\|V\|) | O(bn + bd) | O(r\|V\|) |
Table 11: Comparison of different negative sampling algorithms in terms of preprocessing, runtime, and space complexities given batch size b, negative sample size n, cache size c, cache extension size e, node set V , edge set E, relation set R, embedding dimension d, hops count k, random walks count r, and GAN parameters count t.
Based on our outcomes, we hypothesize that the usage of random walks in approximating the *k*-hop neighborhood implicitly results in the removal of nodes with the least number of walks to the center node—i.e. outlier nodes.
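As an illustration, approximating a node's *k*-hop neighbourhood with *r* random walks could be sketched as follows (toy graph and placeholder parameters, not the paper's Alg. 1):

```python
import numpy as np

def random_walk_neighbourhood(adj_list, start, k, r, rng):
    """Collect nodes visited by r random walks of length k starting at `start`."""
    visited = set()
    for _ in range(r):
        node = start
        for _ in range(k):
            node = rng.choice(adj_list[node])
            visited.add(node)
    visited.discard(start)
    return visited

rng = np.random.default_rng(0)
adj_list = {0: [1, 4], 1: [0, 2], 2: [1, 3], 3: [2, 4], 4: [0, 3]}
print(random_walk_neighbourhood(adj_list, start=0, k=2, r=10, rng=rng))
```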
Table [11](#page-8-1) is representative of the time and space complexities of different negative sampling approaches including SANS.
2010.05315/main_diagram/main_diagram.drawio
ADDED
@@ -0,0 +1 @@
<mxfile host="www.draw.io" modified="2020-05-31T23:54:55.185Z" agent="5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.122 Safari/537.36" etag="B06X0UauHcLycnW-z_n_" version="13.1.3" type="device"><diagram id="ImcN4yuYkwht8uK8pNuU" name="Page-1">7VtLc9owEP41HNOxJEuYYxuSZqbpNDOZSdJTRrEFdmsQNSJAf33l2PITiJ0YiUIujHe1fmh3v33Z9ND5ZPU1ojP/O/dY2IOWt+qhYQ/CgWPL35ixThjYxgljHAVewgI54zb4y1KmlXIXgcfmJUHBeSiCWZnp8umUuaLEo1HEl2WxEQ/Ld53RMasxbl0a1rn3gSf8hOvAfs6/YsHYV3cGZJCsTKgSTncy96nHlwUWuuih84hzkRxNVucsjHWn9JKcd7llNXuwiE1FkxPuhk+Pj1d/+tY3MqJndz+u8c39mdrHXKzVjpknFZCSPBI+H/MpDS9y7peIL6Yeiy8LJJXLXHM+S5m/mBDr1Jp0Ibhk+WISpqvyiaP1gyQsRfyMiU9YkcNVcXG4zijvc2xTSU75lCWcyyAM0/WRPDznIY9edoI8ypyRK/lzEfHfrLBCXIc9jeRKsvt4y1u1qryORmMmdqhykNlUYoHxCZNPLs+LWEhF8Fy+Pk29cpzJZafe8EDeGVopgIBynxQ/xCpfYc4XkcvSk3Lry4PCU+SsF59o4x/J3Z5puCjvoOgwYSjRGFtj6QeC3c6oG68sZTwom53OZwlER8Eqdp832+uZRYKtdlusbgl1AihrFKfkMoe30rlfQDa0tpuupPW2Kka2EQiuAvFQOC4AUFI5/mKiKfzeCut9wjbBxy71N4Q3sDZ71TvxDQEueSOo+lnyYHsDOKoBHB4bwDPaFMKzCsAIwq1WCE8RDNoh+LXIULZ3KtIhiAcNQQy7wLDcKV0XBGYxNuc7UrhT8Ue7UqhV5dFOeXmQPEG3cQCfuo8azUJNHRiZcGBolXMU2e2/VXEANfjvQGehOnJc5m70jSdHNrhWN3kMwsMqVKHzESIMhghVxRxmkqs6K7BeSXIYv0ueaIgpSuN6imMzQcV4cQwsM2FFzwiqsVE7GUEBYHeB/Rr4ECk7jeYhFLCOMLsjfFjZPQsEH3MoM+kdNEc52QvK7WpK1jyKyjRwTNPmKszN51uwoYY5rTJ+39OoNA03Gip38taobbFuV4vAVyZSqG9gIgUAOnVHNZyQGnsx2BID9+zFdquxVFVcy1gqU+Ixla42ObTSlWgtHDBzPHuTkh34hAjZj5IPoHA45m9FNDfqsJNBfi3GYdtsow6cGhD/+4kZRocW7QYm66KPRl1ZtgHK8V5QTpDhRl3d75i+GqnC3Hy+3VTDnFb/s/dGHTb++ivJbLpbHGK1a9RxddqkpVHfBP7TclTDCamxFxc+PNfpxbBVo14V19OoQ61fQuopXYl9YKWrsqymwkFLo15VsvnCwa6/R6kruRbRPDr3s3hc0HJFqQxItfY3KXVA+oiSFrGtrtKCyvAGlSnee19nVz8Qxs3a5Fcv1FkdLsn8j0GJeP7vKnTxDw==</diagram></mxfile>
2010.05315/main_diagram/main_diagram.pdf
ADDED
Binary file (11.4 kB)
2010.05315/paper_text/intro_method.md
ADDED
@@ -0,0 +1,86 @@
# Introduction
Attention layers enable long-range representation learning and are becoming indispensable in architectures for both Image Synthesis [@biggan; @sagan; @daras2019local] and Natural Language Processing [@albert; @yang2019xlnet; @devlin2018bert; @dai2019transformer; @t5; @liu2019roberta]. Attention finds further uses in other domains like symbolic mathematics and music modeling as well [@MATHTRANSFORMER; @musictransformer; @child2019generating]. Unfortunately, attention layers have high computational and memory cost which scales quadratically in the size of the input sequence. This constraint is so onerous that the canonical implementation of attention for image synthesis - Self-Attention GAN [@sagan] - could only afford to use one self-attention layer. For NLP, modern transformer-based models can only be trained in large industry research labs with massive infrastructure investments. For instance, the recently published GPT-3 [@GPT3] model uses $96$ attention layers trained on input sequences of $2048$ tokens. When fine-tuning pre-trained attention models, NLP researchers usually truncate input sentences, limiting performance on datasets with longer inputs.
Recent research [@show_attend_and_tell; @daras2019local] indicates that dense attention is statistically and computationally inefficient [@Voita_2019; @michel2019sixteen; @daras2019local]: it does not account for the locality inherent in many tasks. Alternatives have been proposed that are either more efficient [@child2019generating; @adaptively_sparse_transformers; @reformer; @routing_transformer; @sinkhorn; @dai2019transformer; @lample2019large; @star_transformer] or that better accommodate locality [@localattn; @daras2019local]. Most such alternatives have been sparse. Sparsity can be achieved by limiting attention to pre-defined positions [@localattn; @daras2019local; @star_transformer; @child2019generating]. Recent work [@adaptively_sparse_transformers; @reformer; @routing_transformer; @sinkhorn] proposes data-driven sparsity, which allows for discovery of arbitrarily complex dependencies between input positions.
Despite this progress, new state-of-the-art models [@t5; @GPT3; @liu2019roberta; @clark2020electra; @GLUE; @superglue] still use the original dense attention layers. There are three reasons for this: (i) alternative fast-attention mechanisms degrade the performance of the underlying model. For example, replacing dense attention layers in Transformers with memory-efficient local attention [@localattn] increases perplexity from $41.57$ to $44.23$ [@sinkhorn]. (ii) some mechanisms work well, but make very strict assumptions. For example, in Star Transformer [@star_transformer] all nodes attend to a relay node which summarizes the content of the entire input sequence, but this prevents the use of causal masking, so it can only be used for encoding. (iii) some alternatives are only efficient in theory. For example, in some variants [@adaptively_sparse_transformers; @Malaviya_2018] sparsification of the attention map happens after instantiating the matrix, and so quadratic memory is still used before instantiation. Finally, [@child2019generating; @beltagy2020longformer] require highly specialized GPU kernels, which prevents usage in several hardware settings (e.g. TPUs). The design of fast and efficient attention layers remains a challenge.
**Our Contributions:**\
**1)** We propose a novel type of balanced clustering to approximate attention. We call the underlying optimization problem Attention Biclustering and prove that finding an exact solution is computationally intractable.\
**2)** We propose an algorithm for solving Attention Biclustering efficiently in practice. Our algorithm, SMYRF, uses Locality Sensitive Hashing (LSH) in a novel way by defining new Asymmetric transformations and an adaptive scheme that produces balanced clusters.\
**3)** Our method, SMYRF, can handle different query and key vectors, just like normal dense attention. As a result, SMYRF layers are drop-in replacements for pre-trained models, unlike previously proposed fast-attention mechanisms such as Sinkhorn [@sinkhorn], Reformer [@reformer] and Routing Transformer [@routing_transformer].\
**4)** We show through numerous experiments that SMYRF attention layers are very effective in terms of performance, memory and speed, even without any training. We measure the memory-performance trade-off of applying SMYRF to state-of-the-art NLP and Computer Vision models, across more than a dozen tasks. For example, we are able to shrink the memory requirements of a pre-trained BigGAN [@biggan] by $50\%$ while maintaining $98.2\%$ of its Inception score without re-training.\
**5)** We finetune SMYRF on GLUE [@GLUE] starting from a BERT (base) checkpoint. We demonstrate that SMYRF-BERT outperforms BERT while using $50\%$ less memory. We also show that with $75\%$ less memory, SMYRF maintains $99\%$ of BERT performance on GLUE. Due to SMYRF's portability, we are also able to conduct experiments for various memory configurations with pre-trained BERT and RoBERTa [@liu2019roberta] models on IMDB. We show slight performance drops for great memory benefits.\
**6)** We show that SMYRF can be interchanged with dense layers *before* and *after* training. We report performance gains by using SMYRF in a back-and-forth manner: we replace dense with SMYRF during training (to earn in memory) and we replace SMYRF with dense attention during inference (to earn in performance). The interchangeability of SMYRF with dense attention is unique, as it has not been observed in previously proposed attention alternatives [@reformer; @routing_transformer; @sinkhorn; @beltagy2020longformer; @daras2019local].\
**7)** We are able to scale the resolution of attention for GANs, due to our reduced memory footprint. We train a BigGAN with an $128 \times 128$ SMYRF attention layer and show it outperforms the dense attention performance, decreasing FID from $26.06$ to $25.03$ in Celeba-HQ-128 [@celeba]. Finally, we successfully train a BigGAN with attention at resolution $256\times 256$ on a single v3-8 TPU.\
**8)** We open-source our code and pre-trained models to encourage more related research: <https://github.com/giannisdaras/smyrf>.
Attention [@vaswani2017attention] works by computing inner products of query and key vectors. Depending on the application, these vectors may represent embeddings for tokens or image pixels. Input of each attention layer is three sets: $\mathcal Q, \mathcal K,
\mathcal V$ for query, key and value vectors respectively. Attention of $q$ to the keys set $\mathcal K$ outputs a new vector $o_q$ , which is a weighted sum of value vectors $v_i \in \mathcal V$ where each weight $w_i$ increases with the inner product $q \cdot k_i$. Specifically, the output is computed as: $$\begin{equation}
o_q = \sum_{i=1}^{N} w_i v_i, \qquad w_i = \frac{e^{q \cdot k_i}}{\sum_{j=1}^{N} e^{q \cdot k_j}}.
\label{dense}
\end{equation}$$ Here, we assumed for notational simplicity that $N = |\mathcal Q| = |\mathcal K|$. Using matrix notation, attention is equivalently defined as $\sigma(Q \cdot K^T) \cdot V$ where $Q, K, V$ are matrices with rows the embeddings for each query, key, value and the function $\sigma(.)$ computes the row-wise softmax.
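The equation above translates directly into code; the following sketch (illustrative sizes, not SMYRF itself) computes dense attention exactly as defined:

```python
import numpy as np

def dense_attention(Q, K, V):
    logits = Q @ K.T                                   # all q . k_i inner products
    weights = np.exp(logits - logits.max(axis=1, keepdims=True))
    weights /= weights.sum(axis=1, keepdims=True)      # row-wise softmax sigma(Q K^T)
    return weights @ V                                 # each o_q is a weighted sum of values

rng = np.random.default_rng(0)
N, d = 6, 4
Q, K, V = rng.normal(size=(N, d)), rng.normal(size=(N, d)), rng.normal(size=(N, d))
print(dense_attention(Q, K, V).shape)   # (N, d): one output vector per query
```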
Our method is motivated by the observation that attention matrices have interesting structure in real datasets. Naively, to compute dense attention, as equation [\[dense\]](#dense){reference-type="ref" reference="dense"} shows, we need to compute all outputs $o_{q_i}$, i.e. $O(|\mathcal Q| \cdot |\mathcal K|)$, a quadratic number of inner products $q_i \cdot k_j, \ q_i \in \mathcal Q, \ k_j \in \mathcal K$. However, we observe that in most real networks, the attention weights $w_i$ are sparse, because of the softmax operation and the structure of the vectors. For example we observe that in a pre-trained BigGAN on ImageNet, on average $\bm {98.11 \pm 0.26 \%}$[^1] of keys get weight less than $0.01$ in softmax and $\bm {86.11 \pm 2.92 \%}$ of them get less than $\frac{1}{|\mathcal K|}$, where $\mathcal K$ is the number of keys.
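A sketch of how such sparsity statistics can be measured is shown below; the random attention map merely stands in for the pre-trained BigGAN attention weights analysed in the text:

```python
import numpy as np

def softmax(x, axis=-1):
    e = np.exp(x - x.max(axis=axis, keepdims=True))
    return e / e.sum(axis=axis, keepdims=True)

rng = np.random.default_rng(0)
num_queries, num_keys = 1024, 1024
# Illustrative stand-in for a real model's query-key logits.
weights = softmax(rng.normal(scale=2.0, size=(num_queries, num_keys)))

below_abs = (weights < 0.01).mean()                 # fraction of weights under 0.01
below_uniform = (weights < 1.0 / num_keys).mean()   # fraction under 1/|K|
print(f"{below_abs:.2%} of weights < 0.01, {below_uniform:.2%} < 1/|K|")
```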
Further, we observe that the attention matrix is near low-rank, even after the softmax. By definition, the matrix $Q \cdot K^T$ is going to be of rank at most the dimension of the query and key vectors. Therefore, if the embeddings dimension is smaller than the input sequence, the attention matrix is low-rank. This is more pronounced for images and long-context language models. However, one can easily construct cases of low-rank matrices which become full rank after softmax. Our finding is that this does not happen in practice. In the Appendix we show that *real attention matrices of pretrained models have a sharp decay in their singular values and hence can be well approximated by low-rank matrices*.
SMYRF benefits from sparsity and low-rank structure of attention matrices. By clustering keys and queries into groups, we obtain block-diagonal structure in the approximate attention matrix, since only query-key pairs within the same cluster are computed. We show that this method leads to accurate approximations of dense attention and it can be computed much faster and with much less memory.
# Method
We formulate the assignment of keys and queries into clusters as an optimization problem. Denote with $P_{ij} = q_i^T k_j$ the element $(i, j)$ of the product matrix $P = Q \cdot K^T$ and the attention map with $M = \sigma(Q \cdot K^T)$. We will assign query and key vectors into $L$ clusters $c_1, c_2, ..., c_L$ and compute attention only within each cluster. For fast execution on TPUs/GPUs, all partial attentions should be computed in parallel. For this reason, we require that clusters are balanced: i.e. all clusters contain the same number of keys and queries. We note that the number of keys in each cluster does not have to be equal to the number of queries. Formally, each cluster contains $\frac{|\mathcal Q|}{L}$ queries and $\frac{|\mathcal K|}{L}$ keys.
|
| 34 |
+
|
| 35 |
+
We denote with $\mathcal C^L$ the set of all possible assignments into $L$ balanced non-overlapping clusters. A specific assignment is denoted by $\mathcal C^L_t$ and there are $T$ possible such assignments, where $T$ is exponentially large in the number of keys and queries. $$\mathcal C^L = \{\mathcal C_1^L, \mathcal C_2^L, ..., \mathcal C_T^L\}.$$ $$\begin{equation}
|
| 36 |
+
\mathcal C_t^L=\{c_1, c_2, ..., c_L\}: \quad
|
| 37 |
+
\begin{cases}
|
| 38 |
+
c_i = \{q_1, ..., q_{\frac{|\mathcal Q|}{L}}, k_1, ..., k_\frac{|\mathcal K|}{L}\}, \quad c_i \subseteq \mathcal Q \cup \mathcal K, \ \ \forall i\in \{1, ..., L\} \\
|
| 39 |
+
c_x \cap c_y = \varnothing \quad \forall c_x, c_y \in \mathcal C_t^L.
|
| 40 |
+
\end{cases}
|
| 41 |
+
\label{balanced}
|
| 42 |
+
\end{equation}$$
|
| 43 |
+
|
| 44 |
+
We emphasize that every key and query is assigned to a unique cluster for any valid assignment $\mathcal C^L_t$: $c_x \cap c_y = \varnothing \quad \forall c_x, c_y \in \mathcal C_t^L.$ We also define a masking operator $\textrm{Mask}_\epsilon$ that takes as input: (i) a clustering $\mathcal C_t^L \in \mathcal C^L$ and (ii) the product matrix $P$ and replaces $(q, k)$ pairs that are not in the same cluster with $-a$, where $a \in \mathbb R^{+}$ is a constant chosen to satisfy $e^{-a} = \epsilon$ for a given $\epsilon \geq 0$. Formally: $$\textrm{Mask}_\epsilon(\mathcal C_t^L, P_{ij}) = \begin{cases}
|
| 45 |
+
P_{ij} \quad \textrm{iff } \exists t: (i, j) \in c_t, \\
|
| 46 |
+
-a, \quad \textrm{o/w}.
|
| 47 |
+
\end{cases}$$
|
| 48 |
+
|
| 49 |
+
Intuitively, the masking operator replaces inner products of queries and keys that are not in the same cluster with an arbitrarily small number, so that the softmax will assign a score arbitrarily close to zero to these entries. We denote with $\hat P_\epsilon=\textrm{Mask}_\epsilon(\mathcal C_t^L, P)$ the product matrix after the masking. With this notation, $\hat P_0 = \textrm{Mask}_{0}( \mathcal C_t^L, P)$, is the product matrix for the within-clusters attention.
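A minimal sketch of the masking operator, with cluster assignments given as integer labels per query and per key (variable names are ours; $\epsilon = 0$ corresponds to masking with $-\infty$):

```python
import numpy as np

def mask_epsilon(P, q_cluster, k_cluster, eps=0.0):
    """Keep P_ij when query i and key j share a cluster; otherwise substitute -a with e^{-a} = eps."""
    a = np.inf if eps == 0.0 else -np.log(eps)
    same_cluster = q_cluster[:, None] == k_cluster[None, :]   # (|Q|, |K|) boolean mask
    return np.where(same_cluster, P, -a)
```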
|
| 50 |
+
|
| 51 |
+
**Attention Biclustering:** Under this formulation, we are searching for the cluster assignment $\mathcal{C}^L_t$ that approximates the dense attention matrix $\sigma(P)$ as well as possible, in Frobenius norm: $$\begin{equation}
|
| 52 |
+
\min_{\mathcal C_t^L \in \mathcal C^L} || \sigma(\hat P_0)- \sigma(P)||_F.
|
| 53 |
+
\label{min_problem}
|
| 54 |
+
\end{equation}$$ Note that $L$ must divide the number of queries and keys for this problem to be well-defined.
|
| 55 |
+
|
| 56 |
+
We start by showing that Attention Biclustering, the optimization problem defined in ([\[min_problem\]](#min_problem){reference-type="ref" reference="min_problem"}), is provably computationally intractable.
|
| 57 |
+
|
| 58 |
+
::: {#main_theorem .theorem}
|
| 59 |
+
**Theorem 1**. *Attention Biclustering ([\[min_problem\]](#min_problem){reference-type="ref" reference="min_problem"}) is NP-hard.*
|
| 60 |
+
:::
|
| 61 |
+
|
| 62 |
+
We defer the proof of this theorem to the Appendix. Our proof proceeds by first establishing hardness before the softmax, using a reduction from three-dimensional matching [@gary]. We then leverage this to establish hardness of approximating attention through clustering after the softmax operation.
|
| 63 |
+
|
| 64 |
+
We consider it interesting to establish the computational intractability of Attention Biclustering, since this clustering formulation is quite unique due to the softmax operation. Our hardness result rules out an exact polynomial solution, unless P=NP. We propose an efficient algorithm that leverages hashing to assign queries and keys to clusters. Formally proving an approximation guarantee or provable inapproximability for the attention approximation problem we proposed remains open.
|
| 65 |
+
|
| 66 |
+
Our algorithm consists of the following steps:\
|
| 67 |
+
**1)** We first propose novel asymmetric transformations $F, G: \mathbb R^{d} \to \mathbb R^{d'}$ such that for all given queries $q_1, q_2 \in \mathcal Q$ and keys $k \in \mathcal K$: $q_1 \cdot k \leq q_2 \cdot k \iff ||F(q_1) - G(k)||_2 \geq ||F(q_2) - G(k)||_2$, i.e., larger inner products correspond to smaller Euclidean distances.\
|
| 68 |
+
**2)** We then use a Locality Sensitive Hashing (LSH) function $h:\mathbb R^{d'}\to \mathbb R$ to map transformed vectors to real numbers, so that vectors that are close in Euclidean distance correspond to numbers that are close on the real line.\
|
| 69 |
+
**3)** We sort vectors based on their LSH value and group them by adapting the thresholds to ensure $L$ balanced clusters.\
|
| 70 |
+
**4)** We perform dense attention within each cluster.
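To make the four steps concrete, here is a minimal single-round, single-head NumPy sketch of the whole procedure; it uses one random projection as the LSH function, omits multi-round hashing and batching, and all names are ours rather than from a released SMYRF implementation:

```python
import numpy as np

def smyrf_like_attention(Q, K, V, n_clusters, rng=None):
    """Sketch of steps 1-4: asymmetric transform, LSH to a scalar, balanced clusters, per-cluster attention.
    Assumes n_clusters divides both the number of queries and the number of keys."""
    rng = np.random.default_rng(0) if rng is None else rng
    n_q, d = Q.shape
    n_k = K.shape[0]

    # 1) asymmetric transforms F, G mapping to a (d+2)-dimensional ball of radius sqrt(M_Q^2 + M_K^2)
    r2 = np.linalg.norm(Q, axis=1).max() ** 2 + np.linalg.norm(K, axis=1).max() ** 2
    Fq = np.hstack([Q, np.zeros((n_q, 1)), np.sqrt(r2 - (Q ** 2).sum(1, keepdims=True))])
    Gk = np.hstack([K, np.sqrt(r2 - (K ** 2).sum(1, keepdims=True)), np.zeros((n_k, 1))])

    # 2) one E2LSH-style hash: projection of the transformed vectors onto a random direction
    w = rng.normal(size=d + 2)
    hq, hk = Fq @ w, Gk @ w

    # 3) sort by hash value and cut into n_clusters balanced groups (adaptive thresholds)
    q_groups = np.split(np.argsort(hq), n_clusters)
    k_groups = np.split(np.argsort(hk), n_clusters)

    # 4) dense attention within each cluster
    O = np.zeros((n_q, V.shape[1]))
    for q_idx, k_idx in zip(q_groups, k_groups):
        P = Q[q_idx] @ K[k_idx].T
        P -= P.max(axis=1, keepdims=True)
        W = np.exp(P)
        W /= W.sum(axis=1, keepdims=True)
        O[q_idx] = W @ V[k_idx]
    return O
```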
|
| 71 |
+
|
| 72 |
+
Our approximate attention algorithm relies on a few technical innovations:
|
| 73 |
+
|
| 74 |
+
**Novel Asymmetric Transformations:** We need an efficient way to find, for any given query vector $q_i \in \mathcal Q$ the set of keys with which it has big inner products. This problem, called Maximum Inner Product Search (MIPS), can be efficiently solved by transforming query and key vectors to convert it to a Nearest Neighbor Search (NNS) as proposed in the pioneering Asymmetric LSH (Locality Sensitive Hashing) work by Shrivastava et al. [@l2lsh].
|
| 75 |
+
|
| 76 |
+
We are looking for functions $F: \mathbb R^{d}\to \mathbb R^{d'}, G: \mathbb R^d \to \mathbb R^{d'}$ such that $||F(q) - G(k)||_2^2 = D(q\cdot k), \ \forall (q, k)$, where $D:\mathbb R \to \mathbb R$ is a decreasing function that depends only on the inner product $q\cdot k$. We restrict our focus to functions $D$ that decrease linearly with the inner product $q\cdot k$. Several previous works have proposed Asymmetric LSH transformations [@l2lsh; @xbox; @h2lsh] but focus on the case where we have a *single query* $q$ and multiple keys. In that case, any norm $||q||_a$ with $a \in \{1, ..., \infty\}$ is constant and thus $D = D(q\cdot k, ||q||_a)$.
|
| 77 |
+
|
| 78 |
+
Our central algorithmic contribution is the proposal of novel asymmetric functions: $$\begin{equation}
|
| 79 |
+
F(q_i) = \left[q_i; 0; \sqrt{M_Q^2 + M_K^2 - ||q_i||_2^2} \right], \qquad G(k_i) = \left[k_i; \sqrt{M_Q^2 + M_K^2 - ||k_i||_2^2}; 0\right]
|
| 80 |
+
\end{equation}$$ where we use the constants $M_Q = \max_{q_i}||q_i||_2, \quad M_K = \max_{k_i}||k_i||_2$, or any other upper bound on the norms. With this transformation, all queries and keys are mapped to a $(d+2)$-dimensional ball with radius $\sqrt{M_Q^2 + M_K^2}$ and the distance of the transformed vectors decreases linearly with the inner product of the original vectors: $$\begin{equation}
|
| 81 |
+
||F(q_i) - G(k_i)||_2^2 = 2 \cdot \left( M_Q^2 + M_K^2 - q_i\cdot k_i\right).
|
| 82 |
+
\end{equation}$$ Note that the Euclidean distance of the transformed vectors depends only on the inner product of the original vectors and not on individual norms $||q_i||_2$ as in previous work [@e2lsh; @h2lsh; @xbox]. We include details of comparison to the numerous prior asymmetric transformations in the Appendix.
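A quick numerical check of this identity on toy vectors (here, for simplicity, the norms of the single query and key are used as the upper bounds $M_Q$ and $M_K$):

```python
import numpy as np

rng = np.random.default_rng(1)
q, k = rng.normal(size=4), rng.normal(size=4)
MQ, MK = np.linalg.norm(q), np.linalg.norm(k)
F = np.concatenate([q, [0.0], [np.sqrt(MQ ** 2 + MK ** 2 - q @ q)]])
G = np.concatenate([k, [np.sqrt(MQ ** 2 + MK ** 2 - k @ k)], [0.0]])
# ||F(q) - G(k)||^2 should equal 2 (M_Q^2 + M_K^2 - q . k)
assert np.isclose(((F - G) ** 2).sum(), 2 * (MQ ** 2 + MK ** 2 - q @ k))
```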
|
| 83 |
+
|
| 84 |
+
**Adaptive Clustering:** The final step of SMYRF is to use the hashed values to create *balanced* clusters. These are created by forming balanced hash buckets where every group is assigned the same number of query and key vectors. We modify the E2LSH [@e2lsh] hashes to create balanced clusters as follows: instead of rounding the E2LSH hash to an integer value as in [@e2lsh], we adaptively set the boundaries of the $1$-d hashed space to ensure the same number of query and key vectors per interval. Computationally, this only requires sorting the hashes. We explain the mathematical details of our adaptive clustering scheme and the differences with E2LSH in the Appendix.
|
| 85 |
+
|
| 86 |
+
**Computational Complexity and speedups:** For notational simplicity we assume $|\mathcal Q| = |\mathcal K| = N$. The total time and memory complexity of SMYRF is $O\left(H \cdot N \cdot \log N + H \cdot \frac{N^2}{L}\right)$, where $H$ denotes the number of hashing rounds, $N$ the number of query/key vectors and $L$ the number of clusters. For most of our experiments we choose $L = O(N), \ H=O(1)$, and thus the complexity is $O(N\log N)$. Even though we obtain optimal complexity for $L=O(N), \ H=O(1)$, both $L, H$ are parameters that can be tuned to satisfy the desired memory-performance trade-off. Regarding speed, SMYRF accelerates attention substantially as sequence length increases. For example, for sequence length 2048, SMYRF-BERT offers $\approx 20\%$ speedup, while for $4096$ the speedup increases to $\approx 50\%$. We include detailed speed plots for applying SMYRF to BERT in the Appendix.
|
2011.00105/paper.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:f959f829af71a0bbe5bf14bf6ff28141f701ab738f86b09d59f538bf8c515155
|
| 3 |
+
size 498061
|
2011.07387/main_diagram/main_diagram.drawio
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
<mxfile host="Electron" modified="2020-08-14T07:25:56.063Z" agent="Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) draw.io/12.3.2 Chrome/78.0.3904.113 Electron/7.1.2 Safari/537.36" version="12.3.2" etag="fL38GVE6kpAek1q5Q745" type="device" pages="1"><diagram id="Ud1qqpHv8_ff6WkxOnXp">7Vpfd5owHP00PnYHiCA+VtttD93pznrO9hwhAmskLISq+/RLTAJEUGy1ap28iJdf/t3fvUkM9sB4tvhCYRZ/IyHCPccKFz1w13P4NQD8QyBLidiu70skokmosAp4Sv4iBVoKLZIQ5UYgIwSzJDPBgKQpCpiBQUrJ3AybEmy2msEINYCnAOIm+isJWSxR37Uq/CtKoli3bFvqyQzqYAXkMQzJvAaB+x4YU0KYvJstxggL9jQvstznDU/LjlGUsl0KOLLAC8SFGpvqF1vqwUaUFJkKQ5ShRRvFcKLDrWYX7HJgXBKIzBCjSx6iK9K0KTW4uop5jVpfYXGNVqevQKjSGZV1VyPmN2rQ7QSAFgI8zFsYTfhNJG40MCV8QGLEmNBVqPenIDIAWKurDtUo1KCo4CZfqfmWB9h+tqiXkK2NSfpTt8g7Lxs1O8LhWufW0sUVlYnboJjwj9E8Thh6ymAgsDn3I8diNuN03NlikKRIQxQ+TEoABs8i42n4WDCcpEjhIaTPj7yahIk0WZ8s1wSdFSoipwnG45IjMOUXtzcY5YySZ1R7AjwwBGH5RDvJLkfVIre6rJytsvJcQ1XDpqgGLZoqxbiPpvrbNNWii1WObnKZJKGMlNAZxE1tHFeJPxAuLkmJQYDc6fT4SnQs62RSdLvndxTytU19JZTFJCIpxPcVOqIyNYJX/q2KeSAkUzT9Rowt1UINC0bM3Jq0OiWtountpPKekoIGyJisGaQRYjWvNamnCEOWvJi170Okt9HTrzOj6R/u+fTtBm2sVHztCCArYbrdriUme9GATcOfubmn04Bfxzc3AJ3rzPC9zD24KHP3m+b2jmRu/7oJvMxNYH9wuqV3eFHu9Jru9I/kTv2r/7qfPi9Tn2o/7Vmn+2ln2xflar/pattqJ//wtm47e7puqc/H36faUg+c022p7bbzwI/rb710GgbfwP7hDd52DvaRuNyDI1X0O0lWM5l+0eH1zSIyVyqqYrYq2H6Y2ahH5rdRzypDZfd3S9pHPzE6XNIU977dQf6uSSxfYhwhi23HVetZTMNb8S5MrDgY5nkSmCngz1V6uJEPm5HaTO62zOQa2zNxYD1v7tvy5gw7KtqQN84uXNbCMhGQ79zf9Td3Hb16Zbh+JVlpTHb3zYrb5TDqqrjdFDew1meKd1Vcl2Y2PXe97ZobHFpjbed0/53G7A5tnJ3I1jrcMVGtd8vZS0P8a/VnChle/ScF3P8D</diagram></mxfile>
|
2011.07387/main_diagram/main_diagram.pdf
ADDED
|
Binary file (8.52 kB). View file
|
|
|
2011.07387/paper_text/intro_method.md
ADDED
|
@@ -0,0 +1,36 @@
|
| 1 |
+
# Introduction
|
| 2 |
+
|
| 3 |
+
In this paper, we describe a method for privacy-preserving visual human-robot interaction (HRI). Our approach rests on effective human pose inference from a user-degraded camera. This would allow a user to cover the robot's camera with a translucent film but still interact with the robot using nonverbal behaviors.
|
| 4 |
+
|
| 5 |
+
Visual observation of humans plays a vital role in understanding humans' states and intentions for HRI [\[12\]](#page-6-0). However, being monitored by cameras, especially at home, leads to privacy concerns [\[3\]](#page-6-1). Prior work on protecting humans' privacy in captured images focuses on using depth cameras or specially designed low-resolution cameras [\[23\]](#page-6-2), [\[32\]](#page-6-3). However, these methods require specialized equipment. In contrast, we propose to improve pose inference in images captured from a standard RGB camera which has been covered by a translucent film. The main contribution of this work is an image enhancement method that maintains privacy but enables robust pose estimation.
|
| 6 |
+
|
| 7 |
+
Pose estimation on clear images has been studied extensively [\[14\]](#page-6-4), [\[29\]](#page-6-5), but conducting pose estimation directly on degraded images is challenging. Researchers have proposed a number of image enhancement methods to facilitate pose estimation for degraded images [\[17\]](#page-6-6), [\[19\]](#page-6-7). However, these methods are generally based on a specific image formation model, such as low light or haze. We found that the privacy-preserving images in our case do not comply with existing models. Therefore, the existing models cannot solve the pose estimation problems for our purpose (see Section [VI-D](#page-5-0) for a detailed evaluation).
|
| 8 |
+
|
| 9 |
+
Our method builds on the recommendation put forth by Hu *et al.* [\[15\]](#page-6-8), which proposed privacy-preserving HRI with cameras covered up by filtering materials. To detect
|
| 10 |
+
|
| 11 |
+
<span id="page-0-0"></span>
|
| 12 |
+
|
| 13 |
+
Fig. 1: Left to right: the pose estimation results on the ground-truth clear image, the corresponding shadow image and enhanced image output by our enhancement module.
|
| 14 |
+
|
| 15 |
+
humans' poses from the degraded images, we propose a neural network based architecture for image enhancement. The proposed network is trained to produce an enhanced version of the input image. Afterwards, the enhanced image is passed into *OpenPose* [\[4\]](#page-6-9) to obtain the final estimated poses (Figure [1\)](#page-0-0). Experimental results indicate that our system can obtain human pose information effectively.
|
| 16 |
+
|
| 17 |
+
The paper thus makes the following contributions:
|
| 18 |
+
|
| 19 |
+
- A design for camera-based human-robot interaction, which can protect humans' privacy while obtaining their pose information effectively;
|
| 20 |
+
- A neural network architecture designed for image enhancement specifically aimed at pose estimation from translucent-film-filtered images[1](#page-0-1) ;
|
| 21 |
+
- An extensive quantitative evaluation of the proposed system providing insights on the effects of distance, background clutter, and film thickness.
|
| 22 |
+
|
| 23 |
+
# Method
|
| 24 |
+
|
| 25 |
+
No existing method has been proposed for boosting pose estimation performance on the type of shadow images considered in this work. That said, given the similarity of shadow images to hazy and low-light images in feature space, we evaluate the performance of two state-of-the-art methods focused on enhancing hazy and low-light images. Specifically, for this comparison study we choose AODNet [\[17\]](#page-6-6), which focuses on hazy image enhancement, and MBLLEN [\[22\]](#page-6-20), which focuses on low-light image enhancement. Then we use the same paired images as in the ablation study. Finally, we evaluate the pose estimation results (*i.e.*, DR and SmAP scores) of the enhanced images generated from the two state-of-the-art methods using *OpenPose*.
|
| 26 |
+
|
| 27 |
+
Table [III](#page-5-2) shows the performance of our method (i<sup>e</sup>) for i layers of filters, i ∈ {1, 2, 3}. i<sup>m</sup> indicates the output image that MBLLEN produces and i<sup>a</sup> the output image that AODNet obtains. We can conclude that our proposed network boosts pose estimation on the collected shadow images more effectively than AODNet and MBLLEN.
|
| 28 |
+
|
| 29 |
+
<span id="page-5-2"></span>
|
| 30 |
+
|
| 31 |
+
| Layers & Method / Metric | 2e | 2m | 2a | 3e | 3m | 3a |
|
| 32 |
+
|---------------------|-------|-------|-------|-------|-------|-------|
|
| 33 |
+
| DR | 0.959 | 0.605 | 0.730 | 0.954 | 0.531 | 0.582 |
|
| 34 |
+
| SmAP | 0.901 | 0.069 | 0.049 | 0.887 | 0.044 | 0.062 |
|
| 35 |
+
|
| 36 |
+
TABLE III: The evaluation table which shows the performances of AODNet and MBLLEN on the selected shadow image dataset.
|
2012.07489/paper.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:99b74d6d6b72506ee2390ebdf2cf9622327e71dd1eca4033314f1c82e26a2828
|
| 3 |
+
size 5835384
|
2101.08779/main_diagram/main_diagram.drawio
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2101.08779/paper_text/intro_method.md
ADDED
|
@@ -0,0 +1,25 @@
|
| 1 |
+
# Introduction
|
| 2 |
+
|
| 3 |
+
The ability to dance by composing movement patterns that align to musical beats is a fundamental aspect of human behavior. Dancing is a universal language found in all cultures [\[50\]](#page-9-0), and today, many people express themselves through dance on contemporary online media platforms. The most watched videos on YouTube are dance-centric music videos such as "Baby Shark Dance", and "Gangnam Style" [\[75\]](#page-10-0), making dance a more and more powerful tool to spread messages across the internet. However, dancing is a form of art that requires practice—even for humans, professional training is required to equip a dancer with a rich repertoire of dance motions to create an expressive choreography. Computationally, this is even more challenging as the task requires the ability to generate a continuous motion with high kinematic complexity that captures the non-linear relationship with the accompanying music.
|
| 4 |
+
|
| 5 |
+
In this work, we address these challenges by presenting a novel Full Attention Cross-modal Transformer (FACT) network, which can robustly generate realistic 3D dance motion from music, along with a large-scale multi-modal 3D dance motion dataset, AIST++, to train such a model. Specifically, given a piece of music and a short (2 seconds) seed motion, our model is able to generate a long sequence of realistic 3D dance motions. Our model effectively learns the music-motion correlation and can generate dance se-
|
| 6 |
+
|
| 7 |
+
<sup>∗</sup> equal contribution. Work performed while Ruilong was an intern at Google.
|
| 8 |
+
|
| 9 |
+
<span id="page-1-1"></span><span id="page-1-0"></span>
|
| 10 |
+
|
| 11 |
+
Figure 2: Cross-Modal Music Conditioned 3D Motion Generation Overview. Our proposed Full-Attention Cross-modal Transformer (FACT) network (details in Figure [3\)](#page-4-0) takes in a music piece and a 2-second sequence of seed motion, then auto-regressively generates long-range future motions that correlate with the input music.
|
| 12 |
+
|
| 13 |
+
quences that vary for different input music. We represent dance as a 3D motion sequence that consists of joint rotation and global translation, which enables easy transfer of our output for applications such as motion retargeting as shown in Figure [1.](#page-0-0)
|
| 14 |
+
|
| 15 |
+
In order to generate 3D dance motion from music, we propose a novel Full Attention Cross-modal Transformer (FACT) model, which employs an audio transformer and seed motion transformer to encode the inputs, which are then fused by a cross-modal transformer that models the distribution between audio and motion. This model is trained to predict N future motion sequences and at test time is applied in an auto-regressive manner to generate continuous motion. The success of our model relies on three key design choices: 1) the use of full-attention in an auto-regressive model, 2) future-N supervision, and 3) early fusion of two modalities. The combination of these choices is critical for training a model that can generate a long realistic dance motion that is attuned to the music. Although prior work has explored using transformers for motion generation [\[3\]](#page-8-1), we find that naively applying transformers to the 3D dance generation problem without these key choices does not lead to a very effective model.
|
| 16 |
+
|
| 17 |
+
In particular, we notice that because the context window in the motion domain is significantly smaller than that of language models, it is possible to apply full-attention transformers in an auto-regressive manner, which leads to a more powerful model. It is also critical that the full-attention transformer is trained to predict N possible future motions instead of one. These two design choices are key for preventing 3D motion from freezing or drifting after several auto-regressive steps as reported in prior works on 3D motion generation [\[4,](#page-8-2) [3\]](#page-8-1). Our model is trained to predict 20 future frames, but it is able to produce realistic 3D dance motion for over 1200 frames at test time. We also show that fusing the two modalities early, resulting in a deep cross-modal transformer, is important for training a model that generates different dance sequences for different music.
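As an illustration of how future-N supervision is used auto-regressively at test time, here is a hedged sketch; the `model` callable, the per-frame music features, and the choice of keeping only the first of the N predicted frames are our assumptions for exposition, not code from the paper:

```python
import numpy as np

def autoregressive_generate(model, music_feats, seed_motion, n_frames):
    """Generate n_frames of motion by repeatedly predicting N future frames and keeping the first."""
    context = [frame for frame in seed_motion]       # per-frame motion vectors of the 2-second seed
    t_seed = len(seed_motion)
    generated = []
    for t in range(n_frames):
        music_window = music_feats[t : t + t_seed]   # music features aligned with the current context
        preds = model(np.asarray(context[-t_seed:]), music_window)   # (N, motion_dim) future frames
        generated.append(preds[0])                   # keep only the first predicted frame
        context.append(preds[0])                     # shift the context window by one frame
    return np.stack(generated)
```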
|
| 18 |
+
|
| 19 |
+
In order to train the proposed model, we also address the problem of data. While there are a few motion capture datasets of dancers dancing to music, collecting mocap data requires heavily instrumented environments making these datasets severely limited in the number of available dance sequences, dancer and music diversity. In this work, we propose a new dataset called AIST++, which we build from the existing multi-view dance video database called AIST [\[82\]](#page-10-1). We use the multi-view videos to recover reliable 3D motion from this data. We will release code and this dataset for research purposes, where AIST++ can be a new benchmark for the task of 3D dance generation conditioned on music.
|
| 20 |
+
|
| 21 |
+
In summary, our contributions are as follows:
|
| 22 |
+
|
| 23 |
+
- We propose Full Attention Cross-Modal Transformer model, FACT, which can generate a long sequence of realistic 3D dance motion that is well correlated with the input music.
|
| 24 |
+
- We introduce AIST++ dataset containing 5.2 hours of 3D dance motions accompanied with music and multiview images, which to our knowledge is the largest dataset of such kind.
|
| 25 |
+
- We provide extensive evaluations validating our design choices and show that they are critical for high quality, multi-modal, long motion sequence generation.
|
2103.00180/paper.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:efd46e8ed657223ce090e092243ed3ef94ed624dd6221abad9cd5ebecc32e202
|
| 3 |
+
size 247591
|
2103.03501/paper.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:ca865b668b75f372c9c5cb7ea0e7085fcc49879c620296ddf78c31bf01e9220b
|
| 3 |
+
size 7595959
|
2104.03962/main_diagram/main_diagram.drawio
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2104.03962/paper_text/intro_method.md
ADDED
|
@@ -0,0 +1,56 @@
|
| 1 |
+
# Introduction
|
| 2 |
+
|
| 3 |
+
An intelligent agent must *anticipate* the outcome of its movement in order to navigate safely [\[14,](#page-8-0) [41\]](#page-9-0). Said differently, successful autonomous agents need to understand the dynamics of their observations and forecast likely future scenarios in order to successfully operate in an evolving environment. However, contemporary work in computer vision largely *analyzes* observations, i.e., it studies the apparent. For instance, classical semantic segmentation [\[8,](#page-8-1) [42\]](#page-9-1) aims to delineate the observed outline of objects. While understanding an observation is a first seminal step, it is only part of our job. Analyzing the currently observed frame means information is out of date by the time we know the outcome, regardless of the processing time. It is even more stale by the time an autonomous agent can perform an action. Successful agents therefore need to *anticipate* the future 'state' of the observed scene. An important question,
|
| 4 |
+
|
| 5 |
+

|
| 6 |
+
|
| 7 |
+
Figure 1. We study the novel task of 'panoptic segmentation forecasting' and propose a state-of-the-art method that models the motion of individual 'thing' instances separately while modeling 'stuff' as purely a function of estimated camera motion.
|
| 8 |
+
|
| 9 |
+
<span id="page-0-0"></span>however, remains open: what is a suitable 'state' representation for the future of an observed scene?
|
| 10 |
+
|
| 11 |
+
Panoptic segmentation recently emerged as a rich representation of a scene. Panoptic segmentation classifies each pixel as either belonging to a foreground instance, the union of which is referred to as 'things,' or as a background class, referred to as 'stuff' [\[23,](#page-8-2) [5\]](#page-8-3). This decomposition is useful for forecasting because we expect different dynamics for each component: 'stuff' moves because of the observer's motion, while 'things' move because of both observer and object motion. Use of panoptic segmentation is further underlined by the fact that it separates different instances of objects, each of which we expect to move individually.
|
| 12 |
+
|
| 13 |
+
Consequently, we propose to study the novel task of 'panoptic segmentation forecasting': given a set of observed frames, the goal is to forecast the panoptic segmentation for a set of unobserved frames (Fig. [1\)](#page-0-0). We also propose a first approach to forecasting future panoptic segmentations. In contrast to typical semantic forecasting [\[44,](#page-9-2) [52\]](#page-9-3),
|
| 14 |
+
|
| 15 |
+
<sup>\*</sup>Work done during an internship at Niantic.
|
| 16 |
+
|
| 17 |
+
we model the motion of individual object instances and the background separately. This makes instance information persistent during forecasting, and allows us to understand the motion of each moving object.
|
| 18 |
+
|
| 19 |
+
To the best of our knowledge, we are the first to forecast panoptic segmentations for future, *unseen* frames in an image sequence. We establish a leaderboard for this task on the challenging Cityscapes dataset [\[12\]](#page-8-4) and include a set of baseline algorithms. Our method for future panoptic segmentation relies on a number of innovations (Sec. [3.1\)](#page-2-0), that we ablate to prove their value. Our method also results in state-of-the-art on previously established tasks of future semantic and instance segmentation. Code implementing models and experiments can be found at [https://github.com/nianticlabs/](https://github.com/nianticlabs/panoptic-forecasting) [panoptic-forecasting](https://github.com/nianticlabs/panoptic-forecasting).
|
| 20 |
+
|
| 21 |
+
# Method
|
| 22 |
+
|
| 23 |
+
```
|
| 24 |
+
1: Input: Background semantics m_{T+F}^B;
          Foreground segmentations m_{T+F}^i, classes c^i, depths d_{T+F}^i, i = 1, ..., N
2: for (x, y) in {1, ..., W} x {1, ..., H} do
3:     S_{T+F}(x, y) <- [m_{T+F}^B(x, y), 0]
4: end for
5: sigma <- ArgSortDescending(d_{T+F}^1, ..., d_{T+F}^N)
6: for i in sigma do
7:     for (x, y) in {1, ..., W} x {1, ..., H} do
8:         if m_{T+F}^i(x, y) = 1 then
9:             S_{T+F}(x, y) <- [c^i, i]
10:        end if
11:    end for
12: end for
13: Return: future panoptic segmentation S_{T+F}
|
| 42 |
+
```
|
| 43 |
+
|
| 44 |
+
frame t while being 0 otherwise. MSE refers to mean squared error, i.e., MSE $(\widehat{\mathbf{r}}_t^i, \mathbf{r}_t^i)$ := $\frac{1}{J} \sum_{j=1}^J \left( (\widehat{\mathbf{r}}_t^{i})^{j} - (\mathbf{r}_t^{i})^{j} \right)^2$ , while SmoothL1 is given by
|
| 45 |
+
|
| 46 |
+
$$\begin{aligned} \operatorname{SmoothL1}(\mathbf{a},\mathbf{b}) &\coloneqq \frac{1}{J} \sum_{j=1}^{J} \operatorname{SmoothL1Fn}(\mathbf{a}^{j},\mathbf{b}^{j}) \quad (23) \\ \operatorname{SmoothL1Fn}(a,b) &\coloneqq \begin{cases} \frac{1}{2}(a-b)^{2}, & \text{if } |a-b| < 1, \\ |a-b| - \frac{1}{2}, & \text{otherwise,} \end{cases} \end{aligned}$$
|
| 47 |
+
|
| 48 |
+
where $\mathbf{a}$ and $\mathbf{b}$ are vector-valued inputs and $a$ and $b$ are scalars. We use $\lambda=0.1$, which was chosen to balance the magnitudes of the two losses.
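A NumPy sketch of the two ingredients exactly as defined above (how they are combined with $\lambda$ is specified earlier in the paper and is not reproduced here):

```python
import numpy as np

def mse(a, b):
    """Mean squared error over the J components of two vectors."""
    return float(np.mean((a - b) ** 2))

def smooth_l1(a, b):
    """SmoothL1 of Eqs. (23)-(24): quadratic when |a - b| < 1, linear otherwise, averaged over components."""
    d = np.abs(a - b)
    per_component = np.where(d < 1.0, 0.5 * (a - b) ** 2, d - 0.5)
    return float(np.mean(per_component))
```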
|
| 49 |
+
|
| 50 |
+
To train the refinement network we use the cross-entropy loss
|
| 51 |
+
|
| 52 |
+
$$\mathcal{L}_{bf} := \frac{1}{\sum_{x,y} \mathbf{1}_{t}^{bg}[x,y]} \sum_{x,y} \mathbf{1}_{t}^{bg}[x,y] \sum_{c} m_{t}^{i*}(x,y,c) \log \left( p_{t}^{i}(x,y) \right). \qquad (25)$$
|
| 55 |
+
|
| 56 |
+
Here, $\mathbf{1}_t^{\text{bg}}[x,y]$ is an indicator function which specifies whether pixel coordinates (x,y) are in the background of frame t, and $m_t^{i*}(x,y,c)=1$ if c is the correct class for pixel (x,y) and 0 otherwise. Other variables are as described in the main paper.
|
2104.09379/main_diagram/main_diagram.drawio
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
<mxfile host="Electron" modified="2021-08-30T07:52:15.514Z" agent="5.0 (Macintosh; Intel Mac OS X 10_14_6) AppleWebKit/537.36 (KHTML, like Gecko) draw.io/14.9.6 Chrome/89.0.4389.128 Electron/12.0.16 Safari/537.36" etag="5fgtTnhEYQGmiGAWB2aL" version="14.9.6" type="device"><diagram id="VLxcbhCezlpfyYszo4su" name="beta-gamma">7Vtdd7I4EP41nrN7oYckIHBZrbb7brf7vrVnt+1NT5QotEgoxFb76zdIUAJIpWKt3fZGM/kgmXnmmZlgG6g7nZ8F2Lf/ohZxG1Cx5g102oAQGCbkH5FkISSqpsSSSeBYQrYWDJxXIoTJsJljkVAayCh1mePLwhH1PDJikgwHAX2Rh42pKz/VxxOSEwxG2M1L/3UsZsdSIzlFJD8nzsQWT0aK6JjiZKwQhDa26EtKhHoN1A0oZfG36bxL3Eh5iVrOB93riWqj2ZVmWw/3uH03/NWMF+tXmbI6QUA89u6lX30Svl4++c3OHxaBl3eePXoWU5Rn7M6EusRZ2SLRX0BnnkWiRUADdV5sh5GBj0dR7wuHDJfZbOqK7pAF9HGlZ8QlOBgJTOi8NXZct0tdGizXRv1+r3eqcvmWJxSaeCYBI/OUecWJzwidEhYs+BDRi4x4RgJe2NJiwcsaCrouDm2nYAB1MRAL+E1Wa691zL8INRerfPTr8kdgLcZd7969sJ3pGb19aqISDStcFTRgNp1QD7sXlPpCrw+EsYXQI54xKmudzB12k/p+Gy3FTxq3Tudi5WVjkTQ8fpibdCOeBbWkvZ63bCUT8ya2cGivAFLNkiGdBSNSAlBTMAYOJoSVaFWYkFgSGeRxERAXM+dZ5obabaznbBxtbCCaHvX4R2cvZgcfZXQXD4n7k4YOc6jHZSM+gXC37kTO6XD6vcgMmDqWFR2/g11nUjjjRHSsRoacZxxvcr3Uh3k4cIFPBS6zgLPbLovYlXK1pGHXfprRpKMZLnF0wgfAtj9fd0ZBFY/kCRezkWNh3tOlXkj5aqnR7Yn4XD7TWQm6vUZntRV+Mic7NJwNExEAqYEpeUoaHyYRF7jTEmCyR2wPrYBwdeDhcr0I4D51PLa0lNZpaKcJwDt49DhZOmoStYTz5pFYRgS5gLVKd8QWpJSiKJA1lZbShqYUzQTnbY06sfjP6KiplYEqx0hDXoGOxyH3jixqV1vcCshl7rcDkBGUgRyj5WZrFLkuz0LJ22kNDv04NR0784ixC/gwelAqsxkv//IZj2Uoio72mPEATbImyuc7SeabTncSWe1kpX7ubEevHvhqjkFJmXRkGY5xsAxHKTV03l7vNv2hDF17tlHM+0BVZN7PckC8UTGrfvYHRbVnHfR/+4npv61gE+gfR/8wCe4fwP/FVgZ7svLdZ7YyVpSlfF9W1o1MzgZgVMMdNNJrnzvSf2SgL6P1I4vzu7tvttiUC73dVhkw4m/kgTq3Gc78RPQb+F2qXf3s2A0beYunKt2zRisJt0F59MKVJEVKqI1MZOXJqn/a73eMPZLV6gI9Iavkhj3FVADBVjtPVrra2tc1bPtwCewbDLZ9Cru/DLaMmdIMVhrz953BaghlMtgMWOrLYMv08c2Na26ER86NlW+1h5QxOm1scfXIIsqQr7Q5CaVfkLUPQM6aIpMzKkwkgaHlyXn1hncXbu5cPt3fnj/MnD/t6fXTlf98ga8K30oe3Q03bHy1G+5KiKt8ww0gbJmmrvDyCeomQgDK5K4i3m1yxBq6oenQgFXDRTNTGqvyArXccxfCuf4c+uPhDL/eC5v9whmaMrGqaktXU/AFNSS2hWirPys5ANq+ybMa2lSoyuQpc2d0PaRux507YQ99Beyhb+xVwh7K1Pe8mN81UiM59sMMV9YSqg19+tg3yH2T/HNh/3BOxj+bbknmyRN37/210t/pUixeamMRVdlllKInytdUG4qugqfudk7+iadRneMNQz924MxxavPntJvm/LnkyIXKz3n5tsoHxu58BbYxVGqHdRXH61I4XRxnKtCO1u31+7vTyuYKdP2jzFSqVFCDtvf0NqOQCDbn7EdHBPCbCI6ECOA2hjoUEfSNk95J9R9h7kIEAIDDM8HmeuromAB9M8GRMAHaxlD/o5QA6OY+mYA31//HE9cS6/+GQr3/AA==</diagram></mxfile>
|
2104.09379/main_diagram/main_diagram.pdf
ADDED
|
Binary file (28.8 kB). View file
|
|
|
2104.09379/paper_text/intro_method.md
ADDED
|
@@ -0,0 +1,71 @@
|
| 1 |
+
# Method
|
| 2 |
+
|
| 3 |
+
<figure id="ntu-archs" data-latex-placement="b">
|
| 4 |
+
<div class="minipage">
|
| 5 |
+
|
| 6 |
+
</div>
|
| 7 |
+
<div class="minipage">
|
| 8 |
+
|
| 9 |
+
</div>
|
| 10 |
+
<figcaption>The top-4 architectures found by BM-NAS on NTU <span class="citation" data-cites="shahroudy2016ntu"></span> dataset. <strong>‘NTU Config 1’ is the best architecture found on NTU dataset.</strong> ‘C1_S1’ denotes <span class="math inline">Step<sup>(1)</sup></span> of <span class="math inline">Cell<sup>(1)</sup></span>, and so on. The blue edges are the connections at the upper level, and the dark edges are the connections at the lower level.</figcaption>
|
| 11 |
+
</figure>
|
| 12 |
+
|
| 13 |
+
We tune the hyper parameters extensively on NTU RGB-D [@shahroudy2016ntu] dataset. The top-4 configurations are shown in Table [\[config\]](#config){reference-type="ref" reference="config"}, and the architectures found under these configurations are shown in Fig. [8](#ntu-archs){reference-type="ref" reference="ntu-archs"}. The 'NTU Config 1' is the best architecture found by our BM-NAS framework.
|
| 14 |
+
|
| 15 |
+
For the feature selection strategy, we find that *Video_3*, *Video_4*, and *Skeleton_4* are always selected by our BM-NAS framework no matter how many Cells and steps are used. This indicates that these are the most effective modality features. In particular, *Video_3* is strongly favored in all the found architectures. MFAS [@perez2019mfas] also selects *Video_4* and *Skeleton_4* in every found architecture, but it does not pay much attention to *Video_3*.
|
| 16 |
+
|
| 17 |
+
For the fusion strategy, we find that adding more inner steps (increasing $M$) is more effective than adding more cells (increasing $N$). However, since we have $N \times M$ steps in total, setting $N$ or $M$ too large would easily lead to overfitting. Roughly, we find that setting $N=2$, $M=2$ is a good option. $N=2$ means we have two different feature pairs for Cells, which is sufficient to cover the three most important features *Video_3*, *Video_4* and *Skeleton_4*. And $M=2$ is sufficient for BM-NAS to form all the fusion strategies, such as concatenation and attention on attention (AoA) [@huang2019aoa], as shown in the paper. The best fusion strategy found by BM-NAS on NTU is very similar to an AoA [@huang2019aoa] module, see 'NTU Config 1' in Fig. [8](#ntu-archs){reference-type="ref" reference="ntu-archs"}.
|
| 18 |
+
|
| 19 |
+
<figure id="ego-archs" data-latex-placement="b">
|
| 20 |
+
<div class="minipage">
|
| 21 |
+
|
| 22 |
+
</div>
|
| 23 |
+
<div class="minipage">
|
| 24 |
+
|
| 25 |
+
</div>
|
| 26 |
+
<figcaption>The top-4 architectures found by BM-NAS on EgoGesture<span class="citation" data-cites="zhang2018egogesture"></span> dataset. <strong>‘Ego Config 1’ is the best architecture found on EgoGesture dataset.</strong> </figcaption>
|
| 27 |
+
</figure>
|
| 28 |
+
|
| 29 |
+
For the experiments on EgoGesture [@zhang2018egogesture] dataset, we basically follow the settings as those in NTU RGB-D dataset. The top-4 configurations are shown in Table [\[config\]](#config){reference-type="ref" reference="config"}, and the architectures found under these configurations are shown in Fig. [9](#ego-archs){reference-type="ref" reference="ego-archs"}. The 'Ego Config 1' is the best architecture found by our BM-NAS framework.
|
| 30 |
+
|
| 31 |
+
For feature selection strategy, we find *Depth_1*, *Depth_2*, and *RGB_2* are the most important features for EgoGesture [@zhang2018egogesture].
|
| 32 |
+
|
| 33 |
+
For fusion strategy, we find that a combination of $\mathrm{Sums}$ is the most effective, shown as 'Ego Config 1' in Fig. [9](#ego-archs){reference-type="ref" reference="ego-archs"}, probably because the backbone models share the same architecture. Unlike the experiments on NTU RGB-D [@shahroudy2016ntu] which use Inflated ResNet-50 [@baradel2018inflated] for RGB videos and Co-occurrence [@li2018co] for skeletons modality, EgoGesture [@zhang2018egogesture] uses ResNeXt-101 [@kopuklu2019real] backbone for both the depth videos and the RGB videos. These two backbone models have exactly the same architecture, except for the input channels of the first convolutional layer. Therefore, the depth features and the RGB features probably share the semantic levels for features of the same depths, such as *Depth_2* and *RGB_2* in 'Ego Config 1'.
|
| 34 |
+
|
| 35 |
+
<figure id="fig-best-mmimdb-model" data-latex-placement="h">
|
| 36 |
+
<div class="center">
|
| 37 |
+
<embed src="sup_figures/best_mmimdb_model.pdf" style="width:100.0%" />
|
| 38 |
+
</div>
|
| 39 |
+
<figcaption>MM-IMDB Config 1, which is the best architecture found by BM-NAS on MM-IMDB <span class="citation" data-cites="arevalo2017gmu"></span> dataset. </figcaption>
|
| 40 |
+
</figure>
|
| 41 |
+
|
| 42 |
+
We do not tune the hyper-parameters extensively on MM-IMDB [@arevalo2017gmu] since it is a relatively simple task compared with NTU RGB-D [@shahroudy2016ntu] and EgoGesture [@zhang2018egogesture]. The configuration can be found in Table [\[config\]](#config){reference-type="ref" reference="config"}. As shown in Fig. [10](#fig-best-mmimdb-model){reference-type="ref" reference="fig-best-mmimdb-model"}, we find *Image_2* and *Text_0* are the most important modality features. The best fusion operation is *ConcatFC* for *Image_2* and *Text_0*, and *LinearGLU* for *Cell_0* and *Text_0*.
|
| 43 |
+
|
| 44 |
+
It is worth noting that we use the Weighted F1 score (F1-W) as the metric for performance, since we perform a multi-label classification task on the MM-IMDB [@arevalo2017gmu] dataset. Although the Macro F1 score (F1-M) is also reported in the paper, we only use F1-W for model selection, because the distribution of labels in MM-IMDB [@arevalo2017gmu] is highly imbalanced, as illustrated in Table [7](#mmimdb-labels){reference-type="ref" reference="mmimdb-labels"}. Thus, F1-W would be a better metric as F1-M does not take label imbalance into account.
|
| 45 |
+
|
| 46 |
+
:::: minipage
|
| 47 |
+
::: {#mmimdb-labels}
|
| 48 |
+
**Label** **#Samples** **Label** **#Samples**
|
| 49 |
+
------------- -------------- ------------ --------------
|
| 50 |
+
Drama 13967 War 1335
|
| 51 |
+
Comedy 8592 History 1143
|
| 52 |
+
Romance 5364 Music 1045
|
| 53 |
+
Thriller 5192 Animation 997
|
| 54 |
+
Crime 3838 Musical 841
|
| 55 |
+
Action 3550 Western 705
|
| 56 |
+
Adventure 2710 Sport 634
|
| 57 |
+
Horror 2703 Short 471
|
| 58 |
+
Documentary 2082 Film-Noir 338
|
| 59 |
+
Mystery 2057 News 64
|
| 60 |
+
Sci-Fi 1991 Adult 4
|
| 61 |
+
Fantasy 1933 Talk-Show 2
|
| 62 |
+
Family 1668 Reality-TV 1
|
| 63 |
+
Biography 1343
|
| 64 |
+
|
| 65 |
+
: Label distribution of MM-IMDB[@arevalo2017gmu] dataset.
|
| 66 |
+
:::
|
| 67 |
+
::::
|
| 68 |
+
|
| 69 |
+
[^1]: We enforce the Cells to have different predecessors.
|
| 70 |
+
|
| 71 |
+
[^2]: We don't enforce the step nodes to have different predecessors.
|
2105.01294/main_diagram/main_diagram.drawio
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
<mxfile host="app.diagrams.net" modified="2021-04-06T00:21:48.977Z" agent="5.0 (Macintosh; Intel Mac OS X 10_15_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.114 Safari/537.36" etag="GfHgi2Y5FeG5wrMKHAkN" version="14.5.7" type="google"><diagram id="rVkYfM6V5xoIUByvUdLa" name="Page-1">7Vpdc6M2FP01ftwMIMDmMf7Y3U6TJrPpzG760pEtGbSWERVyYvfXV4AwiA8be43r6TYPDjoISdxz7pXutQdgst5+4jAKHhnCdGAZaDsA04FlebYrPxNglwGOY2WAzwnKILMAXsjfWIGGQjcE4VjrKBijgkQ6uGBhiBdCwyDn7F3vtmRUnzWCvprRKICXBaS41u0rQSLI0JE1LPDPmPhBPrPpetmdNcw7q4HjACL2XoLAbAAmnDGRXa23E0wT2+V2yZ772HJ3vzCOQ9Hlgcfp99VvaPuHs0JBtLRff/3Te/6gRnmDdKNeWC1W7HIL4BDdJ4aUrZCFEhwHYk1ly5SXcm6++yYbxp3nWTnwmgKOk7eniRqMfWtXbj1jTtZYYK7A+mupNcZswxf4wLs4WT+M/DJ3yhifMJOz8J3s8F7w6ChughKFOcYxhYK86TqASk7+frj9DM+MyBVbhlL+fhwlfGACfYjsfdRTZd4qA5m2d2cAr/hzD48rIPexqI0rL0pWKKBUJScoBnRQjGTgRTUZFwHzWQjprEDHnG1ChJGivOjzwFiklPUdC7FT8QBuBNN1d75KbBVEUitdQE2dZaIRcarV7Qaru1SufxxHMNTM7/61SULKeMlC8SFODXgvO5h2tC1uyis/+f+F/SLvPcuISkI/H1KuMBs161PjV0ayKLlcbOYJme8BEfglgqnZ3+UuoFM1z7h+mO8BuFj5qQKeNkLOixWOIF89yWGISAPEneHooJWiSc/srabmwXjxhrnA24PcbXWPVx5leYr7UqBwGwJFjl2cbKcnssdMgsZnDNHPyrQJKkHZMGpUm8aBTeHiXHu9cv0F+xzHMeM/K+H2sEL4sE74qInv4Z3TE+P5ybYnyicUxjFZEvw/5wecvInz3uK52XTKzrhJiE3YyPOenFOzTm/+yLyTQobtu322AUhrSGPIFqR0syAhFFqUmFenlVi22BYVSXKErpRYcLbCE0YZL3KHJaG0AkFK/DDRn6Q8SQPGCdVEpmD36saaIETbpKkfJS+xR9jHQ4YFGvQDetOP1VPEaOH+tqNFWUEDCyAHj5Bdk5u8M7LmwHVL8QVcRiD74JGfF+2Oh4jeAgwYNQjklDS+1So3kkyDislB1de6JtPVgWyjMlDf2XPTea+dqUW6mS9aay5GqulyzUVOcMmiy9E02btpndTo7aoTp5Io2KPr6sRuOiT+lzy6Vh4716Nr2fu1mTqthHrEo3Vvdt3hdb05P2ncqEhq3HYWiacPZA6vLJKLhv1ey+xHNWLflkQqYcQanamQag2wN4GIyJttlw+T6aP4+jsFzGJPbzdVVa8we7TM3vhCnSOOMWhUz8XL6odWecjseb7D+IxSEsWJoaOSM+EMLfnX0ZyoJQU6PbFBMA5SvgvyamlMA33tlRPT1r3JrFfC+8p8G/n5V7wAb4nYx1l5/Vq6TkNsktypZmIokLeaY+yp7nToK4GyO7UL+ge8qSU4Vsvm5x7WaqW56kA9h9mmb1bOz74cbSM+sgt3F8O5JF9nw7UtT98pzz6TVVOsanmkZy24J2mhLcM6Tuit5MJnE3WU8bOJks3i9y9Z9+JHRGD2Dw==</diagram></mxfile>
|
2105.01294/main_diagram/main_diagram.pdf
ADDED
|
Binary file (17.3 kB). View file
|
|
|
2105.01294/paper_text/intro_method.md
ADDED
|
@@ -0,0 +1,25 @@
|
| 1 |
+
# Introduction
|
| 2 |
+
|
| 3 |
+
Modern deep convolutional neural networks (CNNs) rely heavily on large amounts of annotated images [\[29\]](#page-8-0). This data-hungry nature limits their applicability to some practical scenarios such as autonomous driving, where the cost of annotating examples is prohibitive, or which involve *never-before-seen* concepts [\[9](#page-8-1)[,51\]](#page-9-0). By contrast, humans can rapidly grasp a new concept and make meaningful generalizations, even from a single example [\[31\]](#page-8-2). To bridge this gap, there has been a recent resurgence of interest in fewshot or low-shot learning that aims to learn novel concepts from very few labeled examples [\[8,](#page-8-3) [10,](#page-8-4) [34,](#page-8-5) [37,](#page-9-1) [42\]](#page-9-2).
|
| 4 |
+
|
| 5 |
+
Despite notable successes, most of the existing work has focused on simple classification tasks with artificial settings and small-scale datasets [\[34,](#page-8-5) [37\]](#page-9-1). However, few-shot object detection, a task of great practical importance that learns an object detector from only a few annotated bounding box examples [\[18,](#page-8-6) [38,](#page-9-3) [39\]](#page-9-4), is far less explored. Few-shot detection requires determining *where* an object is as well as *what* it is (and handling distracting background regions [\[13\]](#page-8-7), *etc*.), and is much harder than few-shot classification. The most
|
| 6 |
+
|
| 7 |
+

|
| 8 |
+
|
| 9 |
+
<span id="page-0-0"></span>Figure 1. Learning to detect a novel class, *fennec fox*, from a single training example (*i.e*., 1-shot detection) using a serial detector. The region proposal network (RPN) generates a few high intersection-over-union (IoU) boxes for the detector's classifier. The pink circle represents the classifier decision boundary learned from these boxes. Due to a lack of sample variation, the decision boundary is not accurately estimated. With hallucinated examples (image in backslash) produced by our hallucinator, the classifier learns a better decision boundary (the dotted circle), thus being able to potentially correct previously misclassified instances.
|
| 10 |
+
|
| 11 |
+
difficult regime occurs when there are *very limited examples* (less than 3) for novel classes (Figure [1\)](#page-0-0), which is a common yet extremely challenging case in the real world.
|
| 12 |
+
|
| 13 |
+
While few-shot classification approaches are helpful (*e.g*., [\[2,](#page-8-8) [4,](#page-8-9) [18,](#page-8-6) [33,](#page-8-10) [41\]](#page-9-5)), few-shot detection is much more than a straightforward application of few-shot classification approaches. The state-of-the-art two-stage fine-tuning approach (TFA) [\[38\]](#page-9-3) learns a better representation for few-shot detection, through (1) pre-training on base classes with abundant data and then only fine-tuning the box classifier and regressor on novel classes, and (2) introducing instance-level feature normalization to the box classifier during fine-tuning. Despite the improvement, its performance in the extremely low-data regime is still far from satisfactory.
|
| 14 |
+
|
| 15 |
+
We argue that, to fully improve *extremely-few-shot* detection performance, a key factor is to effectively deal with *the lack of variation in training data*. This is because for an object detector to be accurate, its classifier must build a useful model of variation in appearance with very few examples. More concretely, a modern object detector first finds promising image locations, typically boxes, using a region proposal network (RPN) [\[28\]](#page-8-11), then passes promising boxes <span id="page-1-0"></span>through a classifier to determine what object is present, and finally performs various cleanup operations such as nonmaximum suppression (NMS), aimed at avoiding duplicate predictions and improving localization. Now assume that the detector must learn to detect a novel category from a single example (Figure [1\)](#page-0-0). The only way the classifier can build a model of the category's variation in appearance is by learning from the high intersection-over-union (IoU) boxes reported by the RPN. Although there is variation of boxes produced by the RPN, the variation from a single example is too weak to train the classifier for the novel class.
|
| 16 |
+
|
| 17 |
+
To overcome this issue, one strategy is to adjust the learning procedure for the RPN, so that it reports highly informative boxes. Contemporary work [\[48\]](#page-9-6) achieves this by training multiple RPNs to be somewhat redundant and to cooperate. Hence, if one RPN misses a highly informative box, another will get it. This cooperating RPNs (CoRPNs) approach, while helpful, is still insufficient. In the extremely-few-shot regime, all positive novel class proposals produced by the multiple RPNs are only slightly modified from, and thus similar to, the few available positive instances (with lightweight cropping and scaling operations); their variation is significantly limited for building a strong classifier.
|
| 18 |
+
|
| 19 |
+
In this paper, we propose a different perspective on building a model of variation for novel classes, *by transferring the shared within-class variation from base classes*. In fact, many modes of variation in the visual world (*e.g*., camera pose, lighting changes, and even articulation) are shared across categories and can generalize to unseen classes [\[30\]](#page-8-12). While such within-class variation is difficult to be encoded through the proposal generation procedure, it can be effectively captured by *learning to hallucinate examples* [\[40\]](#page-9-7).
|
| 20 |
+
|
| 21 |
+
To this end, we introduce a *hallucinator network* into a modern object detection model. The hallucinator network performs data hallucination for the box classifier in the learned region of interest (RoI) feature space. We train the hallucinator on data-abundant base classes, encoding the rich structure of their shared modes of variation. We then use the learned hallucinator to generate additional novel class examples and thus produce an augmented training set for building better classifiers, as shown in Figure [1.](#page-0-0)
|
| 22 |
+
|
| 23 |
+
Note that the existing strategy for training the hallucinator in few-shot classification [\[40\]](#page-9-7) is coupled with a complicated meta-learning process, making it difficult to apply to state-of-the-art few-shot detectors like TFA [\[38\]](#page-9-3) or CoRPNs [\[48\]](#page-9-6). We overcome this challenge by introducing a much simpler yet effective training procedure: we train our hallucinator and the detector's classifier in an *EM-like* (expectation-maximization) manner, where a "strongest possible" classifier is trained first with all the available base class data; the hallucinator is then trained under the guidance of this already-trained classifier; and finally, the classifier is re-trained and refined based on the set of augmented examples (with hallucinated examples) on novel classes.
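A hypothetical, very high-level sketch of this EM-like alternation (all names here, including `build_classifier`, `build_hallucinator`, and `generate`, are placeholders for the actual training routines, which the paper defines in detail):

```python
def em_like_training(base_data, novel_shots, build_classifier, build_hallucinator):
    # (1) train the "strongest possible" box classifier on abundant base-class data
    classifier = build_classifier(base_data)
    # (2) train the hallucinator in RoI feature space under the guidance of the fixed classifier
    hallucinator = build_hallucinator(base_data, guidance=classifier)
    # (3) hallucinate additional novel-class examples and re-train / refine the classifier on them
    augmented = list(novel_shots) + list(hallucinator.generate(novel_shots))
    classifier = build_classifier(augmented)
    return classifier, hallucinator
```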
|
| 24 |
+
|
| 25 |
+
Our contributions are three-fold. (1) We investigate a critical yet under-explored issue in extremely-few-shot detection (*e.g*., as few as one) – the lack of variation in training data. (2) We propose a novel data hallucination based approach to address this issue, which effectively transfers shared modes of within-class variation from base classes to novel classes. Our approach is simple, general, and can work with different region proposal procedures. (3) Our approach significantly outperforms the state-of-the-art TFA [\[38\]](#page-9-3) and most recent cooperating RPN's [\[48\]](#page-9-6) detectors in the extremely-few-shot regime. Our code is available at <https://github.com/pppplin/HallucFsDet>.
|
2106.01425/paper.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:a48860ed28f1ee69f75578751bf7c594d27598b21b2c481a2534b6514d97c494
|
| 3 |
+
size 5621479
|
2106.02740/paper.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:5e8a2fcd8c2d6618c05df84025375938dc62789858650ac9c25767ce84cb0e02
|
| 3 |
+
size 33391307
|
2106.13948/paper.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:3975751213868f21fea34b6f353cdb7a8425d5216803ad976eced7d881d54ad7
|
| 3 |
+
size 834156
|
2107.05757/main_diagram/main_diagram.drawio
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
<mxfile host="Electron" modified="2021-05-31T17:12:14.197Z" agent="5.0 (Macintosh; Intel Mac OS X 11_0_1) AppleWebKit/537.36 (KHTML, like Gecko) draw.io/14.6.13 Chrome/89.0.4389.128 Electron/12.0.7 Safari/537.36" version="14.6.13" etag="eP4HhV-A4ZEpokFvxPi2" type="device"><diagram id="bUHDOelsTeNF3ZFeg6Xk">7V1tj6M4Ev41ke4+TITN+8d+m73T3EqjnZPu5tOKTuiEnSRkCD3duV9/dmMT29i8JMYkJNnRdiBgoJ6qctXjspnYD+v337Jou/w9ncerCbTm7xP7cQJh6Hjo/3jHvtjhQbfYsciSebELHHZ8S/4Xk50W2fuazOMdd2Cepqs82fI7Z+lmE89ybl+UZekbf9hLuuKvuo0W5IrWYce3WbSKK4f9J5nny2Jv4DJH/yNOFkt6ZWCRX9YRPZg0sVtG8/SNuZb9NLEfsjTNi2/r94d4hWVH5VI09Fnxa3ljWbzJ25wAixN+RatX8mzkvvI9fdhFlr5uyWFxlsfvMhFHzytRYodbAOWDIYWI03WcZ3t0CGkIhmFxClEG2yNNvB1E6wd+sW/JiNUOyIERgXNRtn14YvSFPLRcAHazANDzb+YxPt6a2PdvyySPv22jGf71Dak32rfM16j9R4C+7vIs/RE/pKs0Q3s26QYddq+UHislKJeSESk4Eil4K3SF+5cU3R8rDu/na0p/+LT7MM07dACwtu+HH9G3Bf77Jc42H6b/RzJHJoX+xoss3u2SdIM2iF8oLoRusbhWceZpKLwkqxXFYALtFxf/R26a2V98TgSInODZ7tTlFTmkOxgMS1fAYVgeeQqIrgREB/9zH/JkNY8n/j36il0Q8mRoYz/xH/E/9yknB4oyR9LIW6g3K22yK1oliw3anCHpxWj/PZZtgq57R35YJ/M5vowUSR5rHeD4lgQLWwIF1GBNXkWQMdL+b2QzzfJlukg30erpsFd45MMx/0rTLZH9X3Ge70lnGL3mKY/MPNotP86v038ePvSL9/FBv8TvSf5ffPWpS7a+k3vB3x/f2Y093dggwTAn4c3v5A4+Ng6nfWztOUCxTOrhRCJMX7NZzPmoPMoWcc5ofBX0LF5FefKLb10GKDn1a5p8ODmiLL7rcXbshnwLxR2Qkw5acZdl0Z45bIsP2Kkv4wX8ZfieGn0pGjxoXPmErZTQb3TpNKyiThtU/Tc9JWnVBdiyLuCfm5cY4TqLJQ4/Ea/znIl7mrqF8bgoegrtPWy/2nnAsOqxHA0eK1B3Hdtd8jeu28hQn/HnO+41/j76bsPle3RoQ2OdSNgVktl1QBLCwSCh6efNTHhMfEvsyQxiAm52IsXEsYfDRMYq3DCBnuULmNjmMJERHTdMoOc4w2Eio10EQZ/OvtU/vxME3PMHgSRRLtvlJKBFBDLSQrsICkErZfAJVJ7YAzLIPQ0P7DU/8ElU18sLnM1kqf7ce/Zcr63VAEVuTc4IBK6LMvqMCGkHpN1qZJmtIMF4M7/DPD/2M6tot0tmvNAObIcTOgzf8cmaWpQN0ct54Ea+xlmCHhd7vbY8CLUPlghhBgqk2kz58z3VbqeCjSuxZ7cGGzmDAcNpAMPDB/LdG4CCvRQPUSFOOrcr2qGCkDmCNAGyRFindTYTcSdR1A1WC3ijBbRja+rroAarleWzXa22P76RSoZlF1X0onk74y8UHGlWAJmVbR0+fODj9WZUUJU2L//EQxPLOI8uP8pssjweQmBXQw6npygTqjJkPuZ/L4eCUIsW99MDTgfGkAp0BMmXBDV9gSRLmU9wjxbgHCSKahoDFLylK0KhRRmsN+0atVT7Jj3e1BZQ9lxx8LWtQ620BMSWNDpRVf6uYh0ldjyqtL6bLQcGTVmW1ROm5YDFC8YCcy/LZPRYCCNbslyxLyxqaiKqWCyi9TpC5gHGi8gAELQgPNqn62dRnNCg7uYSAxtF9C6TKEtdXudera7RHrMErbROez25NLxZYoQf4nGOBNzhbx/CHqOYFvxKE8itARNi0nMBEVpTHxww5On3YxkzvoftFUIZkdOtVjSQ1QmRaJaJXfckUsVBbLIRg9nHj6RUWjgUrXGvuXne4T/opOdo9mOHVGkpyWppVz+6aqOOuW5oLiKgaVMlKPs53rhLYPJMSFlF+2xHL+XqCG5vQpbRNh0L54HaG345yju1hY0dpkDNkspi6NBt0jCoA7QC/TFo1o/OgkN1PF8py41Qe1WAbR0A29X4pP9abhrH0u/fuZi2W75zyHDk+c6JdeMtojEa4jHBGIWGJQhBUaGk1gNvGrIfocDJ04F2i4KMAQepYFVoXj2r2iCzoFpirCfGDYURW3qhrnFt6NUNWQFPaFZflGu3qEvRkKjIEK2fLffJnlrsx+Ul3Rui0NYEKRItyn8GwXQgJupIPaj3hkPpgd1gkseqRVO7PapFC+LpHCqy6vUh9AVPz4vPqeqDbUsmk2gJinsuoplHcfAiFag3C+LnFz0C9cWukxeoJRFob1lGi9qZc65Kqhd0UCvostyvSdA6pkE5KmaCqVQZxfBQAyJCmTqsQtCXqjsq1oJDYPTzlct8dggIZJxGBYLN2CGwh7SCdsUepGQLqEq2wAjKPLp5Ks8gRqoyj+srq+voy0yCpKr/kIK0UYG0GT1IFW9nEqTxVYjQB2CXryA+nc3xnfNk78qYmy5FYB3L9XiCVrm9ZfFOiyz+EpXo7Agg5FOnwpxBKJIzrfVD0pbTn4poKD65qUgLFXHEec7H6keloR6VQ7XehqK+WhZ0l/XVVxd4A4ku9RUuuDKSpqsZlybJDZUe7HNsJmk7QpcOjjVJ2NCQPpN0VVzQ6OkfMRq3JSRob+YFuznC0U806ZbdGvWEHecEyfLbEqrry3GNQqWjsuSSYs+hcldg1Y4jlyxu58nTDe2KKbLGblBDKcpFqc6Qma1vsetO6FGdpnZ7VJ2RMmtnpzpOT16nqd0eVUfGpylnTuJZrFcwMC5M1pFV1fQWQNQs0arCY/R5UllqNAQgNQu0qgAZ/6D5gBbiyXghJSAjmutdD4kvLp5lEJGatVmvGBFxeM0oJCp257ohsYe0khavoBlVkD4YNYAurF7ZAAQ+lzsIr41ozRPUX8SrvYi+8N27Nr5pMNLA9aZ+ZQ6EDk2StWxMfW6ckxn1cUz4pIaLGFOqM59/dZ4dlnq9FyBWQ7Tvo+Ta3QPk57uoz3n6gw+nz8yOFEcXj4UcKh6gB8gHqrK6WMgd0cr1IO6YM3IdSztfUrAw3Kx6buWokF/0CRxbpVvfbH964+so5LokvRlu7r439ZjrQleP4jS026PmyIjFm+b0Mdtf8A16FKe+2R71Bkr0hrCfs3mK71DybeTUp1BjbbIqypdRn6e8pLTY8W2Jnn5eXRrwPpr9eMYg3N5AekRYI5uJT4tFdU/Ep7zXzVBrDNXg2Kpfs47
2DQ+hNzSBh4xKu+HB+yuDE/Z9Gc91w4PHw+CUYl9GQnVblzNUL8v5k1T04DJ7dG+LqAAViXdSP8X/ygIKCGsDCqMd2OmrVtcoxLYslmhWCbputcUtTF2NPm9q1FaNDPa7gYyy0rv4OZ5YuidzSGtWPgenrnwOrlSbxIUuDKa7gYy20r90Pl03v6INt0X0dTumAXVJRmXp1aUN0SU8M7CFgii0a3Oqdm2uVLsqS/KY1C7VzNIRvKKhXurCytW0qNyEzFWc1whe2NDQJVuDyVzFa41ez/Ey3O5gYlfRV6NXdQiCAcWuYqlGr+14dfHhxK6aujd6bbd98dUtJsVeM0HvWjlZQAfsBhjsDWvm593wmBinREMZG3HDg8fDILdIl4W44aHGw+SYXqjKgW94DDKmF1blavBVbNYU9l8nOMi72Io3fx47+lL32jV58Z8nTGori4j01/uF7rAq499URovK+FZoTGVk1ISON3o+pOttHqOf//j8uS2X3emlPuq3IQlq8Xj3FHx+0NMheEIlclnUz3QIdH4I3yGIb6g+qkvwh7Xvo8wbWgFr4NY0dIIGI//Y+hpnCZIQjgIGtHzCZbCr2PfnDVrrQavFkdglS8cdpolWacveVtZXmBZeoE3659/nAss6R9Mrb6uF7c2uwvaEF9iZtD1AJ++fg/GBlsbHd4ZNK7APZHuws+0ZeF11eVunBKuWugqE1jLScsVdskZfjFe/9mzzWZpHeZLis3D0KtGeYs6dHvfg0kXRagLmoDf3MOjb7I/sm53Q5jrnqXVB8TIoVmztw0so2BXHmlquDXieDjhw6gTsVF2+XX35M6BL1Pb2stpgFstfp/wcuI6rqxe3eDMFkoFLX5bXQjVg7c10YNrqONYKCGaKV5m5HDPtrTNXMFp2MLUsJywnQQvmak+Bz5hrbzOiyYNfvLmK684aNddhWCiGUTrP7JUNls33g9Dilkzg+0ModoCKlQuOsqhgYHU4UzJjWHXwBlOHS+TDbODxaXkYhGfdmfPKZbozh7YxdQLWzbucnwK4fIuBOXU4fUqZfLiyZIDyZDWPJ/79F/yO7GOYnLY8LhtDomaJLqLLkW3SMKhjeyq8kP7l9KFYNy+rEaPLNrKxpq2DwaF5/XWYf5/rQAoJIDUk7vXcck3I4lWUJ7/49pXO4i7Loj1zwBab/a7Gl9h8LkiGE1RLafkA1ByOvhTXV5xcKYsvFawPXzUM+3hRedIgKuc5dTokiaVPUDnMT4r+0+tP5WRkpNYBku0ukbxtk7z7tOFnyaIPT/++0tmuOF/ngjJeR6jKGagzBUA2KXB4pXm/KU0lGAPB1BlCadBmlmJsD44JPeLy93Qe4yP+Dw==</diagram></mxfile>
|
2107.05757/main_diagram/main_diagram.pdf
ADDED
|
Binary file (53.5 kB). View file
|
|
|
2107.05757/paper_text/intro_method.md
ADDED
|
@@ -0,0 +1,14 @@
|
|
| 1 |
+
# Introduction
|
| 2 |
+
|
| 3 |
+
Despite the promise of artificially intelligent agents [@lecun2015deep; @schmidhuber2015deep], they are known to suffer from catastrophic forgetting when learning over non-stationary data distributions [@McCloskey1989CatastrophicII; @Goodfellow2013AnEI]. Continual learning [@ring1998child; @lopez2017gradient; @nguyen2017variational], also known as life-long learning, was introduced to deal with catastrophic forgetting. In this framework, an agent continually learns to solve a sequence of non-stationary tasks by accommodating new information, while remaining able to complete previously experienced tasks with minimal performance degradation. The fundamental challenge in continual learning is catastrophic forgetting, which is caused by the interference among tasks from heterogeneous data distributions [@Lange2019ContinualLA].
|
| 4 |
+
|
| 5 |
+
Task interference is almost unavoidable when model parameters, like the feature extractor and the classifier, are shared by all tasks. At the same time, it is practically infeasible to keep a separate set of model parameters for each individual task when learning with an arbitrarily long sequence of tasks [@hadsell2020embracing]. Moreover, knowledge tends to be shared and transferred across tasks more in the lower layers than higher layers of deep neural networks [@ramasesh2020anatomy]. This has motivated the development of non-parametric classifiers that automatically avoid task interference without sharing any parameters across tasks. Kernel methods [@smola1998learning] provide a well-suited tool for this due to their non-parametric nature, and have proven to be a powerful technique in machine learning [@cristianini2000introduction; @smola2004tutorial; @rahimi2007random; @sinha2016learning]. Kernels have been shown to be effective for incremental and multi-task learning with support vector machines [@diehl2003svm; @pentina2015multi]. Recently, they have also been demonstrated to be strong learners in tandem with deep neural networks [@wilson2016deep; @wilson2016stochastic; @tossou2019adaptive], especially when learning from limited data [@zhen2020learning; @patacchiola2020bayesian]. Inspired by the success of kernels in machine learning, we introduce task-specific classifiers based on kernels by decoupling the feature extractor from the classifier for continual learning.
|
| 6 |
+
|
| 7 |
+
<figure id="fig:demo" data-latex-placement="t!">
|
| 8 |
+
<embed src="figs/vrf.pdf" style="width:100.0%" />
|
| 9 |
+
<figcaption><strong>Overview of kernel continual learning</strong> with variational random features. For each task <span class="math inline"><em>t</em></span>, we use the coreset <span class="math inline">𝒞<sub><em>t</em></sub></span> to infer the random Fourier basis, which generates kernel matrix <span class="math inline">𝒦<sub><em>t</em></sub></span>. The classifier for this task is constructed based on kernel ridge regression using <span class="math inline">𝒦<sub><em>t</em></sub></span>. <span class="math inline"><em>h</em><sub><em>θ</em></sub></span> denotes the feature extraction network, parameterized by <span class="math inline"><em>θ</em></span>, which is shared and updated when training on the task sequence. <span class="math inline"><em>f</em><sub><em>ϕ</em></sub></span> is the inference network, parameterized with <span class="math inline"><em>ϕ</em></span> for random Fourier bases, which is also shared across tasks and updated throughout learning. Memory <span class="math inline">ℳ</span> stores the coreset from each task and is used for inference only. <span class="math inline"><em>h</em><sub><em>θ</em></sub></span> and <span class="math inline"><em>f</em><sub><em>ϕ</em></sub></span> are jointly learned end-to-end. </figcaption>
|
| 10 |
+
</figure>
|
| 11 |
+
|
| 12 |
+
In this paper, we propose *kernel continual learning* to deal with catastrophic forgetting in continual learning. Specifically, we propose to learn non-parametric classifiers based on kernel ridge regression. To do so, we deploy an episodic memory unit to store a subset of samples from the training data for each task, which we refer to as 'the coreset', and learn the classifier based on kernel ridge regression. Kernels provide several benefits. First, the direct interference of classifiers is naturally avoided as kernels are established in a non-parametric way for each task and no classifier parameters are shared across tasks. Moreover, in contrast to existing memory replay methods, e.g., [@kirkpatrick2017overcoming; @AGEM], our kernel continual learning does not need to replay data from previous tasks when training the current task, which avoids task interference while enabling more efficient optimization. In order to achieve adaptive kernels for each task, we further introduce random Fourier features to learn kernels in a data-driven manner. Specifically, we formulate kernel continual learning with random Fourier features as a variational inference problem, where the random Fourier basis is treated as a latent variable. The variational inference formulation naturally induces a regularization term that encourages the model to learn adaptive kernels for each task from the coreset only. As a direct result, we are able to achieve more compact memory, which reduces the storage overhead.
|
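To make the per-task classifier concrete, the following is a minimal sketch (not the authors' code) of how a non-parametric classifier could be built with kernel ridge regression from a stored coreset; the RBF kernel and the regularisation weight `lam` are illustrative assumptions, whereas the paper learns the kernel via variational random Fourier features.

```python
import numpy as np

def rbf_kernel(A, B, gamma=1.0):
    # Pairwise RBF kernel between rows of A and rows of B (an illustrative kernel choice).
    d2 = ((A[:, None, :] - B[None, :, :]) ** 2).sum(-1)
    return np.exp(-gamma * d2)

def fit_task_classifier(coreset_x, coreset_y, num_classes, lam=0.1):
    # Kernel ridge regression on one-hot labels of this task's coreset only.
    K = rbf_kernel(coreset_x, coreset_x)
    Y = np.eye(num_classes)[coreset_y]                    # one-hot targets
    return np.linalg.solve(K + lam * np.eye(len(K)), Y)   # alpha = (K + lam*I)^{-1} Y

def predict(query_x, coreset_x, alpha):
    # Non-parametric prediction: no classifier weights are shared across tasks.
    return (rbf_kernel(query_x, coreset_x) @ alpha).argmax(axis=1)

# Toy usage; in the paper the inputs would be features from the shared extractor h_theta.
rng = np.random.default_rng(0)
cx, cy = rng.normal(size=(20, 8)), rng.integers(0, 3, size=20)
alpha = fit_task_classifier(cx, cy, num_classes=3)
print(predict(rng.normal(size=(5, 8)), cx, alpha))
```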
| 13 |
+
|
| 14 |
+
We perform experiments on four benchmark datasets: Rotated MNIST, Permuted MNIST, Split CIFAR100 and miniImageNet. The results demonstrate the effectiveness and promise of kernel continual learning, which delivers state-of-the-art performance on all benchmarks.
|
2108.02180/paper.pdf
ADDED
|
@@ -0,0 +1,3 @@
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:30baa693377c6e66405168ed1ed2342e4dc9affc891389a03b878d28f17d58e1
|
| 3 |
+
size 12773064
|
2108.08815/paper.pdf
ADDED
|
@@ -0,0 +1,3 @@
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:4f5b5fba726ec1954351e5dae68617f8240bee18fa2cf9c2352955cf7cfcaf14
|
| 3 |
+
size 10162069
|
2108.12296/main_diagram/main_diagram.drawio
ADDED
|
@@ -0,0 +1 @@
| 1 |
+
<mxfile host="Electron" modified="2021-05-27T16:44:17.333Z" agent="5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) draw.io/14.6.13 Chrome/89.0.4389.128 Electron/12.0.7 Safari/537.36" version="14.6.13" etag="5FmOpbBEJqvuP4ucTIzc" type="device"><diagram id="TNksmL_zShHpyeO1fcFz">7V1bV6tKEv41rnXm4biggVweY8hWPGkwSlTyclZCshFynVzk8uunqgkJIUSDBrNl3HMcsYGmu+rrunV39YVQH3vX8+7shU77g9EF4frehSBfEMILHA+/sMQPS6rlUlhgze3++qFtwYMdDNaF3Lp0ZfcHi50Hl9PpaGnPdgvN6WQyMJc7Zd35fOruPvZ7Otr96qxrrb/IbQsezO5osPfYk91fvoSlFSn29M3Atl6iL/Pc+s64Gz28rmLx0u1P3di3hMaFUJ9Pp8vwauzVByMkXkSXsKJfB+5uGjYfTJbHvEDCF167o9W6b+t2Lf2os/PpatIf4PP8hXDlvtjLwcOsa+JdF9gLZS/L8Wh9e7GcT4eD+nQ0nbO3hRL7B3d+26NRrPy3hP/D8ulkGSsP/0H5umGD+XLgHewcvyEZYG0wHQ+Wcx8eWb9ApMqlFL60BpqwLXG3jBOqUeFLjGsCH5V213CxNl/YUhQu1kRNJ7CQO4HNyqD3e5/A/e6g8tuE8vl02V3a0wkU/13lTkNZSeB26EpE6ZLsE7aUQlbpJGQVT0zWPSIdouZh+p+CrKRyKZR2EVshX0hWKYWspRF84qo3hytruennRwl9FrqKkcKJ4Foq7RGVCJdiZZ+ufIT0z5C1dGK0noWIglBOEjFFluZJxvJBdKISQUUeEgxK/7tCDXql22OwEwinDlz4//vpuDvZ3txhQFSINf29YBZHDR7guZkXfyMaAutBERU0JiYYOfOoHDrSiz3bHSP/Jr3FLPYuPBO2+sCoAkYt3xL9k+lkkIDCuqg7si0UZCYwFtokXCHbbbBeausbY7vfx8+kwmwLRG5XLp5IdQgl8ZLsyjgSgSOGI5HbBxE5AYYq72How5AAlSQO/rqQ6r3u/KJ85V2U5f+Epd+L5SfhsbDHY+nreFw9vZyIXJCojD9CKMiDt4TCN0LE6YWAJPKX5V3TfMP5LwBI5EeeGyF386kD/imSFmteutP58AcuaXYxuSzvWh+iWN2DSwVAxafYHqdADH9iE+4UVq1U3SUJt+8q5DaCjggcDPrW4GH952JgjaFbjW3R1WDSr2EsBlE36i4WtrlLIbi/jvy8ZfKGX8VPfZCKfIm/fF8SSSl0lN6g4/pzd1ObiZOIY6S6sY4j/y7SVlEti+lqbg7WL8YjN4m69lzwUqI5y+7cGiz3KgKad/3YYzN8YPFGmytcaoMPtSv5fNSuLbDCFmxhtuHJccg7IqKSJ/J+V8yB+f2QJ6RzMTPsxHJS6OQEu8R3ErD7PIzSIkjZHIHyYT/gr+Bf+Fgdm//v6D/sau0InMgXPF5fzwfQ4m6PVYXqeE14qFe6upBkrGu1nC62eD+FZopFATaRVz4lWlDKSz0dDmSdhruriLvg6c0Wp3D1/nCO8pU9joIjv8/Rcl4cPSKGlk2ufy/5Xd2zHEQezN3SR40HKVEXOUqKf0TQpkXtPmM57w2UQzNLb8xF5TfntA+K9DmnqrAPi9PMOPGHQ1yLGfNad3zKY+Vh5Q15eCHVcWYThBWIQxmk4b9tkIccq46Dm1D6l/fv6j/sUkaZuWLMLIPIasBfKshTuH5bgm6Kd3txtGAdDX4v/zSxypfKm3mzCC7lfRnC5yZV/5BQWXsy6vYGIxABhJO7y+5isPz+cZB8pEkpzY6qCmkz2KcIfJC0UNlJxfcJyEQ4MamJS6lCV8pvop+kRYjOMJSaPwMpDSFRAC/CR5lLwUclRcyeZBClRcu+WiU3U1Ry3End6OZRQjc389fNf6bTw5dIUjsDRDYTF3EFnYacUyjo1PVDXypVPugp3y0Gq/6UafUfAcQYySVnvL5clR+xaGrx0p3hpbmaj/yredccDpbv6/RdA2DP7+LYv3Q2xRde4Sg6zfwzt2dWk7KQtlolTeCfZNymBaAO0Noes+WmG4AyDX4HsmxNl950uZyO4QE2mq6AJxYjeNznZf9SQL6cIqu6i1m4DPa37SGbrtgna1EpF5XAdR+MBhjW4Z8gz1+tC3LljVFR3N2opONfib0nb2UGnN29uedMefraFPpC35cE6kuv5th8pU7NpfVq0B+btnLTn3Vu7qd3D4qn6XSlyYanym2X6kOeBg1ffVCs7vXjrENeOHjGV53aStVrHnUsTw2oiL+p04Z6VG7w5I3uHm6d/vNt0BOUknLNv/bH7bJiX5Hu06PQGldF/A7Va1LTqfHUgfrlmk+DVqAGbQt+C01nKFEH22By8C2JBiavyA2J+qKoOg0Ov63p7YDqCpQrhNZFnsoNX5MbAg0s/I1tBprcXZtW/+b2pTdRxz3hdgkt48zxr5VJvJf+dXsKNXI0MHyqm74aWAHUKlLdWDT1hqfVRQ4oAfcbRJMVV9UtjtpDyyTqSw/ffYCv6i1elZEiigtfDVS5Qf4Jv3xzBV+wLAO+ZAq0qoy9WW+8sIEeL+Z4OTJtftQbt6qKrVgdmbN1HfpadwOg5UKRrfW1EqdpAD157ZL2sk9GQ6i7pMi1laYrZXzGHFfnnQf+1bz+5XSf7yXNrrlNpyVq/lX0Xkm5uZ0YtiXSumtpcg2/4/6jL6weyAmDjLiBPrUo6/XCbgZiBeriuvUrp3f9K0As9a5Hq66vWL2nX36XPPrNp8cVth14O4T2qWrQ4poy9TVWP7sWaF2xTOFeAopVlcmmnUL36Z7ryux7wOsFPuP3yHLUfKq6xpM6698MWf9U2dr2D9pnjvv2P9DuZr0W9WMGfUzpt/J653iu8Xw/Va7VhfGsBtDGRfdJmkN7l8bz7bzzrCw7TxK0Heh8DbwYcoIqtxaANx/rD68VH54HGo4WHZ2zN+18DsdM+L2hH6PzMoWem/5p9pXfIco0on/Yj2FZuU7pt135BJLUQNkgSQ1q3xxJLRiPX4Gkz6BGESPUqLJBvgQ1p0IIR3XrKxDyGTQI1N6ggaj2n40G1ARfLkM+gwYO9d4Xy4tPoKHlqfb3kQ1xNHwL2YB211fbIZ9BA6FfbnN8Bg2Gu0VDQ/xWaIBr48vQAL30oa0j5UapKmD5A6W2yBhXhx30TJ6gvc7Vb+S6IVhwjW3G6yFcow0RQ8O4M+tdu9E3wfNow/cUTn1wLeq0eLRhkpTEdoU/SSutEVH1Wp2ZY3XRebjy0bdhX99QWHrtXINvdD1CHLjPQQOuVfSBEAt4LcE16uI4VcfdJ2+xaQG0DPgYa+0wBZ9IOfaTtIe9NVbh71ugXtsyHc4Gz8WO4XbVI/cjoO4MuU5/VVBfOQO4xvHFrg
GbKMXiWO2MR4ueHH1/S8V1W2cpXEY8sp8PcdhP4zCO/RNyOCtHvWfUlSFHxThHE1LzvBx9rHgRR1FKRhwFT8c9IUd37f81h/vXj2I/JtHDv0G+3AD3bDFQnx6X0AY2Iqg8BJohV9uM5jGuvvYmraVJRqUOq/vWHciZ6xdzrl/KuX6PyjUJrhFfPON3DF89InHNZ3U2GLdRNvnmjfUhNCtpaE76B59Ec2b0BmnoTVpSp5BHTWIyit2POhPKriLNGLYF9LrfEzBm1pl1nvv1HpNHNaupt4GrNfzZatPRJq4mbG2CW6XzNJp0b1CmQT3XVb8LNk2f1f3I9UDGmePH8RPKXr2xovpQ0nZ/iOJs7CRmA4HsWqpt14Pfrg40xnjdnv1zOHayw8sDdkO9Glk04V/DzlMnuHPcV1PoTO4sDHDif6fZwli9jKLgm7hwyjTgdmHVy+42/hMEho9Yx/b/FhhWA7pSgzaYf1RUHdPTZJMHUE+3YG5I2gOIK6dGqGP4UA+Ic+rRuKM+Vnlzor6azhQM+irpwKDbHUiNANzQlQaDF0Qpge9IqmMJ6kMNBFAbDPMhDwPZ1wCgmtyW1HotvV3ywbYcERRu6iaGpl1VB0o4JgjaoUsDCg4c58PwXlEZBHHQwPC4SIMWiBZjBuLOMUA0/iM3sPUiDlMVA8IsVG0Fqj20P+QuBK0V1Ex2fmxw0HVzheINxCFpJu87xoIG7aC5G5I/YKhbf1DQz+QO9EWAvoQOiC0m7xNw+SQo549wo8Q/KkwrqPXDvQkdROMA9wHzaUI+4eTGe/SlTu4B1Gowtt9G7ZB8EWpPilBVpvw7COWp/0UIPTEaW4H68DYaQ65+PzS+J0OBq9J3RON6FL2BxlYA2rGYaOSg91+JxoQT02DfPhz8qW2DP6mo3ISteDUhLxLODR86N+3QudE/5pongi8eQ/vhoJC3DQqlIm8T+tiTCMmgjM4cxU0fPhaUoSGGD4eL7E24KBVNWzeSSe833NsNldet/oh7+yYyEkGkNGRvkZHUJGdHxm5wKU2W5oeM9IDszf1ocNPaUnvC/oZW3GMrVjR4HPWgFSGWUcqntJiFcVoitZPUrq76dT7oPs9GGIqjk6tosQHXeX7hYlQK/wZpjShrAuc6TyC9bxCRFqI3DZEs2AIeRJJKJwl/aTJNRVY4Ciw/OQreCClloLCmAxZPQ+GMY0o5ckzt68NPjqk/L5Qkt/4PQ0mSkNifnJKnTRQuy2Q/jvTW7r+jw0hH7Kr7CSP9hJF+wkg/YaSfMNJPGOknjPQd0fgTRiogGn/CSD9hpJ8w0k8Y6SeM9BNG+gkj/YSRLiu7kSQ8S2F/TVKesaTP58R+LxUe23geYAagYmdJ2897R6qltIQBeSW+eyP3dZiaIYWXZhjnQz7Ord5fHCaIYGkhYldh7gjkIuP87+7YHvnhO5ukhphJS0R2DUavA2TK3p3dStYtwTom0/m4O9q97a5Jg/fFsC3s5miwBNb/Dd0x7YmV+j7i5u81VPD2Gi07t+1Jn3EV77OM7vGby3l3svgNlUbVTwabB9zpvL/79fjrvU349O8EXQkiY01PgtmLo2spRt2+vZiNumvK2pORHfvw79G0u4w36FDWhlPkDzGOzmyYKfdH/lmRSUk8LtGCIGxyBJw604KQljTpc6cMZM7zEfKB2t5qVlhxu5cA++tSyAuZEoJz70M/U9aMKYqfJXabP1HKDLFS3kvwGcuiEc91w6WQtHSaw7IO50n67OkcaVmy4PG7OZDSCjOQfPs0NbFcKic76KuSOI4qMha/Yogdk+l7m/J1TdB+d/GySUUTYwiW33XRdJiwEsIJGzZFh/ilJZqXBpW+uCHmN0kXSxJJ2PeydB+dKJbs5k0TynsD/UQJv/eaLIjvtIxLkFMUohQ/p0oSLhyRMem9pMP7CNtNQ7yD16KA75Jfqy/8t4sgkdtD0LFgfLtioXosND+ChJ98Tuv1Fi/L3rUUaJMXNqt193A77d/cu5pdeYW3hObEDJrjqt/xKx5GcJrCSzT7FVuHoYi4c7o/fvRNMnrtYaTsQfQSeZxcRa5Zqkx9RV4/H85osWhYuE7i1lZFQ19Y2whWVKLIXhgpe7jiBs9XoyjW2SRqYPqxd/87EDazL9jSkUF+udr4lu/ftMrb2RecDXr8b4+MVtrk9rX/JA01gRsrwsuL5tesO6QOi8uJUVmlSZJtwlUdLPI/7sw6wqPfwai0I25rie5PcC5KneJ+7jvbcAbXjbJSh/vrOSo2//RQc0O61MZK0HA1zo3RKllWs25t6lF5SfGaRveFafjb349298bVVSeMEsZ7OYm1pwLYWUcJX0Ymv9sXoP8kjHLetnA2tEWqi971o9O/Bn6PEs9iXdePYzPYKUvpb2ulyqZPdUqUuuFQvVZu+TWv6TRE1an5ar3GNZ2WQJ1GoNo1v+lQnwYGYAXLAcPy0Kd+LYDnCXUUAegUv17TjFpqXXRxNyx1qEVtMYDfhAYKlnNqgCtrDAvxqsmGS53WbvluPZvyJttvaREqKxz2A+p3NV0JlDoXb2e8PUvqi57qKJg9LOKfDPxdQVsCFd6Bd+P9itNhmWjTzt+buurcwXea+sHv7LSrqVPoF65fMsUYT5ZxGu7XteHRMk7Hph7j7xazcuKdZJtTebXTLjm9jQnextq7y6t4m1LoWAp/L9ZR/VuZ/qq4z0ThVUd9wRVW8OzOOEK870sHhniQAAbxZmadZ2sIoDbeHLtTRaiRDuaikU1PvTa4MA9c/E0Yb7xJ2vuzVuTegbE1gnayyHd8HpnWRUETNhFzeE4ddp7ariakrBTAvHzBYj03BPS3RcJy9Dm4dzuU8dr4xe88GVUF5ytTaBDOplhiE/gAf3MUZ0xi8k2R3ddOgk4gY5ze9chtPjNZALLx0e/VLceUO0PMb6IGDa7jtGYJeQKy4zEA2Z+cLdn2kTD6xFcMuNCu8nYmTx0ayCPAStocPKOdH84eQV+EJtBCkynB2YjBNVs7UOre3I46oNsUjrrPwdVvw2kTShocYCmI5rQAl/AuyAsdxv0Z6KHJDUkd3zrv04OKVB8SY0zfp4djwLsGzkJhn7LSQwJ5iGslcfarGPQIV2uKqjzEmTGRrZoJZlOc+QI7B6yZq9/0V9VWdSMw9JaHKznX86wwvgyQP0Aflr+kkpQXuxp5rXXVRfe5tmRzZvD1/vPt4p+66qrjFmZaEKh+P1brwx3LAinZf1ZH++sLtr1kFKonJcJyO8cYwAjQO7axLduj3nqWF5BhhGtUHUPKSBGfMolsCXBPOAdFtnLhXYpsZe97FAH+mivQqQFgJCtFPIprLVmG0NZZMKI6nTGuFjKOoIg6NnCti/M+RiiumpaRIg0+M0UCC/EFWsfyCjRqUBY4FujUVvZRg991hh5QszijBjQuWGoBlWjQymiFwLUvipghWJNbKdaZ+U0tM4r9AmsCNIZuZKYJWHWYgZnAuDsPTYIXkA8NQdPfp4l23WGa9QhrldceRLTygdfDrDTxVKAn0EOi+plokg9OeK0ugtcC0gSzxmWjiYg0o
cHQBQ/wTDjZSM33cRJZdO/TxAPvDvrZ9sOM41loAhK2jp6sQs4nTza69V2aGKSDK2TJ+zQB3KPHyzzgrDLW9EFWg0dtFUfjADLA+tQBUY5CMlsljrICW42HUUfOY5WAneapwfAIq2Qjh9/VwSpGd+QaDzo4K0UEkEEY1ZHUoF0g2QqcrosoG0FGZpUjJtC2gSjj1YeCREechss8etw1ljk6wqJ9POhtV6ufIRoQ2RhHRAOisfV+NMAImo7Ca3qDp9npwTWhnEVMixI9A48GT+tQ5fYHJGoDvRqXyoZYHIlKg8ZK0w2QIEPuQ7EAkDyArgJ5vqh1oV6OBrXsETSMBQRDAbR2kewyjp0vA/Y7/GS1313cvYu0pOfSu7nY75iVVfQp6hkns08jgS0SaLoiFSfqrOA+Jx5PnMkehTdBR7U8oGNxpAjIRgvkahu8eSuzpgFkgKbBeYkzxc9yirEa4M0YItjvfvYY63CFWkbTqVQY/y5guTp0nMtjZydknKtpg6ZpuRrLHV0Yiui1lSZTTtOHmTGC74J/KAG+ihMDwLPhgCIWRkmzUwRsO1U3CdVbZ6FIXvYZIF8HG97B+ZasFMFd5m12Ul9hMOKrQRvexXM4MkcSOWqLHnqJqnOmSGI+0VXM9OCCt4b5D7JaZ4StuwEpRPVCWawYNQYJO+Q1PbMVL4LFip6RCxZegaJn0K8HUQQZuT5RIxNNkJ5EdWocjrsCzWAR4DXQw+QxY0QWSx56wOzWs42bfDAisWwZQQuzmGVdfyaAnOZwx/85PL1cImi4ztoxwHttsbOLsnl6La7pgC2DMqgoEWfd8JDHqmOKH6CH2HSYPC4OPRz05mswVmpe5ogzy6xSA2zViFqU9YlBg0CfiCbXslurjokrizDvY1Ag+x0zUjkWIMTKHHHW2OqzFrSLniVWlM/KIsr0pkXW56tmxAhm4DRxBc5ZMJILRXAdAFhlQ1/NaIGA5YHzXWD1W2dZ4ZyfRG2AdQH0yBxbpaBxhwQzvp5Dom7mGN6nxybmeMSKb7QiAKctklXjqrq1UjFPrWPwxYmsqpgfKwBpndnXRXsM+hEYwVlmInKZ8W6DDYGrIRrcB1YAiJiXjOIKgILYYxruc8JZGTaXkDWiyla8urjnpECjBXvlUIwTZ7Y/8F2gKGarLE78MKAy2yGBe+w+MA+Bs+UUtPV5KJLP7B2bSwhqGDnPbJHhOnFVrkkq7uIrzqhhGQMVCerN7MdQZ4jUBPmtFMdGZXOSuHsWepd5nTdG3WHEcJqetlvzu0bKWA5aGI+oqc2sNOHwXUCJhGtMCkQTl/kygP4PrJ3hgSaA95p7tghzPjTxNYYTi61fzxhRDXc36rgvoBgWGtXRnzGQx0J2fxct+DZo8LZUnDW8mJkArU4zcwRRdShYrCYHWug8uxNz0TRs1lo3cadhZhseV61SuYEjrTgRMz/UvRSzI2Tfr4mWjNzA9WZniqrmszMC13rgqgglK0ZwbhckbAtGzbBI890uWhSaboK2oFk1rwDvulQ2SVHiALiiGeeacJdy9p0AQ8wbI8K7fmFWrDqYmb+GsQ0/87ydo3AYZcY1WcXRuixvBHitJp/dChl6zAoBzU0LE0fE3VFD8Hdp1igRzl4ILH+RXqg1VT7z7tDLy7wDnnmGmG9CTM9P9H13RoDuxNkV8oEd8J7G9BPuzjqT3s1nt7eAu0VY3qrMOyNgbOIJTXrDO98OmnyiALhbRAVeg02RkSa43gbGqH6eXXj5yFbcaalgdNXLvMZMZ6t/MYdNcda9sxUAOHOFVnj2OQkLKYJnbxbHw8N1qitNruH6yuxzEnprhXnBMD9JgSgSsJPYMFNgdp/XwXnvGniSwyLtBGBzmxb4rZmzAfh4fi7FHHpOuzhyhFGErRgLakHm/Ajo8TptCUZOgTQvxRMScZezpGbWvA0/XAmNiDzTqvd8IiN4vhdgVsG5msx7V/EkRsCYpOm0QDhRJBXnnzAjbua5q3COBzxozJpWIJqwKJgEUtLXMufjMZjVCvrmPOuscprxRZ3BRk3WOXARfCJAFuqqQs1uSuE+K5OEnM8WYwUphGfXS2pQpKx4DRckpAC+HvnIniI1pAngpEgzvg0X83iDhHQ1OXMsHrMleLiH5hyxxXxi8RbGnjlNB7pkX+HMNzFjE2YoKUwsvsb2fYCFBpZJ5hzOmOOb13RDOAc98lnh3BLYCbE65bPTo+Vjxifw9rizZLHKZQ8NhXcxb3FLyL7HysQ9ViA/DHKOuZp8xosCfar50BZey04PwEcLdC5YukWZuwqQHkPwXgH3mekx5DGfPpVbbnHWwDeQxz7LlJhZfjSAHiCHZeMs+iW/rIANsIZa2ed6WRY8XLva8IozXnCFleGqekvMnK2JzX2D7YFnmZyBHvnoW4udyA79/MCOs4bIdpzJ7bPol3zwQcEeMzBb6gf20DRwHQFPZUMqzB5eB893siTqDIPs46URhPt/Lb849rpBwrwGw8xzMzh/p8q4dsYsTmSIYCSDskxemeOpAX0QeZBAuLOiQB6/ibl0XYp5ibN7MJhhAhBSez8rQuLvHfzs3ts79e2O4eqKnRi3Pk0rgakEBSuAb6df3zlbEDP2szwY2/MCoSd1UVwjI7RHsVdsDa+RPOOOjfU2Q/Ybp/lVmoJKoC94XuNpDmAuk0uuGvtX3j2usyptjjWPH8ZMSpvi+KmdFfEk5zGXPn/+6e5pp58+yJRIwmVllzTlak4HmUpV/rIixliyy5EyB6SP8av0sVNNS7yQqJfP7RTT8h/IT3Ip7J47TvjSJSfmxlPuUpAO8VTkypfl+CD84Em1JZ6/lJLn3lYuOSFWdSk3LldSuHyaU9RF/E+qL+1Rf3BRvvIuyvK68MMHpR9/APp8AA3s9lhVeMTt+kRpqFe6upBQDHdXy+kiCc5Pnn9evRQT8JQql/uCuJzXIejVXJn5Wmz2lTgxyT4hUqxfcIJ9VO9b8pYd/bw5ANx9sZeDh1nXxLvuvDvbP8R+8+wp6COlwLuaAm+Bu5RICpGEExCJ//OUEhhbX2VklAThK4wMKXH6e35Ghki+H+iF6r69kSfkhT8S8kICI3wpR8wL5E0j7MM43xP4WF3cpKvkBnwxV1U9+nd+KmU9D5n3h+lqgb8si7sILHEpuigNgyfR1lK+/LuQGmAzL1azE1rNfyYneSnJSRCw+RnN8Od8ihTfDlxQIi902h/gE/8D</diagram></mxfile>
|
2108.12296/main_diagram/main_diagram.pdf
ADDED
|
Binary file (58.8 kB). View file
|
|
|
2108.12296/paper_text/intro_method.md
ADDED
|
@@ -0,0 +1,77 @@
| 1 |
+
# Introduction
|
| 2 |
+
|
| 3 |
+
Deep learning via supervised learning has shown tremendous success in domains where large annotated datasets are readily available, such as vision, text, and speech. Implicitly learned by these models is an intermediate representation that proves useful for downstream tasks. Unfortunately, in many settings such as healthcare, large annotated datasets are not readily available to enable learning such valuable representations. As a result, there has been a push towards learning these representations in an unsupervised or semi-supervised manner, since unannotated data, in contrast, is often readily available in large quantities. Recent literature has shown significant progress towards learning these useful representations without human-annotated data, closing the gap between supervised and unsupervised learning, and in some cases demonstrating superior transfer learning properties compared to supervised counterparts [@he2020momentum; @chen2020simple].
|
| 4 |
+
|
| 5 |
+
Self-supervised methods have emerged as a promising approach to achieving appealing results in various applications without requiring labeled examples. This is typically done via pretext tasks that are closely related to the downstream tasks of interest and that differ from domain to domain. For example, in the image domain, colorization [@zhang2016colorful], jigsaw puzzles [@sajjadi2016regularization], and rotation prediction [@gidaris2018unsupervised] have previously been presented as pretext tasks useful for learning such representations. Similarly, in the text domain, pretext tasks such as predicting masked words and context words have been widely used [@mikolov2013distributed; @devlin2018bert]. More recently, contrastive learning methods have achieved great success by leveraging domain-specific transformations, such as random cropping or flipping for images, to create multiple semantically similar examples that preserve the semantic meaning, and by encouraging the network to be invariant to such transformations. Such pretext tasks and transformations cannot be readily applied to data that lack the same structural information, for example tabular data[^1].
|
| 6 |
+
|
| 7 |
+
It is not clear how to generate new semantically similar examples for tabular data. Moreover, in many settings, tabular data contains both categorical and continuous features which require different treatment. In this work, we focus on tabular data settings that contain a small set of annotated samples and a relatively sizeable unlabeled set of samples. Specifically, we propose a framework for improving downstream task performance in this semi-supervised setting. Our method consists of a semi-self-supervised pretraining step where a feature reconstruction pretext task and a supervised contrastive loss term are used. Various forms of Mixup augmentation [@zhang2017mixup] have been used in the image domain, where new examples are created by taking convex combinations of pairs of examples. For tabular data, however, this may lead to low-probability samples in the data space. Instead, we leverage the manifold assumption [^2] and mix samples in the latent space to create multiple views for our contrastive loss term. The unlabeled subset is further leveraged by pre-training the encoder and using label propagation [@iscen2019label] to generate pseudo-labels for the unlabeled samples. Subsequently, the trained encoder and the pseudo-labeled samples are transferred to a downstream task where a simple predictor with Mixup [@zhang2017mixup] augmentation is trained. We show that our proposed framework leads to improvements on various tabular datasets, such as UCI and Genomics (UK Biobank).
|
| 8 |
+
|
| 9 |
+
# Method
|
| 10 |
+
|
| 11 |
+
To present our method we first formulate the self-supervised and semi-supervised problem. Consider a dataset with $N$ examples. Our assumption is that there is a small subset of this dataset for which labels are available: $\mathcal{D}_L = \{ (x_i, y_i)\}_{i=1}^{N_L}$, and the rest of the dataset is unlabeled: $\mathcal{D}_U = \{ (x_i)\}_{i=1}^{N_U}$, where $x_i$ are observations sampled from a data-generating distribution $p(x)$ and $y_i \in \{0, 1, \cdots, c\}$ is a discrete label. We consider settings where the majority of the data is unlabeled, i.e. $|\mathcal{D}_L| \ll |\mathcal{D}_U|$. In supervised learning, a classifier $f : \mathcal{X} \rightarrow \mathcal{Y} \in \mathcal{F}$ is a function learned by an ML algorithm which aims at optimizing $f$ for a given loss function $l_A(\cdot)$, i.e. $f = \arg\min_{f\in \mathcal{F}}\sum_{i=1}^N{l_A(f(x_i),y_i)}$. In this limited labeled data regime a supervised model is likely to overfit, hence we propose to use the unlabeled samples to improve the model's generalization.
|
| 12 |
+
|
| 13 |
+
Self-supervised methods leverage unlabeled data to learn useful representations for downstream prediction tasks. Many techniques have been proposed for images, where useful visual representations are learned through pre-text tasks such as in-painting, rotation, and jig-saw puzzles [@noroozi2016unsupervised; @pathak2016context; @gidaris2018unsupervised], and more recently, the gap between supervised and unsupervised models has been drastically reduced through contrastive representation learning methods [@he2020momentum; @chen2020simple]. Generally, in contrastive representation learning, a batch of $N$ samples is augmented through an augmentation function $\text{Aug}(.)$ to create a multi-viewed batch with $2N$ pairs, $\{\tilde{x_i}, \tilde{y_i}\}_{i=1\cdots2N}$, where $\tilde{x}_{2k}$ and $\tilde{x}_{2k-1}$ are two random augmentations of the same sample $x_k$ for $k = \{1, \cdots, N\}$. The samples are fed to an encoder $e: x \rightarrow z$ which takes a sample $x \in \mathcal{X}$ to obtain a latent representation $z = e(x)$. Typically, when defining a pre-text task, a predictive model is trained jointly to minimize a self-supervised loss function $l_{ss}$.
|
| 14 |
+
|
| 15 |
+
$$\begin{equation}
|
| 16 |
+
\min\limits_{e, h} \mathbb{E}_{(x, \tilde{y}) \sim P(X, \tilde{Y})} \big[l(\tilde{y}, h \circ e (x))\big]
|
| 17 |
+
\end{equation}$$
|
| 18 |
+
|
| 19 |
+
where $h$ maps $z$ to an embedding space $h: z \rightarrow v$. Within a multi-viewed batch, with $i \in \mathcal{I} = \{1, \cdots, 2N\}$, the self-supervised loss is defined as
|
| 20 |
+
|
| 21 |
+
$$\begin{equation}
|
| 22 |
+
\label{eqn:closs}
|
| 23 |
+
l = \sum_{i\in \mathcal{I}} - \text{log} \Big(\frac{\text{exp}(\text{sim}(v_i, v_{j(i)})/\tau)}{\sum_{n \in \mathcal{I} \backslash \{i\}}{\text{exp}(\text{sim}(v_i, v_{n})/\tau)}}\Big)
|
| 24 |
+
\end{equation}$$
|
| 25 |
+
|
| 26 |
+
Here, $\text{sim}(\cdot , \cdot) \in \Re^+$ is a similarity function (e.g. dot product or cosine similarity), $\tau \in \Re^+$ is a scalar temperature parameter, $i$ is the *anchor*, $\mathcal{A}(i)$ is the *positive(s)* and $\mathcal{I} \backslash \{i\}$ are the *negatives*. The positive and negative samples refer to samples that are semantically similar and dissimilar respectively. Intuitively, the objective of this function is to bring the positives and the anchor closer in the embedding space $v$ than the anchor and the negatives, i.e. $\text{sim}(v^{a}, v^{+}) > \text{sim}(v^{a}, v^{-})$, where $v^a$ is the anchor and $v^+$, $v^-$ are the positive and negative respectively.
|
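As a concrete illustration (not part of the original paper), a minimal PyTorch-style sketch of this contrastive loss is shown below; cosine similarity as $\text{sim}(\cdot,\cdot)$ and the batch layout where consecutive embeddings are two views of the same sample follow the description above, while the tensor names are our own.

```python
import torch
import torch.nn.functional as F

def contrastive_loss(v, tau=0.1):
    """v: (2N, d) embeddings; v[2k] and v[2k+1] are two views of sample k."""
    v = F.normalize(v, dim=1)                 # cosine similarity via dot product
    sim = v @ v.t() / tau                     # (2N, 2N) similarity logits
    n2 = v.shape[0]
    pos = torch.arange(n2) ^ 1                # positive index: 0<->1, 2<->3, ...
    sim.fill_diagonal_(float('-inf'))         # negatives are I \ {i}: drop self-similarity
    return F.cross_entropy(sim, pos)          # -log softmax at the positive index

# usage: embeddings from the projection head h(e(x)) of a multi-viewed batch
loss = contrastive_loss(torch.randn(8, 32))
```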
| 27 |
+
|
| 28 |
+
In semi-supervised learning (SSL), the dataset comprises two disjoint sets $\mathcal{D}_L$ and $\mathcal{D}_U$, where a predictive model $f$ is optimized to minimize a supervised loss jointly with an unsupervised loss. In other words: $$\begin{equation}
|
| 29 |
+
\min\limits_{f} \mathbb{E}_{(x,y) \sim P(X, Y)}\big[l(y, f(x))\big] + \beta \mathbb{E}_{(x,y_{ps}) \sim P(X, Y_{ps})}\big[l_u(y_{ps}, f(x))\big]
|
| 30 |
+
\end{equation}$$
|
| 31 |
+
|
| 32 |
+
The first term is estimated over the small labeled subset $\mathcal{D}_L$, and the second, unsupervised loss is estimated over the much larger unlabeled subset. The unsupervised loss function $l_u$ is defined to help the downstream prediction task, for example via consistency training [@noroozi2016unsupervised; @tarvainen2017mean] or, in our case, a supervised objective on pseudo-labeled samples [@Lee_pseudo-label:the].
|
| 33 |
+
|
| 34 |
+
This section describes our proposed method, Contrastive Mixup, a semi-supervised method for multi-modal tabular data where structural (spatial or sequential) data augmentations are not readily available. To this end, we first present our semi-supervised training procedure for learning an encoder and subsequently train a classifier using the pre-trained encoder and pseudo-labels.
|
| 35 |
+
|
| 36 |
+
{#fig:overview}
|
| 37 |
+
|
| 38 |
+
We make use of the manifold assumption, whereby high-dimensional data roughly lie on a low-dimensional manifold, and leverage Mixup-based [@zhang2017mixup] data interpolation for creating positive and negative samples. By doing so we mitigate the risk of creating low-probability samples in the original data space.
|
| 39 |
+
|
| 40 |
+
In our setting we represent the multi-modal tabular data rows $x_i$ as a concatenation of discrete $D = [D_1, \cdots, D_{|D|}]$ and continuous features $\mathcal{C} = [C_1, \cdots, C_{|\mathcal{C}|}]$. The raw features $x_i \in \Re^d$ are fed through an embedding layer $E: x \rightarrow \bar{x}$ that results in a feature vector $\bar{x} \in \Re^{|C| + \sum_i^{|D|}{d_{|\mathcal{D}_i|}}}$, that is, a concatenation of the continuous features $\mathcal{C}$ and the embedded discrete features $\mathcal{D}$, where $d_{|D_i|}$ is the embedding dimension for each discrete feature $\mathcal{D}_i$. The embedded features are fed to an encoder $z = e(\bar{x})$, and subsequently to a feature estimation pre-text task as well as a semi-supervised contrastive loss term, as shown in Figure [1](#fig:overview){reference-type="ref" reference="fig:overview"}.
|
| 41 |
+
|
| 42 |
+
In the tabular domain, the data augmentations commonly used in the image domain cannot be applied. Instead, we propose to interpolate between samples of the same class to create positive examples and use a supervised contrastive loss term in the latent space. Given a batch of labeled examples $\mathcal{D_{B}} = \{x_k, y_k\}^K_{k=1}$, we create a new labeled sample $(\hat{x}, \hat{y})$ by interpolating within a pair of examples with the same label $$\begin{equation}
|
| 43 |
+
\hat{x} = \lambda x_1 + (1 - \lambda) x_2
|
| 44 |
+
\end{equation}$$
|
| 45 |
+
|
| 46 |
+
where $\lambda$ is a scalar sampled from a uniform distribution $\lambda \sim \mathcal{U}(0, \alpha)$ with $\alpha \in [0, 0.5]$. The newly generated sample $\hat{x}$ will be $\lambda$ close to $x_1$ and $1-\lambda$ close to $x_2$, with the same label as $x_1$ and $x_2$, i.e. $y_1=y_2=\hat{y}$. As opposed to randomly interpolating between samples and enforcing closeness between samples of different labels, we encourage samples of the same label to lie close to one another in the latent space.
|
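A small sketch of this within-class interpolation, under our reading of the text ($\lambda$ drawn from $\mathcal{U}(0,\alpha)$ and partners drawn from the same class); the function and variable names are ours and do not come from the paper.

```python
import torch

def within_class_mixup(x, y, alpha=0.4):
    """Interpolate each sample with a random sample of the same class; labels unchanged."""
    lam = torch.empty(len(x), 1).uniform_(0.0, alpha)        # lambda ~ U(0, alpha)
    partner = torch.empty(len(x), dtype=torch.long)
    for c in y.unique():
        idx = (y == c).nonzero(as_tuple=True)[0]
        # pick a random same-class partner for every sample of class c
        partner[idx] = idx[torch.randint(len(idx), (len(idx),))]
    x_hat = lam * x + (1.0 - lam) * x[partner]                # convex combination
    return x_hat, y.clone()

x_hat, y_hat = within_class_mixup(torch.randn(16, 10), torch.randint(0, 3, (16,)))
```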
| 47 |
+
|
| 48 |
+
Applying Mixup in the input space for tabular data may lead to low-probability samples due to the multi-modality of the data and the presence of categorical columns. Instead, we map samples to the hidden space and interpolate there. More concretely, given an encoder $e$ comprising $T$ layers $f_t$, for $t \in \{1, \cdots, T\}$, samples are fed through to an intermediate representation $h_t$ at layer $t$. This layer contains a more abstract representation of the input samples $x_1$ and $x_2$. The samples are interpolated within this intermediate layer as $$\begin{equation}
|
| 49 |
+
\tilde{h}^{t}_{12} = \lambda h^{t}_{1} + (1 - \lambda) h^{t}_{2}
|
| 50 |
+
\end{equation}$$ where $h^{t}_i$ is obtained by feeding sample $\bar{x}_i$ through the encoder until layer $t$. Subsequently, the newly generated samples $\tilde{h}^t_{i'i}$ as well as the original samples $h^{t}_i$ are fed through the rest of the encoder layers $t, \cdots, T$ to obtain the latent representation $z$. In this space we distinguish between $z_l$ and $z_u$, which are the latent representations of labeled and unlabeled samples respectively. Note that initially we only consider the labeled portion for the contrastive term, i.e. $(z_l, y_l)$ in Figure [1](#fig:overview){reference-type="ref" reference="fig:overview"}. We define the contrastive loss term to encourage samples created from pairs of the same class to have high similarity. It is common practice to introduce a separate predictive model that maps the latent representations to an embedding space via a projection network $h^{proj}$, where the contrastive loss term is defined. We use the supervised contrastive loss [@khosla2020supervised] for the labelled set $\mathcal{D}_L$, as our augmentation views are within a class. It generalizes Eqn. [\[eqn:closs\]](#eqn:closs){reference-type="ref" reference="eqn:closs"} to an arbitrary number of positive samples; due to the presence of labels, examples belonging to the same class are encouraged to have high similarity, making the loss term more sample efficient. $$\begin{equation}
|
| 51 |
+
\label{eqn:supervised-contrastive-loss}
|
| 52 |
+
l^{sup}_{\tau} = \sum_{i\in \mathcal{I}} \frac{-1}{|P(i)|} \sum_{p \in P(i)} \text{log} \Big(\frac{\text{exp}(\text{sim}(h^{proj}_i, h^{proj}_{p})/\tau)}{\sum_{n \in Ne(i)} \text{exp}(\text{sim}(h^{proj}_i, h^{proj}_{n})/\tau)}\Big)
|
| 53 |
+
\end{equation}$$ In the above, $P(i) = \{ p | p \in \mathcal{A}(i), y_i = \tilde{y}_p \}$ is the set of indices of positives with the same label as example $i$, $|P(i)|$ is its cardinality, and $Ne(i) = \{ n | n \in \mathcal{I}, y_i \neq y_n\}$ is the set of negatives. This objective function encourages mixed-up labeled samples and their anchors to be close, leading to a more clusterable representation. In addition to the above loss term, the encoder is trained to minimize a feature reconstruction loss via a decoder $f_\theta(\cdot)$ $$\begin{equation}
|
| 54 |
+
\label{eqn:reconstruction}
|
| 55 |
+
l_r(x_i) = \frac{|C|}{d}\sum_c^{|C|} || f_\theta \circ e_\phi(x_i)^c - x_i^c||_2^2 + \frac{|D|}{d}{\sum_j^{|D|}\sum_o^{d_{D_j}}{ \mathbf{1}[x_i^d = o]\log(f_\theta \circ e_\phi(x_i)^o)}}
|
| 56 |
+
\end{equation}$$
|
| 57 |
+
|
| 58 |
+
The semi-self supervised objective function can then be written as $$\begin{equation}
|
| 59 |
+
L = \mathbb{E}_{(x,y) \sim \mathcal{D}_L}\big[l_{\tau}^{sup}(y, f(x))\big] + \beta \mathbb{E}_{x \sim \mathcal{D}_U \cup \mathcal{D}_L}\big[l_r(x)\big]
|
| 60 |
+
\end{equation}$$ The encoder is trained using this loss term over $K$ epochs, to warm-start the representations in the latent space prior to pseudo-labeling and leveraging the unlabeled samples.
|
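For clarity, a minimal sketch of the supervised contrastive term $l^{sup}_\tau$ over projected latents is given below; all same-label, non-anchor samples act as positives $P(i)$ and differently-labeled samples as negatives $Ne(i)$, matching the equation above, while the tensor names are our own.

```python
import torch
import torch.nn.functional as F

def sup_con_loss(proj, labels, tau=0.1):
    """Supervised contrastive loss with multiple positives per anchor."""
    proj = F.normalize(proj, dim=1)
    sim = proj @ proj.t() / tau                                  # pairwise similarities
    exp_sim = torch.exp(sim)
    same = labels[:, None] == labels[None, :]
    eye = torch.eye(len(proj), dtype=torch.bool)
    pos = same & ~eye                                            # P(i): same label, not the anchor
    neg = ~same                                                  # Ne(i): different label
    denom = (exp_sim * neg).sum(1, keepdim=True)                 # denominator over negatives
    log_ratio = sim - torch.log(denom.clamp(min=1e-12))
    loss = -(log_ratio * pos).sum(1) / pos.sum(1).clamp(min=1)   # average over positives
    return loss.mean()

# proj: projection-head outputs for (mixed and original) labeled latents
loss = sup_con_loss(torch.randn(16, 32), torch.randint(0, 3, (16,)))
```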
| 61 |
+
|
| 62 |
+
Thus far, we have only used the labelled set $\mathcal{D}_L$ in the contrastive loss term $l^{sup}_\tau$. To make use of the unlabeled set $\mathcal{D}_U$, we propose to use label propagation [@iscen2019label; @zhou2004learning] after $K$ epochs of training with the supervised contrastive loss term $L^{sup}$. Given the encoder trained on $\mathcal{D}_{L}$ for $K$ epochs, we map the small labelled set $\mathcal{D}_L$ and a subset of the unlabeled set $S_U \subset \mathcal{D}_U$ to the latent space $z$ and construct an affinity matrix $G$ $$\begin{equation}
|
| 63 |
+
g_{ij} := \begin{cases}
|
| 64 |
+
\text{sim}(z_i, z_j)~ &\text{if}~ i \neq j ~ \text{and} ~z_j \in \text{NN}_k(i) \\
|
| 65 |
+
0 & \text{otherwise}
|
| 66 |
+
\end{cases}
|
| 67 |
+
\end{equation}$$ where $\text{NN}_k(i)$ denotes the $k$ nearest neighbors of sample $z_i$ and $\text{sim}(\cdot, \cdot) \in \Re^{+}$ is a similarity measure, e.g. $z_i^Tz_j$. We then obtain pseudo-labels for our unlabeled samples by computing the diffusion matrix $C$ and setting $\tilde{y}_i :=\text{arg}\max\limits_j c_{ij}$, where
|
| 68 |
+
|
| 69 |
+
$$(I - \alpha \mathcal{A}) C = Y$$ Similar to [@iscen2019label; @zhu2005semi], we use the conjugate gradient method to solve this linear system for $C$, enabling efficient computation of the pseudo-labels. Here $\mathcal{A} = D^{-1/2}WD^{-1/2}$ is the normalized adjacency matrix, $W = G^T + G$, and $D := \text{diag}(W1_n)$ is the degree matrix. Once we have obtained the pseudo-labels for the unlabeled subset $S_U$, we train the encoder with unlabeled samples, treating the generated labels as ground truth $$\begin{equation}
|
| 70 |
+
L = \mathbb{E}_{(x,y) \sim \mathcal{D}_L}\big[l^{sup}(y, f(x))\big] + \gamma \mathbb{E}_{(x,y_{ps}) \sim S_U)}\big[l^{sup}(y_{ps}, f(x))\big] + \beta \mathbb{E}_{x \sim \mathcal{D}_U}\big[l_r(x)\big]
|
| 71 |
+
\end{equation}$$
|
| 72 |
+
|
| 73 |
+
The pseudo-labels are updated every $f$ epochs of training with the above loss term.
|
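A sketch of the pseudo-labeling step is shown below, assuming a binary kNN affinity graph for brevity (the paper weights edges by $\text{sim}(z_i, z_j)$) and scikit-learn/SciPy utilities; the helper name and hyper-parameters are illustrative.

```python
import numpy as np
import scipy.sparse as sp
from scipy.sparse.linalg import cg
from sklearn.neighbors import kneighbors_graph

def propagate_labels(z, y, labeled_mask, num_classes, k=10, alpha=0.99):
    """Label propagation over a kNN graph of latent representations z."""
    G = kneighbors_graph(z, k, mode='connectivity', include_self=False)  # sparse affinity G
    W = G + G.T                                                          # W = G^T + G
    d = np.asarray(W.sum(1)).ravel()
    D_inv_sqrt = sp.diags(1.0 / np.sqrt(np.maximum(d, 1e-12)))
    A = D_inv_sqrt @ W @ D_inv_sqrt                                      # normalized adjacency
    Y = np.zeros((len(z), num_classes))                                  # one-hot labels, zeros if unlabeled
    Y[labeled_mask, y[labeled_mask]] = 1.0
    lhs = sp.eye(len(z)) - alpha * A                                     # (I - alpha*A) C = Y
    C = np.stack([cg(lhs, Y[:, c])[0] for c in range(num_classes)], axis=1)
    return C.argmax(1)                                                   # pseudo-labels
```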
| 74 |
+
|
| 75 |
+
Following the semi-supervised pre-training, the encoder and the generated pseudo-labels are transferred to the downstream task, where a predictor is trained. We leverage Mixup augmentation [@zhang2017mixup] in the latent space and feed samples to a set of fully connected layers, as depicted in Figure [2](#fig:predictor){reference-type="ref" reference="fig:predictor"}.
|
| 76 |
+
|
| 77 |
+
{#fig:predictor}
|
2109.10637/paper.pdf
ADDED
|
@@ -0,0 +1,3 @@
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:20c9a516b117cfed9e3f4a85957f6d7573b6c522a143bc6e6eea15d7e6e65941
|
| 3 |
+
size 2426757
|
2110.03753/paper.pdf
ADDED
|
@@ -0,0 +1,3 @@
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:0556ca6a5be1100a0a4429dc9890ac7c0d4e9cfc881f5ee2139e0ab91ca63a55
|
| 3 |
+
size 1186066
|
2110.13903/paper_text/intro_method.md
ADDED
|
@@ -0,0 +1,93 @@
| 1 |
+
# Introduction
|
| 2 |
+
|
| 3 |
+
What is a video? Typically, a video captures a dynamic visual scene using a sequence of frames. A schematic interpretation of this is a curve in 2D space, where each point can be characterized by an $(x,y)$ pair representing the spatial state. If we have a model for all $(x,y)$ pairs, then, given any $x$, we can easily find the corresponding $y$ state. Similarly, we can interpret a video as a recording of the visual world, where we can find a corresponding RGB state for every single timestamp. This leads to our main claim: *can we represent a video as a function of time?*
|
| 4 |
+
|
| 5 |
+
More formally, can we represent a video $V$ as $V = \{v_t\}^T_{t=1}$, where $v_t = f_\theta(t)$, *i.e.*, a frame at timestamp $t$ is represented as a function $f$ parameterized by $\theta$? Given their remarkable representational capacity [@hornik1989multilayer], we choose deep neural networks as the function in our work. Based on these intuitions, we propose NeRV, a novel representation that represents videos as implicit functions and encodes them into neural networks. Specifically, with a fairly simple deep neural network design, NeRV can reconstruct the corresponding video frames with high quality, given the frame index. Once the video is encoded into a neural network, this network can be used as a proxy for the video, from which we can directly extract all video information. Therefore, unlike traditional video representations which treat videos as sequences of frames, shown in Figure [1](#fig:teaser){reference-type="ref" reference="fig:teaser"} (a), our proposed NeRV considers a video as a unified neural network with all information embedded within its architecture and parameters, shown in Figure [1](#fig:teaser){reference-type="ref" reference="fig:teaser"} (b).
|
| 6 |
+
|
| 7 |
+
<figure id="fig:teaser" data-latex-placement="t!">
|
| 8 |
+
<embed src="figures/teaser.pdf" />
|
| 9 |
+
<figcaption><strong>(a)</strong> Conventional video representation as <strong>frame sequences</strong>. <strong>(b)</strong> NeRV, representing video as <strong>neural networks</strong>, which consists of multiple convolutional layers, taking the normalized frame index as the input and outputting the corresponding RGB frame.</figcaption>
|
| 10 |
+
</figure>
|
| 11 |
+
|
| 12 |
+
::: {#tab:vid-rep-compare}
|                   | Explicit (frame-based), *e.g.* HEVC [@sullivan2012overview] | Explicit (frame-based), *e.g.* DVC [@lu2019dvc] | Implicit (unified), *e.g.* NeRF [@mildenhall2020nerf] | Implicit (unified), NeRV (Ours) |
|:------------------|:---------:|:--------:|:---------:|:--------:|
| Encoding speed    | **Fast**  | Medium   | Very slow | Slow     |
| Decoding speed    | Medium    | Slow     | Very slow | **Fast** |
| Compression ratio | Medium    | **High** | Low       | Medium   |

: Comparison of different video representations. Although explicit representations currently outperform implicit ones in encoding speed and compression ratio, NeRV shows a clear advantage in decoding speed, and NeRV outperforms pixel-wise implicit representations in all metrics.
:::
|
| 35 |
+
|
| 36 |
+
As an image-wise implicit representation, NeRV shares many similarities with pixel-wise implicit visual representations [@sitzmann2020implicit; @tancik2020fourier], which take spatial-temporal coordinates as inputs. The main differences between our work and pixel-wise implicit representations are the output space and the architecture design. Pixel-wise representations output the RGB value for each pixel, while NeRV outputs a whole image, as demonstrated in Figure [2](#fig:pixel_image_rep_compare){reference-type="ref" reference="fig:pixel_image_rep_compare"}. Given a video of size $T\times H \times W$, pixel-wise representations need to sample the video $T*H*W$ times while NeRV only needs to sample it $T$ times. Considering the huge number of pixels, especially for high-resolution videos, NeRV shows a great advantage in both encoding time and decoding speed. The different output space also leads to different architecture designs: NeRV utilizes an MLP $+$ ConvNets architecture to output an image, while pixel-wise representations use a simple MLP to output the RGB value of a pixel. The sampling efficiency of NeRV also simplifies the optimization problem, which leads to better reconstruction quality compared to pixel-wise representations.
|
| 37 |
+
|
| 38 |
+
We also demonstrate the flexibility of NeRV by exploring several applications it affords. Most notably, we examine the suitability of NeRV for video compression. Traditional video compression frameworks are quite involved, with steps such as specifying key frames and inter frames, estimating residual information, splitting video frames into blocks, applying a discrete cosine transform to the resulting image blocks, and so on. Such a long pipeline makes the decoding process very complex as well. In contrast, given a neural network that encodes a video in NeRV, we can simply cast the video compression task as a model compression problem, and trivially leverage any well-established or cutting-edge model compression algorithm to achieve good compression ratios. Specifically, we explore a three-step model compression pipeline: model pruning, model quantization, and weight encoding, and show the contribution of each step to the compression task. We conduct extensive experiments on popular video compression datasets, such as UVG [@mercat2020uvg], and show the applicability of model compression techniques on NeRV for video compression. We briefly compare different video representations in Table [1](#tab:vid-rep-compare){reference-type="ref" reference="tab:vid-rep-compare"}, where NeRV shows a great advantage in decoding speed.
|
| 39 |
+
|
| 40 |
+
Besides video compression, we also explore other applications of the NeRV representation for the video denoising task. Since NeRV is a learnt implicit function, we can demonstrate its robustness to noise and perturbations. Given a noisy video as input, NeRV generates a high-quality denoised output, without any additional operation, and even outperforms conventional denoising methods.
|
| 41 |
+
|
| 42 |
+
The contribution of this paper can be summarized into four parts:
|
| 43 |
+
|
| 44 |
+
- We propose NeRV, a novel image-wise implicit representation for videos, representing a video as a neural network, converting video encoding into model fitting and video decoding into a simple feedforward operation.
|
| 45 |
+
|
| 46 |
+
- Compared to pixel-wise implicit representations, NeRV outputs the whole image and shows great efficiency, improving the encoding speed by **25**$\times$ to **70**$\times$ and the decoding speed by **38**$\times$ to **132**$\times$, while achieving better video quality.
|
| 47 |
+
|
| 48 |
+
- NeRV allows us to convert the video compression problem to a model compression problem, allowing us to leverage standard model compression tools and reach comparable performance with conventional video compression methods, *e.g.*, H.264 [@wiegand2003overview] and HEVC [@sullivan2012overview].
|
| 49 |
+
|
| 50 |
+
- As a general representation for videos, NeRV also shows promising results in other tasks, *e.g.*, video denoising. Without any special denoising design, NeRV outperforms traditional hand-crafted denoising algorithms (median filter, etc.) and ConvNets-based denoising methods.
|
| 51 |
+
|
| 52 |
+
# Method
|
| 53 |
+
|
| 54 |
+
<figure id="fig:pixel_image_rep_compare" data-latex-placement="t!">
|
| 55 |
+
<embed src="figures/archi.pdf" />
|
| 56 |
+
<figcaption><strong>(a) Pixel-wise</strong> implicit representation, taking pixel coordinates as input and using a simple MLP to output the pixel RGB value. <strong>(b) NeRV: Image-wise</strong> implicit representation, taking the frame index as input and using an MLP <span class="math inline">+</span> ConvNets to output the whole image. <strong>(c) NeRV block</strong> architecture, which upscales the feature map by <span class="math inline"><em>S</em></span>. </figcaption>
|
| 57 |
+
</figure>
|
| 58 |
+
|
| 59 |
+
In NeRV, each video $V = \{v_t\}^T_{t=1} \in \mathbb{R}^{T\times H\times W \times 3}$ is represented by a function $f_\theta: \mathbb{R} \rightarrow \mathbb{R}^{H\times W \times 3}$, where the input is a frame index $t$ and the output is the corresponding RGB image $v_t\in \mathbb{R}^{H\times W \times 3}$. The encoding function is parameterized with a deep neural network $\theta$, $v_t = f_\theta(t)$. Therefore, video encoding is done by fitting a neural network $f_\theta$ to a given video, such that it can map each input timestamp to the corresponding RGB frame.
|
| 60 |
+
|
| 61 |
+
**Input Embedding.** Although deep neural networks can be used as universal function approximators [@hornik1989multilayer], directly training the network $f_\theta$ with the input timestamp $t$ yields poor results, as also observed by [@rahaman2019spectral; @mildenhall2020nerf]. By mapping the inputs to a high-dimensional embedding space, the neural network can better fit data with high-frequency variations. Specifically, in NeRV, we use Positional Encoding [@mildenhall2020nerf; @vaswani2017attention; @tancik2020fourier] as our embedding function $$\begin{equation}
|
| 62 |
+
\Gamma(t) = \left(\sin\left(b^0\pi t\right), \cos\left(b^0\pi t\right), \dots, \sin\left(b^{l-1}\pi t\right), \cos\left(b^{l-1}\pi t\right)\right)
|
| 63 |
+
\label{equa:input-embed}
|
| 64 |
+
\end{equation}$$ where $b$ and $l$ are hyper-parameters of the networks. Given an input timestamp $t$, normalized between $(0,1]$, the output of embedding function $\Gamma(\cdot)$ is then fed to the following neural network.
|
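As a quick illustration, a minimal sketch of this embedding function is given below; the values of $b$ and $l$ are illustrative defaults, not values confirmed by this text.

```python
import math
import torch

def positional_encoding(t, b=1.25, l=80):
    """Embed a normalized frame index t in (0, 1]; b and l are illustrative hyper-parameters."""
    t = torch.as_tensor(t, dtype=torch.float32).reshape(-1, 1)          # (batch, 1)
    freqs = b ** torch.arange(l, dtype=torch.float32)                   # b^0, ..., b^(l-1)
    angles = math.pi * freqs * t                                        # (batch, l)
    return torch.cat([torch.sin(angles), torch.cos(angles)], dim=-1)    # (batch, 2l)

emb = positional_encoding(0.5)   # shape (1, 160); fed to the MLP that follows
```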
| 65 |
+
|
| 66 |
+
**Network Architecture.** The NeRV architecture is illustrated in Figure [2](#fig:pixel_image_rep_compare){reference-type="ref" reference="fig:pixel_image_rep_compare"} (b). NeRV takes the time embedding as input and outputs the corresponding RGB frame. Leveraging MLPs to directly output all pixel values of a frame can lead to a huge number of parameters, especially when image resolutions are large. Therefore, we stack multiple NeRV blocks following the MLP layers so that pixels at different locations can share convolutional kernels, leading to an efficient and effective network. Inspired by super-resolution networks, we design the NeRV block, illustrated in Figure [2](#fig:pixel_image_rep_compare){reference-type="ref" reference="fig:pixel_image_rep_compare"} (c), adopting the PixelShuffle technique [@shi2016real] as the upscaling method. Convolution and activation layers are also inserted to enhance expressibility. The detailed architecture can be found in the supplementary material.
|
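A minimal sketch of such a block (Conv followed by PixelShuffle and an activation) is shown below; the channel sizes and the GELU activation are assumptions for illustration, not the exact configuration from the supplementary material.

```python
import torch
import torch.nn as nn

class NeRVBlock(nn.Module):
    """Conv -> PixelShuffle(S) -> activation; upscales the feature map by S."""
    def __init__(self, c_in, c_out, scale):
        super().__init__()
        self.conv = nn.Conv2d(c_in, c_out * scale * scale, kernel_size=3, padding=1)
        self.shuffle = nn.PixelShuffle(scale)   # rearranges channels into spatial resolution
        self.act = nn.GELU()

    def forward(self, x):
        return self.act(self.shuffle(self.conv(x)))

# e.g. a (B, 64, 9, 16) feature map becomes (B, 32, 45, 80) with scale 5
x = torch.randn(1, 64, 9, 16)
print(NeRVBlock(64, 32, scale=5)(x).shape)
```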
| 67 |
+
|
| 68 |
+
**Loss Objective.** For NeRV, we adopt a combination of L1 and SSIM loss as our loss function for network optimization, which is computed over all pixel locations of the predicted image and the ground-truth image as follows $$\begin{equation}
|
| 69 |
+
L = \frac{1}{T} \sum_{t=1}^{T} \alpha \left\lVert f_\theta(t) - v_t\right\rVert_1 + (1 - \alpha) (1 - \text{SSIM}(f_\theta(t), v_t))
|
| 70 |
+
\label{equa:loss}
|
| 71 |
+
\end{equation}$$ where $T$ is the number of frames, $f_\theta (t) \in \mathbb{R}^{H\times W \times 3}$ is the NeRV prediction, $v_t \in \mathbb{R}^{H\times W \times 3}$ is the ground-truth frame, and $\alpha$ is a hyper-parameter balancing the weight of each loss component.
|
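A small sketch of this objective is given below, assuming the third-party `pytorch_msssim` package provides the SSIM term and using an illustrative value of $\alpha$; neither is confirmed by this text.

```python
import torch
from pytorch_msssim import ssim   # assumed third-party SSIM implementation

def nerv_loss(pred, target, alpha=0.7):
    """Combine L1 and (1 - SSIM) between predicted and ground-truth frames."""
    l1 = (pred - target).abs().mean()
    ssim_term = 1.0 - ssim(pred, target, data_range=1.0)
    return alpha * l1 + (1.0 - alpha) * ssim_term

# pred/target: (B, 3, H, W) frames with values in [0, 1]
loss = nerv_loss(torch.rand(2, 3, 64, 64), torch.rand(2, 3, 64, 64))
```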
| 72 |
+
|
| 73 |
+
<figure id="fig:pipeline" data-latex-placement="t!">
|
| 74 |
+
<embed src="figures/compress_pipeline.pdf" style="width:65.0%" />
|
| 75 |
+
<figcaption>NeRV-based video compression pipeline.</figcaption>
|
| 76 |
+
</figure>
|
| 77 |
+
|
| 78 |
+
In this section, we briefly revisit the model compression techniques used for video compression with NeRV. Our model compression pipeline consists of four standard sequential steps: video overfitting, model pruning, weight quantization, and weight encoding, as shown in Figure [3](#fig:pipeline){reference-type="ref" reference="fig:pipeline"}.
|
| 79 |
+
|
| 80 |
+
**Model Pruning.** Given a neural network fit on a video, we first use global unstructured pruning to reduce the model size. Based on the magnitude of weight values, we set weights below a threshold to zero, $$\begin{equation}
|
| 81 |
+
\theta_i =
|
| 82 |
+
\begin{cases}
|
| 83 |
+
\theta_i, & \text{if } \theta_i \geq \theta_q\\
|
| 84 |
+
0, & \text{otherwise,}
|
| 85 |
+
\end{cases}
|
| 86 |
+
\end{equation}$$ where $\theta_q$ is the $q$-th percentile value over all parameters in $\theta$. As is normal practice, we fine-tune the model after the pruning operation to regain its representation quality.
|
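A sketch of this step using PyTorch's built-in pruning utilities follows; the pruning ratio and the set of pruned module types are illustrative choices, not the paper's settings.

```python
import torch
import torch.nn.utils.prune as prune

def global_magnitude_prune(model, amount=0.4):
    """Globally zero out the `amount` fraction of weights with the smallest magnitude."""
    params = [(m, 'weight') for m in model.modules()
              if isinstance(m, (torch.nn.Conv2d, torch.nn.Linear))]
    prune.global_unstructured(params, pruning_method=prune.L1Unstructured, amount=amount)
    # fine-tuning on the video would follow, to recover reconstruction quality
    return model
```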
| 87 |
+
|
| 88 |
+
**Model Quantization.** After model pruning, we apply model quantization to all network parameters. Note that, unlike many recent works [@jacob2018quantization; @banner2018scalable; @faghri2020adaptive; @wang2018training] that utilize quantization during training, NeRV is only quantized post-hoc (after the training process). Given a parameter tensor $\mu$ $$\begin{equation}
|
| 89 |
+
\mu_i = \text{round}\left(\frac{\mu_i - \mu_\text{min}}{\text{scale}}\right) * \text{scale} + \mu_\text{min} , \quad \text{scale} = \frac{\mu_\text{max} - \mu_\text{min}}{2^\text{bit}}
|
| 90 |
+
\label{equa:quant}
|
| 91 |
+
\end{equation}$$ where 'round' rounds a value to the closest integer, 'bit' is the bit length of the quantized model, $\mu_\text{max}$ and $\mu_\text{min}$ are the max and min values of the parameter tensor $\mu$, and 'scale' is the scaling factor. Through Equation [\[equa:quant\]](#equa:quant){reference-type="ref" reference="equa:quant"}, each parameter can be mapped to a 'bit'-length value. The overhead of storing 'scale' and $\mu_\text{min}$ can be ignored given the large number of parameters in $\mu$; e.g., they account for only $0.005\%$ in a small $3\times3$ Conv with $64$ input channels and $64$ output channels ($37k$ parameters in total).
|
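The following sketch shows post-hoc linear quantization of a single tensor, following our reading of Equation (quant); the 8-bit default is illustrative.

```python
import torch

def quantize_tensor(mu, bit=8):
    """Post-hoc linear quantization of a parameter tensor (quantize, then de-quantize)."""
    mu_min, mu_max = mu.min(), mu.max()
    scale = (mu_max - mu_min) / (2 ** bit)
    q = torch.round((mu - mu_min) / scale).clamp(0, 2 ** bit - 1)   # integer code per parameter
    return q * scale + mu_min                                        # de-quantized value

w = torch.randn(64, 64, 3, 3)
w_q = quantize_tensor(w, bit=8)
print((w - w_q).abs().max())   # quantization error is bounded by roughly one scale step
```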
| 92 |
+
|
| 93 |
+
**Entropy Encoding.** Finally, we use entropy encoding to further compress the model size. By taking advantage of symbol frequencies, entropy encoding can represent the data with a more efficient codec. Specifically, we employ Huffman Coding [@huffman1952method] after model quantization. Since Huffman Coding is lossless, it is guaranteed that a decent compression can be achieved without any impact on the reconstruction quality. Empirically, this further reduces the model size by around 10%.
|
2110.14468/main_diagram/main_diagram.drawio
ADDED
|
@@ -0,0 +1 @@
| 1 |
+
<mxfile host="app.diagrams.net" modified="2021-10-05T22:32:38.994Z" agent="5.0 (Macintosh; Intel Mac OS X 10_14_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.159 Safari/537.36" etag="4A4_GRzBY1ldAqEVB-4N" version="15.4.3" type="device"><diagram id="UgztAKpD84cM9E8grNCJ" name="Page-1">5ZrNjpswFIWfJstB/sWwbMjMtJWmXdCqVXduMAGV4MhxmkyfviYxSRgz6lRKiCVYRHBsDPnOxZcLTHCy3D0qviqeZCaqCQLZboJnE4RiEprfRng+CCGhB2GhyuwgwZOQln+EFYFVN2Um1p2OWspKl6uuOJd1Lea6o3Gl5LbbLZdV96grvhCOkM555arfykwXBzWi4KS/F+WiaI8MgW1Z8razFdYFz+T2TML3E5woKfVhbblLRNWwa7kc9nt4pfV4YkrU+i07fNhNY6y3X6aff3yv0/ppEVXpnR3lN6829g/bk9XPLQElN3UmmkHABE+3RalFuuLzpnVrLDdaoZeV2YJmVUnNdSlrs3kXN/3zsqoSWUm1HwyHHIA8N/paK/lLnLVgBkCSNHvIWp/p+X4xuj1TobTYvYoAHsGagBRyKbR6Nl3sDji20dgGY2i92Z6spVHAbIgWZ8a2PbmNp8Vx8BNys2Kp/4cDaNQOUHJ7B/CoHQjB7R1op8iRWkAiDywYdyYgyAMLhk4FOeAY8D4LpjPjAXAtAPvlOhZgH5Lx0LlAUEj2QF0LEOi14JJXAaHkyNcnF8ioXKAQBx6aQMdlAkMB8tCFcFQuhBj6ZwEblwVR7GVOiIa+OeJZnNE+F7ggEKPL0GYk8nHujweGDUDE+0MeAMqufhvkY/7Fg9fEgMeQ9XmAZiy8tgcQeJl/8eBl8W1dCP3Lv3josvi2FiDoZf7Fgz8lva0LzMe8jIcujG9rAsbMy5wwdGUMf3IoUP/dUXj/7mHYR6XMg0el2C2LU82VdnwwY5WrtWjYFXzViPNKbrJ/e3IRcPgFONbOKWfowh5u7Tvyy3Nza9mv9ZrnwmiPklde8WMUBLCLsPfyp0EPQqOG+FoU3Vo09ZRhX2kTAOhCJK9BRNeC6NaYHzf1fD8P+kQQR3EA4xcMURAhhyFDQRsZHYjXmgSJWyHaMEykUmVmkoFPIGH7ltNSRLF7LcO26h0qColb3x0nRC8pUvQGimRoim6J9klstGomQz8xEnOxRlGHJO3JLRckaTZP33rt284+mMP3fwE=</diagram></mxfile>
|
2110.14468/main_diagram/main_diagram.pdf
ADDED
|
Binary file (9.35 kB). View file
|
|
|
2110.14468/paper_text/intro_method.md
ADDED
|
@@ -0,0 +1,74 @@
| 1 |
+
# Method
|
| 2 |
+
|
| 3 |
+
**Reinforcement Learning (RL).** In RL, an agent sequentially selects actions to maximise its expected returns. The underlying problem is typically formalised as an MDP $\left\langle \mathcal{S},\mathcal{A},P,R,\gamma\right\rangle$ where $\mathcal{S}\subset \mathbb{R}^p$ is the set of states, $\mathcal{A}\subset \mathbb{R}^k$ is the set of actions, $P:\mathcal{S} \times \mathcal{A} \times \mathcal{S} \rightarrow [0, 1]$ is a transition probability function describing the system's dynamics, $R: \mathcal{S} \times \mathcal{A} \rightarrow \mathbb{R}$ is the reward function measuring the agent's performance and the factor $\gamma \in [0, 1)$ specifies the degree to which the agent's rewards are discounted over time [@sutton2018reinforcement]. At time $t\in 0,1,\ldots,$ the system is in state $s_{t} \in \mathcal{S}$ and the agent must choose an action $a_{t} \in \mathcal{A}$ which transitions the system to a new state $s_{t+1} \sim P(\cdot|s_{t}, a_{t})$ and produces a reward $R(s_t, a_t)$. A policy $\pi: \mathcal{S} \times \mathcal{A} \rightarrow [0,1]$ is a probability distribution over state-action pairs where $\pi(a|s)$ represents the probability of selecting action $a\in\mathcal{A}$ in state $s\in\mathcal{S}$. The goal of an RL agent is to find a policy $\hat{\pi}\in\Pi$ that maximises its expected returns given by the value function: $v^{\pi}(s)=\mathbb{E}[\sum_{t=0}^\infty \gamma^tR(s_t,a_t)|a_t\sim\pi(\cdot|s_t)]$ where $\Pi$ is the agent's policy set.
|
| 4 |
+
|
| 5 |
+
**Safety in RL.** A key concern for RL in control and robotics settings is the idea of safety. This is handled in two main ways [@JMLR:v16:garcia15a]: using prior knowledge of safe states to constrain the policy during learning, or modifying the objective to incorporate appropriate penalties or safety constraints. The constrained MDP (c-MDP) framework [@cmdpbook] is a central formalism for tackling safety within RL. This involves maximising reward while maintaining costs within certain bounds, which restricts the set of allowable policies for the MDP. Formally, a c-MDP consists of an MDP $\left\langle \mathcal{S},\mathcal{A},P,R,\gamma\right\rangle$ and $\mathcal{C} = \{(L_i: \mathcal{S} \times\mathcal{A} \rightarrow \mathbb{R}, d_i \in \mathbb{R}) | i = 1, 2 \ldots n \}$, a set of safety constraint functions $\boldsymbol{L}:=(L_1,\ldots L_n)$ that the agent must satisfy, together with thresholds $\{d_i\}$ which describe the extent to which the constraints are allowed to be violated. Given a set of allowed policies $\Pi_C:=\{\pi\in\Pi :v^{\pi}_{L_i} \leq d_i\; \forall i=1,\ldots, n\}$ where $v_{L_i}^{\pi}(s)=\mathbb{E}[\sum_{t=0}^\infty \gamma^tL_i(s_t,a_t)|s_0=s]$, the c-MDP objective is to find a policy $\pi^\star$ such that $\pi^\star\in\arg\max_{\pi \in \Pi_C} v^{\pi}(s)$, for all $s \in\mathcal{S}$. The accumulated safety costs can be represented using hard constraints; this captures, for example, avoiding subregions $\cal U \subset \cal S$. When $L_i$ is an indicator function, i.e. takes values in $\{0,1\}$, it is easy to see that each $v_{L_i}$ represents the accumulated probability of safety violations since $v_{L_i}^{\pi}(s)=\mathbb{E}[\sum_{t=0}^\infty \gamma^t1(s_t)|s_0=s]=\mathbb{P}(violation)$.
|
| 6 |
+
|
| 7 |
+
[*Safe exploration in RL*]{.underline} seeks to address the challenge of learning an optimal policy for a task while minimising the occurrence of safety violations (or catastrophic failures) during training [@hans2008safe]. Since in RL, the model dynamics and reward function are a priori unknown, the aim is to keep the frequency of failure in each training episode as small as possible.
|
| 8 |
+
|
| 9 |
+
**Markov games.** Our framework involves a system of two agents each with their individual objectives. Settings of this kind are formalised by MGs, a framework for studying self-interested agents that simultaneously act over time [@littman1994markov]. In the standard MG setup, the actions of *both* agents influence both each agent's rewards and the system dynamics. Therefore, each agent $i\in\{1,2\}$ has its own reward function $R_i:\mathcal{S}\times(\times_{i=1}^2\mathcal{A}_i)\to\mathbb{R}$ and action set $\mathcal{A}_i$ and its goal is to maximise its *own* expected returns. The system dynamics, now influenced by both agents, are described by a transition probability $P:\mathcal{S} \times(\times_{i=1}^2\mathcal{A}_i) \times \mathcal{S} \rightarrow [0, 1]$. Unlike classical MGs, in our MG, Safety Agent does not intervene at each state but is allowed to assume control of the system at certain states which it decides using a form of control known as *impulse control* [@mguni2018optimal].[^2]
|
| 10 |
+
|
| 11 |
+
We now describe our framework, which consists of two core components: firstly, an MG between two agents, the Task Agent and the Safety Agent, and secondly, an impulse control component used by the Safety Agent. The impulse control component allows the Safety Agent to be selective about the set of states at which it assumes control (and in doing so influences the transition dynamics and reward), so that actions geared towards safety concerns are performed only at relevant states. This leaves the Task Agent to maximise the environment reward everywhere else. Unlike the c-MDP formulation, the goals of minimising safety violations and maximising the task reward are now delegated to two individual agents with distinct objectives.
|
| 12 |
+
|
| 13 |
+
**A Markov game of Interventions on One Side**
|
| 14 |
+
|
| 15 |
+
We introduce our new MG of interventions on one side. Formally, our MG is defined by a tuple $\mathcal{G}=\langle \mathcal{N},\mathcal{S},\mathcal{A},\mathcal{A}^{2, {\rm safe}},P,R_1,R_2,\gamma\rangle$ where the new elements are the set of agents $\mathcal{N}=\{1,2\}$, $\mathcal{A}^{2, {\rm safe}}\subseteq \mathcal{A}$ which is the action set for Safety Agent and the functions $R_i:\mathcal{S}\times\mathcal{A}\times\mathcal{A}^{2, {\rm safe}}\to\mathbb{R}$ the one-step reward for agent $i\in\{1,2\}$. The transition probability $P:\mathcal{S}\times\mathcal{A}\times\mathcal{A}^{2, {\rm safe}}\times\mathcal{S}\to[0,1]$ takes the state and action of both agents as inputs. Task Agent and Safety Agent use the Markov policies $\pi^{}:\mathcal{S}\times\mathcal{A}\to[0,1]$ and $\pi^{2, {\rm safe}}:\mathcal{S}\times\mathcal{A}^{2, {\rm safe}}\to[0,1]$ respectively each of which is contained in the sets $\Pi^{}$ and $\Pi^{2, {\rm safe}}\subset\Pi$ which are a stochastic policy set and a *deterministic policy* subset respectively. Therefore, whenever Safety Agent assumes control, random exploratory actions are switched off. Lastly, Safety Agent also has a (categorical) policy $\mathfrak{g}_2:\mathcal{S}\to \{0,1\}$ which it uses to determine whether or not it should intervene.
Denote by $\{\tau_k\}_{k\geq 0}$ the *intervention times*, i.e. the points at which the Safety Agent decides to take an action; for example, if the Safety Agent chooses to take an action at state $s_6$ and again at state $s_8$, then $\tau_1=6$ and $\tau_2=8$ (we describe these in more detail shortly). At any instant, the transition dynamics are affected only by the Safety Agent whenever it decides to act (Task Agent influences the dynamics at all other times). With this in mind, define the function $\boldsymbol{P}:\mathcal{S}\times\mathcal{A}\times\mathcal{A}^{2, {\rm safe}}\times\mathcal{S}\to[0,1]$ by $\boldsymbol{P}(s_{t+1},a_{t}^{},\sum_{0\leq k\leq t}a_{\tau_k}^{2, {\rm safe}}\delta^{\tau_k}_t,s_t)$ where $\delta^a_b$ is the Kronecker delta function (so $\sum_{0\leq k\leq t}a_{\tau_k}^{2, {\rm safe}}\delta^{\tau_k}_t$ equals $a_{\tau_k}^{2, {\rm safe}}$ whenever $t=\tau_1,\tau_2,\ldots$ and is null otherwise). The transition dynamics are determined by the probability transition function given by $$\begin{align}
\boldsymbol{P}(s',a^{},a^{2, {\rm safe}},s)=
P(s',a^{},s)\left(1-\boldsymbol{1}_{\mathcal{A}^{2, {\rm safe}}}(a^{2, {\rm safe}})\right)+P(s',a^{2, {\rm safe}},s)\boldsymbol{1}_{\mathcal{A}^{2, {\rm safe}}}(a^{2, {\rm safe}}).
\end{align}$$ Note that if Safety Agent plays a fixed policy then the MG reduces to an MDP.
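The switching structure of $\boldsymbol{P}$ is straightforward to emulate in a simulator: whichever agent is in control at time $t$ supplies the action that drives the base kernel $P$. Below is a minimal sketch under assumed names (`env_step` stands in for a sampler of the base kernel and is not part of the paper's formalism).

```python
def switched_step(env_step, s, a_task, a_safe, intervened):
    """Sample s' from the switched kernel: the Safety Agent's action drives the
    base kernel P whenever it intervenes, otherwise the Task Agent's does.

    `env_step(s, a)` is an assumed sampler for the base kernel P(. | a, s);
    `intervened` plays the role of the indicator 1_{A^{2,safe}}(a^{2,safe}).
    """
    a = a_safe if intervened else a_task
    s_next = env_step(s, a)
    return s_next, a
```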
*The Task Agent Objective*
The goal of Task Agent is to maximise its expected cumulative reward set by the environment (note that this does not include safety, which is delegated to Safety Agent). To construct the objective for Task Agent, we begin by defining the function $R_1:\mathcal{S}\times\mathcal{A}\times\mathcal{A}^{2, {\rm safe}}\to\mathbb{R}$ by $R_1(s_t,a_t^{},a_t^{2, {\rm safe}})=
R(s_t,a_t^{})(1-\boldsymbol{1}_{\mathcal{A}^{2, {\rm safe}}}(a_t^{2, {\rm safe}}))+R(s_t,a_t^{2, {\rm safe}})\boldsymbol{1}_{\mathcal{A}^{2, {\rm safe}}}(a_t^{2, {\rm safe}})$ where $\boldsymbol{1}_{\mathcal{Y}}(y)$ is the indicator function which is $1$ whenever $y\in\mathcal{Y}$ and $0$ otherwise. The objective that Task Agent seeks to maximise is: $$\begin{align}
v^{\pi^{},(\pi^{2, {\rm safe}},\mathfrak{g}_2)}_1(s)=\mathbb{E}\left[\sum_{t\geq 0}\sum_{0\leq k} \gamma^t R_1\left(s_t,a_t^{},a_{\tau_k}^{2, {\rm safe}}\right)\delta^{\tau_k}_t\Big|s_0\equiv s\right], \label{p1objective}
\end{align}$$ where $a_t^{}\sim\pi^{}(\cdot|s_t)$ is Task Agent's action and $a_t^{2, {\rm safe}}\sim\pi^{2, {\rm safe}}(\cdot|s_t)$ is an action chosen by the Safety Agent. Therefore, the reward received by Task Agent is $R(s_t,a_t^{2, {\rm safe}})$ when $t=\tau_k$, $k=0,1,\ldots$, i.e. whenever the Safety Agent decides to take an action, and $R(s_t,a_t^{})$ otherwise.
*The Safety Agent Objective*
The goal of the Safety Agent is to minimise safety violations both during and after training. Unlike Task Agent, whose actions incur no cost, the Safety Agent incurs a cost for each intervention, given by a cost function $c:\mathcal{A}^{2, {\rm safe}}\to\mathbb{R}_{>0}$. The cost ensures that any Safety Agent interventions are warranted by an increase in safety. The objective that Safety Agent seeks to maximise is: $$\begin{align}
v^{\pi^{{}},(\pi^{2, {\rm safe}},\mathfrak{g}_2)}_2(s)=\mathbb{E}\left[\sum_{t\geq 0}\sum_{0\leq k}\gamma^t\left(-\boldsymbol{\bar{L}}(s_{t}, a_t^{}, a_{\tau_k}^{2, {\rm safe}})-c(a_{\tau_k}^{2, {\rm safe}})\delta^{\tau_k}_t\right)\Big|s_0\equiv s\right],
\end{align}$$ where $\boldsymbol{\bar{L}}:\mathcal{S}\times\mathcal{A}\times\mathcal{A}^{2, {\rm safe}}\to\mathbb{R}$ is defined by $\boldsymbol{\bar{L}}(s_t,a_t^{},a_t^{2, {\rm safe}})=
\boldsymbol{L}(s_t,a_t^{})(1-\boldsymbol{1}_{\mathcal{A}^{2, {\rm safe}}}(a_t^{2, {\rm safe}}))+\boldsymbol{L}(s_t,a_t^{2, {\rm safe}})\boldsymbol{1}_{\mathcal{A}^{2, {\rm safe}}}(a_t^{2, {\rm safe}})$ and $\boldsymbol{L}:=(L_1,\ldots, L_n)$ is a vector of constraint functions, provided by the environment, that indicate how much a given constraint has been violated. Therefore, to maximise its objective, the Safety Agent must determine the sequence of points $\{\tau_k\}$ at which the benefit of performing a precise action outweighs the cost of doing so. We specialise to the case $c(a^{2, {\rm safe}})=\kappa\cdot a^{2, {\rm safe}}$ where $\kappa$ is a positive constant. Each function $L_i$ can represent a (possibly binary) signal that indicates a visitation to an unsafe state.
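To make the two objectives concrete, the per-step terms seen by each agent can be computed from the same executed transition. The sketch below is illustrative only: it uses the specialisation $c(a^{2, {\rm safe}})=\kappa\cdot a^{2, {\rm safe}}$, sums the constraint vector into a scalar for simplicity, and all function names are our own assumptions.

```python
def per_step_terms(R, L, s, a, intervened, kappa=0.1):
    """One-step terms of the two objectives for an executed action `a`
    (the safe action if `intervened`, the task action otherwise).

    `R` is the environment reward, `L` returns the vector (L_1, ..., L_n) of
    constraint violations, and the cost c(a) = kappa * a is charged only at
    intervention times, as in the Safety Agent objective.
    """
    cost = kappa * a if intervened else 0.0
    r_task = R(s, a)                  # term inside v_1
    r_safe = -sum(L(s, a)) - cost     # term inside v_2 (violations summed for illustration)
    return r_task, r_safe
```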
We now mention some key aspects of the framework. First, the task of ensuring safety is delegated to Safety Agent, whose sole objective is to minimise safety violations throughout the course of the problem. Second, the intervention cost (which enters Safety Agent's objective as a strictly negative term) induces a selection process whereby Safety Agent seeks to intervene only at the set of states for which the reduction in cumulative safety violations is sufficiently high to merit an intervention. This means that at all other states, Task Agent can act freely and, in doing so, learn to play actions that deliver task rewards whenever there is no potential for safety violations. Lastly, since the agents learn how to respond to one another, the strategic interaction between the two agents leads to policies in which Task Agent anticipates the actions of Safety Agent and vice versa.
*The Safety Agent Impulse Control Mechanism*
The problem for the Safety Agent is to determine at which states it should assume control and what actions it should perform. We now describe how, at a given state, Safety Agent decides whether or not to intervene and overwrite Task Agent, and the magnitudes of such interventions. In our setup, at each state the Safety Agent first makes a *binary decision* on whether to *assume control*. Therefore, the *intervention times* $\{\tau_k\}$ *are **rules** that depend on the state*, given by $\tau_k=\inf\{t>\tau_{k-1}|s_{t}\in\mathcal{T},\mathfrak{g}_2(s_t)=1\}$, where $\mathcal{T}$ is the sequence of states induced by the joint actions of Task Agent and the Safety Agent and the probability kernel $P$. Therefore, by learning an optimal $\mathfrak{g}_2$, the Safety Agent learns the states at which it is useful to perform an intervention. As we later explain, these intervention times are determined by a condition on the state which is easy to evaluate (see Prop. [1](#prop:switching_times){reference-type="ref" reference="prop:switching_times"}).
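The rule defining $\{\tau_k\}$ is simple to evaluate along a rollout: scanning the visited states in order and recording those at which $\mathfrak{g}_2(s_t)=1$ recovers the intervention times. A minimal sketch (names are illustrative):

```python
def intervention_times(visited_states, g2):
    """Recover {tau_k}: the times along a rollout at which g2 chooses to intervene.

    `visited_states` is the state sequence induced by both agents and P,
    and `g2(s)` is the binary intervention policy.
    """
    return [t for t, s in enumerate(visited_states) if g2(s) == 1]
```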
Unlike [@eysenbach2018leave; @turchetta2020safe], our approach enables learning a safe intervention policy during training without the need to preprogram manually engineered safety responses, and unlike [@tessler2018reward; @dalal2018safe], it minimises safety violations during training. Unlike [@Fisac2019; @deanlqr2019; @chow2019lyapunovbased; @fisac2019bridging], which require access to information that is not available without a priori knowledge of the environment, our framework does not require a priori knowledge of either the model of the environment or of the unsafe states.
Learning to solve an MG involves finding a stable point of the independent agents' policies. In our MG, this means the Safety Agent learns to assume control at a subset of states and minimise safety violations given Task Agent's policy, and Task Agent learns to execute actions that maximise the task objective at all other states (given the actions of Safety Agent).
:::: algorithm
::: algorithmic
Initialise replay buffers ${\cal D}^{}= \{\emptyset\}$, ${\cal D}^{2, {\rm safe}}= \{\emptyset\}$, ${\cal D}^{\rm int}= \{\emptyset\}$ and the state $s_0$.
At each step $t$: sample a task action $a_{t}^{}\sim\pi^{}(\cdot|s_{t})$, a safe action $a_{t}^{2, {\rm safe}}\sim \pi^2(\cdot | s_t)$ and an intervention action $a_{t}^{\rm int}\sim \mathfrak{g}_2(\cdot | s_t)$ ($\in \{0, 1\}$).
If $a_{t}^{\rm int}=0$: apply the task action $a_{t}^{}$ so that $s_{t+1}\sim P(\cdot|a_{t}^{},s_t)$, and set $a = a_{t}^{}$.
If $a_{t}^{\rm int}=1$: apply the safe action $a_{t}^{2, {\rm safe}}$ so that $s_{t+1}\sim P(\cdot|a_{t}^{2, {\rm safe}},s_t)$, and set $a = a_{t}^{2, {\rm safe}}$.
Receive the reward $R(s_{t},a)$ and cost $\boldsymbol{L}(s_t,a)$.
Set $R_1=R(s_t, a)$, $R_2 = -\boldsymbol{L}(s_t,a) - c(a)$ and $R^{\rm int}= -\boldsymbol{L}(s_t,a) - c(a)\, a_{t}^{\rm int}$.
Add the sample $(s_t, a, s_{t+1},R_1)$ to ${\cal D}^{}$, the sample $(s_t, a, s_{t+1}, R_2)$ to ${\cal D}^{2, {\rm safe}}$ and the sample $(s_t, a_{t}^{\rm int}, s_{t+1}, R^{\rm int})$ to ${\cal D}^{\rm int}$.
Update the policy $\pi^{}$ using ${\cal D}^{}$, the policy $\pi^{2, {\rm safe}}$ using ${\cal D}^{2, {\rm safe}}$ and the policy $\mathfrak{g}_2$ using ${\cal D}^{\rm int}$.
:::
::::
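The following is a compact sketch of the loop described by the algorithm above. The learner interfaces (`act`/`observe`), the environment's `step` signature and the scalar-action cost are our own assumptions for illustration, not the paper's API.

```python
def training_step(env, pi_task, pi_safe, g2, kappa=0.1):
    """One step of the two-agent scheme: sample all three policies, let the
    intervention policy decide who acts, then relabel the transition with
    each learner's reward and store it in the corresponding buffer."""
    s = env.state
    a_task = pi_task.act(s)                     # a_t ~ pi(.|s_t)
    a_safe = pi_safe.act(s)                     # a_t^{2,safe} ~ pi^{2,safe}(.|s_t)
    a_int = g2.act(s)                           # a_t^int in {0, 1}
    a = a_safe if a_int == 1 else a_task        # executed action
    s_next, r, L, done = env.step(a)            # environment reward and constraint signal
    cost = kappa * a                            # c(a) = kappa * a (scalar action assumed)
    pi_task.observe(s, a, r, s_next, done)                  # R_1 = R(s_t, a)
    pi_safe.observe(s, a, -L - cost, s_next, done)          # R_2 = -L(s_t, a) - c(a)
    g2.observe(s, a_int, -L - cost * a_int, s_next, done)   # R_int = -L(s_t, a) - c(a) a_t^int
    return s_next
```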
A key aspect of our framework is the presence of two RL agents that each adapt their play according to the other's behaviour. This produces two concurrent learning processes, each designed to fulfil a distinct objective. At a stable point of the learning processes, the Safety Agent minimises safety violations while Task Agent maximises the environment reward. Additionally, Safety Agent learns the set of states in which to perform an action to maintain safety at the current or future states. Central to this process, therefore, is the decision of where Safety Agent should intervene. The intervention times for Safety Agent can be characterised by a 'single obstacle condition' which can be evaluated online.
We now derive the condition that characterises Safety Agent's intervention times.
We first introduce the following object which is required for constructing the Bellman operator:
::: definition
**Definition 1**. *Let $\pi^{}\in\Pi^{}$ and $\pi^{2, {\rm safe}}\in\Pi^{2, {\rm safe}}$ be a Task Agent and a Safety Agent policy respectively; then for any $s_{\tau_k}\in\mathcal{S}$ and any $\tau_k\in\{\tau_1,\tau_2,\ldots\}$, define $\mathcal{Q}^{\pi^{},\pi^{2, {\rm safe}}}_2(s_{\tau_k},\pi_{\tau_k}^{2, {\rm safe}}):=-\boldsymbol{L}(s_{\tau_k},\pi_{\tau_k}^{2, {\rm safe}})-c(\pi_{\tau_k}^{2, {\rm safe}})+\gamma\int_{\mathcal{S}}ds\,P(s;\pi_{\tau_k}^{2, {\rm safe}},s_{\tau_k})v^{\pi^{},\pi^{2, {\rm safe}}}_2(s)$. We define the intervention operator $\mathcal{M}^{\pi^{},\pi^{2, {\rm safe}}}$ by the following: $\mathcal{M}^{\pi^{},\pi^{2, {\rm safe}}}v^{\pi^{},\pi^{2, {\rm safe}}}_2(s_{\tau_k}):=\underset{a'\in\mathcal{A}^{2,\rm{safe}}}{\max}\mathcal{Q}^{\pi^{},\pi^{2, {\rm safe}}}_2(s_{\tau_k},a')$.*
:::
The quantity $\mathcal{M}v_2$ measures the expected future stream of rewards for Safety Agent after an immediate (optimal) intervention minus the cost of intervening. The following result characterises the Safety Agent policy $\mathfrak{g}_2$ and the times that Safety Agent must perform an intervention.
::: {#prop:switching_times .proposition}
**Proposition 1**. *For any $s\in\mathcal{S}$, the Safety Agent intervention policy $\mathfrak{g}_2$ is given by the expression $\mathfrak{g}_2(s)=H\left(\mathbb{E}_{a\sim\pi}\left[\mathcal{M}^{\pi^{2, {\rm safe}}}v^{\pi^{},\pi^{2, {\rm safe}}}_2(s)- Q_2^{\pi^{},\pi^{2, {\rm safe}}}(s,a)\right]\right)(s)$ where $Q^{\pi^{},\pi^{2, {\rm safe}}}_2(s,a):=-\boldsymbol{L}(s,a)+\gamma\int_{\mathcal{S}}ds'P(s';a,s)v^{\pi^{},\pi^{2, {\rm safe}}}_2(s')$ and $H$ is the Heaviside function; moreover, Safety Agent's intervention times are given by $\tau_k=\inf\left\{\tau>\tau_{k-1}\,\middle|\,\mathcal{M}^{\pi^{2, {\rm safe}}}v^{\pi^{},\pi^{2, {\rm safe}}}_2= v^{\pi^{},\pi^{2, {\rm safe}}}_2\right\}$.*
:::
Hence, Prop. [1](#prop:switching_times){reference-type="ref" reference="prop:switching_times"} also characterises the (categorical) distribution $\mathfrak{g}_2$. Moreover, the times $\{\tau_k\}$ can be determined by evaluating whether $\mathcal{M}v_2=v_2$ holds. The result yields a key aspect of our algorithm for executing Safety Agent's interventions.
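In principle, the condition in Prop. 1 can be checked with learned critics: intervene whenever the best safe-action value exceeds the expected value of letting Task Agent act. The sketch below illustrates this; the critic `Q2`, the sampler `pi_task.sample`, and the finite set `safe_actions` are assumed components, not the paper's API.

```python
def should_intervene(Q2, pi_task, s, safe_actions, n_samples=16):
    """Evaluate g2(s) = H( E_{a~pi}[ M v2(s) - Q2(s, a) ] ) with learned critics.

    `Q2(s, a)` approximates the Safety Agent's action-value, `pi_task.sample(s)`
    draws task actions, and `safe_actions` is a finite set of candidate safe
    actions over which the intervention operator maximises.
    """
    m_v2 = max(Q2(s, a) for a in safe_actions)          # M v2(s): best immediate intervention
    q_pi = sum(Q2(s, pi_task.sample(s)) for _ in range(n_samples)) / n_samples
    return int(m_v2 - q_pi > 0)                         # Heaviside of the difference
```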
While implementing the intervention policy appears straightforward, since it amounts to comparing value functions, it requires the optimal value functions in question. Furthermore, learning the intervention policy and these value functions simultaneously resulted in an unstable procedure. As a solution, we propose to learn $\mathfrak{g}_2$ using an off-the-shelf policy gradient algorithm (such as TRPO, PPO or SAC). This policy is categorical with values $\{0, 1\}$ and has a reward $R^{\rm int}$ equal to $-\boldsymbol{L}(s_t, a) - c(a)$ if the safety agent intervenes with an action $a=a^{2, {\rm safe}}_t$, and $-\boldsymbol{L}(s_t, a)$ if the safety agent does not intervene, i.e., the task agent applies an action $a=a^{}_t$.
Implementing both the safety policy, which maximises the safety-associated reward, and the task policy, which maximises the task reward, can be done using any off-the-shelf policy gradient method without modification. To summarise, we learn three policies: the first learns the actions of the task agent, the second learns the action the safety agent takes if it intervenes, and the third learns when to intervene.
We take an off-policy approach, and every policy is an instance of SAC with the appropriate action space and reward. Since we learn off-policy, every agent can collect the same triplets $s_t$, $a$, $s_{t+1}$, where $a$ is the currently executed action (i.e., either the task action $a^{}_t$ or the safety action $a^{2, {\rm safe}}_t$). The rewards that the policies receive differ, though: the task policy receives $R_1 = R(s_t, a)$, the safety policy receives $R_2 = -\boldsymbol{L}(s_t, a) - c(a)$, and the intervention policy receives $R^{\rm int}= -\boldsymbol{L}(s_t, a) - c(a)\, a^{\rm int}$, where $a^{\rm int}\sim \mathfrak{g}_2(\cdot | s_t)$. Note that we chose to use the same triplets for all policies since we use off-policy algorithms, which allows us to use the acquired data more efficiently.
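The relabelling step can be isolated as follows; this complements the training-loop sketch above and assumes a generic replay-buffer `add` interface of our own choosing.

```python
def relabel_and_store(buffers, s, a, a_int, s_next, r, L_val, cost):
    """Store the same (s_t, a, s_{t+1}) triplet once per learner, each with its own reward."""
    buffers["task"].add(s, a, s_next, r)                         # R_1 = R(s_t, a)
    buffers["safe"].add(s, a, s_next, -L_val - cost)             # R_2 = -L(s_t, a) - c(a)
    buffers["int"].add(s, a_int, s_next, -L_val - cost * a_int)  # R_int = -L(s_t, a) - c(a) a^int
```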
2111.08919/paper.pdf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:0d048e9ccdc9328574402782e5d428b408f084afdccf6835efd6627da39c9e4d
size 6203292
2111.13415/main_diagram/main_diagram.drawio
ADDED
@@ -0,0 +1 @@
<mxfile host="app.diagrams.net" modified="2021-11-26T01:00:18.828Z" agent="5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.45 Safari/537.36" version="15.8.3" etag="mwyfv-QjcI-3xVCHYjWr" type="google"><diagram id="1UA5Efs0kJ8aTkl2feIU">7Vxtk6O4Ef41rrp8GBeIV3+csWcuqbrktrKX5PbTlWxkmyxGDuAde399WiABEsLGGHs8czu1W4ZGb6ifbnW3Woys6Wb/c4K367/TgEQjZAT7kTUbIWTaCI3YPyM4FBTP5YRVEga8UEX4HH4nnGhw6i4MSCoVzCiNsnArExc0jskik2g4SeirXGxJI7nXLV7xHo2K8HmBI9Io9p8wyNYF1Xdqpf9KwtVa9Gwa/MkGi8K8iXSNA/pa68t6HlnThNKsuNrspyRikyfmpWjopeVpObCExFmXCnzev+Fox9+Njys7iJeFCjCvcPP0ug4z8nmLF+zJK7AWaOtsE8GdCZcRnpPoCS++rhK6i4MpjWgCj2Ias8rpV5It2Msb7CZL6Ndy7sySotSh0FeYMZDYrFbeLAl4jSWNM1F+hCzTtlwbMXoYRTW68eI/+TOg8zclSUb2rbNlljwA8BK6IVlygCK8AvI5HzluLc8v7l8rFJgGp61rCPD4tGIOvFXZdMUbuODs0bPKuhdWyWyQGVef8GsyYoJkRvje2HMavJgYTVY4aOxczgx7QGb0kYzjDKzkZqKRG5ktL4/PztQ7Ik8DsMsyXZldEy27XA27kH85s5wmbwLQ5/yWJtmarmiMo+eK+lRRf6F0y6fuvyTLDnw5wruMynwk+zD7nbERAFbcfeFMZdezff3mULIbJxlvMW8jDh7ZAgW3iwinabgoiC9hFFVFahVkvKB2aABLZ48zNPN1MvvoI8N8PAkCNm3y8kV3yYKT3M6oSEiEs/Cb3JSOwbzqJxpCixWabEUL24p2LUbFa9UXP6UhR/QtGhLvIBoC5qxI1mgIOIQPtWJbViA9MmABYtGP4Z43Lrk8XBQjqMBfTm4neXA1ysuNMs5+ZhBhzlT3fztmiTz9myQBjnFFyI0rAZOKaBV/UrlK7ASR9fKQ5hh+hAKmsd3Xa7gr/psPaS4IU6hF9llJTy5o25n+BP/nYPelhw38jLyn7yNv9kcM1L+IHmAa5+pogFbMkSAriuWEqmd1hfA2tb1qvXBt3nm9vaICNxREWm5Dezt2U3tbikD10d7eKbR2BWIXVNfwgYzPgAdktCAFYGLU0cLweAIrBTnd4lgLXf4eDLbJav6TY7IeEMyPIV3mHRcvxLC+xJswOhTVoC282fIJsOH3W/mGMr31lafnvDK8YPEy8gtejy3dp/oysdQIYVNS70AsbcNSxNLoJJb2AEaVf9oCPmX3NqY0wOm6nNEtSUIYFUlYC2G84hbTFRTkcWN6CD75LYZGjU+W8BHXUuRgAMdx0sFX0ZmdR1kljF1h+H6RDF+9satarrLpexW7FRiUHGpGObv9UhrScFONNL8TQ+1g7/J1qbARubPidUZGXUJ1XiqnXWgqe4ol6agLcldT2XYnx23uFlO5h3UqgnzD4dXsjVcBnjp0akhqAc8b4LwDXt0mXj8sXD1Pbsj2u3l2feBqvgdv6td5ShLgS27AMUNv2eYA5VZmwI099FTUiHe5XagYf5eYXBrPRzHCgjAhiyykMatPUtbJmibhdyiGS8PtLr0l13Ek9KGJ3VjuTUuz3Iuw4iWrvbAY2vF4NoTO95zgh211LDAD1pQD6wy34iiy8iiABkwKEDTYwFG4YnBaACNJUvhHWQijfOQPNmEQRG0h23KvJvfcRdCW9d9rn2IAoJV2pFBzhjVGzbiqrdHYpjcA1HRbEldxcrsGi87QuDqnvB3DM0BqQUtJtgnjXVrEBy7zlFsiBI3S700AclXOx9vcMbqqQyxr3pZ9IZ1AiK3eiwSiy7bQ+aZraU7eOrx/N4ah5Sjuh++Ojdpfzw0A2/DPaXao7QDFOBW7yEOF903nVmq5Z+zxLM1ehmKPq9qaWtYYH33V9A/F2zMSaZvNzAhdJHIQravbzxpQ697aab8jrWuPPct0Td+2fMc1ZB1sW8bY9g3kwzN/Ap6V1VMJi1VTtGsrDQ2ldn1f6WdgtXtyo6pTIKCng/+JpqBYQsAYMv61DXBG3qc7dVP/yXHklRj5qKG1TMMTCUODK64Oeyi3isyXYXLm1dWjnaO7i3XmA/gkdodGnQP2IlJXj4D6d6ZyHUfZ0fMVnHXObfEVi9m4Tm6L4xrafgbTquftXnHNJG0kns57/NjLuO/KTrEtsq3PxZTaEPIUcA4XVUe6TaDzfBjbrhyX2qU/qA/DFs8Hvg6yFiKyzE6Gp3ou8VMaAzh3C9YEZgBL6BzPwyhMYdGF+4Cm5CEh6RbKsUj/hufdN9Mm5jgF6UAGjbWPt9yOACsCjy+MVZ2T+xpuagn1yG5aAi2Zq28S5Pc976TtMHGbQj1EKjhq33O6K+nQwvosyfknWdDNBhQ8q7EmWrgy3OfzDtOuyxAqBhfPU/aziKB0ykRoQ3LYnihfk6eMikGolbhFwRky/iEbimzozklcTTZQq2zcwA0bXgz1csIfzumeDSnPjnrM75OAJA9Abg7tbwFwL1weSklicpOeloBsjfMVJ2F1UrysZM1YA+vqC1F2GGvbO2sP5M8iJJPJGy4gl+/cvZMF5DkKN2Gch0P0uluFu5AKiXgCwx9szmSJ383pNgPJijqvsUfUwp92SVSl/aZL4gDbkm+TP4dGckzp1hug9fCRcEmlhE90Z76+mkHXO36kZtCVqeVX8PV1+5U94SmfvTM7wlPODe4e8rwreCINPK0f8LwcngNsL5bw7KM9zQ+hPS0NPO8tUvou4fl25+MuyvL8GIe46knKAA7W7iDpFBNlG0gctzh1Wn6ABGLtya6rAapDoMU8khcUFHnprDr4Kytw9f4AxAUF1sK4F0hZpqfq/uXNhLzdzY51Ej+YRVLo8Ux61uQD/P+t1khDAj6cXAwgA26HM8faTM4hDh1rD829FyHAe1UITp1k1sF9AUDKL+Y4gbLNdGioW9B14Pdmejl6zUWno+D8kJM+cjKxdTnPVxMVYdoMnMXy7s/e1S1d894sXd9UwlFI+ShSZ0vXcceTiZxZhyYW0K6Tb9IY+MD5JtbJ43zDm9PPm23G5uAzg8vLx1JwtzKQJ5POX5QawEYWh/cu0XltM1bXWbJGM46qp2YKc1sAvM+HqlqUXN3Ft+5NyTlq4pOJxrZbO9+hIKGrzlNXXFv9SsNw3r3V3Kj7QtIm1oAvv7C1TYZY9wTdhICtieeRgBlXw9C4A1bX7OjS+ZL/cUS/8L2rmdB2R8Sdf56TdzuqfW2zAlAhaa164MEYm75ITL8ULnIFulym5GL+DbD18vaK4habPh0UjO6TG/endITrWGZb9t
QyjqVoL0MZy4BaprkD8w96KU4LFXsbnPbE3FGFddSgPyNxvNJUyBVWy5utfz2hqCaTO7Z0TrIbMM9PLXfUXhWc10z9qvaw+rv9W4nzTnEbp8Nx9bNb0fsgxtKf+0Gzt98ep7+ezuKdt7oVb3VAqHI9jG72h5Qd0qaNWmS+7hw5AwVmbNlVte1mAHOgk+hwW30zvIB59eV16/n/</diagram></mxfile>
2111.13415/main_diagram/main_diagram.pdf
ADDED
Binary file (58.5 kB).