Add files using upload-large-folder tool
This view is limited to 50 files because it contains too many changes.
- 2002.12229/main_diagram/main_diagram.drawio +1 -0
- 2002.12229/main_diagram/main_diagram.pdf +0 -0
- 2002.12229/paper_text/intro_method.md +67 -0
- 2004.00048/main_diagram/main_diagram.drawio +1 -0
- 2004.00048/main_diagram/main_diagram.pdf +0 -0
- 2004.00048/paper_text/intro_method.md +98 -0
- 2102.05714/main_diagram/main_diagram.drawio +0 -0
- 2102.05714/paper_text/intro_method.md +57 -0
- 2104.07186/main_diagram/main_diagram.drawio +1 -0
- 2104.07186/main_diagram/main_diagram.pdf +0 -0
- 2104.07186/paper_text/intro_method.md +128 -0
- 2106.01342/main_diagram/main_diagram.drawio +1 -0
- 2106.01342/main_diagram/main_diagram.pdf +0 -0
- 2106.01342/paper_text/intro_method.md +72 -0
- 2109.02832/main_diagram/main_diagram.drawio +1 -0
- 2109.02832/main_diagram/main_diagram.pdf +0 -0
- 2109.02832/paper_text/intro_method.md +433 -0
- 2201.10222/main_diagram/main_diagram.drawio +1 -0
- 2201.10222/paper_text/intro_method.md +123 -0
- 2204.08453/main_diagram/main_diagram.drawio +1 -0
- 2204.08453/main_diagram/main_diagram.pdf +0 -0
- 2204.08453/paper_text/intro_method.md +22 -0
- 2205.11029/main_diagram/main_diagram.drawio +0 -0
- 2205.11029/paper_text/intro_method.md +145 -0
- 2206.07043/main_diagram/main_diagram.drawio +1 -0
- 2206.07043/main_diagram/main_diagram.pdf +0 -0
- 2206.07043/paper_text/intro_method.md +108 -0
- 2206.08194/main_diagram/main_diagram.drawio +1 -0
- 2206.08194/main_diagram/main_diagram.pdf +0 -0
- 2206.08194/paper_text/intro_method.md +196 -0
- 2209.05861/main_diagram/main_diagram.drawio +0 -0
- 2209.05861/paper_text/intro_method.md +78 -0
- 2209.08244/main_diagram/main_diagram.drawio +1 -0
- 2209.08244/main_diagram/main_diagram.pdf +0 -0
- 2209.08244/paper_text/intro_method.md +162 -0
- 2210.08933/main_diagram/main_diagram.drawio +1 -0
- 2210.08933/main_diagram/main_diagram.pdf +0 -0
- 2210.08933/paper_text/intro_method.md +103 -0
- 2210.13005/main_diagram/main_diagram.drawio +1 -0
- 2210.13005/main_diagram/main_diagram.pdf +0 -0
- 2210.13005/paper_text/intro_method.md +115 -0
- 2302.06058/main_diagram/main_diagram.drawio +0 -0
- 2302.06058/paper_text/intro_method.md +117 -0
- 2304.04205/main_diagram/main_diagram.drawio +1 -0
- 2304.04205/main_diagram/main_diagram.pdf +0 -0
- 2304.04205/paper_text/intro_method.md +181 -0
- 2306.01150/main_diagram/main_diagram.drawio +1 -0
- 2306.01150/main_diagram/main_diagram.pdf +0 -0
- 2306.01150/paper_text/intro_method.md +104 -0
- 2308.02000/main_diagram/main_diagram.drawio +253 -0
2002.12229/main_diagram/main_diagram.drawio
ADDED
@@ -0,0 +1 @@
<mxfile host="www.draw.io" modified="2020-01-16T20:50:21.918Z" agent="Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.117 Safari/537.36" version="12.5.5" etag="zKrbvRLIWFxQO7OC-C1n" type="device"><diagram id="-JvJuW7iEI2BqbwphRCb">5Z3Nduo4FoWfJsO7FpL8x/Amt7p60LVqkEGPHXDAVQ6miFNJ6unbjm2CE5ujG2y2Nj0CZGPQdwRbm3OErszNw8uvu3i7/i1fJtmVni1frsyPK60D35+VN1XLa91igrZltUuXdZt6b7hN/0maxva0p3SZPHZOLPI8K9Jtt3GRbzbJoui0xbtd/tw97T7Puq+6jVfJp4bbRZx9bv1vuizWdWvU9qJq/3eSrtbtK6tZc+Qhbk9uGh7X8TJ/Pmgyv1yZm12eF/W9h5ebJKvotVzq5/1r4Oj+je2STWHzBF0/4e84e2r61ryv4rXt7GqXP22vzPVjscv/TH7fxou0qAJX9tZcN09PdkXy0oc+vmsvM/v81tS+w+VQSfKHpNi9lqc0FzLz5inNKImah8/vxL1ZVLetD2j7gd9Euonyan/pdxDlnYZFPxeD5KL7uRx03LQDavSOe8iOm/6ONwNA9YyAXhAmGgGEjwThHQXh+Z847D8dHQ7e7HQOAZKDL34Sput4KHd8XTyUz/6hym4+r9MiuS27Xh16LnWvbIsft7X63KcvybICtI631fH0Mb95uivvXd/Fiz8rfJvl709Flm6S5nI/h/IQWSAim/cQ2yvUKcSiiyL20v2wNZ+99vNwDp7zS+T5QdV9fT6e7fTxsoAq0x2hYXRGouoiiUbdMapm3hmRQqfkx8F8k78MTTtx6uix8kYAYzEnd3GsKXk6P91gspjOMzHDa7Ky8AWERJGqbOEwCIlCZZnUu0hIobps4W4m/KnsOBkbYQ57yOgoGoEMqU+pI4oRZk1qRYaY4YVZk1oRgShQmFuAF0YUKcw2eQVGpEhhxqYsBDIWwqx7hdkPRiBD6lWG0h9nGU2kbmSImQPCTGpGBKJIYSZNtAhEocJM6mEkpEhhNhYeZ8IiiuNkvizMZgwypF7FzPqZnmU0kbqRIWZ4YTakZkQgChRmQ5puEYgihdmQehgJKVSYoTVdApkvC/NshByzIfUqBlj1ZUjdyBAzB4SZ1IwIRIHC3NaDXhhRpDB77nmY+zTLbvIs3729ulnGSXS/2J98cCRYRMnd/VhBQEq5Z+GKzl6mLtbnhyP8uO1Bl2jU3FFdJ7UZ3sD6jrN8UEh9xBAz/CzFI82UCESRsxRS9yEQhc5SSM2JhBQ657CwJxMq73EyNpVwXl+Juj8CGZ/UZtQRxQiz756POIkZXphbybowokBh9kkzJQJRpDD7rB5GQIoUZh+7Zv04GRth7iNj9Ai/6/ukXsWXF8BPN5pI3cgQMweEmdSMCESRwkyaKRGIIoU5IPUwElKkMAcWHmfCP1E5TubLCff5CGvHAlKvEgz8d9VZRhOpGxlihhfmgNSMCESBwhyQplsEolBhJvUwElKoMFt4nAn/bOU4mS8Lc6hHIEPqVeqIgoSZ1I0MMcMLc0hqRgSiQGEOSdMtAlGkMLdft5eGFCnMIbS6SyDzZWH2Rij+Ckm9Sggs/gpJ3cgQMweEmdSMCESRwkyabhGIQoXZPQ8zUYm6FASolPe5oiArKhp52YHDcAR/PeXtgW+Pb/s+fC9PCLzty/vB8t6qul20lynfQX2luv1TjEtyVftBqLu4N3kVtE5smqY4S1eb8uGiJJ2U7ddVHNJFnH1vDjyky2X1Mr2D521QVKPmx2zKj9g370N0++Yjui+8eoTwRn0WbYzwPv+/hFfKXIUfwhvNzxleaOrq+IYQ7VeWNO827VfQSSAsXN50IKKBnwpfuzNBkYQZoZgmgnqzOg5HfcSEXSc1XxHQfEWk5muIGd58RaTmSyAKNF8RqfkSiCLNV+Se+RoFKdJKRX1W6nzKe/LKm35hno1QRzTvcyEMow248mZOmgoaYoYX5rmFRyAkChTmOWmtm0AUKcxzVg/j8MqbOXTljUDmy+nK9iePk8iQepU5cOXNnNSNDDFzQJhJzYhAFCnMpLVuAlHork0zUhMjMYVu2zSDJjAENNDtIdSM1K40McWIs5qRWpJBanh53n8jXBpT6GbHpHkXiSlWokntjAgVq9HQZTgSG+hWEWpGal2aoKJEmtSeDFJzQKQVqT2RmCJFWpHmYCSmUJFWtL7G4RU5SkHLviQ20G0jlGL1LwpYGKYUq0MZouaCSLMaFIEpVKRJ8zESU6xIu+drJlqcI4YBK+vYmjL5l9zpqrn3CyRAUxolF39N2XlW61GzBM1ZWkgXQ82BOYtmzagITJFzFs3qSASm0DmLZjUsElToDERbWJYJVVhgAy1rV5rVemjgjpRKu+ctTqPmgkizZlQEpkiRNhb+g5EpVKQNra9xeJ9JZSx8z5QifZwNtMRdGVb/UgcVJNKG1aEMUXNApA2rQRGYQkWaNaMiMMWKNKuvkaBiRdrC90wo0gIbbLG7YfUvdVBBIu2xOpQhag6ItMdqUASmSJH2LOwJI1OoSHu0vkaAChVpz8L3TCnSx9lgi909Vv9SBxUl0qwOZYiaCyLNalAEplCRZk3LCEyxIs3qaySoUJH2sZVhAhtssbvP6l98ZOFY+017MdQcEGmf1aAITJEi7bOmZQSmUJH23fM1UxW7S2HAyjq01Exea2VmPV0f5e/rlY9dY+8PJPBfuypyntJ3H5sn8eWV8VN2ntUw+MhESMCaCBmi5sD0LaA1Eu4mQgJam+FwIiSg9RkuJ0ICbCJEYIOt+w/cMwx2Iy5AJkIC1kTIEDUXRJo1ESIwhYo0ayJEYIoVaVZfI0GFinRo4XsmFGmBDbbuP2T1LyEyERKyOpQhag6IdMhqUASmSJEOWRMhAlOoSIesvkaCihVp7Ap6gQ227j9k9S8hcgV9yOpQhqi5INKsBkVgihTp3t3nL4ApVKRt9nynhAoVaewG8hIbbN2/zZbyTo44i93oJxxRrA5liJoDIs26W73EFCrSrGkZgSlWpFl9jQQVK9LYyjCBDbbu32b3eTdHHLJwjHVn+kFqDog06871ElOkSLPuXS8xhYq0g9vXT1X3L4UBKus2e94Dit/Fyv9RCr6xm9M37GGdZ/UeyA3oFesO9IPUXJi0sKZU3N2EXrHuQi8xRU5atM029Mlyldw2D/Ndsc5X+SbOfnlvvX7jVQH9UVF5P+c/eb5t+P2RFMXrbfpPdZH4qcjLpoNgHaFbvbrEtny7+dNukbSdas4s4t0q2dviAbu9S7K4SP/uvsZpTN1zLB8ngn4SLb2+iWCk70wQjDa0kRNBTbvlvUQVChW7M6SAxuJHM9OeMvpCDs26s30TVMz8U7PuXT9IDT//1Kxb10tMgfNPbbNx
PSNT7PzTPaM00VxJDAN2rmRhrqaTdYkNdOnHfr7l0Bi1HHFzoKyz7nY/SM0BWWfd7F5iipR1m63uGZlCZV2554Qmk3UhDFBZV9B0kMQGulhEK1aPpIDZIq1YXdAQNRdk3T0TNApTqKyTZoskplBZ1+45oalkXQoDVNa1hbeaUNYFNtDlJbpF4dAYtRtxdSNI1jWrCxqi5oCsa/dM0ChMkbKuWZNFAlOsrLvnhKaSdSkMWFmH/m2zxAa6IEVrqEf6MBzvo0Wy6C2kvot87y0Up3xVyH8ZPeEYhPomJzg7MHUwUKPlUhSQkw0DTWG5FAXo9MRAHd3Pr6GZMAzQ6Ynp84hBVlSQ8rILhwEJ/nrK2wPfHt/Kfr+XJwTe9uX9YHlvVd2u28uUb6G+Ut3+Kcolu7fzPxYOt1HY5FXYOiFrmuIsXW3Kh4uSdVK2X1eRSBdx9r058JAul2+1zH3Dp1vffEJ8hehGXcVpK+MOZ1i6L7z658NbPtzlVRT2x34t+7r+LV8m1Rn/Aw==</diagram></mxfile>
2002.12229/main_diagram/main_diagram.pdf
ADDED
Binary file (12 kB).
2002.12229/paper_text/intro_method.md
ADDED
@@ -0,0 +1,67 @@
# Introduction

In this section, we introduce more detailed background knowledge.

Let $\mathbf{x}$ be a high-dimensional continuous variable. We suppose that $\mathbf{x}$ is drawn from $p^*(\mathbf{x})$, the true data distribution. Given a collected dataset $\mathcal{D} = \{\mathbf{x}_1, \mathbf{x}_2,...,\mathbf{x}_D\}$, we are interested in approximating $p^*(\mathbf{x})$ with a model $p_{\theta}(\mathbf{x})$. We optimize $\theta$ by minimizing the negative log-likelihood $$\begin{equation}
\mathcal{L}(\mathcal{D}) = \sum_{i=1}^{D} - \log p_{\theta}(\mathbf{x}_i).
\label{eq:likelihood}
\end{equation}$$

In some settings, the variable $\tilde{\mathbf{x}}$ is discrete; e.g., image pixel values are often integers. In these cases, we dequantize $\tilde{\mathbf{x}}$ by adding continuous noise $\bm{\mu}$ to it, resulting in a continuous variable $\mathbf{x} = \tilde{\mathbf{x}} + \bm{\mu}$. As shown by @ho2019flow, the log-likelihood of $\tilde{\mathbf{x}}$ is lower-bounded by the log-likelihood of $\mathbf{x}$.

Normalizing flows enable exact computation of $p_{\theta}(\mathbf{x})$, even though it is usually intractable for many other model families. A normalizing flow [@rezende2015variational] is composed of a series of invertible functions $\mathbf{f} = \mathbf{f}_1 \circ \mathbf{f}_2 \circ ... \circ \mathbf{f}_K$, which transform $\mathbf{x}$ into a latent code $\mathbf{z}$ drawn from a simple distribution. Therefore, with the *change of variables* formula, we can rewrite $\log p_{\theta}(\mathbf{x})$ as $$\begin{equation}
\log p_{\theta}(\mathbf{x}) = \log p_{Z}(\mathbf{z}) + \sum_{i=1}^{K} \log \left|\det \left(\frac{\partial \mathbf{f}_i}{\partial \mathbf{r}_{i-1}}\right)\right|,
\label{eq:relikelihood}
\end{equation}$$ where $\mathbf{r}_i = \mathbf{f}_i(\mathbf{r}_{i-1})$, $\mathbf{r}_{0} = \mathbf{x}$, and $\mathbf{r}_{K}=\mathbf{z}$.
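
The computation implied by Equation [\[eq:relikelihood\]](#eq:relikelihood){reference-type="ref" reference="eq:relikelihood"} is just a running sum over layers. The following minimal sketch is our illustration, not code from the paper; it assumes each hypothetical layer is a callable that returns its output together with the log-|det| of its Jacobian:

```python
import numpy as np
from scipy.stats import norm

def flow_log_prob(x, layers):
    """log p(x) = log p_Z(z) + sum_i log|det(df_i/dr_{i-1})|."""
    r, total_logdet = x, 0.0
    for f in layers:  # f maps r_{i-1} -> (r_i, log|det J_i|)
        r, logdet = f(r)
        total_logdet += logdet
    log_pz = norm.logpdf(r).sum()  # standard-normal base distribution
    return log_pz + total_logdet
```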

Emerging convolutions [@hoogeboom2019emerging] combine two autoregressive convolutions [@germain2015made; @kingma2016improved]. Formally, $$\begin{eqnarray*}
\mathbf{M}'_1 = \mathbf{M}_1 \odot \mathbf{A}_1, ~~~~~~~~
\mathbf{M}'_2 = \mathbf{M}_2 \odot \mathbf{A}_2, ~~~~~~~~
\mathbf{y} = \mathbf{M}'_2 \star (\mathbf{M}'_1 \star \mathbf{x}),
\end{eqnarray*}$$ where $\mathbf{M}_1, \mathbf{M}_2$ are convolutional kernels of size $c \times c \times d \times d$, and $\mathbf{A}_1, \mathbf{A}_2$ are binary masks. The symbol $\star$ represents the convolution operator.[^2] An emerging convolutional layer has the same receptive field as a standard convolutional layer, so it can capture correlations between a target pixel and its neighboring pixels. However, like other autoregressive convolutions, computing the inverse of an emerging convolution requires sequentially traversing each dimension of the input, so the computation is not parallelizable and becomes a bottleneck when the input is high-dimensional.

Periodic convolutions [@hoogeboom2019emerging; @Finzi2019Invertible] use discrete Fourier transforms to map both the input and the kernel to the Fourier domain. A periodic convolution is computed as $$\begin{equation*}
\mathbf{y}_{u,:,:} = \sum_{v} \mathcal{F}^{-1}(\mathcal{F}(\mathbf{M}^{(p)}_{u,v,:,:})\odot \mathcal{F}(\mathbf{x}_{v,:,:})),
\end{equation*}$$ where $\mathcal{F}$ is a discrete Fourier transform, and $\mathbf{M}^{(p)}$ is the convolution kernel of size $c \times c \times d \times d$. The computational complexity of periodic convolutions is $\mathcal{O}(c^2hw\log(hw) +c^3hw)$, and in our experiments we found that the Fourier transform also requires a large amount of memory. These two problems impact the efficiency of both training and sampling when the input is high-dimensional.

Memory-efficient Woodbury transformations can effectively reduce the space complexity. The main idea is to perform the spatial transformations along the height and width axes separately, i.e., a height transformation and a width transformation. The transformations are: $$\begin{eqnarray}
\mathbf{x}_c &=& (\mathbf{I}^{(c)} + \mathbf{U}^{(c)}\mathbf{V}^{(c)}) \mathbf{x}, \nonumber\\
\mathbf{x}_w &=& \text{reshape}(\mathbf{x}_c, (ch, w)), \nonumber\\
\mathbf{x}_w &=& \mathbf{x}_w (\mathbf{I}^{(w)} + \mathbf{U}^{(w)}\mathbf{V}^{(w)}), \nonumber\\
\mathbf{x}_h &=& \text{reshape}(\mathbf{x}_w, (cw, h)), \nonumber \\
\mathbf{y} &=& \mathbf{x}_h(\mathbf{I}^{(h)} + \mathbf{U}^{(h)}\mathbf{V}^{(h)}), \nonumber\\
\mathbf{y} &=& \text{reshape}(\mathbf{y}, (c, hw)),
\label{eq:me-w}
\end{eqnarray}$$ where $\text{reshape}(\mathbf{x}, (n,m))$ reshapes $\mathbf{x}$ into an $n \times m$ matrix. Matrices $\mathbf{I}^{(w)}$ and $\mathbf{I}^{(h)}$ are $w$- and $h$-dimensional identity matrices, respectively. Matrices $\mathbf{U}^{(w)}, \mathbf{V}^{(w)}, \mathbf{U}^{(h)}$, and $\mathbf{V}^{(h)}$ are $w \times d_w$, $d_w \times w$, $h \times d_h$, and $d_h \times h$ matrices, respectively, where $d_w$ and $d_h$ are constant latent dimensions.
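
To make the sequence of reshapes and low-rank updates concrete, here is a minimal NumPy sketch of the forward transformation and its log-determinant. The function and variable names are ours, and the $(c, hw)$ input layout plus the transpose-based reshapes are assumptions for illustration, not the authors' reference implementation.

```python
import numpy as np

def me_woodbury_forward(x, U_c, V_c, U_w, V_w, U_h, V_h):
    """Forward ME-Woodbury transformation on x of shape (c, h*w).

    U_c/V_c: (c, d_c)/(d_c, c); U_w/V_w: (w, d_w)/(d_w, w);
    U_h/V_h: (h, d_h)/(d_h, h).
    """
    c, hw = x.shape
    w, h = U_w.shape[0], U_h.shape[0]
    assert h * w == hw

    # Channel transformation: x_c = (I + U_c V_c) x.
    x_c = x + U_c @ (V_c @ x)
    # Width transformation on the last axis of a (c*h, w) view.
    x_w = x_c.reshape(c * h, w)
    x_w = x_w + (x_w @ U_w) @ V_w
    # Height transformation on the last axis of a (c*w, h) view.
    x_h = x_w.reshape(c, h, w).transpose(0, 2, 1).reshape(c * w, h)
    y = x_h + (x_h @ U_h) @ V_h
    # Restore the (c, h*w) layout.
    y = y.reshape(c, w, h).transpose(0, 2, 1).reshape(c, hw)

    # Log-determinant via Sylvester's identity; each small determinant
    # is scaled by the number of rows the transformation acts on.
    d_c, d_w, d_h = V_c.shape[0], V_w.shape[0], V_h.shape[0]
    logdet = (hw * np.linalg.slogdet(np.eye(d_c) + V_c @ U_c)[1]
              + c * h * np.linalg.slogdet(np.eye(d_w) + V_w @ U_w)[1]
              + c * w * np.linalg.slogdet(np.eye(d_h) + V_h @ U_h)[1])
    return y, logdet
```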

Using the Woodbury matrix identity and Sylvester's determinant identity, we can compute the inverse and the Jacobian determinant: $$\begin{eqnarray}
\mathbf{y} &=& \text{reshape}(\mathbf{y}, (cw, h)), \nonumber\\
\mathbf{x}_h &=& \mathbf{y}(\mathbf{I}^{(h)} - \mathbf{U}^{(h)}(\mathbf{I}^{(d_h)} + \mathbf{V}^{(h)}\mathbf{U}^{(h)})^{-1}\mathbf{V}^{(h)}), \nonumber\\
\mathbf{x}_w &=& \text{reshape}(\mathbf{x}_h, (ch, w)), \nonumber\\
\mathbf{x}_w &=& \mathbf{x}_w(\mathbf{I}^{(w)} - \mathbf{U}^{(w)}(\mathbf{I}^{(d_w)} + \mathbf{V}^{(w)}\mathbf{U}^{(w)})^{-1}\mathbf{V}^{(w)}), \nonumber \\
\mathbf{x}_c &=& \text{reshape}(\mathbf{x}_w, (c, hw)), \nonumber\\
\mathbf{x} &=& (\mathbf{I}^{(c)} - \mathbf{U}^{(c)}(\mathbf{I}^{(d_c)} + \mathbf{V}^{(c)}\mathbf{U}^{(c)})^{-1}\mathbf{V}^{(c)})\mathbf{x}_c,
\end{eqnarray}$$ $$\begin{eqnarray}
\log \left| \det\left(\frac{\partial \mathbf{y}}{\partial \mathbf{x}}\right) \right| &=& hw \log\left|\det(\mathbf{I}^{(d_c)}+\mathbf{V}^{(c)}\mathbf{U}^{(c)})\right| + ch\log\left|\det(\mathbf{I}^{(d_w)}+\mathbf{V}^{(w)}\mathbf{U}^{(w)})\right| \nonumber\\
&&+\, cw\log\left|\det\left(\mathbf{I}^{(d_h)}+\mathbf{V}^{(h)}\mathbf{U}^{(h)}\right)\right|,
\end{eqnarray}$$ where $\mathbf{I}^{(d_w)}$ and $\mathbf{I}^{(d_h)}$ are $d_w$- and $d_h$-dimensional identity matrices, respectively. The Jacobian of $\text{reshape}()$ is an identity matrix, so its log-determinant is $0$.
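
A matching sketch of the inverse, continuing the hypothetical helper above: each factor is undone with the Woodbury identity, so only small $d \times d$ linear systems are solved.

```python
def me_woodbury_inverse(y, U_c, V_c, U_w, V_w, U_h, V_h):
    """Invert me_woodbury_forward; y has shape (c, h*w)."""
    c, hw = y.shape
    w, h = U_w.shape[0], U_h.shape[0]
    d_c, d_w, d_h = V_c.shape[0], V_w.shape[0], V_h.shape[0]

    # Undo the height transformation (solve a d_h x d_h system only).
    y_h = y.reshape(c, h, w).transpose(0, 2, 1).reshape(c * w, h)
    x_h = y_h - (y_h @ U_h) @ np.linalg.solve(np.eye(d_h) + V_h @ U_h, V_h)
    # Undo the width transformation.
    x_w = x_h.reshape(c, w, h).transpose(0, 2, 1).reshape(c * h, w)
    x_w = x_w - (x_w @ U_w) @ np.linalg.solve(np.eye(d_w) + V_w @ U_w, V_w)
    # Undo the channel transformation.
    x_c = x_w.reshape(c, hw)
    return x_c - U_c @ np.linalg.solve(np.eye(d_c) + V_c @ U_c, V_c @ x_c)
```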

We call Equation [\[eq:me-w\]](#eq:me-w){reference-type="ref" reference="eq:me-w"} the memory-efficient Woodbury transformation because it reduces the space complexity from $\mathcal{O}(c+hw)$ to $\mathcal{O}(c+h+w)$. This method is effective when $h$ and $w$ are large. To analyze its complexity, we let all latent dimensions be at most $d$, as before. The complexity of the forward transformation is $\mathcal{O}(dchw)$; the complexity of computing the determinant is $\mathcal{O}(d(c+h+w)+d^3)$; and the complexity of computing the inverse is $\mathcal{O}(dchw + d^2(c+ch+cw)+d^3)$. As with Woodbury transformations, when the input is high-dimensional we can treat $d$ as a constant. Therefore, the computational complexities of the memory-efficient Woodbury transformation are also linear in the input size.

We list the complexities of the different methods in Table [2](#tab:complexity){reference-type="ref" reference="tab:complexity"}. The computational complexities of Woodbury transformations are comparable to those of the other methods, and may be smaller when the input is high-dimensional, i.e., when $c$, $h$, and $w$ are large.

:::: center
::: {#tab:complexity}
  Method                       Forward                            Backward
  ---------------------------- ---------------------------------- ----------------------------------
  1x1 convolution              $\mathcal{O}(c^2hw+c^3)$           $\mathcal{O}(c^2hw)$
  Periodic convolution         $\mathcal{O}(chw\log(hw)+c^3hw)$   $\mathcal{O}(chw\log(hw)+c^2hw)$
  Emerging convolution         $\mathcal{O}(c^2hw)$               $\mathcal{O}(c^2hw)$
  ME-Woodbury transformation   $\mathcal{O}(dchw)$                $\mathcal{O}(dchw)$
  Woodbury transformation      $\mathcal{O}(dchw)$                $\mathcal{O}(dchw)$

  : Comparison of computational complexities.
:::
::::

In this section, we present additional details about our experiments to aid reproducibility.
2004.00048/main_diagram/main_diagram.drawio
ADDED
@@ -0,0 +1 @@
<mxfile host="www.draw.io" modified="2020-01-17T20:21:00.932Z" agent="Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.100 Safari/537.36" etag="j9m2agXYvS60ngvYZsBA" version="12.5.5" type="google"><diagram id="n6LuasvkSc8RuUL3lQSj" name="Page-1">7Vxbc5s4FP41mdl9MIPE/TFJ0+50291O0pm2+0ZAtuli5AWRxPvrK4G4SAIbbOPamSYzKRwJAfrO5dM5olfG7erlXeqvlx9xiOIrqIcvV8abKwiBZbv0HybZcAmAoJQs0ijkskbwEP2PuFDn0jwKUSZ0JBjHJFqLwgAnCQqIIPPTFD+L3eY4Fu+69hdIETwEfqxKv0QhWZZSFzqN/A8ULZbVnYHtlS0rv+rM3yRb+iF+bomMuyvjNsWYlEerl1sUs9mr5qW87m1Pa/1gKUrIkAviP99H39L7z46V3+d/vQ/+MecfZo5RDvPkxzl/Y/60ZFNNQYrzJERsFHBl3DwvI4Ie1n7AWp8p6lS2JKuYN2ckxf+iWxzjlEoSnNBuN/MojivRFTTmc2QHAZWrr8Df6gmlBL20RPyV3iG8QiTd0C68dQY9/sSVhnl8vp8buEyH91m2oXJ5R5+ryKIevJlFesAncsSk2gMmFSXhNVNPehbEfpZFgTiP6CUiX+mxrkGLn35rNb1hr69XJxt+MscJac2zrtu2riugtFt6EUChYBfq/Lem19LV2a1kKYp9Ej2J1tQ14/wOn3BEn6SB1xbBtSXMMpynAeIXtZV/+ziGJ41D/HSBiDJOAX/90vtrBNitEAtqZmuO4QMXd9h4n4HUHtB/rEbUu4GrDUfXu+ekhaxhdEALzS0wHmQ4sGOa7Jje9uaRHizYwfUCFbgmVQu9Ud24xXXpu12XH0eLhNkjvQOihnLDJjmioeCaNxDMrsjoAFGyoOczU/VtoY/cedBlc3bgosd5p81t1ZnBrhBamuGaNrD4X0+FUze1pt01nQ5wdVcDht36nQjrLh8pYa1p2i+UZZQB1GyPIulUfy3RQzqaBZ3271lrgaloQWXg4DJg3uGuD8S8cs1Qc9tg2GeNqaVg+gnHUbA5jFgqgCqkcg6DTqxC+9G27CkdLzTozFqeCWzTtGxoChZpeZpnO7rrWCYs/irg2VAzd4DXowHHp64qUaVEsFJwnJIlXuDEj+8a6Y1ol9/z1Zov49icN5d8wMziCiy/I0I2vJOfE9xHfdvMtya7ncy3k05T4dso7l6ZFCSY/YzWi5JybuvHDaDklLsNZSfVHsyhD4LeOSfo28DXatAHPX37r+2T1lXstLmsODtHlTF/lsYUl9Jp8DetDmu2DMpaI0uLKWiIlMNw2sq3q7vjSKpa3v6o6y33YnX5AJ3c1xB+ki4D5xUoM/C8fm1WLrZ06WJTfJmpsxC6wsyq9RWjsYLB2P/luGqYZYWOXzNWrq9fmsZqRbbm/I6v39JKnq9Dn6DWGq68Tc8yjrItMjKZyEU7yf4qCsPCyLtIpmj4k5FEKd0CoKUQQacr2zJVlhKoPP0D8tOELYcUJLNNRtDqFTN4XTNs2wGMoBsGkLKOrpoZqwobAkl3poLKUaC6R89+GlLZPE8CEuHk1WLjOWcGhquA8RGtcPp617dy2AKupvquk2IAx1VYeJhozfa4qTqTYogrJhaAJWUBhhZD5HE4hzkVDYFq0m8beN3lsX0XfZyUVwR9CCkfpyu70wPcKHYT5KGrvdNoHzClehHYsxZXV2j5QFAu6k2sf1Z/6eEwGlzG5OwX3y29k1SY6IjbJ+W7den0J7idiww3UAr7cjQfau8yfZDHmTrcwAO5QgN6g/M3AeahoFcbmHZ6fvesFGEGpAWRae/JPOhIlua1flxPGNjSzYpanioYdJGRug49IBK4NBIUIUAKBl9wGofHL18X9cYy5Ui15DhbqIALRQvVVVddB+m2fsmB+3glRDU3Md5X70H1alMHh/r3oWxwcK3IPCuXIHM409mTDMrRQXEtPfY/Ns2rPDC/z1CSCgxhd+U0ZYtqQXcIMzV7/NHRBrou0k39LLdJHR7haS+DQ7edsnkcpwykQoNpqht5TkqfLTXttS+N2m/JPqaOtr8/HlyJPS9/PAOSH53pMpEaztGkSsXMUsY6klOe1TmEiv/B7dU35YLTuGXvcLdsHcst9w3092OG0if/iL551yNfnm+2jkSY5W8OLN3oyIif1D3bXeuYc8ak62uENlbdBY3xWBmuAJXhqGsb86RAqbW80T7A7so+Xocs9aiTJaJ/8XyerdOilEuP2XuwbbWs/XmJi3CzTnGYByjU6Nln5Zqlz/qucsIdCruNHkbzOUrL7bmpXH+k/W8ZWHwrgE4wPcSLCLGB/IR1jZKIRH5cvXymXZ4nGfUNzRgtFZQUVp9ltZQUOOYJtdQZ+cVKe2u03btH7ogfr0gV+g52DNyuCZvuo69x/LgzZdE3k0rJ1/OUPV+aA8XspObu3gFJzz6hNKIzwIxjTKraOa9c9cwWE4lA2tw+/DsxcRxoSgNNnI501DB+94TjnHlYv5hP6rcDlGWX4iO5noqfSUwT6aErhnpQsbR2GlM/ZayvuMbRY/09WuEnpIb1MEIs0P6W+CRPi0iboRgV8fn3y462e0SVLZriyPFWZe+gK+E9HStUl5izCwNmCpOuN0lOz97pafMtfunOm//SwLj7AQ==</diagram></mxfile>
2004.00048/main_diagram/main_diagram.pdf
ADDED
Binary file (38.5 kB).
2004.00048/paper_text/intro_method.md
ADDED
@@ -0,0 +1,98 @@
# Introduction

We recall the single-agent fully-observable RL setting [@sutton1998introduction], where the environment is typically formulated as a Markov decision process (MDP). At every time step, $t=1, 2, \dots$, the agent observes the environment's state $s_t \in \mathcal{S}$, and uses it to select an action $a_t \in \mathcal{A}$. As a consequence, the agent receives a reward $r_t\in\mathcal{R}\subset \mathbb{R}$ and the environment transitions to the state $s_{t+1}$. The tuple ($s_{t+1}, r_t$) is sampled from the static probability distribution $p: \mathcal{S} \times \mathcal{A} \to \mathcal{P}(\mathcal{S}\times \mathcal{R})$, whilst the actions $a_t$ are sampled from the parametric policy function $\pi_\theta: \mathcal{S} \to \mathcal{P}(\mathcal{A})$: $$\begin{equation}
s_{t+1}, r_t \sim p(s_{t+1}, r_{t}|s_t, a_t), \quad a_t \sim \pi_\theta(a_t|s_t)
\end{equation}$$ The goal of the agent is to find the optimal policy parameters $\theta^*$ that maximise the expected return $\Bar{R}=\mathbb{E}[\sum_{t=0}^\infty\gamma^t r_t]$, where $\gamma$ is the discount factor. In the more general framework, the state is only partially observable, meaning that the agent cannot directly observe the state; instead it observes $o_t \in \mathcal{O}$, which is typically given by a function of the state. In this situation, the environment is modelled by a partially observable Markov decision process (POMDP) and the policy usually incorporates the past history $h_t=a_0o_0r_0, \dots, a_{t-1}o_{t-1}r_{t-1}$.

The action-value function $Q^\pi$ gives the estimated return when the agent has the state history $h_t$, executes action $a_t$ and follows the policy $\pi$ on future time steps. It can be recursively defined by $Q^{\pi}(h_t, a_t) = \mathbb{E}_{s_{t+1},r_{t} \sim p}\big[r_{t}+\gamma \mathbb{E}_{a_{t+1} \sim \pi}[Q^\pi(h_{t+1}, a_{t+1})]\big]$. Q-learning and Deep Q-Networks (DQN) [@mnih2015human] are popular methods for obtaining the optimal action-value function $Q^*$. Once we have $Q^*$, the optimal policy is also available as $\pi^*=\arg\max_{a_t} Q^*(h_t,a_t)$. In DQN, the action-value function is approximated by a deep NN with parameters $\theta$. $Q_\theta^*$ is found by minimising the loss function: $$\begin{equation}
\mathcal{L}_t(\theta) = \mathbb{E}_{h_t,a_t,r_t,h_{t+1}}[(y_t-Q_\theta^\pi(h_t, a_t))^2],\quad \text{where } y_t=r_t+\gamma \max_{a_{t+1}}Q_\theta^\pi(h_{t+1}, a_{t+1})
\end{equation}$$ Where $\pi$ is the $\epsilon$-greedy policy, which takes the action $\arg\max_{a_t} Q^\pi(h_t, a_t)$ with probability $1-\epsilon$, and a random action with probability $\epsilon$.

In this work, we consider the MARL setting where the underlying environment is modelled by a partially observable Markov game [@hansen2004dynamic]. In this setting, the environment is populated by multiple agents which have individual observations and rewards and act according to individual policies. Their goal is to maximise their own expected return.

Our work builds on VDN [@sunehag2017value], which was designed to address the binary cooperative MARL setting. In this setting, the agents are grouped into teams and all the agents within a team receive the same reward. VDN's main assumption is that the joint action-value function of the whole team of cooperative agents can be additively decomposed into the action-value functions of the members of the team. $$\begin{equation}
\label{eq:vdn_team}
Q^\mathcal{T}((h_t^1, h_t^2, \dots, h_t^{|\mathcal{T}|}), (a_t^1, a_t^2, \dots, a_t^{|\mathcal{T}|})) \approx \sum_{i \in \mathcal{T}}\tilde{Q}^i(h_t^i,a_t^i)
\end{equation}$$ Where $\mathcal{T}$ is the set of agents belonging to the team, and $\tilde{Q}^i(h_t^i,a_t^i)$ is the value function of agent $i$, which depends solely on its partial observation of the environment and its action at time $t$. The $\tilde{Q}^i$ are trained by back-propagating gradients from the Q-learning rule through the summation. $$\begin{equation}
g_i = \nabla \theta_i(y^\mathcal{T}_t-\sum_{i \in \mathcal{T}}\tilde{Q}(h^i_t,a^i_t|\theta_i))^2, \qquad
y^\mathcal{T}_t = r^\mathcal{T}_t+\gamma \sum_{i \in \mathcal{T}}\max_{a^i_{t+1}}\tilde{Q}(h^i_{t+1},a^i_{t+1}|\theta_i)
\end{equation}$$ Where $\theta_i$ are the parameters of $\tilde{Q}^i$, $g_i$ is its gradient, and $r^\mathcal{T}_t$ is the reward of team $\mathcal{T}$ at time $t$. Note that even though the training process is centralised, the learned agents can be deployed independently, since each agent acting greedily with respect to its own $\tilde{Q}^i$ will also maximise its team value function: $\arg\max_{a^i_t} Q^{\mathcal{T}}_t(\dots) \approx \arg\max_{a^i_t} \tilde{Q}^i(h^i_t, a^i_t)$.

In this section, we propose a reward function that enables RL algorithms to search for policies with increasing evolutionary success. We call this reward the evolutionary reward because it is always aligned with the fitness function (see section [1.2](#sec:twoloops){reference-type="ref" reference="sec:twoloops"} for a definition of this alignment), and therefore we don't need to go through the expensive process of aligning the agents' reward functions through evolution. We also propose a specific RL algorithm that is particularly suited to maximising the evolutionary reward in open-ended evolutionary environments (although other RL algorithms could also be used).

The evolutionary reward of an agent is proportional to the number of copies its replicators have in the world's population. Maximising this reward leads to the maximisation of the survival and reproduction success of the replicators an agent carries. We start by defining the kinship function between a pair of agents $i$ and $j$, who carry $N$ replicators represented by the integer vectors $\bm{g}^i$ and $\bm{g}^j$ (we chose to use $\bm{g}$ for genome, which in biology is the set of genes (replicators) an agent carries): $$\begin{equation}
\label{def:kinship}
k\colon \mathbb{Z}^N\times \mathbb{Z}^N \to [0, 1], \qquad
k(\bm{g}^i, \bm{g}^j) = \frac{1}{N}\sum_{p=1}^N \delta_{g^i_p, g^j_p}
\end{equation}$$ Where $\delta_{g^i_p, g^j_p}$ is the Kronecker delta, which is one if $g^i_p=g^j_p$ and zero otherwise. When agent $i$ is alive at time $t+1$, it receives the reward: $$\begin{equation}
\label{eq:reward}
r_t^i = \sum_{j \in \mathcal{A}_{t+1}}{k(\bm{g}^i, \bm{g}^j)}
\end{equation}$$ Where $\mathcal{A}_{t+1}$ is the set of agents alive at the instant $t+1$. Note that since agent $i$ is alive at $t+1$, $\mathcal{A}_{t+1}$ includes agent $i$. $T^i-1$ is the last time step at which agent $i$ is alive, and so, at this instant, the agent receives its final reward, which is proportional to the discounted sum of the number of times its genes will be present in other agents after its death: $$\begin{equation}
\label{eq:final_reward}
r_{T^i-1}^i = \sum_{t=T^i}^\infty \gamma^{t-T^i}\sum_{j \in \mathcal{A}_t}{k(\bm{g}^i, \bm{g}^j)}
\end{equation}$$
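
To make the definitions concrete, here is a minimal NumPy sketch of the kinship function and the per-step evolutionary reward. The names `kinship`, `evolutionary_reward`, and `alive_genomes` (the genomes of the agents in $\mathcal{A}_{t+1}$) are ours, for illustration only.

```python
import numpy as np

def kinship(g_i, g_j):
    """Fraction of matching replicators between two genomes."""
    g_i, g_j = np.asarray(g_i), np.asarray(g_j)
    return float(np.mean(g_i == g_j))

def evolutionary_reward(g_i, alive_genomes):
    """Per-step reward: summed kinship with every agent alive at t+1."""
    return sum(kinship(g_i, g_j) for g_j in alive_genomes)

# Example: two of three genes shared -> kinship 2/3.
assert abs(kinship([1, 2, 3], [1, 2, 4]) - 2 / 3) < 1e-9
```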

With this reward function, the agents are incentivised to maximise the survival and replication success of the replicators they carry. In the agent-centered view, the agents are incentivised to survive and replicate, but also to help their family (kin) survive and replicate, and to make sure that when they die their family is in a good position to carry on surviving and replicating.

The discount factor, $\gamma$, needs to be in the interval $[0, 1[$ to ensure the final reward remains bounded. Due to the exponential discounting, we can compute the final reward up to an error of $\epsilon$ by summing over a finite period of time denoted by the effective horizon ($h_e$). To see how to compute $h_e$ for a given environment and $\epsilon$, see appendix [10.1](#sec:he){reference-type="ref" reference="sec:he"}. We can now use Q-learning to train agents with this evolutionary reward. However, in the next section we introduce a more practical algorithm that allows us to estimate the final reward without having to simulate the environment forward for $h_e$ iterations.

We propose Evolutionary Value-Decomposition Networks (E-VDN) as an extension of VDN (explained in detail in section [3.0.0.4](#sec:vdn){reference-type="ref" reference="sec:vdn"}) from the binary cooperative setting with static teams to the continuous cooperative setting with dynamic families. E-VDN helps us reduce the variance of the value estimation and allows us to estimate the final evolutionary reward without having to simulate the environment forward for $h_e$ iterations.

Within a team, each agent fully cooperates with all the other members of the team, and it does not cooperate at all with any agent outside of the team. Moreover, if $a$ and $b$ are members of the same team and $c$ is a member of $a$'s team, then $c$ and $b$ are also in the same team. Within a family, the degree of cooperation amongst its members depends on their kinship degree (which can be any real number from 0 to 1). Also, if $a$ and $b$ are members of the same family and $c$ is part of $a$'s family, $c$ is not necessarily part of $b$'s family.

Each agent $i$ sees the members of its family from a unique perspective, based on the kinship degree it shares with them. In E-VDN, each agent $i$ has a joint action-value function, $Q^i$. E-VDN assumes $Q^i$ can be composed by averaging the action-value functions across the members of $i$'s family, weighted by their kinship with agent $i$ (this extends VDN's assumption, given by [\[eq:vdn_team\]](#eq:vdn_team){reference-type="eqref" reference="eq:vdn_team"}, to the continuous cooperative setting): $$\begin{equation}
\label{eq:maeq}
Q^i((h_t^1, h_t^2, \dots, h_t^{|\mathcal{A}_t|}), (a_t^1, a_t^2, \dots, a_t^{|\mathcal{A}_t|})) \approx \frac{1}{n^i_t}\sum_{j \in \mathcal{A}_t}k(\bm{g}^i,\bm{g}^j)\tilde{Q}^j(h_t^j,a_t^j)
\end{equation}$$ Where $n^i_t$ is a normalisation coefficient defined as $n^i_t=\sum_{j \in \mathcal{A}_t}k(\bm{g}^i,\bm{g}^j)$. Composing $Q^i$ with an average, instead of a sum, is necessary because E-VDN allows the number of value functions contributing to the composition to vary as the family gets bigger or smaller (agents are born and die). This averaging allows us to incorporate the local observations of each family member and reduce the variance of the value estimation.
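
A short sketch of this composition, reusing the hypothetical `kinship` helper from the earlier snippet; `q_values[j]` stands for $\tilde{Q}^j(h_t^j, a_t^j)$:

```python
def family_q(i, q_values, genomes):
    """Kinship-weighted average of individual Q-values, as in eq. maeq."""
    weights = np.array([kinship(genomes[i], g) for g in genomes])
    n_i = weights.sum()  # normalisation coefficient n_t^i (> 0: includes i)
    return float(weights @ np.asarray(q_values)) / n_i
```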

More importantly, E-VDN allows us to deal with the difficulty of estimating the terminal reward [\[eq:final_reward\]](#eq:final_reward){reference-type="eqref" reference="eq:final_reward"} in a particularly convenient way. As is clear from its definition [\[eq:final_reward\]](#eq:final_reward){reference-type="eqref" reference="eq:final_reward"}, the terminal reward is the expected sum (over time) of the kinship that agent $i$ has with other agents $j$ after its death. The key idea is to note that this value ($r^i_{T^i-1}$) can be approximated by the Q-values of other agents $j$ that are close to (have high kinship with) agent $i$: $$\begin{equation}
\hat{r}^i_{T^i-1} = \left\{
\begin{array}{ll}
\frac{1}{n^i_{T^i}}\sum_{j\in \mathcal{A}_{T^i}}k(\bm{g}^i,\bm{g}^j)\tilde{Q}^j_{T^i}(\dots)\approx Q^i_{T^i}(\dots) & \mbox{if } n^i_{T^i} > 0 \\
0 & \mbox{if } n^i_{T^i} = 0
\end{array}
\right. \label{eq:final_rew_estimate}
\end{equation}$$ The final reward is zero if, and only if, at the time of its death the agent has no surviving family.

Each $\tilde{Q}^i$ is trained by back-propagating gradients from the Q-learning rule: $$\begin{equation}
\label{eq:gradient}
g^i_t = \nabla \bm{\theta_i}(y^i_t-\frac{1}{n^i_t}\sum_{j \in \mathcal{A}_t}k(\bm{g}^i,\bm{g}^j)\tilde{Q}^j(h^j_t, a^j_t|\tilde{\theta}_j))^2 \approx \nabla \bm{\theta_i}(y^i_t-Q^i_t(\dots|\bm{\theta_i}))^2
\end{equation}$$ Where $\bm{\theta}_i$ is the concatenation of all the parameters $\tilde{\theta}_j$, used in each $\tilde{Q}^j$, contributing to the estimation of $Q^i$; i.e. $\bm{\theta}_i \coloneqq \{ \tilde{\theta}_j \}_{j~\text{s.t.}~k(\bm{g}^i, \bm{g}^j) > 0}$. Note that the $\tilde{Q}^i$ are neural networks with parameters $\tilde{\theta}_i$, and $Q^i$ is simply the average stated in [\[eq:maeq\]](#eq:maeq){reference-type="eqref" reference="eq:maeq"}.

The learning targets $y^i_t$ are given by: $$\begin{align}
y^i_t &= \left\{
\begin{array}{ll}
r^i_t+\gamma \max_{\bm{a}_{t+1}}Q^i_{t+1}(\dots) & \mbox{if } t < T^i-1 \\
\hat{r}^i_{T^i-1} & \mbox{if } t = T^i-1
\end{array}
\right. \label{eq:learning_targets}
\end{align}$$ $r^i_t$ is the evolutionary reward [\[eq:reward\]](#eq:reward){reference-type="eqref" reference="eq:reward"} and $\hat{r}^i_{T^i-1}$ is the estimate of the final evolutionary reward [\[eq:final_rew_estimate\]](#eq:final_rew_estimate){reference-type="eqref" reference="eq:final_rew_estimate"}. We don't use a replay buffer in our training (although one is commonly used in DQN) due to the non-stationarity of multi-agent environments (more about this in appendix [10.2](#sec:replay_buffer){reference-type="ref" reference="sec:replay_buffer"}).

Since the joint action-value $Q^i$ increases monotonically with increasing $\tilde{Q}^i$, an agent acting greedily with respect to its own action-value function will also act greedily with respect to its family action-value function: $\arg\max_{a^i_t}Q^i_t(\dots) \approx \arg\max_{a^i_t}\tilde{Q}^i(h^i_t, a^i_t)$.

# Method

We want to find the number of iterations ($h_e$) that guarantees that the error between the estimate of the final reward and the actual final reward is at most a given $\epsilon$: $|r^i_{T^i-1}-\hat{r}^i_{T^i-1}| \leq \epsilon$.\
Remember that the final reward is given by: $$r^i_{T^i-1} = \sum_{t=T^i}^\infty \gamma^{t-T^i}\sum_{j \in \mathcal{A}_t}{k(\bm{g}^i, \bm{g}^j)}=\sum_{t'=0}^\infty \gamma^{t'}k^i_{t'}$$

Where $t'=t-T^i$ and $k^i_{t'} = \sum_{j\in \mathcal{A}_{t'}}k(\bm{g}^i, \bm{g}^j)$. The estimate of the final reward is computed with the finite sum $\hat{r}^i_t=\sum_{t'=0}^{h_e-1} \gamma^{t'}k^i_{t'}$.\
Note that $k^i_t$ is always positive, so the error $r^i_{T^i-1}-\hat{r}^i_{T^i-1}$ is always positive as well. To find the $h_e$ that guarantees an error smaller than or equal to $\epsilon$, we define $r_b$ as an upper bound of $k^i_t$ and ensure that the worst possible error is smaller than or equal to $\epsilon$: $$\begin{align}
\sum_{t'=0}^{\infty} \gamma^{t'}r_b-\sum_{t'=0}^{h_e-1} \gamma^{t'}r_b &\leq \epsilon\\
\frac{r_b}{1-\gamma}-r_b\frac{1-\gamma^{h_e}}{1-\gamma} &\leq \epsilon\\
\frac{r_b\gamma^{h_e}}{1-\gamma} &\leq \epsilon\\
h_e\log{\gamma} &\leq \log{\frac{\epsilon(1-\gamma)}{r_b}}\\
h_e &\geq \frac{\log{\frac{\epsilon(1-\gamma)}{r_b}}}{\log{\gamma}}
\end{align}$$

We go from (1) to (2) by using the known convergence of geometric series: $\sum_{k=0}^\infty ar^k = \frac{a}{1-r}$ and $\sum_{k=0}^{n-1} ar^k = a\frac{1-r^n}{1-r}$ for $r < 1$; the last step flips the inequality because $\log{\gamma} < 0$. Since $h_e$ needs to be a positive integer, we take the ceiling $h_e = \ceil*{\frac{\log{\frac{\epsilon(1-\gamma)}{r_b}}}{\log{\gamma}}}$ and note that this equation is only valid when $\frac{\epsilon(1-\gamma)}{r_b}<1$. For example, an environment that has the capacity to feed at most 100 agents has $r_b=100$ (the best possible reward, attained when the kinship between every pair of agents is 1). If we use $\epsilon=0.1$ and $\gamma=0.9$, then $h_e=88$.
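
The bound reduces to a one-line formula. A sketch, reproducing the worked example:

```python
import math

def effective_horizon(r_b, gamma, eps):
    """Smallest h_e with worst-case truncation error <= eps.

    Valid for 0 <= gamma < 1 and eps * (1 - gamma) / r_b < 1.
    """
    return math.ceil(math.log(eps * (1 - gamma) / r_b) / math.log(gamma))

# r_b = 100, gamma = 0.9, eps = 0.1 gives h_e = 88, as in the text.
assert effective_horizon(100, 0.9, 0.1) == 88
```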

When using Q-learning methods with DQN, as we are, it's common practice to use a replay buffer. The replay buffer stores the experiences ($s_t, a_t, r_t, s_{t+1}$) for multiple time steps $t$. When training, the algorithm randomly samples experiences from the replay buffer. This breaks the auto-correlation between consecutive examples and makes the algorithm more stable and sample-efficient. However, in non-stationary environments past experiences might be outdated. For this reason, we don't use a replay buffer. Instead, we break the auto-correlations by collecting experiences from many independent environments sampled in parallel, and we discard each batch of experiences after it is used. In our experiments, we simulated 400 environments in parallel and collected one experience step from each agent in each environment to form a training batch.

In some situations, we used a denser version of the evolutionary reward to speed up the training process. We call it the *sugary* reward, $r_t^{\prime i} = \sum_{j \in \mathcal{A}_t}{k(\bm{g}^i, \bm{g}^j)}f_t^j$, where $f_t^j$ is the food collected by agent $j$ at time $t$. In these simple environments, the *sugary* and the evolutionary reward are almost equivalent, since a family with more members will be able to collect more food and vice versa. However, the *sugary* reward contains more immediate information, whilst the evolutionary reward has a lag between good (bad) actions and high (low) rewards; a family that is not doing a good job at collecting food will take a while to see some of its members die from starvation. Nonetheless, the evolutionary reward is more correct, since it describes exactly what we want to maximise. Note that this reward was not used to produce the results when comparing E-VDN with CMA-ES.

When using the standard evolutionary reward to evolve the larger NNs, the same four eras that were observed with the *sugary* reward emerge. However, their progression is not as linear. In this case, the families take longer to learn, and sometimes one family evolves much faster than the others. When this happens, the families left behind eventually catch up with the most developed ones. The behaviour of the emerging families interferes with that of the developed ones, creating a temporary disruption in the environment's macro-statistics. Two disruptions were observed in one of our simulations, and we named them the First and the Second Family Wars (fig. [5](#fig:standard_history){reference-type="ref" reference="fig:standard_history"}).

{#fig:standard_history}
2102.05714/main_diagram/main_diagram.drawio
ADDED
The diff for this file is too large to render.
2102.05714/paper_text/intro_method.md
ADDED
@@ -0,0 +1,57 @@
# Introduction

Deep reinforcement learning has been successful in a series of control problems, such as Atari 2600 video games [@mnih2013playing] and MuJoCo environments [@lillicrap2015continuous]. However, the advances of deep RL rely on a large amount of interaction with the environment. In addition, the policy tends to specialize to the training domain and fails to generalize to new domains, even when the two domains are similar. It has been shown that slight visual changes to pixel-based observations from Atari games can cause a well-trained policy to break down completely [@gamrian2019transfer]. These two limitations make deep reinforcement learning algorithms inefficient when applied to sets of tasks. As a result, efficient domain adaptation approaches are important for the applicability of deep RL.

Although state-of-the-art methods have demonstrated compelling performance in domain adaptation in RL, these approaches all have their limitations. Domain randomization [@tobin2017domain; @andrychowicz2020learning; @slaoui2020robust] relies on the availability of multiple source domains for training and cannot be applied in one-to-many generalization scenarios. Image-to-image translation approaches [@pan2017virtual; @tzeng2020adapting; @gamrian2019transfer] need a computationally expensive generator model for image translation. The extra computational burden brought by the generator model is impractical for real-time applications such as autonomous driving. Other approaches utilize the latent embedding of encoder-decoder models to extract an internal state representation for better generalization [@higgins2017darla]. However, domain-specific variations are also compressed into the latent embedding, which can be problematic for zero-shot policy transfer.

To solve the problem of domain adaptation across related RL tasks and avoid the limitations of prior methods, we propose to learn a latent unified state representation (LUSR) for different domains and then train RL agents in the source domain based on it. After the RL training, zero-shot policy transfer is evaluated in target domains. To learn LUSR, we split the latent state representation into a domain-general embedding, which contains information present in all domains, and a domain-specific embedding, which compresses domain-specific information. LUSR is composed of the domain-general embedding only and is thus able to ignore domain-specific variations and generalize across domains.

To empirically justify our approach, we conducted experiments in two car driving tasks with different visual complexity. We first applied our approach to CarRacing games, with analysis of final domain adaptation performance, domain adaptation performance across the training period, generalization to totally unseen domains, and policy explanation with saliency maps. Then we evaluated our approach on autonomous driving tasks in the CARLA simulator [@Dosovitskiy17], with more challenging and realistic visual observations.

In comparison with other approaches, LUSR does not need RL training in multiple source domains like domain randomization and is thus applicable to a wider range of tasks. In addition, LUSR does not need computationally expensive generator models and can achieve better training efficiency compared with image-to-image translation approaches that operate in pixel space. Finally, in contrast with other approaches that use a latent state representation, LUSR filters out the factors of variation across domains and ensures the latent state representation is unified across all domains.

# Method

Our approach focuses on learning a latent unified state representation (LUSR) for states from different domains in RL. In this section, we first introduce the definition of LUSR and then describe how to learn it.

We first introduce two notions of state space in RL: the agent's raw observation state space $S^o$ and the agent's internal latent state space $S^z$. Raw observation states $s^o$ consist of a grid of pixels, while each unit in the internal latent state $s^z$ represents a high-level semantic feature. A mapping function $\mathcal{F}: S^o \to S^z$ maps an observation state to the corresponding internal latent state. In our work, high-level semantic features in $S^z$ are further divided into domain-specific ones (such as weather conditions in the driving task) and domain-general ones (such as vehicle dynamics). We denote $S^z = (\widehat{S^z}, \overline{S^z})$, where $\widehat{S^z}$ represents domain-specific features and $\overline{S^z}$ represents domain-general features. For the state representations in source and target domains, this is summarized as

$$\begin{equation}
\begin{aligned}
S^o_S & \neq S^o_T \\
S^z_S = (\widehat{S^z_S}, \overline{S^z_S}); & \quad S^z_T = (\widehat{S^z_T}, \overline{S^z_T}) \\
\overline{S^z_S} = \overline{S^z_T}; & \quad \widehat{S^z_S} \neq \widehat{S^z_T}
\end{aligned}
\end{equation}$$

In our setting of domain adaptation, the transition function $T$ and reward function $R$ depend only on $\overline{S^z}$, which is consistent across domains. We define the reward and transition functions that take $s^o$ as input as $R^o$ and $T^o$, and those that take $s^z$ as input as $R^z$ and $T^z$. Then we have

$$\begin{equation}
\begin{aligned}
{T^o_S} \neq {T^o_T}; &\quad {R^o_S} \neq {R^o_T} \\
{T^z_S} = T(\overline{S^z_S}) & = T(\overline{S^z_T}) = {T^z_T} \\
{R^z_S} = R(\overline{S^z_S}) & = R(\overline{S^z_T}) = {R^z_T} \\
\end{aligned}
\end{equation}$$

Since $\overline{S^z}$ is consistent across domains and the transition and reward functions ($T$ and $R$) depend only on this representation (not on $\widehat{S^z}$), an RL agent taking $\overline{S^z}$ as input can be trained successfully, and the trained agent also has the capability to adapt from the source domain to target domains. As a result, the goal of our approach is to learn the mapping function $\mathcal{F}: S^o \to \overline{S^z}$ that maps raw observation states to the latent unified state representation, which we call LUSR.

In this work, we choose to learn the mapping function $\mathcal{F}: S^o \to \overline{S^z}$ via a Cycle-Consistent VAE [@jha2018disentangling], which is a non-adversarial approach to disentangling domain-general and domain-specific factors of variation. Similar to a VAE [@kingma2013auto], a Cycle-Consistent VAE is composed of an encoder and a decoder. However, the output of the encoder is split into domain-general and domain-specific embeddings. To learn the mapping function $\mathcal{F}$, a number of random observation states from a set of pre-defined domains are first collected and then used as input for Cycle-Consistent VAE model training. Once the model is trained, the encoder is able to map observation states $s^o$ from any domain in the domain set to a latent state representation composed of $\overline{s^z}$ and $\widehat{s^z}$. As a result, we use the trained encoder as our mapping function $\mathcal{F}$ and keep only the domain-general representation as LUSR.

The Cycle-Consistent VAE is based on the idea of cycle consistency, whose intuition is that well-trained forward and reverse transformations composed together *in any order* should approximate an identity function. For example, in the VAE, the encoder is a forward transformation that converts an input image to a latent vector, while the decoder is the reverse transformation that converts the latent vector back to a reconstructed image. Here we define the forward cycle as $Dec(Enc(s^o)) = s^{o\prime}$ and the reverse cycle as $Enc(Dec(\widehat{s^z}, \overline{s^z})) = (\widehat{s^{z\prime}}, \overline{s^{z\prime}})$. As indicated by cycle consistency, $s^{o\prime}$ should be close to $s^{o}$, and $(\widehat{s^{z\prime}}, \overline{s^{z\prime}})$ should be close to $(\widehat{s^z}, \overline{s^z})$.

In the forward cycle of the Cycle-Consistent VAE, for two observation states $s^o_1$, $s^o_2$ from the same domain, $Enc(s^o_1) = (\widehat{s^z_1}, \overline{s^z_1})$ and $Enc(s^o_2) = (\widehat{s^z_2}, \overline{s^z_2})$. Since both originate from the same domain and $\widehat{s^z}$ contains only domain-specific information, swapping $\widehat{s^z_1}$ and $\widehat{s^z_2}$ should have no effect on the reconstruction loss, which means we should get $Dec(\widehat{s^z_2}, \overline{s^z_1}) \approx s^o_1$ and $Dec(\widehat{s^z_1}, \overline{s^z_2}) \approx s^o_2$. This operation ensures that domain-specific information and domain-general information are compressed into $\widehat{s^z}$ and $\overline{s^z}$ separately.

In the reverse cycle, a randomly sampled $\overline{s^z}$ is passed through the decoder in combination with two domain-specific embeddings $\widehat{s^z_1}$ and $\widehat{s^z_2}$ to obtain two reconstructed images $s^{o\prime}_1$ and $s^{o\prime}_2$. Since both $s^{o\prime}_1$ and $s^{o\prime}_2$ are generated based on the same $\overline{s^z}$, their corresponding domain-general latent embeddings $\overline{s^{z\prime}_1}$ and $\overline{s^{z\prime}_2}$ should also be the same.

As a result, the objective for the Cycle-Consistent VAE to minimize is $$\begin{equation}
\mathcal{L}_{cyclic} = \mathcal{L}_{forward} + \mathcal{L}_{reverse}
\end{equation}$$ where $$\begin{equation*}
\begin{aligned}
\mathcal{L}_{forward} = & -\mathbb{E}_{q_\phi(\overline{s^z}, \widehat{s^z} \mid s^o)} [\log p_\theta(s^o \vert \overline{s^z}, \widehat{s^z\ast})] \\ & + KL(q_\phi(\overline{s^z} \vert s^o) || p(\overline{s^z})) \\
\mathcal{L}_{reverse} = & \ \mathbb{E}_{\overline{s^z} \sim p(\overline{s^z})} [|| \overline{ q_{\phi}} (p_{\theta}(\overline{s^z}, \widehat{s^z_1})) - \overline{q_{\phi}} (p_{\theta}(\overline{s^z}, \widehat{s^z_2})) ||_1]
\end{aligned}
\end{equation*}$$

$\mathcal{L}_{forward}$ here is a modified variational upper bound and $\mathcal{L}_{reverse}$ is the loss for cycle consistency. $q_\phi$ and $p_\theta$ are the parameterized functions of the encoder and decoder. We define $\overline{q_\phi}$ as $q_\phi$ keeping only the domain-general embedding as output. The latent embedding $s^z$ is composed of $\overline{s^z}$ and $\widehat{s^z}$, the domain-general and domain-specific latent embeddings corresponding to observation state $s^o$. $\widehat{s^z\ast}$ represents any random domain-specific embedding from the same domain, while $\widehat{s^z_1}$ and $\widehat{s^z_2}$ are two different domain-specific embeddings.
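
A condensed PyTorch-style sketch of the two cycles is given below. The `enc` and `dec` modules are hypothetical (`enc` is assumed to return the domain-specific embedding plus the mean and log-variance of the domain-general one), and the reconstruction/KL terms follow standard VAE practice; this is our reading of the loss, not the authors' released code.

```python
import torch
import torch.nn.functional as F

def cyclic_loss(enc, dec, s1, s2, z_dim):
    """L_cyclic = L_forward + L_reverse for two states from one domain."""
    # Forward cycle: encode both states, swap the domain-specific
    # embeddings within the domain, and reconstruct.
    spec1, mu1, logvar1 = enc(s1)
    spec2, mu2, logvar2 = enc(s2)
    gen1 = mu1 + torch.randn_like(mu1) * (0.5 * logvar1).exp()
    gen2 = mu2 + torch.randn_like(mu2) * (0.5 * logvar2).exp()
    recon = F.mse_loss(dec(spec2, gen1), s1) + F.mse_loss(dec(spec1, gen2), s2)
    kl = -0.5 * torch.mean(1 + logvar1 - mu1.pow(2) - logvar1.exp()) \
         - 0.5 * torch.mean(1 + logvar2 - mu2.pow(2) - logvar2.exp())
    l_forward = recon + kl

    # Reverse cycle: decode one sampled domain-general code with two
    # specific codes; the re-encoded domain-general parts should match.
    z = torch.randn(s1.size(0), z_dim, device=s1.device)
    _, mu1p, _ = enc(dec(spec1.detach(), z))
    _, mu2p, _ = enc(dec(spec2.detach(), z))
    l_reverse = F.l1_loss(mu1p, mu2p)
    return l_forward + l_reverse
```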
2104.07186/main_diagram/main_diagram.drawio
ADDED
@@ -0,0 +1 @@
<mxfile host="app.diagrams.net" modified="2021-04-12T01:50:54.311Z" agent="5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36" etag="ipC3TC6MTzlK7gLvswO-" version="14.3.0" type="device"><diagram id="yu4Y3AYubq5W-A7oQpcr" name="Page-1">5VxZk9o4EP41PA5lWfLB4xxJdreSqlTNbjaTNw8W4MQg1piBya9f2ZbAOswYIx8kpCqD2kKW+/y6JXkE75f7D0mwXnwiIY5HthXuR/BhZNvA9SD9k1FeC8oEooIwT6KQdToSHqOfmBEtRt1GId4IHVNC4jRai8QpWa3wNBVoQZKQndhtRmLxrutgzu5oHQmP0yDGSrd/ozBdFFTfKfX+A0fzBb8zsNiVZcA7M8JmEYRkVyLBdyN4nxCSFt+W+3scZ8zjfCl+977i6mFiCV6ldX7wdR0EH35Mk9vHxz/nt+DL0/rvLzfAK4Z5CeIte2I22/SVswCHlCOsSZJ0QeZkFcTvjtS7hGxXIc7uY9HWsc9HQtaUCCjxO07TVybeYJsSSlqky5hdpY+QvH7Nfj+2Hd5+YuPljYe90HplrWKy2QwrucJIG7JNpqzXt7/I4y6aP/3E/3y//QTf33959W4cpl1BMsfpiX7+QXZU6TFZYjof+rsEx0EavYjzCJj2zQ/9jgKiX5iM9PI6NUlBXG5Mp3v3TL/M05wlBWFGKAfKgnT/2xJ+4WaTi+KWdgDeen+8yEcJ1mv6MzYUnWoxmngHSi7dVdKao05kAt4tohQ/roNcAjvqKkT5z6I4vicxSfLfwpmT/aP0TZqQH7h0xc0/7CFK9OJzmIdG/i84SfG+RFJFyK7aiHGZuSwAmQXvjg7AZaRFyfZ5N+NC9yuFXlvGvk7GHz9VCrhSmNbZwgwd7IdIJ0zffoa5MA0IDXCPzIRmW6rQHEsjNbctqdmqrW6NMnbmT/F0qmPss+8gxzJkDZ7IWGirjAWd8tWAOaDzXZ4kOMq9VJSOKIUVWWFJZIwUxNF8RZtTKhNM6XeZLCKKOG7ZhWUUhnlY1amDqDAm5DuR5ItU+SKNfO225AvrQJJVeJuBu4yNcbDZRFMJT+yjtAwnaPOpdOkIJrIGxxIlENIAgzSBHH5NyMFdiYRzVJkKzk6VGafVxirsDp9JlBvWnquDqDIASbpQPDj7VRmSygNJTnsijVMwRhknV6vDU1+gaTpP0ljTrkDRFJxc1jx9flCBdq9M0aBfEcTe0DQq+uC11G2dddi0oYuIO+4TungWWDAQGoAtAmGkCQ3A7jL2I1vh0stlbOoHU8EajO2Ur7q8stOYCwRHePSL57rCN2MpV6GBxFLbQ2NRGQ6+6lwnpw6FZH95tpurupN0H08oPdEvxYhmPaSr6ChD7HpoXhePl50BRfysRmUj3mYDg1NIXcH0JrwvEmOWo8loPY1iyjI35yRqAPPeQ5QuO9WFKNAal2qAyiuMUDWz/vbYOjEQoZpWmUsg35qUYps1nkzs0/GNNj7jJKJMyPzC6dilwPO3a9b9xi4OWrmSWE0jF7LkyFUToJsKLpwjFxSVqEg1RSWlPP9AptslzoekNpvZi3WTPWcRyqx1lP3/HPyg7sG2xuOxvtjetEhVv/iUYPpowXM+VKa1DBPQcZ27kfOQjbVNSfH45ry5GPJc1enATmtRjpqVcQ93xWy27QrEWeZzp2xW07rekcWBAxx+UR/Fp9Vb/utAhVFm818MQgd7OnQxcT0YGFqsgTK0reBtp6xFCmtzX/xrJhfyqo5raSWgc7at5RdOjSJE717Aru8FWsPCjpoJ/wpOoIK1nXLWxNqTgSwDlCtoNMvwedZhPsvoK3mAyBkjNDl+oKgN1CFZpY80fu3CP72LCHbKatZVcqGWBH4BFAkl14h6R5EmSgSXLOeZqmGX0/4Tq3I1it0cLw7F5n1DpW4kxQ5XhoWGCt3yAqRjuNCtl67h6F5h2BrVG+ZKoJZJasbY0+aqi/ja50Kglq1qftm77vWdiGv5pCaLPeXhF7G25zxcy9l2kkCzGthxEqjl01ByQLMK2G0OqA+/amhRONvBkYiRDrg1g4sMFw/mZMOpSbZ/siGv6l3puYbLjE2q+XV5rkFvaTV8WLeHj7wODh/pWdHV6aNGaZAjwX/gSkfNzuvfUtrU2dkotjr7O/oQNHEH5kPU2tpVIvGJVBnp8JSNnq/V8ZgbDz+ry80DVO9+2KyDVS170x7MESN2MdalGyH6O61zmf0BSU80BQNdGmKi6Kqvw6hoeWfU/mazma0vxITus+sYsj9H2nnSu/3ZukpMT/Ynx7vf2gIdKWXt8rycXlNMVwJMhDNXdFOuA8ZANahuK8bq2u41IgXk1WJtp5w1veW5DQ2EtTWwtcqSXWOB8goVUM/ZLhkLh7pN8zJGSyrsTLTF0bbW2PWcVuHe8Ex9AMEGqvDtGmHxAIMNVJfehqeBAwg20PRC2jAUcADBpk7VuoM9ghR1lXYojS3LHTXbI/h2Abuv/ULIt8Y+Km0ClJQBZAHx4j2C2V1kLbPG0mDmtgjqlcrk8bYz96E12fNWV3NqLH3wJ33zXRRFTO1NF6V3lUD5ZSX1t6RKe9dkT2Vo75q8COP6QNLcFhZh+LOV9JhVj/RgfADnKC6s24vC9IAuPrV1TFu/gcboNliv0Wtt0ETwQTfW2AL2W24oazWNUM1etnTCH/XlZpRCdNMztY602n+YYEcRzVU9wSk1ZHYeBpvFYWG0pJAZ/XOQUsVY5RTbggfvwd/Lal8BonEl5w8QGnuuGQnrxmosZNo8vpq26H58wS989z8=</diagram></mxfile>
2104.07186/main_diagram/main_diagram.pdf
ADDED
Binary file (21 kB).
2104.07186/paper_text/intro_method.md
ADDED
@@ -0,0 +1,128 @@
# Introduction

Widely used bag-of-words (BOW) information retrieval (IR) systems such as BM25 rely on exact lexical match[2](#page-0-1) between query and document terms. Recent studies in neural IR take a different approach and compute soft matching between all query and document terms to model complex matching.

The shift to soft matching in neural IR models attempts to address the *vocabulary mismatch* problem: a query and its relevant documents may use different terms, e.g., cat vs. kitty, for the same concept [\(Huang et al.,](#page-9-0) [2013;](#page-9-0) [Guo et al.,](#page-9-1) [2016;](#page-9-1) [Xiong](#page-10-0) [et al.,](#page-10-0) [2017\)](#page-10-0). The later introduction of contextualized representations [\(Peters et al.,](#page-10-1) [2018\)](#page-10-1) from deep language models (LM) further addresses *semantic mismatch*: the same term can refer to different concepts, e.g., bank of a river vs. bank in finance. Fine-tuned deep LM rerankers produce token representations based on context and achieve state-of-the-art text ranking with a huge performance leap [\(Nogueira and Cho,](#page-10-2) [2019;](#page-10-2) [Dai and Callan,](#page-9-2) [2019b\)](#page-9-2).

Though the idea of soft matching all tokens is carried through the development of neural IR models, seeing the success brought by deep LMs, we take a step back and ask: how much gain can we get if we introduce contextualized representations back into lexical exact match systems? In other words, can we build a system that still performs exact query-document token matching but computes matching signals with contextualized token representations instead of heuristics? This may seem like a constraint on the model, but exact lexical match produces more explainable and controlled patterns than soft matching. It also allows search to focus on only the subset of documents that have terms overlapping with the query, which can be done efficiently with an inverted list index. Meanwhile, using dense contextualized token representations enables the model to handle semantic mismatch, which has been a long-standing problem in classic lexical systems.

To answer the question, we propose a new lexical matching scheme that uses vector similarities between the contextualized representations of query-document overlapping terms to replace the heuristic scoring used in classical systems. We present COntextualized Inverted List (COIL), a new exact lexical match retrieval architecture armed with deep LM representations. COIL processes documents with a deep LM offline and produces representations for each document token. The representations are grouped by their surface tokens into inverted lists. At search time, we build representation vectors for query tokens and perform contextualized exact match: each query token looks up *its own* inverted list and computes vector similarities with the document vectors stored in that inverted list as matching scores. COIL enables efficient search with semantically rich matching between query and document.
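
To illustrate the lookup, here is a toy sketch of contextualized exact match scoring. The index layout and the choice to keep each query token's best-scoring vector per document (max pooling) are our assumptions for illustration, not a specification taken from this section.

```python
import numpy as np
from collections import defaultdict

# Hypothetical index built offline: token -> list of (doc_id, vector).
coil_index = {}

def coil_scores(query_token_vecs):
    """query_token_vecs: list of (token, np.ndarray) pairs for one query."""
    scores = defaultdict(float)
    for token, q_vec in query_token_vecs:
        best = {}  # doc_id -> best similarity for this query token
        for doc_id, d_vec in coil_index.get(token, []):
            sim = float(q_vec @ d_vec)
            best[doc_id] = max(sim, best.get(doc_id, sim))
        for doc_id, sim in best.items():
            scores[doc_id] += sim
    return sorted(scores.items(), key=lambda kv: -kv[1])
```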
|
| 12 |
+
|
| 13 |
+
Our contributions include: 1) introducing a novel
|
| 14 |
+
|
| 15 |
+
<span id="page-0-0"></span><sup>1</sup>Our code is available at [https://github.com/](https://github.com/luyug/COIL) [luyug/COIL](https://github.com/luyug/COIL).
|
| 16 |
+
|
| 17 |
+
<span id="page-0-1"></span><sup>2</sup>Exact match up to morphological changes.
|
| 18 |
+
|
| 19 |
+
retrieval architecture, *contextualized inverted lists* (COIL), that brings semantic matching into lexical IR systems, 2) showing that matching signals induced from exact lexical match can capture complicated matching patterns, and 3) demonstrating that COIL significantly outperforms classical and deep-LM-augmented lexical retrievers, as well as state-of-the-art dense retrievers, on two retrieval tasks.
|
| 20 |
+
|
| 21 |
+
# Method
|
| 22 |
+
|
| 23 |
+
In this section, we first provide some preliminaries on exact lexical match systems. Then we discuss COIL's contextualized exact match design and how its search index is organized. We also give a comparison between COIL and other popular retrievers.
|
| 24 |
+
|
| 25 |
+
<span id="page-2-3"></span>
|
| 26 |
+
|
| 27 |
+
Figure 3: COIL's index and retrieval architecture. COIL-tok relies on the exact token matching (lower). COIL-full includes in addition CLS matching (upper).
|
| 28 |
+
|
| 29 |
+
Classic lexical retrieval systems rely on *overlapping* query-document terms under morphological generalization such as stemming, in other words *exact lexical match*, to score a query-document pair. A scoring function is defined as a sum of matched term scores, which are usually based on statistics like term frequency (*tf*). Generally, we can write,
|
| 30 |
+
|
| 31 |
+
<span id="page-2-2"></span>
|
| 32 |
+
$$s = \sum_{t \in q \cap d} \sigma_t(h_q(q, t), h_d(d, t)) \tag{1}$$
|
| 33 |
+
|
| 34 |
+
where for each overlapping term t between query q and document d, functions $h_q$ and $h_d$ extract term
|
| 35 |
+
|
| 36 |
+
information and a term scoring function $\sigma_t$ combines them. A popular example is BM25, which computes,
|
| 37 |
+
|
| 38 |
+
$$s_{\text{BM25}} = \sum_{t \in q \cap d} idf(t) h_q^{\text{BM25}}(q, t) h_d^{\text{BM25}}(d, t)$$
|
| 39 |
+
|
| 40 |
+
$$h_q^{\text{BM25}}(q, t) = \frac{t f_{t,q} (1 + k_2)}{t f_{t,q} + k_2}$$
|
| 41 |
+
|
| 42 |
+
$$h_d^{\text{BM25}}(d, t) = \frac{t f_{t,d} (1 + k_1)}{t f_{t,d} + k_1 (1 - b + b \frac{|d|}{\text{avgdl}})}$$
|
| 43 |
+
(2)
|
| 44 |
+
|
| 45 |
+
where $tf_{t,d}$ refers to the term frequency of term t in document d, $tf_{t,q}$ refers to the term frequency in the query, idf(t) is the inverse document frequency, and b, $k_1$, $k_2$ are hyper-parameters.
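For concreteness, the short sketch below scores one query-document pair with Equation 2. The corpus statistics (`df`, `n_docs`, `avgdl`), the idf variant, and the default hyper-parameter values are illustrative assumptions rather than settings used in this paper.

```python
import math

def bm25_score(query_terms, doc_terms, df, n_docs, avgdl, k1=0.9, b=0.4, k2=8.0):
    """Score one query-document pair following Equation 2 (illustrative sketch).

    df:     dict mapping a term to its document frequency in the collection
    n_docs: number of documents in the collection
    avgdl:  average document length
    """
    score = 0.0
    for t in set(query_terms) & set(doc_terms):          # sum over overlapping terms only
        tf_q, tf_d = query_terms.count(t), doc_terms.count(t)
        idf = math.log(n_docs / df[t])                    # one common idf variant
        h_q = tf_q * (1 + k2) / (tf_q + k2)
        h_d = tf_d * (1 + k1) / (tf_d + k1 * (1 - b + b * len(doc_terms) / avgdl))
        score += idf * h_q * h_d
    return score
```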
|
| 46 |
+
|
| 47 |
+
One key advantage of exact lexical match systems lies in efficiency. With summation over exact matches, scoring of each query term only goes to documents that contain matching terms. This can be done efficiently using *inverted list* indexing (Figure 2). The inverted list maps back from a term to a list of documents where the term occurs. To compute Equation 1, the retriever only needs to traverse the subset of documents in query terms' inverted lists instead of going over the entire document collection.
|
| 48 |
+
|
| 49 |
+
While recent neural IR research mainly focuses on breaking the exact match bottleneck with soft matching of text, we hypothesize that exact match itself can be improved by replacing semantics-independent, frequency-based scoring with semantically rich scoring. In the rest of this section, we show how to modify the exact lexical match framework with contextualized term representations to build effective and efficient retrieval systems.
|
| 50 |
+
|
| 51 |
+
Instead of term frequency, we desire to encode the semantics of terms to facilitate more effective matching. Inspired by recent advancements in deep LM, we encode *both* query and document tokens into contextualized vector representations and carry out matching between exact lexical matched tokens. Figure 1d illustrates the scoring model of COIL.
|
| 52 |
+
|
| 53 |
+
In this work, we use a Transformer language model<sup>3</sup> as the contextualization function. We encode a query q with the language model (LM) and represent its i-th token by projecting the corresponding output:
|
| 54 |
+
|
| 55 |
+
$$\boldsymbol{v}_{i}^{q} = \boldsymbol{W}_{tok} LM(q, i) + \boldsymbol{b}_{tok} \tag{3}$$
|
| 56 |
+
|
| 57 |
+
where $W_{tok}^{n_t \times n_{lm}}$ is a matrix that maps the LM's $n_{lm}$ dimension output into a vector of lower dimension $n_t$ . We down project the vectors as we hypothesize that it suffices to use lower dimension token vectors. We confirm this in section 5. Similarly, we encode a document d's j-th token $d_j$ with:
|
| 58 |
+
|
| 59 |
+
$$\boldsymbol{v}_{j}^{d} = \boldsymbol{W}_{tok} LM(d, j) + \boldsymbol{b}_{tok}$$
|
| 60 |
+
(4)
|
| 61 |
+
|
| 62 |
+
<span id="page-3-2"></span>We then define the *contextualized exact lexical match scoring function* between query document based on vector similarities between exact matched query document token pairs:
|
| 63 |
+
|
| 64 |
+
<span id="page-3-1"></span>
|
| 65 |
+
$$s_{\text{tok}}(q, d) = \sum_{q_i \in q \cap d} \max_{d_j = q_i} (\boldsymbol{v}_i^{q \mathsf{T}} \boldsymbol{v}_j^d)$$
|
| 66 |
+
(5)
|
| 67 |
+
|
| 68 |
+
Note that, importantly, the summation goes over only the overlapping terms, $q_i \in q \cap d$ . For each query token $q_i$ , we find all *same* tokens $d_j$ in the document and compute their similarity with $q_i$ using the *contextualized* token vectors. The maximum similarity is picked for query token $q_i$ . The max operator is adopted to capture the most important signal (Kim, 2014). This fits in the general lexical match formulation, with $h_q$ giving the representation for $q_i$ , $h_d$ giving representations for all $d_j = q_i$ , and $\sigma_t$ computing dot-product similarities between the query vector and the document vectors and max-pooling the scores.
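A minimal NumPy sketch of Equation 5 on pre-encoded vectors may make the scoring concrete; the argument layout and the helper name `coil_tok_score` are our own assumptions for illustration.

```python
import numpy as np

def coil_tok_score(q_tokens, q_vecs, d_tokens, d_vecs):
    """Contextualized exact lexical match score s_tok of Equation 5 (sketch).

    q_tokens: query token ids, length |q|;    q_vecs: array of shape (|q|, n_t)
    d_tokens: document token ids, length |d|; d_vecs: array of shape (|d|, n_t)
    """
    d_tokens = np.asarray(d_tokens)
    score = 0.0
    for i, t in enumerate(q_tokens):
        matches = np.where(d_tokens == t)[0]     # exact lexical matches d_j = q_i
        if matches.size == 0:
            continue                             # token absent from document: adds nothing
        sims = d_vecs[matches] @ q_vecs[i]       # dot products v_i^q . v_j^d
        score += sims.max()                      # max pooling over matching positions
    return score
```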
|
| 69 |
+
|
| 70 |
+
As with classic lexical systems, $s_{tok}$ defined in Equation 5 does not take into account similarities between lexically different terms, and thus still faces vocabulary mismatch. Many popular LMs (Devlin et al., 2019; Yang et al., 2019; Liu et al., 2019) use a special CLS token to aggregate the sequence representation. We project the CLS vectors with $\boldsymbol{W}_{cls}^{n_c \times n_{lm}}$ to represent the entire query or document,
|
| 71 |
+
|
| 72 |
+
$$\begin{aligned} \boldsymbol{v}_{cls}^q &= \boldsymbol{W}_{cls} \mathrm{LM}(q, \mathrm{CLS}) + \boldsymbol{b}_{cls} \\ \boldsymbol{v}_{cls}^d &= \boldsymbol{W}_{cls} \mathrm{LM}(d, \mathrm{CLS}) + \boldsymbol{b}_{cls} \end{aligned} \tag{6}$$
|
| 73 |
+
|
| 74 |
+
The similarity between $v_{cls}^q$ and $v_{cls}^d$ provides high-level semantic matching and mitigates the issue of vocabulary mismatch. The full form of COIL is:
|
| 75 |
+
|
| 76 |
+
$$s_{\text{full}}(q, d) = s_{\text{tok}}(q, d) + \boldsymbol{v}_{cls}^{q\mathsf{T}} \boldsymbol{v}_{cls}^{d}$$
|
| 77 |
+
(7)
|
| 78 |
+
|
| 79 |
+
In the rest of the paper, we refer to the system with CLS matching as **COIL-full** and the one without as **COIL-tok**.
|
| 80 |
+
|
| 81 |
+
COIL's scoring model (Figure 1d) is fully differentiable. Following earlier work (Karpukhin et al., 2020), we train COIL with negative log likelihood defined over query q, a positive document $d^+$ and a
|
| 82 |
+
|
| 83 |
+
<span id="page-3-0"></span><sup>&</sup>lt;sup>3</sup>We used the base, uncased variant of BERT.
|
| 84 |
+
|
| 85 |
+
set of negative documents $\{d_1^-, d_2^-, \dots, d_l^-, \dots\}$ as the loss:
|
| 86 |
+
|
| 87 |
+
$$\mathcal{L} = -\log \frac{\exp(s(q, d^{+}))}{\exp(s(q, d^{+})) + \sum_{l} \exp(s(q, d_{l}^{-}))}$$
|
| 88 |
+
(8)
|
| 89 |
+
|
| 90 |
+
Following Karpukhin et al. (2020), we use in-batch negatives and hard negatives generated by BM25. Details are discussed in the implementation section (Section 4).
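As a sketch of this objective, the function below evaluates Equation 8 from a matrix of scores; placing the positive document in column 0 and the negatives (BM25 hard negatives plus in-batch negatives) in the remaining columns is an assumed layout, not a detail specified here.

```python
import torch
import torch.nn.functional as F

def coil_nll_loss(scores):
    """Negative log likelihood of Equation 8 (illustrative sketch).

    scores: tensor of shape (n_queries, 1 + n_negatives); column 0 holds
    s(q, d+), the remaining columns hold s(q, d_l^-) for the negatives.
    """
    targets = torch.zeros(scores.size(0), dtype=torch.long)  # positive sits at index 0
    return F.cross_entropy(scores, targets)                  # mean of Eq. 8 over queries
```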
|
| 91 |
+
|
| 92 |
+
COIL pre-computes the document representations and builds a search index, which is illustrated in Figure 3. Documents in the collection are encoded offline into token and CLS vectors. Formally, for a unique token t in the vocabulary V, we collect its contextualized vectors from all of its mentions in documents of the collection C, building token t's contextualized inverted list:
|
| 93 |
+
|
| 94 |
+
$$I^{t} = \{ \mathbf{v}_{j}^{d} \mid d_{j} = t, d \in C \}, \tag{9}$$
|
| 95 |
+
|
| 96 |
+
where $\boldsymbol{v}_j^d$ is the BERT-based token encoding defined in Equation 4. We define the search index to store inverted lists for all tokens in the vocabulary, $\mathbb{I} = \{I^t \mid t \in V\}$ . For COIL-full, we also build an index for the CLS token, $I^{cls} = \{\boldsymbol{v}_{cls}^d \mid d \in C\}$ .
|
| 97 |
+
|
| 98 |
+
As shown in Figure 3, in this work we implement COIL's index by stacking the vectors in each inverted list $I^t$ into a matrix $M^{n_t \times |I^t|}$ , so that the similarity computation that traverses an inverted list and computes vector dot products can be done efficiently as one matrix-vector product with optimized BLAS (Blackford et al., 2002) routines on CPU or GPU. All $v_{cls}^d$ vectors can also be organized in a similar fashion into a matrix $M_{cls}$ and queried with a matrix product. The matrix implementation here is an exhaustive approach that involves all vectors in an inverted list. As a collection of dense vectors, each inverted list can also be organized as an approximate search index (Johnson et al., 2017; Guo et al., 2019) to further speed up search.
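A minimal sketch of this offline index construction (Equation 9 together with the matrix stacking described above) is shown below; the tuple layout of `encoded_docs`, the helper name, and the use of plain Python dictionaries are illustrative assumptions.

```python
from collections import defaultdict
import numpy as np

def build_coil_index(encoded_docs):
    """Build contextualized inverted lists (Equation 9) plus a CLS index (sketch).

    encoded_docs: iterable of (doc_id, token_ids, token_vecs, cls_vec), produced
    offline by the document encoder; token_vecs has shape (|d|, n_t).
    """
    vec_lists = defaultdict(list)   # token id -> list of contextualized vectors
    doc_lists = defaultdict(list)   # token id -> parallel list of source doc ids
    cls_vecs, cls_docs = [], []
    for doc_id, token_ids, token_vecs, cls_vec in encoded_docs:
        for t, v in zip(token_ids, token_vecs):
            vec_lists[t].append(v)
            doc_lists[t].append(doc_id)
        cls_vecs.append(cls_vec)
        cls_docs.append(doc_id)
    # stack each inverted list into a matrix M so scoring becomes one matrix-vector product
    index = {t: (np.stack(vs), doc_lists[t]) for t, vs in vec_lists.items()}
    cls_index = (np.stack(cls_vecs), cls_docs)
    return index, cls_index
```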
|
| 99 |
+
|
| 100 |
+
When a query q comes in, we encode each of its tokens into a vector $\boldsymbol{v}_i^q$ . The vectors are sent to the subset of COIL inverted lists that corresponds to the query tokens, $\mathbb{J}=\{I^t\mid t\in q\}$ , where the matrix product described above is carried out. This is efficient as $|\mathbb{J}| \ll |\mathbb{I}|$ : only a small subset of all inverted lists is involved in the search. For COIL-full, we also use the encoded CLS vector $\boldsymbol{v}_{cls}^q$ to query the CLS index and get the CLS matching scores. The scoring over different inverted lists can
|
| 101 |
+
|
| 102 |
+
run in parallel. The scores are then combined by Equation 5 to rank the documents.
|
| 103 |
+
|
| 104 |
+
Readers can find detailed illustrations of index building and querying in Appendix A, Figure 4 and Figure 5, respectively.
|
| 105 |
+
|
| 106 |
+
**Deep LM based Lexical Index** Models like DeepCT (Dai and Callan, 2019a, 2020) and DocT5Query (Nogueira and Lin, 2019) alter $tf_{t,d}$ in documents with the deep LMs BERT or T5. This is similar to a COIL-tok with token dimension $n_t = 1$ . A single degree of freedom, however, measures term *importance* more than *semantic agreement*.
|
| 107 |
+
|
| 108 |
+
**Dense Retriever** Dense retrievers (Figure 1b) are equivalent to COIL-full's CLS matching. COIL makes up for the lost token-level interactions in dense retriever with exact matching signals.
|
| 109 |
+
|
| 110 |
+
**ColBERT** ColBERT (Figure 1c) computes relevance by soft matching *all* query and document term's contextualized vectors.
|
| 111 |
+
|
| 112 |
+
$$s(q,d) = \sum_{q_i \in [cls;q;exp]} \max_{d_j \in [cls;d]} (\boldsymbol{v}_i^{q\mathsf{T}} \boldsymbol{v}_j^d) \qquad (10)$$
|
| 113 |
+
|
| 114 |
+
where interactions happen among the query q, document d, cls, and the set of query expansion tokens exp. The all-to-all match contrasts with COIL, which only uses exact match. It requires a dense retrieval over all document tokens' representations, as opposed to COIL, which only considers the query's overlapping tokens, and is therefore much more computationally expensive than COIL.
|
| 115 |
+
|
| 116 |
+
**Datasets** We experiment with two large-scale ad hoc retrieval benchmarks from the TREC 2019 Deep Learning (DL) shared task: MSMARCO passage (8M English passages with an average length of around 60 tokens) and MSMARCO document (3M English documents with an average length of around 900 tokens)<sup>4</sup>. For each, we train models with the MSMARCO Train queries and record results on MSMARCO Dev queries and TREC DL 2019 test queries. We mainly report full-corpus retrieval results but also include the rerank task on MSMARCO Dev queries, where we use neural scores to reorder BM25 retrieval results provided by the MSMARCO organizers. Official metrics include
|
| 117 |
+
|
| 118 |
+
<span id="page-4-1"></span><sup>&</sup>lt;sup>4</sup>Both datasets can be downloaded from https://microsoft.github.io/msmarco/
|
| 119 |
+
|
| 120 |
+
MRR@1K and NDCG@10 on test and MRR@10 on MSMARCO Dev. We also report recall for the dev queries following prior work [\(Dai and Callan,](#page-9-8) [2019a;](#page-9-8) [Nogueira and Lin,](#page-10-10) [2019\)](#page-10-10).
|
| 121 |
+
|
| 122 |
+
**Compared Systems** Baselines include 1) the traditional exact match system BM25, 2) deep LM augmented BM25 systems DeepCT [\(Dai and Callan,](#page-9-8) [2019a\)](#page-9-8) and DocT5Query [\(Nogueira and Lin,](#page-10-10) [2019\)](#page-10-10), 3) dense retrievers, and 4) the soft all-to-all retriever ColBERT. For DeepCT and DocT5Query, we use the rankings provided by the authors. For dense retrievers, we report two dense retrievers trained with BM25 negatives or with mixed BM25 and random negatives, published in [Xiong et al.](#page-10-11) [\(2020\)](#page-10-11). However, since these systems use a robustly optimized version of BERT, RoBERTa [\(Liu et al.,](#page-10-14) [2019\)](#page-10-14), as the LM and also train the document retriever on the MSMARCO passage set, we additionally reproduce a third dense retriever that uses the exact same training setup as COIL. All dense retrievers use 768-dimension embeddings. For ColBERT, we report its published results (available only on the passage collection). A BERT reranker is added in the rerank task.
|
| 123 |
+
|
| 124 |
+
We include 2 COIL systems: 1) COIL-tok, the exact-token-match-only system, and 2) COIL-full, the model with both token match and CLS match.
|
| 125 |
+
|
| 126 |
+
**Implementation** We build our models with Pytorch [\(Paszke et al.,](#page-10-15) [2019\)](#page-10-15) based on huggingface transformers [\(Wolf et al.,](#page-10-16) [2019\)](#page-10-16). COIL's LM is based on BERT's base variant. COIL systems use token dimension n<sub>t</sub> = 32 and COIL-full uses CLS dimension n<sub>c</sub> = 768 by default, leading to 110M parameters. We add a Layer Normalization to the CLS vector when useful. All models are trained for 5 epochs with the AdamW optimizer, a learning rate of 3e-6, a 0.1 warm-up ratio, and linear learning rate decay, which takes around 12 hours. Hard negatives are sampled from the top 1000 BM25 results. Each query uses 1 positive and 7 hard negatives; each batch uses 8 queries on MSMARCO passage and 4 on MSMARCO document. Documents are truncated to the first 512 tokens to fit in BERT. We conduct validation on 512 randomly selected queries from the corresponding train set. Latency numbers are measured on dual Xeon E5-2630 v3 CPUs and an RTX 2080 ti GPU. We implement COIL's inverted lists as matrices as described in [subsection 3.3,](#page-4-2) using NumPy [\(Harris et al.,](#page-9-19) [2020\)](#page-9-19) on CPU and Pytorch on GPU. We perform a) a set of matrix products to compute token similarities
|
| 127 |
+
|
| 128 |
+
over contextualized inverted lists, b) scatter to map token scores back to documents, and c) sort to rank the documents. Illustration can be found in the appendix, [Figure 5.](#page-12-0)
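Putting the pieces together, the sketch below mirrors steps a)-c) for COIL-tok on top of the hypothetical `build_coil_index` output sketched earlier; integer document ids and the exhaustive (non-approximate) traversal are assumptions for illustration.

```python
import numpy as np

def coil_tok_search(q_tokens, q_vecs, index, n_docs):
    """Exhaustive COIL-tok retrieval with the matrix implementation (sketch).

    index: as returned by the build_coil_index sketch above; doc ids are
    assumed to be integers in [0, n_docs).
    """
    doc_scores = np.zeros(n_docs)
    for t, qv in zip(q_tokens, q_vecs):
        if t not in index:
            continue                                  # token never occurs in the collection
        M, doc_ids = index[t]
        sims = M @ qv                                 # (a) one matrix-vector product
        best = {}
        for doc_id, s in zip(doc_ids, sims):          # (b) max over a doc's mentions, then scatter
            if s > best.get(doc_id, -np.inf):
                best[doc_id] = s
        for doc_id, s in best.items():
            doc_scores[doc_id] += s                   # sum over query tokens (Equation 5)
    ranking = np.argsort(-doc_scores)                 # (c) sort documents by total score
    return ranking, doc_scores
```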
|
2106.01342/main_diagram/main_diagram.drawio
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
<mxfile host="app.diagrams.net" modified="2021-05-16T17:14:43.031Z" agent="5.0 (Macintosh)" etag="zPEeyWz_ke0F7bm4SiSM" version="14.6.12" type="google"><diagram id="-V2exvk_vhBUc0ruXK1k" name="Page-1">7V1bd5u4Fv41XjPzEC/ul8dc285JO11Jp5156sJGtpkhxoNJk/TXH8mADWIDwkiAE5quNghbxtrfvmhrXybq5cPzu9DZrD4GLvIniuQ+T9SriaIoqmni/8jISzwiq4YVjyxDz03GDgP33k+UDErJ6KPnom08lgxFQeBH3mabe/c8WK/RPMqNOWEYPOVftgj8/KdunCUqDNzPHb84+s1zo1Uyauja4cZ75C1X6UfLhh3feXD2r44HtivHDZ4yX0W9nqiXYRBE8W8Pz5fIJ8uXLkz8fW9K7u6fLETriOUNF6uLzbef2s3581Zb+s7dH+fn78/0eJYfjv+YfOPkYaOXdAnC4HHtIjKJNFEvnlZehO43zpzcfcJUx2Or6MHHVzL+deH5/mXgByG+Xgdr/KKL4nMmj/4DhRF6zgwlz/0OBQ8oCl/wS5K7egqHBEa6kVw/HWhiW8nYKkMOVUsGnQQHy/3ch5XCvySL1WTh7CYrJ9ev3B4eZJW3/6JovkovojD4F6WrOlFUCVmSZe3vpMCk1x+/ciYhFRl4PMCf60Vk+VSJD01k2aaIYheJgp+1SBRF50CUy59f3a+R7Tzdfnh/8YRuI12LzlRJBqhi+BFZmQB/1Sx5jP8eg/TG2XYnec7xCxR983y4iX9bkv9/dX5LJ8JPFs8V3ylQHa9fRJE2R8CELQBOcXxvucaXc0wVhMcvCDU8LIzOkxsPnuuSjwGxlOfTcjgdoKBLu7dFTuQF6+QuD2SoeWDYKgAMEwKGKGZVT1LKyaxSTpNEMZRia/2JOdeYGbrBIuYWi4UynxexzUXMUWhWzSJV9momSxXZFEYVS+mPKrqhyyZioYorzRFSuqGKzEoVRRyvyFaBCMjF1mRyGYTRKlgGa8e/PoxSUvvwmtsg2CTL+g+KopfENHYeoyBPOvTsRX9lfv+bTDXVk6ur52Tm3cXLnqhOGJ0T8/ggwDAxwpe/0leTi8xM5PIw1e4qnauUmtvgMZyjygVLbCj8NEsUVb3STOYk68kkShUKHlYF1ZNZPgfezkJIprAo80aVqSnip07edcAOXlbnJfOyDXnBtoCu/TO3AFyFteN6Pw4GSkMDSIIMoImikb/6JdnXzBYT8+J6Yl79igfmbhD9ltwuM5H2w7nnaiOxOAgRk9K3CmDAyhKob4WJELmwKgMVIS3YXlI6Z2aa1KzMzI9VGTQ2VsKbmMrXvu9ttoS2GxR6+MPJbuAKxaOfD0N1TDJz5v8ud/D44zHyPSLnd+OsW4XiznP3h9Ekw3/Ipma7iR0zC++ZfCQPzjUoRKiSDnAuYJIJY1wJspPFyN3n7x6WvDUC98T2pBwwQUsJBdg9wQahMGEO8bxYXUyBpE4tvz2UWDqFEqV3lJhCURJ+934ZUUBvHyWNViB9w0CBbHnaFFy76aZt7jvbrTfP0ym/gvWGI4FBYhTKlTY1q6VmSCW78gbbrjJ3SzpFbF8WLLXCRLaiUyaf3q3JlxKwBWPLBsTYnwIP24b449f4H9+JUDzZjpsYeZwQ9NaZIZ+gBuEPdGa7W1IeUOwMXYRONcyT87rkYyd7xmLg4RDh7+z9QDmEMgAswcGZrObfEiwWWyQIAu2tQhgCsURffpcLG3CJFQLCDqeabxEO/kGTm3/QovQ8tENQAAkv7GwKS0eQDUXv7Y9060EuwlLCZPf8d5fn/vLrpz9/vF/+b37u3Hz5tpbOEhun3tGXcszRGqe5b6CIFZXNOVCcSZJqZipRXvwETntjslrgKKPAYRQ4Vv8C52TOIzoXUtVbMQYxZXYvpiTT7tmghUJe+O1Uf373xo1qIZRCHZq/In2gE5IqZ9JU0nRKtChyjXDZXWWc7W2PPVLThkG62AKlS0vqcwiwqpYBo7eqVgj0761SAefUwIVAd2ENCrMZobb1rwlkdGgnQZE8PbB0g8eZjxI77ML1QjRPIgrxYhGA1ln7zc4jsxa+AEtepXzDila05EUZ8uDm2R55rRymVpHXqjwQtZx2rHdxb6XTBhujM4GblZ6uSD9BQfe8g4JSGROr+VjSFKQGX4fCzNI1nc2hYM3RLu6Ug9yxDQo4gJI3ALFjiFMB5fu9HcWk1II6ACOxpQCg8HBCfXSevQfyGvzRyxChh93JRxt0vQnrkQpk7DIwAhTF/fjBu41xSwNW621AuS/N1HkMnFm+dxw10ylpJqtvzWQyJPTwDpY4GLfTnCupzo1USoB60dBDoKxOCwl6CtbwC4OeqGs72BR19p4LvwiRQx7lNIIvUrZ5G8EXpn7ydkaX6S50MoQqTaXMH4rUrGKADudi3Q53mEVjGn2aJa8vi0YuWgcdZ9GYDL7LhuYBb89Wh8o/QwcdIEOV27LERqDtwWNDNI2+s3JMyMPRC1C6TsFiPotMpeMJwJDeb7DDkIoUplOEG+soNpM41YUHVMcz8sW4zIDxMfOMeS88uMyzlL8qZdhw40wz4rUugIvBl9YwcIKD3WxLNTqMOV/hSPdch5ayVXS7XGIDNXS2uy2hIvnBdlsA39B97AKqU6hGvRK0ASkhLmDDYij0Jc5j1pnRbDW0VkoDKrhbK7JMp5wdKyhkmcph1FrvqctODEpKYJTHv9vwk4m1cKzyDTyzp8+Eduv3EbZi8N1ggf/5ePsZr9UluVwTWbfAkgoDzcFyBF8gJ3oMyTB6nqNNtL9/eXt/0vHx/Ar2UCJRA4635W5PESyxmbd0OrZ+jX/BgxtiRO//N6/GZH4GDar1HvdsVYdhJSucZ+KBx11B6RO7sRuPLE5bJ0MqlxnUtsh0ipYF8Bi2V680DtIw8yyoaQUW7DahyRYbfD4WWWkul/XeQ9FtKBZgjFI7NWBp1FmA1nv5Hru4Zb5C68DbeuslHr4dfR2gYa8D53+ifB1gsV+ltZJQqRLQ0sKZ59/wBZvumPrSJ/SE/70LHpw1sH28vr3Bd+///Hx99/XD/TV+v/T57vrsy935h08fPr1LH2sWAvLgpDWN8BLThYLwQIlpHSqVLAx17U0TXqjLwo18J0W6+fDp+sufI+S4Qs6CFFSnkNMZSka9zq2JQukcA2o9IMiXBJOiXOmMRujpGKEqxeMmoFZEGaEwrtQCHTqMYTz4nf7O3BEUw6ik7oXUp5OmHzU4Y1GpOSyNmoP5jKV2Jn6hSdbCW6y/+V/tD+dPy3ffFlbwl3tWnsTXso/JbOxjwlwrlNIynfYxAVHRayYwF2mQ9RpXAT/rM656Hb+sqZaNhxq4iTtMGFIN1VZd0CwrnP3N51ALlSWJBOCoYukKaYC3QIVO3A1hfTtkiSGbiBftyhrXVDS/yVKbi/GcLyOlQFJNkwGpZoujAGQ+d0MB10HWAqSAMbfQbNEBBXTgbB
OmgKFODU0QEUwoQojH4crHRz/yzt4jxyXOMOQvzs4jUrOW6OpD9cDmHomuEx0Umd50KlO8QbDTH7lARBnqfqdrU13L/Oii6Nk+Kgam57nrxnRzHsgix/8Sr1QQPpwwNTVzqpiGuv9ho6ZC3pX9UUWRs9dijnnbb2/u1UQjcEqUrY00SEVXbaBB02QGmYoPVuypZGeZV5gwFhWalBPGH8j27GyLOdgnjqLXIZY1WZ5acuaHjZFNLMxznCyIsk3D9ZE/C55yUbVkAN9YBaH3ExPC8Se5KPpBZrOJYrxcmG4uX1UC7CdNn+bmqFC9sFton1Oc+gcNMuMBa2Z+QlZvE6Mu4h3eS1UsNmVpass0BwgN3bVEBfTcIOTeBOGTE7o1Xu+2gUOnKiZ1ynodmLljVUvFAZk7LcyWDq0RNnlmKESIHuQONaHgdFwgvWjcsVRw8MB3LFav9Te6YeFUgQ2HhXWT+BdsWrB3xcKQ0+HVW7ep4Hpl1q2hWlPZKp3wWOtWs4peTEEmra4bU9XIyDrxJi0QNToqsXIwqAahUMHiqFViGELZt6nUzovbiat2KjqsB68ba5nzlCVYxSOVRCrT6a3cLBT7TWqrpuSgtZUJ65melZUuUQ48TtpKNarnFaS5NJXYBJmfLlSXKKfz6I2pVYPHOa0728vtwzZGRVguUOslb8M0V0bJy18vcog+fEM2rHKkDdsd88qFNRuZl+LJeuZljTjsm3lZahq+PqNWDHV6NmpVnZIRnIxahTaWlU6MWkUjDqXDj1L50HTBJFjMCrWI5bROJvc4/EIc3MwPSK2hkw3Nz4Tf25ziJOlsHCMFRFazQhl3MoGZMGXKIa4OhERFNM4IjiI46LqLUBhzWtg1F0guEBwKk0eJc5m90sVk14V5rXNmUgeLFYYpmx6zGPug1wfcaPbUtptGxnCrNFfhIYnTPFs6Ltrtrq4fZsglyCG9KTaPRDPHpescIkIckibSLAN0MN+swd7wVeSyiijTQWWvm0VxqQrUpeDhyoCSW4UFCDQ8Gm5y9sEmMjWLSjhgk5n1DndFmmYd1BRISiQ8P1HcbTm+JmgRW1bPBjIkwTZ7St/IK2z3hIWTgd9feLG4uJxnoZMLVUPut90rr77jAW8HKvNqol/PXldpOf4Ki4KjBVT40KCuMpz0VRVHDdP6I5XYMUoed/3IQrQJ0Rav/m7bNRqCI1/tixrkZbQM8JVIOxDkq5oKjkIacFHCO4zrfZaIamn3QRLUTrSVEjiu4Vfp0oxgrwG7PgAl0t6jLFvlyL6NYRolleJO0crgQHe6QQLQ1aHTjtdiC80T2XWCdBZbykuiT486LCAPb0egxE+xPSUpJfRLnbp5ezCxKEmhdljOGkZJL6EapSvJXJCffyOtQqdY5kZadF/APlvOwkQGDoVOgMg6TaGqcpowbeguw0c3QaL7CvfaLW31++K/yHyO/ln9rvvnnrz69t9WvPuJlu6vUbgXJDkA4HKhRAl3BfQhiRHuICRa+4/oWq8Nysp+DoN/kpq/ihTX/6hwpZzyNpIDcmQ6cgnqCwcHdYrCTl2XixE7Q8WOCoUtdYqd9imJcJFpPrPEYQtxUw3feSEAlEq1WCPlVhMKXoAUB+qblIEjA1XiZahBMI8kN5D4HJojgmS7P//w6cvwCWKrlA8QCHwQRhDwAFeTyk3DI2U5T3bc25crh8zy8l05yklNAYCpeqdUD5EGtW9tw1QdY3+novYtkl0dmXw8CsXeBIDTGZL+wvAm9+NT2DcynEpkvTLNDGXZnFQmgeCLTKv5lvESauOsK4mmYPNQCFmhHCKGybY/BXrq1s1UsmVuHKwvqyb1teXqJyu8IXkysd1+q2WnkLPCgmN17gbRcWd3FB92nbpmmiVgyggn6PRXWBNeWS4naMtKl7efJ6RXc8aa9ErwcVyjparWzvvhwkcKTm/kXquabultAHH2UHs3ul8GP8SkAXBDCBsV3BPFtrT84ldV/y6R0nQnE8Om5ji6J0phJsHh9ypYW4eCwhJTesMOfEWaB+s1mkfOLJ1BqiSJThsHUB9zqIGBQnuoOTafaNDAIPmiWWhDImUXr5owA4HynCSCeuudPSbvrn3f2Wy9eNV2r1h5vnvrvASPUfox6VXD5mHtetcXaV8DJmZZCGzloYb1sjBlqcpGKV3xF448x78jUF4vWUhcJKEbBpsvaVgxGdjZjCi8/oGI6RiPAb65aCdDyU0fLdL3zoIoCh6SizBZnv2ku6XRL/BfvFiXRKjq+Ntc4mv5cI3/kpeH0WWwxgBwvB1FkbONntA2akDrPYPU07qGuJo42kJBIhyJnZVyXOgY4NVc+HGquOe6aD3JhnDB+q8a1kMmD5gJPPKiUF58yRO1P9oz2B2iWDPboqY9f5Y3uWEmn8ZMvv7oBdWvGXm1C14Fzkk7pj1D+ZM3w6sWM/n6M3sg/8/Iq13wqgyExHZM/AYb19fOrHtGGDCzGqOjoQtHA+WkB1pkdux3SN2Oo0xuJpONE/A7GD1ubnr2OxgnsJcxGCoKjbzIlxeH4ncwenQJZsNiBJhH++AcZvKdgI/QGP0OffFq734Ho1+/w7B49QT8DqmNPfJq57zav9/BhCJw3yiz7hlhyMzawEk4+h2O9jvQ5Zp79zuYxQqBo0xm4ekT8CWagg/UB+x3ME/ALWQKdguNvDhcv4Mp2OdUYx4trDmaCzqWmZF+lo14lV2L9kcvwX6ikVeH63cwBfucTopXTWby9UcvwX6ikVcH7HdIpcXIrBlGGDCzWg38RKPf4Wi/A5ViBmSYdex3sEaZfJRM3vPLkHm6vJIOc8KpWVV+oSzLtPG0neal1mWhvlJXTMrpg0bsmHrSuXgaiivGgtxwpO7A4rucFiDozoB0dWS5mhgD0lJmJMijATVPIHrLGqO3+mLd3j0zVtErN512ya2G5NiyoGNm5co0dvsHYjS02S1YJxDXZY1xXX1x8QB8NpDDLtbA6+418NxFM2smhqdVQ7VVtwk5TyDOy4ZcbqLqMO/6urzCMsxcnT77BnN2gbNFVWMueeYuS3QrIzSY/YF0MwGg8ErHSFE6RMpsRMqxjX6gbmYdI4XBV5NpAJ6QwXW2q32FtwwZyfhnJ8KkWe9GFEmd0C57pUCIpstdW40ss5pQZat0LES+E3k/snM1qVRG09Ki2o6z1ikzaExQ8wivUlYs0iSyM608lWR7kq2vaqrqpLK+6hp/w7+yF/H79PTy8LbdVZO+tpxrt6Y+nmyz26qyoMytTCiwmZI4iVDTM15sxdJ73hVLmQo5Ny1UWX1WuT9arj+rTA+4SyHX4FCS6jBjAGnYUOHLytZHLaFUXfmSu2TJSZW07GX3MuV4+aEwSo/GNaK7lB+Q/2G41eLlE60Wv3d+1AqZ+RzpJS7OI6rF01ImbVfQV7V4TSrf6wwRb+op4a1KdTmu7eoQMm9uLq/tG154o8s5AwXAO8Yb985GR3Ut7xS0df1aq/o+DwTK0MltLcj358c8oKzSjRGAEz9RUL5YXWy+/dRuzp+32tJ37v44P39/lm48M0i+DIPt9uwam
zzBhjz7r5e+s916C2/ukHZav5EPVKSP99fk3h1ahgjf3Y2fmIuHf+9uWUn7llRV2oYscGF9tGp2ckMQMm+wXU+7bm20u8g0ujPAQJBBZ80DA9nbNPRb4owy9M0O20KBODuFbeWbNPBb4owy8E2rZ3lW0ff+ldn3o2XPCcO0ZW/afWO43CcydlCdcO6gSjfYASLAuu2gClYpfkstVCl21KFm2Jwogi/DgCzS/t47/B1XHwMXkVf8Hw==</diagram></mxfile>
|
2106.01342/main_diagram/main_diagram.pdf
ADDED
|
Binary file (73.9 kB). View file
|
|
|
2106.01342/paper_text/intro_method.md
ADDED
|
@@ -0,0 +1,72 @@
|
|
| 1 |
+
# Introduction
|
| 2 |
+
|
| 3 |
+
While machine learning for image and language processing has seen major advances over the last decade, many critical industries, including financial services, health care, and logistics, rely heavily on data in structured table format. Tabular data is unique in several ways that have prevented it from benefiting from the impressive success of deep learning in vision and language. First, tabular data often contain heterogeneous features that represent a mixture of continuous, categorical, and ordinal values, and these values can be independent or correlated. Second, there is no inherent positional information in tabular data, meaning that the order of columns is arbitrary. This differs from text, where tokens are always discrete, and ordering impacts semantic meaning. It also differs from images, where pixels are typically continuous, and nearby pixels are correlated. Tabular models must handle features from multiple discrete and continuous distributions, and they must discover correlations without relying on the positional information. Sufficiently powerful deep learning systems for tabular data have the potential to improve performance beyond what is achieved by classical methods, like linear classifiers and random forests. Furthermore, without performant deep learning models for tabular data, we lack the ability to exploit compositionality, end-to-end multi-task models, fusion with multiple modalities (e.g. image and text), and representation learning.
|
| 4 |
+
|
| 5 |
+
We introduce SAINT, the Self-Attention and Intersample Attention Transformer, a specialized architecture for learning with tabular data. SAINT leverages several mechanisms to overcome the difficulties of training on tabular data. SAINT projects all features -- categorical and continuous -- into a combined dense vector space. These projected values are passed as tokens into a transformer encoder which uses attention in the following two ways. First, there is "self-attention," which attends to individual features within each data sample. Second, we propose a novel "intersample attention," which enhances the classification of a row (i.e., a data sample) by relating it to other rows in the table. Intersample attention is akin to a nearest-neighbor classification, where the distance metric is learned end-to-end rather than fixed. In addition to this hybrid attention mechanism, we also leverage self-supervised contrastive pre-training to boost performance for semi-supervised problems.
|
| 6 |
+
|
| 7 |
+
We provide comparisons of SAINT to a wide variety of deep tabular architectures and commonly used tree-based methods using a diverse battery of datasets. We observe that SAINT, on average, outperforms all other methods on supervised and semi-supervised tasks. More importantly, SAINT often outperforms boosted trees (including XGBoost [@chen2016xgboost], CatBoost [@dorogush2018catboost], and LightGBM [@ke2017lightgbm]), which have long been an industry favorite for complex tabular datasets. Finally, we visualize the attention matrices produced by our models to gain insights into how they behave.
|
| 8 |
+
|
| 9 |
+
# Method
|
| 10 |
+
|
| 11 |
+
SAINT is inspired by the transformer encoder of @vaswani2017attention, designed for natural language, where the model takes in a sequence of feature embeddings and outputs contextual representations of the same dimension. A graphical overview of SAINT is presented in Figure [1](#fig:saint_arch_training){reference-type="ref" reference="fig:saint_arch_training"}(a).
|
| 12 |
+
|
| 13 |
+
SAINT is composed of a stack of $L$ identical stages. Each stage consists of one self-attention transformer block and one intersample attention transformer block. The self-attention transformer block is identical to the encoder from [@vaswani2017attention]. It has a multi-head self-attention layer (MSA) (with $h$ heads), followed by two fully-connected feed-forward (FF) layers with a GELU non-linearity [@hendrycks2016gaussian]. Each layer has a skip connection [@he2016deep] and layer normalization (LN) [@ba2016layer]. The intersample attention transformer block is similar to the self-attention transformer block, except that the self-attention layer is replaced by an intersample attention layer (MISA). The details of the intersample attention layer are presented in the following subsection.
|
| 14 |
+
|
| 15 |
+
The SAINT pipeline, with a single stage ($L=1$) and a batch of $b$ inputs, is described by the following equations. We denote multi-head self-attention by MSA, multi-head intersample attention by MISA, feed-forward layers by FF, and layer norm by LN: $$\begin{align}
|
| 16 |
+
\mathbf{z_i^{(1)}} &= \operatorname{LN}(\operatorname{MSA}(\mathbf{E}(\mathbf{x_i}))) + \mathbf{E}(\mathbf{x_i}) & \mathbf{z_i^{(2)}} &= \operatorname{LN}(\operatorname{FF_1}(\mathbf{z_i^{(1)}})) +\mathbf{z_i^{(1)}}
|
| 17 |
+
\label{eq:sa_block_apply} \\
|
| 18 |
+
\mathbf{z_i^{(3)}} &= \operatorname{LN}(\operatorname{MISA}(\{\mathbf{z_i^{(2)}}\}_{i=1}^b)) +\mathbf{z_i^{(2)}} & \mathbf{r_i}&= \operatorname{LN}(\operatorname{FF_2}(\mathbf{z_i^{(3)}})) +\mathbf{z_i^{(3)}}
|
| 19 |
+
\label{eq:isa_block_apply}
|
| 20 |
+
% \mbf{y} &= \op{LN}(\mbf{z}_L^0) \label{eq:final_rep}
|
| 21 |
+
\end{align}$$
|
| 22 |
+
|
| 23 |
+
where $\mathbf{r_i}$ is SAINT's contextual representation output corresponding to data point $\mathbf{x_i}$. This contextual embedding can be used in downstream tasks such as self-supervision or classification.
|
| 24 |
+
|
| 25 |
+
<figure id="fig:ISA" data-latex-placement="tb">
|
| 26 |
+
<embed src="figs/intersampleattention.pdf" />
|
| 27 |
+
<figcaption>Intersample attention on a batch of 3 points. In this plot, <span class="math inline"><em>d</em></span> is the size of value vectors <span class="math inline"><em>v</em><sub><em>i</em></sub></span>. See Section <a href="#subsec:ISA" data-reference-type="ref" data-reference="subsec:ISA">3.2</a> for details. </figcaption>
|
| 28 |
+
</figure>
|
| 29 |
+
|
| 30 |
+
We introduce intersample attention (a type of row attention) where the attention is computed across different data points (rows of a tabular data matrix) in a given batch rather than just across the features of a single data point. Specifically, we concatenate the embeddings of each feature for a single data point, then compute attention over samples (rather than features). This enables us to improve the representation of a given point by inspecting other points. When a feature is missing or noisy in one row, intersample attention enables SAINT to borrow the corresponding features from other similar data samples in the batch.
|
| 31 |
+
|
| 32 |
+
An illustration of how intersample attention is performed in a single head is shown in Figure [2](#fig:ISA){reference-type="ref" reference="fig:ISA"} and the pseudo-code is presented in Algorithm [\[alg:MISA\]](#alg:MISA){reference-type="ref" reference="alg:MISA"}. Unlike the row attention used in [@ho2019axial; @child2019generating; @rao2021msa; @iida2021tabbie], intersample attention allows all features from different samples to communicate with each other. In our experiments, we show that this ability boosts performance appreciably. In the multi-head case, instead of projecting $q,k,v$ to a given dimension $d$, we project them to $d/h$ where $h$ is the number of heads. Then we concatenate all the updated value vectors, $v_i$, to get back a vector of length $d$.
|
| 33 |
+
|
| 34 |
+
::: algorithm
|
| 35 |
+
``` {.python language="python"}
|
| 36 |
+
|
| 37 |
+
# W_q, W_k, W_v are weight matrices of dimension dxd
|
| 38 |
+
# mm: matrix-matrix multiplication
|
| 39 |
+
def self_attention(x):
|
| 40 |
+
# x is bxnxd
|
| 41 |
+
q, k, v = mm(W_q,x), mm(W_k,x), mm(W_v,x) #q,k,v are bxnxd
|
| 42 |
+
attn = softmax(mm(q,np.transpose(k, (0, 2, 1)))/sqrt(d)) # bxnxn
|
| 43 |
+
out = mm(attn, v) #out is bxnxd
|
| 44 |
+
return out
|
| 45 |
+
|
| 46 |
+
def intersample_attention(x):
|
| 47 |
+
# x is bxnxd
|
| 48 |
+
b,n,d = x.shape # as mentioned above
|
| 49 |
+
x = reshape(x, (1,b,n*d)) # reshape x to 1xbx(n*d)
|
| 50 |
+
x = self_attention(x) # the output x is 1xbx(n*d)
|
| 51 |
+
out = reshape(x,(b,n,d)) # out is bxnxd
|
| 52 |
+
return out
|
| 53 |
+
```
|
| 54 |
+
:::
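The listing above is pseudocode: `mm`, `softmax`, `reshape`, and the projection weights are left implicit, and `mm(W_q, x)` is to be read as projecting along the feature dimension. One runnable single-head NumPy reading, with the weights passed explicitly (their names and shapes are our choice, not the paper's), is sketched below.

```python
import numpy as np

def softmax(x, axis=-1):
    x = x - x.max(axis=axis, keepdims=True)
    e = np.exp(x)
    return e / e.sum(axis=axis, keepdims=True)

def self_attention(x, W_q, W_k, W_v):
    # x: (b, n, d); W_q, W_k, W_v: (d, d) single-head projections applied per token
    q, k, v = x @ W_q, x @ W_k, x @ W_v                                     # (b, n, d)
    attn = softmax(q @ np.transpose(k, (0, 2, 1)) / np.sqrt(x.shape[-1]))   # (b, n, n)
    return attn @ v                                                         # (b, n, d)

def intersample_attention(x, W_q, W_k, W_v):
    # x: (b, n, d); the b samples of the batch attend to each other
    b, n, d = x.shape
    flat = x.reshape(1, b, n * d)              # each sample becomes one "token"
    out = self_attention(flat, W_q, W_k, W_v)  # here W_* have shape (n*d, n*d)
    return out.reshape(b, n, d)

# usage sketch:
# rng = np.random.default_rng(0)
# x = rng.normal(size=(3, 5, 8))
# Ws = [rng.normal(size=(40, 40)) for _ in range(3)]   # 40 = n*d
# out = intersample_attention(x, *Ws)                  # (3, 5, 8)
```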
|
| 55 |
+
|
| 56 |
+
Contrastive learning, in which models are pre-trained to be invariant to reordering, cropping, or other label-preserving "views" of the data [@chen2020simple; @he2020momentum; @pathak2016context; @grill2020bootstrap; @vincent2008extracting], is a powerful tool in the vision and language domains that has never (to our knowledge) been applied to tabular data. We present a contrastive pipeline for tabular data, a visual description of which is shown in Figure [1](#fig:saint_arch_training){reference-type="ref" reference="fig:saint_arch_training"}. Existing self-supervised objectives for tabular data include denoising [@vincent2008extracting], a variation of which was used by VIME [@yoon2020vime], masking, and replaced token detection as used by TabTransformer [@huang2020tabtransformer]. We find that, while these methods are effective, superior results are achieved by contrastive learning.
|
| 57 |
+
|
| 58 |
+
**Generating augmentations** Standard contrastive methods in vision craft different "views" of images using crops and flips. It is difficult to craft invariance transforms for tabular data. The authors of VIME [@yoon2020vime] use mixup in the non-embedded space as a data augmentation method, but this is limited to continuous data. We instead use CutMix [@yun2019cutmix] to augment samples in the input space and we use mixup [@zhang2017mixup] in the embedding space. These two augmentations combined yield a challenging and effective self-supervision task. Assume that only $l$ of $m$ data points are labeled. We denote the embedding layer by $\mathbf{E}$, the SAINT network by $\mathbf{S}$, and 2 projection heads as $g_1(\cdot)$ and $g_2(\cdot)$. The CutMix augmentation probability is denoted $p_{\text{cutmix}}$ and the mixup parameter is $\alpha$. Given point $\mathbf{x_i}$, the original embedding is $\mathbf{p_i} = \mathbf{E}(\mathbf{x_i})$, while the augmented representation is generated as follows: $$\begin{align}
|
| 59 |
+
\mathbf{x_i^\prime} &= \mathbf{x_i}\odot \mathbf{m} + \mathbf{x_a}\odot \mathbf{(1-m)} && \textcolor{gray}{\text{CutMix in raw data space}}\\
|
| 60 |
+
\mathbf{p_i^\prime} &= \alpha*\mathbf{E}(\mathbf{x_i^\prime}) +(1-\alpha)*\mathbf{E}(\mathbf{x_b^\prime})&& \textcolor{gray}{\text{mixup in embedding space}}
|
| 61 |
+
\end{align}$$ where $\mathbf{x_a}$, $\mathbf{x_b}$ are random samples from the current batch, $\mathbf{x_b^\prime}$ is the CutMix version of $\mathbf{x_b}$, $\mathbf{m}$ is the binary mask vector sampled from a Bernoulli distribution with probability $p_{\text{cutmix}}$, and $\alpha$ is the mixup parameter. Note that we first obtain a CutMix version of every data point in a batch by randomly selecting a partner to mix with. We then embed the samples and choose new partners before performing mixup.
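A small sketch of this two-step augmentation is given below; the embedding callable, the partner sampling, and the default values of $p_{\text{cutmix}}$ and $\alpha$ are illustrative placeholders rather than the tuned settings.

```python
import numpy as np

def augment_batch(x, embed, p_cutmix=0.3, alpha=0.2, seed=0):
    """Generate clean embeddings p_i and augmented embeddings p_i' (sketch).

    x:     raw batch of shape (b, n) with numerically encoded features
    embed: embedding function E mapping (b, n) -> (b, n, d)
    """
    rng = np.random.default_rng(seed)
    b, n = x.shape
    # CutMix in raw data space: x_i' = x_i * m + x_a * (1 - m), m ~ Bernoulli(p_cutmix)
    partner = rng.permutation(b)
    m = rng.random((b, n)) < p_cutmix
    x_prime = np.where(m, x, x[partner])
    # mixup in embedding space with freshly chosen partners x_b'
    p_clean = embed(x)                                   # p_i = E(x_i)
    e_aug = embed(x_prime)                               # E(x_i')
    partner2 = rng.permutation(b)
    p_prime = alpha * e_aug + (1 - alpha) * e_aug[partner2]
    return p_clean, p_prime
```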
|
| 62 |
+
|
| 63 |
+
**SAINT and projection heads** Now that we have both the clean $\mathbf{p_i}$ and mixed $\mathbf{p_i^\prime}$ embeddings, we pass them through SAINT, then through two projection heads, each consisting of an MLP with one hidden layer and a ReLU. The use of a projection head to reduce dimensionality before computing contrastive loss is common in vision [@chen2020simple; @he2020momentum; @grill2020bootstrap] and indeed also improves results on tabular data. Ablation studies and further discussion are available in Appendix [\[appendix_sec:analysis\]](#appendix_sec:analysis){reference-type="ref" reference="appendix_sec:analysis"}.
|
| 64 |
+
|
| 65 |
+
**Loss functions** We consider two losses for the pre-training phase. (i) The first is a contrastive loss that pushes the latent representations of two views of the same data point ($z_i$ and $z_i^\prime$) close together and encourages different points ($z_i$ and $z_j$, $i\neq j$) to lie far apart. For this, we borrow the InfoNCE loss from metric-learning works [@sohn2016improved; @oord2018representation; @chen2020simple; @wu2018unsupervised]; (ii) The second loss comes from a denoising task. For denoising, we try to predict the original data sample from a noisy view. Formally, we are given $\mathbf{r_i^\prime}$ and we reconstruct the inputs as $\mathbf{x_i^{\prime \prime}}$ to minimize the difference between the original and the reconstruction. The combined pre-training loss is: $$\begin{align}
|
| 66 |
+
% \mathcal{L_\text{contrastive}} &= -\sum_{i=1}^{m}{\log{\frac{\exp(z_i \cdot z_i^\prime/\tau)}{\sum_{k=1}^{m}{\exp(z_i \cdot z_k^\prime/\tau)}}}}\\
|
| 67 |
+
% \mathcal{L_\text{denoising}} &= \sum_{i=1}^{m}\sum_{j=1}^{n} [L_j(MLP_j(\mathbf{r_i^\prime}),\mathbf{x_i})] \\
|
| 68 |
+
% \mathcal{L_\text{pretraining}} &= \mathcal{L_\text{contrastive}} + \lambda_{pt} \mathcal{L_\text{denoising}}\\
|
| 69 |
+
\mathcal{L_\text{pre-training}} &= \underbrace{-\sum_{i=1}^{m}{\log{\frac{\exp(z_i \cdot z_i^\prime/\tau)}{\sum_{k=1}^{m}{\exp(z_i \cdot z_k^\prime/\tau)}}}}}_{\text{Contrastive Loss}} + \lambda_{\text{pt}} \underbrace{\sum_{i=1}^{m}\sum_{j=1}^{n} [\mathcal{L}_j(\text{MLP}_j(\mathbf{r_i^\prime}),\mathbf{x_i})]}_{\text{Denoising Loss}}
|
| 70 |
+
\end{align}$$ where $\mathbf{r_i} = \mathbf{S}(\mathbf{p_i}), \mathbf{r_i^\prime} = \mathbf{S}(\mathbf{p_i^\prime}), z_i = g_1(\mathbf{r_i}),z_i^\prime = g_2(\mathbf{r_i^\prime})$. $\mathcal{L}_j$ is cross-entropy loss or mean squared error depending on the $j^{th}$ feature being categorical or continuous. Each $\text{MLP}_j$ is a single hidden layer perceptron with a ReLU non-linearity. There are $n$ in number, one for each input feature. $\lambda_{\text{pt}}$ is a hyper-parameter and $\tau$ is temperature parameter and both of these are tuned using validation data.
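The sketch below evaluates this combined objective with PyTorch; the per-feature reconstruction layout and the default $\lambda_{\text{pt}}$ and $\tau$ values are illustrative assumptions, and the contrastive term is averaged (rather than summed) over the batch.

```python
import torch
import torch.nn.functional as F

def pretraining_loss(z, z_prime, recon, targets, is_categorical, lambda_pt=10.0, tau=0.7):
    """Contrastive + denoising pre-training loss (illustrative sketch).

    z, z_prime:     projections g1(r_i) and g2(r_i'), each of shape (m, k)
    recon:          list of per-feature reconstructions MLP_j(r_i')
    targets:        list of per-feature targets (class indices or float values)
    is_categorical: list of booleans, one per feature
    """
    # InfoNCE: z_i should match z_i' and differ from z_k' for k != i
    logits = (z @ z_prime.T) / tau
    labels = torch.arange(z.size(0))
    contrastive = F.cross_entropy(logits, labels)        # averaged over the batch
    # Denoising: cross-entropy for categorical features, MSE for continuous ones
    denoising = torch.zeros(())
    for pred, tgt, cat in zip(recon, targets, is_categorical):
        denoising = denoising + (F.cross_entropy(pred, tgt) if cat
                                 else F.mse_loss(pred.squeeze(-1), tgt))
    return contrastive + lambda_pt * denoising
```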
|
| 71 |
+
|
| 72 |
+
**Finetuning** Once SAINT is pre-trained on all unlabeled data, we finetune the model on the target prediction task using the $l$ labeled samples. The pipeline of this step is shown in Figure [1](#fig:saint_arch_training){reference-type="ref" reference="fig:saint_arch_training"}(b). For a given point $\mathbf{x_i}$, we learn the contextual embedding $\mathbf{r_i}$. For the final prediction step, we pass the embedding corresponding only to the `[CLS]` token through a simple MLP with a single hidden layer with ReLU activation to get the final output. We evaluate cross-entropy loss on the outputs for classification tasks and mean squared error for regression tasks.
|
2109.02832/main_diagram/main_diagram.drawio
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
<mxfile host="Electron" modified="2021-02-04T18:09:46.963Z" agent="5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) draw.io/14.1.8 Chrome/87.0.4280.88 Electron/11.1.1 Safari/537.36" etag="ofJiHJhBCjrNkGQC_rnj" version="14.1.8" type="device"><diagram id="pcj6whyVe2MLGWiIq6mV" name="Page-1">7V1bc5s4FP41ntl9iAeQuD3G2Gm723TTprtN9yUjg2KzxUABx3Z//QojwAjFYAO2EzvTaayDEJfvOxcdHTk9YMyW7wLkT289Czs9SbCWPTDsSZIo6Cr5FUtWiUTR9EQwCWyLdsoF9/YvnJ5JpXPbwmGhY+R5TmT7RaHpuS42o4IMBYG3KHZ78pziVX00wSXBvYmcsvSbbUXTRKpJai5/j+3JNL2yqNDnm6G0Mx0inCLLWySi9cOBUQ8YgedFyafZ0sBO/PLS95K8gZsXjmY3FmA3qnOC9FkRbj8Hzj/X4vh+cP/nx38fb68gvdtn5MzpE9O7jVbpKwi8uWvheBSxBwaLqR3hex+Z8dEFAZ3IptHMoYctFE6zvnHjDkURDty1RBLIUw/CKPB+ZG8z7vdkO47hOV6wviC4uRkphpH1LBwxDF2Pz/DcaEOe/FA5JZEk0/YNmtlOTL+v9oxQSRI+4QX5/4s3Q+S2BuX3SF/tMw4ivNwQ0ff6DnszHAUr0oUehZCylZJcVGl7kVMGCFQ23aCLpFMhojSdZGPnSJIPFMxdgFUvwDYHNgMyB7avSCVsM2t1IGyFlrGtBG4oj7Qh5AGnXxvq4KYeP46EoVbEUJLLyinzlFMUugIQtG11zwpAoIj1AExDjfYVsBo+bJEwgja9IJp6E89FziiXDnKABdLK+3z0PJ+i8B+OohV94WgeeUXQyRsNVg/0/HXje9zoy2lzuNw8OFzRFkONJ83EpsmjxkCToSyU6BUzAS/t6CG9D/J548qklV84bmTXbZE9oTcPTPqqPw5/ea4/nd89TL5e3Vr6H676/iqNFVEwwdEWKAHtGOO1lYwBdlBkPxfDQh6z6Kl3nk3uOSNx5g1SKwQZ45LcKT0r5+d1EKDVRjc/7hCWGJzd6P6kFjmkVpyIAlVgt/Jz7qUHrsI1pNexiZD95Rq09Dj5NIl/f3D9ORkBxv9kI46Mx089dbDsqUMqpVciN55cLDmvmVFkeI5FS8Yq1wQqKkDK4e1bFlRIfa1s0lSeSwKwD7SOrJrEIQCFLLIdCxPEUvTG5LMYo6cOHxPxEDsRiiWyYdqBScbZOM2K+4k9eZSO2CauFsLaE9d+KaaGCdGO5rd0mQk89BLIoiiXUVa6ijuUaoSfKKTk0QyRxdOf2o/2OUMIFLkehLLUEYS8aV1ZSaP1C8mt6zlgBSCDlaiWsIIcmwphR1CJWo0w0bWu4ywVabmei3eacufhfBqZgXWsZ93Y8Y22HnNVhkiFeJyjE1TWMJKCgrg9kkpiw1IkVR4I7heStRVwSaBbh0t+G4/5GCZySOM2GWR94nm5Y6jq1e5Y4/C2M3csyfX98ctISsYnnp+uQv+Mked6cR7ynXlxqalr2Hfuz3iHupmA8cuZABbSYlojJDY0Sh/DdFAY2mYqpjeSphCyp9khndBG+mCrbT4Vn6fAIoMBk9qq7fPkttMQNa9Dc3G50iQjtupQQY28+AHycl1o1d65vgZKspk7s8bA+Ii+PfztXy0+/RkJys8ZvtJq6khT8gN5O2frkl8R9e0DHT0HB3hJOIbB4RT5CXtHjmP7YcxZHwcEWzI3IHKcSO9yUZUfHyPzx2RN+7/mkWPHvqbeqsLJenzAmh8J9vVyuAc4Fht0tuhTDvdvjGYB2OsFSGUBEpS+Xo7KFF44rvSFrlKggLey01ISnOjWOgueDDcOGg2WzRPGKKCzhN/Y1Prv55xbp0c1UORZsoAv5D+gxDkdlDknAq2vyl2RrkapxsZkII+iX7QE+67Qdb6imEU8Xa0FbrW81YuBsGZEc5ioX4ESE0RLfZExfbVjH50ZC5TH6jjfBVsyrhvGUDAzbuVCIAiKQvhXMpoGCiybRPKkjWaxQXPHYfxrcBX667CHvArfD7ylTeyo7bl1LSexRRFPHVPi0+n7ppZQEXLsSZzONQmn15FabNlsEznX9MDMtqz1rIRnj4szleMYWIkxsFAEvAopyKvPgKC7xUzIy6215MnTgUKfvMUmA5EnyCmWjFa8wutlXjrHbjoJ3YGJbEiZFuVVGOvOAn7IW269UPANU1CG8MQoyMszFyhY04E2YNgXHNrWPPa6wtjxzB+vi1ivpiqSV3Mu6jyv2wLZtpXccbi2k5UqEegOvWyjzjN7wtYSCmXoedVjWlfISxzkjzKB3X0i2iSxfrAJLCchv00DT2T6qsF+nmeRhTSvkiff+4qsZz/anhNbjV0cYO6zvUnt3Tx8no5Wi+WXD9f6TI/gz8GSa/R2pH62+JmR+/smt7lELzqpnMQ5b7/3NlaKW8vbVK9UtaUI3BUnqawIjVamDqMIKnhhZrIr11V2nxl7L+2RfdvrvxQfljxytq31CMWHXKh4tWUv26WuCkw45qJxqvhtFJgoJ2WhmLkEZCekdQ0Uu1+yq/IS9jpAFxmNaVZewlWpOru+XqdKdahE5TRNdzVbJ6VSJYpCuc+sIJ66VrVdtMXVKl6e/pXMHdsq0jq8jlSuhKonpUuAzXXBPSeLssQO1I0mAWmrJlX2B7DwXR7daB5veeKieR0HfK9N81gip1V0u2oekJndWZAZqCPNYyLD6v7gAD6v04KfXdJIbWrejqmnLnVNralrtTf/n4mba4vfNXa3XKqYaySVdGY5+bBVzFxoa3xzze6m6y0EAJXGJt3tdyLGpnInUG1jI+gb6zyCIO1leppuMDpIBijN219C5sNMVjl1u9vWJU5Es4CyXSH2depH0TNFKS5sVM1k2f4HiafFFlZhL3pZXy/li16eml5WzXOPo5fS29HLbneonNxEtAtNyTYB7LiO0RodeYvil2rYt1ANC5mSyPV36vXYmXJn9bB8uu22YHzS1u8Y3z9QWQ2mlcOQrfv/Tse6thR5HC7Nx99dwHPvB93QN0VBfBkrzhDObPeya6/u9yMw5pJXribJnO81THfI7mAsSTP/IwcJ9fI/FQFG/wM=</diagram></mxfile>
|
2109.02832/main_diagram/main_diagram.pdf
ADDED
|
Binary file (76.3 kB). View file
|
|
|
2109.02832/paper_text/intro_method.md
ADDED
|
@@ -0,0 +1,433 @@
|
|
|
| 1 |
+
# Introduction
|
| 2 |
+
|
| 3 |
+
Deep learning has achieved significant success in various practical applications with high-dimensional data set, such as computer vision (Krizhevsky et al., 2012), natural language processing (Graves et al., 2013; Young et al., 2018; Wu et al., 2016), health care (Miotto et al., 2018; Jiang et al., 2017) and bioinformatics (Alipanahi et al., 2015; Zhou and Troyanskaya, 2015).
|
| 4 |
+
|
| 5 |
+
The success of deep learning clearly demonstrates the great power of neural networks in representing complex data. In the past decades, the representation power of neural networks has been extensively studied. The most commonly studied architecture is the feedforward neural network (FNN), as it has a simple composition form. The representation theory of FNNs has been developed with smooth activation functions (e.g., sigmoid) in Cybenko (1989); Barron (1993); McCaffrey and Gallant (1994); Hamers and Kohler (2006); Kohler and Krzyżak (2005); Kohler and Mehnert (2011) or nonsmooth activations (e.g., ReLU) in Lu et al. (2017); Yarotsky (2017); Lee et al. (2017); Suzuki (2019). These works show that if the network architecture is properly chosen, FNNs can approximate uniformly smooth functions (e.g., Hölder or Sobolev) with arbitrary accuracy.
|
| 6 |
+
|
| 7 |
+
In real-world applications, convolutional neural networks (CNNs) are more popular than FNNs (LeCun et al., 1989; Krizhevsky et al., 2012; Sermanet et al., 2013; He et al., 2016; Chen et al., 2017; Long et al., 2015; Simonyan and Zisserman, 2014; Girshick, 2015). In a CNN, each layer consists of several filters (channels) which are convolved with the input, as demonstrated in Figure 1(a). Due to such complexity in the CNN architecture, there are limited works on the
|
| 8 |
+
|
| 9 |
+
<sup>\*</sup>Hao Liu is affiliated with the Department of Mathematics at Hong Kong Baptist University; Wenjing Liao is affiliated with the School of Mathematics at Georgia Tech; Minshuo Chen and Tuo Zhao are affiliated with the ISYE department at Georgia Tech; Email: haoliu@hkbu.edu.hk, {mchen393, wliao60, tzhao80}@gatech.edu.
|
| 10 |
+
|
| 11 |
+

|
| 12 |
+
|
| 13 |
+
Figure 1: Illustration of (a) convolution and (b) skip-layer connection.
|
| 14 |
+
|
| 15 |
+
representation theory of CNNs (Zhou, 2020b,a; Fang et al., 2020; Petersen and Voigtlaender, 2020). The constructed CNNs in these works become extremely wide (in terms of the size of each layer's output) as the approximation error goes to 0. In most real-life applications, the network width does not exceed 2048 (Zagoruyko and Komodakis, 2016; Zhang et al., 2020).
|
| 16 |
+
|
| 17 |
+
Convolutional residual networks (ConvResNet) is a special CNN architecture with skip-layer connections, as shown in Figure 1(b). Specifically, in addition to CNNs, ConvResNets have identity connections between inconsecutive layers. In many applications, ConvResNets outperform CNNs in terms of generalization performance and computational efficiency, and alleviate the vanishing gradient issue. Using this architecture, He et al. (2016) won the 1st place on the ImageNet classification task with a 3.57% top 5 error in 2015.
|
| 18 |
+
|
| 19 |
+
Table 1: Comparison of our approximation theory and existing theoretical results.
|
| 20 |
+
|
| 21 |
+
| | Network type | Function class | Low dim. structure | Fixed width | Training |
|---|---|---|---|---|---|
| Yarotsky (2017) | FNN | Sobolev | ✗ | ✗ | difficult to train due to the cardinality constraint |
| Suzuki (2019) | FNN | Besov | ✗ | ✗ | |
| Chen et al. (2019b) | FNN | Hölder | ✓ | ✗ | |
| Petersen and Voigtlaender (2020) | CNN | FNN | ✗ | ✗ | |
| Zhou (2020b) | CNN | Sobolev | ✗ | ✗ | can be trained without the cardinality constraint |
| Oono and Suzuki (2019) | ConvResNet | Hölder | ✗ | ✓ | |
| Ours | ConvResNet | Besov | ✓ | ✓ | |
|
| 31 |
+
|
| 32 |
+
Recently, Oono and Suzuki (2019) develops the only representation and statistical estimation theory of ConvResNets. Oono and Suzuki (2019) proves that if the network architecture is properly set, ConvResNets with a fixed filter size and a fixed number of channels can universally approximate Hölder functions with arbitrary accuracy. However, the sample complexity in Oono and Suzuki (2019) grows exponentially with respect to the data dimension and therefore cannot well explain the empirical success of ConvResNets for high dimensional data. In order to estimate a $C^s$ function in $\mathbb{R}^D$ with accuracy $\varepsilon$ , the sample size required by Oono and Suzuki (2019) scales as $\varepsilon^{-\frac{2s+D}{s}}$ , which is far beyond the sample size used in practical applications. For example, the ImageNet data set consists of 1.2 million labeled images of size $224 \times 224 \times 3$ . According to this theory, to achieve a 0.1 error, the sample size is required to be in the order of $10^{224 \times 224 \times 3}$ which greatly exceeds 1.2 million. Due to the curse of dimensionality, there is a huge gap between theory and practice.
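For intuition about the scale of this bound, the tiny computation below reproduces the order-of-magnitude claim in log space; the smoothness choice s = 1 is ours purely for illustration.

```python
import math

# Back-of-the-envelope check of the sample complexity quoted above.
eps, s = 0.1, 1
D = 224 * 224 * 3                                  # ambient dimension of an ImageNet image
log10_n = ((2 * s + D) / s) * (-math.log10(eps))   # log10 of eps^{-(2s+D)/s}
print(f"required n is on the order of 10^{log10_n:.0f}")   # ~10^150530, vastly more than 1.2e6
```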
|
| 33 |
+
|
| 34 |
+
We bridge such a gap by taking low-dimensional geometric structures of data sets into consideration. It is commonly believed that real world data sets exhibit low-dimensional structures due to rich local regularities, global symmetries, or repetitive patterns (Hinton and Salakhutdinov, 2006; Osher et al., 2017; Tenenbaum et al., 2000). For example, the ImageNet
|
| 35 |
+
|
| 36 |
+
data set contains many images of the same object with certain transformations, such as rotation, translation, projection and skeletonization. As a result, the degree of freedom of the ImageNet data set is significantly smaller than the data dimension (Gong et al., 2019).
|
| 37 |
+
|
| 38 |
+
The function space considered in Oono and Suzuki (2019) is the Hölder space in which functions are required to be differentiable everywhere up to certain order. In practice, the target function may not have high order derivatives. Function spaces with less restrictive conditions are more desirable. In this paper, we consider the Besov space $B_{p,q}^s$ , which is more general than the Hölder space. In particular, the Hölder and Sobolev spaces are special cases of the Besov space:
|
| 39 |
+
|
| 40 |
+
$$W^{s+\alpha,\infty} = \mathcal{H}^{s,\alpha} \subseteq B^{s+\alpha}_{\infty,\infty} \subseteq B^{s+\alpha}_{p,q}$$
|
| 41 |
+
|
| 42 |
+
for any $0 < p, q \le \infty, s \in \mathbb{N}$ and $\alpha \in (0,1]$ . For practical applications, it has been demonstrated in image processing that Besov norms can capture important features, such as edges (Jaffard et al., 2001). Due to the generality of the Besov space, it is shown in Suzuki and Nitanda (2019) that kernel ridge estimators have a sub-optimal rate when estimating Besov functions.
|
| 43 |
+
|
| 44 |
+
In this paper, we establish theoretical guarantees of ConvResNets for the approximation of Besov functions on a low-dimensional manifold, and a statistical theory on binary classification. Let $\mathcal{M}$ be a d-dimensional compact Riemannian manifold isometrically embedded in $\mathbb{R}^D$ . Denote the Besov space on $\mathcal{M}$ as $B_{p,q}^s(\mathcal{M})$ for $0 < p,q \le \infty$ and $0 < s < \infty$ . Our function approximation theory is established for $B_{p,q}^s(\mathcal{M})$ . For binary classification, we are given n i.i.d. samples $\{(\mathbf{x}_i,y_i)\}_{i=1}^n$ where $\mathbf{x}_i \in \mathcal{M}$ and $y_i \in \{-1,1\}$ is the label. The label y follows the Bernoulli-type distribution
|
| 45 |
+
|
| 46 |
+
$$\mathbb{P}(y = 1|\mathbf{x}) = \eta(\mathbf{x}), \ \mathbb{P}(y = -1|\mathbf{x}) = 1 - \eta(\mathbf{x})$$
|
| 47 |
+
|
| 48 |
+
for some $\eta: \mathcal{M} \to [0,1]$ . Our results (Theorem 1 and 2) are summarized as follows:
|
| 49 |
+
|
| 50 |
+
**Theorem** (informal). Assume $s \ge d/p + 1$ .
|
| 51 |
+
|
| 52 |
+
1. Given $\varepsilon \in (0,1)$ , we construct a ConvResNet architecture such that, for any $f^* \in B^s_{p,q}(\mathcal{M})$ , if the weight parameters of this ConvResNet are properly chosen, it gives rise to $\bar{f}$ satisfying
|
| 53 |
+
|
| 54 |
+
$$\|\bar{f} - f^*\|_{L^{\infty}} \le \varepsilon.$$
|
| 55 |
+
|
| 56 |
+
2. Assume $\eta \in B^s_{p,q}(\mathcal{M})$ . Let $f^*_{\phi}$ be the minimizer of the population logistic risk. If the ConvResNet architecture is properly chosen, minimizing the empirical logistic risk gives rise to $\widehat{f}_{\phi,n}$ with the following excess risk bound
|
| 57 |
+
|
| 58 |
+
$$\mathbb{E}(\mathcal{E}_{\phi}(\widehat{f}_{\phi,n}, f_{\phi}^*)) \le C n^{-\frac{s}{2s+2(s\vee d)}} \log^4 n,$$
|
| 59 |
+
|
| 60 |
+
where $\mathcal{E}_{\phi}(\widehat{f}_{\phi,n}, f_{\phi}^*)$ denotes the excess logistic risk of $\widehat{f}_{\phi,n}$ against $f_{\phi}^*$ and C is a constant independent of n.
|
| 61 |
+
|
| 62 |
+
We remark that the first part of the theorem above requires the network size to depend on the intrinsic dimension d and only weakly depend on D. The second part is built upon the first part and shows a fast convergence rate of the excess risk in terms of n where the exponent depends on d instead of D. Our results demonstrate that ConvResNets are adaptive to low-dimensional structures of data sets.
|
| 63 |
+
|
| 64 |
+
**Related work.** Approximation theories of FNNs with the ReLU activation have been established for Sobolev (Yarotsky, 2017), Hölder (Schmidt-Hieber, 2017) and Besov (Suzuki, 2019) spaces. The networks in these works have a certain cardinality constraint, i.e., the number of nonzero parameters is bounded by a certain constant, which requires a lot of effort in training.
|
| 65 |
+
|
| 66 |
+
Approximation theories of CNNs are developed in Zhou (2020b); Petersen and Voigtlaender (2020); Oono and Suzuki (2019). Among these works, Zhou (2020b) shows that CNNs can
|
| 67 |
+
|
| 68 |
+
approximate Sobolev functions in $W^{s,2}$ for $s \ge D/2 + 2$ with an arbitrary accuracy $\varepsilon \in (0,1)$ . The network in Zhou (2020b) has width increasing linearly with respect to depth and has depth growing in the order of $\varepsilon^{-2}$ as $\varepsilon$ decreases to 0. It is shown in Petersen and Voigtlaender (2020); Zhou (2020a) that any approximation error achieved by FNNs can be achieved by CNNs. Combining Zhou (2020a) and Yarotsky (2017), we can show that CNNs can approximate $W^{s,\infty}$ functions in $\mathbb{R}^D$ with arbitrary accuracy $\varepsilon$ . Such CNNs have the number of channels in the order of $\varepsilon^{-D/s}$ and a cardinality constraint. The only theory on ConvResNet can be found in Oono and Suzuki (2019), where an approximation theory for Hölder functions is proved for ConvResNets with fixed width.
|
| 69 |
+
|
| 70 |
+
Statistical theories for binary classification by FNNs are established with the hinge loss (Ohn and Kim, 2019; Hu et al., 2020) and the logistic loss (Kim et al., 2018). Among these works, Hu et al. (2020) uses a parametric model given by a teacher-student network. The non-parametric results in Ohn and Kim (2019); Kim et al. (2018) are cursed by the data dimension, and therefore require a large number of samples for high-dimensional data.
|
| 71 |
+
|
| 72 |
+
Binary classification by CNNs has been studied in Kohler et al. (2020); Kohler and Langer (2020); Nitanda and Suzuki (2018); Huang et al. (2018). Image binary classification is studied in Kohler et al. (2020); Kohler and Langer (2020) in which the probability function is assumed to be in a hierarchical max-pooling model class. ResNet type classifiers are considered in Nitanda and Suzuki (2018); Huang et al. (2018) while the generalization error is not given explicitly.
|
| 73 |
+
|
| 74 |
+
Low-dimensional structures of data sets are explored for neural networks in Chui and Mhaskar (2018); Shaham et al. (2018); Chen et al. (2019b,a); Schmidt-Hieber (2019); Nakada and Imaizumi (2019); Cloninger and Klock (2020); Chen et al. (2020); Montanelli and Yang (2020). These works show that, if data are near a low-dimensional manifold, the performance of FNNs depends on the intrinsic dimension of the manifold and only weakly depends on the data dimension. Our work focuses on ConvResNets for practical applications.
|
| 75 |
+
|
| 76 |
+
The networks in many aforementioned works have a cardinality constraint. From the computational perspective, training such networks requires substantial efforts (Han et al., 2016, 2015; Blalock et al., 2020). In comparison, the ConvResNet in Oono and Suzuki (2019) and this paper does not require any cardinality constraint. Additionally, our constructed network has a fixed filter size and a fixed number of channels, which is desirable for practical applications.
|
| 77 |
+
|
| 78 |
+
As a summary, we compare our approximation theory and existing results in Table 1.
|
| 79 |
+
|
| 80 |
+
The rest of this paper is organized as follows: In Section 2, we briefly introduce manifolds, Besov functions on manifolds and convolution. Our main results are presented in Section 3. We give a proof sketch in Section 4 and conclude this paper in Section 5.
|
| 81 |
+
|
| 82 |
+
# Method
|
| 83 |
+
|
| 84 |
+
**Notations**: We use bold lower-case letters to denote vectors, upper-case letters to denote matrices, calligraphic letters to denote tensors, sets and manifolds. For any x>0, we use $\lceil x \rceil$ to denote the smallest integer that is no less than x and use $\lfloor x \rfloor$ to denote the largest integer that is no larger than x. For any $a,b \in \mathbb{R}$ , we denote $a \lor b = \max(a,b)$ . For a function $f: \mathbb{R}^d \to \mathbb{R}$ and a set $\Omega \subset \mathbb{R}^d$ , we denote the restriction of f to $\Omega$ by $f|_{\Omega}$ . We use $\|f\|_{L^p}$ to denote the $L^p$ norm of f. We denote the Euclidean ball centered at $\mathbf{c}$ with radius $\omega$ by $B_{\omega}(\mathbf{c})$ .
|
| 85 |
+
|
| 86 |
+
We first introduce some concepts on manifolds. We refer the readers to Tu (2010); Lee (2006) for details. Throughout this paper, we let $\mathcal{M}$ be a d-dimensional Riemannian manifold isometrically embedded in $\mathbb{R}^D$ with $d \leq D$ . We begin with charts, atlases and the partition of unity.
|
| 87 |
+
|
| 88 |
+
**Definition 1** (Chart). A chart on $\mathcal{M}$ is a pair $(U, \phi)$ where $U \subset \mathcal{M}$ is open and $\phi : U \to \mathbb{R}^d$ is a homeomorphism (i.e., bijective with $\phi$ and $\phi^{-1}$ both continuous).
|
| 89 |
+
|
| 90 |
+
In a chart $(U, \phi)$ , U is called a coordinate neighborhood and $\phi$ is a coordinate system on U. A collection of charts which covers $\mathcal{M}$ is called an atlas of $\mathcal{M}$ .
|
| 91 |
+
|
| 92 |
+
**Definition 2** ( $C^k$ Atlas). A $C^k$ atlas for $\mathcal{M}$ is a collection of charts $\{(U_\alpha, \phi_\alpha)\}_{\alpha \in \mathcal{A}}$ which satisfies $\bigcup_{\alpha \in \mathcal{A}} U_\alpha = \mathcal{M}$ , and are pairwise $C^k$ compatible:
|
| 93 |
+
|
| 94 |
+
$$\phi_{\alpha} \circ \phi_{\beta}^{-1} : \phi_{\beta}(U_{\alpha} \cap U_{\beta}) \to \phi_{\alpha}(U_{\alpha} \cap U_{\beta}) \quad and \quad \phi_{\beta} \circ \phi_{\alpha}^{-1} : \phi_{\alpha}(U_{\alpha} \cap U_{\beta}) \to \phi_{\beta}(U_{\alpha} \cap U_{\beta})$$
|
| 95 |
+
|
| 96 |
+
are both $C^k$ for any $\alpha, \beta \in A$ . An atlas is called finite if it contains finitely many charts.
|
| 97 |
+
|
| 98 |
+
**Definition 3** (Smooth Manifold). A smooth manifold is a manifold $\mathcal{M}$ together with a $C^{\infty}$ atlas.
|
| 99 |
+
|
| 100 |
+
The Euclidean space, the torus and the unit sphere are examples of smooth manifolds. $C^s$ functions on a smooth manifold $\mathcal{M}$ are defined as follows:
|
| 101 |
+
|
| 102 |
+
**Definition 4** ( $C^s$ functions on $\mathcal{M}$ ). Let $\mathcal{M}$ be a smooth manifold and $f: \mathcal{M} \to \mathbb{R}$ be a function on $\mathcal{M}$ . We say f is a $C^s$ function on $\mathcal{M}$ , if for every chart $(U, \phi)$ on $\mathcal{M}$ , the function $f \circ \phi^{-1} : \phi(U) \to \mathbb{R}$ is a $C^s$ function.
|
| 103 |
+
|
| 104 |
+
We next define the $C^{\infty}$ partition of unity which is an important tool for the study of functions on manifolds.
|
| 105 |
+
|
| 106 |
+
**Definition 5** (Partition of Unity). A $C^{\infty}$ partition of unity on a manifold $\mathcal{M}$ is a collection of $C^{\infty}$ functions $\{\rho_{\alpha}\}_{{\alpha}\in\mathcal{A}}$ with $\rho_{\alpha}:\mathcal{M}\to[0,1]$ such that for any $\mathbf{x}\in\mathcal{M}$ ,
|
| 107 |
+
|
| 108 |
+
1. there is a neighbourhood of $\mathbf{x}$ where only a finite number of the functions in $\{\rho_{\alpha}\}_{{\alpha}\in\mathcal{A}}$ are nonzero, and
|
| 109 |
+
|
| 110 |
+
$$2. \sum_{\alpha \in \mathcal{A}} \rho_{\alpha}(\mathbf{x}) = 1.$$
|
| 111 |
+
|
| 112 |
+
An open cover of a manifold $\mathcal{M}$ is called locally finite if every $\mathbf{x} \in \mathcal{M}$ has a neighbourhood which intersects with a finite number of sets in the cover. The following proposition shows that a $C^{\infty}$ partition of unity for a smooth manifold always exists (Spivak, 1970, Chapter 2, Theorem 15).
|
| 113 |
+
|
| 114 |
+
**Proposition 1** (Existence of a $C^{\infty}$ partition of unity). Let $\{U_{\alpha}\}_{{\alpha}\in\mathcal{A}}$ be a locally finite cover of a smooth manifold $\mathcal{M}$ . There is a $C^{\infty}$ partition of unity $\{\rho_{\alpha}\}_{{\alpha}\in\mathcal{A}}$ such that $\operatorname{supp}(\rho_{\alpha})\subset U_{\alpha}$ .
|
| 115 |
+
|
| 116 |
+
Let $\{(U_{\alpha}, \phi_{\alpha})\}_{\alpha \in \mathcal{A}}$ be a $C^{\infty}$ atlas of $\mathcal{M}$ . Proposition 1 guarantees the existence of a partition of unity $\{\rho_{\alpha}\}_{\alpha \in \mathcal{A}}$ such that $\rho_{\alpha}$ is supported on $U_{\alpha}$ .
|
| 117 |
+
|
| 118 |
+
The reach of $\mathcal{M}$ introduced by Federer (Federer, 1959) is an important quantity defined below. Let $d(\mathbf{x}, \mathcal{M}) = \inf_{\mathbf{y} \in \mathcal{M}} ||\mathbf{x} - \mathbf{y}||_2$ be the distance from $\mathbf{x}$ to $\mathcal{M}$ .
|
| 119 |
+
|
| 120 |
+
**Definition 6** (Reach (Federer, 1959; Niyogi et al., 2008)). Define the set
|
| 121 |
+
|
| 122 |
+
$$G = \{ \mathbf{x} \in \mathbb{R}^D : \exists \text{ distinct } \mathbf{p}, \mathbf{q} \in \mathcal{M} \text{ such that } d(\mathbf{x}, \mathcal{M}) = ||\mathbf{x} - \mathbf{p}||_2 = ||\mathbf{x} - \mathbf{q}||_2 \}.$$
|
| 123 |
+
|
| 124 |
+
The closure of G is called the medial axis of $\mathcal{M}$ . The reach of $\mathcal{M}$ is defined as
|
| 125 |
+
|
| 126 |
+
$$\tau = \inf_{\mathbf{x} \in \mathcal{M}} \inf_{\mathbf{y} \in G} ||\mathbf{x} - \mathbf{y}||_2.$$
|
| 127 |
+
|
| 128 |
+
We illustrate manifolds with large and small reach in Figure 2. For example, a sphere of radius r has reach $\tau = r$, while a manifold that comes close to intersecting itself has a small reach.
|
| 129 |
+
|
| 130 |
+

|
| 131 |
+
|
| 132 |
+
Slow change: large $\tau$. Rapid change: small $\tau$.
|
| 133 |
+
|
| 134 |
+
Figure 2: Illustration of manifolds with large and small reach.
|
| 135 |
+
|
| 136 |
+
We next define Besov function spaces on $\mathcal{M}$ , which generalize more elementary function spaces such as the Sobolev and Hölder spaces. To define Besov functions, we first introduce the modulus of smoothness.
|
| 137 |
+
|
| 138 |
+
**Definition 7** (Modulus of Smoothness (DeVore and Lorentz, 1993; Suzuki, 2019)). Let $\Omega \subset \mathbb{R}^D$ . For a function $f: \mathbb{R}^D \to \mathbb{R}$ in $L^p(\Omega)$ with p > 0, the r-th modulus of smoothness of f is defined by
|
| 139 |
+
|
| 140 |
+
$$\begin{split} w_{r,p}(f,t) &= \sup_{\|\mathbf{h}\|_2 \leq t} \|\Delta_{\mathbf{h}}^r(f)\|_{L^p}, \ where \\ \Delta_{\mathbf{h}}^r(f)(\mathbf{x}) &= \begin{cases} \sum_{j=0}^r \binom{r}{j} (-1)^{r-j} f(\mathbf{x}+j\mathbf{h}) & if \ \mathbf{x} \in \Omega, \mathbf{x}+r\mathbf{h} \in \Omega, \\ 0 & otherwise. \end{cases} \end{split}$$
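To make the definition concrete, below is a minimal numerical sketch (Python/NumPy; all function names are ours and purely illustrative) of the forward difference $\Delta_{\mathbf{h}}^r(f)$; the modulus $w_{r,p}(f,t)$ is then the supremum over $\|\mathbf{h}\|_2 \le t$ of the $L^p(\Omega)$ norm of this quantity.

```python
import numpy as np
from math import comb

def forward_difference(f, x, h, r, in_omega):
    """Delta_h^r(f)(x): r-th forward difference; set to 0 if x or x + r*h falls outside Omega."""
    x, h = np.asarray(x, dtype=float), np.asarray(h, dtype=float)
    if not (in_omega(x) and in_omega(x + r * h)):
        return 0.0
    return sum(comb(r, j) * (-1) ** (r - j) * f(x + j * h) for j in range(r + 1))

# Example on Omega = [0,1]^2 with f(x) = ||x||_2^2: the second difference of a quadratic
# does not depend on x, reflecting its smoothness.
in_unit_cube = lambda x: bool(np.all((x >= 0) & (x <= 1)))
f = lambda x: float(np.dot(x, x))
print(forward_difference(f, np.array([0.2, 0.3]), np.array([0.1, 0.0]), r=2, in_omega=in_unit_cube))
```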
|
| 141 |
+
|
| 142 |
+
**Definition 8** (Besov Space $B_{p,q}^s(\Omega)$ ). For $0 < p,q \le \infty, s > 0, r = \lfloor s \rfloor + 1$ , define the seminorm $|\cdot|_{B_{p,q}^s}$ as
|
| 143 |
+
|
| 144 |
+
$$|f|_{B^s_{p,q}(\Omega)} := \begin{cases} \left(\int_0^\infty (t^{-s}w_{r,p}(f,t))^q \frac{dt}{t}\right)^{\frac{1}{q}} & \text{if } q < \infty, \\ \sup_{t>0} t^{-s}w_{r,p}(f,t) & \text{if } q = \infty. \end{cases}$$
|
| 145 |
+
|
| 146 |
+
The norm of the Besov space $B^s_{p,q}(\Omega)$ is defined as $\|f\|_{B^s_{p,q}(\Omega)} := \|f\|_{L^p(\Omega)} + |f|_{B^s_{p,q}(\Omega)}$ . The Besov space is $B^s_{p,q}(\Omega) = \{f \in L^p(\Omega) : \|f\|_{B^s_{p,q}(\Omega)} < \infty\}$ .
|
| 147 |
+
|
| 148 |
+
We next define $B_{p,q}^s$ functions on $\mathcal{M}$ (Geller and Pesenson, 2011; Triebel, 1983, 1992).
|
| 149 |
+
|
| 150 |
+
**Definition 9** ( $B_{p,q}^s$ Functions on $\mathcal{M}$ ). Let $\mathcal{M}$ be a compact smooth manifold of dimension d. Let $\{(U_i, \phi_i)\}_{i=1}^{C_{\mathcal{M}}}$ be a finite atlas on $\mathcal{M}$ and $\{\rho_i\}_{i=1}^{C_{\mathcal{M}}}$ be a partition of unity on $\mathcal{M}$ such that $\operatorname{supp}(\rho_i) \subset U_i$ . A function $f: \mathcal{M} \to \mathbb{R}$ is in $B_{p,q}^s(\mathcal{M})$ if
|
| 151 |
+
|
| 152 |
+
$$||f||_{B_{p,q}^s(\mathcal{M})} := \sum_{i=1}^{C_{\mathcal{M}}} ||(f\rho_i) \circ \phi_i^{-1}||_{B_{p,q}^s(\mathbb{R}^d)} < \infty. \tag{1}$$
|
| 154 |
+
|
| 155 |
+
Since $\rho_i$ is supported on $U_i$ , the function $(f\rho_i) \circ \phi_i^{-1}$ is supported on $\phi(U_i)$ . We can extend $(f\rho_i) \circ \phi_i^{-1}$ from $\phi(U_i)$ to $\mathbb{R}^d$ by setting the function to be 0 on $\mathbb{R}^d \setminus \phi(U_i)$ . The extended function lies in the Besov space $B_{p,q}^s(\mathbb{R}^d)$ (Triebel, 1992, Chapter 7).
|
| 156 |
+
|
| 157 |
+
In this paper, we consider one-sided stride-one convolution in our network. Let $\mathcal{W} = \{\mathcal{W}_{j,k,l}\} \in \mathbb{R}^{C' \times K \times C}$ be a filter where C' is the output channel size, K is the filter size and C is the input channel size. For $z \in \mathbb{R}^{D \times C}$ , the convolution of $\mathcal{W}$ with z gives $y \in \mathbb{R}^{D \times C'}$ such that
|
| 158 |
+
|
| 159 |
+
$$y = \mathcal{W} * z, \quad y_{i,j} = \sum_{k=1}^{K} \sum_{l=1}^{C} \mathcal{W}_{j,k,l}\, z_{i+k-1,l}, \tag{2}$$
|
| 161 |
+
|
| 162 |
+
where $1 \le i \le D$ , $1 \le j \le C'$ and we set $z_{i+k-1,l} = 0$ for i+k-1 > D, as demonstrated in Figure 3(a).
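For concreteness, the following is a minimal NumPy sketch of the one-sided stride-one convolution in (2); the function name and the explicit loops are ours, chosen for readability rather than efficiency.

```python
import numpy as np

def one_sided_conv(W, z):
    """Eq. (2): W has shape (C', K, C), z has shape (D, C); returns y of shape (D, C').
    Entries z_{i+k-1,l} with i+k-1 > D are treated as zero (one-sided zero padding)."""
    C_out, K, C_in = W.shape
    D = z.shape[0]
    z_pad = np.vstack([z, np.zeros((K - 1, C_in))])      # pad only at the far end
    y = np.zeros((D, C_out))
    for i in range(D):
        for j in range(C_out):
            # y_{i,j} = sum_{k=1}^{K} sum_{l=1}^{C} W_{j,k,l} z_{i+k-1,l}
            y[i, j] = np.sum(W[j] * z_pad[i:i + K])
    return y

# Example: D = 5, C = 2 input channels, C' = 3 output channels, filter size K = 3.
rng = np.random.default_rng(0)
y = one_sided_conv(rng.normal(size=(3, 3, 2)), rng.normal(size=(5, 2)))
print(y.shape)  # (5, 3)
```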
|
| 163 |
+
|
| 164 |
+
The building blocks of ConvResNets are residual blocks. For an input x, each residual block computes
|
| 165 |
+
|
| 166 |
+
$$\mathbf{x} + F(\mathbf{x})$$
|
| 167 |
+
|
| 168 |
+
where F is a subnetwork consisting of convolutional layers (see more details in Section 3.1). A residual block is demonstrated in Figure 3(b).
|
| 169 |
+
|
| 170 |
+

|
| 171 |
+
|
| 172 |
+
Figure 3: (a) Demonstration of $\mathcal{W}*z$, where the input is $z \in \mathbb{R}^{D \times C}$ and the output is $\mathcal{W}*z \in \mathbb{R}^{D \times C'}$ . Here $\mathcal{W} = \{\mathcal{W}_{j,k,l}\} \in \mathbb{R}^{C' \times K \times C}$ is a filter where C' is the output channel size, K is the filter size and C is the input channel size. $\mathcal{W}_{j,:,:}$ is a $K \times C$ matrix for the j-th output channel. (b) Demonstration of a residual block.
|
| 173 |
+
|
| 174 |
+
In this section, we first introduce the ConvResNet architecture, and then present our main results.
|
| 175 |
+
|
| 176 |
+
We study the ConvResNet with the rectified linear unit (ReLU) activation function: $\mathrm{ReLU}(z) = \max(z, 0)$. The ConvResNet we consider consists of a padding layer and several residual blocks followed by a fully connected feedforward layer.
|
| 177 |
+
|
| 178 |
+
We first define the padding layer. Given an input $A \in \mathbb{R}^{D \times C_1}$ , the network first applies a padding operator $P: \mathbb{R}^{D \times C_1} \to \mathbb{R}^{D \times C_2}$ for some integer $C_2 \ge C_1$ such that
|
| 179 |
+
|
| 180 |
+
$$Z = P(A) = \begin{bmatrix} A & \mathbf{0} & \cdots & \mathbf{0} \end{bmatrix} \in \mathbb{R}^{D \times C_2}.$$
|
| 181 |
+
|
| 182 |
+
Then the matrix Z is passed through M residual blocks. In the m-th block, let $\mathcal{W}_m = \{\mathcal{W}_m^{(1)},...,\mathcal{W}_m^{(L_m)}\}$ and $\mathcal{B}_m = \{B_m^{(1)},...,B_m^{(L_m)}\}$ be a collection of filters and biases. The m-th residual block maps a matrix from $\mathbb{R}^{D \times C}$ to $\mathbb{R}^{D \times C}$ by
|
| 183 |
+
|
| 184 |
+
$$Conv_{\mathcal{W}_{m},\mathcal{B}_{m}} + id$$
|
| 185 |
+
|
| 186 |
+
where id is the identity operator and
|
| 187 |
+
|
| 188 |
+
$$\operatorname{Conv}_{\mathcal{W}_m,\mathcal{B}_m}(Z) = \operatorname{ReLU}\left(\mathcal{W}_m^{(L_m)} * \cdots * \operatorname{ReLU}\left(\mathcal{W}_m^{(1)} * Z + B_m^{(1)}\right) \cdots + B_m^{(L_m)}\right), \tag{3}$$
|
| 189 |
+
|
| 190 |
+
with ReLU applied entrywise. Denote
|
| 191 |
+
|
| 192 |
+
$$Q(\mathbf{x}) = \left(\operatorname{Conv}_{\mathcal{W}_{M}, \mathcal{B}_{M}} + \operatorname{id}\right) \circ \cdots \circ \left(\operatorname{Conv}_{\mathcal{W}_{1}, \mathcal{B}_{1}} + \operatorname{id}\right) \circ P(\mathbf{x}). \tag{4}$$
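Under the same notation, a minimal sketch of (3) and (4) is given below; it assumes the one-sided convolution of (2) (e.g., the `one_sided_conv` sketch above) and filters whose shapes preserve the $(D, C_2)$ layout so the residual sum is well defined. The fully connected output layer of (6) would simply apply a linear map to the output of Q. All names are ours.

```python
import numpy as np

def relu(x):
    return np.maximum(x, 0.0)

def conv_block(Z, filters, biases, conv):
    """Conv_{W_m, B_m}(Z) of Eq. (3): convolution, bias, then entrywise ReLU, layer by layer."""
    for W, B in zip(filters, biases):
        Z = relu(conv(W, Z) + B)
    return Z

def convresnet_backbone(A, blocks, conv, C2):
    """Q(x) of Eq. (4): padding P followed by M residual blocks (Conv_{W_m, B_m} + id).
    A has shape (D, C1); each element of `blocks` is a (filters, biases) pair whose
    layers map R^{D x C2} back to R^{D x C2}."""
    D, C1 = A.shape
    Z = np.hstack([A, np.zeros((D, C2 - C1))])            # padding P: append zero channels
    for filters, biases in blocks:
        Z = Z + conv_block(Z, filters, biases, conv)      # residual connection
    return Z
```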
|
| 193 |
+
|
| 194 |
+
For networks only consisting of residual blocks, we define the network class as
|
| 195 |
+
|
| 196 |
+
$$\begin{aligned} C^{\text{Conv}}(M, L, J, K, \kappa) = \{Q \,|\, & Q(\mathbf{x}) \text{ is in the form of (4) with } M \text{ residual blocks, each block has} \\ & \text{filter size bounded by } K, \text{ number of channels bounded by } J, \\ & \max_{m} L_{m} \le L, \ \max_{m,l} \|\mathcal{W}_{m}^{(l)}\|_{\infty} \lor \|B_{m}^{(l)}\|_{\infty} \le \kappa \}, \end{aligned} \tag{5}$$
|
| 199 |
+
|
| 200 |
+
where $\|\cdot\|_{\infty}$ denotes $\ell^{\infty}$ norm of a vector, and for a tensor $\mathcal{W}$ , $\|\mathcal{W}\|_{\infty} = \max_{i,k,l} |\mathcal{W}_{i,k,l}|$ .
|
| 201 |
+
|
| 202 |
+
Based on the network Q in (4), a ConvResNet has an additional fully connected layer and can be expressed as
|
| 203 |
+
|
| 204 |
+
$$f(\mathbf{x}) = WQ(\mathbf{x}) + b \tag{6}$$
|
| 205 |
+
|
| 206 |
+
where W and b are the weight matrix and the bias in the fully connected layer. The class of ConvResNets is defined as
|
| 207 |
+
|
| 208 |
+
$$\begin{aligned} C(M, L, J, K, \kappa_1, \kappa_2, R) = \{ f \,|\, & f(\mathbf{x}) = WQ(\mathbf{x}) + b \text{ with } Q \in C^{\text{Conv}}(M, L, J, K, \kappa_1), \\ & \|W\|_{\infty} \vee |b| \leq \kappa_2, \ \|f\|_{L^{\infty}} \leq R \}. \end{aligned} \tag{7}$$
|
| 213 |
+
|
| 214 |
+
When there is no restriction on the output, we omit the parameter R and denote the network class by $C(M, L, J, K, \kappa_1, \kappa_2)$ .
|
| 215 |
+
|
| 216 |
+
Our approximation theory is based on the following assumptions on $\mathcal{M}$ and the target function $f^* : \mathcal{M} \to \mathbb{R}$ .
|
| 217 |
+
|
| 218 |
+
**Assumption 1.** $\mathcal{M}$ is a d-dimensional compact smooth Riemannian manifold isometrically embedded in $\mathbb{R}^D$ . There is a constant B such that for any $\mathbf{x} \in \mathcal{M}$ , $\|\mathbf{x}\|_{\infty} \leq B$ .
|
| 219 |
+
|
| 220 |
+
**Assumption 2.** *The reach of* $\mathcal{M}$ *is* $\tau > 0$ .
|
| 221 |
+
|
| 222 |
+
**Assumption 3.** Let $0 < p, q \le \infty$ , $d/p + 1 \le s < \infty$ . Assume $f^* \in B^s_{p,q}(\mathcal{M})$ and $||f^*||_{B^s_{p,q}(\mathcal{M})} \le c_0$ for a constant $c_0 > 0$ . Additionally, we assume $||f^*||_{L^\infty} \le R$ for a constant R > 0.
|
| 223 |
+
|
| 224 |
+
Assumption 3 implies that $f^*$ is Lipschitz continuous (Triebel, 1983, Section 2.7.1 Remark 2 and Section 3.3.1).
|
| 225 |
+
|
| 226 |
+
Our first result is the following universal approximation error of ConvResNets for Besov functions on $\mathcal{M}$ .
|
| 227 |
+
|
| 228 |
+

|
| 229 |
+
|
| 230 |
+
Figure 4: The ConvResNet in Theorem 1 contains a padding layer, *M* residual blocks, and a fully connected (FC) layer.
|
| 231 |
+
|
| 232 |
+
**Theorem 1.** Assume Assumption 1-3. For any $\varepsilon \in (0,1)$ and positive integer $K \in [2,D]$ , there is a ConvResNet architecture $C(M,L,J,K,\kappa_1,\kappa_2)$ such that, for any $f^* \in B^s_{p,q}(\mathcal{M})$ , if the weight parameters of this ConvResNet are properly chosen, the network yields a function $\bar{f} \in C(M,L,J,K,\kappa_1,\kappa_2)$ satisfying
|
| 233 |
+
|
| 234 |
+
$$\|\bar{f} - f^*\|_{L^{\infty}} \le \varepsilon. \tag{8}$$
|
| 235 |
+
|
| 236 |
+
Such a network architecture has
|
| 237 |
+
|
| 238 |
+
$$M = O\left(\varepsilon^{-d/s}\right), \ L = O(\log(1/\varepsilon) + D + \log D), \ J = O(D), \ \kappa_1 = O(1), \ \log \kappa_2 = O(\log^2(1/\varepsilon)). \tag{9}$$
|
| 239 |
+
|
| 240 |
+
The constant hidden in $O(\cdot)$ depends on d, s, $\frac{2d}{sp-d}$ , p, q, $c_0$ , $\tau$ and the surface area of $\mathcal{M}$ .
|
| 241 |
+
|
| 242 |
+
The architecture of the ConvResNet in Theorem 1 is illustrated in Figure 4. It has the following properties:
|
| 243 |
+
|
| 244 |
+
- The network has a fixed filter size and a fixed number of channels.
|
| 245 |
+
- There is no cardinality constraint.
|
| 246 |
+
- The network size depends on the intrinsic dimension d, and only weakly depends on D.
|
| 247 |
+
|
| 248 |
+
Theorem 1 can be compared with Suzuki (2019) on the approximation theory for Besov functions in $\mathbb{R}^D$ by FNNs as follows: (1) To universally approximate Besov functions in $\mathbb{R}^D$ with $\varepsilon$ error, the FNN constructed in Suzuki (2019) requires $O(\log(1/\varepsilon))$ depth, $O(\varepsilon^{-D/s})$ width and $O(\varepsilon^{-D/s}\log(1/\varepsilon))$ nonzero parameters. By exploiting the manifold model, our network size depends on the intrinsic dimension d and weakly depends on D. (2) The ConvResNet in Theorem 1 does not require any cardinality constraint, while such a constraint is needed in Suzuki (2019).
|
| 249 |
+
|
| 250 |
+
We next consider binary classification on $\mathcal{M}$ . For any $\mathbf{x} \in \mathcal{M}$ , denote its label by $y \in \{-1,1\}$ . The label y follows the following Bernoulli-type distribution
|
| 251 |
+
|
| 252 |
+
$$\mathbb{P}(y=1|\mathbf{x}) = \eta(\mathbf{x}), \ \mathbb{P}(y=-1|\mathbf{x}) = 1 - \eta(\mathbf{x}) \tag{10}$$
|
| 253 |
+
|
| 254 |
+
for some $\eta: \mathcal{M} \to [0,1]$ .
|
| 255 |
+
|
| 256 |
+
We assume the following data model:
|
| 257 |
+
|
| 258 |
+
**Assumption 4.** We are given an i.i.d. sample $\{(\mathbf{x}_i, y_i)\}_{i=1}^n$ , where $\mathbf{x}_i \in \mathcal{M}$ , and the $y_i$ 's are sampled according to (10).
|
| 259 |
+
|
| 260 |
+
In binary classification, a classifier f predicts the label of $\mathbf{x}$ as $\mathrm{sign}(f(\mathbf{x}))$ . To learn the optimal classifier, we consider the logistic loss $\phi(z) = \log(1 + \exp(-z))$ . The logistic risk $\mathcal{E}_{\phi}(f)$ of a classifier f is defined as
|
| 261 |
+
|
| 262 |
+
$$\mathcal{E}_{\phi}(f) = \mathbb{E}(\phi(y f(\mathbf{x}))). \tag{11}$$
|
| 263 |
+
|
| 264 |
+
The minimizer of $\mathcal{E}_{\phi}(f)$ is denoted by $f_{\phi}^*$ , which satisfies
|
| 265 |
+
|
| 266 |
+
$$f_{\phi}^{*}(\mathbf{x}) = \log \frac{\eta(\mathbf{x})}{1 - \eta(\mathbf{x})}. \tag{12}$$
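Identity (12) follows from a pointwise minimization of the conditional risk; we include the short, standard computation below for completeness (it is not taken verbatim from the paper).

$$\begin{aligned} \mathbb{E}\big(\phi(y f(\mathbf{x}))\,\big|\,\mathbf{x}\big) &= \eta(\mathbf{x})\log\big(1 + e^{-f(\mathbf{x})}\big) + \big(1-\eta(\mathbf{x})\big)\log\big(1 + e^{f(\mathbf{x})}\big), \\ \frac{\partial}{\partial f}\,\mathbb{E}\big(\phi(y f)\,\big|\,\mathbf{x}\big) &= -\frac{\eta(\mathbf{x})}{1 + e^{f}} + \frac{1-\eta(\mathbf{x})}{1 + e^{-f}} = 0 \;\Longleftrightarrow\; e^{f} = \frac{\eta(\mathbf{x})}{1-\eta(\mathbf{x})} \;\Longleftrightarrow\; f = \log\frac{\eta(\mathbf{x})}{1-\eta(\mathbf{x})}. \end{aligned}$$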
|
| 268 |
+
|
| 269 |
+
For any classifier f, we define its logistic excess risk as
|
| 270 |
+
|
| 271 |
+
$$\mathcal{E}_{\phi}(f, f_{\phi}^*) = \mathcal{E}_{\phi}(f) - \mathcal{E}_{\phi}(f_{\phi}^*). \tag{13}$$
|
| 272 |
+
|
| 273 |
+
In this paper, we consider ConvResNets with the following architecture:
|
| 274 |
+
|
| 275 |
+
$$\begin{aligned} C^{(n)} = \big\{ f \,|\, f = \bar{g}_2 \circ \bar{h} \circ \bar{g}_1 \circ \bar{\eta} \text{ where } & \bar{\eta} \in C^{\text{Conv}}(M_1, L_1, J_1, K, \kappa_1), \ \bar{g}_1 \in C^{\text{Conv}}(1, 4, 8, 1, \kappa_2), \\ & \bar{h} \in C^{\text{Conv}}(M_2, L_2, J_2, 1, \kappa_1), \ \bar{g}_2 \in C(1, 3, 8, 1, \kappa_3, 1, R) \big\}, \end{aligned} \tag{14}$$
|
| 277 |
+
|
| 278 |
+
where $M_1, M_2, L_1, L_2, J_1, J_2, K, \kappa_1, \kappa_2, \kappa_3, R$ are parameters to be determined.
|
| 279 |
+
|
| 280 |
+
The empirical classifier is learned by minimizing the empirical logistic risk:
|
| 281 |
+
|
| 282 |
+
$$\widehat{f}_{\phi,n} = \underset{f \in \mathcal{C}^{(n)}}{\operatorname{argmin}} \frac{1}{n} \sum_{i=1}^{n} \phi(y_i f(\mathbf{x}_i)). \tag{15}$$
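A minimal sketch of the objective in (15) is shown below (Python/NumPy, names ours); in practice the minimization over $C^{(n)}$ would be carried out by gradient-based training of the ConvResNet, which we do not reproduce here.

```python
import numpy as np

def empirical_logistic_risk(f, X, y):
    """(1/n) sum_i phi(y_i f(x_i)) with phi(z) = log(1 + exp(-z)), as in Eq. (15)."""
    margins = np.array([yi * f(xi) for xi, yi in zip(X, y)], dtype=float)
    return float(np.mean(np.logaddexp(0.0, -margins)))   # numerically stable log(1 + e^{-z})

def predict_label(f, x):
    """The classifier induced by f predicts sign(f(x)) in {-1, +1}."""
    return 1 if f(x) >= 0 else -1
```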
|
| 283 |
+
|
| 284 |
+
We establish an upper bound on the excess risk of $\widehat{f_{\phi,n}}$ :
|
| 285 |
+
|
| 286 |
+
**Theorem 2.** Assume Assumption 1, 2 and 4. Assume $0 < p, q \le \infty$ , $0 < s < \infty$ , $s \ge d/p + 1$ and $\eta \in B^s_{p,q}(\mathcal{M})$ with $\|\eta\|_{B^s_{p,q}} \le c_0$ for some constant $c_0$ . For any $2 \le K \le D$ , we set
|
| 287 |
+
|
| 288 |
+
$$M_{1} = O\left(n^{\frac{2d}{s+2(s \vee d)}}\right), \ M_{2} = O\left(n^{\frac{2s}{s+2(s \vee d)}}\right), \ L_{1} = O(\log(1/\varepsilon) + D + \log D), \ L_{2} = O(\log(1/\varepsilon)),$$
|
| 289 |
+
|
| 290 |
+
$$J_{1} = O(D), \ J_{2} = O(1), \ \kappa_{1} = O(1), \ \log \kappa_{2} = O(\log^{2} n), \ \kappa_{3} = O(\log n), \ R = O(\log n)$$
|
| 291 |
+
|
| 292 |
+
for $C^{(n)}$ . Then
|
| 293 |
+
|
| 294 |
+
$$\mathbb{E}(\mathcal{E}_{\phi}(\widehat{f}_{\phi,n}, f_{\phi}^{*})) \le C n^{-\frac{s}{2s + 2(s \lor d)}} \log^{4} n \tag{16}$$
|
| 295 |
+
|
| 296 |
+
for some constant C. Here C is linear in $D \log D$ and additionally depends on $d, s, \frac{2d}{sp-d}, p, q, c_0, \tau$ and the surface area of $\mathcal{M}$. The constant hidden in $O(\cdot)$ depends on $d, s, \frac{2d}{sp-d}, p, q, c_0, \tau$ and the surface area of $\mathcal{M}$.
|
| 297 |
+
|
| 298 |
+
Theorem 2 shows that a properly designed ConvResNet gives rise to an empirical classifier, of which the excess risk converges at a fast rate with an exponent depending on the intrinsic dimension d, instead of D.
|
| 299 |
+
|
| 300 |
+
Theorem 2 is proved in Appendix A. Each building block of $C^{(n)}$ is constructed for the following purpose:
|
| 301 |
+
|
| 302 |
+
- $\bar{g}_1 \circ \bar{\eta}$ is designed to approximate a truncated $\eta$ on $\mathcal{M}$ , which is realized by Theorem 1.
|
| 303 |
+
- $\bar{g}_2 \circ \bar{h}$ is designed to approximate a truncated univariate function $\log \frac{z}{1-z}$ .
|
| 304 |
+
|
| 305 |
+
We provide a proof sketch of Theorem 1 in this section. More technical details are deferred to Appendix C.
|
| 306 |
+
|
| 307 |
+
We prove Theorem 1 in the following four steps:
|
| 308 |
+
|
| 309 |
+
- 1. Decompose $f^* = \sum_i f_i$ as a sum of locally supported functions according to the manifold structure.
|
| 310 |
+
- 2. Locally approximate each $f_i$ using cardinal B-splines.
|
| 311 |
+
- 3. Implement the cardinal B-splines using CNNs.
|
| 312 |
+
- 4. Implement the sum of all CNNs by a ConvResNet for approximating $f^*$ .
|
| 313 |
+
|
| 314 |
+

|
| 315 |
+
|
| 316 |
+
Figure 5: An atlas given by covering $\mathcal{M}$ using Euclidean balls.
|
| 317 |
+
|
| 318 |
+
**Step 1: Decompose $f^*$ as a sum of locally supported functions.**

• Construct an atlas on $\mathcal{M}$ . Since the manifold $\mathcal{M}$ is compact, we can cover $\mathcal{M}$ by a finite collection of open balls $B_{\omega}(\mathbf{c}_i)$ for $i=1,\ldots,C_{\mathcal{M}}$, where $\mathbf{c}_i$ is the center of the ball and $\omega$ is the radius to be chosen later. Accordingly, the manifold is partitioned as $\mathcal{M}=\bigcup_i U_i$ with $U_i=B_{\omega}(\mathbf{c}_i)\cap\mathcal{M}$ . We choose $\omega<\tau/2$ such that $U_i$ is diffeomorphic to an open subset of $\mathbb{R}^d$ (Niyogi et al., 2008, Lemma 5.4). The total number of partitions is then bounded by $C_{\mathcal{M}} \leq \left\lceil \frac{\mathrm{SA}(\mathcal{M})}{\omega^d} T_d \right\rceil$ , where $\mathrm{SA}(\mathcal{M})$ is the surface area of $\mathcal{M}$ and $T_d$ is the average number of $U_i$ 's that contain a given point on $\mathcal{M}$ (Conway et al., 1987, Chapter 2 Equation (1)).
|
| 319 |
+
|
| 320 |
+
On each partition, we define a projection-based transformation $\phi_i$ as
|
| 321 |
+
|
| 322 |
+
$$\phi_i(\mathbf{x}) = a_i V_i^{\top}(\mathbf{x} - \mathbf{c}_i) + \mathbf{b}_i,$$
|
| 323 |
+
|
| 324 |
+
where the scaling factor $a_i \in \mathbb{R}$ and the shifting vector $\mathbf{b}_i \in \mathbb{R}^d$ ensure $\phi_i(U_i) \subset [0,1]^d$ , and the column vectors of $V_i \in \mathbb{R}^{D \times d}$ form an orthonormal basis of the tangent space $T_{\mathbf{c}_i}(\mathcal{M})$ . The atlas on $\mathcal{M}$ is the collection $\{(U_i, \phi_i)\}$ for $i = 1, \ldots, C_{\mathcal{M}}$ . See Figure 5 for a graphical illustration of the atlas.
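The transformation $\phi_i$ is a scaled orthogonal projection onto the tangent space at $\mathbf{c}_i$; a short sketch (names ours) is:

```python
import numpy as np

def chart_projection(x, c_i, V_i, a_i, b_i):
    """phi_i(x) = a_i * V_i^T (x - c_i) + b_i, mapping U_i subset of R^D into [0,1]^d.
    V_i has shape (D, d) with orthonormal columns spanning the tangent space T_{c_i}(M);
    a_i and b_i are chosen so that the image of U_i lies in the unit cube."""
    return a_i * V_i.T @ (x - c_i) + b_i
```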
|
| 325 |
+
|
| 326 |
+
• **Decompose** $f^*$ according to the atlas. We decompose $f^*$ as
|
| 327 |
+
|
| 328 |
+
$$f^* = \sum_{i=1}^{C_{\mathcal{M}}} f_i \quad \text{with} \quad f_i = f^* \rho_i, \tag{17}$$
|
| 329 |
+
|
| 330 |
+
where $\{\rho_i\}_{i=1}^{C_{\mathcal{M}}}$ is a $C^{\infty}$ partition of unity with $\operatorname{supp}(\rho_i) \subset U_i$ . The existence of such a $\{\rho_i\}_{i=1}^{C_{\mathcal{M}}}$ is guaranteed by Proposition 1. As a result, each $f_i$ is supported on a subset of $U_i$ , and therefore, we can rewrite (17) as
|
| 331 |
+
|
| 332 |
+
$$f^* = \sum_{i=1}^{C_{\mathcal{M}}} (f_i \circ \phi_i^{-1}) \circ \phi_i \times \mathbb{1}_{U_i} \quad \text{with} \quad f_i = f^* \rho_i, \tag{18}$$
|
| 334 |
+
|
| 335 |
+
where $\mathbb{1}_{U_i}$ is the indicator function of $U_i$ . Since $\phi_i$ is a bijection between $U_i$ and $\phi_i(U_i)$ , $f_i \circ \phi_i^{-1}$ is supported on $\phi_i(U_i) \subset [0,1]^d$ . We extend $f_i \circ \phi_i^{-1}$ on $[0,1]^d \setminus \phi_i(U_i)$ by 0. The extended function is in $B_{p,q}^s([0,1]^d)$ (see Lemma 4 in Appendix C.1). This allows us to use cardinal B-splines to locally approximate each $f_i \circ \phi_i^{-1}$ as detailed in **Step 2**.
|
| 336 |
+
|
| 337 |
+
Step 2: Local cardinal B-spline approximation. We approximate $f_i \circ \phi_i^{-1}$ using cardinal B-splines $\widetilde{f_i}$ as
|
| 338 |
+
|
| 339 |
+
$$f_i \circ \phi_i^{-1} \approx \widetilde{f_i} \equiv \sum_{j=1}^N \widetilde{f_{i,j}} \quad \text{with} \quad \widetilde{f_{i,j}} = \alpha_{k,\mathbf{j}}^{(i)} M_{k,\mathbf{j},m}^d, \tag{19}$$
|
| 341 |
+
|
| 342 |
+
where $\alpha_{k,\mathbf{j}}^{(i)} \in \mathbb{R}$ is a coefficient and $M_{k,\mathbf{j},m}^d : [0,1]^d \to \mathbb{R}$ denotes a cardinal B-spline with indices $k,m \in \mathbb{N}^+$ , $\mathbf{j} \in \mathbb{R}^d$ . Here k is a scaling factor, $\mathbf{j}$ is a shifting vector, m is the degree of the B-spline and d is the dimension (see a formal definition in Appendix C.2).
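For readers unfamiliar with cardinal B-splines, the sketch below evaluates a univariate cardinal B-spline via the standard de Boor recursion and forms the scaled, shifted tensor product $\prod_{i} M_m(2^k x_i - j_i)$, which is the usual form such bases take; the precise convention in Appendix C.2 of the paper may differ, so this is only an illustration (names ours).

```python
import numpy as np

def cardinal_bspline(x, m):
    """Order-m cardinal B-spline supported on [0, m], via the de Boor recursion."""
    x = np.asarray(x, dtype=float)
    if m == 1:
        return np.where((x >= 0.0) & (x < 1.0), 1.0, 0.0)
    return (x * cardinal_bspline(x, m - 1)
            + (m - x) * cardinal_bspline(x - 1.0, m - 1)) / (m - 1)

def tensor_bspline(x, k, j, m):
    """A scaled/shifted tensor-product B-spline: prod_i M_m(2^k x_i - j_i) for x in [0,1]^d."""
    x, j = np.asarray(x, dtype=float), np.asarray(j, dtype=float)
    return float(np.prod(cardinal_bspline(2.0 ** k * x - j, m)))
```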
|
| 343 |
+
|
| 344 |
+
Since $s \ge d/p + 1$ (by Assumption 3), setting $r = +\infty$ , $m = \lceil s \rceil + 1$ in Lemma 5 (see Appendix C.3) and applying Lemma 4 gives
|
| 345 |
+
|
| 346 |
+
$$\|\widetilde{f_i} - f_i \circ \phi_i^{-1}\|_{L^{\infty}} \le Cc_0 N^{-s/d} \tag{20}$$
|
| 347 |
+
|
| 348 |
+
for some constant C depending on s, p, q and d.
|
| 349 |
+
|
| 350 |
+
Combining (18) and (19), we approximate $f^*$ by
|
| 351 |
+
|
| 352 |
+
$$\widetilde{f}^* \equiv \sum_{i=1}^{C_{\mathcal{M}}} \widetilde{f_i} \circ \phi_i \times \mathbb{1}_{U_i} = \sum_{i=1}^{C_{\mathcal{M}}} \sum_{j=1}^{N} \widetilde{f_{i,j}} \circ \phi_i \times \mathbb{1}_{U_i}. \tag{21}$$
|
| 354 |
+
|
| 355 |
+
Such an approximation has error
|
| 356 |
+
|
| 357 |
+
$$\|\widetilde{f}^* - f^*\|_{L^{\infty}} \le C C_{\mathcal{M}} c_0 N^{-s/d}.$$
|
| 358 |
+
|
| 359 |
+
- Step 3: Implement local approximations in Step 2 by CNNs. In Step 2, (21) gives a natural approximation of $f^*$ . In the sequel, we aim to implement all ingredients of $\widetilde{f_{i,j}} \circ \phi_i \times \mathbb{1}_{U_i}$ using CNNs. In particular, we show that CNNs can implement the cardinal B-spline $\widetilde{f_{i,j}}$ , the linear projection $\phi_i$ , the indicator function $\mathbb{1}_{U_i}$ , and the multiplication operation.
|
| 360 |
+
- Implement $\mathbb{1}_{U_i}$ by CNNs. Recall our construction of $U_i$ in Step 1. For any $\mathbf{x} \in \mathcal{M}$ , we have $\mathbb{1}_{U_i}(\mathbf{x}) = 1$ if $d_i^2(\mathbf{x}) = ||\mathbf{x} - \mathbf{c}_i||_2^2 \le \omega^2$ ; otherwise $\mathbb{1}_{U_i}(\mathbf{x}) = 0$ .
|
| 361 |
+
|
| 362 |
+
To implement $\mathbb{1}_{U_i}$ , we rewrite it as the composition of a univariate indicator function $\mathbb{1}_{[0,\omega^2]}$ and the distance function $d_i^2$ :
|
| 363 |
+
|
| 364 |
+
$$\mathbb{1}_{U_i}(\mathbf{x}) = \mathbb{1}_{[0,\omega^2]} \circ d_i^2(\mathbf{x}) \quad \text{for} \quad \mathbf{x} \in \mathcal{M}. \tag{22}$$
|
| 366 |
+
|
| 367 |
+
We show that CNNs can efficiently implement both $\mathbb{1}_{[0,\omega^2]}$ and $d_i^2$ . Specifically, given $\theta \in (0,1)$ and $\Delta \geq 8DB^2\theta$ , there exist CNNs that yield functions $\widetilde{\mathbb{1}}_{\Delta}$ and $\widetilde{d}_i^2$ satisfying
|
| 368 |
+
|
| 369 |
+
$$\|\widetilde{d}_{i}^{2} - d_{i}^{2}\|_{L^{\infty}} \le 4B^{2}D\theta \tag{23}$$
|
| 370 |
+
|
| 371 |
+
and
|
| 372 |
+
|
| 373 |
+
$$\widetilde{\mathbb{1}}_{\Delta} \circ \widetilde{d}_{i}^{2}(\mathbf{x}) = \begin{cases} 1, & \text{if } \mathbf{x} \in U_{i}, \ d_{i}^{2}(\mathbf{x}) \leq \omega^{2} - \Delta, \\ 0, & \text{if } \mathbf{x} \notin U_{i}, \\ \text{between 0 and 1}, & \text{otherwise.} \end{cases} \tag{24}$$
|
| 378 |
+
|
| 379 |
+
We also characterize the network sizes for realizing $\widetilde{\mathbb{1}}_{\Delta}$ and $\widetilde{d}_i^2$ : The network for $\widetilde{\mathbb{1}}_{\Delta}$ has $O(\log(\omega^2/\Delta))$ layers, 2 channels and all weight parameters bounded by $\max(2, |\omega^2 - 4B^2D\theta|)$ ; the network for $\widetilde{d}_i^2$ has $O(\log(1/\theta) + D)$ layers, 6D channels and all weight parameters bounded by $4B^2$ . More technical details are provided in Lemma 9 in Appendix C.6.
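One simple way to realize the behavior in (24) with ReLUs is the piecewise-linear ramp $\widetilde{\mathbb{1}}_{\Delta}(t) = \big(\mathrm{ReLU}(\omega^2 - t) - \mathrm{ReLU}(\omega^2 - \Delta - t)\big)/\Delta$, which equals 1 for $t \le \omega^2 - \Delta$, 0 for $t \ge \omega^2$, and lies in between otherwise. The paper's exact construction is in Lemma 9, so the sketch below (names ours) is only indicative.

```python
import numpy as np

def relu(t):
    return np.maximum(t, 0.0)

def soft_indicator(t, omega_sq, delta):
    """Two-ReLU ramp: 1 on (-inf, omega_sq - delta], 0 on [omega_sq, inf), linear in between."""
    return (relu(omega_sq - t) - relu(omega_sq - delta - t)) / delta
```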
|
| 380 |
+
|
| 381 |
+
• **Implement** $f_{i,j} \circ \phi_i$ **by CNNs.** Since $\phi_i$ is a linear projection, it can be realized by a single-layer perceptron. By Lemma 8 (see Appendix C.5), this single-layer perceptron can be realized by a CNN, denoted by $\phi_i^{\text{CNN}}$ .
|
| 382 |
+
|
| 383 |
+
For $f_{i,j}$ , Proposition 3 (see Appendix C.8) shows that for any $\delta \in (0,1)$ and $2 \le K \le d$ , there exists a CNN $\widetilde{f}_{i,j}^{\text{CNN}} \in \mathcal{F}^{\text{CNN}}(L,J,K,\kappa,\kappa)$ with
|
| 384 |
+
|
| 385 |
+
$$L = O\left(\log \frac{1}{\delta}\right), J = O(1), \kappa = O\left(\delta^{-(\log 2)(\frac{2d}{sp-d} + \frac{c_1}{d})}\right)$$
|
| 386 |
+
|
| 387 |
+
such that when setting $N = C_1 \delta^{-d/s}$ , we have
|
| 388 |
+
|
| 389 |
+
$$\left\| \sum_{j=1}^{N} \widetilde{f}_{i,j}^{\text{CNN}} - f_i \circ \phi_i^{-1} \right\|_{L^{\infty}(\phi_i(U_i))} \le \delta, \tag{25}$$
|
| 390 |
+
|
| 391 |
+
where $C_1$ is a constant depending on s, p, q and d. The constant hidden in $O(\cdot)$ depends on $d, s, \frac{2d}{sp-d}, p, q, c_0$ . The CNN class $\mathcal{F}^{\text{CNN}}$ is defined in Appendix B.
|
| 392 |
+
|
| 393 |
+
• Implement the multiplication × by a CNN. According to Lemma 7 (see Appendix C.4) and Lemma 8, for any $\eta \in (0,1)$ , the multiplication operation × can be approximated by a CNN $\widetilde{\times}$ with $L^{\infty}$ error $\eta$ :
|
| 394 |
+
|
| 395 |
+
$$||a \times b - \widetilde{\times}(a, b)||_{L^{\infty}} \le \eta. \tag{26}$$
|
| 396 |
+
|
| 397 |
+
Such a CNN has $O(\log 1/\eta)$ layers and 6 channels. All parameters are bounded by $\max(2c_0^2, 1)$ .
|
| 398 |
+
|
| 399 |
+
**Step 4: Implement** $\widetilde{f}^*$ **by a ConvResNet.** We assemble all CNN approximations in **Step 3** together and show that the whole approximation can be realized by a ConvResNet.
|
| 400 |
+
|
| 401 |
+
• Assemble all ingredients together. Assembling all CNN approximations together gives an approximation of $\widetilde{f_{i,j}} \circ \phi_i \times \mathbb{1}_{U_i}$ as
|
| 402 |
+
|
| 403 |
+
$$\mathring{f}_{i,j} \equiv \widetilde{\times} \left( \widetilde{f}_{i,j}^{\text{CNN}} \circ \phi_i^{\text{CNN}}, \widetilde{\mathbb{1}}_{\Delta} \circ \widetilde{d}_i^2 \right). \tag{27}$$
|
| 404 |
+
|
| 405 |
+
After substituting (27) into (21), we approximate the target function $f^*$ by
|
| 406 |
+
|
| 407 |
+
$$\mathring{f} = \sum_{i=1}^{C_{\mathcal{M}}} \sum_{j=1}^{N} \mathring{f}_{i,j}. \tag{28}$$
|
| 409 |
+
|
| 410 |
+
The approximation error of $\mathring{f}$ is analyzed in Lemma 12 (see Appendix C.9). According to Lemma 12, the approximation error can be bounded as follows:
|
| 411 |
+
|
| 412 |
+
$$\|\mathring{f} - f^*\|_{L^{\infty}} \leq \sum_{i=1}^{C_{\mathcal{M}}} (A_{i,1} + A_{i,2} + A_{i,3}) \quad \text{with}$$
|
| 413 |
+
|
| 414 |
+
$$A_{i,1} = \sum_{j=1}^{N} \left\| \widetilde{\times} (\widetilde{f}_{i,j}^{\text{CNN}} \circ \phi_i^{\text{CNN}}, \widetilde{\mathbb{I}}_{\Delta} \circ \widetilde{d}_i^2) - (\widetilde{f}_{i,j}^{\text{CNN}} \circ \phi_i^{\text{CNN}}) \times (\widetilde{\mathbb{I}}_{\Delta} \circ \widetilde{d}_i^2) \right\|_{L^{\infty}} \leq N\eta,$$
|
| 415 |
+
|
| 416 |
+
$$A_{i,2} = \left\| \left( \sum_{j=1}^{N} \left( \widetilde{f}_{i,j}^{\text{CNN}} \circ \phi_i^{\text{CNN}} \right) \right) \times (\widetilde{\mathbb{I}}_{\Delta} \circ \widetilde{d}_i^2) - f_i \times (\widetilde{\mathbb{I}}_{\Delta} \circ \widetilde{d}_i^2) \right\|_{L^{\infty}} \leq \delta,$$
|
| 417 |
+
|
| 418 |
+
$$A_{i,3} = \|f_i \times (\widetilde{\mathbb{I}}_{\Delta} \circ \widetilde{d}_i^2) - f_i \times \mathbb{1}_{U_i} \|_{L^{\infty}} \leq \frac{c(\pi + 1)}{\omega(1 - \omega/\tau)} \Delta,$$
|
| 419 |
+
|
| 420 |
+
where $\delta$ , $\eta$ , $\Delta$ and $\theta$ are defined in (25), (26), (24) and (23), respectively. For any $\varepsilon \in (0, 1)$ , with properly chosen $\delta$ , $\eta$ , $\Delta$ and $\theta$ as in (53) in Lemma 12, one has
|
| 421 |
+
|
| 422 |
+
$$\|\mathring{f} - f^*\|_{L^{\infty}} \le \varepsilon. \tag{29}$$
|
| 423 |
+
|
| 424 |
+
With these choices, the network size of each CNN is quantified in Appendix C.10.
|
| 425 |
+
|
| 426 |
+
• **Realize** $\mathring{f}$ **by a ConvResNet.** Lemma 17 (see Appendix C.15) shows that for every $\mathring{f}_{i,j}$ , there exists $\bar{f}_{i,j}^{\text{CNN}} \in \mathcal{F}^{\text{CNN}}(L,J,K,\kappa_1,\kappa_2)$ with $L = O(\log 1/\varepsilon + D + \log D), J = O(D), \kappa_1 = O(1), \log \kappa_2 = O(\log^2 1/\varepsilon)$ such that $\bar{f}_{i,j}^{\text{CNN}}(\mathbf{x}) = \mathring{f}_{i,j}(\mathbf{x})$ for any $\mathbf{x} \in \mathcal{M}$ . As a result, the function $\mathring{f}$ in (28) can be expressed as a sum of CNNs:
|
| 427 |
+
|
| 428 |
+
$$\mathring{f} = \bar{f}^{\text{CNN}} \equiv \sum_{i=1}^{C_{\mathcal{M}}} \sum_{j=1}^{N} \bar{f}_{i,j}^{\text{CNN}}, \tag{30}$$
|
| 430 |
+
|
| 431 |
+
where N is chosen of order $O\left(\varepsilon^{-d/s}\right)$ (see Proposition 3 and Lemma 12). Lemma 18 (see Appendix C.16) shows that $\bar{f}^{\text{CNN}}$ can be realized by $\bar{f} \in \mathcal{C}(M, L, J, K, \kappa_1, \kappa_2)$ with
|
| 432 |
+
|
| 433 |
+
$$M = O\left(\varepsilon^{-d/s}\right), L = O(\log(1/\varepsilon) + D + \log D), J = O(D), \kappa_1 = O(1), \log \kappa_2 = O\left(\log^2(1/\varepsilon)\right)$$
|
2201.10222/main_diagram/main_diagram.drawio
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
<mxfile host="app.diagrams.net" modified="2021-10-05T21:43:27.244Z" agent="5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/93.0.4577.82 Safari/537.36" etag="bEsGMCEOLvNv_kAmQw9R" version="15.3.5"><diagram id="NcSe2i6TBRhNJo6g24qH" name="Page-1">7V1tc+M4cv41qux9sIoACL58tD3jS1K3qalMJdn9lJJljke7tuhQ8s74fn1IiqBIAJSaNl+aYnuubilSIiU83UD3043uhbh9/vn3ZPXy/df4IXpacOfh50J8WnDOhO/66X+zU2+HU6FwnMOZx2TzULzteOLr5p9RcVK97XXzEO1qb9zH8dN+81I/uY6322i9r51bJUn8o/62b/FT/akvq8fIOPF1vXoyz/7P5mH//XA2kM7x/L9Gm8fv6slM/b7nlXpzcWL3ffUQ/6icEp8X4jaJ4/3h6PnnbfSUDZ8al8Pn7hqull8sibZ7yAf+8+av1f+8vvzHfz3/mdw4/7H7Gnx9uZKsuM9fq6fX4icvuPeU3vHmW5zeOP3e+7diMLz/e43VhatdDtV1+gbuOC8/j1fTo8fiv/lt7ssTd84iFIuAL27u1MX0697rH9i9rLZ9PJedeW567vBo4z7v/y7Fo8PFjTz16C6eIReBOPPzDoiq07z2UJ7Er9uHKBMWll7+8X2zj76+rNbZ1R+pdqfnvu+fn4rLu30S/1lqRKqs6tRt/BQn+Q3T017oOOWjqpKqhC5K9tHPyqlCcv8exc/RPnlL31JcDYOwUKM3pWiCH078OKqlr9Tye0UlhV+cXBVTwWN596O2pAeFwrRSHueM8gwm2DcjCTZfXH/qX7BDl5TnA8qTrklM0x6hTlS1hw2qPa4fWrSn73EN879uxlUKUR/WK+ZI3xhX5nqWgWWu7G1kg3bz0rtXYGZXqi9nJh6oYeFb755icuuKoFEz36/EKex7m0Qp8dnG2yj7lpunJ+3U6mnzuE1frlNhitLzN5kQbVIr8rq48Lx5eMgeYxXho5B3pPBuqOk7l465WrqhTSzD/vTdn7q++0zXd8HNebRB30XIexvZoFHfDSWAKh/jVu1bpn+tVO9SdSyQvqwLg2D+Uo6uZYBFNfVEX7LD9PfvN6un/0z95tX2sXHoKljdx/t9/Fy8SIrfkx0/Rd/U4T5+KUY5HfP9Zvt4mwP2W3ryykkHSNdbLzujfGPHjn0Xc6Kn6S63qq4FLN6b/2Bbphuw2jznREUVDF3+84Evz/5jdR89fYl3m/0mzq4q6G6esgs3q/Wfj7lOVObKb/lf+pb8Yde7lwOhkmGyUi++bX5mWnRTfJ9P3/f7jIm5zkaC360ftmK5Wcfbb5tU25LlOn0iv3tY7Vfpf7Lzu2ygNrt1OjqrbRS/7q4yW+Uuw+budbv+Hq3/vGI8WL5sH3Wx2P0Z7dffrVO/25vUhNy08CxiwwOLjquTPfidFyg37lm5+RHdX71urtb5g/ldPr53t5nIPK8SXGLDuFrsUckNp7XBCpen+3EI1gZBWNntLgcfVi5hZV89fXxYScLKvmSVOCACyyOw7GAJnYlAAFbn9E51HB+fVrudbtI5xvjrJl1qnTqp5RV9s3FDjsM+3dwW1IfNnu0AKW5QRmWEuIqUY0GqPNk9VDa+SIPqrD9gdyPq+OWa+fzzMcsOWN6vdpv1crV+3Uf/u082SkvzbIGDLiXxflU85ooFuvrAsLTIQ3/ouroxggJdAAU0BrrIsZS6sYIBS0VaYZtUI8lcx0q433DHya80QRWn322zzwZZdkTFGnNsaPO1BwYOwNGgnGIByMLVsnusjRkXA9YAXgXBhIscWWP+xYAsgIWZn00b6KS4NQo6MFIADgblfIvNDApDhOACSBsEEyw2KJmjL5a+Nz6WAE5n9hYtY3q0WiLQQgDBg3KKRW74BHr0ybMkZw6NNVKGaFrIehLhWtp51u0lmLTmhIsAKuUQYVPCqU+4zMjbxgD2xzdiNaRjn9g3EvLKMTseB3eV48+V47ByHFQ+61WOZeXYrRyLynHl/uHn89/n+kObM4qc0zPzWBeC5RscsuObkhVYU0LdviQrAKzls4yiCmNPWWDOAoPGUAPK3m0gBz1sUIWAxXmWUPkCHVQt8mVnBVXooIOKE1QNXJ6PDitKUW3K/naxYVX6LQTWGSOQ++NjResVzArEgBUtWDAzEANWtGDB7EAMWNGeCqAhiAEs2lQBtAQxgIU0Tx9XAJ7rhJMFt2EDBMxBGpOdXooTx4ct0pz9yWHr6673+Nhaq8QhmHDHjbjLAB9QSHP2pxVdd/VacwiABTjp89NAAyibbTo0UgAXnVTw/Nwq8SGLNEF/WsiGHCGySDO6x51dfYkQKUovgQWB8vqi49I0HOA10HzZ1sYRNmAH1kIOcDNmqYV6GAKDFgI8B+JlQIYLQkWkeCAsdotBEQHuwxyNTIRaRcFAYOQWg1pRhTVg5BYDWIDI7SzB0tcrGY6P1VSDtcgdOn3Fc21QD73iEa0Cc+gQ6KVASqtM3qHDoIiCmBWg6YlBEwHUyjzB0k1PDGARVQLULF+ODxblTgM1CwNYALqE0nH1dFzPBtzQ1gZxJzCzH4OWUdJ7e5Meg5K5ZHjAOC8ESuYiTfGanG+tk1woFJHI56ZNyfVELwyKCCCf5xcs1bNiMWgVteQDZuQh0CpISz7KyGubkYdCC4kwgWkhAiaSesIBi6NgwAqp1z22HYIvcgbpCDdTraobjQjSgLD2dxvWzX7YJKkAHh73I9rtO4Lbr8ONIaES0gOO2r3rCwE7uxB0GFdyVQuwcrOXY4rNsM3eGaQD3eTERpwVm+fNbp0Oz2obxa+7q2ypvcsU6O51u85kB7fclIXpx5SbFpTEZOTmsqcbN3DHF5sWgZrJiM2FTzeercPi0HLTgnuZjNxc9nRja081tNTQJiNYWDhUtW/G81chfeNmiZWeJ4MBq6luW8EWwtdzaQIbtkMTCbRPBZjEi0ATIc3f5gmWnsSLASzaeAILIGLAqgW/MSus9AAiBqxamPmTcQ4vnFSwWjoDu4c+0pJ0qNK9jU0VCGxUbk2yKvuXAnqgcnsP1KKVKF+k2lQeX39Sx+HipmxV6iyub9WxXAQNrUc/JE01OWFWgQic7F9Hzogw+5KG0sDas2FdfrQHrG3UX7uWt9ze8vaX67+1bA5roKPNlJbJc1WsKut89bYsN8+bh4dGm+EoMF0l1gWay5nCbhJ1rrAt4EF/GNtm4gMyu5fV1orxgRTN8U0e71e/pF8u/V/6BRzr0d+yw2wInVw4vq2eN09vh48/x9t4l4977S1H+cmlp7h0X674V/Vv8AvPEqqLx3E3PB7L7OFmv2WZI5RRG7/GD9FTduJzfjaJ03eWr9QQynwQ0zOfsuPs5jIbJZkZDGfey8r3KuF512348TYHoMorxwsHnMoL0fbhOknSNTE7k2
tEenBQo+wMy19WJ7rsbGY9yqoyqduJbzL7V/mQdt3L//Lrx68UPTxWvtGJsSgvlcNw1B+p9KN8tzwOnVIT67UkelrtN3/Zv0KqSuXpx8rDat/iS7zJv2amxOW73dCrPOStcvc8PGx7wC5+TdZReTcT9XYPErmZb3vQfpU8RvvGB+Uva4Nbv1AIX/VkXSXUDeq681iZzA+zRr0BOM0kNJPQTEIzyfmZpHEi0azCUiZLa++0+a75eYUMWsz62/zvlMWXi+Ipe69izan4fdWYU+dKmare3GbgFU8ooCjZIFcrNe8pKk/doyYqRzvRcictJilKFkndqiYMmslZ/vL3W6GpaWwinA7z1+JlnOy/x4/xdvX0+XhWs9CP7/lHnPN3mST8Ee33b1/zNeDT6nUfn5KTfHFIwU7eMqLPWbqup078nn1g6TiuOvHpZ/HUw6u36qsvUbJJByTzOD5ZPccw/zslYgfkTpntihk4AHPinVJ1rjorth+UR+ZIplfUZ8OKEbdm4rZzWJlndVhv4+0f0Xr/mkQl25FUrJzVc+Yybu932X/+Hm2jZLXP1jSYi6sY0n06F0b/jLPfePNSkaLyfEW0zlEYOctZSD4zJ8BymmuWzl3xYcuOtHxKtSR410mWVbIuvkBnBRACbWsGd2wJmWVsrjrtutJpFumP+dCQjMzO+afzs0iLKJCnMY1ZP3LLwLqhZWCZ8PobWaou0ZBGFGpr9pUM2NKkDIeNB/E2yYKzwitgDkq8KE2vAS+uM/I48KLNtQ14uS5KvCi1sgEvj6HEi7bZNuDlByjxooxJqH3ocgR4UdIk2D7EgRflTULtQxx4AZiIeeJl2Ic48CJ+A2of4sCL+A2ofYgDL4C/TNulQPOnzhB7dvt/4HRUH+Cwza+ClR9KnGghrYw0rQqNAQ8NdEME6AYAY2Z+uhiUncJL5oSjQItMGXBozYrXsJZMQMw/mPnHABcRyeDAGgK41IJKcJ2P02CAi2hJcJgGA1zESgInw9TeQwAX0jr6U2+NbHAoVqwHdgNCcgOAdiUO3SQ3AGio4ICL8n+AhgoGuIQD4CdnCZfBiaCAi5HXBswmwQEXuQGNcNV3TmHw2gSbqhswgTYWQWBsB8AQFBCMij0jL/YcOg4HSc6wFb0Ea7GTZDKSc1mV4IIwDJb1hYZZswKGFp0WPu1kROfCJx3mYph0uC3KVt3vXS9Dcw+uS6NvCrdXMftypuwNdM+5b707a9w+bit3N7diakxqjcBsO8Ht+5X1OhndyWN4Th6dYlP9EW1mqyrUop4it9crONZQvLlTx97iWtjqKbJFSEKVi4sTBEutwdxVaNkIL0OuZr+aL9ZbiQERApgp1CUGmCP0nETOAnNoWdl/uKazUvQ1tK5DLFITZK7WXPMqnTu5BbIheQnXoeB/E16ewIgX0X5NeAU674cCL9qT1IRXqBsHKPCiCH4DXoy5GPGimitNeAmGES9KuYDbh747Pl6UcwG3DzHgRanycPsQA16UJAO3DxHgRVkyLexDDHgRvwG3DzHgBeA3qJcVE7rtwR2bbT9swotLSU4dbXNOF78QJcAAdoQAhlmj+uyLA2AAnTK/qgOprydRooW0IM+0dpIxxw1QogugXuaoiwZaLLDZrUOjBSBeSBcBM63RPAEFulRrqQt0j40wUKHLAaTOHGfaQK/dgwMtonTgISQZWAAbltJR8NDc2bXd49nAHVobKcEFHsDAoI1E4fTG0eFQSMqIgUeAMSgkgMSZpfHJUWoX5cPA478YtIvyYeDxXwx4UT4MfPUqXavx8BIA6gSlOYne3TPXP2mDe+D1TxD5Anf3MOgnUvLlItw9FApJ/AvcIMWgkLTBCG6QYsCL6BS4fnFvfLxogxFcvzDghTTLBXvCLxM27IY2PohdgXsDGJSt88Inl6hsFksfg7K5AKplpspmUmMIlM0FUCXker+TC0OhkABuZaYK6eltxFAoJLVUBKbcotAuIkrgaX4YtAtAlFCa33vS/FBoI9EqcG1EQFu6lKcCr9uCAS9qvQ7beIki6OZSlgrc8keQVSQB1Mnlu+L9dF/JdFSHHEOmpmyRqUKNEMZohMCYVNGf4wYzJUvjNUJwZQteZzKSc1ndV2yi44UIRKdFNs5kROfiJx2pkgbGlJxLbBZ2+ZOO8BFMOpfYLezSJ52sD9L4kkOsHjykzFRt3RH9WGL14Pk2GPBCWhRogikAZk4Ot+E7NMlARCA8OxiBQnoAInCmeJnZwRjwou1m8LAjBrxaUGczw8sMO2LA6xL5qstnHaymz8C+owcgrCiX3LJxA4Pd6tGum/PgBZ5R/1BaG3UPDR7litjQCvTaol7AEKDlA1wOUjWmZ/gjAQ9gz84ePD/UE0AkxzBP+khLHU4rIzngYR1dBwO0lxjyuiy/JQjDYGlM6tZ5YWDPJQCQStHDY/S1eBkn++/xY7xdPX0+nr05TvvZEB/f8484ZyCyof4j2u/fvm7+md1k9bqPtYmjBgS3zgBh/ncKkP0qeYz2J34uV5xn9pNOApdET6v95q+odn/b8Bcf/RJv0i9To6TcZXj882rgc8/TEN3Fr8k6Ku6igVp+rY/gbCM3vKcs7/G+hrb3f6/xvphUr3Y5XqlWOqmn9vLzeDU9esz/e+csQrEI+CJVwPL4+pM6Dhc3rnpO+sXvy88V53Yvq+1HHn/8CfXvIxfhyeem5w6PNu7z4aGQi2ysbY/+kK1UUxBmVZDAyf51NGP5xkY4ZgnSS9+y1pV5ID1MVzYz5jDWGUoQ+FI7wgbfLzd/q6B2uFkDcOkQ7m3oaOuGZSlZFWvsOudhLYvv8+bhoZH9rc+yXRirXHPqeShMjMu6hjUaNugPY5s90w3Gt/PDmDFdk3nojg6yVOvRFM2Mw1p96tc5YiAzgwltT1DANMgONlFvhkX5Uz+grczTtNVZl3AcTyq/wtDqf8v07CWJMm0Dardyh/ap0kX/jLNfePMSJZvn/CaV81+OJ8+t0LlLU0geMz1bl2f/bALH7rJ/2ZXiwxY3N1/wq3tWolW+Z6VuQ6ySdfEFutuC6/v12SNkFsae2cwAtzczQDofNwPaWpQnLdzi2Flc3543A2e2APnC2AXHuMWStC1AojfWRDq2wEGPDtEITlAqmvIdThD5ZFh9MmZsLkTgk0nVpnxYUy4dxuQty2Rxllyq179XL376Wdz98OptYYsn9GgDFumQZ0mpvqzF6yRZvVXe8JLZgLtTxqSjUmAK+WJeceIoH4ebdmxC2hJH9UmkGw+wo9W8vnKPMa98yrb4dWbk6RMLc33TyvNtcY0e5xVId67JxTUubCtP6g7UBMeToydjSUibmwzcF/gIZB7pdpslZ96rOzhnRobprIuwbMkNQ4tKMdUFq4eleiZ1xM4IxnkBV/OgY9kMO2hWqmQtoruzhkxd1fgKDBDOZGdhVxDqhAEGCGey2bArCCXDB+FM6rN3BaEX4INwJjsMu4Iw0N268SFUd55TEmlncGqmjQXMYVMPZekwIcsqHXtPd2fGbIgOcY5Tfzvdb9GZFcRHzw6WHLCrcXaJ3515K
h46/UTaQX0K62tJcI+JX3gCru6ynirRM/8d0bNus7IHTJfyVG3pkpX1NCgbErGt99Kt61C7V9+5V4Gp2xeURKfaK/ctFYGvlUsK3IFxtDaS/mAO3SEQ+Xn7cLWPr6LtA+LUuLu7z97trU2MyiinSo0L8KTGBbpxnuo/NDXOd3hfC4gwF5DJzAnnV4rB5gTpOUtx/HPrq4bn+9XNPSFoCWmbj5F9B22xKljVpjXp7Cd6SuCQ5jqEymipKkD0dB//qMp+fiK9oHycqYtu6Hi6jaOn8UNtnJBpJg7nrha2b1ga28p6yPSdKCr1s/m7aeQmd8QQsu4BHKy+swsCT1t6uGtxXYZOLoDUUblcdr6UiyklF0CqpxBkC8zJBd6sY2LtIUSYXACpikMQLjAnF/gzqc3ZFYQIkwsg5Y0IwgXm5AIfaVPj4YMf74ATXXIB1pJVOJML3mHMoksugFSyutDkgndYQQiSC3wAGULJBe/1VNAlF/gzrJTbFZoYkgv8FlzBZHbUXVilQJ9rWj9+UzV5oupWY7GAooRLFptOHu9Xv6S/Iv1f+gUc69HfFvygf04e2P62et48vR0+/hxv410+hdTecox955Hv4tJ9KWdX9W/wSx66KB7Hs6bV6lhmDzerGMgcoGzB+TV+iJ6yE5/zs0mcvrN8pYZQ5oOYnvmUHWc3z3MlZSamZ97Lyvcq2XnXbfjxNgegyivHCwecygvR9uE6SVKJz87kQpweHFQ+O8Pyl1XRz85mPp2syr+6nfgms3+VD2nXvfwvv378Snk8DDIW5aVyGI7qI5V+lO+Wx6FTamK9VkbbbF8hVaXy9GPlYbVvUQSmZKbD5bvd0Ks85K1ydyEc+wNqUTkb6u0eJHLn2/agWszOfFD+sja49QuF8FVP1lVC3aCuO4/vLTtCMwnNJDST0ExS0Z2GiUQzZUuZLA2000U2NK+ikEGLV3Gb/50y+M4meVTr9VicAHXug7kgvq9Fu3zduYCmgqR3qidJijL/YqgsyRDADvWdeSH1AU1HgZv2ue97qrZ3rSIe68tEd4NZ7zEvRQOcfMFDvjQJFa5KEdXiFeEykD5zufr//kCcdQZNCxCLq+lisPRr2NR0EyfGs065aY+xdPxlDRlvAhjPOienPca+x5aiAk2IH2IVOyOI4TXmlhVgVMIMaoxnndTzDoy5FyzDCsQTmKpDfh7jeYS03oN3GNTtrxrezBVLUVV5i6PkhktuMeBqcY2G9/QgC5Rw0q+5HoilV10C/InLy3zTVd5h46Xga/tsmXBrZl9gSyBEBTilu/QnIFL4y+rkUN9yObXJQTo2B6GbGtbibupNJDqzQDJ2wGKBVO3L1CatWpK+KRT90bLSAbgQZF42uYzuZNYL93/D//78+bfnX2/507+LYHUvvLChY3LLug4Nc4BeH3+5XE6/20zF0A2PsmWJ2kDF7QzjdNzAX7dErwLL6uJZwnRMr/jSneyQ6LxjjTkjKqfnG1ffV8Z9ZsqBa91V3ZscnGiG+O7eGA3NcpZ1QWjd4+JiBCGQgdarWWRBiNFFofPSboA+I1rliw+NKwu0ZoVXrMx2ra/htnE1KnV1N7AXHHb9EGB+qLduS+0lW9Pyvrb52eG64Ajqx+YtxjDCdcHB0I/BxQOEcF1yYPNjcLl69z0UcF1wjPJjcHkcI1w2t47gyvY9hRjhAhQUmCVcpmXoMgRwXXB7s44tQxRwXXBrs44tQxRwXXCKeceWIQq4iNWAWoYo4CJWA2oZooALwGrMJVfsg5OnVnjqyrMb/r3VyrDiy6z5GxrAk0zu+qDd7+FEC2kjssEzsz6mi6rodgXdEAO6ANZkfroY6DXTswaZGNAi0gRMmljxGtSQYQ6RJmC3DgNcRJqASRMMcBFpAvbCMcBFpAnYC8cAF5EmwMmQMQcBXFMlTbD7bTqHYgd7YD+AARiUeSqnbliiUE5GmSRASwUHXABOZJ5w6ZYKDriIFAGSIjjgIlIEmEmCAy4iRRrh0rZcYHDbGNJ99MMGT6s9q39Eu31HiAfGJgAUUQHWglmZTO8B92zvgR/R/dXrpqggze/yIv13t1nbgedV0nnrgQ9JTugom/KM5PTWfaBBclqQPJORHHRdKz4oOk6wrC803JoWMLToXGLDkwufdJjAMOlwG3lV32Z96Fx/3EfNTu61B5R78ez79e+cRSgWAV/c3Kljb3Et1HG4uHHVMVuEDXu1Z7aXX4R6VllZkKdaeT3ktjLdXK9s3qFYzaQezAcJNz1Pgtm9loFNWIUVNvBGTkMyHA4cKYF8Js1gP5pDJlGCB+DfZg+emYyLw9XnADaOknFbJ+OiCOryFvlMk/GoLt4Zb8rjHtqpsnGAVMyq36pmnpYlzqQIDEkYuJRVKWPj1LJaJeuvB+896Mrd1GxgX5reZmpq2QbZ762uFeMt6K9ZhcVEqFV4Y55nAWzYqJiwUU6EVlafVyJEi9KlGtASXEOL++7oaAGom3miVXYxwIQWJUs1GXMeQrQoV6oBrUBPHOXMNL2HRotSpaA2YQnfiGjR9jGoTYgBLdo9BrUJXTH+ukWbx6A2IQa0iMuA2oQI0HKJy4DahMIf3SZ0kVbPmVwxK1dq6fZM2piqgWN21l4yGrzzy1txHQcjVgDqg2Ln5+1NvXWYlHx8bJEmtYysh0xvC2pruzQ4VkSTNNEkurnp+2J0A4ZoEqArl/Eko6NFNEnTsqWbJBh0i2gSoOONQreIJmlyvAU+tCTRJNCZMBSjz4RyqjQJdm9NZ03SU+N7AMphJN08a1MKG1wD6yaljECtFAxoUcoI1ErBgBZxIVAuBIOVQlxIE9EoEKJFXEgTWnpdOARciARwISN7AH1HSfupWuUGOtpcFXAY0wWg8jHIy8dIR5lHJ+Vm4H2OXgtaZzJyc1mbZKUjhV6xikvLAjO06LTYVjQZ0bmwKUfZbEpuHNXVe0y5sbFV7XZSN9WfMnZol8Wmbu5OFZsyPsAX159afSBc3MhWH5CLrAX0yQ98uA5XXnuLamxlHG0QLLXcjQBFlS2PSv9AiEFtvwt3bAGxgU1uj3Kk7FjpRRwQ5Cp6AF6Q1IyhhA5AEhJ0RoYwhsxED8AYUoZw+x1pgY0MHhpbKnWNnzgQenUtVvbEGM8B5Nb2TVRbq2cPyMwANLfeDVxbi0M6Q02ptpbU9nKENobXXlsr6G2q5tTQqQkvo11aOPrOfE79nJrQMrqlYUCLEk4aE04QokUJJ40JJwjRooSTxpRzo1Dq2HUUeJveR/NCy2h5jAEt2nzTZBPquuX5bGy0rC1+CK3MJtR1CwNaVG+1ySbUU+cwoAXgIGaJlmv0z8WAFnEZUJvQC0e3MiC9ceaJlmETYkALaTOc6dXWKndLn6z4OWzEjkP64cwvX0XoHfxQYKV2HVagiR4eo6/Fy2IiWr8mf5Vg1UNGcbL/Hj/G21RT43wKzN7yR7TfvxUhj9XrPq7DGf3c7H8rPp4d/145/pSNli/VqzK8nb36EiWb9GdnQa7D+7fpEOQ3WnKpXv+uHpK9yO9WvjW/2+HVw3WS5BIVv0TbXCLSRaH4
wsw9vKX6che/Juuo/AKZqG6y4Gse0Ek/+xjtGy4ehjcb09Nyk0KQP+OkYhU81eGBp955YItNWUyip3SG+6v+XXqYASgfo4sZI9TTpILxc214922KLmF2d91Qy4B3bRnwQ2NF5BswIJsCaIFrWLOYmh1Bg0Yo0CLyDRiQRYEWkW9AOgcFWkS+AekcFGgR+QadCT1nfLSmSr4h99YMLo5LG9gDewDULAlsU2LQTUpEglopGNCiRCSolYIBLeJCoFwIArSoWRI0OR0FWsSFNKKl7YZC4K9Beh9Rxbb3BQH0BNzAM9Ee2AWAtE+a3AbaiyqfZJaycBHsnm3T7ofkBoXccGkLEA8tNzbK4YO7rs/Un+KLdCCqFbWOxbJcdewsrm+PNbECQbu1M/sk1Mt5OK4583jMW/JBy1VxSD8kKsYidM/NWhl9aHuDEkSsWOltUK0tiYfGCkCKkJpJpquZsJhnQ2duAhiS2UNnFl6Q3ugljTjWpknTCrgZ6ZEoAm5tWiRNxmu6rHJVlsqRNppmYLdJFdpGlYnPW2fit0zET1+ZN5tmdr5KrDifnS/Gzc5v07toMlPUhRM7wrZXaOgZinJJoFu7QpuDMGyUh7ooNaEV6NvNMaBFuSTQTVAY0KJcEuiGZARotWk9NC+0dP6YSXf04gBtuv3MCi2Ty/KC0cukWHvsEFq2IqIY0KJ9NdAiohjQon010CKiGNCiAr1QK8N3xrcyAFwGyl1Q+EoQ6QWmAgTBUUhDmPnlIAhPX+VQYIW0gfS0oqGuo5flC9Q4jokt0vySkfXQYCClrY/7wFipeZwMmHP0PnelRbWGNWB8okmg+7AxoEU0CTSJBwNaRJNAE44xoEU0CVS3PNv2kIHRovIj/dj/uh9u3ws0tE1JKSNQKwWDblLKCHTdw4AWpYxA/TUMaFHKCJAMwYBWQFxII1p6QYvxPYAAwIXMfiuUK/RZUdiQG9hADADEyPwIYlcIfbOoLTFraKwAtMjs1UwEOnQYYmwBgCMh6Iw0fgxhmQApYTIxekQPj6KgR4IeSqUwbi2Vskz/qORJpuWhyWGba2tZyasmCWF/ktB5wkpN2ZhVM8P8L4MoWRf7KYOOBpnr2QjcVtFKSNsolwtmD8NMnEkTYFIzW/zxUyoD4kwa0HI5PrRC4kwa0JIhQrQof6Rp44aHEC0ATTJPtELd3caAFuWPNEU99XKDGNCi/BGoTRi646NF22ygNiEGtCj/A2oTYkCL8j+gNiEGtIjLgNqEGNAiLgNqE46PVtltClvQZXLbQ4UfaOhat2oPG3cRDtKEkZG3pZXJ2aiwAjjTFP88H9vWy4+jwBZpWsLIehhqesh9lds/JlbkeAOnTe6PnsAqHHK8gQYJCrTI8QYuWyjQIse7aeGSCNEix7vJ8eb40GIAx3uWaJkzYeCPjxbAj0ZJk2D31gLH8AAsYA/sAajZgXTzrE2JQTcpCQFqpWBAi5IQoFYKBrSIC4FyIRjQIi6kKWXERYgWcSFNaOklQzD4a0iLd1Y8gL6jpA+bJJW/w7N+RLt9N2gb6x4LxOibEQVrQaZQ67BRWoelSzBAboZtHVaWzyG5wSs3QqccVG2GMeXGRjCVG2TPb4blnnUz7OLOWYRikf7Amzt17C2uhToOFzeuOmaLcG47ZKXnLdVOSTWLCEvNfo95S7VK1cwN2dsCpOYNbBkDuAoZGAkE1pJKA9sOHGn5kLGTPfSOaRgSczjSxBxUauYy3X32mKWP9NDQAbip2UNnpjpi8K440qY0AwbPzMhdF2h7hm2LIJzGWxBdk/GJxFmf6HmzW6fDs9pG8evuKiu0eJd1pbl73a4zxwiZU8TFUs+btdWEGtotIhpmcu60VPUxRpQbQdy6HS7P0zb4caa2143HrQvKM7Sj5Tt6egwGtCjPEIqWkKOj5bYgpGeFljkTYkCLihU1oKVXm0WBFoCjnSlaegluBOuWC6D6Lpx06GSR09PUuE0RB6YYJOWANmmi3nTCCS3U7bCaKCkHtAEto5EZBrTI3m9Aq6yecSywPPoq55G9D50JMaBF9n6Td2a2Ih4fLbL3oesWBrTIJoTqlsfGRwupdzb1HZZe2WTg2A9kfH/No31FUCsFg27SviLouocBLYp9Qv01DGhR7LMBLVfv840BLeJCGtEy2qWOjpYP4EJmnyHqGw3jrJG1gQ1En4phdqGVnofQ+PddA8ro4TH6WrwsJqX1a/JXqZf1rT9xsv8eP8bbFPo4nw6zt/wR7fdvRXO31es+riMd/dzsfys+nh3/Xjn+lI0Wl+pVCUD26kuUbNKfnW1WOrx/mw7Bb6U45C9/V8/IXuQ3K9+Z36x8Zd7s4TpJ8gklfom2ubCki0bxI5h7eEv15S5+TdZReZ9sptpkiYB5O7v0s4/RvuHiYcizcT4tSyks+TNOroIFgIcHnn+nKZ9J9JR61X/Vv0v3kgZp5DvP5dpgBZgzeqAJ0rt3lmiZSzQGtMgUbnIzjb6r3uimMPUsBM+EGNCisCDUoMWAFie0gOsWBrTIJoTqlsfHR4vCgv0wA4FeY0vawB6YGaCeh2ArBYNuUlgQuu5hQIvCglB/DQNaFBZsCjSFCNEiLqQRLT0sOLoH4EJ6HlJY0NFnRWFDblgD0YX0UaOw4HlPT09bQmD8u9a+a2UJRFWL8HAiG6sa6g1lERm3lkVcpn+VUoeHu9WfMI8KiKnLrzM0lg585ZJbk4SwP0kAOBbtJuiasjGrZvqfr73P2ZVVsi5CrkE3gywDnVgp/YOquglp1Tfp9TbM5BE0AGY0zuCjx9tcasbWNIcZjTMwoEUeQQNaXPcIEKBFzdga/Te9SAoGtCg62mTN6UVSMKDFCa0GTl8vF4UBLYqOQm3CclvUiGhRxhzUJsSAFkU3oTYhBrQougm1CTGgRVwG1CbEgBZxGVCbEANaAC4DZQ5Wp93vOpk59fbJ1jz+geMuHOBOU0ztfBaJ3rYeBbYA53t+rYg8T/fl/AABVuR6Q11vf/SUHxfSOGqWaJnuAQa0yPWGut4Y0CLXG+rMYUCLXG+oM4cBLXK9oTNhEIyOltr1QWidtTIwoEWBaei6hQEtgG88T7SMdQsDWuQdQ71jDGiRdwwNTGNAi7zjJrT0bbcILHiBtHnysOGYh02SiuDhcT+i3b4jwAOE3Y3cNt1MqQvuGF1wfUexICflZtguuG6bvqokN+PIjR419G2N8oaWG1vA/7Atbvey2tYEqGHjHfesG++uKtvrDre69B12nIdsqUy8AmTfMjd4zFuqtadmR8jelhVIa9jZ74M2Y8XWegMDWwQuwBsm6AzoOIIt7G4LZ2syi7I4uyg/b3brdHhW2yh+3V1l+8/vsl2xd6/bdbYyI1uVuVhqouMgWJddmx/QckO8Y12XjY329Q3yxv57Y9M8yuW74pGGHc0pQmrlZ1ioStSNuGFe2lZyEoxRBYOXO9/GFAxbRKofwUifdJvXNbgg4ehMNvQCEExV+B1TNvhgsiE4iYUt40CPYbseginD5lrQWjLkWuK50tftT2tTHm9g0bBF9Ug0hhUNvXer9M2lhAU
WJqlPwQD4tJnj+dI4AqmXuM1Civfq7U7bkZG+NjLl6+rA+L63tIxNuSOth7GZbzLrKWExcVQCHfKluRmDq4I6NW40XAap+Ltc/X9vIELakfXfwwpXu6jTU8HZLlLpGnd459kuUoVVZErMMF2kyi9KGnxGg1XIQ4qlX1PM+oqFUcFli1ApYZxFpVILtYqMNwGM51uT7V0Y+x5bigo0IX6IPUBIkyA+QswcJpcVYJQvgRrj+WbZvw9j7gXLsALxBKZqxYPNPL79PvMrEMuMQSpxEjW8mfDTWb2i8gb2zA2X3GK/1ciEhvf0IAoAbuHCy1OYmbU9GnapeFTXA7cuPK5Yiup6YfLVyKTHxr60oyx5A2Up7qbFRva4wmTeQM3jM9eTdA2qrhy+KRQ9cnAewIOn5aTJRHTroQgm3JpbYCudgmoO8IcLglPYAtgHiDG1oW/EeJY/XNpMlh3BfO+ihKMr2RBaD5LUYTGN0qHD4D7STkOTK3onQ9/Y4mCxGgfOwvUBXsUMC6MxvSsshmR3v/N2QJeAlalXHrf48kNjdYlbFS8tu13fc+YJy4Q8cG57AAjOHFJIGuyegwzESYqRratX48B9PPMkNJszW5qopQavRRXLJOHuRzQExEL666IGEtRTotA43Fe2vJ7BBxcQhLjENeldiPm6VeGMb1QoG5SiSDAMhba9QTCLFg5bCCOcb0mgd0Go70LAAOF86wS9byYV+CCcb/Ggd0EYOvggpBTldhMpPgQpRbUVgiUUiCCkYj2jF+t5lywxxjT3hluEaWA6JQTQKRS++IAhpm/oNYsADOzRSnVnCnO3jmTCw9zvsxcCLcA5+lZO6QD4K6R0qx9ipFulAyCUMNKthSigplulg7Te1Sh06znEENKt0iGipxWG+OhW6RDR0wpCfHSrdIjoaTeToqNbpUNMTysI8dGt0qG9xu0mUnwI0k7iVgjio1ul+gZEtyKjW8/JEka6VTKkPbSnQreeNcTw0a2K7CC6dWi69ay9gI9uZTbfuSosDci3bbvQIFFfGtsutLu9b727c+bu05VZkIyem7mMusBqbRhvd5Nk57ZFkzxeqDyGhjz6lg28g8vjucr2JI8XKo/MMfcHBqNv/5TMxtF0ImuGDP/8lzOCMI9GSYz5epX0MqY6piDYqB4ShD5XKH0/OJcWuqgzOUhfJnEGTHnt75lz/Wv8EGXv+H8=</diagram></mxfile>
|
2201.10222/paper_text/intro_method.md
ADDED
|
@@ -0,0 +1,123 @@
| 1 |
+
# Introduction
|
| 2 |
+
|
| 3 |
+
Making accurate predictions about the future is a key ability to survive and thrive in a habitat. Living beings have evolved many systems to this end, such as memory [@mcconnell1962memory], and several can predict the course of complex phenomena [@Taylor16389]. However, no animal comes even close to the prediction ability of humans, which stems from a unique-in-nature system.
|
| 4 |
+
|
| 5 |
+
At the core of this system lies an object called *explanation*, formed by the propositions of a language, which has a remarkable property: it can be installed with ease into another human speaking the same language, allowing them to make predictions about new phenomena without ever having experienced them. When the installation is successful, we say that the human has *understood* the explanation.
|
| 6 |
+
|
| 7 |
+
This process is key to the success of human beings. An individual can provide accurate predictions for a multitude of phenomena without going through a painful discovery process for all of them, but only needs an operating system -- mastering a language -- and someone who communicates the relevant explanations; this way, the individual can focus on unexplained phenomena. When an explanation is found for them, it is added to the existing shared collection, which we call *knowledge*.
|
| 8 |
+
|
| 9 |
+
How can we make machines take part in this orchestra? With this work, we try to shed new light on this problem. Specifically, we propose a learning procedure to allow machines (i) to *understand* existing explanations, in the sense described above, and (ii) create new explanations for unexplained phenomena, much like human scientists do.
|
| 10 |
+
|
| 11 |
+
Our contribution in this sense is threefold:
|
| 12 |
+
|
| 13 |
+
i\) We formulate the challenge of creating a machine that masters a language as the problem of learning an interpreter from a collection of examples in the form $(\text{explanation}, \text{observations})$. The only assumption we make is this dual structure of data; explanations are free strings, and are not required to fit any formal grammar. This results in the *Explanatory Learning* (EL) framework described in Sec. [2](#sec-el){reference-type="ref" reference="sec-el"}.
|
| 14 |
+
|
| 15 |
+
ii\) We present Odeen, a basic environment to test EL approaches, which draws inspiration from the board game Zendo [@zendo]. Odeen simulates the work of a scientist in a small universe of simple geometric figures, see Figure [1](#fig-galileo){reference-type="ref" reference="fig-galileo"}. We present it in Sec. [3](#sec-odeen){reference-type="ref" reference="sec-odeen"}, and release it with this work[^1].
|
| 16 |
+
|
| 17 |
+
iii\) We argue that the dominating empiricist ML approaches are not suitable for EL problems. We propose *Critical Rationalist Networks* (CRNs), a family of models designed according to the epistemological philosophy pushed forward by [@popper1935logic]. Although a CRN is implemented using two neural networks, the working hypothesis of such a model does not coincide with the adjustable network parameters, but rather with a language proposition that can only be accepted or refused *in toto*. We will present CRNs in Sec. [4](#sec-crns){reference-type="ref" reference="sec-crns"}, and test their performance on Odeen in Sec. [5](#sec-exp){reference-type="ref" reference="sec-exp"}.
|
| 18 |
+
|
| 19 |
+
Humans do not master a language from birth. A baby cannot use the message "this soap stings" to predict the burning sensation caused by contact with the substance. Instead, the baby gradually *learns* to interpret such messages and make predictions for an entire universe of phenomena [@schulz2007preschool]. We refer to this state of affairs as *mastering a language*, and we aim to replicate it in a machine as the result of an analogous learning process.
|
| 20 |
+
|
| 21 |
+
Using a batch of explanations paired with observations of several phenomena, we want to learn an interpreter to make predictions about novel phenomena for which we are given explanations in the same language. Going a step further, we also want to discover these explanations, when all we have is a handful of observations of the novel phenomena. We first describe the problem setup in the sequel, comparing it to existing ML problems; then we detail our approach in Sec. [4](#sec-crns){reference-type="ref" reference="sec-crns"}.
|
| 22 |
+
|
| 23 |
+
Formally, let phenomena $P_1, P_2, P_3, \dots$ be subsets of a universe $U$, which is a large set with no special structure (i.e., the set of all possible observations $U = \{x_1, \dots, x_z\}$). Over a universe $U$, one can define a language $L$ as a pair $(\Sigma_L, \mathcal{I}_L)$, where $\Sigma_L$ is a finite collection of short strings over some alphabet $A$, with $|\Sigma_L| \gg |A|$, and $\mathcal{I}_L$ is a binary function $\mathcal{I}_L: U \times \Sigma_L \rightarrow \{ 0, 1 \}$, which we call *interpreter*. We say that a phenomenon $P_i$ is *explainable* in a language $L$ if there exists a string $e \in \Sigma_{L}$ such that, for any $x \in U$, it holds that $\mathcal{I}_{L}(x, e) = \mathbf{1}_{P_i}(x)$, where $\mathbf{1}_{P_i}(x)$ is the indicator function of $P_i$. We call the string $e$ an explanation, in the language $L$, for the phenomenon $P_i$.
|
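
To make these definitions concrete, here is a minimal sketch in Python; the names (`Interpreter`, `is_explanation_for`) are ours, not part of the framework:

```python
from typing import Callable, Set

# An interpreter maps (observation, explanation string) to {0, 1}.
Interpreter = Callable[[str, str], int]

def is_explanation_for(e: str, P: Set[str], U: Set[str],
                       interpret: Interpreter) -> bool:
    """e explains P in L iff I_L(x, e) agrees with the indicator of P on all of U."""
    return all(interpret(x, e) == (1 if x in P else 0) for x in U)
```
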
| 24 |
+
|
| 25 |
+
Our first contribution is the introduction of a new class of machine learning problems, which we refer to as *Explanatory Learning* (EL).
|
| 26 |
+
|
| 27 |
+
Consider the general problem of making a new prediction for a phenomenon $P_0\subset U$. In our setting, this is phrased as a binary classification task: given a sample $x' \in U$, establish whether $x' \in P_0$ or not. We are interested in two instances of this problem, with different underlying assumptions:
|
| 28 |
+
|
| 29 |
+
- **The communication problem: we have an explanation**. We are given an explanation $e_0$ for $P_0$, in an unknown language $L$. This means that we do not have access to an interpreter $\mathcal{I}_L$; $e_0$ looks like Japanese to a non-Japanese speaker. However, we are also given other explanations $\{e_1, \dots, e_n\}$, in the same language, for other phenomena $P_1, \dots, P_n$, as well as observations of them, i.e., datasets $\{D_1, \dots, D_n\}$ of the form $D_i = \{(x_1, \mathbf{1}_{P_i}(x_1)), \dots, (x_m, \mathbf{1}_{P_i}(x_m))\}$, with $m \ll |U|$. Intuitively, here we expect the learner to use the explanations paired with the observations to build an approximate interpreter $\mathcal{\hat{I}}_L$, and then use it to make the proper prediction for $x'$ by evaluating $\mathcal{\hat{I}}_L(x', e_0)$.
|
| 30 |
+
|
| 31 |
+
- **The scientist problem: we do not have an explanation**. We are given explanations $\{e_1, \dots, e_n\}$ in an unknown language $L$ for other phenomena $P_1, \dots, P_n$ and observations of them $\{D_1, \dots, D_n\}$. However, we do not have an explanation for $P_0$; instead, we are given just a small set of observations $D_0 = \{(x_1, \mathbf{1}_{P_0}(x_1)), \dots, (x_k, \mathbf{1}_{P_0}(x_k))\}$ and two guarantees, namely that $P_0$ is explainable in $L$, and that $D_0$ is *representative* for $P_0$ in $L$. That is, for every phenomenon $P \neq P_0$ explainable in $L$, there must exist at least one $x_i$ appearing in $D_0$ such that $\mathbf{1}_{P_0}(x_i) \neq \mathbf{1}_{P}(x_i)$ (a code sketch of this condition follows the list). Again, we expect the learner to build the interpreter $\mathcal{\hat{I}}_L$, which should first guide the search for the missing explanation $e_0$ based on the clues in $D_0$, and then provide the final prediction through $\mathcal{\hat{I}}_L(x', e_0)$.
|
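
The representativeness guarantee can be phrased as a simple check. The sketch below assumes access to the true interpreter and to one explanation per explainable phenomenon, which a learner of course does not have; it only serves to pin down the definition:

```python
def is_representative(D0, explanations, e0, interpret):
    """D0: pairs (x, indicator of P0 on x). D0 is representative for P0 iff
    every other explainable phenomenon disagrees with P0 on some x in D0."""
    for e in explanations:
        if e == e0:
            continue
        if all(interpret(x, e) == y for x, y in D0):
            return False  # e is indistinguishable from e0 on D0
    return True
```
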
| 32 |
+
|
| 33 |
+
Several existing works fall within the formalization above. The seminal work of [@Angluin] on learning regular sets is an instance of the scientist problem, where finite automata take the role of explanations, while regular sets are the phenomena. More recently, CLEVR [@clevr] posed a communication problem in a universe of images of simple solids, where explanations are textual and read like *"There is a sphere with the same size as the metal cube"*. Another recent example is CLIP [@clip], where 400,000,000 captioned internet images are arranged in a communication problem to train an interpreter, thereby elevating captions to the status of explanations rather than treating them as simple labels[^2]. With EL, we aim to offer a unified perspective on these works, making explicit the core problem of learning an interpreter purely from observations.
|
| 34 |
+
|
| 35 |
+
We briefly discuss the relationship between EL and other problems in ML, pointing to Sec. [6](#sec:related){reference-type="ref" reference="sec:related"} for additional discussion on the related work.
|
| 36 |
+
|
| 37 |
+
EL can be framed in the general meta-learning framework. The learner gains experience over multiple tasks to improve its general learning algorithm, thus requiring fewer data and less computation on new tasks. However, differently from current meta-learning approaches [@hospedales], we are not optimizing for any meta-objective. Instead, we expect the sought generality to be a consequence of implicitly defining an interpreter through a limited set of examples rather than an explicit goal to optimize for.
|
| 38 |
+
|
| 39 |
+
To many, the concept of explanation may sound close to the concept of program; similarly, the scientist problem may seem a rephrasing of the fundamental problem of Inductive Logic Programming (ILP) [@shapiro1981inductive] or Program Synthesis (PS) [@balog2019deepcoder]. This is not the case. ILP has the analogous goal of producing a hypothesis from positive/negative examples accompanied by background knowledge. Yet, ILP requires observations to be expressed as logic formulas, a task requiring a human; only then does the ILP solver output an explanation in the form of a logic proposition, which in turn is interpreted by a human expert. With EL, data can be fed as-is without being translated into logic propositions, and a learned interpreter plays the expert's role. PS also admits raw data as input; it yields a program as output and replaces the expert with a handcrafted interpreter. Still, the sequence of symbols produced by a PS system only makes sense to a human (who designed the interpreter), not to the system itself. Instead, in EL, the interpreter is learned from data rather than hardcoded. An empirical comparison demonstrating the benefits of EL over PS is given in Sec. [5](#sec-exp){reference-type="ref" reference="sec-exp"}.
|
| 40 |
+
|
| 41 |
+
Next we introduce Odeen, an environment and benchmark to experiment with the EL paradigm.
|
| 42 |
+
|
| 43 |
+
:::: wrapfigure
|
| 44 |
+
r0.47
|
| 45 |
+
|
| 46 |
+
::: overpic
|
| 47 |
+
./figures/odeen-game.jpg
|
| 48 |
+
:::
|
| 49 |
+
::::
|
| 50 |
+
|
| 51 |
+
**Single game.** The inset shows a typical situation in a game of Odeen. The players look at a set of structures made of simple geometric figures. Each structure is tagged red or green according to a secret rule, and the players' goal is to guess this rule. In the example, the rule cannot possibly be "A structure must contain at least one red square", since the fifth structure on the left contains no red square and yet respects the rule (green tag). To win the game, a player must prove to know the rule by correctly tagging a large set of new structures[^3]. We made a simplified interactive version of Odeen available at [https://huggingface.co/spaces/gladia/odeen](https://huggingface.co/spaces/gladia/odeen).
|
| 52 |
+
|
| 53 |
+
**Odeen challenge.** We can see each game of Odeen as a different phenomenon of a universe whose elements are sequences of geometric figures. In this universe, players are scientists like Galileo, trying to explain a new phenomenon; see Figure [1](#fig-galileo){reference-type="ref" reference="fig-galileo"}. We can phrase the challenge for an Odeen scientist in this way: make correct predictions for a new phenomenon given a few observations of it, in addition to explanations and observations of some other phenomena. This is the essence of the Odeen Explanatory Learning problem, see Figure [2](#fig-teaser){reference-type="ref" reference="fig-teaser"} (A and B).
|
| 54 |
+
|
| 55 |
+
*- Why do we need explanations and observations from phenomena different from the one of interest? Indeed, we are able to play Odeen from the very first game.*
|
| 56 |
+
|
| 57 |
+
*- We are able to do so only because we are -already- fluent in the Odeen language, which in the case above is a subset of English. We already possess and understand all the necessary concepts, such as being "at the right of" something, but also being a "square" or "at least". Otherwise, we would need past explanations and observations to first build this understanding. Before explaining the dynamics of Jupiter's moons, Galileo learned what "Jupiter" is and what it means to "have a period around" something from past explanations and examples provided to him by books and teachers.*
|
| 58 |
+
|
| 59 |
+
:::: wrapfigure
|
| 60 |
+
r0.44
|
| 61 |
+
|
| 62 |
+
::: overpic
|
| 63 |
+
./figures/rules.png
|
| 64 |
+
:::
|
| 65 |
+
::::
|
| 66 |
+
|
| 67 |
+
In Odeen, consider the point of view of someone who does *not* speak the language in which the rules are written; an example is in the inset, where the secret explanations are given in hieroglyphics rather than English. Such a player would not be able to tag any structure according to the secret rule, even when the latter is given. However, assume the player has been watching several games together with their secret rules. Reasonably, the player will grow an idea of what those strange symbols mean. If the player then wins several Odeen games, this would be strong evidence that they have mastered the Odeen language.
|
| 68 |
+
|
| 69 |
+
**Problem formulation.** Each game of Odeen is a different phenomenon $P_i$ of a universe $U$ whose elements $x$ are sequences of geometric figures. The specific task is to make correct predictions for a new phenomenon $P_0$ (a new game) given: (i) a few observations $D_0$ of $P_0$ (tagged structures), in conjunction with (ii) explanations $\{e_1, \dots, e_n\}$ and observations $\{D_1, \dots, D_n\}$ of other phenomena (other games and their secret rules). More formally:
|
| 70 |
+
|
| 71 |
+
::: mdframed
|
| 72 |
+
Let us be given $s$ unexplained phenomena with $k$ observations each, and $n$ explained phenomena with $m$ observations each; let the $n$ phenomena be explained in an unknown language, i.e., $e_1, \dots, e_n$ are plain strings without any interpreter. The task is to make $\ell$ correct predictions for each of the $s$ unexplained phenomena.
|
| 73 |
+
:::
|
| 74 |
+
|
| 75 |
+
We consider $\ell=1176$ ($1\%$ of structures); $s=1132$; $k=32$; $m=10K$, $1K$, $100$; $n=1438$ or $500$.
|
| 76 |
+
|
| 77 |
+
<figure id="fig-teaser" data-latex-placement="t">
|
| 78 |
+
<div class="overpic">
|
| 79 |
+
<p><span>./figures/zendo_teaser.png</span></p>
|
| 80 |
+
</div>
|
| 81 |
+
<figcaption> <strong>Odeen Explanatory Learning problem.</strong> Given observations and explanations in an unknown language for some phenomena (<strong>A</strong>), plus a few observations of a new phenomenon, explain the latter and prove this knowledge by correctly tagging a large set of new samples (<strong>B</strong>). An empiricist approach attempts to extract this knowledge from data (<strong>C</strong>, left); a rationalist one conceives data as theory-laden observations, used to find the true explanation among a set of conjectures (<strong>C</strong>, right). </figcaption>
|
| 82 |
+
</figure>
|
| 83 |
+
|
| 84 |
+
**Why not explicitly ask for the rule?** Instead of requiring the player to reveal the secret explanation explicitly, we follow the principle of zero-knowledge proofs [@zero]. In our setting, this is done by asking the player to correctly tag many unseen structures according to the discovered rule. This makes it possible for any binary classification method to fit our EL environment without generating text. A winning condition is then defined by counting the correct predictions, instead of a textual similarity between the predicted and the correct explanation, which would require the player to guess the secret rule word by word. In fact, different phrasings with the same meaning should grant a victory, e.g., "at least one pyramid pointing up and at most one pyramid pointing up" is a winning guess for the secret rule "exactly one pyramid pointing up"[^4]. A brute-force enumeration of all equivalent phrasings, in turn, would not accept solutions like "exactly one *one* pyramid pointing up", where "one" is mistakenly repeated; intuitively, we want to accept this as correct and dismiss the grammatical error. Similarly, a solution like "exactly one pointing up", where "pyramid" is omitted, should be accepted in a universe where only pyramids point up. We will re-encounter these examples in Sec. [5](#sec-exp){reference-type="ref" reference="sec-exp"} when we discuss the key properties of our approach.
|
| 85 |
+
|
| 86 |
+
**Dataset generation.** Odeen structures are sequences of six cells; each cell is either empty (a space) or contains a blue or red square or pyramid, with pyramids pointing either up or down. The size of the universe is therefore $|U| = 7^6=117,649$ possible structures. We further created a small language with objects, attributes, quantifiers, logical conjunctions, and interactions (e.g., "touching", see Appendix A). The grammar generates $\approx$25k valid rules in total. Each of the $|U|$ structures is tagged according to all the rules. The tagging is done by an interpreter implemented via regular expressions.
|
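
A sketch of the universe construction, with a hypothetical one-character-per-cell encoding (the released environment may encode structures and compile rules differently):

```python
import itertools
import re

# 7 cell states: empty, blue/red square, blue/red pyramid up, blue/red pyramid down.
CELLS = ".brBRpq"
universe = ["".join(s) for s in itertools.product(CELLS, repeat=6)]
assert len(universe) == 7 ** 6  # 117,649 structures

# A rule compiles to a regular expression; e.g. "at least one blue pyramid
# pointing up" becomes a search for 'B':
rule = re.compile(r"B")
tags = {x: int(bool(rule.search(x))) for x in universe}
```
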
| 87 |
+
|
| 88 |
+
**Metrics.** As described above, the task is to tag $\ell$ new structures for each of $s$ unexplained games. An EL algorithm addressing this task encodes the predicted rule as an $\ell$-dimensional binary vector $\textbf{v}$ per game (predicted vector), where $v_i=1$ means that
|
| 89 |
+
|
| 90 |
+
:::: wrapfigure
|
| 91 |
+
r0.39
|
| 92 |
+
|
| 93 |
+
::: overpic
|
| 94 |
+
./figures/meetriche5.png (12,27)[Predicted vector v]{style="color: red"}
|
| 95 |
+
:::
|
| 96 |
+
::::
|
| 97 |
+
|
| 98 |
+
the $i$-th structure satisfies the predicted rule, and $v_i=0$ otherwise (see inset). Let $\textbf{w}^\ast$ be the ground-truth vector, obtained by tagging the $\ell$ structures according to the correct secret rule. Then, the Hamming distance $d_H(\textbf{v},\textbf{w}^\ast)$ measures the number of wrong tags assigned by the EL algorithm; if $d_H(\textbf{v},\textbf{w}^\ast)<d_H(\textbf{v},\textbf{w}_i)$ for every $\textbf{w}_i \neq \textbf{w}^\ast$, where $\textbf{w}_i$ ranges over all the $\approx$25k possible rules, then the prediction $\textbf{v}$ made by the algorithm is deemed correct.
|
| 99 |
+
|
| 100 |
+
According to this, the *Nearest Rule Score* (NRS) is the number of correctly predicted rules over a total of $s$ games. A second score, the *Tagging Accuracy* (T-Acc), directly counts the number of correct tags averaged over $s$ games; this is more permissive in the following sense. Consider two different rules $A$ and $B$ sharing $99\%$ of the taggings, and let $A$ be the correct one; if an EL model tags all the structures according to the *wrong* rule $B$, it still reaches a T-Acc of $99\%$, but the NRS would be $0$. An EL algorithm with these scores would be good at making predictions, but would be based on a wrong explanation.
|
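
The two scores can be computed as follows; this is a sketch of the definitions above, with `V` the predicted vectors, `W` the taggings of all rules on the $\ell$ test structures, and `true_idx` the index of each game's secret rule (all names ours):

```python
import numpy as np

def odeen_scores(V, W, true_idx):
    """V: (s, l) predicted binary vectors; W: (R, l) ground-truth taggings of
    all ~25k rules; true_idx[i]: index of game i's secret rule."""
    nrs, t_acc = 0, 0.0
    for v, t in zip(V, true_idx):
        d = (v != W).sum(axis=1)        # Hamming distance to every rule
        d_true = d[t]
        d[t] = d.max() + 1              # exclude the secret rule itself
        nrs += int(d_true < d.min())    # v strictly nearest to the secret rule
        t_acc += (v == W[t]).mean()     # fraction of correct tags
    return nrs, t_acc / len(V)
```
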
| 101 |
+
|
| 102 |
+
In principle, an EL problem like Odeen can be approached by training an end-to-end neural network to predict $\hat{y} = \mathbf{1}_{P_i}(x')$, given as input a set of observations $D_i$ and a single sample $x'$ (see Figure [2](#fig-teaser){reference-type="ref" reference="fig-teaser"} C, left). Such a model assumes that all the information needed to solve the task is embedded in the data, ignoring the explanations; we may call it a "radical empiricist" approach [@pearl2021radical]. A variant that includes the explanations in the pipeline can be obtained by adding a textual head to the network. This way, we expect performance to improve, because predicting the explanation string can aid the classification task. As we show in the experiments, the latter approach (called "conscious empiricist") indeed improves upon the former; yet, it treats the explanations as mere data, nothing more than mute strings to match, in a Chinese room fashion [@chinese-room; @bender-koller-2020-climbing].
|
| 103 |
+
|
| 104 |
+
In the following, we introduce a "rationalist" approach to solve EL problems. This approach recognizes the given explanations as existing knowledge, and focuses on interpreting them. Here theory comes first, while the data become theory-laden observations.
|
| 105 |
+
|
| 106 |
+
Our *Critical Rationalist Networks* (CRNs) tackle the EL scientist problem introduced in Sec. [2](#sec-el){reference-type="ref" reference="sec-el"}: to find $y=\mathbf{1}_{P_0}(x')$ given $x'$, $D_0$, $\{D_1, \dots, D_n\}$, $\{e_1, \dots, e_n\}$. They are formed by two independently trained models:
|
| 107 |
+
|
| 108 |
+
\(i\) A stochastic *Conjecture Generator* $$\mathcal{CG}: \{ (x,\mathbf{1}_{P}(x))_j \}_{j=1}^k \mapsto e \,,$$ taking $k \leq |D_0|$ pairs $(x,\mathbf{1}_{P}(x)) \in D_i$ as input, and returning an explanation string $e \in \Sigma$ as output. $\mathcal{CG}$ is trained to maximize the probability that $\mathcal{CG}(\tilde{D}_i) = e_i$ for all $i=1,\dots,n$, where $\tilde{D}_i \subset D_i$ is a random sampling of $D_i$, and $|\tilde{D}_i| = k$.
|
| 109 |
+
|
| 110 |
+
\(ii\) A learned *Interpreter* $$\mathcal{I} : (e, x) \mapsto \hat{y}\,,$$ which takes as input a string $e \in \Sigma$ and a sample $x \in U$, and outputs a prediction $\hat{y} \in \{0,1\}$. $\mathcal{I}$ is trained to maximize the probability that $\mathcal{I}(e_i, x) = \mathbf{1}_{P_i}(x)$, with $i = 1, \dots, n$ and $(x,\mathbf{1}_{P_i}(x)) \in D_i$.
|
| 111 |
+
|
| 112 |
+
At test time, we are given a trained $\mathcal{CG}$ and a trained $\mathcal{I}$, and we must predict whether some $x' \notin D_0$ belongs to $P_0$ or not. The idea is to first generate $t$ conjectures by applying $\mathcal{CG}$ $t$ times to the dataset $D_0$; then, each conjecture is verified by counting how many times the interpreter $\mathcal{I}$ outputs a correct prediction over $D_0$. The conjecture with the highest hit rate is our candidate explanation $\hat{e}_0$ for $P_0$. Finally, we obtain the prediction $\hat{y}'$ as $\mathcal{I}(\hat{e}_0, x')$. See Figure [3](#fig-implementation){reference-type="ref" reference="fig-implementation"} (left) for step-by-step pseudocode.
|
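
In code, the test-time procedure reads roughly as follows (a sketch of the loop just described; `cg_sample` and `interpret` stand for the trained $\mathcal{CG}$ and $\mathcal{I}$, and the perfect-fit test for returning *unknown* is one possible choice):

```python
def crn_predict(cg_sample, interpret, D0, x_new, t=100):
    """Generate t conjectures, keep the one with the highest hit rate on D0,
    then answer the query with the interpreter."""
    def hits(e):
        return sum(interpret(e, x) == y for x, y in D0)
    conjectures = [cg_sample(D0) for _ in range(t)]
    e_best = max(conjectures, key=hits)
    if hits(e_best) < len(D0):
        return "unknown"  # no conjecture survives all tests: refuse to answer
    return interpret(e_best, x_new)
```
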
| 113 |
+
|
| 114 |
+
<figure id="fig-implementation">
|
| 115 |
+
<div class="overpic">
|
| 116 |
+
<p><span>./figures/impl.png</span> (52,38)<span><span class="math inline">ℐ</span></span> (47.5,3)<span><span class="math inline">𝒞𝒢</span></span> (91,1.2)<span>EMP-R</span></p>
|
| 117 |
+
</div>
|
| 118 |
+
<figcaption><strong>Left:</strong> Test-time algorithm of CRNs. <strong>Right:</strong> CRNs are implemented using encoder-decoder transformer blocks; parameter details are in Appendix <a href="#sec:trans" data-reference-type="ref" data-reference="sec:trans">9</a>. <strong>Right-top:</strong> <span class="math inline">ℐ</span> denotes the interpreter model (rule encoder and label decoder). <strong>Right-bottom:</strong> The conjecture generator <span class="math inline">𝒞𝒢</span> is composed of the blue blocks. The “radical empiricist” (EMP-R) is composed of the orange blocks. The “conscious empiricist” (EMP-C) baseline model consists of all the transformer blocks in the right-bottom figure: the board encoder with rule and label decoders (all the blue and orange blocks).</figcaption>
|
| 119 |
+
</figure>
|
| 120 |
+
|
| 121 |
+
The interpreter $\mathcal{I}$ is a crucial component of our approach. A poor $\mathcal{I}$ may fail to identify ${e}_0$ among the generated conjectures, or yield a wrong prediction $\hat{y}'$ even when given the correct ${e}_0$. On the other hand, we can work with a $\mathcal{CG}$ of any quality and safely return an *unknown* token, rather than a wrong prediction, whenever ${e}_0$ does not appear among the generated conjectures. The role of $\mathcal{CG}$ is to trade off performance against computational cost, controlled by the parameter $t$: larger values of $t$ imply more generated conjectures, corresponding to exhaustive search in the limit (as done, e.g., in @clip). This potential asymmetry in quality between $\mathcal{CG}$ and $\mathcal{I}$ is tolerated, since the learning problem solved by $\mathcal{CG}$ is generally harder.
|
| 122 |
+
|
| 123 |
+
Secondly, although a CRN is implemented using neural networks, as we shall see shortly, its working hypothesis does not coincide with a snapshot of the network's countless parameters; rather, the working hypothesis is just the small conjecture under analysis at a given moment. This way, the CRN hypothesis is detached from the model and can only be accepted or refused in its entirety, rather than being slightly adjusted at each new data sample (Figure [2](#fig-teaser){reference-type="ref" reference="fig-teaser"} C, the hypotheses are in orange).
|
2204.08453/main_diagram/main_diagram.drawio
ADDED
|
@@ -0,0 +1 @@
|
| 1 |
+
<mxfile host="app.diagrams.net" modified="2022-03-07T03:47:42.911Z" agent="5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/99.0.4844.51 Safari/537.36" etag="jDDw9rbD_VxysxURKCZa" version="16.6.6" type="google"><diagram name="SR-2" id="eBosDk3r1usDjvO6YGeX">7Z1vd9o4Foc/DefsvmCPJf9/2SZtOpud6expp52+6iHgEKYEU+JMkn76NcEigCW4G4p+CrpveoJrHMXSY11Jj6474cn1/dmsN736tRwU444MBved8LQjZVfI+t/5gYfFgSgJFgeGs9FgcUg8Hfgw+lE0B9Vpt6NBcbN2YlWW42o0XT/YLyeTol+tHevNZuXd+mmX5Xj9t057w+Y3Bk8HPvR746J12ufRoLpaHM1k+nT8XTEaXqnfLJJ88T/XPXVyc4mbq96gvFv5XeGbTngyK8tq8dP1/Ukxnt87dV9uq9/LLM7652dnyfn4w0UYjb92F6V8+/98ZfknzIpJ9XMv3VTu373xbXO/mr+1elA3cDgrb6fNacWsKu511da7GG/Wwo7CiuUdrFteUV4X1eyh/l5z9SxurtO0urD5ePdUhUnQHLtaqb44aw72mmYzXF766dbUPzR3R3+n/vg+ff/pfTLNph/FsP+1Wz7cyG6++07VV6lbdf3h9d3VqCo+THv9+f/c1VzVx66q6/oXnor6x97NdNHUL0f3Rf1LX99Us/Lbsn3OT6nva9UbTYpZ/TlYnnFSjsvZ4y8Lk35WXFzW/3M5Go9XjqdvXiVv5t8wVtnOqpFbq0ZE8VrVZFGraoSmZsShKibSVMSgfgQ0H8tZdVUOy0lv/Obp6Ou6TU8G81v/eHOfzvlPWU6bGvirqKqH5nnWu63K9Sos7kfVn/Ov/ytuPn1pLjb/+fR+9cOD+jCp/96VL80/flHXm394+trjp4dl3fdmVVOS5PF/B6/mT8b646ScFIsjb0fz23b6VPPzm/CMeq9vZHk76xdb7riqyrpYw6LadmKkb0mzYtyrRn+vF++nNwxVTEZWBG4hm4h2RdhDVqwCu8R3F7KrwK7w+1KQVaHXTmQDfUuyhKxgZJvoxi1i1SiAK0a6VTFJ6E70I4iP0vXoJ3hpj9KQ+CjNoU/SkIHtaMcrEXq84hCxnoxXImrwk0GRjRhZxZ5byMYZFNnnjFdE52WPV2IqsgkU2ZiR7ejGK3Bic3c6WU/C4oRKrKEhWSI2YWI7uoEsmtjEoWn87cQ6R15KJS+EkpcyeR39iBRLniKfZ+OtEZtRiU2hxGZMrGLPKWJ19eBmX3k00W1OJTaGEsuWimE8CgaWF7wtAyvJjgp0mUayo2IYjmKBjaExsY+LNJI6ipXQRRrJo1h1lcwtEyKOXwqyzqFHHY5KbF/Jw1EDeui5W8nDDlPNYCsmSV/KM/FYwpiQOu6Q0Km9kMcdBmIFmtnYock9T5ilyvEhVI5XxWRmW8wmaGZ5fs82s5LKrGGjqSVmJTPbXCVyjdmAmbXMLHXnRGhoSpaY5a0ThmVvOLMhK/L69U18zUBnX718mlKF2xC6Cz9k4Va/wmkT2cEvVfdEfn84f1Wc9f/95fzhx827rjh+K2EXo5tM/0RmtbdcM2jRnmdrU8u2QjoD7EUWR3HQBvYy6xf9/iGBzbM1YrthC9ikzWtyqPGKY9XizHMUPWErHVp49sTGpE7+QGXMBDrB4GO7oG4aDaGbRnkMaxjDoh/kIXabt48rb9TMDNghrEObif1oF+SEHdjFHX6QqyeyY0/yJHGHWD9CL/LSDnQPsmtTG+6sxsKJdWjDhR/EUnc0GhqSpTE0dm5lNfDaMUXtWvVSp0hsPY/1SxACKij7uAShcR3NqwCwduFYP41bgkjTtX7a2hKEAdej76Vdw1WznUBfNbamureVknmVsVO86uqBeT0krxopR1810CV+15wcGK+hAC3x6xtFevROsmu8arIOaM+zlXRgWyEZ1zB0CVfX9qPjqiV2qVpE6lCGfT+iHt0bwfQn2sogsbWYTGyIUhv1c00hxz22iaXOA1pL1721mExs5NZIRXK9NPXiVEi6XMfl2Mfak1SjPhh6OeiT1DVbyZltHeqVEKg1FYdcpcPYhc4Rq9ELzcuQMGBd88SdWQRFA3v0W52dA1azsWMLMzBiXdui58wyKJhYiZ0QfA6x4oUTS14HxQbFvBBqWAgFE5sc/ZYb54ilroTaylu6tZQM7OZSKBrYo39FrnPAarK2b2EGRiyvkhtWycHEptiZYh+J1Wy50c8UGxqSJWJdy+bvzCo5llgpOSi2na+L7LVAp4qde4uYM6vkaGI5KrZNLHl/m61c/luLycRu+hPoqPjok7k5R6wkEmstK/jWYjKxYeoUsdj0iz4ux0qy8YTNY8vGk8F4CrHEYm1vL6NiqvEkodvIJStPS+XJLWKhmeK9jIqpylOIJZaVJ4PyBCZWXZmJtUYsVXmyln16azGZ2E3lCUwsNmepl+NYqvIkodkfJDtPBucJTezRv13dOWLJzhOWWHaeDM4TmNjUIWI9iYqpzlMIzQyiisnEbjpPWGIldhzrY1QcUp0na29W31pMJnbTeUIT61Af60dUrGbnd49jscSy82RwntBRMfextomVVGKhUbEqJhO76TyhV3fYK7ZNLNV5CqEpKEJ2ngzOUwwmlvfu2CaW7DxhiWXnyeA8gYkNNfXAc8UHJZbqPEVQ5ylk58ngPKH7WOzMk49RMdl5gqZ5UsVkYjedJzCx4Nd7+hgVk50naNaYkJ0ng/OEJpazxtgmluo8hVhi2XkyOE9gYlOeK7ZNLNV5igwNyRKx7DwZnCcssRK7o93HqDgiO0/QrDERO08G5wlNLEfFtomlOk8Rllh2ngzOEzoqdmg91o/VnUgSiY2hUbEqJhO76TyBiY2h41gfLcWI6jxF0B3tETtPBucpBRsU/C5K28RSnacE6jxF7DwZnCc0sdAXZXnZx5KdJyyx7DwZnCcwsQk0M5uXxFKdpwi6HquKycRuOk9gYnOOim0TS3WeEiyx7DwZnCd0H4t9fayP67Fk5wma5yli58ngPIGJzThfsW1iqc5TAt3RrorJxG46T1hipa4eeBx7SGJjqvMUQ7PGxOw8GZwnNLEcFdsmluw8YYll58ngPKGjYuh7d7zsYyWVWGhUrIrJxG46T2BiY86laJnYhOw8QXe0J+w8GZynHGxQYPfuPKePfeE7ARKy8wTNGpOw82RwntDE8puybBNLdZ5SqPOUsPNkcJ7AxMZYYn3sY6nOU4KNitl5MjhPYGJzfke7bWLJzhPUoEjYeTI4T2hiHYqKPZl5ojpPEZZYdp4MzhOY2IzzPNkmluo8CWFoSZaQZenJID1hkV3+VdzJ2kI2JUtP0LQxKUtPBukJTSxUofCxk02p0lOKJZalJ4P0hJ4shm6383HqKZVUYqEL
sqqYTOymQiFEDEVWJaZ1ISz2Y0U2pVpPGXR9J2XryeBQwJHl5Gy2kaVqTykWWdaeDBIFGtmYs4zbRpbqPSXQfIope08GiwKMrAyws08+BsZU8SnDIsvik0GjQPeyOXYs+5zA+KVPGFPNpxSLLJtPBo8CjqymIriXPSiyVPUpg6pPKatPBo8CHRgLnn6yjSzVfcqwy7KsPhlECjiyDi3y+DH9lFHdpxSaPiZj98lgUsADY841bhtZsvwEDYxVMRnZzfwxaGRjTtJmG1lJRdbQkiwhKxnZxVda9lOIRTZixdg2smT7CZqPImP7yWQ/oZF1aB+PHzPGGdV+yrHIsv1ksp/AyMaaiuBe9qDIUu2nDBsYs/1ksp+wyMqAe1nbyFLtpxyqUmRsP5nsJ3Avm2NzUvg4/US2n6DvusvYfjLZT2hksTPGPgbGVPspx67Lsv1ksp/AgbHgtwTYRpZqP6XQ93qoYjKyLfsJjSynMLaMbE61nzIosjnbTyb7CR0YY3tZH5Gl2k8ZdCyrisnItuwn9LqsQ0mM/ZgxVnundqdEDQxNyRKzkpldfKWlP8VgZvlVAbaZpepPORZZ1p9M+hMaWU1FcDd7UGSp+pMIoP5Tzv6TyX8CMxs7JFP4MWeck/0naFrUnP0nk/+ERVYGDu3l8aSbpfpPIoAKUDkLUCYBCs0sdAbKy9EsVYASAVSnyNmAMhlQYGYFb3O3zSzZgMKuzbIBZTKg0MjyDJRtZKkGlAiwzLICZVKg0Mxy/nHLzIqA6kDl0KRty3IytC0JCj2edUg19qOjFWrbI+Ets9AB7bKgTG3Lg0Iv0EKp9XFE2+hNpPgYutyzLChT2zKhUjC10DdX+hkgk1Uo6Eu1luVkaFsuFBpa6CKtn10tXYYCU8s2lMmGQlPLeSqsU0vVoXJwfMw+lMmHwkIrA36zu3Vo6UIUdEvesqBMbcuIQlPLyz7WqaUqUTkYWlaiTEoUGFrh0ItnfYGW7ERBX/2xLCdD25Ki0NDyVJR1aMlWlAAbFqxFmbQoNLW86mObWkHVohoxCUatYC/K5EWhR7U597W2qSV7UQE2QhbsRZm8KPCyT+7QsNYTm1Ht1Ni9Cx4rM6pyMrSb0KrNxgdn9o/v0/ef3ifTbPpRDPtfu+XDjeymRz+o3URyBeGLcdn/9vFqNFnjWOzBrdrquMKt/rbvm0am+erv5aguz7Jt/db7ba1tLT+rSyweE823NprOshh7tKajX/dHtKbVZ7v2tu+dr1PfmoRU+SZVfKEyU6lrLNr5wZpTdvSv7nSzOe2rdT3z4XTo1uTQTND26NTJLmvvNEqmh0y64yFz4D5Lve2KIyC7zekwfRY6AsocioAOMwR2szXtuyne8HASOfbhhJ1R8fbhtPebodx8OAnsKz+9bU77J2d3tT3x7BGmPR3n9JE4/vR+jran4wzGlzKd+xNITk4E7Z8BAz4TpF+NSjWLT8f1nLG1UKl5xOit2naTM8gFe7a4/bKbLa7rwyKlgezlrJxcX6WUzV4CkFmQHv3AxTVgybq8JV4vzs5/SX+cX4pPf82Ks8931XB209X4e/HJP+SkPlb/UI2ui5v6x/pzfPLPVgOqkak2kB2PhnUve9qvb+EcytdzsEb93vhV8x/Xo8Fg0bSKm9GP3sXjpea1M513VY9/Xfy6E5/Or1W3pptFdYkW2k0Nr1KtDtWPhKaWRXZIwEUYtIgWUdBmWp23T+V1T85Pxu9Gb//88i7Ng8//vci+FYbK26g7rrpW1eUqnV/zaI7a9Sh/Tj3WH2dlWa0GX3U3d/VrOSjmZ/wP</diagram></mxfile>
|
2204.08453/main_diagram/main_diagram.pdf
ADDED
|
Binary file (27.6 kB). View file
|
|
|
2204.08453/paper_text/intro_method.md
ADDED
|
@@ -0,0 +1,22 @@
|
| 1 |
+
# Introduction
|
| 2 |
+
|
| 3 |
+
In any form of digital communication, information is transmitted via a sequence of discrete symbols. This includes images and videos, even though they are inherently signals with two spatial dimensions (2D). The modus operandi for transmitting such signals is to (1) efficiently encode and quantize their values in the spatial or spectral domain, (2) linearize the signal into a one-dimensional (1D) sequence using a standard scanning order such as raster, zig-zag, or Hilbert curve order [@hilbert1935stetige], and finally (3) apply a Shannon-style [@shannon1948mathematical] entropy coding technique such as Arithmetic coding [@witten1987arithmetic] or Huffman coding [@huffman1952method] to further compress the 1D sequence. Given the ubiquity of images and videos in our lives, a large amount of effort has gone into optimizing each of these steps. The focus of this work is the second step: linearizing the 2D spatial signal into a 1D sequence. A continuous scan order that traverses all spatial locations of a two- or higher-dimensional signal exactly once is known as a space-filling curve (SFC) [@peano1890courbe].
|
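
For intuition, step (2) amounts to choosing a visiting order over pixel coordinates. A minimal sketch of two universal orders (raster and the boustrophedon "S-curve"; the Hilbert curve is omitted for brevity):

```python
import numpy as np

def raster_order(h, w):
    return [(r, c) for r in range(h) for c in range(w)]

def s_curve_order(h, w):
    """Alternate the row direction so that consecutive positions in the
    1D sequence stay spatially adjacent in 2D."""
    return [(r, c) for r in range(h)
            for c in (range(w) if r % 2 == 0 else range(w - 1, -1, -1))]

img = np.arange(16).reshape(4, 4)
seq = [img[r, c] for r, c in s_curve_order(4, 4)]  # linearized signal
```
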
| 4 |
+
|
| 5 |
+
<figure id="fig:teaser" data-latex-placement="!h">
|
| 6 |
+
<embed src="figures/teaser-latest.pdf" style="width:90.0%" />
|
| 7 |
+
<figcaption>Given a set of images, a gif, or a video, Neural Space-filling Curves (SFCs) can provide a more spatially coherent scan order for images compared to universal scan orders such as the S-curve or the Peano-Hilbert curve. As shown in the examples of a trouser and a face, the scan line tends to cover the background before moving to the foreground. (SFCs generated here using half-resolution images and resized for clarity. Best viewed in color.)</figcaption>
|
| 8 |
+
</figure>
|
| 9 |
+
|
| 10 |
+
Prior works have proposed various space-filling curves (SFCs) over the last hundred years, most of them context-agnostic, *i.e.*, completely defined by the size and dimension of the space without taking into account the information residing in it, *e.g.*, pixels in the case of two-dimensional images. These universal, context-agnostic SFCs are typically defined recursively to ensure simplicity and scalability. Some SFCs also have spatial coherence properties and have been used in various image-based applications [@moon2001analysis; @kamata1993implementation; @ansari1992image; @thyagarajan1991fractal].
|
| 11 |
+
|
| 12 |
+
However, in many applications such as video conferencing, health care, or social media, the images being transmitted are often repetitive, with a similar layout and content transmitted over and over again with minor variations. GIFs are another good example: they consist of highly repetitive content and need to be stored efficiently and, often, losslessly. Since universal SFCs do not utilize the intrinsic information of the image content, they are far from optimal for a single image or a set of images with repetitive structure (refer to [1](#fig:teaser){reference-type="ref+label" reference="fig:teaser"} for an example). Dafner *et al.* [@dafner2000context] proposed SFCs that exploit the inherent correlation between pixel values in an image. Our work improves upon Dafner *et al.* in two aspects:
|
| 13 |
+
|
| 14 |
+
- Instead of discovering a single SFC for every image independently, we propose a data-driven technique to find optimal SFCs for a set of images. We postulate that context-based SFCs are more suitable for linearizing a group of images (or a short video/gif), since the cost of storing the SFC itself can be amortized by the number of images.
|
| 15 |
+
|
| 16 |
+
- We devise a novel alternating minimization technique to train an SFC weights generator, which allows us to optimize for any given objective function, even when not differentiable.
|
| 17 |
+
|
| 18 |
+
To the best of our knowledge, ours is the first work to propose a machine learning method for computing context-based SFCs and opens new directions for future research on optimal scanning of 2D and 3D grid-based data structures such as images, videos, and voxels. We demonstrate both quantitatively and qualitatively the benefit of our approach in various applications.
|
| 19 |
+
|
| 20 |
+
# Method
|
| 21 |
+
|
| 22 |
+
We first describe the algorithm for computing an SFC for a single image, as proposed by Dafner *et al.* [@dafner2000context], in [3.1](#sec:cbsfc){reference-type="ref+Label" reference="sec:cbsfc"}. We then extend this treatment to a more general setting where we can optimize the SFC for any non-differentiable objective function over multiple images in [3.2](#sec:nsfc){reference-type="ref+Label" reference="sec:nsfc"}. The rest of [3](#sec:approach){reference-type="ref+Label" reference="sec:approach"} describes the major components of our framework and the training procedure in detail.
|
2205.11029/main_diagram/main_diagram.drawio
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2205.11029/paper_text/intro_method.md
ADDED
|
@@ -0,0 +1,145 @@
|
| 1 |
+
# Introduction
|
| 2 |
+
|
| 3 |
+
<figure id="fig:system_example" data-latex-placement="t">
|
| 4 |
+
<embed src="case.pdf" style="width:47.0%" />
|
| 5 |
+
<figcaption>An example of the GUI-based task-oriented dialogue system (GUI-TOD). The Action Executor executes tasks on the GUI, and the system generates a response based on the execution result.</figcaption>
|
| 6 |
+
</figure>
|
| 7 |
+
|
| 8 |
+
Recent years have witnessed the rapid development of task-oriented dialogue systems [@zhang2020recent; @ni2021recent; @chen2022dialogzoo; @chen2017survey]. They have been widely applied to customer support, booking systems, and especially intelligent personal assistants. These task-oriented dialogue systems work in a similar pipeline: first identify the user intent, then extract the necessary information through slot-filling. After gathering enough information for the task, the agent calls backend APIs (provided by APP developers) to fetch information, and then generates a response based on the query result.
|
| 9 |
+
|
| 10 |
+
This framework has some drawbacks. Firstly, TODs rely on publicly accessible APIs, or APIs designed specifically for TODs, to perform tasks; such APIs may not exist in real-life APPs, which hinders the application of TODs. Secondly, the system must be customized to recognize the pre-defined API-related slots, which limits its generality.
|
| 11 |
+
|
| 12 |
+
::: table*
|
| 13 |
+
| Action | Description |
|----------------------------|--------------------------------------------------------------------------|
| `Click`(`item` = $x$) | Click the item with index $x$ on the screen. |
| `Swipe`(`direction` = $x$) | Swipe the screen towards direction $x$, which is either "up" or "down". |
| `Input`(`text` = $x$) | Input the text $x$ to the smartphone. |
| `Enter`( ) | Press the "Enter" button on the keyboard. |
| `Clear`( ) | Clear the current input box. |
| `Back`( ) | Press the "back" button on the smartphone. |
| `End`( ) | The turn is finished; control passes to the Response Generator module. |
|
| 22 |
+
:::
|
| 23 |
+
|
| 24 |
+
Consider how humans perform tasks on smartphones. They don't need a parametric API but finish tasks by interacting with the GUI (graphical user interface), indicating that the GUI is a more general interface. Previous studies explore how to translate natural language commands into GUI operations [@mazumder2021flin; @pasupat2018mapping; @xu2021grounding]. These studies focus on a single query and step-by-step operations, while in real scenarios the interaction is multi-turn and there is no clear instruction about how to execute the task. Etna [@riva2021etna] and SUGILITE [@li2017sugilite] are two systems that support learning GUI operations from demonstrations, but these systems are script-based and sensitive to changes in the GUI and workflow. Duplex on the web [@duplex] can directly operate a website to perform a required task, for example booking a movie ticket. However, it only supports limited websites, and it is more a unified GUI interface than a task-oriented dialogue system enabling general GUI operation.
|
| 25 |
+
|
| 26 |
+
To this end, we propose the task of GUI-based task-oriented dialogue systems (GUI-TOD). It supports multi-turn conversation and direct GUI operation. All tasks are performed on the GUI of real APPs, which means we no longer need TOD-specific APIs to communicate with APPs, and it becomes possible to apply TOD to any APP. Since there is no published benchmark available, we collect META-GUI, a dataset with dialogues and GUI traces on real Android APPs. A GUI trace is a series of GUI operations, including screenshots, Android view hierarchies, and actions. An Android view hierarchy is an XML-style file which organizes the content of the GUI through a hierarchical structure; it also contains the types of the items on the screen and their bounding boxes. An example is shown in Appendix [10](#view hierarchy){reference-type="ref" reference="view hierarchy"}. When a user requests a task, the system should open the related APP and execute the task through multiple operations on the GUI. This requires a comprehensive understanding of the GUI structure and interaction logic. An interaction example is shown in Figure [1](#fig:system_example){reference-type="ref" reference="fig:system_example"}.
|
| 27 |
+
|
| 28 |
+
We focus on building an agent with general ability to operate GUI, rather than optimize for specific APPs. Our proposed GUI-TOD system leverages both the visual information and textual information on the screen to predict the next action to be executed and generate the system response. Our experiments show that the GUI-TOD outperforms heuristic baselines by a large margin, with an action completion rate of 82.74%.
|
| 29 |
+
|
| 30 |
+
Our contributions are as follows:
|
| 31 |
+
|
| 32 |
+
- We propose a GUI-based task-oriented dialogue system, which can perform tasks on mobile APPs through multiple operations on GUI.
|
| 33 |
+
|
| 34 |
+
- We collect META-GUI, a dataset with dialogues and GUI operation traces serving as the benchmark for the proposed system.
|
| 35 |
+
|
| 36 |
+
- We conduct thorough experiments on our dataset and validate the importance of multi-modal information and history information. We show that it is a promising task but needs further exploration.
|
| 37 |
+
|
| 38 |
+
<figure id="fig:gui-tod" data-latex-placement="h">
|
| 39 |
+
<embed src="systempipeline_en.drawio.pdf" style="width:47.0%" />
|
| 40 |
+
<figcaption>The overview of GUI-based task-oriented dialogue system (GUI-TOD).</figcaption>
|
| 41 |
+
</figure>
|
| 42 |
+
|
| 43 |
+
The overview of GUI-TOD is shown in Figure [2](#fig:gui-tod){reference-type="ref" reference="fig:gui-tod"}. It consists of two sub-modules: the Action Executor (AE) and the Response Generator (RG). The traditional task-oriented dialogue system [@chen2017survey; @zhang2020recent; @yu2014cognitive] splits the task into natural language understanding (NLU) [@zhu2021few], dialogue manager (DM) [@chen2020schema; @zhu-etal-2020-efficient; @chen2018policy; @chen2019agentgraph; @chen2020distributed], and natural language generation (NLG) [@keskar2019ctrl]. We omit the NLU module and directly send user utterances to AE. The AE module plays a role similar to DM: it executes the requested task by interacting with the GUI over multiple rounds, whereas DM accomplishes this by calling TOD-specific APIs. The RG module generates the system response based on the execution results, the same as NLG. The process of executing a task is a series of GUI operations, including click, swipe, etc. The task of the AE module is action prediction, which aims at predicting the next action to be performed on the GUI, while the RG module focuses on generating the system's response after executing a task. A major improvement of GUI-TOD is that it does not rely on a pre-defined domain ontology. Conventionally, the DM module identifies a set of slot-values from the user utterance, which serve as the parameters for backend APIs. GUI-TOD, however, handles task-specific slot-values during the execution of tasks: when the APP requires a certain input (for example, entering the time and destination), the system can obtain the information by understanding the current user utterance, or generate a response to ask for it. Compared with CUED actions [@young2007cued] in traditional TOD, actions in GUI-TOD are GUI-related operations rather than communication actions between user and system.
|
| 44 |
+
|
| 45 |
+
Formally, the action prediction task can be defined as: given the GUI trace and dialogue history, predict the next action to be performed. We define the set of actions that can be performed on the APPs in Table [\[action_table\]](#action_table){reference-type="ref" reference="action_table"}. All actions take the form $Action(parameter=*)$. There are seven types of $Action$, including six physical actions: *click, swipe, input, enter, clear, back*, and one virtual action: *end*. The corresponding parameters are listed in Table [\[action_table\]](#action_table){reference-type="ref" reference="action_table"}. The $end$ action is the last action of every GUI trace and marks the end of the GUI operations; after an $end$ action is generated, GUI-TOD moves to the RG module. We denote the *j*th action in turn *i* as $\mathcal{A}_{i,j}=\left( t, p \right)$, where *t* is the action type and *p* is the corresponding parameter. $\mathcal{S}_{i,j}=\left( s, v \right)$ is the *j*th screen in turn *i*, including the screenshot *s* and the view hierarchy *v*. The dialogue in turn *i* is represented as $\mathcal{D}_{i}=\left( U_i, R_i \right)$ where $U_i$ is the $i$th user utterance and $R_i$ is the $i$th system response. The action prediction task is formulated as: $$\begin{equation}
|
| 46 |
+
\mathcal{A}_{i,j}=\mathcal{F}\left( \mathcal{S}_{1:i, 1:j}, \mathcal{A}_{1:i,1:j-1}, \mathcal{D}_{1:i-1}, U_i \right),
|
| 47 |
+
\end{equation}$$ where $1:i$ means from turn $1$ to $i$, $\mathcal{F}$ is a trainable action model, which we discuss in [4.1](#actionmodel){reference-type="ref" reference="actionmodel"}. The RG module takes the GUI trace and dialogue history as input, then generates a response based on the execution result and context. Denote the set of actions in turn $i$ as $\mathcal{A}_i$, the screens in turn $i$ as $\mathcal{S}_i$, the response generation task is formulated as: $$\begin{equation}
|
| 48 |
+
\mathcal{R}_{i}=\mathcal{G}\left( \mathcal{S}_{1:i}, \mathcal{A}_{1:i}, \mathcal{D}_{1:i-1}, U_i \right),
|
| 49 |
+
\end{equation}$$ where $\mathcal{G}$ is the response generator model, which we discuss in [4.2](#responsemodel){reference-type="ref" reference="responsemodel"}.
|
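
For concreteness, the objects in this formulation can be written down as simple types; the sketch below is ours (not part of any released code), with the models $\mathcal{F}$ and $\mathcal{G}$ left abstract:

```python
from dataclasses import dataclass
from typing import List, Optional

@dataclass
class Action:
    type: str                        # click / swipe / input / enter / clear / back / end
    parameter: Optional[str] = None  # item index, direction, or input text

@dataclass
class Screen:
    screenshot: bytes    # the image s
    view_hierarchy: str  # the XML layout v

def predict_next_action(screens: List[Screen], actions: List[Action],
                        dialogue: List[str], utterance: str) -> Action:
    """The trainable action model F in the formulation above."""
    raise NotImplementedError
```
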
| 50 |
+
|
| 51 |
+
Our dataset consists of two kinds of data: dialogues and GUI operation traces. In each dialogue, the user asks the agent to complete a certain task through multi-turn interaction. Our tasks involve six different domains: weather, calendar, search, taxi, hotel and restaurant. In this paper, we consider APPs that accomplish the same kind of tasks to be in the same domain. To enhance the diversity of our dataset, we use multiple APPs for the calendar and weather domains. The details of the APPs are listed in Appendix [8](#app_data){reference-type="ref" reference="app_data"}.
|
| 52 |
+
|
| 53 |
+
<figure id="fig:item_type" data-latex-placement="h">
|
| 54 |
+
<embed src="item_type.pdf" style="width:100.0%" />
|
| 55 |
+
<figcaption>The distribution of the total number of items versus the clicked one for each item type.</figcaption>
|
| 56 |
+
</figure>
|
| 57 |
+
|
| 58 |
+
We collected our data in two stages: first, we collected GUI traces for existing dialogues; then, we collected both dialogues and GUI traces.
|
| 59 |
+
|
| 60 |
+
In the first stage, we provided dialogues to annotators and instructed them to perform the tasks on real APPs. We started by extracting dialogues from the SMCalFlow dataset [@andreas2020task]. SMCalFlow contains multi-turn task-oriented dialogues and is known for complex reference phenomena that require a comprehensive understanding of context. We extracted dialogues from the calendar, weather and search domains. Six annotators were recruited to label the GUI traces. We built a web-based annotation system, which was connected to a real Android smartphone (see Appendix [9](#annotation system){reference-type="ref" reference="annotation system"}). Annotators can see the current screen of the smartphone in the system and control the smartphone by clicking buttons. A dialogue is shown in the system; annotators first read the dialogue, and were then allowed to explore how to finish the task (e.g., checking the weather) on the smartphone. If the task requirement in the dialogue conflicted with the real-world scenario (for example, creating an event in the past), the annotators could change the content of the dialogue to make the task achievable. Once ready, they used the annotation system to record the actual process of executing the task. Each operation was recorded, and the screenshot after each operation was saved together with the view hierarchy.
|
| 61 |
+
|
| 62 |
+
In the second stage, we collected dialogues and GUI traces for the hotel, restaurant and taxi domains. Because no dialogues for these domains are available in previous datasets, we asked annotators to write new dialogues. We selected three experienced annotators from the first stage. Different from the first stage, the annotator was shown a task objective, generated randomly from all available conditions in the APPs. The annotators acted as user and system alternately to write dialogues according to the task objectives. To avoid annotators writing short and simple dialogues, we added constraints on the number of turns and the behaviors in the dialogue, e.g., adding a condition or changing a condition. An example of a generated target is shown in Appendix [12](#exampleoftarget){reference-type="ref" reference="exampleoftarget"}. After writing the dialogues, the annotators also recorded the corresponding GUI operation traces for each turn, in the same way as in the first stage.
|
| 63 |
+
|
| 64 |
+
After annotation, we manually reviewed the data. The checklist included: whether the recorded GUI traces match the dialogues, whether there are invalid operations due to system errors or misoperation, and whether there are redundant operations in the GUI trace. We manually fixed annotations with only small mistakes, and discarded tasks requiring significant modification. The dialogue-level pass rate is about $63.6\%$, and we finally obtained 1125 dialogues in total. For more information, please refer to Appendix [11](#datareview){reference-type="ref" reference="datareview"}.
|
| 65 |
+
|
| 66 |
+
The dialogues collected in the second stage were created by three annotators and thus lacked diversity in expression. Therefore, we published a dialogue rewriting task on AMT[^1] (Amazon Mechanical Turk) to polish the dialogues.
|
| 67 |
+
|
| 68 |
+
<figure id="fig:action_distribution" data-latex-placement="h">
|
| 69 |
+
<embed src="action_distribution.pdf" style="width:50.0%" />
|
| 70 |
+
<figcaption>The distribution of actions.</figcaption>
|
| 71 |
+
</figure>
|
| 72 |
+
|
| 73 |
+
During GUI trace annotation, valid Android view hierarchies could not be obtained for some APPs. To handle this problem, we used the online Optical Character Recognition (OCR) service provided by Baidu Cloud [^2] to detect all texts on the image with their corresponding positions and generate a pseudo layout file.
|
| 74 |
+
|
| 75 |
+
<figure id="fig:armodel" data-latex-placement="h">
|
| 76 |
+
<embed src="multimodalmodel.pdf" style="width:100.0%" />
|
| 77 |
+
<figcaption>The illustration of our proposed model. There are five parts in this figure: (1) encoder; (2) image feature extraction; (3) multi-modal information fusion; (4) the Action Module; (5) the Response Module.</figcaption>
|
| 78 |
+
</figure>
|
| 79 |
+
|
| 80 |
+
We extract items from the screen using the corresponding layout file. An item is a clickable leaf node. Similar to [@zhou2021large], we consider an item to be clickable if its `clickable` attribute is true or its parent node is clickable. An item consists of text content, item type and bounding box. We extract the text content of an item by looking at its `text` property first; if it is empty, we use its `content-desc` attribute, and if that is also empty, we fall back to the `resource-id` property. Based on the extracted items, we can locate the target item of a `click` action by comparing the click position with the bounding boxes of the items.
|
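
A sketch of this extraction step, assuming attribute names as found in standard Android UIAutomator dumps (`clickable`, `text`, `content-desc`, `resource-id`, `class`, `bounds`); the dataset's files may differ slightly:

```python
import xml.etree.ElementTree as ET

def extract_items(view_hierarchy_xml: str):
    """Collect clickable leaf nodes with the text -> content-desc ->
    resource-id fallback chain described above."""
    items = []

    def visit(node, parent_clickable=False):
        clickable = node.get("clickable") == "true" or parent_clickable
        children = list(node)
        if not children and clickable:
            text = (node.get("text") or node.get("content-desc")
                    or node.get("resource-id") or "")
            items.append({"text": text,
                          "type": node.get("class"),
                          "bounds": node.get("bounds")})
        for child in children:
            visit(child, node.get("clickable") == "true")

    visit(ET.fromstring(view_hierarchy_xml))
    return items
```
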
| 81 |
+
|
| 82 |
+
The total number of dialogues in our dataset is 1125, comprising 4684 turns. The average number of images per turn is 5.30, and the average utterance length is 8 words. On average, there are 23.80 items per image, and the average item text length is 2.48 words. The distribution of item types is shown in Figure [3](#fig:item_type){reference-type="ref" reference="fig:item_type"}. We also provide an example of each item type in Appendix [13](#exampleofitemtypes){reference-type="ref" reference="exampleofitemtypes"}. `TextView` and `ImageView` are clearly the two most frequent types, which indicates that our dataset is informative.
|
| 83 |
+
|
| 84 |
+
The distribution of actions is shown in Figure [4](#fig:action_distribution){reference-type="ref" reference="fig:action_distribution"}. `click` is the most frequent action, while `clear` is the least frequent, since only a small number of tasks require clearing the current input box. For the `click` action, we further compute the type distribution of the target items, shown in Figure [3](#fig:item_type){reference-type="ref" reference="fig:item_type"}: `TextView` and `Button` items are clicked most often, while 8 item types are never operated on. This implies that item types may supply some hints for predicting the target items. Besides, the average numbers of words for the `response` and the `input` action are 9 and 3, respectively.
|
| 85 |
+
|
| 86 |
+
The overview of our system is illustrated in Figure [5](#fig:armodel){reference-type="ref" reference="fig:armodel"}. It's composed of four components: encoder, image feature extractor, multi-modal information fusion module and the output module. The output module can be the Action Module or the Response Module.
|
| 87 |
+
|
| 88 |
+
We call the combination of the encoder, the image feature extractor, the multi-modal information fusion module and the Action Module the Action Model; it is used to predict the next GUI action based on the history. Next, we describe these modules respectively. For simplicity, we only consider the last screen of the screen history here; we discuss adding more screen history later.
|
| 89 |
+
|
| 90 |
+
The input of encoder consists of two parts: dialog history $\{\mathcal{D}_{1:i-1}, U_i\}=\{w_{1},...,w_n\}$ and texts in the items $\{m_{1,1:l_1}, \dots, m_{k,1:l_k}\}$. Items are extracted from the last screen, $k$ is the number of items and $l_i$ is the length of the $i$th item's text: $$\begin{align}
|
| 91 |
+
\begin{split}
|
| 92 |
+
X &= \{ w_{1:n};m_{1,1:l_1}, \dots, m_{k,1:l_k}\}, \\
|
| 93 |
+
\textbf{H} &= \text{TransformerEncoder}(X), \\
|
| 94 |
+
\end{split}
|
| 95 |
+
\end{align}$$ where $\textbf{H} = \left[\textbf{D};\textbf{M}\right]$ and $\textbf{D}=\{ \textbf{w}_1, \textbf{w}_2, \dots, \textbf{w}_n\}$ represents encoder outputs of the dialogue history, $\textbf{M}=\{ \textbf{m}_{1,1:l_1}; \dots; \textbf{m}_{k,1:l_k} \}$ represents encoder outputs of item texts.
|
| 96 |
+
|
| 97 |
+
Given a screenshot and its corresponding layout file, we use Faster R-CNN [@ren2015faster] to extract the feature map. Then we apply ROI pooling based on the bounding box of each item, and get the item-level image features $\mathbf{I}=\{\mathbf{I}_1, ..., \mathbf{I}_k\}$.
|
| 98 |
+
|
| 99 |
+
Given the encoder output and the regional image features extracted above, we concatenate them together. The text features of one item $\textbf{m}_{i,1:l_i}$ are concatenated with the corresponding item feature $\mathbf{I}_i$, and the $\mathbf{w}_{1:n}$ are concatenated with zeros. Then we use a Transformer encoder with $M$ layers to fuse the multi-modal features. Before each layer, to enhance the image information, we concatenate the image features with the output of the previous layer again to form the input of the next layer.
|
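
A PyTorch-style sketch of one such fusion layer; the dimensions, head count and the projection used to restore the model width are our choices for illustration, not the paper's:

```python
import torch
import torch.nn as nn

class FusionLayer(nn.Module):
    def __init__(self, d_text=768, d_img=256, n_heads=8):
        super().__init__()
        self.proj = nn.Linear(d_text + d_img, d_text)
        self.block = nn.TransformerEncoderLayer(d_text, n_heads, batch_first=True)

    def forward(self, h, img):
        # h: (B, T, d_text) token features; img: (B, T, d_img) regional image
        # features (ROI feature for item tokens, zeros for dialogue tokens).
        return self.block(self.proj(torch.cat([h, img], dim=-1)))

# Stack M layers, re-attaching the image features before each one:
#   for layer in fusion_layers: h = layer(h, img)
```
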
| 100 |
+
|
| 101 |
+
For the Action model, we need to predict the action type and its corresponding parameters. As shown in Table [\[action_table\]](#action_table){reference-type="ref" reference="action_table"}, there are 7 action types with 3 different parameters. We show some examples of parameter predictions in Appendix [14](#parameterpredictions){reference-type="ref" reference="parameterpredictions"}.
|
| 102 |
+
|
| 103 |
+
We use the encoder output of the `[CLS]` token for action type prediction. We apply a feed-forward network followed by a Softmax layer to predict the action type: $$\begin{align}
|
| 104 |
+
\begin{split}
|
| 105 |
+
\mathbf{p}_a &= \text{Softmax}(\text{FFN}_1(\mathbf{E_{[CLS]}})),
|
| 106 |
+
\end{split}
|
| 107 |
+
\end{align}$$ where $\mathbf{p}_{a}$ is the probability distribution of action, and FFN represents the Feed-Forward Network.
|
| 108 |
+
|
| 109 |
+
For the action parameter, we use three different classifiers:
|
| 110 |
+
|
| 111 |
+
1\) *Input Text Prediction* We assume that the input to the APPs must be part of the user utterance, so we formulate the prediction of input text as a span prediction task. We use two classifiers to predict the begin and end positions in the dialogue: $$\begin{equation}
|
| 112 |
+
\begin{split}
|
| 113 |
+
\mathbf{p}_{ds}=\text{FFN}_2(\mathbf{D}),
|
| 114 |
+
\mathbf{p}_{de}=\text{FFN}_3(\mathbf{D}),\\
|
| 115 |
+
\end{split}
|
| 116 |
+
\end{equation}$$ where $\mathbf{p}_{ds}$ and $\mathbf{p}_{de}$ are the probabilities of the start and end position, respectively.
|
| 117 |
+
|
| 118 |
+
2\) *Target Item Prediction* The target item classifier is based on the encoding outputs of items. We first computed the item representation by applying average pooling on the encoding outputs, then we use a feed-forward layer to compute the probability of selecting an item followed by a Softmax layer: $$\begin{equation}
|
| 119 |
+
\begin{split}
|
| 120 |
+
\bar{\textbf{m}}_i =& \ \text{Avgpooling}(\textbf{m}_{i,1:l_i}) \ \ 1 \leq i \leq k, \\
|
| 121 |
+
\bar{\textbf{m}} =& \left[\bar{\textbf{m}}_1, \dots, \bar{\textbf{m}}_k\right], \\
|
| 122 |
+
\mathbf{p}_{m} =& \ \text{Softmax}(\text{FFN}_4(\bar{\textbf{m}})),
|
| 123 |
+
\end{split}
|
| 124 |
+
\end{equation}$$ where $\mathbf{p}_{m}$ is the probability distribution of items.
|
| 125 |
+
|
| 126 |
+
3\) *Direction Prediction* The direction classifier is a two-class classification layer over the directions *up* and *down*: $$\begin{equation}
|
| 127 |
+
\mathbf{p}_d = \text{Softmax}(\text{FFN}_5(\mathbf{E_{[CLS]}})),
|
| 128 |
+
\end{equation}$$ where $\mathbf{p}_{d}$ is the probability distribution of swipe direction.
|
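
The three classifiers above can be sketched jointly; shapes in the comments follow the notation of the equations, and the module layout is illustrative:

```python
import torch.nn as nn

class ParameterHeads(nn.Module):
    def __init__(self, d):
        super().__init__()
        self.span = nn.Linear(d, 2)       # start/end logit per dialogue token
        self.item = nn.Linear(d, 1)       # one score per avg-pooled item
        self.direction = nn.Linear(d, 2)  # up / down

    def forward(self, D, m_bar, cls):
        # D: (B, n, d) dialogue tokens; m_bar: (B, k, d) pooled items;
        # cls: (B, d) encoding of the [CLS] token.
        p_ds, p_de = self.span(D).softmax(dim=1).unbind(dim=-1)
        p_m = self.item(m_bar).squeeze(-1).softmax(dim=-1)
        p_d = self.direction(cls).softmax(dim=-1)
        return p_ds, p_de, p_m, p_d
```
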
| 129 |
+
|
| 130 |
+
According to the task definition, besides dialogue histories, we can also use action histories and screen histories. To verify their usefulness, we add them to the action model. For action histories, we regard action types as special tokens and add them to the dictionary. We concatenate the most recent $H$ action types $\{t_{1:H}\}$ before the dialogue history as input: $$\begin{equation}
|
| 131 |
+
X = \{t_{1:H};w_{1:n};m_{1,1:l_1},\dots, m_{k,1:l_k}\},
|
| 132 |
+
\end{equation}$$ where $X$ stands for the input of Encoder, $t$ represents the action type.
|
| 133 |
+
|
| 134 |
+
For screenshot histories, we encode all the screenshots in a recurrent way. Assume $\hat{\mathbf{I}}_i=\left[\mathbf{I}_{i,1},...,\mathbf{I}_{i,k}\right]$ is the image feature of the $i$th screenshot, and $\bar{\mathbf{I}}_i$ is the history image feature at time step $i$. We compute $\bar{\mathbf{I}}_{i+1}$ by: $$\begin{equation}
|
| 135 |
+
\begin{split}
|
| 136 |
+
\bar{\mathbf{I}}_{i+1} = \text{Attn}(\mathbf{W}_1&\hat{\mathbf{I}}_{i+1},\mathbf{W}_2\bar{\mathbf{I}}_{i},\mathbf{W}_3\bar{\mathbf{I}}_{i}), \\
|
| 137 |
+
&1 \leq i \leq H-1,
|
| 138 |
+
\end{split}
|
| 139 |
+
\end{equation}$$ where $\bar{\mathbf{I}}_{1} = \hat{\mathbf{I}}_{1}$, $H$ is the length of the history, $\text{Attn}$ is the attention mechanism [@vaswani2017attention], and $\mathbf{W}_*$ are trainable parameters. We use $\bar{\mathbf{I}}_{H}$ to replace the image features in Figure [5](#fig:armodel){reference-type="ref" reference="fig:armodel"}.
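One possible rendering of this recurrence in PyTorch, assuming each screenshot is represented by a `(num_regions, dim)` feature matrix and single-head scaled dot-product attention, is sketched below; it mirrors the update above and is not the authors' code.

```python
import math
import torch
import torch.nn as nn

class RecurrentScreenHistory(nn.Module):
    """Folds a sequence of per-screenshot image features into one history feature."""

    def __init__(self, dim: int = 512):
        super().__init__()
        self.w_q = nn.Linear(dim, dim, bias=False)  # W_1
        self.w_k = nn.Linear(dim, dim, bias=False)  # W_2
        self.w_v = nn.Linear(dim, dim, bias=False)  # W_3
        self.dim = dim

    def attend(self, q, k, v):
        # scaled dot-product attention over the image-region axis
        scores = q @ k.transpose(-2, -1) / math.sqrt(self.dim)
        return torch.softmax(scores, dim=-1) @ v

    def forward(self, screenshots):
        # screenshots: list of H tensors, each (num_regions, dim)
        history = screenshots[0]                       # \bar{I}_1 = \hat{I}_1
        for current in screenshots[1:]:
            history = self.attend(self.w_q(current),   # query:  W_1 \hat{I}_{i+1}
                                  self.w_k(history),   # key:    W_2 \bar{I}_i
                                  self.w_v(history))   # value:  W_3 \bar{I}_i
        return history                                 # \bar{I}_H
```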
|
| 140 |
+
|
| 141 |
+
The Response Model aims to generate the response to the user. We use the Response Module as the output module; the other parts are the same as in the Action Model. Since the response is mainly determined by the execution results and the dialogue, we do not use action histories for the Response Model. For the Response Module, we use a Transformer Decoder with $N$ layers: $$\begin{align}
|
| 142 |
+
\begin{split}
|
| 143 |
+
\textbf{R} = \text{TransformerDecoder}(\left[\textbf{D};\textbf{M}\right]),
|
| 144 |
+
\end{split}
|
| 145 |
+
\end{align}$$ where $\textbf{R}$ represents the predicted response text.
|
2206.07043/main_diagram/main_diagram.drawio
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
<mxfile host="app.diagrams.net" modified="2022-06-14T12:53:20.211Z" agent="5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/102.0.5005.61 Safari/537.36" etag="-i1fRhcFvyojiCtsec0d" version="18.0.5" type="google"><diagram id="C5RBs43oDa-KdzZeNtuy" name="Page-1">7VpZc+I4EP41PEL5tnkcmByzk53a3WQ3k3kTtnwswiKySGAe9rdvy5ZtfASbFLhmaqAqwWq1uyX114fbjPT5anvD0Dr8nXqYjDTF2470jyNNUy1nCl+CspMU3bIySsAiT9JKwn30HUuiIqmbyMNJhZFTSni0rhJdGsfY5RUaYoy+Vtl8Sqpa1yiQGpWScO8ightsj5HHw4zqaHZJv8VREOaaVUvueIVyZik4CZFHX/dI+tVInzNKeXa12s4xEaeXn8vjp90juVtaN7/9mTyjv2efH778M86EXR9zS7EFhmN+WtF6JvoFkY08r5FmEVAy8ynogk3znTxJ63lD84lxktr5AzCo2hrAMivn4SoQ3w94y8cBjjFDPKKx2ACjC4JXuQZYaqYk45eHXOjTGN3EHharV2H6NYw4vl8jV8y+AliBFvIVkdNCkgSfCpaZBQQlAjkKXCec0WVhfcFdmFJM+xEhc0ooS9XqWPVMbBe37c1MLVtHVrHUTntIu71gxvF2D43SPjeYrjBnO2CRs9rUzG6RzuZI5L2WwFVNSQv3QGtIGpK+EhSSSzzAhYTEEfAwGvB4Al9+21BK1Sj/Ys530ipowymQKOMhDWiMyB2l6xbjiTGOvQ/C9WG8INRdZqTrSCw+1QEjye9025egBSYz5C6DdKG5QWMaYyHKg3Ah91Iu7qqkgjI4yq9C2MTMh0/FQmDwcVsZ7eToOJQkdMNcfIBPhl2OWIClPIX8Nb9dauGSWyG6+pZsno2NjAKK2NdBzEmM5dGNYQJ++lKNmm2AkpL+oFEaIHIW6vsJrKsSgXIeCW/dVirw1qY13GZ7k3fVoFus/P1oNhto/kIbYO7Gw88N923Ev+Zi4fqpxDWMSiSLwXBAbuWzewJZomusTBTVqQBMNc+F7VNB0mpA8j49vkRoj72R8M4sWy5YniizgxMcFFILEZnwuhmTQ7pabJLuxFlNhYDMa7SKiDjAW0xeMI9c1JJeEYmCGAYuQAKzdlCCyigOYGSVo4fUCSCvNNKu7/ua67alXc9aWOaQaVe1euZd+1x5127A4iFdcAGGHAsJftbgrwmSvSKrYF6J6v5Q+h60zvIQdvxWg1uugxf+OQ2u1hOR2TC4M2SdNe1VZ/0iqUkdJDUdqp06UxNA/6jcpExU+AyUmxp1l6FV4a7rNRxn53K2uis/re5wxsUzI/YiLhLHJaYdE9MMpzumqcqZgtohZ9qz+h1ORNnCQyTaAf+pyuf2AoehKE4RoOAtWq2J6B9dSpxTljhF6dJV4tQf0U6GDrWBjk+xj2HfYES4NWlFBoRKYBD7clkk7EYuwDgtMAxlwNq3FRjNZNHylP5zFzrdLaf8Kb0of572CqP2Umj4NlXfEuqQ//evoHT4VGPYgJ2rWnItxh0VVGcLrCjNTt8Caz31ZkP34lo/omsdelnT+XRi9XSt4f3IVKrwV+vFZ18/agiqp6M3/AiAiHZ7bGvBkLRs6g1/zVtDff27xg8X2QpO6tTNJuKle9Cre/CWX6sn9euTdxPGygTqNqsKtAHbCUatZNSNiV3T1NuPLaNT1plTYrPXenkr9OM4T2e92dfJ3vU2SauAczygk2m2MZnaVd94b7KcTifTqWPaqqGl/6tizbrYd/sbDMtfw2Ts5Y+K9Kv/AQ==</diagram></mxfile>
|
2206.07043/main_diagram/main_diagram.pdf
ADDED
|
Binary file (18.2 kB). View file
|
|
|
2206.07043/paper_text/intro_method.md
ADDED
|
@@ -0,0 +1,108 @@
|
|
| 1 |
+
# Introduction
|
| 2 |
+
|
| 3 |
+
After revolutionizing the field of machine translation [@sutskever2014sequence; @cho-etal-2014-learning; @bahdanau2014neural], sequence-to-sequence (seq2seq) methods have quickly become the standard approach for not only multilingual but also for *monolingual* sequence transduction / text generation tasks, such as text summarization, style transfer, and grammatical error correction. While delivering significant quality gains, these models, however, are prone to hallucinations [@maynez-etal-2020-faithfulness; @pagnoni-etal-2021-understanding]. The seq2seq task setup (where targets are generated from scratch word by word) overlooks the fact that in many monolingual tasks the source and target sequences have a considerable overlap, hence targets could be reconstructed from the source inputs by applying a set of edit operations.
|
| 4 |
+
|
| 5 |
+
Text-editing models attempt to address some of the limitations of seq2seq approaches and there has been recently a surge of interest in applying them to a variety of monolingual tasks including text simplification [@dong-etal-2019-editnts; @mallinson-etal-2020-felix; @agrawal-etal-2021-non], grammatical error correction [@awasthi-etal-2019-parallel; @omelianchuk-etal-2020-gector; @malmi-etal-2019-encode; @stahlberg-kumar-2020-seq2edits; @rothe-etal-2021-simple; @chen-etal-2020-improving-efficiency; @hinson-etal-2020-heterogeneous; @gao2021hierarchical], sentence fusion [@malmi-etal-2019-encode; @mallinson-etal-2020-felix] (see an example in Figure [1](#fig:example){reference-type="ref" reference="fig:example"}), MT automatic post-editing [@gu2019levenshtein; @zietkiewicz2020post; @mallinson-etal-2020-felix], text style transfer [@reid-zhong-2021-lewis; @malmi-etal-2020-unsupervised], data-to-text generation [@kasner-dusek-2020-data], and utterance rewriting [@liu-etal-2020-incomplete; @DBLP:conf/sigir/VoskaridesLRKR20; @Hct2022].
|
| 6 |
+
|
| 7 |
+
<figure id="fig:example" data-latex-placement="tb">
|
| 8 |
+
<embed src="text_editing_example.pdf" />
|
| 9 |
+
<figcaption>An example of using a text-editing approach to solve a sentence-fusion task.</figcaption>
|
| 10 |
+
</figure>
|
| 11 |
+
|
| 12 |
+
Text-editing approaches claim to be more accurate than or on par with seq2seq baselines, especially in low-resource settings, less prone to hallucinations, and faster at inference time. These advantages have generated a substantial and continued level of interest in text-editing research. The goal of this tutorial is to provide the first comprehensive overview of the family of text-editing approaches and to offer practical guidelines for applying them to a variety of text-generation tasks.
|
| 13 |
+
|
| 14 |
+
The tutorial is intended for researchers and practitioners who are familiar with generic seq2seq text-generation methods, such as Transformer [@vaswani2017attention] and pre-trained language models like BERT [@devlin-etal-2019-bert]. However, prior experience with text-editing models is not required to be able to follow the tutorial.
|
| 15 |
+
|
| 16 |
+
We expect the topic to attract people in both academia and industry. The high sample efficiency and low computational requirements of text-editing models [@malmi-etal-2019-encode; @mallinson-etal-2020-felix] make them an attractive baseline, e.g., for researchers developing new text-generation tasks for which large training sets do not yet exist. Moreover, the high inference speed of text-editing methods, owing to their often non-autoregressive architecture [@awasthi-etal-2019-parallel; @mallinson-etal-2020-felix], makes them suitable for building real-time applications.
|
| 17 |
+
|
| 18 |
+
The structure of the tutorial with duration estimates for the different sections is shown in Table [1](#tab:structure){reference-type="ref" reference="tab:structure"}. Below we provide brief descriptions for each section.
|
| 19 |
+
|
| 20 |
+
::: {#tab:structure}
|
| 21 |
+
**Section** **Duration**
|
| 22 |
+
------------------------------------------------- --------------
|
| 23 |
+
Introduction 15 min
|
| 24 |
+
$\quad$ What are text-editing models?
|
| 25 |
+
$\quad$ Text-editing vs. seq2seq models
|
| 26 |
+
Model design 40 min
|
| 27 |
+
$\quad$ Example model + model landscape
|
| 28 |
+
$\quad$ Edit-operation types
|
| 29 |
+
$\quad$ Tagging architecture
|
| 30 |
+
$\quad$ Auto-regressiveness
|
| 31 |
+
$\quad$ Converting target texts to target edits
|
| 32 |
+
Applications 45 min
|
| 33 |
+
$\quad$ Overview
|
| 34 |
+
$\quad$ Grammatical Error Correction
|
| 35 |
+
$\quad$ Text Simplification
|
| 36 |
+
$\quad$ Unsupervised Style Transfer
|
| 37 |
+
$\quad$ Incomplete Utterance Rewriting
|
| 38 |
+
Controllable generation 25 min
|
| 39 |
+
$\quad$ Mitigating hallucinations
|
| 40 |
+
$\quad$ Controllable dataset generation
|
| 41 |
+
Multilingual text editing 25 min
|
| 42 |
+
$\quad$ Tokenization
|
| 43 |
+
$\quad$ Handling morphology
|
| 44 |
+
$\quad$ Practical aspects
|
| 45 |
+
Productionization 25 min
|
| 46 |
+
$\quad$ Latency
|
| 47 |
+
$\quad$ Sample efficiency
|
| 48 |
+
Recommendations and future directions 5 min
|
| 49 |
+
**Total** 180 min
|
| 50 |
+
|
| 51 |
+
: Tutorial structure and duration of each section.
|
| 52 |
+
:::
|
| 53 |
+
|
| 54 |
+
We first define the family of text-editing methods: Text-editing models are sequence-transduction methods that produce the output text by predicting edit operations which are applied to the inputs. In contrast, the traditional seq2seq methods produce the output from scratch, token by token. We summarize the main pros and cons of these two approaches and provide guidelines for choosing which approach is more suitable for a given task.
|
| 55 |
+
|
| 56 |
+
The similarities and differences of a set of popular text-editing methods will be analyzed in terms of the types of edit operations they employ, their tagging architecture, and whether they are auto-regressive or feedforward. We also discuss methods for converting target texts into target edit sequences, a task which often does not have a unique solution. Table [\[tab:methods\]](#tab:methods){reference-type="ref" reference="tab:methods"} provides a summary of the similarities and differences between the methods covered in the tutorial.
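To make the conversion step concrete, the following Python sketch heuristically turns a (source, target) pair into per-source-token keep/delete tags plus insertion phrases using `difflib`; the tag set and the attachment rules are illustrative choices, not the scheme of any particular method in the table.

```python
import difflib

def text_to_edits(source_tokens, target_tokens):
    """Heuristically convert a (source, target) pair into per-source-token edits.

    Each source token gets a [KEEP|DELETE, phrase_inserted_before] pair; any
    insertion left over after the last source token is returned separately.
    """
    tags = [["KEEP", None] for _ in source_tokens]
    pending = []  # target tokens waiting to be inserted before the next source token
    matcher = difflib.SequenceMatcher(a=source_tokens, b=target_tokens, autojunk=False)
    for op, i1, i2, j1, j2 in matcher.get_opcodes():
        if op in ("replace", "insert"):
            pending.extend(target_tokens[j1:j2])          # text that must be inserted
        if op in ("replace", "delete"):
            for i in range(i1, i2):
                tags[i][0] = "DELETE"                     # source tokens not kept
        if op in ("equal", "replace", "delete") and i1 < i2 and pending:
            tags[i1][1] = " ".join(pending)               # attach insertion before this span
            pending = []
    trailing_insertion = " ".join(pending) if pending else None
    return tags, trailing_insertion

src = "Dylan won the Nobel prize . Dylan is a singer .".split()
tgt = "Dylan , a singer , won the Nobel prize .".split()
print(text_to_edits(src, tgt))
```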
|
| 57 |
+
|
| 58 |
+
::: table*
|
| 59 |
+
:::
|
| 60 |
+
|
| 61 |
+
A key criterion for determining whether text-editing models are a good fit for a given application is the average degree of overlap between source and target texts. The higher the overlap, the more input tokens can be reused to generate the target, thus resulting in a simpler edit sequence. We give an overview of applications with a high degree of overlap to which text-editing methods have been applied. Then we do a deep dive into the following applications: grammatical error correction, text simplification, unsupervised style transfer, and incomplete utterance rewriting.
|
| 62 |
+
|
| 63 |
+
Text-editing models with a restricted vocabulary of phrases to insert [@malmi-etal-2019-encode; @Hct2022] or with linguistically informed suffix-transformation operations [@awasthi-etal-2019-parallel; @omelianchuk-etal-2020-gector] are less prone to different types of hallucination since the models cannot produce arbitrary outputs. Moreover, the restricted vocabulary makes it feasible to manually refine the list of phrases that the model can insert. Another route through which the decomposition of the generation task into explicit edit operations can improve controllability is via biasing of certain types of edits to control how often the model will insert new text [@dong-etal-2019-editnts; @omelianchuk-etal-2020-gector]. Controllable generation with editing models can be useful for generating large synthetic datasets with a desired distribution of errors, which yields improvements in tasks such as grammatical error correction [@stahlberg-kumar-2021-synthetic]. We will provide concrete examples of the aforementioned control measures and their effects.
|
| 64 |
+
|
| 65 |
+
Most text-editing models, like text-generation models in general, are evaluated on English, but there are also methods evaluated or specifically developed for other languages, including Chinese [@hinson-etal-2020-heterogeneous; @liu-etal-2020-incomplete], Czech [@naplava-straka-2019-grammatical], German [@mallinson-etal-2020-felix], Russian [@stahlberg-kumar-2020-seq2edits], and Ukrainian [@syvokon2021uagec]. Apart from general tokenization-related challenges discussed in [@mielke2021between], an additional challenge with applying text-editing methods to morphologically rich languages is a potential mismatch between the subword tokens, on which the underlying sequence labeling model operates, and the morphemes or affixes, on which the edits should happen. Possible solutions to this challenge include developing custom inflection operations [@awasthi-etal-2019-parallel; @omelianchuk-etal-2020-gector] or learning them from the data [@straka2021character], and using more fine-grained edit operations, such as character-level edits [@gao2021hierarchical].
|
| 66 |
+
|
| 67 |
+
An additional challenge when building a truly multilingual model---as opposed to one model per language---is to ensure that it is not skewed towards a particular language or a set of languages [@chung-etal-2020-improving] while being computationally efficient.
|
| 68 |
+
|
| 69 |
+
We discuss how casting a text-generation problem as a text-editing task often allows the use of significantly faster and more data-efficient model architectures, without sacrificing output quality. We make use of the TensorFlow Profiler[^2] to compare latencies of text-editing and non-text-editing solutions for an example problem, and illustrate where the time savings come from.
|
| 70 |
+
|
| 71 |
+
<figure id="fig:rec" data-latex-placement="tb">
|
| 72 |
+
<embed src="when_to_edit.pdf" />
|
| 73 |
+
<figcaption>Proposed flowchart for deciding when to try a text-editing approach.</figcaption>
|
| 74 |
+
</figure>
|
| 75 |
+
|
| 76 |
+
We provide practical guidelines for when to use (and when not to use) text-editing methods (see Figure [2](#fig:rec){reference-type="ref" reference="fig:rec"} for a summary). We also outline possible future directions which include: ($i$) learned edit operations, ($ii$) studying the effects of different subword segmentation methods since these typically determine the granularity at which the edit operations are applied, ($iii$) text-editing-specific pre-training methods, ($iv$) sampling strategies for text-editing methods, and ($v$) studying the effects of scaling up text-editing methods, a strategy that has been found to be very effective for many other text-generation methods [@brown2020language; @chowdhery2022palm].
|
| 77 |
+
|
| 78 |
+
A significant portion of the tutorial is devoted to discussing multilingual text-editing, including applying text-editing models to morphologically rich languages which presents specific challenges related to larger vocabularies and the need to edit word affixes. The presenters come from both academia and industry, are native speakers of 8 languages based in 4 different countries (Switzerland, Germany, Canada, USA), and are of different seniority levels from a PhD student to a Senior Staff Research Scientist.
|
| 79 |
+
|
| 80 |
+
Before the tutorial, we expect the audience to read [@vaswani2017attention] and [@devlin-etal-2019-bert]. For references to text-editing works that will be discussed in the tutorial, see Table [\[tab:methods\]](#tab:methods){reference-type="ref" reference="tab:methods"}.
|
| 81 |
+
|
| 82 |
+
50% of the methods that will be discussed in the tutorial (cf. Table [\[tab:methods\]](#tab:methods){reference-type="ref" reference="tab:methods"}) are developed by different subsets of the tutorial instructors.
|
| 83 |
+
|
| 84 |
+
is a Senior Research Scientist at Google Switzerland. His research is focused on developing text-generation models for grammatical error correction and text style transfer. He received his PhD from Aalto University, Finland, where he also taught a course on Recent Advances in Natural Language Generation in Spring 2022.
|
| 85 |
+
|
| 86 |
+
is a final-year PhD student in CS at McGill University and Mila, Canada. Her research is focused on conditional text generation. She is a co-organizer for the NewSum workshop at EMNLP 2021 and ENLSP workshop at NeurIPS 2021.
|
| 87 |
+
|
| 88 |
+
is a Research Engineer at Google Switzerland. His research is focused on low-latency text-to-text generation. He received his PhD from the University of Edinburgh, Scotland.
|
| 89 |
+
|
| 90 |
+
is a Research Engineer at Google Switzerland. His current research focuses on multi-lingual NLG. He organized [workshops](http://scai-workshop.github.io) and conducted [tutorials](https://clickmodels.weebly.com/tutorials.html) at conferences such as SIGIR, EMNLP, and IJCAI. Aleksandr received his PhD from University of Amsterdam, The Netherlands.
|
| 91 |
+
|
| 92 |
+
is a Research Engineer at Google Switzerland focusing on grammatical error correction and low-latency models. He received his MSc from Jagiellonian University.
|
| 93 |
+
|
| 94 |
+
is a Research Engineer at Google Switzerland working on text editing with application to grammatical error correction. He received his PhD from the University of Trento, Italy.
|
| 95 |
+
|
| 96 |
+
is a Research Scientist at Google focusing on grammatical error correction and text style models. He received his PhD from Cambridge University, UK.
|
| 97 |
+
|
| 98 |
+
is a Senior Research Engineer at Google Switzerland. His work is focused on multi-lingual rewriting of questions in low-latency settings. Sebastian received his PhD in Engineering from the Technical University of Berlin, Germany.
|
| 99 |
+
|
| 100 |
+
is a Senior Staff Research Scientist at Google leading a research team working on speech and language algorithms. He received his PhD from the Johns Hopkins University, US.
|
| 101 |
+
|
| 102 |
+
is a Staff Research Scientist at Google Switzerland leading an applied research team working on next generation NLG solutions. He received his PhD from University of Trento, Italy.
|
| 103 |
+
|
| 104 |
+
Text-generation methods have the potential to generate non-factual [@maynez-etal-2020-faithfulness; @pagnoni-etal-2021-understanding; @kreps2020all] and offensive content [@gehman-etal-2020-realtoxicityprompts]. Furthermore, training these models on uncurated data can lead to the models replicating harmful views presented in the training data [@bender2021dangers]. Text-editing models are also susceptible to these issues, but they have been shown to mitigate some of them. Specifically, they reduce the likelihood of different types of hallucination [@malmi-etal-2019-encode] and their higher sample efficiency [@malmi-etal-2019-encode; @mallinson-etal-2020-felix] enables more careful curation of the training data. The tutorial will discuss the ethical issues related to text generation and provide concrete examples on how text-editing models can help mitigate them.
|
| 105 |
+
|
| 106 |
+
[^1]: Website: <https://text-editing.github.io/>
|
| 107 |
+
|
| 108 |
+
[^2]: <https://www.tensorflow.org/guide/profiler#trace_viewer_interface>
|
2206.08194/main_diagram/main_diagram.drawio
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
<mxfile host="app.diagrams.net" modified="2022-01-20T18:05:55.211Z" agent="5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/93.0.4577.63 Safari/537.36" version="16.4.0" etag="mSvfLzu-hGN6eihSjlj7" type="google"><diagram id="n6Ih5OqvS4WBo2tvgCn8">5VrBcpswEP0aXzMIDNjH1knaQzvTGR+aHDVGAToCeWQ5xv36CiMhEHHiUiGofbHZh3aR9mlXK8HMW2XFFwq3yXcSITxznaiYefcz1/UWDv8tgWMFgOWiAmKaRgJSwDr9jQQo9OJ9GqFdqyEjBLN02wY3JM/RhrUwSCk5tJu9ENx+6hbGqAOsNxB30Z9pxJIKXbihwr+iNE7kk0GwrO5kUDYWI9klMCKHBuQ9zLwVJYRVV1mxQrj0nfRLpfd45m7dMYpydomCK7rBjnJsKOJDFWJOcv73mZJ9HqFSw+ESoSwhMckh/kbIloOAg78QY0dBFNwzwqGEZVjcRUXKnoR6ef3cuL4vmsJRCjmjx6em8NwUlNJJUlrRp5Je1XWOPKYYi/vVYMsRtty1I3u6EZDf9SCoeeHzGZEM8UfyJhRhyNLXtikoZlZct6tVf5CUW3QdEQT1FJAxIGVpgkEaIya0FIX8otENBZ2IfZtkzyrJ4O9JBldLcmCN5PloJIMeJINrJjkYjGQxpleI96K3HdbbFB+SlKH1Fp6ccuCLcZtOYQ5RhoqzK8gZr8nRO9ro50I+qJVRQkljUZw75/3c8tA77gimnticu9C3nNvCiwn812nvW8ttoeFp/8I9uiKYUOVnE5Ggr+hvRAIYKBIWk6/j7lzbkSB9bSEUPGuhsJx+yrtinufWeJaDmnrOA+PlPAA6HplYLLT3NY0MOGAwuNaCQS97veGCwf0/gkEvhf3LSmFgIhjG2+T32f81C+MBg2G8PaA/XDCMt9Pvl/aum2h9JTNItOndvqWsF1jMepZOAPrN0AmcL1ZB0pmNY55Gg/DGOdPPTfS66WLO9Np7wALM0vnCZDmb+5qr/Z6cOR8YMsiZpaOCyXLma652+3IGPjBkjrP6tfCtcmZsPbP3Tsa1dAwxWc6MrWe6oXA4zrqV9I1xZio36oaC4XKjpROOyXJmrAbRDYWmOOOi+nSpaq6+//Ie/gA=</diagram></mxfile>
|
2206.08194/main_diagram/main_diagram.pdf
ADDED
|
Binary file (1.37 kB). View file
|
|
|
2206.08194/paper_text/intro_method.md
ADDED
|
@@ -0,0 +1,196 @@
|
|
|
| 1 |
+
# Introduction
|
| 2 |
+
|
| 3 |
+
Due to their low acquisition latency and high precision, rotating LiDAR sensors are among the most prevalent sensors for autonomous vehicles [39]. The acquired sequences of 3D points exhibit a complex structure in which the temporal and spatial dimensions are entangled through the rotation of the sensor around a reference point in motion; see Figure 1. However, this structure is often not reflected in the formatting of open-access LiDAR datasets [3,25,29], which are discrete sequences of range images, or frames, each corresponding to a 360° arc around the sensor. Consequently, most LiDAR semantic segmentation methods operate on one or several such frames at a time, in the image [12] or point cloud [51,48,43] format. However, waiting for an entire frame to be acquired introduces an unavoidable latency of more than 100ms on top of the processing time, precluding applications such as high-speed or urban driving. In this paper, we address this issue by introducing (i) HelixNet, the largest available LiDAR dataset, whose fine-grained point information allows for the realistic real-time evaluation of segmentation methods, and (ii) Helix4D, a spatio-temporal transformer designed for the efficient segmentation of LiDAR sequences.
|
| 4 |
+
|
| 5 |
+

|
| 6 |
+
|
| 7 |
+
Fig. 1: Online LiDAR Segmentation. The 3D point sequences of rotating LiDAR data in our proposed dataset HelixNet follow a complex helix-like structure in space and time, represented in (a) by using the vertical axis for both time and elevation. We propose an efficient spatio-temporal transformer to process angular slices of data centered on the sensor's position. The slices are partitioned into voxels, each attending to other voxels from past slices to build a large spatio-temporal receptive field (b). Our proposed model can segment the LiDAR point stream (c) with state-of-the-art accuracy and in real time.
|
| 8 |
+
|
| 9 |
+
Our dataset, HelixNet, has several key advantages compared to standard datasets such as SemanticKITTI [3]; see Table 1. By organizing points with respect to sensor rotation and reporting their precise release times, we can accurately benchmark the real-time readiness of leading state-of-the-art LiDAR sequence segmentation algorithms. Furthermore, the pointwise sensor orientation allows us to split the data into slices of acquisition corresponding to a fraction of the sensor's rotation. These slices can be processed sequentially by our proposed network Helix4D, resulting in a lower acquisition latency and a more realistic scenario for autonomous driving. Based on a spatio-temporal transformer designed explicitly for LiDAR sequences, Helix4D is more than 50 times smaller than the current best semantic segmentation architectures and reaches state-of-the-art performance with significantly reduced latency.
|
| 10 |
+
|
| 11 |
+
# Method
|
| 12 |
+
|
| 13 |
+
We introduce HelixNet, a new large-scale and open-access LiDAR dataset intended for the evaluation of real-time semantic segmentation algorithms. In contrast to other large-scale datasets, HelixNet includes fine-grained data on sensor rotation and position, as well as point release time.
|
| 14 |
+
|
| 15 |
+
General Characteristics. As seen in Figure 2, HelixNet contains 20 sequences of 3D points, each corresponding to 6 to 7 minutes of continuous acquisition, for a total of 129 minutes. Scanning was performed by an HDL-64E Velodyne rotating LiDAR [24] mounted on a mobile platform [36]. As shown in Figure 3, HelixNet covers multiple cities and a wide variety of environments such as a university campus, dense historical centers, and a highway interchange. With a total of 10 billion points across 78 800 frames and 8.85 billion individual labels, HelixNet is the largest densely annotated open-access rotating LiDAR dataset by a factor of 1.7 as shown in Table 1. HelixNet follows the file format of SemanticKITTI [3], allowing researchers to evaluate existing code with minimal effort.
|
| 16 |
+
|
| 17 |
+

|
| 18 |
+
|
| 19 |
+
Fig. 3: Extracts from HelixNet. Our proposed dataset contains various urban scenes from motorway to pedestrian plazas and historical centers. In the first row, we represent extracts of 15 to 30s of acquisition colored according to the point release time. In the second row, we represent the point semantic labels.
|
| 20 |
+
|
| 21 |
+
We use a 9-class nomenclature: road (16.4% of all points), other surface (22.0%), building (31.3%), vegetation (8.5%), traffic signs (1.6%), static vehicle (4.9%), moving vehicle (2.1%), pedestrian (0.9%), and acquisition artifact (0.05%). Points without labels correspond either to un-annotated parts of the clouds (6.2%), due to their ambiguity, or to points without echoes (6.1%). Compared to fine-grained classes such as those used by SemanticKITTI [3] or Paris-Lille-3D [38], our focused nomenclature limits class imbalance and makes macro-averaged metrics more stable.
|
| 22 |
+
|
| 23 |
+
Each point is associated with the following 9 values: (1-3) Cartesian coordinates in a fixed frame of reference, (4-6) cylindrical coordinates relative to the sensor at the time of acquisition, (7) intensity, (8) fiber index, and (9) packet output time. As detailed in the next paragraph, the last two features are not typically available in large-scale datasets and cannot be inferred.
|
| 24 |
+
|
| 25 |
+
Sensor-Based Timing and Grouping. A rotating LiDAR consists of a set of lasers—or fibers—arranged on a rotating sensor head. The lasers send periodic pulses of light whose return times give the position of the impact points relative to the sensor. In the context of autonomous driving, these sensors are typically deployed on a moving platform and capture 3D points with centimetric accuracy. The sensor releases the data stream as a discrete temporal sequence of packets of 3D points. For an HDL-64E LiDAR, each packet contains 6 × 64 points, corresponding to around 1◦ rotation of the sensor. To represent the real-time
|
| 26 |
+
|
| 27 |
+

|
| 28 |
+
|
| 29 |
+
Fig. 4: Sensor Acquisition Geometry. We represent in (a) the acquisition of a rotating sensor, which is split into ⅓-turn slices in (b). As the laser emitters' positions span an angle of over 17.3° around the sensor head, taking slices with respect to the sensor rotation θ results in a jagged profile.
|
| 30 |
+
|
| 31 |
+
operational setting of autonomous driving, we associate with each point the timestamp of its packet output event, i.e. the instant the packet is available and not the acquisition time of the point. The latency between the acquisition of the first point and the complete transfer of its packet is 278µs. Although small compared to acquisition and inference times, this more rigorous timing constitutes a step towards a more realistic evaluation setting of segmentation algorithms of LiDAR sequences.
|
| 32 |
+
|
| 33 |
+
On top of its absolute position, we associate with each individual point its cylindrical coordinates relative to the position of the sensor at the exact time of its acquisition. This differs from other datasets such as SemanticKITTI [3], which gives the relative position of all points but the absolute position of the sensor only once per frame. While sensor movement can be interpolated, the vehicle trajectory might not be linear and the sensor head rotates. For comparison, at 50km/h, the sensor moves more than 1.4m during each rotation.
|
| 34 |
+
|
| 35 |
+
LiDAR sequences are typically split into frames containing points that cover a 360° arc around the sensor. However, the acquisition geometry makes this grouping artificial. Indeed, the fibers (i.e. the individual lasers) do not all face the same direction: they are arranged around the sensor's head at different angles, spanning more than 17.3°. This means that the points within a packet are not vertically aligned but present a jagged profile, as seen in Figure 4. In order to obtain frames with straight edges such as those of SemanticKITTI [3], we would have to consider an acquisition over a sensor rotation of 377°, adding a further 5ms of latency. Contrary to other datasets, HelixNet contains the index of the emitter of each point and organizes the points with respect to the angle of the sensor head itself. This allows us to easily build frames or frame portions that are directly consistent with the rotation of the sensor head. This is important for measuring the real latency of segmentation methods and, as described in the next section, contributes to the efficiency of our proposed network.
|
| 36 |
+
|
| 37 |
+
We consider a sequence of 3D points acquired by a rotating LiDAR on a mobile platform, which we split into chronologically ordered slices of acquisition. As represented in Figure 5, we process each slice with a U-Net architecture [37] with cylindrical convolutions [51]. At the lowest resolution, a spatio-temporal transformer network connects neighboring voxels in space and time, resulting in a large receptive field. We first describe the construction of slices, then our cylindrical U-Net, and finally the transformer module.
|
| 38 |
+
|
| 39 |
+
Instead of processing the data frame-by-frame, we propose to split the sequence into slices covering a fixed portion of the sensor rotation, resulting in a shorter acquisition time and a lower latency. Each point $i$ of the sequence is characterized by the angular position $\theta_i$ of the sensor head at its exact time of acquisition. The points are sorted in chronological acquisition order, i.e., $\theta_i \leq \theta_j$ if $i < j$. We partition the sequence into groups of contiguous points called slices, acquired during a portion $\Delta\theta \in ]0, 2\pi]$ of a full rotation of the sensor itself. Choosing $\Delta\theta = 2\pi$ corresponds to the classic frame-by-frame setting and implies an acquisition latency of 104ms in HelixNet or SemanticKITTI [3]. A slice size of $\Delta\theta = 2\pi/5$ leads to an acquisition latency of 21ms, which is more conducive to real-time processing of driving data.
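As a minimal illustration of the slicing, the sketch below assigns points to slices from their per-point sensor angle; it assumes the angle has been unwrapped into a monotonically increasing value over successive rotations, which is straightforward with HelixNet's sensor-ordered points.

```python
import numpy as np

def split_into_slices(theta, delta_theta=2 * np.pi / 5):
    """Assign each point to an angular slice of the sensor rotation.

    theta: (n,) unwrapped, non-decreasing sensor angle (radians) at each point's
           acquisition time, for points sorted chronologically.
    Returns an (n,) array of slice indices.
    """
    return np.floor(theta / delta_theta).astype(int)

# example: three full rotations sampled uniformly, sliced into fifths of a turn
theta = np.linspace(0.0, 3 * 2 * np.pi, num=30, endpoint=False)
print(split_into_slices(theta))
```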
|
| 40 |
+
|
| 41 |
+
Inspired by the Cylinder3D model [51], we first discretize each slice along a fine cylindrical partition grid (1). Each point i is associated with a descriptor $x_i^{\text{point}}$ based on its intensity, relative position with respect to the sensor in Cartesian and cylindrical coordinates, and its offset with respect to the center of its voxels in grid (1). We compute the point feature $f_i^{\text{point}}$ by applying a shared Multi-Layer Perceptron (MLP) $\mathcal{E}^{\text{point}}$ to $x_i^{\text{point}}$ for all points i in the slice. The resulting $f_i^{\text{point}}$ are then maxpooled with respect to the voxels of grid (1) to serve as input to a convolutional encoder $\mathcal{E}^{\text{grid}}$ . The network $\mathcal{E}^{\text{grid}}$ is composed of sparse cylindrical convolutions [18] and strided convolutions for downsampling. $\mathcal{E}^{\text{grid}}$ produces a set of L sparse feature maps $f^{\text{grid}(1)}, \dots, f^{\text{grid}(L)}$ with decreasing resolutions:
|
| 42 |
+
|
| 43 |
+
$$f_i^{\text{point}} = \mathcal{E}^{\text{point}} \left( x_i^{\text{point}} \right)$$
|
| 44 |
+
(1)
|
| 45 |
+
|
| 46 |
+
$$f^{\mathsf{grid}(1)}, \cdots, f^{\mathsf{grid}(L)} = \mathcal{E}^{\mathsf{grid}} \left( \mathsf{maxpool} \left( f^{\mathsf{point}} \right) \right) ,$$
|
| 47 |
+
(2)
|
| 48 |
+
|
| 49 |
+
where maxpool is performed with respect to grid (1). At the lowest resolution grid (L), we apply the transformer-based module $\mathcal{T}$ presented in the next subsection to the feature map $f^{\text{grid}(L)}$ to obtain the coarse cylindrical map $g^{\text{grid}(L)}$ :
|
| 50 |
+
|
| 51 |
+
$$g^{\operatorname{grid}(L)} = \mathcal{T}\left(f^{\operatorname{grid}(L)}\right)$$
|
| 52 |
+
(3)
|
| 53 |
+
|
| 54 |
+

|
| 55 |
+
|
| 56 |
+
Fig. 5: Helix4D Architecture. A point sequence is split into angular slices, whose points are encoded by $\mathcal{E}^{\text{point}}$ and pooled along a fine-grained cylindrical partition. A convolutional encoder $\mathcal{E}^{\text{grid}}$ yields feature maps at lower resolutions. We apply $W$ consecutive spatio-temporal transformer blocks $\mathcal{T}^w$ on the coarse voxels, with attention spanning the current and past slices. The resulting features are up-sampled to full resolution with a convolutional decoder $\mathcal{D}^{\text{grid}}$ using the encoder's maps at intermediate resolutions through skip connections. Finally, the grid features are allocated to the points, which are classified by $\mathcal{D}^{\text{point}}$.
|
| 57 |
+
|
| 58 |
+
The decoder $\mathcal{D}^{\text{grid}}$ combines cylindrical convolutions and strided transposed convolutions to map $g^{\text{grid}(L)}$ to a feature map $g^{\text{grid}(1)}$ at the highest resolution, and uses the maps $f^{\text{grid}(L-1)}, \cdots, f^{\text{grid}(1)}$ through residual skip connections. We concatenate for each point $i$ the descriptor $g^{\text{grid}(1)}(i)$ of its voxel in grid (1) and its point feature $f_i^{\text{point}}$. Finally, the point decoder $\mathcal{D}^{\text{point}}$ associates a vector of class scores $c_i^{\text{point}}$ with each point $i$:
|
| 59 |
+
|
| 60 |
+
$$g^{\mathsf{grid}(1)} = \mathcal{D}^{\mathsf{grid}}\left(g^{\mathsf{grid}(L)}, f^{\mathsf{grid}(L-1)}, \cdots, f^{\mathsf{grid}(1)}\right)$$
|
| 61 |
+
(4)
|
| 62 |
+
|
| 63 |
+
$$c_i^{\mathsf{point}} = \mathcal{D}^{\mathsf{point}}\left(\left[g^{\mathsf{grid}(1)}(i), f_i^{\mathsf{point}}\right]\right) ,$$
|
| 64 |
+
(5)
|
| 65 |
+
|
| 66 |
+
where $[\,\cdot\,]$ is the channelwise concatenation operator. The network is supervised by the cross-entropy and Lovász-softmax [4] losses directly on the point predictions, without class weights.
|
| 67 |
+
|
| 68 |
+
Our approach differs from Cylinder3D [51] by relying on simple 3×3×3 sparse cylindrical convolutions instead of asymmetrical convolutions and dimension-based context modeling. Furthermore, we do not use voxel-wise supervision.
|
| 69 |
+
|
| 70 |
+
Our simplified architecture results in a lighter computational and memory load, but can still learn rich spatio-temporal features thanks to the addition of the transformer module described below.
|
| 71 |
+
|
| 72 |
+
We denote by $\mathcal{V}$ the set of non-empty voxels at the lowest resolution grid (L) for all slices of the considered sequence. We associate with each voxel $v$ of $\mathcal{V}$ a
|
| 73 |
+
|
| 74 |
+
feature $f_v^{\text{voxel}}$ defined as the value of $f^{\text{grid}(L)}$ at v. We remark that $f^{\text{voxel}}$ can be ordered as a non-strictly ordered time sequence, and propose to successively apply W independent transformer blocks $\mathcal{T}^1, \dots, \mathcal{T}^W$ whose architecture is described below. We denote by $g^{\text{voxel}}$ the resulting spatio-temporal voxel representation:
|
| 75 |
+
|
| 76 |
+
$$g^{\text{voxel}} = \mathcal{T}^W \circ \dots \circ \mathcal{T}^1(f^{\text{voxel}}) \ .$$
|
| 77 |
+
(6)
|
| 78 |
+
|
| 79 |
+
We associate each voxel v of $\mathcal{V}$ with the absolute position $(X_v, Y_v, Z_v)$ of its center, the release time $T_v$ of its first point, and the index $I_v$ of the sensor rotation of its corresponding slice. In order to use a sparse attention scheme, we define for each voxel v a spatio-temporal mask M(v) characterized by a radius R and a set of rotation offsets $P \subset \mathbb{N}$ :
|
| 80 |
+
|
| 81 |
+
$$M(v) = \{ u \mid ||(X_v, Y_v, Z_v) - (X_u, Y_u, Z_u)|| < R, I_v - I_u \in P \} .$$
|
| 82 |
+
(7)
|
| 83 |
+
|
| 84 |
+
In the context of autonomous driving, we choose R = 6m and $P = \{0, 5, 10\}$ . With a standard rotation speed of 10Hz, this corresponds to considering slices 0.5 and 1 seconds in the past along with the current one. See Figure 6 for an illustration of the receptive field and attention maps.
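A dense NumPy sketch of the mask in Eq. (7) for a small set of voxels is given below; a real implementation would rely on sparse neighborhood queries, and the variable names are illustrative.

```python
import numpy as np

def spatio_temporal_mask(centers, rotation_index, R=6.0, P=(0, 5, 10)):
    """Boolean mask M[v, u] = True if voxel u is attended by voxel v (Eq. 7).

    centers:        (n, 3) absolute voxel centers (X, Y, Z)
    rotation_index: (n,)   sensor-rotation index I of each voxel's slice
    """
    dist = np.linalg.norm(centers[:, None, :] - centers[None, :, :], axis=-1)
    rot_offset = rotation_index[:, None] - rotation_index[None, :]   # I_v - I_u
    return (dist < R) & np.isin(rot_offset, list(P))
```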
|
| 85 |
+
|
| 86 |
+
Simplified Transformer Block. We now define a single transformer block $\mathcal{T}^w$ with H heads operating on a sequence of voxel features $f^{\text{voxel}}$ of dimension D. For each head h and each voxel v, we apply the following operations:
|
| 87 |
+
|
| 88 |
+
- (i) A single linear layer $\mathcal{L}^h$ generates both a key $k_v^h$ of dimension K and a value $\operatorname{val}_{v}^{h}$ of dimension D/H.
|
| 89 |
+
- (ii) For all voxels u in the mask M(v), we define the compatibility score $y_{u,v}^h$ as the dot product between the key of $v$ and the key of $u$ offset by a learned relative positional encoding $\operatorname{PE}^h(u, v)$.
|
| 90 |
+
- (iii) The cross-voxel attention $a_{u,v}^h$ is obtained with a scaled softmax.
|
| 91 |
+
- (iv) The values $\operatorname{val}_u^h$ of the voxels in $M(v)$ are averaged into a vector $\tilde{f}_v^h$ using their respective cross-voxel attention as weights.
|
| 92 |
+
- (v) The vectors $\tilde{f}_{v}^{h}$ are concatenated channelwise across heads and added to the input of the block to define its output.
|
| 93 |
+
|
| 94 |
+
These operations can be summarized as follows:
|
| 95 |
+
|
| 96 |
+
$$k_v^h, \operatorname{val}_v^h = \mathcal{L}^h \left( f_v^{\mathsf{voxel}} \right) \tag{8}$$

$$y_{u,v}^h = \left( k_v^h \right)^{\mathsf{T}} \left( k_u^h + \operatorname{PE}^h(u, v) \right) \quad \text{for } u \in M(v) \tag{9}$$

$$\left\{a_{u,v}^{h}\right\}_{u\in M(v)} = \operatorname{softmax}\left(\left\{y_{u,v}^{h}\right\}_{u\in M(v)}/\sqrt{K}\right) \tag{10}$$

$$\tilde{f}_{v}^{h} = \sum_{u \in M(v)} a_{u,v}^{h} \operatorname{val}_{u}^{h} \tag{11}$$

$$\mathcal{T}^{w}(f^{\mathsf{voxel}})_{v} = f_{v}^{\mathsf{voxel}} + [\tilde{f}_{v}^{1}, \cdots, \tilde{f}_{v}^{H}] \tag{12}$$
|
| 112 |
+
|
| 113 |
+
Our design is similar to the classical transformer architecture but uses keys as queries to save memory and computation. We also do not use feed-forward
|
| 114 |
+
|
| 115 |
+

|
| 116 |
+
|
| 117 |
+
Fig. 6: **Spatio-Temporal Attention.** We represent the spatio-temporal mask and attention score of one head of the transformer for two different voxels. The network gathers information from different frame offsets P as the sensor moves.
|
| 118 |
+
|
| 119 |
+
networks after averaging the values: the only learnable parts of a block $\mathcal{T}^w$ are its linear layers $\mathcal{L}^h$ and its relative positional encoding $\operatorname{PE}^h$.
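Under simplifying assumptions (a dense boolean mask over a small set of voxels and precomputed relative positional encodings), the block can be sketched in PyTorch as follows; the actual model uses sparse attention and buffered keys/values from past slices.

```python
import math
import torch
import torch.nn as nn

class HelixBlock(nn.Module):
    """One simplified transformer block: keys double as queries, no feed-forward net."""

    def __init__(self, dim=64, heads=4, key_dim=16):
        super().__init__()
        self.heads, self.key_dim, self.val_dim = heads, key_dim, dim // heads
        # one linear layer L^h per head producing a key and a value
        self.proj = nn.ModuleList(
            nn.Linear(dim, key_dim + dim // heads) for _ in range(heads))

    def forward(self, f_voxel, mask, rel_pe):
        # f_voxel: (n, dim) voxel features; mask: (n, n) bool attention mask M
        # rel_pe:  (heads, n, n, key_dim) relative positional encodings PE^h(u, v)
        outputs = []
        for h, proj in enumerate(self.proj):
            k, val = proj(f_voxel).split([self.key_dim, self.val_dim], dim=-1)
            # compatibility y_{u,v} = k_v^T (k_u + PE^h(u, v)), Eq. (9)
            y = torch.einsum('vd,vud->vu', k, k.unsqueeze(0) + rel_pe[h])
            y = y.masked_fill(~mask, float('-inf'))
            a = torch.softmax(y / math.sqrt(self.key_dim), dim=-1)     # Eq. (10)
            outputs.append(a @ val)                                    # Eq. (11)
        return f_voxel + torch.cat(outputs, dim=-1)                    # Eq. (12)
```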
|
| 120 |
+
|
| 121 |
+
Since $g^{\text{voxel}}$ only requires information about the voxels of the current and past slices, it can be computed sequentially for all slices in the order in which the sensor releases them. For a given slice, the voxel map $g^{\text{grid}(L)}$ for non-empty voxels is given by the values of $g^{\text{voxel}}$ , and set to zero otherwise. To save computation at inference time, we store in memory the keys, values, and absolute positions of the voxels in past slices with a fixed buffer of $\max(P)$ rotations. This allows us to allocate a large spatio-temporal receptive field to each voxel without supplementary computations.
|
| 122 |
+
|
| 123 |
+
Relative Positional Encoding. We propose to learn relative positional vectors $\operatorname{PE}^h(u,v)$ that encode the spatio-temporal offset $(X_u,Y_u,Z_u,T_u)-(X_v,Y_v,Z_v,T_v)$ between voxels $u$ and $v$ for each transformer block $w$ independently. Inspired by the work of Wu et al. [47], we first discretize the offsets along each dimension $d \in \{X,Y,Z,T\}$ with $B_d$ irregular bins. For each dimension $d$ and head $h$, we learn $B_d$ weight vectors of size $K$. We define the functions $\operatorname{PE}^h_d: \mathbb{R} \mapsto \mathbb{R}^K$ that map the $d$-coordinate of an offset to the vector associated with its corresponding bin. The positional encoding between two voxels $u$ and $v$ is the sum of the vectors corresponding to their discretized offsets in each dimension:
|
| 124 |
+
|
| 125 |
+
$$PE^{h}(u,v) = PE_{X}^{h}(X_{u} - X_{v}) + PE_{Y}^{h}(Y_{u} - Y_{v}) + PE_{Z}^{h}(Z_{u} - Z_{v}) + PE_{T}^{h}(T_{u} - T_{v}).$$
|
| 126 |
+
(13)
|
| 127 |
+
|
| 128 |
+
Relative positional encoding vectors are used directly in the calculation of the compatibility score, as given in (9). Additional details on positional encoding are given in the supplementary material.
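A compact sketch of such a binned encoding for a single head, using `torch.bucketize` for the discretization, is shown below; the bin edges and dimensions are placeholders, and the real model learns one such table per head and per block.

```python
import torch
import torch.nn as nn

class RelativePE(nn.Module):
    """Binned relative positional encoding PE^h(u, v) for one head (cf. Eq. 13)."""

    def __init__(self, bins_per_dim, key_dim=16):
        super().__init__()
        # bins_per_dim: dict mapping each dimension name to its sorted (irregular) bin edges
        self.edges = {d: torch.as_tensor(e, dtype=torch.float) for d, e in bins_per_dim.items()}
        # one learnable vector of size key_dim per bin and per dimension
        self.tables = nn.ParameterDict({
            d: nn.Parameter(torch.zeros(len(e) + 1, key_dim)) for d, e in bins_per_dim.items()})

    def forward(self, offsets):
        # offsets: dict of (n, n) tensors of pairwise offsets per dimension
        pe = 0
        for d, off in offsets.items():
            idx = torch.bucketize(off, self.edges[d])   # discretize the offset
            pe = pe + self.tables[d][idx]               # look up and sum over dimensions
        return pe                                        # (n, n, key_dim)

# toy usage with two of the four dimensions (Y and Z would be analogous)
pe = RelativePE({"X": [-3.0, -1.0, 1.0, 3.0], "T": [-0.75, -0.25]}, key_dim=8)
offsets = {"X": torch.randn(5, 5), "T": torch.randn(5, 5)}
print(pe(offsets).shape)  # torch.Size([5, 5, 8])
```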
|
| 129 |
+
|
| 130 |
+
We evaluate the performance and inference time of our approach and other state-of-the-art methods in both online and frame-by-frame settings. We use our proposed dataset HelixNet and the standard SemanticKITTI dataset.
|
| 131 |
+
|
| 132 |
+
Online Evaluation Setting. We aim at evaluating the real-time readiness of rotating LiDAR semantic segmentation algorithms in the context of autonomous driving. The total latency of a model is determined by its inference speed and by the time it takes to acquire its input. Operating on full frames requires at least 104ms of acquisition, which is incompatible with realistic autonomous driving scenarios. Instead, we propose an online evaluation setting using the slices defined in Sec. 4.1. By default, we use a slice size of one fifth of a turn of the sensor head: ∆θ = 2π/5, corresponding to 21ms of acquisition.
|
| 133 |
+
|
| 134 |
+
Slices are processed sequentially. We define the inference latency of a segmentation method as the average time between the release of the last point of a slice and its segmentation. To meet the real-time requirement, inference must be faster than the acquisition of a slice. Slower processing would cause the classification to continuously fall behind. Although thinner slices directly reduce acquisition latency, they also make the real-time requirement stricter: while a full turn must be processed in less than 104ms, a fifth of a turn must be processed in at most 21ms.
|
| 135 |
+
|
| 136 |
+
Adapting SemanticKITTI. SemanticKITTI [3,16] contains 43 552 frames along 22 sequences of LiDAR scans densely annotated with 19 classes. In contrast to HelixNet, SemanticKITTI is not formatted with respect to the sensor rotation and only gives the acquisition time and sensor position once per frame. To measure the latency, we make the following approximation: (i) the fibers are assumed to be vertically aligned, meaning that the angle of the points is the same as the sensor's; (ii) we interpolate the acquisition time of points between frames from their angular positions; (iii) we use the acquisition time as release time. To obtain the absolute positions of the voxels, we assume that the sensor jumps between the positions given by the camera poses for each frame. In our open-source implementation, we provide an adapted dataloader allowing methods already running on SemanticKITTI to be evaluated in the online setting with minimal adaptation.
|
| 137 |
+
|
| 138 |
+
Adapting Competing Methods. To evaluate the semantic segmentation performance and latency of other segmentation algorithms in the online setting, we process the point clouds corresponding to each slice independently and sequentially. This approach restricts the spatial receptive field to the extent of the slices. However, as the sensor moves, it is not straightforward to add past slices whose relative positions may no longer be valid. By explicitly modeling the spatiotemporal offset between voxels, Helix4D does not suffer from this limitation.
|
| 139 |
+
|
| 140 |
+
We selected five segmentation algorithms with open-source implementations and trained models for SemanticKITTI. SalsaNeXt [12] uses range images, PolarNet [48] and Panoptic PolarNet [50] a bird's-eye-view polar grid, SPVNAS [43] a regular grid, and Cylinder3D [51] a cylindrical grid. We do not consider methods that stack frames, as their structure and resulting latency are incompatible with the online setting. When using SemanticKITTI, we evaluate the provided pretrained models on the validation set. On HelixNet, we retrain the models from scratch
|
| 141 |
+
|
| 142 |
+
Table 2: **Semantic Segmentation Results.** Performance of Helix4D and competing approaches on HelixNet and on the validation set of SemanticKITTI\*, in the frame-by-frame and online settings. We report the mean Intersection-over-Union (mIoU) and the inference time in ms. Methods meeting the real-time requirement are indicated with ✓ and those that do not with ✗. \*SemanticKITTI is denoted as SK; measuring the latency on this dataset requires making non-realistic approximations about the fiber positions.
|
| 143 |
+
|
| 144 |
+
| Method | Size (×10⁶) | HelixNet, full frame (104 ms) | SK*, full frame | Inf. (ms) | HelixNet, ⅕ frame (21 ms) | SK*, ⅕ frame | Inf. (ms) |
|--------------------|------|------|------|--------|------|------|-------|
| SalsaNeXt [12] | 6.7 | 69.4 | 55.8 | 23 ✓ | 68.2 | 55.6 | 10 ✓ |
| PolarNet [48] | 13.6 | 73.6 | 58.2 | 49 ✓ | 72.2 | 56.9 | 36 ✗ |
| Pan. PolarNet [50] | 13.7 | — | 64.5 | 50 ✓ | — | 60.3 | 44 ✗ |
| SPVNAS [43] | 10.8 | 73.4 | 64.7 | 73 ✓ | 69.9 | 57.8 | 44 ✗ |
| Cylinder3D [51] | 55.9 | 76.6 | 66.9 | 108 ✗ | 75.0 | 65.3 | 54 ✗ |
| Helix4D (Ours) | 1.0 | 79.4 | 66.7 | 45 ✓ | 78.7 | 66.8 | 19 ✓ |
|
| 153 |
+
|
| 154 |
+
Table 3: **HelixNet Semantic Segmentation Scores.** We report the IoU for each class of HelixNet evaluated in the online setting with slices of 72°.
|
| 155 |
+
|
| 156 |
+
| Method | Road | Other surface | Building | Vegetation | Traffic signs | Static vehicle | Moving vehicle | Pedestrian | Artifact | Avg |
|-----------------|------|------|------|------|------|------|------|------|------|------|
| SalsaNeXt [12] | 84.4 | 76.1 | 88.7 | 70.7 | 61.4 | 58.6 | 35.1 | 68.5 | 69.7 | 68.2 |
| PolarNet [48] | 86.2 | 77.9 | 91.2 | 77.9 | 63.2 | 64.8 | 35.4 | 68.1 | 84.8 | 72.2 |
| SPVNAS [43] | 80.5 | 77.1 | 93.0 | 81.8 | 68.0 | 60.9 | 36.9 | 71.7 | 59.0 | 69.9 |
| Cylinder3D [51] | 85.3 | 78.4 | 93.5 | 83.9 | 66.2 | 63.3 | 35.7 | 77.7 | 90.9 | 75.0 |
| Helix4D (Ours) | 87.8 | 82.5 | 94.0 | 84.4 | 68.9 | 72.3 | 46.4 | **78.8** | 93.3 | 78.7 |
|
| 164 |
+
|
| 165 |
+
using the procedure of their official repositories. We removed all test-time augmentations that resulted in prohibitive inference times. All methods are evaluated on the same workstation using an NVIDIA Tesla V100 32 GB GPU.
|
| 166 |
+
|
| 167 |
+
Analysis. In Table 2, we report performance in the frame-by-frame and online settings with slices of $72^{\circ}$, for Helix4D and competing methods, on HelixNet and SemanticKITTI. We observe that Helix4D yields state-of-the-art accuracy, with mIoU scores only matched by Cylinder3D [51]. However, Cylinder3D is 50 times larger in terms of parameters and twice as slow, not meeting the real-time requirement even in the full-frame setting. As reported in Table 3, distinguishing moving vehicles in HelixNet is particularly difficult. Our approach even largely outperforms Panoptic PolarNet, although this method uses instance annotations as supervision, a requirement that also prevents us from evaluating it on HelixNet. Helix4D yields significantly improved scores thanks to its larger spatio-temporal receptive field: 14m and 1000ms vs. 8m and 21ms for Cylinder3D for a fifth of a rotation.
|
| 168 |
+
|
| 169 |
+

|
| 170 |
+
|
| 171 |
+
Fig. 7: Influence of Slice Size. We plot the processing time (left, in ms) and precision (right, in mIoU) of different methods with respect to the considered size of slices, estimated on the validation set of SemanticKITTI [3]. Methods whose inference time is slower than the acquisition time of the slice (red shaded area) do not meet the real time requirement.
|
| 172 |
+
|
| 173 |
+
Table 4: Ablation Study. We report the speed and accuracy of several modifications of Helix4D on the validation set of SemanticKITTI.
|
| 174 |
+
|
| 175 |
+
| Method | Size (×10³) | Full frame mIoU | Inf. (ms, 104 ms budget) | ⅕ frame mIoU | Inf. (ms, 21 ms budget) |
|------------------------------|------|------|------|------|------|
| Helix4D | 985 | 66.7 | 45 ✓ | 66.8 | 19 ✓ |
| (a) Asymmetric Convolutions | 1171 | 66.6 | 56 ✓ | 66.6 | 31 ✗ |
| (b) Cylindrical U-Net | 985 | 58.6 | 22 ✓ | 60.2 | 16 ✓ |
| (c) Slice-by-Slice | 985 | 62.9 | 29 ✓ | 62.6 | 19 ✓ |
| (d) w. Queries | 993 | 65.2 | 45 ✓ | 64.8 | 20 ✓ |
| (e) w/o. Positional Encoding | 983 | 64.3 | 41 ✓ | 64.1 | 18 ✓ |
| (f) Helix4D Tiny | 306 | 65.3 | 45 ✓ | 64.9 | 17 ✓ |
|
| 184 |
+
|
| 185 |
+
In the online setting, only two approaches meet the real-time requirement: SalsaNeXt [12] and Helix4D. Our approach outperforms SalsaNeXt by over 10 mIoU points in both the full-frame and the online settings. In short, Helix4D is as accurate as the largest and slowest models, with an inference speed comparable to that of the fastest and least accurate models. The total latency (acquisition plus inference time) of our model evaluated online is 40ms (21 + 19ms), and it reaches the same performance as Cylinder3D evaluated on full frames with a latency of 212ms (104 + 108ms), a speed-up of more than 5×.
|
| 186 |
+
|
| 187 |
+
In Figure 7, we report the inference time and mIoU for different slice sizes. Due to various overheads, the inference time appears in an affine relationship with the size of slices, making the real-time requirement stricter for smaller slices. Due to its very design, the performance of Helix4D is not affected by the slice size. In contrast, competing methods perform worse with smaller slices.
|
| 188 |
+
|
| 189 |
+
Ablation Study We assess on SemanticKITTI the impact of different design choices by evaluating several alterations of our method, reported in Table 4.
|
| 190 |
+
|
| 191 |
+
- (a) Asymmetric Convolutions: we replace the 3 × 3 × 3 convolutions in our U-Net with the convolution design proposed by Cylinder3D [51]. We did not observe a significant change in performance, while the run-time increases by 50%, failing the real-time requirement for slices of 72°.
|
| 192 |
+
- (b) Cylindrical U-Net: we replace the transformer by a 1 × 1 × 1 convolution on the voxels of the lowest resolution. We observe a slight decrease in run-time and a significant drop of over 6 mIoU points. This result shows that the transformer is able to learn meaningful spatio-temporal features at low resolution.
|
| 193 |
+
- (c) Slice-by-Slice: we restrict the mask M(v) of each voxel to its current slice. This reduction in the temporal receptive field results in a drop of 4 mIoU points, without any appreciable acceleration.
|
| 194 |
+
- (d) w. Queries: we modify our simplified transformer to associate a query for each voxel along with keys and values, and use key-queries compatibilities. This does not affect the run-time and slightly decreases the performance.
|
| 195 |
+
- (e) w/o. Positional Encoding: we remove the relative positional encoding PE in the calculation of compatibilities in equation (9). This leads to a slightly decreased run time, but decreases performance by more than 2.5 points. This illustrates the advantage of explicitly modeling the spatio-temporal voxel offsets.
|
| 196 |
+
- (f) Helix4D Tiny: we replace the learned pooling in our U-Net with max-pooling and use narrower feature maps, for a total of 306k parameters. This variant performs only two points below Helix4D with a third of its parameters.
|
2209.05861/main_diagram/main_diagram.drawio
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2209.05861/paper_text/intro_method.md
ADDED
|
@@ -0,0 +1,78 @@
|
|
|
| 1 |
+
# Introduction
|
| 2 |
+
|
| 3 |
+
Overview: Sponsored search is a form of online advertising that enables advertisers to display their ads along with organic results on web search engines. It is a major source of revenue for search engines and helps advertisers attract targeted user traffic. To participate in sponsored search, advertisers can bid on keywords that are
|
| 4 |
+
|
| 5 |
+
|
| 8 |
+
|
| 9 |
+
relevant to their products or services using different match types. For instance, an exact match bid keyword will only match search queries that have the same search intent as the keyword. Conversely, phrase match will match queries that contain or include the meaning of the keyword. Matching search queries with relevant keywords, also referred to as query rewriting, is a complex and challenging task for various reasons. First, search queries and bid keywords are often short and ambiguous, making it difficult to precisely infer the user's or advertiser's intent. For example, the query *"currys 912 black"* is vague, but the user is actually looking for the HP 912 ink cartridge in black color from the retailer Currys. Second, search queries and keywords can span a wide range of topics, domains, languages, and countries, adding to the complexity of the task. Third, a search query must be matched with all possible high-quality exact/phrase keywords, not just one, from a large collection of bid keywords. Lastly, the matching must be done in real-time and be computationally efficient to handle all search traffic.
|
| 10 |
+
|
| 11 |
+
Existing methods for query rewriting matching can be broadly categorized into two main groups: information retrieval (IR) and generative or NLG based retrieval. IR methods use various techniques to learn sparse or dense representations for queries and keywords based on bag-of-words, static pretrained features, or deep learning models [\[1,](#page-6-0) [3,](#page-6-1) [4,](#page-6-2) [6,](#page-6-3) [16,](#page-6-4) [20–](#page-6-5)[22,](#page-6-6) [25\]](#page-6-7). Among them, recent Dense Retrieval (DR) methods that leverage effective negative mining strategies such as ANCE [\[29\]](#page-6-8), RocketQA [\[24\]](#page-6-9), NGAME [\[7\]](#page-6-10) have been shown to achieve state-of-the-art performance on various retrieval benchmarks [\[2\]](#page-6-11). On the other hand, NLG-based methods for query rewriting such as CLOVER [\[18\]](#page-6-12), ProphetNet-Ads [\[23\]](#page-6-13) use generative models to directly transform queries into keywords. This involves training language models to generate query rewrites and then constraining their generation space during inference to the set of bid keywords.
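For illustration, a bare-bones dense-retrieval scorer in the bi-encoder style could look as follows; the checkpoint, the mean pooling, and the brute-force scoring are placeholder choices (production systems pre-compute keyword embeddings and use approximate nearest-neighbor search), and this is not the implementation of any of the cited systems.

```python
import torch
import torch.nn.functional as F
from transformers import AutoModel, AutoTokenizer

# Shared encoder for queries and bid keywords; ranking by cosine similarity.
tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
encoder = AutoModel.from_pretrained("bert-base-uncased")

def embed(texts):
    batch = tokenizer(texts, padding=True, truncation=True, return_tensors="pt")
    with torch.no_grad():
        hidden = encoder(**batch).last_hidden_state          # (b, tokens, hidden)
    mask = batch["attention_mask"].unsqueeze(-1)              # mean-pool over real tokens
    emb = (hidden * mask).sum(1) / mask.sum(1)
    return F.normalize(emb, dim=-1)

query_emb = embed(["student houses for rent nottingham"])
keyword_emb = embed(["student flats in nottingham", "currys kettles", "honda 250 cc motorcycle"])
scores = query_emb @ keyword_emb.T                            # cosine similarities
print(scores, scores.argsort(descending=True))                # ranked keyword indices
```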
|
| 12 |
+
|
| 13 |
+
Given these two distinct approaches to the same problem, we perform a comprehensive study comparing their performance. Our results show that NLG and DR methods retrieve a similar number of high-quality keywords per query, but many of them are different and non-overlapping. Specifically, about 40% of the high-quality keywords retrieved by DR and NLG are unique to each method and were not retrieved by the other. We analyze the reasons for these differences and find that they stem from structural differences between the approaches. For instance, NLG methods treat query rewriting as a
|
| 14 |
+
|
| 15 |
+
<sup>\*</sup>Work done during internship at Microsoft
|
| 16 |
+
|
| 17 |
+
<span id="page-1-0"></span>
|
| 18 |
+
|
| 19 |
+
| Query | Generative Retrieval | Dense Retrieval | CLOVER-Unity |
|
| 20 |
+
|----------------------------|-----------------------------------|-----------------------------------|------------------------------------------------|
|
| 21 |
+
| | black 912 | currys kettles | hp 912 ink cartridges black |
|
| 22 |
+
| currys 912 black | currys currys | currys electrical kettles | currys ink cartridges |
|
| 23 |
+
| | black curry | kettles at currys | black hp printer cartridges |
|
| 24 |
+
| | student properties nottingham | rentals in nottinghamshire uk | nottingham student studios |
|
| 25 |
+
| student houses for rent | student accom nottingham | rentals nottingham united kingdom | student flats in nottingham |
|
| 26 |
+
| nottingham | rent house rental | 3 bedroom house nottingham | student lettings nottingham united kingdom |
|
| 27 |
+
| | 250 honda | honda | honda 250 cc motorcycle |
|
| 28 |
+
| 06 honda 250 | honda motor | hondas 250 | 06 honda cbr 250 |
|
| 29 |
+
| | honda250 | car for lease | honda 2006 bike |
|
| 30 |
+
| | biglietti per la cappella sistina | sistina roma | cappella sistina excursions |
|
| 31 |
+
| | (tickets for the sistine chapel) | (sistine rome) | (sistine chapel excursions) |
|
| 32 |
+
| cappella sistina biglietti | capella sistina | cappella sistina museum | cappella sistina vatican city |
|
| 33 |
+
| (sistine chapel tickets) | (sistine chapel) | (sistine chapel museum) | (sistine chapel vatican city) |
|
| 34 |
+
| | la cappella | cappella sistina orari e prezzi | musei vaticani e cappella sistina prenotazione |
|
| 35 |
+
| | (the chapel) | (sistine chapel times and prices) | (vatican museums and sistine chapel booking) |
|
| 36 |
+
|
| 37 |
+
Table 1: Comparison of keywords retrieved by CLOVERv2, NGAME and our proposed CLOVER-Unity approach for four queries
|
| 38 |
+
|
| 39 |
+
token-level generation task with a token-level loss and thus are better at learning word-level relationships such as synonymy and hypernymy. As shown in example 2 of Table [1,](#page-1-0) NLG models can better identify interchangeable tokens, such as *houses* and *properties* or *accom*, which do not alter the query/keyword intent. However, we also find that NLG methods, especially the non-autoregressive ones, struggle to retrieve longer keywords that may have a few additional tokens. This is because non-autoregressive NLG models have to predict all tokens of the keyword in parallel during inference. In contrast, DR methods do not depend on keyword token length during inference and thus do not have this limitation. In the same example in Table [1,](#page-1-0) DR methods retrieve keywords that contain additional tokens such as *united kingdom*, which NLG models miss.
|
| 40 |
+
|
| 41 |
+
Since NLG and DR models retrieve substantially different high-quality keywords for the same query, a natural question is whether we can combine the strengths of both approaches. To this end, we propose CLOVER-Unity, a unified retriever that blends the advantages of NLG and DR methods using a shared encoder. Unlike standard multi-task learning, where a shared model solves more than one related task, CLOVER-Unity solves the same task (query rewriting) using two different methods: NLG and DR. Through offline experiments, we show that the NLG and DR components of CLOVER-Unity consistently outperform the individually trained NLG and DR models by an average of 19.2% and 7.4%, respectively, in terms of good keyword density. We attribute these improvements to training the Unity model with both the contrastive DR loss and the token-level NLG loss. For instance, we show that the DR component of CLOVER-Unity is better at identifying token-level relationships due to the additional token-level objective function. Further, we show that a single CLOVER-Unity model with one forward pass retrieves 9.8% more good keywords than the ensemble of two separate DR and NLG models while using 45.8% lower GPU compute. We also conduct extensive online experiments on Microsoft Bing in 140+ countries and achieve significant gains in revenue, clicks, impressions, and coverage, demonstrating the practical utility of CLOVER-Unity for query rewriting in sponsored search. To the best of our knowledge, we are the first to present such a unified
|
| 42 |
+
|
| 43 |
+
<span id="page-1-1"></span>
|
| 44 |
+
|
| 45 |
+
Figure 1: Training process of CLOVER-Unity using contrastive DR and log-likelihood NLG loss functions
|
| 46 |
+
|
| 47 |
+
framework for query rewriting. We also share our practical lessons and optimization tricks for deploying such unified models in production. Finally, we demonstrate the versatility of CLOVER-Unity by applying it to problems beyond sponsored search. We evaluate it on public datasets from the extreme classification repository [\[2\]](#page-6-11) and show that CLOVER-Unity outperforms the leading extreme classification algorithms in all metrics.
|
| 48 |
+
|
| 49 |
+
# Method
|
| 50 |
+
|
| 51 |
+
In this section, we describe the various components of our proposed approach for query rewriting. Our goal is to match any given search query with all its relevant exact bid keywords and phrase bid keywords in real-time. We first present details of the NLG and DR baselines in [2.1](#page-2-0) and [2.2.](#page-2-1) Then, we introduce CLOVER-Unity, which combines the benefits of both methods in a single model.
|
| 52 |
+
|
| 53 |
+
NLG models treat query rewriting as a token-level prediction task, *i.e.*, they predict relevant keyword tokens $\{k_t\}_{t=1}^m$ individually instead of retrieving them directly. However, predicting tokens one at a time in an autoregressive (AR) fashion is very slow as it requires m sequential forward passes over the model (m is the keyword length). Moreover, when performing beam search with beam size B, each forward pass requires O(B) compute. As we show in section 3.2, existing AR models like CLOVER [18], ProphetNetAds [23], and even efficient variants with deep encoder and shallow decoder [14] have an order of magnitude higher inference cost than dense retrieval approaches. To overcome these issues, we propose CLOVERv2, which uses an encoder-based non-autoregressive (NAR) model with AR trie decoding for efficient inference. Specifically, we first assume the keyword tokens $K_t$ are conditionally independent given the query, i.e., the probability distribution can be factorized as:
|
| 54 |
+
|
| 55 |
+
$$p^{NAR}(K_1,\ldots,K_m|Q,\theta) = \prod_{t=1}^m p^{NAR}(K_t|Q,\theta)$$
|
| 56 |
+
|
| 57 |
+
This decomposition allows us to predict the token distributions independently in parallel, using only one forward pass through an encoder model such as BERT [8] with a language modeling head.
|
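To make the single-pass NAR prediction concrete, here is a minimal PyTorch sketch (not the paper's implementation): a tiny placeholder transformer encoder with a language-modeling head stands in for the BERT-style model, and all position-wise token distributions come out of one forward pass. The sizes and module choices below are illustrative assumptions.

```python
# Minimal sketch of non-autoregressive (NAR) keyword-token prediction:
# a single encoder forward pass yields an independent distribution over
# the vocabulary at every position. The tiny encoder is a placeholder
# for a pretrained model such as BERT with an LM head.
import torch
import torch.nn as nn

vocab_size, hidden, max_len = 1000, 64, 8                 # illustrative sizes

embed = nn.Embedding(vocab_size, hidden)
encoder = nn.TransformerEncoder(
    nn.TransformerEncoderLayer(d_model=hidden, nhead=4, batch_first=True),
    num_layers=2,
)
lm_head = nn.Linear(hidden, vocab_size)                    # language-modeling head

query_ids = torch.randint(0, vocab_size, (1, max_len))     # toy query tokens
h = encoder(embed(query_ids))                              # one forward pass
log_p = torch.log_softmax(lm_head(h), dim=-1)              # [1, max_len, vocab]

# p(K_1, ..., K_m | Q) factorizes over positions, so every position's
# distribution is available in parallel after this single pass.
print(log_p.shape)
```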
| 58 |
+
|
| 59 |
+
**Decoding:** We construct a trie $T_K$ of valid bid keywords K to constrain the generation space to the closed set K. We developed an optimized C++ implementation of the trie data structure that is highly memory efficient and can be hosted even on commodity CPU machines. For example, a trie of 1 billion keywords (45 GB of raw text) requires only 6.5 GB of CPU RAM. The trie plays a crucial role in decoding; it functions as an autoregressive language model to guide the generation process. In particular, we proceed in a left-to-right manner, where at each time step t, we consider only the children of the partially generated prefix $k_{< t}$ in $T_K$ as possible next tokens. Essentially, decoding a NAR model with trie is equivalent to an AR language model with (unnormalized) probability distribution:
|
| 60 |
+
|
| 61 |
+
$$\tilde{p}(K_t = k_t | k_{< t}, q, \mathcal{K}) = \begin{cases} p^{NAR}(K_t = k_t | q), & \text{if } k_t \in \text{child}_{T_{\mathcal{K}}}(k_{< t}) \\ 0, & \text{otherwise} \end{cases}$$
|
| 62 |
+
|
| 63 |
+
We note that NAR models with AR trie decoding (i) do not suffer from issues such as token repetition or mode mixing observed in vanilla NAR [10], and (ii) achieve comparable or better performance than AR models while being more efficient (section 3.2).
|
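To illustrate how the trie constrains generation, the following toy, pure-Python sketch (our own illustration, not the paper's optimized C++ trie) runs a left-to-right beam search in which only children of the current prefix are admissible next tokens; the keyword list and position-wise log-probabilities are made up.

```python
import math

def build_trie(keywords):
    """Nested-dict trie over tokenized keywords; None marks end of keyword."""
    root = {}
    for kw in keywords:
        node = root
        for tok in kw:
            node = node.setdefault(tok, {})
        node[None] = {}                      # end-of-keyword marker
    return root

def trie_beam_search(log_p, trie, beam=3):
    """log_p[t][token] are NAR per-position log-probs; only children of the
    partially generated prefix in the trie are allowed as next tokens."""
    beams = [([], 0.0, trie)]
    finished = []
    for t in range(len(log_p)):
        cand = []
        for prefix, score, node in beams:
            for tok, child in node.items():
                if tok is None:              # prefix is already a full keyword
                    finished.append((prefix, score))
                    continue
                cand.append((prefix + [tok], score + log_p[t].get(tok, -math.inf), child))
        beams = sorted(cand, key=lambda b: b[1], reverse=True)[:beam]
    finished += [(p, s) for p, s, n in beams if None in n]
    return sorted(finished, key=lambda f: f[1], reverse=True)

# Toy example: two bid keywords and hand-written position-wise log-probs.
trie = build_trie([["hp", "912", "ink"], ["hp", "ink"]])
log_p = [{"hp": -0.1}, {"912": -0.4, "ink": -0.9}, {"ink": -0.2}]
print(trie_beam_search(log_p, trie))
```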
| 64 |
+
|
| 65 |
+
We adopt a siamese architecture with shared parameters to learn dense representations for queries and keywords. To train the encoder effectively, we need to carefully select a small set of irrelevant keywords for each query. We cannot use all irrelevant keywords for a query because the number of training points and bid keywords is very large (millions or billions). Moreover, randomly choosing negative keywords from a uniform distribution or from the same batch can result in uninformative negatives and slow convergence [29]. To address these issues, we use NGAME [7], a negative mining technique that selects hard negative samples within the training batches. NGAME has been shown to outperform other approaches such as ANCE [29], RocketQA [24], and TAS [11]. In our application, we omit the classifier layer in NGAME because bid keywords $\mathcal K$ change
|
| 66 |
+
|
| 67 |
+
over time. After training, we index the keyword embeddings on SSD using the DiskANN algorithm [13], which requires minimal CPU RAM online. During inference, we retrieve the keywords that are approximately the top k nearest neighbors of the query embedding.
|
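A schematic view of the dense-retrieval side, assuming the siamese encoder has already produced unit-normalized embeddings: keyword vectors are indexed offline and a query is answered with (approximate) top-k nearest-neighbour search. The brute-force inner-product search below is only a stand-in for the DiskANN index, and the random vectors are placeholders for real encoder outputs.

```python
import numpy as np

rng = np.random.default_rng(0)
d, num_keywords, k = 128, 10_000, 5

# Offline: encode all bid keywords and normalize (placeholder embeddings here).
keyword_vecs = rng.normal(size=(num_keywords, d))
keyword_vecs /= np.linalg.norm(keyword_vecs, axis=1, keepdims=True)

def retrieve(query_vec, k=5):
    """Return indices of the k most similar keywords (brute force for illustration)."""
    q = query_vec / np.linalg.norm(query_vec)
    scores = keyword_vecs @ q                        # inner-product similarity
    top = np.argpartition(-scores, k)[:k]            # unordered top-k
    return top[np.argsort(-scores[top])]             # ordered by score

print(retrieve(rng.normal(size=d), k))
```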
| 68 |
+
|
| 69 |
+
We now introduce CLOVER-Unity, which uses a variant of the standard multi-task learning setup where we solve one task with two different methods. In particular, we use a single encoder to perform dense and generative retrieval simultaneously, as shown in Figure 1. Given a query $q = \{q_1, \ldots, q_n\}$ , we prepend a special [CLS] token and encode the query using a transformer encoder $\mathcal{E}_{\theta}$ . The resulting sequence of hidden states, $h^q = \{h^q_c, h^q_1, \ldots, h^q_n\}$ , is used to compute the DR and NLG representations of the query. The first hidden state, $h^q_c$ , serves as the DR representation, while the remaining hidden states, $h^q_t \in \mathbb{R}^d$ , are projected to the output vocabulary space to obtain the NAR logits, $\log \tilde{p}(K_t|q,\theta) = \mathbf{W}h^q_t$ , where $\mathbf{W} \in \mathbb{R}^{V \times d}$ is a learnable weight matrix and $V$ and $d$ are the vocabulary and hidden sizes. We apply the same process to keywords, obtaining a DR representation $h^k_c$ for each keyword.
|
| 70 |
+
|
| 71 |
+
**Training:** We train CLOVER-Unity on a supervised dataset $\mathcal{D} = \{(q^{(i)}, k^{(i)})\}_{i=1}^L$ of high-quality query keyword pairs. For each query $q^{(i)}$ , we obtain hard negative keywords $l^{(i)}$ using the NGAME negative mining technique. Our objective is to minimize a weighted combination of the triplet margin loss for DR and negative log-likelihood for NLG, as defined by the following equation:
|
| 72 |
+
|
| 73 |
+
$$\mathcal{L}(\theta, \mathcal{D}) = \frac{1}{L} \sum_{q,k \in \mathcal{D}} \left( \left[ (h_c^l)^T h_c^q - (h_c^k)^T h_c^q + \gamma \right]_+ - \alpha \sum_{t=1}^m \log p(K_t = k_t | q, \theta) \right)$$
|
| 75 |
+
|
| 76 |
+
where $h_c^q$ , $h_c^k$ , and $h_c^l$ are the DR representations of the query, relevant, and irrelevant keywords, respectively.
|
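A small PyTorch sketch of this weighted objective, assuming the shared encoder has already produced the DR vectors $h_c^q$, $h_c^k$, $h_c^l$ and the per-position NAR logits; the margin, weight $\alpha$, and tensor shapes are illustrative rather than the paper's settings.

```python
import torch
import torch.nn.functional as F

def unity_loss(hq, hk, hl, nar_logits, target_ids, margin=0.3, alpha=1.0):
    """hq, hk, hl: [B, d] DR vectors of query, relevant and hard-negative keyword.
    nar_logits: [B, m, V] per-position logits for the target keyword tokens."""
    # Triplet margin term on inner-product scores (DR component).
    dr = F.relu((hl * hq).sum(-1) - (hk * hq).sum(-1) + margin)
    # Token-level negative log-likelihood (NLG component).
    nll = F.cross_entropy(nar_logits.transpose(1, 2), target_ids, reduction="none").sum(-1)
    return (dr + alpha * nll).mean()

B, d, m, V = 4, 64, 6, 1000
hq = torch.randn(B, d, requires_grad=True)   # stands in for encoder outputs
loss = unity_loss(hq, torch.randn(B, d), torch.randn(B, d),
                  torch.randn(B, m, V), torch.randint(0, V, (B, m)))
loss.backward()   # one loss drives the shared encoder via both objectives
print(float(loss))
```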
| 77 |
+
|
| 78 |
+
**Inference:** During inference, we obtain the query representation $h_c^q$ and the NAR probabilities $p(K_t|q,\theta)$ through a single pass over the encoder $\mathcal{E}_\theta$ . We then simultaneously retrieve bid keywords using (i) DiskANN search on the query vector $h_c^q$ , and (ii) trie-based beam search on the predicted probabilities $p(K_t|q,\theta)$ . However, we observed that using standard beam search for decoding NLG models tends to produce very similar keywords with common prefixes and low diversity. To address this issue, we propose permutation decoding, which leverages a property of NAR models: they do not have an explicit order of generation. Therefore, we decode NAR models in multiple orders (e.g., left-to-right and right-to-left) and rank the top B results based on cumulative log probability, which remains unchanged regardless of the decoding order.
|
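A toy illustration of the idea behind permutation decoding: because the NAR score of a complete keyword is a sum of position-wise log-probabilities, it does not depend on the order in which the tokens were decoded, so candidates produced by different decoding orders can be pooled and ranked on one scale. The candidate lists and probabilities below are fabricated for illustration.

```python
import math

def score(keyword, log_p):
    """Order-invariant NAR score: sum of position-wise log-probs."""
    return sum(log_p[t].get(tok, -math.inf) for t, tok in enumerate(keyword))

# Suppose left-to-right and right-to-left constrained decoding returned
# (partly different) candidate keywords; pool them and rank jointly.
log_p = [{"hp": -0.1, "currys": -1.2},
         {"912": -0.4, "ink": -0.9},
         {"ink": -0.2, "cartridges": -0.6}]
l2r = [["hp", "912", "ink"]]
r2l = [["hp", "ink", "cartridges"], ["currys", "ink", "cartridges"]]
pooled = {tuple(kw): score(kw, log_p) for kw in l2r + r2l}
for kw, s in sorted(pooled.items(), key=lambda x: -x[1]):
    print(kw, round(s, 2))
```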
2209.08244/main_diagram/main_diagram.drawio
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
<mxfile host="app.diagrams.net" modified="2022-05-09T05:35:14.317Z" agent="5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/101.0.4951.54 Safari/537.36" etag="dSdJI9e1SE0aPeBrx0hF" version="17.5.1" type="device"><diagram id="0lhl3Z3JChG9FTZ58kOk" name="Page-2">7V1tc9o4EP41zPRuhozfZMNHIEnvZq6ddDpzbT91DAjQxVjENkm4X396tS2/gJNgx1zVacFayWujXT2PtF65A3u2ff4Y+bvNJ7yEwcAyls8D+3pgWZbhuuSLSg5cYhoe4JJ1hJZClgm+on+hbCike7SEsdIwwThI0E4VLnAYwkWiyPwowk9qsxUO1Kvu/LW4opEJvi78AJaafUPLZCOkppFr/gdE64249AiIiq2fNuaCeOMv8VPuWvbNwJ5FGCf8aPs8gwHtPdkvaINuH7azzzerOH7eWd8m/m045NpvX3JK+hMiGCbnVW1x1Y9+sBf9tcMBWhzoeQmM/AThkBzjFbXGml6e/HQw+2CSj99oI1ob4X24FBX3vMINyI1O451P6+PkIKzhPuxpb00XOMDRwJ7Qk9dz/4NBz7Zm5LPyiF6JnMXMHybDlb9FwYGfvsUhJpdZQLVJzPyQNjB2z9l1ydGafZM7Jf+Kfg9Iz5EPbtO0JHsSsL4kkmt6TG8O0M4DpMdPtTXTttKKr1JjZWq4xdIa8sE6n379NhQGAcwMpM10HrHSmpekiYFiZMDNDNgdEWWIK8suyc2YXpJdBPK7M5nepw1R95UbgwifSM8y+SbZBrlmSz/eKOdRizHgYCKXy1AQzJiXUGGIQ1Y7zXUAjBL43KiL06q0dz9CvIVJxPsgp8S1M3MYh0w+zomfGIpkyikeyroNB5JMX+48P87k69z1i7ZnxdQB8kLVLUW7kv/yoUdwgI8+4fAcwdKBmFqP2kCxnLCbsBqvTg3EzJM3DtVlW8bUmkzoeE8ifA9lDTOaPcVELUoofYyM9E5OYpnAPGHlDIYFtmUGNFKqcvgpgqgcieNPOdQHos0mB/iynS+IJrNNBqaD1CIvwFb7BdgqzUYHKrdYHm0tjbYabTXaarTtDdo6cnrfG7R1Sp0Ol2QhIIo4SjZ4jUM/uMmk08U+ekytktmIdlx2wl8Y70STf2CSHIRp/H2CVbvBZ5R8zx3/oKqugChdPwvNrHCQhZD89u/5Qu4sWsxOYyV5XtFHXmbnGO+jBTy9LEj8aA2P6RMUR3v6qNdEMCC496guyc7uAuBshGtrwtWEqwlXE25vCNftHeG6mnDPR7h2Q8IFvSLc0dkI90g80ZqamnQ16WrS1aTbNemOTPs06bpeh6Q71qR7PtIFDUl31CvSNcusKwco7a9Kbizwl+kcITD63HPhBwNv+mngXf9EA3BDjjkPe9eclkP4wA/qGtOGRb4n3/6WokA4j3ecVwXr78MAxrFgfPJvh34SLUNE1fAWG4p09BkxGbxrCvQpIvHfXINIZJwnqvv6AVqH5HhBXAgSRJlSNEDkJ0xExRYtl2zYRJB0mD9nqqgz7jAKE2ZMQLrqmuoigyMW7lmHVHlYk6KcV3uDlLu4kragbGyr6wfglKGMUVJnUGaOa/14iR4r3ZiacygsSP1YGLHsyep4WPkLVc8GBo+QWj1/qhHL3AUpM5XqRUpOmZDQ1Nzy/fo7mBcFqNH49KrG56d9kKDhRMyaJwH56aGf0EnqnZx1/5lNvdIBgoq3QGSl2yoNpFTMbHF8eL2A6Z0WXdwygMrWxqjk4rZV4eJOWy4uL5Z3cYJwV1dXHNYuDa3atJ5pSGJLJ1tl8zkV1rPasp53kXOtor1iMr1JJjSRKrMpk90i2h1i1rWULRaBH8dowYWiialM4a4MK5238RsanZrHsdIdjBAxDHXj65dP2k9O5kYNJ3PSXc43mxOn3tERl/dnqxCxGxUcld+qOK3gq+l9vAF85BBUwScRM6ovw3SdHg90hpeOxuhojI7G1EVjbm9HM3f8ztGYYoaXSwvv+ghE3sArIVandWmI1RCrIbY/EFtM6+oBxNoXuQrracQ73YFzcpVknXuV9DYvcN5EtDqdSxOtJlpNtP0h2mI6Vw+IFmiiPSPRWk2J1ukX0XovJlqd2KXpV9Ovpt9Lot9iYlcl/Xaa2GWNNP2ekX6dpvTr9Yp+7frcrsvJiVmtRgsywt85J+bL8BdPhik8LBmbJYTrNhlGKtbJMKetV0qGcW2vZL5Ok2Gsy9zu01I2zLnpymtIV9Lk7SevFP0PWKqOlpNXnPJe3lr6qF04FhkEVDHIZ7LAKGcm18yLz3FB+LBHAZpHaL89cR2Vmy4NHztLbTYNz1Oc1XErtmlUoaXdGte9P1r2Avnax6mRpZreA1egEVJV6AJOQZerauI43h7mVcTeNOZpzKvBPDXZ3bUrIhjdYt4Z9iW5tfFXFjQeeLMvfIcRUTe75zuOLGNIl3tc/nsmZ81/ki8UrpIDVxBA/g13MQrY3uR8BFrvJmr8+MoqeN+o4VaL1rxPzk872hVnEne7Z9YjXja9E2VrFtFvWpqle9ea+dY5Q6yQ/Sn5EKmxXXtsLwf5OOu41ZRNS42zAjmvz6OU6UrK7iQOIVm+ByvZbPX6I1dzKtJ6pW6/GByPtCp4kHccUJwUZlO+/LzQVOeFAnWySWELWzmaPjuVY74nwVvQnyfoqmMZr3Is83/oWE3T3/rmWW4tuVXNodsgPIsR3DFC6/5RYn94zgFqBKKK59wug+2gfnN1Vx5ja485NoPum8e49RPorjzGLLxqQvtOTfaCm86Ye+M99bGrty/+/6ZvAyEHcxws48OWfPHyjr8g5IaX8I4+7sVR6G8hkYTwidbyv2ydv6aL/1fpov/nRKbr5HtNdEjhVRTqKi7tGHbZpat8ur0XHL5/Ss6vEsQ3FdO7jveGIH4hs0uGJTsK4oP+vDVCRxfe9qpNt19rwPdHIx1dOE/Of888S5Jq7zyrY8hyRDk3s5+OAH3nY189rukLDGU0oC8OZ/bU4TqGsstzuMZ51X3zuPfPx9EQ1+prvWT4qi8Op58FXajDNc7F7czjBmIzV26dmm3jsm/+Aw==</diagram></mxfile>
|
2209.08244/main_diagram/main_diagram.pdf
ADDED
|
Binary file (50.8 kB). View file
|
|
|
2209.08244/paper_text/intro_method.md
ADDED
|
@@ -0,0 +1,162 @@
| 1 |
+
# Introduction
|
| 2 |
+
|
| 3 |
+
Cooperative multi-agent reinforcement learning (MARL) is a well-abstracted model for a broad range of real applications, including logistics [@li2019cooperative], traffic signal control [@xu2021hierarchically], power dispatch [@wang2021multiagent], and inventory management [@feng2022multiagent]. In cooperative MARL, centralized training with decentralized execution (CTDE) is a popular learning paradigm, where the information of all agents can be gathered and used in training. Many CTDE methods [@lowe2017multi; @foerster2018COMA; @sunehag2018vdn; @rashid2018qmix; @wang2020qplex; @zhang2021fop; @yu2021surprising] have been proposed and shown great potential to solve cooperative multi-agent tasks.
|
| 4 |
+
|
| 5 |
+
Another paradigm is decentralized learning, where each agent learns its policy based on only local information. Decentralized learning is less investigated but desirable in many scenarios where the information of other agents is not available, and for better robustness, scalability, and security [@Zhang2019multi]. However, *fully decentralized learning* of agent policies (*i.e.*, without communication) is still an open challenge in cooperative MARL.
|
| 6 |
+
|
| 7 |
+
The most straightforward way to learn decentrally is to directly apply independent learning at each agent [@tan1993multi], which, however, induces the well-known non-stationarity problem for all agents [@Zhang2019multi] and may lead to learning instability and a non-convergent joint policy, though the performance varies across empirical studies [@rashid2018qmix; @de2020independent; @papoudakis2021benchmarking; @yu2021surprising].
|
| 8 |
+
|
| 9 |
+
In this paper, we directly tackle the non-stationarity problem in the simplest and most fundamental way, *i.e.*, fixing the policies of other agents while one agent is learning. Following this principle, we propose *multi-agent alternate Q-learning* (**MA2QL**), a *minimalist* approach to fully decentralized cooperative multi-agent reinforcement learning, where agents take turns to update their policies by Q-learning. MA2QL is theoretically grounded: we prove that when each agent guarantees $\varepsilon$-convergence at each turn, the joint policy converges to a Nash equilibrium. In practice, MA2QL requires only minimal changes to independent Q-learning (IQL) [@tan1993multi; @tampuu2015multiagent], and also to independent DDPG [@lillicrap2015continuous] for continuous actions, *i.e.*, simply swapping the order of two lines of code as follows.
|
| 10 |
+
|
| 11 |
+
<figure data-latex-placement="h">
<div class="algorithmic">
<p>[Two side-by-side pseudocode boxes. In each, all agents interact in the environment and agent <span class="math inline"><em>i</em></span> updates by Q-learning until termination; the boxes differ only in whether every agent updates at every iteration (IQL) or agents update one at a time, by turns (MA2QL).]</p>
</div>
<figcaption><strong>MA2QL</strong></figcaption>
</figure>
|
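A schematic sketch of that difference (with placeholder `env.rollout` and `agent.q_learning_update` calls, not the paper's code): IQL lets every agent update at every iteration, whereas MA2QL rotates a single learning agent per turn.

```python
# Schematic contrast between IQL and MA2QL; env/agent APIs are assumed placeholders.
def train_iql(env, agents, num_iters, k_updates):
    for _ in range(num_iters):
        batch = env.rollout(agents)            # all agents act
        for agent in agents:                   # every agent updates every iteration
            for _ in range(k_updates):
                agent.q_learning_update(batch)

def train_ma2ql(env, agents, num_iters, k_updates):
    for it in range(num_iters):
        batch = env.rollout(agents)            # all agents still act together
        agent = agents[it % len(agents)]       # but only one agent updates per turn
        for _ in range(k_updates):
            agent.q_learning_update(batch)     # other agents' policies stay fixed
```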
| 24 |
+
|
| 25 |
+
We evaluate MA2QL on a didactic game to empirically verify its convergence, and multi-agent particle environments [@lowe2017multi], multi-agent MuJoCo [@peng2021facmac], and StarCraft multi-agent challenge [@samvelyan2019starcraft] to verify its performance in discrete and continuous action spaces, fully and partially observable environments. We find that MA2QL consistently outperforms IQL, despite such minimal changes. The effectiveness of MA2QL suggests that simpler approaches may have been left underexplored for fully decentralized cooperative multi-agent reinforcement learning.
|
| 26 |
+
|
| 27 |
+
# Method
|
| 28 |
+
|
| 29 |
+
**Dec-POMDP.** Decentralized partially observable Markov decision process (Dec-POMDP) is a general model for cooperative MARL. A Dec-POMDP is a tuple $M=\left\{S,A,P,Y,O,I,n,r,\gamma\right\}$. $S$ is the state space, $n$ is the number of agents, $\gamma \in [0,1)$ is the discount factor, and $I = \{1,2\cdots n\}$ is the set of all agents. $A = A_1 \times A_2 \times \cdots \times A_n$ represents the joint action space where $A_i$ is the individual action space for agent $i$. $P(s^{\prime} |s,\bm{a} ): S \times A \times S \to [0,1]$ is the transition function, and $r(s,\bm{a} ): S \times A \to \mathbb{R}$ is the reward function of state $s$ and joint action $\bm{a}$. $Y$ is the observation space, and $O(s,i):S \times I \to Y$ is a mapping from state to observation for each agent. The objective of Dec-POMDP is to maximize $J({\bm{\pi}}) = \mathbb{E}_{\bm{\pi}}\left[ \sum_{t = 0}^\infty \gamma^t r(s_t,\bm{a}_t ) \right],$ and thus we need to find the optimal joint policy ${\bm{\pi}}^{*} = \arg\max_{{\bm{\pi}}} J({\bm{\pi}})$. To handle partial observability, the history $\tau_i \in \mathcal{T}_i: (Y \times A_i)^*$ is often used in place of the observation $o_i \in Y$. Each agent $i$ has an individual policy $\pi_i(a_i|\tau_i)$ and the joint policy $\bm{\pi}$ is the product of each $\pi_i$. Though each individual policy is learned as $\pi_i(a_i|\tau_i)$ in practice, we use $\pi_i(a_i|s)$ in the analysis and proofs for simplicity.
|
| 30 |
+
|
| 31 |
+
**Dec-MARL.** Although decentralized cooperative multi-agent reinforcement learning (Dec-MARL) has been previously investigated [@zhang2018fully; @de2020independent], the setting varies across these studies. In this paper, we consider Dec-MARL as a *fully* decentralized solution to Dec-POMDP, where each agent learns its policy/Q-function from its own action individually ***without communication or parameter-sharing***. Therefore, in Dec-MARL, each agent $i$ actually learns in the environment with transition function $P_i(s'|s,a_i) = \mathbb{E}_{a_{-i} \sim \pi_{-i}}[P(s'|s,a_i,a_{-i})]$ and reward function $r_i(s,a_i) = \mathbb{E}_{a_{-i} \sim \pi_{-i}} [r(s,a_i,a_{-i})]$, where $\pi_{-i}$ and $a_{-i}$ respectively denote the joint policy and joint action of all agents except $i$. As other agents are also learning (*i.e.*, $\pi_{-i}$ is changing), from the perspective of each individual agent, the environment is non-stationary. This is the non-stationarity problem, the main challenge in Dec-MARL.
|
| 32 |
+
|
| 33 |
+
**IQL.** Independent Q-learning (IQL) is a straightforward method for Dec-MARL, where each agent $i$ learns a Q-function $Q(s,a_i)$ by Q-learning. However, as all agents learn simultaneously, there is, to the best of our knowledge, no theoretical guarantee of convergence due to non-stationarity. In practice, IQL is often used merely as a simple baseline against more elaborate MARL approaches, such as value-based CTDE methods [@rashid2018qmix; @son2019qtran], and much less attention has been paid to IQL itself for Dec-MARL.
|
| 34 |
+
|
| 35 |
+
To address the non-stationarity problem in Dec-MARL, a fundamental way is simply to make the environment stationary during the learning of each agent. Following this principle, we let agents learn by turns; in each turn, one agent performs policy iteration while fixing the policies of other agents. This procedure is referred to as *multi-agent alternate policy iteration*. As illustrated in Figure [1](#fig:api){reference-type="ref" reference="fig:api"}, multi-agent alternate policy iteration differs from policy iteration in single-agent RL. In single-agent RL, policy iteration is performed on the same MDP. However, here, for each agent, policy iteration at a different round is performed on a different MDP. As $\pi_{-i}$ is fixed at each turn, $P_i(s'|s,a_i)$ and $r_i(s,a_i)$ are stationary and we can easily have the following lemma.
|
| 36 |
+
|
| 37 |
+
::: {#lemma-policy-iteration .lemma}
|
| 38 |
+
**Lemma 1** (multi-agent alternate policy iteration). *If all agents take turns to perform policy iteration, their joint policy sequence $\{\bm{\pi}\}$ monotonically improves and converges to a Nash equilibrium.*
|
| 39 |
+
:::
|
| 40 |
+
|
| 41 |
+
::: proof
|
| 42 |
+
*Proof.* In each turn, as the policies of other agents are fixed, the agent $i$ has the following update rule for policy evaluation, $$\begin{equation}
|
| 43 |
+
Q_{\pi_i}(s,a_i) \leftarrow r_i(s,a_i) + \gamma \mathbb{E}_{s'\sim P_i,a_i' \sim \pi_i}[Q_{\pi_i}(s',a_i')].
|
| 44 |
+
\end{equation}$$ We can have the convergence of policy evaluation in each turn by the standard results [@sutton2018reinforcement]. Moreover, as $\pi_{-i}$ is fixed, it is straightforward to have $$\begin{equation}
|
| 45 |
+
Q_{\pi_i}(s,a_i) = \mathbb{E}_{a_{-i} \sim \pi_{-i}}[Q_{\bm{\pi}}(s,a_i,a_{-i})].
|
| 46 |
+
\end{equation}$$
|
| 47 |
+
|
| 48 |
+
Then, the agent $i$ performs policy improvement by $$\begin{equation}
|
| 49 |
+
\label{cond1}
|
| 50 |
+
\pi^{\operatorname{new}}_i(s) = \arg\max_{a_i} \mathbb{E}_{\pi^{\operatorname{old}}_{-i} }\left[ Q_{\bm{\pi}^{\operatorname{old}}}(s,a_i,a_{-i} ) \right].
|
| 51 |
+
\end{equation}$$ As the policies of other agents are fixed (i.e., $\pi^{\operatorname{new}}_{-i}=\pi^{\operatorname{old}}_{-i}$), we have $$\begin{equation}
|
| 52 |
+
\begin{aligned}
|
| 53 |
+
V_{\bm{\pi}^{\operatorname{old}}}(s) & = \mathbb{E}_{\bm{\pi}^{\operatorname{old}}}[Q_{\bm{\pi}^{\operatorname{old}}}(s,a_i,a_{-i})] = \mathbb{E}_{\pi_i^{\operatorname{old}}}\mathbb{E}_{\pi_{-i}^{\operatorname{old}}}[Q_{\bm{\pi}^{\operatorname{old}}}(s,a_i,a_{-i})] \\
|
| 54 |
+
& \le \mathbb{E}_{\pi_i^{\operatorname{new}}}\mathbb{E}_{\pi_{-i}^{\operatorname{old}}}[Q_{\bm{\pi}^{\operatorname{old}}}(s,a_i,a_{-i}) ] = \mathbb{E}_{\pi_i^{\operatorname{new}}}\mathbb{E}_{\pi_{-i}^{\operatorname{new}}}[Q_{\bm{\pi}^{\operatorname{old}}}(s,a_i,a_{-i}) ] \\
|
| 55 |
+
& = \mathbb{E}_{\bm{\pi}^{\operatorname{new}}}[Q_{\bm{\pi}^{\operatorname{old}}}(s,a_i,a_{-i}) ] = \mathbb{E}_{\bm{\pi}^{\operatorname{new}}}[r(s,a_i,a_{-i}) + \gamma V_{\bm{\pi}^{\operatorname{old}}}(s^\prime) ]\\
|
| 56 |
+
& \le \cdots \le V_{\bm{\pi}^{\operatorname{new}}}(s),
|
| 57 |
+
\end{aligned}
|
| 58 |
+
\end{equation}$$ where the first inequality is from [\[cond1\]](#cond1){reference-type="eqref" reference="cond1"}. This proves that the policy improvement of agent $i$ in each turn also improves the joint policy. Thus, as agents perform policy iteration by turns, the joint policy sequence $\{\bm{\pi}\}$ improves monotonically, and $\{\bm{\pi}\}$ will converge to a Nash equilibrium since no agent can improve the joint policy unilaterally at convergence. ◻
|
| 59 |
+
:::
|
| 60 |
+
|
| 61 |
+
<figure id="fig:api" data-latex-placement="t">
|
| 62 |
+
<embed src="figures/MA2QL.pdf" style="width:100.0%" />
|
| 63 |
+
<figcaption>Illustration of <em>multi-agent alternate policy iteration</em> (upper panel) and <em>multi-agent alternate Q-iteration</em> (lower panel) of three agents. Since the underlying MDP differs across turns for each agent, the policy iteration/Q-iteration of each agent iterates over different MDPs.</figcaption>
|
| 64 |
+
</figure>
|
| 65 |
+
|
| 66 |
+
Lemma [1](#lemma-policy-iteration){reference-type="ref" reference="lemma-policy-iteration"} immediately indicates an approach for Dec-MARL with a convergence guarantee and also tells us that if we find the optimal policy for agent $i$ in each round $k$ given the other agents' policies $\pi_{-i}^k$, then the joint policy obtains the largest improvement. This result can be formulated as follows, $$\begin{equation}
|
| 67 |
+
\label{eq:pimax}
|
| 68 |
+
\begin{aligned}
|
| 69 |
+
&\pi^{*,k}_i = \arg\max_{\pi_i} \mathbb{E}_{\pi^{k}_{-i}}\left[ Q_{\pi_i,\pi_{-i}^{k}}(s,a_i,a_{-i} ) \right] \\
|
| 70 |
+
&V_{\pi_i,{\pi}_{-i}^{k}}(s) \le V_{\pi^{*,k}_i,{\pi}_{-i}^k}(s) \quad \forall \pi_i, \forall s.
|
| 71 |
+
\end{aligned}
|
| 72 |
+
\end{equation}$$ We could obtain this $\pi^{*,k}_i$ by policy iteration with many *on-policy* iterations. However, such a method faces the issue of sample inefficiency, which may be amplified in MARL settings. We use Q-iteration to address this problem in the next section.
|
| 73 |
+
|
| 74 |
+
To address the problem of multi-agent alternate policy iteration, we propose *multi-agent alternate Q-iteration*, which is sufficiently truncated for fast learning but still has the same theoretical guarantee. Further, based on multi-agent alternate Q-iteration, we derive *multi-agent alternate Q-learning*, which makes the minimal change to IQL to form a simple yet effective value-based decentralized learning method for cooperative MARL.
|
| 75 |
+
|
| 76 |
+
Instead of policy iteration, we let agents perform Q-iteration by turns as depicted in Figure [1](#fig:api){reference-type="ref" reference="fig:api"}. Let $\mathcal{M}^k_i=\{P_i^k,r^k_i\}$ denote the MDP of agent $i$ in round $k$, where we have $\mathcal{M}^{k}_i \neq \mathcal{M}^{k-1}_i$ unless $\pi_{-i}$ has converged, and $Q^{t,k}_{i}(s,a_i)$ denote the Q-function of agent $i$ with $t$ updates in the round $k$. We define the Q-iteration as follows, $$\begin{align}
|
| 77 |
+
\label{q-learning}
|
| 78 |
+
Q^{t+1,k}_{i}(s,a_i) \leftarrow r^k_i(s,a_i) + \gamma \mathbb{E}_{s^\prime \sim P_i^k} \left[ \max_{a_i^\prime} Q^{t,k}_{i}(s^\prime,a^\prime_i) \right].
|
| 79 |
+
\end{align}$$ Then, the sequence $\{Q^{t,k}_{i}\}$ converges to $Q^{*,k}_{i}$ with respect to the MDP $\mathcal{M}_i^k=\{P_i^k,r_i^k\}$, and we have the following lemma.
|
| 80 |
+
|
| 81 |
+
::: {#lemma-varepsilon .lemma}
|
| 82 |
+
**Lemma 2** ($\varepsilon$-convergent Q-iteration). *By iteratively applying Q-iteration [\[q-learning\]](#q-learning){reference-type="eqref" reference="q-learning"} at each agent $i$ for each turn, for any $\varepsilon>0$, we have $$\begin{equation}
|
| 83 |
+
\begin{aligned}
|
| 84 |
+
\big\|{Q^{t,k}_{i} - Q^{*,k}_{i}}\big\|_\infty \le \varepsilon, \quad \text{when \,\,} t \ge \frac{\log\left( (1- \gamma) \varepsilon \right) - \log (2R + 2\varepsilon)}{\log \gamma},
|
| 85 |
+
\end{aligned}
|
| 86 |
+
\end{equation}$$ where $R = \frac{r_{\max}}{1-\gamma}$ and $r_{\max} = \max_{s,\bm{a}}r(s,\bm{a})$.*
|
| 87 |
+
:::
|
| 88 |
+
|
| 89 |
+
::: proof
|
| 90 |
+
*Proof.* From the definition of $Q^{t,k}_{i}$ [\[q-learning\]](#q-learning){reference-type="eqref" reference="q-learning"}, we have $$\begin{equation}
|
| 91 |
+
\begin{aligned}
|
| 92 |
+
\big\|Q^{t+1,k}_{i} - Q^{t,k}_{i}\big\|_\infty & = \big\|\gamma \mathbb{E}_{s'\sim P_i^k}[\operatorname{max}_{a_i^\prime} Q^{t,k}_i(s^\prime,a_i^\prime) - \operatorname{max}_{a_i^\prime} Q^{t-1,k}_i(s^\prime,a_i^\prime)] \big\|_\infty \\
|
| 93 |
+
& \le \gamma \big\|Q^{t,k}_i - Q^{t-1,k}_i \big\|_\infty \le \gamma^t \big\|Q^{1,k}_i - Q^{0,k}_i \big\|_\infty.
|
| 94 |
+
\end{aligned}
|
| 95 |
+
\end{equation}$$ Then for any integer $m \ge 1$, we have $$\begin{equation}
|
| 96 |
+
\begin{aligned}
|
| 97 |
+
\big\|Q^{t+m,k}_{i} - Q^{t,k}_{i}\big\|_\infty & \le \big\|Q^{t+m,k}_{i} - Q^{t+m-1,k}_{i}\big\|_\infty + \cdots + \big\|Q^{t+1,k}_{i} - Q^{t,k}_{i}\big\|_\infty \\
|
| 98 |
+
& \le \gamma^t \frac{1 - \gamma^m}{1-\gamma} \big\| Q^{1,k}_i - Q^{0,k}_i\big\|_\infty.
|
| 99 |
+
\end{aligned}
|
| 100 |
+
\end{equation}$$ Let $m \to \infty$, and we have $$\begin{equation}
|
| 101 |
+
\label{td1}
|
| 102 |
+
\begin{aligned}
|
| 103 |
+
\big\| Q^{*,k}_{i} - Q^{t,k}_{i} \big\|_\infty & \le \frac{\gamma^t}{1-\gamma}\big\| Q^{1,k}_i - Q^{0,k}_i \big\|_\infty \\
|
| 104 |
+
& \le \frac{\gamma^t}{1-\gamma} \max_{s,a_i} \big| r^k_i(s,a_i) + \gamma \mathbb{E}_{s'\sim P_i^k}[\operatorname{max}_{a_i^\prime} Q^{0,k}_i(s^\prime,a_i^\prime)] - Q^{0,k}_i(s,a_i) \big|.
|
| 105 |
+
\end{aligned}
|
| 106 |
+
\end{equation}$$ *As all agents update by turns*, we have $Q^{0,k}_i = Q^{t^{k-1}_{i},k-1}_{i}$, where $t^{k-1}_{i}$ is the number of Q-iteration for agent $i$ in the $k-1$ round. Therefore, we have $$\begin{equation}
|
| 107 |
+
\big\|Q^{0,k}_i - Q^{*,k-1}_{i}\big\|_\infty = \big\| Q^{t^{k-1}_{i},k-1}_{i} - Q^{*,k-1}_{i} \big\|_\infty \le \varepsilon.
|
| 108 |
+
\label{induct-prop}
|
| 109 |
+
\end{equation}$$ With this property, we have $$\begin{align}
|
| 110 |
+
& \big| r^k_i(s,a_i) + \gamma \mathbb{E}_{s' \sim P_i^k}[\operatorname{max}_{a_i^\prime} Q^{0,k}_i(s^\prime,a_i^\prime)] - Q^{0,k}_i(s,a_i) \big| \nonumber \\
|
| 111 |
+
& = \big| r^k_i(s,a_i) + \gamma \mathbb{E}_{s' \sim P_i^k}[\operatorname{max}_{a_i^\prime} Q^{0,k}_i(s^\prime,a_i^\prime)] - Q^{*,k-1}_{i}(s,a_i) + Q^{*,k-1}_{i}(s,a_i) - Q^{0,k}_i(s,a_i) \big| \nonumber \\
|
| 112 |
+
& \le \big| r^k_i - r^{k-1}_{i} \big| + \gamma \big| \mathbb{E}_{s' \sim P_i^k}[\operatorname{max}_{a_i^\prime} Q^{0,k}_i(s^\prime,a_i^\prime)] - \mathbb{E}_{s' \sim P_i^{k-1}}[\operatorname{max}_{a_i^\prime} Q^{*,k-1}_{i}(s^\prime,a_i^\prime)] \big| \nonumber \\
|
| 113 |
+
& \quad + \big|Q^{*,k-1}_{i}(s,a_i) - Q^{0,k}_i(s,a_i) \big| \le 2r_{\max} + (\frac{2\gamma r_{\max}}{1-\gamma} + \varepsilon) +\varepsilon = 2R + 2\varepsilon \label{td2},
|
| 114 |
+
\end{align}$$ where the second term in the last inequality is from $\|Q^{*,k-1}_{i}\|_\infty \le \frac{r_{\max}}{1-\gamma}$, $\|Q^{0,k}_{i}\|_\infty \le \|Q^{*,k-1}_{i}\|_\infty + \varepsilon$, and [\[induct-prop\]](#induct-prop){reference-type="eqref" reference="induct-prop"}. Finally, by combining [\[td1\]](#td1){reference-type="eqref" reference="td1"} and [\[td2\]](#td2){reference-type="eqref" reference="td2"}, we have $$\begin{equation}
|
| 115 |
+
\big\|Q^{*,k}_{i} - Q^{t,k}_{i}\big\|_\infty \le \frac{\gamma^t}{1-\gamma}(2R+2\varepsilon).
|
| 116 |
+
\end{equation}$$ We need $\|Q^{*,k}_{i} - Q^{t,k}_{i}\|_\infty \le \varepsilon$, which can be guaranteed by $t \ge \frac{\log\left( (1- \gamma) \varepsilon \right) - \log (2R +2\varepsilon)}{\log \gamma}$. ◻
|
| 117 |
+
:::
|
| 118 |
+
|
| 119 |
+
::: {#corollary-t .corollary}
|
| 120 |
+
**Corollary 1**. *For any $\varepsilon > 0$, if we take sufficient Q-iteration $t^k_i$, *i.e.*, $Q^k_i = Q^{t_i^k,k}_i$, then we have $$\begin{align*}
|
| 121 |
+
\big\|Q^k_i - Q^{*,k}_i\big\|_\infty \le \varepsilon \quad \forall k, i.
|
| 122 |
+
\end{align*}$$*
|
| 123 |
+
:::
|
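For a feel of the bound in Lemma [2](#lemma-varepsilon){reference-type="ref" reference="lemma-varepsilon"}, a tiny numeric check with illustrative values of $\gamma$, $\varepsilon$, and $r_{\max}$:

```python
import math

def min_q_iterations(gamma, eps, r_max):
    """Smallest t with ||Q^{t,k} - Q^{*,k}||_inf <= eps, per Lemma 2."""
    R = r_max / (1 - gamma)
    return math.ceil((math.log((1 - gamma) * eps) - math.log(2 * R + 2 * eps)) / math.log(gamma))

print(min_q_iterations(gamma=0.99, eps=0.1, r_max=1.0))   # -> 1215 updates per turn
```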
| 124 |
+
|
| 125 |
+
With Lemma [1](#lemma-policy-iteration){reference-type="ref" reference="lemma-policy-iteration"}, Lemma [2](#lemma-varepsilon){reference-type="ref" reference="lemma-varepsilon"}, and Corollary [1](#corollary-t){reference-type="ref" reference="corollary-t"}, we have the following theorem.
|
| 126 |
+
|
| 127 |
+
::: {#theorem-q-iteration .theorem}
|
| 128 |
+
**Theorem 1** (multi-agent alternate Q-iteration). *Suppose that $Q^*_i(s,\cdot)$ has the unique maximum for all states and all agents. If all agents in turn take Q-iteration to $\|Q^k_i - Q^{*,k}_i\|_\infty \le \varepsilon$, then their joint policy sequence $\{\boldsymbol{\pi}^k\}$ converges to a Nash equilibrium, where $\pi_i^k(s) = \arg\max_{a_i}Q_i^k(s,a_i)$.*
|
| 129 |
+
:::
|
| 130 |
+
|
| 131 |
+
::: proof
|
| 132 |
+
*Proof.* First, from Lemma [1](#lemma-policy-iteration){reference-type="ref" reference="lemma-policy-iteration"}, we know $Q^{*,k}_i$ also induces a joint policy improvement, thus $Q^{*,k}_i$ converges to $Q^{*}_i$. Let $\pi_i^*(s)=\arg\max_{a_i}Q^*_i(s,a_i)$, then $\bm{\pi}^*$ is the joint policy of a Nash equilibrium.
|
| 133 |
+
|
| 134 |
+
Then, we define $\Delta$ as $$\begin{equation}
|
| 135 |
+
\Delta = \min_{s,i} \max_{a_i \not = \pi^*_i(s)} |Q^*_i(s,\pi^*_i(s)) - Q^*_i(s,a_i) |.
|
| 136 |
+
\end{equation}$$ From the assumption we know that $\Delta > 0$. We take $\varepsilon = \frac{\Delta}{6}$, and from Lemma [2](#lemma-varepsilon){reference-type="ref" reference="lemma-varepsilon"}, we know there exists $k_0$ such that $$\begin{equation}
|
| 137 |
+
\big\|Q^*_i - Q^{*,k}_i\big\|_\infty \le \varepsilon \quad \forall k \ge k_0.
|
| 138 |
+
\end{equation}$$ For $k\ge k_0$ and any action $a_i \not = \pi^*_i(s)$, we have $$\begin{equation}
|
| 139 |
+
\begin{aligned}
|
| 140 |
+
Q^k_i(s,& \pi^*_i(s)) - Q^k_i(s,a_i) \\
|
| 141 |
+
& = Q^k_i(s,\pi^*_i(s)) - Q^{*,k}_i(s,\pi^*_i(s)) + Q^{*,k}_i(s,\pi^*_i(s)) - Q^{*}_i(s,\pi^*_i(s)) \\
|
| 142 |
+
& \quad + Q^{*}_i(s,\pi^*_i(s)) - Q^{*}_i(s,a_i) + Q^{*}_i(s,a_i) - Q^{*,k}_i(s,a_i) + Q^{*,k}_i(s,a_i) - Q^k_i(s,a_i) \\
|
| 143 |
+
& \ge Q^{*}_i(s,\pi^*_i(s)) - Q^{*}_i(s,a_i) - |Q^k_i(s,a_i) - Q^{*,k}_i(s,a_i)| - |Q^{*,k}_i(s,a_i) - Q^{*}_i(s,a_i)|\\
|
| 144 |
+
& \quad - |Q^{*}_i(s,\pi^*_i(s)) - Q^{*,k}_i(s,\pi^*_i(s))| - |Q^{*,k}_i(s,\pi^*_i(s)) -Q^k_i(s,\pi^*_i(s))| \\
|
| 145 |
+
& = \Delta - 4\varepsilon = \Delta/3 > 0,
|
| 146 |
+
\end{aligned}
|
| 147 |
+
\end{equation}$$ which means $\pi^k_i(s) = \arg\max_{a_i} Q^k_i(s,a_i) = \arg\max_{a_i} Q^*_i(s,a_i) = \pi^*_i(s)$. Thus, $Q_i^{k}$ of each agent $i$ induces $\pi_i^*$ and all together induce $\bm{\pi}^*$, which is the joint policy of a Nash equilibrium. ◻
|
| 148 |
+
:::
|
| 149 |
+
|
| 150 |
+
Theorem [1](#theorem-q-iteration){reference-type="ref" reference="theorem-q-iteration"} assumes that for each agent, $Q_i^*$ has a unique maximum in all states. Although this may not hold in general, in practice we can easily address it by adding a small positive random noise to the reward function. If the noise is bounded by $\delta$, the performance drop from optimizing the environmental reward plus noise is bounded by $\delta/(1-\gamma)$. As we can make $\delta$ arbitrarily small, the bound is tight.
|
| 151 |
+
|
| 152 |
+
From Theorem [1](#theorem-q-iteration){reference-type="ref" reference="theorem-q-iteration"}, we know that if each agent $i$ guarantees $\varepsilon$-convergence to $Q_i^{*,k}$ in each round $k$, multi-agent alternate Q-iteration also guarantees a Nash equilibrium of the joint policy. This immediately suggests a simple, practical decentralized learning method, namely multi-agent alternate Q-learning (MA2QL).
|
| 153 |
+
|
| 154 |
+
For learning either Q-tables or Q-networks, MA2QL makes only minimal changes to IQL.
|
| 155 |
+
|
| 156 |
+
- For learning Q-tables, all agents in turn update their Q-tables. At a round $k$ of an agent $i$, all agents interact in the environment, and the agent $i$ updates its Q-table a few times using the collected transitions $\left<s,a_i,r,s' \right>$.
|
| 157 |
+
|
| 158 |
+
- For learning Q-networks, all agents in turn update their Q-networks. At a round of an agent $i$, all agents interact in the environment and each agent $j$ stores the collected transitions $\left<s,a_j,r,s' \right>$ into its replay buffer, and the agent $i$ updates its Q-network using sampled mini-batches from its replay buffer.
|
| 159 |
+
|
| 160 |
+
There is a slight difference between learning a Q-table and a Q-network. Strictly following multi-agent alternate Q-iteration, the Q-table is updated by transitions sampled from the current MDP. On the other hand, the Q-network is updated by minibatches sampled from the replay buffer. If the replay buffer only contains experiences sampled from the current MDP, learning the Q-network also strictly follows multi-agent alternate Q-iteration. However, in practice, we slightly deviate from that and allow the replay buffer to contain transitions from past MDPs, following IQL [@sunehag2018vdn; @rashid2018qmix; @papoudakis2021benchmarking] for sample efficiency, though convergence may then no longer be theoretically guaranteed.
|
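The following toy, self-contained sketch (a two-agent, single-state cooperative matrix game with illustrative hyperparameters) shows the turn-based tabular update described above: all agents act at every step, but only the agent whose turn it is updates its Q-table.

```python
import random
from collections import defaultdict

# Toy 2-agent, single-state cooperative matrix game (illustration only).
N_ACTIONS, GAMMA, LR, EPS = 2, 0.9, 0.1, 0.1

def joint_reward(a0, a1):                 # reward 1 only when the agents coordinate
    return 1.0 if a0 == a1 else 0.0

Q = [defaultdict(float) for _ in range(2)]    # per-agent Q-table over own actions

def act(i):
    if random.random() < EPS:
        return random.randrange(N_ACTIONS)
    return max(range(N_ACTIONS), key=lambda a: Q[i][a])

for rnd in range(20):                     # agents take turns to learn
    learner = rnd % 2                     # only this agent updates in this round
    for _ in range(200):                  # all agents keep interacting
        a = [act(0), act(1)]
        r = joint_reward(*a)
        best_next = max(Q[learner][b] for b in range(N_ACTIONS))
        Q[learner][a[learner]] += LR * (r + GAMMA * best_next - Q[learner][a[learner]])

print([dict(q) for q in Q])               # both agents should prefer the same action
```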
| 161 |
+
|
| 162 |
+
MA2QL and IQL can be simply summarized and highlighted as ***MA2QL agents take turns to update Q-functions by Q-learning, whereas IQL agents simultaneously update Q-functions by Q-learning.***
|
2210.08933/main_diagram/main_diagram.drawio
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
<mxfile host="app.diagrams.net" modified="2022-09-27T13:16:39.221Z" agent="5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/105.0.0.0 Safari/537.36" etag="Aux2dSTuGJnce12Runjp" version="20.0.4" type="google"><diagram id="zvqGJeQhe3FCccDdP7SY" name="第 1 页">7V1bc9u2Ev41nmkf5MGd5KMvTZ1TJy0T1u2clzOqRVuqJdGR6MjOrz/AgpRFErYkigIlkpmMDIHQAthvd7FcLsATejF5/nXWfxx+igbh+ISgwfMJvTwhxGNCfqqKF10hMNUV97PRQFfh14qvox9hUomS2qfRIJxnGsZRNI5Hj9nK22g6DW/jTF1/NosW2WZ30Tjb62P/PukRvVZ8ve2Pw0Kzv0aDeKhrXeK81l+Fo/th2jMWnr4y6aeNExLzYX8QLVb6or+c0ItZFMW6NHm+CMeKdylf9IA+vHF1ObBZOI03+cHgx/lN5N0HDw8Pwz/POf84eRj1Eirf++OnZMInRIwlvfO7SJKVo45fElaIb09ReqE3B6DOZANCHiXW56/XZele/2Xy/+KE//LyP6K/pLTlIDX5tCXJ9ETkwCW+8sv5YjiKw6+P/Vt1ZSElTNYN48lYfsOy2J8/atDvRs/hQA1uNB5fRONoBoToBfxbdrHKrHTm4SwOn1eqEub9GkaTMJ69yCbJVYISCUkkmYvk+2JFLpKq4YpIpHX9RBLvl5RfwZKFBK8tsCMG7F5ZjlOWV8dbCVmioIpP++S1e2CsZnbUZNoANWHswLDzDNjlGTkdnKm1Qn67Hffn89Ftln+z6Gk6UIy7RO8xKRyka8kbLFphATewIK2bheN+PPqeXYFMfEl6+CMagRCm+DhZBATKsXYePc1uw+RXqytGnpC3hlDcn92HcYEQwLScdnnkUh+gPdCRHMe5553ycuBRvJbUvuEzOReNho9hVhV8jK4ltW/4TP7FdoseFW8seqenp5sucnLZiXOr2Xh0P1UCI6UglCvYuVqcRtJlP0suTEaDgfr5+SyUY+n/A6SU/DwqXgH3+PkJv1S0nuJIjxdIz+NZ9BCmK+M0moa5xTKpqmCdpHT9OmkSVbE3d5J2eFvEGyNUN+Dt84xIHgOvrGuUp4SpXd+Its83ossVcGf4DLSsA9g+78ipDkADLesAmvyjRgOIWVXmk9RuPk3OTqPBW/oRu+tenpJ18EzhuEaDV2B5ec2rHTzeNvAKxq6815k3wNbBE20DD1dnNmsHz2kbeAWWl9e8us0md9sGXuEWmzplwctTYtgyeK2LtBhusEvDZ6BlG8BU+9sDoOEGuzSABlrWAWxdtKXgK5bXv7rNpyBtA69wl1Ze9/KUrIPXukhLgeXlNa928FoXaSkYu/JeZ94AWwevdZGWwl1aebNZO3iti7QUWF5e8+o2m44JvNc0zxPnXHLnAp84lw1K0jUmNFjN9HRMMZIug6W5GSxO6+IqIrcJQfCSNrJAKB8Z3bOJTI1Ae6BzcD4KUho8Aynb8LUunuLxyuAzkLINH2kbfIJUZDidug1n6+IpDqsIOi9PyDZ0rYumFDheWuvqhq51sZSCnSvtaeYtr23oWhdJEZUZzLqha13GSoHjpbWuZoOJUevUbmWLXsJ0p2y6UZGUaznfCKPW6V7xrro0fkVS9vFrXcJY8ba6NH5FUtbxM25+1wHo+ff7E1MsO40q95J4swpo99Cp4KGOaaP1Z1Kg58l4Kmd49jweTR8y1Idx/Ai/+yD/LxaL0wU9jWZyJB+w53nyj/7JKrHvo3BxHj1nyCA1KMaY6l8wfCqXCeRQfEoyv5xFubmNJveZBivyk9ZKWpzJqa42e5W8tJKfeo6Ta5VMeqPJEoh6fwAMimd5aIAG4d3ciNDmh4JoQo/qECGCBhkin9R9JJIaTvykpGQFKWdJlb1AXoEidQOsNrrIIvN8zFxdVH+w0E0YDbBykFVrHBCki5gFBISv5/GAUOjEEQFhQIx7suQBjPIqh3bUuSbq5COErj1Fg7rkiogbifMVZT60ZgyrjtWBUKrs61/qWqIe9SHVWursDfyYc5+oLVaKlGqRlNE1VXnMCHN8zVhy/Yp6DPpiCPtMnTCV0GUeT3rjwl3WcoclvXEXQW8co2sGk8ec+MQDTgFziQdsdR1fz0/yyQPkOHwRkttqJx/qyan7Lk9KqhMNjUMCoiGQ13VJ6N6BlSjQsFHuY4x1SY2CouQy1vz1yI2icOUy9Zf8NyO+o5x8/OfvHqG94Je/e+c9fMloVrV2l0op/lowNxFWBshSyT+sC6oHKDKe1Cn7j+ACRzhwVJ1A5NpVKAiOfU+xTQD7MYKrjnPFhXcluOcLL6lRnwCWkKLqIAa/cfy0BK2A2ZzTQCTiwPykBN0LLahysCIdtqqFT1kOBNUDJ9eCeMkUkhIH+gAe81AgtKDJwSclUDJBYEqIBdyBOREvEUwBgHPMoSyFGGab9Mf1KFDAQPYEIT6j8CvgHwUKVKqyFlyBEgkGTafQMxNSL6AOvhGYEZcEQVnlRV3QEFEQAp/pmUF79SkVDowCkqRA1dUVGLGqQUkNVQlfEkYCcyW+B+oCzFUtrqQlAjMC41AFL2AcTEBiJoARwEdx5dIrj/lY6yuop+vBZR/OpkEc6kACuJB16iuHDkFOwGaBfaFU2htgB1zQukwl/FSzFCaUlAlPawEUOEhFlZVtkhJLlld0x4LIEcLgBAY2EQ24FDLgspRBoa2bmhRgKc1QAHOXQiVt1qba/FFpM3Pq1GZtE6Uq6II0UCBp0n5fCUcpo17XgUde2kQ4AInkoOf6KQX5mdLYdP6fewTYX9/0Cawt3HGvEyPvUF+vn1qNIfNElmmyBnOuZPhGWrMrtVxjkHABOqUlWWBHiihovZQJoq2CkMLqQJ3KAkLETcpSrdmylmqlEnIBE/iaIvgl9rW0wuJDQTc5CbQ0MzdgIPxSr6QBuJGqiIVPYYUEjQfRlPPRRkUNEeYoFx33Rv5GegU+pmKpiXr5FgEG08ucAIOdlnhr5ZMdEwT2Vk4R6+HdSNZtjrYyR/Vh7YC54IGjbWoAvMdIQqsdEIJ9LPhS4TGsLZ60ONob8/XfHphazZGepIJdqMUSd5y4CFy5abrW5YG8AkVl9zBNitAzS5t42vdwpHFNvBCQvcR54yJ1STCRRUBVeW8YhKtHb7xr+VMYIpwUBhhhsFWyEayTNHE7cABJjNK+66EgmKG64Gyhsxd2UJTV2u/OVqtbpXjWn87votkk0+9cnfL5k8rE6uGfc2NUSTW94s0DyjRTaTaZy7dPM3VzrFNvigQ3bbsrg9SUB/2435tMxr1pNMhSm2jeWexw/vTP3jr897kXh88QvchQ/P3LZabPt4cXhH+fxdHELktGFXf3NA9ls+Sm/Ww4C+8y5E4IXXNXAGO9zd5P7+fWAYZaqL3fpG572M
2aD7Xjfhz+BH6+1H+OflbzA4uATh3k/NxJzxvSY/JCzdKzD1d1Q+l52+q/Yg93hhdoU6QnUZ1cN/i+Bqbvxz+ukOeYqWjOFkyf1sv0nAtqYPk+nNQqGe5tyfC6pfxiHcP34U9WyHCd7txJ+N6cgB3rdOC+8gO1c9su3nxWtnm6Pc/tYnHTJ/p1ba/A3Ymwe91f4eL1gFvdXoFxdyTsm4DPorgfjyLVmffu0/HNBcDJCYBH6xYA45nAjU4iyO+LKX9kUIGS7SODcLqxsD3oFfNuyuNnoGUfwdalHRczb8ojaKBlH8HWpUHm87130MD6LWj7kiArOzIvv/ejBvTalwJZGXoFObCPXus2dxfsXXnfs+7ztXH73k+Q33Gxg+WsH732be+uzGspyIF99Ejb0Cvul8Ekz/Xye28wyZ9EtHcEWxd3Mdxtl0bQRMs6gq2LvRjutksjaKJlG0HWOh0sRCxLn8BWoGT7BDbMWqd/BqtXGj8DLfsIdrHPHRA00LKPYBf7LK+B9VvQLvZZXvvqPj4Wdnm2C70Cz8vrXv3odbHP8r5n3a8rwClL2oNeIeZV3nLWj14X+yyve7VbTk4M6G2XskbYGylrZ1+OOmNtJUeViuT7h/5kNFZgBaNJqM6s+Bwu5OeXaNKfVpTVmJUIlobVV0RbGER7fzltfPekxjcl5MPTePzS+9wJyu6C4hgOF18+SbEkKaYYUkWS8lGC2wlKFYKyXL1WBYXYFRRTqKoiQbkc3d09fQ2/dYKy/Q4Kno2Amdae5TlbVUvK3ZS9DL5F9/zLxZ9/nLmuP+h/623nWibMG/TnQ/Al8XtO5vyxfzua3stvZAnMX8k8392RciBOKM+7jjzvOm78ADdHqUCoOh/UCLLJA60K5KOHtXAO6aaoumvo7BnU7R4GtgxUD1UDaoHO3u8WTTFuln9REznyFzWRQl5h3W9qwsbXWRcYPz1yxhffP14/402R5W5L397ekYVr38RpfHd1RtVIA19GZ+K7XU0zvnK607Qma5rJ8y9oWuO8iQNQNZN3XmB847yJA2D87hHSzsZtYeOWUNZn40yhzoyq/aaiXI3zJ0yct6xrpmy6TtearGtroxNa1xrnURyAsq2NT2jWN86nOADWdxEKq3aOGZ6f2rVzabbRO8rWQI/CxHe7muZ0EYq2adraCMVvTfQnDkDV1kYofmuiN3EAjO8iFDbPrDRl7Vk2cWsDFEEDnIn84bAmvltWtC480TJFWxudCBrgS4gN0pItK9ra0ETQAFcivyflAPjexSVsGrgDeJ6bytL7SUpNcyUO4LGi2wUm2qZqawMTTXjQkXcmDkHV1gYmmvCYo3iiYv2M7wITVm1c/cFXd21kAjXRnag/Buh2oYm2qdra2ARqojtxAKq2NjiBmuhO2GT807+fnoYMhWe/fxx9np1708VwbtxYuuUG5Lff2gZA8Qv14uB/7iSELxI+iDMVso82M4TlMd6D0th8rZoROtOdjyXopscFXV7taofOtMp0fsW+/IqlZ2nBrTDCbVrb9q+px29iTcBZ1dPdI/clgTtyA1s7cKkFsI8cPi7k8nfd9SPXhfCt3nObdpZbXRwreDlwCVVtwA2I1UMBzNBVcCJeOeiOfH08AOh2f4pQErojXyAPALrdD43rVsgtVkib+/nMgO/+GKKEruYD3Uegq4UV0uamNDN0u0d6SkJ37Ctk/dDVErXJP8k9AugKK2Tt0NHdoetWyM33pzl1L5DUFKfL5SId94PDfDJ53RqWPkDpNKwdGsZMUbmMhh17TkR+72f9KmaKi2VYfuzZEPldnzZZ/hxFwdPMXSD//OZhEv94+PB8VkEyRGON2iyK+/EoUp15776aZgu/0cvC71F7Rs4Iv0nhOvj3Bb/HDwz+3YPZHfxbrLeFY7MsJn8a8e/y6m3ij5F3YPh3YXWb+DPHnv7Lr7NIAbK89qt0hIefokGoWvwf</diagram></mxfile>
|
2210.08933/main_diagram/main_diagram.pdf
ADDED
|
Binary file (82.2 kB). View file
|
|
|
2210.08933/paper_text/intro_method.md
ADDED
|
@@ -0,0 +1,103 @@
| 1 |
+
# Introduction
|
| 2 |
+
|
| 3 |
+
Among existing generative models, GAN [\(Goodfellow et al.,](#page-9-0) [2014\)](#page-9-0) suffers from the instability issue [\(Salimans et al.,](#page-11-0) [2016\)](#page-11-0) and is subject to mode collapse [\(Metz et al.,](#page-10-0) [2017\)](#page-10-0); VAE [\(Kingma &](#page-10-1) [Welling,](#page-10-1) [2014\)](#page-10-1) has to rely on surrogate objectives to approximate maximum likelihood training, and Flow-based models [\(Dinh et al.,](#page-9-1) [2017\)](#page-9-1) have to use specialized architectures to construct reversible transforms. Diffusion models [\(Ho et al.,](#page-10-2) [2020;](#page-10-2) [Nichol & Dhariwal,](#page-10-3) [2021\)](#page-10-3) have circumvented several of these limitations and emerged as a new paradigm for generative models, theoretically underpinned by non-equilibrium thermodynamics [\(Sohl-Dickstein et al.,](#page-11-1) [2015\)](#page-11-1) and score-matching networks [\(Song & Ermon,](#page-11-2) [2019\)](#page-11-2). To date, the major breakthroughs are in domains using continuous signals, such as vision [\(Saharia et al.,](#page-11-3) [2022a;](#page-11-3)[b;](#page-11-4) [Ramesh et al.,](#page-11-5) [2022\)](#page-11-5) and audio [\(Kong et al.,](#page-10-4) [2020\)](#page-10-4). However, extending continuous diffusion models to natural language remains an open challenge due to the inherently discrete nature of texts.
|
| 4 |
+
|
| 5 |
+
On the basis of unconditional generation in continuous space, which is illustrated in Figure [1\(](#page-1-0)a), existing efforts [\(Hoogeboom et al.,](#page-10-5) [2021;](#page-10-5) [Austin et al.,](#page-9-2) [2021\)](#page-9-2) have started customizing diffusion models to text in discrete space for unconditional language modeling (i.e., free text generation). Diffusion-LM [\(Li](#page-10-6) [et al.,](#page-10-6) [2022\)](#page-10-6), as in Figure [1\(](#page-1-0)b), models texts in continuous space and proposes to use an extra-trained classifier as guidance (i.e., the condition signal x) to impose subtle changes (usually complex, fine-grained constraints) on generated sentences. Nonetheless, these models do not naturally generalize to conditional language modeling (i.e., the model assigns probabilities p(w|x) to sequences of words w given x). In the more general sequence-to-sequence (SEQ2SEQ) setting, where the condition x is also a sequence of words, applying Diffusion-LM can be difficult. The reason is that classifiers are attribute-oriented, and we cannot train hundreds of thousands of classifiers to model the semantic relation between conditions and generated sentences.
|
| 6 |
+
|
| 7 |
+
SEQ2SEQ is an essential setting in NLP that covers a wide range of important tasks such as open-ended sentence generation, dialogue, paraphrasing, and text style transfer. In this paper, we propose
|
| 8 |
+
|
| 9 |
+
<span id="page-0-0"></span><sup>1</sup>Code is available at <https://github.com/Shark-NLP/DiffuSeq>
|
| 10 |
+
|
| 11 |
+

|
| 12 |
+
|
| 13 |
+
<span id="page-1-0"></span>Figure 1: The demonstration of unconditional, classifier-guided, and classifier-free diffusion models.
|
| 14 |
+
|
| 15 |
+
DIFFUSEQ, depicted in Figure 1(c), a classifier-free diffusion model that supports SEQ2SEQ text generation tasks. By modeling the conditional probability of the target sentence $\mathbf{w}$ given context $\mathbf{x}$ using one single model, one advantage of DIFFUSEQ is that this paradigm allows a complete model to fit data distribution and utilize conditional guidance, rather than depending on a separate classifier.
|
| 16 |
+
|
| 17 |
+
Different from canonical generation approaches that work in an autoregressive (AR) left-to-right manner (Radford et al., 2019), DIFFUSEQ generates text tokens in parallel in a non-autoregressive (NAR) way. To corroborate the effectiveness of DIFFUSEQ, we conduct experiments on four SEQ2SEQ tasks. Compared to AR and NAR models, which suffer from the "degeneration" problem (Holtzman et al., 2019) and rely on decoding strategies, DIFFUSEQ can achieve considerable sentence-level diversity without sacrificing quality (see § 4.2).
|
| 18 |
+
|
| 19 |
+
To sum up, we make a series of technical and conceptual contributions: (a) we are the first to deploy the diffusion model on SEQ2SEQ text generation, and our proposed DIFFUSEQ as a conditional language model is trained end-to-end in a classifier-free manner; (b) we establish a theoretical connection among AR, NAR and DIFFUSEQ models, and justify DIFFUSEQ as an extension of iterative-NAR models; (c) with strong empirical evidence, we demonstrate the great potential of diffusion models in complex conditional language generation tasks.
|
| 20 |
+
|
| 21 |
+
# Method
|
| 22 |
+
|
| 23 |
+
**Preliminary.** A diffusion model typically contains forward and reverse processes. Given a data point sampled from a real-world data distribution $\mathbf{z}_0 \sim q(\mathbf{z})$ , the forward process gradually corrupts $\mathbf{z}_0$ into a standard Gaussian noise $\mathbf{z}_T \sim \mathcal{N}(0, \mathbf{I})$ . For each forward step $t \in [1, 2, ..., T]$ , the perturbation is controlled by $q(\mathbf{z}_t | \mathbf{z}_{t-1}) = \mathcal{N}(\mathbf{z}_t; \sqrt{1 - \beta_t} \mathbf{z}_{t-1}, \beta_t \mathbf{I})$ , with $\beta_t \in (0, 1)$ as different variance scales. Once the forward process is completed, the reverse denoising process tries to gradually reconstruct the original data $\mathbf{z}_0$ via sampling from $\mathbf{z}_T$ by learning a diffusion model $f_{\theta}$ .
|
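From these Gaussian transitions, $\mathbf{z}_t$ can be sampled from $\mathbf{z}_0$ in closed form as $\mathbf{z}_t = \sqrt{\bar{\alpha}_t}\,\mathbf{z}_0 + \sqrt{1-\bar{\alpha}_t}\,\epsilon$ with $\bar{\alpha}_t = \prod_{s \le t}(1-\beta_s)$. A small numpy sketch with an illustrative linear $\beta$ schedule (not the paper's exact schedule):

```python
import numpy as np

T, d = 1000, 16
betas = np.linspace(1e-4, 0.02, T)            # illustrative variance schedule
alphas_bar = np.cumprod(1.0 - betas)

def q_sample(z0, t, rng):
    """Sample z_t ~ q(z_t | z_0) in closed form."""
    eps = rng.normal(size=z0.shape)
    return np.sqrt(alphas_bar[t]) * z0 + np.sqrt(1.0 - alphas_bar[t]) * eps

rng = np.random.default_rng(0)
z0 = rng.normal(size=d)
# Small t barely perturbs z0; large t is close to pure Gaussian noise.
print(np.linalg.norm(q_sample(z0, 10, rng) - z0), np.linalg.norm(q_sample(z0, 900, rng)))
```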
| 24 |
+
|
| 25 |
+
**Problem Statement.** Many recent efforts have been devoted to adapting diffusion models to discrete texts (see § 5). However, they all focus on unconditional sequence modeling. In this paper, we target sequence-to-sequence text generation tasks. In particular, given an m-length source sequence $\mathbf{w}^x = \{w_1^x, ..., w_m^x\}$ , we aim to learn a diffusion model that can produce an n-length target sequence $\mathbf{w}^y = \{w_1^y, ..., w_n^y\}$ conditioned on the source sequence.
|
| 26 |
+
|
| 27 |
+
We propose DIFFUSEQ to extend vanilla diffusion models to learn conditional text generation (as shown in Figure 2), concerning the model architecture and the training objective.
|
| 28 |
+
|
| 29 |
+
Forward Process with Partial Noising. At the beginning of the forward process, we follow Diffusion-LM (Li et al., 2022) and design an embedding function $EMB(\mathbf{w})$ to map the discrete text $\mathbf{w}$ into a continuous space. In particular, given a pair of sequences $\mathbf{w}^x$ and $\mathbf{w}^y$ , DIFFUSEQ learns a unified
|
| 30 |
+
|
| 31 |
+

|
| 32 |
+
|
| 33 |
+
<span id="page-2-0"></span>Figure 2: The diffusion process of our conditional diffusion language model DIFFUSEQ. Given the source $\mathbf{w}^x$ and the target $\mathbf{w}^y$ , we pair-wisely transform them into continuous space $\mathbf{z}_0$ . The partial Gaussian noise is iteratively added on the target space of $\mathbf{z}_t$ .
|
| 34 |
+
|
| 35 |
+
feature space of $\mathbf{w}^x$ and $\mathbf{w}^y$ by embedding transformation and concatenation as $\mathrm{EMB}(\mathbf{w}^{x \oplus y}) = [\mathrm{EMB}(w_1^x),...,\mathrm{EMB}(w_m^x),\mathrm{EMB}(w_1^y),...,\mathrm{EMB}(w_n^y)] \in \mathbb{R}^{(m+n) \times d}$ . The transformation allows us to adapt discrete textual input into the standard forward process, by extending the original forward chain to a new Markov transition $q_\phi(\mathbf{z}_0|\mathbf{w}^{x \oplus y}) = \mathcal{N}(\mathrm{EMB}(\mathbf{w}^{x \oplus y}),\beta_0\mathbf{I})$ .
|
| 36 |
+
|
| 37 |
+
We denote $\mathbf{z}_t = \mathbf{x}_t \oplus \mathbf{y}_t$ to simplify the wordings, where $\mathbf{x}_t$ and $\mathbf{y}_t$ represent parts of $\mathbf{z}_t$ that belong to $\mathbf{w}^x$ and $\mathbf{w}^y$ , respectively. For each forward step $q(\mathbf{z}_t|\mathbf{z}_{t-1})$ , we gradually inject noise into last step's hidden state $\mathbf{z}_{t-1}$ to obtain $\mathbf{z}_t$ . Unlike conventional diffusion models that corrupt the whole $\mathbf{z}_t$ (both $\mathbf{x}_t$ and $\mathbf{y}_t$ ) without distinction, we only impose noising on $\mathbf{y}_t$ . This modification (termed **partial noising**) allows us to adapt diffusion models for conditional language modeling.
|
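A minimal numpy sketch of partial noising (with random placeholder embeddings instead of a learned EMB, and a single noise scale instead of the full schedule): only the rows of $\mathbf{z}_0$ belonging to the target are corrupted, while the source rows are kept clean.

```python
import numpy as np

rng = np.random.default_rng(0)
m, n, d = 4, 6, 8                          # source length, target length, embedding dim
emb_x = rng.normal(size=(m, d))            # EMB(w^x), placeholder embeddings
emb_y = rng.normal(size=(n, d))            # EMB(w^y)
z0 = np.concatenate([emb_x, emb_y], axis=0)          # z_0 = x_0 ⊕ y_0

def partial_noise(z0, alpha_bar_t):
    """Corrupt only the target rows of z_0; the source rows stay untouched."""
    zt = np.sqrt(alpha_bar_t) * z0 + np.sqrt(1 - alpha_bar_t) * rng.normal(size=z0.shape)
    zt[:m] = z0[:m]                                   # partial noising: keep x_t = x_0
    return zt

zt = partial_noise(z0, alpha_bar_t=0.3)
print(np.allclose(zt[:m], z0[:m]), np.allclose(zt[m:], z0[m:]))   # True, False
```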
| 38 |
+
|
| 39 |
+
Reverse Process with Conditional Denoising. The ultimate goal of the reverse process is to recover the original $\mathbf{z}_0$ by denoising $\mathbf{z}_t$ : $p_{\theta}(\mathbf{z}_{0:T}) := p(\mathbf{z}_T) \prod_{t=1}^T p_{\theta}(\mathbf{z}_{t-1}|\mathbf{z}_t)$ . We model the learning process $p_{\theta}(\mathbf{z}_{t-1}|\mathbf{z}_t) = \mathcal{N}(\mathbf{z}_{t-1}; \mu_{\theta}(\mathbf{z}_t, t), \sigma_{\theta}(\mathbf{z}_t, t))$ using the proposed diffusion model DIFFUSEQ: $f_{\theta}(\mathbf{z}_t, t)$ , where $\mu_{\theta}(\cdot)$ and $\sigma_{\theta}(\cdot)$ are the parameterizations of the predicted mean and standard deviation of $q(\mathbf{z}_{t-1}|\mathbf{z}_t)$ in the forward process, derived using Bayes' rule. The detailed derivations are in Appendix A. With the partial noising strategy adopted in the forward process, we can impose the input as the condition when denoising, as shown in Figure 1. The proposed conditional denoising is classifier-free by nature: we do not require extra-trained classifiers to control the denoising process.
|
| 40 |
+
|
| 41 |
+
Specifically, we use a transformer architecture to model $f_{\theta}$ , which naturally models the semantic relation between $\mathbf{x}_t$ and $\mathbf{y}_t$ . We compute the variational lower bound ( $\mathcal{L}_{VLB}$ ) following the original diffusion process. $\mathcal{L}_{round}$ corresponds to the rounding operation in Figure 2.
|
| 42 |
+
|
| 43 |
+
$$\mathcal{L}_{\text{VLB}} = \mathbb{E}_{q(\mathbf{z}_{1:T}|\mathbf{z}_{0})} \left[ \underbrace{\log \frac{q(\mathbf{z}_{T}|\mathbf{z}_{0})}{p_{\theta}(\mathbf{z}_{T})}}_{\mathcal{L}_{T}} + \sum_{t=2}^{T} \underbrace{\log \frac{q(\mathbf{z}_{t-1}|\mathbf{z}_{0}, \mathbf{z}_{t})}{p_{\theta}(\mathbf{z}_{t-1}|\mathbf{z}_{t})}}_{\mathcal{L}_{t-1}} + \underbrace{\log \frac{q_{\phi}(\mathbf{z}_{0}|\mathbf{w}^{x \oplus y})}{p_{\theta}(\mathbf{z}_{0}|\mathbf{z}_{1})}}_{\mathcal{L}_{0}} - \underbrace{\log p_{\theta}(\mathbf{w}^{x \oplus y}|\mathbf{z}_{0})}_{\mathcal{L}_{\text{round}}} \right].$$
|
| 44 |
+
|
| 45 |
+
(1)
|
| 46 |
+
|
| 47 |
+
We further simplify the training objective as follows (details in Appendix A):
|
| 48 |
+
|
| 49 |
+
$$\min_{\theta} \mathcal{L}_{\text{VLB}} = \min_{\theta} \left[ \sum_{t=2}^{T} ||\mathbf{z}_0 - f_{\theta}(\mathbf{z}_t, t)||^2 + ||\text{EMB}(\mathbf{w}^{x \oplus y}) - f_{\theta}(\mathbf{z}_1, 1)||^2 - \log p_{\theta}(\mathbf{w}^{x \oplus y} | \mathbf{z}_0) \right]
\rightarrow \min_{\theta} \left[ \sum_{t=2}^{T} ||\mathbf{y}_0 - \tilde{f}_{\theta}(\mathbf{z}_t, t)||^2 + ||\text{EMB}(\mathbf{w}^y) - \tilde{f}_{\theta}(\mathbf{z}_1, 1)||^2 + \mathcal{R}(||\mathbf{z}_0||^2) \right], \tag{2}$$
|
| 51 |
+
|
| 52 |
+
where $\tilde{f}_{\theta}(\mathbf{z}_t, t)$ denotes the part of the recovered $\mathbf{z}_0$ that corresponds to $\mathbf{y}_0$. Note that although the first term only computes the loss w.r.t. $\mathbf{y}_0$, due to the attention mechanism in the transformer, the reconstruction of $\mathbf{y}_0$ also takes $\mathbf{x}_0$ into account, so the gradients from the first term
|
| 53 |
+
|
| 54 |
+
will also affect the learning of $\mathbf{x}_0$. The mathematically equivalent regularization term $\mathcal{R}(||\mathbf{z}_0||^2)$ regularizes the embedding learning. We further share the embedding function between the source and target sequences, enabling the two feature spaces to be trained jointly. This sets DIFFUSEQ apart from existing solutions in vision such as GLIDE (Nichol et al., 2022).
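As a rough illustration of the simplified objective in Eq. (2), the main reconstruction term could be computed as below (a sketch only; `f_theta` stands for the transformer predicting $\mathbf{z}_0$ from $\mathbf{z}_t$, and the $t=1$ and rounding/regularization terms are omitted):

```python
import torch
import torch.nn.functional as F

def diffuseq_step_loss(f_theta, z_t, t, y_0, src_len):
    """MSE part of Eq. (2): reconstruct only the target part y_0.

    f_theta : model mapping (z_t, t) -> predicted z_0 of shape (b, m+n, d).
    z_t     : noised latent at step t.
    y_0     : clean target embeddings of shape (b, n, d).
    src_len : m, length of the source segment inside z_t.
    """
    z0_hat = f_theta(z_t, t)            # predict the whole z_0
    y0_hat = z0_hat[:, src_len:, :]     # keep only the target fraction
    return F.mse_loss(y0_hat, y_0)      # ||y_0 - f~_theta(z_t, t)||^2
```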
|
| 55 |
+
|
| 56 |
+
**Training and Inference Methods.** In our preliminary experiments, we find that the high diversity in NLP datasets and long diffusion steps often result in insufficient training. We hypothesize the reason is that sampling step t uniformly causes unnecessary noise in the $\mathcal{L}_{VLB}$ objective. We hence employ importance sampling (Nichol & Dhariwal, 2021) to address this problem.
|
| 57 |
+
|
| 58 |
+
$$\mathcal{L}_{\text{VLB}} = \mathbb{E}_{t \sim p_t} \left[ \frac{\mathcal{L}_t}{p_t} \right], \ p_t \propto \sqrt{\mathbb{E}[\mathcal{L}_t^2]}, \ \sum_{t=0}^{T-1} p_t = 1.$$
|
| 59 |
+
(3)
|
| 60 |
+
|
| 61 |
+
Intuitively, the importance-weighted sampling algorithm will spend more steps on diffusion steps with larger $\mathcal{L}_t$ , and vice versa.
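A minimal sketch of the importance sampling scheme of Eq. (3), in the spirit of Nichol & Dhariwal (2021); the per-step history length of 10 losses is an assumption for illustration:

```python
import numpy as np

class LossAwareSampler:
    """Sample diffusion steps t with probability p_t proportional to sqrt(E[L_t^2])."""

    def __init__(self, num_steps, history=10):
        self.loss_history = [[] for _ in range(num_steps)]
        self.history = history

    def weights(self):
        # Fall back to uniform sampling until every step has enough recorded losses.
        if any(len(h) < self.history for h in self.loss_history):
            return np.ones(len(self.loss_history)) / len(self.loss_history)
        w = np.sqrt([np.mean(np.square(h)) for h in self.loss_history])
        return w / w.sum()

    def sample(self, batch_size):
        p = self.weights()
        t = np.random.choice(len(p), size=batch_size, p=p)
        return t, 1.0 / (len(p) * p[t])   # importance weights for L_t / p_t

    def update(self, t, losses):
        for ti, li in zip(t, losses):
            self.loss_history[ti].append(li)
            self.loss_history[ti] = self.loss_history[ti][-self.history:]
```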
|
| 62 |
+
|
| 63 |
+
To conduct SEQ2SEQ generation given the condition EMB( $\mathbf{w}^x$ ), we randomly sample $\mathbf{y}_T \sim \mathcal{N}(0,I)$ and concatenate $\mathbf{y}_T$ with EMB( $\mathbf{w}^x$ ) to obtain $\mathbf{z}_T$ . We then repeat the reverse process until we arrive at $\mathbf{z}_0$ . At each sampling step, an anchoring function is applied to the reparameterized $\mathbf{z}_t$ . Specifically, the anchoring function: (a) performs rounding on $\mathbf{z}_t$ to map it back to the word embedding space following Li et al. (2022); (b) replaces the part of the recovered $\mathbf{z}_{t-1}$ that belongs to $\mathbf{w}^x$ with the original $\mathbf{x}_0$ , since this part is recovered from the corrupted $\mathbf{z}_t$ via $f_\theta$ and does not strictly equal $\mathbf{x}_0$ . Note that (b) is designed specifically for DIFFUSEQ.
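The sampling loop with the anchoring function could be sketched as follows (`p_sample` and `round_to_embeddings` are placeholders for the usual DDPM posterior sampler and the embedding-rounding helper, both assumed rather than taken from the paper):

```python
import torch

@torch.no_grad()
def diffuseq_sample(f_theta, p_sample, round_to_embeddings, x_emb, T, n, d):
    """Generate a target sequence conditioned on source embeddings x_emb."""
    b, m, _ = x_emb.shape
    y_t = torch.randn(b, n, d)                 # y_T ~ N(0, I)
    z_t = torch.cat([x_emb, y_t], dim=1)       # z_T = EMB(w^x) concatenated with y_T
    for t in reversed(range(1, T + 1)):
        z_prev = p_sample(f_theta, z_t, t)     # one reverse (denoising) step
        z_prev = round_to_embeddings(z_prev)   # (a) rounding back to embedding space
        z_prev[:, :m, :] = x_emb               # (b) anchor the source part to x_0
        z_t = z_prev
    return z_t[:, m:, :]                       # recovered y_0
```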
|
| 64 |
+
|
| 65 |
+
To improve the quality of generation, we apply the widely used Minimum Bayes Risk (MBR) decoding strategy (Koehn, 2004). We first generate a set of candidate samples $\mathcal S$ from different random seeds of DIFFUSEQ and select the best output sequence that achieves the minimum expected risk under a meaningful loss function (e.g. BLEU or other cheaper metrics like precision). In practice, we use the negative BLEU score in our implementation.
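MBR selection over a candidate set can be sketched as below, using `sacrebleu` as one possible BLEU implementation; any pairwise utility function would work the same way:

```python
from sacrebleu import sentence_bleu

def mbr_select(candidates):
    """Pick the candidate with minimum expected risk (negative BLEU) over the set."""
    best, best_risk = None, float("inf")
    for hyp in candidates:
        # Expected risk of `hyp`, using the other candidates as pseudo-references.
        refs = [c for c in candidates if c is not hyp]
        risk = -sum(sentence_bleu(hyp, [r]).score for r in refs) / max(len(refs), 1)
        if risk < best_risk:
            best, best_risk = hyp, risk
    return best
```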
|
| 66 |
+
|
| 67 |
+
<span id="page-3-3"></span>Connections to AR, Iter-NAR, and Fully-NAR Models. To better understand the behavior of DIFFUSEQ, we give the theoretical connection to autoregressive (AR), iterative non-autoregressive (iter-NAR), and fully non-autoregressive (fully-NAR) models. We argue that DIFFUSEQ can be seen as an extension of iter-NAR model. Detailed graphical learning discrepancies of these four cases are discussed in Appendix B for reference.
|
| 68 |
+
|
| 69 |
+
AR models learn $p(\mathbf{w}_{1:n}^y|\mathbf{w}^x)$ by autoregressive decomposition based on left-context:
|
| 70 |
+
|
| 71 |
+
<span id="page-3-0"></span>
|
| 72 |
+
$$p_{\text{AR}}(\mathbf{w}_{1:n}^{y}|\mathbf{w}^{x}) = \underbrace{p(w_{1}^{y}|\mathbf{w}^{x})}_{\text{initial prediction}} \underbrace{\prod_{i=1,\dots,n-1} p(w_{i+1}^{y}|\mathbf{w}_{1:i}^{y},\mathbf{w}^{x})}_{\text{progressive left-context prediction}},$$
|
| 73 |
+
(4)
|
| 74 |
+
|
| 75 |
+
while fully-NAR models (Gu et al., 2018; Qian et al., 2021) learn the conditional probability under an independence assumption for fast inference:
|
| 76 |
+
|
| 77 |
+
<span id="page-3-1"></span>
|
| 78 |
+
$$p_{\text{fully-NAR}}(\mathbf{w}_{1:n}^y|\mathbf{w}^x) = \prod_{i=1,\dots,n} p(w_i^y|\mathbf{w}^x).$$
|
| 79 |
+
(5)
|
| 80 |
+
|
| 81 |
+
To make a better analogy to AR and NAR models, we use a lossless way to formulate iterative NAR models (Gu et al., 2019; Ghazvininejad et al., 2019) by introducing a series of intermediate sequences $\mathbf{w}_{1:K-1}^y$ , $\mathbf{w}_K^y = \mathbf{w}^y$ with K editable iterations:
|
| 82 |
+
|
| 83 |
+
<span id="page-3-2"></span>
|
| 84 |
+
$$p_{\text{iter-NAR}}(\mathbf{w}_{1:n}^{y}|\mathbf{w}^{x}) = \sum_{\mathbf{w}_{1}^{y}, \dots, \mathbf{w}_{K-1}^{y}} \underbrace{\prod_{i=1\dots n} p(w_{1,i}^{y}|\mathbf{w}^{x})}_{\text{initial prediction}} \underbrace{\prod_{k=1\dots K-1} \prod_{i=1\dots n} p(w_{k+1,i}^{y}|\mathbf{w}_{k,1:n}^{y}, \mathbf{w}^{x})}_{\text{progressive full-context prediction}}.$$
|
| 85 |
+
(6)
|
| 86 |
+
|
| 87 |
+
Previous study (Huang et al., 2022) shows that there is a gap called *conditional total correlation* between the AR Eq. (4) and fully-NAR Eq. (5) learning paradigms, caused by the lossy decomposition of NAR models. However, when comparing iter-NAR Eq. (6) with AR Eq. (4) models, both can be factorized into an initial prediction term and a progressive prediction process based on different contexts (i.e. left-context in AR and full-context in iter-NAR), and the discrepancy pointed out by
|
| 88 |
+
|
| 89 |
+
Huang et al. (2022) is therefore closed in iter-NAR assuming sufficient steps. By showing that DIFFUSEQ is an extension of the iter-NAR model, we offer a justification that it will not suffer from the conditional total correlation for the same reason.
|
| 90 |
+
|
| 91 |
+
A straightforward way to formulate pure continuous diffusion models is to introduce a series of Gaussian noise-corrupted features along the diffusion steps: $\mathbf{y}_{1:T-1}$, with $\mathbf{y}_0 = \mathbf{y}$ and $\mathbf{y}_T \sim \mathcal{N}(0, \mathbf{I})$.
|
| 92 |
+
|
| 93 |
+
<span id="page-4-2"></span><span id="page-4-1"></span><span id="page-4-0"></span>
|
| 94 |
+
$$p_{\text{diffusion}}(\mathbf{w}^y|\mathbf{w}^x) = \int_{\mathbf{y}_T, \dots, \mathbf{y}_0} \underbrace{p(\mathbf{w}^y|\mathbf{y}_0, \mathbf{w}^x)}_{\text{final prediction}} \underbrace{\prod_{t=T, \dots, 1} p(\mathbf{y}_{t-1}|\mathbf{y}_t, \mathbf{w}^x)}_{\text{progressive full-context diffusion}}, \tag{7}$$
|
| 95 |
+
|
| 96 |
+
where $p(\mathbf{y}_{t-1}|\mathbf{y}_t, \mathbf{w}^x)$ describes the diffusion step on the continuous representations $\mathbf{y}$. The rounding operation in DIFFUSEQ maps the continuous vectors $\mathbf{y}$ to discrete $\mathbf{w}^y$ at each time step $t$; we additionally introduce this into Eq. (7):
|
| 97 |
+
|
| 98 |
+
$$p_{\text{DIFFUSEQ}}(\mathbf{w}^y|\mathbf{w}^x) = \sum_{\mathbf{w}_T^y, \dots, \mathbf{w}_1^y} \int_{\mathbf{y}_T, \dots, \mathbf{y}_0} p(\mathbf{w}^y|\mathbf{y}_0, \mathbf{w}^x) \prod_{t=T, \dots, 1} p(\mathbf{w}_t^y|\mathbf{y}_t, \mathbf{w}^x) p(\mathbf{y}_{t-1}|\mathbf{w}_t^y)$$
|
| 99 |
+
(8)
|
| 100 |
+
$$= \sum_{\mathbf{w}_T^y, \dots, \mathbf{w}_1^y} \int_{\mathbf{y}_T, \dots, \mathbf{y}_0} p(\mathbf{w}_T^y|\mathbf{y}_T, \mathbf{w}^x) \prod_{t=T-1, \dots, 0} p(\mathbf{y}_t|\mathbf{w}_{t+1}^y) p(\mathbf{w}_t^y|\mathbf{y}_t, \mathbf{w}^x).$$
|
| 101 |
+
(9)
|
| 102 |
+
|
| 103 |
+
By rearranging Eq. (8) into Eq. (9), we can see that DIFFUSEQ is a more generalized form of the iter-NAR model in Eq. (6) before marginalizing out $\{\mathbf{y}_T, \ldots, \mathbf{y}_0\}$, despite the different initialization of $\mathbf{y}_T$. A more detailed derivation is shown in Appendix C.
|
2210.13005/main_diagram/main_diagram.drawio
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
<mxfile host="Electron" modified="2022-12-25T07:43:06.441Z" agent="5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) draw.io/17.4.2 Chrome/100.0.4896.60 Electron/18.0.1 Safari/537.36" version="17.4.2" etag="B_6G5CZ8UBWDsi3PNX-O" type="device"><diagram id="0TlPKZw2wR53y0CRZn7h">7Vxdc6M2FP01zCSdyQ6S+PJj7GTbzvRjZ73TNk8dxcg2LUYulmNnf30lkACBHDsbCDHZPMTmSghxz7lX0pESC01W+x9TvF7+SkMSW9AO9xa6sSAENgj4h7A85hboBNKySKMwt9mlYRp9JepWad1GIdlIW25ilMYsWuvGGU0SMmOaDacp3enV5jQONcMaL4jWDWGYznBMGtX+jEK2zK0I2pXqP5FosVSPDlTJCqvaso3NEod0V3kYurXQJKWU5d9W+wmJhft0x3w8UFr0LCUJO+UGKLvBHtXLpXSbhEQU2xYa05Qt6YImOP6F0jU3Am78hzD2KGHBW0a5aclWsSwl+4j9JW8X3+8q9pt9peDmUV0kLH3Mbvngqsu7all5W3al7tuwlP5LJjSmadZ3ZGc/vCR/KxIudMQ2dJvOpMmRxMHpgkhXeU3vgQITTmdCV4Q/n1dJSYxZ9KC3jiWtFkW90vH8i/S9GQfUHQ7g9XEYZT/fjIPfHw6yNw843spGLejFvBPjOeV94ckCy457/21FkI6/RCuei6D9G9nx35/pCidloQapMoqWrjYZbNe8AvDW+6xqeZO3kJ/Zk+9f2Ap/J2eSf6gmuRvu64/htvwdlblGSO4onmD5xXi3jBiZrnNP7HiS14mHN+s8686jvSDwE3EqHigJDLzioQ8kZWRvHcpoB7ggb3AcmWvl6OJCN7/eVVK1rLKsJGllewl73KbTOPWn8rKM4dvSOu4kzmUAnxDpWpyXYX8g0k+IZ+9NxbM30Hie9hnPHUQtGulR69mvF7X+QDliuZMlFv2/GxhZnB7JEhwjy3HU3AZqz2ZYjtLF/SUv/JLiMGKRGDPECiNfbp2GKoeA1eCLo0XCv884FISP1mMBVMSXPteyYBWFYT5uEf5K+D5rSgwNaxolLHOsO7bcG9EWH6o2EvnGLCChCamxw20plQRAZ4fjNNgBHAM9YAv0GBnoIWLv0988GtmSMHxxZ/mT6aUKyTeNCGoruUMdEc+ACOwIESUanOcSV6BRmThPJnLi3JhSFyUHpmhtTr7krZ8Ev0qUPWR/QCOERgEMoBic9Gm4Cz94tufZwAe+z+vozeczSNliVa+oPcQd1YLb9vWG8nlno6GMJcW7nkYc0B1xul+TnzBTB7A5VQc9aiCqP4ObiPW6+O5g/uXB/pbYoKmUvb81NnDfWOQ2JzJVUOTMor1ht/D8XdXxz0ABnDDsqqCBpuG2UDhJEl6LLYXyNbnlYyRclbUU4s0ye2fQ59js+qPasPmN468HAm2QD/RE4PhPDfItjs3uQIeKoek6rt/fUh0MVfwbrLDj9cmWozLgacpOC7LOTMg6v29T0Tux6H/g7pP6jiRRquq2JPfEZC5Kzk/scd3awGYSe1wDZZw2KGMSAw1qT0gvppfvSvFxfVeHxaT4mGBpRfExiXDPjGT4ZAKeX3B8xVmOWaa5TvNo57ZZSNllIymfkQYL28EfBTX81QGY19BgVRvt4m/Pitl/aVTrgKd4IjhRECRLCpZ7+8PZsySlDItRiV+7LSVzB9aWE27QZI1p/G+FNaDrrPHpYsoHg8mQkL9qC3rkH4c+6Ar5PnRH8MIE8wdOI1zs+v2czAl3Ne/lWbIKdMQqFzk6qzzUTCigK1qhHmh1KAPdkBlN5kKAO0+GGPPOoc2xNpgT1CRuv7kS7SwfmQ4h1tBZpHS71n2Cnnx1eRZagmJVjjafvDgHNZd4o2YwGVwC2nCJSe+rKxkNJbbCXUGVj3gVxaLzpjDT6WbYei3OLG4YTlVFx8r03spVzmR1LFzQdh7FsbmdF2zh5hxpYlXBwrS8UrYXyslII8LVN2rJvtYKqLXSnlhsPAT4nTx9kUcB/ahx6bnkudJb6ewUgGr5PI+PHCBDdS9RHVmq7iWiDjamTk71QXcOfxPHLowOhz06/KhcdqbbJEM7dgFruyOveexCtfGuj12otPhWIhcdlazONHKHtgsOawemXnNfUxF0cCQZ2uY36pMkR7WqV9v8xmLz+zPB8dWOpnHIL0LMsCWcU9/8XpCEpLn88X53wIvtzmIH3KBxmnjTxg44MklV3zdNrBPEyzagt+tn7JuKXFciJTp6Au/lyGd/KWNB3hd7UARoSaOGSN8zM22yj7qC36SpVVB7V6ddYOO0SzMFtxSH/LL85yC5nFX+kxV0+z8=</diagram></mxfile>
|
2210.13005/main_diagram/main_diagram.pdf
ADDED
|
Binary file (33.4 kB). View file
|
|
|
2210.13005/paper_text/intro_method.md
ADDED
|
@@ -0,0 +1,115 @@
|
| 1 |
+
# Introduction
|
| 2 |
+
|
| 3 |
+
Real-world problem scenarios are flooded with sequential event data consisting of chronologically arrived events which reflect certain behaviors, activities or responses. A typical example is user activity prediction [@ye2013s; @feng2015personalized; @zhou2019deep; @hidasi2018recurrent; @sun2019bert4rec; @zhou2020s3] that aims to harness user's recent activities to estimate future ones, which could help downstream target advertisement in online platforms (e.g., e-commerce or social networks). Predicting future events also plays a central role in some real situations such as clinical treatment [@beunza2019comparison] for promoting social welfare.
|
| 4 |
+
|
| 5 |
+
A common nature of (sequential) event prediction lies in the different time intervals within which training and testing data are generated. Namely, models trained with data collected at one time are supposed to predict the next event in the future [@zhao2020event; @peska2020off; @rossetti2016contrasting], where the underlying data-generating distributions may have varied due to environmental changes. However, most existing approaches [@hidasi2018recurrent; @granroth2016happens; @qiao2018pairwise; @kang2018self; @sun2019bert4rec; @rendle2010factorizing; @he2016fusing; @he2017translation] overlook this issue in both problem formulation and empirical evaluation, which may leave the real problems under-resolved due to model mis-specification, and result in over-estimation of model performance on real data.
|
| 6 |
+
|
| 7 |
+
As a concrete example in sequential recommenders [@fang2020deep], the data-generating distribution for user clicking behaviors over items is highly dependent on user preferences that are normally correlated with some external factors, like the dynamic fashion trends in different years [@gong2021aesthetics] or seasonal influences on item popularity [@stormer2007improving]. These highly time-sensitive external factors would induce distinct user preferences leading to different behavioral data as time goes by. This issue could also partially explain the common phenomenon of model performance drop after adaptation from offline to online environments [@rossetti2016contrasting; @peska2020off; @huzhang2021aliexpress].
|
| 8 |
+
|
| 9 |
+
Handling distribution shift in sequential event data poses several non-trivial challenges. *First*, the temporal shift requires models' capability for out-of-distribution (OoD) generalization [@ood-classic-1], i.e., extrapolating from training environments to new unseen environments in the remote future. Prior art that focuses on model learning and evaluation over in-distribution data may yield sub-optimal results on OoD testing instances. *Second*, as mentioned before, there exist external factors that impact the generation of events. These external factors, which we term *contexts*, are unobserved in practice. To eliminate their effects, one may also need the latent distributions that characterize how the contexts affect event generation. Unfortunately, such information is often inaccessible due to constrained data collection, which requires the models to learn from purely observed sequences.
|
| 10 |
+
|
| 11 |
+
<figure id="fig_motiv" data-latex-placement="tb!">
|
| 12 |
+
<p>[]</p>
|
| 13 |
+
<p><span><embed src="motiv.pdf" style="width:64.0%" /></span></p>
|
| 14 |
+
<figcaption>A toy example in recommendation. Prior art would spuriously correlate non-causal items (‘ice cream’ and ‘beach T-shirt’) and produce undesired results under a new environment in the future. Our approach endeavors to alleviate the issue via counteracting the effects of contexts (seasons).</figcaption>
|
| 15 |
+
</figure>
|
| 16 |
+
|
| 17 |
+
To resolve these difficulties, in this paper, we adopt a generative perspective to investigate the *temporal distribution shift* problem and propose a new variational context adjustment approach with instantiations to solve the issue for sequential event prediction.
|
| 18 |
+
|
| 19 |
+
**A generative perspective on temporal distribution shift.** We use proof-of-concept Structural Causal Models (SCMs) to characterize the dependency among contexts, historical sequences and labels (i.e., next event type) in terms of both data generation and model prediction. We show that contexts essentially act as a confounder, which leads the model to leverage spurious correlations and fail to generalize to data from new distributions. See Fig. [1](#fig_motiv){reference-type="ref" reference="fig_motiv"} for a toy example to illustrate the issue.
|
| 20 |
+
|
| 21 |
+
**Variational context adjustment.** We propose a variational context adjustment approach to resolve the issue. The resulting new learning objective has two-fold effects: 1) helping to uncover the underlying relations between latent contexts and sequences in a data-driven way, and 2) canceling out the confounding effect of contexts to facilitate the model to explore the true causality of interest (as illustrated in Fig. [1](#fig_motiv){reference-type="ref" reference="fig_motiv"}). We also propose a mixture of variational posteriors to approximate context prior via randomly simulating pseudo input sequences.
|
| 22 |
+
|
| 23 |
+
**Instantiation by a flexible framework.** We propose a framework named CaseQ to instantiate the ingredients in the objective, which can be combined with most off-the-shelf sequence backbone models. To accommodate temporal patterns under different environments, we devise a novel hierarchical branching structure for learning context-specific representations of sequences. It can dynamically evolve its architecture to adapt to varying contexts.
|
| 24 |
+
|
| 25 |
+
**Empirical results.** We carry out comprehensive experiments on three sequential event prediction tasks with evaluation protocols designed for testing model performance under temporal distribution shift. Specifically, when we enlarge the time gap between training and testing data, CaseQ can alleviate the performance drop by $47.77\%$ w.r.t. Normalized Discounted Cumulative Gain (NDCG) and $35.73\%$ w.r.t. Hit Ratio (HR) for sequential recommendation, which shows its robustness against temporal distribution shift.
|
| 26 |
+
|
| 27 |
+
# Method
|
| 28 |
+
|
| 29 |
+
We denote $\mathcal X=\{1,2,\cdots,M\}$ as the space of event types, and assume each event is assigned an event type $x_i\in \mathcal X$. Events occurring in chronological order constitute a sequence $\mathcal S =\{x_1, x_2, \cdots, x_{|\mathcal S|}\}$. As mentioned before, the data distribution is normally affected by time-dependent external factors, i.e., the *context* $c$. We use $S$, $Y$, $\hat Y$ and $C$ to denote the random variables of the historical event sequence $\mathcal S$, the ground-truth next event $y$, the predicted next event $\hat y$ and the context $c$, respectively. The data distribution can be characterized as $P(S, Y| C) = P(S| C) P(Y| S, C)$.
|
| 30 |
+
|
| 31 |
+
Given training data $\{(\mathcal S_i, y_i)\}_{i=1}^N$ generated from data distributions with $(\mathcal S_i, y_i)\sim P(S, Y| C = c_{tr}^{(i)})$, where $c_{tr}^{(i)}$ denotes the specific context when the $i$-th training sample is generated, we are to learn a prediction model $\hat y_i = f(\mathcal S_i; \theta)$ that can generalize to testing data $\{(\mathcal S_j, y_j)\}_{j=1}^{N'}$ from new distributions with $(\mathcal S_j, y_j)\sim P(S, Y| C = c_{te}^{(j)})$, where $c_{te}^{(j)}$ denotes the specific context when the $j$-th testing sample is generated. The distribution shift stems from different contexts that change over time, which we call *temporal distribution shift* in this paper.
|
| 32 |
+
|
| 33 |
+
<figure id="fig_diag" data-latex-placement="t!">
|
| 34 |
+
<embed src="diag.pdf" style="width:70.0%" />
|
| 35 |
+
<figcaption>Structural causal model for sequence learning.</figcaption>
|
| 36 |
+
</figure>
|
| 37 |
+
|
| 38 |
+
Most existing approaches target maximizing the likelihood $P_\theta(y|\mathcal S)$ as the objective for optimization. Here we use $P_\theta(\cdot)$ to denote the distribution induced by the prediction model $f_\theta$. Based on the definitions, we can build two Structural Causal Models (SCMs) that interpret the causal relations among 1) $C$, $S$, $Y$ (given by the data-generating process) and 2) $C$, $S$ and $\hat{Y}$ (given by model learning), as shown in Fig. [2](#fig_diag){reference-type="ref" reference="fig_diag"}(a) and (b).
|
| 39 |
+
|
| 40 |
+
For Fig. [2](#fig_diag){reference-type="ref" reference="fig_diag"}(a), the three causal relations are given by the definitions for data generation, i.e., $P(S, Y|C) = P(S| C) P(Y|S, C)$. We next illustrate the rationales behind the other two causal relations $S\rightarrow \hat Y$ and $C\rightarrow \hat Y$.
|
| 41 |
+
|
| 42 |
+
$\boldsymbol{S\rightarrow \hat Y}$: This relation is induced by the prediction model $\hat y = f(\mathcal S;\theta)$ that takes a historical event sequence $\mathcal S$ as input and outputs the prediction for next event $\hat y$. The relation from $\mathcal S$ to $\hat y$ is deterministic given fixed model parameters $\theta$.
|
| 43 |
+
|
| 44 |
+
$\boldsymbol{C\rightarrow \hat Y}$: This relation is implicitly embodied in the learning process that optimizes the model parameters with a given dataset collected at one time. By our definition, the training dataset is generated from a latent distribution affected by context $c_{tr}$ and the MLE algorithm yields $$\begin{equation}
|
| 45 |
+
\theta^* = \arg\min_{\theta}\mathbb E_{(\mathcal S,y)\sim P(S,Y|C=c_{tr})} [l\left(f(\mathcal S; \theta), y\right)],
|
| 46 |
+
\end{equation}$$ where $l(\cdot, \cdot)$ denotes a certain loss function, e.g., cross-entropy. This indicates that the learned model parameters $\theta^*$ are dependent on the distribution of $c_{tr}$. Also, due to the fact that $\hat y = f(\mathcal S;\theta)$, we establish the relation from $C$ to $\hat Y$. In short, intuitive explanations for such a causal relation lie in two facts: 1) $C$ affects the generation of data used for model training, and 2) $\hat Y$ is the output of the trained model given input sequence $S$.
|
| 47 |
+
|
| 48 |
+
The key observation is that $C$ acts as the confounder in both Fig. [2](#fig_diag){reference-type="ref" reference="fig_diag"}(a) and (b), which could play a crucial role leading to undesirable testing performance of MLE-based approaches once the distribution is shifted. As implied by the causal relations in Fig. [2](#fig_diag){reference-type="ref" reference="fig_diag"}(a), there exists partial information in $S$ that is predictive for $Y$ yet highly sensitive to $C$, a usually fast-changing variable in real-world scenarios. As a result, the correlation between $S$ and $Y$ in previous contexts may become spurious in future ones. This also explains the failure of MLE-based models for generalizing to data from new distributions, according to the similar causal pattern in Fig. [2](#fig_diag){reference-type="ref" reference="fig_diag"}(b).
|
| 49 |
+
|
| 50 |
+
We can reuse the toy example in Fig. [1](#fig_motiv){reference-type="ref" reference="fig_motiv"} as an intuitive interpretation of the failure. The 'summer' season (a context) acts as a confounder that correlates buying 'ice cream' (a historical event) and buying 'T-shirt' (a label), between which there is no obvious causal relation. However, the model would 'memorize' their correlation and tend to improperly recommend 'T-shirt' in the 'winter' season (new context). In fact, the user most likely purchases ice cream in winter because he/she is a dessert lover, in which case recommending other desserts would be a better decision.
|
| 51 |
+
|
| 52 |
+
To address the confounding effect of $C$ and endow the model with robustness to temporal distribution shift, we propose to target model learning with $P_\theta(Y|do(S))$ instead of the conventional $P_\theta(Y|S)$. As shown in Fig. [\[fig_context\]](#fig_context){reference-type="ref" reference="fig_context"}(c), the $do$-operator cuts off the arrow (i.e., causal relation) coming from $C$ to $S$, which essentially simulates an ideal data-generating process where sequences are generated independently from contexts. This operation blocks the backdoor path $S\leftarrow C\rightarrow \hat Y$ that spuriously correlates $S$ and $Y$, and enables the model to learn the desired causal relation $S\rightarrow Y$ which is invariant to environmental change.
|
| 53 |
+
|
| 54 |
+
An ideal way to compute $P_\theta(Y|do(S))$ is to carry out randomized controlled trial (RCT) [@pearl2016causal] by recollecting data from a prohibitively large quantity of random samples under any possible context, which is infeasible since we could neither control the environment nor collect data in the future. Fortunately, there exists a statistical estimation of $P_\theta(Y|do(S))$ by leveraging backdoor adjustment [@pearl2016causal], wherein the confounder $C$ is stratified into discrete pieces $\mathcal C = \{c_i\}_{i=1}^{|\mathcal C|}$. By using basic rules induced by the $do$-operator (see derivation in Appendix [6](#ap_backdoor){reference-type="ref" reference="ap_backdoor"}), we have: $$\begin{equation}
|
| 55 |
+
\label{eqn_backdoor}
|
| 56 |
+
P_\theta(Y|do(S)) = \sum_{i=1}^{|\mathcal C|} P_\theta(Y|S, C = c_i) P(C = c_i).
|
| 57 |
+
\end{equation}$$ Intuitively, the backdoor adjustment approximates an ideal situation where $c_i$ is enumerated according to the context prior $P(C)$ independent from input sequences, which serves to counteract the effect of $C$ on the generation of $S$. Nevertheless, optimizing with Eq. [\[eqn_backdoor\]](#eqn_backdoor){reference-type="eqref" reference="eqn_backdoor"} is intractable since $c_i$ is usually unobserved or even undefined, and its prior distribution $P(C)$ is also unknown. Furthermore, computing Eq. [\[eqn_backdoor\]](#eqn_backdoor){reference-type="eqref" reference="eqn_backdoor"} requires awareness of the relation between contexts and generated sequences, which is implicit in the data-generating process behind the data.
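As a toy illustration of the backdoor adjustment above, suppose (hypothetically) the context were observable and discretized; the interventional distribution would then be a prior-weighted average of the context-conditional predictions:

```python
import numpy as np

def backdoor_adjustment(p_y_given_s_c, p_c):
    """P(Y|do(S)) = sum_i P(Y|S, c_i) P(c_i).

    p_y_given_s_c: array of shape (num_contexts, num_events), the
                   context-conditional predictive distributions for one sequence.
    p_c          : array of shape (num_contexts,), the context prior.
    """
    return np.einsum("ke,k->e", p_y_given_s_c, p_c)

# Toy usage: 3 contexts, 4 event types.
p_y_given_s_c = np.array([[0.7, 0.1, 0.1, 0.1],
                          [0.2, 0.5, 0.2, 0.1],
                          [0.1, 0.2, 0.3, 0.4]])
p_c = np.array([0.5, 0.3, 0.2])
print(backdoor_adjustment(p_y_given_s_c, p_c))  # stratified estimate of P(Y|do(S))
```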
|
| 58 |
+
|
| 59 |
+
To address the difficulty for targeting Eq. [\[eqn_backdoor\]](#eqn_backdoor){reference-type="eqref" reference="eqn_backdoor"}, we introduce a variational distribution $Q(C|S)$ as the estimation for latent contexts given input sequences. By treating $C$ as a latent variable and using variational inference technique, we can obtain the following tractable evidence lower bound (ELBO) as learning objective (see derivation in Appendix [7](#ap_variational){reference-type="ref" reference="ap_variational"}): $$\begin{equation}
|
| 60 |
+
\label{eqn_obj}
|
| 61 |
+
\begin{split}
|
| 62 |
+
&\log P_\theta(Y|do(S=\mathcal S)) \\
|
| 63 |
+
\geq & \mathbb{E}_{c\sim Q(C|S=\mathcal S)} \left [ \log P_\theta(Y|S=\mathcal S, C=c) \right ] - \mathcal{D}_{KL}\left(Q(C|S=\mathcal S) \| P(C)\right),
|
| 64 |
+
\end{split}
|
| 65 |
+
\end{equation}$$ where the last step is given by Jensen's Inequality and the equality holds if and only if $Q(C|S)$ exactly fits the true posterior $P(C|S, Y)$, which suggests it successfully uncovers the latent context from observed data. The first component in Eq. [\[eqn_obj\]](#eqn_obj){reference-type="eqref" reference="eqn_obj"} is the negative reconstruction error (i.e. prediction error). The second component is the Kullback--Leibler (KL) divergence of the variational distribution and context prior distribution. Similar with [@makhzani2015adversarial], we could re-write the second term as a summation of the entropy of $Q(C|S)$, i.e., $\mathbb H[Q(C|S=\mathcal S)]$ and the cross-entropy between $Q(C|S)$ and prior distribution, i.e., $\mathbb H[Q(C),P(C)]$. The former enforces high variance of contexts for each event sequence, and the latter aligns the aggregated posterior with the prior.
|
| 66 |
+
|
| 67 |
+
The distribution $P(C)$ characterizes the true context prior in real world. It serves to regularize $Q(C|S)$ through the KL term in Eq. [\[eqn_obj\]](#eqn_obj){reference-type="eqref" reference="eqn_obj"}. Choosing an appropriate $P(C)$ is important yet challenging. One straightforward solution adopted by some previous works [@dinh2015nice; @zhang2020causal] is to use a pre-defined prior such as uniform distribution. However, such a simplistic distribution may not reflect the true context prior, and potentially lead to over-regularization [@burda2015importance]. An alternative way is to estimate the prior by computing the average of the variational posterior [@hoffman2016elbo], i.e., $P(C) \approx 1/N \sum_{i=1}^N Q(C|S=\mathcal S_i)$. However, such a method is computationally expensive [@tomczak2018vae] and would often result in biased estimation given a limited quantity of training data collected within a certain time interval. Inspired by [@tomczak2018vae] using mixture of Gaussian as a flexible and learnable prior, we propose to use a mixture of pseudo variational posteriors as an estimation: $$\begin{equation}
|
| 68 |
+
\label{eqn-prior}
|
| 69 |
+
\hat P(C) = \frac{1}{R} \sum_{j=1}^R Q(C|S=\mathcal S'_j).
|
| 70 |
+
\end{equation}$$ where $\mathcal S'_j$ is a randomly generated pseudo event sequence and we set $R\ll N$ to reduce the computational cost. Note that the variational distribution $Q(C|S)$ is given by the prediction model (see details in Section [3](#sec-framework){reference-type="ref" reference="sec-framework"}). Therefore, the prior estimation by Eq. [\[eqn-prior\]](#eqn-prior){reference-type="eqref" reference="eqn-prior"} is a general reflection of how the model favors each context given uninformative inputs. Also, the estimated prior is learned with the model in a fully data-driven manner.
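A sketch of this prior estimation is shown below; `branching_posterior` stands in for the model's $Q(C\mid S)$ head, and drawing pseudo sequences uniformly over event types is an illustrative assumption:

```python
import torch

def estimate_context_prior(branching_posterior, num_event_types, R=8, seq_len=20):
    """hat P(C) = (1/R) * sum_j Q(C | S = S'_j) with random pseudo sequences."""
    posteriors = []
    for _ in range(R):
        pseudo_seq = torch.randint(0, num_event_types, (1, seq_len))
        posteriors.append(branching_posterior(pseudo_seq))   # shape (1, K)
    return torch.stack(posteriors, dim=0).mean(dim=0)        # shape (1, K)
```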
|
| 71 |
+
|
| 72 |
+
We next parameterize the components in Eq. [\[eqn_obj\]](#eqn_obj){reference-type="eqref" reference="eqn_obj"}, i.e., $P_\theta(Y|S, C)$ and $Q(C|S)$, and propose a flexible framework named as CaseQ by taking 'cas' from 'causal' and 'seq' from 'sequence'.
|
| 73 |
+
|
| 74 |
+
Given an event sequence $\mathcal S = \{x_1, x_2, \cdots, x_t\}$ as input, we use an embedding to represent each type of event. We consider a global embedding matrix $\mathbf H_x \in \mathbb R^{M\times d}$ to map each type of event into a $d$-dimensional embedding space: $$\begin{equation}
|
| 75 |
+
\mathbf h_m^1 = OneHot(x_m)^{\top} \mathbf H_x,
|
| 76 |
+
\end{equation}$$ where $M$ denotes the number of event types, $OneHot(\cdot): \mathbb Z^+ \rightarrow \{0,1\}^M$ transforms an event into a one-hot column vector, and $\mathbf h_m^1 \in \mathbb R^d$ denotes the initial representation for an event $x_m$.
|
| 77 |
+
|
| 78 |
+
To encode input sequences and accommodate the effect of $C$ on prediction, we consider an encoder $\Phi(\cdot)$ conditioned on a specific context. It takes a sequence of event embeddings and estimated context representation $\mathbf c_{(t)}$ at time step $t$ (given by the branching unit detailed next) as input, and outputs a sequence of hidden states: $$\begin{equation}
|
| 79 |
+
\begin{split}
|
| 80 |
+
[\mathbf h_1^2, \mathbf h_2^2, \cdots, \mathbf h_t^2] &= \Phi([\mathbf h_1^1, \mathbf h_2^1, \cdots, \mathbf h_t^1], \mathbf c_{(t)}; \Theta),
|
| 81 |
+
\end{split}
|
| 82 |
+
\end{equation}$$ where $\mathbf h^2_m$ is the $m$-th hidden state for the event sequence. Assume that there are $K$ types of contexts, i.e., $\mathcal C = \{\mathbf c_k\}_{k=1}^{K}$ and $c_{(t)}\in \mathcal C$. Each type of context $c_k$ is represented as a $K$-dimensional one-hot column vector $\mathbf c_k \in \{0,1\}^K$ whose $k$-th dimension is $1$. Based on the SCM in Fig. [2](#fig_diag){reference-type="ref" reference="fig_diag"}, the context would change the underlying mechanism of how the next event is generated in real-world. Therefore, we consider context-specific *inference units* $\{\Phi_k(\cdot\;;\theta_k)\}_{k=1}^{K}$ as sub-networks of the encoder $\Phi(\cdot)$ to learn context-aware sequence representations, where $\Theta = \{\theta_k\}_{k=1}^{K}$. Then, we have $$\begin{equation}
|
| 83 |
+
\label{eqn_encoder}
|
| 84 |
+
\Phi([\mathbf h_1^1, \mathbf h_2^1, \cdots, \mathbf h_t^1], \mathbf c_{(t)}; \Theta) = \sum_{k=1}^K \mathbf c_{(t)}[k] \cdot \Phi_k([\mathbf h_1^1, \cdots, \mathbf h_t^1]; \theta_k),
|
| 85 |
+
\end{equation}$$ where $\mathbf c_{(t)}[k]$ returns the $k$-th entry of the vector $\mathbf c_{(t)}$. The inference unit can be specified as arbitrary off-the-shelf sequence models such as recurrent neural network (RNN) or self-attention (SA) [@vaswani2017attention].
|
| 86 |
+
|
| 87 |
+
We next introduce a *branching unit* $\Psi(\cdot)$ to parameterize the variational posterior $Q(C|S)$ that aims to obtain $\mathbf c_{(t)}$ given the sequence. At each time step, it takes the sequence representation as input and outputs a probability vector $\mathbf q_t \in [0, 1]^K$, whose $k$-th entry represents the probability of the corresponding context $c_k$, i.e., $$\begin{equation}
|
| 88 |
+
\begin{split}
|
| 89 |
+
\mathbf q_t = \Psi(\mathbf h^1_t; \Omega),\; \text{where}\; \sum_{k=1}^{K} \mathbf q_t[k] = 1.
|
| 90 |
+
\end{split}
|
| 91 |
+
\end{equation}$$ We use a $d'$-dimensional *context embedding* $\mathbf w_k\in \mathbb R^{d'}$ to accommodate the information of each context $\mathbf c_k$ by an embedding matrix $\mathbf H_c \in \mathbb R^{K\times {d'}}$, where $\mathbf w_k = \mathbf {c_{k}}^{\top} \mathbf H_c$ and $d' = d (d + 1)$. Then, we split each context embedding into several fixed-size parameters $\mathbf W_k\in \mathbb R^{d\times d}$, $\mathbf a_k\in \mathbb R^{d}$: $$\begin{equation}
|
| 92 |
+
\begin{split}
|
| 93 |
+
\mathbf W_k = \mathbf w_k[:d^2].reshape(d, d), \; \mathbf a_k = \mathbf w_k [d^2: d (d+1)].
|
| 94 |
+
\end{split}
|
| 95 |
+
\end{equation}$$ The attribution score $s_{tk}$ that measures how likely a sequence up to time step $t$ belongs to context $c_k$ can be calculated via $$\begin{equation}
|
| 96 |
+
s_{tk} = \langle\mathbf a_k, Tanh(\mathbf W_k \mathbf h_t)\rangle.
|
| 97 |
+
\end{equation}$$ Namely, we first project the sequence representation into a new $d$-dimensional space and then use dot product to measure the similarity. The variational distribution $Q(C|S=\mathcal S)$ can be specified as a probability vector $\mathbf q_t$ that is computed by using Softmax function over $s_{tk}$, i.e., $\mathbf q_t = Softmax([s_{tk}]_{k = 1}^{K}; \tau)$, where $\tau$ controls the confidence level. To implement the random sampling procedure $c\sim Q(C|S)$ in Eq. [\[eqn_obj\]](#eqn_obj){reference-type="eqref" reference="eqn_obj"}, one can also apply the categorical reparameterization trick that uses differentiable samples from Gumbel-Softmax [@jang2017categorical] distribution, i.e., $$\begin{equation}
|
| 98 |
+
\mathbf q_t[k] = \frac{\exp \left( (s_{tk}+g)/\tau\right)}{\sum_{k=1}^{K}\exp \left((s_{tk}+g)/\tau\right)}, \quad g\sim Gumbel(0,1).
|
| 99 |
+
\end{equation}$$ Then, we can revise the right term of Eq. [\[eqn_encoder\]](#eqn_encoder){reference-type="eqref" reference="eqn_encoder"} as $\sum_{k=1}^K \mathbf q_t[k] \Phi_k([\mathbf h_1^1, \mathbf h_2^1, \cdots, \mathbf h_t^1]; \theta_k)$.
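A compact sketch of the branching unit (context embeddings split into $\mathbf W_k$ and $\mathbf a_k$, attribution scores turned into Gumbel-Softmax probabilities); dimensions follow the notation above, everything else is illustrative:

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class BranchingUnit(nn.Module):
    def __init__(self, d, K, tau=1.0):
        super().__init__()
        self.d, self.K, self.tau = d, K, tau
        self.context_emb = nn.Parameter(torch.randn(K, d * (d + 1)))  # rows of H_c

    def forward(self, h_t):                                   # h_t: (batch, d)
        scores = []
        for k in range(self.K):
            w_k = self.context_emb[k]
            W_k = w_k[: self.d * self.d].reshape(self.d, self.d)
            a_k = w_k[self.d * self.d:]
            s_tk = torch.tanh(h_t @ W_k.T) @ a_k              # <a_k, tanh(W_k h_t)>
            scores.append(s_tk)
        logits = torch.stack(scores, dim=-1)                  # (batch, K)
        # Differentiable samples from the Gumbel-Softmax distribution.
        return F.gumbel_softmax(logits, tau=self.tau, hard=False)
```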
|
| 100 |
+
|
| 101 |
+
The aforementioned single-layered architecture requires an independent inference unit for each type of context, i.e., $|\mathcal C|=K$, which has several drawbacks: 1) It is impractical to accommodate a large number of context types due to limited computational resources; 2) Real-world contexts are not entirely isolated, and thus parameterizing them independently may lead to over-fitting and undesired generalization performance given limited training data. To address these limitations, we further devise a hierarchical branching structure for the CaseQ framework. Instead of using a one-hot vector, we denote a context as a 0-1 matrix, i.e., $$\begin{equation}
|
| 102 |
+
\mathbf c_k = stack([\mathbf c^1_k, \mathbf c^2_k, \cdots, \mathbf c^{D}_k]) \in \{0,1\}^{D\times K},
|
| 103 |
+
\end{equation}$$ where $D$ denotes the number of layers and $K$ denotes the number of parallel inference units in each layer. Each row in $\mathbf c_k$ is a one-hot vector indicating a certain inference unit in a layer, and each type of context corresponds to a certain combination of inference units. The entire network could then be represented as the stack of multiple parallel inference units and branching units: $$\begin{equation}
|
| 104 |
+
\begin{split}
|
| 105 |
+
[\mathbf h_1^{l+1}, \mathbf h_2^{l+1}, \cdots, \mathbf h_t^{l+1}] &= \sum_{k=1}^{K} \mathbf q^l_t[k] \;\Phi_k^l([\mathbf h_1^l, \mathbf h_2^l, \cdots, \mathbf h_t^l]; \theta_k) ,
|
| 106 |
+
\end{split}
|
| 107 |
+
\end{equation}$$ where $\mathbf q^l_t = \Psi^l(\mathbf h^l_t)$, and $l\in\{1,2,\cdots,D\}$. The final prediction result at each time step can be obtained by ranking the relevance scores of current hidden states and event embeddings: $$\begin{equation}
|
| 108 |
+
\hat y_t = \mbox{argmax}_{m\in \{1,\cdots,M\}}(\mathbf H_x \cdot \mathbf h^{D+1}_t)[m].
|
| 109 |
+
\end{equation}$$ The above computation acts as a realization of $P_\theta(Y|S=\mathcal S, C=c)$ and $c\sim Q(C|S=\mathcal S)$, required by the first term in Eq. [\[eqn_obj\]](#eqn_obj){reference-type="eqref" reference="eqn_obj"}. Besides, the KL term in Eq. [\[eqn_obj\]](#eqn_obj){reference-type="eqref" reference="eqn_obj"} also requires density estimation from $Q(C|S=\mathcal S)$. To achieve this, we use the continued Kronecker product of the probability vector $\mathbf q_t^l$ in each layer to represent the final variational posterior: $$\begin{equation}
|
| 110 |
+
\mathbf q_t = Flatten\Big( \mathop{\bigotimes}\limits_{l=1}^{D}\mathbf q_t^l \Big)\in \mathbb R^{K^D},
|
| 111 |
+
\end{equation}$$ where $\mathbf q_t^l\in \mathbb{R}^K$ and $\otimes$ denotes the Kronecker product. The produced variational posterior is a $K^D$-dimensional vector, where each entry is the probability of a certain path (i.e., a combination of inference units across layers) in the network. This design has some merits. First, we could accommodate $|\mathcal C|=K^D$ types of contexts that grow exponentially w.r.t. model depth $D$. This helps to endow the model with larger capacity for learning diversified contexts. Second, different contexts may associate with each other by sharing the same inference unit in some layers, which acts as an effective *inductive bias* that guides the model to generalize to new environments (see more details and justifications in Appendix [8](#ap_just){reference-type="ref" reference="ap_just"}).
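The final posterior over $K^D$ contexts can be assembled from the per-layer probabilities by a chained Kronecker product, e.g.:

```python
import torch

def hierarchical_posterior(per_layer_probs):
    """per_layer_probs: list of D tensors, each of shape (K,), summing to 1.

    Returns a flat tensor of shape (K**D,) whose entries are the probabilities
    of each path (combination of inference units across layers).
    """
    q = per_layer_probs[0]
    for q_l in per_layer_probs[1:]:
        q = torch.kron(q, q_l)      # Kronecker product of probability vectors
    return q

# Toy usage: D = 2 layers, K = 3 units per layer -> 9 possible contexts.
q1 = torch.tensor([0.6, 0.3, 0.1])
q2 = torch.tensor([0.2, 0.5, 0.3])
print(hierarchical_posterior([q1, q2]).sum())   # approximately 1.0
```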
|
| 112 |
+
|
| 113 |
+
According to Eq. [\[eqn_obj\]](#eqn_obj){reference-type="eqref" reference="eqn_obj"}, the final loss function can be written as $$\begin{equation}
|
| 114 |
+
\sum_{\mathcal S} \sum_{t=1}^{|\mathcal S|-1} \left[ l(y_t, \hat y_t) + \alpha \mathcal D_{KL}\left(\mathbf q_t \| \frac{1}{R} \sum_{j=1}^R \mathbf q(\mathcal S'_j)\right) \right].
|
| 115 |
+
\end{equation}$$ where $\alpha$ is a weight parameter for balance, $\mathbf q(\mathcal S'_j)$ denotes the produced variational posterior distribution with a pseudo sequence $\mathcal S'_j$ as input, and $l(\cdot, \cdot)$ can be arbitrary loss functions (e.g., cross-entropy) depending on specific tasks.
|
2302.06058/main_diagram/main_diagram.drawio
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2302.06058/paper_text/intro_method.md
ADDED
|
@@ -0,0 +1,117 @@
|
| 1 |
+
# Introduction
|
| 2 |
+
|
| 3 |
+
The past decade has witnessed thriving deep neural networks (DNNs) in various machine learning applications [@he2016deep; @he2017mask; @girshick2014rich]. In large part, the prosperity is driven by increasing parameters and computations, which however, make DNN models too cumbersome to be deployed on resource-constrained edge devices such as cell phones and Internet-of-Things (IoT) devices. Therefore, the research community is sorely in need of technical renovation to compress the DNNs [@hubara2016binarized; @tan2019efficientnet; @lin2020hrank].
|
| 4 |
+
|
| 5 |
+
<figure id="fig1b" data-latex-placement="!t">
|
| 6 |
+
<figure id="fig1a">
|
| 7 |
+
<embed src="fig1_a.pdf" style="width:98.0%" />
|
| 8 |
+
<figcaption>Vanilla N:M Mask<span id="fig:vanilla" data-label="fig:vanilla"></span></figcaption>
|
| 9 |
+
</figure>
|
| 10 |
+
<figure id="fig1b">
|
| 11 |
+
<embed src="fig1_b.pdf" style="width:98.0%" />
|
| 12 |
+
<figcaption>Transposable N:M Mask<span id="fig:transposable" data-label="fig:transposable"></span></figcaption>
|
| 13 |
+
</figure>
|
| 14 |
+
<figcaption>Comparison between vanilla N:M mask and transposable N:M mask (2:4 case). The vanilla N:M mask <span class="citation" data-cites="zhou2021learning nvidia2020a100"></span> generates sparse weights with N:M property in rows, leading to forward acceleration but remaining dense backward propagation as the weight transposition operation impairs N:M blocks. The transposable N:M mask <span class="citation" data-cites="hubara2021accelerated"></span> generates sparse weights that have N:M property in both rows and columns, leading to forward & backward acceleration. Both methods consider only one sparse mask.</figcaption>
|
| 15 |
+
</figure>
|
| 16 |
+
|
| 17 |
+
<figure id="fig:framework" data-latex-placement="!t">
|
| 18 |
+
<embed src="fig2.pdf" style="width:100.0%" />
|
| 19 |
+
<figcaption>Framework of the proposed Bi-direction Masks (Bi-Mask). It separately builds two N:M sparse masks in the forward and backward direction, thus enabling training acceleration in both directions. During backward propagation, Bi-Mask performs an efficient row permutation to make the sparse weights have more eligible N:M weight blocks before generating the backward mask.</figcaption>
|
| 20 |
+
</figure>
|
| 21 |
+
|
| 22 |
+
By removing redundant network weights [@lecun1989optimal; @han2015learning; @he2017channel], network sparsity has emerged as a piece of modern equipment to obtain a lightweight sparse model. Through removing individual weights at arbitrary positions, fine-grained sparsity is demonstrated to reach a high sparse ratio with performance guarantee [@han2015learning; @evci2020rigging]. Unfortunately, the resulting unstructured sparse weights hardly produce acceleration on off-the-shelf hardware. Coarse-grained sparsity is more hardware friendly as it typically removes an entire weight block [@ji2018tetris; @meng2020pruning] or convolution filter [@liu2019metapruning; @lin2020hrank]. In comparison with fine-grained sparsity, the compressed model gains noticeable speedup, yet suffers more performance degradation. Therefore, it is a challenging yet valuable issue to simultaneously retain model performance of DNN models and achieve hardware acceleration.
|
| 23 |
+
|
| 24 |
+
Luckily, recent N:M fine-grained sparsity has provided a promising solution. By requiring at most N non-zero elements out of every M contiguous weights, N:M sparsity includes the performance advantage of fine-grained sparsity as well as practical acceleration thanks to the hardware innovation of the N:M sparse tensor core [@ronny2020nvidia; @fang2022algorithm]. Nvidia [@nvidia2020a100] has presented the ASP (APEX's Automatic Sparsity) paradigm that achieves 2:4 sparsity within three steps, unfolded as training a dense network, applying 2:4 fine-grained sparsity using magnitude-based pruning [@han2015learning], and re-training the sparse network. Despite the satisfying performance, ASP exhibits drawbacks in its tedious training cost as it contains dense network training and N:M sparse retraining. This largely prohibits the application of N:M sparsity when confronted with scarce training resources.
|
| 25 |
+
|
| 26 |
+
The above issue has been partially addressed by directly training an N:M sparse network from scratch [@zhou2021learning]. Yet, the sparse tensor core is only utilized to accelerate the forward multiplication during training. As illustrated in Fig. [\[fig:vanilla\]](#fig:vanilla){reference-type="ref" reference="fig:vanilla"}, the weight transposition operation in the backward impairs N:M blocks and thus fails to support acceleration in gradient calculation. To mitigate this, [@hubara2021accelerated] proposed N:M transposable mask, where a binary mask that indicates whether to remove weights is required to have N:M property along the rows and columns. Therefore, after performing transposition, it still satisfies the N:M format as shown in Fig. [\[fig:transposable\]](#fig:transposable){reference-type="ref" reference="fig:transposable"}. Unfortunately, the transposable requirement is observed to have more performance degradation, which is presumably caused by less flexibility of the sparsity pattern [@hubara2021accelerated]. In Sec. [3.2](#trans){reference-type="ref" reference="trans"}, we further show severe performance degradation at a higher sparse level such as 1:8 and 1:16. We therefore reflect on this: how can we address the efficiency of N:M sparse training without a compromise on performance?
|
| 27 |
+
|
| 28 |
+
In this paper, we attempt to answer the above question by introducing a novel method of Bi-directional Masks (Bi-Mask) that performs surprisingly well without any N:M transposable constraint. Fig. [4](#fig:framework){reference-type="ref" reference="fig:framework"} illustrates the framework of our Bi-Mask. In particular, along the forward and backward directions, two separate binary masks are constructed according to the weight magnitude [@han2015learning]. In contrast to prior work, we require the forward mask to follow the N:M property in its rows and the backward mask to follow it in its columns. In this way, we concurrently enable forward & backward acceleration from the N:M sparse tensor core. Also, the bi-directional masks benefit performance from a more flexible sparsity pattern. Nevertheless, they also bring about a gradient gap since the backward mask modifies the gradient of the forward loss. Given this issue, an efficient row permutation is further introduced before enforcing the backward mask. In detail, we first change the row order of the weight matrix and then pick the permutation with the most eligible N:M weight blocks from a dozen candidates. By changing the column order of the output gradient accordingly, we retain the same outputs with and without row permutation, and at the same time greatly reduce the gradient gap between the uni-directional and bi-directional masks.
|
| 29 |
+
|
| 30 |
+
[]{#tab:merit_comparison label="tab:merit_comparison"}
|
| 31 |
+
|
| 32 |
+
Our simple design of Bi-Mask turns out to achieve remarkable results. Besides forward & backward training acceleration, Bi-Mask substantially improves the performance of the transposable mask (T-Mask) across different N:M patterns, benchmarks, and networks. For example, Bi-Mask achieves 71.5% Top-1 accuracy when training a 1:16 sparse ResNet-50 on ImageNet, surpassing T-Mask by 5.3%. More surprisingly, our approach achieves comparable or even better results than vanilla N:M methods, whose backward propagation cannot be accelerated. For example, our Bi-Mask exceeds the Top-1 accuracy of SR-STE [@zhou2021learning] by 0.4% when training a 2:4 sparse ResNet-50 on ImageNet. Table [\[tab:merit_comparison\]](#tab:merit_comparison){reference-type="ref" reference="tab:merit_comparison"} provides an advantage comparison between different mask methods.
|
| 33 |
+
|
| 34 |
+
# Method
|
| 35 |
+
|
| 36 |
+
We first introduce some basic knowledge about the N:M fine-grained sparsity. Let $\mathbf{W}\in \mathbb{R}^{I \times J}$ be the parameter matrix from a specific network layer. Considering the input tensor $\mathbf{X}$, the forward propagation represented with form of matrix multiplication can be formulated as: $$\begin{equation}
|
| 37 |
+
\label{eq:forward}
|
| 38 |
+
\mathbf{Y}= \mathbf{W}* \mathbf{X},
|
| 39 |
+
\end{equation}$$ where $\mathbf{Y}$ is the output tensor and $*$ is the matrix multiplication operation. N:M sparsity forces at most N out of M consecutive weights in the rows of $\mathbf{W}$ to have non-zero values. The sparsity can be achieved via a binary matrix $\mathbf{B}\in \{0,1\}^{I\times J}$ where a block of every M contiguous elements contains at most N as: $$\begin{equation}
|
| 40 |
+
\label{eq:contraint_forward}
|
| 41 |
+
{\Vert\mathbf{B}_{i, j:j+\text{M}}\Vert}_0 \le \text{N},
|
| 42 |
+
\end{equation}$$ in which $i = 1, 2, 3, ..., I$ and $j =1, \text{M}, 2\text{M}, ..., J$. Then, the sparse forward propagation can be formulated as: $$\begin{equation}
|
| 43 |
+
\label{eq:sparse_forward}
|
| 44 |
+
\mathbf{Y}= (\mathbf{B}\odot \mathbf{W}) * \mathbf{X},
|
| 45 |
+
\end{equation}$$ where $\odot$ denotes the element-wise multiplication. Since $\mathbf{B}\odot \mathbf{W}$ meets N:M requirement, the matrix multiplication with $\mathbf{X}$ can be efficiently implemented by the N:M sparse tensor core, as illustrated in Fig. [\[fig:vanilla\]](#fig:vanilla){reference-type="ref" reference="fig:vanilla"}.
|
| 46 |
+
|
| 47 |
+
N:M sparse training starts from randomly-initialized networks [@zhou2021learning; @zhang2022learning], thus avoiding heavy burden of pre-training a dense model [@nvidia2020a100]. We base our study on the popular SR-STE [@zhou2021learning] for N:M sparse training, simply illustrated for ease of understanding in the following. During forward propagation, it adapts the binary mask $\mathbf{B}$ at each iteration as: $$\begin{equation}
|
| 48 |
+
\label{eq:weight_projection}
|
| 49 |
+
\mathbf{B}_{i, j+m} = \left\{ \begin{array}{ll}
|
| 50 |
+
0, \; \textrm{if} \; |\mathbf{W}_{i, j+m}| < \text{Top}(|\mathbf{W}_{i, j:j+\text{M}}|, \text{N}),\\
|
| 51 |
+
1, \; \text{otherwise},
|
| 52 |
+
\end{array} \right.
|
| 53 |
+
\end{equation}$$ where $1 \leq m \leq \text{M}$, $|\cdot|$ represents the absolute value function, and $\text{Top}(|\mathbf{W}_{i, j:j+\text{M}}|, \text{N})$ returns the N-th largest value within $|\mathbf{W}_{i, j:j+\text{M}}|$. Therefore, we obtain the forward binary mask according to the weight magnitude in each block. During backward propagation, the gradients of $\mathbf{B}\odot \mathbf{W}$ are directly passed to $\mathbf{W}$ according to the straight-through-estimator (STE) [@bengio2013estimating].
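A sketch of generating the row-wise N:M forward mask by magnitude, as a plain PyTorch rendition of the rule above (assuming for simplicity that J is divisible by M):

```python
import torch

def nm_forward_mask(W, N=2, M=4):
    """Keep the N largest-magnitude weights in every group of M along each row."""
    I, J = W.shape
    blocks = W.abs().reshape(I, J // M, M)        # (I, J/M, M) magnitude blocks
    topk = blocks.topk(N, dim=-1).indices         # positions of the kept weights
    B = torch.zeros_like(blocks)
    B.scatter_(-1, topk, 1.0)
    return B.reshape(I, J)

W = torch.randn(8, 16)
B = nm_forward_mask(W)                  # every 4 consecutive weights in a row keep 2
print(B.reshape(8, 4, 4).sum(-1))       # all entries equal 2
```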
|
| 54 |
+
|
| 55 |
+
The above sparse mask is indeed uni-directional towards forward propagation. By forming N:M blocks in rows of the mask, Eq. ([\[eq:sparse_forward\]](#eq:sparse_forward){reference-type="ref" reference="eq:sparse_forward"}) permits forward acceleration from the N:M sparse tensor core between the weights and inputs. Unfortunately, such a vanilla mask crashes backward acceleration due simply to the transposition operation. To explain, the gradient in the backward propagation is computed as: $$\begin{equation}
|
| 56 |
+
\label{eq:backward}
|
| 57 |
+
g(\mathbf{X}) = (\mathbf{B}\odot \mathbf{W})^T * g(\mathbf{Y}),
|
| 58 |
+
\end{equation}$$ where $g(\cdot)$ denotes the gradient with respect to its input. The above equation requires $(\mathbf{B}\odot \mathbf{W})^T$ to have N:M blocks in rows for accelerating multiplication with $g(\mathbf{Y})$, however, it is in columns on account of the transposition operation. Thus, the backward propagation remains dense and fails to be accelerated, as illustrated in Fig. [\[fig:vanilla\]](#fig:vanilla){reference-type="ref" reference="fig:vanilla"}.
|
| 59 |
+
|
| 60 |
+
To address this issue, [@hubara2021accelerated] presented transposable N:M mask that is required to satisfy row-wise and column-wise N:M blocks such that the transposition also undertakes an important mission of N:M property in rows. Consequently, the binary mask $\mathbf{B}$ is constrained as: $$\begin{equation}
|
| 61 |
+
\label{eq:constraint_transposable}
|
| 62 |
+
{\Vert\mathbf{B}_{i, j:j+\text{M}}\Vert}_0 \le \text{N},
|
| 63 |
+
\quad {\Vert\mathbf{B}_{k:k+\text{M}, l}\Vert}_0 \le \text{N},
|
| 64 |
+
\end{equation}$$ where $i = 1, 2, 3, ..., I$, $j = 1, \text{M}, 2\text{M}, ..., J$, $k = 1, M, 2M, ..., I$, and $l = 1, 2, 3, ..., J$. Besides, [@hubara2021accelerated] further introduced a $2$-approximation algorithm to reduce complexity of finding the transposable mask.
|
| 65 |
+
|
| 66 |
+
<figure id="fig:mask_diversity" data-latex-placement="!t">
|
| 67 |
+
<figure id="fig2a">
|
| 68 |
+
<embed src="fig3_a.pdf" style="width:98.0%" />
|
| 69 |
+
<figcaption>Flexibility Comparison<span id="fig:mask-diversity" data-label="fig:mask-diversity"></span></figcaption>
|
| 70 |
+
</figure>
|
| 71 |
+
<figure id="fig2b">
|
| 72 |
+
<embed src="fig3_b.pdf" style="width:98.0%" />
|
| 73 |
+
<figcaption>Performance Comparison<span id="fig:performance" data-label="fig:performance"></span></figcaption>
|
| 74 |
+
</figure>
|
| 75 |
+
<figcaption>Comparison between vanilla mask and transposable mask including (a) flexibility measured by mask diversity <span class="citation" data-cites="hubara2021accelerated"></span> and (b) performance of training sparse ResNet-50 <span class="citation" data-cites="he2016deep"></span> on ImageNet <span class="citation" data-cites="deng2009imagenet"></span>. </figcaption>
|
| 76 |
+
</figure>
|
| 77 |
+
|
| 78 |
+
Here we rethink the transposable pursuit for N:M sparse training. Although it enables backward acceleration, the flexibility of sparse networks is greatly restricted, which comes at the cost of performance degradation. We first report the flexibility comparison between the vanilla mask and the transposable mask under different N:M cases. Fig. [\[fig:mask-diversity\]](#fig:mask-diversity){reference-type="ref" reference="fig:mask-diversity"} measures the flexibility using mask diversity, which calculates the number of all possible masks under a given N:M case [@hubara2021accelerated]. We can see a drastic flexibility degradation, in particular for small N or M. As a consensus [@gale2019state; @nvidia2020a100], more restrictions on sparse patterns incur worse performance of sparse networks. For example, unstructured sparsity [@han2015learning] that removes arbitrary weights generally performs much better than structured sparsity [@li2016pruning] that removes entire filter weights. Consequently, severe performance degradation occurs for the transposable mask in comparison with the vanilla method, as we experimentally verify in Fig. [\[fig:performance\]](#fig:performance){reference-type="ref" reference="fig:performance"}, most notably in the very poor 1:8 and 1:16 cases. The uni-directional masks, either vanilla or transposable, do not accomplish N:M backward acceleration without a compromise on performance. Therefore, in what follows, we address this issue from the perspective of bi-directional masks.
|
| 79 |
+
|
| 80 |
+
In this section, we formally present our Bi-directional N:M masks (Bi-Mask). As its name suggests, our Bi-Mask disentangles the forward & backward weight sparsity by involving two different masks during N:M sparse training. Concretely speaking, in the forward direction, we count on the vanilla N:M mask $\mathbf{B}$ from Eq. ([\[eq:contraint_forward\]](#eq:contraint_forward){reference-type="ref" reference="eq:contraint_forward"}) that calls for N:M in rows to ensure the forward acceleration and results in better flexibility than the transposable N:M mask as we report in Fig. [\[fig:mask-diversity\]](#fig:mask-diversity){reference-type="ref" reference="fig:mask-diversity"}. Very differently, we additionally build another mask $\bar{\mathbf{B}} \in \{0, 1\}^{I \times J}$ in the backward direction with the N:M requirement on its columns as: $$\begin{equation}
|
| 81 |
+
{\Vert\bar{\mathbf{B}}_{k:k+\text{M}, l}\Vert}_0 \le \text{N},
|
| 82 |
+
\end{equation}$$ in which $k=1, \text{M}, 2\text{M}, ..., I$, and $l = 1, 2, 3,..., J$. In this fashion, the backward acceleration is supported as well without a compromise on the flexibility of backward mask, and the backward gradient $g(\mathbf{X})$ in Eq. ([\[eq:backward\]](#eq:backward){reference-type="ref" reference="eq:backward"}) is represented by the following approximation: $$\begin{equation}
|
| 83 |
+
\label{eq:gradient_calculation}
|
| 84 |
+
\bar{g}(\mathbf{X}) = (\bar{\mathbf{B}} \odot \mathbf{W})^T * g(\mathbf{Y}).
|
| 85 |
+
\vspace{-0.2cm}
|
| 86 |
+
\end{equation}$$
|
| 87 |
+
|
| 88 |
+
Nevertheless, the weight update ideally expects the gradient computed through the forward mask $\mathbf{B}$, whereas our Bi-Mask computes it through $\bar{\mathbf{B}}$, which yields a gradient gap between the practical bi-directional gradient $\bar{g}(\mathbf{X})$ and the ideal uni-directional gradient $g(\mathbf{X})$. To mitigate this issue, we adapt the backward mask $\bar{\mathbf{B}}$ to the magnitudes of the masked weights during sparse training as follows: $$\begin{equation}
|
| 89 |
+
\label{eq:backward_mask}
|
| 90 |
+
%\scriptsize
|
| 91 |
+
\bar{\mathbf{B}}_{k+m, l} = \left\{ \begin{array}{ll}
|
| 92 |
+
0, \; \textrm{if} \; |(\mathbf{B}\odot \mathbf{W})_{k+m, l}| \\ \qquad < \text{Top}(|(\mathbf{B}\odot \mathbf{W})_{k:k+\text{M}, l}|, \text{N}),\\
|
| 93 |
+
\mathbf{B}_{k+m, l}, \; \text{otherwise},
|
| 94 |
+
\end{array} \right.
|
| 95 |
+
\end{equation}$$ where $k = 1, \text{M}, 2\text{M}, ..., I$, $l = 1, 2, ..., J$, and $1 \le m \le \text{M}$. For a deeper analysis, it is easy to see that $\mathbf{B}_{k+m, l} = 0$ is a sufficient but not necessary condition for $\bar{\mathbf{B}}_{k+m, l} = 0$. That is, the event $\mathbf{B}_{k+m, l} = 0$ always produces the event $\bar{\mathbf{B}}_{k+m, l} = 0$, but is not the only way for $\bar{\mathbf{B}}_{k+m, l} = 0$ to occur.
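A minimal NumPy sketch of Eq. ([\[eq:backward_mask\]](#eq:backward_mask){reference-type="ref" reference="eq:backward_mask"}) follows: within every length-M column segment, an entry of $\bar{\mathbf{B}}$ inherits the forward mask only when the magnitude of the forward-masked weight reaches the top-N of that segment. The function and variable names are ours; ties at the threshold are kept, matching the comparison in the equation.

```python
import numpy as np

def backward_mask(B: np.ndarray, W: np.ndarray, n: int, m: int) -> np.ndarray:
    """Column-wise N:M backward mask following Eq. (backward_mask); illustrative only."""
    I, J = W.shape
    assert I % m == 0
    mag = np.abs(B * W)
    B_bar = np.zeros_like(B)
    for k in range(0, I, m):                      # length-M row blocks
        for l in range(J):                        # every column
            seg = mag[k:k + m, l]
            top_n = np.sort(seg)[-n]              # N-th largest magnitude in the segment
            keep = seg >= top_n
            B_bar[k:k + m, l] = np.where(keep, B[k:k + m, l], 0)
    return B_bar
```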
|
| 96 |
+
|
| 97 |
+
The rationale behind Eq. ([\[eq:backward_mask\]](#eq:backward_mask){reference-type="ref" reference="eq:backward_mask"}) is two-fold: 1) It maximizes the similarity of the forward and backward masks by setting $\bar{\mathbf{B}}_{k+m, l} = \mathbf{B}_{k+m, l}$ if the magnitude of $\mathbf{W}_{k+m,l}$ reaches the top-N largest in its segment. 2) Applying our backward mask does not affect the updating of weights with zero forward masks, since $\mathbf{B}_{k+m, l} = 0$ always results in $\bar{\mathbf{B}}_{k+m, l} = 0$. Unfortunately, it is possible that $\bar{\mathbf{B}}_{k+m, l} = 0$ occurs even though $\mathbf{B}_{k+m, l} \ne 0$, in which case the gradients of some weights with non-zero forward masks are mistakenly eliminated, incurring performance degradation.
|
| 98 |
+
|
| 99 |
+
::: algorithm
|
| 100 |
+
Return $\mathbf{W}\odot \mathbf{B}$.
|
| 101 |
+
:::
|
| 102 |
+
|
| 103 |
+
To decrease this possibility, we further introduce a row permutation method along the row dimension of $\mathbf{B}\odot \mathbf{W}$. Our major motivations are also two-fold: 1) We can see from Eq. ([\[eq:backward_mask\]](#eq:backward_mask){reference-type="ref" reference="eq:backward_mask"}) that the resulting mask block $\bar{\mathbf{B}}_{k:k+\text{M}, l}$ would exactly match ${\mathbf{B}}_{k:k+\text{M}, l}$ if $(\mathbf{B}\odot \mathbf{W})_{k:k+\text{M}, l}$ already has N:M sparsity, in which case no gradient gap occurs. 2) Performing row permutation of $\mathbf{B}\odot \mathbf{W}$ increases the number of eligible N:M blocks, as illustrated in Fig. [4](#fig:framework){reference-type="ref" reference="fig:framework"}. Importantly, it does not violate the gradient computation. Denoting $P \in \mathbb{N}^{I}$ as a permutation of $\{1,2,3,...,I\}$, the backward gradient $\bar{g}(\mathbf{X})$ in Eq. ([\[eq:gradient_calculation\]](#eq:gradient_calculation){reference-type="ref" reference="eq:gradient_calculation"}) can be equivalently computed as: $$\begin{equation}
|
| 104 |
+
\label{eq:backward_final}
|
| 105 |
+
\bar{g}(\mathbf{X}) = \big(\bar{\mathbf{B}} \odot (\mathbf{W}_{P, :})\big)^T * \big(g(\mathbf{Y})_{:, P}\big),
|
| 106 |
+
\end{equation}$$ where the backward mask $\bar{\mathbf{B}}$ is accordingly computed based on the permuted $(\mathbf{B}\odot \mathbf{W})_{P, :}$: $$\begin{equation}
|
| 107 |
+
\label{eq:backward_mask_final}
|
| 108 |
+
\bar{\mathbf{B}}_{k+m, l} = \left\{ \begin{array}{ll}
|
| 109 |
+
0, \; \textrm{if} \; \Big|\big((\mathbf{B}\odot \mathbf{W})_{P, :}\big)_{k+m, l}\Big| \\ \quad \quad \; < \text{Top}(\Big|\big((\mathbf{B}\odot \mathbf{W})_{P, :}\big)_{k:k+\text{M}, l}\Big|, \text{N}),\\
|
| 110 |
+
(\mathbf{B}_{P,:})_{k+m, l}, \; \text{otherwise}.
|
| 111 |
+
\end{array} \right.
|
| 112 |
+
%
|
| 113 |
+
\end{equation}$$
|
| 114 |
+
|
| 115 |
+
Therefore, we only need to find a permutation $P$ that yields more eligible N:M blocks in each column of $(\mathbf{B}\odot \mathbf{W})_{P,:}$; more N:M blocks decrease the possibility of eliminating gradients of weights with non-zero forward masks. To avoid the cumbersome $I!$ possible permutations at each training iteration, we update the permutation at a regularly spaced interval of every $\Delta T$ training iterations, and at each interval we pick, among $K$ randomly generated permutation candidates, the one that leads to the most eligible N:M blocks.
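The permutation search can be sketched as below: at each update interval, $K$ random row permutations of $\mathbf{B}\odot\mathbf{W}$ are scored by how many length-M column segments already satisfy N:M, and the best one is kept. The names and the eligibility counting are our illustration, not the released implementation.

```python
import numpy as np

def count_eligible_blocks(masked_W: np.ndarray, n: int, m: int) -> int:
    """Number of length-M column segments of B*W with at most N non-zero entries."""
    I, J = masked_W.shape
    nonzeros = (masked_W != 0).reshape(I // m, m, J).sum(axis=1)
    return int((nonzeros <= n).sum())

def pick_permutation(BW: np.ndarray, n: int, m: int, K: int = 100, seed: int = 0) -> np.ndarray:
    """Among K random row permutations, return the one with the most eligible N:M blocks."""
    rng = np.random.default_rng(seed)
    best_p = np.arange(BW.shape[0])
    best_score = count_eligible_blocks(BW, n, m)
    for _ in range(K):
        p = rng.permutation(BW.shape[0])
        score = count_eligible_blocks(BW[p, :], n, m)
        if score > best_score:
            best_p, best_score = p, score
    return best_p
```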
|
| 116 |
+
|
| 117 |
+
In Sec. [4.4](#sec:ablation){reference-type="ref" reference="sec:ablation"}, we show that $K=100$ permutation candidates already return good performance. Compared with the aforementioned $2$-approximation algorithm for the transposable N:M mask [@hubara2021accelerated], our method brings a negligible runtime burden, as experimentally reported in Sec. [4.3](#imagenet){reference-type="ref" reference="imagenet"}. The overall procedure of our method is outlined in Alg. [\[alg:bimask\]](#alg:bimask){reference-type="ref" reference="alg:bimask"}.
|
2304.04205/main_diagram/main_diagram.drawio
ADDED
|
@@ -0,0 +1 @@
| 1 |
+
<mxfile host="Electron" modified="2022-11-08T06:32:48.683Z" agent="5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) draw.io/20.3.0 Chrome/104.0.5112.114 Electron/20.1.3 Safari/537.36" version="20.3.0" etag="B88nJnm2TEaE-ySc9Y_f" type="device"><diagram id="-KIEYB52n2Ww6ZgYT5i9">7V3bkto4EP0aHkPJV/BjgJnsVG12UzupvTwaLMAbgyhbZGC/fuU7qAUIj8TMYGeqUljIln3OkbrVapmeNV7tvsT+ZvmVBDjqmSjY9axJzzSHrsP+Twv2ecEnwzCsvGgRh0FeaNQFz+F/uChERek2DHByVJESEtFwc1w4I+s1ntGjMj+OyctxtTmJjlvd+IuiRVQXPM/8CINqf4UBXRalhuvVX/yCw8WybNp17PyblV/VzguSpR+Ql4PGrIeeNY4Jofmn1W6MoxS+Epj8jh5PfFvdWYzXVOYEMz/hpx9ti4cr7ovuy6d9WYYUP2/8WXr8wijtWaMlXUXsyGAf/WSTYzwPd5hddTQna/ror8Io5fd7uGJMmeg3/ML+/4Os/HVaJYyiMYlInLVgjUcP4zG7vVFCY/IDH3wzNKeW67JvitvEMcW7k49qVAAy7WGywjTesyrlCcgu6CyEZ1UFLwcslpwvDwksC/1COYvq6jW27EMBrxhq6zLUMdmugxTECWLPfAF4DsTHx4fByBKBGAy8KUJqQBy4HIYGEmBoizC0FWBo68VwbEzQZCTC0LRs2wkUYTjkMTRviaHTni4vh7S2Hu8KkHYjWgDGPi/Sz3+GSThlyDPrQAI/Cum+rMWuf1gR0MSAocfMHKO5JmvMQV8UsWYWjJXJjAGJWfkohTlkFu5z8cUqDIK0GWEHOu5iEuQrodK5PPB4AiZVEDmQIvJpPY/Z8wSs5teOyZNMDhHHpCkww7qYHLZn8DPMtx39PAD173SJ46Tf739c/QP2VRBlOYAoBxDlaOoR5aRKnU91dX949MbuWOi6ToeO7ShyXQ3L4GEewP4wEMDsqIDZADA/L/0N/vQlncymNmOCZySOceTTkKw/bg9JqxRTdsNUw5w3kCBOl8UwJKbId9E/hm/aPeDs+ClgN575UB+1KyggZeDxbq8DSXF1SV/5dPs66ResAdXbTvpXfVPG4NLRJvCTZXY3hipnle8UAvxFpllJp4BT9REJ0rtLUtPR6o4BZxGC+eBQV8eAM/vOtxU7XQZvVZB9Q99WNHG/R9ttIN5FQt4NjTecVXfjVMWMy1twwSxc20AFJ+Fts+CMAL5rCPxaXSa8JPEcAXgdfE5XJWu8jjA4QP+YqvxCOCgXK+WhkXzwfDL68/jyIjSKFr6RMI1x1hNu4Dy5HKQJ2cYzXJxXoyq4FJic8OxQP15gCi7FkPX3B9U2aYXk3E2faqlmPL9mzX+FrJwkYCSgzZIYqJMEry59knDRhXszdYtIIirRIhENG4vIajiuNOFMIgOgRZx5jTnj8zh0ciYRAmkPZ9Uc+3rOwBKHRs4kMhzulzPeVqnkTNLANuFMlCvRGs54b0ElZ7x/pZAziejK/XLGOw4qOePdGYWcSSRA3C9nvOOgkjPenVHIGQzidOG12kd5w3WAcgxoc3gNOvY3XCGzJGIpVxFwsacwdHH2TxF8IEkLCeAT5QJbKuCDUYSH2E+ydJSn9ZzEK13JKBGeU+3ji4ZUFGj3RXyJFuSV8KU0goB3If2bff6E+giZRcE/Kbx9yyqPJ7sC7+xgf3DwDccM9cxOZGXHYw0UBes5KPsHqPx3u9oUPLnZaX5MD47fvV8B+zBvXKT9iiqNqbqUIedXXB3UZC310VDcmKoopaU0etIJrCaKH03kHVfeXMsKrAn9SgMx5XCFDkYq4+wo1QkGbnV7/TKLTsEojQJVgumbTqeZy5oBpqe5ZviAlk7NKI1C1ZqRlwzjL97nJ3nesCwoHSm7LKhPzo6Ozu58qdP2SaEKebdMoQqVxtWaqLDFkoEphuokwztaCiUDw3rfs4t3k2/5yXdJ/YVgn4rJdymqLun3Ym72W+5nsyVCgnezxRO80kIU/da3xdOG8cOuS0jmwZvohn1C+btHPkwevCkIz+rKg7fbnCwE0qzNxl4YJFHSC7s6CHqyJVURULvNuUhQEnwve4UkeHXpk4SNLtwbH2xTLqI2J0dBETXeRID4CJfsuNKEszYnR0HOGmf5GyAHVSNnbU6Ogpw1TqqHvq5GziR2uN0vZ8BWKeRM0sA24Kx8/HZyBrwFhZzpS6p32rxrDToOCjnTl1TvmC3mDDoOCjnTl1TvqA7V3DyjEZWvQa4tSVFwmNFoQOatMyRLv9wUhmC6XOmTryKousQNcqUdiUjIvedKQx/bggToypV2JKIIs238s3riA9Mwi/wkCWfnDMLp1eYrFwuzle1omrVb9qkPYHA83kcAKyyy9gZcSVesywMbaYdqA1eORBCkk9wr/FKw96Jx4jG4lDbRndrtpkx0ElGcBRPS5uSIXvxuhD8tq6OrR3rz5HsuDqQhWpdVYmplYiJRFG6SU+7JucXw414X4Lm/rV6FfWWfE9hnKTObU3y22+nCtrTg182pbjOESYxXEsjpGLD25SjTbHTisrL0JZO6jaIcbWd39zp2udm1viRNVyIe0rkjrwi5gPc0N98HBX5nRjLkcvV6rzHwxC2p8kdciYhOpzqFqmueeAJVJ7mrX4HqPKtfvK5UmfA0piO1XXWezYXSrMazff5KGvfKuBqzkd5wN/F7FwtcpzUav3gELB8iXUMUawluEPYU20aNmU13rqDm3jbY56fv1TWuxiwovfuBP6AiGjvbYKlUpyI05lhp3/D7/kVx8ie/Xr/orVMUGpO4PuCO3vcvM2VrXVBmwDVSJ7NyxHwvMrsrTQAjolIT+qZEA4mIbzkv/dWf4ugbScJsX641mRJKyaoHUz8o4ZNwsuQTa7LaLdLfRO9P/SSc9Xc9kIkz8dK//Iz8h8EzofzAdLYsDtIznoub87eUyC6aXFqxAm6hAzeo6VpVGZgdC4VX5/X5JB3H7mtjgh3WPzafd58Umq8kwGmN/wE=</diagram></mxfile>
|
2304.04205/main_diagram/main_diagram.pdf
ADDED
|
Binary file (17.6 kB). View file
|
|
|
2304.04205/paper_text/intro_method.md
ADDED
|
@@ -0,0 +1,181 @@
| 1 |
+
# Introduction
|
| 2 |
+
|
| 3 |
+
Recently, person re-identification (ReID) for pedestrian matching in non-overlapping camera views has experienced fast development. However, ReID is still challenging when people appear both in the daytime and in low-light situations where only infrared cameras can clearly capture their appearances, raising the task of visible-infrared ReID (VI-ReID). Many remarkable works [@sysumm01; @gan; @duallevel; @xmodal; @twostream; @sdl] have been witnessed in the field of VI-ReID. For realistic scenarios, discovering *rich and diverse* modality-shared semantic concepts usually helps to improve the effectiveness of VI-ReID [@nuances; @paenet]. So far, *diverse* modality-shared feature learning remains challenging.
|
| 4 |
+
|
| 5 |
+
<figure id="fig:1" data-latex-placement="t">
|
| 6 |
+
<embed src="figure/fig1v4-1.pdf" />
|
| 7 |
+
<figcaption>An illustration of our motivation on VI-ReID. It is assumed that body shape information and identity-related modality-shared information (presented in dashed box) are partially overlapped with each other. To make extracted features more <em>diverse</em>, we propose shape-erased feature learning paradigm that decomposes the representation into shape-related feature and shape-erased one. Learning shape-erased feature drives the model to discover richer modality-shared semantic concepts other than body shape. </figcaption>
|
| 8 |
+
</figure>
|
| 9 |
+
|
| 10 |
+
Among the cues for VI-ReID, we can identify pedestrians by their body shapes in many situations, for body shape carries modality-invariant information and is also robust to light changes. Nevertheless, body shape is not the only or a sufficient semantic concept that interprets the identity of a person. In some situations it may be hard to tell people apart by body shape alone, yet we can still distinguish them by other semantic concepts, such as their belongings, hairstyles or face structures. Inspired by this, we illustrate an information-theoretic measure between the visible and infrared modalities as a Venn diagram on the left of the dashed line in Fig. [1](#fig:1){reference-type="ref" reference="fig:1"}. It is assumed that body shape (presented in red) and identity-related modality-shared information (presented in the dashed box) are partially overlapped with each other. Note that *partially* is also because the body shape map contains identity-unrelated information, e.g., human pose. This partially-overlapped assumption indicates that the target information for VI-ReID, which is identity-related and modality-shared, can be divided into two independent components that are related and unrelated to body shape.
|
| 11 |
+
|
| 12 |
+
Based on the above observation and assumption, to dig more diverse modality-shared cues for VI-ReID, we expect to erase the body-shape-related semantic concepts in the features to force the VI-ReID model to extract more and other modality-shared features for identification. As illustrated on the right of the dashed line in Fig. [1](#fig:1){reference-type="ref" reference="fig:1"}, the *shape-erased* feature is decorrelated from the *shape-related* feature to simultaneously discover shape-unrelated knowledge, while *shape-related* feature can be explicitly guided by some given body shape prior, which is easy to obtain by existing pre-trained human parsing models [@humanparsing]. In this way, both *shape-related* and *shape-erased* features are explicitly quantified while the discriminative nature of the two features can be independently maintained.
|
| 13 |
+
|
| 14 |
+
Specifically, we propose a shape-erased feature learning paradigm that introduces orthogonality into the representation to satisfy a relaxation of the independence constraint. The representation is then decomposed into two sub-representations lying in two orthogonal subspaces for *shape-related* and *shape-erased* feature learning, respectively. By learning and covering the most discriminative body shape feature in one subspace, the *shape-erased* feature is forced to discover other modality-shared discriminative semantic concepts in the other subspace, as the *shape-related* feature is constrained in its orthogonal complement. Under the above assumptions, we formulate this shape-erased feature learning paradigm from a mutual information perspective, and demonstrate that jointly learning the *shape-erased* and *shape-related* objectives achieves a conditional mutual information maximization between the *shape-erased* feature and identity, discarding body shape information, thus enhancing the diversity of the learned representation explicitly. We finally design a Shape-Guided dIverse fEature Learning (SGIEL) framework that jointly optimizes the *shape-related* and *shape-erased* objectives to learn a modality-shared and discriminative integrated representation. The contributions of our work are summarized as follows:
|
| 15 |
+
|
| 16 |
+
- We propose a shape-erased feature learning paradigm for VI-ReID that decorrelates *shape-erased* feature from *shape-related* one by orthogonal decomposition. *Shape-related* feature in one subspace is guided by body shape prior while *shape-erased* feature is constrained in its orthogonal complement to discover more and other modality-shared discriminative semantic concepts, thus enhancing the diversity of the learned representation explicitly.
|
| 17 |
+
|
| 18 |
+
- Based on the proposed shape-erased feature learning paradigm, we design a Shape-Guided dIverse fEature Learning framework that jointly optimizes *shape-related* and *shape-erased* objectives to learn modality-shared and discriminative integrated representation.
|
| 19 |
+
|
| 20 |
+
- Extensive experiments on SYSU-MM01, RegDB, and HITSZ-VCM datasets demonstrate the effectiveness of our method.
|
| 21 |
+
|
| 22 |
+
# Method
|
| 23 |
+
|
| 24 |
+
Consider random variables $X^{(i)}$ and $Y$ representing the data and labels of VI-ReID, where $i\!=\!1$ for the visible modality and $i\!=\!2$ for the infrared modality. The observed values of $X^{(i)}$ and $Y$ are used to build a dataset $D\! =\!\{D^{(i)}\}_{i=1}^2$, where $D^{(i)}\!=\!\{x_j^{(i)},y_j\}_{j=1}^{N_i}$. Samples of each modality are collected from the same group of $C$ persons, but the number of samples per identity for each modality may be arbitrary. Let $f$ and $g$ denote the image encoder and classifier; the goal of VI-ReID is to learn an $f$ that extracts a representation $z^{(i)}=f(x^{(i)}) \in \mathbb{R}^n$ invariant to different modalities and different camera views.
|
| 25 |
+
|
| 26 |
+
We borrow the pre-trained Self-Correction Human Parsing (SCHP) model proposed in [@humanparsing] to segment body shape from background. Given a pixel of an image, we directly sum the probabilities of being a part of the head, torso, or limbs, predicted by SCHP, to create the body-shape map. Specifically, for each sample $x^{(i)}$ from dataset $D$, either visible or infrared, we use SCHP to produce its paired body shape map $x^{(s)}$ with the same image size and label, i.e., there is a one-to-one mapping between $D$ and its corresponding body shape data. Let $f_{s}$ and $g_{s}$ denote the body shape map encoder and classifier; the latent representation of $x^{(s)}$ is $z^{(s)} =f_{s}(x^{(s)}) \in \mathbb{R}^m,$ $m\!<\!n$.
|
| 27 |
+
|
| 28 |
+
In this section, we first explain the key independent assumption for explicitly quantifying *shape-related* and *shape-erased* features, and a relaxation to approximate it. Based on this relaxed independent constraint, we introduce the proposed Shape-Erased Feature Learning.
|
| 29 |
+
|
| 30 |
+
<figure id="fig:main" data-latex-placement="t">
|
| 31 |
+
<embed src="figure/mainv4.pdf" />
|
| 32 |
+
<figcaption>Shape-Guided Diverse Feature Learning. We utilize one shared backbone for visible, infrared images, and their body shape maps, while only BN layers are view-specific; “cat” refers to concatenating <span class="math inline"><em>z</em><sup>(<em>i</em>)</sup></span> along batch dimension, where <span class="math inline"><em>i</em> = 1(2)</span> for visible(infrared); In shape-erased feature learning paradigm, by regularizing <span class="math inline"><em>P</em></span> to be semi-orthogonal (Eq. <a href="#eq:ortho" data-reference-type="eqref" data-reference="eq:ortho">[eq:ortho]</a>), we decompose <span class="math inline"><em>z</em><sup>(<em>i</em>)</sup></span> into <em>shape-related</em> <span class="math inline"><em>z</em><sub><em>s</em><em>r</em></sub><sup>(<em>i</em>)</sup></span> and <em>shape-erased</em> <span class="math inline"><em>z</em><sub><em>s</em><em>e</em></sub><sup>(<em>i</em>)</sup></span>. <span class="math inline"><em>z</em><sub><em>s</em><em>r</em></sub><sup>(<em>i</em>)</sup></span> is learned to imitate and cover discriminative body shape features in subspace <span class="math inline"><em>P</em></span> (Eq. <a href="#eq:mse" data-reference-type="eqref" data-reference="eq:mse">[eq:mse]</a> and <a href="#eq:srce" data-reference-type="eqref" data-reference="eq:srce">[eq:srce]</a>), while <span class="math inline"><em>z</em><sub><em>s</em><em>e</em></sub><sup>(<em>i</em>)</sup></span> is decorrelated to mine other modality-shared discriminative features in subspace <span class="math inline"><em>P</em><sup>⟂</sup></span> (Eq. <a href="#eq:sc" data-reference-type="eqref" data-reference="eq:sc">[eq:sc]</a>).</figcaption>
|
| 33 |
+
</figure>
|
| 34 |
+
|
| 35 |
+
We first formulate the main design of our shape-erased feature learning paradigm as a graphical model illustrated in Fig. [2](#fig:graph){reference-type="ref" reference="fig:graph"}. It is assumed that the modality-shared *shape-related* feature, $Z_{sr}^{(i)}$, and the modality-shared *shape-erased* feature, $Z_{se}^{(i)}$, are independent of each other, and are derived from an integrated representation $Z^{(i)}$ extracted from $X^{(i)}$, i.e., $Z^{(i)}\rightarrow Z_{sr}^{(i)},\ Z^{(i)}\rightarrow Z_{se}^{(i)}$. The independence between the two components, $Z_{sr}^{(i)} \perp \!\!\! \perp Z_{se}^{(i)}$, is necessary for learning the two features simultaneously without affecting each other. We formulate this independence as the following Eq. [\[eq:partition\]](#eq:partition){reference-type="eqref" reference="eq:partition"}, $$\begin{equation}
|
| 36 |
+
\begin{split}
|
| 37 |
+
I(Z_{sr}^{(i)};Z_{se}^{(i)})&=0,
|
| 38 |
+
% I(Z_{sr}^{(i)};X^{(i)}) + I(Z_{se}^{(i)};X^{(i)}) &= I(Z^{(i)};X^{(i)}).
|
| 39 |
+
\end{split}
|
| 40 |
+
\label{eq:partition}
|
| 41 |
+
\end{equation}$$ where $I(\cdot;\cdot)$ denotes mutual information. As the mutual information estimation is complex and time-consuming, we relax the independence as an orthogonal constraint, and perform orthogonal decomposition to achieve the relaxed version of Eq. [\[eq:partition\]](#eq:partition){reference-type="eqref" reference="eq:partition"} as: $$\begin{equation}
|
| 42 |
+
\begin{split}
|
| 43 |
+
z_{sr}^{(i)}&= P^Tz^{(i)}, \\
|
| 44 |
+
z_{se}^{(i)}&=(I_n-PP^T) z^{(i)},
|
| 45 |
+
\end{split}
|
| 46 |
+
\label{eq:orthodecompose}
|
| 47 |
+
\end{equation}$$ where $P \in \mathbb{R}^{n\times m} (m<n)$ denotes a semi-orthogonal matrix and $PP^T$ forms an orthogonal projector. In this way, the *shape-related* feature is learned in subspace $P$ while the *shape-erased* feature is learned in the orthogonal complement $P^\perp$, approximately satisfying the independence constraint. In practice, as $P$ is usually initialized from a standard normal distribution, the probability that $P$ is semi-orthogonal goes to 1 as $n \rightarrow \infty$. To further enhance this orthogonality, we regularize $P$ with an $L^1$-norm on the difference between each column of $P^TP$ and the identity matrix $I_m$, as in Eq. [\[eq:ortho\]](#eq:ortho){reference-type="eqref" reference="eq:ortho"}: $$\begin{equation}
|
| 48 |
+
\mathcal{L}_{ortho} = \frac{1}{m} \sum^{m}_{j=1} \| (P^TP)_j - (I_m)_j\|_1.
|
| 49 |
+
\label{eq:ortho}
|
| 50 |
+
\end{equation}$$
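A minimal PyTorch sketch of Eq. [\[eq:orthodecompose\]](#eq:orthodecompose){reference-type="eqref" reference="eq:orthodecompose"} and Eq. [\[eq:ortho\]](#eq:ortho){reference-type="eqref" reference="eq:ortho"} is given below; the feature and subspace sizes are placeholders and the exact reduction of the penalty follows our reading of Eq. [\[eq:ortho\]](#eq:ortho){reference-type="eqref" reference="eq:ortho"}.

```python
import torch

n, m = 2048, 512                                       # illustrative feature / subspace sizes
P = torch.nn.Parameter(torch.randn(n, m) / n ** 0.5)   # learnable, approximately semi-orthogonal basis

def decompose(z: torch.Tensor):
    """Split z (batch x n) into shape-related (batch x m) and shape-erased (batch x n) parts."""
    z_sr = z @ P                                       # P^T z, Eq. (orthodecompose)
    z_se = z - (z @ P) @ P.T                           # (I_n - P P^T) z
    return z_sr, z_se

def ortho_loss(P: torch.Tensor) -> torch.Tensor:
    """L1 penalty pulling P^T P toward I_m, Eq. (ortho)."""
    gram = P.T @ P
    eye = torch.eye(P.shape[1], device=P.device)
    return (gram - eye).abs().sum() / P.shape[1]
```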
|
| 51 |
+
|
| 52 |
+
As introduced in Section [1](#sec:intro){reference-type="ref" reference="sec:intro"}, we aim to explicitly quantify $Z_{sr}^{(i)}$ and $Z_{se}^{(i)}$ so that $Z_{se}^{(i)}$ can infer identity $Y$ when discarding information used to describe $X^{(s)}$. This can be formulated as maximizing the conditional mutual information between $Z_{se}^{(i)}$ and $Y$ given body shape $X^{(s)}$, i.e., $I(Z_{se}^{(i)};Y|X^{(s)})$: $$\begin{equation}
|
| 53 |
+
\max I(Z_{se}^{(i)};Y|X^{(s)})= I(Z_{se}^{(i)};Y) - I(Z_{se}^{(i)};Y;X^{(s)}),
|
| 54 |
+
\label{eq:sedef}
|
| 55 |
+
\end{equation}$$ where the first term represents mutual information between $Z_{se}^{(i)}$ and $Y$, and the second represents mutual information between $Z_{se}^{(i)}$, $Y$ and $X^{(s)}$.
|
| 56 |
+
|
| 57 |
+
To optimize Eq. [\[eq:sedef\]](#eq:sedef){reference-type="eqref" reference="eq:sedef"}, we can maximize the first term $I(Z_{se}^{(i)};Y)$ by minimizing cross-entropy ($l_{ce}(q,p)= -\sum_{k=1}^{C} p_k \log q_k$) as Eq. [\[eq:scid\]](#eq:scid){reference-type="eqref" reference="eq:scid"}, $$\begin{equation}
|
| 58 |
+
\mathcal{L}_{seid} = \mathbb{E}_{ (z_{se}^{(i)},y)\thicksim (Z_{se}^{(i)},Y)} l_{ce}(g(z_{se}^{(i)}),y),\label{eq:scid}
|
| 59 |
+
\end{equation}$$ In the following, we will discuss how to estimate and minimize the second term $I(Z_{se}^{(i)};Y;X^{(s)})$.
|
| 60 |
+
|
| 61 |
+
Since $I(Y;X^{(s)})$ is intractable, we approximate it by the following two steps.
|
| 62 |
+
|
| 63 |
+
\
|
| 64 |
+
Firstly, we consider a requirement that a representation $Z$ of $X$ can describe $Y$ at least as well as using the original data $X$ instead. This requirement is known as *sufficiency* [@suff] that can be defined as follows:\
|
| 65 |
+
$$\begin{equation}
|
| 66 |
+
I(X; Y|Z) = 0 \iff I(X;Y)=I(Z;Y).
|
| 67 |
+
\label{eq:suff}
|
| 68 |
+
\end{equation}$$ For $Z^{(s)}$, if the classification loss $\mathcal{L}_{sid}$ is minimized, $$\begin{equation}
|
| 69 |
+
\mathcal{L}_{sid} = \mathbb{E}_{ (z^{(s)},y)\thicksim (Z^{(s)},Y)} l_{ce}(g_s(z^{(s)}),y),\label{eq:sid}
|
| 70 |
+
\end{equation}$$ then, following [@infodropout], we can assume that the representation $Z^{(s)}$ of $X^{(s)}$ is *sufficient* for $Y$, and thus replace $I(Z_{se}^{(i)};Y;X^{(s)})$ with $I(Z_{se}^{(i)};Y;Z^{(s)})$ in Eq. [\[eq:sedef\]](#eq:sedef){reference-type="eqref" reference="eq:sedef"}, combining Eq. [\[eq:suff\]](#eq:suff){reference-type="eqref" reference="eq:suff"}. This replacement can be formulated as the following **Theorem 1**.
|
| 71 |
+
|
| 72 |
+
::: theorem
|
| 73 |
+
**Theorem 1**. *If representation $Z^{(s)}$ of $X^{(s)}$ is **sufficient** for $Y$, then $I(Z^{(i)}_{se};Y;X^{(s)})=I(Z^{(i)}_{se};Y;Z^{(s)})$.*
|
| 74 |
+
:::
|
| 75 |
+
|
| 76 |
+
The proof can be found in supplementary material.
|
| 77 |
+
|
| 78 |
+
\
|
| 79 |
+
Secondly, we hope shape-related feature $Z_{sr}^{(i)}$ can fully represent real body shape feature $Z^{(s)}$, so that if $Z_{sr}^{(i)}\equiv Z^{(s)}$, then $I(Z_{se}^{(i)};Y;Z^{(s)})$ can also be approximated by $I(Z_{se}^{(i)};Y;Z_{sr}^{(i)})$: $$\begin{equation}
|
| 80 |
+
\begin{split}
|
| 81 |
+
I(Z_{se}^{(i)};Y;X^{(s)})&=I(Z_{se}^{(i)};Y;Z^{(s)})\\
|
| 82 |
+
&=I(Z_{se}^{(i)};Y;Z_{sr}^{(i)})\le I(Z_{se}^{(i)};Z_{sr}^{(i)})=0,
|
| 83 |
+
\end{split}
|
| 84 |
+
\label{eq:upbound}
|
| 85 |
+
\end{equation}$$ which is upper-bounded by Eq. [\[eq:partition\]](#eq:partition){reference-type="eqref" reference="eq:partition"}. To achieve a $Z_{sr}^{(i)}$ fully representing $Z^{(s)}$, as there exists a one-to-one mapping between $Z_{sr}^{(i)}$ and $Z^{(s)}$, we maximize $I(Z_{sr}^{(i)};Z^{(s)})$ by minimizing element-wise mean squared error (MSE) to guide $Z_{sr}^{(i)}$ to imitate $Z^{(s)}$ as Eq. [\[eq:mse\]](#eq:mse){reference-type="eqref" reference="eq:mse"}, $$\begin{equation}
|
| 86 |
+
\mathcal{L}_{srmse} =\mathbb{E}_{ (z_{sr}^{(i)},z^{(s)})\thicksim (Z_{sr}^{(i)},Z^{(s)}) } \frac{\| z_{sr}^{(i)} - z^{(s)}\|_2^2}{m} ,\label{eq:mse}
|
| 87 |
+
\end{equation}$$ where $\|\cdot \|_2$ denotes $l^2$-norm.
|
| 88 |
+
|
| 89 |
+
Moreover, to reduce cross-view discrepancy between $Z_{sr}^{(i)}$ of $X^{(i)}$ and $Z^{(s)}$ of $X^{(s)}$, we aim to minimize the following conditional mutual information $I(X^{(i)};Z_{sr}^{(i)} | X^{(s)})$: $$\begin{equation}
|
| 90 |
+
\min I(X^{(i)};Z_{sr}^{(i)} | X^{(s)}),\label{eq:crossviewcmi}
|
| 91 |
+
\end{equation}$$ denoting the information remaining in $Z_{sr}^{(i)}$ given the view of $X^{(s)}$. To minimize Eq. [\[eq:crossviewcmi\]](#eq:crossviewcmi){reference-type="eqref" reference="eq:crossviewcmi"}, we follow [@mireid] and approximate an upper bound of it as a Kullback--Leibler (KL) divergence between $p(y|z_{sr}^{(i)})$ and $p(y|z^{(s)})$; the proof can be found in the supplementary material. For simplicity, we directly minimize the cross-entropy loss in Eq. [\[eq:srce\]](#eq:srce){reference-type="eqref" reference="eq:srce"}, since the remaining entropy term of the KL divergence depends only on the target distribution $p(y|z^{(s)})$: $$\begin{equation}
|
| 92 |
+
\mathcal{L}_{srkl} = \mathbb{E}_{ (z_{sr}^{(i)},z^{(s)})\thicksim (Z_{sr}^{(i)},Z^{(s)}) } l_{ce}( g_s(z_{sr}^{(i)}),g_s(z^{(s)})).\label{eq:srce}
|
| 93 |
+
\end{equation}$$ Combining Eq. [\[eq:mse\]](#eq:mse){reference-type="eqref" reference="eq:mse"} and [\[eq:srce\]](#eq:srce){reference-type="eqref" reference="eq:srce"}, the final shape-related objective becomes: $$\begin{equation}
|
| 94 |
+
\mathcal{L}_{sr} = \mathcal{L}_{srmse} + \mathcal{L}_{srkl}.\label{eq:sr}
|
| 95 |
+
\end{equation}$$
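The shape-related objective can be sketched in PyTorch as below: the MSE term of Eq. [\[eq:mse\]](#eq:mse){reference-type="eqref" reference="eq:mse"} aligns $z_{sr}^{(i)}$ with $z^{(s)}$, and the term of Eq. [\[eq:srce\]](#eq:srce){reference-type="eqref" reference="eq:srce"} is realized, as in the text, as a cross-entropy against the shape classifier's prediction on $z^{(s)}$. Detaching the target and the unit softmax temperature are our assumptions.

```python
import torch
import torch.nn.functional as F

def shape_related_loss(z_sr: torch.Tensor, z_s: torch.Tensor, g_s: torch.nn.Module) -> torch.Tensor:
    """L_sr = L_srmse + L_srkl (Eqs. mse, srce and sr); g_s is the body-shape classifier."""
    l_mse = F.mse_loss(z_sr, z_s)                        # element-wise MSE, Eq. (mse)
    target = F.softmax(g_s(z_s), dim=1).detach()         # p(y | z^(s)), treated as a fixed target
    log_pred = F.log_softmax(g_s(z_sr), dim=1)
    l_kl = -(target * log_pred).sum(dim=1).mean()        # cross-entropy form of Eq. (srce)
    return l_mse + l_kl
```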
|
| 96 |
+
|
| 97 |
+
Minimizing Eq. [\[eq:sr\]](#eq:sr){reference-type="eqref" reference="eq:sr"}, we can represent $Z^{(s)}$ by $Z_{sr}^{(i)}$ approximately. If both *sufficiency* of $Z^{(s)}$ is achieved and Eq. [\[eq:sr\]](#eq:sr){reference-type="eqref" reference="eq:sr"} is minimized, then, $$\begin{equation}
|
| 98 |
+
I(Z_{se}^{(i)};Y|X^{(s)})\ge I(Z_{se}^{(i)};Y)
|
| 99 |
+
\end{equation}$$ will hold by Eq. [\[eq:upbound\]](#eq:upbound){reference-type="eqref" reference="eq:upbound"}. In this way, the *shape-erased* feature can be learned by minimizing the classification loss in Eq. [\[eq:scid\]](#eq:scid){reference-type="eqref" reference="eq:scid"}, as the information used to describe the discriminative body shape feature is approximately discarded by the orthogonal decomposition in Eq. [\[eq:orthodecompose\]](#eq:orthodecompose){reference-type="eqref" reference="eq:orthodecompose"}.
|
| 100 |
+
|
| 101 |
+
It is to be noted that both $Z_{sr}^{(i)}$ and $Z_{se}^{(i)}$ are assumed to be shared features between the two modalities. $Z_{sr}^{(i)}$ is learned to imitate body shape representation $Z^{(s)}$ to be modality-shared naturally; For $Z_{se}^{(i)}$, we eliminate modality-specific information in a mutual manner as follows: $$\begin{equation}
|
| 102 |
+
\min I(X^{(1)};Z_{se}^{(1)}|X^{(2)})+I(X^{(2)};Z_{se}^{(2)}|X^{(1)}).
|
| 103 |
+
\end{equation}$$ Similar to Eq. [\[eq:srce\]](#eq:srce){reference-type="eqref" reference="eq:srce"}, we approximated it as a cross-modal cross-entropy between $p(y|z_{se}^{(1)})$ and $p(y|z_{se}^{(2)})$ and vice versa: $$\begin{equation}
|
| 104 |
+
\mathcal{L}_{sekl} = \mathbb{E}_{ (z_{se}^{(i)},z_{se}^{(3-i)})\thicksim (Z_{se}^{(i)},Z_{se}^{(3-i)}) } l_{ce}(g(z_{se}^{(i)}),g(z_{se}^{(3-i)})),\label{eq:scce}
|
| 105 |
+
\end{equation}$$ where $i=1,2$.
|
| 106 |
+
|
| 107 |
+
Combining Eq. [\[eq:scid\]](#eq:scid){reference-type="eqref" reference="eq:scid"} and [\[eq:scce\]](#eq:scce){reference-type="eqref" reference="eq:scce"}, the shape-erased objective can be formulated as: $$\begin{equation}
|
| 108 |
+
\mathcal{L}_{se} = \mathcal{L}_{seid} + \mathcal{L}_{sekl}.\label{eq:sc}
|
| 109 |
+
\end{equation}$$
|
| 110 |
+
|
| 111 |
+
In Section [3.2](#sec:core){reference-type="ref" reference="sec:core"}, we decompose representation $z^{(i)}$ into two orthogonal components named *shape-related* $z_{sr}^{(i)}$ and *shape-erased* $z_{se}^{(i)}$. To further enhance the discriminative and modality-shared natures of $z^{(i)}$, we apply commonly used classification loss $\mathcal{L}_{id}$ and triplet loss [@triplet] $\mathcal{L}_{triplet}$ on $z^{(i)}$. For triplet pairs, we find the hardest positive and negative pairs among all samples in a mini-batch, consisting of both visible and infrared samples.
|
| 112 |
+
|
| 113 |
+
Similar to $\mathcal{L}_{sekl}$, we apply the following Eq. [\[eq:gkl\]](#eq:gkl){reference-type="eqref" reference="eq:gkl"} for eliminating cross-modal discrepancy in a mutual way: $$\begin{equation}
|
| 114 |
+
\mathcal{L}_{kl} = \mathbb{E}_{ (z^{(i)},z^{(3-i)})\thicksim (Z^{(i)},Z^{(3-i)}) } l_{ce}(g(z^{(i)}),g(z^{(3-i)})).\label{eq:gkl}
|
| 115 |
+
\end{equation}$$
|
| 116 |
+
|
| 117 |
+
Combining $\mathcal{L}_{id}$, $\mathcal{L}_{triplet}$ and [\[eq:gkl\]](#eq:gkl){reference-type="eqref" reference="eq:gkl"}, the integrated representation objective can be formulated as: $$\begin{equation}
|
| 118 |
+
\mathcal{L}_{int} =\mathcal{L}_{id}+\mathcal{L}_{triplet}+\mathcal{L}_{kl} .\label{eq:g}
|
| 119 |
+
\end{equation}$$
|
| 120 |
+
|
| 121 |
+
Moreover, we implement a re-weighting mechanism to focus on the more difficult objective between $\mathcal{L}_{sr}$ and $\mathcal{L}_{se}$ throughout training. Let $\theta_t$ denote the parameters to be optimized at the $t$-th training iteration; we measure this difficulty by comparing the norms of $\frac{\partial \mathcal{L}_{sr}(\theta_t) }{\partial \theta_t}$ and $\frac{\partial \mathcal{L}_{se}(\theta_t) }{\partial \theta_t}$. The objective with the larger gradient norm is regarded as more difficult at iteration $t$. To save computation cost, following [@featgrad; @featgrad2], we approximate the actual parameter-level gradients by the representation-level gradients, i.e., replacing $\frac{\partial \mathcal{L}(\theta_t) }{\partial \theta_t}$ with $\frac{\partial \mathcal{L}(\theta_t) }{\partial z^{(i)}}$. Our final re-weighting mechanism is as follows: $$\begin{equation}
|
| 122 |
+
\small
|
| 123 |
+
\begin{split}
|
| 124 |
+
\alpha_t^{sr} &= {\|\frac{\partial \mathcal{L}_{sr}(\theta_t) }{\partial z^{(i)}}\|_2^2}/({\|\frac{\partial \mathcal{L}_{sr}(\theta_t) }{\partial z^{(i)}}\|_2^2+\|\frac{\partial \mathcal{L}_{se}(\theta_t) }{\partial z^{(i)}}\|_2^2}) , \\
|
| 125 |
+
\alpha_t^{se} &= {\|\frac{\partial \mathcal{L}_{se}(\theta_t) }{\partial z^{(i)}}\|_2^2}/({\|\frac{\partial \mathcal{L}_{sr}(\theta_t) }{\partial z^{(i)}}\|_2^2+\|\frac{\partial \mathcal{L}_{se}(\theta_t) }{\partial z^{(i)}}\|_2^2}) ,
|
| 126 |
+
\end{split} \label{eq:weightingfactor}
|
| 127 |
+
\end{equation}$$
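A compact PyTorch sketch of Eq. [\[eq:weightingfactor\]](#eq:weightingfactor){reference-type="eqref" reference="eq:weightingfactor"}: the representation-level gradients of the two objectives are obtained with autograd and their squared norms produce the two weights. Detaching the weights so that they act as constants in the final loss is our assumption.

```python
import torch

def difficulty_weights(loss_sr: torch.Tensor, loss_se: torch.Tensor, z: torch.Tensor):
    """Eq. (weightingfactor): weights from representation-level gradient norms."""
    g_sr = torch.autograd.grad(loss_sr, z, retain_graph=True)[0]
    g_se = torch.autograd.grad(loss_se, z, retain_graph=True)[0]
    n_sr, n_se = g_sr.pow(2).sum(), g_se.pow(2).sum()
    alpha_sr = (n_sr / (n_sr + n_se)).detach()
    alpha_se = (n_se / (n_sr + n_se)).detach()
    return alpha_sr, alpha_se

# Usage inside the training step (sketch):
#   alpha_sr, alpha_se = difficulty_weights(loss_sr, loss_se, z)
#   loss = loss_int + alpha_sr * loss_sr + alpha_se * loss_se + loss_ortho + loss_sid
```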
|
| 128 |
+
|
| 129 |
+
The overall training loss can be summarized as the following Eq. [\[eq:overall\]](#eq:overall){reference-type="eqref" reference="eq:overall"} : $$\begin{equation}
|
| 130 |
+
\mathcal{L}_{train}= \mathcal{L}_{int} + \alpha_t^{sr} \mathcal{L}_{sr} + \alpha_t^{se} \mathcal{L}_{se} + \mathcal{L}_{ortho} + \mathcal{L}_{sid}.\label{eq:overall}
|
| 131 |
+
\end{equation}$$ The overall framework of our method is illustrated in Fig. [3](#fig:main){reference-type="ref" reference="fig:main"}. To reduce computation and GPU memory consumption, one backbone is shared among the three types of data (the two modalities and the body shape map). Considering the distribution gap among them, similar to AdaBN [@adabn], we replace each Batch Normalization (BN) layer in the backbone with three parameter-specific BNs, one per distribution. Following the BNNeck design of [@bnneck], we place three parameter-specific BNNecks after the backbone.
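The parameter-specific normalization just described can be sketched as a small routing module: the convolutional backbone is shared, while each BN layer is replaced by three BNs selected by the input's domain (visible, infrared, or body-shape map). The routing by an explicit domain index is our assumption, not necessarily the released implementation.

```python
import torch
import torch.nn as nn

class DomainSpecificBN(nn.Module):
    """Drop-in replacement for one BN layer: a separate BN per input distribution."""

    def __init__(self, num_features: int, num_domains: int = 3):
        super().__init__()
        self.bns = nn.ModuleList([nn.BatchNorm2d(num_features) for _ in range(num_domains)])

    def forward(self, x: torch.Tensor, domain: int) -> torch.Tensor:
        return self.bns[domain](x)

# domain 0: visible, 1: infrared, 2: body-shape map (illustrative convention)
bn = DomainSpecificBN(64)
out = bn(torch.randn(2, 64, 32, 16), domain=0)
```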
|
| 132 |
+
|
| 133 |
+
:::: table*
|
| 134 |
+
::: tabular
|
| 135 |
+
c\|c\|c\|cccc\|cccc \*Params & \*Method & \*Venue & &\
|
| 136 |
+
& & & Rank-1 & Rank-10 & Rank-20 & mAP & Rank-1 & Rank-10 & Rank-20 & mAP\
|
| 137 |
+
$\thickapprox$`<!-- -->`{=html}1x & CM-NAS [@cm-nas] & CVPR'21 & 61.99 & 92.87 & 97.25 & 60.02 & 67.01 & 97.02 & 99.32 & 72.95\
|
| 138 |
+
$\thickapprox$`<!-- -->`{=html}1x & CAJL [@caj] & ICCV'21 & 69.88 & 95.71 & 98.46 & 66.89 & 76.30 & 97.90 & 99.50 & 80.40\
|
| 139 |
+
$\thickapprox$`<!-- -->`{=html}1x & MPANet [@nuances] & CVPR'21 & 70.58 & 96.21 & 98.8 & 68.24 & 76.64 & 98.21 & 99.57 & 80.95\
|
| 140 |
+
$\thickapprox$`<!-- -->`{=html}1x & MMN [@mmn] & ACMMM'21 & 70.60 & 96.20 & 99.00 & 66.90 & 76.20 & 97.20 & 99.30 & 79.60\
|
| 141 |
+
$\thickapprox$`<!-- -->`{=html}1.75x & MTL [@crossmodalmultitask] & PR'22 & 67.25 & 95.38 & 98.46 & 64.29 & 69.58 & 96.66 & 99.03 & 74.37\
|
| 142 |
+
$\thickapprox$`<!-- -->`{=html}1.25x & PAENet [@paenet] & ACMMM'22 & 74.22 & **99.03** & **99.97** & **73.90** & 78.04 & **99.58** & **100.00** & **83.54**\
|
| 143 |
+
$\thickapprox$`<!-- -->`{=html}2x & MSCLNet [@msclnet] & ECCV'22 & [76.99]{.underline} & [97.93]{.underline} & [99.18]{.underline} & 71.64 & [78.49]{.underline} & [99.32]{.underline} & [99.91]{.underline} & 81.17\
|
| 144 |
+
$\thickapprox$`<!-- -->`{=html}1x & Ours & - & 75.18 & 96.87 & 99.13 & 70.12 & 78.40 & 97.46 & 98.91 & 81.20\
|
| 145 |
+
$\thickapprox$`<!-- -->`{=html}2x & Ours (C) & - & **77.12** & 97.03 & 99.08 & [72.33]{.underline} & **82.07** & 97.42 & 98.87 & [82.95]{.underline}\
|
| 146 |
+
:::
|
| 147 |
+
|
| 148 |
+
[]{#tab:sysumm01 label="tab:sysumm01"}
|
| 149 |
+
::::
|
| 150 |
+
|
| 151 |
+
:::: table*
|
| 152 |
+
::: tabular
|
| 153 |
+
c\|c\|c\|cccc\|cccc \*Strategy & \*Method & \*Venue & &\
|
| 154 |
+
& & & Rank-1 & Rank-5 & Rank-10 & mAP & Rank-1 & Rank-5 & Rank-10 & mAP\
|
| 155 |
+
Video & MITML [@vcm] & CVPR'22 & [63.74]{.underline} & [76.88]{.underline} & [81.72]{.underline} & 45.31 & 64.54 & [78.96]{.underline} & 82.98 & 47.69\
|
| 156 |
+
\*Image & LbA [@lba] & ICCV'21 & 46.38 & 65.29 & 72.23 & 30.69 & 49.30 & 69.27 & 75.90 & 32.38\
|
| 157 |
+
& MPANet [@nuances] & CVPR'21 & 46.51 & 63.07 & 70.51 & 35.26 & 50.32 & 67.31 & 73.56 & 37.80\
|
| 158 |
+
& DDAG [@ddag] & ECCV'20 & 54.62 & 69.79 & 76.05 & 39.26 & 59.03 & 74.64 & 79.53 & 41.50\
|
| 159 |
+
& VSD [@mireid] & CVPR'21 & 54.53 & 70.01 & 76.28 & 41.18 & 57.52 & 73.66 & 79.38 & 43.45\
|
| 160 |
+
& CAJL [@caj] & ICCV'21 & 56.59 & 73.49 & 79.52 & 41.49 & 60.13 & 74.62 & 79.86 & 42.81\
|
| 161 |
+
& Baseline & - & 62.02 & 75.35 & 81.35 & [47.05]{.underline} & [64.90]{.underline} & 78.64 & [83.68]{.underline} & [48.21]{.underline}\
|
| 162 |
+
& Ours & - & **67.65**& **80.32**& **84.73**& **52.30**& **70.23**& **82.19**& **86.11**& **52.54**\
|
| 163 |
+
:::
|
| 164 |
+
|
| 165 |
+
[]{#tab:vcm label="tab:vcm"}
|
| 166 |
+
::::
|
| 167 |
+
|
| 168 |
+
::: tabular
|
| 169 |
+
c\|cc\|cc \*Method & &\
|
| 170 |
+
& Rank-1 & mAP & Rank-1 & mAP\
|
| 171 |
+
CM-NAS [@cm-nas] & 82.57 & 78.31 & 84.54 & 80.32\
|
| 172 |
+
CAJL [@caj] & 84.75 & 77.82 & 85.03 & 79.14\
|
| 173 |
+
MPANet [@nuances] & 82.8 & 80.7 & 83.7 & 80.9\
|
| 174 |
+
MMN [@mmn] & 87.5 & 80.5 & 91.6 & 84.1\
|
| 175 |
+
MTL [@crossmodalmultitask] & 88.34 & 84.06 & 89.91 & 85.64\
|
| 176 |
+
PAENet [@paenet]& **95.35** & **89.98** & **97.57** & **91.41**\
|
| 177 |
+
MSCLNet[@msclnet] & 83.86 & 78.31 & 84.17 & 80.09\
|
| 178 |
+
Ours &[91.07]{.underline} &[85.23]{.underline}& [92.18]{.underline} &[86.59]{.underline}\
|
| 179 |
+
:::
|
| 180 |
+
|
| 181 |
+
[]{#tab:regdb label="tab:regdb"}
|
2306.01150/main_diagram/main_diagram.drawio
ADDED
|
@@ -0,0 +1 @@
|
| 1 |
+
<mxfile host="app.diagrams.net" modified="2022-12-29T08:48:28.629Z" agent="5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/107.0.0.0 Safari/537.36 Edg/107.0.1418.56" etag="JmHUhHZaiaGI1n_RU0BU" version="20.7.4" type="device"><diagram id="0sx5U0dI9uww27bclxkK" name="Page-1">7Vxbk5s4Fv4t++DaJFV2GTDYfuy4M5lUze5OTXpmk33ZkkG2VY0RI0Tbnof97SsJgYUkfGvT7SQkVW6QxJF0zqdzkQ70vNl6+5GAdPUPHMG45w6jbc+777nueDxmv7xgVxT4Y78oWBIUFUXOvuAz+gvKwqEszVEEs1pDinFMUVovDHGSwJDWygAheFNvtsBxvdcULKFR8DkEsVn6bxTRVVE6ccf78p8hWq7Knp1gWtSsQdlYziRbgQhvlCLvQ8+bEYxpcbXezmDMeVfypXjup4baamAEJvSUB7IPD5PVH//BwezX7fgL/f1L/t9dX1J5AnEuJ9xzg5jRe5/yIdOd5EPwZ87H+T5GCeyv5ITvWBMnYL36+wbsain/CjILzEbH+A7COqEZzgmChFX9E24sj/vvPyUhJoTL1B3efWQ/DyB77Pn3JWU204J42eGwrMlSkFhHPwfh45LgPIn6IY4xKaZAlnPwZiien/EZ+V517fp+dT0c+G/NgX7FuUAa5FhFT5B3DHjnTCowCXkxSCJZ+GcOM4pwMlDmUAy2YhpYp+wimWepwsQLJyRmM3Tl8MtZeM7obUHbnMrfxTzgNmVsh5FYa+xnQxCFLzBkZ+zsuW27KsZuDFz09O4dKgHz7p3gerYR+BJToCtVQKUcCrJZHq5EE8ChhsRPxscEY/gEBHwtNPYCtvJl8Cw8vlGB53p7FrhDGwOY5Ig65WyF85iLL8F89HMoFOQ6jSGF8Y7d4JyX40WhOCncUj7eB7LTRM4pEVqR5lzZIMq5hRNejQWDN+IJTKJs0AQHEzVlcbovc2tMcvmwWPmKrmNW4HBGUoIf4axg1X3Cx8A0DIpjrQjEaJmw2xguOIUnSChiKv1OFlPMh7ZZsSl+TgvFtGHGi5UJWUCuMIfViFQNW6pLRhBulSKpcT9CvIZUcFHWCtnxR0rzNypuN3tbMhrKJivFjkylfQHSfC0rynsNzy6kkj9D4bsWha/xHSbRHbece34qIqhziCvgn8AaxXx6D2gNs1KhD3/Daybvook0607A7uEW0S/ycX79VRLm1/dbpeJ+V94kbOJfymb85qtas39I3O2OCQ9GNYtvik6RjW8RTVnG1AOgTBnUXQ6LvGQPv2IkdEkDMkZ+nULG1nQI5UOqWdfoBO5hOhSQJaQGHQGeatKX48nr8HSbeAqC6+BJp9MynkYGnhxumu4SZspAYbMLu/W/ifAMMEFLlABOd5UzhPS55aLCQlPuL7LIAy5QgvijWQsmJuRuALEYmTWKIt7NCXYmBnMYv698Ab3z89aEdyXD5QYDvwaEMiKrma6xxXQNWzJdfmOsIoMM08HiFf1McEYEK6N0q7m+Z0cmv8AlU2iCXkMc0mHsQudIxJIaxAKLxnSClhAWPDcanohg2BZcXSOU44FpFZVWIamtt09JKjz8GXfuBTbtXvjsWVHKJUHmXSiV+AlDu3JkqQ7w6Dj/ldPTWPiagcwZ6/8ay3Uy1parayxX1/cs69Vtab2OO+fzNp3Pyll4rvdpEGrZ/Zx0iLpRRHnOlRClE2oZUVObT6EZtuFlRa6IjOZxGRdlNI92PXVfTzAiz+AijxOYdfHPGcYu0KKfiembeq4F/5O2wp9SE1q800aXSMQ/C8k87g39DOMnyOUkgx8tPnJlfDQ8sjNdbUtXe9IHfV5K0CO8yc3qyqerDbHR2bMvHxvClSWlonOkA7oebbYSVrmBdxp0R61Bt/mY0c74RkBrYNTc+bcKqi9BfVFRdHonEEnWIFbqngBBgP1lOg7QnPCD6YPtQpA2NdkoYSM/AhA1DPNMm/bZ9EOULM0nMUlXDN1FhVuUcRD2pTq+EzQWVKlBTNEmsp9hOVFRQwkjtWDUy36Eui1wIs7NlU74gqkPq6LFZjJ/RIwcp1mYj75EmtlnBENMhL3q0xUKH4VVEu3EHl05T72tIpOD7RR41NotYgyoPs0IZWkMdmVzHsyzi7+hdcrUCD/3O6wJL4nLLzotPhJ6dmvl+1srh8yvU4Jtf/H2W15hptcwO77ZopuNb9Mwe75pmG2nwe0ZZttx8CHDPCenb2iyZcDWpwIYttLf9PtquQRup6m+WU31mjrnJrKfOhv8nSL7x7bBZ6VxdTbgh14pN2oDzj/0u8VE0mbP67txg6cnbq0GrbnBZhbbb/1f5DKZDDxTHWqMru9nH+H3OdvdYIXXQJOROFlSduR7rrfw+X/ZTikv/vX0bX1WE4h/1xGoP9byOCbmwbDtqMiZtiVPM4uskme1nspV43mDsbKk5o2rqRNybdWOzFX7skI288EqIXvDgXlw28mvJr+x88ryM7OtKvk508Gkk99B+XmMR+b20ctKsMu/uZFsCT3JwdNdpVOzJXTfzCDUcraE0yXg3CqkJhcm4BiQ0gm1DSkzA8fjCQT31XsB7ObTOiX4Ca6ZMLosmTNCOd0oeSeGcq1lyZSdXZ7DHTTmcKsntRxBZd7JwW2D/cZA45Zaqkf2zx6pugVx9u7D95/I7NVzu0aO6Qp7U99EbWvncK6ZIHMgYBXOcRewnukwj1zntR3m0pnpvJvX9m50FeDrS/v09OIjhFr2blxz6xKItOCcMggwJdvjr7Aw7wZmmXB1vnnfplH58Hs5Mf8aVkKTbOW3Kgh1bBD1WrMS5rbm3PBkP2wpASHlpzkymczII/9MSR7SnIg2N2jibxINus8wGlqsiWvzddvDQ/MbsSd6kNOj7yue/f0eS7cW6mOl8KF4T1u+s1g1LZ0feX9KmmTVi5pBVn8599lO96uxTOdX+SZlE8NOik5e4FDzhxJK+dpoI4oPCcPSz/j+JZhm5E2UR/tHuHn8hftjhPip9hXInMScG7R0utVqw1BZ3BZvajFTrZ2uuwcOerxJd1B3LG61vGb9slGr7Zjngs8iXPGbCGd+H+0uioR7LD4TU73Zfw8pQHF2JO/m5nXINXTGtA45WwKHM7Jhzj0bc+x2/83LIgrefzjU+/B/</diagram></mxfile>
|
2306.01150/main_diagram/main_diagram.pdf
ADDED
|
Binary file (46.4 kB). View file
|
|
|
2306.01150/paper_text/intro_method.md
ADDED
|
@@ -0,0 +1,104 @@
| 1 |
+
# Introduction
|
| 2 |
+
|
| 3 |
+
:::: table*
|
| 4 |
+
::: center
|
| 5 |
+
+:-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
|
| 6 |
+
| **RQ1:** Which parts of task definitions are important when performing zero-shot instruction learning? - For classification tasks, label-related information is crucial, as it helps the model identify the output space and identify each label's meaning when generalizing. - Additional details or constraints besides primary mentions of input and output, in general, do not improve model performance. As model size increases, additional details become important. - Task definitions can be extensively compressed with no performance degradation, particularly for generation tasks. |
|
| 7 |
+
| |
|
| 8 |
+
| **RQ2:** Is natural language the most efficient format to communicate task instructions to models? - Framing instructions as a structured input/action/output triplet is potentially a more efficient and effective way of creating task definitions. - In fact, using only basic metadata and the label space (without label definitions) in a structured format, we achieve similar, or even better performance as with full definitions. |
|
| 9 |
+
| |
|
| 10 |
+
| **RQ3:** How can we improve models' understanding of definitions as well as model performance? - Adding a meta-tuning stage for adapting models to the writing styles of definitions improves the performance. |
|
| 11 |
+
+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
|
| 12 |
+
:::
|
| 13 |
+
::::
|
| 14 |
+
|
| 15 |
+
Large language models, or LLMs [@devlin2019bert; @2020t5; @Brown2020LanguageMA], demonstrate the ability to perform zero-shot cross-task generalization by learning from task instructions [@Sanh2022MultitaskPT; @Wei2022FinetunedLM; @Mishra2022CrossTaskGV; @Wang2022BenchmarkingGV; @ouyang2022training; @openai_chatgpt]. By fine-tuning an LLM with *task definitions* and a few *demonstration examples* on upstream training tasks, the model acquires the ability to perform new tasks with unseen definitions and examples. This is known as *instruction learning*.
|
| 16 |
+
|
| 17 |
+
However, a natural question is: to what extent does the zero-shot generalization ability derive from the model's understanding of task definitions? Recent work in prompt-based learning suggests models might not interpret even short prompts as people expect [@webson-pavlick-2022-prompt; @shin2020autoprompt; @deng2022rlprompt; @Prasad2022GrIPS]. Task definitions are special prompts that are usually long and encode rich information. We imagine models' understanding of definitions also departs from human expectation. To investigate this question, we conduct a systematic analysis using both human annotation and computational approaches. Our study is based on the English portion of the large-scale SUPER-NATURALINSTRUCTION (NIv2) dataset [@Wang2022BenchmarkingGV], which comprises 757 training tasks and 119 unseen test tasks.
|
| 18 |
+
|
| 19 |
+
First, we explore which type of information in task definitions is necessary for maintaining model performance. We define eight categories of content and provide a fine-grained annotation for all the sentences in task definitions. Then, we retrain the model with every occurrence of each category in NIv2 ablated out, and measure the model performance on the validation set with the same ablation. We observe variable contributions to model performance across content types. For example, input descriptions are in general not helpful to generalization performance, i.e., removing them causes little to no degradation of performance. However, larger models tend to leverage them more. On the other hand, the label information is of great importance. Providing natural-language Label Definitions helps specify the task-specific meaning of common verbalizers while providing the label verbalizer only helps in generalizing to a new label space. We also find that we can achieve similar or even better performance compared to full definitions by only providing the models with a label space along with very basic task metadata, e.g., category, domain, reasoning type, etc. This suggests that costly human generation of task definitions may not always be more helpful than available basic metadata about the task.
|
| 20 |
+
|
| 21 |
+
Second, motivated by @feng2018pathologies, to understand what is necessary for models to perform well, we propose **S**yntax-guided **T**ask **D**efinition **C**ompression (STDC), an automatic approach to removing content in task definitions that is not helpful for model performance. STDC queries the model for predictions on inputs and only requires black-box access. We can remove around 60% of tokens while achieving \~3 points of performance improvement of T5-XL on a held-out set. This implies that instead of understanding the whole definition of the task, the models are relying on particular text while ignoring the rest. Along with similar observations as the ablation study above, STDC reveals new patterns of how models understand definitions. For example, models usually do not need to see the whole label space, but might infer the rest with a partial label space.
|
| 22 |
+
|
| 23 |
+
Given our observations, we conclude that current instruction learning models rely on partial information in definitions. We imagine the lack of consistency in the creation process of task definitions might hinder the model from attending to all key information in definitions. Thus, we propose two complementary strategies to overcome this. The first strategy is to replace the full definition with a JSON-like formatted triplet of input, action, and output. A JSON-like triplet simplifies the creation of task definitions by asking authors of the definition to fill in blanks in templates instead of writing from scratch, and the common structure increases consistency between authors. The second strategy is to perform meta-tuning before instruction learning to adapt LLMs to any predefined styles of task definitions. We achieve 4.2, 4.0, and 2.1 Rouge-L improvements on BART-Large, T5-Large, and T5-XL, respectively, combining these two strategies. We summarize our key findings in Table [\[table:summaryoffindings\]](#table:summaryoffindings){reference-type="ref" reference="table:summaryoffindings"}. [^3]
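For concreteness, the structured alternative to a free-form definition can be serialized as below; the field names mirror the input/action/output triplet and the values are taken from the Yelp rating task shown later in Table [\[table:examplesoftriplet\]](#table:examplesoftriplet){reference-type="ref" reference="table:examplesoftriplet"}, while the exact serialization string is our illustration rather than the paper's template.

```python
import json

definition = {
    "input": "a review about a place",
    "action": "provide a rating from '1 star' to '5 stars' for this place",
    "output": "a rating from '1 star' to '5 stars'",
}
# The structured definition is prepended to each task input in place of the free-form text.
instruction = "Definition: " + json.dumps(definition)
print(instruction)
```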
|
| 24 |
+
|
| 25 |
+
{#fig:annotation}
|
| 26 |
+
|
| 27 |
+
In this section, we introduce the formulation of instruction learning, as well as the models and benchmarks used in our study. Further details are presented in Appendix [10](#appendix:details){reference-type="ref" reference="appendix:details"}.
|
| 28 |
+
|
| 29 |
+
Instruction learning aims to train a language model so that it understands natural language task instructions and is able to generalize to a new task by solely reading new instructions. A task instruction may include several elements. In this paper, we follow @Wang2022BenchmarkingGV and adopt instructions with 1) a *task definition*: a high-level description of the input and output of the task; and 2) *demonstration examples*: some input-output examples for the task. Note that other content such as *things to avoid* and *negative examples* may also be included but have been shown to be less effective [@Mishra2022CrossTaskGV].
|
| 30 |
+
|
| 31 |
+
A task instruction is generally pre-pended to an input and passed to the LLM. The LLM is first fine-tuned on several upstream training tasks and then asked to conduct inference on an unseen test task, given only the task instruction.
|
| 32 |
+
|
| 33 |
+
We adopt the English portion of NIv2 [@Wang2022BenchmarkingGV], which contains 757 training tasks and 119 unseen test tasks. The test tasks fall into 12 categories, including textual entailment, data-to-text generation, etc. However, we also consider a more coarse split of test tasks into *classification* and *generation* tasks, based on whether the output space is fixed or not. For each task, we select 100 examples for either fine-tuning or testing and report performance of Rouge-L [@lin-2004-rouge], following @Wang2022BenchmarkingGV. We use the task definition and two demonstration examples as the instruction. The original paper does not provide an official validation split, which we prepare by putting aside 76 training tasks. We fix the validation set for all experiments to ensure no data leakage. Note that for later experiments, results for Section [3](#section:ablation){reference-type="ref" reference="section:ablation"} and Section [4](#compressionsection){reference-type="ref" reference="compressionsection"} are reported on the validation split which we hold out ourselves while results for Section [5](#improvesection){reference-type="ref" reference="improvesection"} are on the official test set.
|
| 34 |
+
|
| 35 |
+
We experiment with the T5-Large and T5-XL models [@2020t5] since the family of T5 sequence-to-sequence models has been shown by @Wang2022BenchmarkingGV to achieve superior performance after fine-tuning compared to frozen models like GPT-3 [@Brown2020LanguageMA] or InstructGPT [@ouyang2022training] on NIv2 benchmark[^4]. We also consider BART-Large [@lewis2020bart] in the experiments. **All results are reported as average performance over three random seeds**.
|
| 36 |
+
|
| 37 |
+
# Method
|
| 38 |
+
|
| 39 |
+
We present the pseudo-code for the compression algorithm.
|
| 40 |
+
|
| 41 |
+
:::: algorithm
|
| 42 |
+
**Input:** A model $f$; a set of examples for a specific task $S$: $\mathcal{D}_{S}$; the full task definition: $X_{full} = \{w_1, w_2, ..., w_n\}$; the performance of $f$ on $\mathcal{D}_{S}$ with $X_{full}$: $f\left(\mathcal{D}_{S} | X_{full} \right)$; the constituency tree of the task definition: $\mathcal{T}$.\
|
| 43 |
+
**Output:** Compressed definition $X_{compressed}$.
|
| 44 |
+
|
| 45 |
+
::: algorithmic
|
| 46 |
+
Initialization: traverse the parse tree $\mathcal{T}$; find the tree depth $Dep(\mathcal{T})$ and the set of nodes $N_i$ at each layer $i = 1, 2, \cdots, Dep(\mathcal{T})$; set $X_{compressed} = X_{full}$. For each node $n_i$, remove $n_i$ and compute the new performance of $f$ with $X_{full} \setminus n_i$: $f\left(\mathcal{D}_{S} | X_{full} \setminus n_i \right)$; if the performance does not drop, remove $n_i$ together with its subtree, i.e., $X_{compressed} = X_{compressed} \setminus n_i$. Return $X_{compressed}$.
|
| 47 |
+
:::
|
| 48 |
+
|
| 49 |
+
[]{#Algo label="Algo"}
|
| 50 |
+
::::
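A hedged Python sketch of the layer-wise compression loop above: constituency-tree nodes are visited layer by layer and a node's subtree is dropped whenever removing its span does not lower the model's score on the task's examples. The `score` and `node.span` interfaces, the subset bookkeeping, and the `tolerance` argument are placeholders of ours, not the paper's code.

```python
def stdc_compress(score, tokens, layers, tolerance=0.0):
    """Greedy syntax-guided compression of a task definition (sketch of the algorithm above).

    score(tokens) -> model performance on the task's examples given this definition.
    layers       -> constituency-tree nodes per depth; node.span is a set of indices
                    into `tokens` covering the node's subtree.
    """
    removed = set()
    base = score(tokens)
    for nodes in layers:                                     # traverse the tree layer by layer
        for node in nodes:
            if node.span <= removed:                         # subtree already gone with an ancestor
                continue
            trial = removed | node.span
            trial_tokens = [t for i, t in enumerate(tokens) if i not in trial]
            new_score = score(trial_tokens)
            if new_score >= base - tolerance:                # removal does not hurt performance
                removed, base = trial, new_score
    return [t for i, t in enumerate(tokens) if i not in removed]
```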
|
| 51 |
+
|
| 52 |
+
We present examples of the input/action/output triplets as task definitions in Table [\[table:examplesoftriplet\]](#table:examplesoftriplet){reference-type="ref" reference="table:examplesoftriplet"}.
|
| 53 |
+
|
| 54 |
+
:::: center
|
| 55 |
+
::: {#table:hyperil}
|
| 56 |
+
Hyperparameter Range
|
| 57 |
+
---------------- ------------------------------------------------------------
|
| 58 |
+
Learning rate $1 \times 10^{-5}$, $5 \times 10^{-5}$, $1 \times 10^{-4}$
|
| 59 |
+
Batch size 16, 32, 64, 128
|
| 60 |
+
\# Epochs 2, 3, 6
|
| 61 |
+
|
| 62 |
+
: Hyper-parameter choices
|
| 63 |
+
:::
|
| 64 |
+
::::
|
| 65 |
+
|
| 66 |
+
We find that we need slightly different hyperparameters for instruction learning after meta-tuning. We conduct a grid search over the learning rate, batch size, and number of epochs on the validation set; the candidate values are shown in Table [4](#table:hyperil){reference-type="ref" reference="table:hyperil"}. For all models, the learning rate follows a constant schedule with 300 warm-up steps.
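As an illustration of the schedule just described, a minimal PyTorch setup with a linear warm-up over 300 steps followed by a constant learning rate might look as follows; the stand-in model and the particular learning rate are placeholders picked from the search space in Table [4](#table:hyperil){reference-type="ref" reference="table:hyperil"}.

```python
import torch
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR

model = torch.nn.Linear(8, 8)                      # stand-in for the fine-tuned LM
optimizer = AdamW(model.parameters(), lr=5e-5)     # one of the searched learning rates
warmup_steps = 300
# Linear warm-up for 300 steps, then constant, matching the schedule described above.
scheduler = LambdaLR(optimizer, lr_lambda=lambda step: min(1.0, (step + 1) / warmup_steps))
# call scheduler.step() once per optimization step during training
```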
|
| 67 |
+
|
| 68 |
+
:::: table*
|
| 69 |
+
::: center
|
| 70 |
+
+:--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
|
| 71 |
+
| **TASK1** task1292_yelp_review_full_text_categorization You are given a review about a place. You need to provide a rating from \"1 star\" to \"5 stars\" for this place. **Input**: \[\"a review about a place\"\] **Action**: \[\"provide a rating from \" 1 star \" to \" 5 stars \" for this place\"\] **Output**: \[\"a rating from \" 1 star \" to \" 5 stars \"\"\] |
|
| 72 |
+
| |
|
| 73 |
+
| **TASK2** task321_stereoset_classification_religion Given two sentences regarding the stereotypes of religions, determine whether the second sentence is either a stereotype, anti-stereotype, or is unrelated with respect to the first sentence and the target. A stereotype is a widely held but oversimplified notion of a group of people. An anti-stereotype is an idea that goes against the widely held but oversimplified notion of a group of people. Sentences and targets are unrelated if there is no relevance between them. **Input**: \[\"two sentences regarding the stereotypes of religions\"\] **Action**: \[\"determine whether the second sentence is either a stereotype , anti-stereotype , or is unrelated with respect to the first sentence and the target\"\] **Output**: \[\"stereotype, anti-stereotype\", \"A stereotype is a widely held but oversimplified notion of a group of people\", \"An anti-stereotype is an idea that goes against the widely held but oversimplified notion of a group of people\"\] **TASK3** task628_xlwic_word_with_different_meaning_sentence_generation In this task, you are given a word, followed by a sentence. You should respond with a valid sentence which contains the word with the same meaning as in the given sentence. For example, if the given sentence refers to a 'fly' as the insect, you should not respond with a sentence which uses 'fly' as the verb. You may use the word in a different tense than is given. For example, you may use the word 'ended' in the output where the given input word is 'end'. **Input**: \[\"a word, followed by a sentence\"\] **Action**: \[\"respond with a valid sentence which contains the word with the same meaning as in the given sentence\"\] **Output**: \[\"a valid sentence\"\] |
|
| 74 |
+
| |
|
| 75 |
+
| **TASK4** task405_narrativeqa_question_generation You will be given a summary of a story. You need to create a question that can be answered from the story. You can create a question about characters, events, facts and beliefs, etc. Your question should be specific, try not to use pronouns instead of full names. As the stories are sometimes movie plots, they will contain actor names in parentheses. You should not use those names. Only use character names. Try to ask a question about all parts of the plot, not just the beginning. **Input**: \[\"a summary of a story\"\] **Action**: \[\"create a question that can be answered from the story\"\] **Output**: \[\"a question\"\] |
|
| 76 |
+
| |
|
| 77 |
+
| **TASK5** task1202_atomic_classification_xneed In this task, you are given two phrases: Head and Tail, separated with \<sep\>. The Head and the Tail events are short phrases possibly involving participants. The names of specific people have been replaced by generic words (e.g., PersonX, PersonY, PersonZ). PersonX is always the subject of the event. You have to determine whether it is plausible for the Head to desire the Tail or not. In this task, desire means desires of sentient entities. For example, doctors likely desire to cure a patient. Classify your answers into \"Yes\" and \"No\". The phrase may also contain a placeholder that can be an object, a person, and/or an action. **Input**: \[\"two phrases : Head and Tail , separated with \< sep \>\"\] **Action**: \[\"determine whether it is plausible for the Head to desire the Tail or not\"\] **Output**: \[\"Yes, No\"\] |
|
| 78 |
+
| |
|
| 79 |
+
| **TASK6** task1580_eqasc-perturbed_question_generation Given a statement, generate a question such that the answer is contained in that statement. **Input**: \[\"a statement\"\] **Action**: \[\"generate a question such that the answer is contained in that statement\"\] **Output**: \[\"a question\"\] |
|
| 80 |
+
| |
|
| 81 |
+
| **TASK7** task383_matres_classification You will be given a context and a verb separated with a newline character, and you have to answer if the given verb is a negation or not. A verb is a negation if it is not going to exist, not happen, or has no effect. The output should be \"Yes\" if the verb is a negation and \"No\" otherwise. **Input**: \[\"a context and a verb separated with a newline character\"\] **Action**: \[\"answer if the given verb is a negation or not\"\] **Output**: \[\"Yes, No\", \"\" Yes \" if the verb is a negation and \" No \" otherwise\"\] |
|
| 82 |
+
+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
|
| 83 |
+
:::
|
| 84 |
+
::::
|
| 85 |
+
|
| 86 |
+
[^1]: $^*$Work done when Fan Yin was an intern at Salesforce.
|
| 87 |
+
|
| 88 |
+
[^2]: $^\diamondsuit$Jesse and Philippe contributed equally; order is random.
|
| 89 |
+
|
| 90 |
+
[^3]: Code will be released at\
|
| 91 |
+
<https://github.com/fanyin3639/Rethinking-instruction-effectiveness>.
|
| 92 |
+
|
| 93 |
+
[^4]: At the time this paper was finished. See Section [6](#related work){reference-type="ref" reference="related work"} and Section [7](#discussion){reference-type="ref" reference="discussion"} for updated discussions.
|
| 94 |
+
|
| 95 |
+
[^5]: With https://github.com/yzhangcs/parser
|
| 96 |
+
|
| 97 |
+
[^6]: https://instructions.apps.allenai.org/
|
| 98 |
+
|
| 99 |
+
[^7]: https://huggingface.co/models?sort=downloads\
|
| 100 |
+
&search=google%2Ft5
|
| 101 |
+
|
| 102 |
+
[^8]: https://github.com/yizhongw/Tk-Instruct
|
| 103 |
+
|
| 104 |
+
[^9]: https://github.com/microsoft/DeepSpeed
|
2308.02000/main_diagram/main_diagram.drawio
ADDED
|
@@ -0,0 +1,253 @@
| 1 |
+
<mxfile host="Electron" modified="2023-05-08T05:16:38.219Z" agent="Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) draw.io/21.2.8 Chrome/112.0.5615.165 Electron/24.2.0 Safari/537.36" etag="h2i1_4zK3keYe6rRM4JB" version="21.2.8" type="device">
|
| 2 |
+
<diagram name="第 1 页" id="DK2vwwx7p4-NovbYCbh_">
|
| 3 |
+
<mxGraphModel dx="957" dy="703" grid="1" gridSize="10" guides="1" tooltips="1" connect="1" arrows="1" fold="1" page="1" pageScale="1" pageWidth="1654" pageHeight="1169" math="0" shadow="0">
|
| 4 |
+
<root>
|
| 5 |
+
<mxCell id="0" />
|
| 6 |
+
<mxCell id="1" parent="0" />
|
| 7 |
+
<mxCell id="d_Zs7AjmUmSOx8Cpjod4-75" value="" style="rounded=1;whiteSpace=wrap;html=1;fillColor=#f5f5f5;fontColor=#333333;strokeColor=#666666;" parent="1" vertex="1">
|
| 8 |
+
<mxGeometry x="340" y="700" width="360" height="230" as="geometry" />
|
| 9 |
+
</mxCell>
|
| 10 |
+
<mxCell id="d_Zs7AjmUmSOx8Cpjod4-76" value="<font face="Consolas" style="font-size: 18px;">Predicate<br>Head</font>" style="rounded=1;whiteSpace=wrap;html=1;fillColor=#f8cecc;strokeColor=#b85450;" parent="1" vertex="1">
|
| 11 |
+
<mxGeometry x="770" y="680" width="100" height="60" as="geometry" />
|
| 12 |
+
</mxCell>
|
| 13 |
+
<mxCell id="d_Zs7AjmUmSOx8Cpjod4-78" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;entryX=-0.02;entryY=0.069;entryDx=0;entryDy=0;entryPerimeter=0;dashed=1;strokeWidth=3;fillColor=#e1d5e7;strokeColor=#9673a6;exitX=0.983;exitY=0.068;exitDx=0;exitDy=0;exitPerimeter=0;" parent="1" source="d_Zs7AjmUmSOx8Cpjod4-79" target="d_Zs7AjmUmSOx8Cpjod4-93" edge="1">
|
| 14 |
+
<mxGeometry relative="1" as="geometry">
|
| 15 |
+
<mxPoint x="320" y="690" as="sourcePoint" />
|
| 16 |
+
<Array as="points">
|
| 17 |
+
<mxPoint x="716" y="688" />
|
| 18 |
+
</Array>
|
| 19 |
+
</mxGeometry>
|
| 20 |
+
</mxCell>
|
| 21 |
+
<mxCell id="d_Zs7AjmUmSOx8Cpjod4-129" style="edgeStyle=orthogonalEdgeStyle;rounded=1;orthogonalLoop=1;jettySize=auto;html=1;exitX=0.988;exitY=0.52;exitDx=0;exitDy=0;exitPerimeter=0;fillColor=#ffe6cc;strokeColor=#d79b00;" parent="1" edge="1">
|
| 22 |
+
<mxGeometry relative="1" as="geometry">
|
| 23 |
+
<mxPoint x="319.64" y="806.2" as="sourcePoint" />
|
| 24 |
+
<mxPoint x="380" y="903.0000000000001" as="targetPoint" />
|
| 25 |
+
<Array as="points">
|
| 26 |
+
<mxPoint x="350" y="806" />
|
| 27 |
+
<mxPoint x="350" y="903" />
|
| 28 |
+
</Array>
|
| 29 |
+
</mxGeometry>
|
| 30 |
+
</mxCell>
|
| 31 |
+
<mxCell id="d_Zs7AjmUmSOx8Cpjod4-79" value="<i style=""><font style="" face="Consolas"><font style="font-size: 18px;">LDM Encoder</font><br></font></i>" style="rounded=1;whiteSpace=wrap;html=1;verticalAlign=middle;horizontal=0;fillColor=#e1d5e7;strokeColor=#9673a6;" parent="1" vertex="1">
|
| 32 |
+
<mxGeometry x="290" y="670" width="30" height="260" as="geometry" />
|
| 33 |
+
</mxCell>
|
| 34 |
+
<mxCell id="d_Zs7AjmUmSOx8Cpjod4-81" style="edgeStyle=orthogonalEdgeStyle;rounded=1;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;strokeWidth=2;fillColor=#d5e8d4;strokeColor=#82b366;" parent="1" source="d_Zs7AjmUmSOx8Cpjod4-84" target="d_Zs7AjmUmSOx8Cpjod4-94" edge="1">
|
| 35 |
+
<mxGeometry relative="1" as="geometry">
|
| 36 |
+
<Array as="points">
|
| 37 |
+
<mxPoint x="520" y="805" />
|
| 38 |
+
<mxPoint x="520" y="845" />
|
| 39 |
+
</Array>
|
| 40 |
+
</mxGeometry>
|
| 41 |
+
</mxCell>
|
| 42 |
+
<mxCell id="d_Zs7AjmUmSOx8Cpjod4-82" style="edgeStyle=orthogonalEdgeStyle;rounded=1;orthogonalLoop=1;jettySize=auto;html=1;entryX=0;entryY=0.25;entryDx=0;entryDy=0;dashed=1;strokeColor=#6c8ebf;strokeWidth=2;fillColor=#dae8fc;" parent="1" target="d_Zs7AjmUmSOx8Cpjod4-87" edge="1">
|
| 43 |
+
<mxGeometry relative="1" as="geometry">
|
| 44 |
+
<mxPoint x="460" y="765" as="sourcePoint" />
|
| 45 |
+
<Array as="points">
|
| 46 |
+
<mxPoint x="460" y="770" />
|
| 47 |
+
<mxPoint x="460" y="747" />
|
| 48 |
+
</Array>
|
| 49 |
+
</mxGeometry>
|
| 50 |
+
</mxCell>
|
| 51 |
+
<mxCell id="d_Zs7AjmUmSOx8Cpjod4-83" style="edgeStyle=orthogonalEdgeStyle;rounded=1;orthogonalLoop=1;jettySize=auto;html=1;strokeWidth=2;dashed=1;strokeColor=#82b366;entryX=0;entryY=0.75;entryDx=0;entryDy=0;exitX=0.669;exitY=0.985;exitDx=0;exitDy=0;exitPerimeter=0;fillColor=#d5e8d4;" parent="1" source="d_Zs7AjmUmSOx8Cpjod4-84" target="d_Zs7AjmUmSOx8Cpjod4-94" edge="1">
|
| 52 |
+
<mxGeometry relative="1" as="geometry">
|
| 53 |
+
<mxPoint x="470" y="840" as="sourcePoint" />
|
| 54 |
+
<Array as="points">
|
| 55 |
+
<mxPoint x="460" y="840" />
|
| 56 |
+
<mxPoint x="460" y="863" />
|
| 57 |
+
</Array>
|
| 58 |
+
</mxGeometry>
|
| 59 |
+
</mxCell>
|
| 60 |
+
<mxCell id="d_Zs7AjmUmSOx8Cpjod4-84" value="<font style="font-size: 16px;" face="Consolas">U-Net Encoder<br><br></font>" style="rounded=1;whiteSpace=wrap;html=1;verticalAlign=top;fillColor=#dae8fc;strokeColor=#6c8ebf;shadow=0;" parent="1" vertex="1">
|
| 61 |
+
<mxGeometry x="380" y="770" width="120" height="70" as="geometry" />
|
| 62 |
+
</mxCell>
|
| 63 |
+
<mxCell id="d_Zs7AjmUmSOx8Cpjod4-85" style="edgeStyle=orthogonalEdgeStyle;rounded=1;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.75;exitDx=0;exitDy=0;entryX=1;entryY=0.5;entryDx=0;entryDy=0;fillColor=#ffe6cc;strokeColor=#d79b00;strokeWidth=1.2;" parent="1" source="d_Zs7AjmUmSOx8Cpjod4-87" target="d_Zs7AjmUmSOx8Cpjod4-97" edge="1">
|
| 64 |
+
<mxGeometry relative="1" as="geometry">
|
| 65 |
+
<Array as="points">
|
| 66 |
+
<mxPoint x="660" y="780" />
|
| 67 |
+
<mxPoint x="680" y="780" />
|
| 68 |
+
<mxPoint x="680" y="903" />
|
| 69 |
+
<mxPoint x="660" y="903" />
|
| 70 |
+
</Array>
|
| 71 |
+
</mxGeometry>
|
| 72 |
+
</mxCell>
|
| 73 |
+
<mxCell id="d_Zs7AjmUmSOx8Cpjod4-108" style="edgeStyle=orthogonalEdgeStyle;rounded=1;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;endArrow=none;endFill=0;strokeWidth=2;entryX=-0.027;entryY=0.485;entryDx=0;entryDy=0;entryPerimeter=0;fillColor=#dae8fc;strokeColor=#6c8ebf;" parent="1" target="d_Zs7AjmUmSOx8Cpjod4-84" edge="1">
|
| 74 |
+
<mxGeometry relative="1" as="geometry">
|
| 75 |
+
<mxPoint x="381" y="800" as="targetPoint" />
|
| 76 |
+
<mxPoint x="661" y="765" as="sourcePoint" />
|
| 77 |
+
<Array as="points">
|
| 78 |
+
<mxPoint x="681" y="765" />
|
| 79 |
+
<mxPoint x="681" y="715" />
|
| 80 |
+
<mxPoint x="351" y="715" />
|
| 81 |
+
<mxPoint x="351" y="805" />
|
| 82 |
+
<mxPoint x="377" y="805" />
|
| 83 |
+
</Array>
|
| 84 |
+
</mxGeometry>
|
| 85 |
+
</mxCell>
|
| 86 |
+
<mxCell id="d_Zs7AjmUmSOx8Cpjod4-110" value="<font style="font-size: 12px;" face="Consolas">Repeat K time steps</font>" style="edgeLabel;html=1;align=center;verticalAlign=bottom;resizable=0;points=[];labelBackgroundColor=none;" parent="d_Zs7AjmUmSOx8Cpjod4-108" vertex="1" connectable="0">
|
| 87 |
+
<mxGeometry x="-0.1172" y="1" relative="1" as="geometry">
|
| 88 |
+
<mxPoint as="offset" />
|
| 89 |
+
</mxGeometry>
|
| 90 |
+
</mxCell>
|
| 91 |
+
<mxCell id="d_Zs7AjmUmSOx8Cpjod4-131" value="" style="edgeStyle=orthogonalEdgeStyle;rounded=1;orthogonalLoop=1;jettySize=auto;html=1;entryX=0.667;entryY=0.192;entryDx=0;entryDy=0;strokeWidth=2;fillColor=#e1d5e7;strokeColor=#9673a6;entryPerimeter=0;" parent="1" source="d_Zs7AjmUmSOx8Cpjod4-87" target="d_Zs7AjmUmSOx8Cpjod4-93" edge="1">
|
| 92 |
+
<mxGeometry relative="1" as="geometry">
|
| 93 |
+
<Array as="points">
|
| 94 |
+
<mxPoint x="710" y="765" />
|
| 95 |
+
<mxPoint x="710" y="710" />
|
| 96 |
+
<mxPoint x="740" y="710" />
|
| 97 |
+
</Array>
|
| 98 |
+
</mxGeometry>
|
| 99 |
+
</mxCell>
|
| 100 |
+
<mxCell id="d_Zs7AjmUmSOx8Cpjod4-87" value="<font style="" face="Consolas"><br></font>" style="rounded=1;whiteSpace=wrap;html=1;verticalAlign=bottom;fillColor=#dae8fc;strokeColor=#6c8ebf;labelPosition=center;verticalLabelPosition=middle;align=center;" parent="1" vertex="1">
|
| 101 |
+
<mxGeometry x="540" y="730" width="120" height="70" as="geometry" />
|
| 102 |
+
</mxCell>
|
| 103 |
+
<mxCell id="d_Zs7AjmUmSOx8Cpjod4-89" style="edgeStyle=orthogonalEdgeStyle;rounded=1;orthogonalLoop=1;jettySize=auto;html=1;exitX=0.25;exitY=0;exitDx=0;exitDy=0;entryX=0.5;entryY=1;entryDx=0;entryDy=0;fillColor=#ffe6cc;strokeColor=#d79b00;strokeWidth=1.2;" parent="1" source="d_Zs7AjmUmSOx8Cpjod4-97" target="d_Zs7AjmUmSOx8Cpjod4-90" edge="1">
|
| 104 |
+
<mxGeometry relative="1" as="geometry">
|
| 105 |
+
<Array as="points">
|
| 106 |
+
<mxPoint x="440" y="890" />
|
| 107 |
+
</Array>
|
| 108 |
+
</mxGeometry>
|
| 109 |
+
</mxCell>
|
| 110 |
+
<mxCell id="d_Zs7AjmUmSOx8Cpjod4-90" value="<font size="1" face="Consolas"><i style="font-size: 14px;">Attention</i></font>" style="rounded=1;whiteSpace=wrap;html=1;fillColor=#ffe6cc;strokeColor=#d79b00;" parent="1" vertex="1">
|
| 111 |
+
<mxGeometry x="390" y="810" width="100" height="20" as="geometry" />
|
| 112 |
+
</mxCell>
|
| 113 |
+
<mxCell id="d_Zs7AjmUmSOx8Cpjod4-91" value="<font size="1" face="Consolas"><i style="font-size: 14px;">Attention</i></font>" style="rounded=1;whiteSpace=wrap;html=1;fillColor=#ffe6cc;strokeColor=#d79b00;" parent="1" vertex="1">
|
| 114 |
+
<mxGeometry x="550" y="740" width="100" height="20" as="geometry" />
|
| 115 |
+
</mxCell>
|
| 116 |
+
<mxCell id="d_Zs7AjmUmSOx8Cpjod4-92" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=0.973;exitY=0.153;exitDx=0;exitDy=0;entryX=0;entryY=0.5;entryDx=0;entryDy=0;exitPerimeter=0;strokeWidth=2;fillColor=#f8cecc;strokeColor=#b85450;" parent="1" source="d_Zs7AjmUmSOx8Cpjod4-93" target="d_Zs7AjmUmSOx8Cpjod4-76" edge="1">
|
| 117 |
+
<mxGeometry relative="1" as="geometry">
|
| 118 |
+
<Array as="points" />
|
| 119 |
+
</mxGeometry>
|
| 120 |
+
</mxCell>
|
| 121 |
+
<mxCell id="d_Zs7AjmUmSOx8Cpjod4-115" style="edgeStyle=orthogonalEdgeStyle;rounded=1;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;endArrow=oval;endFill=1;strokeColor=#97D077;" parent="1" source="d_Zs7AjmUmSOx8Cpjod4-94" edge="1">
|
| 122 |
+
<mxGeometry relative="1" as="geometry">
|
| 123 |
+
<mxPoint x="710" y="930" as="targetPoint" />
|
| 124 |
+
</mxGeometry>
|
| 125 |
+
</mxCell>
|
| 126 |
+
<mxCell id="d_Zs7AjmUmSOx8Cpjod4-94" value="<font style="font-size: 24px;" face="Consolas"><i style="font-size: 16px;"><br></i></font>" style="rounded=1;whiteSpace=wrap;html=1;verticalAlign=top;fillColor=#d5e8d4;strokeColor=#82b366;" parent="1" vertex="1">
|
| 127 |
+
<mxGeometry x="540" y="810" width="120" height="70" as="geometry" />
|
| 128 |
+
</mxCell>
|
| 129 |
+
<mxCell id="d_Zs7AjmUmSOx8Cpjod4-95" value="<font size="1" face="Consolas"><i style="font-size: 14px;">Attention</i></font>" style="rounded=1;whiteSpace=wrap;html=1;fillColor=#ffe6cc;strokeColor=#d79b00;" parent="1" vertex="1">
|
| 130 |
+
<mxGeometry x="550" y="850" width="100" height="20" as="geometry" />
|
| 131 |
+
</mxCell>
|
| 132 |
+
<mxCell id="d_Zs7AjmUmSOx8Cpjod4-96" style="edgeStyle=orthogonalEdgeStyle;rounded=1;orthogonalLoop=1;jettySize=auto;html=1;exitX=0.75;exitY=0;exitDx=0;exitDy=0;entryX=0.5;entryY=1;entryDx=0;entryDy=0;fillColor=#ffe6cc;strokeColor=#d79b00;strokeWidth=1.2;" parent="1" source="d_Zs7AjmUmSOx8Cpjod4-97" target="d_Zs7AjmUmSOx8Cpjod4-95" edge="1">
|
| 133 |
+
<mxGeometry relative="1" as="geometry" />
|
| 134 |
+
</mxCell>
|
| 135 |
+
<mxCell id="d_Zs7AjmUmSOx8Cpjod4-128" style="edgeStyle=orthogonalEdgeStyle;rounded=1;orthogonalLoop=1;jettySize=auto;html=1;exitX=-0.009;exitY=0.512;exitDx=0;exitDy=0;fillColor=#ffe6cc;strokeColor=#d79b00;exitPerimeter=0;" parent="1" source="d_Zs7AjmUmSOx8Cpjod4-97" edge="1">
|
| 136 |
+
<mxGeometry relative="1" as="geometry">
|
| 137 |
+
<mxPoint x="382" y="902.5" as="sourcePoint" />
|
| 138 |
+
<mxPoint x="602" y="740" as="targetPoint" />
|
| 139 |
+
<Array as="points">
|
| 140 |
+
<mxPoint x="362" y="903" />
|
| 141 |
+
<mxPoint x="362" y="723" />
|
| 142 |
+
<mxPoint x="602" y="723" />
|
| 143 |
+
</Array>
|
| 144 |
+
</mxGeometry>
|
| 145 |
+
</mxCell>
|
| 146 |
+
<mxCell id="d_Zs7AjmUmSOx8Cpjod4-97" value="<i><font style="font-size: 14px;" face="Consolas">Context (Other players)</font></i>" style="rounded=1;whiteSpace=wrap;html=1;fillColor=#ffe6cc;strokeColor=#d79b00;" parent="1" vertex="1">
|
| 147 |
+
<mxGeometry x="380" y="890" width="280" height="25" as="geometry" />
|
| 148 |
+
</mxCell>
|
| 149 |
+
<mxCell id="d_Zs7AjmUmSOx8Cpjod4-98" value="<font face="Consolas"><i style="">Italic</i></font>" style="rounded=1;whiteSpace=wrap;html=1;" parent="1" vertex="1">
|
| 150 |
+
<mxGeometry x="776.3299999999999" y="925" width="85" height="20" as="geometry" />
|
| 151 |
+
</mxCell>
|
| 152 |
+
<mxCell id="d_Zs7AjmUmSOx8Cpjod4-99" value="<font style="font-size: 10px;">Optinal module</font>" style="text;html=1;strokeColor=none;fillColor=none;align=center;verticalAlign=middle;whiteSpace=wrap;rounded=0;" parent="1" vertex="1">
|
| 153 |
+
<mxGeometry x="776.02" y="946" width="85.62" height="17" as="geometry" />
|
| 154 |
+
</mxCell>
|
| 155 |
+
<mxCell id="d_Zs7AjmUmSOx8Cpjod4-101" value="" style="endArrow=classic;html=1;rounded=0;strokeWidth=2;" parent="1" edge="1">
|
| 156 |
+
<mxGeometry width="50" height="50" relative="1" as="geometry">
|
| 157 |
+
<mxPoint x="779.61" y="794" as="sourcePoint" />
|
| 158 |
+
<mxPoint x="863.7099999999999" y="794" as="targetPoint" />
|
| 159 |
+
</mxGeometry>
|
| 160 |
+
</mxCell>
|
| 161 |
+
<mxCell id="d_Zs7AjmUmSOx8Cpjod4-102" value="" style="endArrow=classic;html=1;rounded=0;dashed=1;strokeWidth=2;strokeColor=#000000;" parent="1" edge="1">
|
| 162 |
+
<mxGeometry width="50" height="50" relative="1" as="geometry">
|
| 163 |
+
<mxPoint x="779.61" y="823" as="sourcePoint" />
|
| 164 |
+
<mxPoint x="863.99" y="823" as="targetPoint" />
|
| 165 |
+
</mxGeometry>
|
| 166 |
+
</mxCell>
|
| 167 |
+
<mxCell id="d_Zs7AjmUmSOx8Cpjod4-103" value="<font face="Consolas">Normal</font>" style="rounded=1;whiteSpace=wrap;html=1;" parent="1" vertex="1">
|
| 168 |
+
<mxGeometry x="776.9499999999999" y="881" width="85" height="20" as="geometry" />
|
| 169 |
+
</mxCell>
|
| 170 |
+
<mxCell id="d_Zs7AjmUmSOx8Cpjod4-104" value="<font style="font-size: 10px;">Basic module</font>" style="text;html=1;strokeColor=none;fillColor=none;align=center;verticalAlign=middle;whiteSpace=wrap;rounded=0;" parent="1" vertex="1">
|
| 171 |
+
<mxGeometry x="776.9499999999999" y="900" width="84.37" height="20" as="geometry" />
|
| 172 |
+
</mxCell>
|
| 173 |
+
<mxCell id="d_Zs7AjmUmSOx8Cpjod4-105" value="<font style="font-size: 10px;">Skip connection</font>" style="text;html=1;strokeColor=none;fillColor=none;align=center;verticalAlign=middle;whiteSpace=wrap;rounded=0;" parent="1" vertex="1">
|
| 174 |
+
<mxGeometry x="776.24" y="826.3199999999999" width="85.62" height="20" as="geometry" />
|
| 175 |
+
</mxCell>
|
| 176 |
+
<mxCell id="d_Zs7AjmUmSOx8Cpjod4-107" value="<font style="font-size: 16px;" face="Consolas">U-Net Decoder<br>(<i>Actor</i>)<br></font>" style="text;html=1;strokeColor=none;fillColor=none;align=center;verticalAlign=middle;whiteSpace=wrap;rounded=0;" parent="1" vertex="1">
|
| 177 |
+
<mxGeometry x="535" y="760" width="130" height="38" as="geometry" />
|
| 178 |
+
</mxCell>
|
| 179 |
+
<mxCell id="d_Zs7AjmUmSOx8Cpjod4-116" value="<font style="font-size: 10px;">Data flow</font>" style="text;html=1;strokeColor=none;fillColor=none;align=center;verticalAlign=middle;whiteSpace=wrap;rounded=0;" parent="1" vertex="1">
|
| 180 |
+
<mxGeometry x="776.02" y="792" width="85.62" height="25" as="geometry" />
|
| 181 |
+
</mxCell>
|
| 182 |
+
<mxCell id="d_Zs7AjmUmSOx8Cpjod4-117" value="" style="endArrow=oval;html=1;rounded=0;endFill=1;exitX=0.045;exitY=-0.047;exitDx=0;exitDy=0;exitPerimeter=0;" parent="1" source="d_Zs7AjmUmSOx8Cpjod4-118" edge="1">
|
| 183 |
+
<mxGeometry width="50" height="50" relative="1" as="geometry">
|
| 184 |
+
<mxPoint x="776.99" y="855" as="sourcePoint" />
|
| 185 |
+
<mxPoint x="858.7" y="855" as="targetPoint" />
|
| 186 |
+
</mxGeometry>
|
| 187 |
+
</mxCell>
|
| 188 |
+
<mxCell id="d_Zs7AjmUmSOx8Cpjod4-118" value="<font style="font-size: 10px;">Predict value</font>" style="text;html=1;strokeColor=none;fillColor=none;align=center;verticalAlign=middle;whiteSpace=wrap;rounded=0;" parent="1" vertex="1">
|
| 189 |
+
<mxGeometry x="776.64" y="856" width="85.62" height="20" as="geometry" />
|
| 190 |
+
</mxCell>
|
| 191 |
+
<mxCell id="d_Zs7AjmUmSOx8Cpjod4-77" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.52;exitDx=0;exitDy=0;strokeWidth=2;exitPerimeter=0;fillColor=#dae8fc;strokeColor=#6c8ebf;" parent="1" source="d_Zs7AjmUmSOx8Cpjod4-79" target="d_Zs7AjmUmSOx8Cpjod4-84" edge="1">
|
| 192 |
+
<mxGeometry relative="1" as="geometry">
|
| 193 |
+
<Array as="points" />
|
| 194 |
+
</mxGeometry>
|
| 195 |
+
</mxCell>
|
| 196 |
+
<mxCell id="d_Zs7AjmUmSOx8Cpjod4-80" style="edgeStyle=orthogonalEdgeStyle;rounded=1;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;strokeWidth=2;fillColor=#dae8fc;strokeColor=#6c8ebf;" parent="1" source="d_Zs7AjmUmSOx8Cpjod4-84" target="d_Zs7AjmUmSOx8Cpjod4-87" edge="1">
|
| 197 |
+
<mxGeometry relative="1" as="geometry">
|
| 198 |
+
<Array as="points">
|
| 199 |
+
<mxPoint x="520" y="805" />
|
| 200 |
+
<mxPoint x="520" y="765" />
|
| 201 |
+
</Array>
|
| 202 |
+
</mxGeometry>
|
| 203 |
+
</mxCell>
|
| 204 |
+
<mxCell id="d_Zs7AjmUmSOx8Cpjod4-132" value="<div style="text-align: justify;"></div>" style="ellipse;whiteSpace=wrap;html=1;aspect=fixed;fillColor=#e1d5e7;strokeColor=#9673a6;" parent="1" vertex="1">
|
| 205 |
+
<mxGeometry x="500" y="950" width="10" height="10" as="geometry" />
|
| 206 |
+
</mxCell>
|
| 207 |
+
<mxCell id="d_Zs7AjmUmSOx8Cpjod4-133" value="<font style="font-size: 10px;"><i>LDM</i></font>" style="text;html=1;strokeColor=none;fillColor=none;align=center;verticalAlign=middle;whiteSpace=wrap;rounded=0;" parent="1" vertex="1">
|
| 208 |
+
<mxGeometry x="510" y="940" width="30" height="30" as="geometry" />
|
| 209 |
+
</mxCell>
|
| 210 |
+
<mxCell id="d_Zs7AjmUmSOx8Cpjod4-134" value="<div style="text-align: justify;"></div>" style="ellipse;whiteSpace=wrap;html=1;aspect=fixed;fillColor=#ffe6cc;strokeColor=#d79b00;" parent="1" vertex="1">
|
| 211 |
+
<mxGeometry x="550" y="950" width="10" height="10" as="geometry" />
|
| 212 |
+
</mxCell>
|
| 213 |
+
<mxCell id="d_Zs7AjmUmSOx8Cpjod4-135" value="<font style="font-size: 10px;"><i>Attention</i></font>" style="text;html=1;strokeColor=none;fillColor=none;align=center;verticalAlign=middle;whiteSpace=wrap;rounded=0;" parent="1" vertex="1">
|
| 214 |
+
<mxGeometry x="560" y="940" width="50" height="30" as="geometry" />
|
| 215 |
+
</mxCell>
|
| 216 |
+
<mxCell id="d_Zs7AjmUmSOx8Cpjod4-136" value="<div style="text-align: justify;"></div>" style="ellipse;whiteSpace=wrap;html=1;aspect=fixed;fillColor=#d5e8d4;strokeColor=#82b366;" parent="1" vertex="1">
|
| 217 |
+
<mxGeometry x="620" y="950" width="10" height="10" as="geometry" />
|
| 218 |
+
</mxCell>
|
| 219 |
+
<mxCell id="d_Zs7AjmUmSOx8Cpjod4-137" value="<font style="font-size: 10px;"><i>PPO</i></font>" style="text;html=1;strokeColor=none;fillColor=none;align=center;verticalAlign=middle;whiteSpace=wrap;rounded=0;" parent="1" vertex="1">
|
| 220 |
+
<mxGeometry x="630" y="940" width="30" height="30" as="geometry" />
|
| 221 |
+
</mxCell>
|
| 222 |
+
<mxCell id="d_Zs7AjmUmSOx8Cpjod4-138" value="<div style="text-align: justify;"></div>" style="ellipse;whiteSpace=wrap;html=1;aspect=fixed;fillColor=#dae8fc;strokeColor=#6c8ebf;" parent="1" vertex="1">
|
| 223 |
+
<mxGeometry x="300" y="950" width="10" height="10" as="geometry" />
|
| 224 |
+
</mxCell>
|
| 225 |
+
<mxCell id="d_Zs7AjmUmSOx8Cpjod4-139" value="<span style="font-size: 10px;">Diffusion model</span>" style="text;html=1;strokeColor=none;fillColor=none;align=center;verticalAlign=middle;whiteSpace=wrap;rounded=0;" parent="1" vertex="1">
|
| 226 |
+
<mxGeometry x="310" y="940" width="80" height="30" as="geometry" />
|
| 227 |
+
</mxCell>
|
| 228 |
+
<mxCell id="d_Zs7AjmUmSOx8Cpjod4-140" value="<div style="text-align: justify;"></div>" style="ellipse;whiteSpace=wrap;html=1;aspect=fixed;fillColor=#f8cecc;strokeColor=#b85450;" parent="1" vertex="1">
|
| 229 |
+
<mxGeometry x="400" y="950" width="10" height="10" as="geometry" />
|
| 230 |
+
</mxCell>
|
| 231 |
+
<mxCell id="d_Zs7AjmUmSOx8Cpjod4-142" value="" style="edgeStyle=orthogonalEdgeStyle;rounded=1;orthogonalLoop=1;jettySize=auto;html=1;strokeWidth=2;fillColor=#f8cecc;strokeColor=#b85450;dashed=1;" parent="1" edge="1">
|
| 232 |
+
<mxGeometry relative="1" as="geometry">
|
| 233 |
+
<mxPoint x="660" y="765" as="sourcePoint" />
|
| 234 |
+
<mxPoint x="740" y="710" as="targetPoint" />
|
| 235 |
+
<Array as="points">
|
| 236 |
+
<mxPoint x="710" y="765" />
|
| 237 |
+
<mxPoint x="710" y="710" />
|
| 238 |
+
</Array>
|
| 239 |
+
</mxGeometry>
|
| 240 |
+
</mxCell>
|
| 241 |
+
<mxCell id="d_Zs7AjmUmSOx8Cpjod4-141" value="<span style="font-size: 10px;">Predicate head</span>" style="text;html=1;strokeColor=none;fillColor=none;align=center;verticalAlign=middle;whiteSpace=wrap;rounded=0;" parent="1" vertex="1">
|
| 242 |
+
<mxGeometry x="410" y="940" width="80" height="30" as="geometry" />
|
| 243 |
+
</mxCell>
|
| 244 |
+
<mxCell id="d_Zs7AjmUmSOx8Cpjod4-93" value="<i style=""><font style="" face="Consolas"><font style="font-size: 18px;">LDM Decoder</font><br></font></i>" style="rounded=1;whiteSpace=wrap;html=1;horizontal=0;fillColor=#e1d5e7;strokeColor=#9673a6;" parent="1" vertex="1">
|
| 245 |
+
<mxGeometry x="720" y="670" width="30" height="260" as="geometry" />
|
| 246 |
+
</mxCell>
|
| 247 |
+
<mxCell id="JmrVX_fI45OATfd5FMgu-1" value="<i style="border-color: var(--border-color); color: rgb(0, 0, 0); font-family: Consolas; font-variant-ligatures: normal; font-variant-caps: normal; font-weight: 400; letter-spacing: normal; orphans: 2; text-indent: 0px; text-transform: none; widows: 2; word-spacing: 0px; -webkit-text-stroke-width: 0px; text-decoration-thickness: initial; text-decoration-style: initial; text-decoration-color: initial; font-size: 16px;"><font style="border-color: var(--border-color); font-size: 16px;">Critic<br>(U-Net)<br style="border-color: var(--border-color);"></font></i>" style="text;whiteSpace=wrap;html=1;verticalAlign=middle;align=center;labelBackgroundColor=none;" vertex="1" parent="1">
|
| 248 |
+
<mxGeometry x="520" y="810" width="160" height="40" as="geometry" />
|
| 249 |
+
</mxCell>
|
| 250 |
+
</root>
|
| 251 |
+
</mxGraphModel>
|
| 252 |
+
</diagram>
|
| 253 |
+
</mxfile>
|