Add files using upload-large-folder tool
- 2006.16981/main_diagram/main_diagram.drawio +1 -0
- 2006.16981/main_diagram/main_diagram.pdf +0 -0
- 2006.16981/paper_text/intro_method.md +78 -0
- 2009.10815/main_diagram/main_diagram.drawio +1 -0
- 2009.10815/paper_text/intro_method.md +170 -0
- 2101.09429/main_diagram/main_diagram.drawio +1 -0
- 2101.09429/main_diagram/main_diagram.pdf +0 -0
- 2101.09429/paper_text/intro_method.md +207 -0
- 2104.00322/main_diagram/main_diagram.drawio +1 -0
- 2104.00322/paper_text/intro_method.md +64 -0
- 2104.03149/main_diagram/main_diagram.drawio +0 -0
- 2104.03149/paper_text/intro_method.md +42 -0
- 2104.05938/main_diagram/main_diagram.drawio +1 -0
- 2104.05938/main_diagram/main_diagram.pdf +0 -0
- 2104.05938/paper_text/intro_method.md +59 -0
- 2105.04241/main_diagram/main_diagram.drawio +1 -0
- 2105.04241/main_diagram/main_diagram.pdf +0 -0
- 2105.04241/paper_text/intro_method.md +82 -0
- 2106.15004/main_diagram/main_diagram.drawio +0 -0
- 2106.15004/paper_text/intro_method.md +91 -0
- 2108.09645/main_diagram/main_diagram.drawio +1 -0
- 2108.09645/main_diagram/main_diagram.pdf +0 -0
- 2108.09645/paper_text/intro_method.md +132 -0
- 2109.07983/main_diagram/main_diagram.drawio +1 -0
- 2109.07983/main_diagram/main_diagram.pdf +0 -0
- 2109.07983/paper_text/intro_method.md +59 -0
- 2109.09031/main_diagram/main_diagram.drawio +1 -0
- 2109.09031/main_diagram/main_diagram.pdf +0 -0
- 2109.09031/paper_text/intro_method.md +59 -0
- 2110.08499/main_diagram/main_diagram.drawio +1 -0
- 2110.08499/main_diagram/main_diagram.pdf +0 -0
- 2110.08499/paper_text/intro_method.md +161 -0
- 2111.13131/main_diagram/main_diagram.drawio +1 -0
- 2111.13131/main_diagram/main_diagram.pdf +0 -0
- 2111.13131/paper_text/intro_method.md +69 -0
- 2112.02321/main_diagram/main_diagram.drawio +0 -0
- 2112.02321/paper_text/intro_method.md +81 -0
- 2112.07337/main_diagram/main_diagram.drawio +1 -0
- 2112.07337/main_diagram/main_diagram.pdf +0 -0
- 2112.07337/paper_text/intro_method.md +122 -0
- 2202.03609/main_diagram/main_diagram.drawio +0 -0
- 2202.03609/paper_text/intro_method.md +110 -0
- 2202.05343/main_diagram/main_diagram.drawio +1 -0
- 2202.05343/main_diagram/main_diagram.pdf +0 -0
- 2202.05343/paper_text/intro_method.md +54 -0
- 2203.14698/paper_text/intro_method.md +118 -0
- 2203.15266/main_diagram/main_diagram.drawio +0 -0
- 2203.15266/paper_text/intro_method.md +86 -0
- 2205.06457/main_diagram/main_diagram.drawio +1 -0
- 2205.06457/main_diagram/main_diagram.pdf +0 -0
2006.16981/main_diagram/main_diagram.drawio
ADDED
@@ -0,0 +1 @@
<mxfile host="www.draw.io" modified="2020-02-05T00:49:53.286Z" agent="Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.117 Safari/537.36" version="12.6.5" etag="4zMLb2SwzgEU18WUJra8" type="device"><diagram id="9VOPSXKteyY8vuu9WrpU">7V1tk5s2EP41nmk+hNE74uPdJdd2JulkknTafuRsnc0Eg4vx3bm/vuLNFhJnYwdskcCHixHyAnqe1a52V84E3y1ffk381eJjPBPhBIHZywS/myAEGSfyn6xlW7Rw4BYN8ySYlZ32DV+C/0TZCMrWTTAT61rHNI7DNFjVG6dxFIlpWmvzkyR+rnd7jMP6XVf+XBgNX6Z+aLb+FczSRdlKANhf+E0E80V5a07LC0u/6lw2rBf+LH5WmvD7Cb5L4jgtPi1f7kSYDV41LsX37l+5unuwRERpmy+g4gtPfrgp3618rnRbvayIZjfZmMmzKI5k4+0iXYbyDMqP6zSJv4m7OIyTvDcGgL2/ud9dqYYH5S1+klaipqG/XgfTr4sgqi7dB2ElVrwE6d/yM3AQxuX5P/m5B1B5/i6jE6hOtsrJJ5EES5GKpGx7lIKVR4QPPhS5lChNtsVtOK3Oi9tAd9ewv09+tlXP9DsVgydmGn3k281FiYdnQgR3wEuNEbEUmGxll+c9tSr+LBRSVW2JCP00eKrf0S8ZPt+J293hUxzIGyNQaiOtlKrURYKBAwAkkrRu/rcucB1vkqkoZajE0sVi5gBUk0yx6wBcF1eMjCFOflCGYt+Us7eZyfhnZvIrvCvAKnEllhEP8zrxaDV7nko1jDVBFPRFMnKcZPMk3qzKbiJJxUtteEqL5D9U3UFrVEpByNPetnp7BTWIGmDj2qioCNWG5MD70+Pvr6hU9v6BtJof/AcRforXQRrEUkfePcRpGi9lBz8M5lnDVL57xvHbMOt560+/ZaMYzRSqP+aHIvSm/G4ar17R0df0+TUl2tniTNNm/nohZuWJvLLKXm/5Ms8cGieI164TSCzXznTzkE0kj3GUNj3tq0RQASdH1ZA1qaHrKLO0/Pv98LIR3svBW81edXWG0KmBilxDuy9EBnfAZGBTLh4eNTKUNv97eNAB3qyON0KOijan5Fp48+vbNqpb8kvaNm/AfLd38uO22LZqmTPiewl8bTduEA6YDVe1bocBt9a6wRaRrr7Nm8uuaN5giwCJtYy3d/4reGWFgWsRnBgB7gxg6y3ckGM1V7VwRxC318QNOXzTF+JJnPrli4HWBDiIv6cRgDDscODtD3o1Apghm3fCn6bBk5/KIUPgYzzbSCbopJBjIR/q9WRICUMDMgZFdPyXwWyW3eb2eRGk4svKz9MRzxKsHBvJoh2W2Sx97y+DMBvXu3i52mQi84cWSVR2KHPBkHeDJQRIAxNSB+GDYEKvAU3UhYdmRmD+2OQ9rMdNheUojl3ooJYEIRw5mF8QqiEHixqnWtk+8wV/nF7Ewh4E19V8ql2i8PIzKhqDRr341Ifx1wws9jSn+moeFhpy1GiwdNDneqB53PhqdGgRU7KWDsNxuLnpo10NcUurjFpUuDGOJ7USN+BO+qlwqyKN1lQauS6sE4icWWnkemZRGyFOte7qvt6oTcGRpXxzNbZx/rPQjQNtviLeeXTjyKQbpQ7rjW1NIUMWpqV9r9GO/buJqwtv1/ny70Z2gHz1sr8oP82zf3+P5CKwEiWfopBWXDP4bM+qtu9VLKR1olBiLnSaVrD69HWWJWsKFnaB9aISs85yaUXbL/CNgr5yYeTEMU5gzh1qJhF6o0WLqi+rDM6uvBpyrbyauJNTy6tVd0nbEAC6NF9qXTbFDSatfWp53CRwkoVrUeR23V0CGokpPZnEbdYCGrdJl5tdmjYdqORmlnEbub1wGzWsFfrldlNMumbRH/1p3aCbNkwx5+c6ADcNCachWfkujDjQ5ktMoMNcsDugYdB7S1XgMYR9+ZilniPGiDgUH8L/QgEsPOQItu2JKwN1L0smK5Hqq6GOTnJ7Sl/lqOeTHY3InZnKN1wJa3wErIV0GDs7gnhEUIcbYs1Y9Vfpv1lqda9R/aGvaphrxl9IX2b5tMjuqJK6JkF9r5CXBUr2Jhadp6C6WKzXXneooGa49YO/Fcl6VNHXii2JmxnHyylpi4rKUUkPKCnRU7dAKilTyus056atlhJuynXhXvmBJrdDpT0tQDpSQoeO6SrdESXcE6nWISVOCyqOlNCh09dNlDAHmtGSk025Xtx7RG6HjGhRHmpTFsV2gphIesRxz5woDLZlq/TeJofKNVGocJOmcliDOHp766/zMOltHk55++dKfv4sZQdCdq+ipg/JLo0aR+E2Z1ESR/PJbkuiFLXOmhfxc/TGINr3O4+heEyH5zpCQDSg+R5ohbcY9uQ9EjPW9llMN0n+Agjc7cDrAbKh+vtG5DxLETIz/416Aw31YMw5esCMdWPMD5b0nZfI08OpVPAZke2Sq085G+AAbATuya/EGDger4nGLX1JSRJ/q3RbZR3WB15BW9VgL8tEKgc7/KR66Yh76OvyQ/F8Z1u2cUP55RNLekDkintjqtz+iP8l8ff0KcKWvTHkh9tvblFmERvBDVv2wJBx0/kl9sDotv2Ke2CIGQE1V7TSk11uIglKsb4BtyJ9FiKaVCVBfeQ4fpBlKiMmtI2OcyfLnRaxS2vVd6hGXI9AsWyNqx4G/kTrwIhJB0IcUjs6YMe4zb3HaiGOHawe+izgUOruDrN2qDUnkHJ8PyeoGdA0OGFLgETbgwZhp+GRfdG2q/20O0Snl223CbbUSqtL3VRLq6vZ3JqgDARIrk9YndnMdbDL90ddaOswvscdRQj36jehzAHEmFI7DthAoFWLMIocD9P9cfAVoDRgNR12NbPgVj9q0lXMhg65/HOo5p4Ca2I2tEV021r8bTfo+q9X2xOboWOo9gpqbyQkbInZ0CFHbocTs6H6D9dcL2ZDWwRnbXHax6ymukPWPZ59bOsxM/13Kc6rkDvVSdbV4HBa0nxqdEpS9FQPWZ7u/7O9ovv+vyzE7/8H</diagram></mxfile>
2006.16981/main_diagram/main_diagram.pdf
ADDED
Binary file (13.4 kB)
2006.16981/paper_text/intro_method.md
ADDED
@@ -0,0 +1,78 @@
# Method

The most common multi-layer RNN architecture is bottom-up and feed-forward, in the sense that higher layers are supplied with the states of the lower layers as inputs. An $L$-layer deep RNN is then concisely summarized as: $$\begin{align}
\textbf{y}_t &= D(\textbf{h}^L_t) \\
\textbf{h}^l_t &= F^{l}(\textbf{h}^{l-1}_t, \textbf{h}^l_{t-1}) \label{rnn_hier} \\
\textbf{h}^0_t &= E(\textbf{x}_t)
\end{align}$$ with $l = 0,1,...,L$. For a given time $t$, $\textbf{y}_t$ denotes the model prediction, $\textbf{x}_t$ the input and $\textbf{h}^l_t$ the hidden state of the model at layer $l$. $D$ and $E$ denote the Decoder and Encoder for the model. $F^l$ represents the recurrent dynamics at hierarchy level $l$ (e.g., an LSTM or GRU).
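To make the notation concrete, the following is a minimal PyTorch sketch of this bottom-up recurrence; the module names, sizes, and the choice of GRU cells are illustrative assumptions rather than the original implementation.

```python
import torch
import torch.nn as nn

class DeepRNN(nn.Module):
    """Bottom-up L-layer RNN: y_t = D(h^L_t), h^l_t = F^l(h^{l-1}_t, h^l_{t-1}), h^0_t = E(x_t)."""
    def __init__(self, d_in, d_hid, d_out, num_layers):
        super().__init__()
        self.encoder = nn.Linear(d_in, d_hid)                                              # E
        self.cells = nn.ModuleList(nn.GRUCell(d_hid, d_hid) for _ in range(num_layers))    # F^1..F^L
        self.decoder = nn.Linear(d_hid, d_out)                                             # D

    def forward(self, x_t, h_prev):
        """x_t: (batch, d_in); h_prev: list of L tensors h^l_{t-1}, each (batch, d_hid)."""
        h_below = self.encoder(x_t)                        # h^0_t
        h_new = []
        for cell, h_l_prev in zip(self.cells, h_prev):
            h_below = cell(h_below, h_l_prev)              # h^l_t = F^l(h^{l-1}_t, h^l_{t-1})
            h_new.append(h_below)
        return self.decoder(h_new[-1]), h_new              # y_t and the updated per-layer states
```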
Key-value Attention (also sometimes called Scaled Dot Product attention) defines the backbone of updates to the hidden states in the proposed model. This form of attention is widely used in self-attention models and performs well on a wide array of tasks [@vaswani2017attention; @santoro2018relational]. Given a set of queries $\textbf{Q}$, keys $\textbf{K}$ ($d$-dimensional) and values $\textbf{V}$, an attention score $\textbf{A}_S$ and an attention modulated result $\textbf{A}_R$ are computed as $$\begin{align}
\mathrm{\textbf{A}_S} &= \mathrm{Softmax} \left (\frac{\textbf{QK}^T}{\sqrt{d}} \right) \label{att_score}\\
\mathrm{\textbf{A}_R} &= \mathrm{\textbf{A}_S}\; \textbf{V} \label{att_out}
\end{align}$$
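The two equations above correspond to only a few lines of code; a minimal sketch (assuming queries of shape $(n, d)$ and keys/values of shape $(m, d)$) is:

```python
import torch
import torch.nn.functional as F

def key_value_attention(Q, K, V):
    """Scaled dot-product attention: A_S = softmax(Q K^T / sqrt(d)), A_R = A_S V."""
    d = K.size(-1)
    scores = Q @ K.transpose(-2, -1) / d ** 0.5
    A_S = F.softmax(scores, dim=-1)   # attention score over the keys
    A_R = A_S @ V                     # attention-modulated result
    return A_S, A_R
```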
|
| 14 |
+
|
| 15 |
+
RIMs [@goyal2019recurrent] consist of a single layered recurrent structure where the hidden state $\textbf{h}_t$ is decomposed into $n$ modules, $\textbf{h}_{t,k}$ for $k = 1, ... n$. It also has the property that on a given time step, only a subset of modules is activated. In RIMs, the updates for the hidden state follow a three-step process. First, a subset of modules is selectively activated based on their determination of the relevance of their input. Second, the activated modules independently process the information made available to them. Third, the active modules gather contextual information from all the other modules and consolidate this information in their hidden state.
|
| 16 |
+
|
| 17 |
+
Each module creates queries $\bar{\textbf{Q}}={Q}_{inp}\,(\textbf{h}_{t-1})$ ($n \times d$ matrix) which are then combined with the keys $\bar{\textbf{K}}={K}_{inp}\,(\textbf{\o},\textbf{x}_t)$ and values $\bar{\textbf{V}}={V}_{inp}\,(\textbf{\o}, \textbf{x}_t)$ obtained from the input $\textbf{x}_t$ and zero vectors $\textbf{\o}$ to get both the attention score $\bar{\textbf{A}}_S$ and attention modulated input $\bar{\textbf{A}}_R$ as per equations [\[att_score\]](#att_score){reference-type="eqref" reference="att_score"} and [\[att_out\]](#att_out){reference-type="eqref" reference="att_out"}. Based on this attention score, a fixed number of modules $m$ are activated for which the input information is most relevant (where the null module, which provides no additional information, has low attention score). We refer to this activated set per time-step as $\mathcal{S}_t$.
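A sketch of this activation step is shown below; it assumes that the first key/value slot holds the null (zero) input, so modules that place the least attention weight on that slot are the ones for which the real input is most relevant. The helper name and slot layout are assumptions for illustration.

```python
import torch

def activate_modules(A_S_input, m):
    """Select the active set S_t of m modules from the input-attention scores.

    A_S_input: (n_modules, n_slots) softmax scores, column 0 assumed to be the null slot.
    """
    null_weight = A_S_input[:, 0]                                    # weight each module puts on "no input"
    active_idx = torch.topk(null_weight, k=m, largest=False).indices # least null-attention = most relevant
    return set(active_idx.tolist())                                  # S_t
```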
Given the attention modulated input obtained above, each activated module then undergoes an update in its hidden state: $$\begin{align}
\bar{\textbf{h}}_{t,k} &= \begin{cases}F_k\, ( \bar{\textbf{A}}_{R_k}\,,\,\textbf{h}_{t-1,k}) \quad & k \in \mathcal{S}_t \\
\textbf{h}_{t-1,k} \quad & k \notin \mathcal{S}_t
\end{cases}
\end{align}$$ $F_k$ here stands for any update procedure, e.g. GRU or LSTM.

After an independent update step, each module then consolidates information from all the other modules. RIMs again utilize the attention mechanism to perform this consolidation. Active modules create queries $\hat{\textbf{Q}}=Q_{com}(\bar{\textbf{h}}_{t})$ which act with the keys $\hat{\textbf{K}}={K}_{com}(\bar{\textbf{h}}_{t})$ and values $\hat{\textbf{V}}={V}_{com}(\bar{\textbf{h}}_{t})$ generated by all modules, and the result of attention $\hat{\textbf{A}}_R$ is added to the state for that time step: [^1]

$$\begin{align}
\textbf{h}_{t,k} &= \begin{cases}\bar{\textbf{h}}_{t,k} + \hat{\textbf{A}}_{R_k} \quad & k \in \mathcal{S}_t \\
\bar{\textbf{h}}_{t,k} \quad & k \notin \mathcal{S}_t
\end{cases}
\end{align}$$
Our contribution is to extend RIMs to a multi-layered bidirectional architecture, which we refer to as BRIMs. Each layer is a modified version of the RIMs architecture [@goyal2019recurrent]. A concise representation of our architecture is depicted in [Figure [1](#fig:model-diagram){reference-type="ref" reference="fig:model-diagram"}](#fig:model-diagram). We now describe the dynamic bottom-up and top-down flow of information in BRIMs.

We use the procedure provided in RIMs to decompose the hidden state $\textbf{h}^l_t$ on each layer $l$ and time $t$ into separate modules. Thus, instead of representing the state as just a fixed-dimensional vector $\textbf{h}^l_t$, we choose to represent it as $\{((\textbf{h}^l_{t,k})_{k=1}^{n_l}, \mathcal{S}^l_t)\}$, where $n_l$ denotes the number of modules in layer $l$ and $\mathcal{S}^l_t$ is the set of modules that are active at time $t$ in layer $l$. $|\mathcal{S}^l_t| = m_l$, where $m_l$ is a hyperparameter specifying the number of modules active in layer $l$ at any time. Each layer can potentially have a different number of modules active. Typically, setting $m_l$ to be roughly half the value of $n_l$ works well.
We dynamically establish communication links between multiple layers using key-value attention, in a way which differs radically from RIMs [@goyal2019recurrent]. While many RNNs build a strictly bottom-up multi-layer dependency using [\[rnn_hier\]](#rnn_hier){reference-type="eqref" reference="rnn_hier"}, we instead build multi-layer dependency by considering queries $\bar{\textbf{Q}}={Q}_{lay}\,(\textbf{h}^l_{t-1})$ from modules in layer $l$ and keys $\bar{\textbf{K}}={K}_{lay}\,(\textbf{\o},\textbf{h}^{l-1}_{t}, \textbf{h}^{l+1}_{t-1})$ and values $\bar{\textbf{V}}={V}_{lay}\,(\textbf{\o},\textbf{h}^{l-1}_{t},\textbf{h}^{l+1}_{t-1})$ from all the modules in the lower and higher layers (blue and red arrows respectively; [Figure [1](#fig:model-diagram){reference-type="ref" reference="fig:model-diagram"}](#fig:model-diagram)). From this three-way attention mechanism, we obtain the attention score $\bar{\textbf{A}}^l_S$ and output $\bar{\textbf{A}}^l_R$ (Equations [\[att_score\]](#att_score){reference-type="ref" reference="att_score"} and [\[att_out\]](#att_out){reference-type="ref" reference="att_out"} respectively). Note that in the deepest layer, only the lower layer is used, and in the first layer, the input's embedded state serves as the lower layer. Also note that the attention receiving information from the higher layer looks at the previous time step, whereas the attention receiving information from the lower layer (or input) looks at the current time step. $$\begin{align*}
\text{Softmax} \left(\begin{bmatrix}
\mathbf{Q}_{\;module\; 1} \\
\mathbf{Q}_{\;module\; 2} \\
\mathbf{Q}_{\;module\; 3}
\end{bmatrix}
\begin{bmatrix}
\mathbf{K}_{\;\phi} & \mathbf{K}_{\;l-1} & \mathbf{K}_{\;l+1}
\end{bmatrix}
\right)
\begin{bmatrix}
\mathbf{V}_{\;\phi} \\
\mathbf{V}_{\;l-1} \\
\mathbf{V}_{\;l+1}
\end{bmatrix} \\
\begin{matrix}
\phi: \text{Null} &
l-1: \text{Bottom Up} &
l+1: \text{Top Down}
\end{matrix}
\end{align*}$$
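The sketch below illustrates how the candidate keys and values for layer $l$ could be assembled from the null slot, the lower layer at time $t$, and the higher layer at time $t-1$, and attended over with the layer's module queries. Projection names, shapes, and the shared key dimension are assumptions for illustration only.

```python
import torch
import torch.nn as nn

class ThreeWayLayerAttention(nn.Module):
    """Sketch of the bottom-up/top-down (and null) attention for one BRIMs layer."""
    def __init__(self, d_hidden, d_key):
        super().__init__()
        self.Q_lay = nn.Linear(d_hidden, d_key)
        self.K_lay = nn.Linear(d_hidden, d_key)
        self.V_lay = nn.Linear(d_hidden, d_key)

    def forward(self, h_modules, h_below_t, h_above_prev):
        """h_modules: (n_l, d_hidden) states of layer l at t-1;
        h_below_t: (n_{l-1}, d_hidden) lower layer at time t (or the input embedding);
        h_above_prev: (n_{l+1}, d_hidden) higher layer at time t-1."""
        null = torch.zeros(1, h_modules.size(-1))                    # null slot phi
        sources = torch.cat([null, h_below_t, h_above_prev], dim=0)
        Q, K, V = self.Q_lay(h_modules), self.K_lay(sources), self.V_lay(sources)
        scores = torch.softmax(Q @ K.T / K.size(-1) ** 0.5, dim=-1)  # attention score per module
        return scores, scores @ V                                    # analogous to A^l_S, A^l_R
```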
Based on the attention score $\bar{\textbf{A}}^l_S$, the set $\mathcal{S}^l_t$ is constructed, which comprises the modules for which null information is least relevant. Every activated module gets its own separate version of the input (as it depends on its query, which is a function of the hidden state of the module), obtained through the attention output $\bar{\textbf{A}}^l_R$. Concretely, for each activated module, this can be represented as: $$\begin{align}
\bar{\textbf{h}}^l_{t,k} &= F_k^l \, ( \bar{\textbf{A}}^l_{R_k}, \textbf{h}^l_{t-1, k}) \quad k \in \mathcal{S}^l_t
\end{align}$$ where $F_k^l$ denotes the recurrent update procedure.
We also perform communication between the different modules within each layer (green arrows; [Figure [1](#fig:model-diagram){reference-type="ref" reference="fig:model-diagram"}](#fig:model-diagram)). In order to enable this communication, we again make use of key-value attention. This communication between modules within a layer allows them to share information, albeit in a limited way through the bottleneck of attention. We create queries $\hat{\textbf{Q}}={Q}_{com}\,(\bar{\textbf{h}}^l_{t})$ from active modules and keys $\hat{\textbf{K}}={K}_{com}\,(\bar{\textbf{h}}^l_t)$ and values $\hat{\textbf{V}}={V}_{com}\,(\bar{\textbf{h}}^l_t)$ from all the modules to get the final update to the module state through residual addition of the attention output $\hat{\textbf{A}}^l_R$ as follows: $$\begin{align}
\textbf{h}^l_{t,k} &= \begin{cases}\bar{\textbf{h}}^l_{t,k} + \hat{\textbf{A}}^l_{R_k} \quad& k \in \mathcal{S}^l_t \\
\textbf{h}^l_{t-1,k} \quad& k \notin \mathcal{S}^l_t
\end{cases}
\end{align}$$

<figure id="fig:noise-cifar">
<figcaption>On sequential CIFAR-10, we evaluate on 32x32 test images and change a certain number of the pixels to random values. We show the <strong>average activation weight (y-axis)</strong> on Input (left), Null (middle) and Higher Layer (right) plotted against the <strong>number of random pixels (x-axis)</strong> (total number of pixels = 1024). We see that as more pixels are set to random values, the model becomes increasingly reliant on the higher-level information.</figcaption>
</figure>

The architecture proposed here does not rely on any additional losses and thus can be used as a drop-in substitute for LSTMs and GRUs. For training we consider task-specific losses, which range from classification and video prediction losses to RL losses depending on the problem.
2009.10815/main_diagram/main_diagram.drawio
ADDED
@@ -0,0 +1 @@
<mxfile host="app.diagrams.net" modified="2020-05-31T03:54:17.932Z" agent="5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Safari/537.36" version="13.0.9" etag="Y8ZEGT8WxBmb0wbdgWDP" type="device"><diagram id="s5_pg9M3XCNX-nX1Qgtf">7V3fk+I4kv5rKmLvoQn9lvzYzXTNPtxsbEzfxM4+UoWrihgK6oDa7r6//uwCgy0lIJuUbIM7oqMbAQIyP2UqP2Wm7vj49cevq8nby2/LaTq/Y2T6447/cscYV8Jk/+QjP7cjTHC2HXlezabbMXoY+Db7v3Q3SHaj77Npuq68cLNczjezt+rg43KxSB83lbHJarX8Xn3Z03Je/dS3yXPqDHx7nMzd0X/NppuX7aiR5DD+93T2/FJ8MiW7Z14nxYt3A+uXyXT5vTTEv97x8Wq53Gz/9/pjnM5z6RVy2b7v/siz+y+2Shcbnzfs5P6fyfx999t232vzs/ixq+X7Yprmr6d3/Mv3l9km/fY2ecyf/Z7pNxt72bzOd0+vN6vlX+l4OV+uPt7Nk0SMs19fPFOIi2cjT7P5vHjlYrlI86HlYrNTN1XZ4+lk/bL/6N1XTVeb9MfRn0v3Qszgly5f083qZ/aS3RskMXKk5PZtBfoSs1PG94MuKdO7yV9KiuTFKyc7AD3vP+Eg4+w/OzHDIuehRU4+/jgiZ34in6wedw8lCaEBKkhF/FzoxBG/NK7wFYLsBSB7Nd/sxFBRgvrf92XxxKf1h0Q+Zy/IfuuPw5PZ/57zf3/7/GcxUfYVtnNtn3F0m32hzEyl5/U6Wb9tbdfT7EeuAW9FU0vR2SvvP/4A2kbQqGJK22uKU8FG0lUrcdUqEdQqzy+pdDH9nFv+7NHjfLJezx59V5EtsvTHbPPn7l35//+d/Z9kP3b76JdcKqR48HP3wFVQ9n1cPaRTy++sl++rx7SC3c1k9ZxuKtbbQ1eeSlil88lm9p/ql4A0s/uEfy5nH8umWNzKsBHhVSgkQhdQKGba/qzdm8veyZpPcc2d+YRy5tvKxJnvAzZ7IXghSXkgqfECdhblDmHPq8l0lmnQssz7vQHBMr3SNr3GNb0CgIdAWKN6cHsV2UutIAsZyPGZNnAdBLPSQHILhdpkQG1F+krHw2wR7V0BaJWJB1lKHbH9sdmkq8kiEw0jX18f0ul0tnheO8LMfvHmFFwLELq4nMxnz4t8a5VJKM3Gv+Tym2UB8+fdE6+z6XR+TE2HFeSAuAR4hqUdUljDQjtaUcCkUB1KQR5Bd4Dt6ogIXdmyUlY8D29a8wf/TFez7AfmOt2OLbIf+2f5QWWufOAw2cejn+VH9nRN98Xd2fEykmEnsVwUMU13vJIpM9L67Hx4O17qwUcMaOwxGhWRqGgE5kNEI8TQDGjsJxppYjtaIkZKNcaiHFGPCRHB6MEr1dr9V7Y2TUOB0HSe4GpElLXmqQF2SJqPEuHChx22Uxdtk3y4GDxTMCxei3pTNu9GdLOVq4TMDKxM9n/kmYkRV7AH69RfDDVhoo/grkw0G9U9LFp2XxxIoPpoFHok1AGN9szhwOhDwvUWjD0waDaTWKAK26DJkCcU1IOSHHbIvcGkGhl9zBapDGY6IcUf2hStXJxC6+lPwQMu86FzB+D2B7j2mb8WapQ0BykfGX5+QkQ8ujz5gMf+4tGcMKQm2/LRhrzDNh4+bj6huRFByvoL0v6jSlJiK7vpllEW+YRHp0IEDUTpV9Lcnia7YK9IZBtnv2OWaY6Rf6Tfy/ltHhlxEsqIKz5w/TZZFGPvpRy58jj02v3Y+0MxRMtvPwyfmfRcPl4HDz132ruExRPGWNAVCZQ3QSmwfBjCISeYankJdwooAKBTy6TpW2GP8klni+ez5tDKpkBQQ36AYu22wbQfUA0YORQ+qZG3qAY4+yqcGjy47FtUg4isBh86GCujaDy+vx+PIREb9pDFT1gy1Q6DGTNBLmpm4XSSmqdHSKTq0aQPT0gi5axdkfowfH1HKZzGyRNXpMXYRTU3MVMJ20Jp3MxY7kOf9B2lKi5KfYL9vqMUFmkwlJ4NhRtWfP2xmGXPfcr+/vr7Hz2ONQNkkEgm/BJsIeoGRekISWS2XHpPZmUrkY00OUaRSt74RP3czNqZGY/q4gilf7emahVM1SqoqocYOteAMe7JWyZ3x7ruA2bsIJpjV/L1VQ9OJ4Hc1EXUg0fkfRN6cKvPRVQ9YNcIomQJF/vGvcA50smCYhIwQGB+MHy4gJMdLIJG9I7Upzp58Gu18PSUqsfHMDtrxYC0C8Mg2YcKqETQsN9tH6M0n8CbMkvuKZ3KVAeSO6etyz0oN9AnvOu4cvcpj7sJvEeWu08MfxN4V3Hl7hNQ3wTeI8sdim5RWlNN1n9lst/N9bAq5cdMXnN9LR7W+T/f0vnTp8+bTSay2XIxMJoVfFAjRtT2/wkbAd0wmJSBEBL08Lppc7EjjUkQhC61seglYdywLth6rFessxPDhZU629qtfTrv3ZlU3l225qHk6993h2KwxuVfBc7KjcaKse5QnImywKFJ0wRfQm0aUwbL5xX1qndQYEUroDpg7Bys7jhmVWE/YOXYHGma1qsbbk/Fg8FK+rAhIdLEqayghBh2Eid1ahnO4bTpMU3HAchJthM9VGPZDHPefIM6xQj1wWnbvD11HQCccQttGnrT64STTOyi18ZV/NzCosjGPE/7MtVOfpZe9pa/YF3na4ud4zwOaGJvFkURvh2Qu/0ejXEclIIL2Y8uSYx7RhJxMy2DkmiW5JQaj/EkR4gT+EWVXFAazJIcz2SHJznZsuSCElmW5AjR24Y8CJJTRAAnmjElF7MJcm7lkgRLcknLkotZX4DsIVzJyZiSi1lGgOohTJI4di6q5GJWC+B6CNGu5JRP0NxRD+EeV0SVXMzcf2QP0bLkriqGUDEld1UxRFTJXVUMEVVyVxVDRJXcVcUQUSXXTsvKT2REtHVSRJKLOXiw3dD295xkUrvXg5J+rKgTxLwaSXNxrzYGJKKcnhrx7qOo/Sl7X2VzDhHqQ21NceBc1ZSQYHqvdyYd0uKYw8Bgcc7gS1+ELyjBE5gQD2XaJ9oerMuBV2KQJ5AITkYaIyAnE0zzcUsGOpi4FpNh0D4Mw1UmrmnAquvOrWsncc3gJa7pcIlrut41QVeUuNYPWDk2R+ElrqlwiWs6bplHB31DTG5B+/BZ1+kbgNuTi7HuLGLHNyR4vsEE9A31buq5Jt/QC1g5Nkfj+QYd0DfEZUJ7ndTcIbAV3ZcOPFgWtDbNU9YJG3GAWCPskAMd7MIePRCi9SgLTlzVK3JUUxcRFiogYRGXEB0MTxjDo7ENj45leMzAlV5meHRAw+MUGiLq/WYqZTpIoAibtLUDJm/UJEBnXi/I1K6Tcapeii+NVfNi2DUjssPoc2qQ/W2WjQmHCQy
GPoWNvlDde+/f10OTCzfbzs180kWqb7mtGwtETxqIDMZQ93i5eJxs0kX2d9B5lY4WZkTsa/eKptlRdA5R0ihLPBer0/Xmv5frtS8EGjfPPHZDu8BRmpL2NSo8IdD17FD3RYKhs1C9iu7HUJ+iL+7gt+XT5nXyA+xqZI38bQsEMnncuE+OP/YrT7N09V9BUIHSyvMYnPCajlnREzcS6uZJQDwh9PI0ca/l+SrR0qyF5I75VJxF7BtmYhbOBZedLq4TiiG5mIVzwSVnmI6IugSBoLI9Y5OT36YB4x0cD5aP9gzQr6hYbJ2JGxWzG2VoY6nXv7+HsKfi1lR4RFeCQHR1HT8GwE/SNfwIk5sSO6P0wBywkTiwqE27DZ35FM2DJR0nEH2FsVP9ZZkFkyUKAdpSHt+dDsHnFhbU9WMJgXZPwcLPJGZV6ZN5TB/BK7cejBRYNyJk2wPqiBXe0gfbHsSsOI0mVeJIVah429UkZi1qi0iVIl4+ZFIvcQ3voF/o0haFjCgrnvc/6O/9mYcSCa0cp1s4UHIkrEm9NxwOQ5bPFuxmqqStRLUBRh8wIidgJIyj+Boo0sBkSTAYRU03G2BUxxoJcgmM7FvPSVAYQbxW2JjnOk5VtMV/GAqFIgbAoEbYi+zp9Ru/zw06hWYcIjeD3ehGiQc9dROqcEuQaWxVQAzOLarC7b5FYqvCgyu5AVVw7l78CRso8P5DHFV4ECw3qgrYQAVUhQcvc6OqgA1UQFXEbDo2Ht/fj8dY0gMaeuYt3OJRtpT0NQlDcqCnZ2zhxc3CCIw8Gll4fU3EAJEXWXjUI2rsD/IYKLyi5rgsPJ5gCC9oZ6LYyIOFFw55UGSGwTN9mX369fc/hrPys6slszWOuiF6Ckfd9RoAnU3Zsep98qLbUgJOXgy7f8Wxng5Qp79DFtBuxkM1Lb1BWltypUZEqiO0NuNqJLNlWPwR1fm9a45OfwpNzKlPwaO+KfVJPBhA2i+QfkT0wUFKkiT7lMPVesFOiyn1YAzqgrT/GBAUIBAS3fTUDZyvxA2F0CtCNsk16lWNRCmJ1HYNl2j41MwsrK492JL4lULFxnZP73GKtP1UyUX03v4mhcu2oEFZlqbNAsuXXgQp0STAeUPkUC8oQ3OB4PPXfdz8EETwko+K+3rOCJ7t0xgrsj8MXyR+FpTj6RPuI5NrLCg/1F3cty94iFvqguCjIz7yQQYLWg/RJ8THFnyo3hff0vlT9uTnzSaTBpi9NzCt1a0uEDhCTCsYNeFgIWipRwebYhvV/vproYdxN1pj7+FWrlTdD3aGSFBEAHGQakwfKAKEtIyGJA1YvTqRK+qT3ROMgYaotPWt3wnQj9TExFjcIpLI3Wa7Dh/i5q6T5iZKGsBEheWwmQ/T09t+jx2Hj4K2Qa3Bp3Y7SPDrk+LjTvTY9Xjb3aWtJCn3IdEQkT29n82r7+vzKQ6D+P8LoHkaK+jQBL9+AIzFzCfDvN1dEZW0HYFxFlF4hGBe8K6pc015bOHF7GNCCN4d74qw9oUXs10J6jXvlAJlYZGFF7MvCSE5AY8lPNW+8GJWrCA7DFd4kQ+JeMyKFWSHQRybF1t4MStWkB1G+8KLWbGC7DBchiKy8IRPINZRh9G+8K4qwoicxiSuKsKILbyrijBiC++qIozYwruqCCO28OK2OLzZ2wcZMnfPuXvzHDAf4tGPiNvF8IaRAhSNZ5olpjFUgGMeYEJMrFz1KXO3scKwsQJNiImVlo6Ubw0rigFtUC7BCljzExYr0ocbGLASBCsX+SAIK4F9kPShQgasBMHKRT4IwkpgHyR9mJ/hQu1S5hCcyNZY5wlUYRE0nJGdrGSJnEQfmZ2W9dpvXFMSfQG3SophMdiZhQ0m0XPkJHoeNIle1uuecU1J9P3AGGiIGHISPdA3AhNjQdMyeuI7IhPVsoXimK74DgWta9WxdQ36DoHsO0RY31GPtrwq39ELjIGGiCP7DhbWd8SlOw+3+FBZgQwx7CRooE5yt0tyGEpg/kru287RhhA0yu55ldvM0k1DOhgWVVQ6tf8oSOyraXNVUXJUVRfRHzQo/aHisqODGQpmhlggM8TimSE2mKGLzBALaIaAFgqYqkfoGt2TeuIOEjEM8DpugOUfsJ3uyIZdtAkVBh++PlrRpkLoGd1diHYbjUC/FX+HBsADoBlDopEFQCNEb2P0Abt/X5e6f90Nvb4+LBpzE3ShXl/7FpzotKdq5wryGuzSWcvTdLt+xGJVaKvi+seypxVds2wJZNlkcz97LJe41Os6YHt61c5t5piI7LovVAyoqrgk/yqDjJsuHjj/SrVzXXmHLVeXjRQIuUvSuEDIBU7jUhC3XnNzpKDN0R+bbK8yWWRuh5Gvi8flNNu5DDul8gkxS/i+mfkhcYQ4O6X9XejoOyUNkdkYO+PxcvE42aSL7O+g9GpaALCx4YRBtz4E2yDrvlY8SwYl5MdNqtCsv8JrPZtRx6x4Do68yOk82oNVu4FrsSV0oBP5WmztkTF5o6qIfVm89mB7blQVsS+L1zG7RymF2AxEMtr6viJu9yi8fhZS6vaFF7N7VHDkRd6UGSgK7C3yYgsvZiwVHHmRd7QmZiwVHHmxhddSHkg9Xrf3BwtSCMC9KTUypaMjVZ3V+8Ad3LWdnhuR+zUtpWkMCMpPksIh6MzcmAiqVzk4IAgRQSwggs7MjYmgoMkaQPUfwiEo8hlnd2CmOOTqLriSIoGP0MNlWpigmRYDnGrCCa4Aag4nHjnX2gRNyDgPp16q+DKLAVdrBFRx0OJCD4sxrP7mziRsv5skaK3fAI1zq7zD+4wkaC3gAI2g0AhsNRgADYykqi9ff/8f32yqxieeH3kT91uitAw6gUScUgJk8IDnlFDWG5PyOKK8mdMEgTm1ZdN0QdavimPFJT3dWbRU417SKhOgl2bCQi5aBCq0TUiYPkDionao2wSsomKbONcVhU2TLopD+oqPpA/4yFlHGQgf0NyY+HBpzFIC/NZnP6wKd73Ph7/Ia1twOpOk5Ow2leIc7TDZkbfQytunK9uwN/PqCNSfs0QjllKVi/Q65+IVKc6sCwUrbR1i+1e5SGsqLa2pMBcmAoMXBRV7b/CJjIhgVZdgDD/rFLJHdteXGkgru4vOeQtFpbshVLThiZdizAagnf6ICUAEftEGYO8PNBV1bABFMycmnDlhBIESrK3NimngvGoaKKZl6LoVsDQtSWPQ2CbAhDMBjCCQhfVB08BJ+fiLrkWaat+Sb48KPFOii5t2QqCCDagIaSvc2mzZfMcQ1cdA9GRNAllCBLI1y9PksTrJOBPHLA9LyT/S7+W3X/qB67fJohh7L1HY5XHotfux94diiJXffhg+M6lDl/e2Jvnu8mBdGGNbzIK+OFcmxMjxhegbqDMCca0DvrdDPCi+y1+h6TGUS2cd/XI3upxU4jJfAZcTWg7usQPnCucxItW9A2VNutH2KlqVxMiR3reLtVuJZk+PdHJo+9q0lSgtIq7DxAF3n2h5twNqjqJGnUCNImJEFd8/3fSslDrEbDYxD9Z5mBG0DNsBOMfNTV
WlOlOpoAhY4WZUak+duJ9S9JUOARy0XNoBOLB+RaLoGT+FgSLnrC+fWDH7Y0NACC1Xd4DQMQhJyypg+SlC2Qnbs3VaESBUbMMHCMWDEJb7Og2hrfuKASEPOr9WJgeQuXE+K5gdiZT3rWU4Unz7kZJr9ymE0zvgIFchZHcwyjzWbfT7A7NXcq4UWK2KInrhil4ZSPSh2gww2slLf3evzGxAIMEXlV9npM726qkI/jB8mex9slJvAvQyMuh9OLXrA33SvuA7eU1sG4gXkQUftHdbnxAfW/Bxe77fbAsNqbJgjB0LHiRRzUsTPjbItabGjEaC1i8P+PHDjwqHH2hqRPwwNEJkwE9z/Ohw+IGmxsQPNhvSz/67Qkg3UvcnSTD677JC8oMq3DAmtio8WJMbVUUN6hBHFUFJlJAdWoVUbUcnLCgRErRDK2lfeEHJjNjIi8wEsZjN5IMjL7bwYjaTD468yCceLGYz+eDIiyw83nbPrKEBYznmVGeIqcZtlVhyhrII1quF8bZ7bw0QK0NM1gNCDYiZM6xGQIh5RNI33ffztNovsyynZg6s9uFKhSjo4drYgBEjzhsiRgh9fjZMlCD0ChtQ0gAlChUl0GyYKBmuRmgHJRoVJdBsmCgJev2Bi5Lp/Wx+PI25b7rfR5ulSoSGipeJNZUQfmdmmWomP0sve8tfsK7zpYtPOsBoO2dzUHlQZ7UOHy7MSbZhaB00YBfkSkZ9e8wWHQHRDx24BwF33SoA2n9GVoEHjXdrKoDvSQ6mAgGRgRitsH+brP/KhAV1Ipi85mpbPKzzf76l86dPnzebTGaz5eJk54K+NQIJkQ4pZQYPiz/WxZ3tJcDs23Cj08cCO5tk/TKZfmydHAE6yzlJdpmmXsv5eTWZzjLBnljlAkctiibuSlZF67Myqa/YiAOa4Q0Wc/ZwtczX3GETksn25bflNM1f8f8=</diagram></mxfile>
2009.10815/paper_text/intro_method.md
ADDED
@@ -0,0 +1,170 @@
# Introduction

Politeness principles, displayed in practice in day-to-day language usage, play a central role in shaping human interaction. Formulations of politeness principles are related to basic human needs that are jointly met in and through interaction. Natural language offers various ways to enact politeness. One of the most influential politeness theories from linguistics offers a detailed exposition of the individual actions whose cumulative effect results in saving face and losing face, along with a consideration of cost. Using this framework, it is possible to analyze how interlocutors make decisions about where and how these devices should be used based on an intricate cost-benefit analysis. We refer to these component actions here as face acts.

The idea of face acts appears quite attractive from a computational standpoint for their potential role in understanding what is "meant" from what is "said".
Consequently, politeness has been widely researched in various domains of language technologies in addition to foundational work in pragmatics and sociolinguistics. However, much prior work modeling politeness reduces the problem to a rating task or a binary prediction task, separating polite and impolite behavior, with the result that what is learned by the models is mainly overt markers of politeness or rudeness, rather than the underlying indirect strategies for achieving politeness or rudeness through raising or attacking face, even in the indirect case where no overt markers of rudeness or politeness might be explicitly displayed.

In contrast, the main contribution of this work is the investigation of eight major face acts, similar to dialogue acts, including an investigation of their usage in a publicly available persuasion corpus. In the selected corpus, a persuader (ER) is tasked with convincing a persuadee (EE) to donate money to a charity. The nature of the task prompts frequent utilization of face acts in interaction, and thus these face acts are abundantly present in the chosen dataset. We also provide a generalized framework for operationalizing face acts in conversations as well as design an annotation scheme to instantiate these face acts in the context of persuasion conversations. We offer the annotations we have added to this public dataset as another contribution of this work.
Additionally, we develop computational models to identify face acts as well as construct a latent representation of conversational state to analyze the impact of face acts on conversation success. We achieve 0.6 F1 on classifying face acts, and 0.67 F1 in predicting donation outcome. We observe that the predicted face acts significantly impact the local probability of donation.\footnote{We include our annotation framework in the Appendix; the annotated dataset and code are publicly available at https://github.com/ShoRit/face-acts.}
# Method

Face, in the sense of politeness theory, reflects the "public self-image" that every rational adult member of society claims for himself. It can be subdivided into positive face, referring to one's want to be accepted or valued by society, and negative face, referring to one's right to freedom of action and freedom from imposition.

We refer to "face acts" as utterances/speech acts that alter the positive and/or the negative face of the participants in a conversation. We hereby refer to the acts that attack one's face as Face Threatening Acts (FTA) and those acts that raise one's face as Face Saving Acts (FSA). For example, criticizing an individual is an attack on the other's positive face, whereas refusing to comply with someone's wishes raises one's own negative face.
We also note that a single utterance or act can simultaneously affect the face of one or both participants in a conversation. For example, a refusal to donate to a charity because they do not trust the charity involves asserting one's negative face as well as decreasing the charity's positive face.

The implication of a face act between the participants is governed by several factors such as "power" and relative "social distance", as well as the relative threat ("ranking") of the face act. For example, refusing to comply with the orders of one's superior is more threatening than asking a friend for some change.

Moreover, face acts need to be contextualized for a particular situation based on the rights and obligations of the individual participants, such as in compliance-gaining episodes. For example, a teacher has the responsibility and right to assign homework to the students. Such an action cannot be perceived as an attack on negative face, even though the student is reluctant to do so.

Based on the definition of face and face acts, we design a generalized annotation framework to capture the face dynamics in conversation. We instantiate our framework on a publicly available corpus of persuasion dialogues.

We use a pre-existing persuasion corpus. Each conversation comprises a series of exchanges where the persuader (ER) has to convince the persuadee (EE) to donate a part of their task earnings to the charity, Save the Children. This selected corpus is well-situated for our task since each conversation is guaranteed to have a potential face threat (i.e., a request for money) and hence we can expect face act exchanges between the two participants. It also sets itself apart from other goal-oriented conversations such as restaurant reservations and cab booking, since in those cases the hearer is obligated to address what might otherwise come across as an FTA (a request or booking), and thus non-compliance can be assumed to be due to logistic issues rather than an unwillingness to co-operate.

In the selected corpus, the participants are Amazon Mechanical Turk workers who are anonymous to each other, which controls for the "social distance" variable. Moreover, the participants have similar "power", with one role having some appearance of authority in that it represents an organization, but the other role representing possession of some desired object (i.e., money). Thus, we argue that although ER imposes an FTA by asking for a donation, EE is equally at liberty to refuse. Moreover, ER does not incur a penalty for failing to persuade. In fact, the corpus includes some conversations that do not talk about donation at all. We also emphasize that the task was set up in a manner such that EE comes into the interaction blind to the fact that ER has been tasked with asking them to donate.

We assess the success of a conversation based on whether EE agrees to donate to the charity. We label successful conversations as donor conversations and non-donor conversations otherwise. We refer the reader to the original corpus paper for more details about the dataset.
| Face Act | Description |
| --- | --- |
| SPos+ | (i) S posit that they are virtuous in some aspects or that they are good. (ii) S **compliment the brand or item they represent or endorse** and thus project their credibility. (iii) S state their preference or want, something that they like or value. |
| SPos- | (i) S confess or apologize for being unable to do something that is expected of them. (ii) S criticise or humiliate themselves. They damage their reputation or values by either saying they are not so virtuous or criticizing some aspect of the brand/item they endorse or support. |
| HPos+ | (i) S compliment H either for H's virtues, efforts, likes or desires. It also extends to S acknowledging the efforts of H and showing support for H. (ii) S can also provide an implicit compliment to incentivize H to do something good. (iii) S empathize/sympathize or in general agree with H. (iv) S is willing to do the FTA as imposed by H (implying that the FTA is agreeable to S). |
| HPos- | (i) S voice doubts or criticize H or the product/brand that H endorses. (ii) S disagree with H over some stance, basically contradicting their viewpoint. (iii) S is either unaware of or indifferent to H's wants or preferences. |
| SNeg+ | (i) S reject or are unwilling to do the FTA. Stating the reason does not change the circumstances of non-compliance but sometimes helps to mitigate the face act. |
| SNeg- | (i) S offer to assist H. |
| HNeg+ | (i) S seek to decrease the imposition of the FTA on H, either by decreasing the inconvenience (such as providing alternate, simpler ways to carry out the FTA) or by decreasing the threat associated with the FTA. (ii) S apologize for the FTA to show that S understood the inconvenience of imposing the request but have to request nevertheless. |
| HNeg- | (i) S impose an FTA on H. The FTA is some act which H would not have done on their own. (ii) S increase the threat or ranking of the FTA. (iii) S ask/request H for assistance. |

Table: Generalized framework for situating and operationalizing face acts in conversations. The predicates for each face act are highlighted in bold.
In a two-party conversation, a face act can either raise (+) or attack (-) the positive face (Pos) or negative face (Neg) of either the speaker (S) or the hearer (H), leading to 8 possible outcomes. For example, HPos+ means raising the positive face of the hearer. We provide a generalized framework in the table above for labelling a speech act / utterance with one or more face acts, building upon politeness theory. The framework is designed to be explicit enough to ensure the creation of a reliable coding manual for classifying face acts, as opposed to the simple classification of requests and other directives as intrinsic FTAs. Moreover, since we also seek to operationalize FSAs, we make some departures from the original classification of directives. For example, we feel that compliments directed at the hearer should be HPos+ rather than HNeg-, since an appreciation for someone's efforts is more desirable.

We highlight the predicates that result in a particular face act in bold in the table. For example, S claiming to be virtuous or doing some good deed amounts to raising their own positive face (SPos+). Although the framework is designed to be generalizable across domains, the predicates themselves need to be instantiated based on the domain of choice. For example, in this particular corpus, the act of requesting someone for a donation counts as an FTA. We refer the readers to the table in the Appendix which outlines how the face acts are instantiated for the specific persuasion dataset.

Each conversation in the dataset consists of $10$ or more turns per participant with one or more utterances per turn. Each utterance is labeled with one or more face acts according to our annotation framework, or "Other" if no face act can be identified or if the utterance contains no task-specific information (e.g., small talk). We consider ER to be a representative of the charity since ER advocates for donations on their behalf. We show the flowchart detailing the annotation framework in a figure in the Appendix.

**Validating the annotation scheme:** Two authors of the paper annotated $296$ conversations in total.
The annotation scheme underwent five revisions, each time with three different conversations, eventually yielding a high Cohen's Kappa score of $0.85$ across all face acts.
The revised scheme was then used to annotate the remaining conversations.
We show an annotated conversation snippet in \Tref{Table: Annotated Conversation Snippet}.

Our annotated dataset comprises $231$ donor conversations and $65$ non-donor conversations. \Tref{Table: Dataset Stats} shows the distributions of different face acts employed by ER and EE respectively for both donor and non-donor conversations. We also note that certain face acts do not occur in our corpus, such as SPos- for ER, presumably because ER does not have a reason to debase themselves or the charity they endorse. We provide a detailed explanation of the occurrence of such acts in the supplementary section.
We observe multiple statistically significant differences in face act prevalence based on whether EE is a donor or non-donor.
Some findings are intuitive, such as an increase in HPos+ for donor conversations (for both ER and EE). We argue that EE had acknowledged the efforts of the charity and was willing to donate, and was thus rewarded with compliments from ER. Likewise, SNeg+ occurs significantly more in non-donor situations, due to a refusal to comply. We note that a majority of the turns labeled "Other" involve greetings or conversation exchanges unrelated to the main business of the conversations.
| Face Acts | ER (D) | ER (N) | EE (D) | EE (N) |
| --- | --- | --- | --- | --- |
| SPos+ | 19.95 | 23.03 | 8.29 | 6.51 |
| SPos- | 0.00 | 0.00 | 0.18 | 0.96* |
| HPos+ | 23.08*** | 16.24 | 36.17*** | 21.07 |
| HPos- | 0.70 | 2.65* | 4.37 | 10.73** |
| SNeg+ | 0.00 | 0.00 | 3.85 | 11.97*** |
| HNeg+ | 5.50 | 4.81 | 0.00 | 0.00 |
| HNeg- | 10.47 | 10.85 | 9.20 | 13.03 |
| Other | 40.31 | 42.42 | 37.94 | 35.73 |

Table: Distribution of different face acts for the donor (D) and non-donor (N) conversations for ER and EE. \*, \*\*, and \*\*\* signify that the specific act is statistically significant for D and N according to the independent t-test with p-values $\leq$ $0.05$, $0.01$, and $0.001$ respectively.
[Figure: Overview of \method{}. We first encode the utterances by passing the BERT representations of the tokens through a BiGRU layer followed by Self-Attention. The BERT, BiGRU and Self-Attention outputs are then fused to get the final token representation before max pooling. This utterance representation is passed through a uni-directional GRU followed by Masked Self-Attention and fusion. One part of the model uses the face classifier to predict the face act of each utterance while the other part uses another layer of Masked Self-Attention to predict the donation probability. The details are given in the text below.]

We model the task of computationally operationalizing face acts as a dialogue act classification task. Given a dialogue with $n$ utterances, $D = [u_{1},u_{2}, ..., u_{n}]$, we assign labels $y_{1}, y_{2}, ..., y_{n}$ where $y_{i} \in Y$ represents one of the 8 possible face acts or "Other". Although we acknowledge that an utterance can have multiple face acts, we observe that multi-labeled utterances comprise only $2\%$ of our dataset, and thus adopt the simplification of predicting a single face act for each utterance\footnote{For each utterance with multiple labels, one is randomly selected from that set to be treated as the gold label.}. Several tasks in the dialogue domain, such as emotion recognition, dialogue act prediction and open-domain chit-chat, have achieved state-of-the-art results using a hierarchical sequence labelling framework. Consequently, we also adopt a modified hierarchical neural network architecture that leverages both the contextualized utterance embedding and the previous conversational context for classification. We hereby adopt this as the foundation architecture for our work and refer to our instantiation of the architecture as \method{}.
**Architecture of \method{}:** An utterance $u_{j}$ is composed of tokens $[w_0, w_1, ..., w_K]$, which are represented by their corresponding embeddings $[e(w_0), e(w_1), ..., e(w_K)]$. In \method{}, we obtain these using a pre-trained BERT model.

We pass these contextualized word representations through a BiGRU to obtain the forward $\overrightarrow{h_{k}}$ and backward $\overleftarrow{h_{k}}$ hidden states of each word, before passing them into a Self-Attention layer. This gives us corresponding attention outputs, $\overrightarrow{ah_{k}}$ and $\overleftarrow{ah_{k}}$. Finally, we concatenate the contextualized word embedding with the GRU hidden states and attention outputs in our fusion layer to obtain the final representation of the word. We perform max-pooling over the fused word embeddings to obtain the $j^{th}$ utterance embedding, $e(u_j)$. Formally,

$$\begin{aligned}
\overrightarrow{h_{k}} &= \operatorname{GRU}\left(e\left(w_{k}\right), \overrightarrow{h_{k-1}}\right) \\
\overleftarrow{h_{k}} &= \operatorname{GRU}\left(e\left(w_{k}\right), \overleftarrow{h_{k+1}}\right)\\
\overrightarrow{ah_{k}} &= \operatorname{SelfAttention}(\overrightarrow{h_{k}})\\
\overleftarrow{ah_{k}} &= \operatorname{SelfAttention}(\overleftarrow{h_{k}})\\
e_c(w_k) &= \operatorname{tanh}(W_w[\overrightarrow{ah_{k}}; \overrightarrow{h_{k}}; e(w_k); \overleftarrow{h_{k}}; \overleftarrow{ah_{k}}] + b_w) \\
e(u_j) &= \operatorname{max}(e_c(w_1), e_c(w_2), ..., e_c(w_K))
\end{aligned}$$
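A condensed PyTorch sketch of this utterance encoder is shown below. For brevity it applies a single dot-product self-attention over the concatenated forward/backward GRU states rather than a separate attention per direction, and all dimensions are illustrative assumptions.

```python
import torch
import torch.nn as nn

class UtteranceEncoder(nn.Module):
    """BERT token embeddings -> BiGRU -> self-attention -> fusion -> max-pool."""
    def __init__(self, d_bert=768, d_gru=300):
        super().__init__()
        self.bigru = nn.GRU(d_bert, d_gru, bidirectional=True, batch_first=True)
        self.fuse = nn.Linear(d_bert + 4 * d_gru, d_gru)

    @staticmethod
    def self_attention(h):
        scores = torch.softmax(h @ h.transpose(1, 2) / h.size(-1) ** 0.5, dim=-1)
        return scores @ h

    def forward(self, bert_tokens):                  # (batch, K, d_bert) from a pre-trained BERT
        h, _ = self.bigru(bert_tokens)               # (batch, K, 2 * d_gru): forward/backward states
        ah = self.self_attention(h)                  # attention outputs ah_k
        fused = torch.tanh(self.fuse(torch.cat([ah, h, bert_tokens], dim=-1)))  # e_c(w_k)
        return fused.max(dim=1).values               # e(u_j): max-pool over the tokens
```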
Similarly, we calculate the contextualized representation of an utterance, $e_c(u_j)$, using the conversation context.
In a departure from the base architecture, we pass $e(u_{j})$ through a uni-directional GRU that yields the forward hidden state $\overrightarrow{H_{j}}$. Masked Self-Attention over the previous hidden states yields $\overrightarrow{AH_{j}}$. We fuse $e(u_j)$, $\overrightarrow{H_{j}}$ and $\overrightarrow{AH_{j}}$ before passing them through a linear layer with tanh activation to obtain $e_c(u_j)$. This ensures that the current $e_c(u_j)$ is not influenced by future utterances, enabling us to observe the change in donation probability over time.

$$\begin{aligned}
\overrightarrow{H_{j}} &= \operatorname{GRU}\left(e\left(u_{j}\right), \overrightarrow{H_{j-1}}\right)\\
\overrightarrow{AH_{j}} &= \operatorname{MaskSelfAttention}(\overrightarrow{H_{j}}) \\
e_c(u_j) &= \operatorname{tanh}(W_u[\overrightarrow{AH_{j}}; \overrightarrow{H_{j}}; e(u_j)] + b_u)
\end{aligned}$$
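The masking can be implemented with a simple causal (upper-triangular) mask; a sketch over a single conversation, with illustrative shapes, is:

```python
import torch

def masked_self_attention(H):
    """H: (n, d) utterance hidden states H_1..H_n. Position j attends only to
    positions <= j, so the contextual state never looks at future utterances."""
    n, d = H.shape
    scores = H @ H.T / d ** 0.5
    future = torch.triu(torch.ones(n, n, dtype=torch.bool), diagonal=1)
    scores = scores.masked_fill(future, float("-inf"))
    return torch.softmax(scores, dim=-1) @ H          # AH_j for every position j
```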
We explore different hierarchical architecture variants which differ in the creation of the contextualized embeddings $e_c(w_k)$ and $e_c(u_j)$ in the equations above: (1) \method{} includes only the final hidden state $\overrightarrow{H_{j}}$; (2) \method{}-f additionally employs the utterance embedding $e(u_j)$; and (3) \method{}-sf also includes the attention vector $\overrightarrow{AH_{j}}$.

We feed the final contextualized utterance embedding $e_c(u_j)$ through a fully-connected layer with dropout and project it onto the state space of face acts. We then apply softmax to obtain a probability distribution over the face acts, with the negative log-likelihood as the loss function. Given the true labels $y$ and the predicted labels $y'$, the loss is computed over all $n$ utterances in a conversation as:

$$L_{f} = -\sum_{i=1}^{n} \sum_{y_j \in Y} y_j \log(y'_{j})$$

**Donation Outcome Prediction:** Prior work notes that the exchange of face acts contributes towards an evolving conversational state. We seek to view the evolving state representation within our sequence model and analyze its impact on conversation success. The best reflection of what the evolving conversational state accomplishes in the context of persuasion is whether a donation occurs or not. We thus add the prediction task as supervision and interpret the resulting conversation state based on how the probability of donation changes. We accomplish the supervision by incorporating another loss, called the donation loss, in addition to the loss obtained for face acts.

For each utterance $u_{j}$, we apply masked self-attention over the set of contextual utterance embeddings $e_c(u_j)$ up to the $j^{th}$ utterance and project it through a linear layer with tanh activation to obtain the donation score $don_{j}$. The tanh non-linearity ensures that the donation score remains between -1 and 1 and intuitively denotes the delta change in score from the previous step. We finally compute the probability of donation $o'_{j}$ at the $j^{th}$ step by applying a sigmoid activation over the sum of the probability at the previous step and the delta change $don_j$. This ensures that the probability $o'_{j}$ is restricted between 0 and 1. We obtain the donation loss $L_{d}$ by taking the mean squared error of the donation probability at the last step $o'_{n}$ and the actual donation outcome $o_{n}$; $o_{n}$ is 1 if the conversation is successful, otherwise 0. We also experiment with a binary cross-entropy loss and obtain similar results.

$$\begin{aligned}
e_d(u_j) &= \operatorname{MaskSelfAttention}(e_c(u_j))\\
don_j &= \operatorname{tanh}(W_d[e_d(u_j)]+ b_d)\\
o'_j &= \sigma(o'_{j-1}+don_j)\\
L_{d} &= (o'_{n}- o_n)^{2}
\end{aligned}$$
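A sketch of this donation head follows; the shape of $W_d$, the bias $b_d$, and the initial probability $o'_0$ are assumptions made only for illustration.

```python
import torch

def donation_probabilities(e_d, W_d, b_d, o_init=0.5):
    """e_d: (n, d) masked-attention utterance representations e_d(u_j).
    W_d: (d,) projection vector (assumed), b_d: scalar bias (assumed).
    Returns the per-step donation probabilities o'_1 .. o'_n."""
    o_prev = torch.tensor(o_init)                   # assumed starting probability o'_0
    probs = []
    for j in range(e_d.size(0)):
        don_j = torch.tanh(e_d[j] @ W_d + b_d)      # delta change in (-1, 1)
        o_prev = torch.sigmoid(o_prev + don_j)      # probability after utterance j
        probs.append(o_prev)
    return torch.stack(probs)
```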
The donation loss is combined with the original face-act loss in a weighted fashion using a hyperparameter $\alpha \in [0,1]$:

$$L_{tot} = \alpha L_{f} + (1-\alpha)L_{d}$$

**Correlating face acts with donation outcome:**
The aforementioned formulation enables us to obtain the donation probability at any given step and assess the impact of a difference in conversational state (due to a specific face act) on the local assessment of the probability of donation. To quantify the impact, we perform linear regression with the donation probability at each time step ($y_{i}$) as the dependent variable. The independent variables include the predicted face acts for that step $(f_{i}^{k})$ and the donation probability at the previous step $y_{i-1}$:

$$y_{i} = \beta_{0} \, y_{i-1} + \sum_{k}\beta_{k} \, f_{i}^{k}$$

Here, $\beta_{k}$ represents the coefficient of the corresponding face act and $\beta_{0}$ the coefficient for $y_{i-1}$.
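As an illustration of fitting this regression (with synthetic stand-ins for the model outputs; in practice $y_i$, $y_{i-1}$ and the face-act indicators come from the trained model), one could write:

```python
import numpy as np
from sklearn.linear_model import LinearRegression

rng = np.random.default_rng(0)
n_steps, n_face_acts = 500, 9                                  # 8 face acts plus "Other"
face_acts = rng.integers(0, 2, size=(n_steps, n_face_acts))    # predicted face-act indicators f_i^k
y_prev = rng.uniform(0, 1, size=(n_steps, 1))                  # donation probability y_{i-1}
y = rng.uniform(0, 1, size=n_steps)                            # donation probability y_i

X = np.hstack([y_prev, face_acts])
reg = LinearRegression(fit_intercept=False).fit(X, y)          # matches the intercept-free formulation
beta_0, beta_k = reg.coef_[0], reg.coef_[1:]                   # coefficient for y_{i-1} and per face act
```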
2101.09429/main_diagram/main_diagram.drawio
ADDED
@@ -0,0 +1 @@
<mxfile host="app.diagrams.net" modified="2020-05-08T18:53:34.835Z" agent="5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Safari/537.36" etag="CAjK4a78EHx0jTDWkc6I" version="13.0.9" type="device"><diagram id="nanmpDWxzCEqGYSDXc3q" name="Page-1">5VlNc5swEP01ProDCGJyTJy0vXSmM55pk95UtAZNBXJlEeP8+gojvgSu7TY2CZnxgX36QHpvd7XCEzSPs08Cr6IvnACbOBbJJuhu4ji26ziT/GeRbYHMXK8AQkGJ7lQDC/oMGrQ0mlIC61ZHyTmTdNUGA54kEMgWhoXgm3a3JWftt65wCB1gEWDWRb9TIqMC9T2rxj8DDaPyzbalW2JcdtbAOsKEbxoQup+gueBcFk9xNgeWk1fyUoz7uKe1WpiARB4zYOU+kfTh2093enONF8/O9Ee6nupZnjBL9Yb1YuW2ZEDNoshWxu0mohIWKxzkLRult8IiGTNl2epxSRmbc8aFshOe5CP07CAkZHuXbVdkKC8CHoMUW9VFD3Bmmj/tQBWfm1qOivSoIcWVxrD2gLCauiZJPWieTuDM6XAWCCBU5rQFXECHQbV52aZqLQX/BQZZPfxhRsNEmYEiCxR+m1NJlXfe6IaYEsL2aSN4mhDIt2K9kBa+oYXb1cLvkcI5lxTo1buvfd2mDA3uvm6HM5WzafI7hSTYjth3HUOIHt91L+m7/hG+m5Cb/AyrSW3ooPYttg9N47Fp3GWau8LatpgE0jn1DB7VMYtFCPLQ2dHlu8Gn18NniQlgWNKn9jL6SNZv+MppIms5XcdryemaOq15KgLQo5rHojmRZ/iFb0xU8NCZaKd5te1/d4PrjhsoqacEljhlcrzhiIy86Pbkxdklw7EsOFvxeIXjfPeh3G3amrn+iBUxDndkD5wg7WOK06MypHVKhoSMyt2gD562Hhst9aDcOD2rFmnpcEnzyrOvZUQveqPZ1+5WQ1XUs13U59i4A9+sjKoSfrDA995j4Nv+kZGPhox8299zbv935JsXnnNH/tVLOdlQ9fegfoDME+DN+kH3Hjb64tv2Dhffl8347/QWVH3leS23oHLiv92CrPHqYQYGGroUcnruQLuKdMQimF+ZzyiCMus/YIoTpf4bC93/AQ==</diagram></mxfile>
2101.09429/main_diagram/main_diagram.pdf
ADDED
Binary file (10.9 kB)

2101.09429/paper_text/intro_method.md
ADDED
@@ -0,0 +1,207 @@
# Introduction

Research interest in XAI is re-emerging. Earlier works such as [@chandrasekaran1989explaining], [@swartout1993explanation], and [@swartout1985rule] focused primarily on explaining the decision processes of knowledge-based systems and expert systems. The renewed interest in XAI research stems from the recent advancements in AI, its application to a wide range of areas, and the concerns over unethical use, lack of transparency, and undesired biases in the models. In addition, recent laws by different governments are necessitating more research in XAI. According to [@samek2017explainable] and [@fernandez2019evolutionary], XAI encompasses Machine Learning (ML) or AI systems for demystifying the internals of black-box models (e.g., what the models have learned) and/or for explaining individual predictions.

In 2019, Mueller et al. presented a comprehensive review of the approaches taken by a number of types of "explanation systems" and characterized them into three generations: (1) first-generation systems, (2) second-generation systems, and (3) third-generation systems [@mueller2019explanation]. The first-generation systems attempt to clearly express the internal working process of the system by embedding expert knowledge in rules often elicited directly from experts (e.g., by transforming rules into natural language expressions). The second-generation systems can be regarded as human-computer systems designed around human knowledge and reasoning capacities to provide cognitive support, for instance by arranging the interface in a way that complements the knowledge that the user is lacking. Similar to the first-generation systems, the third-generation systems also attempt to clarify the inner workings of the systems, but these systems are now mostly "black box" (e.g., deep nets, ensemble approaches). In addition, researchers can nowadays use advanced computer technologies in data visualization, animation, and video, which have a strong potential to drive XAI research further. Many new ideas have been proposed for generating explainable decisions, driven by the need for accountable, fair, and trustable systems and decisions.

There has been some previous work [@molnar2019quantifying] that mentions three notions for the quantification of explainability. Two of the three notions involve experimental studies with humans (e.g., a domain expert or a layperson) that mainly investigate whether a human can predict the outcome of the model [@dhurandhar2017tip], [@friedler2019assessing], [@huysmans2011empirical], [@poursabzi2018manipulating], [@zhou2018measuring]. The third notion (proxy tasks) does not involve a human, and instead uses known truths as a metric (e.g., the lower the depth of a decision tree, the more explainable the model).

Some notable reviews on XAI are listed in Table [1](#tab:survey){reference-type="ref" reference="tab:survey"}. However, while these works provide analysis from one or more of the mentioned perspectives, a comprehensive review considering all of the important perspectives, using a mutual test case, is still missing. Therefore, we attempt to provide an overview using a demonstration of a mutual test case or task, and then analyze the various approaches from multiple perspectives, with some future directions of research towards responsible or human-centered AI.

The mutual test case or task that we use in this paper to demonstrate and evaluate the XAI methods is *credit default prediction*. This mutual test case enables a better understanding of the comparative advantages of different XAI approaches. We predict whether a customer is going to default on a mortgage payment (i.e., be unable to pay a monthly payment) in the near future or not, and explain the decision using different XAI methods in a human-friendly way. We use the popular Freddie Mac [@freddie_mac] dataset for the experiments. Table [\[tab:dataset\]](#tab:dataset){reference-type="ref" reference="tab:dataset"} lists some important features and their descriptions. The feature descriptions are taken from the dataset's [@freddie_mac] user guide.
|
| 12 |
+
|
| 13 |
+
::: table*
|
| 14 |
+
**Feature** **Description**
|
| 15 |
+
------------------------------ ----------------------------------------------------------------------------------------
|
| 16 |
+
creditScore                    A number between 300 and 850 that indicates the creditworthiness of the borrower.
|
| 17 |
+
originalUPB                    Unpaid principal balance on the note date.
|
| 18 |
+
originalInterestRate Original interest rate as indicated by the mortgage note.
|
| 19 |
+
currentLoanDelinquencyStatus Indicates the number of days the borrower is delinquent.
|
| 20 |
+
numberOfBorrower               Number of borrowers who are obligated to repay the loan.
|
| 21 |
+
currentInterestRate Active interest rate on the note.
|
| 22 |
+
originalCombinedLoanToValue    Ratio of all mortgage loans to the appraised value of the mortgaged property on the note date.
|
| 23 |
+
currentActualUPB               Unpaid principal balance as of the latest month of payment.
|
| 24 |
+
defaulted                      Whether the customer defaulted on payment (1) or not (0).
|
| 25 |
+
:::
|
| 26 |
+
|
| 27 |
+
We use the R package "iml" [@iml] to produce the results for the XAI methods described in this review.
|
| 28 |
+
|
| 29 |
+
# Method
|
| 30 |
+
|
| 31 |
+
This section summarizes different explainability methods with their pros, cons, challenges, and competitive advantages, primarily based on two recent comprehensive surveys: [@molnar2018interpretable] and [@doshi2017towards]. We then enhance the previous surveys with a multi-perspective analysis, recent research progress, and future research directions. [@doshi2017towards] broadly categorizes methods for explanation into three kinds: Intrinsically Interpretable Methods, Model Agnostic Methods, and Example-Based Explanations.
|
| 32 |
+
|
| 33 |
+
The most convenient way to achieve explainable results is to stick with intrinsically interpretable models such as Linear Regression, Logistic Regression, and Decision Trees, avoiding the use of "black box" models. However, this natural explainability usually comes at a cost in performance.
|
| 34 |
+
|
| 35 |
+
In a **Linear Regression**, the predicted target is a weighted sum of the input features, so the weights or coefficients of the linear equation can serve as a medium for explaining a prediction when the number of features is small. $$\begin{equation}
\label{eq:lr}
y = b_0 + b_1 x_1 + \dots + b_n x_n + \epsilon
\end{equation}$$ In Formula [\[eq:lr\]](#eq:lr){reference-type="ref" reference="eq:lr"}, $y$ is the target (e.g., the chance of credit default), $b_0$ is a constant value known as the intercept (e.g., .33), $b_i$ is the learned weight or coefficient (e.g., .33) for the corresponding feature $x_i$ (e.g., credit score), and $\epsilon$ is a constant error term (e.g., .0001). Linear regression comes with an interpretable linear relationship among features. However, when multiple features are correlated, the distinct influence of each feature becomes indeterminable, as the individual influences are no longer additive to the overall prediction.
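The results in this review are produced with R's "iml" package; purely as an illustrative sketch (not the authors' code), the same kind of coefficient-based explanation can be obtained in Python with scikit-learn. The feature names and data below are synthetic stand-ins for the Freddie Mac columns.

```python
# Illustrative sketch (not the paper's pipeline): coefficients of a logistic
# regression as a built-in explanation. Data and feature names are synthetic
# stand-ins for the Freddie Mac columns discussed above.
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import StandardScaler

rng = np.random.default_rng(0)
feature_names = ["creditScore", "currentActualUPB", "currentLoanDelinquencyStatus"]
X = np.column_stack([
    rng.uniform(300, 850, 1000),      # creditScore
    rng.uniform(0, 300_000, 1000),    # currentActualUPB
    rng.integers(0, 90, 1000),        # days delinquent
])
# Toy labelling rule: low score plus long delinquency means default.
y = ((X[:, 0] < 700) & (X[:, 2] > 10)).astype(int)

X_std = StandardScaler().fit_transform(X)   # standardize so weights are comparable
model = LogisticRegression(max_iter=1000).fit(X_std, y)

for name, w in zip(feature_names, model.coef_[0]):
    print(f"{name}: weight={w:+.3f}, odds ratio={np.exp(w):.3f}")
print("intercept:", model.intercept_[0])
```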
|
| 39 |
+
|
| 40 |
+
**Logistic Regression** is an extension of Linear Regression to classification problems: it models class probabilities. Its interpretation differs from Linear Regression because the output is a probability between 0 and 1, so a weight does not exactly represent a linear relationship with the predicted probability. However, each weight still indicates the direction of influence (negative or positive) and a factor of influence on the odds of one class over the other, although it is not additive to the overall prediction.
|
| 41 |
+
|
| 42 |
+
**Decision Tree-based models** split the data multiple times based on a cutoff threshold at each node until a leaf node is reached. Unlike Logistic and Linear Regression, they work even when the relationship between input and output is non-linear and when features interact with one another (i.e., there is correlation among features). In a Decision Tree, a path from the root (i.e., starting) node (e.g., credit score in Figure [1](#fig:dt){reference-type="ref" reference="fig:dt"}) to a leaf node (e.g., default) tells how the decision (the leaf node) was reached. Usually, nodes in the upper levels of the tree have higher importance than lower-level nodes. Also, the fewer levels (i.e., the smaller the height) a tree has, the more explainable it is. In addition, the cutoff point of a node in a Decision Tree provides counterfactual information. In Figure [1](#fig:dt){reference-type="ref" reference="fig:dt"}, if the credit score is greater than the cutoff point 748, then the customer is predicted as non-default. Tree-based explanations are also contrastive, i.e., a "what if" analysis provides the relevant alternative path to reach a leaf node. According to the tree in Figure [1](#fig:dt){reference-type="ref" reference="fig:dt"}, there are two separate paths (credit score $\rightarrow$ delinquency $\rightarrow$ non-default; and credit score $\rightarrow$ non-default) that lead to a non-default classification.
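As a sketch of how such path-based explanations can be read off a fitted tree (illustrative only, with synthetic data rather than the model behind Figure 1), scikit-learn can print the learned splits directly:

```python
# Illustrative sketch: a shallow decision tree whose root-to-leaf paths act as
# explanations. Data and cutoffs are synthetic, not the paper's fitted model.
import numpy as np
from sklearn.tree import DecisionTreeClassifier, export_text

rng = np.random.default_rng(1)
X = np.column_stack([
    rng.uniform(300, 850, 500),   # creditScore
    rng.integers(0, 60, 500),     # currentLoanDelinquencyStatus (days)
])
y = ((X[:, 0] <= 748) & (X[:, 1] > 0)).astype(int)   # toy default rule

tree = DecisionTreeClassifier(max_depth=2, random_state=0).fit(X, y)
# The printed rules read like the paths in Figure 1, e.g.
# "creditScore <= 748 and delinquency > 0 -> default".
print(export_text(tree, feature_names=["creditScore", "delinquency"]))
```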
|
| 43 |
+
|
| 44 |
+
However, tree-based explanations cannot express a linear relationship between input features and the output. Trees also lack smoothness: slight changes in the input can have a big impact on the predicted output. In addition, there can be multiple different trees for the same problem, and the more nodes or depth a tree has, the more challenging it is to interpret.
|
| 45 |
+
|
| 46 |
+
<figure id="fig:dt" data-latex-placement="h!">
|
| 47 |
+
<img src="media/dtree.png" />
|
| 48 |
+
<figcaption>Decision Trees</figcaption>
|
| 49 |
+
</figure>
|
| 50 |
+
|
| 51 |
+
**Decision Rules** (simple IF-THEN-ELSE conditions) are also an inherently explainable model. For instance, "IF the credit score is less than or equal to 748 AND the customer is delinquent on payment for more than zero days (condition), THEN the customer will default on payment (prediction)". Although IF-THEN rules are straightforward to interpret, they are mostly limited to classification problems (i.e., they do not support regression) and are inadequate for describing linear relationships. In addition, the **RuleFit** algorithm [@friedman2008predictive] has an inherent interpretation to some extent, as it learns sparse linear models that can detect interaction effects in the form of decision rules. Decision rules consist of combinations of split decisions from each of the decision paths. Besides the original features, RuleFit also learns some new features to capture the interaction effects of the original features. Usually, interpretability degrades with an increasing number of features.
|
| 52 |
+
|
| 53 |
+
Other interpretable models include extensions of linear models such as **Generalized Linear Models (GLMs)** and **Generalized Additive Models (GAMs)**; they relax some of the assumptions of linear models (e.g., that the target outcome y given the features follows a Gaussian distribution, and that there is no interaction among features). However, these extensions make models more complex (i.e., they add interactions) and thus less interpretable. In addition, the **Naïve Bayes Classifier**, based on Bayes' theorem, where the class probability for each feature is calculated independently (assuming strong feature independence), and **K-Nearest Neighbors**, which uses the nearest neighbors of a data point for prediction (regression or classification), also fall under intrinsically interpretable models.
|
| 54 |
+
|
| 55 |
+
Model-agnostic methods separate the explanation from the machine learning model, allowing the explanation method to be compatible with a variety of models. This separation has some clear advantages: (1) the interpretation method can work with multiple ML models, (2) it can provide different forms of explainability (e.g., visualization of feature importance, a linear formula) for a particular model, and (3) it allows a flexible representation. Some of the model-agnostic interpretation methods are the Partial Dependence Plot (PDP), Individual Conditional Expectation (ICE), the Accumulated Local Effects (ALE) plot, Feature Interaction, Feature Importance, Global Surrogates, Local Surrogates (LIME), and Shapley Values (SHAP).
|
| 56 |
+
|
| 57 |
+
The Partial Dependence Plot (PDP or PD plot) shows the marginal effect of one or two features (at best three features in 3-D) on the predicted outcome of an ML model [@friedman2001greedy]. It is a global method, as it shows overall model behavior, and it is capable of showing linear or complex relationships between the target and the feature(s). It provides a function that depends only on the plotted feature(s) by marginalizing over the other features in a way that includes the interactions among them. PDP offers a clear and causal interpretation (with respect to the model) by showing the change in prediction due to changes in a particular feature. However, PDP assumes that the plotted features are not correlated with the remaining features, which is rarely the case in the real world. Furthermore, there is a practical limit of two features that a PD plot can clearly explain at a time, and since it plots the average effect of the feature(s) over all instances, it does not explain a specific instance. The PD plot in Figure [2](#fig:pdp){reference-type="ref" reference="fig:pdp"} shows the effect of the credit score on the prediction. The individual bar lines along the X axis represent the frequency of samples in different ranges of credit scores.
|
| 58 |
+
|
| 59 |
+
<figure id="fig:pdp" data-latex-placement="h!">
|
| 60 |
+
<img src="media/pdp.png" />
|
| 61 |
+
<figcaption>Partial Dependence Plot (PDP)</figcaption>
|
| 62 |
+
</figure>
|
| 63 |
+
|
| 64 |
+
Unlike PDP, ICE plots one line per instance, showing how a feature influences the change in that instance's prediction (see Figure [3](#fig:ice){reference-type="ref" reference="fig:ice"}). Averaging all lines of an ICE plot gives the PD plot [@goldstein2017package] (i.e., the single line shown in the PD plot in Figure [2](#fig:pdp){reference-type="ref" reference="fig:pdp"}). Figure [4](#fig:pdp_ice){reference-type="ref" reference="fig:pdp_ice"} combines PDP and ICE in the same plot for a better interpretation.
|
| 65 |
+
|
| 66 |
+
<figure id="fig:ice" data-latex-placement="h!">
|
| 67 |
+
<img src="media/ice.png" />
|
| 68 |
+
<figcaption>Individual Conditional Expectation (ICE)</figcaption>
|
| 69 |
+
</figure>
|
| 70 |
+
|
| 71 |
+
<figure id="fig:pdp_ice" data-latex-placement="h!">
|
| 72 |
+
<img src="media/pdp-ice.png" />
|
| 73 |
+
<figcaption>PDP and ICE combined together in the same plot</figcaption>
|
| 74 |
+
</figure>
|
| 75 |
+
|
| 76 |
+
Although ICE curves are more intuitive to understand than a PD plot, they can only display one feature meaningfully at a time. In addition, ICE also suffers from the problem of correlated features and from overcrowded lines when there are many instances.
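Given any fitted model, PD and ICE curves of the kind shown in Figures 2-4 can be produced with scikit-learn's inspection module; the call below is a generic sketch on synthetic data, not the iml-based workflow used for the figures.

```python
# Illustrative sketch: PDP and ICE curves for one feature of a fitted model.
# Any fitted estimator and feature matrix could be substituted here.
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import RandomForestClassifier
from sklearn.inspection import PartialDependenceDisplay

rng = np.random.default_rng(2)
X = np.column_stack([rng.uniform(300, 850, 800), rng.uniform(0, 300_000, 800)])
y = (X[:, 0] < 700).astype(int)
model = RandomForestClassifier(n_estimators=50, random_state=0).fit(X, y)

# kind="average" draws the PD curve, kind="individual" the ICE lines,
# and kind="both" overlays them as in the combined plot.
PartialDependenceDisplay.from_estimator(
    model, X, features=[0],
    feature_names=["creditScore", "currentActualUPB"], kind="both")
plt.show()
```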
|
| 77 |
+
|
| 78 |
+
Similar to PD plots (Figure [2](#fig:pdp){reference-type="ref" reference="fig:pdp"}), ALE plots (Figure [5](#fig:ale){reference-type="ref" reference="fig:ale"}) describe how features influence the prediction on average. However, unlike PDP, ALE plots work reasonably well with correlated features and are comparatively faster. Although ALE plots are not biased by correlated features, it remains challenging to interpret the change in prediction when strongly correlated features are analyzed in isolation; in that case, only plots showing changes in both correlated features together make it possible to understand the change in the prediction.
|
| 79 |
+
|
| 80 |
+
<figure id="fig:ale" data-latex-placement="h!">
|
| 81 |
+
<img src="media/ale.png" />
|
| 82 |
+
<figcaption>Accumulated Local Effects (ALE) Plot</figcaption>
|
| 83 |
+
</figure>
|
| 84 |
+
|
| 85 |
+
When features interact with one another, the individual feature effects do not sum up to the total effect of all features combined. The H-statistic (i.e., Friedman's H-statistic) helps to detect different types of interaction, even among three or more features. The interaction strength between two features is the difference between the *partial dependence function for the two features together* and the sum of the *partial dependence functions for each feature separately*. Figure [6](#fig:feature_interaction){reference-type="ref" reference="fig:feature_interaction"} shows the interaction strength of each participating feature. For example, *current Actual UPB* has the highest level of interaction with other features, and *credit score* has the least. However, calculating feature interaction is computationally expensive, and using sampling instead of the entire dataset usually leads to variance from run to run.
|
| 86 |
+
|
| 87 |
+
<figure id="fig:feature_interaction" data-latex-placement="h!">
|
| 88 |
+
<img src="media/interaction.png" />
|
| 89 |
+
<figcaption>Feature interaction</figcaption>
|
| 90 |
+
</figure>
|
| 91 |
+
|
| 92 |
+
Usually, the importance of a feature is the increase in the prediction error of the model when we permute the values of that feature, breaking the true relationship between the feature and the outcome. If the error increases after shuffling the feature's values, the feature is important. [@breiman2001random] introduced permutation-based feature importance for Random Forests; later, [@fisher2018model] extended the work to a model-agnostic version. Feature importance provides a compressed, global insight into the ML model's behavior. For example, Figure [7](#fig:feature_importance){reference-type="ref" reference="fig:feature_importance"} shows the importance of each participating feature: *current Actual UPB* possesses the highest feature importance, and *credit score* the lowest. Although feature importance takes into account both the main effect of a feature and its interactions, this is also a disadvantage, as the interaction is included in the importance of each of the correlated features. We can see that the feature *current Actual UPB* possesses the highest feature importance (Figure [7](#fig:feature_importance){reference-type="ref" reference="fig:feature_importance"}) while also possessing the highest interaction strength (Figure [6](#fig:feature_interaction){reference-type="ref" reference="fig:feature_interaction"}). As a result, in the presence of interaction among features, the feature importances do not add up to the total drop in performance. Besides, it is unclear whether the test set or the training set should be used to compute feature importance, and the measure shows variance from run to run because of the shuffling. It is worth mentioning that feature importance also falls under the global methods.
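Permutation feature importance in the model-agnostic sense of [@fisher2018model] is available in common libraries; a minimal sketch with scikit-learn on synthetic data (illustrative only) looks as follows.

```python
# Illustrative sketch: model-agnostic permutation feature importance.
# The drop in score after shuffling a column measures that feature's importance.
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.inspection import permutation_importance
from sklearn.model_selection import train_test_split

rng = np.random.default_rng(3)
X = np.column_stack([rng.uniform(300, 850, 1000), rng.uniform(0, 300_000, 1000)])
y = (X[:, 0] < 700).astype(int)
X_tr, X_te, y_tr, y_te = train_test_split(X, y, random_state=0)

model = RandomForestClassifier(random_state=0).fit(X_tr, y_tr)
# Repeated shuffles give a mean and a spread, reflecting the run-to-run variance
# mentioned above; whether to use the train or the test split is a judgment call.
result = permutation_importance(model, X_te, y_te, n_repeats=10, random_state=0)
for name, mean, std in zip(["creditScore", "currentActualUPB"],
                           result.importances_mean, result.importances_std):
    print(f"{name}: {mean:.3f} +/- {std:.3f}")
```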
|
| 93 |
+
|
| 94 |
+
<figure id="fig:feature_importance" data-latex-placement="h!">
|
| 95 |
+
<img src="media/feature_importance.png" />
|
| 96 |
+
<figcaption>Feature importance</figcaption>
|
| 97 |
+
</figure>
|
| 98 |
+
|
| 99 |
+
A global surrogate model tries to approximate the overall behavior of a "black box" model with an interpretable ML model. In other words, a surrogate model tries to approximate the prediction function of the black-box model as accurately as possible while itself remaining interpretable. It is also known as a meta-model, approximation model, response surface model, or emulator. We approximate the behavior of a Random Forest using a CART decision tree (Figure [8](#fig:global_surrogate){reference-type="ref" reference="fig:global_surrogate"}). The original black-box model could even be discarded if the surrogate model demonstrates comparable performance. Although a surrogate model comes with interpretability and flexibility (e.g., model agnosticism), the possibility of diverse explanations for the same "black box", such as multiple decision trees with different structures, is a drawback. Besides, some would argue that this provides only an illusion of interpretability.
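The essence of the global surrogate step, fitting an interpretable model to the black box's predictions and checking how faithfully it mimics them, can be sketched as follows; this mirrors the idea, not the exact CART configuration behind Figure 8.

```python
# Illustrative sketch: train a shallow decision tree to mimic a random forest.
# Fidelity measures how well the surrogate reproduces the black-box predictions;
# the surrogate's own splits then serve as the (approximate) explanation.
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.tree import DecisionTreeClassifier, export_text
from sklearn.metrics import accuracy_score

rng = np.random.default_rng(4)
X = np.column_stack([rng.uniform(300, 850, 1000), rng.integers(0, 60, 1000)])
y = ((X[:, 0] <= 748) & (X[:, 1] > 0)).astype(int)

black_box = RandomForestClassifier(random_state=0).fit(X, y)
bb_pred = black_box.predict(X)                     # targets for the surrogate

surrogate = DecisionTreeClassifier(max_depth=3, random_state=0).fit(X, bb_pred)
fidelity = accuracy_score(bb_pred, surrogate.predict(X))
print(f"surrogate fidelity to the black box: {fidelity:.3f}")
print(export_text(surrogate, feature_names=["creditScore", "delinquency"]))
```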
|
| 100 |
+
|
| 101 |
+
<figure id="fig:global_surrogate" data-latex-placement="h!">
|
| 102 |
+
<img src="media/global_surrogate.png" />
|
| 103 |
+
<figcaption>Global surrogate</figcaption>
|
| 104 |
+
</figure>
|
| 105 |
+
|
| 106 |
+
Unlike global surrogates, local surrogates explain individual predictions of black-box models. Local Interpretable Model-Agnostic Explanations (LIME) was proposed by [@ribeiro2016should]. LIME trains an inherently interpretable model (e.g., Decision Trees) on a new dataset made of perturbed samples and the corresponding predictions of the black box. Although the learned model can be a good approximation of local behavior, it is not a good global approximation; this trait is known as local fidelity. Figure [9](#fig:lime){reference-type="ref" reference="fig:lime"} is a visualization of the output from LIME. For a random sample, the black box predicts that a customer will default on payment with a probability of 1; the local surrogate model, LIME, also predicts that the customer will default on the payment, but with a probability of 0.99, slightly less than the black-box model's prediction. LIME also shows which features contribute to the decision and by how much. Furthermore, LIME allows replacing the underlying "black box" model while keeping the same local interpretable model for the explanation, and it works for tabular data, text, and images. Because LIME is an approximation, and the local model might not capture the complete attribution due to its generalization (e.g., shorter trees, lasso optimization), it might be unfit for cases where we legally need a complete explanation of a decision. Furthermore, there is no consensus on the size of the neighborhood for the local model; sometimes LIME provides very different explanations for two nearby data points.
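A local explanation of the kind shown in Figure 9 can also be reproduced with the open-source `lime` package; the snippet below is a generic tabular sketch with a placeholder model and synthetic data, not the exact configuration behind the figure.

```python
# Illustrative sketch: explain one prediction of a black-box classifier with LIME.
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from lime.lime_tabular import LimeTabularExplainer

rng = np.random.default_rng(5)
feature_names = ["creditScore", "currentActualUPB", "delinquency"]
X = np.column_stack([rng.uniform(300, 850, 1000),
                     rng.uniform(0, 300_000, 1000),
                     rng.integers(0, 60, 1000)])
y = ((X[:, 0] <= 748) & (X[:, 2] > 0)).astype(int)
model = RandomForestClassifier(random_state=0).fit(X, y)

explainer = LimeTabularExplainer(X, feature_names=feature_names,
                                 class_names=["non-default", "default"],
                                 mode="classification")
# Perturb the neighborhood of one instance and fit a sparse local model to it.
exp = explainer.explain_instance(X[0], model.predict_proba, num_features=3)
print(exp.as_list())    # (feature condition, signed local weight) pairs
```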
|
| 107 |
+
|
| 108 |
+
<figure id="fig:lime" data-latex-placement="h!">
|
| 109 |
+
<img src="media/lime.png" />
|
| 110 |
+
<figcaption>Local Interpretable Model-Agnostic Explanations (LIME)</figcaption>
|
| 111 |
+
</figure>
|
| 112 |
+
|
| 113 |
+
Shapley values provide another local explanation method. In 1953, Shapley [@shapley1953value] introduced the Shapley value, which is based on coalitional game theory and helps to distribute a payout among participating players fairly. The assumption here is that each feature value of the instance is a player in a game, and the prediction is the total payout, which is distributed among the players (i.e., features) according to their contribution to it. We use Shapley values (see Figure [10](#fig:shapely){reference-type="ref" reference="fig:shapely"}) to analyze the predictions of a random forest model for the credit default prediction problem. The actual prediction for a random sample is 1.00, the average prediction over all samples in the dataset is 0.53, and their difference of .47 (1.00 $-$ 0.53) is made up of the individual contributions of the features (e.g., *Current Actual UPB* contributes 0.36). The Shapley value is a feature's average contribution to the prediction over all possible coalitions of features, which makes it computationally expensive when there are many features. Unlike LIME, the Shapley value is an explanation method with a solid theory that provides full explanations. However, it also suffers from the problem of correlated features. Furthermore, the Shapley value returns a single value per feature; there is no way to make a statement about the change in output resulting from a change in input. One notable implementation of Shapley values is the work of [@lundberg2016unexpected], which they call SHAP.
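The additive decomposition in Figure 10 (prediction = average prediction + sum of per-feature contributions) can be computed with the `shap` library; the snippet below is a generic sketch on a synthetic regression-style model, not the configuration used for the figure.

```python
# Illustrative sketch: Shapley-value attributions for a single prediction using
# SHAP's TreeExplainer. A regressor is used so the output is a single score.
import numpy as np
import shap
from sklearn.ensemble import RandomForestRegressor

rng = np.random.default_rng(6)
feature_names = ["creditScore", "currentActualUPB", "delinquency"]
X = np.column_stack([rng.uniform(300, 850, 1000),
                     rng.uniform(0, 300_000, 1000),
                     rng.integers(0, 60, 1000)])
y = ((X[:, 0] <= 748) & (X[:, 2] > 0)).astype(float)   # default score in [0, 1]
model = RandomForestRegressor(random_state=0).fit(X, y)

explainer = shap.TreeExplainer(model)
shap_values = explainer.shap_values(X[:1])              # attributions, one sample
base_value = float(np.ravel(explainer.expected_value)[0])
print("average prediction (base value):", round(base_value, 3))
for name, contrib in zip(feature_names, np.ravel(shap_values[0])):
    print(f"{name}: {contrib:+.3f}")
# base value + sum of contributions equals the model's prediction for the sample.
```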
|
| 114 |
+
|
| 115 |
+
<figure id="fig:shapely" data-latex-placement="h!">
|
| 116 |
+
<img src="media/shapely.png" />
|
| 117 |
+
<figcaption>Shapley values</figcaption>
|
| 118 |
+
</figure>
|
| 119 |
+
|
| 120 |
+
The Break Down package provides local explanations and is loosely related to the partial dependence algorithm, with an added step-wise procedure known as "Break Down" (proposed by [@staniak2018explanations]). It uses a greedy strategy to identify and remove features iteratively based on their influence on the overall average predicted response (baseline) [@Chapter179:online]. For instance, from a game theory perspective, it starts with an empty team and then adds feature values one by one in order of decreasing contribution. In each iteration, the contribution of a feature depends on the feature values already in the team, which is considered a drawback of this approach. However, it is faster than the Shapley value method due to the greedy approach, and for models without interactions the results are the same [@molnar2018interpretable]. Figure [11](#fig:breakdown){reference-type="ref" reference="fig:breakdown"} is a visualization of *break down* for a random sample, showing the contribution (positive or negative) of each participating feature towards the final prediction.
|
| 121 |
+
|
| 122 |
+
<figure id="fig:breakdown" data-latex-placement="h!">
|
| 123 |
+
<img src="media/instance_level_explanation_bankruptcy.png" />
|
| 124 |
+
<figcaption>Breakdown</figcaption>
|
| 125 |
+
</figure>
|
| 126 |
+
|
| 127 |
+
Example-Based Explanation methods use particular instances from the dataset to explain the behavior of the model and the distribution of the data in a model agnostic way. It can be expressed as "X is similar to Y and Y caused Z, so the prediction says X will cause Z". According to [@molnar2018interpretable], a few explanation methods that fall under Example-Based Explanations are described as follows:
|
| 128 |
+
|
| 129 |
+
The counterfactual method indicates the changes required in the input that would significantly change the output (e.g., reverse the prediction). Counterfactual explanations can explain individual predictions; for instance, they can describe causal situations such as "If A had not occurred, B would not have occurred". Although counterfactual explanations are human-friendly, they suffer from the "Rashomon effect", where each counterfactual explanation tells a different story to reach a prediction. In other words, there are multiple true (counterfactual) explanations for each instance-level prediction, and the challenge is how to choose the best one. Counterfactual methods do not require access to the data or the model and could even work with a system that does not use machine learning at all. On the other hand, this method does not work well for categorical variables with many values. For instance, if the credit score of customer 5 (from Table [2](#tab:example-based){reference-type="ref" reference="tab:example-based"}) is increased from 748 to 749 (matching the credit score of customer 6), with the other feature values unchanged, the customer is predicted not to default on a payment. In short, there can be multiple different ways to tune feature values so that a customer moves from non-default to default, or vice versa.
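A minimal way to search for such single-feature counterfactuals is a small grid search per feature; this is only a brute-force sketch on synthetic data (real counterfactual methods use more principled optimization), and all names here are hypothetical.

```python
# Illustrative sketch: brute-force search for a single-feature counterfactual,
# i.e. the smallest change to one feature that flips the predicted class.
import numpy as np
from sklearn.ensemble import RandomForestClassifier

rng = np.random.default_rng(7)
X = np.column_stack([rng.uniform(300, 850, 1000), rng.integers(0, 60, 1000)])
y = ((X[:, 0] <= 748) & (X[:, 1] > 0)).astype(int)
model = RandomForestClassifier(random_state=0).fit(X, y)

def single_feature_counterfactual(x, feature, grid):
    """Return the closest value of `feature` that flips the prediction, if any."""
    original = model.predict(x.reshape(1, -1))[0]
    candidates = []
    for value in grid:
        x_cf = x.copy()
        x_cf[feature] = value
        if model.predict(x_cf.reshape(1, -1))[0] != original:
            candidates.append((abs(value - x[feature]), value))
    return min(candidates) if candidates else None

x = np.array([748.0, 5.0])      # a customer predicted to default
print(single_feature_counterfactual(x, feature=0, grid=np.arange(300, 851)))
```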
|
| 130 |
+
|
| 131 |
+
Traditional explanation methods are mostly based on explaining correlation rather than causation. Moraffah et al. [@moraffah2020causal] focus on causally interpretable models that explain the possible decisions under different situations, such as being trained with different inputs or hyperparameters. This causal interpretability approach shares concepts with counterfactual analysis, as both build on causal inference. Their work also suggests possible uses in evaluating the fairness of decisions.
|
| 132 |
+
|
| 133 |
+
::: {#tab:example-based}
|
| 134 |
+
Customer Delinquency Credit score Defaulted
|
| 135 |
+
---------- ------------- -------------- -----------
|
| 136 |
+
1 162 680 yes
|
| 137 |
+
2 149 691 yes
|
| 138 |
+
3 6 728 yes
|
| 139 |
+
4 6 744 yes
|
| 140 |
+
5 0 748 yes
|
| 141 |
+
6 0 749 no
|
| 142 |
+
7 0 763 no
|
| 143 |
+
8 0 790 no
|
| 144 |
+
9 0 794 no
|
| 145 |
+
10 0 806 no
|
| 146 |
+
|
| 147 |
+
: Example-Based Explanations
|
| 148 |
+
:::
|
| 149 |
+
|
| 150 |
+
Adversarial techniques are capable of flipping a decision using counterfactual-like examples that fool the machine learner (i.e., small intentional perturbations of the input that cause a false prediction). However, adversarial examples can also help to discover hidden vulnerabilities and to improve the model. For instance, an attacker can intentionally design adversarial examples to cause the AI system to make a mistake (i.e., fooling the machine), which poses serious threats to cyber-security and autonomous vehicles. As an example, the credit default prediction system can be fooled for customer 5 just by increasing the credit score by 1 (see Table [2](#tab:example-based){reference-type="ref" reference="tab:example-based"}), leading to a reversed prediction.
|
| 151 |
+
|
| 152 |
+
Hartl et al. [@hartl2019explainability] emphasize understanding the implications of adversarial samples for Recurrent Neural Network (RNN)-based intrusion detection systems (IDS), because RNNs are well suited to sequential data analysis and network traffic exhibits sequential patterns. They find that an adversarial training procedure can significantly reduce the attack surface. Furthermore, [@marino2018adversarial] apply an adversarial approach to find the minimum modification of the input features of an intrusion detection system needed to reverse the classification of a misclassified instance. Besides satisfactory explanations of the reasons for misclassification, their approach provides further diagnostic capabilities.
|
| 153 |
+
|
| 154 |
+
Prototypes consist of a selected set of instances that represent the data very well. Conversely, instances that do not represent the data well are called criticisms [@kim2016examples]. Determining the optimal number of prototypes and criticisms is challenging. For example, customers 1 and 10 from Table [2](#tab:example-based){reference-type="ref" reference="tab:example-based"} can be treated as prototypes, as they are strong representatives of their corresponding targets. On the other hand, customers 5 and 6 (from Table [2](#tab:example-based){reference-type="ref" reference="tab:example-based"}) can be treated as criticisms: the distance between the two data points is minimal, and they might be classified into either class across runs of the same or different models.
|
| 155 |
+
|
| 156 |
+
Influential instances are data points from the training set that are influential for the prediction and for the determination of the model's parameters. While they help to debug the model and to understand its behavior better, determining the right cutoff point to separate influential from non-influential instances is challenging. For example, based on the values of the credit score and delinquency features, customers 1, 2, 9, and 10 from Table [2](#tab:example-based){reference-type="ref" reference="tab:example-based"} can be treated as influential instances, as they are strong representatives of their corresponding targets. On the other hand, customers 5 and 6 are not influential instances, as they lie near the margin of the classification decision boundary.
|
| 157 |
+
|
| 158 |
+
The prediction of a k-nearest neighbor model can be explained with the k neighboring data points (the neighbors that were averaged to make the prediction). A visualization of the individual cluster containing similar instances provides an interpretation of why an instance is a member of a particular group or cluster. For example, in Figure [12](#fig:knn){reference-type="ref" reference="fig:knn"}, the new sample (black circle) is classified according to the three nearby samples (3-nearest neighbors; one gray, two white). This visualization gives an interpretation of why a particular sample is part of a particular class.
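This neighbor-based explanation can be read directly from a fitted k-nearest-neighbor model; the snippet below is an illustrative sketch on synthetic data, not the code behind Figure 12.

```python
# Illustrative sketch: explain a k-NN prediction by listing the neighbors that
# actually took part in the vote (3-nearest-neighbor classification).
import numpy as np
from sklearn.neighbors import KNeighborsClassifier

rng = np.random.default_rng(8)
X = np.column_stack([rng.uniform(300, 850, 200), rng.integers(0, 60, 200)])
y = ((X[:, 0] <= 748) & (X[:, 1] > 0)).astype(int)

knn = KNeighborsClassifier(n_neighbors=3).fit(X, y)
x_new = np.array([[740.0, 2.0]])
distances, indices = knn.kneighbors(x_new)    # the 3 neighbors behind the vote
print("prediction:", knn.predict(x_new)[0])
for d, i in zip(distances[0], indices[0]):
    print(f"neighbor {i}: features={X[i]}, label={y[i]}, distance={d:.1f}")
```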
|
| 159 |
+
|
| 160 |
+
<figure id="fig:knn" data-latex-placement="h!">
|
| 161 |
+
<img src="media/knn.png" />
|
| 162 |
+
<figcaption>KNN</figcaption>
|
| 163 |
+
</figure>
|
| 164 |
+
|
| 165 |
+
Table [\[tab:interpretability_methods\]](#tab:interpretability_methods){reference-type="ref" reference="tab:interpretability_methods"} summarizes the explainability methods from the perspective of (A) whether the method approximates the model behavior (i.e., creates an illusion of interpretability) or finds actual behavior, (B) whether the method alone is inherently interpretable or not, (C) whether the interpretation method is ante-hoc, that is, it incorporates explainability into a model from the beginning, or post-hoc, where explainability is incorporated after the regular training of the actual model (i.e., testing time), (D) whether the method is model agnostic (i.e., works for any ML model) or specific to an algorithm, and (E) whether the model is local, providing instance-level explanations, or global, providing overall model behavior.
|
| 166 |
+
|
| 167 |
+
Our analysis points to a gap in the literature: there is no explainability method that is, at the same time, actual and direct (i.e., does not create an illusion of explainability by approximating the model), model agnostic, and local, so that the full potential of explainability can be utilized in different applications. Some recent works bring in external knowledge and infuse it into the model for better interpretation. These XAI methods have the potential to fill this gap to some extent by incorporating domain knowledge into the model in a model-agnostic and transparent way (i.e., not by illusion).
|
| 168 |
+
|
| 169 |
+
::: table*
|
| 170 |
+
Method Approx. Inherent Post/Ante Agnos./Spec. Global/Local
|
| 171 |
+
------------------------------------------ --------- ---------- ----------- -------------- --------------
|
| 172 |
+
Linear/Logistic Regression No Yes Ante Specific Both
|
| 173 |
+
Decision Trees No Yes Ante Specific Both
|
| 174 |
+
Decision Rules No Yes Ante Specific Both
|
| 175 |
+
k-Nearest Neighbors No Yes Ante Specific Both
|
| 176 |
+
Partial Dependence Plot (PDP) Yes No Post Agnostic Global
|
| 177 |
+
Individual Conditional Expectation (ICE) Yes No Post Agnostic Both
|
| 178 |
+
Accumulated Local Effects (ALE) Plot Yes No Post Agnostic Global
|
| 179 |
+
Feature Interaction No Yes Both Agnostic Global
|
| 180 |
+
Feature Importance No Yes Both Agnostic Global
|
| 181 |
+
Global Surrogate Yes No Post Agnostic Global
|
| 182 |
+
Local Surrogate (LIME) Yes No Post Agnostic Local
|
| 183 |
+
Shapley Values (SHAP) Yes No Post Agnostic Local
|
| 184 |
+
Break Down Yes No Post Agnostic Local
|
| 185 |
+
Counterfactual explanations Yes No Post Agnostic Local
|
| 186 |
+
Adversarial examples Yes No Post Agnostic Local
|
| 187 |
+
Prototypes Yes No Post Agnostic Local
|
| 188 |
+
Influential instances Yes No Post Agnostic Local
|
| 189 |
+
:::
|
| 190 |
+
|
| 191 |
+
Chen et al. [@chen2018learning] introduce instance-wise feature selection as a methodology for model interpretation, where the model learns a function that extracts a subset of the most informative features for a particular instance. The feature selector attempts to maximize the mutual information between the selected features and the response variable. However, their approach is mostly limited to post-hoc settings.
|
| 192 |
+
|
| 193 |
+
In more recent work, [@jung2020information] study explainable ML using information theory: they quantify the effect of an explanation by the conditional mutual information between the explanation and the prediction, taking the user's background into account. Their approach provides personalized explanations based on the background of the recipient, for instance, a different explanation for those who know linear algebra and those who do not. However, this work cannot yet be considered a comprehensive approach that covers a variety of users and their explanation needs. To understand the flow of information in a Deep Neural Network (DNN), [@ancona2017towards] analyze different gradient-based attribution methods that assign an attribution value (i.e., contribution or relevance) to each input feature (i.e., neuron) of a network for each output neuron. They use heatmaps for better visualization, where one color represents features that contribute positively to the activation of the target output and another color represents features that suppress it.
|
| 194 |
+
|
| 195 |
+
A survey on the visual representation of Convolutional Neural Networks (CNNs), by [@zhang2018visual], categorizes works based on a) visualization of CNN representations in intermediate network layers, b) diagnosis of CNN representation for feature space of different feature categories or potential representation flaws, c) disentanglement of "the mixture of patterns" encoded in each filter of CNNs, d) interpretable CNNs, and e) semantic disentanglement of CNN representations.
|
| 196 |
+
|
| 197 |
+
In industrial control systems, an alarm from an intrusion/anomaly detection system has very limited value unless the alarm can be explained with more information. [@wang2020explaining] design a layer-wise relevance propagation method for DNNs to map abnormalities between the calculation process and the features. This process helps to compare normal samples with abnormal samples for a better understanding with detailed information.
|
| 198 |
+
|
| 199 |
+
[@kim2017interpretability] propose a concept-attribution-based approach (i.e., sensitivity to a concept) that provides an interpretation of a neural network's internal state in terms of human-friendly concepts. Their approach, *Testing with CAVs (TCAV)*, quantifies the prediction's sensitivity to a high-dimensional concept. For example, given a user-defined set of examples that defines the concept 'striped', TCAV can quantify the influence of 'striped' on the prediction of 'zebra' as a single number. However, their work targets only image classification and falls under the post-modeling notion (i.e., post-hoc) of explanation.
|
| 200 |
+
|
| 201 |
+
[@kursuncu2019knowledge] propose knowledge-infused learning that measures the information loss in the latent features learned by neural networks through Knowledge Graphs (KGs). This incorporation of external knowledge (via KGs) helps supervise the learning of the model's features. Although much work remains, they believe that KGs will play a crucial role in developing explainable AI systems.
|
| 202 |
+
|
| 203 |
+
[@islam2019infusing] and [@islam2019domain] infuse popular domain principles from the domain in the model and represent the output in terms of the domain principle for explainable decisions. In [@islam2019infusing], for a bankruptcy prediction problem they use the 5C's of credit as the domain principle which is commonly used to analyze key factors: character (reputation of the borrower/firm), capital (leverage), capacity (volatility of the borrower's earnings), collateral (pledged asset) and cycle (macroeconomic conditions) [@angelini2008neural], [@5cs_of_credit]. In [@islam2019domain], for an intrusion detection and response problem, they incorporate the CIA principles into the model; *C* stands for *confidentiality*, *I* stands for *integrity*, and *A* stands for *availability* [@matt2006introduction]. In both cases, the infusion of domain knowledge leads to better explainability of the prediction with negligible compromises in performance. It also comes with better execution time and a more generalized model that works better with unknown samples.
|
| 204 |
+
|
| 205 |
+
Although these works [@islam2019infusing; @islam2019domain] come with unique combinations of merits such as model agnosticism, the capability of both local and global explanation, and authenticity of explanation, they are still not fully off-the-shelf systems due to some domain-specific configuration requirements. Much work still remains and needs further attention.
|
| 206 |
+
|
| 207 |
+
The quantification or evaluation of explainability is an open challenge. There are two primary directions of research towards the evaluation of explainability of an AI/ML model: (1) model complexity-based, and (2) human study-based.
|
2104.00322/main_diagram/main_diagram.drawio
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
<mxfile host="app.diagrams.net" modified="2021-03-31T16:28:46.840Z" agent="5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36" etag="D3XTLk6N0kATrwiWYuHw" version="14.4.8" type="device"><diagram id="EJMJvEYF-gBuo_97C7Dc" name="Page-1">7V3bcqM4EP2aPMbFHfsx8WRvlZ2dnWztZd5kkG02GHmEPIn361fC3BFGYARM4qlUjRFCgPqc7lZLam705e71Rwz221+RC/0bTXFfb/QPN5qmqbZK/2Mlx1OJZRqngg323FORmhU8ef/BuFCJSw+eC8NCRYKQT7x9sdBBQQAdUigDGKOXYrU18ot33YMNrBQ8OcCvlv7luWR7Kp2bSlb+E/Q22+TOqhKf2YGkclwQboGLXnJF+sONvsQIkdOv3esS+qzzkn45XfdDzdn0wTAMiMgFQMX7vx/unnbWi/IUfJ3DXz4fbnU7fjhyTN4YurQD4kOEyRZtUAD8h6z0HqND4ELWrEKPsjqPCO1poUoL/4WEHGNpggNBtGhLdn58Fr565O/4cvb7n9zvDww6ujJTCv+M5CyDkW3GR58g9naQQBxfHxKMnuES+QhH76KfLk7PJBKkfX9/em/2srX9GReF6ICduNbP+7vji2t+fjr+Gf5ifsXb3+3HWy2VJqUBRPSJ8JFeh6EPiPet2D6I8bhJ66WXfkIevbOmxNxR7Rg4MXN0Qyk2QQDeQBJflQn+DmNwzFXbswrhmftoxftoSglHDfWT54rr0x+nJ0iOcn2SFUXY5OO0vos15RvwD3F/VnBL+bVnP53DioH0ZesR+LQHkeReqHIqQnB1wvDjKi0AzvMmQvZvB+J7AYzLXYCff6PNeIS9L8WlWSzUolJWMyW4egZwHHx9g5jA11xRFUrxWd0s9LxqxZJ4yTSTHhdt80pJLyEnD76c3NqLZd4slgHUydrz/RzpXRPOXYOnDubaSrcsrnQKwuuuCvTYREXUPFPP5qqfC3TIRWLUvwN2dRCydD6WVbRmmxVCmoMSUhVQlO+LkbYgI1V1UpS0r5TsREnNKlFyUbWRNo+SiixKLr4DSQ6gKkuuizm265IMMN+5XG5LcjFGl4txtWB8y9RswvRJmbDkuSfNsCnaML088le0WdWxnHNomfif/cvSmiQrAZyvHZ60LGcOV2uprOydbPxwiznvFgbqEHn5C365+/KfsbQIubXAT4vt4ot9mxjugSOEAX3qNETIDqIY4cxMDqMwYXoyCgvy43wlyKznDnS4kFnNTcNU+gdIXm3ze1gOktrGAU21CDQzlrzUuJ4qEHpoMAr1mrpWt0vX4KbSHBjQBnWrrHED/bMkZF8J9acHCYXH0fF8ZirD6HhrUdLx+mJWsuQ1Wr7alNnY1Om1+zAYfErzBryWT5hoUfSoGQatrweUnLgNIxTd0QqqsX/NTtJfG/b/R0AOGLD7/xzsDyRplD7kqd1TrQrMKWXJOY0RIOY2FmAXFwHf2wTMOaXAYXNM90wBeA7w7+ITO891/Tq/tUieFJHRwTMkzrbeZNHXSYhZPy3VYoxXmrFR1ao2SuMfeXWkydJGRougCAGrqI9zAuR1d0gZQmJFxLrVoZ0I6IgAx9c4yPfBPvSixk41tp7vPoIjOpDkNslRFyUk3ahUtAR3WMAzK7YsOZotgij03YkH/M/QISDYiIi0KjIXo/0fiT/FCiKnBuKHb5D5NlUDEjOZRPaKnfThOrl2hQhBu/gAx72VNhp1lXlP/+g7LpkBM+nbLOmxmh3TP1YdkyUKqMyBFwkXgpC8wJB0QcV5ujRjpcnB0GQhocVgvwsSotUdIEMCon2x9qOu3VItDIM+BN+PtFJSTFha2pW30+LtsSjx0YDRYir4rdNYExbeaNISiJVfaTwCjTVDjMayQu+meaVxmSOX01iatASC61caj0BjXTBqJw0YLVaBvHUaW8LCG01aAqshrzQegcaGNbJT3SLa9dZpPBcW3ljSsq4xrWnSOMHOaMC4hrgqHJkujW2BSMbwK1ZElh90nM0cepIyWR/QforSaGhI8gSl3SJqMoXJp06Y2Sdb49iTesGmFZS6z0kZC7EhU3kRU3+0lxz5uNh65wU6ngmvRRIHN50WIp9n3sVmQ9rMtC05FtPWyHeHSysZm10Mz3kCTljGksMq71BHaP3hR1xHHIs4GQ1OkuM+U1AZPel/W1i2Ywlz3mrTAvaP9xg4z4zZTZsXsqFE1aA2rAdr7Q+6HqaY8RBbpBawUUs/3l55LbrJ8fbSHUTDiEvjiKuPxYYf0I6qRHp66YMw9NYe9eW/zwWHIgDrecFhiosk6YvGwYk6KE5abXEfhdYCawzztKaDZFm05s17DUxr3hC9D1o/glWUoOjK6g5j/fKmBt4y4mFZ3WKwPxarmzcUSmJ1JTkFh9W6Nai4eGPrPlj9A2RbAyA9//BKhzIOQVdai2dMWAjQelgfvFXyi/fmg2tF34rng/N27MuTFm/42werf8TA9WDUwmdIOyiM9v48guPVaIvDxZinm0TOOeOy+M3fVtdM72IHNrCaZ0Av3PYrZ1doyc82OElQpO0K5UpCYFT0JiVhlXbEGZbgSr8+sgRxJSGS9SRw71hG00xJuSDcptaseff8zfm987ysnMkmXqGtuhJF27hVV3iz/ale42R5Qwa3pOzCOXWjNKQyyzq3Ziq82lBp269R3vInOUuEwECsA3zVPHzVBvgOB8ZGkIkmdBgGZOVxe2qG2oKssnCjbJckg0xgWNC3tZrExt9KfG7IbBJcSYjkfZ26tZIo2kYFIZr+dSAFUc4AvejJCmkLsSVifSkIgXXWA1shiSCbCnhK1sVQxFbzNaJQL6NQdqIrgXXfb9K81NJ/tMGQOmqyIjVnUbLERXybktmuLDWZiJoYJ0/Z2fRjvQ2des8uefax+8wclnZtrTDk+3qlHP+8GFESoO/d1+N+IqT3aF3e3nIir+z8J0AIxEFUSVPOdXalZzn9Xz9FZpSDo+l3Z/LGc27POD3eS3yU2+OyJr+fCAhcgF1a4cPHj+8qhn4RTHTLmOnzAk7UOYeWhj6zeUCRhRPdGtNM5gdeqV284OM9OVOqqFphKDgzlbSgbjjIjob/HhBXLskES2PCZv1CAxpdeun3fpItG2N97+dsF046d3RBJ5VcBqi6JrR5GFxYtg74CcP70FUl94GzOFpaPne+JFtsqLhKsv6LCQbHMxlYkt/DN0mmKMnSNxbSSZ/xJClrycadG63TwB7wfdYB1CCTA14BApnTeWHa1gmlY70MDiUdbXI+46bKWsLDdyQ1SXhIWllxAHLDVuuB3d6HYVoNp2t/YAAxiIJnFEUYOTAMc8hZlW/QhCbxsG91+KlzUdEtPMx1U8Vdz+ZPgCh86A0TC9ZqTFbbULBWMygX3mmeVETrdQhbRovpYfZR
1lP17NO2+sP/</diagram></mxfile>
|
2104.00322/paper_text/intro_method.md
ADDED
|
@@ -0,0 +1,64 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Introduction
|
| 2 |
+
|
| 3 |
+
Deep learning models have achieved impressive success on a wide range of challenging tasks. However, their performance was shown to be brittle in the face of *adversarial examples*: small, imperceptible perturbations in the input that drastically alter the classification (Carlini & Wagner, 2017a;b; Goodfellow et al., 2014; Kurakin et al., 2016b; Moosavi-Dezfooli et al., 2016; Szegedy et al., 2013; Tramèr et al., 2017; Dong et al., 2018; Tabacof & Valle, 2016; Xie et al., 2019b; Rony et al., 2019). The problem of designing reliable robust models has gained significant attention in the arms race against adversarial examples. Adversarial training (Szegedy et al., 2013; Goodfellow et al., 2014; Madry et al., 2017; Zhang et al., 2019b) has been proposed as one of the most effective approaches to defend against such examples, and can be described as solving the following min-max optimization problem:
|
| 4 |
+
|
| 5 |
+
$$\min_{\theta} \mathbb{E}_{(x,y) \sim \mathcal{D}} \left[ \max_{x': \|x' - x\|_{p} \le \epsilon} L\left(x', y; \theta\right) \right],$$
|
| 6 |
+
|
| 7 |
+
where $x'$ is the $\epsilon$-bounded perturbation in the $\ell_p$ norm and $L$ is the loss function. Different unrestricted attack methods have also been suggested, such as adversarial deformations, rotations, translations and more (Brown et al., 2018; Engstrom et al., 2018; Xiao et al., 2018; Alaifari et al., 2018; Gilmer et al., 2018).
|
| 8 |
+
|
| 9 |
+
The resulting min-max optimization problem can be hard to solve in general. Nevertheless, in the context of $\epsilon$-bounded perturbations, the problem is often tractable in practice. The inner maximization is usually approximated by generating adversarial examples using projected gradient descent (PGD) (Kurakin et al., 2016a; Madry et al., 2017). A PGD adversary starts with a randomly initialized perturbation and iteratively adjusts the perturbation while projecting it back onto the $\epsilon$-ball:
|
| 10 |
+
|
| 11 |
+
$$x_{t+1} = \Pi_{\mathbb{B}_{\epsilon}(x_0)} \left(x_t + \alpha \cdot \operatorname{sign}(\nabla_{x_t} L(G(x_t), y))\right),$$
|
| 12 |
+
|
| 13 |
+
where $x_0$ is the natural example (with or without random noise), $\Pi_{\mathbb{B}_{\epsilon}(x_0)}$ is the projection operator onto the $\epsilon$-ball, $G$ is the network, and $\alpha$ is the perturbation step size. As was shown by Athalye et al. (2018), PGD-based adversarial training was one of the few defenses that were not broken under strong attacks.
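A common way to implement this inner maximization is a short PGD loop; the sketch below is a generic PyTorch implementation with an $\ell_\infty$ projection, under the assumption of a classifier `model` and inputs scaled to $[0, 1]$, and is not the paper's training code.

```python
# Generic PGD sketch (l_inf ball): repeatedly ascend the loss, then project the
# perturbation back into the epsilon-ball and the valid input range.
# `model`, `x_nat`, `y`, and the hyperparameters are placeholders.
import torch
import torch.nn.functional as F

def pgd_attack(model, x_nat, y, eps=8 / 255, alpha=2 / 255, steps=10):
    x_adv = x_nat + torch.empty_like(x_nat).uniform_(-eps, eps)   # random start
    x_adv = x_adv.clamp(0.0, 1.0).detach()
    for _ in range(steps):
        x_adv.requires_grad_(True)
        loss = F.cross_entropy(model(x_adv), y)
        grad = torch.autograd.grad(loss, x_adv)[0]
        with torch.no_grad():
            x_adv = x_adv + alpha * grad.sign()                   # ascent step
            x_adv = x_nat + (x_adv - x_nat).clamp(-eps, eps)      # project to ball
            x_adv = x_adv.clamp(0.0, 1.0)                         # stay in [0, 1]
    return x_adv.detach()
```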
|
| 14 |
+
|
| 15 |
+
That said, the gap between robust and natural accuracy remains large for many tasks such as CIFAR-10 (Krizhevsky et al., 2009) and ImageNet (Deng et al., 2009). Generally speaking, Tsipras et al. (2018) suggested that robustness may be at odds with natural accuracy, and usually the trade-off is inherent. Nevertheless, a growing body of work aimed to improve the standard PGD-based adversarial training introduced by Madry et al. (2017) in various ways such as improved adversarial loss functions and regularization techniques (Kannan et al., 2018; Wang et al., 2019b; Zhang et al., 2019b), semi-supervised approaches(Carmon et al., 2019; Uesato et al., 2019; Zhai et al., 2019), adversarial perturbations on model weights (Wu et al., 2020), utilizing out of distribution data (Lee et al., 2021) and many others. We refer to related work for a more extensive literature review.
|
| 16 |
+
|
| 17 |
+
**Our contribution.** In this work, we propose a novel approach to regulating the tradeoff between robustness and natural accuracy. In contrast to the aforementioned works, our method enhances adversarial training by enforcing a feature representation that is invariant across the natural and adversarial domains. We incorporate the idea of Domain-Adversarial Neural Networks (DANN) (Ganin & Lempitsky, 2015; Ganin et al., 2016) directly into the adversarial training process. DANN is a representation learning approach for domain adaptation, designed to ensure that predictions are made based on an invariant feature representation that cannot discriminate between source and target domains. This technique is modular and can be easily incorporated into any standard adversarial training algorithm. Intuitively, the tasks of adversarial training and of domain-invariant representation have a similar goal: given a source (natural) domain $X$ and a target (adversarial) domain $X'$, we hope to achieve $g(X) \approx g(X')$, where $g$ is a feature representation function (i.e., a neural network). As we present in section 3.3, our work is also theoretically motivated by the domain adaptation generalization bounds.
|
| 18 |
+
|
| 19 |
+
In a comprehensive battery of experiments on MNIST (LeCun et al., 1998), SVHN (Netzer et al., 2011), CIFAR-10 (Krizhevsky et al., 2009) and CIFAR-100 (Krizhevsky et al., 2009) datasets, we demonstrate that by enforcing domain-invariant representation learning using DANN simultaneously with adversarial training, we gain a significant and consistent improvement in both robustness and natural accuracy compared to other state-of-the-art adversarial training methods, under Auto-Attack (Croce & Hein, 2020) and various strong PGD (Madry et al., 2017) and CW (Carlini & Wagner, 2017b) adversaries in white-box and black-box settings. Additionally, we evaluate our method using unforeseen "natural" corruptions (Hendrycks & Dietterich, 2018), unforeseen adversaries (e.g., $\ell_1$, $\ell_2$), and transfer learning, and we perform ablation studies. Finally, we offer a novel score function for quantifying the robust-natural accuracy trade-off.
|
| 20 |
+
|
| 21 |
+

|
| 22 |
+
|
| 23 |
+
Figure 1: Illustration of the proposed architecture to enforce domain invariant representation. The feature extractor and label classifier form the a regular DNN architecture that can be used for the main natural task. The domain classifier is incorporated alongside the label classifier. The reversal gradient layer multiplies the gradient by a negative number during the back-propagation.
|
| 24 |
+
|
| 25 |
+
# Method
|
| 26 |
+
|
| 27 |
+
In this section, we introduce our Domain Invariant Adversarial Learning (DIAL) approach for adversarial training. The source domain is the natural dataset, and the target domain is generated using adversarial attack on the natural domain. We aim to learn a model that has low error on the source (natural) task (e.g., classification) while ensuring that the internal representation cannot discriminate between the natural and adversarial domains. In this way, we enforce additional regularization on the feature representation, which enhances robustness.
|
| 28 |
+
|
| 29 |
+
The motivation behind the proposed method is to enforce a feature representation that is invariant to adversarial perturbations. Given a natural example $x$ and its adversarial counterpart $x'$, if the domain classifier manages to distinguish between them, this means that the perturbation has induced a significant difference in the feature representation. We impose an additional loss on the natural and adversarial domains in order to discourage this behavior.
|
| 30 |
+
|
| 31 |
+
We demonstrate that the feature representation layer does not discriminate between natural and adversarial examples, namely $G_f(x; \theta_f) \approx G_f(x'; \theta_f)$. Figure 2 presents the scaled mean and standard deviation (std) of the absolute differences between the natural examples from the test set and their corresponding adversarial examples on different features from the feature representation layer. Smaller differences in the mean and std imply higher domain invariance; indeed, DIAL achieves near-zero differences almost across the board. Moreover, DIAL's feature-level invariance almost consistently outperforms the naturally trained model (a model trained without adversarial training) and the model trained using standard adversarial training techniques (Madry et al., 2017). We provide additional feature visualizations in Appendix H.
|
| 32 |
+
|
| 33 |
+
Recently, other communities also discovered the benefits of adopting analogous architectures to DANN, such as the contrastive learning community which used similar architecture to improve representation learning (Dangovski et al., 2021; Wang et al., 2021)
|
| 34 |
+
|
| 35 |
+

|
| 36 |
+
|
| 37 |
+

|
| 38 |
+
|
| 39 |
+
Figure 2: We visualize the (a) Mean and (b) standard deviation (std) differences comparison between three models: (1) Naturally trained model (without adversarial training), named Clean. (2) Model trained using standard adversarial training, named standard AT, and (3) Model trained using our method, DIAL. We visualize five random features from the features layer. Each bar represent the difference between the means/std of the natural examples and the mean/std of their corresponding adversarial examples on this same feature.
|
| 40 |
+
|
| 41 |
+
Let us define the notation for our domain-invariant robust architecture and loss. Let $G_f(\cdot; \theta_f)$ be the feature extractor neural network with parameters $\theta_f$. Let $G_y(\cdot; \theta_y)$ be the label classifier with parameters $\theta_y$, and let $G_d(\cdot; \theta_d)$ be the domain classifier with parameters $\theta_d$. That is, $G_y(G_f(\cdot; \theta_f); \theta_y)$ is essentially the standard model (e.g., a wide residual network (Zagoruyko & Komodakis, 2016)), while in addition we have a domain classification layer to enforce domain invariance on the feature representation. An illustration of the architecture is presented in Figure 1.
|
| 42 |
+
|
| 43 |
+
Given a training set $\{(x_i, y_i)\}_{i=1}^n$, the natural loss is defined as:
|
| 44 |
+
|
| 45 |
+
$$\mathcal{L}_{\text{nat}}^{y} = \frac{1}{n} \sum_{i=1}^{n} \text{CE}(G_{y}(G_{f}(x_{i}; \theta_{f}); \theta_{y}), y_{i}).$$
|
| 46 |
+
|
| 47 |
+
We consider two basic forms of the robust loss. One is the standard cross-entropy (CE) loss between the predicted probabilities and the actual label, which we refer to as $DIAL_{CE}$. The second is the Kullback-Leibler (KL) divergence between the adversarial and natural model outputs (logits), i.e., the class probabilities for the natural examples and their adversarial counterparts, which we refer to as $DIAL_{KL}$.
|
| 48 |
+
|
| 49 |
+
$$\mathcal{L}_{\text{rob}}^{\text{CE}} = \frac{1}{n} \sum_{i=1}^{n} \text{CE}(G_y(G_f(x_i'; \theta_f); \theta_y), y_i),$$
|
| 50 |
+
|
| 51 |
+
|
| 52 |
+
$$\mathcal{L}_{\text{rob}}^{\text{KL}} = \frac{1}{n} \sum_{i=1}^{n} \text{KL}(G_y(G_f(x_i'; \theta_f); \theta_y) \parallel G_y(G_f(x_i; \theta_f); \theta_y)).$$
|
| 53 |
+
|
| 54 |
+
where $\{(x'_i, y_i)\}_{i=1}^n$ are the generated corresponding adversarial examples. Next, we define source domain label $d_i$ as 0 (for natural examples) and target domain label $d'_i$ as 1 (for adversarial examples). Then, the natural and adversarial domain losses are defined as:
|
| 55 |
+
|
| 56 |
+
$$\mathcal{L}_{\text{nat}}^{d} = \frac{1}{n} \sum_{i=1}^{n} \text{CE}(G_d(G_f(x_i; \theta_f); \theta_d), d_i),$$
|
| 57 |
+
|
| 58 |
+
$$\mathcal{L}_{\text{adv}}^{d} = \frac{1}{n} \sum_{i=1}^{n} \text{CE}(G_d(G_f(x_i'; \theta_f); \theta_d), d_i').$$
|
| 59 |
+
|
| 60 |
+
We can now define the full domain invariant robust loss:
|
| 61 |
+
|
| 62 |
+
$$\begin{aligned} \text{DIAL}_{\text{CE}} &= \mathcal{L}_{\text{nat}}^{y} + \lambda \mathcal{L}_{\text{rob}}^{\text{CE}} - r(\mathcal{L}_{\text{nat}}^{d} + \mathcal{L}_{\text{adv}}^{d}), \\ \text{DIAL}_{\text{KL}} &= \mathcal{L}_{\text{nat}}^{y} + \lambda \mathcal{L}_{\text{rob}}^{\text{KL}} - r(\mathcal{L}_{\text{nat}}^{d} + \mathcal{L}_{\text{adv}}^{d}). \end{aligned}$$
|
| 63 |
+
|
| 64 |
+
The goal is to minimize the loss on the natural and adversarial classification while maximizing the loss for the domains. The reversal-ratio hyper-parameter $r$ enters the network as a gradient reversal layer (Ganin & Lempitsky, 2015; Ganin et al., 2016) that leaves the input unchanged during forward propagation and reverses the gradient by multiplying it with a negative scalar during back-propagation. The reversal-ratio parameter is initialized to a small value and is gradually increased towards $r$ as the main objective converges. This enforces a domain-invariant representation as training progresses: a larger value enforces stronger invariance to the domain. A comprehensive algorithm description can be found in Appendix A.
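The gradient reversal layer itself takes only a few lines in PyTorch; the sketch below follows the standard construction of Ganin & Lempitsky (2015) rather than the authors' exact implementation, and the usage comment borrows the paper's $G_f$/$G_d$ notation only for illustration.

```python
# Standard gradient reversal layer sketch (PyTorch): identity in the forward
# pass, multiplies the incoming gradient by -r during back-propagation.
import torch
from torch.autograd import Function

class GradReverse(Function):
    @staticmethod
    def forward(ctx, x, r):
        ctx.r = r
        return x.view_as(x)

    @staticmethod
    def backward(ctx, grad_output):
        return -ctx.r * grad_output, None    # reversed gradient; no grad for r

def grad_reverse(x, r=1.0):
    return GradReverse.apply(x, r)

# Usage in a DANN-style model: the features reach the domain classifier
# unchanged, but its gradient arrives at the feature extractor with its sign
# flipped, e.g. domain_logits = G_d(grad_reverse(G_f(x), r)).
```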
|
2104.03149/main_diagram/main_diagram.drawio
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2104.03149/paper_text/intro_method.md
ADDED
|
@@ -0,0 +1,42 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Introduction
|
| 2 |
+
|
| 3 |
+
Visual Question Answering (VQA) is a popular task that aims at developing models able to answer free-form questions about the contents of given images. The research community introduced several datasets [\[5,](#page-8-0) [23,](#page-9-0) [26,](#page-9-1) [27\]](#page-9-2) to study various topics such as multimodal fusion [\[7\]](#page-8-1) and visual reasoning [\[4,](#page-8-2) [22\]](#page-8-3). The popular VQA v2 dataset [\[21\]](#page-8-4) is the largest dataset of photographs of real scenes and humanprovided questions. Because of strong selection biases and annotation artifacts, these datasets have served as a test-bed for the study of dataset biases and shortcut learning [\[18\]](#page-8-5) (we will use the term "shortcut" exclusively in the rest of the paper). These spurious correlations correspond to superficial statistical patterns in the training data that allow predicting correct answers without deploying the desirable behavior. Issues of shortcut learning have become an increasing concern for other tasks in vision and natural language processing [\[18,](#page-8-5) [14\]](#page-8-6). In extreme cases, shortcuts in VQA may allow guessing the answer without even looking at the image [\[1\]](#page-8-7). Some shortcuts can be more subtle and involve both textual and visual elements. For instance, training questions containing *What sport* are strongly associated with the answer *tennis* when they co-occur with a racket in the image (see Figure [1\)](#page-1-0). However, some examples can be found in the validation set, such as *What sport field is in the background ?*, that lead to a different answer (*soccer*) despite a racquet being present in the image. Because of such exceptions, a model that strongly relies on simple co-occurrences will fail on unusual questions and scenes. Our work studies such multimodal patterns and their impact on VQA models.
|
| 4 |
+
|
| 5 |
+
The presence of dataset biases in VQA datasets is well known [\[1,](#page-8-7) [21,](#page-8-4) [23,](#page-9-0) [29\]](#page-9-3), but existing evaluation protocols are limited to text-based shortcuts. Our work introduces VQA-*CounterExamples* (VQA-CE for short), an evaluation protocol for multimodal shortcuts. It is easy to reproduce and can be used on any model trained on VQA v2, without requiring retraining. We start with a method to discover superficial statistical patterns in a given VQA dataset that could be the cause of shortcut learning. We discover a collection of co-occurrences of textual and visual elements that are strongly predictive of certain
|
| 6 |
+
|
| 7 |
+
<sup>\*</sup>Equal contribution †Work done before April 2021 and joining Tesla
|
| 8 |
+
|
| 9 |
+
<span id="page-1-1"></span>
|
| 10 |
+
|
| 11 |
+

|
| 12 |
+
|
| 13 |
+
<span id="page-1-0"></span>Figure 1. Overview of this work. We first mine simple predictive rules in the training data such as: what + sport + racket<sup>V</sup> → tennis. We then search for counterexamples in the validation set that identify some rules as undesirable statistical shortcuts. Finally, we use the counterexamples as a new challenging test set and evaluate existing VQA models like UpDown [\[3\]](#page-8-8) and VilBERT [\[31\]](#page-9-4).
|
| 14 |
+
|
| 15 |
+
answers in the training data and that often transfer to the validation set. For instance, we discover a rule that relies on the appearance of the words "what", "they", "playing" together with the object "controller" in the image to always predict the correct answer "wii". We consider this rule to be a shortcut since it could fail on arbitrary images with other controllers, as happens in the real world. Thus, our method can be used to reveal biases in the datasets that can potentially be learned by VQA models.
We go one step further and identify counterexamples in the validation set where the shortcuts produce an incorrect answer. These counterexamples form a new challenging evaluation set for our VQA-CE evaluation protocol. We found that the accuracy of existing VQA models is significantly degraded on this data. More importantly, we found that most current approaches for reducing biases and shortcuts are ineffective in this context. They often reduce the average accuracy over the full evaluation set without significant improvement on our set of counterexamples. Finally, we identify shortcuts that VQA models may be exploiting. We find several shortcuts giving predictions highly correlated with existing models' predictions. When they lead to incorrect answers on some examples from the validation set, VQA models also provide incorrect answers. This tends to show that VQA models exploit these multimodal shortcuts. In summary, the contributions of this paper are as follows.
- 1. We propose a method to discover shortcuts which rely on the appearance of words in the question and visual elements in the image to predict the correct answer. By applying it to the widely-used VQA v2 training set, we found a high number of multimodal shortcuts that are predictive on the validation set.
- 2. We introduce the VQA-CE evaluation protocol to assess the VQA models' reliance on these shortcuts. By running a large-scale evaluation of recent VQA approaches, we found that state-of-the-art models exploit these shortcuts and that bias-reduction methods are ineffective in this context.
# Method
We introduce our method to detect shortcuts relying on textual and visual input. Our approach consists in building a dataset of input-output variables and applying a rule mining algorithm. The code for our method is available online [\\*](#page-2-0). In Visual Question Answering (VQA), we consider a training set $D_{train}$ made of $n$ triplets $(v_i, q_i, a_i)_{i \in [1,n]}$, with $v_i \in \mathcal{V}$ an image, $q_i \in \mathcal{Q}$ a question in natural language and $a_i \in \mathcal{A}$ an answer. VQA is usually cast as the problem of learning a multimodal function $f : \mathcal{V} \times \mathcal{Q} \rightarrow \mathcal{A}$ that produces accurate predictions on a test set $D_{test}$ of unseen triplets.
Mining predictive rules on a training set Our goal is to detect shortcuts that $f$ might use to provide an answer without deploying the desired behavior. To this end, we limit ourselves to a class of shortcuts that we think is often leveraged by $f$. We display in Figure [2](#page-2-1) our rule mining process. These shortcuts are short predictive association rules $A \rightarrow C$ that associate an antecedent $A$ with a consequent $C$. Our antecedents are composed of words of the question and salient objects in the image (or image patch), while our consequents are just answers. For instance, the rule $\{what, color, plant\} \rightarrow \{green\}$ provides the answer "green" when the question contains the words "what", "color" and "plant". These shallow rules are by construction shortcuts. They are predictive on the validation set but do not reflect the complex behavior that needs to be learned to solve the VQA task. For instance, they rely neither on the order of words, nor on the position and relationships of visual contents in the image. They lack the context that is required to properly answer the question. Moreover,
<span id="page-2-0"></span><sup>\*</sup><https://github.com/cdancette/detect-shortcuts>
<span id="page-3-1"></span>
|
<span id="page-3-0"></span>Figure 3. Examples of shortcuts found in the VQA v2 dataset. The confidence is the accuracy obtained by applying the shortcut on all examples matched by its *antecedent*. The support is the number of matching examples. More supporting examples and counterexamples are shown in the supplementary material.
even rules that seem correct often have a few counterexamples in the dataset, i.e. examples that are matched by the antecedent but the consequent provides the wrong answer. We later use these counterexamples in our evaluation procedure.
Binary dataset creation To detect these rules, we first encode all question-image-answer triplets of Dtrain as binary vectors. Each dimension accounts for the presence or absence of (a) a word in the question, (b) an object<sup>V</sup> in the image, represented by its textual detection label from Faster R-CNN, (c) an answer. The number of dimensions of each binary vector is the sum of the size of the dictionary of words (e.g. ~13,000 words in VQA v2), the number of detection labels of distinct objects in all images (e.g. 1,600 object labels), and the number of possible answers in the training set (e.g. 3,000 answers). We report results with ground-truth instead of detected labels in the supplementary materials.
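
To make this encoding step concrete, here is a minimal sketch (not the authors' code) of how a triplet could be turned into such a binary vector; the dictionaries and the example's `question_words`, `detected_labels` and `answer` fields are illustrative assumptions.

```python
import numpy as np

def build_binary_vector(example, word2idx, label2idx, answer2idx):
    """Encode one (question, image, answer) triplet as a binary presence vector.

    The vector concatenates three blocks: (a) question words, (b) detected
    object labels (e.g. Faster R-CNN classes), and (c) the ground-truth answer.
    """
    n_words, n_labels, n_answers = len(word2idx), len(label2idx), len(answer2idx)
    vec = np.zeros(n_words + n_labels + n_answers, dtype=np.uint8)
    for w in example["question_words"]:        # (a) words of the question
        if w in word2idx:
            vec[word2idx[w]] = 1
    for obj in example["detected_labels"]:     # (b) detected object labels
        if obj in label2idx:
            vec[n_words + label2idx[obj]] = 1
    ans = example["answer"]                    # (c) the answer token
    if ans in answer2idx:
        vec[n_words + n_labels + answer2idx[ans]] = 1
    return vec
```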
Frequent itemset mining On our binary dataset, we apply the GMiner algorithm [\[12\]](#page-8-20) to efficiently find frequent *itemsets*. An itemset is a set of tokens $I = \{i_1, \ldots, i_n\}$ that appear very frequently together in the dataset. The support of the itemset is its number of occurrences. For example, the itemset $\{what, color, plant, green\}$ might be very common in the dataset and have a high support. GMiner takes one parameter, the minimum support. We include an additional parameter, which is the maximum length of an itemset. We detail how we select parameters at the end of this section.
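
As a rough, small-scale stand-in for this step (the paper uses the GPU-based GMiner algorithm), the sketch below mines frequent itemsets from the binary matrix with the Apriori implementation in mlxtend; `binary_matrix`, `token_names`, and the parameter values are assumptions carried over from the previous sketch.

```python
import pandas as pd
from mlxtend.frequent_patterns import apriori

# binary_matrix: one row per training triplet, one boolean column per token
# (question words, detection labels, answers); token_names lists the columns.
df = pd.DataFrame(binary_matrix.astype(bool), columns=token_names)

itemsets = apriori(
    df,
    min_support=0.001,  # minimum fraction of examples containing the itemset
    max_len=6,          # maximum number of tokens per itemset
    use_colnames=True,  # report itemsets as sets of token names
)
print(itemsets.sort_values("support", ascending=False).head())
```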
Rules extraction and filtering The next step is to extract rules from the frequent itemsets. First, we filter out the itemsets that do not contain an answer token, as they cannot be converted to rules. For the others that do contain an answer $a$, we remove it from the itemset to create the antecedent $X$ (i.e., $X = I \setminus \{a\}$). The rule is then $X \Rightarrow a$. The support $s$ of the rule is the number of occurrences of $X$ in the dataset. The confidence $c$ of the rule is the frequency of correct answers among examples that contain $X$.
We then filter the rules in three steps: (a) we remove the rules with a confidence on the training set lower than 30% (remove when $c < 0.3$); (b) if several rules have the same antecedent but different answers, we keep the rule with the highest confidence and remove the others. For instance, given the rules $\{is, there\} \Rightarrow yes$ and $\{is, there\} \Rightarrow no$ with respective confidences of 70% and 30%, we only keep the first one with the answer yes; (c) if a rule $r_1$'s antecedent is a superset of another rule $r_2$'s antecedent, both have the same answer, and $r_1$ has a lower confidence than $r_2$, then we remove $r_1$. For instance, given the rules $\{is, there\} \Rightarrow yes$ and $\{is, there, cat\} \Rightarrow yes$ with respective confidences of 70% and 60%, we only keep the first one, without the word cat. We consider the remaining rules as shortcuts. Note that rules with a confidence of 100% could be considered *correct* and not shortcuts, but these rules will not influence our evaluation protocol, detailed in Section [4.](#page-4-0)
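
The following sketch shows one way the rule extraction and the three filtering steps could be implemented; the representation of itemsets as frozensets mapped to their support counts is a simplifying assumption, not the released code.

```python
def extract_and_filter_rules(itemsets, answers, min_confidence=0.3):
    """itemsets: dict mapping frozenset of tokens -> support (occurrence count).
    answers: set of answer tokens. Returns {antecedent: (answer, confidence)}."""
    rules = {}
    for items, support in itemsets.items():
        ans = items & answers
        if len(ans) != 1:                      # need exactly one answer token
            continue
        a = next(iter(ans))
        antecedent = items - {a}
        ante_support = itemsets.get(antecedent, 0)
        if ante_support == 0:
            continue
        conf = support / ante_support
        # (a) drop low-confidence rules; (b) keep the best answer per antecedent
        if conf >= min_confidence and conf > rules.get(antecedent, (None, 0.0))[1]:
            rules[antecedent] = (a, conf)
    # (c) drop a rule whose antecedent is a superset of another rule's antecedent
    # predicting the same answer with strictly higher confidence
    filtered = {}
    for ante, (a, conf) in rules.items():
        dominated = any(
            other < ante and rules[other][0] == a and rules[other][1] > conf
            for other in rules
        )
        if not dominated:
            filtered[ante] = (a, conf)
    return filtered
```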
2104.05938/main_diagram/main_diagram.drawio
ADDED
@@ -0,0 +1 @@
<mxfile host="app.diagrams.net" modified="2021-04-11T09:29:25.520Z" agent="5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.114 Safari/537.36" etag="iSmdeHX1D4K3UzY3_DTd" version="14.5.8" type="google"><diagram id="hEERIxEm96b3tCfoYrjV" name="Page-1">7V1bd5u4Fv41Xqd9iBc3YXhM0qaXac/JTLraTt9kkG2mXDwYx0l//ZFAAgTC5m7HdWbNKpYESHt/+6KtLTFRb72ndyFcrz4HNnInimQ/TdQ3E0WRJdXA/5CS56QEGFJSsAwdmzbKCh6cX4jdSUu3jo02XMMoCNzIWfOFVuD7yIqSMnovDMNgxzdbBC7/1jVcolLBgwVdWipNZ1n5N8eOVrS1DrSs4j1yliv6clnWzaTGg6w17dBmBe1glytS307U2zAIouTKe7pFLqEfI01y311FbdrjEPlRnRt02fy6+/JD/vFH9PXpf1+lD9e/5ldATx7zCN0tHTTtbfTMqBAGW99G5CnyRL3ZrZwIPayhRWp3mO+4bBV5Lq3eRGHwE90GbhDGd6t3dzcGvmA1jIp4+DcLx3W5lnf62xtcXh4ZHewjCiP0lCuiI32HAg9F4TNuQmsVjVKdIk9T6UB3GRd1QNus8gycMfxQ7CzTZ2e0xReUvA1ILatDk1q3DDRfCAir3Ol3t/0QVlY4usoKKNFVEdE1len+6SqCsO5GhA4BHmeewPq/24BVXG1ifXONG8jG+imrJJoioXlWhgkRLh2Yb6Qvyb9fgrVj4Tse0NLDVIWRE/js9Xg4SQ+SpiVuY5JHPEtDhDsF53EDCf+G2yhIuhlXQ9dZ+vjawm9CmLk3hG8O1lfXtMJzbJvcfLMOHD+KKQ1uJuBNARN+4KMSgGhhBkGpJ1EsIEaTS4hJhY5DzGCAmXUHjMQDhjGYe0odENV4lSJ6lTzFV9AjGsGfb9a5l5dAN0SfhMN/sII1wrXBgtjpFblch8E/xDBjLvo2KUXQw//Mt45rO/6SdW4e5rpbGJVUqyh5jsMe8+rLNsRiKBHtJF3h/4lKwmiRaPlMpeWG+VrYCUZKp1hWbtUWM9hrYRo6K1QlyTQlqRpbmzX027NIwbCR/kJeEKHEbcIawE0fx7i03cS1cIN9r9ywO4hFN562fTFDgSzNKLtlXX7d05D2IqTtc1XCn/vQCUIncn7FEiKFZXYtEIy24SlwBxMgQWSJLiIl1IGXL1HaUvzpTA2pkvK6HelESKtoWtlpSgLS4XA5h6+kmLFELQqvXpPLxB0io11Az3Gfk9u9wA82sWPKNckIUsFDUJwsAmy3SWk8DUp/MTsOYkuOS96Qa9IxQGw1wOb/UFs5bcu8l1aPUbLHJO5CWpNVUOXJKpgDRQpILzg/nhTGnjwpjx2/rJkNNyvuvrx/xh7P5lOkPnPpcrXxHIrUBviFTvQc16lS7oHJFIwUK3HpGoUO9qxQSLoY65y0B7nRJ/5eHfqmVSlpM6cQELcwo26Ois+5YknNyndpb5M6OXcPdRfTShNkddhtTMuXufcXGR//TLmfL+QxSduVwLtXmEfwunrUrU09II1Yq2n8t/cRFfpsmLmRixbkxpc4M9LLMQpFFcUowGBTI6PnGEUhErFYIN2ySgTGNfbMnEs90RWwKSYlLAvJ5enKyvJ0NeUpGIqw5tDBn5SA1IwI6d9riM0EPJW1MpUlMGaEjUUMjhMJeod8FELSyT+3KCZUUnKJCJXlkw/OzvSpIIoo0nyDxYTSQVRB5ygRHulh63kwJC1ZRGW3CuKZuodQhH215oa3Qder7H4cRfi2guQ5O+Jn0b7hCarlbjcx3vNRINrXiXp3cRPaiAso+AmSJJIXUdR9MEeBma8uqlbvUdU+WCvkwYuazeNGlXgDzexuPvBujKpkRStgR1WyVyerY6+aqVhhFw8F8eroZWGPbBIiqUMBYdBNs8l/LUU/N9YeVjqE/bMsSRIFBa/PYszJjRtIFIXj77HRRxxlw/WgvmXxrqAVdjHqj06VC9674N2x9+hxXstdQlodfA5lynsdplL2OkxRUEsdzO3QzjD2YnJEVsCsZvBFH4zK4Jgzgoc1spxFnIlzmRLsE88ZjxtdPfac4FD21onMCWxnY2031PGG82Bb0yNob5YFJrTliPr1IRp3Qjw8XSd+R7mz318OXY82MczB8ww8tEG80gZjeatMsMozTTLLDtaOj2U8nnAHOxjamzMm8Qgi2IVYBSGr6MwBZ37YueIlHnKcMQ/L1Wjl+D9pfO2suWpZ5Vf/fRZjTm7crVAc4YLuzySXUqg0DgWYGnh7J06VZkmQJ2UGDgcmL1GdAaeNciGqo5jlxaR0iw8XcBhs348i2sbxAnOVVH5x1xAQVp8JZuTaYIQVJYGNlkYjiORc8miqEgP4PDfZLEdz0sjNONEcUZrbkaI5zYxdm00xxwim9GFJDznumW2sSEK6BMNGQmW696eV4zo6ms8x3qYUROBspwFV0/z7dA/nZ+jDJQr7pvl/coG3E6BvX5tRe4tGHDsO2WZ4A+y2y+8/SzuFn6pq+N9l2n2+vFqlNtrPOErPmunqBnsHR+l9OyB2j/Gq+3bCd9yTdTpB3sH2+jcyBZ9h+DNJEzs7QpxO5Hd8VovCwNY2fKzrG74kKnCBYaF+aWHyswDzkWPFF1N9XFPdF6A6m/zhMHYwDl/z+IFXs9mEHTXAXe45dqADx4ScqeEmDCKlg01imwlz84NGxosRlGJVTcnVP+MuC0gdItWF5SP2CG6VQ7R8ZAwVqmbpxkMtH93hv7d6icL5ExH7SATW+b2BmqaVEzqFhB1sDUA9tJV20ETg688f6greZgXX5NIOrK0Xs+AQi+cJHj7NK5cMF0rFkqE+14FOR0oPOpX1njBg6jwGjPIykCjjfrB9+OqhfX6DLiDeh4G9TeKVyZ6mauv1W64apjnfDC6grIuFKmM4jaGdgyrWmUXZo4rTRfuRVPFRj0a9DTzPiSKELqJYCRl+AV8oiqNux9CUw6KIfPuaHK5NqO3CzcaxeE5heoTP3/GPK2kqyYCV/E3oNiVSQQvePFFKJr+e87/u2ZlktJA/PFoVyfJt/Ece8ORE31lf8HXyYkB/ZW8lP54PMRPZ3BHhZVbmWLXvBIYQuTByHhH3cBH76BvuCUpzSlstONBAKp5VtAm2oYXofRkMyo8yzamqmdkf92BVBlNDk7I/lX9LBMMlikpvwYCIdxSzZlTIqsczkwvjkfd3utBepac+Z1hPepAhP2VRB2EQbWlrKAzVMC1DmhMcSeEFB4BWgnPqwAaSWmCs0Q7WQFOnmg4qYT2bzpRaUO4NPTWO1r+gpyt6ChlwSmv0yHvQIysEPY2VYm9IqnEq3yBIolaTwUg+BCFqeKU+DG/CuT1UYZ+uSIi/pyH78MKJYLb0aQqzJWZlqfAgqeAV9mS
r5YLvARTOVg9ke/s+MLEYqjEsJA7VzA2ggZ4cfKOQoWsIzu0S+ffqYFnzQBT27JgsV/uUbEOU1RdneV/N4YacfZxm2hXTvWutff2Wk0gDGLyxyjZrHIo8DDaNZLBvEnhwHR9dsd4lhxWTU6urYdd1/9FhvGp7T0tmBYLT6DgQc/nW84MgrpmDXeuQsnpLz/xwrv3NDoXVveYeIEgOlRyyemhvrVie4aT9qriPdhPhsmT6WRH+lpYLdD0kCQ+ELNGpJn1Da9hEm5p7B0ZNeTqUbC/uzVlk3o+eR99qg0ojvVUF8prqTPpSRiVhHcTWiLBrj05po1z/Qi56hDFNYu8Eo4M4NpW9Y8XpKfyiT9rk7q3xvaLhmH4qWyVOI1n2N9k30Y+2HNoOXTZR/OaZmcNtojhqWuN+U97L9oduAlzbDotmEh62wm4cqgqX237M8f5ODmqeO35gsJmLx9Gp9raaSwpjdZilkDYjq0AUZhF+yXSwoxrYlwEua0zDxesNFovtusZkAOl015hAja9TX5DUL5JU9iXifpGkyvrYa92gh08qt080o8llZDIfQn9jhc46umSa7U36BLPyt+bGzTQDL2RRu5L4B9ed2RLmqWgfk9c+oMjautpnVlgPVosGsad1Z9OUhR0edN0Z1Fh37hOW7W1WRzgUbFFbMKS7diqeM7DhYW/vtpxdwzoB4RfvIzxRJbVJoy/BOl6reUBLsp+k0YllhN2f4By5AgOUg1Z941MG1l68W4Hv4+k3fesk5WMNE9M1H4a/IVgsNmgYrIgWpc9RtJVCRkBb2S76q2PLtmhyO7psK0mj9mcRHkuyGdx/B8keefJ6NMk2pMJ3X1pKtiyrxxVt0Y6v0UVbzYn2wQS005Rx9Rxl3LsJ3fXq/eM7Ez3d6h9//PNf7+NVDRE//T2CWjH3V7RdWxRC7mWPoJCuByVxyN3aH24ffrvt2trseNu1hQjo4cNd7WN31xa0kRdPmy57RIVwUXmTL9wjKlx16gEvqhz6/35DT952a8xwnf/ReLw66vkO35C7WZUXMO9h6DowVgv1FzfPVMHooGBkRlQwQsD0cBxEe8DcQh/aDvQvmNmHmVkhZDAmZnafVu//WLz/9ef8Xr3Vvoff7IV3JZr7d3H3uC9zVvt4Vd/sDPCDnYgQRy1vRyerl2u2Fkm6EH9IOX5TD6xhI09naoJD/mWlzBoTNGYN/hkGRHoyxx0TcvU5sMnM5u3/AQ==</diagram></mxfile>
|
2104.05938/main_diagram/main_diagram.pdf
ADDED
Binary file (83.8 kB).
2104.05938/paper_text/intro_method.md
ADDED
@@ -0,0 +1,59 @@
# Introduction
Meetings remain the go-to tool for collaboration, with 11 million meetings taking place each day in the USA and employees spending six hours a week, on average, in meetings (Mroz et al., 2018). The emerging landscape of remote work is making meetings even more important and simultaneously taking a toll on our productivity and wellbeing (Spataro, 2020). The proliferation of meetings makes it hard to stay on top of this sheer volume of information and increases the need for automated methods for accessing key information exchanged during them. Meeting summarization (Wang and Cardie, 2013; Shang et al., 2018;
<span id="page-0-0"></span>
Figure 1: Example of the query-based meeting summarization task. Users are interested in different facets of the meeting. In this task, a model is required to summarize the content that users are interested in and query.
Li et al., 2019; Zhu et al., 2020) is a task where summarization models are leveraged to generate summaries of entire meetings based on meeting transcripts. The resulting summaries distill the core content of a meeting, helping people efficiently catch up on meetings.
Most existing work and datasets on meeting summarization (Janin et al., 2003; Carletta et al., 2005) pose the problem as a single-document summarization task where one summary is generated for the whole meeting. Unlike news articles, where people may be satisfied with a high-level summary, meeting summaries are more likely to be probed for detailed information such as topics (Li et al., 2019), opinions, actions, and decisions (Wang and Cardie, 2013). This raises the question of whether a single paragraph is enough to summarize the content of an entire meeting.
Figure 1 shows an example of a meeting about "remote control design". The discussions in the meeting are multi-faceted and hence different users might be interested in different facets. For example, someone may be interested in learning about the new trends that may lead to the new product
<sup>\*</sup> These two authors contributed equally. The order of authorship was decided by a coin flip.
standing out, while others may be more interested in what other attendees thought about different elements of the design. It is challenging to compress or compose a short summary that contains all the salient information. Alternatively, summarization systems should adopt a more flexible and interactive approach that allows people to express their interests and caters to their diverse intents when generating summaries [\(Dang,](#page-10-2) [2005,](#page-10-2) [2006;](#page-10-3) [Litvak](#page-11-2) [and Vanetik,](#page-11-2) [2017;](#page-11-2) [Baumel et al.,](#page-10-4) [2018\)](#page-10-4).
With comprehensive consideration of the multi-granularity meeting contents, we propose a new task, query-based meeting summarization. To enable research in this area, we also create a high-quality multi-domain summarization dataset. In this task, as shown in Figure [1,](#page-0-0) given a query and a meeting transcript, a model is required to generate the corresponding summary. The query-based approach is a flexible setup that enables the system to satisfy different intents and different levels of granularity. Besides the annotated queries and corresponding gold summaries at different levels of granularity, our new dataset contains a rich set of annotations that include the main topics of each meeting and the ranges of relevant text spans for the annotated topics and each query. We adopt a hierarchical annotation structure that can not only help people find information faster, but also strengthen the models' summarization capacity.
In this paper, we employ a two-stage meeting summarization approach: *locate-then-summarize*. Specifically, given a query, a model called *Locator* is used to locate the relevant utterances in the meeting transcripts, and then these extracted spans are used as input to another model called *Summarizer* to generate a query-based summary. We present and evaluate several strong baselines based on state-of-the-art summarization models on QMSum. Our results and analysis from different perspectives reveal that existing models struggle to solve this task, highlighting the challenges the models face when generating query-based meeting summaries. We are releasing our dataset and baselines to support additional research in query-focused meeting summarization.
Overall, our contributions are listed as follows: 1) We propose a new task, query-based multi-domain meeting summarization, and build a new benchmark QMSum with a hierarchical annotation structure. 2) We design a *locate-then-summarize* model and conduct comprehensive experiments on
its strong variants and different training settings. 3) Through human evaluation, we further identify the challenges of the new task, including the impact of different query types and factuality errors.
# Method
In this section, we first define the task of query-based meeting summarization, then describe our two-stage *locate-then-summarize* solution in detail.
Existing meeting summarization methods define the task as a sequence-to-sequence problem. Specifically, each meeting transcript $X = (x_1, x_2, \cdots, x_n)$ consists of $n$ turns, and each turn $x_i$ represents the utterance $u_i$ and its speaker $s_i$, that is, $x_i = (u_i, s_i)$. Additionally, each utterance contains $l_i$ words, $u_i = (w_1, \cdots, w_{l_i})$. The objective is to generate a target summary $Y = (y_1, y_2, \cdots, y_m)$ by modeling the conditional distribution $p(y_1, y_2, \cdots, y_m \mid (u_1, s_1), \cdots, (u_n, s_n))$.
However, meetings are usually long conversations involving multiple topics and including important decisions on many different matters, so it is necessary and practical to use queries to summarize a certain part of the meeting. Formally, we introduce a query $Q = (w_1, \cdots, w_{|Q|})$ for the meeting summarization task; the objective is then to generate a summary $Y$ by modeling $p(y_1, y_2, \cdots, y_m \mid Q, (u_1, s_1), \cdots, (u_n, s_n))$.
In our two-stage pipeline, the first step requires a model to locate the relevant text spans in the meeting according to the queries; we call this model a Locator. We need a Locator because most existing abstractive models cannot process long texts such as meeting transcripts, so we must extract shorter, query-related paragraphs as input to the subsequent Summarizer.
We mainly utilize two methods to instantiate our Locator: Pointer Network [\(Vinyals et al.,](#page-12-15) [2015\)](#page-12-15) and a hierarchical ranking-based model. Pointer
<span id="page-5-0"></span>
Figure 3: Hierarchical ranking-based locator structure.
Network has achieved widespread success in extractive QA tasks [\(Wang and Jiang,](#page-12-16) [2017\)](#page-12-16). For each question, it will point to the <start, end> pair in the source document, and the span is the predicted answer. Specific to our task, Pointer Network will point to the start turn and the end turn for each query. It is worth noting that one query can correspond to multiple spans in our dataset, so we always extract three spans as the corresponding text for each query when we use Pointer Network as Locator in the experiments.
In addition, we design a hierarchical ranking-based model structure as the Locator. As shown in Figure [3,](#page-5-0) we first input the tokens in each turn to a feature-based BERT to obtain the word embeddings, where feature-based means we fix the parameters of BERT, so it effectively acts as an embedding layer. Next, a CNN [\(Kim,](#page-11-15) [2014\)](#page-11-15) is applied as a turn-level encoder to capture local features such as bigrams and trigrams in each turn. Here we do not use a Transformer because previous work [\(Kedzie et al.,](#page-11-16) [2018\)](#page-11-16) shows that this component does not matter much for the final performance. We combine the different features to represent the utterance $u_i$ in each turn, and concatenate the speaker embedding $s_i$ as the turn-level representation: $x_i = [u_i; s_i]$, where $[;]$ denotes concatenation and $s_i$ is a randomly initialized vector representing the speaking style of each meeting participant.
Then these turn representations will be contextualized by a document-level Transformer [\(Vaswani](#page-12-17) [et al.,](#page-12-17) [2017\)](#page-12-17) encoder.
Next, we introduce a query embedding $q$, which is obtained by a CNN (sharing parameters with the CNN in the turn-level encoder), and use an MLP to score each turn.
We use binary cross-entropy loss to train our Locator. Finally, turns with the highest scores are selected as the relevant text spans of each query and will be inputted to the subsequent Summarizer.
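
To make the Locator's architecture concrete, here is a minimal PyTorch sketch of the scoring path (frozen-BERT embeddings, CNN turn encoder, speaker embedding, document-level Transformer, query-conditioned MLP scorer); the dimensions, number of filters, and batching scheme are illustrative assumptions rather than the exact configuration.

```python
import torch
import torch.nn as nn

class CNNEncoder(nn.Module):
    """Encodes a sequence of (frozen) BERT token embeddings with 1-D convolutions."""
    def __init__(self, emb_dim=768, n_filters=128, kernel_sizes=(2, 3, 4)):
        super().__init__()
        self.convs = nn.ModuleList(
            nn.Conv1d(emb_dim, n_filters, k, padding=k // 2) for k in kernel_sizes
        )

    def forward(self, token_emb):                    # (batch, seq_len, emb_dim)
        x = token_emb.transpose(1, 2)
        return torch.cat([c(x).max(dim=2).values for c in self.convs], dim=-1)

class HierarchicalLocator(nn.Module):
    def __init__(self, emb_dim=768, n_speakers=100, spk_dim=64, n_layers=2):
        super().__init__()
        self.turn_encoder = CNNEncoder(emb_dim)      # shared by turns and query
        self.speaker_emb = nn.Embedding(n_speakers, spk_dim)
        d_model = 3 * 128 + spk_dim
        layer = nn.TransformerEncoderLayer(d_model=d_model, nhead=8, batch_first=True)
        self.doc_encoder = nn.TransformerEncoder(layer, num_layers=n_layers)
        self.scorer = nn.Sequential(
            nn.Linear(d_model + 3 * 128, d_model), nn.ReLU(), nn.Linear(d_model, 1)
        )

    def forward(self, turn_tokens, speaker_ids, query_tokens):
        # turn_tokens: (n_turns, turn_len, emb_dim), speaker_ids: (n_turns,),
        # query_tokens: (query_len, emb_dim) -- all for one meeting/query pair.
        u = self.turn_encoder(turn_tokens)                           # turn features
        x = torch.cat([u, self.speaker_emb(speaker_ids)], dim=-1)    # x_i = [u_i; s_i]
        h = self.doc_encoder(x.unsqueeze(0)).squeeze(0)              # contextualized turns
        q = self.turn_encoder(query_tokens.unsqueeze(0)).squeeze(0)  # query embedding
        scores = self.scorer(torch.cat([h, q.expand(h.size(0), -1)], dim=-1))
        return scores.squeeze(-1)  # train with BCEWithLogitsLoss; take top-k turns
```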
Given the relevant paragraphs, our goal in the second stage is to summarize the selected text spans based on the query. We instantiate our Summarizer with the current powerful abstractive models to explore whether the query-based meeting summarization task on our dataset is challenging. To be more specific, we choose the following three models:
Pointer-Generator Network [\(See et al.,](#page-12-4) [2017\)](#page-12-4) is a popular sequence-to-sequence model with copy mechanism and coverage loss, and it acts as a baseline system in many generation tasks. The input to Pointer-Generator Network (PGNet) is: "<s> Query </s> Relevant Text Spans </s>".
BART [\(Lewis et al.,](#page-11-17) [2020\)](#page-11-17) is a denoising pretrained model for language generation, translation and comprehension. It has achieved new state-of-the-art results on many generation tasks, including summarization and abstractive question answering. The input to BART is the same as for PGNet.
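
To illustrate how a Summarizer consumes the Locator's output, the snippet below feeds a query concatenated with extracted spans to an off-the-shelf BART checkpoint from Hugging Face Transformers; the checkpoint name, separator handling, and generation hyperparameters are assumptions for this sketch, not the paper's exact fine-tuning setup.

```python
from transformers import BartTokenizer, BartForConditionalGeneration

tokenizer = BartTokenizer.from_pretrained("facebook/bart-large")
model = BartForConditionalGeneration.from_pretrained("facebook/bart-large")

query = "Summarize the discussion about the remote control design."
relevant_spans = "PM: we want a fancy look and feel ... ID: the case could be curved ..."

# Same layout as the PGNet input: "<s> Query </s> Relevant Text Spans </s>"
inputs = tokenizer(f"{query} </s> {relevant_spans}",
                   return_tensors="pt", truncation=True, max_length=1024)

summary_ids = model.generate(inputs["input_ids"], num_beams=4,
                             max_length=128, early_stopping=True)
print(tokenizer.decode(summary_ids[0], skip_special_tokens=True))
```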
HMNet [\(Zhu et al.,](#page-12-3) [2020\)](#page-12-3) is the state-of-the-art meeting summarization model. It contains a hierarchical structure to process long meeting transcripts and a role vector to depict the difference among speakers. Besides, a cross-domain pretraining process is also included in this strong model. We add a turn representing the query at the beginning of the meeting as the input of HMNet.
ADDED
|
@@ -0,0 +1 @@
|
| 1 |
+
<mxfile host="app.diagrams.net" modified="2020-11-24T07:45:53.676Z" agent="5.0 (X11; CrOS x86_64 13421.89.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.183 Safari/537.36" version="13.9.2" etag="8Lyz_dagzgDoGcnHi_5K" type="google"><diagram id="ueppWeH9OfFCDz9rL9tQ">7Vttc6M2EP41nn6qh/eXj4kv6c300unUnendR8XIWC1GVJZju7++EogXgRwIB5imucxc0CJWi/bZ3UdrZ2Gu9uefCEh2TziA0cLQgvPC/LQwDM/R2f9ccMkElmNkgpCgIBPppWCN/oFCqAnpEQXwIE2kGEcUJbJwg+MYbqgkA4TgkzxtiyN51QSEsCFYb0DUlP6BAroTr2VrpfwzROEuX1nXxJ09yCcLwWEHAnyqiMyHhbkiGNPsan9ewYjvXb4v2XOPV+4WhhEY0y4PiH1/AdFRvNvCcCL26P0WMw3MQHoRb+38fcT5jR8PqU/u2ATdTs7lTXYVit9VLVuwkZWs8JEgSNitX+Cp+njvBe371Zf1wmbPaY8EBzh1/zEK4h+4AXTHvM7mgn3CJsfPh6RiJNuezE7Z9kMC4usGbcEeRZfMpELxwjBNi/3eFK8Xw1PjrtL6p7v1z6n5V23M7LnR/qKY4nQfIV8OEdiyeQ2xIa1tUHjm8h3dR0yg8+2mBP8FVzjChEliHENuG4qimghEKIzZMIJbruEFEopYaN4J8R4FAV/k/rRDFK6TbGdOLA8xGcHHOIAc+1phFFcAz1fjRy+ikmUziPeQkgubkj+QB7LIZKYYnsq0wLJbJttVUkL+GBCZKCw0l8HKLkS8qmPXfI+x+3sLriYzKgX6bygOuYoInyLu9RCgmMEnrSwMrtwb+CVdVeScj5hoxoSviIm8VA4dE5YiJmq7DOPgjnMANsIJjOUNh2dEv1auv/FtWdpi9OksdikdXPJBHDwibpEYMZO/VgcVHXxYKklHuZbM1TmdMCTHNKHASonl8p/CZTCocRYKSAjF1rqdfVhxkspHuYzACFD0Iq+ocpxY4VeM0rgVEDFdGSJ2zfUHFs0bKB6q0peaHkt/XU+2Bw09KYqKl+4ELPsDWCpg6dbMkeX5S8f2y3/uMEDjap2xsOY0ocV8sBZDTOgOhzgG0UMpvS+lXzBOhGv/hJRexPEJHBmBkyFZAKrE0DcJQmpANXEuA3VwwGVOkjJZFYOG1hmD3wkupwYuq1u6YbsFLpVpCZ9wuL6MZcvL1M9wdav816azi2z9vmh0W2imzHGe33aKqrAwU8XCPrecfw7H5ApDbF9QVy2ot3M4xZpM2njxNrYnB4WKmVVitcr5WMQEAHrbjSqWnI0Hn7eNKDSHoXb1NGg1qZ03ErPzWmDYweHWa6edwn9PcI+5aUJMCvmMnQ3IRqR53RjL97bZetRV+d4bwPd+B/I1ToV8vdL1I3UzoXHVqpoHqETt/MnKqtfrNNCjluXpS5FF+hcuZVqZvHBZAxSuIZOWDb3AUuHSM55Nxp1HylJunf9PV6F0fa5pao4pRzcUKWcyJu/WOLNeB8Bwp7n8Rd9hzjHnlXOgzrKOqwKm77gmGC3nOPbtco7qQ4BbNg+Ke3NsHuTdKinnGJPRnFppKj4VHrx9oCtzW9f+gT5wAyHf9dE7CPNIih8dhLd0EKbMlfPt4Rs3pWKKnqrX2a/TtPUtX85plr80tUpb37dljV37+rbp1vSORgOd2aLPve1BQFGU5w4/xcc/3RHXqmpA0Kma928CXc+j48ikrufHP9OAxak1Sl2tH1IabTF/6Y+HFFV//Z0h5Q1U/z+GlNrJwteW+buOgBRVN/59UPePHmoGwysfc0/A0XMd/5Oz4hANtPHPinPpq02Jwy69/LIciq9hNr65WS1kffj7gDR7bpTadmpdp77fAWwror0rHxuWf4iRTS//msV8+Bc=</diagram></mxfile>
|
2105.04241/main_diagram/main_diagram.pdf
ADDED
Binary file (18.2 kB).
2105.04241/paper_text/intro_method.md
ADDED
@@ -0,0 +1,82 @@
|
|
|
# Introduction
Transformer-based models such as BERT are very effective in capturing long-range dependencies in text passages through the attention mechanism [\(Vaswani et al.,](#page-6-0) [2017;](#page-6-0) [Devlin et al.,](#page-5-0) [2019\)](#page-5-0). However, the amount of compute in attention depends quadratically on the number of tokens in an input text passage. As such, the standard BERT implementation limits input size to a fixed number (often 512) of tokens.
In reality, dependencies over significantly longer ranges are common and modeling them is crucial. For instance, in a sentence like *Inside the Sammath Naur, the Ring-bearer struggled to throw the Ring into the volcano*, the narrative interweaves several prior storylines from a book. Comprehending this sentence therefore requires looking up previous
mentions of *Ring-bearer* and *Sammath Naur*, located many tokens away.
Several methods have been proposed to address this challenge; see [\(Tay et al.,](#page-6-1) [2020\)](#page-6-1) for a survey and [§3](#page-2-0) for a detailed discussion. One popular strategy is to reduce the number of tokens attended to. Longer inputs can in fact be processed in this way – but only up to a limit of around 5,000 tokens, as used in [\(Ainslie et al.,](#page-5-1) [2020;](#page-5-1) [Zaheer et al.,](#page-6-2) [2020;](#page-6-2) [Beltagy et al.,](#page-5-2) [2020\)](#page-5-2) – far below the context sizes required to model long documents such as books.
Another strategy such as HIBERT [\(Zhang et al.,](#page-6-3) [2019\)](#page-6-3) splits inputs into smaller segments which are processed individually, then assembled into a hierarchical representation. As a downside, intersegment context is unavailable during encoding.
We propose READTWICE, a simple approach that combines the strengths of both strategies. As its name suggests, the main idea is to process the input twice: a long text input (such as a document, or even a book) is treated as a collection of shorter text segments which are read independently and in parallel. Then, the encoder reads each segment again, now *augmented* with compressed information from the other segments.
The crucial component in READTWICE, as illustrated in Figure [1,](#page-1-0) is a memory module that holds compressed information from all segments. That compressed information is used only *once*: in the second pass. Thus, READTWICE is much more computationally efficient than models like ETC that rely on memory for all segments, in every layer. While READTWICE requires two passes, it differs from hierarchical models such as HIBERT that do not condition segment encoding on other segments. [§3](#page-2-0) contrasts these approaches in more detail.
We validate the efficacy of READTWICE on extractive question answering (QA) tasks, showing strong performance on HotpotQA [\(Yang et al.,](#page-6-4) [2018\)](#page-6-4), TriviaQA [\(Joshi et al.,](#page-5-3) [2017\)](#page-5-3) and NarrativeQA (Kociský et al., 2018).
<sup>∗</sup>Work is done while at Google
<sup>†</sup> On leave from University of Southern California (feisha@usc.edu)
<span id="page-0-0"></span><sup>1</sup> Source code and pre-trained checkpoints for READTWICE can be found at [https://goo.gle/](https://goo.gle/research-readtwice) [research-readtwice](https://goo.gle/research-readtwice).
<span id="page-1-0"></span>
Figure 1: READTWICE model architecture. The input is processed twice, with a memory table for inter-segment information sharing.
In particular, READTWICE significantly improves the state-of-the-art on QA based on *entire books* in NarrativeQA, with absolute gains of 4.5 ROUGE-L points and 3 BLEU-1 points (relative improvements of 23% and 17%, respectively).
# Method
We first describe the READTWICE model, followed by its pre-training procedure.
The model reads a large text document split into N segments $x_1, \ldots, x_N$ ; each $x_i$ is limited to 512 tokens, as in a typical BERT model.
The model architecture is depicted in Figure 1. In the first read, each segment is encoded independently with standard BERT. Then, memories are extracted from each segment—a process we describe in detail later—and gathered into a global memory pool. For the second read, a MemoryAttention layer (with a residual connection and a LayerNorm on top) is first used to merge the information from the former intra-segmental contextual token embeddings and the global memory. The merged result is then read by another small BERT model with only two Transformer layers to produce the final output. The rationale is that the first read already generates rich contextualized embeddings, and the second read only needs to incorporate information from the memory. More formally:
$$\begin{split} &H_i^0 = \mathtt{TokenEmbed}(x_i), H_i^1 = \mathtt{BERT}_1(x_i), \forall \ i \\ &M_i = \mathtt{ExtractMemories}(H_i^1), \forall i \\ &M = \mathtt{Gather}([M_1, \dots, M_N]) \\ &H_i^2 = \mathtt{MemoryAttention}(H_i^1, M), \forall i \\ &H_i^3 = \mathtt{LayerNorm}(H_i^1 + H_i^2), \forall \ i \\ &H_i^4 = \mathtt{BERT}_2(H_i^3), \forall \ i \end{split}$$
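
The pseudocode below sketches this two-pass flow in PyTorch form; the module names mirror the equations above, but their internals (and the CLS-style memory extraction used here) are placeholders rather than the released implementation.

```python
import torch.nn as nn

class ReadTwiceSketch(nn.Module):
    def __init__(self, bert1, bert2, memory_attention, hidden=768):
        super().__init__()
        self.bert1 = bert1                       # full first-pass encoder (BERT_1)
        self.bert2 = bert2                       # small two-layer second-pass encoder (BERT_2)
        self.memory_attention = memory_attention
        self.layer_norm = nn.LayerNorm(hidden)

    def forward(self, segments):                 # segments: (N, seq_len) token ids
        # First read: every segment is encoded independently and in parallel.
        h1 = self.bert1(segments)                # (N, seq_len, hidden)
        # Extract one memory per segment (here its first/CLS position) and
        # gather all memories into a single global table.
        memory_table = h1[:, 0, :]               # (N, hidden)
        # Second read: tokens attend to the global memory, followed by a
        # residual connection, LayerNorm, and the small second BERT.
        h2 = self.memory_attention(h1, memory_table)
        h3 = self.layer_norm(h1 + h2)
        return self.bert2(h3)
```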
Next, we describe the newly introduced layers.
ExtractMemories and Gather Our aim is to compress the information in each segment and disseminate it to other segments to be used in the second read. We consider three types of memories:
- READTWICE (CLS). One obvious choice is to use the CLS token representation associated with segment $x_i$ as a summary of the segment.
- READTWICE (STS). To obtain more finegrained memories, we extract a memory vector for each consecutive span of 32 tokens. Contextual embeddings of each span's first and the last tokens are concatenated and linearly projected to a single point in the token vector space as the span representation. The projection matrix is learned end to end.
- READTWICE (E). In another variant of spanbased memory, we memorize representations of entity mention spans. To obtain these spans, we first annotate each segment with an external Named Entity Recognition system. Then, each entity mention span is encoded in the same way as in READTWICE (STS). This design is motivated by the intuition that longrange dependencies primarily occur between entities.
Empirically, we find that READTWICE (E) leads to best performance (see the ablation in Section 4.4) and it is the memory type used in our headline results.
We collect all memories from all segments into a flat memory table. The table size is given by the number of segments (CLS), the number of 32-token spans (STS), or the number of entity mentions (E).
MemoryAttention In this layer, we let contextual token embeddings from individual segments interact with other segments' memories via dot-product attention over the memory table.
Let $h_{ij}$ be the contextual embedding of token $j$ in segment $i$ after the first read, and let $m$ index a memory table entry whose source segment is given by $m_s$. We then define its attention weight as:
$$\alpha_m = \frac{e^{h_{ij}^T M_m + r_{i,m_s}}}{\sum_m e^{h_{ij}^T M_m + r_{i,m_s}} + e^{h_{ij}^T M_0}} \tag{1}$$
where $M_0$ is a learnable no-op memory not associated with any specific text. $r_{i,m_s}$ is a learned position score which captures the relative distance between segment $i$ and the memory $M_m$, akin to [Shaw et al.](#page-5-5) [\(2018\)](#page-5-5):
$$r_{i,m_s} = \omega(\operatorname{dist}(i,m_s)) \tag{2}$$
where $\omega$ is a set of weights indexed by the distance
$$\operatorname{dist}(i, m_s) = \begin{cases} -B & i - m_s < -B \\ B & i - m_s > B \\ i - m_s & \text{otherwise} \end{cases} \tag{3}$$
where the cutoff threshold $B$ clips the effect of distance to $[-B, B]$. We set $B$ to 10 in this work.
Finally, the MemoryAttention layer output for a given token is given by
$$h_{ij}^2 = \sum_{m} \alpha_m M_m \tag{4}$$
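
A minimal sketch of this memory attention, including the clipped relative-distance score $r_{i,m_s}$ and the no-op memory $M_0$, might look as follows; the single-head formulation and tensor layout are simplifications.

```python
import torch
import torch.nn as nn

class MemoryAttentionSketch(nn.Module):
    def __init__(self, hidden=768, max_dist=10):
        super().__init__()
        self.B = max_dist
        self.rel_weights = nn.Parameter(torch.zeros(2 * max_dist + 1))  # omega
        self.no_op = nn.Parameter(torch.zeros(hidden))                  # M_0

    def forward(self, h, memories, token_segment, mem_segment):
        # h: (num_tokens, hidden), memories: (num_mem, hidden)
        # token_segment / mem_segment: segment index of each token / memory entry
        logits = h @ memories.t()                                 # h_ij^T M_m
        dist = (token_segment[:, None] - mem_segment[None, :]).clamp(-self.B, self.B)
        logits = logits + self.rel_weights[dist + self.B]         # + r_{i, m_s}
        noop = (h @ self.no_op)[:, None]                          # h_ij^T M_0
        alpha = torch.softmax(torch.cat([logits, noop], dim=1), dim=1)
        # Drop the no-op column: its mass corresponds to attending to nothing.
        return alpha[:, :-1] @ memories                           # sum_m alpha_m M_m
```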
We pretrain READTWICE similarly to [\(Devlin](#page-5-0) [et al.,](#page-5-0) [2019\)](#page-5-0), using the Wikipedia and BooksCorpus datasets. When entity mentions are used in the memory table, the texts are processed with the Entity Linking (EL) and Named Entity Recognition (NER) tools from the Google Cloud NLP API[2](#page-2-1) . Moreover, we use existing hyperlinks in Wikipedia as additional entity annotations. The first and the second BERT readers are trained end-to-end.
Our pre-training objective is the standard Masked Language Model (MLM) task, with the MLM prediction loss computed based on the output of the second reader.
In order to encourage the model to rely on the memory, we increase the difficulty of the MLM task. Following the entity masking procedure in [\(Guu et al.,](#page-5-6) [2020;](#page-5-6) [Sun et al.,](#page-5-7) [2019\)](#page-5-7), we mask entity mention tokens more aggressively at a 25% rate and jointly mask all tokens within a mention. By contrast, for non-entity tokens, we mask contiguous sequences of random length at a 15% rate.
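
A simplified version of this masking scheme is sketched below; the rates, the maximum span length, and the span-sampling strategy are illustrative choices rather than the exact pre-training recipe.

```python
import random

def mask_for_mlm(tokens, entity_spans, mask_id, entity_rate=0.25, token_rate=0.15, max_span=5):
    """tokens: list of token ids; entity_spans: list of (start, end) mention spans, end exclusive."""
    masked = list(tokens)
    in_entity = set()
    # Mask whole entity mentions, jointly, at the higher rate.
    for start, end in entity_spans:
        in_entity.update(range(start, end))
        if random.random() < entity_rate:
            for i in range(start, end):
                masked[i] = mask_id
    # Mask contiguous spans of random length among the remaining tokens.
    i = 0
    while i < len(tokens):
        if i not in in_entity and random.random() < token_rate:
            span = random.randint(1, max_span)
            for j in range(i, min(i + span, len(tokens))):
                masked[j] = mask_id
            i += span
        else:
            i += 1
    return masked
```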
2106.15004/main_diagram/main_diagram.drawio
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2106.15004/paper_text/intro_method.md
ADDED
|
@@ -0,0 +1,91 @@
|
|
|
# Introduction
To safely and efficiently navigate through complex traffic scenes, autonomous vehicles need the ability to predict the intent and future trajectories of surrounding vehicles. There is inherent uncertainty in predicting the future, making trajectory prediction a challenging problem. However, there's structure to vehicle motion that can be exploited. Drivers usually tend to follow traffic rules and follow the direction ascribed to their lanes. High definition (HD) maps of driving scenes provide a succinct representation of the road topology and traffic rules, and have thus been a critical component of recent trajectory prediction models as well as public autonomous driving datasets.
Early work [@cui2019multimodal] encodes HD maps using a rasterized bird's eye view image and convolutional layers. While this approach exploits the expressive power of modern CNN architectures, rasterization of the map can be computationally inefficient, erase information due to occlusions, and require large receptive fields to aggregate context. The recently proposed VectorNet [@gao2020vectornet] and LaneGCN [@liang2020laneGcn] models directly encode structured HD maps, representing lane polylines as nodes of a graph. VectorNet aggregates context using attention [@vaswani2017attention], while LaneGCN proposes a dilated variant of graph convolution [@kipf2016semi] to aggregate context along lanes. These approaches achieve state-of-the-art performance using fewer parameters than rasterization-based approaches.
The above methods represent the HD map as a graph and encode the input context into a single context vector as shown in Fig.[1](#fig:concept){reference-type="ref" reference="fig:concept"}. The context vector is then used by a multimodal prediction header [@cui2019multimodal; @chai2019multipath] to output multiple plausible future trajectories. The prediction header thus needs to learn a complex mapping, from the entire scene context to multiple future trajectories, often leading to predictions that go off the road or violate traffic rules. In particular, the prediction header needs to account for both *lateral* or *route* variability (e.g. will the driver change lane, will they turn right etc.) as well as *longitudinal* variability (e.g. will the driver accelerate, brake, maintain speed). This decoupling of routes and motion profiles for trajectories has been used in path planning [@paden2016survey; @lavalle2006planning], and more recently in prediction [@zhang2020map].
Our core insight is that the graph structure of the scene can additionally be leveraged to explicitly model the lateral or route variability in trajectories. We propose a novel approach for trajectory prediction termed Prediction via Graph-based Policy (PGP). Our approach relies on two key ideas.
**Predictions conditioned on traversals:** We selectively aggregate part of the scene context for each prediction, by sampling path traversals from a learned behavior cloning policy as shown in Fig. [1](#fig:concept){reference-type="ref" reference="fig:concept"}. By more directly selecting the subset of the graph that is used for each prediction, we lessen the representational demands on the output decoder. Additionally, the probabilistic policy leads to a diverse set of sampled paths and captures the lateral variability of the multimodal trajectory distribution.
**Latent variable for longitudinal variability:** To account for longitudinal variability of trajectories, we additionally condition our predictions with a sampled latent variable. This allows our model to predict distinct trajectories even for identical path traversals. We show through our experiments that this translates to greater longitudinal variability of predictions.
We summarize our main contributions on multimodal motion prediction using HD maps:
- A novel method which combines discrete policy roll-outs with a lane-graph subset decoder.
- State-of-the-art performance on the nuScenes motion prediction challenge.
- Extensive ablations demonstrating ability to capture lateral and longitudinal motion variations.
<figure id="fig:concept" data-latex-placement="t">
<img src="concept.png" />
<figcaption><strong>Overview of our approach.</strong> We encode HD maps and agent tracks using a graph representation of the scene. However, instead of aggregating the entire scene context into a single vector and learning a one-to-many mapping to multiple trajectories, we condition our predictions on selectively aggregated context based on paths traversed in the graph by a discrete policy.</figcaption>
</figure>
# Method
[]{#sec:formulation label="sec:formulation"} We predict the future trajectories of vehicles of interest, conditioned on their past trajectory, the past trajectories of nearby vehicles and pedestrians, and the HD map of the scene. We represent the scene and predict trajectories in the bird's eye view and use an agent-centric frame of reference aligned along the agent's instantaneous direction of motion.
We assume access to past trajectories of agents in the scene obtained from on-board detectors and multi-object trackers. We represent the past trajectory of agent $i$ as a sequence of motion state vectors $s^{i}_{-t_h:0} = [s^{i}_{-t_h},..., s^{i}_{-1}, s^{i}_{0}]$ over the past $t_h$ time steps. Each $s^i_t = [x^i_t, y^i_t, v^i_t, a^i_t, \omega^i_t, \mathcal{I}^i]$, where $x_t^i$, $y_t^i$ are the BEV location co-ordinates, $v_t^i$, $a_t^i$ and $\omega_t^i$ are the speed, acceleration and yaw-rate of the agent at time $t$, and $\mathcal{I}^i$ is an indicator with value 1 for pedestrians and 0 for a vehicles. We nominally assign the index 0 to the target vehicle, and timestamp 0 to the time of prediction.
**Nodes:** We represent the HD map as a directed graph $\mathcal{G}(V, E)$. The network of lane centerlines captures both, the direction of traffic flow, and the legal routes that each driver can follow. We seek to use both as inductive biases for our model. We thus use lane centerlines as nodes ($V$) in our graph. We consider all lane centerlines within a fixed area around the target vehicle. To ensure that each node represents a lane segment of a similar length, we divide longer lane centerlines into smaller snippets of a fixed length, and discretize them to a set of N poses. Each snippet corresponds to a node in our graph, with a node $v$ represented by a sequence of feature vectors $f^{v}_{1:N} = [f^{v}_{1},..., f^{v}_{N}]$. Here each $f^v_n = [x^v_n, y^v_n, \theta^v_n, \mathcal{I}_n^v]$, where $x^v_n$, $y^v_n$ and $\theta^v_n$ are the location and yaw of the $n^{th}$ pose of $v$ and $\mathcal{I}_n^v$ is a 2-D binary vector indicating whether the pose lies on a stop line or crosswalk. Thus, our node features capture both the geometry as well as traffic control elements along lane centerlines.
**Edges:** We constrain edges ($E$) in the lane graph such that any traversed path through the graph corresponds to a legal route that a vehicle can take in the scene. We consider two types of edges. Successor edges ($E_{suc}$) connect nodes to the next node along a lane. A given node can have multiple successors if a lane branches out at an intersection. Similarly, multiple nodes can have the same successor if two or more lanes merge. To account for lane changes, we additionally define proximal edges ($E_{prox}$) between neighboring lane nodes if they are within a distance threshold of each other and their directions of motion are within a yaw threshold. The yaw threshold ensures that proximal edges are not erroneously assigned in intersections where multiple lanes cross each other.
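
As an illustration of the proximal-edge construction, the sketch below applies the distance and yaw thresholds to representative node poses; the threshold values and the single-pose-per-node simplification are assumptions.

```python
import numpy as np

def build_proximal_edges(node_poses, dist_thresh=4.0, yaw_thresh=np.pi / 4):
    """node_poses: (num_nodes, 3) array of (x, y, yaw), one representative pose per lane node.
    Returns proximal edges (i, j) between nearby nodes with similar directions of motion."""
    edges = []
    for i in range(len(node_poses)):
        for j in range(i + 1, len(node_poses)):
            dx, dy = node_poses[j, :2] - node_poses[i, :2]
            if np.hypot(dx, dy) > dist_thresh:
                continue
            dyaw = node_poses[j, 2] - node_poses[i, 2]
            dyaw = np.arctan2(np.sin(dyaw), np.cos(dyaw))  # wrap to [-pi, pi]
            if abs(dyaw) < yaw_thresh:                     # excludes crossing lanes
                edges.append((i, j))
    return edges
```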
To account for multimodality of the distribution of future trajectories, we output a set of $K$ trajectories $[\tau^1_{1:t_f}, \tau^2_{1:t_f},...,\tau^K_{1:t_f} ]$ for the target vehicle consisting of future x-y locations over a prediction horizon of $t_f$ time steps. Each of the $K$ trajectories represents a mode of the predictive distribution, ideally corresponding to different plausible routes or different motion profiles along the same route.
<figure id="fig:model" data-latex-placement="t">
<img src="model.png" />
<figcaption><strong>Proposed model.</strong> PGP consists of three modules trained end-to-end. The graph encoder (top) encodes agent and map context as node encodings of a directed lane-graph. The policy header (bottom-left) learns a discrete policy for sampled graph traversals. The trajectory decoder (bottom-right) predicts trajectories by selectively attending to node encodings along paths traversed by the policy and a sampled latent variable. </figcaption>
</figure>
Fig. [2](#fig:model){reference-type="ref" reference="fig:model"} provides an overview of our model. It consists of three interacting modules trained end-to-end. The *graph encoder* (Sec. [4.1](#sec:encoder){reference-type="ref" reference="sec:encoder"}) forms the backbone of our model. It outputs learned representations for each node of the lane graph, incorporating the HD map as well as surrounding agent context. The *policy header* (Sec. [4.2](#sec:policy){reference-type="ref" reference="sec:policy"}) outputs a discrete probability distribution over outgoing edges at each node, allowing us to sample paths in the graph. Finally, our attention based *trajectory decoder* (Sec. [4.3](#sec:decoder){reference-type="ref" reference="sec:decoder"}) outputs trajectories conditioned on paths traversed by the policy and a sampled latent variable.
Inspired by the simplicity and effectiveness of graph based encoders for trajectory prediction [@gao2020vectornet; @liang2020laneGcn], we seek to encode all agent features and map features as node encodings of our lane graph $\mathcal{G}(V, E)$.
**GRU encoders.** Both, agent trajectories and lane polylines form sequences of features with a well defined order. We first independently encode both sets of features using gated recurrent unit (GRU) encoders. We use three GRU encoders for encoding the target vehicle trajectory $s^0_{-t_h:0}$, surrounding vehicle trajectories $s^i_{-t_h:0}$ and node features $f^v_{1:N}$. These output the motion encoding $h_{motion}$, agent encodings $h^i_{agent}$ and initial node encodings $h^{v}_{node}$ respectively.
**Agent-node attention.** Drivers co-operate with other drivers and pedestrians to navigate through traffic scenes. Thus, surrounding agents serve as a useful cue for trajectory prediction. Of particular interest are agents that might interact with the target vehicle's route. We thus update node encodings with nearby agent encodings using scaled dot product attention [@vaswani2017attention]. We only consider agents within a distance threshold of each lane node to update the node encoding. This allows our trajectory decoder (Sec [4.3](#sec:decoder){reference-type="ref" reference="sec:decoder"}) to selectively focus on agents that might interact with specific routes that the target vehicle might take. We obtain keys and values by linearly projecting encodings $h^i_{agent}$ of nearby agents, and the query by linearly projecting $h^{v}_{node}$. Finally, the updated node encoding is obtained by concatenating the output of the attention layer with the original node encoding.
**GNN layers.** With the node encodings updated with nearby agent features, we exploit the graph structure to aggregate local context from neighboring nodes using graph neural network (GNN) layers. We experiment with graph convolution (GCN) [@kipf2016semi] and graph attention (GAT) [@velivckovic2017graph] layers. For the GNN layers, we treat both successor and proximal edges as equivalent and bidirectional. This allows us to aggregate context along all directions around each node. The outputs of the GNN layers serve as the final node encodings learned by the graph encoder.
Every path in our directed lane graph corresponds to a plausible route for the target vehicle. However, not every route is equally likely. For example, the past motion of the target vehicle approaching an intersection might indicate that the driver is preparing to make a turn rather than go straight. A slow-moving lane makes it likelier for the target vehicle to change lanes rather than stay in its lane.
We seek to learn a policy $\pi_{route}$ for graph traversal such that sampled roll-outs of the policy correspond to likely routes that the target vehicle would take in the future. We represent our policy as a discrete probability distribution over outgoing edges at each node. We additionally include edges from every node to an *end* state to allow $\pi_{route}$ to terminate at a goal location. The edge probabilities are output by the policy header shown in Fig. [2](#fig:model){reference-type="ref" reference="fig:model"}. The policy header uses an MLP with shared weights to output a scalar score for each edge $(u,v)$ given by,
$$\begin{equation}
\mbox{score}(u, v) = \mbox{MLP}\left(\mbox{concat}(h_{motion}, h_{node}^{u}, h_{node}^{v}, \mathbbm{1}_{(u,v) \in E_{suc}})\right).
\end{equation}$$
The scoring function thus takes into account the motion of the target vehicle as well as local scene and agent context at the specific edge. We then normalize the scores using a softmax layer for all outgoing edges at each node to output the policy for graph traversal,
$$\begin{equation}
\pi_{route}(v|u) = \mbox{softmax}(\left\{\mbox{score}(u, v)| (u, v) \in E \right\}).
\end{equation}$$
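
A PyTorch sketch of the edge-scoring MLP and the per-node softmax over outgoing edges is shown below; the feature sizes and the way edges are batched are illustrative assumptions.

```python
import torch
import torch.nn as nn

class PolicyHeader(nn.Module):
    def __init__(self, motion_dim=128, node_dim=128, hidden=128):
        super().__init__()
        in_dim = motion_dim + 2 * node_dim + 1   # [h_motion; h_u; h_v; 1_{(u,v) in E_suc}]
        self.mlp = nn.Sequential(nn.Linear(in_dim, hidden), nn.ReLU(), nn.Linear(hidden, 1))

    def forward(self, h_motion, h_node, edge_index, is_successor):
        # h_motion: (motion_dim,), h_node: (num_nodes, node_dim)
        # edge_index: (num_edges, 2) long tensor of (u, v) pairs, is_successor: (num_edges,)
        u, v = edge_index[:, 0], edge_index[:, 1]
        feats = torch.cat(
            [h_motion.expand(len(u), -1), h_node[u], h_node[v], is_successor[:, None].float()],
            dim=-1,
        )
        scores = self.mlp(feats).squeeze(-1)     # score(u, v)
        probs = torch.zeros_like(scores)
        for node in u.unique():                  # softmax over outgoing edges of each node
            mask = u == node
            probs[mask] = torch.softmax(scores[mask], dim=0)
        return probs                             # pi_route(v | u)
```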
We train the policy header using behavior cloning. For each prediction instance, we use the ground truth future trajectory to determine which nodes were visited by the vehicle. We can naively assign each pose in the future trajectory to the closest node in the graph. However, this can lead to erroneous assignment of nodes in intersections, where multiple lanes intersect. We thus only consider lane nodes whose direction of motion is within a yaw threshold of the target agent's pose. An edge $(u, v)$ is treated as visited if both nodes $u$ and $v$ are visited. We use negative log likelihood of the edge probabilities for all edges visited by the ground truth trajectory ($E_{gt}$), as the loss function for training the graph traversal policy, given by
$$\begin{equation}
\label{eq:l_bc}
\mathcal{L}_{BC} = \sum\limits_{(u,v) \in E_{gt}} -\mbox{log}(\pi_{route}(v|u)) .
\end{equation}$$
Sampling roll-outs of $\pi_{route}$ yields plausible future routes for the target vehicle. We posit that the most relevant context for predicting future trajectories is along these routes and propose a trajectory decoder that selectively aggregates context along the sampled routes.
Given a sequence of nodes $[v_1, v_2, ..., v_M]$ corresponding to a sampled policy roll-out, our trajectory decoder uses multi-head scaled dot product attention [@vaswani2017attention] to aggregate map and agent context over the node sequence as shown in Fig. [2](#fig:model){reference-type="ref" reference="fig:model"}. We linearly project the target vehicle's motion encoding to obtain the query, while we linearly project the node features $[h^{v_1}_{node}, h^{v_2}_{node}, ...,h^{v_M}_{node}]$ to obtain keys and values for computing attention. The multi-head attention layer outputs a context vector $\mathcal{C}$ encoding the route. Each distinct policy roll-out yields a distinct context vector, allowing us to predict trajectories along a diverse set of routes.
Diversity in routes alone does not account for the multimodality of future trajectories. Drivers can brake, accelerate and follow different motion profiles along a planned route. To allow the model to output distinct motion profiles, we additionally condition our predictions with a sampled latent vector $z$. Unlike routes, vehicle velocities and accelerations vary on a continuum. We thus sample $z$ from a continuous distribution. We use the multivariate standard normal distribution for simplicity.
Finally, to sample a trajectory $\tau^k_{1:t_f}$ from our model, we sample a roll-out of $\pi_{route}$ to obtain $\mathcal{C}_k$, sample $z_k$ from the latent distribution, concatenate both with $h_{motion}$, and pass them through an MLP to output the future locations $\tau^k_{1:t_f}$ over $t_f$ timesteps, $$\begin{equation}
\tau^k_{1:t_f} = \mbox{MLP}(\mbox{concat}(h_{motion}, \mathcal{C}_k, z_k)).
\end{equation}$$ The sampling process can often be redundant, yielding similar or repeated trajectories. However, our light-weight encoder and decoder heads allow us to sample a large number of trajectories in parallel. To obtain a final set of $K$ modes of the trajectory distribution, we use K-means clustering and output the cluster centers as our final set of $K$ predictions $[\tau^1_{1:t_f}, \tau^2_{1:t_f},...,\tau^K_{1:t_f} ]$. We train our decoder using the winner-takes-all average displacement error with respect to the ground truth trajectory ($\tau^{gt}$), in order not to penalize the diverse plausible trajectories output by our model, $$\begin{equation}
\label{eq:l_reg}
\mathcal{L}_{reg} = \mbox{min}_k \frac{1}{t_f}\sum\limits_{t=1}^{t_f}\lVert \tau^{k}_{t} - \tau^{gt}_{t}\rVert_2.
\end{equation}$$
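
The final clustering step could look like the sketch below, where a large set of sampled trajectories is reduced to $K$ modes with K-means; the use of scikit-learn and the flattening of trajectories into vectors are implementation assumptions.

```python
import numpy as np
from sklearn.cluster import KMeans

def cluster_trajectories(sampled_trajs, k=10, seed=0):
    """sampled_trajs: (num_samples, t_f, 2) array of sampled future x-y trajectories.
    Returns (k, t_f, 2) cluster centers used as the K predicted modes."""
    num_samples, t_f, _ = sampled_trajs.shape
    flat = sampled_trajs.reshape(num_samples, t_f * 2)
    km = KMeans(n_clusters=k, n_init=10, random_state=seed).fit(flat)
    return km.cluster_centers_.reshape(k, t_f, 2)
```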
We train our model end-to-end using a multi-task loss combining losses from Eq. [\[eq:l_bc\]](#eq:l_bc){reference-type="ref" reference="eq:l_bc"} and Eq. [\[eq:l_reg\]](#eq:l_reg){reference-type="ref" reference="eq:l_reg"}, $$\begin{equation}
\mathcal{L} = \mathcal{L}_{BC} + \mathcal{L}_{reg}.
\end{equation}$$
2108.09645/main_diagram/main_diagram.drawio
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
<mxfile host="app.diagrams.net" modified="2022-01-03T00:47:27.295Z" agent="5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.110 Safari/537.36" etag="co98t5jN1FAaRNenqTCE" version="16.1.2" type="device"><diagram id="zsbYD2KvyKgyGcvFXbIs" name="TwoStage">7V1bc6O4Ev41rpp5iAskJMRj4sxkz6m51SSndue8TBEbJ96xjQ9mNpdff4S5GIQAYZBQErJbuzbmYqtb3V9/anVP4GzzeBW4u/vP/sJbT4CxeJzAywkA0IEm/V905Ck+YhlOfOAuWC3iQ+bxwPXq2UsOGsnR36uFty+cGPr+Olztigfn/nbrzcPCMTcI/IfiaUt/XXzqzr1LnmgcD1zP3bVXOu3P1SK8T34XNHKn/+Gt7u6TR1uYJJds3PTs5MD+3l34D7mHwQ8TOAt8P4xfbR5n3joavXRgHvYYfgn2z6H119Onn99nz2TmnsXP/djmkuw3BN42PP3WhvevK/Dr4s/gDN4+h+dns2WYXGL8465/JwOW/NbwKR3BwP+9XXjRTYwJvHi4X4Xe9c6dR58+UJ2hx+7DzZq+M+nLpb8NP7qb1TpSl5vVhooeGF+8B/rf7/7G3SanJHpi0jG8SL6BF4TeIyOzht9rZkKg6uv5Gy8Mnuh1yV3OSPLjnlJdTt4/HPXANFM9vc/pAE6Vw02U7y67+XF86YtkiPnD7X9xrP+GFz/m8O7LbPfV+Xd4+fMMwzbjbQqM92q9nvlrPzhcC5co+icZ5Nzx+I8e34eB/8vLfYIPfxLFABgxYOxMHViSBOHIAUiTw2tWezq+qDDihKf4BslOyw85kTXiZs+K7wbzdDStFzEPkCE0DyxD6UQAb84eQbsoBwvw3IJSIbw9p4BBsxCgpVQIVmnMvQVFkslbPwjv/Tt/664/HI9eFH3E/pcXzu+TN8cLPvn+LhHM314YPiVmy/0d+kWx0QENnv6Krp+i9O2P/GeXj8nN43dP6XNTXGq090R2dLfHVZh7LH2XPZW+Pj40epM+Mx6saIROUAs6yv7vYO7V2aUkygjd4M4La050EF/RAm/thqt/il+PpzSHS8+DwH3KnbDzV9twn7vzt+jAUX8tUHSx2GSwN3O+TWdTzfn0RfwN+FcTo/g05KDi74lHKbmKmQfZmJw+NdDbs08CPkItYsXamidDmXmCB6iXGihjaqBaGxW9+eYF9CGhFyiyW9AQtVv2MHbLKio2rLdb2EZ15zfZLQCYUAQBtZbLrjFUJ06DDupMctpsFjQZNGhyNvUKEy+5rGbuSXfTUFDdAemo7p2MJ+lsPDvZy6LQmmTWxV6qkLkoMot1YyiROxzMQm2fdf1zNUEfJmgWrtYLb2JfbCb2ZfxRg60QoIAKjISA5Io4aIE8srB4eIeAWygV7zAcnYXLeIcXjVnS2CJjnLHCM7Z5JvYSI5Xcu8kE8Qgz6hCbEmnO3TRrlWTrbwfVipaKQBSYblOZu+ZrTLYEk9oZlm6WDAfNcuA6akxPGuMo0ZiSjZGtMQLcLDX/u+hlSDGB9+xH97vY5eLO7HguGG2mSB69dDXdPOhkSIfRpypw6Rgc6OB6ZDnnUiVz4t0uJUIHYFPFZqJK25k6oIQgbDK1SRlE0JOJJQtHWCXxXZXkRwckLI6+u17dRUM9pwNzkFY0bKu5uz5PPtisFosYc3j71bN7e7hVJJckAKf3RRcTdBndi5qH/VGQRQElFicvzfRQK1sAUvORmjJTmrgdJqJPuck8c8+BilAaVOSxkwc4f8T4v6ox/isQfs4R9C9vs8QAleXNXS6TJ3A8ClyiwC2TT/kNKPAyizeitnrURgRRG5QTGWI0MGojI2prykYh0xTb5lAbJKW5TgAvTyjCcqhaLbrN9zJ39/E1mnF1oA1BzUBbyhyNPlyKvAkhU928OKhn66RTulOInYJjb3Drh3fs8jE/iVPi+hoQdOSWwVcINYst6dcsTecf42oLZ3raFmKnpzXwggvg8VzxetnfkQQFJSeS/NM+A0Qf2GQywNayyo7U5sjNlia3MsE1AdjdRIO+vd3vDmORONa1twwPTm62DNw5FSk7L6N/41doFhy+PHrFHlgqa2I164nJz2+VpShVNNlNOsE52jBOdyaOBUNPdx73NU533aY7T08UT3cJmW9VBJktMT+iOmXOrk+Zk47MkSAyl7QwmgVqqYdxGFWqSL4or7Ay+5IybkARVwd4XF1khK7eFbDn+wG8EdLKG5WyJwYHn1VJdlfvbkbR1YlucCABeQTcCCSGBhKQTcQcPG5I6z+MiqK1ogyPOCEoyVDlKm0jCD2BntcTeIrm30ta2y3tY2e3vIkCT8QETZhVTcnAE1ZRnjcjad28RQANzVjD+g3bXHvT3xbISasUkS4bBGClIRrWDkHRALifHdrlLdVMHQfVOSaQR6jidQRa9jsqxbxm4v/9jopEHeR6FuOOc3qCCXePxw/pq7swg1Mf36VB1OZ9Fj/Ft6dfN35Cer7CsApqFVZhAd5NbVg18rM6omUMtUPL9TmNavfzq3Nm+f38OjkzR9SZdc6z4DszwhoyxVvpYPcd0i+1+o1s1Uol26xaoA/ValsRgrCkcpLWya3w0Ju68YjjGNxE4hHCToBipwNoKsGnor/9enOQ0pp6lRyAih9TAaAiZfjk3npreS6VTSQTUNxK55iUTk2eN8lVQj2qcM2sr/SaxjTdCnCqdUtP8ZfLvSfFcFk8HjuCSLOfFDxVbPafVaSlzfiRfwTAoqWMK3rGRwlUgKD8NaICoMHWdeGwAbz6SNLYAKt7dulLxFNAUzxlAVGn17VIyElOz2HTs1DB6TWej0BNGaTeTBuoMG3UPu1W1FLxzVBT3dT9Li6zfdirchpk0sgS2awl4u1PVMpLWlW0MhXblbvZuKPc6IRi2WRjYDbZ4uXRduLVXlqNQy3qcqfL3QO68a7uWLprtQVdKxp0S4hVLlipBm411sSTg7caKvhpoxX2sFphVzjHOFLbaLLKOtyKBuMXbaOiSLxK11hO0PxVktArWICQW0YDCHlbwBGstB2ZllPrbJWuoMvMIa+x2gPn8iBRbtgZ1GqjKpLtZrTaXKtNhrfaSCdCTN2KTn6qGxN9CDEkSoh1Ru18BsskTE8gy8pUVFUXgRo+ayRGMjkVTQkBGpgSqJEpUcutSzcLwkl0g/p/fXvjGJIVgN8bJ3Mfw/gS4a2HoA9f0nZxxWTWVlq2xrHqWuP05ouqdsNf64Fll8slmHOx7ALfYiTTA52x8hu6FjzSqfXNK3M/oqwhHLSXA6piDW/G2crM1sG3ZSCdMglf12zFomyR1bW3VDcNqKcVlWqA+UY1gFRYDaXID6Msdq0CfwR2voTiR+YSOZARV5GgI2RknZAOq1ZYJ/7zlVkhZVxmNw2o4htH1FiasBo
sWODuJNMQvNIJeyh15ZVsYXTRS3eotugCskvoTiNWwHbTJZKwQn0a2wvsWFDQd+kO5mUkM+F6YmqUck9S7pz8UGFPmEImJIdaFS2JYh6xxajU2+5TYeJjo4kUrPCXRPl9KtgGg/1N/nJW3NhZrJuomc5iNrJ4clbapwKfkCKnMpe5q43XMpdZHIcOuiphVxFCUeHSQ15cc93LloGmaLChjflmfawOzJBdzwy9xEJ1AnNabqZrM4YTrsQhCexBZ4oQNiwCiImgnbINGfZDUxs6lkmiZkkk3aagqPqGXUZ9A1IfTemZJ7AdfcYVitiJRvrhkJkHnOwPC6lMV9KDwNakR/kSOaSHXd74Ou4Tae0w2aKYGuwTsU+otDjyHMfhwy8Dzr46ztLjdXDTU/ROL+v15RK7cHCKy+ZVRBwprkIpb3sqxnCp5LfsMjU5NmHttbG2DvyWPe7t7S5YwuzZwbYGmK17El63kNIeZptGgfMYlsYkagPWtpElZgo/2thWEiamk+VlkB26VxcVVzI8hJLZiPF5iriIdFgqGhKPO9ZtpiO8bQkT8+mVvfssUk+D9hWBtpzFuEPQqjTYJFCVKegm5ar6eFHHg3ThrPeVsxNrrA43QdnQvWLljNfdQNrKGVFDBL7RBQbMy0dqvcDQmwftXonvhaTjRjCtkOov3VALE8KD7von3F4mfRVpj8z7p5/7dxkgmxXsf7nlTW3Fdpktb4BWfgEzWVKYn7pv8tgGeY5Bp24mJwZ4J2qGdGNBXoaxqOrifHmsw7/JKu9vslr7G12r6w84w1nkx5/hSsvrE17DjtiEU9F9jWQpILnXbpuJxVYjty0+x+/wcpWlSc+pzlS8GTMVE5GwmYoabIlzqlisuDngGCpP+KEyX3RKQ2UH8MBzoTHRdehS0EBBckl02i64sWump3Ys6rp9lSlmiIxyzweee5S22ubwOC2+tMsc5yjtdtKuWDdXK3CBdhHednEeBIfYIhnUhbu/z4BQTt5MJJRfciqERSdkNky6hCdlkeTGG3HGOz3WkQk7Y7ftsmZatMW4abB7htk7Sc5+cnh15lo2Ca7nUI5QgGlb9p5Hogj2DVaDGIarsmqXG5gN3TnY6V7lrhtrAloRrqppEEd0s3A/DVpbd9FMy20n6mQ5CopYOrzdwy2NC6ruQJ6ysdzO1KNpqWJv2NZy1tCGhcfM9SoefcraHHmVZPSdtNDNYCE8jziL58wu17E2v6qydOfFOctT+dKc/eTP3ZCKkP6G6CZ/uEH0+jKITF7VIkp2eFc5cXUPTnLOp399YkECT5vULrSYBo/KkzibBy39IDL+SmezafAYOQXT+erbf8ZZfKIeAUNEjxy1esShC95Sn8Pek2csJgffARkUaEsaIIYzcFgWqYIzaBszIKPqK1d+s6Yr5MQZpsGlNyVYvStv6wVu9IW+7uiLkJqL0eqdZvVKWszDLoZaozd8G5FBsz36zxi0GL92utHDDMkuy+hhXPWVK79Z0xWyjF41dduv0btcLZdepD+rg70ZbV93DoZVZp7t4+2zlGj7BLZB9xq3DceAQYD0i9uqqVKpcdv388/jLD5Vj4Tif9UYphWbKifhTdaAE4DZATfKA464uWtY3oiXGdRxi3pr0WJTQLRKd6ib6WSWbpO/ezsvMcpUc4wwvma0yqdpklNs1yfTJtO3gR+J8gjsqf28/+wvvOiM/wM=</diagram></mxfile>
|
2108.09645/main_diagram/main_diagram.pdf
ADDED
|
Binary file (83.2 kB). View file
|
|
|
2108.09645/paper_text/intro_method.md
ADDED
|
@@ -0,0 +1,132 @@
| 1 |
+
# Introduction
|
| 2 |
+
|
| 3 |
+
In this section, we first restate the definitions of Kantorovich optimal transport (OT) and unbalanced optimal transport (UOT) between two empirical measures. After that, we review the definitions of mini-batch optimal transport (m-OT) and mini-batch unbalanced optimal transport (m-UOT), and discuss the misspecified matchings issue of m-OT as well as some challenges of m-UOT.
|
| 4 |
+
|
| 5 |
+
<figure id="fig:mOT" data-latex-placement="!t">
|
| 6 |
+
<div class="center">
|
| 7 |
+
<table>
|
| 8 |
+
<tbody>
|
| 9 |
+
<tr>
|
| 10 |
+
<td style="text-align: center;"><embed src="images/main_pot.pdf" style="width:95.0%" /></td>
|
| 11 |
+
</tr>
|
| 12 |
+
</tbody>
|
| 13 |
+
</table>
|
| 14 |
+
</div>
|
| 15 |
+
<figcaption> <span> The illustration of Example <a href="#example:illustration" data-reference-type="ref" data-reference="example:illustration">1</a> for m-POT for different values of <span class="math inline"><em>s</em></span> including <span class="math inline"><em>s</em> = 1</span> (m-OT). The green points and blue points are respectively the supports of the empirical measures <span class="math inline"><em>μ</em><sub><em>n</em></sub></span> and <span class="math inline"><em>ν</em><sub><em>n</em></sub></span>. Black solid arrows and associate weights represent the optimal mappings between <span class="math inline"><em>μ</em><sub><em>n</em></sub></span> and <span class="math inline"><em>ν</em><sub><em>n</em></sub></span>. Red solid arrows represent misspecified mappings. The <span class="math inline">5 × 5</span> matrix is the incomplete transportation matrix <span class="math inline"><em>π</em><sub><em>P</em><sub><em>X</em><sup><em>m</em></sup></sub>, <em>P</em><sub><em>Y</em><sup><em>m</em></sup></sub></sub><sup>POT<sub><em>s</em></sub></sup></span> which is created from solving POT between <span class="math inline"><em>P</em><sub><em>X</em><sup><em>m</em></sup></sub></span> and <span class="math inline"><em>P</em><sub><em>Y</em><sup><em>m</em></sup></sub></span>. </span></figcaption>
|
| 16 |
+
</figure>
|
| 17 |
+
|
| 18 |
+
Let $\mathcal{X}:=\{x_i\}_{i=1}^n$, $\mathcal{Y}:=\{y_j\}_{j=1}^{n}$ be two interested samples. The corresponding empirical measures are denoted by $\mu_n:=\frac{1}{n} \sum_{i=1}^n \delta_{x_i}$ and $\nu_n:=\frac{1}{n}\sum_{j=1}^{n} \delta_{y_j}$.
|
| 19 |
+
|
| 20 |
+
**Optimal Transport:** The Kantorovich optimal transport [@villani2009optimal; @peyre2019computational] between $\mu_n$ and $\nu_n$ is defined as follows: $\text{OT}(\mu_n,\nu_n) := \min_{\pi \in \Pi(\boldsymbol{u}_n,\boldsymbol{u}_n)} \langle C,\pi\rangle,$ where $C$ is the distance matrix (or equivalently cost matrix) between $\mathcal{X}$ and $\mathcal{Y}$ that is produced by a ground metric (e.g., Euclidean distance or other designed distances).
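To make the definition concrete, here is a minimal sketch that evaluates OT between two small empirical measures, assuming the Python Optimal Transport (POT) package; `ot.dist`, `ot.unif`, and `ot.emd` are its standard helpers, and the random point clouds are purely illustrative.

```python
import numpy as np
import ot  # Python Optimal Transport (assumed installed via `pip install pot`)

n = 5
rng = np.random.default_rng(0)
X = rng.normal(size=(n, 2))            # supports of mu_n
Y = rng.normal(loc=1.0, size=(n, 2))   # supports of nu_n

u_n = ot.unif(n)                        # uniform weights 1/n
C = ot.dist(X, Y, metric="euclidean")   # ground-metric cost matrix

pi = ot.emd(u_n, u_n, C)                # optimal transport plan
ot_cost = float(np.sum(pi * C))         # <C, pi> = OT(mu_n, nu_n)
print(ot_cost)
```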
|
| 21 |
+
|
| 22 |
+
**Unbalanced Optimal Transport:** The unbalanced optimal transport [@chizat2018unbalanced] between $\mu_n$ and $\nu_n$ is defined as follows: $\text{UOT}^{\tau}_\phi(\mu_n,\nu_n) := \min_{\pi \in \mathbb{R}_+^{n\times n}} \langle C,\pi \rangle + \tau \text{D}_\phi (\pi_{1} , \mu_{n}) + \tau \text{D}_\phi (\pi_{2} ,\nu_{n}),$ where $C$ is the distance matrix, $\tau > 0$ is a regularization parameter, $D_\phi$ is a certain probability divergence (e.g., the KL divergence or the total variation distance), and $\pi_{1}$, $\pi_{2}$ are respectively the marginal distributions of the non-negative measure $\pi$. The computational costs of the OT and UOT problems are of order $\mathcal{O}(n^2/\varepsilon^2)$ and $\mathcal{O}(n^2/\varepsilon)$, respectively. Their solutions are obtained by running the Sinkhorn algorithm [@cuturi2013sinkhorn], which directly updates the whole $n\times n$ matrix. This means that storing an $n\times n$ matrix is unavoidable in this approach, so the memory capacity needs to match the matrix size.
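A rough counterpart for UOT, again assuming the POT package, is sketched below with its unbalanced Sinkhorn solver; the entropic regularization `reg` and the marginal penalty `reg_m` (playing the role of $\tau$, with a KL penalty) are illustrative values rather than settings from the paper.

```python
import numpy as np
import ot

n = 5
rng = np.random.default_rng(0)
X = rng.normal(size=(n, 2))
Y = rng.normal(loc=1.0, size=(n, 2))
u_n = ot.unif(n)
C = ot.dist(X, Y, metric="euclidean")

tau = 1.0    # marginal relaxation strength (tau in the text)
reg = 0.01   # entropic regularization used by the Sinkhorn solver

pi_uot = ot.unbalanced.sinkhorn_unbalanced(u_n, u_n, C, reg=reg, reg_m=tau)
transport_cost = float(np.sum(pi_uot * C))  # <C, pi>; the full objective adds the tau-weighted KL terms
print(transport_cost, pi_uot.sum())         # total transported mass may deviate from 1
```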
|
| 24 |
+
|
| 25 |
+
In real applications, the number of samples $n$ is usually very large (e.g., millions), due to large-scale empirical measures or a fine discretization of continuous measures. Therefore, directly solving OT between $\mu_n$ and $\nu_n$ is generally impractical due to the limitations of computational devices, namely memory constraints and vast computation. As a solution, the original $n$ samples of the two measures are divided (via sampling with or without replacement) into subsets of $m$ samples, which we refer to as mini-batches. The mini-batch size $m$ is often chosen to be the largest number that the computational device can process. Then, a mini-batch framework is developed to aggregate the optimal transport between pairs of corresponding mini-batches into a global result.
|
| 26 |
+
|
| 27 |
+
**Motivating examples:** We now provide some motivating examples to further illustrate the practical importance of mini-batch methods. The first example concerns training a deep learning model with a loss that requires computing a large-scale OT, e.g., deep generative models [@genevay2018learning] and deep domain adaptation [@damodaran2018deepjdot]. The size of the cost matrix cannot be large in practice since memory is also needed to store models and data. The second example is the color transfer application, where the numbers of pixels in both the source and target images are very large (e.g., millions). The mini-batch approach is used to transport a small number of pixels from source images to a small number of pixels from target images [@fatras2020learning]; that process is repeated for a large number of iterations.
|
| 28 |
+
|
| 29 |
+
**Related methods**: As another option, stochastic optimization can be utilized to solve the Kantorovich dual form with parametric functions, i.e., Wasserstein GAN [@arjovsky2017wasserstein; @leygonie2019adversarial]. Due to the limitation of the parametrization, it has been shown that this approach provides a very different type of discrepancy from the original Wasserstein distance [@mallasto2019well; @stanczuk2021wasserstein]. Recently, input convex neural networks have been developed to approximate the Brenier potential [@makkuva2020optimal]. Nevertheless, due to their limited power in approximating the Brenier potential [@korotin2021continuous], recent works have indicated that input convex neural networks are not sufficient for computing OT. Finally, both approaches require special choices of the ground metric of OT. Namely, Wasserstein GAN has to use the $\mathcal{L}_1$ norm to turn the constraint of the dual form into a Lipschitz constraint, and the Brenier potential exists only when the ground metric is $\mathcal{L}_2$.
|
| 30 |
+
|
| 31 |
+
Next, we recall the definition of mini-batch optimal transport (m-OT) [@fatras2020learning; @fatras2021minibatch]. To ease the ensuing presentation, we adapt some notation from those papers. To build a mini-batch of $1 \leq m \leq n$ points, we sample $X^m:=\{x_1,\ldots,x_m\}$ with or without replacement from $\mathcal{X}^m$ (similarly, $Y^m$ is drawn from $\mathcal{Y}^m$), where $m$ is the mini-batch size.
|
| 32 |
+
|
| 33 |
+
::: {#def:mOT .definition}
|
| 34 |
+
**Definition 1**. *(Mini-batch Optimal Transport) For $1 \leq m\leq n$ and $k \geq 1$, $X^m_1,\ldots,X^m_k$ and $Y^m_1,\ldots,Y^m_k$ are sampled with or without replacement from $\mathcal{X}^m$ and $\mathcal{Y}^m$, respectively. The m-OT transportation cost and transportation plan between $\mu_n$ and $\nu_n$ are defined as follows: $$\begin{align}
|
| 35 |
+
\text{m-OT}_k(\mu_n,\nu_n) &= \frac{1}{k} \sum_{i=1}^k \text{OT}(P_{X_i^m},P_{Y_{i}^m}); \nonumber \\
|
| 36 |
+
\pi^{\text{m-OT}_k} &= \frac{1}{k} \sum_{i=1}^k \pi^{\text{OT}}_{P_{X_i^m},P_{Y_i^m}}, \label{eq: mini-batch_OT}
|
| 37 |
+
\end{align}$$ where $\pi^{\text{OT}}_{P_{X_i^m},P_{Y_i^m}}$ is the transportation matrix returned by solving $\text{OT}(P_{X_i^m},P_{Y_i^m})$. Note that $\pi^{\text{OT}}_{P_{X_i^m},P_{Y_i^m}}$ is expanded to an $n \times n$ matrix by padding zero entries at indices different from those of $X_i^m$ and $Y_i^m$.*
|
| 38 |
+
:::
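As a minimal sketch of Definition 1 (assuming POT's `ot.emd` as the inner solver and sampling without replacement), the loop below averages the $k$ mini-batch costs and pads each $m \times m$ plan back into the $n \times n$ matrix exactly as the note describes.

```python
import numpy as np
import ot

n, m, k = 100, 10, 8
rng = np.random.default_rng(0)
X = rng.normal(size=(n, 2))            # supports of mu_n
Y = rng.normal(loc=1.0, size=(n, 2))   # supports of nu_n
u_m = ot.unif(m)

m_ot_cost = 0.0
pi_m_ot = np.zeros((n, n))
for _ in range(k):
    ix = rng.choice(n, size=m, replace=False)     # indices of mini-batch X_i^m
    iy = rng.choice(n, size=m, replace=False)     # indices of mini-batch Y_i^m
    C = ot.dist(X[ix], Y[iy], metric="euclidean")
    pi = ot.emd(u_m, u_m, C)
    m_ot_cost += float(np.sum(pi * C)) / k
    pi_m_ot[np.ix_(ix, iy)] += pi / k             # zero-padded expansion to n x n
```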
|
| 39 |
+
|
| 40 |
+
We recall that $k=1$ is the choice that practitioners usually use in real applications.
|
| 41 |
+
|
| 42 |
+
**Misspecified matchings issue of m-OT:** m-OT suffers from a problem which we refer to as *misspecified mappings*: non-zero entries of $\pi^{\text{m-OT}_k}$ that are zero in the optimal transport plan $\pi$ between the original measures $\mu_n$ and $\nu_n$. We consider the following simple example:
|
| 43 |
+
|
| 44 |
+
::: {#example:illustration .example}
|
| 45 |
+
**Example 1**. *Let $\mu_n, \nu_n$ be two empirical distributions, each with 5 supports in 2D: $\left\{(0,1),(0,2),(0,3),(0,4), (0,5)\right\}$ and $\{(1,1),(1,2),(1,3),(1,4),(1,5)\}$. The optimal mappings between $\mu_n$ and $\nu_n$, $\{(0,i)-(1,i)\}_{i=1}^5$, are shown in Figure [1](#fig:mOT){reference-type="ref" reference="fig:mOT"}. Assume that we use mini-batches of size 3 for m-OT, and specifically consider the pair of mini-batches $X^m=\{(0,1),(0,2),(0,3)\}$ and $Y^m=\{(1,3),(1,4),(1,5)\}$. Solving OT between $X^m$ and $Y^m$ yields 3 misspecified mappings $(0,1)-(1,3)$, $(0,2)-(1,4)$, and $(0,3)-(1,5)$, each with mass $1/3$ (see Figure [1](#fig:mOT){reference-type="ref" reference="fig:mOT"}).*
|
| 46 |
+
:::
|
| 47 |
+
|
| 48 |
+
Recently, [@fatras2021minibatch] mitigated the misspecified matchings issue by proposing to use unbalanced optimal transport as the transport between samples of mini-batches. The mini-batch unbalanced optimal transport is defined as follows:
|
| 49 |
+
|
| 50 |
+
::: {#mUOT .definition}
|
| 51 |
+
**Definition 2**. *(Mini-batch Unbalanced Optimal Transport) For $1 \leq m\leq n$, $k \geq 1$, $\tau >0$, a given divergence $D_\phi$, $X^m_1,\ldots,X^m_k$ and $Y^m_1,\ldots,Y^m_k$ are sampled with or without replacement from $\mathcal{X}^m$ and $\mathcal{Y}^m$, respectively. The m-UOT transportation cost and transportation plan between $\mu_n$ and $\nu_n$ are defined as follows: $$\begin{align}
|
| 52 |
+
\text{m-UOT}_k^{\phi,\tau}(\mu_n,\nu_n) &= \frac{1}{k} \sum_{i=1}^k \text{UOT}_\phi^\tau(P_{X_i^m},P_{Y_i^m}); \nonumber \\
|
| 53 |
+
\pi^{\text{m-UOT}_k^{\phi,\tau}} &= \frac{1}{k} \sum_{i=1}^k \pi^{\text{UOT}_\phi^\tau}_{P_{X_i^m},P_{Y_i^m}}, \label{eq: mini-batch_UOT}
|
| 54 |
+
\end{align}$$ where $\pi^{\text{UOT}_\phi^\tau}_{P_{X_i^m},P_{Y_i^m}}$ is the transportation matrix returned by solving $\text{UOT}_\phi^\tau(P_{X_i^m},P_{Y_i^m})$. Note that $\pi^{\text{UOT}_\phi^\tau}_{P_{X_i^m},P_{Y_i^m}}$ is expanded to an $n \times n$ matrix by padding zero entries at indices different from those of $X_i^m$ and $Y_i^m$.*
|
| 55 |
+
:::
|
| 56 |
+
|
| 57 |
+
**Example:** In Example [1](#example:illustration){reference-type="ref" reference="example:illustration"}, UOT can reduce the mass on misspecified matchings by relaxing the marginals of the transportation plan. Using the KL divergence as $D_\phi$, we illustrate the m-UOT results in Figure [5](#fig:mUOT){reference-type="ref" reference="fig:mUOT"} in Appendix [8.1](#subsec:visual){reference-type="ref" reference="subsec:visual"}. We recall that the regularization coefficient $\tau$ controls the degree of marginal relaxation in m-UOT.
|
| 58 |
+
|
| 59 |
+
**Discussion on m-UOT:** m-UOT has some issues that stem from the nature of UOT. First, the "transport plan\" of UOT is hard to interpret since UOT is developed for measures with different total masses. Second, the magnitude of the regularization parameter $\tau$ must match the magnitude of the cost matrix in order for the regularization to be effective. Hence we need to search for $\tau$ over a wide range of $\mathbb{R}^+$, which is a problem when the cost matrix changes its magnitude. We present a simulation demonstrating that the transportation plan of UOT for a fixed parameter $\tau$ changes after scaling the supports by a constant in Figure [6](#fig:mUOTscale){reference-type="ref" reference="fig:mUOTscale"} in Appendix [8.1](#subsec:visual){reference-type="ref" reference="subsec:visual"}. In deep partial DA, m-UOT needs to regularize the scale of the feature space of the feature extractor. Also, m-UOT has not been applied to deep generative models and color transfer due to this challenge.
|
| 60 |
+
|
| 61 |
+
In this section we propose a novel mini-batch approach, named *mini-batch partial optimal transport* (m-POT), that uses *partial optimal transport* (POT) as the transportation at the mini-batch level. We first review the definition of partial optimal transport in Section [3.1](#subsec:POT){reference-type="ref" reference="subsec:POT"}. Then, we define mini-batch partial optimal transport and discuss its properties in Section [3.2](#subsec:mPOT){reference-type="ref" reference="subsec:mPOT"}. Moreover, we illustrate that POT can be a natural choice of transportation among samples of mini-batches via simple simulations.
|
| 62 |
+
|
| 63 |
+
Now, we restate the definition of partial optimal transport (POT) as given in [@figalli2010optimal]. Similar to the definition of transportation plans, we define the notion of partial transportation plans. Let $0 < s \leq 1$ be the transportation fraction. The set of partial transportation plans between two discrete probability measures $\boldsymbol{\alpha}$ and $\boldsymbol{\beta}$ is $\Pi_s(\boldsymbol{\alpha},\boldsymbol{\beta}):=\big\{ \pi \in \mathbb{R}_+^{|\boldsymbol{\alpha}|\times|\boldsymbol{\beta}|} : \pi 1_{|\boldsymbol{\beta}|} \leq \boldsymbol{\alpha}, \pi^\top 1_{|\boldsymbol{\alpha}|} \leq \boldsymbol{\beta}, 1^\top \pi 1 = s \big\}$. With the previous notations, the partial optimal transport between $\mu_n$ and $\nu_n$ is defined as follows: $$\begin{align}
|
| 64 |
+
\label{eq:POT}
|
| 65 |
+
\text{POT}_s(\mu_n,\nu_n) = \min_{\pi \in \Pi_s(\boldsymbol{u}_n,\boldsymbol{u}_n)} \langle C, \pi \rangle,
|
| 66 |
+
\end{align}$$ where $C$ is the distance matrix. Equation ([\[eq:POT\]](#eq:POT){reference-type="ref" reference="eq:POT"}) can be solved by adding dummy points (according to [@chapel2020partial]) to expand the cost matrix $\overline{C}=\left[\begin{array}{cc}
|
| 67 |
+
C & 0 \\
|
| 68 |
+
0 & A
|
| 69 |
+
\end{array}\right]$, where $A>0$. In this case, solving the POT turns into solving the following OT problem: $$\begin{align}
|
| 70 |
+
\min_{\pi \in \Pi(\bar{\boldsymbol{\alpha}},\bar{\boldsymbol{\alpha}})} \langle \bar{C},\pi \rangle, \label{eq:POT_equivalence}
|
| 71 |
+
\end{align}$$ with $\bar{\boldsymbol{\alpha}}=[\boldsymbol{u}_n,1-s]$. Furthermore, the optimal partial transportation plan in equation ([\[eq:POT\]](#eq:POT){reference-type="ref" reference="eq:POT"}) can be derived from removing the last row and column of the optimal transportation plan in equation ([\[eq:POT_equivalence\]](#eq:POT_equivalence){reference-type="ref" reference="eq:POT_equivalence"}).
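The dummy-point reduction can be written out directly; the sketch below (assuming POT's `ot.emd`) pads the cost matrix with one dummy row and column, solves the enlarged OT problem, and strips the dummy entries, with `A` an arbitrary positive constant as in the text.

```python
import numpy as np
import ot

def pot_via_dummy(C, s, A=1.0):
    """Solve POT_s between two uniform measures via the dummy-point reduction."""
    n = C.shape[0]
    C_bar = np.zeros((n + 1, n + 1))
    C_bar[:n, :n] = C
    C_bar[n, n] = A                                       # dummy-to-dummy cost; other dummy costs are 0
    alpha_bar = np.concatenate([ot.unif(n), [1.0 - s]])   # weights [u_n, 1 - s] on both sides
    pi_bar = ot.emd(alpha_bar, alpha_bar, C_bar)
    return pi_bar[:n, :n]                                 # drop the dummy row and column

rng = np.random.default_rng(0)
X = rng.normal(size=(6, 2))
Y = rng.normal(loc=1.0, size=(6, 2))
C = ot.dist(X, Y, metric="euclidean")
pi_partial = pot_via_dummy(C, s=0.7)
print(pi_partial.sum())   # approximately 0.7: only a fraction s of the mass is transported
```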
|
| 72 |
+
|
| 73 |
+
Partial transportation naturally fits the mini-batch setting since it can decrease the transported mass on misspecified mappings (cf. the illustration in Figure [1](#fig:mOT){reference-type="ref" reference="fig:mOT"} when two mini-batches contain optimal matchings of the original transportation plan). Specifically, reducing the amount of mass to be transported, i.e., reducing $s$ (from the right images to the left images in Figure [1](#fig:mOT){reference-type="ref" reference="fig:mOT"}), returns globally better mappings. With the right choice of the transport fraction, we can select mappings between samples that are as good as those of the full optimal transport. Moreover, POT is stable to compute since it boils down to OT, so several existing OT solvers can be used to compute it.
|
| 74 |
+
|
| 75 |
+
<figure id="fig:DA_s" data-latex-placement="t!">
|
| 76 |
+
<div class="center">
|
| 77 |
+
<table>
|
| 78 |
+
<tbody>
|
| 79 |
+
<tr>
|
| 80 |
+
<td style="text-align: center;"><embed src="images/mPOT_DA_change_mass.pdf" style="width:85.0%" /></td>
|
| 81 |
+
</tr>
|
| 82 |
+
</tbody>
|
| 83 |
+
</table>
|
| 84 |
+
</div>
|
| 85 |
+
<figcaption> <span>Performance of m-POT on the deep DA when changing the fraction of masses <span class="math inline"><em>s</em></span>. The optimal values of <span class="math inline"><em>s</em></span>, which achieve the best accuracy, are marked by the <span class="math inline">⋆</span> symbol. In the left figure, the optimal ratios for digits datasets lie between <span class="math inline">0.8</span> and <span class="math inline">0.9</span>. In the middle figure, the best performing values are smaller, from <span class="math inline">0.5</span> to <span class="math inline">0.7</span>, for the Office-Home dataset. On the VisDA dataset in the right figure, the optimal fraction of masses is <span class="math inline">0.75</span>.</span> </figcaption>
|
| 86 |
+
</figure>
|
| 87 |
+
|
| 88 |
+
Now, we define *mini-batch partial optimal transport* (m-POT) between $\mu_n$ and $\nu_n$ as follows:
|
| 89 |
+
|
| 90 |
+
::: {#mPOT .definition}
|
| 91 |
+
**Definition 3**. *(Mini-batch Partial Optimal Transport) For $1 \leq m\leq n$, $k \geq 1$, $0 < s \leq 1$, $X^m_1,\ldots,X^m_k$ and $Y^m_1,\ldots,Y^m_k$ are sampled with or without replacement from $\mathcal{X}^m$ and $\mathcal{Y}^m$, respectively. The m-POT transportation cost and transportation plan between $\mu_n$ and $\nu_n$ are defined as follows: $$\begin{align}
|
| 92 |
+
\text{m-POT}_k^{s}(\mu_n,\nu_n) &= \frac{1}{k} \sum_{i=1}^k POT_s(P_{X_i^m},P_{Y_{i}^m}); \nonumber \\
|
| 93 |
+
\pi^{\text{m-POT}_k^s} &= \frac{1}{k} \sum_{i=1}^k \pi^{POT_s}_{P_{X_i^m},P_{Y_i^m}},
|
| 94 |
+
\end{align}$$ where $\pi^{POT_s}_{P_{X_i^m},P_{Y_i^m}}$ is the transportation matrix returned by solving $POT_s(P_{X_i^m},P_{Y_i^m})$. Note that $\pi^{POT_s}_{P_{X_i^m},P_{Y_i^m}}$ is expanded to an $n \times n$ matrix by padding zero entries at indices different from those of $X_i^m$ and $Y_i^m$.*
|
| 95 |
+
:::
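Relative to m-OT, only the inner solver changes; the sketch below assumes POT's `ot.partial.partial_wasserstein` (an equivalent route is the dummy-point reduction above), with the keyword `m` of that function denoting the transported mass $s$, not the mini-batch size.

```python
import numpy as np
import ot
from ot.partial import partial_wasserstein

n, m, k, s = 100, 10, 8, 0.7
rng = np.random.default_rng(0)
X = rng.normal(size=(n, 2))
Y = rng.normal(loc=1.0, size=(n, 2))
u_m = ot.unif(m)

m_pot_cost = 0.0
pi_m_pot = np.zeros((n, n))
for _ in range(k):
    ix = rng.choice(n, size=m, replace=False)
    iy = rng.choice(n, size=m, replace=False)
    C = ot.dist(X[ix], Y[iy], metric="euclidean")
    pi = partial_wasserstein(u_m, u_m, C, m=s)     # transport only a fraction s of the mass
    m_pot_cost += float(np.sum(pi * C)) / k
    pi_m_pot[np.ix_(ix, iy)] += pi / k
```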
|
| 96 |
+
|
| 97 |
+
From the equivalent form of POT in equation ([\[eq:POT_equivalence\]](#eq:POT_equivalence){reference-type="ref" reference="eq:POT_equivalence"}), we have an equivalent form of m-POT in Definition [3](#mPOT){reference-type="ref" reference="mPOT"} as follows: $$\begin{align}
|
| 98 |
+
\text{m-POT}_k^{s}(\mu_n,\nu_n) = \frac{1}{k} \sum_{i = 1}^{k} \min_{\pi \in \Pi(\bar{\boldsymbol{\alpha}}_{i},\bar{\boldsymbol{\alpha}}_{i})} \langle \bar{C}_{i},\pi \rangle. \label{eq:equivalent_mPOT}
|
| 99 |
+
\end{align}$$ Here, $\overline{C}_{i}=\left[\begin{array}{cc}
|
| 100 |
+
C_{i} & 0 \\
|
| 101 |
+
0 & A_{i}
|
| 102 |
+
\end{array}\right] \in \mathbb{R}_{+}^{(m+1) \times (m+1)}$ and $\bar{\boldsymbol{\alpha}}_{i} = [\boldsymbol{u}_{m},1-s]$ where $C_{i}$ is a cost matrix formed by the differences of elements of $X_{i}^{m}$ and $Y_{i}^{m}$ and $A_{i} > 0$ for all $i \in [k]$. The computational complexity of approximating each OT problem in equation ([\[eq:equivalent_mPOT\]](#eq:equivalent_mPOT){reference-type="ref" reference="eq:equivalent_mPOT"}) using entropic regularization is at the order of $\mathcal{O} \left(\frac{(m+1)^2}{\varepsilon^2} \right)$ [@altschuler2017near; @lin2019efficient] where $\varepsilon > 0$ is the tolerance. Therefore, the total computational complexity of approximating mini-batch POT is at the order of $\mathcal{O} \left(\frac{k (m+1)^2}{\varepsilon^2} \right)$. It is comparable to the computational complexity of m-OT, which is of the order of $\mathcal{O} \left(\frac{k \cdot m^2}{\varepsilon^2} \right)$ and slightly larger than that of m-UOT, which is $\mathcal{O} \left(\frac{k \cdot m^2}{\varepsilon} \right)$ [@pham2020unbalanced], in terms of $\varepsilon$.
|
| 103 |
+
|
| 104 |
+
**Concentration of m-POT:** We first provide a guarantee on the concentration of the m-POT value for any given mini-batch size $m$ and number of mini-batches $k$.
|
| 105 |
+
|
| 106 |
+
::: {#theorem:concentration_bound .theorem}
|
| 107 |
+
**Theorem 1**. *For any given number of minibatches $k \geq 1$ and minibatch size $1 \leq m \leq n$, assume that the entries of $C_{ij}$ are obtained from the distance $d(X_i,Y_j)$. Furthermore, we assume that $d(X_i,\mathbb{E}X_i)$ and $d(Y_j,\mathbb{E}Y_j)$ have sub-exponential distribution $SE(v^2,\gamma)$ (see Definition [5](#def:supexponent){reference-type="ref" reference="def:supexponent"} in Appendix [6](#sec:concentration){reference-type="ref" reference="sec:concentration"}). Then $$\begin{align*}
|
| 108 |
+
\mathbb{P} \Bigg{(}&\big|\text{m-POT}_k^{s}(\mu_n,\nu_n) - \text{m-POT}^{s}(\mu,\nu)\big| \geq \\ &D_n \sqrt{\frac{8\log(4/\delta)}{\lfloor n/m\rfloor}} + D_n \sqrt{\frac{2\log(4/\delta)}{k}} \Bigg{)} \leq \delta
|
| 109 |
+
\end{align*}$$ where $D_n = s\Big[d(\mathbb{E}X_i,\mathbb{E}Y_j)+ 2\max\Big\{\gamma \big[\log(2n) + \log(8/\delta)\big], \frac{v^2}{\gamma} \Big\} \Big]$ and\
|
| 110 |
+
$\text{m-POT}^{s}(\mu,\nu) : = \mathbb{E}_{X \sim \mu^{\otimes m}, Y \sim \nu^{\otimes m}} \left[\text{POT}_{s}(P_{X^{m}}, P_{Y^{m}}) \right]$.*
|
| 111 |
+
:::
|
| 112 |
+
|
| 113 |
+
The proof of Theorem [1](#theorem:concentration_bound){reference-type="ref" reference="theorem:concentration_bound"} is in Appendix [6.1](#sec:concentration_value){reference-type="ref" reference="sec:concentration_value"}. Furthermore, in Appendix [6.2](#sec:concentration_plan){reference-type="ref" reference="sec:concentration_plan"}, we study the concentration of the m-POT transportation plan. We demonstrate that the row/column sums of the m-POT transportation plan concentrate around the row/column sums of the full m-POT transportation plan (cf. Definition [4](#def:full_bactch_mPOT){reference-type="ref" reference="def:full_bactch_mPOT"} in Appendix [4](#def:full_bactch_mPOT){reference-type="ref" reference="def:full_bactch_mPOT"}).
|
| 114 |
+
|
| 115 |
+
**Practical consideration for m-POT:** First of all, as indicated in equation ([\[eq:equivalent_mPOT\]](#eq:equivalent_mPOT){reference-type="ref" reference="eq:equivalent_mPOT"}), m-POT can be converted to m-OT with mini-batch size $m+1$. Therefore, it is slightly more expensive than m-OT and m-UOT in terms of memory and computation. The second issue of m-POT is its dependence on the choice of the fraction of masses $s$, because $s$ plays a vital role in alleviating the misspecified mappings of m-OT. At first glance, choosing $s$ may seem as challenging as choosing $\tau$ in m-UOT; however, searching for $s$ is actually easier than searching for $\tau$. For example, we show that the transportation plan of POT for a fixed parameter $s$ remains the same, while the transportation plan of UOT for a fixed parameter $\tau$ changes significantly, when we scale the supports of the two measures by a constant in Figure [7](#fig:mPOTscale){reference-type="ref" reference="fig:mPOTscale"} in Appendix [8.1](#subsec:visual){reference-type="ref" reference="subsec:visual"}. Moreover, in partial deep domain adaptation, m-UOT needs an additional regularization coefficient for controlling the scale of the feature space of the neural networks, while m-POT does not need that parameter.
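This scale argument can be checked numerically; the sketch below (with the same assumed POT routines and illustrative values for $s$, $\tau$, and the entropic regularization) compares the POT and unbalanced-Sinkhorn plans before and after multiplying the cost matrix by a constant.

```python
import numpy as np
import ot
from ot.partial import partial_wasserstein
from ot.unbalanced import sinkhorn_unbalanced

rng = np.random.default_rng(0)
X = rng.normal(size=(20, 2))
Y = rng.normal(loc=1.0, size=(20, 2))
a = ot.unif(20)
C = ot.dist(X, Y, metric="euclidean")
s, tau, reg, scale = 0.7, 1.0, 0.01, 10.0

pi_pot = partial_wasserstein(a, a, C, m=s)
pi_pot_scaled = partial_wasserstein(a, a, scale * C, m=s)
print(np.allclose(pi_pot, pi_pot_scaled))      # expected True: rescaling C leaves the POT plan unchanged

pi_uot = sinkhorn_unbalanced(a, a, C, reg=reg, reg_m=tau)
pi_uot_scaled = sinkhorn_unbalanced(a, a, scale * C, reg=reg, reg_m=tau)
print(np.allclose(pi_uot, pi_uot_scaled))      # expected False: for a fixed tau the UOT plan shifts
```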
|
| 116 |
+
|
| 117 |
+
[]{#table:DA_digits_summary label="table:DA_digits_summary"}
|
| 118 |
+
|
| 119 |
+
<figure id="fig:twostageDA" data-latex-placement="!t">
|
| 120 |
+
<div class="center">
|
| 121 |
+
<table>
|
| 122 |
+
<tbody>
|
| 123 |
+
<tr>
|
| 124 |
+
<td style="text-align: center;"><embed src="images/POT-TwoStag.pdf" style="width:80.0%" /></td>
|
| 125 |
+
</tr>
|
| 126 |
+
</tbody>
|
| 127 |
+
</table>
|
| 128 |
+
</div>
|
| 129 |
+
<figcaption> <span>The pseudo computational graph for the two-stage deep domain adaptation. </span> </figcaption>
|
| 130 |
+
</figure>
|
| 131 |
+
|
| 132 |
+
[]{#table:DA_visda_summary label="table:DA_visda_summary"}
|
2109.07983/main_diagram/main_diagram.drawio
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
<mxfile host="app.diagrams.net" modified="2021-09-08T02:43:02.728Z" agent="5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/93.0.4577.63 Safari/537.36" etag="VeA6gCEAtrk7XIB6EFtH" version="15.1.1" type="device"><diagram id="0wAM1OeMx7Mw7FipftDe" name="Page-1">7Vtbc6M2GP01eUyGO/jRcZzLTDK73Wxnu33ZESBjNRi5Qk7s/vpKIAxISowT47h245kEfZJAnKPvKufMHs2WNwTMpw84humZZcTLM/vqzLL8wGK/uWBVChzbKwUJQXEpMmvBI/oHCqEhpAsUw7w1kGKcUjRvCyOcZTCiLRkgBL+0h01w2n7qHCRQETxGIFWlP1BMp6U0cI1afgtRMq2ebBqiZwaqwUKQT0GMXxoie3xmjwjGtLyaLUcw5dhVuJTzrl/pXS+MwIx2mXB9BW/Qr+Gv6+Hv3vkPb3n79c/xuSlu8wzShXhjsVq6qiCAMUNENDGhU5zgDKTjWnpJ8CKLIX+OwVr1mHuM50xoMuFfkNKVoBcsKGaiKZ2lold9l4prQBJI33iBQTmOr7ExUSBwA/EMUrJiAwhMAUXPbVaB2BzJetx66leM2FIsQ+zjQJAodrE5MNp3yPGCRFBMqklgF41V1KKCmm1osv6n6R00Wd5+afI6sJTFQ26WWCtKQZ6jqCPEG6F7qc1TZZ2mDctUyT6IsDO4cNsYm90w7nAnR7pTuat6Y8sJDlupSiA/X6k8U9Iqe788DTbTVJPAEX2ZIgof56AA74WFJR0Bf4aEwuWbUFa9VnvrVu6goYOO56pKuDZQOvRbuG3tIVTbc5dRmBBAGS48NPBStojLkLCrhF+xeC1GDIJcQZNFKnN+ySACaQpTzO4yYzPmkCC2Wkjkvq91xybwJ2gJqzCvbKfpCKeYFI+2YxcGscPkOSX4CTZ6Aiu0PY/PwBmtVFITBG3PpSNvb78yTA0613Fdk05ZEXbGpu0fhGlioJLVH3w+Q0Q0fzb7rpbi5mVrJVrvN2mVXvVt0xxJgU1/TfoGq8a8N1g1hs35gPz1J9mm9KSgetKrjlFemzyDXZSr2KmZrZ7a2HMPKEMzwGeOY6QxFO0NtlHzW5o+9vinB322fAm+gU6fg32qs6Ua53v4DLOcQpSduPm1XCkK9Kyu5tfqjS/V/N6DLFnwWoHiR0XJ47+gGnYn1RjsVTXUIPxy/O37iSuF7UlK0T0m6U8ptorDP7Drd4CfE7x3o5u94VeFmccW0/Udqrm2XHtTQrXOJYe9RX3Kog816rPVXTmklKBwQZmvM0ZFjWqCIFGjP6aLtL232gYywxmU1FyIQIqSjDUjtpcKA801G0UgHYqOGYrjYrvrTEhbBXZtel252mHoTIezT8trdyjSf8TyQpO5OF/n4Aaeb4Nd2eSBXO7T+zQVWLmKuztgrWMA1jUPD1j7KIC1Dw9Y5yiAlXO+AwBWrX6wBA/MOFxZmPM/ZV5iPMK/LzZ4xtNKVhy/7TFtS8+mph7eW1rpdLDsjaM4EZc0w+Qloo0ombV+VmExu65jZN7YQdlTFBwO5JBPrhM00s8Ph9xB15B7Z8d8upOR+YKqlZzvPKB9TZcjQOKtzelkMrGiSKeNsRd6bj/1tDbe1ZZo7hyNJvZmWB21ljZir0xAXmzQ7UjIMH0tK2iQEILoKSm84JcFTVFWmcoYkKcvbBaihcZeGG4jF0nhhGoIDCKoJzAMXMftIwFRjluNjpUftzcKO1R+KoZiQMEjxaT4WtW2vkwB2St+ipyPMoOGOVHnZmBoiHL5R9DRkJc/Ozr19aSk3tQ6Ol3c4ro9ceOq+fsDyJ94gEJgCdlp5O2WIZFjf3LW7nbI2k9Ba5RDh0PQGjU8FFpzl3GIUJaciNo48vH0p6uNWjq4m80xoaAIK18wifPiHuydWfoV6W3cQR6/Kf7D73oy3Ztrd9V6wiVTekN/qLmOhVdMQ2JI7A6xWEnEfbgpOMuFQXO1Fi0KYDjRZLsABpOoD28SXMg5kKMG0nXZokmW3xtZaiYzGqpHpYdoqERbcOz3U/jUHvL1Zby869wIr+gtNsPn35LB4J5cw3PVdo2mIEuKb+Ctj1XeqBcRHME8f3+Gs301T05/+k5mPFWLfA1DuygMaRlS7d03OE85zrt0IZMJ9Iq0UQevUAJt6Bb7g9DoJTyWzZm2SKerKcnf794ZFWrF9S7L2QsdORO21YUJW+NYemNC9StXMIX06HVCPpHUfstG50DewQRr1v/rVdY263+Ys8f/Ag==</diagram></mxfile>
|
2109.07983/main_diagram/main_diagram.pdf
ADDED
|
Binary file (13.9 kB). View file
|
|
|
2109.07983/paper_text/intro_method.md
ADDED
|
@@ -0,0 +1,59 @@
| 1 |
+
# Method
|
| 2 |
+
|
| 3 |
+
We now describe our **C**ontrastive **A**ttributed explanations for **T**ext (**CAT**) method. Contrastive explanations convey why the model classified a certain input instance as a class $p$ and not another class $q$. This is achieved by creating contrastive examples (also called contrasts) from the input instance which get predicted as $q$. Contrastive examples are created by minimally perturbing the input such that the model prediction changes. In the case of text data, perturbations can be of three types: (1) inserting a new word, (2) replacing a word with another, and (3) deleting a word. In addition to keeping the number of such perturbations small, contrastive explainers also try to maintain grammatical correctness and fluency of the contrasts [@gyc; @mice].
|
| 4 |
+
|
| 5 |
+
As an example, take the case of a black box model trained on the AG News dataset that predicts which category a certain news headline falls under. Given a headline, "*Many technologies may be a waste of time and money, researcher says*\" which is predicted as Sci-Tech, a contrastive explainer will try to explain why this headline wasn't predicted as, say, *Business* by generating a contrastive example, "*Many ~~technologies~~ [jobs]{style="color: red"} may be a waste of time and money, researcher says*\" which is predicted as Business. Observe that a single word replacement achieves a prediction change. Such contrastive explanations can help users test the robustness of black box classification models.
|
| 6 |
+
|
| 7 |
+
We observed that even with constraints for minimal perturbation and fluency on a given black box model and an instance, there are multiple contrastive examples to choose from and, very often, many are less informative than others. For example, another possible contrast is, "*Many technologies may be a waste of ~~time~~ [investment]{style="color: red"} and money , researcher says\"* which also gets predicted as Business. However, this particular explanation is not as intuitive as the previous one as "money\" is a form of "investment\" and the nature of the sentence has not changed in an obvious sense with the word "technologies\" still present in the sentence.
|
| 8 |
+
|
| 9 |
+
To alleviate this problem, we propose to construct and use a set of *attribute classifiers*, where the attributes could be tags/subtopics relevant to the classification task obtained from the same or a related dataset used to build the original classifier. Attribute classifiers indicate the presence/absence of a certain subtopic in the text and confidence scores from these classifiers could be used as a regularization to create a contrast. We thus prefer contrasts which change attribute scores measurably as opposed to those contrasts which do not. However, at the same time, we want a minimal number of attribute scores to change so as to have crisp explanations. Hence, our regularization not only creates more intuitive contrasts, but also provides additional information to the user in terms of changed subtopics which, as confirmed through our user study in Section [4.4](#sec:human){reference-type="ref" reference="sec:human"}, provide better understanding of the model behavior. The important steps in our method are depicted in Figure [1](#fig:cat){reference-type="ref" reference="fig:cat"}.
|
| 10 |
+
|
| 11 |
+
Formally, given an input text $x \in \mathcal{X}$, and a text classification model $f(\cdot)$ which predicts $y = f(x)\in \mathcal{Y}$, we aim to create a perturbed instance $x'$ such that the predictions $f(x) \neq f(x')$ and $x'$ is "minimally\" different from $x$. We use a set of $m$ attribute classifiers $\zeta_i: \mathcal{X} \rightarrow \mathbb{R} , \forall i \in \{1,\ldots,m\}$, which produce scores indicative of presence (higher scores) or absence (lower scores) of corresponding attributes in the text. We say that attribute $i$ is added to the perturbed sentence if $\zeta_i(x') - \zeta_i(x) > \tau$ and removed when $\zeta_i(x') - \zeta_i(x) < -\tau$, for a fixed $\tau > 0$. Word-level Levenshtein distance between original and perturbed instance $d_{Lev}(x',x)$, which is the minimum number of deletions, substitutions, or insertions required to transform $x$ to $x'$ is used to keep the perturbed instance close to the original. The naturalness (fluency) of a generated sentence $x'$ is quantified by the likelihood of sentence $x'$ as measured by the language model used for generation; we denote this likelihood by $p_{\text{LM}}(x')$. For a predicate $\phi$, we denote $\mathbbm{1}_\phi$ the indicator of $\phi$, which takes the value 1 if $\phi$ is true and $0$ otherwise. Given this setup, we propose to find contrastive examples by solving the following optimization problem: $$\begin{align}
|
| 12 |
+
\label{eqn:main}
|
| 13 |
+
\max_{x' \in \mathcal{X}}&~~~ ||\zeta(x') - \zeta(x)||_{\infty}-\beta\sum_{i} \mathbbm{1}_{|\zeta_i(x') - \zeta_i(x)|>\tau} \nonumber \\
|
| 14 |
+
& ~~~+ \lambda \cdot \max_{j \in \mathcal{Y}\setminus y} \{~[f(x')]_j - [f(x)]_y~\} \nonumber \\
|
| 15 |
+
& ~~~+ \eta \cdot p_{\text{LM}}(x') ~-~\nu \cdot d_{Lev}(x',x),
|
| 16 |
+
\end{align}$$ where $\zeta(x)$ is a vector such that $[\zeta(x)]_i=\zeta_i(x)$, $\beta, \lambda, \eta, \nu > 0$ are hyperparameters that trade off different aspects, and $||\cdot||_{\infty}$ is the $l_{\infty}$ norm. The first term in the objective function encourages picking an $x'$ for which at least one attribute is added to or removed from $x$. The second term minimizes the number of such attributes for ease of interpretation. The third term is the contrastive score, which encourages the perturbed instance to be predicted differently than the original instance. The fourth and fifth terms ensure that the contrast is fluent and close to the original instance, respectively.
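To show how the terms combine, here is a small, hypothetical scoring helper for this objective; the inputs `zeta_x`, `zeta_xp`, `f_x`, `f_xp`, and `p_lm_xp` stand in for the attribute scores $\zeta(\cdot)$, the black-box class probabilities $f(\cdot)$, and the language-model likelihood, none of which are pinned down at this level of detail in the text.

```python
import numpy as np

def levenshtein(a, b):
    """Word-level Levenshtein distance between two token lists."""
    d = np.zeros((len(a) + 1, len(b) + 1), dtype=int)
    d[:, 0] = np.arange(len(a) + 1)
    d[0, :] = np.arange(len(b) + 1)
    for i in range(1, len(a) + 1):
        for j in range(1, len(b) + 1):
            d[i, j] = min(d[i - 1, j] + 1, d[i, j - 1] + 1,
                          d[i - 1, j - 1] + (a[i - 1] != b[j - 1]))
    return int(d[len(a), len(b)])

def cat_objective(zeta_x, zeta_xp, f_x, f_xp, y, x_tokens, xp_tokens, p_lm_xp,
                  beta=0.1, lam=1.0, eta=0.1, nu=0.1, tau=0.2):
    """Score a candidate contrast x' against the original x (sketch of the objective above)."""
    diff = zeta_xp - zeta_x
    attr_term = np.max(np.abs(diff))                    # l_inf norm of the attribute change
    sparsity_term = beta * np.sum(np.abs(diff) > tau)   # number of attributes that flipped
    contrast_term = lam * (np.max(np.delete(f_xp, y)) - f_x[y])
    fluency_term = eta * p_lm_xp
    distance_term = nu * levenshtein(x_tokens, xp_tokens)
    return attr_term - sparsity_term + contrast_term + fluency_term - distance_term
```

In the full method, this score would be evaluated on every candidate contrast produced by the generation step described next.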
|
| 17 |
+
|
| 18 |
+
The above objective function defines a controlled natural language generation problem. Earlier methods for controlled generation that shift the latent representation of a language model (such as GPT-2) [@gyc; @pplm] have resulted in generated sentences being very different from the original sentence. We thus adopt a different strategy where we first take the original sentence and identify locations where substitutions/insertions need to be made using available feature attribution methods such as Integrated Gradients [@integrated_gradients]. These words are ordered by their attribution and greedily replaced with a `[MASK]` token. An MLM pre-trained BERT model [@transformer; @bert] is then used to fill these masks. We take the top $k$ such replacements ranked by BERT likelihood. For insertions, a mask token is inserted to the right and left of important words in order to generate a set of perturbations similar to the input example. The attribute classifiers are applied to each generated candidate contrast, and the best $x'$ is selected as evaluated by Eq. [\[eqn:main\]](#eqn:main){reference-type="ref" reference="eqn:main"}. For $m$ token perturbations, the above process is repeated $m$ times, where at each round, the top $k$ perturbed texts are ranked and selected according to Eq. [\[eqn:main\]](#eqn:main){reference-type="ref" reference="eqn:main"}, and the above perturbation process is applied to all selected perturbed texts from the previous round. Note that we perform the hyperparameter tuning for Eq. [\[eqn:main\]](#eqn:main){reference-type="ref" reference="eqn:main"} only once per dataset. Details on hyperparameter tuning and optimizing Eq. [\[eqn:main\]](#eqn:main){reference-type="ref" reference="eqn:main"} are in Appendix [\[sec:hyperparam\]](#sec:hyperparam){reference-type="ref" reference="sec:hyperparam"}.
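A rough sketch of the mask-and-fill step follows, using Hugging Face's `fill-mask` pipeline; the checkpoint name and the choice of which position to mask are illustrative (the paper ranks positions by Integrated Gradients attributions, which is not reproduced here).

```python
from transformers import pipeline

# Any MLM-pretrained BERT checkpoint would do; bert-base-uncased is just an example.
fill_mask = pipeline("fill-mask", model="bert-base-uncased")

sentence = "Many technologies may be a waste of time and money , researcher says"
tokens = sentence.split()
position = 1                                   # in the paper this position comes from feature attributions
tokens[position] = fill_mask.tokenizer.mask_token
masked = " ".join(tokens)

# Top-k candidate replacements ranked by BERT likelihood; each candidate would then be
# re-scored with the objective above using the attribute classifiers and the black-box model.
candidates = fill_mask(masked, top_k=5)
for c in candidates:
    print(c["token_str"], c["score"], c["sequence"])
```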
|
| 19 |
+
|
| 20 |
+
Regarding generalizability of our approach, as already noted, the attribute classifiers can be derived from other sources of data and are not necessarily dependent on the data and model being explained. Furthermore, other methods could be used to obtain attributes; unsupervised methods such as LDA, VAEs, GANs could be leveraged to ascertain semantically meaningful attributes. The attribute classifiers that appear in the loss function of Eq. [\[eqn:main\]](#eqn:main){reference-type="ref" reference="eqn:main"} could be replaced by disentangled representations learned by VAEs [@DIP-VAE] or by topic models. Hence, CAT is generalizable beyond annotated datasets.
|
| 21 |
+
|
| 22 |
+
We use an MLM pre-trained BERT[^3] model from Huggingface [@huggingface] to generate text perturbations. For attributes, classes from the Huffpost News-Category [@news_category] and 20 Newsgroups [@kaggle20NewsGrp] datasets were used. The Huffpost dataset has 200K news headlines split into 41 classes. We merged similar classes and removed those which weren't a standard topic; 22 classes remained. The 20 Newsgroups dataset has 18000 newsgroup posts with 20 topic classes. Together, we obtained 42 attributes. For 22 classes from Huffpost, we trained 22 1-vs-all binary classifiers with a distilbert [@distilbert] base, so that the same sentence can have multiple classes. For 20 Newsgroups, we trained multiclass classifiers on the other 20 classes. More details on attribute classifiers are provided in Appendix [\[sec:attr\]](#sec:attr){reference-type="ref" reference="sec:attr"}. Note that attribute classifiers are transferable as they need not depend on the dataset and model being explained.
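As a hedged illustration of how a single attribute score $\zeta_i(x)$ might be read off one of these 1-vs-all classifiers, the sketch below loads a DistilBERT sequence classifier and takes the positive-class probability; the checkpoint here is the generic base model and merely stands in for a classifier fine-tuned on one Huffpost attribute.

```python
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

# Stand-in checkpoint; in practice this would be a 1-vs-all classifier fine-tuned on one attribute.
name = "distilbert-base-uncased"
tokenizer = AutoTokenizer.from_pretrained(name)
model = AutoModelForSequenceClassification.from_pretrained(name, num_labels=2)
model.eval()

def zeta(text: str) -> float:
    """Attribute score: probability that the attribute is present in the text."""
    inputs = tokenizer(text, return_tensors="pt", truncation=True)
    with torch.no_grad():
        logits = model(**inputs).logits
    return torch.softmax(logits, dim=-1)[0, 1].item()

print(zeta("US shows flexibility on Israeli settlements"))
```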
|
| 23 |
+
|
| 24 |
+
We evaluate our explanation method on models trained on AgNews [@AgNews2015], DBPedia [@Lehmann2015DBpediaA], Yelp [@Shen2017], and NLI [@nli]. For an apples-to-apples comparison of our methods with GYC on AgNews, DBPedia and Yelp, we trained models with the same architecture as the ones in their work: an Embedding Bag layer followed by a linear layer.
|
| 25 |
+
|
| 26 |
+
For MICE, the RoBERTa-based model was used for all datasets, as that is what the publicly provided implementation naturally applies to. MICE uses a two-step framework to generate counterfactual explanations, with the generator being T5 [@rafel-T5] fine-tuned on the task-specific dataset. More details on model training are provided in Appendix [\[sec:classifier_details\]](#sec:classifier_details){reference-type="ref" reference="sec:classifier_details"} and on datasets in Appendix [\[sec:datasets\]](#sec:datasets){reference-type="ref" reference="sec:datasets"}.
|
| 27 |
+
|
| 28 |
+
:::: table*
|
| 29 |
+
::: tabular
|
| 30 |
+
\|p8.25cm\|p2.5cm\|p2.5cm\|p2.5cm\|\
|
| 31 |
+
& & &\
|
| 32 |
+
& & &\
|
| 33 |
+
|
| 34 |
+
& & &\
|
| 35 |
+
|
| 36 |
+
New Human Species ~~Discovered~~ [influenza]{style="color: red"} & & &\
|
| 37 |
+
|
| 38 |
+
US shows flexibility on ~~Israeli~~ [virtual]{style="color: red"} settlements & & &\
|
| 39 |
+
|
| 40 |
+
Pace of U.S. ~~Factory~~ [population]{style="color: red"} Growth Climbs in Dec & & &\
|
| 41 |
+
|
| 42 |
+
It may take 146 years for Nigeria to wipe out ~~corruption~~ [funds]{style="color: red"} from ~~its~~ [bank]{style="color: red"} system going by the latest report \... & & &\
|
| 43 |
+
|
| 44 |
+
\
|
| 45 |
+
\
|
| 46 |
+
& & &\
|
| 47 |
+
& & &\
|
| 48 |
+
|
| 49 |
+
Two outdoor workers conversing while on ~~break~~ [lawn]{style="color: red"}. $<$/s$>$ people on sidewalk & & &\
|
| 50 |
+
|
| 51 |
+
The double sink is freshly polished chrome. $<$/s$>$ The sink is ~~chrome~~ [rust]{style="color: red"} colored& & &\
|
| 52 |
+
|
| 53 |
+
A group of children, wearing white karate shirts, look at the American flag.$<$/s$>$ The children ~~are~~ [looked]{style="color: red"} at a karate tournament.& & &\
|
| 54 |
+
|
| 55 |
+
A man is about to ~~play~~ [steal]{style="color: red"} his guitar.$<$/s$>$ a man is performing for school children& & &\
|
| 56 |
+
|
| 57 |
+
Two women are giving each other a hug while a man holding a glass is looking at the camera. $<$/s$>$ The people are all taking ~~naps~~ [photos]{style="color: red"} during the hottest part of the day. & & &\
|
| 58 |
+
:::
|
| 59 |
+
::::
|
2109.09031/main_diagram/main_diagram.drawio
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
<mxfile host="app.diagrams.net" modified="2021-02-01T05:03:46.879Z" agent="5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.141 Safari/537.36" etag="EQgwRYmB2xK6FZHU1dOq" version="14.0.1" type="device"><diagram id="68_iu_bPBRVzWjoGUWC-" name="Page-1">7Vxbc5s4FP41nuk+OMP98uhL3DykSTrd3TZ9yWCQbXWxoUJO7Pz6lbgjhI0dIOzGnbZGQj6Avk/nxpEH8mS9+4wsf/XFc4A7kARnN5CnA0kSBVkmH7RnH/VoghZ1LBF04kFZxzf4CpJvxr1b6ICgMBB7nouhX+y0vc0G2LjQZyHkvRSHLTy3eFXfWoJSxzfbcsu936GDV1GvoQpZ/w2Ay1VyZVGIz6ytZHAsIlhZjvcSdYVj5OuBPEGeh6Oj9W4CXDp5ybxEgmYVZ9MbQ2CD63zhx+OtdeNMJ2gpPX/+qeDrB/9+qERSni13Gz9wfLN4n8yAvUXPgMoQB/IYbJwRnVXS3HgbMmK8wms3OzmDLm0JpLUghxPP9VA2NsDI+yedR5n0lB8ifi7gFHCJH+kz8NYAoz0Z8JKhkYCxygGR9CHgWhg+F9G0YlIsU3HpFR48SO5EEmICK7GYmL1Sgm4iIfC2yAbxl/KTz8hhBYkmIwhbaAlwSRCZbGufG+bTAUH1/Yoicx1ZOHhfosY8YJFE5CC6g6SVgyDrCol2Auk0Duk0F1POeOHNZezTfm+95MQwCLXDiAwQDX+XnSRHy+hToX/VCba25H8XLPAn8ukHcKBek38TRPnxRzwsviR5guiqiQyG/hjscJHmEYsZanPYbrlwuSFNm3AbkP7xM0AYEtUyik+soePQy4xfVhCDb75l02u+EEVK+pC33TjhuhMOrRMqE+wOrpQdw909A31uJSmclSQxTM0vmhxPTqeBKLXGg0jKHBWZMX4a6OOUEL8G+vQ0KhAN7tNDe+9Cgg2SKqDLcWUeoXg7Tzss+59liO39FhMxICFVZPlEleHSQJJNfSroejMk0Jj1LhplEmgcEuhtkUDmWaDmlAEqQC6eDDl5AuJpVK3RHNBW4EfuxwLu6KplbJ1YhlW/HmnXDa1t2VQLsOr1UNVaQ1XtEFWpT6iONEMYG+2gKmrqO8ParuUuwnrXJ1hnM9OUDzqs58Mqme+9WnUOrBW2D67DgCk/m6xbgz0/13trzYH74AUQQ4+enXsYe2sywKUnxqk5zE32IvxDhoQXGyVgCTzk4vuZrjCmEeGIzoQ0s52NeAVJTLgIDfWVTa4ozRwLW+SD9hMXevZru/bp8XBuBdAebuFw6e791ZCgN5PplA6fph4efvEQGP4dP83wC9hsiediXPmbZckXJPc+mRATK3RNKYXx7SS5TKlDUVLzlDKPUyoXS9quFRAMDvnZZMaE8E9pbiW++105rT0JLRuLLWXziKCK2LKpcE7h+fEXrPMQaQKj8kXlSj0PbU1l0BbVbtGusbLTQGmL3P0YER0P8HGrnEW8tOVARDR9ZDM2HqIgJUGScKVyidGIbTYKsyurZZcr6eokSE7QbtzluiGWMQgf4KMlQmRFLmKsmyWMU1c7D7LcAMja/s/bu++yqv36+vvn8OareS/cD0UOxt0qULCD+Ecsih4/kuNhtM5oc7qLpz9s7HONB4AgmQQK62F8Iv12gOlarzQ2a51N81yFzZrnsqTmNDaXXRfzfLIrpjTkislia64YF2pe2uUC9UGoWct9ttfNhuctQ91AKkapTsX8Vcqbforeq5z49qTo0x1xAKmk+A2wqLWbMFWZgLnDhCkXTaNDNKVeoNlkopRFs8tEKRdOXozUFpx3vYCzyWwWC2eXCVK+f84Lwipi3kuCtI8JUpVNwHSYIOVTqoar1nQa5QUEuJMsimoUFzAniSJxJltuIovCn22et3RxjAuVTmxlgGkWRdR1jBXziKCWHWOxxtusC9YFiM5PbrBgd57cEHme8wXtQ2jLYmNwc0S1jff/+vWDKkhXRUelyxcQ/OxhWy8gZsRt/JgvIBRFZ1F+91cQ6VO0U9rzNQ5Z8QqQEEOfJuW5JMyY0MeJPl7PrNE9LZAtRhmOBYyFzbMCmm2A+aIZyHWdCWnFcvwh8nzi1mLaRG+2V4XtwwrQCdrCQKeABx8Od04qo2Pc5VZx/53kplYwh/hrirj9YaDmVFZ3jHS7pdW3yeqmZlkf2wgSk0oxP7Vs8y3QLgwb2Fxo54aqqA0ZbFPqnfZut8CawdayMZnWjwHt+yvoViNaMlNo/yP2ZcPGYxrShM2s1CNsJbUelVMdxXs1DE5PomGNTU+xr+hrx8JyhVLoqqzjtBLb8nbNOiTJePFYoAWfJGGrdkVQnjkHN4hdmNMsc5LLt/R60qbOHv2uELt/5IMYkeAJxsFecoCe0k2Z4ZAptTowZCd1Ha/vPuhOTc0wj3mSbW3U5POl1frEpPQwqzZ8HGSFiBWVh2cqqEpw8tWJB63YcXPXr+SvzmwIP7u0zWCUVt0d7I0prXbzFa9v1TX1dQhNe75a81AUJWW8p5/IVYnPPaWytthLNgQ3o1IMQS+ix9n8zSupaE+n8NIQjemUyhnryarUzCIcqYo/dVWW8kusoIZ+V0Iz+DdceV/CwfFv/l0JPqd4CY9mPOL+E+oID2pvL2IFsRUwbav5cys1AnJj+I0uSH73w9HND00H0sddkNoRt9IrbhrHdFRdbpZyNezbsLa5eW5lyX+em31h0vHyg3O51GQlA2lmP+sVDc9+HE2+/hc=</diagram></mxfile>
|
2109.09031/main_diagram/main_diagram.pdf
ADDED
|
Binary file (36.6 kB). View file
|
|
|
2109.09031/paper_text/intro_method.md
ADDED
|
@@ -0,0 +1,59 @@
| 1 |
+
# Introduction
|
| 2 |
+
|
| 3 |
+
Deep Reinforcement Learning (RL) has achieved success on a wide variety of tasks, ranging from computer games to robotics. However, RL agents are typically trained on a single task and are extremely sample-inefficient, often requiring millions of samples to learn a good policy for just that one task. Ideally, RL agents should be able to utilize their prior knowledge and adapt to tasks quickly, just as humans do. Meta-learning, or learning to learn, has achieved promising results in this regard, allowing agents to exploit the shared structure between tasks in order to adapt to new tasks quickly during meta-test time.
|
| 4 |
+
|
| 5 |
+
Although meta-learned policies can adapt quickly during meta-test time, training these meta-learned policies could still require a large amount of data. Several popular meta-RL methods [\(Duan et al.,](#page-9-0) [2016;](#page-9-0) [Finn et al.,](#page-9-1) [2017;](#page-9-1) [Mishra et al.,](#page-9-2) [2017;](#page-9-2) [Rothfuss et al.,](#page-10-0) [2018;](#page-10-0) [Wang et al.,](#page-10-1) [2016\)](#page-10-1) utilize on-policy data during meta-training to better align with the setup at meta-test time, where the agent must generate on-policy data for an unseen task and use it for adapting to the task. Recent works [\(Fakoor](#page-9-3) [et al.,](#page-9-3) [2019;](#page-9-3) [Rakelly et al.,](#page-10-2) [2019\)](#page-10-2) have sought to incorporate off-policy RL [\(Fujimoto et al.,](#page-9-4) [2018;](#page-9-4) [Haarnoja et al.,](#page-9-5) [2018\)](#page-9-5) into meta-RL to improve sample efficiency.
|
| 6 |
+
|
| 7 |
+
The combination of off-policy RL and relabeling, in which experience is shared across tasks, has been utilized in the multi-task RL setting, in which an agent learns to achieve multiple different yet related tasks, for both goal-reaching tasks [\(Andrychowicz et al.,](#page-9-6) [2017\)](#page-9-6) and more general multi-task settings [\(Eysenbach et al.,](#page-9-7) [2020\)](#page-9-7). Experience collected for one task may be completely useless for training a policy to learn that task, but could be extremely informative in training a policy to learn a different task. For example, an agent trying to shoot a hockey puck into a net might miss to the right. This experience could easily be used to train an agent to shoot a puck into a net positioned further to the right [\(Andrychowicz et al.,](#page-9-6) [2017\)](#page-9-6).
|
| 8 |
+
|
| 9 |
+
Both meta-RL and multi-task RL involve training on a distribution of tasks, so it follows that we can also combine relabeling techniques with meta-RL algorithms in order to boost both sample efficiency and asymptotic performance. In meta-RL, an agent learns to explore sufficiently to identify the task it is supposed to be solving, and then uses that knowledge to achieve high task returns. The agent collects exploratory pre-adaptation data, then undergoes some adaptation process using that pre-adaptation data. Finally, after adaptation, the agent attempts to solve the task. Meta-RL algorithms typically have a meta-training phase followed by a meta-test phase. The goal during meta-training is to train the meta-parameters such that they could be quickly adapted to solve any task from the meta-train task distribution, given a small amount of data from that task. At meta-test time, given a new unseen task, the goal is to rapidly adapt the *learned* meta-parameters for this task, using a small amount of task-specific data. The focus in this paper is to improve the sample efficiency of the meta-training phase via data sharing.

<sup>2</sup>AIR, Tsinghua University <sup>3</sup>HeliXon Limited

<span id="page-0-0"></span><sup>1</sup>Code: <https://www.github.com/michaelwan11/hfr>
|
| 16 |
+
|
| 17 |
+
Using concepts from maximum entropy RL (MaxEnt RL), we introduce a relabeling scheme for the meta-RL setting. Prior relabeling methods for multi-task RL have used the total reward of the trajectory under different tasks to guide the relabeling [\(Eysenbach et al.,](#page-9-7) [2020;](#page-9-7) [Li et al.,](#page-9-8) [2020\)](#page-9-8). Direct application of this type of relabeling to the meta-RL setting is potentially sub-optimal since the multi-task RL and meta-RL objectives are distinct (learning to perform many tasks vs. learning to learn a new task). Towards developing an approach more suited to meta-RL, we define the notion of the *utility* of a trajectory under the different tasks, where the utility captures the usefulness of the trajectory for efficient adaptation under those tasks. We call our method Hindsight Foresight Relabeling (HFR) – we use *hindsight* in replaying the experience using reward functions from different tasks, and we use *foresight* in computing the utility of trajectories under different tasks and constructing a relabeling distribution over tasks using these utilities. We demonstrate the efficacy of our method on a variety of robotic manipulation and locomotion tasks. Notably, we show that our method, as the first meta-RL relabeling technique (applied during meta-training) that we are aware of, leads to improved performance compared to prior relabeling schemes designed for multi-task RL.
|
| 18 |
+
|
| 19 |
+
# Method
|
| 20 |
+
|
| 21 |
+
```
Input: Trajectory to be relabeled (\tau)
Output: Task to relabel the trajectory with (\psi)
for each training task \psi^i do
    U_{\psi^i}(\tau) \leftarrow ComputeUtility(\tau, \psi^i)
    \log Z(\psi^i) \leftarrow GetLogPartition(\psi^i)
end
Return \psi \sim softmax{ U_{\psi^i}(\tau) - \log Z(\psi^i) }   (Eq. 8)

Function ComputeUtility(\tau, \psi):
    for each (s_t, a_t, r_t) \in \tau do
        Replace r_t with r_\psi(s_t, a_t)
    end
    Sample embedding using the encoder z \sim q_\phi(z | \tau)
    Sample a batch of initial states {s_1^i}_{i=1}^{N_U} \sim B_\psi
    Sample actions for these states using the post-adaptation policy \pi_\theta(\cdot | s, z):
        {a_1^i \sim \pi_\theta(a_1^i | s_1^i, z)}_{i=1}^{N_U}
    Return U_\psi = (1 / N_U) \sum_{i=1}^{N_U} Q_\theta(s_1^i, a_1^i, z)   (Eq. 9)

Function GetLogPartition(\psi):
    Sample a batch of trajectories {\tau^i}_{i=1}^N \sim B_\psi
    for each trajectory \tau^i do
        U_\psi(\tau^i) \leftarrow ComputeUtility(\tau^i, \psi)
    end
    Return \log Z(\psi) \approx \log( (1/N) \sum_{i=1}^N e^{U_\psi(\tau^i)} )
```
|
| 40 |
+
|
| 41 |
+
<span id="page-5-1"></span>We compute the utility of the input trajectory for every training task, along with an empirical estimate of the log-partition function of the tasks. The task to relabel the trajectory with is then sampled from a categorical distribution. For our experiments, we build on top of the PEARL algorithm (Rakelly et al., 2019), which is a data-efficient off-policy meta-RL method. PEARL maintains task-specific replay buffers $B_{\psi}$ . If HFR returns the task $\psi'$ , then $\tau$ is relabeled using the reward function $r_{\psi'}$ and added to $B_{\psi'}$ for meta-training on the task $\psi'$ .
|
| 42 |
+
|
| 43 |
+
The adaptation procedure $\pi'=f_\phi(\pi_\theta,\tau,r_\psi)$ for a task $\psi$ corresponds to a sequence of steps: 1.) augment $\tau$ by marking each transition with a reward value computed using $r_\psi(s_t,a_t)$ , 2.) condition the encoder on $\tau$ to sample an embedding, $z\sim q_\phi\left(z|\tau\right)$ ; and 3.) condition the policy on z to obtain the post-adaptation policy, $\pi'=\pi_\theta(\cdot|s,z)$ . The calculation of the utility function (Eq. 4) requires generation of post-adaptation trajectories, which could be computationally inefficient, especially if the number of tasks is large. To avoid this cost, for each task, we sample a batch of initial states $s_1\sim p_1\left(s_1\right)$ and the corresponding actions from the post-adaptation policy, and compute the utility based on an estimate of the state-action value function $Q_\psi^{\pi'}\left(s_1,a_1\right)$ as:
|
| 44 |
+
|
| 45 |
+
<span id="page-5-2"></span>
|
| 46 |
+
$$U_{\psi}(\tau) = \mathbb{E}_{s_1 \sim p_1, a_1 \sim \pi'(\cdot|s_1)} \left[ Q_{\psi}^{\pi'}(s_1, a_1) \right]$$
|
| 47 |
+
(9)
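A minimal NumPy sketch of this estimate and of the resulting relabeling distribution is given below; `critic`, `encode`, and `policy` are hypothetical stand-ins for PEARL's task-conditioned critic Q(s, a, z), the trajectory encoder q_phi, and the post-adaptation policy pi_theta, and the log-partition is estimated with a sample average over buffered trajectories as in the pseudocode above.

```python
import numpy as np

def compute_utility(traj, task_reward, encode, policy, critic, init_states):
    """Estimate U_psi(tau): relabel rewards, encode, then average Q over sampled initial states."""
    relabeled = [(s, a, task_reward(s, a)) for (s, a, _) in traj]
    z = encode(relabeled)                               # z ~ q_phi(z | tau) (a single sample or the mean)
    actions = [policy(s, z) for s in init_states]       # a_1 ~ pi_theta(. | s_1, z)
    q_vals = np.array([critic(s, a, z) for s, a in zip(init_states, actions)])
    return float(q_vals.mean())                         # Eq. (9)

def relabel_task(traj, tasks, task_rewards, encode, policy, critic, init_states, buffers, rng):
    """Sample the relabeling task psi ~ softmax(U_psi(tau) - log Z(psi)) over the training tasks."""
    utilities, log_parts = [], []
    for psi, reward in zip(tasks, task_rewards):
        utilities.append(compute_utility(traj, reward, encode, policy, critic, init_states))
        buffered_u = np.array([compute_utility(t, reward, encode, policy, critic, init_states)
                               for t in buffers[psi]])
        log_parts.append(np.log(np.mean(np.exp(buffered_u))))   # empirical log-partition estimate
    logits = np.array(utilities) - np.array(log_parts)
    probs = np.exp(logits - logits.max())
    probs /= probs.sum()
    return tasks[rng.choice(len(tasks), p=probs)]
```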
|
| 48 |
+
|
| 49 |
+
Since we use PEARL, we can avoid training separate task-specific value functions $Q_{\psi}$, and instead get the required estimates from the task-conditioned critic Q(s, a, z) already used by PEARL (Eq. 3).
|
| 50 |
+
|
| 51 |
+
<span id="page-6-0"></span>
|
| 52 |
+
|
| 53 |
+
Figure 5: MuJoCo environments we evaluate on – sparse reward manipulation tasks (a, b, c), as well as sparse and dense reward locomotion tasks (c, d).
|
| 54 |
+
|
| 55 |
+
<span id="page-6-1"></span>
|
| 56 |
+
|
| 57 |
+
Figure 6: Performance of our relabeling algorithm HFR (shown in blue) on sparse reward tasks. HFR consistently outperforms baselines on both sparse reward robotic manipulation and locomotion tasks. Visual-Reacher uses image observations, while the other environments use proprioceptive states.
|
| 58 |
+
|
| 59 |
+
We highlight that HFR facilitates efficient data-sharing among the training tasks via trajectory-relabeling *without* altering the meta-train and test-time adaptation rules of the base meta-RL algorithm.
|
2110.08499/main_diagram/main_diagram.drawio
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
<mxfile host="app.diagrams.net" modified="2022-01-14T19:12:01.497Z" agent="5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.110 Safari/537.36" etag="k7uDZfv6Qe2jrYtlM5jZ" version="16.0.0" type="device"><diagram id="eTsy3CMo_EVLmfRaSItg" name="Page-1">7V1bc6M4Fv41qZ1+CKUbt8dOpzO7Vd21s5Wpmp6nLYxlm20MHoyTeH79StwMQhhwuNkhD91YgBD6Ph0dnYu4w1+2b78G1m7z3V9S9w6B5dsdfrxD6B7phP3HS45JCVa1uGQdOMu4DJ4Knp2/aVIIktKDs6T7woWh77uhsysW2r7nUTsslFlB4L8WL1v5bvGpO2udPBGcCp5ty6Wly/5wluEmLjXU3NX/pM56kz4ZguTM1kovTgr2G2vpv+aK8Nc7/CXw/TA+2r59oS7vvbRf4vueKs5mDQuoFza54cc366+fmx//eSHr/xINPt//vV3cJ7W8WO4heeFvvrde+cGWBqz8q2czSPnRI02Pfvn29fFT8krhMe2nwD94S8ofBe/ww+vGCenzzrL52VdGDVa2Cbducnrle2ECNTTY730Y+D+z/kX8Csd1v/iuH0S146VFjZWdXZk7o9kGXazYmeRFaBDSt8oeglm/M8ZSf0vD4MguSW5AUNcUjOLbEsJCNQX09QS/CrCiGXHxJoc+SS61EtKts0eccGEHCTQtYEIlmEoAMHrt+KF9CNzjQ2DZP2lYj0QRNhEX/jvX109PmP3V4uX5Ho2qDq3Q8T1Wcm+CjhACqqlAVUAIq4pulkCChoJhGaP+QMIzSEkPQ10BpgCSpilAL4NEFKiXQYK6moHXOU5kxinGSWc4qUTAyTAVIhlMaoZeASdkKljvCSdVgpPmchx27GAdRr0QF/C+jZSAtHu1vw5+fAEG0V++KId0WsgruN9HaH1mF0Bj95a/I37ao2/zc+lD2UvFzy22hRXn2icQi6EVFklTnNESuCUMsFxnzeG3GbpsHsYPHHuHKSqfkxNbZ7nkj5Gy9ERN0IKanUhsrGdDOSWZprH5E5dIpiGJuO5LWmvTZBea2dWGXdjE2QQyJXbp02QXntnVhl2aqSu6amZ/xvSIZkyNaN+t/U8GFgLPHGyPATlTrs10CYFiIpD94SLlCC6vRpluzVg6IOfMSs7td5bXiDqEUyfWYwEH+T7Bi5906Sos84r9b205NvGDsp8imeImVPCpiGudlUKwQajUWJISBdkZAy2wpnVDAGgYQJAyQJNJmZQIecRxX4in1jgJ5Iu+8FYf9jQSWFsmUe7UxxzGi14AXq2oZkuNTEvdXICOVl3QFO0XFQAbkmmkP4BhqSvpck2fk59+EG78te9Z7tdTqSAlT9d88/1d0sX/o2F4TGSndQj9IgCsy4LjD36/oqY//0yqi348vhV+HSUCWS0L5GQy40BaQfiZW4H5TOBa+71jp8VPjiufU1gFpv4IdP1OXJvHTV6WK2SFuerinuTdd54irLf9Q2DTc7AkopY1eE3DOpFc5lxAXSt0XooNkZEnupW9mHXMXbDzHS/c52r+jRfkJitEROuBjvJULN0BDVM03hXvYAdxK05Uzl7nHewuW3gGYHcNS3Mk64Gllw2sibIbgi7oXWajqYpsNFGxkrhpyX0nmg4wUsptG2Sk4JHnAdTlRNDvEJvkUImN2dOfCZhOqI/BbzLz+7r5ja+F37o2Br/Vmd/XzW9yJfyGeBT9RJv5fd38Vq+F3/oo/NZvid9PT6aZxDh8GH7rV8JvDMEY/DZmfl81v3EnppgB+E2wMQa/q31jzRwlxiWOsbx7t/4RmsxtW3SvLX37fk93lU62CkduP36ZYRxvCJqGkvpcalwz5pC+tyx+fErG63dLvlwFouSrcsvRNyeMhbiuJj8zGc6OTyKc/0gleIcyshvTmsTSq4sBTEAIqezKCg2waHMz9LNStNw24Y5+pCga2xupT0pLODNWpjEmpmu3a814BMWQm2EYjyr1hqXzIp3W22kH7SJzxFqenbfKEJusOGro1cZlIABKYlg6+ZNBJ39ZUsl0ePHquMuVE9D97bMjjeqsYYcqCcTrjx2yVJYJsYMG9OaJAVEzsYGxQiTpaP1xozp9pscoTUVRhojFHApbHTDQKoNxK5CGxsBIj+0TmbXlptqy3lRbHieeDxPRJlynLZNSnucg2nJ1es0U5r0/GmpFi+D2pkNkEqWUJyOXkxpQoCSJvj85ObbvYZaTTeXkxL0RdVJPcgcZRU5WeyOmICdtdpHjHXi22IeTk1hrluajDbmexNVpPlPgy7nFxe0yhWDY1C+FkGKSIflS3j5oTgu7FGaNKGLIaFVmmKqgQWFGIytO1xO0kSlOzfWmYQM9sNZQtRppCaqB82F3kkXrKIHWeJREmVHjkbLhCHWzMB4VAFHNmIx+/UYDh/U7T8fPhkHb0TLGUMAVovudjn9EzJIO2JPjv/2oImIa+jCj6qbSc24nPLD5UDGuZNbQtDHSz/BNped8QH6TTqaCAfit62gMflfvqjaHv047/FVVzbN7XU0gEhaPnRwzG/CbylOjqTwdx4CviQa1WgO+TsYIC8TV27tNwSD7+6YuvucWDbIal5TNLHWDbtKFzVk8Xod4TMMspqpu1gk7yR1GaR/pAcQjmba/6uGwWlmu/xFFpHmJMqlqw7qvSLX7agr0+ZjuTl3TlGYRtUN7OwmaNF2KS1jOmMr160fhkmESRSONyDRoVgepzuo4z4GVtXXcY8yCDF3Wa5jbBzbUfaF8j+PSmcykIhJJbW1uqY9tfA9NugAdYVUBuT/BckbSwPhCJOKQ4FcnbUwd/I31Ur3gGxt3DEBb3NVBcb80IWN83BeHwDsTpTc68mrrET8s8tVW8StMxekCMQLZ0tvMAIP1gA2dVEOqUwz6xGy6iGGsGA1g0hU0JErV9tEeUfqXN12Y2BpatG3LcEKDyr9LPxgx/swXrUysZbUNZ3TADVD6AJIMcH1IwNVqu1yPw/If012TqHxY5jQUrR6wQUeoWm0Ju6lPunSBJSquMxroLoN6gdTZSjUqPTRQ9g7KSDGowUm9qW39byfuLN0tZKpxEgZpHXcGRUfgEHFn6VdsZ37X8nvyGSrNxwTqYkxISF/15ZWskvgd3h+W3354mTWBoD0Nr2rT5aKRKjCHdY4V1qmbZlMX2KCxnGrZJvrKa0TgV9dfWPz+z2HI3pJ/9Fzs9+l9FlTtanEBiYKE4BuNyDPDDUWXbJ9BekNMZhEVgNlvrB0/XLn0LZky5LPHmZy2plNJuRNr7Ptp2bvTtlBFwmHd/NCkKiJU1VUGWPlJeIiJQ2af/ZicOe+iEr8714JB5yvWmvGpM8DLpt5ZrDMa6UARd6KdiFjXyrbaGTGmmsNoUTdJxKqNtXkNVgZfhapbi2pztALKdHJrE
VXFsUkmJlav+sB362B1sXX0PkGrBWGafkqpC/ABDygQvZ08lkyy2aPMTt/bl9dTaZ6D/ptvtx6k04czM4F0AqcGSnCqSA4nHBTOS+MAbzYwRDVQkzCDQZ1j2k0ZF/v8OPrlavh79WuQrpuy8Q1P47utUg1NUzRis9rStUznH+UQm84WA2nTm7ewfFM/KzrtpnYwePdgOJNkNqXBwBXXXDqO2dm44BWDU719fbem9EJ89tbaNrZ0U09DRBbuWWH0sA+Be3wILPsnd3XUzbCncSQaQQyZCoVxgy8sJfpY4IdWpD3iRzMaEHHFsZuoi5kd8wBCAUYMmk3tSCLMu5veqyM9E+eDnXXqSZVKRUf5Uynx7oTw8sXWFJbQfa6uMERKujNtxoR0u6n8upooaVbpMNp4C2Plxxm3KjQVwSGBCVKaDVyIzR4HbnVY6TsGLpoHbmWsqakr4leRpzBw9bIJs9uQt1vMjdSRqZiiTwjiprtY9AemzLrZYfcO5XeXdC/RJ9C9ZQuizHj8O+sabx+1kzUzir4XRsJsXr6cG5wH4vo99ZzV2ZZ7cyvoFxgjkwmWS9Bgvfgl+voZa98XrgDzmNPkWIWfysJVoJjIL4ldu5nNcqaXbjAxc1qUi8tgIJVCUst3f2SrjihrHPFVFV/eirIgI2nu6NNdIYOomzSkuCW8Ds8PthG3c6dfk17n50ncluikSxn/g3v2OrbjraX3y3WZ/EmHzZZeUjkQ/QVhYHn7FasyrZwRNr3g1Q+WxWfnb1+w5c86movvRUHAE/OS3kQ8/Ss9VnN9u3T2O9dK+tXxXCf34JXrW2G+QSLOj7592NLIbATY0OBQUzZGrJDnKedmrfrwPxCmgsjy+L3hhh6zSrkhcB3lPoN1OuFZqURSsqq8xX5X1A6rkhdmRk6RkecU/xmxa0JslvvTxawLuf9vJp6DvNCmbzbdhVF3BiWRX0ouA/EWJsBNtEvrpF0C33OPTXbcmsk1RXLNIvxWEJtF+HQx60KEP1OX2rGezj/lRT2bnrT4gLKK7bwSb/HrdtR24n4rf/8rngtmwX2llJoF960gNgvu6WLWheCOtnsHW39JI/seF9rsRZzYPBL67J819Si3wdxFZpRESIuSvpXNpKEzYYDQVBPzzURPmTaiU9yUmlWlG+/AC+yq7GfgczhOoUrsNTffIzTw1/8D</diagram></mxfile>
|
2110.08499/main_diagram/main_diagram.pdf
ADDED
|
Binary file (35.6 kB). View file
|
|
|
2110.08499/paper_text/intro_method.md
ADDED
|
@@ -0,0 +1,161 @@
| 1 |
+
# Introduction
|
| 2 |
+
|
| 3 |
+
Multi-Document Summarization is the task of generating a summary from a cluster
|
| 4 |
+
of related documents.
|
| 5 |
+
State-of-the-art approaches to multi-document summarization are primarily either graph-based , leveraging graph neural networks to connect information between the documents, or hierarchical , building intermediate representations of individual documents and then aggregating information across. While effective, these models either require domain-specific additional information e.g. Abstract Meaning Representation , or discourse graphs ,
|
| 6 |
+
or use dataset-specific, customized architectures, making it difficult to leverage \pretrained language models. Simultaneously, recent \pretrained language models (typically encoder-decoder transformers) have shown the advantages of \pretraining and transfer learning for generation and summarization .
|
| 7 |
+
Yet, existing \pretrained models either use single-document \pretraining objectives
|
| 8 |
+
or use encoder-only models that do not work for generation tasks like
|
| 9 |
+
summarization \citep[e.g., CDLM, ][]{cdlm}.
|
| 10 |
+
|
| 11 |
+
[t]
|
| 12 |
+
\centering
|
| 13 |
+
\includegraphics[width=0.9\linewidth]{intro.pdf}
|
| 14 |
+
\caption{
|
| 15 |
+
\sys vs existing pretrained models.
|
| 16 |
+
}
|
| 17 |
+
|
| 18 |
+
[t]
|
| 19 |
+
\centering
|
| 20 |
+
\includegraphics[width=1\linewidth]{fig1-primera.pdf}
|
| 21 |
+
\caption{Model Structure of \sys.
|
| 22 |
+
}
|
| 23 |
+
|
| 24 |
+
Therefore, we argue that these \pretrained models are not necessarily the best fit for multi-document summarization.
|
| 25 |
+
Alternatively, we propose a simple
|
| 26 |
+
\pretraining approach for multi-document summarization, reducing the need for dataset-specific architectures and large amounts of labeled fine-tuning data (see Figure for a comparison with other \pretrained models).
|
| 27 |
+
Our method is designed to teach the model to identify and aggregate salient information across a ``cluster'' of related documents during \pretraining.
|
| 28 |
+
Specifically, our approach uses the Gap Sentence Generation objective (GSG) , i.e. masking out several sentences from the input document, and recovering them in order in the decoder.
|
| 29 |
+
We propose a novel strategy for GSG sentence masking, which we call Entity Pyramid, inspired by the Pyramid Evaluation method .
|
| 30 |
+
With Entity Pyramid, we mask salient sentences in the entire cluster
|
| 31 |
+
and then train the model to generate them, encouraging it to find important information across documents and aggregate it in one summary.
|
| 32 |
+
|
| 33 |
+
We conduct extensive experiments on 6 multi-document summarization datasets from 3 different domains. We show that despite its simplicity, \sys achieves superior performance compared with prior state-of-the-art \pretrained models, as well as dataset-specific models, in both few-shot and full fine-tuning settings. \sys performs particularly strongly in zero- and few-shot settings, significantly outperforming the prior state-of-the-art by up to 5 ROUGE-1 points with as few as 10 examples. Our contributions are summarized below:
|
| 34 |
+
\setlist{nolistsep}
|
| 35 |
+
[leftmargin=*,wide=0pt,noitemsep]
|
| 36 |
+
\setlength{\itemsep}{0pt}
|
| 37 |
+
\itemsep0em
|
| 38 |
+
- We release \sys, the first \pretrained generation model for multi-document inputs with a focus on summarization.
|
| 39 |
+
- We propose Entity Pyramid, a novel \pretraining strategy
|
| 40 |
+
that trains the model to select and aggregate salient information from documents.
|
| 41 |
+
- We extensively evaluate \sys on 6 datasets from 3 different domains in zero-shot, few-shot and fully-supervised settings. We show that \sys outperforms the current state-of-the-art on most of these evaluations by large margins.
|
| 42 |
+
|
| 43 |
+
In this section, we discuss our proposed model \sys, a new \pretrained general model for multi-document summarization. Unlike prior work, \sys minimizes dataset-specific modeling by simply concatenating a set of documents and processing them with a general efficient encoder-decoder transformer model (\S). The underlying transformer model is \pretrained on an unlabeled multi-document dataset, with a new entity-based sentence masking objective to capture the salient information within a set of related documents (\S).
|
| 44 |
+
|
| 45 |
+
# Method
|
| 46 |
+
|
| 47 |
+
Our goal is to minimize dataset-specific modeling to leverage general \pretrained transformer models for the multi-document task and make it easy to use in practice. Therefore, to summarize a set of related documents, we simply concatenate all the documents in a single long sequence, and process them with an encoder-decoder transformer model. Since the concatenated sequence is long, instead of more standard encoder-decoder transformers like BART and T5 , we use the Longformer-Encoder-Decoder (LED) Model , an efficient transformer model with linear complexity with respect to the input length.\footnote{
|
| 48 |
+
We use LED and not other efficient transformers like Bigbird-\pegasus
|
| 49 |
+
for two reasons. First, BigBird's global attention cannot be assigned to individual tokens in the middle of the sequence, which is important for the representation of long documents as shown in . Second,
|
| 50 |
+
\pretrained checkpoints are available for LED, whereas only already fine-tuned checkpoints were released for BigBird-\pegasus.
|
| 51 |
+
}
|
| 52 |
+
LED uses a sparse local+global attention mechanism for encoder self-attention, while using full attention for decoder self-attention and cross-attention.
|
| 53 |
+
|
| 54 |
+
When concatenating, we add special document separator tokens (<doc-sep>) between the documents to make the model aware of the document boundaries (Figure ). We also assign
|
| 55 |
+
global attention to these tokens which the model can use to share information across documents
|
| 56 |
+
(see \S for ablations of the effectiveness of this input structure and global attention).
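For illustration, the following is a minimal sketch (not the authors' released code) of how such an input could be assembled with the Hugging Face LED implementation: documents are joined with a `<doc-sep>` token and a global attention mask is set on those separator tokens (plus the first token). The checkpoint name and the choice to register `<doc-sep>` as an additional special token are assumptions of this sketch, and the untuned base checkpoint is used only to show how the mask is passed to generation.

```python
# Sketch: multi-document input preparation with <doc-sep> separators and
# global attention on the separator tokens (hedged: names are assumptions).
import torch
from transformers import LEDTokenizerFast, LEDForConditionalGeneration

tokenizer = LEDTokenizerFast.from_pretrained("allenai/led-base-16384")
model = LEDForConditionalGeneration.from_pretrained("allenai/led-base-16384")

# Treat <doc-sep> as an extra special token (an assumption for this sketch).
tokenizer.add_special_tokens({"additional_special_tokens": ["<doc-sep>"]})
model.resize_token_embeddings(len(tokenizer))
docsep_id = tokenizer.convert_tokens_to_ids("<doc-sep>")

documents = ["First article about the wildfire ...",
             "Second article about the wildfire ..."]
source = "<doc-sep>".join(documents)

inputs = tokenizer(source, return_tensors="pt", truncation=True, max_length=4096)
input_ids = inputs["input_ids"]

# Global attention on <doc-sep> tokens (plus the first token), so information
# can flow across document boundaries through these positions.
global_attention_mask = (input_ids == docsep_id).long()
global_attention_mask[:, 0] = 1

# The base checkpoint is not fine-tuned for summarization; this only illustrates
# how the global attention mask is supplied.
summary_ids = model.generate(input_ids,
                             attention_mask=inputs["attention_mask"],
                             global_attention_mask=global_attention_mask,
                             max_length=256)
print(tokenizer.decode(summary_ids[0], skip_special_tokens=True))
```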
|
| 57 |
+
|
| 58 |
+
In summarization,
|
| 59 |
+
task-inspired \pretraining objectives have been shown to provide gains over general-purpose \pretrained transformers \citep[\pegasus;][]{pegasus}.
|
| 60 |
+
In particular, \pegasus introduces Gap Sentence Generation (GSG) as a \pretraining objective where some sentences are masked in the input and the model is tasked to generate them. Following \pegasus, we use the GSG objective, but introduce a new masking strategy designed for multi-document summarization.
|
| 61 |
+
As in GSG, we select and mask out $m$ summary-like sentences from
|
| 62 |
+
the input documents
|
| 63 |
+
we want to summarize, i.e. every selected sentence is replaced by a single token [sent-mask] in the input, and train the model to generate the concatenation of those sentences as a ``pseudo-summary'' (Figure ).
|
| 64 |
+
This is close to abstractive summarization
|
| 65 |
+
because the model needs to reconstruct the masked sentences using the information
|
| 66 |
+
in the rest of the documents.
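As a concrete illustration of this objective, the short sketch below builds an (input, pseudo-summary) training pair once a set of salient sentences has been chosen; the `[sent-mask]` and `<doc-sep>` strings follow the description above, while the function and variable names are purely illustrative.

```python
# Illustrative GSG-style example construction: selected sentences are replaced by a
# mask token in the concatenated input, and their concatenation becomes the target.
# `cluster` is a list of documents (each a list of sentences); `selected` is a set of
# (doc_idx, sent_idx) pairs chosen by some masking strategy (hypothetical names).
SENT_MASK = "[sent-mask]"
DOC_SEP = "<doc-sep>"

def build_gsg_example(cluster, selected):
    masked_docs, target = [], []
    for d, doc in enumerate(cluster):
        kept = []
        for s, sent in enumerate(doc):
            if (d, s) in selected:
                kept.append(SENT_MASK)   # mask the salient sentence in the input
                target.append(sent)      # ...and move it to the pseudo-summary
            else:
                kept.append(sent)
        masked_docs.append(" ".join(kept))
    return DOC_SEP.join(masked_docs), " ".join(target)

cluster = [["Wildfires burned across Colorado.", "Thousands were evacuated."],
           ["The San Juan National Forest was closed.", "No homes were destroyed."]]
source, pseudo_summary = build_gsg_example(cluster, selected={(0, 0), (1, 0)})
print(source)
print(pseudo_summary)
```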
|
| 67 |
+
|
| 68 |
+
The key idea
|
| 69 |
+
is how to select sentences that best summarize or represent
|
| 70 |
+
a set of related input documents (which we also call a ``cluster''), not just a single
|
| 71 |
+
document as in standard GSG.
|
| 72 |
+
\pegasus uses three strategies - Random, Lead (first $m$ sentences), and ``Principle''.
|
| 73 |
+
The ``Principle'' method computes a sentence salience score based on the ROUGE score of each sentence, $s_i$, w.r.t. the rest of the document ($D/\{s_i\}$), i.e. $\mathrm{Score}(s_i) = \mathrm{\textsc{Rouge}}(s_i, D/\{s_i\})$.
|
| 74 |
+
Intuitively, this assigns a high score to the sentences that have a high overlap with the other sentences.
|
| 75 |
+
|
| 76 |
+
However, we argue that a naive extension of such a strategy to multi-document summarization would be sub-optimal, since multi-document inputs typically include redundant information, and such a strategy would prefer exact matches between sentences, resulting in a selection of less representative information.
|
| 77 |
+
|
| 78 |
+
[tb]
|
| 79 |
+
\centering
|
| 80 |
+
\scriptsize
|
| 81 |
+
{@{}p{\linewidth}@{}}
|
| 82 |
+
\toprule
|
| 83 |
+
Document \#1
|
| 84 |
+
Wildfires have burned across tens of thousands of acres of parched terrain in \textcolor{blue}{Colorado}, spurring thousands of evacuations ...\textcolor{blue}{(0.107)}..., residents have sought shelter in middle schools, and local officials fear tourists usually drawn to the region for the summer may not come.\\
|
| 85 |
+
\hline
|
| 86 |
+
Document \#2
|
| 87 |
+
... \textit{In \textcolor{blue}{Colorado}’s southwest, authorities have shuttered the San Juan National Forest in southwestern Colorado and residents of more than 2,000 homes were forced to evacuate.\textcolor{blue}{(0.187)}}
|
| 88 |
+
No homes had been destroyed ... \textit{\textcolor{red}{“Under current conditions, one abandoned campfire or spark could cause a catastrophic wildfire, ..., with human life and property,” said San Juan National Forest Fire Staff Officer Richard Bustamante}}...\\
|
| 89 |
+
\hline
|
| 90 |
+
Document \#3
|
| 91 |
+
The Buffalo Fire west of Denver is ... Several wildfires in \textcolor{blue}{Colorado} have prompted thousands of home evacuations ...\textcolor{blue}{(0.172)}... Nearly 1,400 homes have been evacuated in Summit County, \textcolor{blue}{Colorado}, ...\textcolor{blue}{(0.179)}... \textcolor{red}{“Under current conditions, one abandoned campfire or spark could cause a catastrophic wildfire, ... , with human life and property,” said Richard Bustamante, SJNF forest fire staff officer} ...\\
|
| 92 |
+
\hline
|
| 93 |
+
Entities with High Frequency \\
|
| 94 |
+
\hline
|
| 95 |
+
\textcolor{blue}{Colorado}, 416, Tuesday, Wildfires, San Juan National Forest,...\\
|
| 96 |
+
\bottomrule
|
| 97 |
+
|
| 98 |
+
\caption{An example of sentence selection by the Principle vs our Entity Pyramid strategy. Italic text in \textcolor{red}{red} is the sentence with the highest Principle ROUGE score, which is thereby chosen by the Principle strategy. The most frequent entity, 'Colorado', is shown in \textcolor{blue}{blue}, followed by the Pyramid ROUGE scores in parentheses. The final sentence selected by the Entity Pyramid strategy is in italic, which is a better pseudo-summary than the one selected by the Principle strategy.}
|
| 99 |
+
|
| 100 |
+
For instance, Figure shows an example of sentences picked by the Principle strategy vs our Entity Pyramid approach. The figure shows a cluster containing three news articles discussing a wildfire that happened in Colorado; the pseudo-summary of this cluster should be related to the location, time and consequences of the wildfire. With the Principle strategy, however, the non-salient sentences quoting the words of an officer are assigned the highest score, as the exact same sentence appeared in two out of the three articles.
|
| 101 |
+
In comparison, instead of the quoted words, our strategy selects the most representative sentences in the cluster, i.e. those containing high-frequency entities.
|
| 102 |
+
|
| 103 |
+
To address this limitation, we propose a new masking strategy inspired by the Pyramid Evaluation framework which was originally developed for evaluating summaries with multiple human written references. Our strategy aims to select sentences that best represent the entire cluster of input documents.
|
| 104 |
+
|
| 105 |
+
[tb]
|
| 106 |
+
\centering
|
| 107 |
+
\includegraphics[width=\linewidth]{pipeline.pdf}
|
| 108 |
+
\caption{The Entity Pyramid Strategy to select salient sentences for masking.
|
| 109 |
+
The entity pyramid is built based on the frequency of entities in the documents.
|
| 110 |
+
The most representative sentences are chosen based on Cluster ROUGE for each entity with frequency $>1$, e.g. Sentence 10 in Document 2 for Entity 1.
|
| 111 |
+
}
|
| 112 |
+
|
| 113 |
+
\paragraph{Pyramid Evaluation}
|
| 114 |
+
The Pyramid Evaluation method is based on the intuition that the relevance of a unit of information can be determined by the number of reference (i.e. gold standard) summaries that include it.
|
| 115 |
+
The unit of information is called a Summary Content Unit (SCU), i.e. words or phrases that represent single facts. These SCUs are first identified by human annotators in each reference summary, and they receive a score proportional to the number of reference summaries that contain them. A Pyramid Score for a candidate summary is then the normalized mean of the scores of the SCUs that it contains. One advantage of the Pyramid method is that it directly assesses the content quality.
|
| 116 |
+
|
| 117 |
+
\paragraph{Entity Pyramid Masking} Inspired by how content saliency is measured
|
| 118 |
+
in the Pyramid Evaluation, we hypothesize that a similar idea could be applied in multi-document summarization to identify salient sentences for masking.
|
| 119 |
+
Specifically, for a cluster with multiple related documents, the more documents
|
| 120 |
+
an SCU appears in, the more
|
| 121 |
+
salient that information should be to the cluster. Therefore, it should be considered for inclusion in the pseudo-summary in our masked sentence generation objective. However, SCUs in the original Pyramid Evaluation are human-annotated, which is not feasible
|
| 122 |
+
for large scale \pretraining.
|
| 123 |
+
As a proxy, we explore leveraging information expressed as named entities, since they are key building blocks in extracting information from text about events/objects and the relationships between their participants/parts . Following the Pyramid framework, we use the entity frequency in the cluster as a proxy for saliency.
|
| 124 |
+
Concretely, as shown in Fig. , we have the following three steps to select salient sentences in our masking strategy:
|
| 125 |
+
|
| 126 |
+
[tb]
|
| 127 |
+
\footnotesize
|
| 128 |
+
\caption{Entity Pyramid Sentence Selection}
|
| 129 |
+
[1]
|
| 130 |
+
\Require Document cluster
|
| 131 |
+
\Require List of entities w/ frequency $> 1$. $N$ length of the list
|
| 132 |
+
\Require $m$ number of sentences to select
|
| 133 |
+
\Ensure List of sentences to mask
|
| 134 |
+
\State $E \gets $ sort entities by frequency, descending
|
| 135 |
+
\State $selected=[]$
|
| 136 |
+
\For{$i \gets 1$ to $|E|$}
|
| 137 |
+
\State $SentCand \gets $ all sentences in the cluster containing $E[i]$
|
| 138 |
+
\State $cur\_sent = \arg\max_{s \in SentCand} Score(s)$
|
| 139 |
+
|
| 140 |
+
\State $selected.append(cur\_sent)$
|
| 141 |
+
\If{$|selected|==m$}
|
| 142 |
+
\State Break
|
| 143 |
+
\EndIf
|
| 144 |
+
\EndFor
|
| 145 |
+
\State $\mathrm{Return}$ $selected$
|
| 146 |
+
|
| 147 |
+
[leftmargin=*,wide=0pt]
|
| 148 |
+
\setlength\itemsep{0em}
|
| 149 |
+
- Entity Extraction. We extract named entities using SpaCy .\footnote{Note that entity information is only used at \pretraining time. This is unlike some prior work (e.g. ) that utilize additional information (like named entities, coref, discourse, or AMR) at fine-tuning and inference time.}
|
| 150 |
+
- Entity Pyramid Estimation. We then build an Entity Pyramid for estimating the salience of entities based on their document frequency, i.e. the number of documents each entity appears in.
|
| 151 |
+
- Sentence Selection. Similar to the Pyramid evaluation framework, we identify salient sentences with respect to the cluster of related documents. Algorithm shows the sentence selection procedure.
|
| 152 |
+
As we aim to select the entities better representing the whole cluster instead of a single document, we first remove all entities from the Pyramid that appear only in one document. Next, we iteratively select entities from the top of the pyramid to the bottom (i.e., highest to lowest frequency), and then select the sentences in the documents that include the entity as the initial candidate set. Finally, within this candidate set, we find the sentences most representative of the cluster by measuring the content overlap of the sentence w.r.t. the documents other than the one it appears in.
|
| 153 |
+
This final step supports the goal of our \pretraining objective, namely
|
| 154 |
+
to reconstruct sentences that can be recovered using information from other documents in the cluster, which encourages the model to better connect and aggregate information across multiple documents.
|
| 155 |
+
Following we use ROUGE scores as a proxy for content overlap. For each sentence $s_i$, we specifically define a Cluster ROUGE score as
|
| 156 |
+
$\mathrm{Score}(s_i) = \sum_{\{doc_j \in C,\, s_i \notin doc_j\}} \mathrm{\textsc{Rouge}}(s_i, doc_j)$
|
| 157 |
+
where $C$ is the cluster of related documents.
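A simplified re-implementation of this selection procedure is sketched below. It mirrors the algorithm above but replaces full ROUGE with a cheap unigram-overlap proxy, and it assumes entities have already been extracted upstream (e.g., with SpaCy); all function and variable names are illustrative, not the authors' code.

```python
# Simplified sketch of Entity Pyramid sentence selection (unigram overlap as a
# stand-in for ROUGE; entity extraction assumed to be done beforehand).
from collections import defaultdict

def overlap_score(sentence, document):
    """Cheap proxy for ROUGE: fraction of sentence unigrams found in the document."""
    tokens = sentence.lower().split()
    doc_tokens = set(document.lower().split())
    return sum(t in doc_tokens for t in tokens) / max(len(tokens), 1)

def cluster_score(sentence, own_doc, documents):
    # Content overlap of the sentence with every *other* document in the cluster.
    return sum(overlap_score(sentence, doc)
               for j, doc in enumerate(documents) if j != own_doc)

def entity_pyramid_select(documents, sentences, entities, m):
    """documents: full-document strings of the cluster;
    sentences: list of (doc_idx, sentence_text) pairs over the whole cluster;
    entities: entity sets aligned with `sentences`; m: number of sentences to mask."""
    # Entity Pyramid: document frequency of each entity, keeping frequency > 1.
    ent_docs = defaultdict(set)
    for (d, _), ents in zip(sentences, entities):
        for e in ents:
            ent_docs[e].add(d)
    pyramid = sorted((e for e in ent_docs if len(ent_docs[e]) > 1),
                     key=lambda e: len(ent_docs[e]), reverse=True)

    selected, used = [], set()
    for entity in pyramid:                      # highest document frequency first
        candidates = [i for i, ((_, _), ents) in enumerate(zip(sentences, entities))
                      if entity in ents and i not in used]
        if not candidates:
            continue
        best = max(candidates,
                   key=lambda i: cluster_score(sentences[i][1], sentences[i][0], documents))
        selected.append(sentences[best])
        used.add(best)
        if len(selected) == m:
            break
    return selected                             # list of (doc_idx, sentence) to mask
```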
|
| 158 |
+
|
| 159 |
+
Note that, different from the importance heuristic defined in \pegasus , the Entity Pyramid strategy favors sentences that are representative of more documents in the cluster, rather than sentences that merely match exactly across fewer documents
|
| 160 |
+
(see Figure for a qualitative example).
|
| 161 |
+
The benefit of our strategy is shown in an ablation study (\S).
|
2111.13131/main_diagram/main_diagram.drawio
ADDED
|
@@ -0,0 +1 @@
| 1 |
+
<mxfile host="Electron" modified="2021-11-15T11:49:37.664Z" agent="5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) draw.io/15.4.0 Chrome/91.0.4472.164 Electron/13.5.0 Safari/537.36" etag="RNlS00Jqd6ksAmq8dyRV" version="15.4.0" type="device"><diagram id="u_omFjpVSmRfQzXucVGK" name="Page-1">3Vxtc5s4EP41nmk/tAOIN39MnDh3016bazt3137JYJBtNRi5Qq7t/vqTQLxKxnYChtST6cBqAbG7z75oRUdgstrdEW+9/AsHMBwZWrAbgZuRYeimYYz4nxbsU4qbERYEBYKpIHxGv6AgaoK6QQGMK4wU45CidZXo4yiCPq3QPELwtso2x2H1qWtvASXCZ98LZeq/KKBLQdU1rRj4A6LFUjzayQZWXs6cEuKlF+BtSkp4wO0ITAjGND1a7SYw5MLL5JLeaHpgNJ8YgRE95YJP4d8P8JtPrtDd/Zex+QWuZ9s3mZh/euFGvLGYLd1nIiB4EwWQ30UfgevtElH4ee35fHTLlM5oS7oKxXD+lho7maMw/MhYEeXatzgppgQ/woLIBZkyTnCISfJEMHd96Ps5d2lk5lpmch/57YVAfkJC4a5EEtK4g3gFKdkzFjFqAzu9RJimLWSxLfQMMm0uSyrmE0jtS9jWIr91IX52IDRwhjas48qAATNPcYoJXeIFjrzwtqBeF+ricip43mO8Fkr6DindC6x5G4qrKjwo2hhviA+brEkA1CMLSBv4zJSPv0ujoggMPYp+VqGoknpy6RUh3r7EsMYoonHpzvecUOjftKr6lwB0Hj87SGdQ6D9/laebhDMIk2CWQPb/ieuTk6/85K2Vnd7syoM3e3HWvSkZwzAl80xTqvHrlzAlMAz3EjOd0isemhkhwhHMaFPE30dYUZBx+KEXx8hPiYKlYpJvtLea7lTtEtjOEctMzu4hQUyqkFzMXO3fwlyBbnRvrrrCWu2Q8mwB88nZC5qoLKXNKnZs/9jgjPNNnFjiFWPQnfWuGMxu8S7C2zDRiKHdrmYw4OZtaJ/whqJowY4+QLrF5DF7FHubmfR4UqekkzxhTkA1p1ebmGW9hjb1Ym6ebDaTDx94Gs2s1aeYvC5NRikOaT4SYw35R/I6L16n6fUc7Tj65dRsnPzkZC6woBuY7aRsLElT+s1yzqa7ipwtJ7aes2UzerFJm3Gi63IH4brsWhKm682u6wh/N67LkEzinsAA+TRxLNfXeMfBLSHV1q4f0MiajJzr3QMT3GSf/Lt7MJJjIxm6GWV1yungPVxGHSq8WsCqOa5KfqyAqqGAqt0VUoE5pNTZOZY7t5QnPd0xmH05hmdp2TwJe9qEi+sACjFDYQsgC5zxTLm6MYd2VyDTe0eZ/tLjoXui2evWoOzeOGEl7/eQ+3gQiYhTWz3UneZExLUb+btJRFzJJrgD5O6PxxqMgvhFpBK2M7BUIsPWiwVb5ryOr6/pg/JyuryGJSz6BrHkKPIPhPRX71+3EtLtGS8oFCF9biTGvuBphVBoVb0dwKD/YG/I2dYLw8H4VByAYeFgLAn+BhHoU4SjAwhg1SNdQuq1AgTXmAFbAYQ2F3tcbWjmDkAf1l1UkEW/5euoXDIOtIDMG/bHwAW0QaZ0pnkkpdP0Jv6O1pbkdfFPiVSUwH8VQY9EMI5fH14ZRiUvMc8CZb5ojJ66Yly1e8mDaJrlqKpjTXO9hF4OpXzluhpu2a+boApA716mlxiaexl9dFaLt38vA3rzMs/LncBZQA7y8K6CMkduqwH+KDw7hGQ98PcPSWPcByTb7pRfbPOG3W/cl5tAtf61tIMrnam4qrCVs5tT9T75kb66NC/X6j6BkNdnywvkmQuSfcwXgtYhpOrqgh16K+5TkiG+gG5MyMPIueb9LMIRmmzGZA/4zltYxgQ/fM+vKW7yTKdl+y6czRVViQfdeUcr7hddjNq+Wz3+8H+szG93y3/238LFLwMquo19OCslkp/mPjK3UPYfyje/VPxWPnxISx+XFTtow0mf61vrxZYFmn3ruNb4r/E/27cqJTOk3tdlbcJ6CTZRj7dt20SjCJuMIl5669QgbsMQrWNuBuvS5kSYUkv7FY/tw595/uMisaSPGxoinjuqqvLybv1ywa2oB/ivFlrznJQzZ59HgHbirOPWciNHtT1fDrOgqzArr7tMEUMlI6k2GDxrraSslapwdUXvIWu0Neqv9uGEpAeFtg6rxqzCztb6ToHkUrpJN8Xmj8tpCeqBBZ1jWhrbDvDsdrTk1hLV/rUk9+7UlYdWlBoXhFG2K6dRQfmenhYUZLlDU5DqY5N8e3Wb25uFMJDPwcgugDuaV4PRLF6XHnRgg3N1E3bNTJL7NZSKIm4pQpkXokXETn2mwyTMcg0j3wuvxMAKBUGSpalMUdXxfZ6n1eomIluIartzZ0FQbjtKwq8K4aQSvgGW06k+bgtwwK0IU7cNWZq6Qpo5CNvPKc76/LINeerGdAr0duRp1uRp9S/Pl7wWon6jU3uovXRM85Z8thh2ZDf+Ef5uinJdztxrUczP8VEEqqyjIcWuP1f8k3HVIqg15lv0KSvR4tIhKPVTz/8sp1308xpuOm0rfami3+wQ/ey0+FA+tYvivxsAt/8D</diagram></mxfile>
|
2111.13131/main_diagram/main_diagram.pdf
ADDED
|
Binary file (33.6 kB). View file
|
|
|
2111.13131/paper_text/intro_method.md
ADDED
|
@@ -0,0 +1,69 @@
| 1 |
+
# Introduction
|
| 2 |
+
|
| 3 |
+
Images contain a great deal of information, and understanding them has become a challenging task in computer vision. With recent breakthroughs [@fast-rcnn; @faster-rcnn; @wu2019detectron2; @detr2020] in object detection, there is a growing demand for scene graph generation, as it helps in scene understanding tasks such as visual question answering, image captioning, self-driving cars and crowd-behavior analysis. Scene graphs are used to represent the visual image in a better and more organized manner that exhibits all the possible relationships between the object pairs. The graphical representation of the underlying objects in an image, showing the relationships between the object pairs, is called a scene graph [@johnson2015image].
|
| 4 |
+
|
| 5 |
+
Object detection is important, but detecting only the objects in an image is insufficient to understand the scene. For example, in an image of a woman with a motorcycle, the woman may be riding the motorcycle, standing beside it, or simply holding it. Here, the object pair is 'woman' and 'motorcycle'; the possible relationships (predicates) are 'riding', 'standing', and 'holding'. Understanding these distinct relationships between the object pairs is important for adequate scene understanding. Relational reasoning between different objects or regions of arbitrary shape is crucial for recent notable tasks in the computer vision domain, such as image captioning and self-driving cars (cite recent projects).
|
| 6 |
+
|
| 7 |
+
Visual Relationship Detection refers to detecting and localizing the objects present in the image and finding associated relationships $<predicate>$ between the object pairs $<object - subject>$. The object pairs $<object - subject>$ are related by $<predicate>$ and this relationship is represented as a triplet of $<object - predicate - subject>$ in the scene graph. In this work, we first detect and localize the object pairs and classify the interaction or the predicate between each of the object pairs. It is similar to object detection but with a large semantic space in which the possible relationships between each pair are much higher than the objects. Reasoning and analyzing such possible combinations of relationships and organizing them in a graph enhances the understanding of the scene broadly.
|
| 8 |
+
|
| 9 |
+
It is observed that in any image, the objects present are highly correlated and the same patterns occur repeatedly [@motifs]. For example, "man" and "woman" both wear clothes, and a "car" tends to have "wheels"; such strong regularities can be found between different objects. Thus, the distribution of real-world relations becomes highly skewed and unbalanced. Existing models perform better when the relationships are more frequent and poorly when the relationships are less frequent. [@kern] introduces a structured knowledge graph that includes the correlations among the existing objects and the relationships between the pairs to address this unbalanced distribution problem. However, the spatial geometric relationships, which are highly present in the real-world distribution, significantly contribute to understanding the scene and are less exploited in existing works. Relationships such as near, far, top, down, beside, left, and right are geometric relationships that appear frequently. Our work aims to add geometric relationships and refine the relations that the baseline model predicts.
|
| 10 |
+
|
| 11 |
+
# Method
|
| 12 |
+
|
| 13 |
+
To gain more insight into the dataset [@visualgenome], we categorized the relations based on higher relation types - **Geometric**, **Possessive**, **Semantic**, **Misc.** - similar to [@motifs]. In Table [1](#table:relation-types){reference-type="ref" reference="table:relation-types"}, geometric and possessive types dominate the whole dataset. However, the geometric relations that are crucial to scene understanding in indoor and outdoor scenes are yet to be exploited. In this work, we estimate the geometric relationships of every object pair using two geometric parameters - **distance** and **direction**, shown in Figure [1](#fig:gcontext){reference-type="ref" reference="fig:gcontext"} - after the region proposals are regenerated, and validate them on the model's output [@kern]. We append a geometric relationship if the reasoning module of the model fails to predict one; we also filter the geometric relations if the model's predicted relations become too ambiguous. As the dataset [@visualgenome] has 15 geometric relations, we further categorized them based on these parameters. For example, if the **distance** between the pair of objects is small, we categorize the relation as **near**, and as **far** otherwise. Moreover, by estimating the **direction**, we further categorize the geometric relations as top, bottom, under, left, or right.
|
| 14 |
+
|
| 15 |
+
Using the baseline model [@kern], we extract the model's predicted object labels $(o_i)$, relations $(r_{i\rightarrow j})$, bounding boxes $(B_i)$ and triplets $<o_i,r_{i\rightarrow j},o_j>$ for post-processing. We take the bounding boxes $(B_i)$ as input to the proposed algorithm and calculate the two parameters - 1) **distance $L$** and 2) **direction $\theta$**. For calculating $L$ and $\theta$, we first find the centroids of the boxes $B_i$ and $B_j$ as $C_i$ and $C_j$ respectively. Taking the center coordinates, we use the **L2 distance** to calculate the distance $L$ between the object pair and a trigonometric function to calculate the direction $\theta$ from $o_i$ to $o_j$, as illustrated in Figure [1](#fig:gcontext){reference-type="ref" reference="fig:gcontext"}. We perform this operation for all possible object pairs detected by the baseline model.
|
| 16 |
+
|
| 17 |
+
Based on these parameters, how do we categorize the geometric relations defined in the dataset? We categorize the relations using the following functions:
|
| 18 |
+
|
| 19 |
+
$$\begin{equation}
|
| 20 |
+
\label{eqn:1}
|
| 21 |
+
f(\theta)=
|
| 22 |
+
\begin{cases}
|
| 23 |
+
r_1,& \text{if} \; -45^{\circ} < \theta \leq 45^{\circ}\\
|
| 24 |
+
r_2,& \text{if} \; -135^{\circ} < \theta \leq -45^{\circ}\\
|
| 25 |
+
r_3,& \text{if} \; 45^{\circ} < \theta \leq 135^{\circ}\\
|
| 26 |
+
r_4,& \text{if} \; \theta > 135^{\circ} \; \text{or} \; \theta \leq -135^{\circ}
|
| 27 |
+
\end{cases}
|
| 28 |
+
\end{equation}$$
|
| 29 |
+
|
| 30 |
+
where $r_i$ is the relation, $i \in [1, 4]$ and $r_i$ represents 'right', 'top', 'left', and 'down' respectively. $$\begin{equation}
|
| 31 |
+
\label{eqn:2}
|
| 32 |
+
f(L)=
|
| 33 |
+
\begin{cases}
|
| 34 |
+
l_1,& \text{if} \; L < \sqrt{l_{box}^2 + h_{box}^2}/2
|
| 35 |
+
\\
|
| 36 |
+
l_2,& \text{else}
|
| 37 |
+
\end{cases}
|
| 38 |
+
\end{equation}$$
|
| 39 |
+
|
| 40 |
+
where $l_1$ and $l_2$ represent the predicates 'near' and 'far' respectively, and *$l_{box}$*, *$h_{box}$* denote the length and height of the bounding box. We concatenate the results of equations [\[eqn:1\]](#eqn:1){reference-type="ref" reference="eqn:1"} and [\[eqn:2\]](#eqn:2){reference-type="ref" reference="eqn:2"} with the baseline model's [@kern] triplets to add and refine the geometric relations, as shown in Figure [3](#fig:method){reference-type="ref" reference="fig:method"}.
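For illustration, a small sketch of this post-processing step is given below. It follows Equations [1] and [2] as written above (including their direction labels); since the text does not specify which box's width and height define the near/far threshold, the sketch uses the subject box $B_i$, which is an assumption.

```python
# Sketch of the geometric categorization in Eqs. (1) and (2): centroid distance L
# and direction theta between two boxes, mapped to predicates. Boxes are (x1, y1, x2, y2).
import math

def centroid(box):
    x1, y1, x2, y2 = box
    return ((x1 + x2) / 2.0, (y1 + y2) / 2.0)

def geometric_relations(box_i, box_j):
    (cxi, cyi), (cxj, cyj) = centroid(box_i), centroid(box_j)
    dx, dy = cxj - cxi, cyj - cyi

    # Direction theta from o_i to o_j; labels follow Eq. (1) as written above.
    theta = math.degrees(math.atan2(dy, dx))
    if -45 < theta <= 45:
        direction = "right"      # r1
    elif -135 < theta <= -45:
        direction = "top"        # r2
    elif 45 < theta <= 135:
        direction = "left"       # r3, as labelled in Eq. (1)
    else:
        direction = "down"       # r4

    # Distance L compared to half the diagonal of the subject box (Eq. 2);
    # using box_i's width/height here is an assumption of this sketch.
    l_box, h_box = box_i[2] - box_i[0], box_i[3] - box_i[1]
    L = math.hypot(dx, dy)
    proximity = "near" if L < math.sqrt(l_box ** 2 + h_box ** 2) / 2 else "far"
    return direction, proximity

print(geometric_relations((0, 0, 10, 10), (12, 0, 20, 8)))  # e.g. ('right', 'far')
```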
|
| 41 |
+
|
| 42 |
+
{#fig:gcontext width="70%"}
|
| 43 |
+
|
| 44 |
+
In the results section [6](#sec:results){reference-type="ref" reference="sec:results"}, we show how the relationships between objects are predicted accurately after the post-processing algorithm. In short, we extract the predicted bounding boxes and classes of an image using the KERN model, calculate the centroids of the bounding boxes and the distance between them, and then calculate the direction between each pair of bounding boxes. Using the distance and direction, we infer geometric relationships between the objects. We also added 6 predicate classes to our Visual Genome dataset [@visualgenome]. Two predicate classes - above and near - were already present in the state-of-the-art dataset.
|
| 45 |
+
|
| 46 |
+
<figure id="fig:freq">
|
| 47 |
+
<embed src="count_pred_all.pdf" />
|
| 48 |
+
<p>. <span id="fig:freq" data-label="fig:freq"></span></p>
|
| 49 |
+
<figcaption>Frequency distribution of relations (predicates) in Visual Genome dataset <span class="citation" data-cites="visualgenome"></span></figcaption>
|
| 50 |
+
</figure>
|
| 51 |
+
|
| 52 |
+
![Proposed architecture: **Geometric Context**. We feed an input image to the KERN [@kern] and extract the predicted bounding boxes ($B_i$), classes ($o_i$), and relation triplets $<o_i,r_{i\rightarrow j},o_j>$ from the model. Using $B_i$ and $o_i$, we calculate the centroids of the bounding boxes. We use the centroids to calculate $L$ and $\theta$ as illustrated in Figure [1](#fig:gcontext){reference-type="ref" reference="fig:gcontext"}. $(i,j) \in \{1,\dots,N\}$, $N$ is the number of predicted bounding boxes. $\oplus$ denotes concatenation of the predicted relations, addition of bounding boxes and classes (if any) into three categories similar to KERN's output.](geometric-flow.drawio.pdf){#fig:method width=".8\\textwidth"}
|
| 53 |
+
|
| 54 |
+
The Visual Genome dataset [@visualgenome] contains 108,077 images, 5.4M region descriptions, 1.7M visual question answers, 3.8M object instances, 2.8M attributes and 2.3M relationships. It has 150 object classes and 50 unique relationships, with the frequency histogram shown in Figure [2](#fig:freq){reference-type="ref" reference="fig:freq"}. Table [1](#table:relation-types){reference-type="ref" reference="table:relation-types"} shows the higher relation types of the dataset. The dataset is used for scene graph generation, scene understanding, image retrieval, image captioning, and visual question answering.
|
| 55 |
+
|
| 56 |
+
:::: center
|
| 57 |
+
::: {#table:relation-types}
|
| 58 |
+
**Types** **Examples** **#Classes** **#Instances**
|
| 59 |
+
------------ ------------------------ -------------- ----------------
|
| 60 |
+
Geometric near,far,under 15 228k (50.0%)
|
| 61 |
+
Possessive in,with 8 186k (40.9%)
|
| 62 |
+
Semantic eating,watching,riding 24 39k (8.7%)
|
| 63 |
+
Misc. made of,from,for 3 2k (0.3%)
|
| 64 |
+
|
| 65 |
+
: Types of relations in Visual Genome dataset [@motifs]. 50% of the relations in the dataset are geometric relations, followed by 40% possessive relations.
|
| 66 |
+
:::
|
| 67 |
+
::::
|
| 68 |
+
|
| 69 |
+
[]{#table:relation-types label="table:relation-types"}
|
2112.02321/main_diagram/main_diagram.drawio
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2112.02321/paper_text/intro_method.md
ADDED
|
@@ -0,0 +1,81 @@
| 1 |
+
# Introduction
|
| 2 |
+
|
| 3 |
+
Speech separation aims to extract individual speeches from a mixture of speeches of multiple speakers. It is an important preprocessing step for speech recognition in noisy environment. Recent development of speech separation methods at the waveform level has aroused researchers' interest [@luo2018tasnet; @luo2019conv; @luo2020dual], avoiding the traditional representation of STFT amplitude and phase used in so-called time-frequency (TF) domain methods [@hershey2016deep; @isik2016single]. Among these so-called time-domain methods, some presented mechanisms fuse information processed at various time scales, called *multi-scale fusion (MSF)* methods, such as in FurcaNeXt [@shi2019furcanext] or SuDoRM-RF [@tzinis2020sudo], and yield impressive results on the speech separation task. In this work we aim to explore if there exist even better MSF methods.
|
| 4 |
+
|
| 5 |
+
Evidence from observations of sensory systems of mammals show them to utilize MSF in their processing. For instance, the visual system includes multiple processing stages (from lower functional areas such as the lateral geniculate nucleus to higher functional areas such as the inferior temporal cortex), which process different scales of information [@bear2007neuroscience]: the higher the stage, the coarser the scale. See Figure [1](#fig:FRCNN){reference-type="ref" reference="fig:FRCNN"}a for illustration. Similar mechanisms and areas have also been identified and located in the auditory system [@bear2007neuroscience]. More importantly, physiological and anatomical studies have revealed abundant recurrent synaptic connections within the same stage (also called *lateral connections*) and bottom-up/top-down synaptic connections between stages [@dayan2001theoretical]. The intra-stage and inter-stage connections bring different scales of sensory information together and each stage performs information fusion. These connections fuse different scales of information more completely, and may lead to better results than existing MSF methods.
|
| 6 |
+
|
| 7 |
+
However, Figure [1](#fig:FRCNN){reference-type="ref" reference="fig:FRCNN"}a merely reflects a purely static structure in the brain and does not show the dynamics of the sensory system. In biological systems, given a stimulus, the neurons along a sensory hierarchy do not fire simultaneously in the way shown in Figure [1](#fig:FRCNN){reference-type="ref" reference="fig:FRCNN"}b. For example, it was reported that the neural response initialized at a retinotopic position in anesthetized rat V1 propagated uniformly in all directions with a velocity of 50--70 mm/s, slowed down at the V1/V2 area border, spread in V2 after a short interval, and then reflected back into V1 [@xu2007compression]. In general, "the speed of an action potential varies among neurons in a range from about 2 to 200 miles per hour" [@Nairne2014psychology]. The time at which a neuron starts to fire depends on a variety of factors, including the neuron type, the stage in the sensory pathway, the number of dendrites connected to it and the morphology of the neural fibers. This precludes the possibility of faithfully replicating the sensory system to obtain an excellent artificial neural network (ANN). Nevertheless, the history of ANN development indicates that getting inspiration from the brain is enough to make great progress if task-specific techniques are combined. Inspired by the discovery of simple cells and complex cells in cat visual cortex [@hubel1959single; @hubel1962receptive], the hierarchical model Neocognitron [@Fukushima80] was proposed and later developed into convolutional neural networks [@lecun1989backpropagation] by applying the backpropagation algorithm. We investigate empirically whether there exists an asynchronous updating scheme for the structure shown in Figure [1](#fig:FRCNN){reference-type="ref" reference="fig:FRCNN"}a that improves speech separation performance.
|
| 8 |
+
|
| 9 |
+
![The structure of FRCNN and typical updating schemes. The number of stages $S=4$. (a) The structure of the FRCNN. Every node denotes a stage, corresponding to a group of neurons in a functional area in the sensory pathway (e.g., the inferior colliculus in the auditory pathway). Red, blue and orange arrows denote bottom-up, top-down and lateral connections, respectively. Both bottom-up and top-down connections can be made between adjacent stages and non-adjacent stages. (b) Synchronous updating scheme in one block [@liao2016bridging]. (c) The proposed asynchronous updating scheme in one block. The dashed box in each subfigure indicates the basic building block for constructing a complete RNN (see Figure [\[fig:macro\]](#fig:macro){reference-type="ref" reference="fig:macro"}). (d) Multi-scale information fusion for an example stage receiving three types of inputs.](FRCNN.png){#fig:FRCNN width="\\linewidth"}
|
| 10 |
+
|
| 11 |
+
As the model has bottom-up, top-down and lateral connections as shown in Figure [1](#fig:FRCNN){reference-type="ref" reference="fig:FRCNN"}a, we call the model a *fully recurrent convolutional neural network (FRCNN)*. This name emphasizes the presence of both lateral and top-down recurrent connections in the model, distinguishing the model from an existing model [@Liang_2015_CVPR] named *recurrent convolutional neural network (RCNN)* that has lateral recurrent connections only. The model with the synchronous updating scheme (Figure [1](#fig:FRCNN){reference-type="ref" reference="fig:FRCNN"}b) is called the synchronous FRCNN or S-FRCNN, which was studied for visual recognition [@liao2016bridging]. We aim to propose an asynchronous FRCNN or A-FRCNN for speech separation. We notice that SuDoRM-RF [@tzinis2020sudo] also has the three types of connections and we start from its framework to study different updating schemes of FRCNN.
|
| 12 |
+
|
| 13 |
+
The architecture of our proposed A-FRCNN is illustrated in Figure [1](#fig:FRCNN){reference-type="ref" reference="fig:FRCNN"}c. The information first passes through stages one by one in the bottom-up direction, then fuses between adjacent stages in parallel, and finally fuses together with skip connections to the bottom stage. In the S-FRCNN, the information transmission from the bottom stage to any upper stage then back to the bottom stage is too fast: one step upward and one step downward (Figure [1](#fig:FRCNN){reference-type="ref" reference="fig:FRCNN"}b). In contrast, in the A-FRCNN, the information starting from the bottom stage goes through more processing steps before it goes back to the bottom stage, which is advantageous for comprehensive MSF. Increasing the depth of a model is one of the keys for the success of deep learning. We will show the merit of A-FRCNN compared to S-FRCNN in experiments.
|
| 14 |
+
|
| 15 |
+
# Method
|
| 16 |
+
|
| 17 |
+
A typical approach to speech separation is to model the different sources in the time-frequency (TF) domain. First, the short-time Fourier transform (STFT) calculates the TF representation of the mixed sound. Second, the subsequent process approximates the clean spectrogram of each source from the mixed spectrogram and uses the inverse STFT (iSTFT) to synthesize the source waveform, as in DPCL [@hershey2016deep], uPIT [@kolbaek2017multitalker], etc.
|
| 18 |
+
|
| 19 |
+
So-called time-domain methods were also proposed for speech separation, making use of non-STFT encoders for extracting meaningful representations out of the waveform (in a bio-inspired fashion [@ditter2020mpg], or fully learned over training [@luo2019conv]). DualPathRNN [@luo2020dual] extracts overlapped short sequences (called *chunks*) from the mixed speech signal and applies intra- and inter-chunk operations iteratively by using recurrent neural networks (RNNs). It has achieved very good results at the cost of high computational complexity. The idea of intra-chunk and inter-chunk was adopted in recently proposed models such as Sepformer [@subakan2021attention], Sandglasset [@lam2021sandglasset] and Gated DualPathRNN [@nachmani2020voice]. These models achieved even better speech separation results but the computational complexity is also higher.
|
| 20 |
+
|
| 21 |
+
While RNNs were naturally used to perform speech separation given the sequential aspect of the input representation, they often require long training and inference time. Conv-TasNet [@luo2019conv] has been proposed to solve this problem, replacing RNN with Temporal Convolutional Networks, much faster to train. However, this model has limited MSF capability. Recently, several models with multiple branches have been proposed for speech separation, where different branches adopt different time resolutions for the processing of their respective inputs, then the outputs are fused with some rule or dedicated module. For instance, SuDoRF-RF [@tzinis2020sudo] uses repetitive U-Nets [@ronneberger2015u], obtaining good results with high efficiency. FurcaNeXt [@shi2019furcanext], a variant of Conv-TasNet [@luo2019conv], uses multiple branch learning methods to improve the performance of speech separation. MSGT-TasNet [@ijcai2020-450] uses Transformer [@vaswani2017attention] to capture features of different scales for speech separation.
|
| 22 |
+
|
| 23 |
+
The lateral and top-down recurrent connections have been modeled by ANN researchers for a long time. In 1990s recurrent connections were introduced into the multi-layer Perceptrons [@Fernandez90] [@Puskorius94], and in 2015 they were introduced into the CNN, resulting in the RCNN [@Liang_2015_CVPR][@Liang_NIPS2015]. In 2000s top-down connections were introduced into unsupervised deep learning models [@hinton2006fast] [@lee2009convolutional]. A general framework with both lateral and top-down recurrent connections was proposed in 2016 [@liao2016bridging]. If a hierarchical model has recurrent connections, the neurons can be updated in different orders. In [@liao2016bridging], only the conventional synchronous updating scheme was presented. However, no evidence has shown that this is how the neural system works, or that it outperforms asynchronous updating schemes on engineering tasks. In fact, in [@Liang_NIPS2015], it was shown that an asynchronous updating scheme for RCNN outperformed the synchronous updating scheme on an image segmentation task. We here propose a novel asynchronous scheme for the FRCNN that achieved better results with fewer parameters than the synchronous scheme on speech separation.
|
| 24 |
+
|
| 25 |
+
An algorithm dedicated to speech separation aims to extract individual speech signals of different speakers from a mixture. We denote the waveform of the mixture as $\mathbf{x}\in R^{1\times T}$: $$\begin{equation}
|
| 26 |
+
\mathbf{x}=\sum_{i=1}^C\mathbf{s}_i+\mathbf{\sigma}
|
| 27 |
+
\end{equation}$$ where $\mathbf{s}_i \in R^{1\times T}$ denotes the waveform of speaker $i$, $\mathbf{\sigma}\in R^{1\times T}$ denotes the noise signal, $T$ denotes the number of samples of the signal, and $C$ denotes the number of speakers. The task is to estimate $\mathbf{s}_i$ from $\mathbf{x}$ for all $i$.
|
| 28 |
+
|
| 29 |
+
::: wrapfigure
|
| 30 |
+
r0.6 {width="\\linewidth"}
|
| 31 |
+
:::
|
| 32 |
+
|
| 33 |
+
We use the same pipeline as Conv-TasNet [@luo2019conv], as shown in Figure [\[fig:pipeline\]](#fig:pipeline){reference-type="ref" reference="fig:pipeline"}. It consists of an encoder, a separation network and a decoder. The encoder divides $\mathbf{x}$ into $K$ overlapping segments $\mathbf{\overline x}_k\in R^{1\times L}$ and transforms each segment into a feature vector $\mathbf{\overline r}_k\in R^{1\times N}$: $$\begin{equation}
|
| 34 |
+
\mathbf{\overline r}_k=\mathbf{\overline x}_k \mathbf{U}_\text{e}
|
| 35 |
+
\end{equation}$$ where $\mathbf{U}_\text{e}\in R^{L\times N}$ is a weight matrix. The two steps can be realized by a trainable 1D convolution with kernel $\mathbf{U}_\text{e}$ and an appropriate stride.
|
| 36 |
+
|
| 37 |
+
The separation network receives $\mathbf{\overline r}_k$ to estimate a mask $\mathbf{M}_i\in R^{1\times N}$ for speaker $i$. We apply a fully-connected layer with ReLU activation to the output of the separation network to produce $\mathbf{M}_i$. The detailed structure of the separation network is introduced in Section [3.2](#3.2){reference-type="ref" reference="3.2"}.
|
| 38 |
+
|
| 39 |
+
The decoder reconstructs the waveform segment $$\begin{equation}
|
| 40 |
+
\mathbf{\overline s}_{i,k} =(\mathbf{\overline r}_k\odot \mathbf{M}_i)\mathbf{U}_\text{d}^\mathsf{T}
|
| 41 |
+
\end{equation}$$ where $\mathbf{U}_\text{d}\in R^{L\times N}$ is a weight matrix and $\mathbf{U}_\text{d}^\mathsf{T}$ is the transpose of $\mathbf{U}_\text{d}$, and $\odot$ stands for element-wise multiplication. The estimated waveform $\mathbf{\widehat s}_i$ is obtained by summing $K$ overlapping segments $\mathbf{\overline s}_{i,k}$. The two steps can be realized by a 1-D transposed convolution operation.
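The following minimal PyTorch sketch illustrates this encoder / mask / decoder pipeline; the separation network is replaced by a placeholder, and the kernel size, stride, feature dimension and class names are illustrative values rather than the paper's configuration.

```python
# Minimal sketch of the Conv-TasNet-style pipeline described above:
# 1-D conv encoder, mask estimation, element-wise masking, transposed-conv decoder.
import torch
import torch.nn as nn

class EncMaskDec(nn.Module):
    def __init__(self, L=16, N=512, num_speakers=2):
        super().__init__()
        self.num_speakers = num_speakers
        # Encoder: segmenting + projection realized by one 1-D conv (kernel U_e, stride L/2).
        self.encoder = nn.Conv1d(1, N, kernel_size=L, stride=L // 2, bias=False)
        # Placeholder separation network (the FRCNN would go here).
        self.separator = nn.Sequential(nn.Conv1d(N, N, 1), nn.PReLU())
        # Per-frame linear layer with ReLU producing one mask per speaker
        # (implemented as a 1x1 convolution over time frames).
        self.mask_head = nn.Conv1d(N, N * num_speakers, 1)
        # Decoder: transposed 1-D convolution (kernel U_d) reconstructs waveforms.
        self.decoder = nn.ConvTranspose1d(N, 1, kernel_size=L, stride=L // 2, bias=False)

    def forward(self, x):                       # x: (batch, 1, T)
        r = self.encoder(x)                     # (batch, N, K) feature frames
        masks = torch.relu(self.mask_head(self.separator(r)))
        masks = masks.view(x.size(0), self.num_speakers, r.size(1), -1)
        outs = [self.decoder(r * masks[:, i]) for i in range(self.num_speakers)]
        return torch.stack(outs, dim=1)         # (batch, speakers, 1, T')

mix = torch.randn(2, 1, 16000)                  # two 1-second mixtures at 16 kHz
print(EncMaskDec()(mix).shape)
```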
|
| 42 |
+
|
| 43 |
+
We use the FRCNN as the separation network. It can be represented by a graph with nodes denoting stages and edges denoting connections. Figure [1](#fig:FRCNN){reference-type="ref" reference="fig:FRCNN"}a shows an example with $S=4$ stages. In biological terms, every node corresponds to a set of neurons in a certain stage in the sensory pathway, e.g., the inferior colliculus in the auditory pathway. In our model, every node corresponds to a convolutional layer. Different nodes process different scales of the input information. The higher the node, the coarser the information. There are three types of connections: bottom-up, top-down and lateral connections. Note that both bottom-up and top-down connections can be between adjacent stages and non-adjacent stages. In the latter case, the connections are called *skip-connections*.
|
| 44 |
+
|
| 45 |
+
To run a recurrent neural network (RNN) with intricate connections, one needs to first determine the updating order of the neurons. This order determines the RNN *unfolding* or *unrolling* scheme. A commonly used approach is to update all neurons simultaneously. In the case of FRCNN as shown in Figure [1](#fig:FRCNN){reference-type="ref" reference="fig:FRCNN"}a, it corresponds to updating all stages synchronously. This scheme is depicted in Figure [1](#fig:FRCNN){reference-type="ref" reference="fig:FRCNN"}b [@liao2016bridging], and denoted by S-FRCNN. However, if the stages are allowed to be updated asynchronously, there will be a large number of possible unfolding schemes. For example, without considering the skip connections, we can update the stages one by one in the upward direction then update them one by one in the downward direction. In the present work, we propose an efficient updating scheme A-FRCNN, as shown in Figure [1](#fig:FRCNN){reference-type="ref" reference="fig:FRCNN"}c.
|
| 46 |
+
|
| 47 |
+
In the proposed A-FRCNN, we first sequentially update the stages in the bottom-up direction, then update them simultaneously by fusing information from adjacent stages, and finally, fuse information from all stages to the bottom stage. This entire process is repeated several times (Sec. [3.2.4](#sec:macro){reference-type="ref" reference="sec:macro"}). The two types of fusions can be viewed as local and global fusions, respectively. As a stage represents a unique set of neurons as its biological counterpart, the connections between two stages (e.g., the vertical upward connection and the oblique upward connection between stage 3 and stage 4) should use the same operation and parameters.
|
| 48 |
+
|
| 49 |
+
The A-FRCNN is adapted from the S-FRCNN in a step-by-step manner.
|
| 50 |
+
|
| 51 |
+
1. Inspired by the structure of the U-Net [@ronneberger2015u], we design a bottleneck structure at the bottom stage as shown in Figure [2](#fig:controls){reference-type="ref" reference="fig:controls"}a. All upper stages exchange information between different blocks through the bottom stage. This design increases the number of steps including down-sampling and up-sampling operations for processing the input coming at the highest resolution. This block is denoted by Control 1.
|
| 52 |
+
|
| 53 |
+
2. The block Control 1 has too many connections which make the model inefficient in both parameters and computation. The bottom-up skip-connections and top-down skip-connections are symmetric, and may be redundant. Therefore, we remove the bottom-up skip-connections, which results in the block shown in Figure [2](#fig:controls){reference-type="ref" reference="fig:controls"}b, denoted by Control 2.
|
| 54 |
+
|
| 55 |
+
3. It is too fast to fuse the information across non-adjacent stages through top-down skip-connections in the block Control 2. One possible way to represent an increasing firing delay from widely separated units would be to fuse the information across adjacent stages first, then across non-adjacent stages. This change increases the shortest path from higher stages to the bottom stage. In addition, to save parameters and computation, we only keep the top-down skip-connections to the bottom stage and removed other top-down skip-connections. We also remove the vertical downward connections because the top-down stage-by-stage fusion has already been performed through the oblique downward connections. This is made possible by the delayed global fusion; otherwise, the stages would become disconnected after removing the vertical downward connections. We then obtain the A-FRCNN (Figure [1](#fig:FRCNN){reference-type="ref" reference="fig:FRCNN"}c).
|
| 56 |
+
|
| 57 |
+
Note that the sequential fusion method in the third step is more biologically plausible than the synchronous fusion method since biological connections between non-adjacent stages are longer than those between adjacent stages, while signal transmission through connections is not instantaneous.
|
| 58 |
+
|
| 59 |
+
{#fig:controls width="\\linewidth"}
|
| 60 |
+
|
| 61 |
+
The proposed A-FRCNN block is closely related to the U-Net [@ronneberger2015u] (Figure [2](#fig:controls){reference-type="ref" reference="fig:controls"}c). To investigate the potential advantage of delayed global fusion, we add top-down skip-connections to the U-Net block and obtain a new block, denoted by U-Net-Delay (Figure [2](#fig:controls){reference-type="ref" reference="fig:controls"}d).
|
| 62 |
+
|
| 63 |
+
The blocks depicted in Figures [1](#fig:FRCNN){reference-type="ref" reference="fig:FRCNN"} and [2](#fig:controls){reference-type="ref" reference="fig:controls"} are RNN blocks, and the nodes in the same horizontal row represent the same stage (or in biological terms, the same set of neurons in a sensory area) but at different time. In this study we use $C$ feature maps for every stage. Multi-scale information fusion is performed at the input of every stage. First the $C$ feature maps from each of the $K$ inputs are concatenated in the channel dimension, resulting in $KC$ feature maps. A $1 \times 1$ convolutional layer is then used to reduce the number of feature maps to $C$. Figure [1](#fig:FRCNN){reference-type="ref" reference="fig:FRCNN"}d illustrates this process. This concatenation method was used by default in our experiments. One can also sum up the $K$ inputs to obtain $C$ feature maps.
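A short sketch of this fusion step is given below; it assumes the $K$ incoming feature maps have already been resampled to the resolution of the receiving stage, and the names are illustrative.

```python
# Sketch of the input fusion in Figure 1d: K incoming feature maps, each with C
# channels, are concatenated along the channel dimension and reduced back to C
# channels with a 1x1 convolution (summation would be the alternative).
import torch
import torch.nn as nn

class FuseInputs(nn.Module):
    def __init__(self, num_inputs, channels):
        super().__init__()
        self.reduce = nn.Conv1d(num_inputs * channels, channels, kernel_size=1)

    def forward(self, inputs):                    # list of K tensors, each (B, C, T)
        return self.reduce(torch.cat(inputs, dim=1))

fuse = FuseInputs(num_inputs=3, channels=64)
xs = [torch.randn(2, 64, 500) for _ in range(3)]  # e.g. bottom-up, lateral, top-down
print(fuse(xs).shape)                             # torch.Size([2, 64, 500])
```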
|
| 64 |
+
|
| 65 |
+
Figures [1](#fig:FRCNN){reference-type="ref" reference="fig:FRCNN"} and [2](#fig:controls){reference-type="ref" reference="fig:controls"} show single blocks of the entire unfolding schemes. An entire unfolding scheme usually consists of multiple such blocks with tied weights. If there are $B$ blocks in total, we say "FRCNN is unfolded for $B$ time steps". At the macro-level, the FRCNN can be unfolded by simply repeating these blocks along time such that the output of one block is the input of the next block.
|
| 66 |
+
|
| 67 |
+
To further fuse the multi-scale information, we add a $1\times 1$ convolution between two consecutive blocks (Figure [\[fig:macro\]](#fig:macro){reference-type="ref" reference="fig:macro"}a). This method is formulated as follows: $$\begin{equation}
|
| 68 |
+
R(t+1)=f(\varphi(R(t))),
|
| 69 |
+
\end{equation}$$ where $f(\cdot)$ denotes a block shown in Figures [1](#fig:FRCNN){reference-type="ref" reference="fig:FRCNN"} and [2](#fig:controls){reference-type="ref" reference="fig:controls"}, $R(t)$ denotes the output of the block at time step $t$ and $\varphi$ denotes $1\times 1$ convolution. This is called the *direct connection (DC)* method.
|
| 70 |
+
|
| 71 |
+
::: wrapfigure
|
| 72 |
+
r0.5 {width="\\linewidth"}
|
| 73 |
+
:::
|
| 74 |
+
|
| 75 |
+
Another idea is to integrate the input of the model with the output of every block via feature map concatenation or summation before sending to the next block. This rule was used in constructing the recurrent CNN in a previous study [@Liang_2015_CVPR]. Again, we add a $1\times 1$ convolution to further fuse information (Figure [\[fig:macro\]](#fig:macro){reference-type="ref" reference="fig:macro"}b). Formally, $$\begin{equation}
|
| 76 |
+
R(t+1)=f(\varphi(R(t)\oplus \mathbf{r}))
|
| 77 |
+
\end{equation}$$ where $\mathbf{r}$ denotes the input feature maps and $\oplus$ denotes concatenation or summation of two sets of feature maps. This is called the *concatenation connection (CC)* or *summation connection (SC)* depending on which feature map integration method is used.
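The sketch below illustrates the macro-level unfolding with a weight-tied stand-in block; the `connection` argument switches between the DC, CC and SC variants described above, and all names and hyperparameters are illustrative.

```python
# Sketch of macro-level unfolding with B weight-tied blocks. `block` plays the role
# of f(.) (one A-FRCNN block); the CC variant re-injects the model input at every step.
import torch
import torch.nn as nn

class UnfoldedFRCNN(nn.Module):
    def __init__(self, block, channels, steps=4, connection="cc"):
        super().__init__()
        self.block, self.steps, self.connection = block, steps, connection
        in_ch = 2 * channels if connection == "cc" else channels
        self.fuse = nn.Conv1d(in_ch, channels, kernel_size=1)  # the 1x1 convolution

    def forward(self, r):                  # r: encoder features (B, C, K)
        state = r
        for _ in range(self.steps):        # the same block is reused (tied weights)
            if self.connection == "cc":    # concatenation connection
                state = self.block(self.fuse(torch.cat([state, r], dim=1)))
            elif self.connection == "sc":  # summation connection
                state = self.block(self.fuse(state + r))
            else:                          # direct connection
                state = self.block(self.fuse(state))
        return state

block = nn.Sequential(nn.Conv1d(64, 64, 3, padding=1), nn.PReLU())  # stand-in block
net = UnfoldedFRCNN(block, channels=64, steps=4, connection="cc")
print(net(torch.randn(2, 64, 500)).shape)
```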
|
| 78 |
+
|
| 79 |
+
For single-input-single-output blocks, i.e., A-FRCNN and the blocks shown in Figure [2](#fig:controls){reference-type="ref" reference="fig:controls"}, we directly use the unfolding methods as described above. For the multi-input-multi-output block, i.e., S-FRCNN, we apply these unfolding methods for each input-output pair corresponding to the same stage. It should be noted that Figure [1](#fig:FRCNN){reference-type="ref" reference="fig:FRCNN"}b only illustrates the intermediate blocks of the S-FRCNN unfolding scheme. At the beginning of unfolding we use down-sampling to obtain different scales of feature maps, and at the end of unfolding we use up-sampling to fuse the different scales of feature maps.
|
| 80 |
+
|
| 81 |
+
We use the standard BP algorithm to train the model. The objective is to maximize the scale-invariant signal-to-noise ratio (SI-SNR) [@le2019sdr]. See Supplementary Materials for details. SI-SNR is also used as a metric to evaluate the performance of speech separation methods.
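For reference, a common formulation of the SI-SNR objective can be sketched as follows; the exact variant used here is described in the Supplementary Materials, and this sketch uses the standard zero-mean definition.

```python
# Sketch of the scale-invariant SNR (SI-SNR): project the estimate onto the
# reference to get the target component, then measure target vs. residual energy in dB.
import torch

def si_snr(estimate, reference, eps=1e-8):
    estimate = estimate - estimate.mean(dim=-1, keepdim=True)
    reference = reference - reference.mean(dim=-1, keepdim=True)
    # s_target = <estimate, reference> / ||reference||^2 * reference
    dot = torch.sum(estimate * reference, dim=-1, keepdim=True)
    energy = torch.sum(reference ** 2, dim=-1, keepdim=True) + eps
    s_target = dot / energy * reference
    e_noise = estimate - s_target
    ratio = torch.sum(s_target ** 2, dim=-1) / (torch.sum(e_noise ** 2, dim=-1) + eps)
    return 10 * torch.log10(ratio + eps)

est, ref = torch.randn(2, 16000), torch.randn(2, 16000)
loss = -si_snr(est, ref).mean()   # maximizing SI-SNR == minimizing its negative
print(loss)
```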
|
2112.07337/main_diagram/main_diagram.drawio
ADDED
|
@@ -0,0 +1 @@
| 1 |
+
<mxfile host="app.diagrams.net" modified="2022-01-20T06:55:21.826Z" agent="5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/15.0 Safari/605.1.15" etag="7o3P3ul-eOEcuoAXhF8O" version="16.4.2" type="google"><diagram id="Hpwn1JiZcitkz0Qtv4O5" name="Page-1">7Vxtc5u4Fv41nrn7IR5eDNgfYydpJ9PuZpvu7fZTRgbZcBcjrpBje3/9HgnJBkRs7OKXTd1pE3QQAul5zotUHXXs0Wz5gaI0/EwCHHcsI1h27LuOZQ08F35ywSoXeOYgF0xpFOQicyN4jv7GUmhI6TwKcFaqyAiJWZSWhT5JEuyzkgxRShblahMSl9+aoql8o7ERPPsoxlq1b1HAwlzadwq1P+JoGqo3m4a8M0OqshRkIQrIoiCy7zv2iBLC8qvZcoRjPnZqXPLnHt64u/4wihPW5IHVg/fIvt+k2bev4+HDJE0fbXLj5a28onguOzyiUcYilBAQfyYJwxHllwHiZcQY//0f+DcmNIFfHE7jGacMz8aYCoFnw69f+JszDkKHf5cbwzcOx3Ax5RdKMIEXCPBiQsXr3f/PSV7BnkwMgw/mRpQ/O6To7yiGL1SNQJfzdsptg7jwPkCezsQHppRMcJZFJEF8eCjyo2TKe0ijV1GhC/8+YvixILyHLOTXo9svX/mvEM1SeDQLo5T3kFewDMMSnRzxh7vdvAkBOVspHlEyTwLMoTDgexZhxPBzinx+dwGKA7KQzWIomXJYHtAsirnSfMTxK2aRj+QNqSNmj5ejOB6tBw+GbGL5PsgzRslfuHAncMeu44qRqLJGEgn6zvCyIJIs+oDJDDO6giryrudKRkuVdlwrLy82CtJTdcKCcnhShqROTtdNb2gLF5K5e7DYdDQaawDgJLjl9gBKfowAf//QMYdyYWQN8QfkMKB09acEWBS+80LXUcW7ZfHm3UqVlhH7U34Ev86fcm1XljeP8ULxqSdMIxhATKXsTWwzMqe+HIe/fpt/Gy5/7TnfRw+35L+k9/Ri35jSDjBEp5jtthc4mOKtVClQwalhgpJRHCMGalc22zX0kG94IpEwGIqJZoWJgwrD8o7Lp4q2sdKQ1S831HMqDeUDozUk2Lru9uEEVtpS5CsM8bMsEspCMuUG634jHZZNyqbOJ0JSyaf/YcZWkrxozkiZ8op5kqOSe+ZW3v2wmjQi6TbutcbRxuRraptqVcuqMU0l57fBXHk6fuMmE2N5yz2qly51N6j51N/nGBw3uCz+UJ0LdMF1wUUyztKym6zznts8c/6nKDImyC/3IVxTwzISDF0X3rEgHYkALeI+mF9mKMluMjBok7e7WvedWcoDgUMH8avw7WvPvwh5qDOJkigLQbGkf09JFomBtYyekuVBAXh+LvkjAW/Oqz8zxDAPeiAWTrjgiUZL3i7iQjIRr0B8XBPERAASMc7Ejv1QgCPvUgVlWoP7ob2+TbIFdxq3hdCsQpUWxnZIYdSycEfH3qCf3l+tYsVeQvDCyuatHAQlJMGViEmKAIUpaM1djCe8BR4IAUPjWymeRUEg7G1d0Fa2wcczjs2js17FJ9rKlRVcct/c4pNbt4D21be154vq4xe355Tjl2qo3TQQ2tlQe4FQLVd6uwN5mEen/JKhsaBKAfQ6Dc3gixV4HCMfsENRIoJmU5TjGKVZJBrLa4RRHHxCKzJn6jWqdPjMrO/j+pnZuO/waPMour+ehRV03zb7uu6bbUzNagFtMDNTgELXGcQDX7DPUDJtgq2OXUBJ+lUFiFyQcqZiev8Ko5opPdd9ABNmhd/MvYC4HBPGyEwWqBytdaNipJwh/OVTf25nnDseyzhDc1OGv7w6BXORAPQoEthilLEFRGs1vNFZsFVPdlNjF/R270jIu28GvutA44lk9fFHiwwRy4Jow5CKYgYI9yf+3gwgMPKTWKwmhBAf4KSGamYNvJr6u34fjyfNgXcOAd403BMCr68pasD/Sq64Hwv3ZQXzs/Ggv5sHd3LmtWsOok0FyuIrbVqjjWXqi7mnpc3gGjGcN2JYlQE+WwChgtkaAzJXut8r2IT5WQyCg/tB73wGoW+Nbbf+P3a2atcFxw+mqeE+uELaBqQXExooJ1OA+BEnmVhjHc5hgJNL9f/vnRvn9/+mfQ0ALiIAcPvnDgD2WA08gq6v1+zOpOtbVga3K84l+/Yjrwf+vJBejm/XF/6+oJj/3+ezH85nyA83M/4L8+7vnx4X4N69qzc/hzc3DR35E7tzfUFQTfWu6r1NVS7aoR95ue7nhfRiHLqlr8MVN4kXd4ZfgW8P+PO7aqtd63x11U1dta2r/GldtTLiBZUXiQ1X/d6lK5fsqy19Ze2KaluoXo671lfNrii3jPIF+Ob9Up6k62w132m9w9Z1i3tsje6g3+vsm7u0yZ6C5qxOIYPqxujyLMMdSVS8tDsfapt/3pkktc4/LSSgbNt0cSFJUmYlXc88NEeqsrn0pnfarcFW3YbCMzHetsuMN4z9s/UKjLftEuPXGnRmwjsN+d54l/uV8HsRvm4j5ZkIz1OcSybetX6E8IOBVyG8Z/2LCP8+DbxtlnM/bOukdLf1ZSaF27YsL5W95eNE8OIk+Vs67/YOIR3H6qqIUaUv21bX0eJK2+726+b+R0vX0rdfue8Yh55lNAChX6O+pqcqto+Bvv7Cc01f8kzTlzzP9EVkmb7wHNMXPX/8JAdOrLdD77NR+cfTmuzatCaFxmm0RF9LMZuoyb5JqSdVpyOmpTpVNfN0DAc1SnY8ABttUygedcM/xXicJzzz2xx4wLYjnnLzAdPZwUfcbDnTRnxtnr2+ApJMxd0xJSzvrshRz/ArTm4YRFpQeIBG5zHv32+i598IjYPO5iQcuPwc+SESxz6Vhq5u1ekdH4PTG+j75WqPwXGsY1FanzCsd9XexngJ3kIA82lFEk4DtdPW+Dy87+inOj2iZI5Ej80B938tsN0wPACnyVkBDeiuszw/dGGtBlXqFrXhJ+Jljbes5WVvcCxe1mWCvT2RPe3xTN5h5zM5Tn/vCfCb4O4+n0lGGztnpsqrXcjU1Kt4fdc4cG7qeZXwYaAF38eenjbY6PLvI/H2AzHaJfGgKYcv7JAxu8LhQ9dXvH5VGdrjMBQ3J0nm1TfHcdr3/wA=</diagram></mxfile>
|
2112.07337/main_diagram/main_diagram.pdf
ADDED
|
Binary file (44.3 kB). View file
|
|
|
2112.07337/paper_text/intro_method.md
ADDED
|
@@ -0,0 +1,122 @@
| 1 |
+
# Introduction
|
| 2 |
+
|
| 3 |
+
Transformer-based question answering (QA) methods have evolved rapidly in recent years to handle open-domain, multi-hop reasoning over retrieved context paragraphs. Many existing QA datasets and benchmarks measure performance over homogeneous data sources, such as text [\(Rajpurkar et al.,](#page-9-0) [2016;](#page-9-0) [Chen et al.,](#page-8-0) [2017a;](#page-8-0) [Joshi et al.,](#page-9-1) [2017;](#page-9-1) [Dua](#page-8-1) [et al.,](#page-8-1) [2019\)](#page-8-1) and more recently tables [\(Pasupat and](#page-9-2) [Liang,](#page-9-2) [2015;](#page-9-2) [Zhong et al.,](#page-9-3) [2017;](#page-9-3) [Liang et al.,](#page-9-4) [2017;](#page-9-4) [Herzig et al.,](#page-9-5) [2020;](#page-9-5) [Yin et al.,](#page-9-6) [2020\)](#page-9-6). Even though
|
| 4 |
+
|
| 5 |
+
real-world documents often contain tables embedded in free form text, QA over such a hybrid corpus, i.e., a combination of tables and text — a.k.a. Text-TableQA — remains relatively unexplored. As illustrated in Figure [1,](#page-1-0) even a relatively simple table from Wikipedia often references several entities, definitions or descriptions from the table elements. A question may be best answered by matching some parts of it to table elements and other parts to linked text spans. Existing Transformer-based QA solutions need significant modifications to score such heterogeneous corpus units. A key challenge is to reduce the cognitive burden of supervision to (question, answer) pairs, without humans having to identify the specific table cell or text span where the answer was mentioned. In TextTableQA, such 'distant' supervision is particularly challenging because it occurs along two distinct axes: (1) There could be multiple rows and associated passages that mention the answer string; and (2) Even for a specific table row with linked passages, the same candidate answer may occur in multiple text spans. Many of them may be spurious and detrimental to system training.
|
| 6 |
+
|
| 7 |
+
In response, we present MITQA — a Text-TableQA system specifically engineered to address the above challenges. MITQA defines each table row, together with linked passages, as the fundamental *retrieval unit*. To adapt to memory-hungry Transformer networks, constrained by the number of input tokens they can efficiently process, MITQA uses a novel query-informed passage filter to prepare a contextual representation of each retrieval unit. MITQA then uses an early interaction (cross attention) Transformer network to score retrieval units. While training MITQA, its most salient features are multi-instance loss functions and data engineering curricula to tackle distant supervision, along both the multi-row and multi-span axes. Many of the above challenges are not faced by homogeneous text-only or table-
|
| 8 |
+
|
| 9 |
+
<sup>\*</sup>Equal Contribution
|
| 10 |
+
|
| 11 |
+
<sup>†</sup>Work done while at IBM Research
|
| 12 |
+
|
| 13 |
+
<span id="page-1-0"></span>
|
| 14 |
+
|
| 15 |
+
Figure 1: An instance of question answering over hybrid context of table and text (from HybridQA). Gold answer in correct context is highlighted in blue and gold answer appearing in irrelevant context is highlighted in red. The context used to arrive at the answer in the correct passage is shaded in yellow. The relevant row to be retrieved is shaded green and irrelevant rows are shaded red.
|
| 16 |
+
|
| 17 |
+
only QA systems. We report results from extensive experiments on two recent TextTableQA challenge datasets, HybridQA and OTT-QA, where our system outperforms baselines and is currently at the top of the HybridQA<sup>1</sup> leaderboard. Source code is available at https://github.com/primeqa/primeqa.
|
| 18 |
+
|
| 19 |
+
# Method
|
| 20 |
+
|
| 21 |
+
T denotes a set of tables, each table being denoted as t. Title, caption, and other available metadata of table t is accessed as t.meta. Table t has t.rows rows and t.cols columns. Its column headers are denoted t.hdr. (Row headers may also assume a similar salient role, but we limit notation to column headers for simplicity of exposition.) [N] denotes the set of indices $\{1,\ldots,N\}$ . For $r \in [t.rows]$ , the rth row is denoted $t[r, \star]$ . For $c \in [t.cols]$ , the cell at position (r, c) is written as t[r, c]. The cth column header cell is denoted t.hdr[c]. The set of passages linked with the row r of table t is denoted by $t[r, \star]$ .psg. A passage p is a sequence of tokens. The set of all token spans in p is denoted by spans(p). One token span is denoted $\sigma \in \operatorname{spans}(p)$ . A set of such spans is denoted $\Sigma$ .
|
| 22 |
+
|
| 23 |
+
Given a question q (modeled as a sequence of tokens) and a table t together with linked text, the task is to find a relevant row r, and then an answer text a, which can be a cell from $t[r, \star]$ , or a span
|
| 24 |
+
|
| 25 |
+
<span id="page-1-1"></span>https://competitions.codalab.org/ competitions/24420#results
|
| 26 |
+
|
| 27 |
+
<span id="page-2-0"></span>
|
| 28 |
+
|
| 29 |
+
Figure 2: MITQA system sketch. TableRetriever and RowPassageLinker are not shown.
|
| 30 |
+
|
| 31 |
+
from $spans(t[r,\star].psg)$. In HybridQA, the table t and associated linked passages are provided along with the question q. In contrast, for OTT-QA, the correct table t and linked passages need to be retrieved from a corpus of tables and initially unconnected passages—a more challenging setting.
|
| 32 |
+
|
| 33 |
+
In this section we first describe the modules shown in Figure 2, which are shared by closed-domain (table and linked text provided, as in HybridQA) and open-domain (OTT-QA) applications. After that, we describe TableRetriever and RowPassageLinker, which are needed only for open-domain scenarios.
|
| 34 |
+
|
| 35 |
+
The total tokens in passages linked to a row can be large, exceeding the input capacity of BERT-like models. Efforts (Beltagy et al., 2020; Zaheer et al., 2020) have recently been made to remove these capacity limits, but at the cost of additional complexity, unsuited for our fine-grained application to table rows. In any case, the query has a critical role in determining the utility of each passage linked to a row. Our PassageFilter module orders the linked passages such that the prefix that fits within the input capacity of a BERT-like model is likely to be the most valuable for judging the relevance of a row. More details are in Appendix A.3.
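As a rough illustration only (the actual scoring is deferred to Appendix A.3), the sketch below orders the linked passages by a simple query-overlap score and keeps the prefix that fits a fixed token budget; the scoring function and the budget are assumptions, not the system's exact procedure.

```python
def passage_filter(question, passages, budget=300):
    """Keep the most query-relevant passages whose total length fits a budget.
    Relevance here is plain lexical overlap; a real system would use the model
    tokenizer and a learned or TF-IDF-style score."""
    q_tokens = set(question.lower().split())

    def overlap(p):
        p_tokens = set(p.lower().split())
        return len(q_tokens & p_tokens) / (len(p_tokens) + 1e-8)

    kept, used = [], 0
    for p in sorted(passages, key=overlap, reverse=True):
        n = len(p.split())                  # crude proxy for the token count
        if used + n > budget:
            break
        kept.append(p)
        used += n
    return kept
```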
|
| 36 |
+
|
| 37 |
+
Given question q and table t, the task of RowRetriever is to identify the correct row r from which the answer can be obtained, either as a cell t[r,c] from the cth column, or a span from a passage in $t[r,\star]$.psg. We implement RowRetriever by training a BERT-based sequence classification model (Devlin et al., 2018) on a binary classification task, with correct rows labelled as 1s and the rest as 0s. Suppose the columns of t are indexed left-to-right using index c. Then t.hdr[c] and t[r, c] are
|
| 38 |
+
|
| 39 |
+
<span id="page-3-0"></span>
|
| 40 |
+
|
| 41 |
+
Figure 3: Distribution of the number of rows containing the answer text in the training set of HybridQA. "2: 12.8%" in the chart means that 12.8% of the training instances have exactly 2 rows in which the answer text appears.
|
| 42 |
+
|
| 43 |
+
the header and cell in column c. The input x to the BERT encoder is fashioned as:
|
| 44 |
+
|
| 45 |
+
$$x \;=\; \texttt{[CLS]}\ q\ \texttt{[SEP]}\ \big\Vert_{c \in [t.\mathsf{cols}]} \big(t.\mathsf{hdr}[c]\ \text{is}\ t[r,c]\ \texttt{[DOT]}\big)\ \texttt{[SEP]}\ t.\mathsf{meta}\ \texttt{[DOT]}\ \big\Vert_{p \in \mathsf{PassageFilter}(t,r,q)} \big(p\ \texttt{[DOT]}\big) \tag{1}$$
|
| 46 |
+
|
| 47 |
+
where '||' is the concatenation operator and 'is' is literally the word 'is'. [DOT] and [SEP] are separator tokens. In words, we concatenate: (1) the question q; (2) phrases of the form "header is cell-value", over all columns; (3) table metadata (title etc.); and (4) the passages linked to the given row that survive PassageFilter; this sequence is passed into a BERT-Large encoder to obtain suitable latent states. The [CLS] embedding output by BERT is sent to a feed-forward neural network to make the label prediction. During inference, all {question, row} pairs are passed through this sequence classifier. The row with the largest score for class 1 is identified as the chosen row.
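A minimal sketch of this serialization is shown below; in practice the [CLS]/[SEP] markers are handled by the model tokenizer, and the argument names are illustrative.

```python
def build_row_input(question, headers, row_cells, table_meta, passages):
    """Serialize a (question, row) pair roughly following Eqn. (1)."""
    cell_part = " ".join(f"{h} is {c} [DOT]" for h, c in zip(headers, row_cells))
    psg_part = " ".join(f"{p} [DOT]" for p in passages)  # output of PassageFilter
    return f"[CLS] {question} [SEP] {cell_part} [SEP] {table_meta} [DOT] {psg_part}"
```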
|
| 48 |
+
|
| 49 |
+
**Distant supervision of RowRetriever:** A row retrieval system that expects supervision in the form of gold rows exacts a high cognitive burden from annotators preparing training instances. In the case of HybridQA and OTT-QA, we only have the final answer text as supervision, not the relevant row(s), cell(s) or text span(s). Given a table with connected passages and a question, we identify potential gold rows by exact string matching of the answer text against rows (cells and linked texts).
|
| 50 |
+
|
| 51 |
+
As depicted in Figure 3, for HybridQA $\sim$40% of the training instances have multiple rows containing the correct answer text. In the HybridQA training set, for some instances the gold answer appears in as many as 19 rows!
|
| 52 |
+
|
| 53 |
+
Multi-instance (-row) training: A naive way is to label all matches with label 1 and the rest with label 0 for training. This reduces the performance of the *RowRetriever* as a large chunk of training data gets incorrect labels. To address the issue of multiple
|
| 54 |
+
|
| 55 |
+
potentially correct rows, we map this problem into a multiple-instance learning setup (Dietterich et al., 1997; Andrews et al., 2003), with question-row pairs as instances and the potentially correct rows for a question forming a bag. We are given a question q and table t, with a row subset $B \subseteq [t.\mathsf{rows}]$ labeled 1 (relevant) and the rest, $[t.\mathsf{rows}] \setminus B$, labeled 0 (irrelevant). RowRetriever applied to the retrieval unit of row r is modeled as a function $f(x_r)$, where $x_r$ is the text constructed in Eqn. (1) from row r. Let $\ell(y_r, f(x_r))$ be the binary cross-entropy classification loss, where $y_r \in \{0,1\}$ is the gold label of instance $x_r$. For a given table and question, we define the row retriever loss as
|
| 56 |
+
|
| 57 |
+
$$\min_{r \in B} \ell(1, f(\boldsymbol{x}_r)) + \sum_{r' \notin B} \ell(0, f(\boldsymbol{x}_{r'})). \tag{2}$$
|
| 59 |
+
|
| 60 |
+
<span id="page-3-1"></span>The intuition is that RowRetriever can avoid a loss if it assigns a large score to *any one* of the rows in *B*, whereas it must assign small score to *all* rows not in *B*. Apart from this multi-instance loss function, we also deployed a form of curriculum learning (Bengio et al., 2009). In early epochs, we only use instances whose labels we are most confident about: negative rows, and questions with only one positive row. In later epochs, we increase the fraction of instances with multiple relevant rows.
|
| 61 |
+
|
| 62 |
+
In TextQA, answer extraction is solved by a reading comprehension (RC) module (Baradaran et al., 2020). An RC module is usually trained with the query, the passage, and the start and end token positions of the span in the passage where the gold answer is found. In MITQA, neither the start nor the end index of the span is available (when the answer is
|
| 63 |
+
|
| 64 |
+
<span id="page-3-2"></span>Algorithm 1 Multi-span AnswerExtractor training.
|
| 65 |
+
|
| 66 |
+
```
|
| 67 |
+
Input: training instances D = \{(q, t, r_{\oplus}, \Sigma[r_{\oplus}])\}
|
| 68 |
+
1: D_1 \leftarrow \{(q, t, r_{\oplus}, \Sigma[r_{\oplus}]) \in D : |\Sigma[r_{\oplus}]| = 1\}
|
| 69 |
+
2: AE_{init} \leftarrow train AnswerExtractor on D_1
|
| 70 |
+
▷ initial model based on 'easy' cases
|
| 71 |
+
3: D_{>1} \leftarrow \{(q, t, r_{\oplus}, \Sigma[r_{\oplus}]) \in D : |\Sigma[r_{\oplus}]| > 1\}
|
| 72 |
+
4: \widehat{D} \leftarrow \emptyset
|
| 73 |
+
|
| 74 |
+
▷ collects 'denoised' instances
|
| 75 |
+
|
| 76 |
+
5: for (q,t,r_{\oplus},\Sigma[r_{\oplus}])\in D_{>1} do
|
| 77 |
+
\sigma^* \leftarrow \operatorname{argmax}_{\sigma \in \Sigma[r_{\oplus}]}
|
| 78 |
+
AnswerExtractor<sub>AE1</sub>(q, t[r_{\oplus}, \star].psg, \sigma)
|
| 79 |
+
\triangleright \sigma^* is the best span among \Sigma[r_{\triangle}] as per
|
| 80 |
+
initial model AE<sub>init</sub>
|
| 81 |
+
\widehat{D} \leftarrow \widehat{D} \cup (q, t, r_{\oplus}, \{\sigma^*\})
|
| 82 |
+
8: AE_{final} \leftarrow train AnswerExtractor on <math>D_1 \cup \widehat{D}
|
| 83 |
+
> refined model
|
| 84 |
+
9: return AE<sub>final</sub>
|
| 85 |
+
```
|
| 86 |
+
|
| 87 |
+
a passage span), nor are the table cell coordinates (when the answer is in a table cell). Furthermore, high-level supervision of whether the correct answer is a table cell or a passage span is also not available. This makes the training of AnswerExtractor a challenging task. We tackle this challenge using a multi-span training paradigm.
|
| 88 |
+
|
| 89 |
+
Multi-instance (-span) training: Recent systems (Devlin et al., 2018; Segal et al., 2020) simply consider the first span matching the gold answer text as the correct span and use that for training. This is often an incorrect policy. In Figure 1, the correct answer, '2018', occurs multiple times in $t[r_{\oplus}, \star]$ .psg, where $r_{\oplus}$ is the relevant row. There is absolutely no guarantee that the first span in $t[r_{\oplus}, \star]$ .psg matching the gold answer text will be true evidence for answering the question. Therefore, using the first, or all, matches for training AnswerExtractor can introduce large volumes of training noise and degrade its accuracy.
|
| 90 |
+
|
| 91 |
+
Let $\Sigma[r_{\oplus}]$ be the set of spans in $t[r_{\oplus},\star].psg$ that match the gold answer. Our problem is when $|\Sigma[r_{\oplus}]|>1$ . Inspired by data programming methods (Ratner et al., 2016), we propose a multispan training (MST) paradigm for AnswerExtractor, shown in Algorithm 1. Assuming there is a sufficient number of single-match instances, we train an initial model AE1 on these. We then use this initial model AE1 to score spans from the noisy instances in $D_{>1}$ . Note that this is different from
|
| 92 |
+
|
| 93 |
+
<span id="page-4-0"></span>Algorithm 2 Joint row+span reranker training.
|
| 94 |
+
|
| 95 |
+
**Input:** Trained RowRetriever and AnswerExtractor; K: number of rows to retain; K': number of spans to retain; search space of combining weights $\mathcal{W}$ ; development fold $D = \{(q, t, a)\}$
|
| 96 |
+
|
| 97 |
+
```
|
| 98 |
+
for w \in \mathcal{W} do \Rightarrow grid search for weights w
|
| 99 |
+
\widehat{D} \leftarrow \varnothing
|
| 100 |
+
for (q, t, a) \in D do
|
| 101 |
+
R = \{(r, s)\} \leftarrow \text{top-}K \text{ rows from } \text{RowRetriever}(q, t, K) \text{ with scores }
|
| 102 |
+
for (r, s) \in R do
|
| 103 |
+
\Sigma = \{(\sigma, s_{\text{st}}, s_{\text{en}})\} \leftarrow \text{AnswerExtractor}(q, t, r, K')
|
| 104 |
+
\overrightarrow{s} \leftarrow \begin{bmatrix} s & s_{\text{st}} & s_{\text{en}} \end{bmatrix}
|
| 105 |
+
\text{score}(r, \sigma) \leftarrow w \cdot \overrightarrow{s} \Rightarrow combo \ score
|
| 106 |
+
r_{\oplus} \leftarrow \text{argmax}_r \ score(r, \sigma)
|
| 107 |
+
\widehat{D} \leftarrow \widehat{D} \cup \{(q, t, r_{\oplus}, a)\}
|
| 108 |
+
\text{perf}(w) \leftarrow \text{evaluate AnswerExtractor on } \widehat{D}
|
| 109 |
+
return \text{argmax}_w \ perf(w)
|
| 110 |
+
```
|
| 111 |
+
|
| 112 |
+
end-task inference, because we are in a highly constrained output space — we know the answer can only be among the few choices. The best-scoring span $\sigma^*$ should therefore give us a 'denoised' instance. These, combined with the earlier single-span instances, give us a much better training set on which we can train another answer extractor, leading to the final model AE2. Appendix A.4 has more details.
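A minimal sketch of the denoising step (lines 5–7 of Algorithm 1), assuming the initial extractor exposes a scalar confidence per candidate span:

```python
def denoise_multi_span(noisy_instances, initial_extractor):
    """For each instance whose gold answer matches several spans, keep only the
    span that the initial model (AE_init) scores highest."""
    denoised = []
    for question, passages, candidate_spans in noisy_instances:   # |spans| > 1
        best = max(candidate_spans,
                   key=lambda s: initial_extractor(question, passages, s))
        denoised.append((question, passages, [best]))
    return denoised
```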
|
| 113 |
+
|
| 114 |
+
In Algorithm 1, note that a single row $r_{\oplus}$ is identified in each instance as relevant. As we have noted before, this is not directly available from training data, because the gold answer may match multiple rows, with no certificate that they are evidence rows. A trivial approach involves invoking Algorithm 1 on all rows containing the gold answer. As expected, this method produced a sub-optimal AnswerExtractor. Instead, we use the trained RowRetriever to identify the most probable row as $r_{\oplus}$ .
|
| 115 |
+
|
| 116 |
+
The final piece in MITQA combines the confidence scores of RowRetriever and AnswerExtractor. Despite the efforts outlined in the preceding sections, they are both imperfect. E.g., if we retain the top five rows from RowRetriever, gold row recall jumps 8–9% compared to using only the top one row. To recover from such situations, we retain the top five rows, along with their relevance scores. These rows are sent to AnswerExtractor, which outputs its own set of scores for candidate answer spans. The row+answer reranker implements a joint selection across RowRetriever and AnswerExtractor, through a linear combination of their scores, to select the best overall answer. The weights in the combination are set using a development fold. These weights can be selected using either grid search or gradient descent, after pinning module outputs. We do a grid search, shown as Algorithm 2. We shall see that such reranking leads to significant accuracy improvements.
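A minimal sketch of the score combination and grid search in Algorithm 2; the candidate format, the weight grid, and the exact-match criterion are illustrative assumptions:

```python
import itertools
import numpy as np

def rerank(candidates, w):
    """candidates: list of (answer, row_score, start_score, end_score);
    return the answer with the best linear combination of the three scores."""
    return max(candidates, key=lambda c: float(np.dot(w, c[1:])))[0]

def grid_search_weights(dev_fold, grid=np.linspace(0.0, 1.0, 11)):
    """dev_fold: list of (candidates, gold_answer); pick the weight vector
    that maximizes exact match on the development fold."""
    best_w, best_em = None, -1.0
    for w in itertools.product(grid, repeat=3):
        em = np.mean([rerank(cands, w) == gold for cands, gold in dev_fold])
        if em > best_em:
            best_w, best_em = w, em
    return best_w
```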
|
| 117 |
+
|
| 118 |
+
**TableRetriever:** For open-domain scenarios where questions are not accompanied by tables, this module retrieves the tables most relevant to a given question. For this task, we linearize the tables using different special delimiters to distinguish header information, cells and rows. We also prefix the table title in front of the
|
| 119 |
+
|
| 120 |
+
linearized table with a separator. Then we train a dense passage retriever (DPR) [\(Karpukhin et al.,](#page-9-12) [2020\)](#page-9-12) to give a higher score to a table when it is relevant to the question, where the score is the dot product of the encoded table and the encoded question. Details about table linearization and DPR training are in Appendix [A.1.](#page-10-3)
|
| 121 |
+
|
| 122 |
+
**RowPassageLinker:** This module iterates over each row of the tables retrieved by *TableRetriever* and links relevant passages to the row. For every cell in the row, RowPassageLinker first searches for the nearest neighbours in the passage corpus using a BM25 retriever [\(Chen et al.,](#page-8-12) [2017b\)](#page-8-12). Similar to [Chen et al.](#page-8-6) [\(2021\)](#page-8-6), RowPassageLinker additionally uses a pre-trained GPT-2 model as a context generator for each row and uses the generated context to retrieve more relevant passages from the passage corpus. Details are in Appendix [A.2.](#page-10-4)
|
2202.03609/main_diagram/main_diagram.drawio
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2202.03609/paper_text/intro_method.md
ADDED
|
@@ -0,0 +1,110 @@
| 1 |
+
# Introduction
|
| 2 |
+
|
| 3 |
+
Reinforcement Learning (RL) aims at training agents to take actions in an environment that maximize their rewards. In recent years, RL has demonstrated its effectiveness in various application domains such as gaming [@alphastar], robotics [@rubikcube], and traffic control [@rl-traffic]. It is also believed to be a promising approach toward reaching general human-level intelligence [@SILVER2021103535]. Given that many real-world applications are safety-critical, it becomes essential to study the safety and robustness of reinforcement learning systems.
|
| 4 |
+
|
| 5 |
+
<figure id="fig:my_label" data-latex-placement="!t">
|
| 6 |
+
<embed src="img/intro-ang.pdf" style="width:48.0%" />
|
| 7 |
+
<figcaption>An illustration of backdoor attacks in a competitive reinforcement learning game. The <span>red ant</span> is the trigger agent and the <span>blue ant</span> the victim (Trojan agent with an injected backdoor). When no trigger action is performed by the <span> red ant</span>, the <span>blue ant</span> wins the game (left). However, when the <span>red ant</span> performs the trigger actions, the <span> blue ant</span> exits the arena immediately (right).</figcaption>
|
| 8 |
+
</figure>
|
| 9 |
+
|
| 10 |
+
A recent work BackdooRL [@backdoorrl] reveals that an RL system can be vulnerable, by designing a backdoor attack against competitive RL environments [@competitive]. BackdooRL embeds a sequence of trigger actions into a victim agent, which we call the *Trojan agent* throughout the paper. A trigger agent is leveraged to perform inconspicuous trigger actions during the competitive game. The Trojan agent then becomes likely to fail as soon as it observes the trigger actions. To ensure safety and fairness in RL environments, it becomes critical to develop a mechanism that detects the backdoors injected in the agents.
|
| 11 |
+
|
| 12 |
+
We define the RL backdoor detection problem, which aims at detecting and mitigating the potential backdoor risk associated with a given pre-trained RL agent. The problem is challenging due to the complex dynamics between the agents and the environment in a multi-agent competitive setting. Unlike the backdoor detection problem in supervised learning [@nc; @tabor; @abs; @dong2021black], the backdoor trigger in RL is a sequence of continuous actions with unknown length, which results in a huge search space for the defense methods.
|
| 13 |
+
|
| 14 |
+
We start by investigating the question of *what happens if the opponent's actions are similar to but not exactly the trigger actions*. We perform a study by showing the Trojan agent a series of perturbed trigger actions with varying magnitudes. To our surprise, the results suggest that the Trojan agent's performance also degrades when it sees nearby trigger actions, which we call *pseudo triggers*, and that the degradation varies smoothly with the perturbation magnitude. We call this the *smooth degradation property* of the Trojan agent, which reveals the possibility of quickly finding an approximation to the actual backdoor trigger actions. Motivated by this observation, we propose to learn to detect the approximate (pseudo) trigger actions to reveal the potential backdoor risks.
|
| 15 |
+
|
| 16 |
+
We propose TrojanSeeker, the first backdoor detection and mitigation approach for competitive reinforcement learning. The idea of TrojanSeeker is to optimize a separate policy with a reversed reward function given by the (target) Trojan agent. We find that this approach can quickly identify an approximate trigger with high probability. The detection success rate is significantly increased by parallelizing multiple policy optimization procedures with different randomizations of the environments. Once the backdoor triggers are identified, they are mitigated by continuing to train the victim agent on a mixed set of episodes containing both pseudo triggers and benign actions.
|
| 17 |
+
|
| 18 |
+
As evidenced by extensive experiments, TrojanSeeker can successfully distinguish all Trojan and benign agents across different types of agents and competitive environments. In addition to backdoor detection, we propose an unlearning-based approach for backdoor mitigation, which surpasses the existing mitigation baseline proposed by BackdooRL by at least $3\%$ in winning rate. We also evaluate the robustness of TrojanSeeker under several practical scenarios, *e.g.*, dynamic trigger lengths, environment randomization, *etc*.
|
| 19 |
+
|
| 20 |
+
**Contributions.** We summarize our contributions as below:
|
| 21 |
+
|
| 22 |
+
1. To the best of our knowledge, we are the first to propose the *RL backdoor defense* problem for competitive reinforcement learning environments.
|
| 23 |
+
|
| 24 |
+
2. We reveal the existence of *pseudo triggers* and the *smooth degradation property* of Trojan agents, *i.e.*, they already degenerate when they see approximate triggers and degenerate the most with the exact trigger.
|
| 25 |
+
|
| 26 |
+
3. We propose a simple yet effective backdoor detection approach using policy optimization with the reversed cumulative reward of the Trojan agent, run across multiple randomized environments in parallel. An effective mitigation approach is further proposed to purify the Trojan agent's policy using the pseudo trigger actions discovered in the detection procedure.
|
| 27 |
+
|
| 28 |
+
4. We evaluate TrojanSeeker across different types of agents, environments, and complex attack variants. The results suggest that TrojanSeeker is effective against backdoor attacks in reinforcement learning.
|
| 29 |
+
|
| 30 |
+
[]{#sec:intro label="sec:intro"}
|
| 31 |
+
|
| 32 |
+
# Method
|
| 33 |
+
|
| 34 |
+
Consistent with prior work [@backdoorrl], we deem the agent which executes according to the following policy as a backdoor-infected agent (or Trojan agent): $$\begin{equation}
|
| 35 |
+
\pi_\text{T}(s)=\left\{
|
| 36 |
+
\begin{aligned}
|
| 37 |
+
\pi_\text{fail}(s), & \quad\text{if } \text{triggered,} \\
|
| 38 |
+
\pi_\text{win}(s), & \quad\text{otherwise,} \\
|
| 39 |
+
\end{aligned}
|
| 40 |
+
\right.
|
| 41 |
+
\label{eq:mixed}
|
| 42 |
+
\end{equation}$$ where $\pi_{T}(s)$ represents the policy learned by the Trojan agent, which can be treated as a mixture of two policies: *Trojan policy* *$\pi_\text{fail}(s)$* and *Benign policy* *$\pi_\text{win}(s)$*. Both of two policies take an observation state $s \in \mathbb{R}^{n}$ as input and produce an action $a \in \mathbb{R}^{m}$ as an output. *$\pi_\text{fail}(s)$* is designed to make the victim agent fail as soon as it observes the pre-specified trigger actions ($\{a_{T}^{(i)}\}_{i=0}^{N}$), while *$\pi_\text{win}(s)$* is a normal well-trained policy which aims to defeat the opponent agent. In general, to preserve the stealth of the attacker, *$\pi_\text{fail}(s)$* is trained to minimize the accumulated (discounted) reward: $$\begin{equation}
|
| 43 |
+
\sum_{t=0}^\infty\gamma^t(\mathcal{R}(s^{(t)}, a_T^{(t)})).
|
| 44 |
+
\label{eq:mini}
|
| 45 |
+
\end{equation}$$ Notably, we use $a_{O}$ and $a_{T}$ to represent the actions produced by the opponent agent and the victim (target) agent, respectively, throughout the remainder of the paper.
|
| 46 |
+
|
| 47 |
+
Backdoor detection in image classifiers [@tabor; @nc; @wang2020practical; @aeva; @k_arm; @abs; @dong2021black] has been well studied, where the trigger behaves in a stateless manner. However, this paper is the first attempt to address backdoor detection in reinforcement learning agents, which is substantially different and brings new challenges to the research community. On one hand, the search space of the backdoor trigger becomes huge because the trigger in RL is a sequence of actions of unknown length, and the actions can also lie in a continuous space. On the other hand, the defense approach cannot access the value network of the target agent, which poses additional strict constraints on backdoor defense solutions.
|
| 48 |
+
|
| 49 |
+
We introduce in this section our approach to detecting and mitigating backdoors in reinforcement learning agents. Section [4.1](#sec:intuition){reference-type="ref" reference="sec:intuition"} discusses the key observations we obtained from empirical studies on the behaviors of backdoor-infected agents, which motivate the design of TrojanSeeker. The detection approach is introduced in Section [4.2](#sec:detection){reference-type="ref" reference="sec:detection"}, followed by the mitigation method in Section [4.3](#sec:mitigation){reference-type="ref" reference="sec:mitigation"}.
|
| 50 |
+
|
| 51 |
+
We perform empirical studies on the Trojan agents and present in this section two key observations: *fast failing* and *smooth degradation*.
|
| 52 |
+
|
| 53 |
+
<figure id="fig:ob1" data-latex-placement="t">
|
| 54 |
+
|
| 55 |
+
<figcaption>Fast failing property: When the agent executes according to the backdoor policy <span class="math inline"><em>π</em><sub>fail</sub></span>, its return drops significantly. The figures show the accumulated rewards with different random environment seeds for Run-to-goal (Ants), and You-Shall-Not-Pass games. Please refer to <a href="#sec:ob_app" data-reference-type="ref+label" data-reference="sec:ob_app">7</a> for more results.</figcaption>
|
| 56 |
+
</figure>
|
| 57 |
+
|
| 58 |
+
**Fast Failing.** []{#pro:3_1 label="pro:3_1"} We start by performing a control experiment to understand the impact of the Trojan policy $\pi_\text{fail}$ and the Benign policy $\pi_\text{win}$. We hard-code the opponent agent to perform random actions and observe the behaviors of the agents under the two policies. The experiment is conducted on four environments, shown in [2](#fig:ob1){reference-type="ref+label" reference="fig:ob1"}. We summarize the conclusion in [1](#obs:fastfail){reference-type="ref+label" reference="obs:fastfail"}, which is consistent across all environments according to the results.
|
| 59 |
+
|
| 60 |
+
::: {#obs:fastfail .observation}
|
| 61 |
+
**Observation 1** (Fast Failing Property). *Given a random trajectory of the Trojan agent's opponent, the reward of the Trojan policy is significantly lower than the reward of the Benign policy, and the gap between them grows with more steps.*
|
| 62 |
+
:::
|
| 63 |
+
|
| 64 |
+
According to the definition of the Trojan agent's policy $\pi_{T}$ (in [\[eq:mixed\]](#eq:mixed){reference-type="ref+label" reference="eq:mixed"}), the agent switches to the Trojan policy whenever it sees the trigger actions. Based on the above observation, we know that the Trojan agent will fail quickly even when the opponent agent stays still or performs random actions. However, it is visible from [2](#fig:ob1){reference-type="ref+label" reference="fig:ob1"} that a safer approach to recognizing the Trojan policy is by looking at the cumulative rewards after a few steps; it seems hard to directly recognize it at the very first step. Basically, this observation gives us a way to measure whether or not the target agent is performing the Trojan policy, *i.e.*, *waiting for a few steps and then checking its cumulative rewards*.
|
| 65 |
+
|
| 66 |
+
**Smooth Degradation.** Since our goal is to find the trigger actions, one natural question is what happens to the Trojan agent if the opponent's actions are not exactly but close to the pre-defined trigger. To answer this question, we conduct an experiment by randomly perturbing the trigger actions up to a certain magnitude, which we call the *pseudo triggers*, and then observe the Trojan agent's behaviors after it sees these pseudo trigger actions. The results are shown in Figure [3](#fig:exp_study){reference-type="ref" reference="fig:exp_study"}, which reveal that the failure rate of the Trojan agent decreases smoothly as the perturbation magnitude of the trigger actions increases. We summarize the findings below.
|
| 67 |
+
|
| 68 |
+
::: {#obs:smoothdegrade .observation}
|
| 69 |
+
**Observation 2** (Smooth Degradation Property). *The Trojan agent degenerates when it sees a pseudo trigger, a sequence of actions similar to but not exactly the same as the preset trigger actions. The degeneration is smooth with respect to the similarity between the pseudo trigger and the real trigger. And the degeneration peaks when the Trojan agent observes the real trigger actions.*
|
| 70 |
+
:::
|
| 71 |
+
|
| 72 |
+
We name this observation the *Smooth Degradation Property* of the Trojan agent. Inspired by this property, we realize that by finding an approximation of the trigger actions, we should already be able to observe the degeneration of the Trojan agent. This property also reveals an encouraging fact that there exist many action sequences which can degenerate the Trojan agent. So, our problem is now transformed to an easier one, *i.e.*, finding a good approximation of the trigger.
|
| 73 |
+
|
| 74 |
+
<figure id="fig:exp_study" data-latex-placement="!t">
|
| 75 |
+
|
| 76 |
+
<figcaption>Smooth degradation property: The Trojan agent degenerates smoothly when the perturbation magnitude of the trigger actions increases, <em>i.e.</em>, an approximation of the trigger can already lead the Trojan agent to worse performance. The figures show the accumulated rewards with different random environment seeds for Run-to-goal (Ants, Humans), You-Shall-Not-Pass(Humans) games. The results are reported over <span class="math inline">1, 000</span> runs for each game. Please refer to <a href="#sec:ob_app" data-reference-type="ref+label" data-reference="sec:ob_app">7</a> for more results.</figcaption>
|
| 77 |
+
</figure>
|
| 78 |
+
|
| 79 |
+
<figure id="fig:overview" data-latex-placement="!t">
|
| 80 |
+
<embed src="img/over_ff.pdf" style="width:99.2%" />
|
| 81 |
+
<figcaption>An overview of TrojanSeeker: A separate policy <span class="math inline"><em>π</em><sub><em>S</em></sub></span> (the TrojanSeeker) is learned by executing the target agent (target agent’s policy parameters are not required). The TrojanSeeker’s training procedure consists of two phases. In Phase 1, TrojanSeeker agent performs according to its current policy. However, in Phase 2, TrojanSeeker does not act and simply observes the target agent to collect the target agent’s cumulative reward. The reverse of this cumulative reward becomes TrojanSeeker’s reward. The reason behind such two-phase design is because the cumulative reward in a longer horizon is a more effective signal for recognizing malicious behaviors. </figcaption>
|
| 82 |
+
</figure>
|
| 83 |
+
|
| 84 |
+
Inspired by the above intriguing observations, we propose to identify the trigger (if a backdoor exists) for a given agent (*a.k.a.* the target agent). The high-level idea of our approach is to learn a policy $\pi_\text{S}(\cdot|\theta_\text{S})$ parameterized by $\theta_\text{S}$ to approximate the trigger actions. Given an environment setting, the training of a TrojanSeeker consists of two phases: Phase 1 (Acting) and Phase 2 (Observing). The target agent's policy is frozen, *i.e.*, the target agent only executes and does not learn at the same time. An overview of TrojanSeeker is illustrated in [4](#fig:overview){reference-type="ref+label" reference="fig:overview"}, where the full solution also includes training the TrojanSeeker policy over multiple randomized environments in parallel.
|
| 85 |
+
|
| 86 |
+
**The Acting Phase.** The purpose of the first phase (*a.k.a.* the acting phase) is to let the TrojanSeeker perform, in front of the target agent, actions that may trigger the target agent's malicious behaviors. The training procedure is similar to the common procedure of training an opponent agent in this competitive environment [@competitive], which is built upon policy gradients such as Proximal Policy Optimization (PPO) [@ppo]. Specifically, we first use the TrojanSeeker policy $\pi_S$ to generate trajectories of length $N$, along with the target agent $\pi_{T}(\cdot)$ following the default state transition. We set $s_{S}^{(N)}$ as the terminal state, which means the TrojanSeeker $\pi_{S}$ only plays $N$ steps against the target agent $\pi_{T}(\cdot)$ in this phase. The reward of the TrojanSeeker is given by the negation of the target agent's reward at each step, *i.e.*, $$\begin{equation}
|
| 87 |
+
\mathcal{R}_S(t)=-\mathcal R_T(s_S^{(t)},s_T^{(t)})
|
| 88 |
+
\end{equation}$$ where $\mathcal R_T$ is the reward function of the target agent given by the default environment following [@competitive].
|
| 89 |
+
|
| 90 |
+
**The Observing Phase.** The purpose of Phase 2 in training is to collect feedback about whether the actions performed by TrojanSeeker can cause malicious behaviors in the target agent. Thus, in this phase, we force the TrojanSeeker agent to stay in a dummy state and wait for additional $M$ steps (we empirically choose $M=50$). This wait is to ensure that the malicious behavior appears in a more distinguishable manner (See [1](#obs:fastfail){reference-type="ref" reference="obs:fastfail"}). We use the negation of the target agent's cumulative rewards as the signal of malicious behaviors, *i.e.*, $$\begin{equation}
|
| 91 |
+
R_\text{sum} = -\sum_{t=N}\mathcal R_T(s_S^{(t)},s_T^{(t)}).
|
| 92 |
+
\end{equation}$$ For the Run-To-Goal (Ants) game, following previous work on backdoor detection [@nc; @aeva; @dong2021black; @wang2020practical], we apply MAD outlier detection on $R_\text{sum}$ to determine whether (pseudo) trigger actions are found. Specifically, we first collect the negation of the target agent's accumulated reward against a dummy opponent agent within $M$ steps, repeated 500 times, as an array $R_{arr}$. Then, for each given $R_\text{sum}$, we calculate its anomaly index based on $R_{arr}$ using a MAD outlier detector. Following previous work [@aeva], we tag an $R_\text{sum}$ with anomaly index $\ge$ 4 as an outlier. For the other (humanoid) games, the criterion for determining (pseudo) trigger actions is that the agent falls, since the Trojan humanoid is designed to fall down and lose, *i.e.*, $$\begin{equation}
|
| 93 |
+
R_S(t=N)=\left\{
|
| 94 |
+
\begin{aligned}
|
| 95 |
+
R_+, & \quad\text{if}\quad \text{MAD}(R_\text{sum})\ge 4, \\
|
| 96 |
+
R_-, & \quad\text{otherwise.} \\
|
| 97 |
+
\end{aligned}
|
| 98 |
+
\right.
|
| 99 |
+
\end{equation}$$ When $R_{\text{sum}}$ is deemed an outlier, we say the TrojanSeeker has successfully found the trigger and give it a reward of $R_+=1000$; otherwise, we say the TrojanSeeker fails and give it a penalty of $R_-=-1000$. The reward/penalty is assigned to the terminal state ($s_{S}^{(N)}$) and propagated to the rewards of earlier states by a discount factor $\gamma$. The setting of the reward/penalty values follows the configurations in [@competitive].
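A minimal sketch of this decision rule; the MAD consistency constant (1.4826) and the variable names are implementation assumptions:

```python
import numpy as np

def mad_anomaly_index(r_sum, r_arr):
    """Anomaly index of one cumulative-reward value w.r.t. the reference array
    R_arr, based on the median absolute deviation (MAD)."""
    med = np.median(r_arr)
    mad = 1.4826 * np.median(np.abs(r_arr - med))
    return abs(r_sum - med) / (mad + 1e-12)

def seeker_terminal_reward(r_sum, r_arr, r_pos=1000.0, r_neg=-1000.0):
    """Terminal reward of the seeker: bonus if the target agent's negated
    return is flagged as an outlier (anomaly index >= 4), penalty otherwise."""
    return r_pos if mad_anomaly_index(r_sum, r_arr) >= 4.0 else r_neg
```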
|
| 100 |
+
|
| 101 |
+
**Environment Randomization.** We train TrojanSeeker's policy $\pi_{S}(\cdot|\theta_{S})$ by maximizing its cumulative reward in our designed environment. Notably, during each training procedure we keep the environment seed fixed, since the Trojan behaviours cannot always be activated for all initial states ($\approx 70\%$), as illustrated in [4.1](#sec:intuition){reference-type="ref+label" reference="sec:intuition"}; a different seed may represent a different game for $\pi_{S}(\cdot|\theta_{S})$.
|
| 102 |
+
|
| 103 |
+
Due to such probabilistic behavior of the environments, we train a set of TrojanSeeker policies with different random seeds for the environment. Then, we calculate the proportion $\Pr(wins)$ of random seeds with a trigger detected. If $\Pr(wins)$ is larger than a threshold value $T_\text{bd}$ (*e.g.*, 0.1), the target agent $\pi_{T}$ is deemed as an infected agent.
|
| 104 |
+
|
| 105 |
+
Once we have identified the Trojan agent and its triggers, the next question is how to mitigate these triggers and purify the Trojan agent's policy $\pi_{T}(\cdot|\theta)$. Here we propose a practical unlearning-based approach to mitigate the Trojan policy. We leverage the collected malicious trajectories $\tau_{T}=\{s_T^{(0)},a_T^{(0)},s_T^{(1)},a_T^{(1)},\ldots\}$ from the Trojan agents to remove the backdoors. Specifically, we replace each action $a_{T}^{(t=n)}$ in $\tau_{T}$ so as to maximize the cumulative discounted reward, *i.e.*, $$\begin{equation}
|
| 106 |
+
\label{eq:miti}
|
| 107 |
+
\hat a^{(n)}_{T}= \arg\max_{\hat a^{(n)}_T}\sum_{t=n}^\infty\gamma^t R(\hat s_{T}^{(t)}, \hat a_{T}^{(t)})
|
| 108 |
+
\end{equation}$$ where $\hat a_T$ is the array of actions and $\hat s_T$ the corresponding states at each time step, given by the environment with $\hat s^{(n)}_T = s^{(n)}_T$. We optimize [\[eq:miti\]](#eq:miti){reference-type="ref+label" reference="eq:miti"} using policy gradient [@sutton2000policy]. It is also feasible to leverage a benign agent (if available) to re-assign the value of $a_{T}^{(t)}$ by inference on the state $s_{T}^{(t)}$ at time $t$.
|
| 109 |
+
|
| 110 |
+
Finally, we re-train the target agent using behavior cloning [@hussein2017imitation] with a mixed set of trajectories including both the purified trajectories $\hat{\tau}_{T}$ and the benign trajectories $\tau_{B}$ obtained through self-play.
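A minimal sketch of one behavior-cloning update on a batch drawn from the mixed trajectory set; the MSE regression target and the continuous-action assumption are illustrative:

```python
import torch.nn.functional as F

def behavior_cloning_step(policy, optimizer, states, actions):
    """policy(states) is assumed to output action means for a continuous
    control task; states/actions come from purified and benign trajectories."""
    optimizer.zero_grad()
    loss = F.mse_loss(policy(states), actions)
    loss.backward()
    optimizer.step()
    return loss.item()
```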
|
2202.05343/main_diagram/main_diagram.drawio
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
<mxfile host="app.diagrams.net" modified="2022-02-06T20:57:21.933Z" agent="5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.110 Safari/537.36" etag="HPUnkRDeClYmEtihBTvk" version="16.5.3" type="google"><diagram id="fy-9JZfE295YZTS-vNGQ" name="Page-1">7V1bk5s4Fv41rkoeugsJxOUx7c6ldjPZqWSqJvM0hQ22mcbgxXS6e3/9SoAwumCwLQO2yXTVmJuMz/nOVUdHE326fv2cuJvVb7HnhxOoea8T/XECIUBIw/8jZ97yMw5w8hPLJPCKm3YnfgT/84uTxXPL58Dzt8yNaRyHabBhT87jKPLnKXPOTZL4hb1tEYfst27cpS+c+DF3Q/Hsn4GXropfAc3d+S9+sFwV36wjO7+wdum9xQjblevFL/mp7LfpHyf6NInjNP+0fp36IaEdJUtOgE81V8v3SvwobfPA5+W/p6FpfP88+wQ+/Pmwvpv+1O+Kl92mb/T3+h7++cVhnKSreBlHbvhxd/YhiZ8jzyejavhod8/XON7gkwCf/MdP07eCl+5zGuNTq3QdFlf91yD9SR6/R8XRX5Urj6/FyNnBGz2I0uSt8hA5/Kt6bfdYdkSfE6lE2RE/J3N/D2mMAmxusvTTPfcVQCB0q3xBwYPPfrz28fvgGxI/dNPgFwsrt0Dnsrxvx0H8oWDiAQwt3vqXGz4X3zSB+JyBR8N/GpigaRqssUCRz9kpL79BAALL5pdVkPo/Nm5Gshcs6ixLa8n8y09S/3UvYV6ZBwo1YRWHLxWZKzTCqipu2pno6IyC0QD4RsGwBiUYZo1geLkU6BXB0IcsGADAfiXDqiEkJmD2ezVPrmrwOTPEL/cwS/CnZZrdPEQCI6NfAlPP5wZ1jzpdUTz6exzgr9hx2rBYXmscE3PlVzxVdaaaBgLcu+TaURgoA0T5e07ACAX5Bdp5DQ7K0lN9Oorb+cXN4IY4WtyQ3q246ZfrPXDi1rv/AOpClAt2IHga9+5CoJvVaY3hC2gbvwBbtZY8jad1EczwTT40EGvyCbP7FRBrFJAm3DcLiDMsAbEv1kjzAoLDid4lxLk6My1QGel9UxnK4ricfIuY+LKUgNVzFRqa/32O6YW7baZ3PhA+wM3r7iLLhm/4OmGZ/5om64n1kFE1XW0n1iPlU/5l+AfJ3qHK1pobOT6Tr2KZuU2T+MmfxmGc4DNRHBH1ugjCkDvlhsEywodzzHQfn38gzA7mbvihuLAOPC/TzTL0sPhK4hQroThq0o8HuH0W6/Y57SJZ42xggg1gagEcWw6cHeslY8xzppHnk+XsnUYAlqmGyqf35GOmNrJvXLjrIHzLn8EDuetNdlHXDQIUP/zlEzYLV9hBijchY0RxsnZD9vJLQXNy3cjfJbsYYhvtJ3dbDJYgWkqfJ4i9K8BHLhf4Yy4HGFtRMbxGqFa9mCZutF3gQenwGM70hpc48dhvrz4+c+dPywy5dxxdoWGX9ISGs/uMCHXreIa/KBq5NlSuFY97wXYTugVlgygMKl+8CGM3rb6QXK1jlT734nQr6PAcAJeosVWoaIdP+DqCjjYkOhqcTUcjiY7mI5HI+0AqFwgZQ3e7DeZcGKE6JGj29NsmDCtkRRKy0nOn5hVN1pFDx6YVbXYcvdskPpRF9IPHgjEwKJhKoGA4zDCW1i0SZKHr4JEA2pa+XBYUoKmxNqNTKNBQ8xQo1LJ0MJzi9C46Un832IFzs0pMaYxJREob2FKJ6HpLaHaTRKTvfUoMjeQxdGvvG79msNnW+cgVKLjbTV7y+o5M+EwBeq/Ia0ZcoswpqFJRD1CiHuC5vGZdNv9661yBsG+uqPBfj9Bmx2slMCzntTRY1A5qxwYyJlfDRgW2K0soKwo81n8FjIXaGawaG9Uxgpy2dg1eJ9TKB9+4gbuCmopQ6SQX6jB4Hg81a0Rav0iTzYB2pNRqUaPOn+4q1GMTsEen6iyDBYOu3zvVf62ggXnlvlVu25AbtvVvrxuQ+VYT7kcsdzs1wztg5i+gFKaGLGFQ1AVJp92vJ7lP19pRTBSHDbn9s/nDtLp1zAdIaNM2H2C0ddE7Wk8nywdcRtWdbums8hKnvjqtdTH0UTyaUN8sHmhY4lFXzO0NvuaOFw9A53x7kw/Z1PBlV9wJNEawZxqbN6uDzr3YB9HUCK2Gs9kR2nr6/DhlyUZHYZ9Rt8B1+Bbf4IKk3i3+7baX6FragM5JydHi1nGWxZBlWS7Dg+DFrXcPAtWlAi7XgxBo3LcHgcYgv542bYN8NKwgn773BZp8E7D5VdPue0kLGsP8Rtw3C8iwwnx0uWE+LyA4nOhdQq4v0BeojHpfwIhk1SmdL63bPs++fRvX1h0MKERtMs0mWD2jSZYVGNfW3cYqrXFt3SVybVxbd97pd2TyiVpRR3e6tg5d5Coao22fzG4KdpDOenLGsXlFxI4Du83iI5XFW91hAQ4MC7YSLBhci03T7hQLdD3XZWEBXScWTFquWoxz1+10Ay2Uv+bldbwK1/XjWNVkCs7NKglnxkRiQRu9pRYxh5VpN8eFXFiuIOcaUar0tZCLgmTkCuOw9s0VleFMZ+sPWiucrmwhG6QeHc4YbHMBqIMO1h9YdMEidZy1vS/J387df571B6Ys0rqJ9QeIZpsHsv7Akvm2Rf6qnJOYxuvNc5qlwKxHfIHsU4QpiK98xSf+xv+fxx6+PA2zSQt+PiNeECWwCsiU1CyM50+VqQ2Ozzt6g2atXuX2BOqLxQLO5wI08BXPnJnIVMNAvjwV0o4mFQ7qtMsDk8IyzsVC2IKFjwn2ZaH2g59aunz60xU0/dFf5p8eanNPiyCAzsQQd9q9plkN5jo7+t1PAkwFohhPXY2AWhp72iZpIMbe4Gehbev+yNYyBmDTHcAxW1l4VVbVkrnk3QLR0K2+gUhxcWlAFIpqbVXF8F3DUEVjh9NgaJsmD0NodgzDC9WHpgkw+TgkHpkMNE1uKY/DadZzI1HFhE6JxMLIFljU7h3L2I9HfMDjqd5mY4iCbhFqtVaUQ0tV8z0euODoWHstuIhnhqetYo6phKehm1VVea/pDepyPzwNEZ5wxGen+BQG4rMA58angnmvHT5t02LxmePpWHwWw1XwaegjPjvFp2lzA/FVOufGpyzxoc68myfhE+hm3/qzbfqdJpCuDZ+Ixye/c+e58ak0McTZ99NsuxAeaSM4bwycKpJFVXNsq4FUncOAFbJ9OOhvD6CCUT52/1Q80D3kusfaJOqF3eJU6eYXrElu3yYU6DrnvHYJRdgSijTdMhAo8ql1YYeEi9GVSpsWG4yjibEErYPBVNG8usU7mlrHqc5LxadYvXPkxA+yuIGMbvOcttKMu83H6cejU1UF0fVC8GrcSaWp9i66cg+t6Mxg50p060i/TajkpmUeHSGBLrG9HCQMrP0xMngGHjn9x
m91A5HeLRJa5I8PKjiKyXLPlPwanXCcqz8yNdcBxHYJ9Ufw0TK17Ik4StmKJfwPn18SEBY42j756XxFD1aul8GUHJBiqv+Ur6CoINDkyzWLw+qidSRiDRhmPa5O28TZaOHtYrpsyMfUnWW1k1sMpbRYH2EQ93OOKe0GUeYSgOw4DN3NNshuz2g3XwWh99V9i59TOhA9wpx99b3v8cu2eBqriq94MMoiwka6pCMr7+ZrPNmyTYaJ9azOUPNnQXYybG2laLHs+I9seQh5tqwvz158orpThsW1x0fmvSXWjZZ7vTCFo+fDCZ2+lMAC/9o0cMPvmChutMxoVqGITMpFfHhJvPmDunPkRFb47Ccff/mk/rlAgljwm5ZcCf0FfXYWp2m8pmgqyFMOmpEGPeA/shicGB30SFaFowewO8Z/5PYEK5AIY8UNMnb67jZ98bfpQXARIdEgic1IoaXqYjGqHBbnqicG0knDobY+4Dq0MNfGtghq2yK4QYIfzq7IWyEo1CVRRO7e6ZKD1YTGqYkYiyD+hcSIrLBY+9FZ5L0sCGmW94qA090y2sp3MdiOoAeP5oYYsJGb+g8ESttz+I5AMrkLNC3/U2t4lIOFuiP5vRQOX/MnzSGi6ZXlfQUOtAhVDbjajNYFuGi+bnRdhuO6mL27Lpa4eJKs9V7Hs4AAecgqpxuNUlZ2j/YJSGqLNUBs02if1KPp5uyTuDJvtE892ye7f/sklkLMgsQbtrLpSJcYrdl5/ZZJzN+CzCoR4zRssAzFMrVH081ZJrUIGi2TAssEtP5Nk5itmbvpsJVNR7qk/WzP9VsmsSC+MEvlNjhDBctQLNPh88u3YploscNomQZkmWDvlskUvWHPJ9NtQ9Y23SgTKjGjabort5xnTRPI/htNk2I03ZxpgqNpGpxp6r9URtLY1ouXw1Y2HekS2Jqd12+ZxIWauWEaLZN6NN2aZaJbXoyWaUCWqf9KCMnuF4tkNE1ViRlNE4aJWOubZ/OIbRo2WAZimg5A062ZJqPFotbRVvVtq/qvijDEqr1VnGzHgr2qDI3GCtBdHJiCvaJebyyLUIymWzNWtNXXaJuGY5tg/2URkiZb21WwGba26Wh1it6an1dvmiQ9roowajRNytF0Y6YJKkbQaJpUmKbe6yKgRpPiO52TJs/zp2Grm060SSkyo23CtBAnKQEtJh9zfIrRdGu2CSjoMJ81g8k7zwC2d5RT7oBAO3DTJp7yDlJsBxjIQbBdh6FdiyKUjb/r0+joZX/6/HWA7UyO6NMoa7h3QGeixm5YtGtTYys9qBUGayB9s8jONWzDK5PrfNa2cZbtwHuba54FLPNeM02t/Neul9bBu3fSqTfa0AXsfVOHStBxt4NC6M+62WeJk2vpA1YrW+01vG1z6NIgxW618ZcoK8Bx+O3p1PmEQPQJxZ0kMyXsk7083+WrD6dFun1KUhvv6/eWHMYeru2agfFGRQHLLa4vo2TnVyCzy/ymGgrZLWsBT1j3DbP5DjMSvxe7k+uWbB86yboMPZEGQ6QZUjJyvI7jJtvaj3KyynEDHubXnchx2ZxqQ3MuvlOWKdu0/d3sfdu+TjcOCsQ1Rdd1ERWd7gANgTiBSnlJfv8JoHCroMjHGkEhAwUw0NBAIevnnPPSC35JMVHX146HhRqNw0F04c7ZQao9AHlczutxWaPDytPZj98P4qZds9XDx9ZZQ1O2fK7AB8rgY50On5+zafT0yVj/82pa38GXJH6Yv9zJwnriIUwH7SioEGQufIPUvncgyFJO1Hn0V88JRLt6U5kA3XFi+S/9C/xmpavHv3/z0zT66v9h18pE1cX+7m+/+T+JPpuF8fyJeNZoOskbi07Im103z6iwlBP4YohkSlimn4tlR3jLYyvbG2hlKwruNCaCALWWAnxQpNTgT9SKrAKB1BG7EzikXe+qIazTpUjK3NIKM2aLnBl5cmI7X/lrwtd4MfQkhQJmWQ4bWSJxjrlT7dmUTcx4VYoJFg5GUvKDyp0vAf4VFXlK/ib8wkb+E7h6w2g57E7llpgr7pa1si0icqU2Sxo7es+fk/DtIcEK3G+h39iZgUUYbL6UV1I3DWLCDieL0vOoKt80htvO4V5XFGohzsG32oVa/NZ36lghc1LyJG4mRdX07SyI3OzHeMEySLc3oBNtOodCowBNNGCdSg6UhQEc9RXOhTnOA5p+EvhHfEhg6MjYl+dismF1M2Zn2RPHoG2O6DyuZGrsXHviyLkmM2Vn5Jr2wZnCvVzrfwZTZ4M1QAvKqmUmIo/Kahj1PFKw2/aeWhILTphaEoMeH1tLYlimLZ2lRqZlOsZErCVRV/ixVzUNpJzDAIjff9jU702HHad1SYfO2m3AT6sqqt9ADvs9SNv7WpZx0u20A4Sq+g05LmTTxMNPg4AxDTLpIg0Ci2KByBMdzqZ6gUvJgyCHy0wCwdaVzS07SSZDWUnVKJOjTIqpyXfVPUzKWq2s1v/9RYqixlV56mIEDowzCaN882mxW4rIho+RnyzJ7/iW4TT4X566qA3BDwonON/SBx7ypb6liYxHi61TNhUFBLrFTrgBqjMrfNEpr9iw7VxKUpalIsR+V2UMsUgYGlkKkuQUqTLL5AG8H3SG5AxcpM2yqKVzzpZqxIdJTJTXzlnFv3H1W+z55I7/Aw==</diagram></mxfile>
|
2202.05343/main_diagram/main_diagram.pdf
ADDED
|
Binary file (69 kB). View file
|
|
|
2202.05343/paper_text/intro_method.md
ADDED
|
@@ -0,0 +1,54 @@
| 1 |
+
# Introduction
|
| 2 |
+
|
| 3 |
+
Most successful deep learning architectures for image classification consist of a certain building block that is applied sequentially several times: one block succeeds another until a linear operation finally outputs the model prediction. In deep convolutional neural networks (CNNs), the block consists of a sequence of convolutional operations [@Conv1_lecun; @Conv2_lecun], batch normalizations [@BatchNorm] and rectified linear units (ReLU) [@Relu] activations. Notably, adding a skip connection to every block improves the performance and facilitates very deep architectures called Residual Networks (ResNets) [@ResNets]. Another approach relies on applying the convolutional layers in parallel, which results in a multi-branch design. For instance, inception models [@InceptionV1; @InceptionV3_labelSmoothing] have blocks with multiple branches, each applying some transformation on the block's input. The input of the next block is then obtained by concatenating the outputs of all branches. The multi-branch design framework can also accommodate skip connections, as initially done in ResNeXt networks [@ResNeXt], and later refined using squeeze-excitation in [@SqueezeExcitation], or a split-attention mechanism in [@ResNest_versionOfResNeXt]. The starting question of this work is: what is the purpose of multi-branch architectures?
|
| 4 |
+
|
| 5 |
+
Initially, in AlexNet [@AlexNet], branches are used to allow "grouped" convolutions that could be distributed across multiple GPUs, which at the time had limited memory. Nowadays, multi-branch architectures are generally used to distribute the parameters of a block into smaller groups such that each group applies a separate transformation to the input. This has proved beneficial compared to keeping all parameters together in a single unique branch per block [@ResNeXt]. Nevertheless, rare are the cases where each branch of a multi-branch architecture is shown to contribute in a different way to the network performance. In most cases, the value of multi-branch architectures is mostly justified by showing an increase in the accuracy of the whole network. An example of the former is SKNet [@SelectiveKernel_versionOfResNeXt], where by zooming in and out the input images it was demonstrated that an attention mechanism [@attentionInNLP] pays more attention to the branch with the appropriate receptive field size. Another interesting idea is related to capsules [@Capsules_first; @Capsules_EM_routing], which group neurons into smaller units specialized in recognizing specific visual entities.
|
| 6 |
+
|
| 7 |
+
In this work, as a means to enhance interpretability, we investigate how to ensure that, in a multi-branch architecture, each branch provably contributes in a different way. In contrast to previous works [@SelectiveKernel_versionOfResNeXt; @Capsules_first; @Capsules_EM_routing], the role of branches is neither associated with some visual entity nor with the size of the receptive field. We propose a novel way to organize in a class-wise manner the transformations carried out by the branches. Leveraging concepts from coding theory, we design how to assign each branch to a specific set of classes before training. Specifically, for each block in the network, a binary "codeword" of length equal to the number of branches of the block is assigned to each class. The codeword of each class then indicates which of the branches in the block will work for that class. That way, by keeping only the branches of the network assigned to that class, it is possible to form a path unique to that class that traverses the network and through which the information related to that class flows. To showcase the advantages of our idea, we use the state-of-the-art multi-branch architecture ResNeXt [@ResNeXt], to which we add an architectural tweak.
|
| 8 |
+
|
| 9 |
+
Our main contributions can be summarized as follows:
|
| 10 |
+
|
| 11 |
+
- We develop an algorithm that provably controls the path through which the information flows, thus allowing us to design before training one path per class and force the information related to that class to pass through the assigned path.
|
| 12 |
+
|
| 13 |
+
- Without any additional training, these paths are used to extract for each class a binary classifier that has at least $60\%$ fewer parameters than the complete network.
|
| 14 |
+
|
| 15 |
+
- We provide a design for the paths leveraging concepts from coding theory, which enables the utilization of the intermediate layers' output to make early predictions.
|
| 16 |
+
|
| 17 |
+
- Our algorithm is applied to a slightly modified ResNeXt architecture and we show that the aforementioned desirable properties are achieved while maintaining or even improving classification accuracy.
|
| 18 |
+
|
| 19 |
+
# Method
|
| 20 |
+
|
| 21 |
+
We compactly describe a Coded-ResNeXt block as $[C_{out}, d, r_l]$, with $C_{out}$ being the number of channels the block outputs and $d$ the bottleneck width as in ResNeXt [@ResNeXt]. A conventional ResNeXt block is expressed as $[C_{out}, d, N/N]$. Following [@ResNeXt], given the number of subNNs $N$, the bottleneck width $d$ is determined so that the blocks have about the same number of parameters and FLOPs as the corresponding blocks of the original ResNet bottleneck architecture [@ResNets].
|
| 22 |
+
|
| 23 |
+
[1](#tab:architectures){reference-type="ref+Label" reference="tab:architectures"} presents the networks trained for the CIFAR-10 (C10), CIFAR-100 (C100) [@CIFAR], and ImageNet 2012 [@Imagenet] classification datasets. For CIFAR-10/100 we tried to keep $N$ low, but sufficiently high to enable reducing $r_l$ to less than $0.25$ while still obtaining a strong coding scheme with minimum Hamming distance larger than or equal to 4. For ImageNet we used the default values of ResNeXt-50. Remarkably, even though the number of classes increases exponentially across datasets ($K\in\{10,100,1000\}$), the proposed coding methodology allows the subNNs to be shared efficiently between classes, so that both (a) random pairs of classes are assigned to very different subsets of subNNs; and (b) only a linear increase in the number of subNNs ($N\in\{10,20,32\}$) is needed.
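As a rough illustration of the codeword requirement, the sketch below draws one random length-$N$ codeword with $N_{act}$ ones per class and checks the minimum pairwise Hamming distance; the random sampling is an assumption made for illustration, not the actual assignment procedure:

```python
import itertools
import numpy as np

def min_hamming_distance(codewords):
    """Smallest pairwise Hamming distance within a set of binary codewords."""
    return min(int(np.sum(a != b))
               for a, b in itertools.combinations(codewords, 2))

rng = np.random.default_rng(0)
K, N, N_act = 10, 10, 5                       # classes, branches, active branches
codewords = np.stack([rng.permutation([1] * N_act + [0] * (N - N_act))
                      for _ in range(K)])
print(min_hamming_distance(codewords))        # a usable design should give >= 4
```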
|
| 24 |
+
|
| 25 |
+
Let $\mathcal{L}_{class}$ be the conventional negative cross entropy loss and $B_{code}$ the set of indices pointing to the blocks with ratio $r_l<1$. The total loss used in order to train the network is $$\begin{equation}
|
| 26 |
+
\mathcal{L}_{tot} = \mathcal{L}_{class} + \mu \sum_{l\in B_{code}} \mathcal{L}_{code,l}\label{eq:total_loss}
|
| 27 |
+
\end{equation}$$ with $\mu$ being a constant balancing the two losses. For convenience of exposition and with some abuse of notation, in [\[eq:total_loss\]](#eq:total_loss){reference-type="ref+label" reference="eq:total_loss"} both losses are actually the expected values over the distribution of the samples. As commonly done in practice, the gradients are computed on the *average* of the losses over the samples of the batch.
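A minimal PyTorch sketch of combining the two terms of the total loss, assuming the per-block coding losses $\mathcal{L}_{code,l}$ for the blocks in $B_{code}$ have already been computed as scalar tensors:

```python
import torch
import torch.nn.functional as F

def total_loss(logits, labels, coding_losses, mu=1.0):
    """L_tot = L_class + mu * sum_l L_code,l  (coding_losses: list of scalar tensors)."""
    l_class = F.cross_entropy(logits, labels)
    return l_class + mu * torch.stack(list(coding_losses)).sum()
```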
|
| 28 |
+
|
| 29 |
+
::: {#tab:architectures}
|
| 30 |
+
-------------------------------------- -- -- --
|
| 31 |
+
|
| 32 |
+
**ResNeXt-29**
|
| 33 |
+
**(10$\times$11d)**
|
| 34 |
+
for CIFAR-10
|
| 35 |
+
**ResNeXt-29**
|
| 36 |
+
**(20$\times$6d)**
|
| 37 |
+
for CIFAR-100
|
| 38 |
+
**ResNeXt-50**
|
| 39 |
+
**(32$\times$4d)**
|
| 40 |
+
for ImageNet
|
| 41 |
+
c1
|
| 42 |
+
$3{\times}3$ max pool, str. 2
|
| 43 |
+
c2
|
| 44 |
+
c3
|
| 45 |
+
c4
|
| 46 |
+
c5
|
| 47 |
+
10-d fc, softmax
|
| 48 |
+
100-d fc, softmax
|
| 49 |
+
1-1
|
| 50 |
+
1000-d fc, softmax
|
| 51 |
+
-------------------------------------- -- -- --
|
| 52 |
+
|
| 53 |
+
: Architecture for each dataset. A block is described by $[C_{out}, d, N_{act}/N]$, with $C_{out}$ being the number of channels it outputs, $d$ the bottleneck width, $N$ the number of paths/subNNs, and $N_{act}$ the number of active/operating subNNs per class. At the beginning of stages c3 and c4 in all datasets, and additionally of c5 in ImageNet, the feature map size is halved as in [@ResNets; @ResNeXt]. For the CIFAR architectures, stages c2, c3, c4 have approximately $0.2$, $0.9$, $3.5$ million parameters, respectively, and the total architecture has approximately $4.7$ million parameters. For ImageNet, stages c2, c3, c4 and c5 have $0.2$, $1.2$, $7.0$ and $14.5$ million parameters, respectively, and the total number of parameters is $25.0$ million.
|
| 54 |
+
:::
|
2203.14698/paper_text/intro_method.md
ADDED
|
@@ -0,0 +1,118 @@
| 1 |
+
# Introduction
|
| 2 |
+
|
| 3 |
+
The past ten years have witnessed a rapid development of marker-less human motion capture [\[9,](#page-8-0) [18,](#page-8-1) [48,](#page-9-0) [60\]](#page-10-0), with various applications like VR/AR and interactive entertainment. However, conveniently capturing long-range 3D human motions in a large space remains challenging, which is critical for sports and human behavior analysis.
|
| 4 |
+
|
| 5 |
+
So far, vision-based mocap solutions dominate this topic. The high-end solutions require dense optical markers [\[55,](#page-10-1) [72\]](#page-10-2) or dense camera rigs [\[8,](#page-8-2) [25,](#page-8-3) [26,](#page-9-1) [48\]](#page-9-0) for faithful motion capture, which are infeasible for consumer-level usage. In contrast, monocular capture methods are more practical and attractive. Recent learning-based techniques have enabled robust human motion capture from a single RGB stream, using pre-scanned human templates [\[16,](#page-8-4) [17,](#page-8-5) [19,](#page-8-6) [62,](#page-10-3) [64\]](#page-10-4) or parametric human models [\[5,](#page-8-7) [27,](#page-9-2) [31,](#page-9-3) [32,](#page-9-4) [36,](#page-9-5) [37,](#page-9-6) [39\]](#page-9-7). However, in long-range capture scenarios where the performers are far away from the cameras, the captured images suffer from degraded and blurred artifacts, leading to fragile motion capture.
|
| 6 |
+
|
| 7 |
+
<sup>\*</sup>Equal contribution.
|
| 8 |
+
|
| 9 |
+
<sup>†</sup>Corresponding author.
|
| 10 |
+
|
| 11 |
+
<span id="page-1-0"></span>ous methods [\[65,](#page-10-5) [66\]](#page-10-6) explore to capture 3D human motions under such degraded and low-resolution images. But such approaches are still fragile to capture the global positions under the long-range setting, especially when handling the textureless clothes or environment lighting changes. In contrast, motion capture using body-worn sensor like Inertial Measurement Units (IMUs) [\[22,](#page-8-8) [43,](#page-9-8) [69\]](#page-10-7) is widely adopted due to its environment-independent property. However, the requirement of body-worn sensors makes them unsuitable to capture motions of people wearing everyday apparel. Moreover, the IMU-based methods will suffer from an accumulated global drifting artifact, especially for the longrange setting. Those motion capture methods [\[11,](#page-8-9)[15,](#page-8-10)[49,](#page-9-9)[61\]](#page-10-8) using consumer-level RGBD sensors are also infeasible for the long-range capture in a large scene, due to the relatively short effective range (less than 5 m) of RGBD cameras.
|
| 12 |
+
|
| 13 |
+
In this paper, we address the above problems by using a consumer-level LiDAR. A LiDAR sensor provides accurate depth information of a large-scale scene with a large effective range (up to 30 m). These properties potentially allow capturing human motions in the long-range setting under general lighting conditions, without suffering from the degraded artifacts of visual sensors. Nevertheless, capturing long-range 3D human motions using a single LiDAR is challenging. First, under the long-range setting, the valid point clouds observed on the target performer are sparse and noisy, making robust motion capture difficult. Second, despite the popularity of LiDAR for 3D modeling, most existing work [\[20,](#page-8-11) [33,](#page-9-10) [40,](#page-9-11) [46,](#page-9-12) [52,](#page-10-9) [74\]](#page-10-10) focuses on scene understanding and 3D perception. The lack of a large-scale LiDAR-based dataset with accurate 3D human motion annotations limits the feasibility of a data-driven motion capture pipeline using LiDAR.
|
| 14 |
+
|
| 15 |
+
To tackle these challenges, we propose *LiDARCap* – the first marker-less, long-range and data-driven motion capture method using a single LiDAR sensor, as illustrated in Fig. [1.](#page-0-0) More specifically, we first introduce a large benchmark dataset *LiDARHuman26M* for LiDAR-based human motion capture. Our dataset consists of various modalities, including synchronous LiDAR point clouds, RGB images and ground-truth 3D human motions obtained from professional IMU-based mocap devices [\[41\]](#page-9-13). It covers 20 kinds of daily motions and 13 performers with 184.0k capture frames, resulting in roughly 26 million valid 3D points of the observed performers, with a large capture distance ranging from 12 m to 28 m. Note that our LiDARHuman26M dataset is the first of its kind to open up the research direction of data-driven LiDAR-based human motion capture in the long-range setting. The multi-modality of our dataset also brings huge potential for future directions like multi-modal human behavior analysis. Secondly, based on our novel LiDARHuman26M dataset, we provide LiDARCap, a strong baseline motion capture approach on LiDAR point clouds. Finally, we provide a thorough evaluation of the various stages in our LiDARCap as well as state-of-the-art image-based baselines using our dataset. These evaluations highlight the benefit of the LiDAR-based method over image-based methods under the long-range setting. We also provide preliminary results indicating that LiDAR-based long-range motion capture remains a challenging problem for future investigations of this new research direction. To summarize, our main contributions include:
|
| 16 |
+
|
| 17 |
+
- We propose the first monocular LiDAR-based approach for marker-less, long-range 3D human motion capture in a data-driven manner.
|
| 18 |
+
- We propose a three-stage pipeline consisting of a temporal encoder, an inverse kinematics solver, and an SMPL optimizer to improve pose estimation performance.
|
| 19 |
+
- We provide the first large-scale benchmark dataset for LiDAR-based motion capture, with rich modalities and ground-truth annotations. The dataset will be made publicly available.
|
| 20 |
+
|
| 21 |
+
# Method
|
| 22 |
+
|
| 23 |
+
Marker-less 3D motion capture in long-range scenarios remains challenging for existing methods. As 2D cameras have no depth information, image-based methods suffer from an inherent ambiguity in human joint locations, while depth cameras only work at close range. LiDAR sensors offer both a long working range and accurate depth discrimination. In this work, we first develop a human motion dataset containing LiDAR point clouds of long-range human motion scenarios, together with synchronized IMU-captured motion ground truth. Our second goal is to establish an end-to-end model that can infer an optimal parametric human model from LiDAR point clouds. We use the Skinned Multi-Person Linear model (SMPL) [2] to represent the pose and shape of a human body compactly. The SMPL model contains pose parameters $\boldsymbol{\theta} \in \mathbb{R}^{72}$ associated with human motion, formulated as the relative rotations of 23 joints with respect to their parent joints plus the global rotation of the root joint, and shape parameters $\boldsymbol{\beta} \in \mathbb{R}^{10}$, which control height, weight, and limb proportions. The translation parameters $\mathbf{t} \in \mathbb{R}^3$ are used when the global position is needed. The SMPL model deforms a template triangulated mesh with 6890 vertices based on the pose and shape parameters, which is formulated as $\mathbf{V} = \mathcal{M}(\boldsymbol{\theta}, \boldsymbol{\beta})$.
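For readers unfamiliar with SMPL, the snippet below sketches how $\mathbf{V} = \mathcal{M}(\boldsymbol{\theta}, \boldsymbol{\beta})$ can be evaluated with the third-party `smplx` package; the model path and the use of `smplx` (rather than the authors' code) are assumptions for illustration.

```python
import torch
import smplx  # assumes a local SMPL model file under "models/"

model = smplx.create("models", model_type="smpl", gender="neutral", batch_size=1)

theta = torch.zeros(1, 72)   # pose: root orientation (3) + 23 joint rotations (69), axis-angle
beta = torch.zeros(1, 10)    # shape coefficients
t = torch.zeros(1, 3)        # global translation

output = model(global_orient=theta[:, :3], body_pose=theta[:, 3:],
               betas=beta, transl=t, return_verts=True)
vertices = output.vertices   # (1, 6890, 3), i.e. V = M(theta, beta)
joints = output.joints       # 3D joint locations derived from the mesh
```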
|
| 24 |
+
|
| 25 |
+
Long-range motion capture has great potential in various applications, such as immersive VR/AR experiences and action quality assessment. In this paper, we propose the first long-range LiDAR-based motion capture dataset, LiDARHuman26M.
|
| 26 |
+
|
| 27 |
+
**Data Acquisition.** We collect data in two scenarios, as shown in Fig. 2. The first scene is a patio, which supports human capture at far distances. The second scene is an open space between two buildings, supporting a large capturing pitch angle to avoid self-occlusion. The setup details of the collection equipment are shown in Tab. 1.
|
| 28 |
+
|
| 29 |
+
We recruit 13 volunteers (including 11 males and 2 females) to participate in data collection, all of whom signed consent forms. The capture duration for each volunteer varies from 15 to 30 minutes. The distance distribution is shown in Tab. 2.
|
| 30 |
+
|
| 31 |
+
<span id="page-3-6"></span><span id="page-3-1"></span>
|
| 32 |
+
|
| 33 |
+
| Scene | Range | Height |
|
| 34 |
+
|-------------|--------|--------|
|
| 35 |
+
| The scene 1 | 12-28m | 5m |
|
| 36 |
+
| The scene 2 | 14-24m | 7m |
|
| 37 |
+
|
| 38 |
+
Table 1. The setup details of equipment used in two scenes.
|
| 39 |
+
|
| 40 |
+
<span id="page-3-2"></span>
|
| 41 |
+
|
| 42 |
+
| Dist(m) | 11-13 | 14-16 | 17-19 | 20-22 | 23-25 | 26-28 |
|
| 43 |
+
|----------|-------|-------|-------|-------|-------|-------|
|
| 44 |
+
| Ratio(%) | 0.7 | 31.4 | 47.2 | 17.4 | 2.4 | 0.9 |
|
| 45 |
+
|
| 46 |
+
Table 2. Distance distribution in the dataset.
|
| 47 |
+
|
| 48 |
+
<span id="page-3-3"></span>
|
| 49 |
+
|
| 50 |
+
| Dataset | Frames | Data Source | Long-range? | IMU? | Video? | Real? | Scene |
|
| 51 |
+
|----------------|--------|-------------|-------------|------|--------|-------|---------|
|
| 52 |
+
| Human3.6M [23] | 3.6M | Image | N | Y | Y | Y | Indoor |
|
| 53 |
+
| HumanEva [47] | 80.0K | Image | N | Y | N | Y | Indoor |
|
| 54 |
+
| 3DPW [57] | 51.0K | Image | N | Y | Y | Y | Outdoor |
|
| 55 |
+
| SURREAL [54] | 6.5M | Image | N | N | Y | N | Indoor |
|
| 56 |
+
| PedX [29] | 10.1K | Point Cloud | Y | N | Y | Y | Outdoor |
|
| 57 |
+
| LiDARHuman26M | 184.0K | Point Cloud | Y | Y | Y | Y | Outdoor |
|
| 58 |
+
|
| 59 |
+
Table 3. Statistics and characteristics of related datasets.
|
| 60 |
+
|
| 61 |
+
<span id="page-3-0"></span>
|
| 62 |
+
|
| 63 |
+
Figure 2. Two scenes for data acquisition.
|
| 64 |
+
|
| 65 |
+
In summary, LiDARHuman26M provides 184,048 frames, 26,414,383 points, and 20 kinds of daily motions (including walking, swimming, running, phoning, bowing, etc.). It consists of three modalities: synchronous LiDAR point clouds, RGB images, and ground-truth 3D human motions from professional IMU-based mocap devices. We preprocessed the data by erasing the background and eliminating the localization error of the IMUs. Details are given in the supplementary materials.
|
| 66 |
+
|
| 67 |
+
**Data Characteristic.** Tab. 3 presents statistics of our dataset in comparison to other publicly available 3D human pose datasets. Our LiDARHuman26M dataset has the following features: First, our dataset contains many long-range (up to 28 meters away) human motions, while image datasets usually have a limited capturing distance. Although 3DPW improves on this aspect, most of its annotated data still focuses on people nearby. Second, our dataset covers up to 20 daily motions, while HumanEva has only six motions and PedX mainly focuses on walking. Third, our dataset covers three different modalities, including point clouds, RGB videos, and the mocap ground truth provided by IMUs. Current image-based datasets do not provide depth information, which is essential for long-range motion capture. SURREAL projects 3D SMPL meshes onto images, so the rendered images are not photo-realistic. PedX provides pseudo labels for 3D motions through optimization of LiDAR points along with 2D labels.
|
| 68 |
+
|
| 69 |
+
**Challenge.** The long-range characteristic of LiDARHuman26M causes sparsity. As shown in Fig. 3, the number of points on one person varies greatly, ranging from 30 points to 450 points. Furthermore, this sparsity manifests as
|
| 70 |
+
|
| 71 |
+
<span id="page-3-4"></span>
|
| 72 |
+
|
| 73 |
+
Figure 3. Different human poses with the distance to LiDAR increasing.
|
| 74 |
+
|
| 75 |
+
whole-body sparsity and partially missing regions. When the human body moves further from the LiDAR, the number of points that fall on the body drops significantly, leaving insufficient information to describe the motion. Two different actions may have similar point cloud distributions at low resolution. For example, when capturing at a 12 m distance, the direction of the human head relative to the body is clear, and the data ensures a good alignment between the captured motion and the rough outline. At 24 m and 27 m capture distances there are only one or two points on the human head, which is insufficient to determine the head orientation. Meanwhile, more parts of the body will inevitably be missing as the distance increases. For example, when capturing at a 27 m distance, the arm is missing, leading to a loss of elbow rotation. The likely reasons are body self-occlusion and overly sparse points caused by the large capture distance.
|
| 76 |
+
|
| 77 |
+
We propose LiDARCap (shown in Fig. 4), a marker-less, long-range, and data-driven method for 3D human motion capture using LiDAR point clouds. Trained on LiDARHuman26M, LiDARCap takes point cloud sequences from a monocular LiDAR sensor as input and outputs 3D human motion sequences.
|
| 78 |
+
|
| 79 |
+
**Preprocessing.** Given an input LiDAR point cloud sequence $\mathcal{P} = \{\mathbf{P}^{(t)}|t=1...T\}$ of $T$ frames, each frame contains an arbitrary number of points $\mathbf{P}^{(t)} = \{\mathbf{p}_i^{(t)}|i=1...n_t\}$. We fix the number of points to 512 by sampling or repeating points, yielding a uniform down-sampling operation.
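A minimal sketch of this fixed-size resampling step, assuming per-frame point arrays of shape $(n_t, 3)$:

```python
import numpy as np

def resample_points(points: np.ndarray, n: int = 512) -> np.ndarray:
    """Fix a frame's point cloud (n_t, 3) to exactly `n` points by random
    sampling (if there are too many) or repeating points (if too few)."""
    n_t = points.shape[0]
    if n_t >= n:
        idx = np.random.choice(n_t, n, replace=False)
    else:
        idx = np.concatenate([np.arange(n_t),
                              np.random.choice(n_t, n - n_t, replace=True)])
    return points[idx]
```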
|
| 80 |
+
|
| 81 |
+
**Temporal Encoder.** In this step, we leverage PointNet++ [44] as the backbone to extract a 1024-dim global descriptor $\mathbf{f}^{(t)}$ for each point cloud frame $\mathbf{P}^{(t)}$ .
|
| 82 |
+
|
| 83 |
+
In addition, to fuse temporal information, the frame-wise features $\mathbf{f}^{(t)}$ are fed into a bidirectional GRU (bi-GRU) to generate hidden variables $\mathbf{g}^{(t)}$. At the end of this module, we use $\mathbf{g}^{(t)}$ as input to MLP decoders to predict the corresponding joint locations $\hat{\mathbf{J}}^{(t)} \in \mathbb{R}^{24 \times 3}$. Here, the loss $\mathcal{L}_{\mathcal{J}}$ of the temporal encoder is formulated as:
|
| 84 |
+
|
| 85 |
+
$$\mathcal{L}_{\mathcal{J}} = \sum_{t} \|\mathbf{J}_{GT}^{(t)} - \hat{\mathbf{J}}^{(t)}\|_{2}^{2}$$
|
| 86 |
+
(1)
|
| 87 |
+
|
| 88 |
+
<span id="page-4-2"></span><span id="page-4-0"></span>
|
| 89 |
+
|
| 90 |
+
Figure 4. The pipeline of our method with a point cloud sequence as the input consists of a temporal encoder, an inverse kinematic solver, and an SMPL optimizer. T represents the length of the sequence, and N represents the number of points.
|
| 91 |
+
|
| 92 |
+
where $\mathbf{J}_{GT}^{(t)}$ are the ground-truth joint locations of the $t$-th frame.
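A hedged sketch of the temporal encoder described above is given below; the hidden size and decoder widths are assumptions, and the PointNet++ per-frame descriptors are taken as given.

```python
import torch
import torch.nn as nn

class TemporalEncoder(nn.Module):
    """Bi-GRU over per-frame global descriptors f^(t) (e.g. from PointNet++),
    followed by an MLP decoder regressing 24 joint locations per frame."""
    def __init__(self, feat_dim=1024, hidden=1024):
        super().__init__()
        self.gru = nn.GRU(feat_dim, hidden, batch_first=True, bidirectional=True)
        self.decoder = nn.Sequential(nn.Linear(2 * hidden, 512), nn.ReLU(),
                                     nn.Linear(512, 24 * 3))

    def forward(self, f):          # f: (B, T, 1024) frame-wise features
        g, _ = self.gru(f)         # g: (B, T, 2 * hidden) hidden variables
        joints = self.decoder(g)   # (B, T, 72)
        return joints.view(f.shape[0], f.shape[1], 24, 3)

# Joint loss (Eq. 1): squared L2 distance to ground-truth joints, e.g.
# loss_j = ((joints_pred - joints_gt) ** 2).sum(dim=(-1, -2)).mean()
```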
|
| 93 |
+
|
| 94 |
+
**Inverse Kinematics Solver.** ST-GCN [67] is adopted as the backbone here to extract features of the predicted joints in a graph-structured manner. We concatenate the frame-wise global feature with each joint to generate the complete joint features $\mathbf{Q}^{(t)} \in \mathbb{R}^{24 \times (3+1024)}$ as the graph nodes. The output of ST-GCN is subsequently fed into a regressor to compute the joint rotations $\mathbf{R}_{6D}^{(t)} \in \mathbb{R}^{K \times 6}$. The 6D rotations are mapped to the final axis-angle format when the loss is computed. We choose the 6D rotation representation as the intermediate result for its better continuity, as demonstrated in [73].
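For reference, the 6D-to-rotation-matrix mapping of [73] can be sketched as below (a generic implementation, not the authors' code); the conversion onward to axis-angle for the loss is omitted.

```python
import torch
import torch.nn.functional as F

def rot6d_to_matrix(x: torch.Tensor) -> torch.Tensor:
    """Map a 6D rotation representation (..., 6) to rotation matrices (..., 3, 3)
    via Gram-Schmidt orthogonalization, following Zhou et al. [73]."""
    a1, a2 = x[..., :3], x[..., 3:]
    b1 = F.normalize(a1, dim=-1)
    b2 = F.normalize(a2 - (b1 * a2).sum(-1, keepdim=True) * b1, dim=-1)
    b3 = torch.cross(b1, b2, dim=-1)
    return torch.stack([b1, b2, b3], dim=-2)
```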
|
| 95 |
+
|
| 96 |
+
The loss of this module $\mathcal{L}_{\Theta}$ is formulated as:
|
| 97 |
+
|
| 98 |
+
$$\mathcal{L}_{\Theta} = \sum_{t} \|\boldsymbol{\theta}_{GT}^{(t)} - \hat{\boldsymbol{\theta}}^{(t)}\|_{2}^{2}$$
|
| 99 |
+
(2)
|
| 100 |
+
|
| 101 |
+
where $\boldsymbol{\theta}_{GT}^{(t)}$ are the ground-truth pose parameters of the $t$-th frame.
|
| 102 |
+
|
| 103 |
+
**SMPL Optimizer.** We put an SMPL Optimizer module at the last stage to further improve the regression on $\boldsymbol{\theta}$. The joint rotations are fed into an off-the-shelf SMPL model to obtain the 24 joints on the SMPL mesh. An $\mathcal{L}_2$ loss between the predicted joints and the ground-truth ones is used again in this module to increase the accuracy of the regressed $\boldsymbol{\theta}$ from the last stage. The only difference is that the joints in the first stage are regressed directly through an MLP-based decoder, whereas here the joints are sampled from the parametric mesh determined by $\boldsymbol{\theta}$.
|
| 104 |
+
|
| 105 |
+
The loss of this module $\mathcal{L}_{\mathcal{J}_{SMPL}}$ is formulated as:
|
| 106 |
+
|
| 107 |
+
$$\mathcal{L}_{\mathcal{J}_{SMPL}} = \sum_{t} \|\mathbf{J}_{GT}^{(t)} - \hat{\mathbf{J}}_{SMPL}^{(t)}\|_{2}^{2}$$
|
| 108 |
+
(3)
|
| 109 |
+
|
| 110 |
+
where $\hat{\mathbf{J}}_{SMPL}^{(t)}$ are the joint locations sampled from the SMPL mesh parameterized by the pose parameters $\hat{\boldsymbol{\theta}}^{(t)}$.
|
| 111 |
+
|
| 112 |
+
This step provides stronger constraints on the regression of $\theta$ in a geometrically intuitive way. The ablation experiment is conducted to demonstrate its necessity, and more details can be seen in Sec. 4.2.
|
| 113 |
+
|
| 114 |
+
To sum up, our pipeline can be trained end-to-end by optimizing the unified loss function $\mathcal{L}$ formulated as:
|
| 115 |
+
|
| 116 |
+
$$\mathcal{L} = \mathcal{L}_{\mathcal{J}} + \mathcal{L}_{\Theta} + \mathcal{L}_{\mathcal{J}_{SMPL}} \tag{4}$$
|
| 117 |
+
|
| 118 |
+
**Training details.** We train our method for 200 epochs with the Adam optimizer [30] and set the dropout ratio to 0.5 for the GRU layers and the ST-GCN module. We apply a batch normalization layer after every convolutional layer except the final output layer before the decoder. Training uses one NVIDIA GeForce RTX 3090 graphics card. The batch size is set to 8, the learning rate to $1 \times 10^{-4}$, and the decay rate to $1 \times 10^{-4}$. The network architectures involved in the evaluation section are trained using the most suitable learning rate until convergence. We train our method on the proposed LiDARHuman26M dataset; experiment details are provided in Sec. 4.
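A minimal optimizer configuration consistent with these settings is sketched below; interpreting the decay rate as Adam weight decay, and the stand-in module, are assumptions.

```python
import torch

# Stand-in module; the real model is the three-stage pipeline described above.
model = torch.nn.GRU(1024, 1024, batch_first=True, bidirectional=True)
# "Decay rate" is read here as Adam weight decay, which is an assumption.
optimizer = torch.optim.Adam(model.parameters(), lr=1e-4, weight_decay=1e-4)
```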
|
2203.15266/main_diagram/main_diagram.drawio
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2203.15266/paper_text/intro_method.md
ADDED
|
@@ -0,0 +1,86 @@
| 1 |
+
# Introduction
|
| 2 |
+
|
| 3 |
+
Large-scale data and annotations are crucial for successful deep learning [\[22\]](#page-8-0). However, in many real-world problems, annotations are very labor-intensive and expensive to acquire [\[8\]](#page-8-1). Annotation costs rise even further when handling numerous tiny objects such as in remote sensing [\[7,](#page-8-2) [15,](#page-8-3) [33\]](#page-9-0), extreme weather research [\[24\]](#page-8-4), and microscope image analysis [\[12,](#page-8-5) [16\]](#page-8-6). These settings often require highly-skilled annotators and accordingly high compensation. For instance, cell annotation in Computational Pathology requires expert physicians (pathologists), whose training involves several years of clinical residency [\[3,](#page-8-7) [31\]](#page-9-1). Reducing the cost and effort for these annotators would directly enable the collection of new large-scale tiny-object datasets, and contribute to higher model performance.
|
| 4 |
+
|
| 5 |
+
Several prior works have been proposed to reduce annotation cost in other tasks. Interactive segmentation methods [\[23,](#page-8-8)[35\]](#page-9-2) focus on reducing the number of interactions in the segmentation of a *single* foreground object, which can be classified as a "many interactions to one instance" approach. However, tiny-object annotation can benefit from a "many interactions to many instances" approach, as one image can contain many instances. Object counting methods [\[4,](#page-8-9) [26\]](#page-8-10) count multiple instances from a few user clicks and do follow a "many interactions to many instances" approach. However, these methods highlight only objects of the *same class* as the one being counted and thus can be classified as a "one class to one class" approach. Yet images with tiny objects are often composed of objects from multiple classes, so tiny-object annotation should implement a "many classes to many classes" approach.
|
| 6 |
+
|
| 7 |
+
To address the above needs, we propose C3Det, an effective interactive annotation framework for tiny object detection. [Fig. 1](#page-0-0) shows how a user interacts with C3Det to create bounding-boxes of numerous tiny objects from multiple classes. Once a user clicks on a few objects and provides their class information, C3Det takes those as inputs
|
| 8 |
+
|
| 9 |
+
<span id="page-1-1"></span><span id="page-1-0"></span>
|
| 10 |
+
|
| 11 |
+
Figure 2. The *global context* of user inputs matter. "Late-Fusion" does not consider the global context and can miss far away objects (red dotted lines) from user inputs (marked as circles). C3Det captures the global context well and can detect far away objects.
|
| 12 |
+
|
| 13 |
+
and detects bounding boxes of many objects, even including object classes that the user did not specify. The user repeats this process until the annotation is complete. By utilizing user inputs in the "many interactions to many instances" and "many classes to many classes" way, C3Det can significantly speed up annotation.
|
| 14 |
+
|
| 15 |
+
A key aspect of our approach is in making each user click influence objects that are nearby (local context) as well as far away (global context). To encourage the annotator-specified class to be consistent with model predictions, we insert user inputs (in heatmap form) at an intermediate stage in the model (late-fusion) and apply a class-consistency loss between user input and model predictions. This alone can capture local context well, but may miss far away objects. We therefore introduce the C3 (Class-wise Collated Correlation) module, a novel feature-correlation scheme that communicates local information to far away objects (see [Fig. 2\)](#page-1-0), allowing us to learn many-to-many instance-wise relations while retaining class information. Through extensive experiments, we show that these components, combined, result in significant performance improvements.
|
| 16 |
+
|
| 17 |
+
To validate whether our performance improvements translate to lower annotation cost in the real-world, we perform a user study with 10 human annotators. Our approach, C3Det, when combined with further manual bounding box corrections, is shown to be 2.85× faster and yield only 0.36× task load (NASA-TLX) compared to manual annotation, achieving the same or even better annotation quality as measured against the ground-truth. This verifies that C3Det not only shows improvements in simulated experiments, but also reduces annotation cost in the real-world.
|
| 18 |
+
|
| 19 |
+
In summary, we make the following contributions: (a) we address the problem of multi-class and multi-instance interactive annotation of tiny objects, (b) we introduce a training data synthesis and an evaluation procedure for this setting, (c) we propose a novel architecture for interactive tiny-object detection that considers both local and global implications of provided user inputs, and finally (d) our experimental results and user study verify that our method reduces annotation cost while achieving high annotation quality.
|
| 20 |
+
|
| 21 |
+
# Method
|
| 22 |
+
|
| 23 |
+
In this section, we describe the proposed method. First, we introduce the overall architecture of C3Det. Next, we describe a training data synthesis procedure for multi-class and multi-instance interactive object detection. Finally, we describe each component of C3Det: the Late Fusion Module (LF), the Class-wise Collated Correlation Module (C3), and User-input Enforcing Loss (UEL).
|
| 24 |
+
|
| 25 |
+
C3Det detects objects in a given image guided by a few user inputs, and outputs bounding boxes and the class of as many objects as possible, including those that are not specified by such inputs. We denote the input image as I, and the number of user inputs as K.
|
| 26 |
+
|
| 27 |
+
Each user input is denoted as $(\boldsymbol{u}_k^{pos}, u_k^{cls})$, where $k$ is the index of the user input, $\boldsymbol{u}_k^{pos}$ defines a 2D position, and $u_k^{cls} \in \{1 \dots C\}$ is the object class. At inference time, $(\boldsymbol{u}_k^{pos}, u_k^{cls})$ is provided by a user, while at training and validation time, it follows the center point and class of a chosen ground-truth bounding box. Before passing the user inputs to the model, we convert each input $(\boldsymbol{u}_k^{pos}, u_k^{cls})$ into a heatmap $\boldsymbol{U}_k$ by placing a 2D Gaussian centered at $\boldsymbol{u}_k^{pos}$ with a predefined standard deviation $\sigma_{\text{heatmap}}$.
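A simple sketch of rendering one click as a Gaussian heatmap (the default value of $\sigma_{\text{heatmap}}$ below is an arbitrary placeholder):

```python
import numpy as np

def click_to_heatmap(pos, shape, sigma=10.0):
    """Render a user click (x, y) as a 2D Gaussian heatmap of size
    (height, width); `sigma` plays the role of sigma_heatmap in the text."""
    h, w = shape
    ys, xs = np.mgrid[0:h, 0:w]
    x0, y0 = pos
    return np.exp(-((xs - x0) ** 2 + (ys - y0) ** 2) / (2.0 * sigma ** 2))
```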
|
| 28 |
+
|
| 29 |
+
The input image I is first forwarded through a CNN feature extractor to yield a feature map $F_{\rm I}$ . Separately, the user input heatmaps, $U_{1...K}$ , are passed to the LF and C3 modules, which utilize user inputs in local and global manners,
|
| 30 |
+
|
| 31 |
+
<span id="page-2-1"></span><sup>&</sup>lt;sup>1</sup>C3Det responses takes just a few seconds on our user study GUI.
|
| 32 |
+
|
| 33 |
+
<span id="page-3-3"></span>respectively. The outputs of these modules, $F_{\rm LF}$ and $F_{\rm C3}$ , are then concatenated to $F_{\rm I}$ before passing on to the next layers (see Fig. 3).
|
| 34 |
+
|
| 35 |
+
As C3Det only modifies the outputs of the backbone network, it is applicable to both one-stage and two-stage architectures. In the case of Faster R-CNN [27] and RetinaNet [19], for example, the concatenated outputs are passed on to the region proposal network (RPN) and to the classification and box regression subnets, respectively.
|
| 36 |
+
|
| 37 |
+
During training, we simulate the user inputs based on ground-truth annotations. First, we randomly sample a target number of user inputs from a uniform distribution $N_u \sim \mathcal{U}_{[0,20]}$ . While we define the uniform distribution to extend to 20 only, this hyper-parameter can be adjusted as necessary. We then sample $K = \min{(N_u, N_a)}$ objects (without replacement) from the ground-truth, where $N_a$ denotes the number of available objects for the current sample. The object centers and class indices are then passed on to C3Det as user inputs.
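This simulation procedure might be sketched as follows, assuming ground-truth boxes in $(x_1, y_1, x_2, y_2)$ format:

```python
import numpy as np

def simulate_user_inputs(gt_boxes, gt_classes, max_inputs=20):
    """Draw N_u ~ U[0, 20], then sample K = min(N_u, N_a) ground-truth objects
    without replacement; return their centers and classes as simulated inputs.

    gt_boxes: (N_a, 4) array of [x1, y1, x2, y2]; gt_classes: (N_a,) class ids.
    """
    n_u = np.random.randint(0, max_inputs + 1)
    k = min(n_u, len(gt_boxes))
    if k == 0:
        return np.empty((0, 2)), gt_classes[:0]
    idx = np.random.choice(len(gt_boxes), k, replace=False)
    centers = np.stack([(gt_boxes[idx, 0] + gt_boxes[idx, 2]) / 2,
                        (gt_boxes[idx, 1] + gt_boxes[idx, 3]) / 2], axis=1)
    return centers, gt_classes[idx]
```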
|
| 38 |
+
|
| 39 |
+
When incorporating user input heatmaps to the network, two common approaches in interactive segmentation are early-fusion [14, 30, 34, 35] and late-fusion methods [2, 25, 37]. Early-fusion methods concatenate user-input heatmaps to the input image, while late-fusion methods inject user-input heatmaps to an intermediate layer in the network, with [37] or without [2, 25] processing the heatmaps with CNN layers. Prior insights show that late-fusion outperforms early-fusion [25, 37], and we find that this is also the case for interactive tiny-object detection.
|
| 40 |
+
|
| 41 |
+
To handle a varying number of user inputs, while maintaining the class information of the given inputs, we group the K user input heatmaps by class, then apply a pixel-wise max operation to each group to yield C heatmaps. For the case where no inputs are provided for an object class, we simply pass a heatmap filled with zeros. The heatmaps are passed to the LF module (a CNN-based feature extractor such as ResNet-18) that outputs feature maps $F_{\rm LF}$ .
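A short sketch of this class-wise collation (tensor shapes are assumptions):

```python
import torch

def collate_heatmaps_by_class(heatmaps, classes, num_classes):
    """Group K user-input heatmaps (K, H, W) by class and take a pixel-wise max,
    yielding C heatmaps (C, H, W); classes with no inputs stay all-zero."""
    k, h, w = heatmaps.shape
    out = heatmaps.new_zeros(num_classes, h, w)
    for c in range(num_classes):
        mask = classes == c
        if mask.any():
            out[c] = heatmaps[mask].max(dim=0).values
    return out
```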
|
| 42 |
+
|
| 43 |
+
The LF module handles these heatmaps without any global pooling, and therefore does not lose any spatial information. For the local area around a user input $\boldsymbol{u}_k^{pos}$ , the predicted objects' class can be directly affected by the user input $u_k^{cls}$ . We can therefore consider the LF module as one that considers the *local context* of user inputs.
|
| 44 |
+
|
| 45 |
+
While understanding the local context can help in predicting the correct class for objects near to user inputs, objects
|
| 46 |
+
|
| 47 |
+
<span id="page-3-0"></span>
|
| 48 |
+
|
| 49 |
+
Figure 3. **Overall architecture.** User inputs are processed and considered at both local (by Late Fusion) and global (by Classwise Collated Correlation) context scales for multi-class multi-instance interactive tiny-object detection. The " $\oplus$ " symbol indicates channel-wise concatenation.
|
| 50 |
+
|
| 51 |
+
far away from user inputs must be impacted in a different way. Recently, in [26], a correlation operation between $F_{\rm I}$ and user input related features was used to improve object counting performance, using a few exemplars to count as many similar objects as possible in a given image. Similarly, we suggest to extract template features from $F_{\rm I}$ based on user inputs, perform correlation with $F_{\rm I}$ (see Fig. 4), and merge the correlation maps class-wise.
|
| 52 |
+
|
| 53 |
+
For each provided user input heatmap $\boldsymbol{U}_k$,<sup>2</sup> we perform the following to obtain a "template" vector,
|
| 54 |
+
|
| 55 |
+
$$T_{k}(i) = \sum_{x,y} F_{I}(i,x,y) U_{k}(x,y), \qquad (1)$$
|
| 56 |
+
|
| 57 |
+
where i refers to a channel index and x, y the column and row indices in $\mathbf{F}_{\mathrm{I}}$ and $\mathbf{U}_{k}$ . This template vector can then be used as follows to generate a correlation map $\mathbf{M}_{k}$ ,
|
| 58 |
+
|
| 59 |
+
$$\boldsymbol{M}_{k}(x,y) = \sum_{i} \boldsymbol{T}_{k}(i) \boldsymbol{F}_{I}(i,x,y). \tag{2}$$
|
| 60 |
+
|
| 61 |
+
Once K correlation maps are computed, we combine them class-wise based on $u_k^{cls}$ , via an element-wise $\max$ operation as defined by,
|
| 62 |
+
|
| 63 |
+
$$\mathbf{F}_{C3}(c, x, y) = \max{\{\mathbf{M}_k(x, y) | u_k^{cls} = c, \forall k \in [K]\}}, (3)$$
|
| 64 |
+
|
| 65 |
+
where c refers to a class index. Classes that do not have any associated user inputs are simply represented by a correlation map filled with zeros.
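Putting Eqs. (1)-(3) together, a compact sketch of the C3 module for a single image might look like this (single-image tensors without a batch dimension are assumed):

```python
import torch

def c3_features(f_i, heatmaps, classes, num_classes):
    """Class-wise Collated Correlation: extract a template per user input by a
    heatmap-weighted sum over F_I (Eq. 1), correlate it with F_I (Eq. 2), then
    collate the K correlation maps class-wise with a max (Eq. 3).

    f_i:      (D, H, W) backbone feature map
    heatmaps: (K, H, W) user-input heatmaps, resized to F_I and summing to 1
    classes:  (K,) class index of each user input
    """
    templates = torch.einsum('dhw,khw->kd', f_i, heatmaps)   # Eq. (1)
    corr = torch.einsum('kd,dhw->khw', templates, f_i)       # Eq. (2)
    out = corr.new_zeros(num_classes, *f_i.shape[1:])        # zeros for unused classes
    for c in range(num_classes):
        mask = classes == c
        if mask.any():
            out[c] = corr[mask].max(dim=0).values            # Eq. (3)
    return out
```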
|
| 66 |
+
|
| 67 |
+
This reduction allows us to produce C correlation maps to pass on to the next stages, no matter how many user inputs are provided. We describe this approach as a *correlate-then-collate* method, where each user input is handled independently. An intuitive alternative is a *collate-then-correlate* method, where user input heatmaps are combined by class first, to perform the correlation operation once per object class. The *collate-then-correlate* alternative may be more robust to the choice of user input, but also assumes
|
| 68 |
+
|
| 69 |
+
<span id="page-3-1"></span> $<sup>^2</sup>$ The heatmap is typically resized to match the size of $\emph{\textbf{F}}_{\rm I}$ and normalized such that it sums to 1.
|
| 70 |
+
|
| 71 |
+
<span id="page-4-5"></span><span id="page-4-0"></span>
|
| 72 |
+
|
| 73 |
+
Figure 4. **Procedure of generating a correlation map.** A template vector is extracted from a feature map based on a user-input. The correlation map is computed between the template vector and the feature map. GSP stands for "global sum pooling".
|
| 74 |
+
|
| 75 |
+
that each object class can be described by a single feature representation. In our ablation study (see Fig. 8c), we show that the *correlate-then-collate* method performs better, and thus choose it to define our C3 module.
|
| 76 |
+
|
| 77 |
+
The explicit correlation operation performed by the C3 module allows for local features to be compared across the entire image. This extends the effect of user inputs on the model's predictions beyond the considerations of the LF module. In other words, we can consider the C3 module as one that considers the *global context* of user inputs by learning many-to-many instance-wise relations.
|
| 78 |
+
|
| 79 |
+
When a user specifies an object to be of a certain class, C3Det should reflect this class in its predictions. Therefore, we propose to apply a training-time consistency loss between user inputs and model predictions through a User-input Enforcing Loss that enforces class-wise consistency.
|
| 80 |
+
|
| 81 |
+
For each simulated user input, $(\boldsymbol{u}_k^{pos}, u_k^{cls})$, we retrieve the associated ground-truth bounding box $\boldsymbol{y}_k^{bbox}$. We compare each of these ground-truth objects with all J predicted objects (indexed by $j \in \{1 \dots J\}$). Each prediction consists of a bounding box $\hat{\boldsymbol{y}}_j^{bbox}$ and class $\hat{\boldsymbol{y}}_j^{cls}$. To compute the loss, we check for a non-zero intersection-over-union (IoU) between every input-prediction pair, and apply a class-consistency loss. The full loss is formulated as,
|
| 82 |
+
|
| 83 |
+
$$\mathcal{L}_{\text{UEL}} = \sum_{j,k} \mathbb{1}_{IoU(\hat{y}_{j}^{bbox}, y_{k}^{bbox}) > 0} \cdot \ell(\hat{y}_{j}^{cls}, u_{k}^{cls})$$
|
| 84 |
+
(4)
|
| 85 |
+
|
| 86 |
+
where $\ell$ is a loss function such as the cross entropy loss or the focal loss, depending on the main task loss.
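A hedged sketch of this loss, using cross entropy and `torchvision`'s `box_iou`; the exact matching and weighting in the full system may differ.

```python
import torch
import torch.nn.functional as F
from torchvision.ops import box_iou

def user_input_enforcing_loss(pred_boxes, pred_logits, user_boxes, user_classes):
    """Eq. (4): for every (prediction, user input) pair with IoU > 0, apply a
    class-consistency loss between predicted class scores and the user's class."""
    iou = box_iou(pred_boxes, user_boxes)                  # (J, K) pairwise IoU
    j_idx, k_idx = torch.nonzero(iou > 0, as_tuple=True)
    if j_idx.numel() == 0:
        return pred_logits.new_zeros(())
    return F.cross_entropy(pred_logits[j_idx], user_classes[k_idx], reduction='sum')
```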
|
2205.06457/main_diagram/main_diagram.drawio
ADDED
|
@@ -0,0 +1 @@
| 1 |
+
<mxfile host="app.diagrams.net" modified="2021-11-29T06:32:30.463Z" agent="5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.45 Safari/537.36" etag="yEogFkz--0VLJ1sA7Fun" version="15.8.4" type="device"><diagram id="VSGBo4FPLrqWgdyTCYHy" name="Page-1">7Vpbc5s6EP41zLSZaYaLwfajb0k7p017mkzb85SRQTFqMKJY+NJffyQhgQDh2G4cJ22TmRitFmm1u9+3kmLDGc3XlylIwg84gJFhm8HacMaGTX+6Nv1gkk0u6VpCMEtRkIusUnCNfkIhNIU0QwFcVBQJxhFBSVXo4ziGPqnIQJriVVXtDkfVWRMwEzOapeDaBxFsqH1FAQlzac9VtN9CNAvlzJYpeuZAKgvBIgQBXikiZ2I4oxRjkj/N1yMYMedJv+TvXbT0FoalMCa7vHDpzS/A5+838y9ff6KrcPzdXHlvxCgLspELhgFdv2jilIR4hmMQTUrpMMVZHEA2qklbpc57jBMqtKjwOyRkI4IJMoKpKCTzSPRSg9PNN/E+b/zHGueubI7Xaud4I1p3KIpGOMIpt9S56/nQ96l8QVJ8D5Weac/tuOyNfHVsSa1Okx7AWeoLrfW1Ew5HyT/B7bfr7ueh8xG6mfQUAekMki16dhFaigmI55CugL6XwggQtKzaAURyzgq9Mn70QYRQH85tRi5BlImZDNuLCPMdpgtWA+39yLDseLPgoRpQBctL1mUnfZqJTz7KVAq+oBtXCqml04ZiKiWT2KekkCrKuSlSv5Z+ZXKxTFmFiMDrBPDIrCjDVBPp0ITQxH8JUwLXiqgZQslpHQHoTa29KvnBkqAPFW7wzF+PuhbE/QNA7GfpsnDz0RDdfRDSNBfEqJYn2krc+v3OyDw59O0m9LVxcJ8I+nomd06eBQ8Es2ey35cSTOupiFw/e/fk0Ty4Sv9WWeCdspzbpyjnZenWFfgx/M3KOU3IZ1bOey9qT75jAa9wUy3QgQt7QUcX6J49dTzvYOhr3dvZcSPfkjZPw/+dVuQHaFnHpyADv/BbiXcWAJ8dSesUsEL3KELxLAOcKFrQ7IE5w2g8XbCPQRzSaYyJZQwHxsBl3pjK1gWzYA2ZY7loaPSHiLVCMGfHeAQK7SHVZi2CRLPX9Yv36FAWe28uR+mTuhnUipFjDEyGx/wpnrHAA/5BUjnqOJ6dU8EVVfKMoSl6Mf+cZnJ8FzVMGdVnBM2F01HY1HTTZf6QYw1d5oNiZIvPtCyMNUYdKsyNkY5w/FoHaHhwWpjGrKgatiztjtla2zl8W46IktxaJpAUvFISBbVqScEiAbGUvYX5xc09TwnW8EHKRQw9hMA4YA85pMmGC3NBdcGKAerwuimjbJ0JxC4IyFIQE+aiYUbk6LkhYsJG0PeYKl8QjiKQLCBfCF/OitICI5p7GMul5lOGeJEgAqLzPeZ4/YCuLiRNPEtxhUV+qWbX+B9EaBbThk95mW4TDuH6vEfettmFiY2aruH/1jL/pn5st6xmne90NHW+UHx0kncbJK9hWEkOgmyogugdJ+xdhQA4B2b07324B/OqtPGEJMEJIQeIRAPNIo6dO5Y4khVQTtslUs9f75Xxf0Juu7UtrOU0U9vxNKntHiuzrUPulV/E8dXkP83YQ4tGv6uLfd/rOuCR97CSkp73JlZa2X5+3WvLWhRoPgIBi/vbGMxhIc6Vtuxmd+I5EeAHZkdxkpFbCllSm/458NAOuXicGuvVjtKu2+AhW1dinaPxkPYGRYkjzkhbIP+EiNUrR/HPytNFzP1dK0frxecd9PTXXEG3PzUPv/jU+/dFXH9Y7fcfugPNftXk0FvTJMS3MUPvYMtRUTVvWDm5m+LewmYb9S67KoiKq4G42J7zKkWdZjLUmaR6TZH3iBOBGHvINvR+KJt9eX6g77DB/s2K8wKb1y5GufnEdv5vRx+OfWfwieYL5ANd4XO+brQQZ/07OGdftCjXu4EgZZ3sWxpcmh+kI5pwTIzYEsaIAgX5JF/NSC7C9BHZ7Hc22HLffbrivQMdHOmAbFZrgaOpBcVluVoL+kerBd5OLLAFZp8GN+8mVze378ZnEnRVUQnBy8nVePL5rA7GUpzn2uByciazVTz/OkjffxxRqz5enekBq3bno6sSBciqvC25kZYABcYZ+aWz6SvL7hVz1Z5fNzGvQLzqXQH4VrLUXQ88tnEVi05qSUF7ZU5JAjypXYU5J7WiFV471ISTGq5i7pR2VOviX5cUOaWSZblVeEYuesp5t+6Q/m6F8lNx/R6j19wJua5mJ2QdcCymzfI7wLxP+Sa1M/kf</diagram></mxfile>
|
2205.06457/main_diagram/main_diagram.pdf
ADDED
|
Binary file (44.3 kB). View file
|
|
|