Eric03 committed
Commit cc75910 · verified · 1 Parent(s): 834666d

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes. See raw diff.

Files changed (50)
  1. 2006.10562/main_diagram/main_diagram.drawio +1 -0
  2. 2006.10562/paper_text/intro_method.md +123 -0
  3. 2006.11132/main_diagram/main_diagram.drawio +0 -0
  4. 2006.11132/paper_text/intro_method.md +102 -0
  5. 2007.06702/main_diagram/main_diagram.drawio +1 -0
  6. 2007.06702/main_diagram/main_diagram.pdf +0 -0
  7. 2007.06702/paper_text/intro_method.md +107 -0
  8. 2008.00441/main_diagram/main_diagram.drawio +1 -0
  9. 2008.00441/main_diagram/main_diagram.pdf +0 -0
  10. 2008.00441/paper_text/intro_method.md +40 -0
  11. 2009.10007/main_diagram/main_diagram.drawio +1 -0
  12. 2009.10007/main_diagram/main_diagram.pdf +0 -0
  13. 2009.10007/paper_text/intro_method.md +102 -0
  14. 2010.12007/main_diagram/main_diagram.drawio +0 -0
  15. 2010.12007/paper_text/intro_method.md +52 -0
  16. 2102.00405/main_diagram/main_diagram.drawio +1 -0
  17. 2102.00405/paper_text/intro_method.md +34 -0
  18. 2103.02062/main_diagram/main_diagram.drawio +1 -0
  19. 2103.02062/main_diagram/main_diagram.pdf +0 -0
  20. 2103.02062/paper_text/intro_method.md +53 -0
  21. 2103.15812/main_diagram/main_diagram.drawio +1 -0
  22. 2103.15812/paper_text/intro_method.md +44 -0
  23. 2104.00369/main_diagram/main_diagram.drawio +1 -0
  24. 2104.00369/main_diagram/main_diagram.pdf +0 -0
  25. 2104.00369/paper_text/intro_method.md +15 -0
  26. 2104.07098/main_diagram/main_diagram.drawio +0 -0
  27. 2104.07098/paper_text/intro_method.md +82 -0
  28. 2106.05087/main_diagram/main_diagram.drawio +1 -0
  29. 2106.05087/main_diagram/main_diagram.pdf +0 -0
  30. 2106.05087/paper_text/intro_method.md +143 -0
  31. 2106.08827/main_diagram/main_diagram.drawio +0 -0
  32. 2106.08827/paper_text/intro_method.md +89 -0
  33. 2106.12108/main_diagram/main_diagram.drawio +0 -0
  34. 2106.12108/paper_text/intro_method.md +258 -0
  35. 2110.05448/main_diagram/main_diagram.drawio +1 -0
  36. 2110.05448/main_diagram/main_diagram.pdf +0 -0
  37. 2110.05448/paper_text/intro_method.md +5 -0
  38. 2112.01565/main_diagram/main_diagram.drawio +1 -0
  39. 2112.01565/main_diagram/main_diagram.pdf +0 -0
  40. 2112.01565/paper_text/intro_method.md +95 -0
  41. 2112.05364/main_diagram/main_diagram.drawio +0 -0
  42. 2112.05364/paper_text/intro_method.md +126 -0
  43. 2202.03229/main_diagram/main_diagram.drawio +1 -0
  44. 2202.03229/paper_text/intro_method.md +75 -0
  45. 2203.15235/main_diagram/main_diagram.drawio +0 -0
  46. 2203.15235/paper_text/intro_method.md +93 -0
  47. 2204.02426/main_diagram/main_diagram.drawio +0 -0
  48. 2204.02426/main_diagram/main_diagram.pdf +0 -0
  49. 2204.02426/paper_text/intro_method.md +46 -0
  50. 2204.03541/main_diagram/main_diagram.drawio +0 -0
2006.10562/main_diagram/main_diagram.drawio ADDED
@@ -0,0 +1 @@
+ <mxfile host="app.diagrams.net" modified="2021-03-24T15:05:14.731Z" agent="5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.152 YaBrowser/21.2.2.102 Yowser/2.5 Safari/537.36" version="13.10.9" etag="0t9sr_w33cX6lpzcS5oW" type="google"><diagram id="4NqFUmhTfvI-QSf4jpYU">7V1dc6M2FP01fowHIcTH49q7bh+2M53JTPtMQLHpYuTK8sbpr68wks2HksUyRk6svAQJIUDncHW59yBP4Hy9/43Gm9UfJMX5xHXS/QR+nbhu4AHI/5U1r1WNG3lOVbOkWVrVgVPFY/YfFpWy2S5L8bbRkBGSs2zTrExIUeCENepiSslLs9kzyZtn3cRLcUbnVPGYxDnuNPs7S9lK1AKn1vx3nC1X4tQhEjue4uTHkpJdIc43ceHz4a/avY5lX6L9dhWn5KVWBb9N4JwSwqqt9X6O83Jw5bBVxy3e2Hu8booL1ucAtzrgZ5zvsLziw3WxVzkWL6uM4cdNnJTlFw74BM5WbJ3zEuCbh5vFqSgd76cs5PETzmfHEZmTnFC+qyAFLpsySn4ch7ds/0wKtojXWV7S5i9M07iIRbWgiAt5Oc6zZcELCb9HTI89ye75kKf+k4/88tAsz2v1i8V8ziGEM3HXmDK8f3PkwBEPTnRM1pjRV95EHPAQBAJDQXJJ8ZcaYSTMqxpXfFEXC44uj12fcOIbAio1bNDCpgtbhJqwAX9E3DyLmy5uvmMQN/Rr3E7AOE3EfgVLTNmXcspq1S2y8moOnR3KAhLEy7hIW0fwmlp7Xqq1/gcz9irK8Y4RXkUoW5ElKeL8OyEbySVdglRDgVM5pQpst2RHE9yYZvh9LDFrPAr9GeBMHQCjBgckLhTnMct+Ns+vwlt0/ifJ+DlPTcjz85ZfV5sQx2voxRHfcmRojsCzrQQnCZLepyDJg+z5JlgSDDgDOJfMAO6QM8BTiDzkqGYA6EJ3qBkgas4AbjDiDBBa3HRxQ45B3CJrlS+yymebYGfqtsxveC3ze2pzdO6DN6gmO6lmF3HYRYZchioss0wxa0xihWA8YgFLrIuI5XUdyeB8rgHoBC1DFt2QHwl6RO4sS85jSahhkfwwbLAEXviysVceMAxlhowa3pkPCxzfoBMLhowb3hlyoWcSuB6BQxvwVQJnMr0CesTyLGxK2FBkMEwPhoyu3RdukWcStx7RNevMvuPMynmm7s0CjZee286wABvMuwJN/LNdqptPsrg9YnPWX1aCGyKD7rLbI/RlcVPiBhyT7zkSJ2uZdS3z2VbYZJ4FBXwKqP1FauYNHxvvI3yzPLsqz0akWQBN0axHvM3S7D2aBV1H8yS1/jRJGKsLvAZPzvZ9PlYapo9Q0Dq5SqC90KSPa6V72rF8kwqwQaV79xUUBoFvLgnTR7pncVPjhkKD0Xw4ZPTtzoCLoEngrPLswk8Ywq47CzVee247DSN9a0uTAWkiZ7vPlIaBVnCmPREYlS1BqzfTBc51oEngbEDqQtN8thk2mYcB6C1V6vAxcWi/bzVNrTGZFbjjMatHYM0y670vqZyuPwk1Pq+68WwLtCrBK/BEYx2Gj5RtgUNGEe/LlQXeW1/TjrJUjtXwaQNndKUEr0cUkWOxKTeTHc1fZ5TjUNqj2VlR4fbYpxnFCctIccCRlvc521b7nSnqoFrWbDDN+P1hWp4zK5aCKQNA8ACcVggehl0MXBcpQIBDgNCN0U2n0w4O/AZZc4ybhBXPQ52roqrD9HK4siTOv4gd6yxNy9MoQW0+mXUkh1uerfUIOIrh9xSj3/ZztUZfFfjycybutYGB/++OyB0PFV+/8Aahs9kfRkLu51vL8v8io9uyh3W16mLVK7+gquOqze3jHA4FM4zcKWog7clgeQ3pMEJXQrobKbuv5wy47dz+qA9aj3jXHc41SJGnh0oQBplrVJGhgazdI05IkVpzJ7/Saxs7X/G0Xc/Y2c8rtdMCJpU0nlVAaeNmVEnjWQmUNnBGlTTytc6GKnVX51EIwz2NBMptK2mQFVxdgSZnL+N0+0oa1A3n6E8E9xUMdY0qaZCVQOkCB40qaZD9BPBC06yzml7LBI+nd3BHVNIgK9IyTa0xmTWikgZZjdaFzIq6/qT0vT6RkgZZxdUVeKKzoPUHUtIg+xMI2u8gRpU0yEqgtIEzqqTxu1HE+0ouu0aTy74VMqmSy0d/toaBdzUhk6+KfA2UXP4eWyVNbeHeoJVbDhS2bqjcMi+efuS0ckZOPyULv/0P</diagram></mxfile>
2006.10562/paper_text/intro_method.md ADDED
@@ -0,0 +1,123 @@
+ # Method
+
+ In this work we consider uncertainty estimation within the standard Bayesian ensemble-based framework [@galthesis; @malinin-thesis]. Here, model parameters $\bm{\theta}$ are considered random variables and a prior ${\tt p}(\bm{\theta})$ is placed over them to compute a posterior ${\tt p}(\bm{\theta}|\mathcal{D})$ via Bayes' rule:
+
+ $$\begin{equation}
+ \label{eqn:bayesposterior}
+ {\tt p}(\bm{\theta}|\mathcal{D}) = \frac{{\tt p}(\mathcal{D}|\bm{\theta})\,{\tt p}(\bm{\theta})}{{\tt p}(\mathcal{D})}\,,
+ \end{equation}$$
+
+ where $\mathcal{D}=\{\bm{x}^{(i)},y^{(i)}\}_{i=1}^N$ is the training dataset. Each set of parameters can be considered a hypothesis or explanation about how the world works. Samples from the posterior should yield explanations consistent with the observations of the world contained within the training data $\mathcal{D}$. However, on data far from $\mathcal{D}$ each set of parameters can yield different predictions. Therefore, estimates of *knowledge uncertainty* can be obtained by examining the diversity of predictions.
+
+ Consider an ensemble of probabilistic models $\{{\tt P}(y | \bm{x}; \bm{\theta}^{(m)})\}_{m=1}^M$ sampled from the posterior ${\tt p}(\bm{\theta}|\mathcal{D})$. Each model ${\tt P}(y|\bm{x}, \bm{\theta}^{(m)})$ yields a *different* estimate of *data uncertainty*, represented by the entropy of its predictive distribution [@malinin-thesis]. Uncertainty in predictions due to *knowledge uncertainty* is expressed as the level of spread, or "disagreement", of models in the ensemble [@malinin-thesis]. Note that exact Bayesian inference is often intractable, and it is common to consider either an explicit or implicit approximation ${\tt q}(\bm{\theta})$ to the true posterior ${\tt p}(\bm{\theta}|\mathcal{D})$. While a range of approximations has been explored for neural network models [@Gal2016Dropout; @deepensemble2017; @maddox2019simple][^2], to the best of our knowledge, limited work has explored Bayesian inference for gradient-boosted trees. Given ${\tt p}(\bm{\theta}|\mathcal{D})$, the *predictive posterior* of the ensemble is obtained by taking the expectation with respect to the models in the ensemble:
+
+ $$\begin{equation}
+ {\tt P}(y \,|\, \bm{x}, \mathcal{D}) = \mathbb{E}_{{\tt p}(\bm{\theta}|\mathcal{D})}\big[{\tt P}(y \,|\, \bm{x}; \bm{\theta})\big] \approx \frac{1}{M}\sum_{m=1}^M {\tt P}(y \,|\, \bm{x}; \bm{\theta}^{(m)}), \quad \bm{\theta}^{(m)} \sim {\tt p}(\bm{\theta}|\mathcal{D})\,.
+ \end{equation}$$
+
+ The entropy of the predictive posterior estimates *total uncertainty* in predictions:
+
+ $$\begin{equation}
+ \label{eqn:total-uncetainty}
+ \mathcal{H}\big[{\tt P}(y \,|\, \bm{x}, \mathcal{D})\big] = \mathbb{E}_{{\tt P}(y|\bm{x},\mathcal{D})}\big[-\ln {\tt P}(y \,|\, \bm{x}, \mathcal{D})\big]\,.
+ \end{equation}$$
+
+ *Total uncertainty* is due to both *data uncertainty* and *knowledge uncertainty*. However, in applications like active learning [@batchbald] and out-of-domain detection it is desirable to estimate *knowledge uncertainty* separately. The sources of uncertainty can be decomposed by considering the *mutual information* between the parameters $\bm{\theta}$ and the prediction $y$ [@mutual-information]:
+
+ $$\begin{equation}
+ \label{eqn:mi}
+ \underbrace{\mathcal{I}\big[y, \bm{\theta} \,|\, \bm{x}, \mathcal{D}\big]}_{\text{Knowledge Uncertainty}} = \underbrace{\mathcal{H}\big[{\tt P}(y \,|\, \bm{x}, \mathcal{D})\big]}_{\text{Total Uncertainty}} - \underbrace{\mathbb{E}_{{\tt p}(\bm{\theta}|\mathcal{D})}\big[\mathcal{H}\big[{\tt P}(y \,|\, \bm{x}; \bm{\theta})\big]\big]}_{\text{Expected Data Uncertainty}} \approx \mathcal{H}\Big[\frac{1}{M}\sum_{m=1}^M {\tt P}(y \,|\, \bm{x}; \bm{\theta}^{(m)})\Big] - \frac{1}{M}\sum_{m=1}^M \mathcal{H}\big[{\tt P}(y \,|\, \bm{x}; \bm{\theta}^{(m)})\big]\,.
+ \end{equation}$$
+
+ This is expressed as the difference between the entropy of the predictive posterior, a measure of *total uncertainty*, and the expected entropy of each model in the ensemble, a measure of *expected data uncertainty*. Their difference is a measure of ensemble diversity and estimates *knowledge uncertainty*.
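+
+ A minimal sketch of this decomposition for a classification ensemble, assuming each member's predictive distribution is available as a row of class probabilities (the function name and shapes are illustrative, not from the paper):
+
+ ```python
+ import numpy as np
+
+ def uncertainty_decomposition(probs):
+     """probs: (M, K) array of class probabilities from M ensemble members.
+     Returns (total, expected_data, knowledge) uncertainty estimates."""
+     eps = 1e-12
+     mean_probs = probs.mean(axis=0)  # predictive posterior (ensemble average)
+     total = -np.sum(mean_probs * np.log(mean_probs + eps))
+     expected_data = -np.mean(np.sum(probs * np.log(probs + eps), axis=1))
+     knowledge = total - expected_data  # mutual information estimate
+     return total, expected_data, knowledge
+
+ # Members that disagree yield high knowledge uncertainty:
+ print(uncertainty_decomposition(np.array([[0.9, 0.1], [0.1, 0.9]])))
+ ```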
+
+ Unfortunately, when considering ensembles of probabilistic *regression* models $\{{\tt p}(y | \bm{x}; \bm{\theta}^{(m)})\}_{m=1}^M$ over a continuous-valued target $y \in \mathbb{R}$, it is no longer possible to obtain tractable estimates of the (differential) entropy of the predictive posterior, and, by extension, the mutual information. In this case, uncertainty estimates can instead be derived via the law of total variation:
+
+ $$\begin{equation}
+ \label{eqn:law-total-variation}
+ \underbrace{\mathbb{V}_{{\tt p}(y|\bm{x},\mathcal{D})}\big[y\big]}_{\text{Total Uncertainty}} = \underbrace{\mathbb{E}_{{\tt p}(\bm{\theta}|\mathcal{D})}\big[\mathbb{V}_{{\tt p}(y|\bm{x};\bm{\theta})}[y]\big]}_{\text{Expected Data Uncertainty}} + \underbrace{\mathbb{V}_{{\tt p}(\bm{\theta}|\mathcal{D})}\big[\mathbb{E}_{{\tt p}(y|\bm{x};\bm{\theta})}[y]\big]}_{\text{Knowledge Uncertainty}}\,.
+ \end{equation}$$
+
+ This is conceptually similar to the decomposition [\[eqn:mi\]](#eqn:mi){reference-type="eqref" reference="eqn:mi"} obtained via mutual information. For an ensemble of probabilistic regression models which parameterize the normal distribution, where each model yields a mean and standard deviation, the total variance can be computed as follows:
+
+ $$\begin{equation}
+ \label{eqn:law-total-variation-example}
+ \mathbb{V}\big[y\big] \approx \underbrace{\frac{1}{M}\sum_{m=1}^M \sigma_m^2}_{\text{Expected Data Uncertainty}} + \underbrace{\frac{1}{M}\sum_{m=1}^M \Big(\mu_m - \frac{1}{M}\sum_{j=1}^M \mu_j\Big)^2}_{\text{Knowledge Uncertainty}}, \quad \{\mu_m, \sigma_m\} = f(\bm{x};\bm{\theta}^{(m)})\,.
+ \end{equation}$$
+
+ However, while these measures are tractable, they are based on only the first and second moments, and may therefore miss higher-order details in the uncertainty. They are also not scale-invariant, which can cause issues if the scale of predictions on in-domain and out-of-domain data is very different.
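+
+ A sketch of this variance-based decomposition for an ensemble of Gaussian regressors, assuming per-model means and standard deviations for a single input (names are illustrative):
+
+ ```python
+ import numpy as np
+
+ def regression_uncertainty(mu, sigma):
+     """mu, sigma: (M,) arrays of per-model means and standard deviations.
+     Returns (total, expected_data, knowledge) per the law of total variation."""
+     expected_data = np.mean(sigma ** 2)   # average predicted variance
+     knowledge = np.var(mu)                # spread of the predicted means
+     return expected_data + knowledge, expected_data, knowledge
+
+ # Models agree on sigma but disagree on mu -> knowledge uncertainty dominates:
+ print(regression_uncertainty(np.array([0.0, 2.0]), np.array([0.1, 0.1])))
+ ```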
+
+ Gradient boosting is a powerful machine learning technique especially useful on tasks containing heterogeneous features. It iteratively combines weak models, such as decision trees, to obtain more accurate predictions. Formally, given a dataset $\mathcal{D}$ and a loss function $L: \mathbb{R}^2 \rightarrow \mathbb{R}$, the gradient boosting algorithm [@friedman2001greedy] iteratively constructs a model $F:X \rightarrow \mathbb{R}$ to minimize the empirical risk $\mathcal{L}(F|\mathcal{D}) = \mathbb{E}_{\mathcal{D}} [L(F(\bm{x}), y)]$. At each iteration $t$ the model is updated as: $$\begin{equation}
+ \label{eq:update}
+ F^{(t)}(\bm{x}) = F^{(t-1)}(\bm{x}) + \epsilon h^{(t)}(\bm{x})\,,
+ \end{equation}$$ where $F^{(t-1)}$ is the model constructed at the previous iteration, $h^{(t)}(\bm{x}) \in \mathcal{H}$ is a weak learner chosen from some family of functions $\mathcal{H}$, and $\epsilon$ is the learning rate. The weak learner $h^{(t)}$ is usually chosen to approximate the negative gradient $- g^{(t)}(\bm{x},y):=-\frac{\partial L(y,s)}{\partial s}\big|_{s=F^{(t-1)}(\bm{x})}$: $$\begin{equation}
+ \label{eq:gradient_step}
+ h^{(t)} = \argmin_{h\in \mathcal{H}} \mathbb{E}_{\mathcal{D}}\big[\big(-g^{(t)}(\bm{x},y)- h(\bm{x})\big)^2 \big].
+ \end{equation}$$ A weak learner $h^{(t)}$ is associated with parameters $\bm{\phi}^{(t)} \in \mathbb{R}^{d}$. We write $h^{(t)}(\bm{x},\bm{\phi}^{(t)})$ to reflect this dependence. The set of weak learners $\mathcal{H}$ often consists of shallow decision trees, which are models that recursively partition the feature space into disjoint regions called leaves. Each leaf $R_j$ of the tree is assigned a value, which is the estimated response $y$ in the corresponding region. We can write $h(\bm{x},\bm{\phi}^{(t)}) = \sum_{j=1}^{d} \phi^{(t)}_j \mathbb{1}_{\{\bm{x}\in R_j\}}$, so the decision tree is a linear function of $\bm{\phi}^{(t)}$. The final GBDT model $F$ is a sum of decision trees [\[eq:update\]](#eq:update){reference-type="eqref" reference="eq:update"} and the parameters of the full model are denoted by $\bm{\theta}$.
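+
+ A minimal sketch of this update loop for the squared loss, where the negative gradient reduces to the residual $y - F^{(t-1)}(\bm{x})$; the helper names are ours, and scikit-learn trees stand in for the weak learners:
+
+ ```python
+ import numpy as np
+ from sklearn.tree import DecisionTreeRegressor
+
+ def fit_gbdt(X, y, T=100, eps=0.1, max_depth=3):
+     """Gradient boosting with squared loss: each tree h^(t) fits the
+     negative gradient, i.e. the residual y - F^(t-1)(x)."""
+     F = np.full(len(y), y.mean())      # F^(0): constant initial model
+     trees = []
+     for _ in range(T):
+         h = DecisionTreeRegressor(max_depth=max_depth).fit(X, y - F)
+         F = F + eps * h.predict(X)     # the update in eq:update
+         trees.append(h)
+     return y.mean(), trees
+
+ def predict_gbdt(init, trees, X, eps=0.1):
+     return init + eps * sum(h.predict(X) for h in trees)
+ ```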
+
+ For classification tasks, a model yields estimates of *data uncertainty* if it is trained via negative log-likelihood and provides a distribution over class labels. However, classic GBDT regression models yield point predictions, and there has been little research devoted to estimating predictive uncertainty. Recently, this issue was addressed by @ngboost via an algorithm called NGBoost (Natural Gradient Boosting), which allows estimating *data uncertainty*. NGBoost simultaneously estimates the parameters of a conditional distribution ${\tt p}(y|\bm{x},\bm{\theta})$ over the target $y$ given the features $\bm{x}$ by optimizing a proper scoring rule. Typically, a normal distribution over $y$ is assumed and the negative log-likelihood is taken as the scoring rule. Formally, given an input $\bm{x}$, the model $F$ predicts two parameters of a normal distribution: the mean $\mu$ and the logarithm of the standard deviation $\log \sigma$. The loss function is the expected negative log-likelihood:[^3]
+
+ $$\begin{equation}
+ \label{eq:ngboost}
+ {\tt p}(y \,|\, \bm{x}, \bm{\theta}^{(t)}) = \mathcal{N}\big(y \,|\, \mu^{(t)}, \sigma^{(t)}\big), \quad \{\mu^{(t)}, \log \sigma^{(t)}\} = F^{(t)}(\bm{x})\,,
+ \end{equation}$$
+ $$\begin{equation}
+ \label{eq:nll}
+ \mathcal{L}(\bm{\theta}|\mathcal{D}) = \mathbb{E}_{\mathcal{D}}\big[-\ln {\tt p}(y \,|\, \bm{x}, \bm{\theta})\big] = -\frac{1}{N}\sum_{i=1}^N \ln {\tt p}\big(y^{(i)} \,|\, \bm{x}^{(i)}, \bm{\theta}\big)\,.
+ \end{equation}$$
+
+ Note that $\bm \theta$ denotes the concatenation of the two parameter vectors used to predict $\mu$ and $\log \sigma$.
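+
+ For reference, a short sketch of this per-sample Gaussian negative log-likelihood, with the model predicting $\log \sigma$ so that optimization is unconstrained (the function name is illustrative):
+
+ ```python
+ import numpy as np
+
+ def gaussian_nll(y, mu, log_sigma):
+     """Negative log-likelihood of N(y | mu, sigma^2) per sample."""
+     sigma2 = np.exp(2.0 * log_sigma)
+     return 0.5 * np.log(2.0 * np.pi * sigma2) + (y - mu) ** 2 / (2.0 * sigma2)
+ ```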
+
+ As discussed in Section [2](#sec:background){reference-type="ref" reference="sec:background"}, *knowledge uncertainty* can be estimated by considering an ensemble of models $\{{\tt p}(y | \bm{x}; \bm{\theta}^{(m)})\}_{m=1}^M$ sampled from the posterior ${\tt p}(\bm{\theta}|\mathcal{D})$. The level of diversity or "disagreement" between the models is an estimate of *knowledge uncertainty*. In this section, we consider three approaches to generating an ensemble of GBDT models. We emphasize that this section discusses *ensembles of GBDT models*, where *each* GBDT model is itself an *ensemble of trees*.
+
+ **SGB ensembles** One way to generate an ensemble is to consider several independent models generated via Stochastic Gradient Boosting (SGB). Stochasticity is added to GBDT models via random subsampling of the data at every iteration [@friedman2002stochastic]. Specifically, at each iteration of [\[eq:gradient_step\]](#eq:gradient_step){reference-type="eqref" reference="eq:gradient_step"} we select a subset of training objects $\mathcal{D}'$ (via bootstrap or uniformly without replacement), which is smaller than the original training dataset $\mathcal{D}$, and use $\mathcal{D}'$ to fit the next tree instead of $\mathcal{D}$. The fraction of chosen objects is called *sample rate*. This implicitly injects noise into the learning process, effectively inducing a distribution ${\tt q}(\bm{\theta})$ over such models. Thus, an *SGB ensemble* is an ensemble of independent models $\{\bm{\theta}^{(m)}\}_{m=1}^M$ built according to SGB with different random seeds for sub-sampling data. Unfortunately, there are no guarantees on how well the distribution ${\tt q}(\bm{\theta})$ estimates the true posterior ${\tt p}(\bm{\theta}|\mathcal{D})$.
+
+ **SGLB ensembles** Remarkably, there is a way to sample GBDT models from the true posterior ${\tt p}(\bm{\theta}|\mathcal{D})$ via the recently proposed Stochastic Gradient Langevin Boosting (SGLB) algorithm [@SGLB]. SGLB combines gradient boosting with stochastic gradient Langevin dynamics [@DBLP:journals/corr/RaginskyRT17] in order to achieve convergence to the global optimum even for non-convex loss functions. The algorithm has two differences compared with SGB. First, Gaussian noise is explicitly injected into the gradients, so [\[eq:gradient_step\]](#eq:gradient_step){reference-type="eqref" reference="eq:gradient_step"} is replaced by: $$\begin{equation}
+ \label{eq:sglb_gradient_step}
+ h^{(t)} = \argmin_{h\in \mathcal{H}} \mathbb{E}_\mathcal{D}\left[\left (-g^{(t)}(\bm{x},y)- h(\bm{x}, \bm{\phi})+\nu\right )^2\right], \quad \nu \sim \mathcal{N}\left(0, \frac{2}{\beta \epsilon}I_{|\mathcal{D}|}\right)\,,
+ \end{equation}$$ where $\beta$ is the inverse diffusion temperature and $I_{|\mathcal{D}|}$ is an identity matrix. This random noise $\nu$ helps to explore the solution space in order to find the global optimum, and the diffusion temperature controls the level of exploration. Second, the update [\[eq:update\]](#eq:update){reference-type="eqref" reference="eq:update"} is modified as: $$\begin{equation}
+ \label{eq:update_sglb}
+ F^{(t)}(\bm{x}) = (1 - \gamma \epsilon) F^{(t-1)}(\bm{x}) + \epsilon h^{(t)}(\bm{x}, \bm{\phi}^{(t)})\,,
+ \end{equation}$$ where $\gamma$ is a regularization parameter. If the number of all possible trees is finite (a natural assumption given that the training dataset is finite), then the SGLB parameters $\bm{\theta}^{(t)}$ at each iteration form a Markov chain that weakly converges to the stationary distribution, also called the invariant measure: $$\begin{equation}
+ \label{eq:invariant_measure}
+ p^{*}_\beta(\bm{\theta}) \propto \exp(-\beta \mathcal{L}(\bm{\theta} | \mathcal{D}) - \beta \gamma \|\Gamma \bm{\theta}\|_2^2)\,,
+ \end{equation}$$ where $\Gamma = \Gamma^T > 0$ is an implicitly defined regularization matrix which depends on the particular tree construction algorithm [@SGLB].
+
+ While [@SGLB] used the weak convergence to [\[eq:invariant_measure\]](#eq:invariant_measure){reference-type="eqref" reference="eq:invariant_measure"} to prove global convergence, we apply it to enable sampling from the true posterior. For this purpose, we set $\beta = |\mathcal{D}|$ and $\gamma = \frac{1}{2|\mathcal{D}|}$. For the negative log-likelihood loss function [\[eq:nll\]](#eq:nll){reference-type="eqref" reference="eq:nll"} the invariant measure [\[eq:invariant_measure\]](#eq:invariant_measure){reference-type="eqref" reference="eq:invariant_measure"} can be expressed as: $$\begin{equation}
+ \label{eq:invariant_measure_nll}
+ p^{*}_\beta(\bm{\theta}) \propto
+ \exp\left(\log {\tt p}(\mathcal{D}| \bm{\theta}) - \frac{1}{2} \|\Gamma \bm{\theta} \|_2^2 \right) \propto
+ {\tt p}(\mathcal{D}|\bm{\theta})\,{\tt p}(\bm{\theta})\,,
+ \end{equation}$$ which is proportional to the true posterior distribution ${\tt p}(\bm{\theta}|\mathcal{D})$ under the Gaussian prior ${\tt p}(\bm{\theta}) = \mathcal{N}(0, \Gamma)$. Thus, an *SGLB ensemble* is an ensemble of independent models $\{\bm{\theta}^{(m)}\}_{m=1}^M$ generated according to the SGLB algorithm using different random seeds. In this case, asymptotically, models are sampled from the true posterior ${\tt p}(\bm{\theta}|\mathcal{D})$.
+
+ <figure id="fig:virtual" data-latex-placement="t">
+ <embed src="figures/vSGLB.pdf" style="width:80.0%" />
+ <figcaption>Virtual ensemble</figcaption>
+ </figure>
+
+ **Virtual SGLB ensembles** While SGB and SGLB yield ensembles of independent models, their time and space complexity is $M$ times larger than that of a single model, which is a significant overhead. Consequently, generating an ensemble requires either significantly increasing complexity or sacrificing quality by reducing the number of training iterations. To address this, we introduce the concept of a *virtual ensemble* that enables generating an ensemble using *only one* model. This is possible since a GBDT model is itself an *ensemble of trees*. However, in contrast to random forests formed by independent trees [@shaker2020aleatoric], the sequential nature of GBDT models implies that all trees are dependent and individual trees cannot be considered as separate models. Hence, we use "truncated" sub-models of a single GBDT model as elements of an ensemble, as illustrated in Figure [1](#fig:virtual){reference-type="ref" reference="fig:virtual"}. Notably, a virtual ensemble can be obtained from any already constructed GBDT model. Below we formally describe this procedure applied to SGLB models, since in this case we can guarantee asymptotic sampling from the true posterior ${\tt p}(\bm{\theta}|\mathcal{D})$.
+
+ Each "truncated" model is described by the vector of parameters $\bm{\theta}^{(t)}$. As the parameters $\bm{\theta}^{(t)}$ at each iteration of the SGLB algorithm form a Markov chain that weakly converges to the stationary distribution [\[eq:invariant_measure_nll\]](#eq:invariant_measure_nll){reference-type="eqref" reference="eq:invariant_measure_nll"}, we can consider using them as an ensemble of models. However, unlike parameters taken from different SGLB trajectories, these will have a high degree of correlation, which adversely affects the ensemble's quality. This problem can be overcome by retaining only every $K$-th set of parameters. Formally, fix $K \ge 1$ and consider a set of models $\Theta_{T, K} = \{\bm{\theta}^{(Kt)}, \big[\frac{T}{2K}\big]\le t \le \big[\frac{T}{K}\big]\}$, i.e., we add to $\Theta_{T, K}$ every $K$-th model obtained while constructing *one* SGLB model using $T$ iterations of gradient boosting. Choosing larger values of $K$ allows us to reduce the correlation between samples from the SGLB Markov chain. Furthermore, we do not include in the ensemble the models $\bm{\theta}^{(t)}$ with $t < T/2$, as [\[eq:invariant_measure_nll\]](#eq:invariant_measure_nll){reference-type="eqref" reference="eq:invariant_measure_nll"} holds only asymptotically. The set of $M=\big[\frac{T}{2K}\big]$ models $\Theta_{T, K}$ is called a *virtual ensemble*. Note that virtual ensembles behave similarly to true ensembles in the limit (for large $K$ and $T$).
+
+ Importantly, we can compute the prediction of $\Theta_{T,K}$ in the same computation time as one $\bm{\theta}^{(T)}$. Indeed, when computing the prediction of one model, we have to sum up the predictions made by individual trees. To get the virtual ensemble, we only have to store the partial sums. For SGLB, we also have to account for the regularization in [\[eq:update_sglb\]](#eq:update_sglb){reference-type="eqref" reference="eq:update_sglb"}. Formally, according to [\[eq:update_sglb\]](#eq:update_sglb){reference-type="eqref" reference="eq:update_sglb"}, for SGLB we have $\bm{\theta}^{(T)} = \sum_{i=1}^{T}\epsilon(1-\gamma\epsilon)^{T-i}\bm{\phi}^{(i)}$, where $(1-\gamma\epsilon)^{T-i}$ appears due to shrinkage. While computing $\bm{\theta}^{(T)}$ we store the partial sums $\bm{\theta}_{\le t}^{(T)} = \sum_{i=1}^{t}\epsilon(1-\gamma\epsilon)^{T-i}\bm{\phi}^{(i)}$. Then, any model $\bm{\theta}^{(t)}$ from $\Theta_{T,K}$ can easily be obtained from the stored values: $$\begin{equation}
+ \bm{\theta}^{(t)} = \sum_{i=1}^{t}\epsilon(1-\gamma\epsilon)^{t-i}\bm{\phi}^{(i)} = (1-\gamma\epsilon)^{t-T} \bm{\theta}_{\le t}^{(T)}\,.
+ \end{equation}$$
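+
+ A sketch of this recovery of truncated models from stored partial sums, assuming the per-iteration weak-learner parameters $\bm{\phi}^{(i)}$ are available as rows of an array (names, shapes, and index conventions are illustrative):
+
+ ```python
+ import numpy as np
+
+ def virtual_ensemble(phi, eps, gamma, K):
+     """phi: (T, d) array of weak-learner parameters phi^(1..T).
+     Returns the models {theta^(Kt) : T/(2K) <= t <= T/K}."""
+     T = len(phi)
+     decay = (1.0 - gamma * eps) ** np.arange(T - 1, -1, -1)  # (1-ge)^(T-i)
+     partial = np.cumsum(eps * decay[:, None] * phi, axis=0)  # theta_{<=t}^(T)
+     start = max(K, K * (T // (2 * K)))                       # skip t < T/2
+     models = [(1.0 - gamma * eps) ** (t - T) * partial[t - 1]
+               for t in range(start, T + 1, K)]
+     return np.stack(models)
+ ```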
2006.11132/main_diagram/main_diagram.drawio ADDED
The diff for this file is too large to render. See raw diff
 
2006.11132/paper_text/intro_method.md ADDED
@@ -0,0 +1,102 @@
+ # Introduction
+
+ Gathering collections of images on a topic of interest is getting easier every day: simple tools can aggregate data from social media, web search, or specialized websites and filter it using hashtags, GPS coordinates, or semantic labels. However, identifying visual trends in such image collections remains difficult and usually involves manually organizing images or designing an ad hoc algorithm. Our goal in this paper is to design a clustering method which can be applied to such image collections, output a visual representation for each cluster, and show how it relates to every associated image.
+
+ Directly comparing image pixels to decide if they belong to the same cluster leads to poor results because they are strongly impacted by factors irrelevant to clustering, such as exact viewpoint or lighting. Approaches to obtain clusters invariant to these transformations can be broadly classified into two groups. A first set of methods extracts invariant features and performs clustering in feature space. The features can be manually designed, but most state-of-the-art methods learn them directly from data. This is challenging because images are high-dimensional and learning relevant invariances thus requires huge amounts of data. For this reason, while recent approaches perform well on simple datasets like MNIST, they still struggle with real images. Another limitation of these approaches is that learned features are hard to interpret and visualize, making clustering results difficult to analyze. A second set of approaches, following the seminal work of Frey and Jojic on transformation-invariant clustering [@freyEstimatingMixtureModels1999; @freyFastLargescaleTransformationinvariant2002; @freyTransformationinvariantClusteringUsing2003], uses explicit transformation models to align images before comparing them. These approaches have several potential advantages: (i) they enable direct control of the invariances to consider; (ii) because they do not need to discover invariances, they are potentially less data-hungry; (iii) since images are explicitly aligned, the clustering process and results can easily be visualized. However, transformation-invariant approaches require solving a difficult joint optimization problem. In practice, they are thus often limited to small datasets and simple transformations, such as affine transformations, and to the best of our knowledge they have never been evaluated on large standard image clustering datasets.
+
+ In this paper, we propose a deep transformation-invariant (DTI) framework that enables transformation-invariant clustering at scale using complex transformations. Our main insight is to jointly learn deep alignment and clustering parameters with a single loss, relying on the gradient-based adaptations of K-means [@macqueenMethodsClassificationAnalysis1967] and GMM optimization [@dempsterMaximumLikelihoodIncomplete1977]. Not only is predicting transformations more computationally efficient than optimizing them, but it also enables us to use complex color, thin plate spline, and morphological transformations without any specific regularization. Because it is pixel-based, our deep transformation-invariant clustering is also easy to interpret: cluster centers and image alignments can be visualized to understand assignments. Despite its apparent simplicity, we demonstrate that our DTI clustering framework leads to results on par with the most recent feature learning approaches on standard benchmarks. We also show it is capable of discovering meaningful modes in real photograph collections, which we see as an important step to bridge the gap between theoretically well-grounded clustering approaches and semi-automatic tools relying on hand-designed features for exploring image collections, such as AverageExplorer [@zhu2014averageExplorer] or ShadowDraw [@lee2011shadowdraw].
+
+ We first briefly discuss related works in Section [2](#sec:related){reference-type="ref" reference="sec:related"}. Section [3](#sec:approach){reference-type="ref" reference="sec:approach"} then presents our DTI framework (Fig. [1](#fig:method_a){reference-type="ref" reference="fig:method_a"}). Section [4](#sec:learning){reference-type="ref" reference="sec:learning"} introduces our deep transformation modules and architecture (Fig. [2](#fig:method_b){reference-type="ref" reference="fig:method_b"}) and discusses training details. Finally, Section [5](#sec:results){reference-type="ref" reference="sec:results"} presents and analyzes our results (Fig. [3](#fig:teaser){reference-type="ref" reference="fig:teaser"}).
+
+ <figure id="fig:overview_DTI">
+ <figure id="fig:method_a">
+ <p>  <img src="overview_a.png" alt="image" /></p>
+ <figcaption>Classical versus Deep Transformation-Invariant clustering</figcaption>
+ </figure>
+ <figure id="fig:method_b">
+ <img src="overview_b.png" />
+ <figcaption>Deep transformation module <span class="math inline">𝒯<sub><em>f</em><sub><em>k</em></sub></sub></span></figcaption>
+ </figure>
+ <figure id="fig:teaser">
+ <p><img src="overview_weddingkiss.jpg" alt="image" /> <img src="overview_santaphoto.jpg" alt="image" /></p>
+ <figcaption>Prototypes learned from unfiltered Instagram images associated with different hashtags</figcaption>
+ </figure>
+ <figcaption><strong>Overview.</strong> <strong>(a)</strong> Given a sample <span style="color: red"><span class="math inline"><em>x</em><sub><em>i</em></sub></span></span> and prototypes <span style="color: blue"><span class="math inline"><em>c</em><sub>1</sub></span></span> and <span style="color: mygreen"><span class="math inline"><em>c</em><sub>2</sub></span></span>, standard clustering such as K-means assigns the sample to the closest prototype. Our DTI clustering first aligns prototypes to the sample using a family of parametric transformations - here rotations - then picks the prototype whose alignment yields the smallest distance. <strong>(b)</strong> We predict alignment with deep learning. Given an image <span style="color: red"><span class="math inline"><em>x</em><sub><em>i</em></sub></span></span>, each parameter predictor <span style="color: myorange"><span class="math inline"><em>f</em><sub><em>k</em></sub></span></span> predicts parameters for a sequence of transformations - here affine <span style="color: myorange"><span class="math inline">𝒯<sub><em>β</em><sub>aff</sub></sub><sup> aff</sup></span></span>, morphological <span style="color: myorange"><span class="math inline">𝒯<sub><em>β</em><sub>mor</sub></sub><sup> mor</sup></span></span>, and thin plate spline <span style="color: myorange"><span class="math inline">𝒯<sub><em>β</em><sub>tps</sub></sub><sup> tps</sup></span></span> - to align prototype <span><span class="math inline"><em>c</em><sub><em>k</em></sub></span></span> to <span style="color: red"><span class="math inline"><em>x</em><sub><em>i</em></sub></span></span>. <strong>(c)</strong> Examples of interpretable prototypes discovered from large image sets (15k each) associated with hashtags on Instagram using our DTI clustering with 40 clusters. Each cluster contains from 200 to 800 images.</figcaption>
+ </figure>
+
+
+ In this paper we present:
+
+ - a deep transformation-invariant clustering approach that jointly learns to cluster and align images,
+
+ - a deep image transformation module to learn spatial alignment, color modifications and, for the first time, morphological transformations,
+
+ - an experimental evaluation showing that our approach is competitive on standard image clustering benchmarks, improving over the state of the art on Fashion-MNIST and SVHN, and provides highly interpretable qualitative results even on challenging web image collections.
+
+ Code, data, models as well as more visual results are available on our project [webpage](http://imagine.enpc.fr/~monniert/DTIClustering/)[^1].
+
+ # Method
+
+ Contrary to most recent image clustering methods, which rely on feature learning, we propose to perform clustering in pixel space by making the clustering invariant to a family of transformations. We consider $N$ image samples $x_{1:N}$ and aim at grouping them in $K$ clusters using a *prototype method*. More specifically, each cluster $k$ is defined by a prototype $c_k$, which can also be seen as an image, and prototypes are optimized to minimize a loss $\mathcal{L}$ which typically evaluates how well they represent the samples. We further assume that $\mathcal{L}$ can be written as a sum of a loss $l$ computed over each sample: $$\begin{equation}
+ \label{eq:basic}
+ \mathcal{L}(c_{1:K})=\sum_{i=1}^N l(x_i,\,\{c_{1}, \ldots,c_K\}).
+ \end{equation}$$ Once the problem is solved, each sample $x_i$ is associated with the closest prototype.
+
+ Our key assumption is that, in addition to the data, we have access to a group of parametric transformations $\{{\mathcal T}_\beta,\, \beta\in B \}$ to which we want to make the clustering invariant. For example, one can consider $\beta \in \mathbb{R}^6$ and ${\mathcal T}_\beta$ the 2D affine transformation parametrized by $\beta$. Other transformations are discussed in Section [4.1](#sec:transfo){reference-type="ref" reference="sec:transfo"}. Instead of finding clusters by minimizing the loss of Equation [\[eq:basic\]](#eq:basic){reference-type="ref" reference="eq:basic"}, one can minimize the following transformation-invariant loss: $$\begin{equation}
+ \mathcal{L}_{\textrm{TI}}(c_{1:K})=\sum_{i=1}^N \min_{\beta_{1:K}}\ l(x_i,\,
+ \{{\mathcal T}_{\beta_1}(c_1), \ldots,{\mathcal T}_{\beta_K}(c_K)\}).
+ \label{eq:ti}
+ \end{equation}$$ In this equation, the minimum over $\beta_{1:K}$ is taken for each sample independently. This loss is invariant to transformations of the prototypes (see proof in Appendix [8](#sec:proof){reference-type="ref" reference="sec:proof"}). Also note that there is not a single optimum, since the loss is the same if any prototype $c_k$ is replaced by ${\mathcal T}_{\beta}(c_k)$ for any $\beta\in B$. If necessary, for example for visualization purposes, this ambiguity can easily be resolved by adding a small regularization on the transformations. The optimization problem associated with $\mathcal{L}_{\textrm{TI}}$ is of course difficult. A natural approach, which we use as a baseline (noted TI), is to alternately minimize over transformations and clustering parameters. We show that performing such optimization using gradient descent can already lead to improved results over standard clustering but is computationally expensive.
+
+ We experimentally show it is faster and actually better to instead learn $K$ (deep) predictors $f_{1:K}$, one per prototype, which aim at associating to each sample $x_i$ the transformation parameters $f_{1:K}(x_i)$ minimizing the loss, i.e. to minimize: $$\begin{equation}
+ \mathcal{L}_{\textrm{DTI}}(c_{1:K},f_{1:K})= \sum_{i=1}^N
+ l(x_i,\,\{{\mathcal T}_{f_1(x_i)}(c_1),
+ \ldots,{\mathcal T}_{f_K(x_i)}(c_K)\}),
+ \label{eq:dti}
+ \end{equation}$$ where the predictors $f_{1:K}$ are now shared across all samples. We found that using deep parameter predictors not only enables more efficient training but also leads to better clustering results, especially with more complex transformations. Indeed, the structure and optimization of the predictors naturally regularize the parameters for each sample, without requiring any specific regularization loss, especially in the case of high numbers $N$ of samples and transformation parameters.
+
+ In the next section we present concrete losses and algorithms. We then describe differentiable modules for relevant transformations and discuss the parameter predictor architecture as well as training in Section [4](#sec:learning){reference-type="ref" reference="sec:learning"}.
+
+ The goal of the K-means algorithm [@macqueenMethodsClassificationAnalysis1967] is to find a set of prototypes $c_{1:K}$ such that the average Euclidean distance between each sample and the closest prototype is minimized. Following the reasoning of Section [3.1](#sec:dti){reference-type="ref" reference="sec:dti"}, the loss optimized in K-means can be transformed into a transformation-invariant loss: $$\begin{equation}
+ \mathcal{L}_{\textrm{DTI K-means}}(c_{1:K},f_{1:K})= \sum_{i=1}^N
+ \underset{k}{\min}~\|x_i - {\mathcal T}_{f_k(x_i)}(c_k)\|^2.
+ \label{eq:dtikmeans}
+ \end{equation}$$ Following batch gradient-based training of K-means [@bottouConvergencePropertiesKmeans1995], we can then simply jointly minimize ${\mathcal L}_{\textrm{DTI K-means}}$ over prototypes $c_{1:K}$ and deep transformation parameter predictors $f_{1:K}$ using a batch gradient descent algorithm. In practice, we initialize prototypes $c_{1:K}$ with random samples and predictors $f_{1:K}$ such that $\forall k,\forall x,{\mathcal T}_{f_k(x)} = \mathop{\mathrm{Id}}$.
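+
+ A compact sketch of this loss in PyTorch, where `predictors[k]` and `transform` are hypothetical stand-ins for the parameter predictors $f_k$ and a differentiable module ${\mathcal T}_\beta$:
+
+ ```python
+ import torch
+
+ def dti_kmeans_loss(x, prototypes, predictors, transform):
+     """x: (B, D) flattened samples; prototypes: list of K (D,) tensors;
+     predictors[k](x) -> (B, P) parameters; transform(c, beta) -> (B, D)."""
+     dists = []
+     for c_k, f_k in zip(prototypes, predictors):
+         aligned = transform(c_k, f_k(x))          # T_{f_k(x_i)}(c_k)
+         dists.append(((x - aligned) ** 2).sum(dim=1))
+     # min over clusters for each sample, then sum over the batch
+     return torch.stack(dists, dim=1).min(dim=1).values.sum()
+ ```
+
+ Minimizing this with any stochastic gradient optimizer updates prototypes and predictors jointly, matching the batch gradient descent described above.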
+
+ ::: algorithm
+ []{#algo:dtigmm label="algo:dtigmm"}
+ :::
+
+ We now consider that the data are observations of a mixture of $K$ multivariate normal random variables $X_{1:K}$, i.e. $X = \sum_{k} \delta_{k, \Delta}X_k$, where $\delta$ is the Kronecker function and $\Delta \in \{1,\ldots,K\}$ is a random variable defined by $P(\Delta = k) = \pi_k$, with $\forall k,\,\pi_k>0$ and $\sum_k\pi_k = 1$. We write $\mu_k$ and $\Sigma_k$ for the mean and covariance of $X_k$, and $G(\,\cdot\,;\mu_k,\Sigma_k)$ for the associated probability density function. The transformation-invariant negative log-likelihood can then be written: $$\begin{equation}
+ \mathcal{L}_{\textrm{DTI GMM}}(\mu_{1:K}, \Sigma_{1:K}, \pi_{1:K}, f_{1:K}) =
+ -\sum_{i=1}^{N} \log\Big(\sum_{k=1}^K\pi_k G\big(x_i \,;
+ {\mathcal T}_{f_k(x_i)}(\mu_{k}),
+ {\mathcal T}^{*}_{f_k(x_i)}(\Sigma_{k})\big)\Big),
+ \label{eq:dtigmm}
+ \end{equation}$$ where ${\mathcal T}^{*}$ is a slightly modified version of ${\mathcal T}$. Indeed, ${\mathcal T}$ may include transformations that one can apply to the covariance, such as spatial transformations, and others that would not make sense, such as additive color transformations. We jointly minimize $\mathcal{L}_{\textrm{DTI GMM}}$ over Gaussian parameters, mixing probabilities, and deep transformation parameter predictors $f_{1:K}$ using a batch gradient-based EM procedure similar to [@hosseiniMatrixManifoldOptimization2015; @greffNeuralExpectationMaximization2017; @gepperthGradientbasedTrainingGaussian2019] and detailed in Algorithm [\[algo:dtigmm\]](#algo:dtigmm){reference-type="ref" reference="algo:dtigmm"}. In practice, we assume that pixels are independent, resulting in diagonal covariance matrices.
+
+ In such gradient-based procedures, two constraints have to be enforced: the positivity and normalization of the mixing probabilities $\pi_k$, and the non-negativity of the diagonal covariance terms. For the mixing probability constraints, we adopt the approach of [@hosseiniMatrixManifoldOptimization2015] and [@gepperthGradientbasedTrainingGaussian2019], which optimizes mixing parameters $\eta_k$ used to compute the probabilities $\pi_k$ through a softmax instead of optimizing $\pi_k$ directly, which we write $\pi_{1:K} = \mathop{\mathrm{softmax}}(\eta_{1:K})$. For the variance non-negativity, we introduce a fixed minimal variance value $\sigma_{\mathrm{min}}^2$ which is added to the variances when evaluating the probability density function. This approach differs from the one in [@gepperthGradientbasedTrainingGaussian2019], which instead uses clipping, because we found training with clipped values was harder. In practice, we take $\sigma_{\textrm{min}} = 0.25$.
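+
+ A minimal sketch of these two reparametrizations (tensor names and sizes are illustrative):
+
+ ```python
+ import torch
+
+ K, D = 10, 784
+ eta = torch.zeros(K, requires_grad=True)         # unconstrained mixing params
+ log_var = torch.zeros(K, D, requires_grad=True)  # unconstrained variances
+ sigma_min = 0.25
+
+ pi = torch.softmax(eta, dim=0)                   # positive and sums to 1
+ var = log_var.exp() + sigma_min ** 2             # bounded below, no clipping
+ ```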
+
+ We consider a set of prototypes $c_{1:K}$ that we would like to transform to match a given sample $x$. To do so, we propose to learn, for each prototype $c_k$, a separate deep predictor which predicts transformation parameters $\beta$. We propose to model the family of transformations ${\mathcal T}_{\beta}$ as a sequence of $M$ parametric transformations such that, writing $\beta=(\beta^1, \ldots,\beta^M)$, ${\mathcal T}_{\beta} = {\mathcal T}^{M}_{\beta^M} \circ \dotsc \circ {\mathcal T}^{1}_{\beta^1}$. In the following, we describe the architecture of the transformation parameter predictors $f_{1:K}$, as well as each family of parametric transformation modules we use. Figure [2](#fig:method_b){reference-type="ref" reference="fig:method_b"} shows our learned transformation process on a MNIST example.
+
+ For all experiments, we use the same parameter predictor network architecture, composed of a shared ResNet [@heDeepResidualLearning2016] backbone truncated after the global average pooling, followed by $K\times M$ Multi-Layer Perceptrons (MLPs), one for each prototype and each transformation module. For the ResNet backbone, we use ResNet-20 for images smaller than $64\times 64$ and ResNet-18 otherwise. Each MLP has the same architecture, with two hidden layers of 128 units.
+
+ To model spatial transformations of the prototypes, we follow the spatial transformers developed by Jaderberg et al. [@jaderbergSpatialTransformerNetworks2015]. The key idea is to model spatial transformations as a differentiable image sampling of the input using a deformed sampling grid. We use affine ${\mathcal T}^{\,\textrm{aff}}_\beta$, projective ${\mathcal T}^{\,\textrm{proj}}_\beta$ and thin plate spline ${\mathcal T}^{\,\textrm{tps}}_\beta$ [@booksteinPrincipalWarpsThinplate1989] transformations, which respectively correspond to 6, 8 and 16 (a $4\times 4$ grid of control points) parameters.
+
+ We model color transformation with a channel-wise diagonal affine transformation on the full image, which we write ${\mathcal T}^{\,\textrm{col}}_\beta$. It has 2 parameters for greyscale images and 6 parameters for colored images. We first used a full affine transformation with 12 parameters; however, the network was able to hide several patterns in the different color channels of a single prototype (Appendix [9.4](#sec:color_discuss){reference-type="ref" reference="sec:color_discuss"}). Note that a similar transformation was theoretically introduced in capsules [@kosiorekStackedCapsuleAutoencoders2019], but with the different goal of obtaining a color-invariant feature representation. Deep feature-based approaches often handle color images with a pre-processing step such as Sobel filtering [@caronDeepClusteringUnsupervised2018; @jiInvariantInformationClustering2019; @kosiorekStackedCapsuleAutoencoders2019]. We believe the way we align the colors of the prototypes to obtain color invariance in pixel space is novel, and it enables us to work directly with colored images without any pre-processing or specific invariant features.
+
+ We introduce a new transformation module to learn morphological operations [@haralick1987image] such as dilation and erosion. We consider a greyscale image $x \in {\mathbb R}^{D}$ of size $U\times V=D$, and write $x[u,v]$ for the value of the pixel $(u,v)$, with $u\in \{1,\ldots,U\}$ and $v\in \{1,\ldots,V\}$. Given a 2D region $A$, the dilation of $x$ by $A$, $\mathcal{D}_A(x)\in {\mathbb R}^{D}$, is defined by $\mathcal{D}_A(x)[u,v]=\max_{(u',v') \in A}~x[u+u',v+v']$ and its erosion by $A$, $\mathcal{E}_A(x)\in {\mathbb R}^{D}$, is defined by $\mathcal{E}_A(x)[u,v]=\min_{(u',v') \in A}~x[u+u',v+v']$. Directly learning the region $A$ which parametrizes these transformations is challenging; we thus propose to learn parameters $(\alpha, a)$ for the following soft version of these transformations: $$\begin{equation}
+ {\mathcal T}^{\,\textrm{mor}}_{(\alpha, a)}(x)[u,v] = \dfrac{\sum_{(u',v') \in W}~x[u+u',v+v']\cdot
+ a[u',v'] \cdot e^{\alpha x[u+u',v+v']} }{\sum_{(u',v') \in W}~a[u',v']\cdot
+ e^{\alpha x[u+u',v+v']}},
+ \end{equation}$$
+
+ where $W$ is a fixed set of 2D positions, $\alpha$ is a softmax (positive values) or softmin (negative values) parameter, and $a$ is a set of parameters with values between 0 and 1 defined for every position $(u',v')\in W$. The parameters $a$ can be interpreted as an image, or as a soft version of the region $A$ used for morphological operations. Note that if $a[u',v']=\mathbf{1}_{\{(u',v')\in A\}}$, then when $\alpha \rightarrow +\infty$ (resp. $-\infty$), this successfully emulates $\mathcal{D}_A$ (resp. $\mathcal{E}_A$). In practice, we use a grid of integer positions around the origin of size $7\times 7$ for $W$. Note that since morphological transformations do not form a group, the transformation-invariant denomination is slightly abusive.
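+
+ A sketch of this soft morphology as a differentiable PyTorch operation, using `unfold` to gather the $7\times 7$ window $W$ around each pixel (names are illustrative):
+
+ ```python
+ import torch
+ import torch.nn.functional as F
+
+ def soft_morphology(x, a, alpha):
+     """x: (B, 1, U, V) greyscale images; a: (7, 7) weights in [0, 1];
+     alpha: scalar, large positive ~ dilation, large negative ~ erosion."""
+     patches = F.unfold(x, kernel_size=7, padding=3)       # (B, 49, U*V)
+     w = a.reshape(1, 49, 1) * torch.exp(alpha * patches)  # a * e^{alpha x}
+     out = (patches * w).sum(dim=1) / w.sum(dim=1)         # weighted average
+     return out.reshape(x.shape)
+ ```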
+
+ We found that two key elements were critical to obtaining good results: empty cluster reassignment and curriculum learning. We then discuss further implementation details and computational cost.
+
+ Similar to [@caronDeepClusteringUnsupervised2018], we adopt an empty cluster reassignment strategy during our clustering optimization. We reinitialize both the prototype and the deep predictor of "tiny" clusters using the parameters of the largest cluster, with a small added noise. In practice, the size of balanced clusters being $N/K$, we define "tiny" as less than 20% of $N/K$.
+
+ Learning to predict transformations is a hard task, especially when the number of parameters is high. To ease learning, we thus adopt a curriculum learning strategy by gradually adding more complex transformation modules to the training. Given a target sequence of transformations to learn, we first train our model without any transformation - or equivalently with an identity module - then iteratively add subsequent modules once convergence has been reached. We found this to be especially important when modeling local deformations with complex transformations with many parameters, such as TPS and morphological transformations. Intuitively, prototypes should first be coarsely aligned before attempting to refine the alignment with more complex transformations.
2007.06702/main_diagram/main_diagram.drawio ADDED
@@ -0,0 +1 @@
+ <mxfile host="app.diagrams.net" modified="2020-06-05T19:29:37.556Z" agent="5.0 (X11)" etag="HKMovLB8Ms0ohQH-L2I7" version="13.1.14" type="device"><diagram id="9ZFVwTugjTrvKbDMytx6" name="Page-1">7VxZc6M4EP41rtp9CAU64dFx5qrKHDWZ2Z08TWGj2EwweDDeOPvrVzLCIJCxk4CNN6hSMWodQPcndbfUYgBH8/W72F3MPkYeCwbA9NYDeDUAwEIADMSf6T2mFEpoSpjGvicr5YQb/18miaakrnyPLZWKSRQFib9QiZMoDNkkUWhuHEcParW7KFDvunCnrEK4mbhBlfq37yWzlGpjM6e/Z/50lt3ZMmXJ2J3cT+NoFcr7hVHI0pK5m3UjO1/OXC96SEmb1vDNAI7iKErSq/l6xALB1oxj6S3e7ijdPnLMwuSQBst7MLr/+vv93LxBq+Ht7V/vrz5cWLKbf9xgJXkhnzZ5zJizeT0mejEH8PJh5ifsZuFOROkDhwOnzZJ5wHMWv7zzg2AUBVG8aQvv7hiZTDh9mcTRPSuUeNQZm6LDaex6Pn+HKz/movWjkJeHUSy4d1l9x+x5WZywdYEk3/kdi+YsiR95FVmKbSkqCU0osw+5nLe0WUHGW6IrsTXddp0zmV9IPj+F52bbPL8DO3hOxgST9nlulVhuGVaF6Ri3xfTvs1vv+iP69tn8DG78m5vHH7+8C+S0y3RmeZhRQY/CRE5wFtIJwSEUuqQpRkPDKSaqMN6GhkkrnLcAMZDSqioIy0Jqx05LcoFORQzM43OzzApMRtModIM3OfVSFVRe5zqKFlI8v1iSPEo5uKskUoXH1n7yQzQ3sMzdFkqu1rLnTeYxy4T8fQuNRPa2WJY32+SydjvFnLjxlCV1mJUQFQypRUPMAjfx/1GVmU5UsumXyOfPkqOIUAPZ1IEcTgjZwFZABCxqAEjyZKv9p68huyzqH81dAFa6tqhtOCbME1K7XkareMIqXW+Atn39F2APdgd75nGxl3K2hjeZpIog1WqzjInHAGkZPiZ4DjKHcew+FqotRIXl7jtjpNwVW/tgXlOdX6S3bxTIGV8U5UaCRKojBeLk9yrKCi6WG5AOeQWLLNZ5obCh3Yna4Js/5yY6MD+xB/7/azR3w2IDMhW/lz430D1e/iFcrART06fgb5U+SFqrUc3L9a7taTWtDcYcHc1oWmCWoAcNCArq0a6o2QyNRbXKMVzCaHMGDng5BpCKASksPPqDF/IfzqrETwb08tOnAb36yS84Ufg53JHimSEnij88+vNQuYseVeGqQty4U6rEJckN/KmwWidcpIzTL4Uoff4gQ1kw9z1vM2Hr0KTirWiywYYMM4QMUDSy1EmB27kGZ3eeQAU+iGjMY2xgJaG2wARPACbr7MGDGpprHGjQInjQeYEHdWEmksRrYQOUGs3dtZyqesClcMHIoPisQIarM5RhGK9HZJCWFIw6R1iAGtQBhdQx8VHNHFH2xUJvKNZ2hRACd7n0JyU78FCu7nVNistSmlWpjNa8B8PtSMNGNE9E7XSHA1zpGmTDc+t0GBYoylHttiHHqHzXHW9z6EPvwGyrnhPRrX+/TFc903P6yq6/H8NdertJmykqceXSb+1SwROMXj4Hl9SIRarL7bZuvdE22lphJA04RrAREV+xcMkuIHj19saeZWtMDVw0PVC3dBdpwDnqAdWsx4Shgc8KQw34SD2GmnWCLHReGKInWvHbEgMW9k70Fj+Ee8xl81440WYBQPBkCNLu3TQxC9Uh6MvPHCU9RLQQsQAyELLz5HQMIi1PMj1Ezgkiej3U28NdQxF21O1KTAzUYUsGkJcjqJn96+F4GQWrROwYHHEH22X2nTZgj0xsNr5rBhMUdmgLWx8YuXsiGeeMfyIu7EZw8SUQVPOahVPOHFE5eYji+53o2JLHHZ98mlBZpclmGyuhxBzqoj1bi7DdbdeeGkjDNNy2R1BNvLAWQOYxAQTtCrNfS1RqMTKwNuBvb/gqOXT77eB9tZfJtPmYln6rR7EnqEHtasj4UXZ+9BL/X8b8V9e6bctwqJmn6irTqUL+tWJBuvMv/UB8JhooqKwXYBFVX4uHIw9E1KFA+66p0ywIYm+gfTZsOqJOW4g37kexok6JQYrDGB9xFOuPy6DujOKzPS5zrDNd0CJ1McSdPDoDrVaPzujloVsqOK2NqDucy9xl0sxUU93m7tYpUb2UGjjg1CubDAEQVpTNUcP0tCLG1XHXURvxBbYePVRL4G7Zerpo7X74NTf8xNJJ0dYDJ7f1cHdG4/naerQVW69dIwxRu6a6aoRpGtPqsuCzLM2mAvyR1R0gd+77EqgVfGpR4bT+fQkd9srfl1B7bvnzEtmCTQ+93QeMj+EGY8eCGfiyWKWGsbfP2z7ad070prVu5+vs90HKMU4bN7bWhuqUS4t1Cw+9Tf08LBDT1pw8M4qbIKffBcGv91NXez1jTA60qXHjNsvLZNpA5GI/imtGcdkzPvkuCD3JLkhZs56Jp3zwqCatmINP9ZQpKbnKpS+qlqoTDGuq73GVy40t87iOMT2Nd3J8zYKqGKw9Tte2S1IWfCWK8VCno4Igp9TRDrdi96g4HGM8m3+5OK2efxkavvkP</diagram></mxfile>
2007.06702/main_diagram/main_diagram.pdf ADDED
Binary file (24.6 kB).
 
2007.06702/paper_text/intro_method.md ADDED
@@ -0,0 +1,107 @@
+ # Introduction
+
+ The computational complexity of automated planning (Bylander 1991, 1994) has motivated research on heuristics (Hoffmann and Nebel 2001; Bonet and Geffner 2001; Helmert and Domshlak 2009) that, in conjunction with search algorithms, can efficiently find a solution (Hart, Nilsson, and Raphael 1968; Hoffmann and Nebel 2001; Bonet and Geffner 2001). The primary disadvantage of this technique is the initial investment required. Designing good heuristic-generation principles such as "delete-relaxation" (Hoffmann and Nebel 2001) often requires a careful study of the representation language or the structure of the underlying problems. These factors make automatic synthesis of heuristics particularly attractive.
+
+ There is evidence that one can learn such heuristics from the action models automatically when there is enough data (Groshev et al. 2018a; Shen, Trevizan, and Thiébaux 2020). Existing approaches for learning such heuristics have proved difficult to scale and to transfer to problems with object names and object quantities not seen in the training data. Moreover, such approaches often require hand-engineered action models in a representational language such as the Planning Domain Definition Language (PDDL) (Fox and Long 2003) (see Sec. 6 for details on related work). Whether such synthesis can be done in the absence of action models or when sufficient training data is not present is a question that has not been sufficiently addressed.
+
+ This paper answers this question by presenting a method for learning generalizable heuristics when the domain model and/or a domain expert are unavailable, by utilizing a library of plans to train an artificial neural network. We showcase the effectiveness of abstraction techniques as a domain-independent method for learning heuristic generation functions that do not require access to symbolic action models. We demonstrate the effectiveness of generalized heuristics learned using this method and show that they can transfer to problems with object quantities and/or names different from those in the training data.
+
+ In the absence of sufficient training data, we develop and evaluate leapfrogging, a bootstrapping technique that was proposed in recent work (Groshev et al. 2018a) but has not been sufficiently developed and tested for learning generalized heuristics. We show that this technique is data-efficient and can be used to learn competitive generalized heuristics in the absence of externally generated training data.
+
+ This paper is organized as follows. Sec. 2 presents the necessary formal framework. Sec. 3 defines the learning problem and describes our approach for learning, followed by a description of using the learned heuristic for planning (Sec. 4). Sec. 5 discusses the obtained results. Sec. 6 summarizes related work, followed by conclusions (Sec. 7).
+
+ # Method
+
+ A planning *problem* is a tuple $\Gamma = \langle O, P, A, s_{init}, g, \delta \rangle$, where $O$ is a set of objects, $P$ is a set of predicates and $A$ is a set of unit-cost actions. For typed domains, we automatically compile in types as unary predicates. The state space $S$ for a planning problem as defined above is the set of all possible assignments of truth values to predicates in $P$ instantiated with objects from $O$. $s_{init} \in S$ is the initial state and $g$ is a goal condition expressed as a conjunctive first-order logic formula over the instantiated atoms. $\delta : S \times A \to S$ determines the transition function. Different planning problems from an application domain (e.g. Logistics) share the same $P$ and $A$ components, and these components together define a planning *domain*. While a number of representations have been developed to express domain-wide, "lifted" actions (Fikes and Nilsson 1971; Fox and Long 2003; Sanner 2010; Srivastava et al. 2014), such actions could also be implemented using arbitrary generative models or simulators. We assume w.l.o.g. that an action $a \in A$ can be parameterized as $a(o_1,\ldots,o_n)$ where $o_1,\ldots,o_n \in O$; we do not place any representational requirements on the specifications of $A$. A solution to $\Gamma$ is a plan $\pi = a_0,\ldots,a_{n-1}$, which is a sequence of actions inducing a trajectory $\tau = s_0,\ldots,s_n$ such that $s_0 \equiv s_{init}$, $\delta(s_i,a_i) = s_{i+1}$ and $s_n \models g$. The plan length $|\pi|_{s_i}$ from a state $s_i$ is the number of states starting from $s_{i+1}$ in $\tau$. We will use $P^k$ to refer to the set of predicates with arity $k$ and $P^{k+}$ for those with arity $k$ or greater.
+
+ A planning *heuristic* is a function $h: S \to \mathbb{R}^+_0 \cup \{\infty\}$, where $h(s)$ estimates the cost of reaching the goal state from a state $s$. The optimal heuristic $h^*(s)$ provides the optimal cost of reaching the goal from $s$. Typically, search algorithms maintain a priority queue of promising paths and use the heuristic function to compute the keys in this queue (Russell and Norvig 2010). For example, the utility value used in $A^*$ is $f(s) = g(s) + h(s)$, where $g(s)$ is the length of the path up to $s$ and $h(s)$ is the heuristic value of $s$; the node expanded is the one with the minimum value of $f(s)$ (Hart, Nilsson, and Raphael 1968).
+
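+ A minimal sketch of this use of a heuristic inside $A^*$, with unit-cost actions so that $g(s)$ counts plan length (the interface is illustrative; `successors` yields (action, next-state) pairs and states must be hashable):
+
+ ```python
+ import heapq
+ from itertools import count
+
+ def astar(s_init, goal_test, successors, h):
+     """Expand the node with minimum f(s) = g(s) + h(s)."""
+     tie = count()  # tiebreaker so heapq never compares states directly
+     frontier = [(h(s_init), next(tie), 0, s_init, [])]
+     best_g = {}
+     while frontier:
+         f, _, g, s, plan = heapq.heappop(frontier)
+         if goal_test(s):
+             return plan
+         if best_g.get(s, float("inf")) <= g:
+             continue
+         best_g[s] = g
+         for a, s2 in successors(s):
+             heapq.heappush(frontier,
+                            (g + 1 + h(s2), next(tie), g + 1, s2, plan + [a]))
+     return None
+ ```
+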
+ We use *canonical abstractions* (Sagiv, Reps, and Wilhelm 2002) for representing a concrete state such that information about object names and numbers is lifted by grouping objects using abstraction predicates. Grouping together states can lead to certain predicates becoming imprecise. As a result, three-valued logic is used to represent truth values of predicates in an abstract state. We introduce canonical abstraction with the help of the following example.
+
+ **Example 2.1.** Consider the *gripper* domain, which consists of two rooms and a robot equipped with a set of grippers to pick up or drop balls (Long and Fox 2003).
+
+ Let $s_{eg} = \{free^1(g_1), at^2(b_1, r_a), at^2(b_2, r_b), robotAt^1(r_a)\}$ be a state in a *gripper* problem instance $\Gamma$ expressed in *typed* PDDL with $O = \{r_a, r_b, g_1, b_1, b_2\}$, and $g = at^2(b_1, r_b) \land at^2(b_2, r_b)$. Let $type(gripper) = \{g_1\}$, $type(room) = \{r_a, r_b\}$ and $type(ball) = \{b_1, b_2\}$ be the object types.
+
+ **Definition 2.1.** (Role) The role of an object $o \in O$ in a state $s$ is the set of unary predicates that it satisfies: $role(o) = \{p^1|p^1 \in P^1, p^1(o) \in s\}.$
+
+ For the state in Example 2.1, the role of the object $r_a$ is $role(r_a) = \{room, robotAt\}$ whereas $role(r_b) = \{room\}$. We will use $\psi(r) = \{o|o \in O, role(o) = r\}$ to denote the set of objects having a particular role $r$. Thus, $\psi(\{room\}) = \{r_b\}$, $\psi(\{ball\}) = \{b_1, b_2\}$, $\psi(\{room, robotAt\}) = \{r_a\}$ and $\psi(\{gripper, free\}) = \{g_1\}$. The maximum number of possible roles in any state $s$ in any problem $\Gamma$ derived from a domain $D$ with $|P^1|$ unary predicates is $2^{|P^1|}$.
+
+ **Definition 2.2.** (Canonical Abstraction) The canonical abstraction of a state $s = \{p^k(o_1,...,o_k)|p^k \in P, o_1,...,o_k \in O\}$ is an abstract state $\overline{s} = \{\overline{p}^k(role(o_1),...,role(o_k))|\overline{p}^k \equiv p^k\}$. Let $\mathcal{O} = \psi(role(o_1)) \times ... \times \psi(role(o_k))$; then $\overline{p}^k$ is defined as follows:
+
+ - $\overline{p}^k(role(o_1), \dots, role(o_k)) = 0 \iff \forall (o_1, \dots, o_k) \in \mathcal{O}\quad p^k(o_1, \dots, o_k) \notin s$.
+ - $\overline{p}^k(role(o_1), \dots, role(o_k)) = 1 \iff \forall (o_1, \dots, o_k) \in \mathcal{O}\quad p^k(o_1, \dots, o_k) \in s$.
+ - $\overline{p}^k(role(o_1), \dots, role(o_k)) = \frac{1}{2} \iff (\exists (o_1, \dots, o_k) \in \mathcal{O}\quad p^k(o_1, \dots, o_k) \in s) \land (\exists (o_1, \dots, o_k) \in \mathcal{O}\quad p^k(o_1, \dots, o_k) \notin s)$.
+
41
+ Let $r_0 = \{gripper, free\}, r_1 = \{room, robotAt\}, r_2 = \{room\}$ and $r_3 = \{ball\}$ be the roles in the state $s_{eg}$ . The canonical abstraction of the state $s_{eg}$ is the abstract state $\overline{s_{eg}} = \{free^1(r_0), at^2(r_3, r_1), at^2(r_3, r_2), robotAt^1(r_1)\}$ . The truth values for predicates in $\overline{s_{eg}}$ are $free^1(r_0) = 1$ , $at^2(r_3, r_1) = \frac{1}{2}$ , $at^2(r_3, r_2) = \frac{1}{2}$ and $robotAt^1(r_1) = 1$ .
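+
+ Reusing `role` and `psi` from the sketch above, the three-valued truth values of Definition 2.2 can be computed directly; the test below reproduces $at^2(r_3, r_1) = \frac{1}{2}$ from the example.
+
+ ```python
+ from itertools import product
+
+ def abstract_truth(p, roles, state, objects, unary_predicates):
+     """Three-valued truth of p over a role tuple (Definition 2.2): 1 if p holds
+     for every object tuple with these roles, 0 if for none, 1/2 otherwise."""
+     tuples = list(product(*(psi(r, objects, state, unary_predicates) for r in roles)))
+     held = sum((p, t) in state for t in tuples)
+     if held == 0:
+         return 0
+     return 1 if held == len(tuples) else 0.5
+
+ r1 = frozenset({"room", "robotAt"})
+ r3 = frozenset({"ball"})
+ assert abstract_truth("at", (r3, r1), s_eg, O, P1) == 0.5  # only b1 is at ra
+ ```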
42
+
43
+ This formulation assumes that domains contain unary and binary predicates. Domains with ternary or higher-arity predicates can be easily compiled into domains with binary predicates. The framework presented in this paper can handle higher-arity predicates; however, we found that the results were best in domains compiled into binary predicates. We present a case study of Sokoban in the supplementary material by compiling the ternary predicates present in the Sokoban domain into binary predicates (Sokoban2). Learning such features is an independent problem and an active area of research (Bonet, Francès, and Geffner 2019). These approaches could be used to learn such predicates for better abstractions.
44
+
45
+ We define the learning problem as follows:
46
+
47
+ **Definition 3.1.** (Learning Generalized Heuristics) Given a library of trajectories of the form $\Xi = \{\langle \pi, \tau, g, O \rangle\}$ for a domain $D = \langle P, A \rangle$ where O is a set of objects, g is a goal formula, $\tau = s_0, \ldots, s_n, \ \pi = a_0, \ldots, a_{n-1}$ contain states and parameterized actions from a planning problem $\langle D, O, s_{init}, g, \delta \rangle$ such that $s_0 \equiv s_{init}, \delta(s_i, a_i) = s_{i+1}$ and $s_n \models g$ , learn a domain-wide generalized heuristic function $h_D$ s.t. $h_D(s, g', O')$ estimates, for any planning problem $\Gamma' = \langle D, O', s'_{init}, g', \delta' \rangle$ and any state s in the state space of $\Gamma'$ , the distance from s to a state s' s.t. $s' \models g'$ .
48
+
49
+ Our overall approach for model-agnostic planning involves solving the learning problem defined above by training a Generalized Heuristic Network (GHN) (Sec. 3) and using the learned GHN for planning (Sec. 4). Our approach is a domain-independent method for learning heuristic generation functions (HGFs) using either training data or problem generators. In the standard planning paradigm, this approach plays a role similar to that of HGFs, which are currently hand-coded. The computation of our heuristic is model-agnostic in that it only needs access to the action names and parameters, true atoms of a state, and the objects of the problem, which could be provided by a blackbox simulator.
50
+
51
+ **Vanilla training data generation** To gather the training data T, we first generate a set of problem instances and use an off-the-shelf solver to compute a plan for each problem
52
+
53
+ <span id="page-2-1"></span>![](_page_2_Figure_0.jpeg)
54
+
55
+ ![](_page_2_Figure_1.jpeg)
56
+
57
+ Figure 1: The network architecture used in this paper. Activations for $NN_{\mathcal{A}}$ , $(NN_1, \ldots, NN_{\mathcal{A}_{max}})$ and $NN_{len}$ are SoftMax, Sigmoid and ReLU respectively. Each *Dense-32* block contains two fully-connected hidden layers with 32 tensors, each using ReLU as the activation function. Absolute and Binned Inputs comprise vectors $v, m_{p_1}^2, \ldots, m_{p_n}^2$ and $v', {m'}_{p_1}^2, \ldots, {m'}_{p_n}^2$ respectively (described in Sec. 3.1).
58
+
59
+ to form a library of trajectories $\Xi = \{\langle \pi, \tau, g, O \rangle\}$ . Next, for each trajectory $\xi \in \Xi$ , we encode goal hints into every state $s \in \tau_{\xi}$ using the approach in Sec. 3.2 to form tuples $(s, a, |\pi|_s)$ which are then converted to $(s, \overline{s}, a, |\pi|_s)$ using canonical abstraction (Definition 2.2) and added to T. As a part of the data generation process, we maintain the set of roles $\mathcal{R}$ , actions $\mathcal{A}$ , the maximum number of action parameters $\mathcal{A}_{max}$ , and predicates $\mathcal{P}$ that occurred in the training data. Together, they define the input-output dimensions of the network. Once T has been generated, we use standard optimization techniques to minimize the loss.
60
+
61
+ **Training data generation using leapfrogging** The training data generation method discussed above assumes access to a planner that can solve the problems in the training set. It might be the case that access to such an oracle is either expensive or unavailable. If a problem generator is available, leapfrogging (Groshev et al. 2018a) can be used to incrementally generate training data in an iterative fashion. Initially, problem instances with very few objects $\Gamma'_0$ are solved to generate training data $T_0$. These instances are small enough that blind search can be used to find solutions. We then apply our learning approach to learn a GHN $leap_0$ using $T_0$. Next, $leap_0$ is used to bootstrap the generation of training data $T_i$ for the next iteration. We use the problem generator to generate problem instances of sizes $\Gamma'_0, \ldots, \Gamma'_i$ where problems in $\Gamma'_i$ have more objects than those in $\Gamma'_{i-1}$ and generate $T_i$ by using $leap_{i-1}$ to solve $\Gamma'_0, \ldots, \Gamma'_i$ . We then learn a new GHN $leap_i$ using $T_i$ . Since GHNs learn knowledge independent of the number of objects, this iterative approach allows GHNs to effectively scale even in the absence of efficient, off-the-shelf mechanisms for generating the seed plans.
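+
+ A rough sketch of this loop is shown below; `problem_generator`, `blind_search`, `train_ghn` and `plan_with_ghn` are hypothetical helpers, not interfaces defined by the paper.
+
+ ```python
+ def leapfrog(problem_generator, blind_search, train_ghn, plan_with_ghn, iterations):
+     """Iterative training-data generation: each GHN bootstraps the next dataset.
+     `problem_generator(i)` yields problems whose object count grows with i."""
+     T = [blind_search(p) for p in problem_generator(0)]   # seed data: tiny problems
+     ghn = train_ghn(T)                                    # leap_0
+     for i in range(1, iterations):
+         problems = [p for j in range(i + 1) for p in problem_generator(j)]
+         T = [plan_with_ghn(ghn, p) for p in problems]     # leap_{i-1} produces T_i
+         ghn = train_ghn(T)                                # leap_i
+     return ghn
+ ```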
62
+
63
+ The neural network used in this paper is illustrated in Fig. 1. We use two networks: one to predict the action and its parameters and the other to predict the plan length. We found this approach to be the best at providing good estimates of
64
+
65
+ both the action probabilities and the plan length. We refer the reader to the supplementary material for our ablation study.
66
+
67
+ The output of the network is a vector $NN_{\mathcal{A}}$ of length $|\mathcal{A}|$ representing the action probability, a set of vectors $NN_1,...,NN_{\mathcal{A}_{max}}$ each of length $|\mathcal{P}^1|$ that represents the predicted role of the corresponding parameter in the action (recall that a role is a set of unary predicates) and a real-valued number $NN_{len}$ that represents the predicted plan length.
68
+
69
+ The input to the neural network is an abstract state that is represented as a set of vectors and matrices which capture the abstraction of object properties as well as their relationships. We compute inputs of two different types: (a) Absolute Inputs and (b) Binned Inputs. Absolute inputs encode the actual counts of the roles in a concrete state and also capture the role count of the k-ary atoms that are true in the state. For a concrete state s and the corresponding abstract state $\overline{s}$ we represent all roles occurring in s as a vector v of length $|\mathcal{R}|$ . Each k-ary predicate $p^k \in \mathcal{P}^{2+}$ is encoded as a matrix $m_p^k$ of dimensions $|\mathcal{R}|^k$, i.e., $|\mathcal{R}| \times \ldots \times |\mathcal{R}|$ ($k$ times). To encode absolute inputs, (a) v[r] is set to the role count $|\psi(r)|$ for every role $r \in \mathcal{R}$ and, (b) $m_p^k[r_i,\ldots,r_j]$ is set to the number of tuples in $\psi(r_i) \times \cdots \times \psi(r_j)$ such that $p(o_i,\ldots,o_j)$ is true in s.
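+
+ For the binary case ($k = 2$), a sketch of this encoding might look as follows; `obj_role` is assumed to map each object to its role, e.g. via the `role` helper sketched earlier.
+
+ ```python
+ import numpy as np
+
+ def absolute_inputs(state, obj_role, roles, binary_predicates):
+     """Absolute inputs for k = 2 (higher arities generalize): v[r] = |psi(r)|,
+     and m_p[r_i, r_j] counts object pairs with these roles for which p holds."""
+     idx = {r: i for i, r in enumerate(roles)}     # fixed index order over R
+     v = np.zeros(len(roles))
+     for o, r in obj_role.items():
+         v[idx[r]] += 1                            # role counts
+     mats = {p: np.zeros((len(roles), len(roles))) for p in binary_predicates}
+     for p, args in state:
+         if p in mats:
+             o1, o2 = args
+             mats[p][idx[obj_role[o1]], idx[obj_role[o2]]] += 1
+     return v, mats
+ ```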
70
+
71
+ Absolute inputs help in predicting the plan length since they capture information about the number of objects in a role. However, for predicting actions, this low level of granularity is unnecessary and we found that it can lead to poor accuracy in predicting the actions. Instead, we compute binned inputs v' and $m'^k_p$ by categorizing the absolute inputs v and $m^k_p$ into a configurable number of levels that express information about the structure of the state at a higher level of granularity. To encode binned inputs, in our experiments, we (a) encoded $v'[r]$ as $\min(v[r], 2)$ to categorize $\psi(r)$ as containing zero, one, or more than one object and, (b) encoded $m'^k_p[r_i, \ldots, r_j]$ as one of the three truth values of the predicate $\overline{p}^k(r_i, \ldots, r_j)$ (as defined in Definition 2.2) in $\overline{s}$ . We experimented with
72
+
73
+ a larger set of discrete values to categorize the absolute inputs, such as $\min(v[r], n)$ for larger $n \in \mathbb{N}$, which yielded no significant improvement in network predictions for our experiments.
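+
+ Continuing the sketch above, the binned inputs can be derived from the absolute ones; note that the number of candidate tuples per cell is just the outer product of the role counts.
+
+ ```python
+ import numpy as np
+
+ def binned_inputs(v, mats):
+     """Binned inputs: v'[r] = min(v[r], 2); each m'_p cell becomes the
+     three-valued truth of Definition 2.2 (0 if no tuple satisfies p,
+     1 if all candidate tuples do, 1/2 otherwise)."""
+     v_binned = np.minimum(v, 2)
+     totals = np.outer(v, v)                        # candidate tuples per (r_i, r_j)
+     binned = {}
+     for p, m in mats.items():
+         full = (m == totals) & (totals > 0)
+         binned[p] = np.where(m == 0, 0.0, np.where(full, 1.0, 0.5))
+     return v_binned, binned
+ ```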
74
+
75
+ Inclusion of goal-relevant information has been shown to facilitate learning goal dependent concepts (Winner and Veloso 2003; Groshev et al. 2018a). We propose a simple scheme to encode goal hints for canonical abstractions that involves post-processing a state to add new unary predicates, without using domain knowledge, and whose complexity is linear in the number of atoms in the state and goal.
76
+
77
+ For the state $s_{eg}$ in Example 2.1 where $g = at^2(b_1, r_b) \land at^2(b_2, r_b)$ we add atoms $goal_{at}^2(b_1, r_b)$ and $goal_{at}^2(b_2, r_b)$ to $s_{eg}$ . This allows the network to identify goal predicates. Since $at^2(b_2, r_b) \in s_{eg}$ we also add $done_{at}^2(b_2, r_b)$ to $s_{eg}$ which further allows the network to better identify relational structures of a state. For $at^2(b_1, r_b)$ we add two unary atoms $goal_{at_1}^1(b_1)$ and $goal_{at_2}^1(r_b)$ . We similarly add two other unary atoms for $at^2(b_2, r_b)$ . Doing so changes $role(r_b)$ from $\{room\}$ to $\{room, goal_{at_2}\}$ and $role(b_1)$ from $\{ball\}$ to $\{ball, goal_{at_1}\}$ . These changes in object roles allow a richer representation of the abstract state since new roles demarcating objects which are part of goals have been introduced. Finally, since $at^2(b_2, r_b) \in s_{eg}$ and there is no other atom at appearing in the goal where $b_2$ is the first parameter, $done_{at_1}^1(b_2)$ is added to $s_{eg}$ indicating that all atoms named at in g where $b_2$ appears as the first parameter are satisfied in the current state.
78
+
79
+ In general, let G refer to atoms in g for a problem $\Gamma$ . For an atom $p^k(o_1,\ldots,o_k)\in G$ add a new atom $goal_p^k(o_1,\ldots,o_k)$ to the state. This captures goal related relational information in the state s. Also add a set of atoms $\cup_{i=1}^k \{goal_{p_i}^1(o_i)\}$ to the state. As a consequence, an object appearing only in $G^{2+}$ now gets a defined role in s. Whenever a goal atom $p^k(o_1,\ldots,o_k)\in s$ , $done_p^k(o_1,\ldots,o_k)$ is added to the state, else it is removed. Additionally, for an object o, when $\exists p^k\in G, \exists i\in \mathbb{N}_1^k, \forall p^k(\ldots,o_i,\ldots)\in G$ $p^k(\ldots,o_i,\ldots)\in s$ where $o_i=o$ is satisfied, $done_{p_i}^1(o)$ is added to the state, else it is removed. Intuitively, this means that an object at index i for a predicate p is marked as done iff all goal atoms named p where the object appears at index i are satisfied in the concrete state.
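+
+ A compact sketch of this post-processing step, again over `(predicate, argument-tuple)` atoms (parameter positions are 0-indexed here, unlike the 1-indexed subscripts above):
+
+ ```python
+ def add_goal_hints(state, goal_atoms):
+     """Augment a state with goal_/done_ atoms following the scheme above."""
+     s = set(state)
+     for p, args in goal_atoms:
+         s.add((f"goal_{p}", args))                 # relational goal marker
+         for i, o in enumerate(args):
+             s.add((f"goal_{p}_{i}", (o,)))         # unary goal markers per position
+         if (p, args) in state:
+             s.add((f"done_{p}", args))             # this goal atom already holds
+     for p, args in goal_atoms:
+         for i, o in enumerate(args):
+             same_slot = [a for (q, a) in goal_atoms if q == p and a[i] == o]
+             if all((p, a) in state for a in same_slot):
+                 s.add((f"done_{p}_{i}", (o,)))     # every p-goal with o at slot i holds
+     return s
+ ```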
80
+
81
+ **Hybrid heuristic function** Using just the action probability in an algorithm like policy-rollout (Yoon, Fern, and Givan 2007) can lead to poor performance since it does not provide estimates on the cost to reach the goal. Generally, the neighborhood of a state has low variance in terms of the predicted plan length since (a) different actions can lead to states encapsulated by the same abstract state, and (b) different abstract states in the neighborhood are not substantially different. This can cause plan-length-based search to get stuck expanding nodes in local minima. We mitigate these limitations by combining the two predictions to form a hybrid heuristic.
82
+
83
+ We define the *artificial* path cost g'(s) to be the sum of action scores (derived from the action probabilities, Eq. 4) along the path to s, which we use to increase the path cost of low-confidence paths. This, in conjunction with the predicted plan length, helps arrive at better estimates of which nodes to expand. Since path information is typically stored in nodes, we compute g'(node) and h(node) from the network output as follows:
84
+
85
+ $$V_{o+}(i,o) = \frac{\sum\limits_{u_j \in \mathcal{P}^1 \cap role(o)} f(NN_i[u_j], \epsilon)}{|\mathcal{P}^1|} \tag{1}$$
86
+
87
+ $$V_{o-}(i,o) = \frac{\sum_{u_j \in \mathcal{P}^1 \setminus role(o)} f(1 - NN_i[u_j], \epsilon)}{|\mathcal{P}^1|} \tag{2}$$
89
+
90
+ $$V_p(i,o) = V_{o+}(i,o) + V_{o-}(i,o) \tag{3}$$
92
+
93
+ $$V_a(a(o_1, ..., o_n)) = 1 - NN_A[a] \times \frac{\sum_{i=1}^n V_p(i, o_i)}{n} \tag{4}$$
95
+
96
+ $$g'(node) = g'(node.parent) + V_a(node.action) \tag{5}$$
98
+
99
+ $$h(node) = NN_{len} \tag{6}$$
100
+
101
+ where role(o) is the role of the object o in node.state, $\epsilon \in [0,1]$ is a threshold and f is a filter: $f(x,\epsilon)=1$ if $x \geq \epsilon$ and 0 otherwise. $V_a \in [0,1]$ is the score of an action computed by determining the probability of the action along with the confidence of the instantiated parameters. $V_{o+}(i,o)$ and $V_{o-}(i,o)$ compute the score of the parameterized object's role relative to the predicted role. The score of the instantiated parameter $o_i, V_p \in [0,1]$ is a ratio of the total number of unary predicates that were correctly predicted for $role(o_i)$ . A low value of the action score indicates that the path is detrimental to search and should be penalized.
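+
+ Putting Eqs. (1)-(6) together, a sketch of the key computation might look as follows; the `nn` field names (`action_prob`, `param_roles`, `plan_len`) and the `node` interface are ours, chosen for illustration.
+
+ ```python
+ def hybrid_key(node, nn, P1, eps=0.5):
+     """f_GHN(node) = g'(node) + h(node), following Eqs. (1)-(6)."""
+     f = lambda x: 1.0 if x >= eps else 0.0         # the filter f(x, eps)
+     name, objs = node.action                       # a(o_1, ..., o_n)
+     vp = []
+     for i, o in enumerate(objs):
+         r = node.role_of(o)                        # role(o) in node.state
+         v_plus = sum(f(nn.param_roles[i][u]) for u in P1 if u in r)           # Eq. (1)
+         v_minus = sum(f(1 - nn.param_roles[i][u]) for u in P1 if u not in r)  # Eq. (2)
+         vp.append((v_plus + v_minus) / len(P1))    # Eq. (3): V_p(i, o)
+     v_a = 1 - nn.action_prob[name] * sum(vp) / len(vp)                        # Eq. (4)
+     g_prime = node.parent.g_prime + v_a            # Eq. (5): artificial path cost
+     return g_prime + nn.plan_len                   # Eq. (6): g' + h
+ ```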
102
+
103
+ **Searching using the learned heuristic network** GHNs can be used in standard graph-search algorithms like A\* using a blackbox simulator for action application and retrieving the atoms of a state. Given a node in the A\* search tree, we use the hybrid heuristic $f_{GHN}(node) = g'(node) + h(node)$ described above to determine which node to expand next.
104
+
105
+ Using $f_{GHN}$ as the key in the priority queue in a search algorithm like A\* only changes the order in which A\* expands nodes. The actual (or real) path cost, g(node.state) is used to determine if a visited state has been reached by a cheaper path under standard operation of A\*. The following result follows from the properties of A\* when used with a closed list (Russell and Norvig 2010).
106
+
107
+ **Theorem 4.1.** Planning with $A^*$ using $f_{GHN}$ is sound and complete on finite state spaces.
2008.00441/main_diagram/main_diagram.drawio ADDED
@@ -0,0 +1 @@
 
 
1
+ <mxfile host="www.draw.io" modified="2019-12-05T14:00:37.339Z" agent="Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.90 Safari/537.36" etag="heSVmvTI8R978uM4mDbX" version="12.3.7" type="google" pages="1"><diagram id="syVIlrBboLQQrdCDa6Ll" name="Page-1">7VrbbqMwEP0aHncV20DKY5teV0lVKZV6eWPBG5BIHDlOQ/r1a4rNxZBAWQp026fY4/HtzBmPPURDk2V4Re21NyMuDjQ4ckMNnWsQAqQj/hNJ9rFkbBmxYEF9Vyilgrn/ioVwJKRb38WbnCIjJGD+Oi90yGqFHZaT2ZSSXV7tDwnys67tBS4I5o4dFKUPvsu8WHoCx6n8GvsLT84MTCtuWdpSWexk49ku2WVE6EJDE0oIi0vLcIKDCDyJS9zv8kBrsjCKV6xOhxmcwGt6c/96fzP9FW6fVy64/CFGebGDrdiwBs2Aj3f2mxcWUeH2Qor42IlU7IntJVCUbFcujuYa8ead5zM8X9tO1Lrj1OAyjy0DXgNJ7xdMGQ4PbgckIHF2YbLEjO65iuhgCFgFr3RR3aVGkhpexj6mkNmCFotk3BQ5XhDgvQNIWA3kA6HuZ4ASoJ6xRNVYnvnT+f1skGhCZA0LTr0azvnV5DY6CwYIJzIGBqdRE044SDj18cDgNEvgVDDCK/c0CuW85gT2ZuM7eVj41un+UUD4VnmKKj8NWT0Ps43ne1kLffYox+DlTC9eSztFFdknXhx2C7cGxQB8A2RLHXwsZMj7jE0XmFWdh0WLVlhMyigObOa/5NdbZkYxwx3x+U4OBQcEFCLE2xSdstcPZRwwyg9UYFQMQ2GgN1Ilu27Os5Nqt51N7wbps+bQjkCrBZ9t4HsH/LzCy1v02LoOe8ie3x77rrfJ6MuGBlSTaHqfPAMwfyw1J5pyJ+mcaGWPt69BNL0m0YxvorVBtLKXbQeBs0XC/B9EMNRXUNdEKHuTNz5x0kPmKdv2z/ehylMBmH2yAYEKI9Zmg9mMDdw89j6jto4UNvUXDNHo+Los/Zg+L8QraJeaZfmNb2q+j5q63hY11affB1FTXbD8/lKbmqALapblipQ3/PS01hOez+avN7ib9LoabHThYhkuohIuIsXUrT3fwbgFF+/3GtKve7d2Dylkdrq+h5RlchSPuiPzQWbFlDRH8sW4ty+AH5auqAqbffuiVTMmDystpjd9RKppMatbn5U7P+azGjwbXBgE6mfmvsOg9M8vmPaBn8JjVU9rfIuGAHxQmOXV9B88sXr6Pyh08Rc=</diagram></mxfile>
2008.00441/main_diagram/main_diagram.pdf ADDED
Binary file (10.4 kB). View file
 
2008.00441/paper_text/intro_method.md ADDED
@@ -0,0 +1,40 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Introduction
2
+
3
+ Relation extraction (RE) aims at obtaining the semantic relationship between entities using text as a source of knowledge. For instance, from the text snippet, *Steve Jobs and Wozniak co-founded Apple in 1976.*, we can infer that *Steve Jobs* and *Wozniak* have an *org:founded_by* relation with *Apple*. RE is an important subtask of information extraction that has significant applications in various higher-order NLP/IR tasks, such as question answering, knowledge graph completion and semantic search [@sunita-2008]. Earlier studies on RE were based on feature engineering. Such methods rely on linguistic and lexical tools to obtain the information required for such feature engineering [@zelenko2003]. Additionally, the performance of these methods is hindered by the sparse feature representation used by the models.
4
+
5
+ With the surge of neural networks, deep learning-based models have become prevalent. In these models, pre-trained word embeddings are employed to solve the feature sparsity problem. Deep learning based RE models can further be categorized along two lines: sequence-based and graph-based models. In sequence-based models, a word sequence is used to embed the text using convolution or recurrent neural networks [@Zeng14; @zhou2016]. In graph-based models, the text is first converted into a graph using a dependency parser or other linguistic tools and then processed with a graph neural network which encodes neighborhood and feature information. Finally, the encoded graph features are used in RE. Along this line, @liu2015dependency and @miwa-bansal2016 employed a bidirectional long short-term memory (BiLSTM) network and @zhang2018graph and @wu-2019 employed a graph convolutional network (GCN) [@kipf2016semi] to encode the textual graph used in their work. Compared to sequence-based models, graph-based models have been shown to be effective in learning long-distance dependencies present in text [@zhang2018graph].
6
+
7
+ Although the state-of-the-art results are obtained using graph-based models, they require external tools to build a graph for the text. Therefore, they are computationally expensive and not fully end-to-end trainable. While sequence-based models do not depend on external linguistic tools, they have been shown to be less effective for long text, especially when long-distance dependencies are required [@sunil2017]. To bridge this gap, we propose a Self-determined GCN (SGCN) which infers (self-determines) a graph for the text using a self-attention mechanism [@vaswani2017], rather than using any external linguistic tool. Then the self-determined graph is encoded using a GCN model. We evaluate the effectiveness of the SGCN on a RE task against several competitive baselines. In summary, our contributions are the following:
8
+
9
+ - We build a novel graph-based model to encode text without the use of any linguistic tools.
10
+
11
+ - We show the effectiveness of the SGCN model on the RE task and achieve the state-of-the-art performance.
12
+
13
+ - We provide a comprehensive ablation analysis that highlights the importance of SGCN.
14
+
15
+ The GCN  [@kipf2016semi] is an extension of a convolutional neural network, which encodes neighborhood information in a graph. Let $G = (\mathbf{V},\mathbf{A},\textbf{X})$ be a graph, where $\mathbf{V}$ represents the vertex set and $\mathbf{A} \in \mathbb{R}^{|\mathbf{V}|\times |\mathbf{V}|}$ typically represents a sparse adjacency matrix, where $\mathbf{A}_{(u,v)}$ = 1 indicates a connection from node $u$ to node $v$, else 0, and $\mathbf{X} \in \mathbb{R}^{|\mathbf{V}|\times d}$ represents node embeddings. Each GCN layer takes the node embedding from the previous layer and the adjacency matrix as input and outputs updated node representations. Mathematically, the new node embedding for node $v \in \mathbf{V}$ in the $l^{th}$ layer is:
16
+
17
+ $$\begin{equation}
18
+ \mathbf{z}_{v}^{(l+1)} = \sigma \left ( \sum_{u=1}^{n} A_{(u,v)} (\mathbf{W}^{(l)} \mathbf{z}_{u}^{(l)} + \mathbf{b}^{(l)} ) \right ),
19
+ \end{equation}$$
20
+
21
+ where $\mathbf{W}^{(l)} \in \mathbb{R}^{d \times o}$ and $\mathbf{b}^{(l)} \in \mathbb{R}^o$ are the parameters of the GCN at layer $l$ and $\sigma$ represents a non-linear activation function.
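+
+ As a minimal illustration of this layer (a dense-matrix sketch, not the paper's implementation), the update can be written in a few lines of NumPy:
+
+ ```python
+ import numpy as np
+
+ def gcn_layer(A, Z, W, b):
+     """One GCN layer: z_v' = ReLU(sum_u A[u, v] (W z_u + b)).
+     A: (n, n) adjacency; Z: (n, d) node embeddings; W: (d, o); b: (o,)."""
+     messages = Z @ W + b                  # (n, o): transform every node once
+     return np.maximum(A.T @ messages, 0)  # aggregate incoming edges, then ReLU
+ ```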
22
+
23
+ As discussed in Section [1](#sec:intro){reference-type="ref" reference="sec:intro"}, most current works in NLP use a GCN to encode a pre-built graph, e.g., a dependency parsing graph [@Marcheggiani2017] or predicate-argument graph [@marcheggiani-bastings-titov:2018:NAACL]. Pre-built graphs require sophisticated tools that have been trained on manual annotations. Although such methods have demonstrated promising results in various NLP tasks, they are computationally expensive, not fully end-to-end trainable and not applicable to low-resource languages. To overcome these issues, our model dynamically self-determines multiple weighted graphs using a multi-head self-attention mechanism [@vaswani2017] and applies a separate GCN over each one.
24
+
25
+ ![Model architecture of SGCN. First, $h$ adjacency matrices are self-determined from the text using a multi-head self-attention mechanism. Then, a separate GCN is employed for each graph to encode neighborhood information. Finally, the outputs of each GCN are concatenated. ](images/dynamic_graph.png){#fig:sgcnn width="30%"}
26
+
27
+ Concretely, SGCN represents the words from the text as nodes in a graph and learns multiple adjacency matrices ($\mathbf{A}_1^*, \mathbf{A}_2^* \dots \mathbf{A}_h^*$), $\mathbf{A}_i^* \in \mathbb{R}^{|\mathbf{V}|\times |\mathbf{V}|}$ in every layer of the GCN (as depicted in Figure [1](#fig:sgcnn){reference-type="ref" reference="fig:sgcnn"}). Different from the $\mathbf{A}$ used in the traditional GCN, elements in $\mathbf{A}_i^*$ are not binary, but mean-normalized real numbers that represent the strength of the connection in the graph. Mathematically, for the $l^{th}$ layer, we compute the weight of the connection $u$ to $v$ for the $i^{th}$ head, $\mathbf{A}_{i^*_{(u,v)}}$, as:
28
+
29
+ $$\begin{equation}
30
+ \label{eq1}
31
+ \begin{split}
32
+ \mathbf{M}_{i_{(u,v)}}^{(l+1)} &=
33
+ \mathbf{ReLU} \left ( \frac{\mathbf{K_i}^{(l)} \mathbf{z}^{(l)}_u . (\mathbf{Q_i}^{(l)} \mathbf{z}^{(l)}_v)^T} {\sqrt{d} } \right ) \\
34
+ \mathbf{A}_{i_{(u,v)}}^{*^{(l+1)}} &= \frac{\mathbf{M}_{i_{(u,v)}}^{(l+1)}} {\sum_{u'\in V} \mathbf{M}_{i_{(u',v)}}^{(l+1)}} \\
35
+ \end{split}
36
+ \end{equation}$$ where $\mathbf{K_i}^{(l)}, \mathbf{Q_i}^{(l)} \in \mathbb{R}^{d \times d}$ are the trainable parameters. Once all $\mathbf{A_i}^*$s are obtained for the layer, we apply a GCN on each graph to encode the neighborhood information and concatenate the outputs. It is worth mentioning that the attention mechanism used in Eq. [\[eq1\]](#eq1){reference-type="ref" reference="eq1"} differs from the dot-product attention proposed by @vaswani2017. In this operation, we use the ReLU activation function [@relu2010] which can mask some of the attention weights by assigning them zero weight. This is more appropriate for the graph, since there are not always mutual connections between every node pair. In contrast to the traditional GCN, which uses the same connections in each layer, the SGCN determines different connections at each layer.
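+
+ For one head, this combination of Eq. [\[eq1\]](#eq1){reference-type="ref" reference="eq1"} and a GCN pass might be sketched as follows (PyTorch, with our own shapes and names; the full model runs $h$ such heads and concatenates their outputs):
+
+ ```python
+ import torch
+ import torch.nn.functional as F
+
+ def sgcn_head(Z, K, Q, W, b):
+     """Z: (n, d) node states; K, Q: (d, d) attention projections; W, b: GCN weights."""
+     d = Z.size(1)
+     M = F.relu((Z @ K.T) @ (Z @ Q.T).T / d ** 0.5)      # M[u, v]: ReLU-masked affinity
+     A = M / M.sum(dim=0, keepdim=True).clamp(min=1e-8)  # normalize over u (Eq. 1)
+     return F.relu(A.T @ (Z @ W.T + b))                  # GCN over the inferred graph
+ ```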
37
+
38
+ For a given text $T = w_1, w_2 \cdots w_n$ and two target entities of interest $e_1$ and $e_2$ corresponding to words (phrases) in $T$, a RE model takes a triplet ($e_1$, $e_2$, $T$) as input and returns a relation for the pair (including the *no relation* category) as output. The set of relations used for inference are predefined. We first transform the text into a sequence of vectors using a pre-trained word embedding. Next, we employ a BiLSTM encoder to capture the context information in the vector sequence, which is then further used to represent the nodes of the graph.
39
+
40
+ To further encode the long-distance context, we employ $k$-layer SGCNs in our model. As explained in Section [3](#sec:sgcn){reference-type="ref" reference="sec:sgcn"}, for each layer, SGCN dynamically determines the weighted connections for the graph using a self-attention mechanism and employs a GCN to propagate neighborhood information into nodes. Next, we employ a layer aggregation, originally proposed by @xu2018representation, in which all the SGCN layer outputs, along with a BiLSTM layer output, are concatenated and fed into a feed-forward layer. Finally, for relation classification, we follow @zhang2018graph and employ another feed-forward layer with a softmax operation on the concatenation of the sentence representation and both target entity representations. Sentence and entity representations are obtained by applying max-pooling over the entire sequence and average pooling to the position of entities in the final representation, respectively. Following the convention of @zhang2018graph, we henceforth refer to this model as **C-SGCN**.
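+
+ A sketch of this final pooling-and-classification step (our tensor shapes; `ffn` stands in for the feed-forward layer):
+
+ ```python
+ import torch
+
+ def classify(H, e1_idx, e2_idx, ffn):
+     """H: (n, d) final token representations; e*_idx: token positions of each entity."""
+     h_sent = H.max(dim=0).values          # max-pool over the whole sequence
+     h_e1 = H[e1_idx].mean(dim=0)          # average-pool each entity span
+     h_e2 = H[e2_idx].mean(dim=0)
+     logits = ffn(torch.cat([h_sent, h_e1, h_e2]))
+     return torch.softmax(logits, dim=-1)  # distribution over relation labels
+ ```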
2009.10007/main_diagram/main_diagram.drawio ADDED
@@ -0,0 +1 @@
 
 
1
+ <mxfile host="app.diagrams.net" modified="2020-06-16T17:39:44.588Z" agent="5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Safari/537.36" etag="osR5YvbdhPPjBc5YUI5L" version="13.2.4" type="device"><diagram id="XcZOCzNj3RU5Exr3deJQ" name="Page-1">7V1dd5s8DP41uWwP30kvaUK77LRpT0m3dTc7BNyEjUBeIG26X//a4SOJMYG0MKjxLlYQRiK2pEeWhemJw+Xm2jdWi1vPAk5P4KxNTxz1BIEXZRn+QZS3iNIXYsLct6240Y6g239BTORi6tq2QHDQMPQ8J7RXh0TTc11ghgc0w/e918Nmz55zKHVlzEGGoJuGk6V+t61wEVEHMrejfwH2fJFI5rn4ytJIGseEYGFY3useSdR64tD3vDA6Wm6GwEGdl/RLdN9VztX0wXzghmVuOHv5urrsP//+5U6E+7W7HP/H/TqLubwYzjr+wT1BcSC/y2cPsoVPHb7FXaH8t0aPevkC/NCGHXRmOPbc7YkqkuEugG+Hu0bwaB7/rYQbFySakdD4fGEzoiT0CGcRGyREEFabLAs14QH7cYbzhbTodxSQhQP5AhwGqK3w5PJ1YYdAXxkmuvIKDQbSFuHSgWc8PDSCVaTCz/YGWOiRbccZeo7nbxmJzwMTmCakB6Hv/QF7V7jtv1T4vlIkIwz7GWz2SLGSXANvCUL/DTaJr8qJBscWm5ri607/E/Vf7Kl+QjNii5unnHdKCQ9ivTxFRy+yXWpBI41PPT9ceHPPNRxtR730vbVroU4coW7ZtbnxvFXc3b9BGL7FHsdYh97hYICNHf5At5/L8dlTzAwdjzb7J2/JiQt/795N6PQp4YdOdrdtz5L7ovFM/ItwbBxDw5+D8Fhvxf4VddHR4faBY4T2y6GfIw1efOu9Z29tOFYTUTpUE3GAjX/grX0TxHdhKpA+xvu1QiR4LkxLdjrAFxufZQSLtG0FZpT6/aR/+lkzEiSCHfFSbYZEq7evVnoV3CJfzGGYwyskzGnTc3eHW84IidWPEJPUnlHHuRkcj2LgK/HUGK9j5OZj4UogWSaHLI1FtgIDZAbIjBu14EWjpO5yMziBBQsstCCGFoO2hRbyJwktGLcquKVGpGsTfTwdf9PgXSN1Wkk+u05ybsqOKzbkWtPguEULhPwdzxNMWqzLpAWlOOUJXEtFq17wzHSMILBNUpftZ5p3OeveCRnr0isTs4EsyUeHpDijXTKhXdLVfjDvLfE5ed2CvHeWkYIxEjBGUcfUl0DPR4jyM7hS8aY6uZs83Y5/avBG7u5em8A/+vRx9HRScIBpOjTj8Jg7cD0XYIoak2K/OjKhJgJ/z+Oq8YWlbVlOXiBx6KGqXjXoY4tvslTS6+DK8x6vE4yG9nz162+wGEiPzo/v5lMgn/FS1snUv/qW9VMZz9LXVEU76lnqXgLry1jQx5Wz4GJGJX1KZa6gBLK8fzGNYIIVWIqCJ/NKr68pdQG02K8DoE+H2mTpOlnGfto7Lly4Fg5WrnfIT44Dcgcy0thiVC9E/0Q3WwL/MobaAq5NZeFf6WPwj08F64b/QS78l0Z76RDtOTNVxh1RBIYyU5QSUxf+HLK40dSHyXhyDYnGEnkVdxas9lv7SfMHbXj3MIJN9ey1k2Y9Hw8skFlu/7U8upDwmg0u6zNJLrOK4IKohJLUNiUUkBJeaxPtQb0Z/1Sn47sJU6+ylWN4yQtBvXhJOJcJoFybhpFmOWUwOYD+N2wEqncVZ0VgfYDU5xJ06cfRent2D3wb9i1Sng9CuPg5IVzCkFfCla8shMsDjBFeuZUD4VCrjLe9ZivUIKgD5CVSYH98jl/Sfb43FyAi/6rq+t1wfOBcWQogVc+LHPU8SAEQfGhtKC0LuVoUrAz3/cpAyF4PHx/G0yd4B4zv1OGXAqSNxHdKPfC8dAq6zWWITo/hWBELK2KhkhuNBR80SuouN4MTWRFL5eTiNMAnKGIRpeaKWIiRRX7c2S7LYpFF82NAOzcaUZhGSd3lxiILFlnk5bS4lkUWpLeH22hZLLJofgxo50YjCtMoqV5uJnsTtjt4jL8JK8kN43H+OmW7bIXhcfNjQDs3GrGLRkl14zF72bQ7eDxoGR6TKtnbaCsMj5sfA9q50YhdNEqqG49ZdrszeIyvhDeOx+94WYfhMcNjKrnRiF00SqqX24zlq7uDx/j6ceN4fMHwmOEx40YtdtEoqV5uaCdlgeFxN/BYEVtWz5W88MYAmQFy57nRCF40Sqo7Yc0AuTuAfNE/bxsks/fCGSQzbvTCF42S6p4jSwySOwPJYsty1uxrQ027ewbIreFGI3jRKKnuObLIALkzgJyZIzcPyf1sF7Xz09b52++f8PHq/R1Fyf0Rj8/+lqLkhvEC/L/e4j+zUW11u3Qf7RDqYjaCJAxNBIGEJo+t8krYhr+WAQbPxA1/q/wyUT/5wlCik417sXe8vYmHDtKx0CEvxifuVdsHhgK4LDN8e/mIs59cVu8nGnqSkTbVhiV2pmW7gRfBLbY7cvp9o/2NSkl6iu+iXJ2e5ldRl59Flop5p5o+hRd1bXpSbPVx5fgEG9j2sQ9o8c1vYEuq5sPGonWfv8FAgCd8KIqIAlV8/Ya8WR8rwWDpJcaN3lQMjZLqXu/h0U5jLL3UifTS7ss2KSbLzU7MBFaCwSCZcaMXvmiUVPeKD4Pk7kDyxUXLAJmVYDBAZtzoBS8aJdU9R5YYIHcFkPsKnrVuGpDZRvAMkBk3esGLRkl1z5BFBshdAeSB0DZAlk4HZNzk6q8lElAt0bU20R7Um/HPMh+yZuVCJ5YLCWXLhaooC+F15coyv35zlfmV5esP+hqsj5W1VfzZ66Gqa6h1gWp8zq9dZ1SAoCilX6KSuX9YLETWivytsWvQCh02R/yNJerr6H+RqUlmXim3TU2OVEOlY6bewgb6dHz7eDMmjyk2eMHCWKFD882xYW/6xTHDLOr2m1lKMMw/8+1g3K1DyAVUOAj4JvaEQVAIYzA4fQzgqe8h60mvXcOfvrj1LIBa/A8=</diagram></mxfile>
2009.10007/main_diagram/main_diagram.pdf ADDED
Binary file (59.7 kB). View file
 
2009.10007/paper_text/intro_method.md ADDED
@@ -0,0 +1,102 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Introduction
2
+
3
+ In recent years, machine learning (ML) has become a viable solution for various applications due to rapid developments in sensor technologies, data acquisition tools, and ML algorithms (e.g., deep learning). It is well-known that training data of sufficient quality and quantity is a pre-requisite to train an ML classification model (classifier) that can generalize reliably. However, there are situations in which access to data that fulfil these requirements is restricted, e.g., due to privacy concerns.
4
+
5
+ Such situations are particularly prominent in the medical domain. One example is the Cesar project [@cesar], which aims to enable individuals to perform sleep monitoring at home with low-cost sensors and ML-based automatic sleep apnea detection on their smart-phone. Having access to labelled sensor data from a large clinical study enables us in the project to train ML classification models and evaluate their performance. The final goal of the project is to allow any individual to use a customized classifier that is tailored to the particular needs of the individual. However, regulatory restrictions prohibit us from sharing the data, whether with individuals so that they can create their own customized classifiers or for other scientific purposes. Creating a customized classifier in our lab for every interested individual is not a feasible solution.
6
+
7
+ A possible solution to this problem is to release an anonymized version of the data with the use of existing database anonymization strategies like k-anonymity [@sweeney2002k] or l-diversity [@machanavajjhala2007diversity]. However, since crucial parts of our data are raw sensory time-series data from which identification can be done indirectly via learning, existing database anonymization strategies are not suitable for this task. Another option would be to only release white-box or black-box classifiers in the form of an API or mechanisms that perform classification or extract important statistics from the data. For this purpose, differential privacy [@dwork2008differential] is a well-established framework that offers theoretical guarantees for privacy preserving application of statistical mechanisms. Works such as [@abadi2016deep] make differential privacy a viable option for ML applications. However, this option defeats our goal of giving customization freedom to the end-users. Additionally, though theoretically sound, differential privacy has been shown to yield in many cases unacceptable privacy-performance trade-offs [@jayaraman2019evaluating], and to be susceptible to different information leakage attacks [@zhang2019secret; @hitaj2017deep]. A third option is to train a differentially private generative model like [@jordon2018pate] or [@xie2018differentially] on our data, in order to synthesize a dataset that we could release to the public. We avoid this option for two reasons: (1) because of the points discussed above about differential privacy; (2) as we care about the identification of recordings and not of individual datapoints, we would need to apply the group privacy property while training such a model. However, this would either weaken the attainable privacy guarantee or the performance.
8
+
9
+ Based on the above discussion, we explore in this work a different option, which is not sufficiently studied in related works. We investigate the empirical feasibility of labelled noisy higher-layer representations for training other \"student\" classifiers to generalize reliably on the real data. The goal is to create a labelled dataset from which a model can be taught to perform sleep apnea classification, while at the same time data from this dataset cannot be strongly identified as belonging to a specific recording. To do this, we exploit the knowledge obtained by a given trained classifier, which we refer to as Teacher, $h_T$ for notational convenience. $h_T$ is trained to capture the most important aspects of the real data based on the loss it attempts to minimize, making it learn task-related knowledge about the training data. We expect that excitatory or inhibitory datapoints (which we call *stimuli*) of specific neurons can also contain important information about the class decisions of $h_T$. Based on this, we learn to generate varying stimuli targeting the output of one or more $h_T$ and use these stimuli to train a student classifier $h_S$. *Neuronal Excitation* (NE) is a general method that can be applied on artificial neural networks [@Nguyen_2015_CVPR] as well as on the mammalian inferotemporal cortex [@ponce2019evolving]. We use in this work *Activation Maximization* (or Minimization) (AM) [@erhan2009visualizing] as NE. AM is a well-established method for interpretation of neuronal activity. The overall proposed procedure is loosely related to implicit learning [@reber1989implicit] in the sense that for $h_S$, knowledge about features of the true joint distribution is acquired implicitly, and not through direct loss minimization on data sampled from it (or from a distribution that approximates it).
10
+
11
+ This leads us to an important novel aspect of this work that to the best of our knowledge serves as a differentiating factor compared to other generative approaches. The stimuli we are synthesizing need not necessarily be realistic. On the contrary, to some extent we want them to be unrealistic. We want $h_S$ to learn indirectly through the stimuli and generalize on the real data. Please note that such an approach only captures those features needed to strongly excite or inhibit different class neurons of $h_T$. This is in contrast to a generative model or framework which would attempt to capture all features necessary to learn the joint or the data (marginal) distribution based on its loss. Therefore, we have more direct access to the conditional distribution we want to learn. We hypothesize that this procedure is a natural way to generate datapoints that, though unrealistic, could contain inherently important information about the class separation we care about, e.g., sleep apnea in our case. Additionally, the datapoints could potentially provide less \"unwanted\" information for other class separations which we do not want to be learned. In this work our contributions are as follows:
12
+
13
+ - We demonstrate that the proposed approach is an empirically feasible way to learn and generalize successfully on new data from the true distribution.
14
+
15
+ - We investigate the applicability of training different smaller architectures for successful customization with the use of the generated stimuli dataset. We compare with an existing well-established generative approach, namely gradient-penalty Wasserstein GAN [@pmlr-v70-arjovsky17a], and illustrate promising results.
16
+
17
+ - We empirically show the viability of a variation of the proposed approach as a means of generating anonymized data. To do this we develop a patient de-anonymization attack inspired from face identification, and evaluate how the AM stimuli compare to the real data in terms of the identification success of the adversary. Furthermore, we explore the defence capability that the proposed approach offers against membership inference attacks and exhibit additional potentially useful properties of the described method.
18
+
19
+ The rest of the paper is organized as follows: Section 2 presents the proposed approach. Section 3 describes the application scenario and the datasets we use. In Section 4 we perform experiments to investigate the generalization and customization capabilities of the proposed approach. In Section 5 we investigate defensive and anonymization properties and in Section 6 we discuss additional characteristics of the proposed approach. Section 7 compares with related literature. Section 8 concludes this paper.
20
+
21
+ # Method
22
+
23
+ In this section we discuss the approach we use to generate a synthetic labelled dataset consisting of noisy stimuli via the use of AM.
24
+
25
+ <figure id="fig:Method" data-latex-placement="h">
26
+ <img src="adversarial1_v4.png" />
27
+ <figcaption> Four main steps of the proposed approach. TEACHER corresponds to <span class="math inline"><em>h</em><sub><em>T</em></sub></span> and STUDENT to <span class="math inline"><em>h</em><sub><em>S</em></sub></span></figcaption>
28
+ </figure>
29
+
30
+ We want to transfer the knowledge of a given trained DNN classifier $h_T$ to another model or learning algorithm $h_S$ through the use of a synthetic dataset $D_S$, with $h_S,h_T: X\rightarrow Y$, where $X$ is the input and $Y$ the output space. We assume that the original data $D$ [^2], with which $h_T$ is trained, is not available for training of $h_S$. The end-user who trains $h_S$ has only access to $D_S$. We aim to enable $h_S$ to classify data that come from the same distribution as $D$ with a similar performance as $h_T$. One way to do this efficiently is to extract the knowledge accumulated by $h_T$ with the creation of $D_S$. The novelty of the proposed approach stems from the fact that we utilize AM in an unconventional manner with the goal of creating a diverse, multi-faceted dataset that can be used to train another classifier.
31
+
32
+ Depending on the success of the generation procedure at mapping the important features learned by $h_T$ into $D_S$, and on the algorithmic and architectural similarity between $h_T$ and $h_S$, we show that it is possible for $h_S$ to learn to perform the classification task $h_T$ has learned.
33
+
34
+ Our proposed design is based on four basic steps (see Figure [1](#fig:Method){reference-type="ref" reference="fig:Method"}):
35
+
36
+ - Step 1 - Training of the teacher. We train $h_T$ in a supervised manner with $D$ to learn the underlying conditional data distribution $p(y|x)$. This requires the original labelled training data.
37
+
38
+ - Step 2 - Creating the synthetic dataset. We create a synthetic dataset $D_S$ that captures features that $h_T$ has learned from training on $D$. A generation procedure $Gen\{h_T\}$ is used to create this synthetic dataset. As $Gen\{h_T\}$, we perform AM via a deep generator network ($G_{AM}$) that transforms a small noise vector $z_{in}$ to examples that strongly activate a predefined neuron. Inspirations for this design are [@baluja2017adversarial; @nguyen2016synthesizing]. After the synthetic set is created, we create its labels. If $x_s$ is a synthetic example created by $Gen\{h_T\}$, we give $x_s$ the label that $h_T$ chooses for it, i.e., either the class with the maximum output probability, $\arg\max_i \{h_T^{(i)}(\theta,x_s) \}$ ($i$ corresponds to the possible class of the output and $\theta$ corresponds to the parameter vector of $h_T$), or the softmax of the output to capture the output probabilities of $h_T$.
39
+
40
+ - Step 3 - Training of the student. As the next step, $h_S$ is trained using the synthetic data and labels produced by Step 2. $h_S$ can be a larger or smaller DNN than $h_T$, or even be based on a different learning method, e.g., an SVM.
41
+
42
+ - Step 4 - Use the student on the test set.
43
+
44
+ The choice of $Gen\{h_T\}$ and which method to use for $h_S$ are two central decisions. Next, we discuss the choice of $Gen\{h_T\}$.
45
+
46
+ For the stimuli generation we consider two approaches: AM and code inversion [@mahendran2016visualizing], since both approaches result in expressive data based on the acquired knowledge of $h_T$. Code inversion requires the original training data, or the logits of the data from the fully connected layer we try to match. However, training $h_S$ without access to $D$ would be beneficial towards our anonymization objectives. Therefore, we use AM for the generation of the synthetic dataset. We further discuss this choice from a more theoretical viewpoint in the next section. Note that contrary to other works like [@nguyen2016synthesizing], the goal of $Gen\{h_T\}$ is not to produce realistic looking synthetic data, but instead to create a synthetic dataset that can reliably train $h_S$.
47
+
48
+ Formally, we assume a trained model $h_T(\theta,x)$ with $x$ being an element of the input space $X$, and $\theta$ the parameter vector of the model, which is an element of parameter space $\Theta$. To synthesize data that the trained model $h_T(\theta,x)$ perceives as elements of a class, $Gen\{h_T\}$ uses AM on the activations of the output layer of $h_T(\theta,x)$ via $G_{AM}$ such that:
49
+
50
+ $$\begin{equation}
51
+ \theta^*_G=\arg\max_{\theta_G \ \text{s.t.}\ \|G_{AM}(\theta_G,z_{in})\|_\infty\leq R_G } \{h_{T_L}^{(i)}(\theta,G_{AM}(\theta_G,z_{in}))\}
52
+ \end{equation}$$
53
+
54
+ where $i$ corresponds to the class we want to find a stimulus for, and $\theta_G$ is the parameter vector of $G_{AM}$ [^3]. Note that $\theta$ is static because $h_T$ is already trained and we are optimizing for $\theta_G$ given varying \"pseudo inputs\" of $z_{in}$. $R_G$ is the maximum allowed value of the norm of the output of $G_{AM}$ [^4]. We use the $l_\infty$-norm, i.e., a hyper-cube, to express the bounding, and $L$ denotes the output layer. We choose to perform AM in the output layer of $h_T$ since we want stimuli that correspond to a strong belief for a class in $h_T$. We use $z_{in}$ as an input random noise vector (mainly drawn from a uniform distribution) which $G_{AM}$ transforms into the stimulus. We intentionally do not give any additional priors to $Gen\{h_T\}$, since we do not want to constrain our exploration of the feature space.
55
+
56
+ We stop the AM when the output of the target class neuron $i$ is higher than all the other output neurons and exceeds a threshold $TH$:
57
+
58
+ $$\begin{equation}
59
+ h_{T_L}^{(i)}(\theta,G_{AM}(z_{in}, \theta_G^*))>TH
60
+ \label{eq:Thresh}
61
+ \end{equation}$$
62
+
63
+ This implies that $\theta^*_G$ that satisfies Eq. [\[eq:Thresh\]](#eq:Thresh){reference-type="ref" reference="eq:Thresh"} exists in a subspace defined by the decision boundaries of class $i$. We assume that softmax activation is used on the output layer.
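+
+ A minimal sketch of this optimization (PyTorch, with our own training-loop choices: batching, Adam, and a scaled tanh to enforce the $R_G$ bound, none of which are specified above):
+
+ ```python
+ import torch
+
+ def train_am_generator(G, h_T, target, z_dim, TH=0.9, R_G=1.0, steps=2000, lr=1e-3):
+     """Optimize G so that h_T assigns class `target` the argmax with belief > TH."""
+     for p in h_T.parameters():
+         p.requires_grad_(False)               # theta is static; only theta_G moves
+     opt = torch.optim.Adam(G.parameters(), lr=lr)
+     for _ in range(steps):
+         z = torch.rand(64, z_dim)             # varying pseudo-inputs z_in
+         x = R_G * torch.tanh(G(z))            # enforce ||x||_inf <= R_G
+         out = torch.softmax(h_T(x), dim=1)
+         loss = -out[:, target].mean()         # ascend the target activation
+         opt.zero_grad()
+         loss.backward()
+         opt.step()
+         if (out.argmax(1) == target).all() and (out[:, target] > TH).all():
+             break                             # stopping rule: the threshold condition
+     return G
+ ```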
64
+
65
+ We specify the loss of $G_{AM}$ to maximize the logits of each class of $h_T$ (TEACHER in Figure [1](#fig:Method){reference-type="ref" reference="fig:Method"}). Then, to make the generated synthetic stimuli more diverse, we add inherent randomness in the AM procedure. We do this by reinitializing the AM generator, by randomly setting $TH$ to one of several possible values, and by editing randomly generated datapoints instead of generating them (for more details please refer to Appendix D). Our goal is to capture a wide variety of different starting positions for the gradient ascent (AM). This means that assuming that the starting position $x_{s_{in}}$ of a stimulus before the AM optimization takes place is drawn from distribution $p_{AM_{in}}$, we want $supp(p_{AM_{in}})=X$, so that $x_{s_{in}}\in X$. Furthermore, we want to randomize where the gradient ascent will end up, given that the threshold condition is satisfied.
66
+
67
+ We perform the AM from each initial position towards a random class with equal probability, to create a stimulus. The result is a synthetic dataset comprised of the stimuli of $h_T$ for all the classes. The goal of the different initial feature space positions is to take advantage of the multi-faceted property of the neuronal activation [@nguyen2016multifaceted] when performing AM to create a diverse synthetic dataset. In the next section, we analyze our choice of AM as a means to confine the feature space, in order to \"focus\" $h_S$'s learning to important regions of the feature space.
68
+
69
+ Assume a marginal distribution $p_N$ with support $supp(p_N)=X$, from which we draw data that we label with $h_T$ and then use to train $h_S$. It is straightforward to show that for $h_T$ and $h_S$ with the same architecture, which corresponds to the same hypothesis space, i.e., $H_S=H_T$, $h_S$ theoretically has the ability to learn to perfectly imitate $h_T$. Even when $h_S$ uses a thinner variant of the network architecture of $h_T$, it is possible to show that if certain conditions are met, we can probabilistically expect better learning behavior as we grow the hypothesis space $H_S$ towards that of $H_T$. We defer a more detailed discussion on this topic to Appendix C.
70
+
71
+ However, despite the above point, as the dimensionality of the input space increases, training $h_S$ with generated data from a different marginal data distribution than the real data becomes unviable. To mitigate this effect, we *confine* the space in which we perform the minimization. There could be many ways to confine the space in which we wish to have low true risk. Since we have in our case no access to the true marginal distribution $p(x)$, we confine the space to sub-regions of high confidence for our conditional estimator. Thus, one of the core ideas of this work is that, *assuming that our $h_T$ is good at approximating $p(y|x)$, regions where the classes are inconclusive for $h_T$ should also be inconclusive for $p(y|x)$.* Thus, by definition there cannot be a strong belief for any algorithm that estimates $p(y|x)$ well in these sub-regions. In the next section, we explain how we use this principle to inhibit the class probabilities of a recording's conditional approximator, i.e., a model that recognizes different recordings, for the purpose of anonymization.
72
+
73
+ To satisfy our objective of generating anonymized data in relation to information about class separation for a different labelling for patient classification that we wish to hide, we need to extend our approach. Our design is loosely inspired by the analysis of [@feutry2018learning]. We assume a new learning task $X\rightarrow U$, different from the original $X\rightarrow Y$. $U$ in our case is the set of all the identities of the patients we have in our dataset, which corresponds, based on some ordering of the identities, to $\{0,\ldots,N_p\}\subset\mathbb{N}$ where $N_p$ is the number of patients.
74
+
75
+ <figure id="fig:MethodExcInh" data-latex-placement="h">
76
+ <img src="ExcitatoryInhibitory.png" />
77
+ <figcaption> Synthesizing stimuli that excitate the <span class="math inline"><em>p</em>(<em>y</em>|<em>x</em>)</span> approximator <span class="math inline"><em>h</em><sub><em>T</em></sub></span>, depicted as TEACHER<span class="math inline"><sub><em>Y</em></sub></span>,while inhibiting the <span class="math inline"><em>p</em>(<em>u</em>|<em>x</em>)</span> approximator, depicted as TEACHER<span class="math inline"><sub><em>U</em></sub></span> </figcaption>
78
+ </figure>
79
+
80
+ To model this labelling, we use a random variable $u\in U$, conditionally dependent on $x$ by $p(u|x)$. Our goal is to generate data that are not strongly affiliated with any outcome of $u$. Formally, we want: $\forall x_s\in D_S, \forall u_c\in U: p(u_c|x_s)\approx p(u_c)$. We hypothesize that if $y$ and $u$ are not strongly correlated given different outcomes of $x$ then learning for $X\rightarrow Y$ would be possible with data that satisfy the previous requirement.
81
+
82
+ In practice, to achieve this, we train a network $h_{TU}$ to approximate $p(u|x)$. Then we alternate between training updates for the original objective and the following cross-entropy loss: $$L_U(\theta_G)= - \sum_{u_c} \hat{p}(u_c)\log {h_{TU}^{(u_c)}(G_{AM}(\theta_G,z_{in}))}$$ The optimization is done for $\theta_G$, after training $h_T$ and $h_{TU}$. The weights of $h_T$ and $h_{TU}$ are frozen. $\hat{p}(u)$ is the empirical approximation of the marginal distribution of $u$. The optimization stops when both the threshold condition of Eq. [\[eq:Thresh\]](#eq:Thresh){reference-type="ref" reference="eq:Thresh"}, and a proximity condition regarding $\hat{p}(u)$ are satisfied (e.g., $L_U(\theta_G)<TH_U$, for some threshold $TH_U$).
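+
+ One inhibition update in this alternating scheme might look as follows (a sketch; the batching and optimizer choices are ours, and $h_T$, $h_{TU}$ are assumed frozen):
+
+ ```python
+ import torch
+
+ def inhibition_step(G, h_TU, z, p_hat, opt):
+     """Push the stimuli's patient posterior towards the marginal p_hat (loss L_U)."""
+     x = G(z)
+     log_pu = torch.log_softmax(h_TU(x), dim=1)    # log h_TU(G_AM(theta_G, z_in))
+     loss_U = -(p_hat * log_pu).sum(dim=1).mean()  # cross-entropy towards p_hat(u)
+     opt.zero_grad()
+     loss_U.backward()
+     opt.step()
+     return loss_U.item()
+ ```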
83
+
84
+ The previous section presents a new approach that uses modified noise, transformed into stimuli, instead of the real data as a means of teaching new classifiers about the class separation for a task we are interested in. Furthermore, this approach can provide protection against potential adversarial information leakage attacks. These two advantages of the algorithm are individually evaluated in Sections 4 and 5. Both evaluations are performed for the application scenario of sleep apnea detection and we use the same datasets for both. Therefore, we describe in this section the application scenario and the datasets, before we (1) investigate in Section 4 how well different ML methods or architectures can generalize when trained with stimuli generated from the knowledge of $h_T$ and (2) investigate in Section 5 whether using stimuli instead of real data can provide protection for certain adversarial attacks.
85
+
86
+ In the Cesar project we want to detect Obstructive Sleep Apnea (OSA) with low-cost sensors and using ML-based analysis on smart-phones. Sleep Apnea events are defined as the cessation of airflow for at least 10 seconds or reduced airflow by at least 30% (American Academy of Sleep Medicine - AASM, [@ApneaAASM]).
87
+
88
+ OSA can be detected by polysomnography (PSG) in a sleep laboratory or by polygraphy at home. A variety of physiological signals are commonly used for OSA detection, including the electrocardiogram (ECG), electroencephalogram (EEG), electromyogram (EMG), electrooculograph (EOG), oxygen saturation, heart rate, blood pressure and respiration from the abdomen, chest and nose.
89
+
90
+ OSA is a very common, yet severely under-diagnosed disorder. In Norway, it is estimated that around 25% of all middle-aged Norwegians are at high risk of having obstructive sleep apnea, yet approximately 70--80% of all cases are expected to be undiagnosed [@punjabi2008epidemiology]. In our work, we use data from a large clinical study, called A3 study, at the Oslo University Hospital and St. Olavs University Hospital. In this study, sleep monitoring data from several hundred patients is collected and analyzed. As such, this is data that is collected every day in clinical settings to address a severe health issue. However, privacy regulations do not permit to share the data and prevent reproducibility of the results in this paper gained with the A3 data. Therefore, we use in addition to the A3 data the well-known open sleep monitoring data set called Apnea-ECG [@ApneaEcg].
91
+
92
+ Sleep monitoring data are for most humans hard to evaluate and can contain noise and artifacts. Thus, we start our investigation with a simpler problem and a well understood dataset, namely digit recognition with the MNIST dataset. The insights gained with MNIST help to properly experiment and interpret results with the more challenging sleep monitoring datasets. Furthermore, the use of two different types of data indicates the generalizability of the proposed approach.
93
+
94
+ We use three datasets to evaluate our approach:
95
+
96
+ - **MNIST** is a well-known database containing a training set of 60000 28$\times$28 black and white images of 0-9 handwritten digits and a test set of 10000 digits. Of the 60000 training images, we use 5000 as a validation set.
97
+
98
+ - **Apnea-ECG** is a well-known open database from Physionet which contains data from multiple sensors capturing ECG, respiration from the chest and abdomen, nasal airflow (NAF), and oxygen saturation. The data contain sleep recordings from 8 patients with durations of 7-10 hours. Apnea-ECG has been collected in a sleep laboratory with PSG and preprocessed such that it contains only high-quality data. From Apnea-ECG we mainly use the NAF signal, because it can be used adequately to train a classifier to recognize apneas and yields the best single-signal performance among all the respiratory signals as shown in our previous work [@kristiansen2018data]. We use the eight sleep recordings that contain the NAF signal (i.e., a01, a02, a03, a04, c01, c02, c03, b01). The sampling frequency of the sensors is 100 Hz and all recordings contain labels for every minute window of breathing that signifies whether the minute is apneic or not, i.e., whether the person is experiencing an apneic event during this minute or not.
99
+
100
+ - The **A3 study** [@traaen2019treatment] investigates the prevalence, characteristics, risk factors and type of sleep apnea in patients with paroxysmal atrial fibrillation. The data were obtained with the use of the Nox T3 sleep monitor [@Nox] with mobile sleep monitoring at home, which in turn results in lower data quality than data from PSG in sleep-laboratories. An experienced sleep specialist scored the recordings manually using Noxturnal software such that the beginning and end of all types of apnea events are marked in the time-series data. To use the data for the experiments in this paper, we labeled every 60 second window of the data as apneic (if an apneic event happened during this time window) or as non-apneic. The data we use in the experiments is from 438 patients and comprises 241350 minutes of sleep monitoring data. The ratio of apneic to non-apneic windows is 0.238. We use the NAF signal from the A3 data in the experiments, i.e., the same signal we use from Apnea-ECG.
101
+
102
+ In this section we investigate how well models trained on $D_S$ can generalize, and how feasible it is for different architectures or methods to learn from $D_S$.
2010.12007/main_diagram/main_diagram.drawio ADDED
The diff for this file is too large to render. See raw diff
 
2010.12007/paper_text/intro_method.md ADDED
@@ -0,0 +1,52 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Method
2
+
3
+ We encode the scene as a set of $400\times400$ floating-point feature maps representing various aspects of the bird's-eye view of the scene with a spatial resolution of $0.5$ meters. All feature maps share the same reference frame, which is chosen such that the agent of interest is located in the center of the map at the prediction time and the Y axis of the map is aligned with the agent's orientation. Each map represents some aspect of the scene such as lane or crosswalk locations, agent presence, speed or acceleration at a particular timestamp, lane availability induced by traffic light states etc. Feature maps are concatenated along the channel dimension to form the input of the scene encoder $f$. We use 7 timestamps for time-dependent feature maps with 2Hz rate for the in-house dataset, which, combined with time-independent maps, gives us a total of 71 input channels. For the Argoverse dataset we use 10 timestamps at 5Hz rate and 62 input channels respectively.
4
+
5
+ A trajectory is represented as a $\mathbb{R}^{M \times 2}$ vector of agent's $(x, y)$ locations sampled at $M$ regular time intervals. $M$ is 25 for the in-house dataset and 30 for the Argoverse dataset. All trajectories are expressed in the reference frame where agent's position at prediction time is $(0, 0)$ and the agent is oriented along the first dimension.
6
+
7
+ We use a shallower and narrower version of Inception-ResNet-v1 [@Inception-resnet] with a total of 716K parameters as the scene encoder $f$. The trajectory encoder $g$ is a sequence of fully-connected layers with skip connections and batch norm layers, having a total of 12 fully-connected layers and 36K parameters. We've experimented with other encoder architectures but found no significant improvement. The dimensionality of the shared trajectory-scene latent space is $64$. Trajectory index size is 2M for the in-house dataset and 250K for Argoverse.
8
+
9
+ :::: minipage
10
+ []{#ablation-table label="ablation-table"}
11
+
12
+ ::: {#ablation-table}
13
+ Model ADE FDE LL
14
+ ---------------- ----------- ----------- -----------
15
+ base **1.302** **2.968** -2.12
16
+ no noise 1.302 3.017 -2.10
17
+ no history 1.339 3.056 -2.24
18
+ no rebalancing 1.434 3.330 -2.62
19
+ 2 modes 1.310 2.990 -1.70
20
+ 3 modes 1.308 2.985 -1.61
21
+ 5 modes 1.309 2.986 **-1.56**
22
+
23
+ : Ablation study
24
+ :::
25
+ ::::
26
+
27
+ :::: minipage
28
+ []{#inference-table label="inference-table"}
29
+
30
+ ::: {#inference-table}
31
+ Model Inference ADE FDE Hit rate (0.5m)
32
+ ---------- ---------------------- ----------- ----------- -----------------
33
+ no noise mode, top-1 1.450 3.318 0.176
34
+ mean, top-150 1.302 3.017 0.191
35
+ mean, top-500 1.291 2.992 0.190
36
+ mean, top-1000 **1.287** 2.983 0.189
37
+ noise mean shift, top-150 1.314 2.995 **0.193**
38
+ mean shift, top-500 1.311 2.987 **0.193**
39
+ mean shift, top-1000 1.309 2.984 **0.193**
40
+ mean, top-150 1.302 2.968 0.192
41
+ mean, top-500 1.297 2.956 0.192
42
+ mean, top-1000 1.295 **2.952** 0.191
43
+
44
+ : Inference approaches comparison
45
+ :::
46
+ ::::
47
+
48
+ We use the Adam optimizer [@Adam] with a batch size of $128$ split between $4$ GeForce 1080 GTX GPUs. The learning rate starts at $5\mathrm{e}{-4}$ and is halved every time there is no improvement in the validation loss for 5 pseudo-epochs of 2000 batches each (1000 for Argoverse). Training our models until convergence in this mode takes about 5 days on the in-house dataset and 3 days on Argoverse.
49
+
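+ This schedule maps directly onto standard plateau-based learning-rate scheduling; a minimal PyTorch sketch (the paper does not name its framework, and the model and losses here are stand-ins):
+
+ ```python
+ import torch
+
+ model = torch.nn.Linear(64, 2)  # stand-in for the actual encoders
+ optimizer = torch.optim.Adam(model.parameters(), lr=5e-4)
+ # Halve the LR when the validation loss has not improved for 5 pseudo-epochs.
+ scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
+     optimizer, mode="min", factor=0.5, patience=5)
+
+ for pseudo_epoch in range(100):
+     for _ in range(2000):  # one pseudo-epoch = 2000 batches (1000 for Argoverse)
+         x, y = torch.randn(128, 64), torch.randn(128, 2)  # batch size 128
+         loss = torch.nn.functional.mse_loss(model(x), y)
+         optimizer.zero_grad()
+         loss.backward()
+         optimizer.step()
+     scheduler.step(loss.item())  # stand-in for a proper validation pass
+ ```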
50
+ We report metrics commonly used in the trajectory prediction literature: the mean displacement error at the last timestamp (*FDE*), the mean displacement error over all timestamps (*ADE*), and the fraction of agents for which the maximum displacement error over all timestamps is less than 0.5 meters (*Hit rate*). To assess the ability of various models to correctly describe multiple modes, we also introduce the *LL* metric, which evaluates a weighted set of predictions provided by a model by computing the log-likelihood of the ground truth trajectory $t^{gt}$ under a simple probabilistic model: $$
+ LL\left(\left\{t^{pred}_i\right\}, \left\{w^{pred}_i\right\}\right) = \log \left[ \sum_{i}w^{pred}_{i}\,\mathcal{N}\left(t^{gt}\mid t^{pred}_{i},\sigma^{2}I\right) \right].
+ $$ Here the $w^{pred}_i$ must sum to 1 and $\sigma$ is set to 1. We believe this metric to be a much better indicator of multimodal performance than the commonly used *DE@N* metrics, which measure the error of the best prediction among the top $N$. The latter do not take prediction weights into account and can easily be gamed by emitting a diverse set of predictions even when the model believes there is just one mode.
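+ Under the definition above, the LL metric reduces to a Gaussian-mixture log-likelihood over flattened trajectories; a minimal sketch (the function name is ours; log-sum-exp is used for numerical stability):
+
+ ```python
+ import numpy as np
+
+ def ll_metric(t_gt, t_pred, w_pred, sigma=1.0):
+     """Log-likelihood of the ground-truth trajectory under an isotropic
+     Gaussian mixture centered at the predictions.
+     t_gt: [M, 2]; t_pred: [K, M, 2]; w_pred: [K], summing to 1."""
+     d2 = np.sum((t_pred - t_gt[None]) ** 2, axis=(1, 2))  # squared L2 distances
+     dim = t_gt.size                                        # 2M dimensions
+     log_norm = -0.5 * dim * np.log(2.0 * np.pi * sigma ** 2)
+     log_comp = np.log(w_pred) + log_norm - d2 / (2.0 * sigma ** 2)
+     m = log_comp.max()                                     # log-sum-exp trick
+     return m + np.log(np.exp(log_comp - m).sum())
+ ```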
2102.00405/main_diagram/main_diagram.drawio ADDED
@@ -0,0 +1 @@
 
 
1
+ <mxfile host="app.diagrams.net" modified="2021-03-22T13:36:10.513Z" agent="5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36" etag="Qx5YwVeeNjS7Li8OPnyr" version="14.4.9" type="device"><diagram id="sZrtWId9JKYE0pufIThY" name="Page-1">7Vpdd6I6FP01LNsHXUAI4KNY585DZ+oaZ+6deYwQkRaIg7Fqf/0kEL4EuVqtWqcu1zLZOQnJPvuEHFAC/WD1T4Rm0y/Ewb6kys5KAneSqqqwC9gPR9YJomgp4kaeI7AcGHkvWICyQBeeg+clQ0qIT71ZGbRJGGKbljAURWRZNpsQv3zVGXJxBRjZyK+i/3kOnSaoCeUc/4w9d5peWZFFS4BSYwHMp8ghywIEBhLoR4TQpBSs+tjn7KW8JP0+bWnNJhbhkO7SwX8aQJmYjy9o6bUfH36B+xFqG2JudJ0uGDts/aJKIjolLgmRP8hRKyKL0MF8VJnVcpt7QmYMVBj4iCldC2eiBSUMmtLAF61swtH6p+gfV37xSgem1btVsfFuLWpzGpGnzA2AIcns+ZS3kiKgOVlENm5gQhXiQpGLaYMdyFzHRI9JgNkMWb8I+4h6z+V5ICE+N7MTXXtRhNYFgxnxQjovjDzkADMQgaSkIhJhpMGSr1khGTGtFaaWQ7Ee9tCG+aGNPbWhX4I2DPUE2uh+aGNPbWiXoA2onUAb6a37Qxw7iwNegjj0U4hDMPKM/IVYljSQpZ4uWWZcMCSTnUlkAfay1oEodGEZMaTup4I93GKfDLuhylxzXEDLqUfxaIZihy7Zcbasr+z4xisTz/f7xCdRPBDAigOxkYmp0NLVDYD0TFTPOKJ41SyrqgxEB30jltP6Mj+XZi6dFs6kqV2dcArOfkWg6+cI9HLAqscNWLBjwCrqgRF7EPGgEkTfGSWh98KuTcK30zlTuelodTo31THQj6Rz1dg47KpnF/pZ0qQ3Fbq2q9APzXcOIl6rCH0QjLHjeKF7bSqHytlV/o4TviNHB9w1Og491R/kMFiJjiGK+AGPTPgiZxjbU1b4jlz3CgNGP3/AvOMs+MgBo+8aMIdmOgc5TK8EzFcUYIYMQupRPo9v2CZu6F3jMUrvnj1eapI/aLWqCWBLUtkV5NZeWeBmp6ZUsCXBu7fz78S0sW3X+XdsQv7o9ij+1cD/H5OBfFL/VvMSSdV9ypmYobDEtv57wV98WGNkP7kx82074anHtwN3fKNqJk/sY6eqsl4oa7d5f1ZyxW/zhTZGByAbsFS85WU+nDwhIW1PUOD566RbQEIyjwVRMpnHGzU3ULTZqmZm0GorHb0LdMUwWF+L+4R/QQcqsAuhhttyRtQ4Svt1tnyqlinCnJYs/9Ip0TqGYuha12QrZyKXGT+KqsmGCRkA4thsXNHFLlS0NYgawlzI5XKDqHdw9XXtZXXJ0In3smrq+Z5ktt/eCa2bhrvwcPitdZuUb153S/73S3WA5tuz6Nf78ZP1bNgPrkv2dSnNiWVfzSn/Vtk/HCj6Ufv+of863T/8VaKvyUtOK/r04FwS/VaS5T1Jrr5g3P6WqOiQkIT4SIyXCc/ILRBu6DWEQ/XNGFcqjFtf74cV1tmaaZnaGoaQ77khq9qMDsxwi5Pl2cjviYbAc5z42Uud28qObfAcf2LCtyXxYEYFaV1Ml/fw0Rj7VrY/Hd+VYOOdbVYvJvVGnS/3dyWr5v9OS97/5n/yA4M/</diagram></mxfile>
2102.00405/paper_text/intro_method.md ADDED
@@ -0,0 +1,34 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Introduction
2
+
3
+ Natural language processing (NLP) is one of the most important fields in computational linguistics. Tokenization, embedding, POS tagging, NER, text classification, and language modeling are some of its sub-tasks. Any computational linguistics researcher or developer needs hands-on tools to perform these sub-tasks efficiently. Due to recent advances in NLP, there are many tools and methods for word tokenization, word embedding, POS tagging, and NER in English. NLTK [@loper], CoreNLP [@manning_2014], spaCy [@spacy2], AllenNLP [@DBLP:journals/corr/abs-1803-07640], Flair [@akbik_2019], and stanza [@qi2020stanza] are a few of these tools. They provide a variety of methods for tokenization, embedding, POS tagging, NER, and language modeling for English, but support for low-resource languages like Bengali is limited or missing altogether. A recent tool like iNLTK [@arora_2020] is an initial approach covering several Indic languages, including Bengali. But because it groups Bengali with other Indic languages, dedicated monolingual support such as easy pre-processing, tokenization, embedding, POS tagging, and NER for Bengali is missing. Besides, iNLTK is mostly built around a deep learning (DL) language-model pipeline, which needs DL infrastructure to perform NLP tasks; that makes iNLTK a verbose, language-model-centric tool for Bengali. BNLP, on the other hand, is a fully machine learning (ML) based toolkit that can process Bengali NLP tasks instantly. Table [\[tab:compare\]](#tab:compare){reference-type="ref" reference="tab:compare"} provides a detailed feature comparison between BNLP and other tools.
4
+
5
+ ::: table*
+ | Tool   | Supports Bengali | ML Based | Pre-trained Model | Tokenizer | Embedding | POS | NER | LM |
+ |:------:|:----------------:|:--------:|:-----------------:|:---------:|:---------:|:---:|:---:|:--:|
+ | NLTK   |                  | ✓        |                   |           |           |     |     |    |
+ | spaCy  | ✓                |          |                   |           |           |     |     |    |
+ | Flair  |                  |          |                   |           |           |     |     |    |
+ | stanza |                  |          |                   |           |           |     |     |    |
+ | iNLTK  | ✓                |          | ✓                 | ✓         |           |     |     | ✓  |
+ | BNLP   | ✓                | ✓        | ✓                 | ✓         | ✓         | ✓   | ✓   |    |
+ :::
25
+
26
+ BNLP, an open-source language processing toolkit for Bengali, is built to address this problem and lower the barrier to performing different Bengali NLP tasks by:
27
+
28
+ - Providing different tokenization methods to tokenize Bengali text efficiently
29
+
30
+ - Providing different embedding methods to embed Bengali words using pre-trained models, with the additional option to train an embedding model from scratch
31
+
32
+ - Providing a hands-on starting point for POS tagging or NER of Bengali sentences, along with the option to train a CRF-based POS tagger or NER model from scratch.
33
+
34
+ BNLP offers several widely used text preprocessing techniques, such as removing stopwords, punctuation, and foreign words. The BNLP GitHub repository[^1] hosts the source code of the package and the pre-trained models, and documentation is available as well[^2]. BNLP is released under a permissive MIT license; it is easy to install via pip or by cloning the repository, and easy to plug into any Python project.
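+ As an illustration of this "hands-on" design, tokenizing a sentence takes only a few lines. The sketch below follows the pattern of the package's documentation, but class names may vary between releases, so treat it as indicative rather than authoritative:
+
+ ```python
+ # pip install bnlp_toolkit
+ # Class name follows the documented pattern; it may differ across releases.
+ from bnlp import BasicTokenizer
+
+ tokenizer = BasicTokenizer()
+ tokens = tokenizer.tokenize("আমি বাংলায় গান গাই।")  # "I sing in Bengali."
+ print(tokens)
+ ```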
2103.02062/main_diagram/main_diagram.drawio ADDED
@@ -0,0 +1 @@
 
 
1
+ <mxfile host="app.diagrams.net" modified="2021-02-05T02:15:51.717Z" agent="5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.141 Safari/537.36" version="14.2.9" etag="dvyAZ2cjDiTarp-PmBMb" type="device"><diagram id="VwJInFRVuOZcXAthndoV">7V1Zc9s6lv41qep5MAv78pjcm0y/dFWq0jM9/cjYjK1p2VLLcuLMrx9A4npIgYsAkhJlVyUWJQEkznf2g4MP9I/n9//cxdunv20ekvUHgh7eP9A/PxCCtebmP3vld3oFo/TK4271kF4rLnxb/V+SXqTp1bfVQ/Ja+eB+s1nvV9vqxfvNy0tyv69ci3e7za/qx35s1tVZt/FjUrvw7T5e16/+Y/WwfzpeVRwV1/+arB6f9vnzpe88x9mH0wuvT/HD5lfpEv38gf6x22z2x7+e3/9I1nb1snU5fu/LiXfzG9slL/suXyDHL/yM12/ps6X3tf+dPWzy8vDRrpl59bJ5MRc/Pe2f1+YVNn++7nebfyV/bNab3eHT9MsXan7yd7LlEebKceTkoba2xc3ifAkMeJLNc7Lf/TYf+VUscrbGT6X1za7tknW8X/2sDh+ntH7Mh8tn+LpZmYkJyoAp0nFSWMqMSNkQr5u33X2Sfqu8qGCgnLrpQAIOtI93j8m+NpBZ5vh36WNb+4FXxw0rMI9C7vtCzs+bP453kL0q0aC4dABQM5iobzAhJL98+XKRYCLqwsBEJJiHtYAJcIvAfsHE/INJa8MAlwgmysBa86FgakOlJzBRKGl4C5g0+LzwCybuX80pdKFgYoA2bKhkakWlLzBBSdOi5igPK5mEFzCVIVODl9aUHhTf7MHky2ZqRaUnMDEKwIHcYGIYfJ76BZP0L5ku1gD3BiYObSYWCEw9DXAe2ABXvsF0wQY492WAjwUm3tMA54ENcO0fTBdrgAtfBngrKj2BSfQ0wHlgAzzTojcL3BId2jpQonRFUyssfaEJiiYYVYSfD2yBY+wFTddhgktfVlMrLD2hSUITHLvRJAOb4Jj4l00Xa4N7Q5MayWySPW1wFdgGx7coeGmxfRnhY6FJ9TTCVWAjHN/C4MVia19WeCssPaFJ97TCa5+XntF0i4MXi40RWG0+VDi14tIXnKCwaTHDdWgz/BYJL8PJl+XUDkxf9QMIWOJcugGFUWhT/BYOD4IoPJL5hFFPa7wojQpljt9i4uXV9mWPj4co3NMixxgKNe0ZUbfAeGm1iS+bvB2bvhBVK4NrscrrX/BslmcPejPL7Rdg7RCTQxHVik1viAKKmrciqqclj0lgU57cIuplCHozvFrB7AuCtK8pTwOb8hmAOiLqfh2/vq7ue4MKE/t7CaDivjDVWiZzAlNDiNgvmH0jYmcigoEIDUfDQSHkh/j1KXlISViipr3+Nd7vk93L4QpBdeeaXQAl/RUugoFoRwk/hJJN4VuxNgvz6bv543F/WPfjhR+bw00WNBb/fttkb9y9HrZUfbTUw9v34s1slL8nz9tkt7GTZ8ROxzX3eBy6Op25XLoFgK598r6vomiXmDuIvx8+YM3U+G2/Od7T4e14vXq08Lo3WEmMjPj0M9ntV/fx+mP6xvPq4cF++VOqF800/NMH/qd9vtV6nYmWFMtVeZNe3G3eXh4OCEf5Pdtpkve+gM2+IE8AoQTofJNFGdGQ9cvgraDFBY2mUGwYaKyekxsYWsFQMyFZHQ06FBiaoqhBwPDfhxluaGhDA9T2og6GPBDqHQ2DIqDXrv/h5pW8yGisXTBD9H9T5DGMkN/Fq5fVy6P5wOf3+Hm7vjF5O5NDiU9H1P/ZGL0dtVcDz73bf8u2qKPaUh5i16gp0khTpKV79zG9QJEg9FCXAMaL4UD+RALtF7e70b3RF+cVcrGhqoDVB4oYQfkPDoaCfrG2hah4yIb+9iaGC9bQDgG3R6O6ticVVtrzJFWuH/LkUndFBtNdvK7HeJPrkivAsxRZh2jVr6fVPvm2je/t61+7eFvFb/y6PTZ9+bF6t/iuCaSN+epq/7uP6j/S5bTuB8H0TBCUlkw1rJjysWD9avo6hGhpk7zPQ7RwMf/37XmbvhY1rfGvZH//lKsQo3G+rNbZpB3ER8uqEytfpcAaHf+lskKEO6wiIWnxw0QdyCSiCmlO0n91gzRSEa38iL6CCDKUZJEQhVpAaphYMvolyh798C/tM4tHmdWvDPCqEAgoe0eRAYsufmRdEFw05KAmBKOSYBgbqzCwo8HaigsdccUlZuTwLwOCCUVYY1H80GlQgrWODEbNN7nAktHqbQodUU6lSn/1QMgQFUlBmDjOw7WoTIJFRI3SV9lv1hvOd3cjJ0GYijAr6CGx+4mwjsxqIW24QQvFsxLDLoOdnVanY9Uz+mGEO80iA1TNJBWK5Rs3M0ZgEWcFfolmdT6QUfFtxLPQtmc+QDiSWhlYcEwVkahKUckjhAgufodxgkRmGI6RuWHDDAJXMUiMMpdYZb8EySCMoEXEtKaIYU0sdqs3QUnEZc7xZuGZ84k0jgwPMEGVxIaQBHK2EKoYrDLU+XwQoAozIB9gYWxRloFYgKWaCx+wAx9whYVxKzii0KL0wwf0wAhEGoWDlODVOU4g0HtPqQMfGDbkGjMqCD2LD6hhBGzkh1k0TYmNl4zHCGxo5PfyrW+FDZVKXAGtLMAPchqmAmUBEhg43eujjaWEi+fFwjmsP9ObDQ0xXz7AhIykLHtzVYAZZ68U3TU/DWb8GAhz36Zx/VT5JgdKbfckcI5goW5GFotGTHVUgpKgVQkwF3mHSXUWBo2IgfLP4Dh97oMUHE38eS+BvRzAAUpCq7UhwX3RCHMPGw5hgwp0Z+rSCx4xXgS2ppJBboceM+DJDAOM26HXwKHvphL9+vPm3oAj43wgpz+vwrox3ltTBGUCQi/Cn5dj+POMTe/PM+HTn2fC+PO4ZjJnkTruF/nLTZ+5HXiOI1H+mci7Av47ZmEc+Nq4Hg2Mfsmza0KY22s1DlWTazwv/90YMdHAdO1ZgQGP+OuXs7om/LX47DMRcS2ez3CR5/TZQ0q8ftmhq0Kc22lv2IZ02RDrFwvwBzG+3LwLERdSdwd3a3sqgtKg7q7PJB4BuNy8DCTsfMruAiGOT1R2x0kvjE1ddseYq8rLkJcpVRJLDVGZMUDCqKvqDlMVIXp22R0TzrI7ZUM/Rdmd7gSg3kEZJzmsP6xIqVCOuB+IV+UsJjQSCuD7zFPilpvqaSmlN34KJSU/hU/EOrD+2PrGI5TSt03jUeD2ywVdFQQBbY2UrKw5mUkpvTfM9SvR94ixAF3AAyp1Kp1K3Ybwykp9GpBQ4dbpXkrpqXbr9GopfRid7qYGxr0q6amCOh3wAPWr35ebaTkYvxeg3xlM1HuStUo7XPYR1ftyMzGQtLNR76EgB8YdT7173yEUVL1b3XhaoQjos08DEkyc6p15c
dkxd6p3PYbL7qYGJr1cdsycG+Xco52v7ZebA6L0MgL0lAQJl1IxiwC9WG6GCBJ2NgH6UIgjEwXoxXKTQAYmlyHk4AYwP5BjeB5CjiwXgHKuQi4M4uCw4wm55eZlWiy5ucRtoP7z5US7Lbnx4jZiuWkZSNq5xG2CQa6fhegRY8tt5EblRUSnqQgTKmRoHtFpsdz8CCTtbKRcIMjBcceTcsvNgDB+GQ4rg63xPUk5NQ+HdbmbUSBhZ+OwhkIcm8phXW7qgaMLEXI6COQ4nYWQk8tNPUDCzkfIhUEcHHY0ISeXm3poseTm4q9C/efNX3VacuP5q2nBwCIRyOYZlQsGuX4WokeMLTf3wPRFROWYChMi4WQeUTm53NwDJO1spFwgyMFxx5NyAbpvaX3ifLDzcSFRpHmpbVB11e5kxMqCidRBMkbHA8oPPegQRtLIDyJVtZyTMltRyrPfbO/hgHRVhBDTQknb4EoAh5NEGBc922SYnvI2SS0QEUwzgrBA3PmkUrgfyD0Y4+ZdKvJfvztAZb8MyNR8QNglMAInVUbQLnQM5QPjBk/OBxw0iqPcecs2VDYd0r0fqBMU6XcY6wixopVQ1plgXlDHYgyZj+n0WIdHWrdgHcNQfEUJnI/mAKfiBEQzkNoy6w86LzDb9ozhwWzE4ORgzsuou4HZ9j+aTnD3yy2dgrrTF9Sa0kPr3POhrgxKaO6Nqexmiz1NvLwDaKpGUZYjC08f7kpm0vaWzX9gC9vOYvtw2lqxFqABqLb8dHoWX1DH1qfMYzoM1pdgUnnWtFzjdA8DFhllbKSZuV8lGVg4Im3qJT/bLdXZvjhB9UtyTc0Jdq3mzwqUjcEKFM+AFSirSn2jSN03TaYEe79s29Rgx/QSwH4IJAUHu/FNpwe7sVYqch/0GO4p9gXI8lC/LdGzpPWFgJ1KAPY5Yp3jMbDO1AywzpFHrBsjf0Kpv9zsIwc1FsBFJrTKEFlnldHZCiamrTdb/GDgsHZOPrqLZd2T+MsLqeXmHiFh7w7xjtKPmkqQB0IcrNyIJOIUES64IowwxJI7TIMBLUBPOnu4ge8AXpnYqIG66bVdso73q5/V8XuQWFLD/SWaVigjIlY+dhKLoQR3TgJmIWE0tXDdA21+0pMPxIcPdr6mXu7eKbemZqL1iIsppCZmVbFJBoav3Yq6bRaPAnS5+6ogaelEVUHBIOZW+AExFaCT3LUqZQnUy2ArzDUJhkosUB2DUytj1GgaDNPK7sHO18rL3YhFjchDhVImmlcW3tjyEW8Qd6OHYHGkSMmrItVgjaye0EYGhqUMoCUtBb846zOLP4mql7svq4XQd4RHuOJfXzMeW2YBk2RPGgCPfrJK16LhuZESNV2UqyoRkbKmUkNpD2YBXVElnCaIimfOR2XgHqjzgejwsc5W8JosVqC2KHhu1r3sd0/VXttIuhI4MK/epW0R3LSjw7OCb5vGo0RdbsamRanZPTKoTIOrBmTLNHI0PPbL4CxawysRoZKqwgMp79bvmDlnGUW/26PXyveQhmyHKXj3YOdr+A6Zod3m7eUheUgR++tptU++beN7++6vXbytAhpKzCqMmbmyMd9d7e3jCYvzH0ZIZvhP+eMhfn3K57Mvvsb7fbJ7OVwhqJCnP5PdPnk/ixt0AzvgrACnCfoVaeFa2Q4pi0eztNuTj3K/eXlJ7vfx9+zjqP6I2KkzNK3GNjM+Ly2AQMEWoEPMvAVM8evWLIB58WP1bgFRQ1eBJdQVFEe6nA53V1dM1FdMNSyY8rFeHeLBF8uKp1a9tK6YBEPiFccpW+CMYfYQ7GHEdB4No+AOJgn2og/dywXzh7TPLP7sRIyuODTZBkFY6mNMuFm0jAqFOTDsia4KIUAWIN5oV3aoN9IKjOppSFXevLMRazx9aQMG50mTWhOJ8tmTQ8OWuHqgdLYpMZ2E4MrZk3ASX5tOnfTgtM/Zk5g7D6dyDna2U4MzH8rnhlX55VDPHoIR7jBnkYFqvue0msU1GpyX6qlRtp1y7B2r7LBhFTPGMVVE6GoAStm6CYKL34GMYIfhhoJcmRmMe1Yt+SEi0tJo9fSXBMrR20oAbbwnhjWx6GXV+BKVEZcFz2vW4uHjyLABE1RJQ2ma33TB3UIoKEL8MUO/mOn0zEBpxNI+BAdIz5IZtFHriissCOUcMVQVlp6YQdgnIdJoHaQEJ4DjTqDQNy9oo+GOvGhUgshJOowVlI6QUtismaaGmBRwd2BOuOJ6+xauMiub8xMrVqLkBlbYQU7EVbhqOsOwSuc6LxGV5AcD26TgqD4t8Cs+ZaTNmq95eRcNITAMDoiZK65tb8GM8bhLcBC0yqezkUqiOgklfhCGjdGPi55LIDQXUkpdcTV7C+LcpJxMaIVBmHvUkCItQMOo6fxvCXyOaSBS9b4lAnlvDjyOoVqv7H0LDCSyBt53mOx6i/NtxBdwOZyP1OJ868Auh59+UjNxvufBCVXXm6oqt3riBOB6g+5sJzA4ruvdkxFgT4o0JeUN6ni5KS63d83b92hO4BjlLSb9Ote1YT2aFbhffuuaEAaIx+bhXA/GUIuT7hMzZLGYcXvXM5FKwEcBScfhAAPO9YhC6opL1M9yrqeSWYEQ5h42pERbcBaj7fjDuVSzwcPo/FQWEXj84VTVbHjBaQ54AOJsqtnCYA4OO141G+6XF5m8mk1Xq6dAhnUu1WyqWs0Gdur7KWYzs5SL2TiwvkaqZnPSo181GwFVSMJ36GS5CZmWAnUbbJjDMXiwqPewkTF8gXrbND7F7RWf3N6zQh2eNzfVQXjBQNev8N0nyLynQyasUBcsUkiVvI2JUEJdKt0enhigQB3qdD2DCnVM+paog683soE3JU+Wmx/BLWfdzkXJ1w4e9eS3w7NuJ1PyZMEZFEDc2Sj5QKCD446n5DNMXYqS19LhKM5FySs5gpK3XbimV/JOevRU8gRJ1z4092gedH6HZNDV7enP7dJ674S8SMT7Hn+cNQtcoGo71GVdQFoEHmwEVdLAc90pcp7m4Z7Ep95bblYEUnY2WZFAkIPDwlFJOJAtd7cIJZch5igOI+b4TMTccpMVkLKzEXOhIIcnE3PLzUa0WHNzCZbVTqmEcYsg1lzbLD4huNyuYZC2c4mVhcJcTyvRI8jocpMCFF9EUoDCnXa+5BzT85BzdLk5AUjbuci5YJhD7pxAQJCRdpBdayC0qe1uwEAoXe4GBKovJEKggrhrjMwjQpBlp5aIQDXXCEEYyMFhx4sQ0OVG2xm7DDHHaBgxJ2ci5pYbioeUnY2YCwU5OpmYW3C03W3NzSZAoMIEpdzW3JgBguXG4iFtZxMgCIS5nlaiT5AtN9rO6EUEQhkJE5RiYiaBULbcWDyk7VzkXDDMkakCoaxDtP1aA6FUjhoIZaQXP3fqvaf18MMZW1s8RZqXeoiBk5pIxMpaAOuJuu/ZKnCpFcJIGnFNJOiMd9yMxrNfPnTLjLBd/JgWStoOdwL4+DLCuGjaKMMcAmELMQQigmlmeEAg7nxSKdwP5B6M23epyH8VQPqZdeisX05galYQl8EKlI3B
ChRPzwoUtJ5kyH3LZFKw90s/zBvswvb4LIFUzRLqGI2BdczGATsI9mUmigPt1c8Tz7uIWL9Ux9SAtjQsAZqDE41ngmiikZmlQLSset2+EE3sQWglREN/ZjRLBpWi84CtWvBNjIMuS9KcgZUigcW5nz5QTp9ca0oPTbXPR7/dMapp7harKvgpijgp7TecKjOHjNIpQi6gNbQyBpYu7Ct4rlRnYS4tCxUrAToF203xlVlIGGmubSyj6C9UBa/hv/Kjpi3NT4dHWSSVeSZtbldJBrpCEZsDy3fHcuKZD/plgqbmA4ovgQ/4CHwAk4cdm8/2BTrjvWx0RqfEsp8DQuYh0zGaCZidQh3zUaS6hlJ9AqGOcT+prmEyzXN7v4y3LgTtbsk9G7Q7RbcvtDMyA7Qz5hHtbrmPaVjBz/vl2Dr1hlHIvytbRnNDgiK7tkvW8X71szp+dwzzQ0Pn4qeqwIeeGsuRa1QFRvWEUU5ck7Z0aeHY9WXuOZzCO6TgrjTPy3Fr5ajkJanc1LN/DOHOUa0OhasSQECYpDNnsJbKUdckHpPAqVRdJAJRvXKU4XLl6ESdsUNBDsHK0bEwttxtQFyAntIzFXI8DOL0TITccncBQcrOR8gFghyfSsgtdxNQiynHRURYySmcqGgUqr9jB+eape/XkGuZxCf+lrsFCFKWsSrTN5zVfsmA62cd+oTYcjcAca4cdtxcJByrlyj7AJxqq4ofScItd/MPpOxcJFwgwLFaSfxYEs5PeuRaYsIqSEyYTxETlufEhMWYMWFxy0t0zksoILL85CXyOq9RExPZJv5hmYmsd5Y/GPZLTVw7DJ2icDAMnaIwGAydsjA77HKYMATf9gBD0g7Da92kxrJqkHE2qYkOYXpj9G7tnz/WyXvK+p/abG7zfs7r5Pg6IwCKEANLfCgJR6mgyKzqjlY0nlJEwNp7OvQ4bzhQ1yq9QezVFBkXa7Myn76bPx73h4U/XrCcU0GD+PfbJnvj7vVA0o/mAxRv34s3zYX7nLTFxYPD8eNH+dJxts/v+118v8/vYpe98fddvHpZvTyaAT+/x8/bdfJa/1B2xTz48XZrl0uPBbBtOHVfRe4uMU8Vfz98wOIxfttvXlPo2pfr1aPl+XsDwMQ84CfL7qv7eP0xfeN59fBgv/wpFclmGv7pA/+zWapUFWZ6sZBtqKtUOcEF7wCWGbpEXcgQ7WCT82RMU5Q8x1sDtlLsWFztHr//xRrb5j7MRKj0p1Gk/2FfHNDWgsYj8b/ukrtHs7bbGzJyZEjQcYPgBvUjGpBBvSCjQ/z6pn1Oah9V2/mlI3Agelf9o2Hcuj6UTw3UJai8Xq+2r0m7cRe/bpN7u7A/Vu+WNU4zU937OIt3tAS8Q+q8wxqoz7ywTlNgdAKhim/CtAQIDgBRL+2gwWRplzDm7HnKuDoTMlXG0FMzFbkxVRkRE3KV7BIRmz9XEehejspVTeGcCbiK3riqjIgpuapLcej8uYqyKbnKGcUZj6vYjavKiJiSq7pUI86fq5iYkquaIhITcBW/cVUZEVNyVYdYxS1GdTJGpVUeSMqoJwFZOseowEZpCjneY4RKdqg7u1H9JNUxk97Ijpkake5NUZRKKqxXYiv77us2fmlUH52UQS07918vqx+b3fPaEvSbTYsdU2QrO0cSG5SYqd6+O3Id+bXjjbXk0RapdTQ7gboyB6lQWicb+CZ/hskfDAMeg6WP7S9WlWQh5Y/qEG+60f003W3XU196h5ySACHo3hQju9G9M90pdBoHU92MNCa/d4mEmWX/lr58TR6fzZJ/Li61QqCh+2ZK4pOkbwBa884HADEiKxBTkZCXAB7CqwRnaLCyENUGPQzC0FM5Z/2WpeemUep0eHFGhvA/DmgwZh9BH421FxuI1crD4hf79uZtv307lLIdysicVWQ3Q7mPoayyHFm+T2nM4jI1dAv2YcOWW3BWRF5vKfq4ix9WZg3B+psru9//Y0eNbBwrvfBPO2eEKM0u/PmeTnx89bv86muyW5mVsggp3VuH2vqj9EkvZUG12chhAe0tMlAMS1DuSKE89ySF8+ZzefWv5/0Y6nQ0ekZC+Osu2e4294nhIBuGuInRQWKUZwm9HEz1Jh0kO9q4zH3EsYXzhBg1L3cbS9QCmLt4+/S3zUNiP/H/</diagram></mxfile>
2103.02062/main_diagram/main_diagram.pdf ADDED
Binary file (45.2 kB). View file
 
2103.02062/paper_text/intro_method.md ADDED
@@ -0,0 +1,53 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Introduction
2
+
3
+ Large-scale time series forecasting is prevalent in many real-world applications, such as traffic flow prediction (Vlahogianni et al., 2014), stock price monitoring (Box et al., 2011), weather forecasting (Xu et al., 2019), etc. Traditional forecasting models such as SSM (Durbin & Koopman, 2012), ARIMA (Zhang, 2003), ETS (De Livera et al., 2011) and Gaussian Processes (Brahim-Belhouari & Bermak, 2004) are the folklore methods for modeling the dynamics of a single time series. Recently, deep forecasting models (Faloutsos et al., 2019) that leverage deep learning techniques have proven particularly well-suited to modeling an entire collection of time series (Rangapuram et al., 2018; Wang et al., 2019; Salinas et al., 2020). In such a setting, multiple time series are jointly learned, which enables forecasting over a large scope.
10
+
11
+ In practice, a time series dataset can be heterogeneous with respect to a single forecasting model (Lee et al., 2018). The heterogeneity here specifically means that the underlying distribution of interest may vary across different time series instances due to local effects (Wang et al., 2019; Sen et al., 2019), or may be correlated with time within each individual time series, a phenomenon we refer to as concept drift (Gama et al., 2014). In light of this, a seemingly plausible solution is to maintain multiple forecasters. However, in most applications training a single model is inevitable, since deploying multiple models incurs storage overhead and sometimes generalizes worse (Montero-Manso & Hyndman, 2020; Oreshkin et al., 2019; Gasthaus et al., 2019). As a first investigation in this paper, we provably show that time series heterogeneity can induce arbitrarily large gradient estimation variance in many optimizers, including SGD (Bottou, 2010), Adam (Kingma & Ba, 2014), and AdaGrad (Ward et al., 2019).
12
+
13
+ Extensive study has been conducted on reducing gradient estimation variance in stochastic optimization, for example via mini-batching (Gower et al., 2019), control variates (Johnson & Zhang, 2013) and importance sampling (Csiba & Richtárik, 2018). These methods are mostly motivated by optimization theory and do not consider time series heterogeneity at a finer-grained level. In this paper, we take a different perspective: observing that the distribution of interest in time series usually recurs over the time horizon or is correlated across instances (Liao, 2005; Aghabozorgi et al., 2015; Maharaj et al., 2019), we argue that the gradient variance induced by time series heterogeneity can be mitigated via stratification. Specifically, the intuition is that if we can somehow stratify the time series into multiple strata where each stratum contains homogeneous series, then the variance of the gradient estimate can be provably reduced via weighted sampling over all the strata (the plain stratified estimator is sketched below). Our paper concludes with a specific algorithm named SCott (Stochastic Stratified Control Variate Gradient Descent), an SGD-style optimizer that utilizes this stratified sampling strategy with a control variate to balance the variance-complexity trade-off.
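+ To make the intuition concrete, a sketch of the plain stratified gradient estimator follows; SCott additionally combines it with a control variate, which is omitted here, and the helper names are ours:
+
+ ```python
+ import numpy as np
+
+ def stratified_gradient(grad_fn, strata, params, m_per_stratum=8):
+     """Draw a small sample from every stratum and combine the per-stratum
+     mean gradients, weighted by stratum size.
+     grad_fn(example, params) -> per-example gradient (np.ndarray);
+     strata: list of lists of (i, t) training-example indices."""
+     n_total = sum(len(s) for s in strata)
+     grad = 0.0
+     for stratum in strata:
+         picks = np.random.choice(len(stratum), size=m_per_stratum, replace=True)
+         g = np.mean([grad_fn(stratum[j], params) for j in picks], axis=0)
+         grad = grad + (len(stratum) / n_total) * g  # size-weighted combine
+     return grad
+ ```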
16
+
17
+ Our contributions can be summarized as follows:
18
+
19
+ 1. We show in theory that, even for a simple AutoRegressive (AR) forecasting model, the gradient estimation variance can be arbitrarily large and slow down training.
+ 2. We conduct a comprehensive study on temporal time series, and show how stratification over timestamps allows us to obtain homogeneous strata with negligible computation overhead.
+ 3. We propose a variance-reduced optimizer, SCott, based on stratified sampling, and prove its convergence on smooth non-convex objectives.
+ 4. We empirically evaluate SCott on both synthetic and real-world forecasting tasks, and show that SCott is able to speed up SGD, Adam and AdaGrad without compromising the generalization of forecasting models.
23
+
24
+ **Notations.** Throughout this paper, we use $y_j$ to denote the $j$-th coordinate of a vector $y$. We use $y_{i,a:b}$ to denote $[y_{i,a}, y_{i,a+1}, \cdots, y_{i,b-1}, y_{i,b}]$. For two variables $g_1$ and $g_2$, $g_1 = \Omega(g_2)$ means there exists a numerical constant $c$ such that $g_1 \geq c g_2$. We use $|\mathcal{S}|$ to denote the cardinality of a set $\mathcal{S}$. We use $\mathbb{E}[X]$ and $\mathrm{Var}[X]$ to denote the expectation and variance of a random variable $X$, when they exist.
25
+
26
+ # Method
27
+
28
+ In this section we introduce the formulation of training forecasting models and stochastic gradient optimizers.
29
+
30
+ **Problem Statement.** As in other machine learning tasks, training forecasting models is often formulated within the Empirical Risk Minimization (ERM) framework. Given $N$ different time series $\{z_i\}_{i=1}^N$, where $z_{i,t}$ denotes the value of the $i$-th time series at time $t$, and letting $x_{i,t}$ denote the (potentially) available features of the $i$-th time series at time $t$, we aim to train a forecasting model $F$ with parameters $\boldsymbol{\theta}$ (Table 1). Training is then formulated by connecting $F$ with a loss function $\mathcal{L}$ to be minimized. For instance, given the notation in Table 1, a deterministic model with loss function $\mathcal{L}$ at prediction time $t_0$ can be expressed as
31
+
32
+ $$f_{i,t_0}(\boldsymbol{\theta}) = \mathcal{L}(\boldsymbol{z}_{i,t_0+1:t_0+\tau_p}, \hat{\boldsymbol{z}}_{i,t_0+1:t_0+\tau_p}),$$
33
+
34
+ where $\hat{\boldsymbol{z}}_{i,t_0+1:t_0+\tau_p} = F(\boldsymbol{z}_{i,t_0-\tau_c+1:t_0},\boldsymbol{x}_{i,1:T_i};\boldsymbol{\theta})$ and $f_{i,t_0}(\boldsymbol{\theta}): \mathbb{R}^d \to \mathbb{R}$ is the loss incurred on the *i*-th time series at time $t_0$. Popular options for the loss function $\mathcal{L}$ include Mean Square Error (MSE) loss, quantile loss, negative log-likelihood, KL divergence, etc. (Gneiting & Katzfuss, 2014).
35
+
36
+ Table 1. Quantities of interest to approximate for different forecasting types. Inside the table, $F$ denotes the model, which takes the context and features as input and makes predictions via the model parameters $\boldsymbol{\theta}$. $\tau_c$ and $\tau_p$ denote the context length and prediction length, and $t_0$ is referred to as the prediction time by convention.
37
+
38
+ | Forecasting Model Type | Mappings/Distributions to Approximate |
+ |------------------------|----------------------------------------|
+ | Deterministic | $\hat{\boldsymbol{z}}_{i,t_0+1:t_0+\tau_p} = F(\boldsymbol{z}_{i,t_0-\tau_c+1:t_0}, \boldsymbol{x}_{i,1:T_i}; \boldsymbol{\theta})$ |
+ | Probabilistic | $\hat{P}(\boldsymbol{z}_{i,t_0+1:t_0+\tau_p} \mid \boldsymbol{z}_{i,t_0-\tau_c+1:t_0}, \boldsymbol{x}_{i,1:T_i}; \boldsymbol{\theta}) = F(\boldsymbol{z}_{i,t_0-\tau_c+1:t_0}, \boldsymbol{x}_{i,1:T_i}; \boldsymbol{\theta})$ |
42
+
43
+ The training is then formulated as the optimization problem
44
+
45
+ $$\hat{\boldsymbol{\theta}} = \arg\min_{\boldsymbol{\theta} \in \mathbb{R}^d} \left[ f(\boldsymbol{\theta}) = \frac{1}{|\mathcal{D}|} \sum_{i=1}^N \sum_{t=1}^{T_i} f_{i,t}(\boldsymbol{\theta}) \right], \quad (1)$$
46
+
47
+ where $T_i$ denotes the maximum prediction time in the $i$-th time series. We denote by $\mathcal{D}$ the set of all training examples indexed by $(i, t)$, $\forall 1 \leq i \leq N$, $\forall 1 \leq t \leq T_i$. A time series segment of a certain length is then used as a single training example.
48
+
49
+ **Stochastic Gradient Optimizers.** A stochastic gradient optimizer is an iterative algorithm that repeatedly updates the model parameters with a uniformly sampled mini-batch of training examples. Concretely, the stochastic gradient with mini-batch size $M$ is computed as follows:
50
+
51
+ $$\nabla f_{\xi}(\boldsymbol{\theta}) = \frac{1}{M} \sum_{(i,t)\in\xi} \nabla f_{i,t}(\boldsymbol{\theta}), \qquad (2)$$
52
+
53
+ where $|\xi| = M$ and each element of $\xi$ is a tuple $(i, t)$ with $i \sim \mathrm{Uniform}[1, N]$ and $t \sim \mathrm{Uniform}[1, T_i]$. Popular optimizers include SGD, Adam, AdaGrad, etc. A sketch of this estimator follows.
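+ The uniform mini-batch estimator of Eq. (2) can be sketched as follows; `grad_f` and `minibatch_gradient` are illustrative names of our choosing:
+
+ ```python
+ import numpy as np
+
+ def minibatch_gradient(grad_f, T, params, M=32, rng=np.random):
+     """Sample M index pairs (i, t) with i ~ Uniform[1, N] and
+     t ~ Uniform[1, T_i], then average the per-example gradients.
+     T is the list [T_1, ..., T_N]; grad_f(i, t, params) returns
+     the gradient of f_{i,t} at params."""
+     N = len(T)
+     total = 0.0
+     for _ in range(M):
+         i = rng.randint(1, N + 1)          # series index, 1..N
+         t = rng.randint(1, T[i - 1] + 1)   # prediction time, 1..T_i
+         total = total + grad_f(i, t, params)
+     return total / M
+ ```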
2103.15812/main_diagram/main_diagram.drawio ADDED
@@ -0,0 +1 @@
 
 
1
+ <mxfile host="Electron" modified="2021-10-05T21:58:30.615Z" agent="5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) draw.io/15.4.0 Chrome/91.0.4472.164 Electron/13.5.0 Safari/537.36" etag="7uGwVljd-PQt-CEcPAEW" version="15.4.0" type="device"><diagram id="WlfBNp3svzzV1grW7QyO" name="Page-1">7Z1td9y2ubV/jT9KiyAIAvxoJ3HakzbNS58m7ZcuWRrbamzJleXE6a9/Ri8caShSAiWS996TnXPWaizLExm8BrP3BYB85r94//nrs4MPb/96erR696wsjj4/818+K8vSF/X6fy6+8vvVV1wZyquvvDk7Prr+2s0Xfjz+32r7i5+Oj1Yft750fnr67vz4w/YXD09PTlaH51tfOzg7O/1t+9ten77b/o9+OHhz/R8sbr7w4+HBu9Wdb/vp+Oj87dVXU7j13X9aHb952/6XXXH9O+8P2m++fomPbw+OTn+79d/yXz3zX5ydnp5f/dv7z1+s3l0MXjssVy/0cuB3Nz/Y2erkPOcP/N+Pn8++/u/3b//834/l0fPvjj797V8f965f5deDd5+u/8LXP+z57+0InJ1+OjlaXbxI8cy/+O3t8fnqxw8Hhxe/+9v6mq+/9vb8/bv1r9z6X48OPr69/N6LX7w+fvfui9N3p2eXL+RfFSu/qtdfP13/8ePzCyDCxUt+PD87/WXVfuPJ6cn6pV/c/du1P+rq7Hz1+daXrv+2X69O36/Oz35ff8v17+418fpaXLO3F+LVr3+7uZKhDldfe3vrKqYNQdf0vNm8+M0Ar//leoz7x/vvvzVfvvjhm7eHP/nf985//v31X775156LCw74UVilo2rOEfZFuTXCVSrvjLCr2rfW7SGuqmamIS7TmCF2I4d4ilEr0zaXVVPcGbVYxbuDVsY4F5fNw4N2fnZ8cPLm4lejRuzs9Pzg/Pj0ZP3LMqV7KN3m/uj4bD2fX/251cHH8xkxDj7s+2brkviq3q/CnatSVj2zRVn5/crPdWFG0Yw+YdQhPTgjV7W7O8ZNneaaLh4e3/Un94eLfz08ODt6eIBfvzv+8Kdb//6PgU9Dd3T0+vLj7yryNEX34sw2a1fFw5+LrgjV3cuwfqc8/TJ8Ln4tfvj5b39rflh9KL4q/9T89LejvaqsxlyJT68ypqFXV++Lv7zafOHg8Jc3l++Wv306f3d8MaDX74mzX/62CSXFfrH9tfVYfrlX7IdbF7ToXOjri+guvv5hdXa8HpTVxVW7mMMuJ83vbr64JuHzl+sLff0y62/5dPbx+NfVD6v2Ve7S8jpc/N8dKNa/U1/+c/EnTk/Ob3396p9piHHF9tt2/R7dL+/Ojr7nc7792hzAeAHDAkwJAUwlYFiAKSCACQKGBBiXIICpBQwLMDUEMBk+SMBgAIMRejN8gIDBAAYj9GaYPQGDAQxE6G2XtgQMPDAQmTdkrCGKFwheICJvu/4vXuB5gUi8QZqXhReIwBtkeVl4gcBFjpcFlz3n96M9MHK8NMB4CGDkeGmACRDAyPGiAtN0Am+AiDBSvCy8eAReahleFl4cBC8yvCS8uAaCFxleFl4iBC8yvCy8QOTdWoaXhReMvCvFy8ILRt6V4SXhBSPuyu+S4IKRdmV3SXDBCLuSuyS4QGTdKLdLggtE1I1SuyS47CGsTEeZXRZcSgheZHZZeKkgeJHZReUldbd6Q8QXmV0WXiDOkkSZXRZeIA4HRKldFl4gzk5HuV0SXjDuFxQld1l4gci7SXaXhReIvJukd1l4gci77e3bxQs8LxB5N8nvkvACEXeT9C4JLhhpV3aXBBeMsCu5S4ILRtaV2yXBBYIWmV0SWjDuEpRkdll4gbhJUCOzi8pL7IQXiFMkjcwuCy8Qx0gamV0WXiDOkTQyuyy8QBwkaaR2SXjBuEdQI7fLwgtG3pXcZeEFI+/K7rLwgpF35XdZeMHIu/K7JLwgxF1fSO+S4IKQdn0hu0uCC0LY9YXkLgkuCFnXF3K7JLggRF1fSO2S4IJwjyBfyOyy4IJwjyBfyOyi8lJ3VhoRDpH4QmaXhReEUyS+kNll4QXhGIkvZHaHeamrWKfYx4t3lQ/VXV5eX/4zDy8IJwO8k9plmV8Qzkx7J7dLwgvEPYK8k9xl4QUi7zrZXRZeIPKuk95l4QUj78rvsvCCkXfld0l4wYi70rskuGCkXdldElwwwq7kLgkuEFm3lNslwQWCFpldElog7hHkWzzECxwvofNhBHGIpJTZZeEF4hRJKbOLupOhywvEMZJSZpeFF4hzJKXMLgsvEAdJSqldkvwCcY8gX8rtsvCCkXcld1l4gci7XnaXhReIvOvld1l4gci7Xn6XhBeIuOuld0lwgUi7XnaXBBeMsCu5S4ILRtaV2yXBBSPqSu2S4AJxjyAvs4uKS9X5MII4Q+Jldkl4wThEUsnssvACcYqkktlF3cnQ5QXiGEkls8syv0CcDKikdll4gci7ldwuCS8Y9wiqJHdZeMHIu7K7LLxg5F3pXRZeMPKu/C4LLxh5V36XhBeIuBukd0lwgUi7QXaXBBeIsBskd0lwgci6QW6XBBcIWmR2UWnxnZ0vEBvrgswuCS8Yh0iCzC4LLxCnSILMLgsvEMdIgswuCy8Q50iCzC4LLxB5t5baJeEF4x5BtdwuCy8QebeW3GXhBSLv1rK7LLxA5N1afpeFF4y8K79LwgtG3JXeJcEFI+3K7pLgghF2JXdJcMHIunK7JLggRN2q0eyCikvZmV0QtkpVjaYXFl4QDpGEQktHJLxAnCIJhZaOWHhBOEYSCi0dsfCCcI4kFFo6YuEFIe+GQktHLLxg5F0tHZHwAnGToFBo7YiFF4y8q8UjFl4w8q78LgsvGHlXfpeFF4i86+R3SXiBiLtOepcEF4i062R3SXCBCLtOcpcEF4is6+R2UXFxnb0MCFvrgpPbZeEFYW9dcHK7JLxAnCMJTm6XhReEgyTBye2y8IJwkiQ4uV0WXiDybim3y8ILRN4tJXdJeIG4TVBo8RAv8LxA5N1SepeFF4i8W8rvsvCCkXfld1l4wci78rskvGDEXeldElww0q7sLgkuGGFXcpcEF4is6+V2UXHp0AKxs85L7ZLgArGzzsvskuACcYjES+xy4IJxiMTL65LgAnGIxEvrkuCCEXVldUlwwYi6srokuGBEXVldDlwwbg/kZXVJcIGIupWsLgkuEFG3ktUlwQUi6layuiS4QETdSlaXAxeIpFtJ6nLQghF05XQ5aMHIuVK6qLTslVu4eIi9dJWcLgsvEJvpKkldFl4gDo5UsrokvGCcHAnSuiy8QBwdCfK6LLxA5N0gscvCC0TeDTK7LLxA5N0gt0vCC8aNgYLsLgsvGHlXfpeFF4y8K7/LwgtG3pXfZeEFI+/K75LwAhF3a+ldElwg0m4t
u0uCC0TYrSV3YXFx5TYwZXD79sDI7vIA4xGAkd7lAcYhACO/SwOMaxCAkeDlASYiACPDywMMROiV4uUBBiL0yvHyAIMQeqMsLw0wCJk3SvPS8IIQeaM8Lw0vCIk3SvPS8IIQeKMsLw0vEHlXkpeGl70EwIscLw8vJQIwcrw8wFQIwMjx4gLjt4Epa4QII8fLA0wFAEyS4+UBpkQARpKXB5gCARhZXhpgXEIARpqXBxiE0JvkeXmAgQi9Er08wECEXpleHmAgQq9M7zAwdRXr9QD1AONd5UN1F5jXl//MBAxE5pXopZlgICKvPC8NLwiJt5HmpeEFIfA2srw0vCDgIsdLg8ueA9jY0Mjx8gDjEYCR48UFptoGBuJwSSPHywMMwumSRo6XBxiE4yWNHC8PMAjnSxpJXhpgIG4i1Mjy8gADEHrrQpqXBxiA0FsX8rw8wACE3rqQ6cXd2NAFBiD01oVMLw0wAJm3LiR6aXiBiLzyvDQJBiLxSvPS8AIReGV5aXiByLuSvDS8ANxEqC7keHl4AbiJUO3keHGBCdvAIJwtqZ0cLw8wAIdLaifHywMMwOmS2snx8gADcF6gdpK8PMAAnKeunSwvDTAINxGqnTQvDzAQoVeelwcYiNAr0csDDETolenF3dnQBQYh9JYyvTQzDELmLSV6aXhBiLwtHuIFnxeExFtK89LwghB4S1leGl4QcJHjpcEF4SZCdSnHiwtM3fk8QjhbUsrx8gCDcLiklOPlAQbhdEkpx8sDDMLxEi/HywMMwvkSL8lLAwzCTYTq9m6eAoYAGITQ66V5eYBBCL1enpcHGIjQK9PLAwxE6JXppQEGIvNK9NLwAhF55XlpeIFIvNK8NLwgBN5KlpeGF4S8W0ny0vCCcBOh9j414gWQl9j5PEI4WlLJ8dIAA3G2pJLj5QEG4XBJJcfLAwzC6ZJKjpcHGITzApUkLw8wEKFXlpcGGIibCFXSvDzAIITeIM/LAwxC6A0SvTzAIITe9hymgCEABiH0BpleGmAQMm+Q6KXhBSLyyvPS8AKReKV5aXiBCLyyvDS8IOAix4uLS9rGxSNstQtyvDTAQJwtqeV4eYBBOFxSy/HyAINwuqTdYCxgCIBBOF5Sy/HyAIMQemtJXhpgIG4iVMvy8gADEXqleXmAgQi98rw8wECEXpleHmAgQq9MLw0wCJk3SvTS8IIQeaM8Lw0vCIm3XTwXL/i8IATeKMtLwwtC3o2SvLi8NJ35BWHvVJTk5QEG4WxJlOSlAQbicEmU5OUBBuF0SZTk5QEG4XhJlOTlAQYh9CZZXh5gEEJvkualAQbiLkKtGhIwBMAghN4k0csDDELoTTK9PMBAhF6ZXh5gIEKvTC8NMBCZV6KXhheIyCvPS8MLROKV5qXhBSHwNrK8sLyUxTYvHmGzXSPLywMMwm67dtITMPjAQBwvaWR5eYBBOF/SyPLyAINwwKSR5eUBBiL0yvLyAAMReqV5aYCBuI9QI8/LAwxE6JXo5QEGIPTGQqaXBxiA0BsLmV4eYABCb2x/JgGDDwxA5o2FRC8NLwCRNxbyvDS8QCReaV4aXiACrywvLi+uwwvAZrtYyPLyAAOw2y4Wsrw8wAAcL4mFLC8NMAjnS6KT5eUBBuCASXSyvDzAIITe9mcSMATAIIReJ83LAwxC6HXyvDTAINxIKDqJXh5gIEKvTC8PMBChV6aXBxiI0CvTywMMROiV6aUBBiHzlhK9NLwgRN5SnpeGF4TE2+IhXgB5Kbd58Qi77UppXh5gELbbldK8PMAgnC8ppXlpgIE4YFJK8/IAg3DCpJTm5QEGIvRK8/IAAxF6pXl5gEEIvV6elwYYhDsJRS/RywMMQuj1Mr08wCCEXi/TywMMQuj1Mr08wECEXpleGmAgMq9ELw0vEJFXnpeGF4jEK82Ly4vf5sUj7Lbz0rw8wCBst6ukeXmAQThgUknz8gCDcMCkkualAQbihEklzcsDDELoraR5eYCBCL3SvDzAQIReeV4eYCBCr0QvDTAQtxKqZHp5gIEIvTK9PMAghN4g08sDDELoDTK9PMAghN4g00sDDELmDRK9NLwgRN6Q4XlXJ0fPz85Of1v/6uT04nKvL+vHt6uj60G+xcrF1787OF9foJPLr5SF34z2T9d/x/K+0VwdvVndO5a3Rir0jFT7tbPVu4PzNQ1bL943fNf/he9Oj9c/yc2litXWlSqL1F6q9kU+nn46O1xd/7mby3D3pdqbew2/1PnB2ZvV+Z2Xuryim7/64y+yczkP27t4R3/Ip7wsDk9PTi7el6/aVyjupT/47SF11V32neu5pC7MRb8r6oz8PjAwDw314wdsk1eLvAGabXZY/2VUiBf6NHkI0Ic/Zaw+Q1yhFsxDyef2U8g2eKyhURNmg6ayh0ZteBiao4NVen3YC81hWr16PS8ctT0cqr6ocCRzOOqMj5tRSX94FEYn/T2ApF9rZjX+OM73SYbvIk2xNJTAJP1aW0nZoLFP+rW2ky4U5kbDYZ/0a20dRYUDIOlnbBO1cvo9o7N40NemSONP4zhAE1TQ18IPDSUwQT9qHYgNGvugH7UOtFCWGw2HfdCPspKocNgH/ZghI0cq/aFRoFT6Ud7N9uN4Ayhy0o8SbTSU4CR9CTg2aACSvs5vLxPmxsMBkPSlJVHhAEj6GTbSSun3zKtLB/32Aum9Y/Vp3AzQhBT0k0QbDSUwQT9JwLFBYx/0k3YJLpTlRsNhH/STrCQqHPZBP2XIyHFKf3AUKJV+knez/TjeAAqd9CXaaCjBSfoScGzQACR9bRNcJsyNh8M+6TfSkqhw2Cf9ZurzuFMofcNk30i02X78NvlHXQzfNTJrNJTAJPtGxo0NGvtk32hbIBs0AIlfenKZxD8eDoDEP/W53OFRoHT7jQSc8YzLcDC3kXGjoQSlAbhCJo4NGvMG4ArtF2SDxrwBuEIac6EGwHdg1xUZ9XDxxxAUmY8hmHNctF/DdKa9ARM4+btC/ZCGEpzkr7rIBo198neqi2zQ2Cd/p7q4SPJ/BBz2yd9l1MKRu32GRoHR/TunfR22M+4GUOQG4LSRg4YSmAbgtJGDDRqABqCNHGzQADQAacxlGsB4OAAaQIa9XNr91/bq38nPGU+0aYAupODfpkhRgk8JTPAvJeTYoLEP/i0jgoYGGvvgX8peLhT8R8NhH/zLDGk5Tv0PjgKl+i/l52xn3HLo3DhUA5CQo6EEpwFIyLFBA9AAtM1wmTA3Hg6ApC9diQqHfdJvXxpJ8Zf2it/Lw9l+Cm+4RA74XuKNhhKYgO8l3tigsQ/4XrsI2aCxD/5elnKZ4D8eDoDg//hzvWNHgVLxe3k44xmX4Xyvl3ijoQSnAUjIsUFj3wDan0DQ0EBj3wAqacyFGgDh+d5KImqhGWX0jV+7KyJ77u6SyNK0yEDB0hLwaJF6gqUl4tGijXKwtDR4tMjMTRxqxz+1wOFRIakCO4d006w3pyXIpsDS0k2zALRIo8DS0k2zALSUogWVlm6aBaBFFg4uzQJQ8fgDmNNtyy47d10PmfuyN+lqjoHJEEvjtq2
Epz+Cth0ggG0rQSrFdpkw5D/X2XB6kUKhoQRm20rQXic2aOy3rQRpOTZo7Let1LJzy2xbGQ+H/baVOkPGjWwAQ6NAuXG9ln6ynXHrfAFh+C6SdqKhBKYB1NoDxgaNfQOotRWMDRqABiCNuUwDGA8HQAPIsJdWawA9o7N4AZCnM55w8x/zbPgmkpijoQSmAESJOTZo7AtA1O45NmjsC0CUxVyoAIyGw74AxAx5Oa4ADI4C5RJAlKeznXE3gCI3gCgxR0MJTgOQmGODBqABaLshGzQADUAac5kGMB4OgAaQYS+tlgB65tulC0B7gfTesZpwmwGakApAkpijoQSmACSJOTZo7AtA0m5DNmjsC0CSxVyoAIyGw74ApMnPAQ+OAuUSQJKns51xE8M54CQxR0MJTgOQmGODBqABaLshGzT2DaCRxlymAYyHw74BNNOfA376EoBh4m8k5mxn2Cb/KI3hu0YmjoYSmMTfyMSxQWOf+BttL2SDBiDxS1suk/jHwwGQ+Cc/9zs4CpTOv5GYM55xGQ7+NjJxNJSgNICykIljg8a8AZSF9heyQWPeAMpCGnOhBsB38LcsHn/wd7bHIuxlPhWhcn6+cXn84yIeGuldeCpCWchFmX4S3QAK3IzKQvKJhhKcZqQtdGzQADQjmTo2aACakcTdIs3oEXDYN6M2UE/YACY7EA2wNrL+7+nNYzrjbgBFbgBO8omGEpgG4DKklKCBgsa+AThtqWODxr4BOGnMW9AMJ/0OTs/rVLxI88IB0AAy7KXVGoD9UxFKJ09nPOHWAzRBFQCJORpKcAqAxBwbNPYFoMVW0NBAY18ASlnMZZYAxsNhXwBaFqYrAIOjQLkEUMrT2c64G0CRG0ApMUdDCUwDKCXm2KABaADabsgGDUADkMZcpgGMhwOgAWTYS6slAPunIpSlPJ3xhJsGaEIqAO1LixJ8SmAKgJeYY4PGvgD4DGEnaKCgsS8AXhZzoQIwGg77AuAnPwc8OAqUSwBens52xvUM54C9xBwNJTgNQGKODRqABqDthmzQADQAacxlGsB4OOwbQDunQS0BWN4aqZKZs51iN0QiR/5KKo6GEpjIX0nFsUFjH/kr7S9kg8Y+8lfylstE/vFwAET+yQ/+Do4CpfSvZOaMZ1yGk7+VVBwNJTgNQCqODRr7BhC0wZANGvsGEKQxF2oAhCd/Q4a9XLwBXP+pvTbtt48DiJmNwIUZB+zxz5F46BLsxPMSgiTVMrNNGFpcQ6pGQfYJjgaYChS0WQ4VDoCqI/VmW3XGQwNQdWTiUGcUgKqTYdxGJvfJjjgjLHa0b1+9eaxm3GYAJ6REX8sm0VACk/TrDMskaKCgsW8AtfbIsUFj3wBq6ceFGsBoOOwbQJ1hI63cPcBzDmr5ONsJd8MndAGQmKOhBKcASMyxQQNQALRFjg0a+wIQZTGXKQDj4bAvADFDXo4rAIOjQLkEEOXpbGfcDaDIDSBKzNFQAtMAosQcGzT2DSBq+yAbNAANQBpzmQYwHg6ABpBhL62WAACecxDl6Ywn3DhAE1QBkJijoQSmALRzv6Chgca+ACTtNmSDxr4AJFnMhQrAaDjsC0Ca/Pzu4ChQLgEkeTrbGXcDKHIDSBJzNJTgNACJOTZoABqAthuyQQPQAKQxl2kA4+EAaADTnwN++hKAYeJvJOaMZ1iGc7+NTBwNJTCJv5GJY4PGPvE32l7IBo194m+kLRdK/ITnfpvJz/0OjgKl828k5mxn3Ibh4G8jE0dDCU4DkIljgwagAWh/IRs05g3AF9KYyzSA8XCYNwBfTH7wd8bHHGQWgqp9vPIc45Uh7kaN180V2IWnHPhCjsr0E+oGUODG5AtJKRpKUBqTL7S1jg0a88bkCxk8NmgAGpOE3i1oxsPRwex5nYoXaV5oAJpUhtAb2QyGRoFxLcUXclfGM3EawAmpGbRQihJ8SmCagdMWPDZo7JuByzB5ggYKGvtm4KQ3p2wGL8q6fvlyXmjsm4HLsJ1Wawb2T1fwTl7PdiLe8AldDCTyaCjBKQYSeWzQABQD7cxjgwagGMhu2i4ZjIfGvhi0H5DTFYPBUaBcMijl9Wxn4g2gyM2ghUKU4FMC0wxKiTw2aOybQalti2zQ2DeDUnpzkeMXj4ADoAFkWE2rpQH7py74Uv7OeMKtB2iCKgASdjSU4BQACTs2aOwLQPvSgoYGGvsC4GUxFyoAo+GwLwA+Q16OKwCDo0C5BODl6Wxn3A2gyA3AS8zRUALTALzEHBs0AA1A2xDZoAFoANKYyzSA8XAANIDpzw0/fQnA8hZLXmbOeIplOCi8uc2YKIGnBCbyV1JxbNDYR/4qQ9EJGiho7CN/JW+5UOQnPBBcTX4geHAU5pP+VTlfBaik5myn3IrhSHAlF0dDCU4FkItjgwagAmiHIRs0ABVAHnOZCjAeDvsKECY/+js4Ck9/7kKTWQlcmHHApn5Qxc0l2IkHL2weta3ZZt7ZJgytriFVoyD9BEcDTAUK2i2HCod91QlSb7ZVZzw09lUnyMShzigAVSfDuI1M7pOdcUY44hAkl4xn3DiAE1Sil02ioQQm6bfJQNDQQGPfAGptkmODxr4B1NKPCzWA0XDYN4A6w0ZauXuAByDU8nG2E+6GT+QCUEvM0VCCUwAk5tigASgA2iLHBg1AAZDFXKYAjIcDoABkyMtxBWBwFCiXAKI8nfGM2wzghNQAosQcDSUwDSBKzLFBY98AorYPskFj3wCiNOa9j0Brk/5QN5gRDvsGEDPspdUSAMCDDqI8ne2Eu+ETugBIzNFQglMAJObYoAEoANptyAaNfQFoU6agmXkJYDwc9gUgTX5+d3AUKJcAkjyd7Yyb8o/WGL6LJOZoKIFpAElijg0a+waQtN2QDRqABiCNuUwDGA8HQAOY/hzw05cALBO/xJzxDMtw7jfJxNFQApP42xu2CRoaaOwTf6PthWzQ2Cf+RtpyocRPeO63mfzc7+AoUDr/RmLOdsbdAIrcABqZOBpKcBqATBwbNAANQPsL2aABaADSmMs0gPFwADSAyQ/+zveYA59bCNoLMf14VUWGuBs3XpsrsAtPOagKOSrTT6gbQIEbU1VIStFQgtKYNk+0FzQ00Jg3pqqQwWODxrwxVYWE3iKN6RFwmDemqsgQd6MawPAoMK6ZVIUclfGMWw/gBNUAJKVoKMFpANpqxwaNfQNoP/4EDQ009g3ASWPee6ukh+DoYPa8TsWLNC809s3AZVhNq7UB+6coVE7+znYi3vCJXAychB0NJTDFwEnYsUEDUAy0A48NGoBiILu5zNLAeDgACkCG1By3NDA4CpRLA07+znjGTQM4ITWAFlRRgk8JTAMoJezYoLFvAC0jgoYGGvsGUEpjLtQARsNh3wDKDHtptQRg/xSFqpSns51wy6HDOVAFQGKOhhKcAiAxxwYNQAHQLkQ2aAAKgCzmMgVgPBz2BaB96emWAAZHgXIJwMvT2c64G0CRG4CXmKOhBKYBeIk5NmjsG4DXdkM2aOwbgJfGXKYBjIcDoAFMfT54iiUAy1smeZk54ymW4UCwl4qjoQ
Qn8kvFsUFjH/nbn0DQ0EBjH/krecuFIj/hwd8qQ1eOk/6Do0Ap/SuZOdsZdwMocgOopOJoKIFpAJVUHBs0AA1AGwzZoAFoANKYyzSA8XAANICpH6MwRQMYeIyCyy0Eab41gc2DoScbr+rpiyTt+PTMNb0D5MJ8QIUM3zBugCZ4FjfQcyZCRuX+w87GS3yEbwBFrpRB4mGZz+wRNMBUxyDfgAqHfUUM8gqocNhXwSB/gAqHfRUMGZ5gZHIfGgXKxaCgjT7GyT3/QfWG7yLt7Flois2nASa5twlAcMDBYZ/ca+3UQYXDPrnX0oaocNgn9zrDFlo5d4DnN9TyZbbBfcMncnCvJc5oKMEJ9BJqbNAABH1tyGGDBqAAyD4uUwDGwwFQACbfxTU4CpTqPsq7Gc+4Q5vckBpAlICjoQSmAUSJOTZo7BtA1PY+NmjsG0CUxpzy0c4vX6aiKOaFxr4ZxAyrabU0APBchyh/ZzsRb/iELgYSdjSU4BQDCTs2aACKgXYPskFjXwzalCloZl4aGA+HfQFIk5/HHRwFyqWBJH9nO+Om/KMyhu8iCTsaSmAaQJKwY4PGvgEkbUNkgwagAUhjLtMAxsMB0ACmP9f79CUAy8QvMWc8wzKc400ycTSUwCT+RiaODRr7xN9o2yEbNPaJv5G2vHczUJvsh7rAjHDYJ/4mw1aOS/yDo0Dp/BuJOdsZdwMocgNoZOJoKMFpADJxbNAANADtL2SDBqABSGPe4/zDKh1VfdCk8pW/hGZGOAAawOQHgid4TIHhcxtCkWHmxg3IBA+3xnksQSgkoUw/gm4ABa5EoZB1oqEEpRKFQnvn2KAxr0ShkKJjg8a8EoVCxm6RbVCPgMO8EoUiw8yNagDDo8C4KBIKSSjjGTf/AfKG7yJZJxpKcBqA9tKxQWPfANqPP0FDA419A3DSmAs1gNFw2DcAl2EvrdYA7B+TEJw8ne2Eu+ETuQA4iTkaSmAKgJOYY4MGoABoKx0bNAAFQBZzmQIwHg6AApAhL8ctAQyOAuUSgJOnM55x0wBOSA2gBVWU4FMC0wBKiTk2aOwbQMuIoKGBxr4BlNKYCzWA0XDYN4Ayw15aLQHYPw4hlPJ0thPuhk/oAiAxR0MJTgGQmGODBqAAaLchGzQABUAWc5kCMB4O+wLQvvR0SwCDo0C5BODl6Wxn3A2gyA3AS8zRUALTALzEHBs09g3Aa7shGzT2DcBLYy7TAMbDAdAApj4HPMUSgOWtkbzMnPEUy3Dw10vF0VCCE/ml4tigsY/87U8gaGigsY/8lbzlQpGf8OBvlaErx0n/wVGYT/pXxXwVoJKas51yN4QiV4BKLo6GEpgKUMnFsUEDUAG0w5ANGoAKII+5TAUYDwdABZj86O/gKORXgOs/tdfG/fZ5AE1mJXBhxgGb+gkSN5dgJx6YsHkGtmYbo4+ooVU3pMoUpKUW+kzKpwGmGoUMLSU4TOCwr0BBSg4VDvuqE2TiUOGwrzohw7iNTO6TnXFGOOIQJJdsk/sGUOjkLpu0zBQ7ggac5K7NcKhwACR3bXpDhcM+udfShqhw2Cf3OsMWWjl3gAcU1PJltsF9wydycK8lzmgogQn0tYQaGzT2Qb/W1jY2aAAKgOzjLWjGw9HB7OXLVBTFvNAAFIMMGTmuGAyOAqXSr+XjjGfiOIATVDOQmKOhBKYZRAk7Nmjsm0HUtj82aOybQZTenLIZfPFFXY9sBqOhsW8GMcN2Wi0ZADzQIMrr2U7EGz6Ri0GUyKOhBKcYSOSxQQNQDLSrkA0agGIgu2m7ZDAeGoBiMPn53cFRoFwyaK+Q3lRWMzHD+d0kkUdDCUwzSBJ5bNDYN4OUIfIEDRQ09s0gSW/e2wzaUwND5wxmhMO+AaTpzwE/fWnAMvFL2NnOsInh3G+SoaOhBCfxy9CxQQOQ+LUdkQ0a+8TfSFvel/hT+cpfwdFJ/GGVjqp54bBP/M3k54cHR4HS+TcSc7YzbpN/9sbwXSQTR0MJTANoZOLYoLFvAI32HbJBA9AApDGn3A30wr98efn9M0ID0AwmP0A83+MPfG5RaDckzTFekz8uohk6XEL59ING7sr4k2toKQ6oSV0cvxIlJJSgNKm60JY7NmjMm1RdyOyxQWPepOpCom/KJvVl+Cp9OWqNZTw05k2qLjJE38hmMNmBa4A1lrqQ0zKdiW8AhW4Gklg0lOA0A23NY4MGoBloax4bNADNQHpzkXMVj4DDvgG0gRpxbcD+KQ21k7+znXA3fCIXACdhR0MJTAFwEnZs0NgXAKedeWzQ2BcAJ4u5yGPaHgEHQAHIkJejCsDwKFAuATh5OuMZtx7ACaoBSMzRUILTACTm2KCxbwAttoKGBhr7BlBKYy7UAEbDYd8AWhYQlwDsn7pQl/J0thPuhk/kAlBKzNFQAlMASok5NmgACoB2G7JBA1AAZDGXKQDj4QAoAFOfDx4eBcolgFKeznjGZTgf3L60KMGnBKYBeIk5NmjsG4DPEHaCBgoa+wbgpTEXagCE54D99OeAn74EYHjLpNrLzNlOsZ7h4K+XiqOhBCfyS8WxQQMQ+bW/kA0agMgvb7lM5B8Ph33krzJ05TjpPzgKlNK/kpmznXE3gCI3gM2Ng0UJPCUwDaCSimODxr4BVNpgyAaNfQOopDGXaQDj4QBoAJOf/J2gAQw8RsHFzEbgwowDNvVzJ24uwS48R6GuJKmMP6KGFt2gKpOsFA0lMJWpndUFzdxBZjQc9tUoSNWhwmFfgYIMHSoc9hUoZJi4kYl+sqPPCIsgQdLJNqttAEVO9EGWiYYSnESvvXPLfC6PhwMg0WuPHCocAIlemhEVDoBEn2EXrRw9wPMM2nev3jtWUa0ZoAkp0NcSajSUwAT6WqJtoY/l0XDYB/paW95Q4bAP9LUsIyoc9oG+zpCL4wL94ChQKvpaHs02q20AhU70Emo0lOAkeok2NmgAkr6297FBY98AorTkvY8ofgiODmYvX6aiKOaFxr4ZxAxLaaX6AZ5bEOXjbCfiDZ/IxSBKzNFQAlMMooQdGzT2xSBqlyAbNADFQHZzmaWB8XAAFIDJz+MOjgLl0kCUvzOecRnO40YJOxpKYBpAO/kLGhpo7BtA0rZCNmjsG0CSxlyoARCe303Tn999+hKAYeJPEnO2M+wGSOTEn2TiaCjBSfwycWzQACR+bTtkgwYg8Utb3rsZqE32Q11gRjgAEv/k53sHR4HS+TcSc8YzLsMB30YmjoYSmAbQyMSxQWPfABrtL2SDxr4BNNKY9zj/sEpHVR80qXzlL6GZEQ77BtBMfiB4xscW5BaC9v4Ic4zX5I95aIYOi1A+taCRo7L9hGqGltygGpOkFA0lOI1JW+vYoDFvTLGQwWODxrwxxUJCb5ldUuPhMG9MscgQdyMbwGQHpQHWTGIhR2U6494ACtwAYiEpRUMJSgOIhbbasUED0AC01Y4NGoAGII25SAN4BBwADSDDXlqtAdg/FSEW8nTGE24aoAmpALRQihJ8SmAKgJOYY4PGvgC4DGEnaKCgsS8AThZzoQIwGg77AuAy5OWoAjA8C
pRLAE6eznbGdUN78KAagMQcDSU4DUBijg0agAag7YZs0AA0AGnMZRrAeDjsG0D7QYi4BGD/tIRYytPZTrgbPpELQAuFKMGnBKYAlBJzbNDYF4BSuw3ZoLEvAKUs5jIFYDwcAAVg6nPAw6NAuQRQytMZz7gE54BjKTFHQwlOA5CYY4PGvgG0Ly1oaKCxbwBeGnOhBkB4Dthn2MvFlwAMb40Uvcyc7RS7IRI58nupOBpKYCK/l4pjgwYg8mt/IRs0AJFf3nKZyD8eDoDIP/XB3+FRmFH6pxkrgNSc8ZTLcPS3koujoQSmAlRycWzQ2FeAKsPRCRooaOwrQCWPuVAFIDz6W01+9HdwFJ7+vIQmsxK4MOOATf2AiZtLsAsPTIiVLJXtR1Q1tOoGVZmkpWgowalM2l23TJAZDwdANZKqQ4XDvgK1eVBwwMFhX4FChokbmegnO/uMcPQhSDrZZrUNoMiJPsgy0VACk+iDNs8t87k8Hg77RB+0SQ4VDoBEL82ICgdAos+wi1aOHuCBBkEezTiqxQGaoAK9hBoNJTCBvg0Ggmbuj+XRcNgH+lpb3lDhsA/0tSwjKhz2gb7OkIvjAv3gKFAq+loezTarbQBFTvS1hBoNJTiJXqKNDRqApK/tfWzQADQAaclb0Awn/aFuMCMcAA0gw0ZaKX2ABxREeTfjCbcZoAmpAEQJOBpKYApAlJhjg8a+AETtBmSDxr4ARFnMZZYAxsNhXwDi5OduB0eBcgkgytPZzriR4dxtlJijoQSnAUjMsUED0AC0fZANGvsGkKQxl2kA4+GwbwBp+nO6T18CMEz8SWLOdoZN+UdjDN81MnE0lMAk/iQTxwaNfeJP2l7IBg1A4pe2XCbxj4cDIPFPfo53cBQonX+SmDOecRkO8iaZOBpKYBpAe6tlQUMDjX0DaLS/kA0a+wbQSGMu1AAID/42kx/8ne/xBL6vEKSe4WpvlTjHeE3+OIfNFdiJpxM0clS2n1DN0JIbUmNqJKVoKMFpTNpaxwYNQGOSwWODBqAxSegt05jGw2HemFKRIe5GNoDJDkoDrJmkQo7KdMa9ARS4AaRCUoqGEpQGkApttWODxrwBpEJb7digMW8AqZDGXKQBPAIOgAaQYS+t1gDsn36QCnk64wm3HqAJqgBIzNFQglMAJObYoLEvAO3Hn6Chgca+ADhZzIUKwGg47AuAy5CXowrA8ChQLgE4eTrbGXcDKHIDcBJzNJTANAAnMccGDUAD0HZDNmgAGoA05jINYDwcAA0gw15aLQH0Pi2h75iEm3F8JOqMZ9yhUzhIDaCNk6IEnxKYBlDKzLFBY98AWkYEDQ009g2glMZcqAGMhsO+AZRTHwQeHgXKNYBSos52xi0JDgKnUmaOhhKcBiAzxwYNQAPQfkM2aAAagDTmMg1gPBz2DaB9aag1gAcT/3z3RkpeZs52it0QiRz5vVQcDSUwkd9LxbFBYx/5vTYYskFjH/m9vOUykX88HACRf+qTv8OjMKP0jzNWAKk54ymX4eyvl4ujoQSnAsjFsUFjXwHan0DQ0EBjXwEqecyFKgDd2d+yzlkSWrwCDDwwweU2gmq+8Zp6leTWFXj6AxN65preAXJhxgGa+Knbt4Z8B54osf7baL3A8iP8FqC4lXL9Q2qBgIYSkEq5/pm1QMAGjXWlXP/MWiBYoh08Bg7r6rj+mbWfGRUO++qYszgyMunv0NO2138bLQsYfxzjP217/UNqHYCGEpikr3UAOmjsk77WAZYKc3RP1V7DIS2JCod90q8ybKSV0zd/QsR6eOTdbD+NN3wiB/1Koo2GEpygLwHHBg1A0Neu5WWy3Hg4AIK+rCQqHABBf+LdYPeMAqXSb/dN6c1j9XE8tFkOKekHiTYaSmCSfpCAY4PGPumHDDH3h4Vm0jA3Gg77pB+kJVHhsE/6IcNGWin93ic+LDw88m62n8YbPqGDvkQbDSU4QV8Cjg0agKCvXYJs0NgXgPYnEDQzF4DxcNgXgHryc7qDo0Cp+mv5ONsZt84/EmP4LpKAo6EEpgHUEnNs0Ng3gFrbB9mgAWgA0pjLNIDxcAA0gOnP7z59CcAy8UvMGc+wDOd1a5k4GkpgEn+UiWODxj7xR20jZIPGPvFHacuFEj/hOd6YYSvHJf7BUaB0/lFiznbG3QCK3ACiTBwNJTgNQCaODRqABqD9hWzQADQAacxlGsB4OAAawOQHfCd4fIHl8xzaSzLdgMSnDwjQ4wqSJJTtR9AGUORKlGSdaCiBqURJe+fYoLGvREmKjg0a+0qUZOyWqUTj4bCvRGnih2DfMwqUiyJJEsp4xsV/Bvb6h5R1oqEEpwFoLx0bNPYNoNFeOjZo7BtAI415C5rxcHQw++p5/eLy+2eExr4ZNBlW02ptAOCxB438ne1EvOETuRg0EnY0lMAUg0bCjg0agGKgLXZs0AAUA9nNZZYGxsMBUAAypOa4AjA4CpRLA438nfGMmwZwAmoAsZCwo6EEpQHEQsKODRrzBhCLDGEnaKCgMW8AsZDGXKgBjIbDvAHEIsNeWi0B2D8mIRbydKYT7g2f0AVAYo6GEpwCIDHHBg1AAdAuRDZoAAqALOYiBeARcNgXgDZQT1YAhkeBcQkgOnk62xl3AyhyA3ASczSUwDQAJzHHBo19A3DabsgGjX0DcNKYyzSA8XAANIDJzwdPsARgeMuk6GTmjKdYggPB0UnF0VCCE/ml4tigsY/8LbaChgYa+8hfylsuFPn5Dv7GloXppP/gKMwo/esZB0hqznbK3RCKXAFKuTgaSmAqQCkXxwYNQAXQDkM2aAAqgDzmMhVgPBwAFWDqo7/Do5BfAa7/1F4b99vnBDSZlcCFGQds6idL3FyCXXiQQtw8G1uzjdFH1NCqG1Jl8tJSNJTAVCafoasEDRQ09pXJS+EtlH5Hw2FfjbzMHSoc9tXIZxi6kUl/sjPRCEcivGSU7cfxBlDopC/7REMJTtLXpjo2aACSvjbVLRPmxsNhn/QraUlUOOyTfpVhI62cvv0DEGIl72b7abzhEznoVxJtNJTABP1KAo4NGvugX2nr3DJZbjwcAEFfVhIVDoCgnyEjxwX9wVGgVPqVvJvxx3EcwAkq6Uu00VACk/TbLZmChgYa+6QftE1woTA3Gg77pB+kJVHhsE/6IcNGWil9gAcaBHk320/jDZ/IQT9ItNFQghP0JeDYoAEI+tolyAYNQAGQrVymAIyHA6AATH5Od3AUKFV/+/bVm8dqxmU4p1tLwNFQAtMAaok5NmjsG0Ct7YNs0Ng3gFoac6EGQHh+t57+/O7TlwAsE7/EnO0MWzOc161l4mgowUn8MnFs0AAkfm0jZIPGPvFHactlEv94OOwTf5z8HO/gKFA6/ygxZzvjxvwzM4bvIpk4GkpgGkCUiWODxr4BRO0vZIMGoAFIYy7TAMbDAdAAJj/g277A9I8z8LmFYM7xmvzxD3HosAjl0wyiHJXxJ9TQkhtSY2rnPVGCTwlMY0raWscGjX1jSjJ4bNDYN6YkoXcLmvFwdDD76nn94vL7Z4TGvkmlDKE3shlMdoAa
YS0lyV3ZzsQbQKGbgWQVDSU4zUBb8NigAWgG2oLHBg1AM5DenLIZvHw5PzT2zaB9CjPimgHA0xIaeT3biXjDJ3IxaCTyaCiBKQaNRB4bNPbFoNHOPDZo7ItBI7tpu2QwHhqAYpAhO8cVg8FRoFwyaOT1jGfiegAnqGYgkUdDCU4zkMhjg8a8GaRC2xbZoDFvBqmQ3lzm+MV4OMwbQCoyrKbV0oD9UxdSIX9nOuHe8AlcAFIhYUdDCUoBSIWEHRs0AAVAuxPZoAEoALKYixSAR8ABUACmPk88PAqMSwCpkKcznnEJzhOnFkpRgk8JTANwEnNs0Ng3AJch7AQNFDT2DcBJYy7UAPjODSc3/bnhpy8BGN5iKTmZOdsp1hEcFE5OKo6GEpzILxXHBg1A5Nf+QjZoACK/vOUykX88HPaRv8zQleOk/+AoUEr/UmbOdsbdAIrcAFooRAk+JTANoJSKY4PGvgGU2mDIBo19AyilMZdpAOPhAGgAU5/8naIBDDx2wcXMRuDCjAM29XMqbi7BLjx3IZWSVMYfUUOLblCVSVaKhhKYytS+tKChgca+MnkpPDZo7CuTl9FbqDKNhsO+MvkMczeyAUx2VBph0cRLUtnOuBtAkRuAl5WioQSnAWivHRs0AA1Ae+3YoAFoANKYyzSA8XAANIAMe2m1BmD/HIXUTvl671hNuM0ATUgFoJKYo6EEpgBUEnNs0NgXgEpb7digsS8AlSzmQgVgNBz2BaDKkJfjCsDgKFAuAVTydLYz7gZQ6AYgMUdDCU4DkJhjgwagAWi7IRs09g0gSGMu0wDGw2HfAEKGvbRaAgB4XkKQp7OdcDd8IheAIDFHQwlMAQgSc2zQ2BeAoN2GbNAAFABZzGUKwHg4AArA5OeAB0eBcgkgyNMZz7gM54CDxBwNJTANoE0GgoYGGvsGUGu7IRs09g2glsZcqAEQngOupz8H/PQlAMPEX0vM2c6wGyCRE38tE0dDCU7il4ljgwYg8Wt7IRs0AIlf2nKZxD8eDoDEP/m538FRoHT+7f1Z9eaxmnEZDv5GmTgaSmAaQJSJY4PGvgFE7S9kg8a+AURpzIUaAOHB3zj5wd8ZH5eQWwjKGcdr8sdLxKHDIpRPS4hyVLafUHFoyQ2qMUlK0VCC05i0tY4NGvvG1CYsQUMDjX1jShJ6yzSm8XDYN6aUIe5GNoDJDkojrJkkOSrbGXcDKHIDSJJSNJTANICkrXZs0AA0AG21Y4MGoAFIY96CZjwcHcy+el6/uPz+GaEBaAYZVtNqbQDgKQpJ/s54Ik4DNCEVg0bCjoYSmGLQSNixQWNfDJoMkSdooKCxLwaN7OYySwPj4bAvAE2G1BxXAAZHgXJpoJG/s51xm6G9eVANQMKOhhKcBiBhxwYNQAPQNkQ2aAAagDTmMg1gPBzmDaApMuyl1RKA/VMUmkKeznTCveETuAA0hcQcDSUoBaApJObYoDEvAE2hXYhs0JgXgKaQxVykADwCDoACMPX54OFRYFwCaAp5OuMZl+B8cFNIzNFQgtMAJObYoLFvAO3Hn6Chgca+AThpzIUaAN/54MZl2MvFlwAMb5nUOJk52yl2QyRy5HdScTSUwER+JxXHBg1A5Nf+QjZoACK/vOUykX88HACRf+qDv8OjMKP0r2YcIKk54ymX4Ohv05IqSvApgakApVwcGzT2FaBlRNDQQGNfAUp5zIUqAN/R36ac+ujv8Cg8/TkKTWYlcGHGAZv6wRM3l2AXHqTQlLJUth9R5dCqG1RlkpaioQSnMml3HRs0AJVJCo8NGvvK1L60oJm5Mo2Hw74y+QxzN7IBTHZWGuGohJeksp1xN4AiNwAvK0VDCUwD8NpsxwaNfQPw2mzHBg1AA5DGXKYBjIcDoAFk2EurNQD7ByY0Xp7OeMKNAzRBFQCJORpKYApAmyYFDQ009gWg0lY7NmjsC0Ali7lQARgNh30BqDLk5bgCMDgKlEsAlTyd7Yy7ARS5AVQSczSU4DQAiTk2aAAagLYbskED0ACkMZdpAOPhAGgAGfbSagkA4IEJ7QEMvXesJtxmgCakAhAk5mgogSkAQWKODRr7AhC025ANGvsCEGQxFyoAo+GwLwBh8nPAg6NAuQQQ5OlsZ9zAcA44SMzRUILTACTm2KABaADabsgGjX0DqKUxl2kA4+GwbwD19OeAn74EYJj4a4k52xm2zj9KY/iukYmjoQQm8dcycWzQ2Cf+WtsL2aABSPzSlssk/vFwACT+yc/9Do4CpfOvJeaMZ1yGg7+1TBwNJTANIMrEsUFj3wCi9heyQWPfAKI05kINgPDgb5z84O98j0vwmYVgzqclxMkfL7G5AjvxtIQoR2X7CRWHltyQGlOUlKKhBKcxaWsdGzQAjUkGjw0agMYkobdMYxoPh31jan+CCRvAZAelEdZMkhyV7Yy7ARS5ASRJKRpKYBpA0lY7NmjsG0DSVjs2aOwbQJLGXKYBjIcDoAFk2EurNQCApyUkeTrjCbceoAmqAEjM0VCCUwAk5tigsS8AjXbasUFjXwAaWcyFCsBoOOwLQJMhL8cVgMFRoFwCaOTpbGfcDaDIDaCRmKOhBKYBNBJzbNAANABtN2SDBqABSGMu0wDGwwHQADLspdUSAMDTEhp5OuMJd+gQDk4B8EUhMUdDCUgBWEMjMccGjXUBWEOTIewEDRQ01gVgDY0s5kIFYDQc1gVgDcfk54AHR4FwCWA9PvJ0ljPuLUChG4DEHA0lOA1AYo4NGoAGoO2GbNAANABpzCUawGPgsG8AbaCGWgIwTPxOYs52ht0AiZz4nUwcDSUwid/JxLFBY5/4nbYXskFjn/idtOUyiX88HACJf+Jzv/eMAqXzdxJzxjMu/sHf9Q8pE0dDCU4DkIljg8a+AbTYChoaaOwbQCmNuVADoDv4W8Wg/QrDcNRVrFPsg8O7yofq7ozy+vKfXmjGP9O1faTqdSEqq3q/DObAqBLRAFNCAKN2RANMAQGMmhELMC4hAFOrFdEAU0MAo0ZEAwxE6K1LAcMCDETorbW/gwYYiNBba28HCzAYmVfbOlh4wYi88rwsvGAkXmleFl4wAq8sLwsvELjI8bLgsuf8fjQHJsrx0gDjIYCR46UBJkAAI8cLC0zoJN6AkGGiHC8NMB4CGDleGmAcBDCSvCzAuAYCGFleGmAiBDDSvDTAYIReeV4aYDBCr0wvDTAQoTfJ9LIAA5F5k0TvQmdin8wLRORN8rws8wtE4k3SvCy8QATeJMvLwgtG3pXkZeFlD2GdOsnx0vBSQgAjx0sDTAUBjBwvLDBVd+83RISR46UBBuJ0SSPHSwMMxHmBRpKXBhiI89SNLC8LMBg3EWqkeWmAgQi9jTwvDTAYoVeilwYYjNAr0wu7s6ELDEbolellAQYj80r0svCCEXnleVkSDELiTYU0LwsvCIE3FbK8LLxA4CLHy4ILxE2EUiHHSwMMwk2EUiHHCwuM7wQYhMMlqZDjpQEG4XRJKuR4aYBBOF6SCjleGmAQzpekQpKXBRiImwilQpaXBhiI0OukeWmAgQi9Tp6XBhiI0Ot
kemmAgQi9TqYXdmdDBxiIzOskelkmGIzIK8/LwgtG4pXmZeEFI/DK8rLwgpF3JXlZeEG4iVBycrw0vCDcRCiVcrywwJSdZUeEsyWplOOlAQbhcElq+RAw+MBAnC4p5XhhHW8XGIjzAqUkL80Mg3CeOpWyvCzAQNxEKJXSvDTAYIReeV6WDANxE6FUSvTSzDAYoVemlwYYiNDrZXpZgIHIvF6il4UXiMjr5XlZeIFIvF6al4UXiMDrZXlZeIHARY6XBReMmwh5OV5YYFzn8wjibImX46UBBuJwiZfjhV0U6AIDcbrEy/HSAANxvKSS46UBBuJ8SSXJy5JhMG4iVMnysswwGDcRqqR5aYCBCL2VPC8NMBihV6aXJsNghF6ZXhZgMDKvRC8LLxiRV56XhReMxCvNy8ILROANsrwsvEDk3SDJy8ILxE2EghwvLC9F5/MI4mhJkONlAQbjbEmQ46UBBuJwSZDjhV0U6AIDcbokyPHSzDAQ5wWCJC8NMBihV5aXBRiMmwgFaV6WDINxE6FanpdmhoEIvbVELw0wEKG3lumlAQYi9NYyvSzAQGTeWqKXhReMyCvPy8ILRuKV5mXhBSPwyvKy8AKBixwvKi6u6eyEgdhqV8vxsgCDcbYkyvHSAANxuCTK8dIAA3G6JMrx0gADcbwkyvHSAAMReqMkLwswGDcRirK8NMBghF5pXhpgMEKvPC8NMBihV6aXBhiM0CvTywIMROZNEr0svEBE3iTPy8ILROJN0rwsvEAE3iTLy8ILRN5NkrywvKTO/AKxdypJ8tIAA3G2JEnysgCDcbgkSfLSAANxuiRJ8tIAA3G8JEny0gADEXobWV4aYCBCbyPNywIMxl2EGnleGmAgQm8j0Yt626k7wECE3kaml2aGwQi9Mr00wGCEXpleFmAwMq9ELwsvGJFXnpeFF4zEK83LwgtC4G0KWV5YXmJnawPCZrumkOWlAQZht11TyPKyAANxvKQpZHlpgEE4X9IUsrw0wCAcMGkKWV4aYDBCrywvDTAYoVealwUYiPsINYU8L+zWhi4wGKFXopcGGIjQ62R6aYCBCL1Oppcmw0CEXifTywIMROZ1Er0svEBEXifPy8ILRuKV5mXhBSPwyvLC8lJ3eEHYbNc4WV4aYCB22zlZXhpgEI6XNE6WlwUYiPMlTSnLSwMMwgGTppTlpQEGIvS2fAgYfGAgQm8pzUsDDEToLeV5WYCBuJFQU0r0wm5t6AKDEXplemlmGIzQK9NLAwxG6JXppQEGI/TK9LIAA5F5vUQvCy8QkdfL87LwApF4vTQvLC+hs1QNsdvOS/PSAAOx3c5L89IAA3G+xEvzsgCDccDES/PSAANxwsRL89IAgxF6pXlpgMEIvdK8NMBAhN5KnpcFGIw7CVUSvTTAQITeSqaXBhiI0FvJ9NIAAxF6K5leGmAwQq9MLwswGJlXopeFF4zIK8/LwgtE4g0jAszrd6vPz8/OTn9bj8fq5Oj6X788fHfw8ePx4TY1g+O1Onqzune0bo1F6BmL9mtnq3cH5+vrvfXifQN0/V/47vR4/ZNsLoZvbxq6OR9Wbr/Ex9NPZ4er6z91M8x3Xqhq72+8eaHOz3J+cPZmdX7nhS6v1+av/ZRLGHfAo1Vhpre87Rkh33kwWkzFnTd8+1y725C7dvVlhnd83AGLFvxO4lLXcRuX9ukMt3BJ5X64C0zZ3nFyDmB2wqJVO0nM5oBouyG7fTzDLWJcWW6CxlaqaO8KNwczI2LoLsWKOm6nAVd13pe5sWKzw6cto50IOH+qGLH+v1tXsNxvbv2z/f5a/7NfPzIpxu4tCWKx327LWe6y7sAq/Y5++jcdPvYqgA//tAPlYkd5iU3ns6anXBgAo3qBCkztt4HxDcQMk1EvHgoVq8/H5z9fXvOrwbr49T8vf12ldP3rLz9fX6nLX/x+6xe3Lurl1+AjSuP3izKG4voft3VVQxH2m8rfJJhOys+NK6koOq/beaHZs0oasVb28dP7r96t38kfV5237urqq7ffuA/NNwPTywTvwNRaxvY8ZAv/LXCqnvdf+7U53n4ZC0wPvv1O1j/L1fsvtL/8Z/sGu/jFzXvv8lftm29wRK/QyoID5D2ZOkW8KjqXLPttV24Xv6peuiKkjO6uCXnrmlX1ujPeTMjbV/BiQi6KWxPy48Bo6u7LLj4fZygByKmigaKlafzWhSzdY4loOp8m0S1NxAibcHpG8AHd1M3DH9Cp5wN6vvWa9iJvjXH97vy6K2wNdv3fT6ftb+xd1ZDn628oqw+fb35z/W9vLv73xcH54dtvT8/ety+3/gGvXvHq9+9cyfUYnm9fiYN3x29OLt7q6xG/vHIXI318ePDu+fVvvD8+Orr44+t6tP5xDl5dvtTl8v4FlJeDFV48C19evNan89NbzWmCi1mlTjJOd/uOa+8eslV35ms7beWa/Gp+cXry665eyBi2i2u8u8jRpqSlLmNfNdFlfMA/NNuT68A9eZe+lDuww25HhdVe2Z7gaGdwfxeX5Y1VswN77HaUGF9t1/Dg7IG5qIX5wOzSWuqeq7Y/uOvuemdu1dlzHXN255Vm7jqhaPm4XXbXw/zj9S9Pz87fnr45PTl499XNV1+cXb2xr99AN9/zl9PTD9cX8T+r8/Pff7x6P1185g6JlHBbo9yrUK7eeT9dX2I3BNL6iy+P3z3I0tUlup/vXOyeKr79NgS5CmT9dz/4/da3XYedYW7bYz2dXTWDdHa+v/25hr5/6O9xw+zVTzwxwe1lmjy0Pj86+HB5Wa9f7dVZ+ztj6mU7A16l2qPTT1fp1G29jdzDH6ZTBI+mH4Ct5lj05NSurJ3yQ2QuD7BzV8+VZefq3ZU4i1+9vpahwnj/hSw76aHwd67jomVxfRn/oPsa3XZzr6vOK+TmOOdi54WW3de4jnF9WVxvxAfeiN1a1Zocu3eiG7HPaJfeiWVnY/CjK5X3nYzjl11PXF/CjLNnWmjemj6Lsthaab7TO6ZYaXZFY7vUHIreh71OMUt/s/r9csLce3XwcR1J70Te9isfPxyc3J7ZX7w7Pfzl1vR++/eHX+Whz4FRyfjo4OPbzfdOMan77XXo1L7E1tmRHpibuXYjra/7XBVV131z3UPn6Gtqn91ued3nWk/rXvei76LuVFbbC9uXN/q728J7D53OGNZ6H+kyxeX961++m+U9N8V1SMX2/OrS3dOcve+zeq7j/usLkZGat531AgMV6mq/c4reBbdf3F26ca1o3TosXa3D3nwjlnFLsNVmY9MDo3Xw8cPq8OLHfn38+WKE7yyepcPV4UXCvbN49iqFKtwbUccMeLld5weeddqueGyfTJ9vpHPqwFQjvXJHYdV7K5Cmjv6gnmikN1vyNiMd96P9SGdsrJ9spNfjnI6qvpFO5StfTzXSoXNcYd1iEEY6Z3f9ZLPH69dl/+xxVL+qw2wjXUGMdJ+ZnTJBfnx28VndTfyHp6dnR8cnB+ery99ff3SHL94fnL999fpZfPHLs/jlxZd2NWnWnbMKzd2AU7a71rd2y94jPp6KQcaO5Fyn1P
77lU9q19/7bdLgcGYsn7fpGEQyrS/r9ofW7duFj964EWJ3t3vexo3hNfvpbVPOncjZvfFe5crOhUiPvKRl1b2kTdYlnfCCzaWJ3qxOVmfryfzuRP92dXD+/uDDx52dzH3qlIK65zPdLTub997A+4/mDer2nh1LeINw8o+/vvvvt//v/bff//nl51++ePHj86O9ud5th6cnhwfnu/qO8l2n0f+03PaE0dQqbu+777/5/j9v335x+tP//fXnt19/+9p/03sld+2DLnUWNteJY/+x+xXWb8aeF8v7uJslwfRe1b5ZEjzztjM7CjR1O2Y3t97brx9554S90H0x15mZzYnJkJoXxw8+5E+DZbGezU8uDhG8al+huP9zbnOYo/2ca086bgWOmW6HG5qTv//p7dnz9O3zn3/xb3/6ZnXyNmd2HBiVewf58UO1xEgM3SgswxEODMa9Y0s5GHXGyfeBwXhoeDnHI+MTZ9x4bEaYczz0GJCFToQ9BGT+7pe7q+IL33q9HnF7I0EDAU1lD80OnFb+g0FT20OzAweWZ4Pm6GCVXveumtaHafWq/+Efk8HRc8/OueDo7SgTLJfdHR04IdS5qU9wW7d4fuQ26b1UdO75FsrtVzJf2vL9O++Wb/pFd4OX95tT+Ga5fT06GT3mDzsxzv9pegtPmtzus3YEChooaKxzu8/a3ChooKCxzu0+a5/mHxaaR+3gnQyOBXP7wM8cHi/oZ012dTvV3d4F6Jv1txU3//Rt449z3X1hPVYqwLazbxha7ngw57UPQ2nv5Wn4hhvxRCRBBAFRhQfRiDsmCyIIiGo4iNqfSBD1QfR6VQ+cgonNq6J/49JksCQ7WA7K8+b751/9UH2/+vTPw29/ef/v+N/erWFTbN18VlYX/7857LL+uI4vfrs47xK+Wv/bN+t/+/f6dy/3bMaLjZ4fzy9+8+pPcWz4nIyJPefifuNuCeftYzN7PYnZ1z2UTLEtNJb/qd6V35/889dffv36f3//KX1/9sNehpwYdSp465YFnffnq2LlVxdT+elmGgnFnbfryenFlDPnVXGdJ7bsuVbfb90mru8ytFt2J1+NKcfdEed6kLZGu3MVvjs4X79TTi6/Uhb+WffuoGXvIIMt4FSps9By+byVpy/gVKna77xy4/arnnfq9Cdb+q//XLfB2DqYeDlX/297hv7lw78/nH5c7fiBRVd0lgLXl7rv7KrvuzvkbJvyy7nuDfngVX/15t+r9692/6J3bjrYhJ61R98eu1nmms91XDnnnX7w4cPOX/POvUMrH3uu+eaWRlNf894s3rduOHsWv/VeZwvgjziQvg7aHR0d2+Xa25c99fjnKS778+/enH11+q+v//fn//3z/d/+8/L56tMX7c53o/vGP8u5a7xbF/mtf8pn/TdInPAuKg8eErpZ/5r7vvJ7rtn+hKgKt789HDkpcPRt5n3sbIq5Plg9GFi7f6C6PsM9/NiE7l9s+w88e+qd5nuB7+ksuwL8o+RSPvBt6Z8b+O4zRouOfpoO8O24HeoHAe9s1bs+uDn0B7p/ke1vnwdv2+eAzIr3o/bC5uJ9sw95brxd5/ENvnnqPUeGcO0cNY7XnxTDfHf+QHV9cmjwVrzNvd8/D+B9EmqiO6R/3Jz2v7mzRri4Mcn6L1xe7Apo/3fcQydQFznWUZtgpWx0zm5P029uHNyzk7fuadPNBKscvcSOe4b5H0SbxmbbbSYf9n35ZGuaqmbMy05nTFf/Pard65+q9Etq/n30l3/8VP7rJ6v1LbcD61uPqNedZw/4ngex+jDXcyA+vv3h7D///PnXb3/99+Hq/aef/n74f+3xlYWz2OYZ5u0vbt0t455nmPc/y83tX9wSbbInFdyOYb3vmOtLsfXc9L6hrafOarkX+r6fern3+a1395t3p68O3u3+2zt2nvJS1XfVmS9nsuS9Vz1jOXT2La512E47Vc9q/WznlnpHRUeAFwrG9zKJeGKp9wfW3mcqXBY8q9T7A2uXMxUuC55S6v2BtZ+ZCpcFzy31/sA5z3yb/9RS53nNvj0HfntUXM+ozNVu2lszz/OYgEyWXD5MC4xTbycdd/vSa8/3cHPP7O23743Z/nvunTHvHfEHO3vLx+3Sfu83gihIt/0+S50XyFWOdeft2nmZCW833gfdo/bk9kHnHgNdIehGQufjflMWLgRXJN/47lqg3/rdlB75YAMXLrYgp6pKjauLuux8pDi39dsudW62P7MWd30pfjZf9qctL77jlqx7Hsz37B8uy5n2FfZf7L4MvsDF/mb3L3bnFsU+9pxtb3eaLnKtZ3sw7r3X+o+0idQVsXPNXd/O4bm2kPZf9bmewHD1x8ri/bpEdB5fdQnAj7u9T/zufsq7C5plWHIub8PlfO/vNwfv3x/s9ls4ps6+2vYy316yCT3Fdb7LOvv65avV+Y5f1aZzttqlu29W5xb9MJ5rp1zvh/HLnf/oXWfrcusS9z272s31wJy//9Z8+eKHb94e/uR/3zv/+ffXf/nmX/3zcWeYpzsa7Y6OXl9uSnnU0ehHrPh3TqLHWPRknb4Br8JcAz7XTNk+x3P9DV9fPeVtPZq7+kaqm+1NCzH1hti2OS7zVpprtly/2c6PDy7++6v3r1ZHR8cnb9b//u7g99UuX+DtD8PUf76xXvQCZ9yRbvq5csbJsbN/tqzNJ8e5Tor/+f3Bm4se+Gb3Z8bOYy5T4/tuRJ1m2sXaf1kztuhM9sYZeB77jKkipb57fc/1xuldw4G4V2bnsbkh9GyvXHI1ddZn098dyXsvDO6ac8Z7E3TN+b4B3zqtdw8ck20T14JzNnEZO/BAF5z/gMT9wVab+7flFHcBXfBI9L5ztw/UXJx9vh/cvgM1jz3UM5gYHtxfEe/y/rTtFZOfxOm/2HOtRTE9k71qX2OJZ7L3X4Zy1KcE55Ob6ti5fUko1vPprbOmj5xO9+pOFn/ohRd9klPv5c546Az95U6+c++m7nOzH/3Y9iWfwL3+5dnpxcx28+3rOentX0+PVhff8f8B</diagram></mxfile>
2103.15812/paper_text/intro_method.md ADDED
@@ -0,0 +1,44 @@
1
+ # Introduction
2
+
3
+ It is a long-standing goal to build generative models that produce photo-realistic images driven by intuitive user input. While photo-realism is already reached for well-constrained domains such as portraits, it remains challenging to make this image generation process interpretable and editable. What is desired is a latent space that disentangles an image into parts and their appearances, allowing a user to recombine and re-imagine a generated portrait interactively.
4
+
5
+ There are promising methods [1], [2], [3] that use spatial image transformations, such as the thin-plate spline (TPS) method, to create pairs of real and deformed images and impose an equivariance loss to discover keypoints and object appearance embeddings as the bottleneck of an autoencoder. Thereby, one can edit the image by moving the keypoints or modifying the appearance embedding. Yet, the generated images lack fine detail. On the other hand, GAN-based editing approaches attain impressive image quality but lack some editing capabilities. Methods such as StyleGAN [4], [5] and SPADE [6] enable mixing appearance properties from different faces and synthesizing natural images by 'painting' source and target regions in the feature space. While powerful, their editing on feature maps makes it difficult to faithfully reposition image parts spatially, as the influence region of the mask has to be drawn manually with pixel-level accuracy. Our goal is user-friendly control via automatically learned keypoints that provide handles analogous to how character rigs are keyframed in classical animation, thereby overcoming manual drawing and applying to domains without semantic labels.
6
+
7
+ Inspired by the control of autoencoder-based techniques and by the improved image quality of GANs, we introduce keypoint locations and associated feature embeddings as latent variables in the generator network of a GAN. Thereby, the location and appearance of image parts are separated and can be controlled. Figure 1 shows how *LatentKeypointGAN* enables editing the output image by changing the keypoint position, adding or removing points, and exchanging associated appearance embeddings locally, while maintaining a high image quality that approaches that of existing GANs.
8
+
9
+ We target an unsupervised setting in which the position, extent, and appearance of *parts* (image regions that share appearance and belong together) are learned from unlabelled example images. This eases the application to new domains, where large image collections are available but exact segmentation masks or part labels are missing. Because we use the GAN objective in favor of image quality, we cannot rely on the equivariance constraints that are established for autoencoder setups. Instead, we introduce new auxiliary objective functions and control the flow of information in the network to re-instantiate equivariance properties and to disentangle pose and appearance.
10
+
11
+ LatentKeypointGAN is designed as a two-stage GAN architecture that is trained end-to-end. In the first step, a generator network turns the input values sampled from a normal distribution into 2D keypoint locations and their associated encoding. We ensure with suitable neural network pathways that some of the encodings are correlated while others remain independent. These generated keypoints are then mapped to spatial heatmaps of increasing resolution. The heatmaps define the position of the keypoints and their support sets the influence range of their respective encodings. In the second step, a SPADE-like [6] image generator turns these spatial encodings into a complete and realistic image. Although entirely unsupervised, the learned keypoints meaningfully align with the image landmarks,
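+
+ To make the first stage concrete, the following is a minimal PyTorch sketch of rendering generated 2D keypoints as Gaussian heatmaps; the function name, the coordinate convention, and the bandwidth `sigma` are our assumptions, not the paper's exact implementation.
+
+ ```python
+ import torch
+
+ def keypoints_to_heatmaps(keypoints, res, sigma=0.1):
+     """Render K 2D keypoints in [-1, 1]^2, shape (B, K, 2), as Gaussian
+     heatmaps of shape (B, K, res, res); each map localizes one part."""
+     coords = torch.linspace(-1.0, 1.0, res)
+     yy, xx = torch.meshgrid(coords, coords, indexing="ij")
+     grid = torch.stack([xx, yy], dim=-1)                       # (res, res, 2)
+     diff = grid[None, None] - keypoints[:, :, None, None, :]   # (B, K, res, res, 2)
+     return torch.exp(-(diff ** 2).sum(-1) / (2.0 * sigma ** 2))
+ ```
+
+ Multiplying each heatmap with its keypoint's embedding yields the spatial encodings the SPADE-like generator consumes, so moving a keypoint moves the support of its appearance code.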
12
+
13
+ ![](_page_1_Picture_0.jpeg)
14
+
15
+ Figure 1. GANs can generate photo-realistic images (a) but lack local editing capability. **LatentKeypointGAN** generates images with associated keypoints (a-b), which enables local editing by moving keypoints (c), exchanging appearance (d), removing individual parts (e), and adding one or more parts (f). Our improvements are on the unsupervised learning of an interpretable latent space that disentangles pose and appearance, which makes it easy to use and applicable to diverse domains, including portraits (top row), indoor rooms (bottom row), and persons (see results section).
16
+
17
+ | Feature | Zhang et al. [1] | Lorenz et al. [3] | Karras et al. [5] | Collins et al. [7] | Alharbi et al. [8] | Kim et al. [9] | Wang et al. [10] | Park et al. [6] | Zhu et al. [11] | Ours |
18
+ |------------------------------------------------|------------------|-------------------|-------------------|--------------------|--------------------|----------------|------------------|-----------------|-----------------|------|
19
+ | Appearance transfer (global) | ● | ● | ● | ● | ● | ● | ● | ● | ● | ● |
20
+ | Appearance transfer (local, part-based) | ● | ● | ○ | ● | ● | ● | ● | ● | ● | ● |
21
+ | Removing and adding parts | ● | ● | ○ | ○ | ○ | ○ | ● | ● | ● | ● |
22
+ | Moving parts spatially | ● | ● | ○ | ○ | ○ | ○ | ○ | ○ | ○ | ● |
23
+ | Image quality w/o edits | ○ | ○ | ● | ● | ● | ● | ○ | ○ | ● | ● |
24
+ | Image quality after editing | ◐ | ◐ | ● | ● | ● | ● | ○ | ○ | ○ | ● |
25
+ | Training w/o part annotation (unsupervised) | ● | ● | ● | ● | ● | ● | ◐ | ◐ | ◐ | ● |
26
+ | Inference w/o manual feature region 'painting' | ● | ● | ● | ● | ◐ | ◐ | ● | ● | ● | ● |
27
+
28
+ Table I
29
+
30
+ ● / ◐ / ○ : full / partial / no support. Feature table comparison to state-of-the-art generative image editing methods.
35
+
36
+ such as a keypoint linked to the nose when generating images of faces, enabling the desired editing. As a byproduct, we can learn a separate keypoint detector on generated image-keypoint pairs for unsupervised keypoint detection, which we utilize to quantify localization accuracy.
37
+
38
+ We summarize our contributions below:
39
+
40
+ - 1) A GAN-based framework for handle-based image manipulation that requires less user input than existing techniques and succeeds on more diverse domains;
41
+ - 2) A new GAN-based methodology for keypoint detection that contests established autoencoder methods;
42
+ - 3) A new metric to compare part disentanglement across existing models.
43
+
44
+ The supplemental materials are here.
2104.00369/main_diagram/main_diagram.drawio ADDED
@@ -0,0 +1 @@
1
+ <mxfile host="app.diagrams.net" modified="2021-03-04T13:50:58.331Z" agent="5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.190 Safari/537.36" etag="Rel6_poNATCoLqI47mDR" version="14.3.0" type="google"><diagram id="3QULAG2NTjU8rfZ8YxRc" name="Page-1">7VzRcqM2FP0aP6aDJAT4MbGzu9Np0swkM9191BqtoYuRi+XY7tdXYAEG4ZgaG8k75MXoIgScIx1d3SsyQpPF9nNClsET82k0gpa/HaHpCEIAXU/8pJbd3oI9a2+YJ6EvK5WG1/BfKo15tXXo01WlImcs4uGyapyxOKYzXrGRJGGbarUfLKredUnmVDG8zkikWv8KfR7srR62SvsXGs6D/M7AkmcWJK8sDauA+GxzYEKPIzRJGOP7o8V2QqMUvByX/XWfjpwtHiyhMW9zwe5541v/TL68/H6HnvEd9adb587JH47v8jemvgBAFlnCAzZnMYkeS+tDwtaxT9NmLVEq6/zB2FIYgTD+TTnfSTbJmjNhCvgikmfpNuRf08t/w7L0TTaWHk+3h4VdXoh5svt6WPhWtpAWy8uyUn6dCpNEbsXWyYx+gE3e3Ugyp/yDehDvK6bAHdxBsvCZsgUVDyQqJDQiPHyv9iwiO+i8qFdyKA4kjf+HUmAOpeBGKcWuUZTKdt9JtJZ3emNvb0yYpoSTlXiVOuMlnykFmyDk9HVJMmw2Qqir3B3F850mnG4/BECezUVOqjxCsrwpNdOVpuBALguZvzxkUOcoAIeyVowIU0YBbDkKbLMGARoY7cqoWTMVVGQt9R8Twqmwpoz4I+hE4n0evifiaJ4eYWzBsX65q+kdgKreOQ1651xL7pAC5Z88oMlKBdDBLlK1sXcAwbg6YViaAQT2oC4n5oGT6uIYpS4A3wqj12fGLHfWVsTqOfVln6h4CyH5VtpGg3JBbAP90o9q0u9oFi44uEWn3J3TK3hg1PiAw1zUnVJoFKVYkbwJizkJ4za6N3aAGqbTrXtQt8vrKJA+rdO4cETvsrCveAzxFFH6JBm6CZ2HLFbhtYHjdkP3RxhFExaxJLsWefePjnd/GdTtemBFBR2APlF3FdRfw3i+x7w15GK9hDuuQWqYfxJ/k8l1MAcNa5N+QS/eoWtfB1axUr2Bzg7G2oFXQxRndHcwtseXRf2a3b3Jo+0XdaTq8eD/1JJNpx0go/wf5A2UdqYUGUUpVH3atyTzaBX9g5bbkJXs24MtRCwXOqzZg0VqNGMYFHmMqG2q1jYrp2Fr3VDxq3DqmcVpg/t9MlGFHMvRLnn1PJWtW/Js1aE+lqiyrbEaB9MNoNOwAO8XwCHce3IqOJ0Gt8zSl5uJ9/bBjVkLF1v1clslq4BjwDaFeswWNQRU+lUvdUo0tKdrUC+v7Qgxax1oD9GaC3Bq1l67/LnPyleBsdsxoXIF6cO6Pd/8/p1j+BCrsTRTI/hIeyw5dyc6RvCR2zF+1SPotvYkIVa3I57X152G71NMhR03JMR7hl3dWHVWXweXzYdfM1mFtacI8c1sM9SwWcdt6f7kETZD3B88LFMuwKlZC3ms7lyZ0ndV/lzX6jjnXEDm6qmq4qtZXd6rq04tYo4Xd3PIIn39DMTMLmxIzvFNZ5HdECo/3tKMRcdaUicpAS6vUrHiCftJ87koZjGtTU/SRMS0GIviTFBGhf0hpSqckehenliEvp+N8ibuqyP/EvR7uEJ/k0OHGuhHV6NfXQ+qChn79+kH1yWqPlkFxaA5YCW1vxAukI4zi+gVBVf5R9awIm6lnhVflZ4jbqf3ybddhh8wgRuIyG2tpU3e4YWF4pHL7AOuykCxeStvYq/W8qqSY7Uhq9qQV2tnD4zSTtZXirdu6j6iWH7Bvq9e/h8A9Pgf</diagram></mxfile>
2104.00369/main_diagram/main_diagram.pdf ADDED
Binary file (22.9 kB). View file
 
2104.00369/paper_text/intro_method.md ADDED
@@ -0,0 +1,15 @@
1
+ # Method
2
+
3
+ We start building the dataset by collecting data instances from ToTTo [@parikh-etal-2020-totto], a recent large-scale Table-to-Text dataset that contains tables and table-grounded sentences obtained from a diverse variety of Wikipedia pages. Additionally, ToTTo comes with annotations of the table cells that support each sentence: a sentence is supported by the cell contents if it is directly stated or can be logically inferred from them. ToTTo applied several heuristics to sample the tables and the candidate sentences from Wikipedia pages, and its annotators were asked to revise sentences and highlight the corresponding table regions so that the sentences still have the varied language and structure found in natural sentences while being grounded to the table.
4
+
5
+ Sampling examples from the ToTTo dataset was conducted in multiple steps. We first sample tables that are 3 to 34 rows long and 3 to 7 columns wide (up to the 75th percentile of all ToTTo table sizes) to avoid truncation of the linearized table sequence for transformer-based models, whose default maximum input sequence length is 512. To ensure sentences contain several table entities, we further select tables whose annotation of highlighted regions covers multiple rows. We also collect a subset of single-row highlighted regions whose content spans multiple rows or columns. Following this sampling procedure, we were able to obtain 16,576 $\{\textit{table, metadata, highlighted region, sentence}\}$ instances with which we conduct the annotation procedure as described below. The flowchart of the sampling process is found in Figure [7](#fig:ToTTo_filtering_process){reference-type="ref" reference="fig:ToTTo_filtering_process"} of the Appendix.
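+
+ A minimal sketch of this filtering step (the function name, the cell-coordinate format, and the table layout are our assumptions; the multi-row criterion is shown, and the single-row special case is analogous):
+
+ ```python
+ def keep_example(table, highlighted_cells):
+     """ToTTo filter sketch: keep mid-sized tables whose highlighted
+     region covers multiple rows."""
+     n_rows, n_cols = len(table), len(table[0])
+     if not (3 <= n_rows <= 34 and 3 <= n_cols <= 7):
+         return False  # avoid truncating the 512-token linearized table
+     highlighted_rows = {row for row, col in highlighted_cells}
+     return len(highlighted_rows) > 1
+ ```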
6
+
7
+ We adopted these table-grounded sentences as the answers in our new QA dataset since they are long, natural sentences containing rich information and inferences over the corresponding table. We also exploit ToTTo's annotations of table cells (the highlighted table region) as the weak supervision (denotations) for training models and labels for evaluating model retrieval competency. We parsed the tables (originally in HTML format) into a 2-dimensional array, where the first row corresponds to the table header. We also processed merged cells by copying the cell content and cell highlighted region to all the individual cells that compose the original merged cell.
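+
+ The merged-cell handling can be pictured with a short sketch (the cell tuple layout and function name are our assumptions):
+
+ ```python
+ def expand_merged_cells(cells):
+     """Flatten cells of (row, col, rowspan, colspan, text) into a dense
+     2D grid, copying merged-cell text into every covered position; the
+     highlighted-region flags are copied the same way."""
+     n_rows = max(r + rs for r, _, rs, _, _ in cells)
+     n_cols = max(c + cs for _, c, _, cs, _ in cells)
+     grid = [["" for _ in range(n_cols)] for _ in range(n_rows)]
+     for row, col, rowspan, colspan, text in cells:
+         for r in range(row, row + rowspan):
+             for c in range(col, col + colspan):
+                 grid[r][c] = text
+     return grid
+ ```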
8
+
9
+ Question annotations were collected with the help of human judges in two phases: an internal phase conducted by on-site expert annotators, and an external phase conducted by crowd workers on Amazon Mechanical Turk. To streamline the process, we built a custom web interface to visualize table HTML and metadata, augmented with web widgets that allow table region highlighting and sentence editing. A screenshot of the annotation interface is shown in Figure [8](#fig:website-screenshot){reference-type="ref" reference="fig:website-screenshot"} of the Appendix.
10
+
11
+ Provided the necessary context, the annotators were asked to write a question whose answer is the provided ToTTo sentence. The annotators were given the option to modify the sentence, the table cell content, and the highlighted region to better match the associated question.
12
+
13
+ In the first phase of annotation, we enrolled 15 internal annotators who were provided with preliminary guidelines. In addition to the annotation task, they were asked to provide feedback regarding the task instructions and the user experience of the website, based on which we iteratively modified the guideline and the website design.
14
+
15
+ For external annotations, we hired MTurk workers who have completed at least 500 HITs, have a 97% approval rate, and are from English-speaking regions. To ensure that the MTurk annotators understand our task, we provided an instruction video for the interactive annotation tool usage, FAQs that clarify the annotations we desire, along with good vs. bad annotation examples. We also created a Slack channel for crowdsourced workers to ask questions and clarify doubts.
2104.07098/main_diagram/main_diagram.drawio ADDED
The diff for this file is too large to render. See raw diff
 
2104.07098/paper_text/intro_method.md ADDED
@@ -0,0 +1,82 @@
1
+ # Introduction
2
+
3
+ Image-to-Image (I2I) translation is the task of transforming images from one domain to another (e.g., semantic maps → scenes, sketches → photo-realistic images, etc.). Many problems in computer vision and graphics can be cast as I2I translation, such as photo-realistic image synthesis [\[1–](#page-8-0)[3\]](#page-8-1), super-resolution [\[4\]](#page-8-2), colorization [\[5,](#page-8-3) [6\]](#page-8-4), and inpainting [\[7\]](#page-8-5). Therefore, I2I translation has recently received significant attention in the literature. One main challenge in I2I translation is the multi-modal nature of many such tasks – the relation between an input domain A and an output domain B is oftentimes one-to-many, where a single input image $I_i^A \in A$ can be mapped to different output images from domain B. For example, a sketch of a shoe or a handbag can be mapped to corresponding objects with different colors or
4
+
5
+ styles, or a semantic map of a scene can be mapped to many scenes with different appearance, lighting and/or weather conditions. Since I2I translation networks typically learn one-to-one mappings due to their deterministic nature, an extra input is required to specify an output mode to which an input image will be translated. Simply injecting extra random noise as input proved to be ineffective as shown in [\[2,](#page-8-6) [8\]](#page-8-7), where the generator network just learns to ignore the extra noise and collapses to a single or few modes (which is one form of the mode collapse problem). To overcome this problem, Zhu *et al*. [\[8\]](#page-8-7) proposed *BicycleGAN*, which trains an encoder network E, jointly with the I2I translation network, to encode the distribution of different possible outputs into a latent vector z, and then learns a deterministic mapping G : (A, z) → B. So, depending on the latent vector z, a single input $I_i^A \in A$ can be mapped to multiple outputs in B. While BicycleGAN requires paired training data, several works, like MUNIT [\[9\]](#page-8-8) and DRIT [\[10\]](#page-8-9), extended it to the unsupervised case, where images in domains A and B are not in correspondence ('unpaired'). One main component of unpaired I2I is a cross-cycle consistency constraint, where the network generates an intermediate output by swapping the styles of a pair of images, then swaps the styles of the intermediate outputs again to reconstruct the original images. This enforces that the latent vector z preserves the encoded style information when translated from an image i to another image j and back to image i again. This constraint can also be applied to paired training data, where it encourages style/attribute transfer between images. However, training BicycleGAN [\[8\]](#page-8-7) or its unsupervised counterparts [\[9,](#page-8-8) [10\]](#page-8-9) is not trivial. For example, BicycleGAN combines the objectives of both conditional Variational Auto-Encoders (cVAEs) [\[11\]](#page-8-10) and a conditional version of Latent Regressor GANs (cLR-GANs) [\[12,](#page-8-11) [13\]](#page-8-12) to train their network. The training objective of [\[9,](#page-8-8) [10\]](#page-8-9) is even more involved to handle the unsupervised setup.
6
+
7
+ In this work, we propose a novel weakly-supervised pre-training strategy to learn an expressive latent space for the task of multi-modal I2I translation. While end-to-end training of the encoder network E with the I2I translation network is convenient, we show that it can be advantageous to break down the training into proxy tasks. Specifically, we
8
+
9
+ <span id="page-1-0"></span>show both quantitatively and qualitatively that the proposed pre-training yields the following advantages:
10
+
11
+ - It learns a more powerful and expressive latent space. Specifically, we show that: (1) Our pre-trained latent space captures uncommon styles that are not well represented in the training set, while baselines like BicycleGAN [\[8\]](#page-8-7) and MUNIT [\[9\]](#page-8-8) fail to do so and instead tend to simplify such styles/appearances to the nearest common style in the train set. (2) Pre-training yields more faithful style capture and transfer. (3) Finally, the better expressiveness of the pre-trained latent space leads to more complex style interpolations compared to the baselines.
12
+ - The learned style embedding is not dependent on the target dataset and generalizes well across many domains, which can be useful especially when having limited training data.
13
+ - Style pre-training simplifies the training objective by requiring fewer losses, which also speeds up the training.
14
+ - Our approach improves the training stability and the overall output quality and diversity.
15
+
16
+ We note that our proposed style pre-training is weakly-supervised and doesn't require any manual labeling. Instead, it relies on a pre-trained VGG network [\[14\]](#page-8-13) to provide training supervision. Our approach is inspired by and extends the work of Meshry *et al*. [\[15\]](#page-8-14), which utilizes a staged training strategy to re-render scenes under different lighting, time of day, and weather conditions. Our work is also inspired by the standard training paradigm in visual recognition of first pre-training on a proxy task, either on large supervised datasets (e.g., ImageNet) [\[16–](#page-8-15)[18\]](#page-8-16) or on unsupervised tasks (e.g., [\[19,](#page-8-17) [20\]](#page-8-18)), and then finetuning (transfer learning) on the desired task. Similarly, we propose to pre-train the encoder using a proxy task that encourages capturing style into a latent space. Our goal is to highlight the benefits of encoder pre-training and demonstrate its effectiveness for multi-modal image synthesis. In particular, we make the following contributions:
17
+
18
+ - We propose to pre-train an encoder to learn a low-dimensional projection of Gram matrices (from Neural Style Transfer) and show that the pre-trained embedding is effective for multi-modal I2I translation, and that it simplifies and stabilizes the training.
19
+ - We show that the pre-trained latent embedding is not dependent on the target domain and generalizes well to other domains (transfer learning).
20
+ - We provide a study of the importance of different loss terms for multi-modal I2I translation network.
21
+ - We propose an alternative to enable sampling from a latent space instead of enforcing a prior as done in VAE training.
22
+ - We achieve state-of-the-art results on six benchmarks in terms of style capture and transfer, and diversity of results.
23
+
24
+ # Method
25
+
26
+ Current multi-modal image translation networks require an extra input z that allows for modelling the one-to-many relation between an input domain A and an output domain B as a one-to-one relation from a pair of inputs $(A, z) \to B$ . In previous approaches, there has been a trade-off between simplicity and effectiveness for providing the input z. On one hand, providing random noise as the extra input z maintains a simple training objective (same as in pix2pix [2]). However, [2, 8] showed that the generator has little incentive to utilize the input vector z since it only encodes random information, and therefore the generator ends up ignoring z and collapsing to one or few modes. On the other hand, BicycleGAN [8] combines the objectives of both conditional Variational Auto-Encoder GANs (cVAE-GAN) and conditional Latent Regressor GANs (cLR-GAN) to learn a latent embedding z simultaneously with the generator G. Their training enforces two cycle consistencies: $B \rightarrow z \rightarrow \hat{B}$ and $z \to \tilde{B} \to \hat{z}$ . This proved to be very effective, but the training objective is more involved, which makes the training slower. Also, since the latent embedding is being trained simultaneously with the generator, hyper-parameter tuning becomes more critical and sensitive. The training objective of more recent works (e.g., [9, 10]) is even more complicated. We aim to combine the best of both worlds: an effective training of a latent embedding that models the distribution of possible outputs, while retaining a simple training objective. This would allow for faster and more efficient training, as well as less sensitivity to hyper-parameters. We observe that the variability in many target domains can be represented by the style diversity of images in the target domain B, where the style is defined in terms of the Gram matrices used in the Neural Style Transfer literature [52]. However, using Gram matrices directly to represent styles is not feasible due to their very high dimensionality. So instead, we learn an embedding by separately training an encoder network E on an auxiliary task to optimize for $z=E(I^B)$ capturing the style of an image $I^B$ . Visualizing the pre-trained latent space shows that our pre-trained encoder models different modes of the output distribution (e.g., different colors, lighting, and weather conditions) as clusters of images with similar styles, as shown in $\S4.7$ . Then, to synthesize an image $\hat{I}^B = G(I^A, z)$ , the input latent can be used to clearly distinguish the style cluster to which the output belongs. This makes for an effective and more stable training of the generator G, since G is just required to discover the correlation between output images and their corresponding style embedding z.
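+
+ For reference, the Gram-matrix style distance that supervises this embedding can be sketched in a few lines of PyTorch (the layer choice and normalization are our assumptions); with C channels per layer, raw Gram matrices have on the order of C^2 entries per layer, which is what makes the learned low-dimensional code necessary:
+
+ ```python
+ import torch
+
+ def gram_matrix(features):
+     """Gram matrix of a (B, C, H, W) feature map, normalized by C*H*W."""
+     b, c, h, w = features.shape
+     f = features.view(b, c, h * w)
+     return torch.bmm(f, f.transpose(1, 2)) / (c * h * w)
+
+ def style_distance(feats_a, feats_b):
+     """Style loss between two images given lists of VGG feature maps."""
+     return sum(torch.mean((gram_matrix(fa) - gram_matrix(fb)) ** 2)
+                for fa, fb in zip(feats_a, feats_b))
+ ```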
27
+
28
+ <span id="page-2-0"></span>![](_page_2_Picture_3.jpeg)
29
+
30
+ Figure 1: Overview of our training pipeline. Stage 1: pretraining the style encoder E using a triplet loss. Stages 2, 3: training the generator G, and finetuning both G, E together using GAN and reconstruction losses.
31
+
32
+ Moreover, experimental evaluation shows that the proposed style-based pre-training yields better results in terms of more faithful style capture and transfer, as well as better output quality and diversity.
33
+
34
+ To incorporate this into BicycleGAN [8], we replace the simultaneous training of the encoder E and the generator G with a staged training (Figure 1) as follows:
35
+
36
+ - **Stage 1**: Pre-train *E* on a proxy task that optimizes an embedding of images in the output domain *B* into a low-dimensional style latent space, such that images with similar styles lie close together in that space (i.e., clustered).
37
+ - **Stage 2**: Train the generator network G while fixing the encoder E, so that G learns to associate the style of output images with their deterministic style embedding $z = E(I^B)$ .
38
+ - **Stage 3**: Finetune both E and G together, allowing the style embedding to be further adapted to best suit the image synthesis task for the target domain.
39
+
40
+ Next, we explain how to pre-train the style encoder network E in §3.1, and how to train the generator G using the prelearned embeddings (§3.2). Finally, we demonstrate the generalization of pre-training the style encoder E in §3.3.
41
+
42
+ The goal of pre-training the encoder network E is to learn a deterministic mapping from the style of a target image $I_i^B \in B$ to a latent style code $z_i = E(I_i^B)$ . Ideally, images with similar styles should be close in the style embedding space, while images with different styles should be far apart. To supervise training such an embedding, we utilize the style loss [52] as a distance metric to measure the style similarity between any two given images. The style encoder network E is then trained using a triplet loss [53], whose input is a triplet of images $(I_a, I_p, I_n)$ such that $(I_a, I_p)$ have similar styles, while $(I_a, I_n)$ have different styles, as measured by the style loss metric. The training objective for E is given by:
43
+
44
+ $$\mathcal{L}^{\text{tri}}(I_a, I_p, I_n) = \max ([\|z_a - z_p\|^2 - \|z_a - z_n\|^2 + \alpha], 0) + \lambda \mathcal{L}^{\text{reg}}(z_a, z_p, z_n)$$
45
+
46
+ (1)
47
+
48
+ <span id="page-3-4"></span><span id="page-3-2"></span>![](_page_3_Figure_0.jpeg)
49
+
50
+ <span id="page-3-3"></span>Figure 2: Qualitative comparison with baselines. Ours better matches the ground truth (GT) style.
51
+
52
+ ![](_page_3_Figure_2.jpeg)
53
+
54
+ Figure 3: Style transfer for different datasets. We show output for applying different styles to each input image.
55
+
56
+ where $\alpha$ is a separation margin, $\lambda$ is a relative weighting parameter, and $\mathcal{L}^{\text{reg}}(\cdot)$ is an optional L2 regularization term that encourages learning a compact latent space.
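+
+ A direct PyTorch transcription of Eq. (1) (the squared-norm regularizer and the default values of `alpha` and `lam` are our assumptions):
+
+ ```python
+ import torch
+
+ def style_triplet_loss(z_a, z_p, z_n, alpha=0.2, lam=1e-3):
+     """Eq. (1): margin triplet loss on style codes plus an L2 term that
+     keeps the latent space compact."""
+     pos = ((z_a - z_p) ** 2).sum(dim=1)
+     neg = ((z_a - z_n) ** 2).sum(dim=1)
+     triplet = torch.clamp(pos - neg + alpha, min=0.0)
+     reg = (z_a ** 2 + z_p ** 2 + z_n ** 2).sum(dim=1)
+     return (triplet + lam * reg).mean()
+ ```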
57
+
58
+ **Triplet selection.** To generate triplets for pre-training the encoder E, we compute the set of $k_c$ closest and $k_f$ furthest neighbors for each anchor image $I_a$ as measured by the style loss. Then, for each anchor image $I_a$ , we randomly sample a positive image $I_p$ and a negative image $I_n$ from the sets of closest and furthest neighbors respectively. We found that, for large datasets, it is sufficient to generate triplets for a subset of the training images. One challenge is posed by images with an outlier style: such images are among the furthest neighbors of most other images and can mislead the training into simply projecting outliers onto separate clusters. To deal with this, we sample the negative style image $I_n$ from a larger set of furthest neighbors, while the positive image $I_p$ is sampled from a small set of closest neighbors so that it has reasonable style similarity to the anchor image.
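+
+ The sampling rule just described might look as follows (the values of `k_c` and `k_f` are illustrative assumptions):
+
+ ```python
+ import random
+
+ def sample_triplets(style_dists, k_c=5, k_f=500):
+     """For each anchor i, draw a positive from its k_c nearest style
+     neighbors and a negative from its k_f furthest ones; the large
+     negative pool keeps outlier styles from dominating."""
+     triplets = []
+     for i, dists in enumerate(style_dists):  # dists[j]: style loss i <-> j
+         order = sorted(range(len(dists)), key=dists.__getitem__)
+         positives = [j for j in order[:k_c + 1] if j != i]
+         negatives = order[-k_f:]
+         triplets.append((i, random.choice(positives), random.choice(negatives)))
+     return triplets
+ ```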
59
+
60
+ After pre-training the style encoder E (stage 1), we have established a mapping from images in the output domain, $I^B \in B$ , to their style-embedding $z = E(I^B)$ . When the style embedding is fed as input to the generator during training, the generator has a good incentive to associate the style of output images with their corresponding style embedding instead of learning to hallucinate the style. It is important to retain the deterministic correspondence between images and their style
61
+
62
+ codes to facilitate the job of the generator to discover this correlation. This is why, during stage 2, we keep the weights of the style encoder, E, fixed. The forward pass reconstructs a training image $I_i^B$ as $\hat{I}_i^B = G(I_i^A, z_i)$ , where $z_i = E(I_i^B)$ . The training objective is similar to that of pix2pix [2]:
63
+
64
+ <span id="page-3-1"></span>
65
+ $$\mathcal{L}^{\text{img}}(I_i^B, \hat{I}_i^B) = \mathcal{L}_{\text{cGAN}}(I_i^B, \hat{I}_i^B) + \lambda_{\text{rec}}\mathcal{L}_{\text{rec}}(I_i^B, \hat{I}_i^B) \quad (2)$$
66
+
67
+ where we use the Least Square GAN loss (LSGAN) [25] for the $\mathcal{L}_{cGAN}$ term, and a VGG-based perceptual loss [14] for the reconstruction term $\mathcal{L}_{rec}$ . Once the generator has learned to associate the output style with the input style embedding, stage 3 finetunes both the generator, G, and the style encoder, E, together using the same objective (2).
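+
+ A rough sketch of the stage-2 objective in Eq. (2) (the weighting `lambda_rec` and the use of L1 between VGG features are our assumptions):
+
+ ```python
+ import torch
+ import torch.nn.functional as F
+
+ def generator_loss(D, vgg_features, I_fake, I_real, lambda_rec=10.0):
+     """Eq. (2) sketch: least-squares GAN term plus a VGG-based
+     perceptual reconstruction term."""
+     gan = ((D(I_fake) - 1.0) ** 2).mean()  # LSGAN: push fakes toward "real"
+     rec = sum(F.l1_loss(f, g.detach())
+               for f, g in zip(vgg_features(I_fake), vgg_features(I_real)))
+     return gan + lambda_rec * rec
+ ```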
68
+
69
+ **Style sampling.** To perform multimodal synthesis on a given input at test time, we can capture the latent vector z from any existing image and transfer the style to the generated image. However, if we wish to sample styles directly from the latent distribution, one option is to enforce a prior on the latent distribution. For example, we found it effective to add an L2 regularization on the latent vectors to enforce zero-mean embeddings and limit the variance of the latent space. We then compute an empirical standard deviation for sampling. Another alternative to enable sampling is to train a mapper network $\mathcal{M}$ to map the unit Gaussian to the latent distribution. This can be done as a post-processing step after the style encoder has been trained and finetuned. Specifically, we propose to train a mapper network $\mathcal{M}$ using the nearest-neighbor based Implicit Maximum Likelihood Estimation (IMLE) training [54, 55]. The training objective
70
+
71
+ <span id="page-4-3"></span><span id="page-4-2"></span>![](_page_4_Figure_0.jpeg)
72
+
73
+ Figure 4: Style interpolation. Left column is the input to the generator G, second and last columns are input style images to the style encoder, and middle images are linear interpolation in the embedding space (figure better seen in zoom).
74
+
75
+ is given by:
76
+
77
+ $$\mathcal{M} = \arg\min_{\tilde{\mathcal{M}}} \sum_{i} \|z_i - \tilde{\mathcal{M}}(e_i)\|_2^2, \ e_i = \arg\min_{r_j} \|z_i - \tilde{\mathcal{M}}(r_j)\|_2^2$$
78
+ (3)
79
+
80
+ where $\{r_j\}$ is a set of random samples from the unit Gaussian prior and, for each latent code $z_i$ , we select the sample $e_i$ whose mapping $\mathcal{M}(e_i)$ is the nearest neighbor of $z_i$ .
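+
+ One IMLE update for the mapper, following Eq. (3) (the batching scheme and the latent dimension are our assumptions):
+
+ ```python
+ import torch
+
+ def imle_step(mapper, z_batch, optimizer, n_samples=1000, dim=8):
+     """Map unit-Gaussian samples through the mapper, pick each latent's
+     nearest mapped sample e_i, and pull M(e_i) toward z_i."""
+     r = torch.randn(n_samples, dim)
+     with torch.no_grad():
+         mapped = mapper(r)                                   # (n_samples, dim)
+         nn_idx = torch.cdist(z_batch, mapped).argmin(dim=1)  # e_i per z_i
+     loss = ((mapper(r[nn_idx]) - z_batch) ** 2).sum(dim=1).mean()
+     optimizer.zero_grad()
+     loss.backward()
+     optimizer.step()
+     return loss.item()
+ ```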
81
+
82
+ The use of Gram matrices for Neural Style Transfer proved to be very effective, and it reliably captures the style of arbitrary input images. This implies that Gram matrices can reliably encode styles from a wide range of domains and are not specific to a certain domain. Therefore, we hypothesize that encoder pre-training using a style-based triplet loss learns a generic style embedding that can generalize across multiple domains and be effective for multimodal I2I translation. This would allow for performing the pre-training stage only once using auxiliary training data. The finetuning stage eventually tweaks the embedding to better suit the specific target domain B. We validate our hypothesis experimentally in $\S 4$ , and show that pre-training the style encoder on datasets other than the target domain $B$ doesn't degrade the performance. It can even improve the performance if the target dataset is small, in which case pre-training on an auxiliary dataset helps with the generalization of the overall model.
2106.05087/main_diagram/main_diagram.drawio ADDED
@@ -0,0 +1 @@
1
+ <mxfile host="app.diagrams.net" modified="2021-11-14T17:57:28.364Z" agent="5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/95.0.4638.69 Safari/537.36" etag="mixUOTBux0OowQrF-MDL" version="15.7.3" type="google"><diagram id="HV9PbcvmQ9HC2jQ1gnc-" name="Page-2">7V1bk9o6Ev41U5s84JJkyZfHuSUnW8lJNrN19uyjATPjOoBZYyYz59evfJGxLmABtrBnoCoV8NjGdH/6utXdal3Zt4uXz0mwevoWT8P5FQLTlyv77gohSHyf/pcdeS2OIMe2iyOPSTQtz9oeeIj+DsuDoDy6iabhmjsxjeN5Gq34g5N4uQwnKXcsSJL4F3/aLJ7z37oKHstvBNsDD5NgHkqn/Seapk/FUQ+52+O/hdHjE/tm6JS/eBGwk8tbrJ+Cafyr9l32/ZV9m8RxWrxbvNyG80x6TC6AbH7OYvz05Y/bh+///C2NFs+/j4qn/HTIJdVPSMJlevStv4z+WK3hw9fwz5/z/3z5PpncP9Fb41IOz8F8U0qs/LXpKxMhvQ/VFv1w8+spSsOHVTDJ/vKLIoYee0oXc/oJ0rezeJmWCKASsm+SOA3SKF7SzyM/OzCL5vPbeB4n+Y3tmTcJJxN6fBqsn8JpeZd1msR/VcrC1RF24TJeZs+iKZRSeM9hkoYvNX2WQvocxoswTV7pKeyv0HYst7ysRP0IWj4C2xd0ij//2iKKAt4ixdGnGp6Qm11awrmE8mP1nVt10Telxg7QHmjWXRJvltNKtA3649QgKlNWi6DNaRB6s4mkLvqX7PXpU4c6o0ojnMIwkBTkACCrBzOaal03sFk3rYwrTNoaV/TCT58ydHeqKOBZiFMVAq7l2vJ4cpDFSLquMeg7FvE7Uho6ZEABDcUJI4SE3lQpeA+NbcfpUvA2s2tM7EQeIvQkhcDdroaI3SxtanNX2dvNYn49STNp3WS/OaL2/WswDuc/4nVUjoRxnKbxonbC9Tx6zP6QxoJW4k06j5ZU/MzjUJgmAAgIZypVUfa/u7ktR2V90OUvaZyRLk2V5/C0Bx1Zqa6C9qDdlU6xQqfOPC3FxSnX+d8mZn8YrXN6u6YnILx6yWXG/k7fPWb/3y+foyReLjIhlvekj1jctjjjNOsnkuhshiZKczZ1xg5xBF7GnQ5el9ezYuxClZ79rtRMWrRuwXpVDMNZ9JLpqkMx+rwYkWK42AoxdjZanDchRhucWYxtzmHOKEaF02pUjN7bEKN3ZjH6gxQjFv1DZnLOJUaoMbsdgBxteG45tjkTPaMcybnlqDE57KEcCbMqvRnXGtO+Acjx/ONaNdUaoBzPPq515jLL6XWWi6Cf4lW4vF6/Lie8AMOXKP2z9v6/9H0WEC4+3WW/HrAPr+zDkj7/n/UPtauyj9vL8k/b66afouxH3h0eqlvHm2QS7hFHKY00SB7DtNnLCacs/bJD7zW1EoVa2bEknAdp9MwnbVS6Lr/hRxzl4YRteEvwAh0BL8UPLy/bQka+ExZCybYYIi5EI90pB1/1w0/Ao86k8P3g0RkmHrEthBo8ciweXcERAobxqDO7fj941OVHxhp9BaSNBgtInTjFBZC75oF9BWSLDIkNA1In4vN+AKlrsfvOkC0C0jMLSKQTOrsAcmgM2aLJNsyQSCcG+X4A6b4RhmwRkKYZEl0AKWf+hs+QzmAZUicqfgHk0BgSg8EypE564QLIgYUhW8SjaYK8pGmOcSHfDx4NRyHRJU1z1JTm/XiQpgF5ydMoiK8ZkKjfgBxu4hC1k6c5A7iaQWP3DDQ+r2qp8F8bNIQ90rlA004u5VjQHMV+fWAx3C9AEtDAPccD0vC8g9WdXQDJBVwGZ1c7RKThyIzdTjLlzSByuEZbmHqIJa6DMdo2uiDyKI7sGSIljhSZbThWu510yvtDZN/9SBFHw0FkO/mUN4NIbavddz+yPUSa9iPbyai8P0T2nSPbs9qm/UiNnMpkkzxXHT1q8JzMg/U62hHNBhzI3P0oaxlXPUOLjcUM3LFoQcJCKaRpUanGgtfaaavshPWeR0bCRAmW9a27nwwJF7jcBfRN8QztgleRf3HA+ootRKuzKsVC3r6IR2tQNiyaUOCF9VZHrJPRIppOs3vcJOE6+jsY5/fLIFuKkN6c3FyRuyu5Nc08+7qbYPLXY94Tp7GX3v7xKS73rNo3ls90Ve+QqFoGCiwEfdgKoqs8Q6ls3+bvEM9m67AbulIlOI5odlTrdETlyNoMbQ/as1nelk3qiMS+bcwOFD2yqOwd8JgDr2qTNBavaWqddLbOW1UPut3d6fgOW/bBnH1IvzoxicZq6+tt0xR0jcluHJ/WNE0jQdKCjUTv2kYKi4n9I1Ow0CdqQ9S6iYTCFzkNFhIIthtCAxaS+Qc16P4MfwXJtGCs5E2ZSjZM+2QqEQs4vipv0J2lxBopi4NI6zD+6QuvSB6zSAf6vrcrrsToyvcWH9lHZZvjbqkCyYCh7EBu02g+Da/cm/WVe/eW+ALvck3OyReE74LNSsIN8MVh4XslRYhNk48NVB1GNY3Ro56FM23QVhJS6sBiOMCONQLsB9kYCUDMUbaw79Sc5REdcQDtD1zSDz/CJKK/MWOfI3Al4EWnL8AOmjAELEewUEi0UPpRSSFiJda2tWXqnB3NlHc/mBiXAiacaDlqT+fn2UQ8d6KDN2UU2ZA+2SjSMQqJ7/IE1QrQR5gPEPjGbORhCxC0KG5Nx1LKrjhqn43mqb6raQBPnusIwxm352yLNrJry6ZR2X96bI4F/BYvj9lWQNY4oHCxZhQ2T0qkCMG5W9cmeXBOaItfbJiiCvN5HW81IWYBVB0blaE7sBtrp/XGP6wgvhx/Gh4Jn0O1hVfHnknjiLd1104OkRqa7tQ1NbQdDdaDmNHgcM9mTKJjW+3NcDB4JBe5qwSqQ9TRok4dVSJHe4sQzir6IARyPqqcVkrzaXf+arBJ4yINV9sbSfBSjW3vUW1NxYCg3hIJqmDdWV9cconZqhgeHlvvBX3xTpr1Xocng8QvspuyQWKaipjgByTh6yEN0rCYx76pugnSx+AuEsIqI8GMdTdzJRrR3ZOclkEyjZQsltwBbccCStkhoSamJaaBvrihktfENGKQGNgGmEYVGS6qYNarYMkBT7NOpyia+RHPo0kmMDqDSjfJOFo+ZuzFqnISdt5dlIRljM4JFtm8ezler/hinOJJ+Bqd056u8LaegpR6WQFLl+34vnfre0FP2sxKFSVQDHuxwXx7jlebm6vt2TpUEZTp78ah0MO+xQayONmu+8iAWFixbWh32pLDsH9EdLAvMloo+aFxdKmHQE035SH9UaiCBb916X7PxtjcBwtuiO0rlKra21DMJ7WnUY1w6/vYVhl6BFoe3O57jThVVcsFuPEHLFaBa2b8tV+EWxjFfz+F
NWNe3kuzPPfY7wyK+l1lhe4/qCMG0mD9V3GDdbSI5kGSHYszudJ/Hwq3Lpx+zLza52BdZAWDNKWjPJNj7kxsVmHyHK0pIBGYh0GypF6LJfss7EgkHiglMQsm/A+k8Ekeo6ANMdzmbs84k/86nj/njzrOcBmvKJKpd1HmOymCnuJpJpcPnz4/fLtCtxnpfr4r3oTp5KO1Q5i1w9IPbKqMPp2+5+EsNUXenRpl3wWVtWVGWbGLpYewRRSkADvbfZ2o4uJmWSF7zWZtDIdpPn3QJYZFtMzGSPaYaf6wKXX55vn8tKy1fQyiZT6mZkm8qE4Lt9srD4UOfn7lSeDHj+/F0L/71+8XDjgfBxBbnkN5rpoDOtuimnkcuylAe9gWYtweg3uX4jTD3AYqmN9VwxwUU/fl5sQ1PKdM45vxJ04RhMT/LH916p2yKYCwiIHzRx3F1KG7/bwVORMHjDLyHWg8+yBVHRzORgBjy/cJwdCn6nMBX3DsAAsA7NBB6QFiO3Dfoqod1Vl8cRY0FuN25OzGgLMaXaOAcCjgVYYID4IBYUCV5xBWbB7qlapNx/dNZjW+5Z5QcV4Vmr6eUgWtgyTKfUC2QLSyNdnJoq/3PZtlFacvM1/rfjaLJlGYG5odbuFu12rcoQ/VfRjMpNc0UsycHN9k5NnRKKE/f+Sra9Ug17XEEh9ZM1jlWHRXO+hoJAV4YDclBgRZk9CbKkP7HhrbjtOlwDFCgsAJUhfAqEYD2WMQThO5RoH12VbJA0BAOFPpCwB4d3O7xx/nxx3pUrPUmAuaxZ48NWRuXV2rYm1Me1pVRfePiAzlRk4ww/fbEIruFO0gOhWzdbMZmigzA1Nn7BBH4fh1N4aJRJqqRA5RqFosmW1N1b4qCCjIf3DthBzdImlDVSvYlob4sVuvQ+iI9yIdrWuGtvRVGNv7n072C7CJvkJQuRNTG7FsqdxOj7IMzFdbqbqrhn8bq8ey0iVO96wsvd0l1lC4QXeTU8iSuW+qkUzv2FHmDDHndQo7avbnO5gdkS89Nr+2VXEJki4xUUUMgRxkebNNZbZjtk+FxJWB4tfVmqAwjUqd01Y/sZ4RVafTs/SM6BurOWx2Vk3qjuU0z+XvZOMqFNA2p0kPjY3Qk1ye9GaX62/HY4/X6/NcVe2aZoCtWCfrXq3V3HbFsbDr1FkOWsDzG3gu/9Tx8mEW+dTYWrBf2ylAD4txTVdMPukTpXQvR7MLweFE6fBESYiBhl+V8i7LRRtjbQ5ojqmaXStK5yamPLHzdu9iZNQXioEu3/yYHNu+C9rSlM/BHa0aq2q+2EM7JjwxuDvw38Y6K6kx4VBXWp0pIQ5FUBCkTAFCs7wmu++X1T2HpIVsKbWrWjRidH0PhdpOJjh3ZejeQL1YfMNKe14vNaI7AQgEZxYr+rebrRFlu1eIRaJDjVR2WxsIke1ZLt5VIYoBGGp1IGQSuZSI6sAAuxbeWShM8TFcGKjCQp1UiV5nTu79cjpK41GYV3bmC2iu8/WBFD8IfHi4Hv38+rGhKvRmkz3El2V4qQvt3A1Girk9cRVVNJ0VhUJ00HrogZUeYl7c0HUtUHtB2VkwWoMIUaOveilXO0bvDrGkJjkWcP3ty8Oy7o2Wr0GdrecHV6FRIbovITSMJShAIGKBv6t+UI06r0IXWm//rVurb/Okb8a+EuCnP3y3cTvldveX8rfmbGxFH32uf+NTvMic262zH/NZakdq20wgu0bUVAkAw71UbaCZM2TbXGps3NwzoveJLfKZ61ukDZ73XIkqR5BQBdZdSacbpnd86bvdc9D0O9qXYksePS50GUF+ajMySa6dp4NPLHWBmCdl33GbaNlAqUs11W4mV7tnJYEeAdKEitC5tFdjQHwsu8r3ziluz73bq4WRyBX6yp/VMbsqN5e+VMeoqmNsQV/ixP7cjdWhct/oTlzP8xbLVHTWF5JSzGihv59ItEkKAymMgDxihKRs2ftEUPmzOiap3bHKS4XN+StskAwT6KjRf76CG8Ue0ZeCm4PWYUu+EvQV5aFmK26Ue3N2k+Okf/kt3CTRmiryqpbcVKU0a41tyqTm7/FyFLNE5yWp2VZSU1jlo+p0YzipqdhGszVbtb5Yp8MxYgPNMBbXOcSsbTIV1aCeulPfYRhasCFJZyLyy4ZMb7x5LLu9QuJOCLkekLeDkqtE1NPIo5351oNuyu0lT0uU6fY7lypdhQOSCVUa1YNrV8/Wfana7mL3Rhh8lyW7U/Pqy2hFblPQY9u3hbO4nVWyKHd94uG5t5r6aCxeqq6NV11Xuef9RQ/nq8HW2mOnhc2zh7FDb2U6ut55FxM5T0qwRecHu/Ok+kYbyfGNHbFfQ7vzQp3teS87d2tFqqDbRCFG9/KGyn1ndjOIejNvlbR5xwEdLOe+uOeuUMnsiOEl7YEt3qnaWtjUMFbuJnLR9W4NSYEbbV0Dt+FOR+uafkzizMHbnp6x5bd4mtWf3P8f</diagram></mxfile>
2106.05087/main_diagram/main_diagram.pdf ADDED
Binary file (96.5 kB). View file
 
2106.05087/paper_text/intro_method.md ADDED
@@ -0,0 +1,143 @@
1
+ # Introduction
2
+
3
+ Deep Reinforcement Learning (DRL) has achieved incredible success in many applications. However, recent works (Huang et al., 2017; Pattanaik et al., 2018) reveal that a well-trained RL agent may be vulnerable to test-time *evasion attacks*, making it risky to deploy RL models in high-stakes applications. As in most related works, we consider a *state adversary* which adds imperceptible noise to the observations of an agent such that its cumulative reward is reduced during test time.
4
+
5
+ In order to understand the vulnerability of an RL agent and to improve its certified robustness, it is important to evaluate the worst-case performance of the agent under any adversarial attacks with certain constraints. In other words, it is crucial to find the strongest/optimal adversary that can minimize the cumulative reward gained by the agent with fixed constraints, as motivated in a recent paper by Zhang et al. (2021). Therefore, we focus on the following question:
6
+
7
+ Given an arbitrary attack radius (budget) for each step of the deployment, what is the worst-case performance of an agent under the strongest adversary?
8
+
9
+ Finding the strongest adversary in RL is challenging. Many existing attacks (Huang et al., 2017; Pattanaik et al., 2018) are based on heuristics, crafting adversarial states at every step independently, although steps are interrelated in contrast to image classification tasks. These heuristic methods can often effectively reduce the agent's reward, but are not guaranteed to achieve the strongest attack under a given budget. This type of attack is "myopic" since it does not plan for the future. Figure 1 shows an intuitive example, where myopic adversaries
10
+
11
+ ![](_page_0_Figure_11.jpeg)
12
+
13
+ Figure 1: An example that a myopic adversary is not the strongest.
14
+
15
+ only prevent the agent from selecting the best action in the current step, but the strongest adversary can strategically "lead" the agent to a trap, which is the worst event for the agent.
16
+
17
+ Achieving computational efficiency arises as another challenge in practice, even if the strongest adversary can be found in theory. A recent work (Zhang et al., 2020a) points out that learning the optimal state adversary is equivalent to learning an optimal policy in a new Markov Decision Process (MDP). A follow-up work (Zhang et al., 2021) shows that the learned adversary significantly outperforms prior adversaries in MuJoCo games. However, the state space and the action space of the new MDP are both as large as the state space in the original environment, which can be high-dimensional in practice. For example, video games and autonomous driving systems use images as observations. In these tasks, learning the state adversary directly becomes computationally intractable.
18
+
19
+ To overcome the above two challenges, we propose a novel attack method called Policy Adversarial Actor Director (PA-AD), where we design a "director" and an "actor" that collaboratively find the optimal state perturbations. In PA-AD, a director learns an MDP named *Policy Adversary MDP (PAMDP)*, and an actor is embedded in the dynamics of PAMDP. At each step, the director proposes a perturbing direction in the policy space, and the actor crafts a perturbation in the state space to lead the victim policy towards the proposed direction. Through a trial-and-error process, the director can find the optimal way to cooperate with the actor and attack the victim policy. Theoretical analysis shows that the optimal policy in PAMDP induces an optimal state adversary. The size of PAMDP is generally smaller than the adversarial MDP defined by Zhang et al. (2021) and thus can be learned more efficiently using off-the-shelf RL algorithms. With our proposed *director-actor collaborative mechanism*, PA-AD outperforms state-of-the-art attacking methods on various types of environments, and improves the robustness of many DRL agents by adversarial training.
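+
+ A schematic sketch of this director-actor split (the projected-gradient actor and all names here are our simplifications; the exact actor objective is specified later in the paper):
+
+ ```python
+ import torch
+
+ def paad_step(director, victim_pi, s, epsilon, steps=10):
+     """One PA-AD attack step: the RL director proposes a perturbing
+     direction in policy space; a gradient-based actor searches the
+     epsilon-ball around s for a state realizing that direction."""
+     direction = director.act(s)              # vector in the policy space
+     s_adv = s.clone().requires_grad_(True)
+     for _ in range(steps):
+         # Push the victim's action distribution along the proposed direction.
+         score = (victim_pi(s_adv) * direction).sum()
+         grad, = torch.autograd.grad(score, s_adv)
+         with torch.no_grad():
+             s_adv += (1.5 * epsilon / steps) * grad.sign()
+             s_adv.clamp_(s - epsilon, s + epsilon)  # stay inside B_eps(s)
+     return s_adv.detach()
+ ```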
20
+
21
+ - (1) We establish a theoretical understanding of the optimality of evasion attacks from the perspective of policy perturbations, allowing a more efficient implementation of optimal attacks.
22
+ - (2) We introduce a Policy Adversary MDP (PAMDP) model, whose optimal policy induces the optimal state adversary under any attacking budget $\epsilon$ .
23
+ - (3) We propose a novel attack method, PA-AD, which efficiently searches for the optimal adversary in the PAMDP. PA-AD is a general method that works on stochastic and deterministic victim policies, vectorized and pixel state spaces, as well as discrete and continuous action spaces.
24
+ - (4) Empirical study shows that PA-AD universally outperforms previous attacking methods in various environments, including Atari games and MuJoCo tasks. PA-AD achieves impressive attacking performance in many environments using very small attack budgets.
25
+ - (5) Combining our strong attack PA-AD with adversarial training, we significantly improve the robustness of RL agents, and achieve the *state-of-the-art robustness in many tasks*.
26
+
27
+ # Method
28
+
29
+ **The Victim RL Agent** In RL, an agent interacts with an environment modeled by a Markov Decision Process (MDP) denoted as a tuple $\mathcal{M} = \langle \mathcal{S}, \mathcal{A}, P, R, \gamma \rangle$ , where $\mathcal{S}$ is a state space with cardinality $|\mathcal{S}|$ , $\mathcal{A}$ is an action space with cardinality $|\mathcal{A}|$ , $P:\mathcal{S}\times\mathcal{A}\to\Delta(\mathcal{S})$ is the transition function<sup>1</sup>, $R:\mathcal{S}\times\mathcal{A}\to\mathbb{R}$ is the reward function, and $\gamma\in[0,1)$ is the discount factor. The agent selects actions according to its policy $\pi:\mathcal{S}\to\Delta(\mathcal{A})$ , whose value at state $s$ is $V^{\pi}(s):=\mathbb{E}_{\pi,P}\big[\sum_{t=0}^{\infty}\gamma^{t}R(s_{t},a_{t})\mid s_{0}=s\big]$ . Throughout the paper, the victim is an agent executing a fixed policy $\pi$ in a given environment.
30
+
31
+ **Evasion Attacker** Evasion attacks are test-time attacks that aim to reduce the expected total reward gained by the agent/victim. As in most literature (Huang et al., 2017; Pattanaik et al., 2018; Zhang et al., 2020a), we assume the attacker knows the victim policy $\pi$ (white-box attack). However, the attacker does not know the environment dynamics, nor does it have the ability to change the environment directly. The attacker can observe the interactions between the victim agent and the
32
+
33
+ <sup>1</sup> $\Delta(X)$ denotes the space of probability distributions over $X$ .
34
+
35
+ environment, including states, actions and rewards. We focus on a typical *state adversary* (Huang et al., 2017; Zhang et al., 2020a), which perturbs the state observations returned by the environment before the agent observes them. Note that the underlying states in the environment are not changed.
36
+
37
+ Formally, we model a state adversary by a function h which perturbs state $s \in \mathcal{S}$ into $\tilde{s} := h(s)$ , so that the input to the agent's policy is $\tilde{s}$ instead of s. In practice, the adversarial perturbation is usually under certain constraints. In this paper, we consider the common $\ell_p$ threat model (Goodfellow et al., 2015): $\tilde{s}$ should be in $\mathcal{B}_{\epsilon}(s)$ , where $\mathcal{B}_{\epsilon}(s)$ denotes an $\ell_p$ norm ball centered at s with radius $\epsilon \geq 0$ , a constant called the *budget* of the adversary for every step. With the budget constraint, we define the *admissible state adversary* and the *admissible adversary set* as below.
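+
+ For $p = \infty$ , keeping a perturbation within the budget is a one-line projection (a sketch; the function name is ours):
+
+ ```python
+ import torch
+
+ def project_linf(s_tilde, s, epsilon):
+     """Project a candidate perturbed observation back into the
+     l-infinity ball B_eps(s) so the adversary stays admissible."""
+     return torch.clamp(s_tilde, s - epsilon, s + epsilon)
+ ```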
38
+
39
+ **Definition 1** (Set of Admissible State Adversaries $H_{\epsilon}$ ). A state adversary h is said to be admissible if $\forall s \in \mathcal{S}$ , we have $h(s) \in \mathcal{B}_{\epsilon}(s)$ . The set of all admissible state adversaries is denoted by $H_{\epsilon}$ .
40
+
41
+ Then the goal of the attacker is to find an adversary $h^*$ in $H_{\epsilon}$ that maximally reduces the cumulative reward of the agent. In this work, we propose a novel method to learn the optimal state adversary through the identification of an optimal *policy perturbation* defined and motivated in the next section.
42
+
43
+ In this section, we first motivate our idea of interpreting evasion attacks as perturbations of policies, then discuss how to efficiently find the optimal state adversary via the optimal policy perturbation.
44
+
45
+ **Evasion Attacks Are Perturbations of Policies** Although existing literature usually considers state-attacks and action-attacks separately, we point out that evasion attacks, either applied to states or actions, are essentially equivalent to perturbing the agent's policy $\pi$ into another policy $\pi_h$ in the policy space $\Pi$ . For instance, as shown in Figure 2, if the adversary h alters state $s$ into state $\tilde{s}$ , the victim selects an action $a$ based
46
+
47
+ ![](_page_2_Figure_8.jpeg)
48
+
49
+ **Figure 2:** Equivalence between evasion attacks and policy perturbations.
50
+
51
+ on $\pi(\cdot|\tilde{s})$ . This is equivalent to directly perturbing $\pi(\cdot|s)$ to $\pi_h(\cdot|s) := \pi(\cdot|\tilde{s})$ . (See Appendix A for more detailed analysis including action adversaries.)
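+
+ The equivalence is literally function composition; as a one-line sketch:
+
+ ```python
+ def perturbed_policy(pi, h):
+     """pi_h(.|s) := pi(.|h(s)): a state adversary composed with the victim
+     policy yields the induced policy perturbation."""
+     return lambda s: pi(h(s))
+ ```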
52
+
53
+ In this paper, we aim to find the optimal state adversary through the identification of the "optimal policy perturbation", which has the following **merits**. (1) $\pi_h(\cdot|s)$ usually lies in a lower dimensional space than h(s) for an arbitrary state $s \in \mathcal{S}$ . For example, in Atari games, the action space is discrete and small (e.g. $|\mathcal{A}|=18$ ), while a state is a high-dimensional image. Then the state perturbation h(s) is an image, while $\pi_h(\cdot|s)$ is a vector of size $|\mathcal{A}|$ . (2) It is easier to characterize the optimality of a policy perturbation than a state perturbation. How a state perturbation changes the value of a victim policy depends on both the victim policy network and the environment dynamics. In contrast, how a policy perturbation changes the victim value only depends on the environment. Our Theorem 4 in Section 3 and Theorem 12 in Appendix B both provide insights about how $V^{\pi}$ changes as $\pi$ changes continuously. (3) Policy perturbation captures the essence of evasion attacks, and unifies state and action attacks. Although this paper focuses on state-space adversaries, the learned "optimal policy perturbation" can also be used to conduct action-space attacks against the same victim.
54
+
55
+ **Characterizing the Optimal Policy Adversary** As depicted in Figure 3, the policy perturbation serves as a bridge connecting the perturbations in the state space and the value space. Our goal is to find the optimal state adversary by identifying the optimal "policy adversary". We first define an Admissible Adversarial Policy Set (Adv-policy-set) $\mathcal{B}_{\epsilon}^{H}(\pi) \subset \Pi$ as the set of policies perturbed from $\pi$ by all admissible state adversaries $h \in H_{\epsilon}$ . In other words, when a state adversary perturbs states within an $\ell_p$ norm ball $\mathcal{B}_{\epsilon}(\cdot)$ , the victim policy is perturbed within $\mathcal{B}_{\epsilon}^{H}(\pi)$ .
56
+
57
+ **Definition 2** (Admissible Adversarial Policy Set (Adv-policy-set) $\mathcal{B}_{\epsilon}^{H}(\pi)$ ). For an MDP $\mathcal{M}$ , a fixed victim policy $\pi$ , we define the admissible adversarial policy set (Adv-policy-set) w.r.t. $\pi$ , denoted by $\mathcal{B}_{\epsilon}^{H}(\pi)$ , as the set of policies that are perturbed from $\pi$ by all admissible adversaries, i.e.,
58
+
59
+ $$\mathcal{B}_{\epsilon}^{H}(\pi) := \{ \pi_h \in \Pi : \exists h \in H_{\epsilon} \text{ s.t. } \forall s, \pi_h(\cdot|s) = \pi(\cdot|h(s)) \}. \tag{1}$$
60
+
61
+ **Remarks** (1) $\mathcal{B}_{\epsilon}^{H}(\pi)$ is a subset of the policy space $\Pi$ and it surrounds the victim $\pi$ , as shown in Figure 3(middle). In the same MDP, $\mathcal{B}_{\epsilon}^{H}(\pi)$ varies for different victim $\pi$ or different attack budget $\epsilon$ . (2) In Appendix B, we characterize the topological properties of $\mathcal{B}_{\epsilon}^{H}(\pi)$ . We show that
62
+
63
+ ![](_page_3_Figure_1.jpeg)
64
+
65
+ Figure 3: A state adversary h perturbs s into $h(s) \in \mathcal{B}_{\epsilon}(s)$ in the state space; hence, the victim's policy $\pi$ is perturbed into $\pi_h$ within the Adv-policy-set $\mathcal{B}^H_{\epsilon}(\pi)$ ; as a result, the expected total reward the victim can gain becomes $V^{\pi_h}$ instead of $V^{\pi}$ . A prior work, SA-RL (Zhang et al., 2021), directly uses an RL agent to learn the best state adversary $h^*$ , which works for MDPs with small state spaces, but suffers from high complexity in larger MDPs. In contrast, we find the optimal state adversary $h^*$ efficiently through identifying the optimal policy adversary $\pi_{h^*}$ . Our proposed attack method, PA-AD, contains an RL-based "director", which learns to propose a policy perturbation $\pi_h$ in the policy space, and a non-RL "actor", which targets the proposed $\pi_h$ and computes adversarial states in the state space. Through this collaboration, the director can learn the optimal policy adversary $\pi_{h^*}$ using RL methods, such that the actor executes $h^*$ as justified in Theorem 7.
66
+
67
+ for a continuous function $\pi$ (e.g., neural network), $\mathcal{B}_{\epsilon}^{H}(\pi)$ is connected and compact, and the value functions generated by all policies in the Adv-policy-set $\mathcal{B}_{\epsilon}^{H}(\pi)$ form a polytope (Figure 3(right)), following the polytope theorem by Dadashi et al. (2019).
68
+
69
+ Given that the Adv-policy-set $\mathcal{B}^H_\epsilon(\pi)$ contains all the possible policies the victim may execute under admissible state perturbations, we can characterize the optimality of a state adversary through the lens of policy perturbations. Recall that the attacker's goal is to find a state adversary $h^* \in H_\epsilon$ that minimizes the victim's expected total reward. From the perspective of policy perturbation, the attacker's goal is to perturb the victim's policy to another policy $\pi_{h^*} \in \mathcal{B}^H_\epsilon(\pi)$ with the lowest value. Therefore, we can define the optimal state adversary and the optimal policy adversary as below.
70
+
71
+ **Definition 3 (Optimal State Adversary** $h^*$ **and Optimal Policy Adversary** $\pi_{h^*}$ ). For an MDP $\mathcal{M}$ , a fixed policy $\pi$ , and an admissible adversary set $H_{\epsilon}$ with attacking budget $\epsilon$ , (1) an **optimal state adversary** $h^*$ satisfies $h^* \in \operatorname{argmin}_{h \in H_{\epsilon}} V^{\pi_h}(s), \forall s \in \mathcal{S}$ , which leads to (2) an **optimal policy adversary** $\pi_{h^*}$ that satisfies $\pi_{h^*} \in \operatorname{argmin}_{\pi_h \in \mathcal{B}^{\mathcal{H}}_{\epsilon}(\pi)} V^{\pi_h}(s), \forall s \in \mathcal{S}$ . Recall that $\pi_h$ is the perturbed policy caused by adversary h, i.e., $\pi_h(\cdot|s) = \pi(\cdot|h(s)), \forall s \in \mathcal{S}$ .
72
+
73
+ Definition 3 implies an equivalence between the optimal state adversary and the optimal policy adversary: an optimal state adversary leads to an optimal policy adversary, and any state adversary that leads to an optimal policy adversary is optimal. Theorem 19 in Appendix D.1 shows that there always exists an optimal policy adversary for a fixed victim $\pi$ , and learning the optimal policy adversary is an RL problem. (A similar result has been shown by Zhang et al. (2020a) for the optimal state adversary, while we focus on the policy perturbation.)
74
+
75
+ Due to the equivalence, if one finds an optimal policy adversary $\pi_{h^*}$ , then the optimal state adversary can be found by executing targeted attacks with target policy $\pi_{h^*}$ . However, directly finding the optimal policy adversary in the Adv-policy-set $\mathcal{B}^H_\epsilon(\pi)$ is challenging since $\mathcal{B}^H_\epsilon(\pi)$ is generated by all admissible state adversaries in $H_\epsilon$ and is hard to compute. To address this challenge, we first get insights from theoretical characterizations of the Adv-policy-set $\mathcal{B}^H_\epsilon(\pi)$ . Theorem 4 below shows that the "outermost boundary" of $\mathcal{B}^H_\epsilon(\pi)$ always contains an optimal policy adversary. Intuitively, a policy $\pi'$ is in the outermost boundary of $\mathcal{B}^H_\epsilon(\pi)$ if and only if no policy in $\mathcal{B}^H_\epsilon(\pi)$ is farther away from $\pi$ than $\pi'$ in the direction $\pi' - \pi$ . Therefore, if an adversary can perturb a policy along a direction, it should push the policy as far away as possible in this direction under the budget constraints. Then, the adversary is guaranteed to find an optimal policy adversary after trying all the perturbing directions. In contrast, such a guarantee does not exist for state adversaries, justifying the benefits of considering policy adversaries. Our proposed algorithm in Section 4 applies this idea to find the optimal attack: an RL-based director searches for the optimal perturbing direction, and an actor is responsible for pushing the policy to the outermost boundary of $\mathcal{B}^H_\epsilon(\pi)$ with a given direction.
76
+
77
+ **Theorem 4.** For an MDP $\mathcal{M}$ , a fixed policy $\pi$ , and an admissible adversary set $H_{\epsilon}$ , define the outermost boundary of the admissible adversarial policy set $\mathcal{B}_{\epsilon}^{H}(\pi)$ w.r.t. $\pi$ as
78
+
79
+ $\partial_{\pi}\mathcal{B}_{\epsilon}^{H}(\pi) := \{\pi' \in \mathcal{B}_{\epsilon}^{H}(\pi) : \forall s \in \mathcal{S}, \theta > 0, \nexists \hat{\pi} \in \mathcal{B}_{\epsilon}^{H}(\pi) \text{ s.t. } \hat{\pi}(\cdot|s) = \pi'(\cdot|s) + \theta(\pi'(\cdot|s) - \pi(\cdot|s))\}. \tag{2}$ Then there exists a policy $\tilde{\pi} \in \partial_{\pi}\mathcal{B}_{\epsilon}^{H}(\pi)$ , such that $\tilde{\pi}$ is the optimal policy adversary w.r.t. $\pi$ .
80
+
81
+ Theorem 4 is proven in Appendix B.3, and we visualize the outermost boundary in Appendix B.5.
82
+
83
+ In this section, we first formally define the optimality of an attack algorithm and discuss some existing attack methods. Then, based on the theoretical insights in Section 3, we introduce our algorithm, *Policy Adversarial Actor Director (PA-AD)* that has an optimal formulation and is efficient to use.
84
+
85
+ Although many attack methods for RL agents have been proposed (Huang et al., 2017; Pattanaik et al., 2018; Zhang et al., 2020a), it is not yet well-understood how to characterize the strength and the optimality of an attack method. Therefore, we propose to formulate the optimality of an attack algorithm, which answers the question "whether the attack objective finds the strongest adversary".
86
+
87
+ **Definition 5** (Optimal Formulation of Attacking Algorithm). An attacking algorithm Algo is said to have an optimal formulation iff for any MDP $\mathcal{M}$ , policy $\pi$ and admissible adversary set $H_{\epsilon}$ under attacking budget $\epsilon$ , the set of optimal solutions to its objective, $H_{\epsilon}^{\text{Algo}}$ , is a subset of the optimal adversaries against $\pi$ , i.e., $H_{\epsilon}^{\text{Algo}} \subseteq H_{\epsilon}^* := \{h^* | h^* \in \operatorname{argmin}_{h \in H_{\epsilon}} V^{\pi_h}(s), \forall s \in \mathcal{S}\}.$
88
+
89
+ Many heuristic-based attacks, although empirically effective and efficient, do not meet the requirements of an optimal formulation. In Appendix D.3, we categorize existing heuristic attack methods into four types, and theoretically prove that there exist scenarios where these heuristic methods may not find the strongest adversary. A recent paper (Zhang et al., 2021) proposes to learn the optimal state adversary using RL methods, which we will refer to as *SA-RL* in our paper for simplicity. SA-RL can be viewed as an "end-to-end" RL attacker, as it directly learns the optimal state adversary such that the value of the victim policy is minimized. The formulation of SA-RL satisfies Definition 5 and thus is optimal. However, SA-RL learns an MDP whose state space and action space are both the same as the original state space. If the original state space is high-dimensional (e.g. images), learning a good policy in the adversary's MDP may become computationally intractable, as empirically shown in Section 6.
90
+
91
+ Can we address the optimal attacking problem in an efficient manner? SA-RL treats the victim and the environment together as a black box and directly learns a state adversary. But if the victim policy is known to the attacker (e.g. in adversarial training), we can exploit the victim model and simplify the attacking problem while maintaining the optimality. Therefore, we propose a novel algorithm, *Policy Adversarial Actor Director (PA-AD)*, that has an optimal formulation and is generally more efficient than SA-RL. PA-AD decouples the whole attacking process into two simpler components: policy perturbation and state perturbation, solved by a "director" and an "actor" through collaboration. The director learns the optimal policy perturbing direction with RL methods, while the actor crafts adversarial states at every step such that the victim policy is perturbed towards the given direction. Compared to the black-box SA-RL, PA-AD is a white-box attack, but works for a broader range of environments more efficiently. Note that PA-AD can be used to conduct black-box attacks based on the transferability of adversarial attacks (Huang et al., 2017), although this is out of the scope
92
+
93
+ ![](_page_4_Figure_8.jpeg)
94
+
95
+ **Figure 4:** An overview of PA-AD compared with a heuristic attacker and an end-to-end RL attacker. Heuristic attacks are efficient, but may not find the optimal adversary as they do not learn from the environment dynamics. An end-to-end RL attacker directly learns a policy to generate state perturbations, but is inefficient in large-state-space environments. In contrast, our **PA-AD** solves the attack problem with a combination of an RL-based director and a non-RL actor, so that PA-AD achieves both optimality and efficiency.
96
+
97
+ of this paper. Appendix F.2 provides a comprehensive comparison between PA-AD and SA-RL in terms of complexity, optimality, assumptions and applicable scenarios.
98
+
99
+ Formally, for a given victim policy $\pi$ , our proposed PA-AD algorithm solves a *Policy Adversary MDP (PAMDP)* defined in Definition 6. An actor denoted by g is embedded in the dynamics of the PAMDP, and a director searches for an optimal policy $\nu^*$ in the PAMDP.
100
+
101
+ **Definition 6** (Policy Adversary MDP (PAMDP) $\widehat{\mathcal{M}}$ ). Given an MDP $\mathcal{M} = \langle \mathcal{S}, \mathcal{A}, P, R, \gamma \rangle$ , a fixed stochastic victim policy $\pi$ , and an attack budget $\epsilon \geq 0$ , we define a Policy Adversarial MDP $\widehat{\mathcal{M}} = \langle \mathcal{S}, \widehat{\mathcal{A}}, \widehat{P}, \widehat{R}, \gamma \rangle$ , where the action space is $\widehat{\mathcal{A}} := \{d \in [-1, 1]^{|\mathcal{A}|}, \sum_{i=1}^{|\mathcal{A}|} d_i = 0\}$ , and, $\forall s, s' \in \mathcal{S}, \forall \widehat{a} \in \widehat{\mathcal{A}}$ :
102
+
103
+ $$\widehat{P}(s'|s,\widehat{a}) = \sum\nolimits_{a \in \mathcal{A}} \pi(a|g(\widehat{a},s))P(s'|s,a), \quad \widehat{R}(s,\widehat{a}) = -\sum\nolimits_{a \in \mathcal{A}} \pi(a|g(\widehat{a},s))R(s,a),$$
104
+
105
+ where g is the actor function defined as
106
+
107
+ $$g(\widehat{a},s) = \operatorname{argmax}_{\widetilde{s} \in B_{\epsilon}(s)} \|\pi(\widetilde{s}) - \pi(s)\| \text{ subject to } \left(\pi(\widetilde{s}) - \pi(s)\right)^T \widehat{a} = \|\pi(\widetilde{s}) - \pi(s)\| \|\widehat{a}\|. \quad (G)$$
108
+
109
+ If the victim policy is **deterministic**, i.e., $\pi_D(s) := \operatorname{argmax}_a \pi(a|s)$ (subscript D stands for deterministic), the action space of the PAMDP is $\widehat{\mathcal{A}}_D := \mathcal{A}$ , and the actor function $g_D$ is
110
+
111
+ $$g_D(\widehat{a}, s) = \operatorname{argmax}_{\widetilde{s} \in \mathcal{B}_{\epsilon}(s)} \left( \pi(\widehat{a}|\widetilde{s}) - \operatorname{max}_{a \in \mathcal{A}, a \neq \widehat{a}} \pi(a|\widetilde{s}) \right). \tag{G_D}$$
112
+
113
+ Detailed definition of the deterministic-victim version of PAMDP is in Appendix C.1.
114
+
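+ To make Definition 6 concrete, the sketch below wraps an existing environment as a PAMDP for a deterministic victim. It is our own minimal illustration, not the paper's implementation: the class name and the `victim` and `actor` callables are assumptions.
+
+ ```python
+ import gym
+
+ class PAMDPWrapper(gym.Wrapper):
+     """Sketch of the PAMDP (deterministic victim): the director's action
+     a_hat is a target action, an actor routine crafts the state perturbation,
+     and the director receives the negated environment reward."""
+
+     def __init__(self, env, victim, actor, eps):
+         super().__init__(env)
+         self.victim = victim    # maps a state to the victim's (greedy) action
+         self.actor = actor      # (a_hat, s, eps) -> perturbed state in B_eps(s)
+         self.eps = eps
+         self.s = None
+
+     def reset(self, **kwargs):
+         self.s = self.env.reset(**kwargs)
+         return self.s
+
+     def step(self, a_hat):
+         s_tilde = self.actor(a_hat, self.s, self.eps)  # targeted perturbation
+         a = self.victim(s_tilde)                       # victim acts on s_tilde
+         s_next, r, done, info = self.env.step(a)
+         self.s = s_next
+         return s_next, -r, done, info                  # R_hat = -R
+ ```
+
+ Any off-the-shelf RL algorithm trained on this wrapper then plays the role of the director.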
115
+ A key to PA-AD is the director-actor collaboration mechanism. The input to the director's policy $\nu$ is the current state s in the original environment, while its output $\widehat{a}$ is a signal to the actor denoting "which direction to perturb the victim policy into". $\widehat{\mathcal{A}}$ is designed to contain all "perturbing directions" in the policy space. That is, $\forall \widehat{a} \in \widehat{\mathcal{A}}$ , there exists a constant $\theta_0 \geq 0$ such that $\forall \theta \leq \theta_0, \pi(\cdot|s) + \theta \frac{\widehat{a}}{\|\widehat{a}\|}$ belongs to the simplex $\Delta(\mathcal{A})$ . The actor g takes in the state s and the director's direction $\widehat{a}$ and then computes a state perturbation within the attack budget. Therefore, the director and the actor together induce a state adversary: $h(s) := g(\nu(s), s), \forall s \in \mathcal{S}$ . The definition of the PAMDP is slightly different for a stochastic victim policy and a deterministic victim policy, as described below.
116
+
117
+ For a stochastic victim $\pi$ , the director's action $\widehat{a} \in \widehat{\mathcal{A}}$ is designed to be a unit vector lying in the policy simplex, denoting the perturbing direction in the policy space. The actor, once receiving the perturbing direction $\widehat{a}$ , will "push" the policy as far as possible by perturbing s to $g(\widehat{a},s) \in \mathcal{B}_{\epsilon}(s)$ , as characterized by the optimization problem (G). In this way, the policy perturbation resulting from the director and the actor always lies in the outermost boundary of $\mathcal{B}_{\epsilon}^H(\pi)$ w.r.t. the victim $\pi$ , where the optimal policy perturbation can be found according to Theorem 4.
118
+
119
+ For a deterministic victim $\pi_D$ , the director's action $\widehat{a} \in \widehat{\mathcal{A}}_D$ can be viewed as a target action in the original action space, and the actor conducts targeted attacks to let the victim execute $\widehat{a}$ , by forcing the logit corresponding to the target action to be larger than the logits of other actions.
120
+
121
+ In both the stochastic-victim and deterministic-victim cases, PA-AD has an optimal formulation, as stated in Theorem 7 (proven in Appendix D.2).
122
+
123
+ **Theorem 7 (Optimality of PA-AD).** For any MDP $\mathcal{M}$ , any fixed victim policy $\pi$ , and any attack budget $\epsilon \geq 0$ , an optimal policy $\nu^*$ in $\widehat{\mathcal{M}}$ induces an optimal state adversary against $\pi$ in $\mathcal{M}$ . That is, the formulation of PA-AD is optimal, i.e., $H_{\epsilon}^{\text{PA-AD}} \subseteq H_{\epsilon}^*$ .
124
+
125
+ ```
126
+ Algorithm 1: Policy Adversarial Actor Director (PA-AD)
127
+ 1  Input: initialization of director's policy ν; victim policy π; attack budget ε; start state s_0
128
+ 2  for t = 0, 1, 2, ... do
129
+ 3      Director samples a policy perturbing direction â_t ~ ν(·|s_t)
130
+ 4      Actor perturbs s_t to s̃_t = g_D(â_t, s_t) if the victim is deterministic, otherwise to s̃_t = g(â_t, s_t)
131
+ 5      Victim takes action a_t ~ π(·|s̃_t), proceeds to s_{t+1}, receives r_t
132
+ 6      Director saves (s_t, â_t, -r_t, s_{t+1}) to its buffer
133
+ 7      Director updates its policy ν using any RL algorithm
134
+ ```
136
+
137
+ **Efficiency of PA-AD** As is commonly known, the sample complexity and computational cost of learning an MDP usually grow with the cardinalities of its state space and action space. Both SA-RL and PA-AD have state space $\mathcal{S}$ , the state space of the original MDP. But the action space of
138
+
139
+ SA-RL is also $\mathcal{S}$ , while our PA-AD has action space $\mathbb{R}^{|\mathcal{A}|}$ for stochastic victim policies, or $\mathcal{A}$ for deterministic victim policies. In most DRL applications, the state space (e.g., images) is much larger than the action space, so PA-AD is generally more efficient than SA-RL as it learns a smaller MDP.
140
+
141
+ The attacking procedure is illustrated in Algorithm 1. At step t, the director observes a state $s_t$ and proposes a policy perturbation $\widehat{a}_t$ , then the actor searches for a state perturbation to meet the policy perturbation. Afterwards, the victim acts with the perturbed state $\widetilde{s}_t$ , and the director updates its policy based on the negative of the victim's reward. Note that the actor solves a constrained optimization problem, $(G_D)$ or (G). Problem $(G_D)$ is similar to a targeted attack in supervised learning, while the stochastic version (G) can be approximately solved with a Lagrangian relaxation. In Appendix C.2, we provide our implementation details for solving the actor's optimization, which empirically achieves state-of-the-art attack performance as verified in Section 6.
142
+
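+ As one concrete illustration of the actor's step, the following PyTorch-style sketch solves $(G_D)$ with projected gradient ascent on the targeted margin under an $\ell_\infty$ budget. The function names and PGD hyperparameters are our own illustrative choices; the paper's actual solver is described in its Appendix C.2.
+
+ ```python
+ import torch
+
+ def pgd_actor(victim_logits, s, a_hat, eps, steps=10, lr=0.1):
+     """Sketch of the actor g_D: search s_tilde in the l_inf ball B_eps(s)
+     maximizing logit[a_hat] - max_{a != a_hat} logit[a], the objective of (G_D).
+     `victim_logits` maps an (unbatched) state tensor to the victim's logits."""
+     s_tilde = s.clone().detach().requires_grad_(True)
+     for _ in range(steps):
+         logits = victim_logits(s_tilde)
+         others = torch.cat([logits[:a_hat], logits[a_hat + 1:]])
+         margin = logits[a_hat] - others.max()
+         grad, = torch.autograd.grad(margin, s_tilde)
+         with torch.no_grad():
+             s_tilde += lr * grad.sign()                        # ascent step
+             s_tilde.copy_(s + (s_tilde - s).clamp(-eps, eps))  # project onto B_eps(s)
+     return s_tilde.detach()
+ ```
+
+ The stochastic-victim problem (G) can be approached analogously, ascending a Lagrangian-relaxed objective over the policy-space displacement instead of the targeted margin.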
143
+ **Extending to Continuous Action Space** Our PA-AD can be extended to environments with continuous action spaces, where the actor minimizes the distance between the policy action and the target action, i.e., $\operatorname{argmin}_{s' \in B_{\epsilon}(s)} \|\pi(s') - \widehat{a}\|$ . More details and formal definitions of the variant of PA-AD in continuous action space are provided in Appendix C.3. In Section 6, we show experimental results in MuJoCo tasks, which have continuous action spaces.
2106.08827/main_diagram/main_diagram.drawio ADDED
The diff for this file is too large to render. See raw diff
 
2106.08827/paper_text/intro_method.md ADDED
@@ -0,0 +1,89 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Introduction
2
+
3
+ Understanding and predicting human actions and intentions are essential tasks in tackling many real-world problems such as autonomous driving, robot navigation safety, human-robot interaction, and detection of perilous behaviors in surveillance systems. Developing an AI model performing
4
+
5
+ <span id="page-0-0"></span>![](_page_0_Figure_8.jpeg)
6
+
7
+ Figure 1. An illustration of a single frame of the JRDB-Act dataset. As shown, the data captured with a 2D and 3D multimodal sensory platform is accompanied by a new set of annotations, including individual actions and social group formation, from which social activities (common activities in each social group) are inferred, further complementing the 2D and 3D detection and tracking annotations in JRDB.
8
+
9
+ these tasks is challenging due to the high variations of human actions in an unconstrained real-world environment. Moreover, dealing with daily actions, which follow a highly unbalanced, long-tailed distribution, poses new challenges for many existing approaches.
10
+
11
+ Recently, great progress has been made to create large-scale video datasets for human activity understanding [\[3,](#page-8-0) [9,](#page-8-1)[20,](#page-8-2)[26,](#page-8-3)[45\]](#page-9-0). While these popular datasets have contributed significantly to the recent advances in human activity understanding from visual data, they do not primarily target the robotics domain and therefore rarely reflect the challenges in problems such as human-robot interaction and robot navigation in human-crowded environments, *e.g*. shopping malls, university campuses, *etc*. Such environments include not only many individuals, but also often groups of people connected to each other through some form of interaction, *e.g*. engaging in common activities or goals, which
12
+
13
+ <sup>∗</sup> Work done while at the Australian National University (ANU).
14
+
15
+ <span id="page-1-0"></span>form the concept of social groups and activities. Moreover, in many robotics problem, *e.g*. for safe navigation and collision risk prediction in human environments, it is essential to anticipate every individual's action and intention way ahead of time, considering their social interactions. To this end, the availability of a spatio-temporally dense annotated human action data is indispensable for the development and evaluation of a robotic perception system.
16
+
17
+ With this motivation, we introduce JRDB-Act, a large-scale dataset captured from a mobile robot platform, containing dense spatio-temporal individual action and social group annotation. JRDB-Act is an extension of the recently introduced JRDB [\[35,](#page-9-1)[43\]](#page-9-2). We now elaborate on the unique characteristics of JRDB-Act and our proposed method.
18
+
19
+ New Annotations. We provide a set of atomic action labels for each person at each frame from the three categories of human pose, human-human, and human-object interactions, as shown in Fig. [1.](#page-0-0) Our action vocabulary contains common daily human actions including 11 human pose, 3 human-human, and 12 human-object interaction classes. Since these action labels are densely annotated over space and time, JRDB-Act contains over 2.8M action labels, making it one of the largest publicly available spatio-temporal action datasets. Furthermore, the dataset provides new unique annotations, *i.e*. social group labels, by assigning a group ID to each person in each frame such that individuals with the same ID represent a social group. We further provide social activity annotation for each group by inferring it from the annotated individual actions and social groups. Another novel aspect of JRDB-Act is the difficulty level annotation, *i.e*. easy, moderate, and difficult, for each annotated label, which reflects the confidence level of annotators for the corresponding label. The provided difficulty level can be conducive to more reliable evaluation paradigms.
20
+
21
+ Unique Challenges. The sequences in JRDB-Act are captured from human daily life in different indoor and outdoor places of a university campus, an unconstrained environment [\[35\]](#page-9-1), by a mobile robotic platform. Thus, they reflect the highly unbalanced distribution of human actions in real-world scenarios. Moreover, the sequences naturally include diverse levels of human population density. The average number of people per frame in JRDB-Act is 30, which is significantly higher than in most popular action datasets. Further, the robot motion and the perspective view of the captured sequences make this dataset challenging. Considering the aforementioned compelling attributes, dense annotations, and natural complexities, JRDB-Act introduces means to study new problems and challenges in human understanding for the computer vision and robotics communities.
22
+
23
+ Our Proposed Method. In order to showcase the potential research directions and challenges required to be tackled in JRDB-Act, we develop an end-to-end trainable pipeline for both individual action and social group detection tasks. Our method uses the panoramic video clips as input and adopts a backbone similar to that of [\[13\]](#page-8-4) to extract spatio-temporal individual features. However, we fuse additional pair-wise geometrical features and incorporate a novel eigenvalue-based loss function to improve the social group detection performance compared to [\[13\]](#page-8-4). We also suggest a simple, yet effective strategy to handle the unbalanced nature of action labels by partitioning and balancing action loss functions based on the occurrence frequency of action classes in the dataset.
24
+
25
+ # Method
26
+
27
+ We propose an end-to-end trainable baseline for spatio-temporal detection of individuals' actions, social groups, and social activities per group in videos. The architecture of our model is illustrated in Fig. 4. We utilise the same backbone $f_{\theta}(x)$ as in [13], including the I3D feature extractor, the self-attention, and the graph attention modules, to extract a rich spatio-temporal feature map for each individual, in which social interactions are encoded. To further enhance the social grouping performance and to reduce the discrepancy between training and inference compared to [13], we propose to incorporate an eigenvalue-based loss function [10] on the similarity matrix extracted from the visual features and geometrical relations between the detected bounding boxes. Further, in order to overcome the highly unbalanced nature of action labels in the data, we propose to utilise a softmax/sigmoid loss-partitioning approach inspired by [33].
28
+
29
+ **Learning Social Group Formation.** Social groups in a scene can be represented as a graph in which the nodes are individuals and the edges indicate the connectivities between them. The groundtruth social groups can be represented by a binary matrix $\hat{A}$ in which $\hat{A}_{i,j}$ indicates whether the pair (i,j) belongs to the same social group. The model forms $A_{\theta}$ as follows: for each pair of bounding boxes i and j, the normalised GIoU [39], $D_G(i, j)$ , is computed as a geometrical similarity, such that 0 and 1 represent far and close boxes, respectively. The normalised similarity between the visual features (extracted from $f_{\theta}(x)$ ) of the two bounding boxes is also calculated as $D_V(h_{\theta}^i, h_{\theta}^j)$ . The final $A_{\theta}^{i,j}$ is then obtained by concatenating $D_V(h_{\theta}^i, h_{\theta}^j)$ and $D_G(i,j)$ and using an MLP layer to project the 2-dim vector to a scalar. The training objective in learning social groups is to reduce the discrepancy
30
+
31
+ between the predicted $A_{\theta}$ and $\hat{A}$ . To this end, we utilise a binary cross entropy loss between the elements of $A_{\theta}$ and $\hat{A}$ , denoted by $L_{BCE}$ in Eq. 2. Further, since the number of connected components (social groups) in the groundtruth matrix $\hat{A}$ is equal to the number of zero eigenvalues of its Laplacian matrix $\hat{L}$ , we want the Laplacian matrix of $A_{\theta}$ , denoted by $L_{\theta}$ , to have the same number of zero eigenvalues as $\hat{L}$ . We therefore utilise $L_{eig}(\theta)$ , defined in Eq. 1,
32
+
33
+ <span id="page-4-1"></span>
34
+ $$L_{eig}(\theta) = \hat{e}^T L_{\theta}^T L_{\theta} \hat{e} + \alpha \exp(-\beta t r(\bar{L}_{\theta}^T \bar{L}_{\theta}))$$
35
+ (1)
36
+
37
+ in which $\hat{e}$ is the groundtruth eigenvector corresponding to the zero eigenvalue, $L_{\theta}$ is the Laplacian matrix corresponding to the predicted similarity matrix $A_{\theta}$ , and $\alpha$ and $\beta$ are coefficients. The proof of Eq. 1 is stated in the supp. material. The loss in Eq. 1 is inspired by the fully differentiable, eigendecomposition-free loss proposed in [10] to train a deep network whose loss depends on the eigenvector corresponding to the single zero eigenvalue of a matrix predicted by the network. We extend it to our problem with multiple zero eigenvalues indicating the number of social groups. To learn the number of social groups, as a cardinality loss we utilise a mean squared error between the groundtruth number of social groups and a 1-dim feature learned from the concatenation of $h_{\theta}$ (the max-pool of the boxes' visual features) and the sum of the elements of $A_{\theta}$ ; this term is denoted by $L_{MSE}$ in Eq. 2.
38
+
39
+ <span id="page-4-0"></span>
40
+ $$L_G = L_{BCE}(A_{\theta}, \hat{A}) + L_{eig}(L_{\theta}, \hat{L}) + L_{MSE}((h_{\theta}||\sum_{i} A_{\theta}^{i}), GT_{cardinality})$$
41
+ (2)
42
+
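+ For concreteness, below is a minimal PyTorch sketch of the eigenvalue-based term; we take $\bar{L}_{\theta}$ in the second term of Eq. 1 to be the predicted Laplacian itself (an assumption on our part; the exact definition is in the supp. material), and all names are illustrative.
+
+ ```python
+ import torch
+
+ def laplacian(A):
+     """Unnormalized graph Laplacian L = D - A of a (soft) similarity matrix."""
+     return torch.diag(A.sum(dim=1)) - A
+
+ def l_eig(A_pred, e_gt, alpha=0.1, beta=0.01):
+     """Sketch of Eq. 1. e_gt: groundtruth eigenvector(s) spanning the
+     zero-eigenvalue space of the groundtruth Laplacian."""
+     L = laplacian(A_pred)
+     null_term = (L @ e_gt).pow(2).sum()   # e^T L^T L e: push e_gt into null(L)
+     # Penalty against the trivial all-zero prediction (L_bar := L assumed here).
+     anti_trivial = alpha * torch.exp(-beta * torch.trace(L.T @ L))
+     return null_term + anti_trivial
+ ```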
43
+ Learning Actions. Each bounding box is annotated with one pose-based and an arbitrary number of interaction-based action labels, and the occurrence of action classes is highly unbalanced in the dataset. One naive way to learn actions is to use a cross entropy loss for the pose-based and a binary cross entropy loss for the interaction-based actions. However, we empirically observe that the action classifier's performance is severely harmed by the unbalanced nature of action labels. To overcome this problem, we divide the pose-based and interaction-based action classes into several disjoint partitions, such that the number of samples of the least frequent class in each partition is greater than 0.1 of the number of samples of the most frequent class in that partition. In each partition excluding the last one, we add an "Other" class which indicates the presence of an action class in the less frequent partitions. We have 3 partitions for pose-based and 4 for interaction-based actions; the list of action labels in each partition is provided in the supp. material. We then train each pose-based and interaction-based partition separately, using cross entropy and binary cross entropy losses respectively, as in Eq. 3. Further, to maintain the balance, for each training sample we only train the partitions with an existing groundtruth label. An illustration of our action learning strategy is
44
+
45
+ <span id="page-5-4"></span><span id="page-5-0"></span>![](_page_5_Figure_0.jpeg)
46
+
47
+ Figure 4. Overview of our framework during training. Given the spatio-temporal feature representation of the individuals denoted by $h_{\theta}^{i}$ in the key-frame, we optimize two tasks. First, to learn the individual actions, we compute the individual's feature map by concatenating the individual's visual feature and its corresponding social group's feature map $(SG_{\theta}^{i})$ obtained by max-pooling the feature maps of its members. Then, to compute $L_{Act}$ , we compute cross entropy and binary cross entropy losses for each pose-based (P) and interaction-based (I) action groups. Second, to learn the social group formation and the social group cardinality, we calculate the similarity matrix $A_{\theta}$ between individuals based on their pair-wise geometrical $(D_{G}(i,j))$ and feature distance extracted from the backbone $(D_{V}(h_{\theta}^{i},h_{\theta}^{j}))$ and utilise it along with the extracted spatio-temporal feature $(h_{\theta})$ to compute different loss terms as in $L_{G}$ .
48
+
49
+ <span id="page-5-2"></span>![](_page_5_Picture_2.jpeg)
50
+
51
+ Figure 5. Illustration of different softmax and sigmoid terms of $L_{Act}$ for a training sample. As shown, there are 3 groundtruth actions in this sample including one from the pose-based and two from the human-object interaction categories. For the pose-based action, only one softmax is activated as "Walking" belongs to "Softmax 1". The first sigmoid determines whether there is an interaction-based action. The subsequent sigmoids specifically determine the present interaction-based action labels. Here, "Holding sth" belongs to "Sigmoid 2" and "Looking at robot" falls into the "Other". Thus, the third sigmoid is activated to recognise the "Looking at robot" action.
52
+
53
+ shown in Fig. 5.
54
+
55
+ <span id="page-5-1"></span>
56
+ $$L_{Act} = \sum_{i=0}^{2} \lambda_{i} L_{CE}(P_{\theta}^{i}, P^{i}) + \sum_{j=0}^{3} \lambda_{j} L_{BCE}(I_{\theta}^{j}, I^{j})$$
57
+ (3)
58
+
59
+ In Eq. 3, $\lambda$ is a coefficient, $P_{\theta}^{i}$ and $I_{\theta}^{j}$ are the predicted pose-based and interaction-based actions, and $P^{i}$ and $I^{j}$ are the corresponding groundtruth labels, respectively.
60
+
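+ A minimal sketch of this partitioned objective is given below; the container layout (lists of per-partition logits with optional targets) is our own illustrative choice.
+
+ ```python
+ import torch.nn.functional as F
+
+ def l_act(pose_logits, pose_gt, inter_logits, inter_gt, lam_p, lam_i):
+     """Sketch of Eq. 3: per-partition CE for pose-based actions and BCE for
+     interaction-based actions; partitions without a groundtruth label in the
+     current sample are skipped, so their term is zero."""
+     loss = 0.0
+     for i, (logits, gt) in enumerate(zip(pose_logits, pose_gt)):
+         if gt is not None:                 # only partitions with a GT label
+             loss = loss + lam_p[i] * F.cross_entropy(logits, gt)
+     for j, (logits, gt) in enumerate(zip(inter_logits, inter_gt)):
+         if gt is not None:
+             loss = loss + lam_i[j] * F.binary_cross_entropy_with_logits(logits, gt)
+     return loss
+ ```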
61
+ **Training.** Our model takes as input a video clip with the key-frame located at the end. The input clip is then fed to
62
+
63
+ the backbone to obtain the spatio-temporal feature maps of the individuals in the key-frame, denoted by $h_{\theta}^{i}$ . The similarity matrix $A_{\theta}$ between individuals is calculated based on their pair-wise geometrical and feature distance. The calculated similarity matrix and the extracted spatio-temporal features are then utilised to learn the social grouping loss $L_G$ in Eq. 2. Given the groundtruth social connections in training, we obtain each social group's feature map by max-pooling the features of its members. Each individual's feature representation is concatenated with its social group feature map. The resulting individual feature maps are utilised to learn the action loss $L_{Act}$ as in Eq. 3. As shown in Fig. 5, for each training sample we only activate the terms of $L_{Act}$ for which there exists a groundtruth label and set the other terms to zero, to avoid training with groundtruth vectors of all zeros. The total training objective is stated in Eq. 4.
64
+
65
+ <span id="page-5-3"></span>
66
+ $$L_{total} = L_G + L_{Act}$$
+ (4)
67
+
68
+ **Inference.** At test time, for individual action prediction, we apply a softmax to the predictions of each cross entropy head and a sigmoid to the predictions of each binary cross entropy head. We then choose the predicted action labels with a hierarchical approach, starting from the first partition and going to the next one in the hierarchy only if the "Other" class is predicted. For social group prediction, we perform graph spectral clustering [57] on the obtained similarity matrix between individuals, using the predicted number of social groups. Since the social activity labels of each group are the most frequent action labels
69
+
70
+ <span id="page-6-2"></span><span id="page-6-0"></span>
71
+
72
+ | Method | grouping loss | Cardinality | Geo feature | G1 AP↑ | G2 AP↑ | G3 AP↑ | G4 AP↑ | G5 <sup>+</sup> AP↑ | overall AP↑ |
73
+ |----------------|---------------|-------------|-------------|--------|--------|--------|--------|---------------------|-------------|
74
+ | Baseline1 [13] | BCE | H | - | 8.0 | 29.3 | 37.5 | 65.4 | 67.0 | 41.4 |
75
+ | Baseline2 | BCE | H | ✓ | 26.1 | 57.0 | 61.2 | 63.0 | 53.7 | 52.2 |
76
+ | Baseline3 | BCE | MSE | ✓ | 79.6 | 63.0 | 43.7 | 56.9 | 40.7 | 56.8 |
77
+ | Ours | BCE+EIGEN | MSE | ✓ | 81.4 | 64.8 | 49.1 | 63.2 | 37.2 | 59.2 |
78
+
79
+ <span id="page-6-1"></span>Table 1. Social grouping ablation study on JRDB-Act validation-set using groundtruth bounding boxes. G1, G2, G3, G4, G5<sup>+</sup> indicate social groups with 1, 2, 3, 4, 5 or more members.
80
+
81
+ | Method | Action mAP↑ |
82
+ |---------------------|-------------|
83
+ | [CE+BCE] | 8.0 |
84
+ | [W-CE+W-BCE] | 8.1 |
85
+ | [M-CE+M-BCE] [Ours] | 9.0 |
86
+
87
+ Table 2. Individual action detection ablation study on JRDB-Act validation-set using groundtruth bounding boxes.
88
+
89
+ of its members, we follow the same strategy and infer the activity of each predicted social group from the predicted action labels of its individuals.
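+ As a sketch of this inference step (function and variable names are ours), the predicted similarity matrix and group count can be fed to an off-the-shelf spectral clustering routine:
+
+ ```python
+ import numpy as np
+ from sklearn.cluster import SpectralClustering
+
+ def predict_social_groups(A_pred, k):
+     """Cluster individuals into k social groups from the predicted
+     similarity matrix A_pred, treated as a precomputed affinity."""
+     A = (A_pred + A_pred.T) / 2          # symmetrize the soft affinities
+     labels = SpectralClustering(n_clusters=k, affinity="precomputed").fit_predict(A)
+     return labels                        # equal labels = same social group
+ ```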
2106.12108/main_diagram/main_diagram.drawio ADDED
The diff for this file is too large to render. See raw diff
 
2106.12108/paper_text/intro_method.md ADDED
@@ -0,0 +1,258 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Method
2
+
3
+ We formalize the setting considered in this paper for transfer learning under distribution shift.
4
+
5
+ Let $p_S(\bx)$ and $p_T(\bx)$ be the marginal distributions of $\bx$ in the source and target domain. The associated second-moment matrices are $\Sigma_S:=\E_{p_S}[\bx\bx^\top]$ and $\Sigma_T:=\E_{p_T}[\bx\bx^\top]$. Labeled data $(\bx,y)$ satisfies $\E_{p_S}[y|\bx]=\E_{p_T}[y|\bx]=f^*(\bx)$ and $y=f^*(\bx)+z$ with Gaussian noise $z\sim \cN(0,\sigma^2)$. We consider both linear ($f^*(\bx):=\E[y|\bx]=\bx^\top \bbeta^*$) and general nonlinear data-generation models. When the optimal linear model changes from the source to the target domain we add a subscript for distinction, i.e., $\bbeta_S^*$ and $\bbeta_T^*$. We use bold symbols ($\bx$) for vectors, lower-case letters ($x$) for scalars, and capital letters ($A$) for matrices.
6
+
7
+ We observe $n_S, n_T$ labeled samples from source and target domain, and $n_U$ unlabeled target samples. Labeled data is scarce in the target domain: $n_S\gg n_T$ and $n_T$ can be $0$. Specifically, data is collected as $X_S=[\bx_1^\top |\bx_2^\top |\cdots |\bx_{n_S}^\top]^\top \in \R^{n_S\times d}$, with $\bx_i,i\in [n_S]$ drawn from $p_S$, and noise $\bz = [z_1,z_2,\cdots z_{n_S}]^\top, z_i \sim \cN(0,\sigma^2)$; $\by_S = [y_1,y_2,\cdots, y_{n_S}]^\top \in \R^{n_S}$ ($\by_T\in \R^{n_T}$ and $X_U\in \R^{n_U\times d}$ are similarly defined). Denote by $\hat \Sigma_S = X_S^\top X_S/n_S$ the empirical second-moment matrix. The positive part of a number is denoted by $(x)_+$.
8
+
9
+ In this work, we focus on designing linear estimators $\hat \bbeta:\R^{n_S}\rightarrow \R^d, \by_S \mapsto A\by_S$[^1] for $\bbeta^*_T\in \cB$. Here $\bbeta^*_T$ is the optimal linear model in the target domain ($:=\argmin_{\bbeta}\E_{\bx\sim p_T,z\sim \cN(0,\sigma^2)}[(f^*(\bx)+z-\bx^\top\bbeta)^2]$). [^2]
10
+
11
+ Our estimator is evaluated by the excess risk on target domain, with the worst case $\bbeta^*_T$ in some set $\cB$: $L_{\cB}(\hat\bbeta) = \max_{\bbeta^*\in \cB} \E_{\by_S}\E_{\bx\sim p_T} \left(\bx^\top(\hat \bbeta(\by_S) -\bbeta^*_T)\right)^2$. Minimax linear risk and minimax risk among all estimators are respectively defined as: $$\begin{align*}
12
+ R_L(\cB) \equiv & \min_{\hat\bbeta \text{ linear in } \by_S} L_{\cB}(\hat\bbeta); ~~ R_N(\cB) \equiv \min_{\hat\bbeta} L_{\cB}(\hat\bbeta).
13
+ \end{align*}$$ The subscript "N" or "L" is a mnemonic for "non-linear" or "linear" estimators. $R_N$ is the optimal risk with no restriction placed on the class of estimators. $R_L$ only considers the linear function class for $\hat\bbeta$. The minimax linear estimator and the minimax estimator are the estimators that respectively attain $R_L$ and $R_N$ within universal multiplicative constants. Normally we only consider $\cB=\{\bbeta | \|\bbeta\|_2\leq r \}$. When there is no ambiguity, we simplify $\hat\bbeta(\by_S)$ to $\hat\bbeta$.
14
+
15
+ Our paper considers different settings with distribution shift. Our methods are unified under the following meta-algorithm:
16
+
17
+ - Step 1: Construct an unbiased sufficient statistic $\hat\bbeta_{\text{SS}}$[^3] for the unknown parameter.
18
+
19
+ - Step 2: Construct $\hat\bbeta_{\text{MM}}$, a linear function of the sufficient statistic $\hat\bbeta_{\text{SS}}$ that minimizes $L_{\cB}(\hat\bbeta_{\text{MM}})$.
20
+
21
+ For each setting, we will show that $\hat\bbeta_{\text{MM}}$ achieves linear minimax risk $R_L$. Furthermore, under some conditions, the minimax risk $R_N$ is uniformly lower bounded by a universal constant times $L_{\cB}(\hat\bbeta_{\text{MM}})$.
22
+
23
+ In the sections below, we tackle the problem in several different settings. In Section [3](#sec:cov_shift){reference-type="ref" reference="sec:cov_shift"}, we design algorithms with only covariate shift and linear data-generation models ($f^*$ is linear) for unsupervised domain adaptation ($n_T=0$) in Section [3.1](#sec:linear_model){reference-type="ref" reference="sec:linear_model"}, and supervised domain adaptation ($n_T>0$) in Section [3.4](#sec:linear_source_and_target_data){reference-type="ref" reference="sec:linear_source_and_target_data"}. Section [4](#sec:nonlinear_model){reference-type="ref" reference="sec:nonlinear_model"} is about linear regression with approximation error ($n_T=0$ and $f^*(\bx)$ is a general nonlinear function). Finally we consider model shift for linear models ($\bbeta_S^*\neq \bbeta_T^*$) in Section [5](#sec:model_shift){reference-type="ref" reference="sec:model_shift"}.
24
+
25
+ In this section, we consider the setting with only covariate shift and $f^*$ is linear. That is, only $\Sigma_S$ (marginal distribution $p_S(\bx)$) changes to $\Sigma_T$ (marginal distribution $p_T(\bx)$), but $f^*=\E[y|\bx]=\bx^\top \bbeta^*$ (conditional distribution $p(y|\bx)$) is shared.
26
+
27
+ We observe $n_S$ samples from source domain: $\by_S=X_S \bbeta^* + \bz,\bz\sim \cN(0,\sigma^2 I)$ and only some unlabeled samples $X_U$ from the target domain. Our goal is to find the minimax linear estimator $\hat \bbeta_{\text{MM}}(\by_S)=A\by_S$ with some linear mapping $A$ that attains $R_L(\cB)$ [^4].
28
+
29
+ Following our meta-algorithm, let $\hat \bbeta_{\text{SS}}=\frac{1}{n_S} \hat\Sigma_S^{-1} X_S^\top \by_S$[^5] be an unbiased sufficient statistic for $\bbeta^*$: $$\begin{align}
30
+ \notag
31
+ \hat \bbeta_{\text{SS}} = & \frac{1}{n_S} \hat\Sigma_S^{-1} X_S^\top \by_S = \frac{1}{n_S} \hat \Sigma_S^{-1} X_S^\top X_S \bbeta^* + \frac{1}{n_S} \hat \Sigma_S^{-1} X_S^\top \bz.\\
32
+ \label{eqn:sequence_model}
33
+ = & \bbeta^* + \frac{1}{n_S} \hat \Sigma_S^{-1} X_S^\top \bz \sim \cN\left(\bbeta^*, \frac{\sigma^2}{n_S}\hat \Sigma_S^{-1} \right) .
34
+ \end{align}$$ The fact that $\hat\bbeta_{\text{SS}}(\by_S)$ is a sufficient statistic for $\bbeta^*$ is proven in Claim [\[claim:sufficient_statistic\]](#claim:sufficient_statistic){reference-type="ref" reference="claim:sufficient_statistic"} for a more general case, using the Fisher-Neyman factorization theorem. We prove that the minimax linear estimator is of the form $\hat\bbeta_{\text{MM}}=C\hat\bbeta_{\text{SS}}$ and then design algorithms that calculate the optimal $C$.
35
+
36
+ ::: claim
37
+ []{#claim:A=A_1X^T label="claim:A=A_1X^T"} The minimax linear estimator is of the form $\hat \bbeta_{\text{MM}} = C\hat\bbeta_{\text{SS}}$ for some $C\in \R^{d\times d}$.
38
+ :::
39
+
40
+ In order to derive the minimax linear estimator, we first consider the simple case when $\Sigma_T$ and $\hat\Sigma_S$ are simultaneously diagonalizable. We note that under this setting, minimax estimation under covariate shift reduces to the well-studied problem of finding a minimax linear estimator under weighted square loss (see e.g., [@blaker2000minimax]). One can apply Pinsker's Theorem [@johnstone2011gaussian] and obtain the estimator and the minimax risk in closed form:
41
+
42
+ ::: theorem
43
+ []{#thm:linear_minimax_estimator label="thm:linear_minimax_estimator"} Suppose the observations follow sequence model $\by_S = X_S \bbeta^* + \bz, \bz\sim \cN(0,\sigma^2 I_n)$. If $\Sigma_T=U\diag(\bt)U^\top$ and $\hat\Sigma_S\equiv X_S^\top X_S/n_S = U\diag(\bs) U^\top$, then the minimax linear risk $$\begin{align*}
44
+ R_L(\cB) \equiv & \min_{\hat\bbeta=A\by_S} \max_{\bbeta^*\in \cB} \E\|\Sigma_{T}^{1/2}(\hat \bbeta -\bbeta^*)\|^2\\
45
+ = & \sum_i \frac{\sigma^2}{n_S} \frac{t_i}{s_i} \left(1-\frac{\lambda}{\sqrt{t_i}}\right)_+,
46
+ \end{align*}$$ where $\cB=\{\bbeta|\|\bbeta\|\leq r\}$, and $\lambda= \lambda(r)$ is determined by $\frac{\sigma^2}{n_S}\sum_{i=1}^d \frac{1}{s_i}(\sqrt{t_i}/\lambda-1)_+ = r^2.$ The linear minimax estimator is given by: $$\begin{align}
47
+ \label{eqn:best_linear_estimator}
48
+ \hat \bbeta_{\text{MM}} = &\Sigma_T^{-1/2} U (I-\diag(\lambda/\sqrt{\bt}) )_+ U^\top \Sigma_T^{1/2}\hat\bbeta_{\text{SS}},\\
49
+ \notag
50
+ & \text{where }\hat \bbeta_{\text{SS}} = \frac{1}{n_S} \hat\Sigma_S^{-1} X_S^\top \by_S.
51
+ \end{align}$$
52
+ :::
53
+
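+ A minimal numpy sketch of this closed form is given below (all names are ours): it finds $\lambda$ by bisection on the monotone budget equation and applies the resulting per-direction shrinkage, using the fact that in the commutative case all factors share the eigenbasis $U$, so the estimator reduces to $U\,\diag((1-\lambda/\sqrt{t_i})_+)\,U^\top \hat\bbeta_{\text{SS}}$.
+
+ ```python
+ import numpy as np
+
+ def minimax_linear_estimator(U, t, s, beta_ss, r, sigma2, n_S):
+     """Commutative-case minimax linear estimator (Pinsker form).
+     t, s: eigenvalues of Sigma_T and Sigma_S_hat in the shared basis U."""
+     def budget(lam):  # decreasing in lam
+         return sigma2 / n_S * np.sum(np.clip(np.sqrt(t) / lam - 1, 0, None) / s)
+     lo, hi = 1e-12, float(np.sqrt(t).max())
+     for _ in range(100):              # bisection for budget(lam) = r^2
+         mid = (lo + hi) / 2
+         lo, hi = (mid, hi) if budget(mid) > r**2 else (lo, mid)
+     lam = (lo + hi) / 2
+     shrink = np.clip(1 - lam / np.sqrt(t), 0, None)   # (1 - lam/sqrt(t_i))_+
+     return U @ (shrink * (U.T @ beta_ss))
+ ```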
54
+ Since $r$ is unknown in practice, we could simply view either $r$ or directly $\lambda$ as the tuning parameter. We compare the functionality of $\lambda$ with that of ridge regression: $\hat\bbeta_{\text{RR}}^{\lambda}=\argmin_{\hat\bbeta} \frac{1}{2n_S}\|X_S\hat\bbeta - \by_S\|^2 + \frac{\lambda}{2}\|\hat\bbeta\|^2= (\hat\Sigma_S+\lambda I)^{-1}X_S^\top \by_S/n_S$. For both algorithms, $\lambda$ balances the bias and variance: $\lambda=0$ gives an unbiased estimator, and a big $\lambda$ gives a (near) zero estimator with no variance. The difference is that the minimax linear estimator shrinks some signal directions based on the value of $t_i$, since the risk in those directions is downweighted in the target loss. The estimator tends to sacrifice the directions of signal where $t_i$ is smaller. Ridge regression, however, respects the value of $s_i$. A natural counterpart is for ridge to also regularize based on $\bt$: let $\hat\bbeta^\lambda_{\text{RR},T}=\argmin_{\bbeta} \|\Sigma_T^{1/2}(\bbeta- \hat\bbeta_{\text{SS}})\|^2+\lambda \|\bbeta\|^2 = (\Sigma_T+\lambda I)^{-1}\Sigma_T \hat\bbeta_{\text{SS}}$. We will compare their performances in the experimental section.
55
+
56
+ For non-commutative second-moment shift, we follow the same procedure. Our estimator is achieved by optimizing over $C$: $\hat \bbeta_{\text{MM}} = C \hat\bbeta_{\text{SS}}$: $$\begin{align}
57
+ \notag
58
+ & R_L(\cB)\equiv \min_{\hat \bbeta=A\by_S} \max_{\bbeta^*\in\cB} \E\|\Sigma^{1/2}_T(\hat \bbeta - \bbeta^*)\|^2_2 \\
59
+ \notag
60
+ = & \min_{\hat \bbeta = C\hat\bbeta_{\text{SS}}} \max_{\|\bbeta^*\|\leq r} \left\{\|\Sigma_T^{1/2}(C-I)\bbeta^* \|^2_2 + \frac{\sigma^2}{n_S} \Trace(\Sigma_T^{1/2}C\hat \Sigma_S^{-1}C^\top\Sigma_T^{1/2} ) \right\} \tag{Claim \ref{claim:A=A_1X^T}} \\
63
+ \label{eqn:best_linear_estimator_noncommute}
64
+ = & \min_{\tau, C} \left\{ r^2 \tau + \frac{\sigma^2}{n_S} \Trace(\Sigma_T^{1/2}C\hat \Sigma_S^{-1} C^\top\Sigma_T^{1/2}) \right\}, \\
65
+ \notag
66
+ & \text{ s.t. } (C-I)^\top\Sigma_T (C-I) \preceq \tau I.
67
+ \end{align}$$ Unlike the commutative case, this problem does not have a closed-form solution, but it is still efficiently computable:
68
+
69
+ ::: proposition
70
+ []{#prop:convex_program label="prop:convex_program"} Problem [\[eqn:best_linear_estimator_noncommute\]](#eqn:best_linear_estimator_noncommute){reference-type="eqref" reference="eqn:best_linear_estimator_noncommute"} is a convex program and computable in polynomial-time.
71
+ :::
72
+
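+ For illustration, the program can be posed directly in cvxpy (our sketch, not the authors' code), rewriting the trace term as a squared Frobenius norm and the quadratic matrix constraint as a bound on a spectral norm:
+
+ ```python
+ import cvxpy as cp
+ import numpy as np
+ from scipy.linalg import sqrtm
+
+ def solve_minimax_C(Sigma_T, Sigma_S_hat, r, sigma2, n_S):
+     """Sketch: minimize r^2 tau + (sigma^2/n_S) tr(T^{1/2} C S^{-1} C^T T^{1/2})
+     subject to (C - I)^T Sigma_T (C - I) <= tau I."""
+     d = Sigma_T.shape[0]
+     T_half = np.real(sqrtm(Sigma_T))
+     S_inv_half = np.real(sqrtm(np.linalg.inv(Sigma_S_hat)))
+     C, tau = cp.Variable((d, d)), cp.Variable(nonneg=True)
+     # tr(T^{1/2} C S^{-1} C^T T^{1/2}) = ||T^{1/2} C S^{-1/2}||_F^2.
+     obj = r**2 * tau + sigma2 / n_S * cp.sum_squares(T_half @ C @ S_inv_half)
+     # (C-I)^T Sigma_T (C-I) <= tau I  <=>  ||T^{1/2} (C-I)||_2^2 <= tau.
+     spec = cp.square(cp.norm(T_half @ (C - np.eye(d)), 2))
+     cp.Problem(cp.Minimize(obj), [spec <= tau]).solve()
+     return C.value
+ ```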
73
+ We achieve near-optimal minimax risk among all estimators under some conditions:
74
+
75
+ ::: theorem
76
+ []{#thm:1.25minimax_risk label="thm:1.25minimax_risk"} The best linear estimator from [\[eqn:best_linear_estimator\]](#eqn:best_linear_estimator){reference-type="eqref" reference="eqn:best_linear_estimator"} or [\[eqn:best_linear_estimator_noncommute\]](#eqn:best_linear_estimator_noncommute){reference-type="eqref" reference="eqn:best_linear_estimator_noncommute"} achieves minimax linear risk: $L_{\cB}(\hat\bbeta_{\text{MM}}) = R_L(\cB)$. When $\Sigma_S,\Sigma_T$ commute, or $\Sigma_T$ is rank 1, our algorithms are near-optimal among nonlinear rules: $L_{\cB}(\hat\bbeta_{\text{MM}})\leq 1.25 R_N(\cB).$
77
+ :::
78
+
79
+ Note that $R_N\leq R_L$ by definition. Therefore 1) our estimator $\hat\bbeta_{\text{MM}}$ is near-optimal, and 2) our lower bound for $R_N$ is tight. Lower bounds (without matching upper bounds) for the general non-commutative problem are presented in @kalan2020minimax, and we improve their result for the commutative case and provide a matching algorithm. Their lower bound scales with $\frac{d}{n_S}\min_i \frac{t_i}{s_i}$ for large $r$, while ours becomes $\frac{1}{n_S}\sum_i \frac{t_i}{s_i}$. Our lower bound is always larger and thus tighter, and potentially arbitrarily larger when $\max_i \frac{t_i}{s_i}$ and $\min_i \frac{t_i}{s_i}$ are very different. We defer our proof to the appendix.
80
+
81
+ From a probabilistic perspective, ridge regression is equivalent to maximum a posteriori (MAP) inference with a Gaussian prior: $\bbeta^*\sim \cN(0,r^2I)$ (see e.g. @murphy2012machine). Similarly, instead of considering a worst-case risk that minimizes $L_{\cB}(\hat\bbeta) :=\max_{\bbeta^*\in \cB}\E_{\by_S}\|\Sigma_T^{1/2}(\hat\bbeta(\by_S) - \bbeta^*)\|^2$, one could also study the average setting that minimizes $\bar L_{\cB}:= \E_{\bbeta^*\sim \cN(0,r^2 I)} \E_{\by_S}\|\Sigma_T^{1/2}(\hat\bbeta(\by_S) - \bbeta^*)\|^2$. With distribution shift, the performance is evaluated on $\Sigma_T$ instead of $\Sigma_S$. Interestingly, with a Gaussian prior this does not yield an algorithm different from ordinary ridge regression.
82
+
83
+ ::: proposition
84
+ The optimal estimator under Gaussian prior $\bbeta^*\sim \cN(0,r^2 I)$ evaluated on $p_T$ is: $$\begin{align*}
85
+ \hat \bbeta \leftarrow & \argmin_{\bbeta = A\by_S } \E_{\bbeta^*\sim \cN(0,r^2 I)} \E_{\by_S} \E_{\bx\sim p_T} \left(\bx^\top (\bbeta - \bbeta^*)\right)^2\\
86
+ = & \frac{1}{n_S}(\frac{\sigma^2}{r^2 n_S} I +\hat \Sigma_S)^{-1} X_S^\top \by_S \\
87
+ \equiv & \argmin_{\hat\bbeta} \frac{1}{2n_S}\|X_S\hat\bbeta - \by_S\|^2 + \frac{\lambda}{2}\|\hat\bbeta\|^2\\
88
+ = & (\hat\Sigma_S + \lambda I)^{-1}\hat \Sigma_S \hat \bbeta_{\text{SS}}=: \hat\bbeta_{\text{RR}}^{\lambda},
89
+ \end{align*}$$ when $\lambda = \sigma^2/(n_S r^2)$. Namely, the average-case best linear estimator with Gaussian prior is equivalent to ridge regression with regularization strength $\lambda=\frac{\sigma^2/n_S}{r^2}$: the variance ratio between the noise distribution and prior distribution.
90
+ :::
91
+
92
+ Even though ridge regression achieves the optimal risk in the average sense, it could be much worse than the minimax linear estimator in the worst case. We prove a separation result on a specific example (that is deferred to the appendix).
93
+
94
+ ::: remark
95
+ []{#remark:order-wise_better label="remark:order-wise_better"}
96
+
97
+ There is an example that $R_L(\cB)\leq \cO(d^{-1/4} L_{\cB}(\hat\bbeta_{\text{RR}}^\lambda))$ even with the optimal hyperparameter $\lambda$. [^6]
98
+ :::
99
+
100
+ With specific problems, one should adjust the prior distribution instead of simply assuming $\bbeta^*\sim \cN(0,r^2 I)$. If one replaces the prior by $\bbeta^*\sim \cN(\hat\bbeta_{\text{SS}},r^2 I)$, one could get another heuristic method:
101
+
102
+ ::: proposition
103
+ Let $\hat \bbeta_{\text{SS}}$ be the estimator from ordinary least square: $\hat\bbeta_{\text{SS}}= \hat \Sigma_S^{-1}X_S^\top\by_S/n_S$. The optimal estimator under Gaussian prior $\bbeta^*\sim \cN(\hat \bbeta_{\text{SS}},r^2 I)$ evaluated on $p_T$ is: $$\begin{align*}
104
+ \hat \bbeta \leftarrow & \argmin_{\bbeta = A\by_S } \E_{\bbeta^*\sim \cN(\hat\bbeta_{\text{SS}},r^2 I)} \E_{\by_S} \E_{\bx\sim p_T} \left(\bx^\top (\bbeta - \bbeta^*)\right)^2\\
105
+ = & \frac{1}{n_S}(\frac{\sigma^2}{r^2 n_S} I + \Sigma_T)^{-1}\Sigma_T \hat\Sigma_S^{-1} X_S^\top \by_S \\
106
+ \equiv &\argmin_{\bbeta} \|\Sigma_T^{1/2}(\bbeta- \hat\bbeta_{\text{SS}})\|^2+\lambda \|\bbeta\|^2\\
107
+ = & (\Sigma_T+\lambda I)^{-1}\Sigma_T \hat\bbeta_{\text{SS}}=:\hat\bbeta^\lambda_{\text{RR},T},
108
+ \end{align*}$$ when $\lambda = \sigma^2 /(n_S r^2)$.
109
+ :::
110
+
111
+ Comparing the closed-form estimator $\hat \bbeta_{\text{RR},T}^{\lambda}:=(\Sigma_T+\lambda I)^{-1}\Sigma_T \hat\bbeta_{\text{SS}}$ to the original ridge regression $\hat \bbeta_{\text{RR}}^{\lambda}:=(\hat\Sigma_S+\lambda I)^{-1}\hat\Sigma_S \hat\bbeta_{\text{SS}}$, we can see that this algorithm regularizes $\hat\bbeta$ based on the signal strength from the target distribution, and it is equivalent to ridge regression after adjusting the prior distribution to center at $\hat\bbeta_{\text{SS}}$, the unbiased estimator of the ground truth $\bbeta^*$. We will compare both methods with our minimax estimator in the experimental section.
112
+
113
+ In practice, we have finite unlabeled samples $X_U\in \R^{n_U\times d}$, where we denote the empirical second-moment matrix as $\hat \Sigma_U = X_U^\top X_U/n_U$. Let $\hat L_{\cB}$ denote the worst-case excess risk measured on the observed target samples: $\hat L_\cB(\hat \bbeta) = \max_{\bbeta^*\in \cB} \E_{\by_S}\frac{1}{n_U} \|X_U(\hat\bbeta(\by_S)-\bbeta^*)\|^2$. To find the best linear estimator that minimizes $\hat L_{\cB}$, our proposed algorithm becomes: $$\begin{align}
114
+ \label{eqn:cov_shift_alg_finite_n_U}
115
+ \hat C \leftarrow & \min_{\tau, C} \left\{ r^2 \tau + \frac{\sigma^2}{n_S} \Trace(C\hat \Sigma_S^{-1} C^\top\hat\Sigma_U) \right\},\\
116
+ \notag
117
+ & \text{ s.t. } (C-I)^\top\hat \Sigma_U (C-I) \preceq \tau I.
118
+ \end{align}$$ Let $\hat\bbeta = \hat C \hat\Sigma_S^{-1}X_S^\top \by_S/n_S$. We want to show that, despite the estimation error from replacing $\Sigma_T$ with $\hat\Sigma_U$, our generated $\hat\bbeta$ still achieves the minimax linear risk (up to a constant multiplicative error).
119
+
120
+ For simplicity, in this section we assume input samples are centered: $\E_{p_S}[\bx]=\E_{p_T}[\bx]=0$. This assumption results in no loss of generality: since the sample mean is more sample-efficient to estimate than the covariance matrix, one can first estimate the mean and center the data. We assume some standard light-tail property on the target samples:
121
+
122
+ ::: definition
123
+ []{#definition:linear_subgaussian label="definition:linear_subgaussian"} We call a distribution $p$ with $\E[p]=0$ $\rho^2$-subgaussian when the whitened random vector $\bar{\bx} \sim \bar p$ is $\rho^2$-subgaussian. Here $\bar p$ is the whitening of $p$, such that $\bar\bx\sim \bar p$ is equivalent to $\bx=\Sigma^{1/2}\bar\bx \sim p$, where $\Sigma=\E_p[\bx\bx^\top]$. [^7]
124
+ :::
125
+
126
+ Note that $\rho$ is defined on the whitening of the data. It does not scale with $\|\Sigma\|_{op}$ and should be viewed as a universal constant.
127
+
128
+ ::: theorem
129
+ []{#thm:random_design_target label="thm:random_design_target"} Fix a failure probability $\delta\in(0, 1)$. Suppose target distribution $p_T$ is $\rho^2$-subgaussian, and the sample size in target domain satisfies $n_U \gg \rho^4(d+\log\frac{1}{\delta})$. Let $\hat \bbeta:\by_S\rightarrow \hat C\hat\Sigma_S^{-1}X_S^\top \by_S$ where $\hat C$ is defined from Eqn. [\[eqn:cov_shift_alg_finite_n_U\]](#eqn:cov_shift_alg_finite_n_U){reference-type="eqref" reference="eqn:cov_shift_alg_finite_n_U"}. Then with probability at least $1-\delta$ over the unlabeled samples from target domain, and for each fixed $X_S$ from source domain, our learned estimator $\hat\bbeta(\by_S)$ satisfies: $$\begin{equation}
130
+ L_{\cB}(\hat\bbeta)\leq (1+O(\sqrt{\frac{\rho^4(d+\log(1/\delta))}{n_U}})) R_L(\cB).
131
+ \end{equation}$$ When $\Sigma_T$ commutes with $\hat \Sigma_S$ or is rank 1, we have: $$\begin{equation}
132
+ L_{\cB}(\hat\bbeta)\leq (1.25+O(\sqrt{\frac{\rho^4(d+\log(1/\delta))}{n_U}})) R_N(\cB).
133
+ \end{equation}$$
134
+ :::
135
+
136
+ Similarly, all other results in the paper can be extended to $\hat\bbeta\leftarrow \argmin \hat L_{\cB}(\cdot)$, the estimator obtained with finite target samples $X_U$.
137
+
138
+ ::: remark
139
+ The linear-estimator setting naturally treats $X_S$ as fixed, and Theorem [\[thm:1.25minimax_risk\]](#thm:1.25minimax_risk){reference-type="ref" reference="thm:1.25minimax_risk"} compares our estimator with the optimal nonlinear estimator using the same data $X_S$ from the source domain. In Appendix [\[appendix:random_design\]](#appendix:random_design){reference-type="ref" reference="appendix:random_design"}, we compare our estimator with an even stronger linear estimator with infinite access to $p_S$ and show that our estimator is still within a constant multiplicative factor of it.
140
+ :::
141
+
142
+ In some scenarios, we have a moderate amount of labeled data from the target domain as well. In such cases, it is important to utilize the source and target labeled data jointly. Let $\by_S = X_S \bbeta^* + \bz_S$, $\by_T = X_T \bbeta^* + \bz_T$. We consider $X_S,X_T$ as deterministic variables, $\hat \Sigma_S^{-1} X_S^\top \by_S/n_S \sim \cN(\bbeta^*, \frac{\sigma^2}{n_S}\hat \Sigma_S^{-1} )$ and $\hat \Sigma_T^{-1} X_T^\top \by_T /n_T \sim \cN(\bbeta^*, \frac{\sigma^2}{n_T}\hat \Sigma_T^{-1} ).$ Therefore, conditioned on the observations $\by_S,\by_T$, a sufficient statistic for $\bbeta^*$ is $\hat\bbeta_{\text{SS}} := (n_S\hat \Sigma_S + n_T\hat \Sigma_T)^{-1}( X_S^\top \by_S + X_T^\top \by_T ).$
143
+
144
+ ::: claim
145
+ []{#claim:sufficient_statistic label="claim:sufficient_statistic"} $\hat\bbeta_{\text{SS}}$ is an unbiased sufficient statistic of $\bbeta^*$ with samples $\by_S,\by_T$. $\hat \bbeta_{\text{SS}} \sim \cN(\bbeta^*, \sigma^2(n_S\hat \Sigma_S + n_T \hat \Sigma_T)^{-1} )$.
146
+ :::
147
+
148
+ First consider the estimator $\hat\bbeta_{\text{SS}} = (n_S\hat \Sigma_S + n_T\hat \Sigma_T)^{-1}( X_S^\top \by_S + X_T^\top \by_T )$. Next find the best linear function of $\hat\bbeta_{\text{SS}}$: $$\begin{align*}
149
+ \hat\bbeta_{\text{MM}} = & \ C\,\hat\bbeta_{\text{SS}}, \quad \text{where } (C,\tau) \leftarrow \argmin_{C,\tau}\ r^2 \tau + \sigma^2 \Trace((n_S\hat \Sigma_S + n_T \hat \Sigma_T)^{-1} C^\top \Sigma_T C),\\
150
+ & \text{ s.t. } (C-I)^\top\Sigma_T(C-I) \preceq \tau I.
151
+ \end{align*}$$
156
+
157
+ ::: proposition
158
+ []{#claim:linear_in_SS label="claim:linear_in_SS"} The minimax estimator $\hat \bbeta_{\text{MM}}$ is of the form $C \hat \bbeta_{\text{SS}}$ for some $C$. When choosing $C$ with our proposed algorithm and when $\hat\Sigma_S$ commutes with $\hat\Sigma_T$ and $\Sigma_T$, we achieve the minimax risk $R_L(\cB) \leq 1.25 R_N(\cB)$.
159
+ :::
160
+
161
+ Now we consider observations coming from nonlinear models: $\by_S=f^*(X_S)+\bz$. Let $\bbeta_S^* = \argmin_{\bbeta}\E_{\bx\sim p_S,z\sim \cN(0,\sigma^2)}[(f^*(\bx)+z-\bbeta^\top \bx)^2]$, and similarly for $\bbeta_T^*$. Notice that now, even with $f^*$ unchanged across domains, the input distribution affects the best linear model. The approximation error on the source domain is $a_S(\bx) := f^*(\bx) - \bx^\top \bbeta_S^*$, and likewise for $a_T$.
162
+
163
+ Define the reweighting vector $\bw\in \R^{n_S}$ as $w_i=p_T(\bx_i)/p_S(\bx_i)$. We form an asymptotically unbiased estimator via $$\begin{align*}
164
+ \hat \bbeta_{\text{LS}} = & \argmin_\bbeta \{\sum_i \frac{p_T(\bx_i)}{p_S(\bx_i)}( \bbeta^\top \bx_i - y_i)^2\}\\
165
+ = & (X_S^\top \diag(\bw) X_S)^{-1} (X_S^\top \diag(\bw) \by_S).
166
+ \end{align*}$$
167
+
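+ In code, this estimator is a single reweighted normal equation (a sketch with illustrative names):
+
+ ```python
+ import numpy as np
+
+ def weighted_least_squares(X_S, y_S, w):
+     """beta_LS = (X^T diag(w) X)^{-1} X^T diag(w) y, with w_i = p_T(x_i)/p_S(x_i)."""
+     Xw = X_S * w[:, None]                    # row-wise importance reweighting
+     return np.linalg.solve(Xw.T @ X_S, Xw.T @ y_S)
+ ```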
168
+ ::: claim
169
+ []{#claim:MVUE label="claim:MVUE"} $\hat \bbeta_{\text{LS}}$ is asymptotically unbiased and normally distributed with covariance matrix $M:=\Sigma_T^{-1} \E_{\bx\sim p_T} [\frac{p_T(\bx)}{p_S(\bx)} (a_T(\bx)^2+\sigma^2)\bx\bx^\top]\Sigma_T^{-1}$: $$\begin{align*}
170
+ & \sqrt{n_S}(\hat \bbeta_{\text{LS}} - \bbeta_T^*) \overset{d}{\rightarrow} \cN(0, M ).
171
+ \end{align*}$$
172
+ :::
173
+
174
+ Note that large importance weights greatly inflate the variance of the estimator, especially when $p_T/p_S$ blows up somewhere. Therefore, here we design an algorithm to cope with the inflated variance. Again we want to minimize the worst-case risk: $$\begin{align*}
175
+ & \min_{\hat \bbeta=C\hat \bbeta_{\text{LS}}} \max_{\bbeta_T^*\in\cB} \E \|\Sigma^{1/2}_T(\hat \bbeta - \bbeta_T^*)\|^2 \\
176
+ \overset{d}{\rightarrow} & \min_{C} \max_{\|\bbeta_T^*\|\leq r} \left\{\|\Sigma_T^{1/2}(C-I)\bbeta_T^* \|^2_2 + \frac{1}{n_S} \Trace(C M C^\top\Sigma_T) \right\}\\
177
+ = & \min_{C} \left\{ \|(C-I)^\top\Sigma_T (C-I)\|_2 r^2 + \frac{1}{n_S} \Trace(C M C^\top \Sigma_T) \right\}
178
+ \end{align*}$$ With $\hat\bbeta_{\text{LS}}$ computed beforehand, one could first estimate $M$ by letting $\hat M:=\frac{1}{n_S} \sum_{i} \Sigma_T^{-1} \frac{p_T^2(\bx_i)}{p_S^2(\bx_i)} (y_i- \bx_i^\top \hat \bbeta_{\text{LS}} )^2\bx_i\bx_i^\top \Sigma_T^{-1}$. Therefore our estimator is $\hat \bbeta_{\text{MM}}\leftarrow \hat{C}\hat\bbeta_{\text{LS}}$, where $\hat C$ solves $$\begin{align}
179
+ \label{eqn:best_linear_estimator_with_approx_error}
180
+ \hat C &\leftarrow \arg\min_{\tau, C} \left\{ r^2 \tau + \frac{1}{n_S} \Trace( C \hat M C^\top \Sigma_T ) \right\} \\
181
+ \notag
182
+ & \text{ s.t. } (C-I)^\top \Sigma_T (C-I) \preceq \tau I.
183
+ \end{align}$$
184
+
185
+ ::: claim
186
+ []{#claim:nonlinear_form label="claim:nonlinear_form"} Let $\cB=\{\bbeta| \|\bbeta\|\leq r \}$, and $f^*\in \cF$ is some compact symmetric function class: $f\in \cF\Leftrightarrow -f\in \cF$. Then linear minimax estimator is of the form $C\hat \bbeta_{\text{LS}}$ for some $C$. When $\hat C$ solves Eqn. [\[eqn:best_linear_estimator_with_approx_error\]](#eqn:best_linear_estimator_with_approx_error){reference-type="eqref" reference="eqn:best_linear_estimator_with_approx_error"}, $L_{\cB}(\hat \bbeta_{\text{MM}})$ asymptotically matches $R_L(\cB)$, the linear minimax risk.
187
+ :::
188
+
189
+ By reducing from $\by_S$ to $\hat\bbeta_{\text{LS}}$ we eliminate $n_S-d$ dimensions, and this claim says that $X_S^\top\by_S$ is sufficient to predict $\bbeta^*_T$. We note that $f^*$ is more general than a linear function, and therefore the lower bound could only be larger than $R_N(\cB)$ defined in the previous section.
190
+
191
+ Even though estimating $p_T(\bx)/p_S(\bx)$ might be sample inefficient, it only involves unlabeled data, and therefore instance-weighting algorithms have attracted substantial prior study, as discussed in the related work section. Practical ways to estimate the density ratio include separately estimating $p_T$ and $p_S$ [@lin2002support; @zadrozny2004learning], kernel mean matching (KMM) [@huang2006correcting], and minimizing some common divergence between the weighted source distribution and the target distribution [@sugiyama2008direct; @sugiyama2012density; @uehara2016generative; @menon2016linking; @kanamori2011f]. We propose another simple algorithm that is very convenient to use.
192
+
193
+ We conduct regression on the data samples $(\bx,y)\sim q(\bx,y)$ where $q_Y(y)$ is Bernoulli($\frac12$)[^8] and $q_{X|Y}(\bx|y=1)= p_T$, $q_{X|Y}(\bx|y=0)= p_S$. Empirically, we will concatenate $X_S$ and $X_U$ to form the input data and stack $\textbf{0}\in \R^{n_S}$ and $\textbf{1}\in \R^{n_U}$ as the target vector $\by$.
194
+
195
+ ::: proposition
196
+ []{#prop:estimate_density_ratio label="prop:estimate_density_ratio"} The optimal function that solves $\alpha\leftarrow \argmin_f \E_{\bx,y\sim q} (f(\bx)-y)^2$ satisfies: $\alpha(\bx) = \frac{p_T(\bx)}{p_S(\bx)+p_T(\bx)}$.
197
+ :::
198
+
199
+ Therefore with proper transformation[^9] on $\alpha$ one could get the importance weights. In practice, one might be flexible on choosing the function class $\cF$ for estimating $\alpha$ and sample complexity will be bounded by some standard measure of $\cF$'s complexity, e.g., Rademacher or Gaussian complexity [@bartlett2002rademacher]. Unlike KMM, this parametrized estimation applies to unseen data $\bx$ which makes cross-validation possible.
200
+
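+ As a concrete illustration, here is a minimal sketch of this estimator in Python. The ridge regressor standing in for the function class $\cF$, the clipping constant, and the class-prior correction are illustrative choices rather than part of the specification above; the final transformation uses $p_T/p_S = \alpha/(1-\alpha)$, which follows from Proposition [\[prop:estimate_density_ratio\]](#prop:estimate_density_ratio){reference-type="ref" reference="prop:estimate_density_ratio"}.
+
+ ```python
+ import numpy as np
+ from sklearn.linear_model import Ridge
+
+ def importance_weights(X_S, X_U, eps=1e-3):
+     """Estimate p_T/p_S at the source points by regressing a
+     source-vs-target indicator with squared loss."""
+     X = np.vstack([X_S, X_U])
+     y = np.concatenate([np.zeros(len(X_S)), np.ones(len(X_U))])
+     f = Ridge(alpha=1.0).fit(X, y)              # f approximates alpha(x)
+     a = np.clip(f.predict(X_S), eps, 1 - eps)   # keep the ratio finite
+     w = a / (1.0 - a)                           # p_T/p_S = alpha/(1-alpha)
+     # if |X_S| != |X_U|, correct for the empirical class prior
+     return w * (len(X_S) / len(X_U))
+ ```
+
+ Because the estimate is a parametrized function of $\bx$, it applies to unseen data, which is what makes the cross-validation mentioned above possible.
+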
201
+ The general setting of transfer learning in linear regression involves both model shift and covariate shift. Namely, the generative model of the labels might be different: $\by_S= X_S\bbeta_S^* + \bz_S$, and $\by_T = X_T \bbeta_T^* + \bz_T$. Denote by $\bdelta := \bbeta_S^*- \bbeta_T^*$ the model shift. We are interested in the minimax linear estimator when $\|\bdelta\|\leq \gamma$ and $\|\bbeta_T^*\|\leq r$. Thus our problem becomes finding the minimax estimator of $\bbeta^*_T\in \cB = \{\bbeta|\|\bbeta\|\leq r \}$ from $\by_S,\by_T$.
202
+
203
+ First consider a sufficient statistic $(\bar \bbeta_{S}, \bar \bbeta_{T})$ for $(\bbeta_T^*,\bdelta)$. Here $\bar \bbeta_{S} = \hat \Sigma_S^{-1} X_S^\top \by_S/n_S\sim \cN(\bbeta_T^*+\bdelta, \frac{\sigma^2}{n_S}\hat \Sigma_S^{-1})$, and $\bar \bbeta_T = \hat \Sigma_T^{-1} X_T^\top \by_T/n_T \sim \cN(\bbeta_T^*, \frac{\sigma^2}{n_T}\hat \Sigma_T^{-1})$. Then consider the best linear estimator on top of it: $\hat \bbeta = A_1 \bar \bbeta_S + A_2 \bar \bbeta_T$. Write $\Delta = \{\bdelta|\|\bdelta\|\leq \gamma \}$ and $L_{\cB,\Delta}(\hat \bbeta):= \max_{\bbeta_T^*\in\cB,\bdelta\in \Delta } \|\Sigma^{1/2}_T(\hat \bbeta - \bbeta_T^*)\|^2$.
204
+
205
+ $$\begin{align}
206
+ \notag
207
+ & R_L(\cB,\Delta) := \min_{\hat \bbeta=A_1\bar \bbeta_S+A_2\bar \bbeta_T} L_{\cB,\Delta}(\hat \bbeta) \\
208
+ %\notag
209
+ %= & \min_{A_1,A_2} \max_{\|\bbeta_T^*\|\leq r, \|\bdelta\|\leq \gamma} \left\{\|\Sigma_T^{1/2}((A_1+A_2-I)\bbeta_T^* + A_1\bdelta \|^2_2 + \frac{\sigma^2}{n_S}\|\hat \Sigma_S^{-1/2} A_1\|_F^2 + \frac{\sigma^2}{n_T}\|\hat \Sigma_T^{-1/2} A_2\|_F^2 \right\}\\
210
+ \notag
211
+ \leq & \min_{A_1,A_2} \max_{\|\bbeta_T^*\|\leq r, \|\bdelta\|\leq \gamma} \left\{2\|\Sigma_T^{1/2}(A_1+A_2-I)\bbeta_T^*\|^2 \right.\\
212
+ \label{eqn:am-gm_inequality}
213
+ & \left. + 2\|\Sigma_T^{1/2}A_1\bdelta \|^2 + \frac{\sigma^2}{n_S}\Trace(A_1 \hat \Sigma_S^{-1} A_1^\top) \right. \\
214
+ & \left. + \frac{\sigma^2}{n_T}\Trace(A_2 \hat \Sigma_T^{-1} A_2^\top) \right\} \tag{AM-GM} \\
215
+ \notag
216
+ = & \min_{A_1,A_2} \left\{2\|\Sigma_T^{1/2}(A_1+A_2-I)\|_2^2 r^2 + 2\|\Sigma_T^{1/2}A_1\|^2_2\gamma^2 \right.\\
217
+ \notag & \left. + \frac{\sigma^2}{n_S}\Trace(A_1 \hat \Sigma_S^{-1} A_1^\top) + \frac{\sigma^2}{n_T}\Trace(A_2 \hat \Sigma_T^{-1} A_2^\top) \right. \\
218
+ &\left. =: r_{\cB,\Delta}(A_1,A_2) \right\}.
219
+ \end{align}$$ Therefore we optimize over this upper bound and reformulate the problem as a convex program: $$\begin{align}
220
+ \notag
221
+ (\hat A_1, \hat A_2) & \leftarrow \argmin_{A_1,A_2, a, b} \left\{ 2a r^2 + 2b\gamma^2 \right.\\
222
+ \notag
223
+ & \left. + \frac{\sigma^2}{n_S}\Trace(A_1 \hat \Sigma_S^{-1} A_1^\top) + \frac{\sigma^2}{n_T}\Trace(A_2 \hat \Sigma_T^{-1} A_2^\top) \right\}\\
224
+ \notag
225
+ \text{s.t.}& (A_1+A_2-I)^\top \Sigma_T(A_1+A_2-I) \preceq a I,\\
226
+ \label{eqn:convex_program_changed_beta}
227
+ & A_1^\top \Sigma_T A_1\preceq bI.
228
+ \end{align}$$ Our estimator is given by: $\hat \bbeta_{\text{MM}} = \hat A_1\bar \bbeta_S + \hat A_2 \bar \bbeta_T.$ Since $\hat \bbeta_{\text{MM}}$ is a relaxation of the linear minimax estimator, it is important to understand how well $\hat \bbeta_{\text{MM}}$ performs on the original objective:
229
+
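+ The same recipe as before turns this program into a few lines of CVXPY; a sketch, with the quadratic matrix constraints rewritten as squared spectral norms, and all names illustrative:
+
+ ```python
+ import numpy as np
+ import cvxpy as cp
+ from scipy.linalg import sqrtm
+
+ def solve_minimax_A(Sigma_T, Sig_S_hat, Sig_T_hat, sigma2, n_S, n_T, r, gamma):
+     d = Sigma_T.shape[0]
+     S_half = np.real(sqrtm(Sigma_T))
+     # tr(A Sig^{-1} A^T) = ||A Sig^{-1/2}||_F^2
+     S_s = np.real(sqrtm(np.linalg.inv(Sig_S_hat)))
+     S_t = np.real(sqrtm(np.linalg.inv(Sig_T_hat)))
+     A1, A2 = cp.Variable((d, d)), cp.Variable((d, d))
+     a, b = cp.Variable(nonneg=True), cp.Variable(nonneg=True)
+     obj = (2 * a * r**2 + 2 * b * gamma**2
+            + sigma2 / n_S * cp.sum_squares(A1 @ S_s)
+            + sigma2 / n_T * cp.sum_squares(A2 @ S_t))
+     cons = [cp.square(cp.norm(S_half @ (A1 + A2 - np.eye(d)), 2)) <= a,
+             cp.square(cp.norm(S_half @ A1, 2)) <= b]
+     cp.Problem(cp.Minimize(obj), cons).solve()
+     return A1.value, A2.value
+ ```
+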
230
+ ::: claim
231
+ []{#claim:beta_change_relaxed_loss label="claim:beta_change_relaxed_loss"} $R_L(\cB,\Delta)\leq L_{\cB,\Delta}(\hat \bbeta_{\text{MM}})\leq 2R_L(\cB,\Delta)$.
232
+ :::
233
+
234
+ Finally we show that, with this relaxation, we still achieve a near-optimal estimator even among all nonlinear rules.
235
+
236
+ ::: theorem
237
+ []{#thm:27minimax_model_shift label="thm:27minimax_model_shift"} When $\Sigma_T$ commutes with $\hat\Sigma_S$, it satisfies: $$\begin{align*}
238
+ L_{\cB,\Delta}(\hat \bbeta_{\text{MM}}) := & \max_{\bbeta_T^*\in \cB,\bdelta \in \Delta} \|\Sigma_T^{1/2}(\hat \bbeta_{\text{MM}} - \bbeta^*_T)\|^2\\
239
+ \leq & 27 R_N(\cB,\Delta).
240
+ \end{align*}$$ Here $R_N(\cB,\Delta):= \min_{\hat \bbeta(\by_S,\by_T)}\max_{\bbeta_T^*\in \cB,\bdelta \in \Delta} \|\Sigma_T^{1/2}(\hat \bbeta - \bbeta^*_T)\|^2$ is the minimax risk.
241
+ :::
242
+
243
+ We defer the complete proof to the appendix. The main proof technique is to decompose the problem into 2-dimensional sub-problems that admit closed-form solutions and whose minimax risk can be lower-bounded with Le Cam's two-point lemma. We include the proof sketch here:
244
+
245
+ ::: proof
246
+ *Proof sketch of Theorem [\[thm:27minimax_model_shift\]](#thm:27minimax_model_shift){reference-type="ref" reference="thm:27minimax_model_shift"}.* For ease of understanding, we provide a simple proof sketch when $\Sigma_S=\Sigma_T$ are diagonal. We first define the hardest hyperrectangular subproblem. Let $\cB(\btau)=\{\bbeta:|\beta_i| \leq \tau_i\}$ be a subset of $\cB$ and similarly for $\Delta(\bzeta)$. We show that $R_L(\cB,\Delta)=\max_{\btau\in \cB,\bzeta\in\Delta} R_L(\cB(\btau),\Delta(\bzeta))$, and clearly $R_N(\cB,\Delta)\geq \max_{\btau\in \cB,\bzeta\in\Delta} R_N(\cB(\btau),\Delta(\bzeta))$. Meanwhile we show that when the sets are hyperrectangles the minimax (linear) risk can be decomposed into 2-d problems: $R_L(\cB(\btau),\Delta(\bzeta))=\sum_i R_L(\tau_i,\zeta_i)$. Each $R_L(\tau_i,\zeta_i)$ is the linear minimax risk of estimating $\beta_i$ from $x\sim \cN(\beta_i+\delta_i,1)$ and $y\sim \cN(\beta_i,1)$ where $|\beta_i|\leq \tau_i$ and $|\delta_i|\leq \zeta_i$. This 2-d problem has a closed-form solution for the linear risk, and the minimax risk can be lower-bounded using Le Cam's two-point lemma. We show $R_L(\tau_i,\zeta_i)\leq 13.5 R_N(\tau_i,\zeta_i)$ and therefore: $$\begin{align*}
247
+ \frac{1}{2}L_{\cB,\Delta}(\hat\bbeta_{\text{MM}})& \overset{\text{Claim }\ref{claim:beta_change_relaxed_loss}}{\leq} R_L(\cB,\Delta)\\
248
+ \overset{\text{Lemma } \ref{lemma:relate_to_worst_hyperrectangle}}{=}&\max_{\btau\in \cB,\bzeta\in\Delta} R_L(\cB(\btau),\Delta(\bzeta))\\
249
+ \overset{\text{Prop }\ref{proposition:decompose}.a}{=} &\max_{\btau\in \cB,\bzeta\in\Delta} \sum_i R_L(\tau_i,\zeta_i) \\
250
+ \overset{\text{Lemma }\ref{lemma:1d_bound_with_beta_change}}{\leq}& \max_{\btau\in \cB,\bzeta\in\Delta} 13.5 \sum_i R_N(\tau_i,\zeta_i) \\
251
+ \overset{\text{Prop }\ref{proposition:decompose}.b}{=} & 13.5 \max_{\btau\in \cB,\bzeta\in\Delta} R_N(\cB(\btau),\Delta(\bzeta))\\
252
+ \leq & 13.5 R_N(\cB,\Delta).
253
+ \end{align*}$$ ◻
254
+ :::
255
+
256
+ We studied in depth the minimax linear estimator for linear regression under various distribution shift settings. We investigated the optimal linear estimators under covariate shift for linear models in unsupervised and supervised domain adaptation settings, with no or scarce labeled data from the target distribution. For nonlinear models with approximation error, we also introduced the minimax linear estimator together with an easy-to-use density-ratio estimation method. We further explored moderate model shift in the linear setting. Our estimators achieve near-optimal worst-case excess risk measured on the target domain and, in some circumstances, are within a constant factor of the minimax risk among all nonlinear rules. The significant improvement of our estimators over ridge regression is demonstrated by a theoretical separation result and by empirical validation, even in the average case with random parameters.
257
+
258
+ In future work, we will extend our algorithm to classification problems under distribution shift and apply the algorithms to fine-tuning the last-layer of a deep network.
2110.05448/main_diagram/main_diagram.drawio ADDED
@@ -0,0 +1 @@
1
+ <mxfile host="app.diagrams.net" modified="2021-09-26T14:07:38.930Z" agent="5.0 (X11)" etag="FqJ_K2DFnXGKn9Tz6QgE" version="15.3.3" type="browser"><diagram id="WdX2H-aWHXAryF2v7st3" name="Page-1">7Vptc5s4EP41TD4lAwL88tEvcXs3vU6m6d21H2WQjaYCuUJ+y6+/lRAGbOy4MY5zjZPMBD2SFrHP7rJaYbmDePVB4Fn0Fw8Js5Adrix3aCHkeAhZ6s8O1xnSbnsZMBU0NIMK4JE+EQPaBp3TkKSVgZJzJumsCgY8SUggKxgWgi+rwyacVe86w1NzR7sAHgPMyM6wf2koI4MiuzT8I6HTKL91K++JcT7aAGmEQ74sQe695Q4E5zK7ilcDwpT2csVk80Z7ejcrEySRx0zo/GxN8OfxsP3whPyO8/c/4/X3WyNlgdncPLGFWgzk9SccxMKq5droovVzzvOO21Qz1YMByJutik64mpr/Wsp4A/h9klj+MO+AdY63BwOW3TWHUWUBSPB5EhL1OA50LyMqyeMMB6p3CeYHWCRjZronlLEBZ1zouYD1h6PRRmpZbbkOiJBkVYKMGj8QHhMp1jAk720bSo1R+6a5LFlIx2BRyTg8g2FjlNON5II2uDDM/QKLaC+LOzo+kVa/PxFVFquM1ZHbJIv+aDQ4E4uOfWka3aszNkCjjy5Mo3f1xtNphGe+MI3+1RsboNH3Lkxj6+qNp9Po2pemsf1evHE06sLPmWj0/QvT2Hkv3tjtDgbnCqqee+lMtVtD45bySBL21AYcWglPAOyHOI022ixpTuEPWEoiEo0g2wU0lYL/2Gy63UOaJGG+id+jx5Ke/Bo15ZggDEu6qG7963Rn7vDAqTbRVZ6yVFlyvDu/KiPlcxEQM628Vd+S5LaeFSWxmBK5I0qzuXnylxOc118aj7czweOZPDaQgoPIqrFkZpE7mrGssu8ZCDM6VeYUgLEQwPvK3WiAWc90xDQM1W1q/brwfLsZF/a2Nimd3ezWq7FMdC4Hds5V+ElxPIN575xfz9uN0K/Lb11JaDtCQ9h8NE3Cxnx5XwB9DUBHxAV9AgoxU2AR0wOG05QGdeSVYzZZUfnN9Krr70rlEMuy1nBlGNCNdd5IQAPfyo3SLNUspunW+jkas7B7SFum8pIF1UMD22/qjbNJBDbviRe+cPytd5fT2jLMc79uTi99OTaCeKQD0XYq2N6fBb7JeKSWaI5C4KmaCVDbKQXq7O7oaiNU64BVnhai9tfJjuXc9fZQPiHL2zTiSkiWcNBk+r+1AddrxgS67YoFbHy+ZAEbK6kLVs1bwOkltv0WQBMi58mVd4TaebL3dojfX5QL6eKo5NOpJp92zkEBMjKR++s9R2e5znFVoxwQhysMzxUj9ONX0atGfheNHKws2n/caMeZzUUKTqGyVkYh7Vpr95fREq9TlaJGEC+mkbpaUhgAFzFRHxak2qGl6uEJW+uhNNUtJS0UBMfZctd8bqkvJmKFS87vmn3eU6XYdYqDv69fep8fP/W+3isV/mJ99iBcJqGmbHis9JOf+0/FR3xjDVyr12UcbFWbAZnrT2pwoshUH8MAdxGJqW7zcRqAwVhoUIxhZgzsxLMpCrvh8ViQbBxekMBIJjHRqw5KtxJ6Bb2FgrIlS2UwC0q0eDyHjd/dmZVxIHb8rnHi9VV21eRrarLBw4X7lvq1qjvFjlVkolrLkMCCQMiAP+nW0EEF9MVkeU5D9S9/+9zX2y1w+t2aHNNp++dKMq9Hhi8hslPdLDg1lczXPWtyrmeGL9nsd98cj3WHhi/P9N767h11miGy47eqRNZ8bdrQ0QI0i8+Rs9Ju8VW3e/8f</diagram></mxfile>
2110.05448/main_diagram/main_diagram.pdf ADDED
Binary file (23.2 kB). View file
 
2110.05448/paper_text/intro_method.md ADDED
@@ -0,0 +1,5 @@
1
+ # Introduction
2
+
3
+ Recent work on generative pre-training has shown that with sufficient data and scale [@DBLP:journals/corr/abs-2001-08361; @DBLP:journals/corr/abs-2010-14701], large language models (LMs) can learn a diverse suite of tasks without explicit supervision [@radford2019language], and that even stronger performance on these tasks can be elicited using few-shot demonstrations [@DBLP:conf/nips/BrownMRSKDNSSAA20]. While few-shot prompting is flexible and enables strong performance on a diverse suite of NLP tasks to be coaxed out of generatively pre-trained LMs without further fine-tuning, its benefits are most pronounced with larger models, with commensurate training, inference, compute, and data costs. Furthermore, the very generality of the pre-training objective which enables multi-task learning can produce LMs with more knowledge than is immediately apparent, requiring carefully designed prompts to bring out fully. The desire to unlock and amplify these latent abilities while also reducing the cost of few-shot prompting motivates our present work, which allows us to continue fine-tuning our models, obtaining more performance from smaller models and pushing our larger models even further, without resorting to few-shot prompting at test time or any additional supervision at train time.
4
+
5
+ We target the domain of unsupervised neural machine translation (NMT), which typically involves *bootstrapping* a weak translation model before amplifying its translation ability via *backtranslation*. Recent work in unsupervised NMT has been dominated by large encoder-decoder architectures where the bootstrap is implemented by denoising/autoencoding tasks (*e.g.*, multilingual Cloze [@DBLP:conf/naacl/DevlinCLT19; @DBLP:conf/nips/ConneauL19], masked-span prediction [@DBLP:journals/jmlr/RaffelSRLNMZLL20; @DBLP:conf/naacl/XueCRKASBR21], reconstruction from corrupted inputs [@DBLP:conf/emnlp/WangZJLL19; @DBLP:journals/tacl/LiuGGLEGLZ20]) intended to produce strong encoders and aligned multilingual representations for decoding. In our present work, we show that generative language modeling alone can implement the entire unsupervised NMT pipeline, and derive state-of-the-art unsupervised NMT systems using only generatively pre-trained language models. We implement the bootstrap by first sampling a small number of zero-shot translations from GPT-3. These are then used as few-shot prompts to sample a larger dataset of synthetic translations. The few-shot prompts are then discarded and the generated samples are *distilled* by fine-tuning the model on these synthetic data in the zero-shot format. This produces a language model aligned to our translation format and amenable to large-scale backtranslation. By using our method to leverage GPT-3's zero-shot translation capability, we achieve a new state-of-the-art in unsupervised translation on the WMT14 English-French benchmark, attaining a BLEU score of 42.1.
2112.01565/main_diagram/main_diagram.drawio ADDED
@@ -0,0 +1 @@
 
+ <mxfile host="app.diagrams.net" modified="2021-02-03T16:13:36.671Z" agent="5.0 (X11)" etag="x_0fwjQdnNQU5ySztDmV" version="14.2.9" type="google"><diagram id="Q31zKPsGl6mWChIkTD9a" name="Page-1">7Ztbc9o4FIB/jWeyD2Tku/2YGJJ0N92mJe02fckotgBtjEWEuO2vXxnLgGwVCLGBTJhJG+tIluRzvnN0jWYG/ek1hYPeZxKhWDNANNXMpmYYuu3a/FcqmWUSX3czQZfiSBRaCtr4PySEQEhHOEJDqSAjJGZ4IAtDkiQoZJIMUkomcrEOieVWB7CLSoJ2COOy9B8csV4m9WywlN8g3O3lLetA5PRhXlhUMezBiEwy0byM2dLMgBLCsqf+NEBxqrxcL1lFV7/JXXSMooRt88Jf93fs5aH7qfXypfd83bzG/3773sjVPIbxSHyx6C2b5SqgZJREKK1F18zLSQ8z1B7AMM2dcKNzWY/1Y5HdwXEckJjQ+btmxwtRGHL5kFHyjPKchCQoLUwSdgX7OE7hCEh/MGKI8sZTjGgiCggmdK6wy/I3CzWMEWVouiISOrhGpI8YnfEiItcAZvaKANLwhAomS/Mu6OutmNYEAmUokOou6l5qnT8Ixb/GCLrCCE7MhAIkazgvI5JnNIZz1VzwArozmC4z+VM3/R1QzHDIs/9GbELoc14r72VWcVasZHGuSiabVW2+FVsLEYxxlxuuGXIDcVOal6lhMHenC5HRx1GUNqPkaEkaOBQdFji3ZT78Mh+6peBDuF/1eHj1uijSIxu5x+SilqUf0EVv4fP3qDECN3bbmDajq9sbz2vYb/dQS+WhmmHxn+tHzb0cpv8xzW2mP3Pxtu76pgDd6RiKAM1zIufJsZ0DIWA6QELAUyCgm3YZAacCL1QS4NZEwD2FybBDaH+u01YSplrdh+UjiLyO0vJO6KGnzi6Wp4RBhkkiIngVINi+BIKrCsd7BUEVjjNrRXj8lkhQBU06fwEEJBmTeJQaAqafcwtniA7zBp5osckSZQvx/IveCXs1RB0XHBo2v6ao02YwfObGMwB/YqgzmnPSvv/8XiNPDdZ39ENbv4qFgdL8V8EHtnN5jm8f3M9182TpGhYSrnd8lq5rKfGhLV32aevw0dup0dL8s/hX8eGbdFgfTj+w6ctOfgSm//0ioYr9As0OBjjdLuAPrIcY1NzmGXxkmhsMH9kf+9s9iGzkRZYKCc94Mp1D7R6UkTCNPSLxw39ojH99mvxqheYTmHVN/7bZUAWDgiVQEl2kRxY8FcZwOMShrHyuEDr7yRNprBPJh9W8Zvr9YJGa5akpZiuv8dTDSs7ypTSRv1Od2YZkREO0xSyXQdpFbIuhE0XSmU0ZgxUj24ptwlxGUQwZHssnPSrDixbuCJ777mKbUj5JsOwCO9mXi7eW+JQr8gsVgUJFmWZKFc05XHz27mhuscH8UdF0tkXTOzI05R1029wZTbkiy9svmqr9jxOaqyvGzWg6JzTfiKb61LbuKV5MunxC1+L/3vdxUPWHArYh79Tt9XRICYNq++a1cWqHeLMS2zzgrEY3cO773qYIl6buEMVcB+lhfY0hbJ0HbYxg/lEFMNuV4fOLs7WtA1hxv3nLAMYpgrOVYoO0wHBNhz1PbscHG/rlryvPH7IeVBpNrUoHegN42muGepUjHM/4fzP50/piRK1RdGHf/XQfvPtu9506j+EUncfb0XkKQ4BTnEZU5Dzce17lPOZ6Z6vHeSo4JVTeH7sIGUnxPV0f23rjqXBzSXVbQeVeVVweWxcmjvO2wupU9+y+sXHnsvZ7Ce9j+qv7B5z+Blf9cYA9q/HN+0ofby7xwyRo6MbB57+vWttvNeyuG043r7r1oxp3LaMwLAF/t3HXBHJFi9lwxeOuZbhyO5vG3WK/Kh531dhXO2utBuHNaJrHjaa+43rKdqxzIB/7OMA6t6yaAF0LXLl3lnMAQOs9h/yh2a35WeTZ6dhRU11C2Ouxo5KAvAMfbwf9SMKbCdxzV3Z9a9cIp6jLrG/XW82TakFx4mmfw6V7bsgMuDvvQJbrcracz1XGUxU75yeequXJr44ntziy1c3TAafkJ54EA5YhMeAVx6jX8FSsq3jJZmeeeHL5x9dZ8eWfsJut/wE=</diagram></mxfile>
2112.01565/main_diagram/main_diagram.pdf ADDED
Binary file (28.3 kB). View file
 
2112.01565/paper_text/intro_method.md ADDED
@@ -0,0 +1,95 @@
1
+ # Introduction
2
+
3
+ Graphs are the natural abstraction of complex correlations found in numerous domains including social media [\[1\]](#page-8-0), communications [\[2\]](#page-8-1), transportation and telematics [\[3\]](#page-8-2), [\[4\]](#page-8-3), and medicine discovery [\[5\]](#page-8-4). Consequently, different types of graphs have been a main topic in many scientific disciplines such as computer science, mathematics, engineering, sociology, and economics. The study of the interconnectedness of graphs can lead to inferring local/global information as well as discovering latent structure, thus benefiting various downstream applications. However, the complexity of the graphs studied in modern applications keeps increasing because of the explosive growth of the Internet and of our computing capabilities, which makes the exploration, analysis, and utilization of a graph very inefficient.
4
+
5
+ Thus, the topic *graph sparsification* has emerged in the past two decades [\[6\]](#page-8-5), [\[7\]](#page-8-6), where the objective is to prune edges from a graph to produce a sparsified graph while preserving user-defined metrics in query evaluation or knowledge inferring. As an example, commonly adopted metrics include the graph spectrum and the effective resistance of edges [\[8\]](#page-8-7), [\[9\]](#page-8-8). Sparsification techniques developed w.r.t. these metrics have been applied to domains such as power grid management [\[10\]](#page-8-9), [\[11\]](#page-8-10), integrated circuit simulation [\[12\]](#page-8-11), and influence maximization [\[13\]](#page-8-12), [\[14\]](#page-8-13).
6
+
7
+ Nevertheless, most graph sparsification techniques that exist to date are sampling-based [\[15\]](#page-8-14). While effective, they all introduce high computational complexity (due to the involvement of many matrix operations) and lack the flexibility to preserve different graph properties across applications (since the sampling technique needs to be tailored for each application), e.g., approximate graph analysis [\[16\]](#page-8-15)–[\[19\]](#page-8-16), privacy preservation [\[20\]](#page-8-17), [\[21\]](#page-8-18), and representation learning [\[22\]](#page-8-19). Thus, a general, flexible graph sparsification technique for various reduction objectives and application domains is highly desired.
8
+
9
+ We present SparRL, a general graph sparsification framework empowered by deep Reinforcement Learning (RL) that can be applied to any edge sparsification task with a customized reduction goal. Consider the example shown in Figure [1](#page-1-0): by setting modularity preservation as the edge reduction objective function, SparRL can prune a user-defined number of edges from the original graph and still preserve the substructure modularity. To improve the learning efficiency and convergence rate of SparRL, we randomize the initial state by randomly sparsifying the graph before each training episode, use Double DQN [\[23\]](#page-8-20) with Prioritized Replay [\[24\]](#page-8-21), and employ $\epsilon$-greedy exploration for searching for the optimal pruning strategy.
10
+
11
+ We test SparRL using a wide range of graph datasets and metrics, including PageRank, community structure, and pairwise shortest-path distance. In these experiments, SparRL outperforms all baselines in preserving PageRank, community structure, and single-pair shortest-path (SPSP) distance at a variety of edge-kept ratios.
12
+
13
+ In summary, the contributions of SparRL are the following:
14
+
15
+ - A novel reinforcement learning-based general-purpose graph sparsification framework with modular, task-adaptive, and easily modifiable components;
16
+ - Task flexibility because of the plug-in reward function;
17
+ - Task scalability as SparRL's time complexity is independent of the size of a graph; and
18
+ - Simple-to-use time vs. performance trade-offs.
19
+
20
+ The source code of SparRL can be found at [https://github.com/rwickman/SparRL-PyTorch](https://github.com/rwickman/SparRL-PyTorch).
21
+
22
+ # Method
23
+
24
+ Given a T-step episodic task, at each timestep $t \in [1,T]$ , the RL agent uses its policy $\pi_{\theta}(a_t|s_t)$ to choose action $a_t$ based on state $s_t$ from the environment. Then, the environment responds with reward $r_t$ and next state $s_{t+1}$ . This sequential decision-making process is formulated as a Partially-Observable Markov Decision Process (POMDP) defined by the tuple $(\mathcal{S}, \mathcal{A}, P, R, \Omega, \mathcal{O}, \gamma)$ , where $\mathcal{S}$ is the state space, $\mathcal{A}$ is the action space, $P(s'|s,a)$ is the transition probability function, $R$ is the reward function, $\Omega$ is the set of observations, $\mathcal{O}$ is the observation probability function, and $\gamma$ is the discount factor. The objective of the RL agent is to find $\pi_{\theta}$ that can maximize the sum of discounted rewards, i.e., the return $R_t = \sum_{t'=t}^{T} \gamma^{t'-t} r_{t'}$ .
25
+
26
+ <span id="page-2-1"></span>![](_page_2_Figure_0.jpeg)
27
+
28
+ Fig. 2: The SparRL model architecture consists of the node encoder, edge encoder, and action-value head. The input to the model includes the subgraph $H_t$ , the degrees of the nodes $d_{H_t}$ , the ratio of edges still in the graph $\eta_t$ , and the one-hop neighborhood of the set of nodes in $H_t$ . The node encoder uses a GAT [42] on the one-hop neighborhood of each node embedding to create a new node embedding, which is then combined with its degrees and $\eta_t$ . The edge encoder combines each pair of nodes that represent an edge. The action-value head produces the Q-value for each edge.
29
+
30
+ Related to (PO)MDP is the concept of the Q-function $Q^{\pi}\colon \mathcal{S} \times \mathcal{A} \to \mathbb{R}$ , which describes the total expected reward by taking action $a$ in state $s$ and then following policy $\pi$ thereafter. The element that we want to obtain is the optimal Q-function $Q^*$ , from which we can trivially derive the optimal policy $\pi^*(s) = \arg\max_a Q^*(s,a)$ . One way to learn $Q^*$ is through Q-learning [43]:
31
+
32
+ $$Q_{t+1}(s_t, a_t) \leftarrow \alpha_t \left[ r_{t+1} + \gamma \max_{a'} Q_t(s_{t+1}, a') - Q_t(s_t, a_t) \right] + Q_t(s_t, a_t)$$
33
+
34
+ where $\alpha_t \in (0,1]$ is the step size. Accompanying the rise of deep learning, many variations of Q-learning have been developed, among which Deep Q-Network (DQN) [44] has gained popularity due to its success in playing Atari games. There exist several major improvements of DQN, including Double DQN [23], Prioritized Replay [24], and Dueling DQN [45]. In this work, we choose Double DQN and Prioritized Replay as components of SparRL since they bring significant improvements over DQN with small code changes.
35
+
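+ As a minimal illustration of the tabular update above (SparRL itself replaces the table with the deep network described later), a sketch in Python:
+
+ ```python
+ import numpy as np
+
+ def q_learning_step(Q, s, a, r, s_next, alpha=0.1, gamma=0.99):
+     """One tabular Q-learning update; Q is a (num_states, num_actions) array."""
+     td_target = r + gamma * np.max(Q[s_next])
+     Q[s, a] += alpha * (td_target - Q[s, a])
+     return Q
+ ```
+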
36
+ In this section, we first provide an overview of our approach, then detail its components and design rationale.
37
+
38
+ The aim of this work is to apply a reinforcement learning-based approach to find an edge-sparsified graph $G'=(V,E')$ , where $V$ represents the set of nodes in the graph and $E'$ represents the set of edges in the sparsified graph, that approximates the original graph $G=(V,E)$ , where $E$ is the set of edges in the original graph, as measured by some user-defined performance metric.
39
+
40
+ We treat this as an episodic task, where SparRL sequentially prunes edges from $G$ until $T$ edges have been removed. At each timestep $t$ , an edge-induced subgraph $H_t = (V_{H_t}, E_{H_t})$ , where $V_{H_t}$ represents the set of nodes and $E_{H_t}$ represents the set of edges in the subgraph at timestep $t$ , is constructed by sampling a subset of $|E_H|$ edges from the current sparsified graph's edges $E_t'$ , thus $V_{H_t} \subseteq V$ and $E_{H_t} \subseteq E_t'$ . Then, SparRL's action $a_t$ consists of choosing an edge to prune from $E_{H_t}$ .
41
+
42
+ This process of sampling and pruning an edge repeats for T timesteps to produce $G_T'$ . For ease of notation, we will remove the t subscript whenever we are not referring to an arbitrary timestep in an episode; for example, $|E_H|$ represents the subgraph length for all timesteps, rather than belonging uniquely to a timestep. We describe this process in Algorithm 1.
43
+
44
+ ```
45
+ input : G = (V, E), T \text{ (the number of edges to prune)}
+ output : the sparsified graph G' = (V, E')
+ G' \leftarrow \text{clone } G
+ for t = 1 to T do
+     H_t \leftarrow \text{Randomly sample a subgraph of } |E_H| \text{ edges from } G'
+     d_{H_t} \leftarrow \text{Degrees of nodes in } H_t
+     \eta_t \leftarrow \frac{|E_t'|}{|E|}
+     \mathcal{N}_t \leftarrow \text{One-hop neighborhood of nodes in } H_t
+     q\_values \leftarrow f_{SparRL}(H_t, d_{H_t}, \eta_t, \mathcal{N}_t)
+     a_t \leftarrow \arg\max_a q\_values
+     Prune edge a_t from G'
+ end for
+ return G'
46
+ ```
47
+
48
+ During training, we exploit the simplicity of the environment by allowing the initial state $s_1$ to be sampled from any state in the state space $\mathcal S$ . This is implemented as a preprocessing step, where before each episode, we randomly prune $T_p$ edges from G to produce the initial sparsified graph at the first timestep $G_1'$ . The number of edges to randomly prune is sampled from a discrete uniform distribution $T_p \sim \mathcal U(0, |E| - T_{max})$ , where $T_{max}$ is the maximum number of edges to prune each episode. The upper bound $|E| - T_{max}$ prevents pruning more than $|E|$ edges from the graph in total.
49
+
50
+ There are a few benefits to performing this preprocessing step. First, the size of the state space is equivalent to the cardinality of the power set of the edges $|\mathcal{P}(E)|$ , so for any reasonably sized graph it is essentially intractable to visit every state; this step provides sufficient coverage of the space by randomly sampling states to visit. Second, without this step, for SparRL to reach a particular state with an edge-sparsified graph $G'$ whose set of edges is $E'$ , it would first have to prune $|E|-|E'|$ edges; the preprocessing step removes this requirement, which enables more efficient training.
51
+
52
+ ```
53
+ input : G = (V, E), T_{max}, num\_episodes, |E_H|
+ for i = 0 to num\_episodes do
+     G' \leftarrow \text{clone } G
+     Sample T_p \sim \mathcal{U}(1, |E| - T_{max})
+     Randomly prune T_p edges from G'
+     Sample T \sim \mathcal{U}(1, T_{max})
+     for t = 1 to T do
+         H_t \leftarrow \text{Randomly sample a subgraph of } |E_H| \text{ edges from } G'
+         d_{H_t} \leftarrow \text{Degrees of nodes in } H_t
+         \eta_t \leftarrow \frac{|E_t'|}{|E|}
+         \mathcal{N}_t \leftarrow \text{One-hop neighborhood of nodes in } H_t
+         q\_values \leftarrow f_{SparRL}(H_t, d_{H_t}, \eta_t, \mathcal{N}_t)
+         a_t \leftarrow \text{Use } \epsilon\text{-greedy exploration to sample action from } q\_values
+         Prune edge a_t from G'
+         r_t \leftarrow R(G')
+         Save timestep trajectory (o_t, o_{t+1}, a_t, r_t) in replay buffer
+         Train f_{SparRL} on batch of trajectories from replay buffer
+     end for
+ end for
72
+ ```
73
+
74
+ Third, it makes exploration invariant to the behavior of the reinforcement learning policy. This can help prevent early convergence to local optima, as the states that it visits won't be entirely based on the current policy.
75
+
76
+ We describe the full training procedure in Algorithm 2. It differs from Algorithm 1 in that its objective is to train the network, using the preprocessing step just described along with other components detailed in later sections, as opposed to producing an edge-sparsified graph at a particular edge-kept ratio.
77
+
78
+ We formalize the task of sparsification as a POMDP that is solved using reinforcement learning. The state space $s_t \in \mathcal S$ is defined over every possible edge-sparsified graph $G'$ , and thus its edges $E'$ can be any element in the power set of the edges in the original graph; $E' \in \mathcal{P}(E)$ and $V$ is assumed to be fixed. The action space $a_t \in \mathcal A$ is defined over the set of edges that can be pruned from a given subgraph $H_t$ . The reward $r_t \in \mathbb{R}$ depends on the graph properties that the agent is encouraged to preserve, and thus will be discussed later in the experiments section. The transition probability function $P(s_{t+1}|s_t,a_t)$ is deterministic, as the next state $s_{t+1}$ is fully determined by the action $a_t$ . That is, when the edge referenced by $a_t$ is pruned, the next state is simply $s_{t+1} = G'_{t+1} = (V, E'_t \setminus a_t)$ . The observation probability function $\mathcal{O}$ is defined over every possible randomly sampled subgraph $H_t$ , and thus assigns a uniform probability to every possible element in this set. The set of observations $o_t \in \Omega$ is defined over what we provide as input into the policy of SparRL. This includes the subgraph $H_t$ , the degrees of all the nodes in the subgraph $d_{H_t}$ , the ratio of edges left in the graph $\eta_t = |E_t'|/|E|$ , and the one-hop neighborhood of the set of nodes in $H_t$ , which is $\mathcal{N}_t = \{(u, v) \in E | u \in V_{H_t} \text{ and } v \in V\}$ .
79
+
80
+ Now, we will provide justification for our input to the policy. The naive choice is to set $o_t = H_t$ , which would be a perfectly reasonable choice as input. However, this provides no context on the global state of the graph at the current timestep $G_t'$ beyond $E_{H_t} \subseteq E_t'$ . It's trivial to see that many different edge-sparsified graphs could contain this same subgraph. The degrees of the nodes in subgraph $d_{H_t}$ and the one-hop neighborhood $\mathcal{N}_t$ are used to inject local context into $o_t$ . The ratio of edges left in the graph $\eta_t$ provides some global context. All of these inputs assist the policy by reducing the set of candidate states for $s_t$ , because the policy can identify which states are consistent with these properties.
83
+
84
+ The discovery of the true $s_t$ that corresponds to the current $o_t$ is important, because it helps disambiguate what the true underlying optimal edge sparsification strategy would be in $o_t$ .
85
+
86
+ We use Double DQN [23] to represent the SparRL sparsification policy, which is parameterized by a deep neural network. The policy is trained over batches of trajectories sampled using prioritized replay [24].
87
+
88
+ The model architecture, shown in Figure 2, is composed of the node encoder, edge encoder, and action-value head. The node encoder first looks up the initial node embedding for all the nodes in the graph, which are trained jointly with the model. Then, the node encoder uses a GAT [42] that applies self-attention to the neighborhood of each node to produce a new node embedding. Each node embedding of the subgraph is then separately combined with its degrees, in-degree, and out-degree if the graph is directed, and the ratio of edges left in the graph $\eta_t$ . The edge encoder, for every edge, combines the two node embeddings that represent an edge to form an edge embedding. Finally, the action-value head outputs the q-values $Q(o_t, a_t^1) \dots Q(o_t, a_t^{|E_H|})$ , for each edge in the subgraph where $a_t^i$ for $i \in [1, |E_H|]$ gives the index of the edge in the subgraph. The GAT in the node encoder uses a single fully-connected layer with 1 unit for computing the attention coefficients, both the main parts of the node encoder and edge encoder consist of two fully-connected layers with 128 units each that are followed by LeakyReLU activation, and the action-value head consists of a single fully-connected layer with one unit.
89
+
90
+ Thus, the model approximates the Q-value function:
91
+
92
+ $$f_{SparRL}(H_t, d_{H_t}, \eta_t, \mathcal{N}_t) = Q(o_t, a_t^1), \dots, Q(o_t, a_t^{|E_H|}). \tag{1}$$
94
+
95
+ Each edge of the subgraph is independently run through the network, so the subgraph length $|E_H|$ is not constrained by the network. Therefore, any number of edges can be considered to be pruned at each timestep during test time.
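+
+ To make the architecture description concrete, the following is a heavily simplified PyTorch sketch of the three components. The layer sizes follow the text (two 128-unit LeakyReLU layers in each encoder, a 1-unit head); the single-layer attention over the one-hop neighborhood stands in for the GAT [42], and the tensor layouts are illustrative assumptions, not the released implementation.
+
+ ```python
+ import torch
+ import torch.nn as nn
+ import torch.nn.functional as F
+
+ class SparRLNet(nn.Module):
+     def __init__(self, num_nodes, emb_dim=64):
+         super().__init__()
+         self.node_emb = nn.Embedding(num_nodes, emb_dim)   # trained jointly
+         self.attn = nn.Linear(2 * emb_dim, 1)              # GAT-style coefficients
+         # node encoder: neighborhood embedding + 3 degree features + eta
+         self.node_enc = nn.Sequential(
+             nn.Linear(emb_dim + 4, 128), nn.LeakyReLU(),
+             nn.Linear(128, 128), nn.LeakyReLU())
+         # edge encoder: pair of node encodings -> edge embedding
+         self.edge_enc = nn.Sequential(
+             nn.Linear(2 * 128, 128), nn.LeakyReLU(),
+             nn.Linear(128, 128), nn.LeakyReLU())
+         self.q_head = nn.Linear(128, 1)                    # action-value head
+
+     def encode_node(self, v, neigh, deg, eta):
+         """v: node id, neigh: (k,) one-hop neighbor ids,
+         deg: (3,) = (degree, in-degree, out-degree), eta: scalar ratio."""
+         h_v, h_n = self.node_emb(v), self.node_emb(neigh)
+         pairs = torch.cat([h_v.expand_as(h_n), h_n], dim=-1)
+         a = torch.softmax(F.leaky_relu(self.attn(pairs)), dim=0)
+         h = (a * h_n).sum(dim=0)                           # attention-weighted sum
+         return self.node_enc(torch.cat([h, deg, eta.view(1)]))
+
+     def forward(self, edges, neighbors, degs, eta):
+         """edges: (|E_H|, 2) node-id pairs; returns (|E_H|,) Q-values."""
+         q = []
+         for (u, v), (nu, nv), (du, dv) in zip(edges, neighbors, degs):
+             hu = self.encode_node(u, nu, du, eta)
+             hv = self.encode_node(v, nv, dv, eta)
+             q.append(self.q_head(self.edge_enc(torch.cat([hu, hv]))))
+         return torch.cat(q)  # edges scored independently, so |E_H| is free
+ ```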
2112.05364/main_diagram/main_diagram.drawio ADDED
The diff for this file is too large to render. See raw diff
 
2112.05364/paper_text/intro_method.md ADDED
@@ -0,0 +1,126 @@
1
+ # Introduction
2
+
3
+ With transformer-based models [@transformer] dominating the leaderboard for many key NLP tasks like summarization [@liu-lapata-2019-text] and sentiment analysis [@sentiment_bert], their core multi-head self-attention mechanism has been thoroughly investigated recently. In particular, to explain why and how transformers work, researchers analyze the learnt self-attention matrices of language models or task-specific models (e.g., @raganato-tiedemann-2018-analysis [@vig-belinkov-2019-analyzing]), with @voita-etal-2019-analyzing, for instance, exploring the patterns of attention heads in neural machine translation during pruning. Instead of visualizing trained or fine-tuned models, in a recent work @li-etal-2021-t3 introduce a visual analytic framework, T^3^-Vis, to help researchers to better train and fine-tune transformer models, by providing valuable insights about the model's intrinsic properties and behaviours.
4
+
5
+ Meanwhile, a parallel line of research has shown that injecting linguistic/positional information into the attention matrices is useful for reducing the size of the model while keeping competitive results, and can even improve performance in some cases. This can be done by either enforcing fixed attention patterns, like @raganato-etal-2020-fixed do with positional attention in a machine translator, and @xiao-etal-2020-really with fixed discourse tree attention in a summarizer; or alternatively by guiding the attention weights through more flexible masking strategies, like @yang-etal-2018-modeling [@fan2021mask] do by using masks to enforce locality within a fixed window, and others have done to inject discourse information [@mihaylov-frank-2019-discourse], syntactical dependencies [@bai-etal-2021-syntax], and world knowledge [@liu2020k]. Interestingly, all the above strategies can also be applied to the original model through injecting the patterns into newly added attention heads using techniques such as Projected Attention Layers (PAL) [@stickland2019bert], in which case, while the size of the model is not reduced, the injected information will be more flexibly and effectively integrated into the original model.
6
+
7
+ In this paper, we propose and test a novel human-in-the-loop pipeline that, to the best of our knowledge, is the first attempt to synergize research on analyzing self-attention with work on injecting information into attention matrices. Initially, an NLP expert analyzes the attention heads of a transformer model using an interactive visual interface[^3][@li-etal-2021-t3] to identify potentially meaningful and useful patterns/relations. Then, those patterns are evaluated on the validation set to confirm their global relevance. Finally, the patterns found to be useful are applied back to the original model, making the resulting model more accurate and/or more interpretable, because those patterns become inherent and transparent properties of such a model. Besides, those patterns can also be applied to smaller models, in what can be regarded as a human-guided, interpretable knowledge distillation process. Typically, in knowledge distillation a small or focused model is obtained from a huge or general-purpose model by automatically distilling the most useful knowledge, with the resulting model, however, still being a black box, because it is often unclear what information has been distilled and what has not [@gou2021knowledge]. In contrast, in our pipeline, a human can find attention patterns that are useful for the task-specific model to make predictions, and then apply those to the smaller model, improving its performance, with fewer parameters as well as better interpretability.
8
+
9
+ In order to test the feasibility and potential benefits of our approach, we run a case study on the extractive summarization task, in particular using the popular BERTSum model [@liu-lapata-2019-text], and we find that: *(i)* For some of the important heads, the patterns they learn do have practical meaning, either lexical, local, or positional. For instance, matching tokens (i.e., the tendency to attend to other tokens with the same id) is an important clue for the summarization model. *(ii)* By applying the patterns back into the original model through PAL, the resulting model can achieve better task performance and stronger interpretability. *(iii)* Remarkably, the human-guided knowledge-distilled model performs much better than the vanilla transformer (baseline), and can be competitive even with fine-tuned BERT-distilled models, which, by distilling all aspects of the model, have an a priori substantial advantage over our technique that only distils attention patterns.
10
+
11
+ # Method
12
+
13
+ ![The generic pipeline, which contains two main parts - pattern extraction (middle) and pattern application (right), given a trained task-specific model (left).](figures/pipeline_v3.png){#fig:pipeline width="\\linewidth"}
14
+
15
+ In this section, we will briefly describe the proposed pipeline (Figure. [1](#fig:pipeline){reference-type="ref" reference="fig:pipeline"}). Specifically, given a trained model (the left part in Figure.[1](#fig:pipeline){reference-type="ref" reference="fig:pipeline"}), NLP experts are supposed to first extract important patterns on the visual interface[@li-etal-2021-t3] (middle part in Figure.[1](#fig:pipeline){reference-type="ref" reference="fig:pipeline"}) by following these three steps.\
16
+ *Step 1*: Estimate the importance scores for all the heads on the validation set, and find important heads that stand out\
17
+ *Step 2*: Recognize special patterns in the important attention heads\
18
+ *Step 3*: Evaluate and validate the pattern to confirm global relevance
19
+
20
+ Once the important patterns are identified, there are two common approaches - fixing and masking - to apply them as constraints to the attention matrices in transformer-based neural models (see §[\[sec:apply_patterns\]](#sec:apply_patterns){reference-type="ref" reference="sec:apply_patterns"}). The pipeline also recommends two scenarios to apply the patterns: the first one is to enhance the original model, while the second one is to train a new model in which the patterns are enforced. Additional potential ways to make use of the patterns are left as future work.
21
+
22
+ Although the multi-head self-attention mechanism in transformers allows the model to learn multiple types of relationships between input representations across a single hidden layer, the importance of the individual attention heads can vary depending on the downstream tasks. Motivated by @molchanov2019importance [@michel-2019-heads], we assume the importance of each head to be independent to avoid an NP-hard combinatorial search. In principle, the importance of an attention head $I(h)$ can be defined as the loss increment on the validation set when removing it during inference. $$\begin{equation}
23
+ \label{eq:leave-one-out}
24
+ I(h) = \sum_{(x, y) \in X} \bigg( L(y|x, H - \{h\}) - L(y|x, H) \bigg)
25
+ \end{equation}$$ where $L(y|x, H)$ is the loss on the example $x$ with label $y$ with all the heads $H$, while $L(y|x, H-\{h\})$ is the loss with all heads except $h$. However, this leave-one-out strategy for computing importance scores, requiring $|H|$ inference passes (on the validation set), is extremely time-consuming and not scalable to large datasets. Thus, following previous works [@michel-2019-heads; @molchanov2019importance], we explore approximate importance score estimation methods, which only require a single forward and backward pass for each example. As different methods are suitable for different tasks, we have considered the following three popular estimation methods.
26
+
27
+ **Head Sensitivity**: Proposed by @michel-2019-heads, a mask variable is applied to the output of each individual attention head, where its gradient sensitivity is used as a proxy score for the importance.
28
+
29
+ **Layer-wise Relevance Propagation (LRP)**: Designed to compute the contributions of individual pixels for image classification [@bach2015pixel]. LRP is adapted by @voita-etal-2019-analyzing for the transformer model to estimate the attention head relevance to the prediction.
30
+
31
+ **Taylor Estimation**: @molchanov2019importance propose using the Taylor expansion to estimate the error induced from removing a parameter from the model. In our work, we use the first-order expansion to avoid the overhead from computing the Hessian, where the gradient with respect to validation loss is summed over all parameters of an attention head to estimate its importance.
32
+
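+ A minimal PyTorch sketch of the first-order criterion, where the importance of a head accumulates the absolute sum, over its parameters, of weight times gradient on the validation set; the `head_params` helper that enumerates each head's parameter tensors is an illustrative assumption, not part of the pipeline's specification:
+
+ ```python
+ import torch
+
+ def taylor_head_importance(model, val_loader, loss_fn, head_params):
+     """head_params(model) -> dict mapping head id to its parameter tensors."""
+     heads = head_params(model)
+     scores = {h: 0.0 for h in heads}
+     for x, y in val_loader:
+         model.zero_grad()
+         loss_fn(model(x), y).backward()
+         for h, params in heads.items():
+             # first-order Taylor term: |sum_theta theta * dL/dtheta|
+             scores[h] += abs(sum((p * p.grad).sum().item()
+                                  for p in params if p.grad is not None))
+     return scores
+ ```
+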
33
+ In the pipeline, NLP experts can select the most appropriate estimation method for their own task either based on prior knowledge (e.g. Head Sensitivity should be selected in MT following @michel-2019-heads), or empirically, by verifying which of the three approximate estimation methods best aligns (e.g. by cosine similarity) with 'gold head importance scores' computed by leave-one-out (Eq. [\[eq:leave-one-out\]](#eq:leave-one-out){reference-type="ref" reference="eq:leave-one-out"}) on the validation set.
34
+
35
+ In this step, the human expert should start with the most important heads and visually inspect their attention distributions, looking for patterns.
36
+
37
+ Here, we define a pattern very broadly as a predicate $P$ that can be verified on any pair of input tokens $(x_i,x_j)$. For instance, the positional pattern 'preceding token' would be true if $x_i$ appears before $x_j$. Candidate patterns should satisfy two criteria: 1) occur consistently among relevant tokens; 2) be interpretable by human experts to be beneficial for the downstream tasks. For example, in previous works on analyzing the attention heads of pretrained language models [@vig-belinkov-2019-analyzing; @kovaleva-etal-2019-revealing], some attention heads show a high correlation with position or linguistic properties (like syntactic dependency).
38
+
39
+ When a specific interesting pattern is uncovered from visualizing the attention heads, the next step is to confirm its global relevance on each head by empirically measuring the proportion of the head's total attention that aligns with the pattern, aggregated over data samples. By evaluating each single head over the whole validation set, the NLP expert can verify whether the pattern generally exists across different data samples, instead of only appearing by chance on certain data that the expert happened to look at.
40
+
41
+ Specifically, we define the global relevance (GR) of an attention pattern $P$ for the attention head $h$ as follows: $$\begin{eqnarray}
42
+ \textrm{GR}(P,h) &=&\sum_{x\in X} \frac{\textrm{gr}(x,P,h)}{|X|} \label{eq:gr}\\
43
+ \textrm{gr}(x,P,h) &=& \frac{\sum_{i}^{|x|}\sum_{j}^{|x|} \alpha_{i,j}^{h} \cdot \mathbbm{1}_{P(x_{i},x_{j})}}{|x|}
44
+ \end{eqnarray}$$ where $\textrm{gr}(x,P,h)$ denotes the global relevance of a pattern $P$ for head $h$ on a single data sample $x$, and to validate the generality, $\textrm{GR}(P,h)$ is then computed as the average $\textrm{gr}(x,P,h)$ over the validation set. The attention value from token $x_i$ to $x_j$ on the head $h$, denoted $\alpha_{i,j}^h$, is aggregated if and only if $P(x_{i},x_{j})$ holds. Note that $\sum_i^{|x|}\sum_j^{|x|} \alpha_{i,j}^{h}=|x|$ since each row of an attention matrix sums to one.
45
+
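+ A small sketch of this computation in Python, assuming `attn` is the $|x| \times |x|$ attention matrix of head $h$ on a sample and `P` is a boolean matrix with `P[i, j]` true when the pattern predicate holds for $(x_i, x_j)$; names are illustrative:
+
+ ```python
+ import numpy as np
+
+ def gr_single(attn, P):
+     """gr(x, P, h): fraction of attention mass aligned with pattern P.
+     attn: (n, n) row-stochastic attention matrix; P: (n, n) boolean."""
+     return float((attn * P).sum() / attn.shape[0])  # rows sum to 1 -> total mass n
+
+ def global_relevance(attns, patterns):
+     """GR(P, h): average gr over the validation set."""
+     return float(np.mean([gr_single(a, p) for a, p in zip(attns, patterns)]))
+ ```
+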
46
+ Given a pattern $P$, it will be kept if there exists at least one significantly relevant head. There are several ways to decide whether such a head exists based on GR, e.g., setting a threshold. Here, we suggest using a one-tailed one-sample t-test on each head $h^*$ with the null hypothesis $GR(P,h^*) < \bar{GR}(P,h)$, where $\bar{GR}(P,h)$ is the average of $GR(P,h)$ over $H$. The significance level is usually set to 0.01. If there is at least one head rejecting this null hypothesis, i.e., showing significantly higher relevance than most other heads, we keep the pattern $P$ for further applications.
47
+
48
+ []{#sec:apply_patterns label="sec:apply_patterns"} Once important and interpretable patterns have been identified, they can be injected into a transformer model by either fixing or masking the attention weights prior to the softmax function. Although the two strategies are very similar with respect to what they can achieve, as we will see in the case study they can be more or less appropriate depending on the nature of the pattern that needs to be applied. For fixed attention weights, the attention logits in the scaled-dot-product attention are replaced with a fixed (possibly input-dependent) matrix such that $$\begin{equation}
49
+ \textrm{FixAttn}(V, X) = \sigma(F(X))V
50
+ \end{equation}$$ where $\sigma$ is the softmax operation, $V$ denotes the value vectors, and $F(X) \in \{0, 1\}$ computes a binary matrix from the input sequence $X$ based on the specific pattern. Similarly, a pattern can also be applied by casting a mask over the attention weights computed from the key and query vectors, such that $$\begin{equation}
51
+ \textrm{MaskAttn}(Q,K,V,X) = \sigma(M(X) + QK^T)V
52
+ \end{equation}$$ where $M(X) \in \{0, -\infty\}$ computes the desired behaviour in the same fashion as $F(X)$, and is added to the attention logits to approximate the multiplication of the attention distribution by a weight.
53
+
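+ A minimal PyTorch sketch of the two variants, with additive mask entries in $\{0, -\infty\}$ and, for the fixed variant, the binary pattern converted so that the attention mass is spread over the allowed positions (one interpretation of $\sigma(F(X))$); shapes and names are illustrative, and the $\sqrt{d_k}$ scaling, omitted in the equations above, is omitted here too:
+
+ ```python
+ import torch
+ import torch.nn.functional as F
+
+ def mask_attn(Q, K, V, M):
+     """MaskAttn: M is (n, n) with 0 where attending is allowed, -inf elsewhere."""
+     return F.softmax(Q @ K.transpose(-2, -1) + M, dim=-1) @ V
+
+ def fix_attn(V, Fx):
+     """FixAttn: Fx is a fixed (n, n) binary pattern matrix F(X)."""
+     logits = Fx.masked_fill(Fx == 0, float('-inf'))
+     return F.softmax(logits, dim=-1) @ V
+ ```
+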
54
+ In practice, patterns can be applied in at least two scenarios: (i) to enhance the original model on which they were discovered, aiming to improve its accuracy and interpretability; (ii) to improve smaller models, in what can be regarded as a human-guided distillation process. In the first scenario, although patterns can be directly injected into the pretrained encoder, determining to which heads the patterns should be applied requires extensive hyperparameter search and risks overfitting. Instead, we opt to inject the patterns via additional attention heads through techniques such as the Projected Attention Layers [@stickland2019bert]. As for the second scenario, the patterns are simply applied on the heads (one per head) for each layer, and the new models are trained from scratch.
55
+
56
+ In our case study, we adopt the architecture of the popular BERTSum model [@liu-lapata-2019-text][^4]. The model first obtains the contextualized sentence representation from the pretrained BERT encoder, and uses a single-layer binary classifier to score the sentences for summary selection. More specifically, a pair of BOS, EOS tokens are inserted before and after each sentence to indicate the segment boundary, and during prediction, the last hidden state of the BOS token of each sentence is used as the sentence representation. We apply our pipeline on the BERTSum model trained on the CNN/DM dataset [@herman-2015-teaching; @see-etal-2017-get], a widely used summarization dataset, and as evaluation metrics we use standard ROUGE scores [@lin-2004-rouge].
57
+
58
+ <figure id="fig:example_summ" data-latex-placement="th!">
59
+ <img src="figures/summ_example.png" />
60
+ <figcaption>Example of Extracting Patterns in the extractive summarization case study. Important heads are found ((a), up left). Then three pattern types are identified ((b), up right): Matching Token, Intra-Sentence and Positional (-1, +1), shown in Green, Olive, and Blue respectively. Finally, each pattern is evaluated with GR on all of the heads ((c), bottom). For better visualization, we only label one head with significantly larger GR in (c) for each pattern.</figcaption>
61
+ </figure>
62
+
63
+ The process of extracting patterns in the extractive summarization case study is shown in Figure. [2](#fig:example_summ){reference-type="ref" reference="fig:example_summ"}.
64
+
65
+ To identify the most appropriate head importance estimation method for extractive summarization, we evaluate the three proxy scores (Sensitivity, LRP and Taylor) against the leave-one-out head importance score by using the selection process outlined in §[3.1.1](#sec:find_patterns_step1){reference-type="ref" reference="sec:find_patterns_step1"}. As shown in Table [\[tab:estimation-similarity\]](#tab:estimation-similarity){reference-type="ref" reference="tab:estimation-similarity"}, the Taylor Estimation [@molchanov2019importance] is the most aligned with leave-one-out in terms of cosine similarity and therefore it is used in the rest of our analysis.
66
+
67
+ The resulting estimated importance score heatmap of all heads is shown in Figure.[2](#fig:example_summ){reference-type="ref" reference="fig:example_summ"} (a), revealing that head importance is not uniformly distributed, i.e. a small number of heads play a dominant role for the summarization task, which is in line with the findings in @michel-2019-heads.
68
+
69
+ With help from a visual interface, we analyze the attention distributions of the most important heads by looking for human interpretable relationships encoded in the attention weights. From this analysis, three specific types of patterns appear to be present in the most important heads. Those patterns are then evaluated on the validation set to assess their global relevance on each head.
70
+
71
+ **Matching Token (Green in Figure.[2](#fig:example_summ){reference-type="ref" reference="fig:example_summ"})** We observe that the attention weights of some important heads appear to exhibit an \"attending to matching tokens\" pattern. Specifically, the attention value $\alpha_{i,j}^{h}$ between input tokens $x_i$ and $x_j$ on head $h$ is high whenever $x_i = x_j$. For example, as shown in Figure.[2](#fig:example_summ){reference-type="ref" reference="fig:example_summ"} (b), the token \"photo\" mostly attends to other appearances of the token \"photo\" in the input sequence. To evaluate whether this pattern has a large global relevance for any head, we only consider tokens that appear at least twice within a single document, and compute GR (Eq. [\[eq:gr\]](#eq:gr){reference-type="ref" reference="eq:gr"}), in which $P(x_i,x_j)$ holds if and only if $x_i=x_j$, i.e. $\mathbbm{1}_{P(x_i,x_j)} = (\mathbbm{1}_{\textrm{freq}(x_i) > 1}) \times (\mathbbm{1}_{x_i=x_j})$.
72
+
73
+ The evaluation results show that there are several heads for which the matching token pattern has high global relevance (see the Green box in Figure.[2](#fig:example_summ){reference-type="ref" reference="fig:example_summ"} (c)). Interestingly, these heads are prominent in the importance heatmap, which suggests the matching token pattern is critical for the summarization task.
74
+
75
+ **Intra-Sentence (Olive in Figure.[2](#fig:example_summ){reference-type="ref" reference="fig:example_summ"})** For some heads, the attention among tokens tends to be localized within the sentence boundaries, as shown in an example in Figure.[2](#fig:example_summ){reference-type="ref" reference="fig:example_summ"} (b).
76
+
77
+ To evaluate this pattern, GR is computed with $P(x_i,x_j)$ holding if and only if $x_i$ and $x_j$ occur within the same sentence boundary. This reveals that such a pattern appears frequently, especially in the mid to upper layers of the transformer encoder (see Figure.[2](#fig:example_summ){reference-type="ref" reference="fig:example_summ"} (c)).
78
+
79
+ **Positional (Blue in Figure.[2](#fig:example_summ){reference-type="ref" reference="fig:example_summ"})** Similar to findings in @kovaleva-etal-2019-revealing, we observe 'positional heads', which focus specifically on either the preceding or following tokens, i.e., both $\alpha_{i,i-1}^{h}$ and $\alpha_{i,i+1}^{h}$ have high values. To evaluate this pattern, GR is computed with $P(x_i,x_j)$ holding iff $j=i-1$ for preceding positional heads and $j = i + 1$ for succeeding positional heads. The pattern is verified to exist in the lower layers of the encoder, as shown in the blue boxes of Figure.[2](#fig:example_summ){reference-type="ref" reference="fig:example_summ"}(c).
80
+
81
+ After uncovering potentially important patterns and confirming their relevance, we apply them to transformer-based summarizers through masking and fixing the attention weights. Aiming for more general 'cross-dataset' insights, in addition to the CNN/DM dataset, on which we trained the model that we use to extract the patterns, we also evaluate the benefit of the patterns when applied to the NYT-50 dataset [@sandhaus2008new]. [^5]
82
+
83
+ The patterns identified from our analysis can be applied on an attention head through masking or fixing its corresponding attention weight matrix. Specifically, for the matching token pattern, we apply an attention mask which enforces that when a token appears more than once in the document, it should attend only to other occurrences of itself: $$\begin{equation}
84
+ \label{eq:match_mask}
85
+ M^{(m)}_{i,j} =
86
+ \begin{cases}
87
+ 1 & (x_i = x_j) \lor (\textrm{freq}(x_i) = 1)\\
88
+ 0 & \textrm{otherwise}
89
+ \end{cases}
90
+ \end{equation}$$ where the constraint is removed for tokens occurring only once in the document.
91
+
92
+ Similarly, for intra-sentence attention, the attention mask specifies that only tokens within the sentence boundary can attend to each other, where:
93
+
94
+ $$\begin{equation}
95
+ \label{eq:intra_mask}
96
+ M^{(s)}_{i,j} =
97
+ \begin{cases}
98
+ 1 & \textrm{SameSent}(x_i, x_j) \\
99
+ 0 & \textrm{otherwise}
100
+ \end{cases}
101
+ \end{equation}$$
102
+
103
+ Lastly, we use a fixed attention matrix to encode the two positional patterns with: $$\begin{equation}
104
+ \label{eq:head_mask}
105
+ F^{(-1)}_{i,j} =
106
+ \begin{cases}
107
+ 1 & j = i - 1 \\
108
+ 0 & \textrm{otherwise}
109
+ \end{cases}
110
+ \end{equation}$$ $F^{(+1)}_{i,j}$ is defined analogously, but equal to 1 for $j = i + 1$. We opt for fixed attention matrices for these patterns to save computational overhead, since this has the same effect as applying the mask (each row is a one-hot vector). This is similar to the method proposed by @raganato-etal-2020-fixed, but we only fix for the preceding and following token patterns.
111
+
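+ A sketch of constructing these three pattern matrices from token ids and sentence ids in PyTorch, with $1$ marking allowed positions as in the equations above; the inputs (`tok_ids`, `sent_ids`) are illustrative assumptions:
+
+ ```python
+ import torch
+
+ def matching_token_mask(tok_ids):
+     """M^(m): allow (i, j) iff x_i = x_j, or x_i occurs only once."""
+     same = tok_ids.unsqueeze(0) == tok_ids.unsqueeze(1)   # (n, n)
+     freq = same.sum(dim=1)                                # frequency of each token
+     return (same | (freq == 1).unsqueeze(1)).float()
+
+ def intra_sentence_mask(sent_ids):
+     """M^(s): allow (i, j) iff both tokens are in the same sentence."""
+     return (sent_ids.unsqueeze(0) == sent_ids.unsqueeze(1)).float()
+
+ def positional_matrix(n, offset):
+     """F^(-1) (offset=-1) or F^(+1) (offset=+1): one-hot diagonal pattern."""
+     return torch.diag(torch.ones(n - abs(offset)), diagonal=offset)
+ ```
+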
112
+ We first experiment with injecting the patterns back into the pre-trained BERTSum summarizer. In particular, we apply them through additional attention heads in the form of a Projected Attention Layer (PAL)[@stickland2019bert], along with the existing parameters of the original model.
113
+
114
+ The hidden size of our PALs is $256$, which consists of $4$ additional attention heads ($d_k=d_v=d_q=64$). PAL is added in each of the 12 BERT layers, where our patterns are applied in the 4 PAL attention heads. To ensure the changes in performance are due to the patterns rather than the additional parameters, we also compare against adding PAL without applying the patterns. Results in Table [\[tab:pal-results\]](#tab:pal-results){reference-type="ref" reference="tab:pal-results"} indicate applying the patterns in the PAL (+PAL(Ours)) improves BERTSum's performance on both datasets, where the performance gains on the NYT-50 are similar to (or even slightly better than) those on the in-domain CNN/DM dataset, supporting the generality of the discovered patterns. This suggests that following our pipeline can boost model performance, as well as its interpretability, as the model follows meaningful patterns. Interestingly, visualizing the head importance scores reveals that the PAL heads with patterns applied are significantly more important (by 2 orders of magnitude) than the PAL heads without patterns applied [^6].
115
+
116
+ In a second round of experiments, we apply the three kinds of patterns on a simpler and smaller summarizer, namely a non-pretrained 6-layer 8-head transformer architecture [@transformer] and compare against the vanilla summarizer baseline. Since this can be seen as a form of human-guided distillation, we also compare against two popular SOTA distillation models from recent works: DistilBERT [@Sanh2019DistilBERTAD] and TinyBERT [@jiao-etal-2020-tinybert]. Specifically, we compare against the 6-layer 12-head variant of these two models. As these models use the embedding-layer from BERT (TinyBERT uses projected embeddings but maintains the same dimensions), we also use the pretrained embeddings for a fair comparison.
117
+
118
+ Under both settings, each of the four patterns (two of which are positional) is applied in a separate attention head across all layers of the model. Note that since the goal of these experiments is to assess the benefits of the patterns, we do not perform an extensive hyperparameter search when applying them (e.g., on which layers, or on how many heads).
119
+
120
+ As shown in Table [\[tab:kd-overall\]](#tab:kd-overall){reference-type="ref" reference="tab:kd-overall"}, for CNN/DM the pattern-infused models significantly outperform the baseline vanilla summarizer under all three settings (6-8, 6-12, and 6-12 w/ BERT embeddings), and can also beat the two distilled models under the same setting, which convincingly demonstrates the utility of the applied patterns. As for the cross-dataset experiments on the NYT-50 dataset, our models significantly improve over the vanilla transformer and DistilBERT, but lag behind TinyBERT in performance. Since TinyBERT also distils the layer outputs along with the attention weights and predicted distribution of BERT, it has a substantial advantage over our technique, which only distils attention patterns. We suspect that this additional knowledge helped TinyBERT capture the long-term dependencies required by NYT-50.
121
+
122
+ Overall, with the specific patterns applied, our models are arguably more interpretable than both vanilla transformers and distilled models, as we know exactly what information is encoded in each masked/fixed attention head. Additionally, as was the case for the enhanced pre-trained model, the attention heads expressing the patterns tend to have higher importance scores than the other heads [^7], suggesting that such patterns are effectively leveraged by the model.
123
+
124
+ To study the contribution of individual patterns, we perform an ablation study by applying all combinations of patterns on CNN/DM using the transformer model with 6 layers and 8 heads.
125
+
126
+ According to Table [\[tab:kd-ablation\]](#tab:kd-ablation){reference-type="ref" reference="tab:kd-ablation"}, applying the matching-token and intra-sentence patterns together achieves the strongest improvement in performance among all combinations, only slightly below applying all patterns, whereas the gains from applying them separately are only marginal. One intriguing explanation is that these two patterns together allow the model to learn sentence-level features based on term frequency (plausibly similar to TF-IDF [@jones1972statistical]), where higher scores are assigned to sentences containing frequently appearing tokens. Additionally, although applying only the positional patterns causes performance to degrade, they work better when combined with the two other patterns. The reason for this is unclear, and further study is left as future work.
2202.03229/main_diagram/main_diagram.drawio ADDED
@@ -0,0 +1 @@
 
 
1
+ <mxfile host="app.diagrams.net" modified="2021-10-02T10:57:05.831Z" agent="5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/94.0.4606.61 Safari/537.36" version="15.4.1" etag="z2nMpOzgXN9n1S5K6h4k" type="google"><diagram id="aZtizCSpSjkDWpX5xNk2">7V3bcttIkv0aPbqi7pfHtj2XjdiOmNje2N1+pCVa1owsamR62p6v3ywSRYJVIACCCaBI1vQELYEQASJPXiozT9ad+PD1x1/eFq9ffl09LJ/vOH34cSc+3nHOmaXwjz/yMxxRanvk8e3pYXuM7Q/89vTvZXWw+sPH708Py28HJ65Xq+f10+vhwfvVy8vyfn1wbPH2tvrj8LTPq+fDq74uHpfJgd/uF8/p0f99elh/2R613OyP/3X59PglXJlpt33n6yKcXH2Tb18WD6s/aofEn+7Eh7fVar396euPD8tn//TCc9n+3Z+PvLu7sbfly7rPH/DtH/xr8fy9+m53XD/Dn77/vIJPgBtc/6y+tf7n91V44923jUx+gRMUff2xfxN+evT/fli9fFu/LZ78Z2w/79NbeA/+XXx9hR9ePn3z/2xQEk6DW91eOZzLD26Cr5c//PEv66/PcIDBj4vnp8cX+PkevvLyDQ78a/m2fgJh/VK98fXp4cH/+fu3Jdz24tPmoyj8/rqC+9tAQb2/Ux/9Z31fr7ZfbfPR8BVW/1h+WD2v4HM/vqxe/Kd8fnp+jg5VDxGuu/xxVBBsJ15QjOXq63L99hNOqf5AqAoRlU6841psD/yxRxiX1UlfaujS1bFFBerH3Wfv5Q4/VKJvhoFogEH04O+/v/1r+VA9mOXLwy9ej/aPoCYQL79KZxXdPcWgKGL7539+8jezEcP2UsuHROd6PzvpiHXKOim5ZYorcfgonXaEGe0cY4rCOSp5sKrhuaqW51rdyt9WG4CH+9CGESGYMJbDtTiVkUQtI8pxx6lQwtnwjMPnf1t9f7tfVh9ZV9nTrmKdI1pqGr7w4UXWi7fH5Tq5CAhz8bN2WqUZR7+pcppIKjV8FwvmLVwl3INylGjm4H3KtZLWRFjcXm6PzJ1Ie4FVFrCigJUStRGeFNopyg5vQ1hJ9iCiUumhaG29DKOUEw8RbbjSWhgxEl4VXEXB0+TMOckindFOEM2tYFZZSQ21qHhVBa/j4xWMDJgcurM6ciS8MgpIApwybZlkYM7nwKsB81u7B4EKV90N18e31ffXu2NBTxV1V4HW3S7W7Y85yWj82AVNcAUSD86vDi0rJQliOycmMslzEMmDgL+ANYdX0T++PK2Xv70u7v07f0BEGwWp3163C5HPTz+8or+HJ/jysFP53TLAq+zj8+Lbt+rnb/9Yru+/VL+0676MYtM7Lj5/Xur7+ySQhXcejPtE6eY+1ov108rHye8c7RvL6naTYYmQmjthqH8Ni59KmMZqYiyjwoCmWq14IlhpiQ5/7F91KuQjp5wjb5vIOwV+kXejvA0xwmj/w+ZVH4aD2loihQDHCqJnInz+7AJ3icBpETiGwH34T60yXss5rHRcJgIPXqVIHFvi0jF4H6IRK5nVWpk0dzGTyFkR+TgiF7BmEo4qZg0sEUQQ1vwST1OaxY+jSJzDooTCItB6qftlicxF5Gn6sig5isiZX2g6WPRDFA/hekNKeiaJpzlAViSOIXHn127whtNWGJ9mzkXiaRatLMdxHDm4ceYUdY4yR4VcvmNpDm8moffIRfnU5m/Vr1W6dC9M/wRXb+svq8fVy+L5P1er10rCf1+u1z8r4fmiX3uSNc3IHiZZ0yTsXXfatUNo0hGfy/Kpd5CYMlEKkFG/qmaSC6YpaKsUxx/3kUwn9/JiUlu1eT3M6x7J0pycTuWwEpTSKMX9q3SHX8MoQZyzcJhLwKCM0nZHsqkDkposzeYVJCEiqU3IR/I/yFCCb0a0olZLpSwEgG40KKWJwgRKs9ZzxoFD3Rn46q4xYDz8St8K1eAMNj4HIkYIKIRRmp+KqQ6nZZkjSgtlqXbGnzEMU12ZS86JBcsIKwxBBXjIXpg6tdjDNgtpox0X29fDmzCcEQh/nGKUSSoFi/B7XrWHpVnQYhincbHHkmbIhlEqS5xSGhbtgDIVF0bxDCNP06sFShP52CPZOGQoCQ2WnxoH71NDwSjJ0bCU5m2Lk53ayQIIiOLgYZmWcBPajuJkBTfEciUFuDrHGI11YxovK7kmzAjfQKJBcEKjetmA3mIaJ/eyR1KYyJaRaVjIMm4YLHDA2Soe6SKiZezRq1ugNI6XPZIcRV/JwtKmtpK1o0Ep907aW3CygCoijbHUN/3BKUIOA1WnlyXmwMvO4WSZYIQJygyHW9BSclwnO2ujLYD27ef/+V/gEdhw4PfNAUp5OPDxR/38jz/rv/1t+fYE39eTWLYHfzytt5+oqt9+r72z/yT/S/igvloFT2UDrO2hKju6RUH1h/TuGpSP+aSg2/1n7MFtIqWRpIAIVgqhtN68hmb5YNCZJNpwiE8UFyJJgGLpHuEQwEIcq6rXw68KN+Cr90zAM4cTK/OApns9KjVF927Do+EtGxm3Pu0pt6+HblNoQoUUjMFXduA6x3FoPt9htWGCV6+HvczwFIhzDh4p4xQsCMVtxW/oQS9adaNa1VwGQNYq6cNE7pShAHvJVb+lB7JSwROGFTuIS0sLy6QqIEbTqVnLd0WnptapNrQ35vxOVynt3R0sq8z29ZAyxuAetNXaMnhK8FiR/VRwh8ZCoMvggUphQI0drs70KBIWnbkenTmjJo7jh4SQRMDiDEI77lnmZg4/JI3yPDYLESyFAI/iZitEj2pp0anr0ak2tHdUwHAckXBE1EvEyJMhgruzgCgKmDJGCfimBnf2g2iqC2MMrPmv5fO28baMq6kdOqp3yciPxnE1dKxxNTxBwcURPh4WS/u5sRlc39vlp893Q5vB2wWnpSZS1f6L7CWTKdFeOeIkE2C6jNDU0CYTeeycs8R8BbyemcRsHJjh/YwgLnIW8xWQeWYSM0THBMIGpZiANaw0LmcxN5XnMFz3+6eXxeahFM/sDbwShFNYPPmQ1+wmK+78dGhfryFCsAY/LTD8dFoVKga8r582bWIUTJHaKLk09ppNy9OSRRE5SmgmmSVOWKoEBXlJlY5RmU3maUr9XfHgKEJXnJH9HEVteD5CT3PCReg4QtcQ1TmljPLTLIQN7YEZCF2mScsidByhG26ISwe35iD0K5ihk6fMraBk68sFN1KEcDkHmacZtqLoOEJ3QhAfslNqrfRmPh+hp/m2InOctCoVvlGsGpErTUjU5CD0HiSAi+WTdIw25or4mSeaKSuUMTrW1ObRxqfMLDbwARICdwuyYlF9+cgi/vSJxQC9MChYyPgaihJeG7A9GplE9ui+LzgaiqM2ER9JDODiSPq04iZG2c5JH228hpy1k3
wuLOCPS48cUpwGRtqLov0qXUPZkfp4NIO7sMIa6sDiSnZIfEi2MpC41Ad5zaOFZjd8rQ70SKIE1/JpiLpqTppGqoJo+Xp0JhcgjeFBj+RecHFkNCf7RZ+Kwz1EHM3arXsrHjTe42ggy6njKvFOH+NQBzsc6Mjb41zz2KDZzV6r/zySisK1e05bsoMO4Llf3+kQIPWYGVSANIb/PJbewgUSo37nu9oitB8rYAiSePGg43vQ9p0KkRzoRBt2dXjQZJs7jutCZ92/M28uSX3MRSiw1edchD6GC1QvYWHVwjw/SfhdzzpSPEPTy35EszScgfuG9baOOoklBKis3qEyDlULrASRziqxYTfKsAjf3QX4n1qeSeFmeEI1p6jXDXgltHWdIoYqbizooDAqAqx1nBgDqrtFrRlpXUcpMdJxra3mzMSbWRq4R1YLqHBJw6pHQahozZVoDdp2we1qo/2uBLWKwjjzKTrUBrwp2S91QIdx1aZMUroqtWnHc5RZG5qAkb4OAvrnp1lwmmzR7OAm7F5vxlKayMVJS7Yrrk0IijvFRZXJSFelJWeVkZGci7GO1L6sHGsl0+pctORkU8usGjqQc+1l+NFVqU07nttrR0jOhQFGialVT6fxLk4xUo/BcHdFCe1NpeV9APOcEaoF3WVrDuWWLXFR51eGvPdYeLo/sKOs0c4mIv5ghNpsKBhvQbjQlp6RLVVTZUvPEmVaCCx97ChzI/LloIaZTIWwgi70fDmoOq17FaHjCD1fDmpwIEXo6ELPl4Oq02JCkTmKzPPloOo0E14UHUfo+XJQdZkfM9bMt3w5qPpimQ8rgN7T2j9iMWdqsX/O0EIwT0MXDTNhPY9LSbWckk1HkpHGUhpthzMdJ1WnabyCq7Fw1SpyJI5q11WmIqmaWYd3ZwIO9HpG7MCi0hgSZ7XjKhNxVo2Gu5CCUQuP1I/LjkzkuJxVk2ZIi2GcxeEiUVg7DONkHNaQHCzAmtvjInFau64yFanVzEpxyAQcU3tcpF7ojqtMw3HtcLjjclzDtiTFLM7tb5Eorx1mcTLOq7nYqUsXCKxWkWNxYDsvMxUJ1szaiJ4JPKb2uDic2I6LTMSJ7fC4I3NiTekQ76tF9aav4C/rTV+hzeICtc1yAgEBY1JopyiLbgOJImucIZZq6fff5I6G3akmpsh6Np9/1oZxTZ2LtG1chqwpfeU367KwFonWEVXToUiJpiHMGunTrpW/srv60TSE2RAxFiW6PSXC4s92aNE0/NkOLRqXP2vLvrpXrEUd8Mah0xpriWWKB5beHHRa8DGkonUJrZWOBgqNSq61PaqDRYUuVYXOK38jOaJpuLYdjmhcrq3lRYuuV4s64I1Dve1wRNNQbzs80bhEXFu2pBnaDuxAVZjd/3cotmx5uDYtlZYG8L4Sb90NOl/ypk2rmIXpgSP0fMmbttB7xhJ6vuRNm1blitBxhJ4vedOm1aHi0lFkni95M2w1UBQdXej5kjddmnsvio4i84zJm65Htri0IN5htCCCspNaOojFvEoU8qZTgESf0lLbnpaYmTQVeTN0pRVcTYCrVpEjkTe7rjIVedMVKskIGfHIgYXqBzJ5s+MqE5E3YUVNZG3njdgOj0vedGmGtBjGWRwuEnmzwzBORt50hUySicdFIm92XWUq8qYrVJLpPS5SX27HVaYhb3Y43HHJm+5iN/jOBPlo/haJvNlhFicjb7oy9ioTf4tF3uy8zFTkTTcrlyATeEztcXHImx0XmYi82eFxRyZvMlq6+IewN0O7XJ29GfosLlDdGBjlqnPXOBkZUyTyJoSsxO33nLZRn9xE5E1rHJFGwxdRoHmSx3cxKnuT0R4lnKJtl6pFEy0TYQlW7UNsfCdw7BqnoG9aY4nhtf8Ob2Jc+iYLibmiRbenRVj8zQ41moa/2aFG4/I3YSlW1OiK1agd3zgETsc5BI7b8V9K0DAka1ICpzWGuM0EJ6sV+IZIiUYlcDLao0RYdOhideisGjiSK5qGwdnhisZlcDJatrC/ZjVqxzcOhbPDFU1D4ezwReNSOMEUdWvRbe2s2TRkzU41ZO2cXl9Gz5iqt3/oA40iOzCKhwZRdZnDYPnqdm9nBZst32S7qk4me/wtqJUgThrOIFrXXOqIoc4dbYNg7y2o268yVY6Wa7Lba9tT4aPvysDS77u0aFXcwzOjF1stPl2PMok5TpgHQTXZ5e6ZGm2813lTxPDqyxC3FDRmi0brORq79SENJWL8ZOVZOVFENLJMysU5xiSZoBc/9DCayH3hNiLMddCUZiVDnR54KKK422laFHg00r7Qwg52sfS+i1UVtLADbZjbKFumDUIjL2jMFo0dYQdeYnoMDtggNGZSqixhRy5hRweJdVaqLHLY0cwKxos7LpbleLG6ghZ3TLNvXMdVMO38xRIjbwCLHVHHNDS4CbE4K5eyxBz5xRxKaZKQH7FDjvaLTBNxNM+yxIs4LpbnebGaghZxYO380B5xdFwF08qXYl++WOyIONCaWFsjjgmxmMm+VCXiyCXi6JhIM+vcG+SYo3nED1rMwXsULoudzzPmwNrkoz3mGDZgZRAWS6UvXyx2xBxo3cojDPsZhEVeYo7MsTtxzNExrGzWkWjIMUfz9De8mKNH1bLY+TxjDpwRMx0hR/tFMK18qfHli8SOiGOSYUfTITETpl+JN3KJN6xWJHQ2+JkhcpR4o+Mq08QbzbvI4MUb+fH/ipXvGW8kc94Ght0dEUfXZTAtfany5YvGjpgDC41dIxYnRGOPOl+JO24p7uiYPjzrjGPkuKN5nDNe3FEoivla+q64o2PKLVbcMWyY7hA0ilLpyxeNXXEHEhrHGe08CI2ZTHwtcUcucUfXbPp5R+AjRx5H5v2jhR6CN6iXfl5XunKgZ/qf31fhjXffNlr0C5yg6OuP/Zvw06P/99fvz+und//jP/bhzjePbD/001s44S9vi9cv4TDc5/Zy2/cSDV8vf6wPNXnx/PTo9zW9B2z7qWHv/aamT/eL51+qN74+PTw8bxzTEu518WnzUV5NKonA56r3d+qj/yzwRdvvU5utFBSuMiJ1HawOVU/unM1UGZXRNC8eZsbUtGq3XW1drXSLWvWekiSaKmxbkXx7XbwMF/9/vLx+9+j52/d//xv+fi/m7cfemJgNPRSyNV6plbDgWrUKc8w6RS5QRN5UysIQ+W+r5++bzYZvXNgMFgwOonjwpJoaK4OJ3ZMDHAjfWMUoNcYEv1MXPh1N+D2qR48Qyb8efQ73q5eX5f26evZ3u32zT3k+kTaw3ZKj/gw2Wyqkj4HDUpxqjEcxKz2pT6y2Fdbx5yiIgbhBU04Npcwm+Rk/0kxpq63khjubGhnJCHgAiEGsNkyqsF1u/XFLTpyFqzg/2kyDyTo1lpMaQkYunfTlFQhfDuMbZggEXwpCLMZ8kDUwTcqcv02/IBJcgE91+pSrIEVyjHm9rv8POVRL6wEXt2f9589Lfd+4Z/2DcZ9qkwm7LHCHbnBGDIWVg1bcaKqNigDhBz5JbSgXksJvIo25pIV1Nlgm7ayRUgXWwoFuN
J9ynlVK0+ysCLlZyE4R5gNopywsoyhXlyLjNN0rioyPODltCSzNpYV1EVh3EQ0/zlfIMs2iFiEPtdZEUQYxjrPwf+0a8k5zCTlNTqZBZBFyH2vdHKJlIOKmBFks4SyrIXe9lxiHc593pqs++LmCOtZixGCs4GSavCoB8UA/mq36XWzj9XD1Y03qJ/JTvzSXVNRvYIRDGeFca2acUQY0x2ajf2mWrAh5WISTr4wvttEU2cbq/GzsxU5XGS4adSGiSbM4Ze03MPrM1jSqi+2FG65/DTuzV2DPSv9Umnu5etE07MiUpWh65EwuWg5NLmpbnstLDhfLnx8smlA/yz16ULeX1wj160OtsfmJZlaGdf/Hi/3Q8moMORISYneGdFwGsTVEGSetga9q3c6BVHdhBJHMOe6bjJUL3fZojSPqxjqd8gR0VKkeStPvAnTHZaYBNAiDWEkpNUpxqsOe23iIvsGMlWvynhnG/pPzhAcydFp3Hb/rT9sZau36FF3HWVPkZRdxOkDBhxPhhDQ+uQR+NDJI7wSh0oK5ksZZOIVbZB5x+Dbc+R3vqXHMMq2cQp6IFjbQKdp13bFEeylJKuI0tYxTuEnne6KGKU37Vfw20Ywqy4SUQvGRJgwqSyTVzgkKoQII75D3DCaBaKtAEKBSVEmNHBrrHvndok+Xr08dBQgsheq4zDQaxQQjcAn4El65uaGHLN7RVer28vKNSccMY/NK1O2iwTJwe5v2+8529iDM7+zX73Xz1WjLBsurucRMr9/OweOAC0ifP9DUDd5OnTkHdkz4rwmGzHBzSJ0fmFQbZGx6VDgm5zMa10xnNIAO5jRX0liXCp8LTqyD1QsAwGgtoszNMI3vUWaYN/WnT2tsjAczXSDHUQ2cV3EiyTG+DFZ0USkIXryQllsavNKN9NF0aEPcRnoprEadViBul9XYIeN4UX4xMk5z8rdLeOvyalGYdimkRp0m94uMB9rqXDmNuvS1YtnqIxHZ/DI2V93WGhaFB2t+3ZSj0aflaNoXH6ZlmGN/yaTZsxIpDfOi+WpfnzTclWlfU/eCOY1SPIX2pemkon3D4ps4Byiz0b40JVZi2GHxTb4y7tHCexMWVmZnYS92i53BkglOJXfJpPmbsu4bFnnmaxevmk7crH1NTG9zWl/lFNrXo7Hv2iQjL0Iytkeu5KLF0OSe9GlNX1OI4arbippDuia6b36BQ9gc5JYk09RApE9jmU4hmR7dMVfU/WEaaijzd38cCQbHJjJOxPs67PyKiYwRWs/sFLG31cyUJ5yvm8YYNTJGNEZkMo+9vSxVSDIcOs78Yv7JGdMXRws5ocg6xloiL6OI1OF5IolRYG9JdoTEiLzLuk0TjUW5ri+OaK8dJYyrYRrTfpGYb8XHaYmOKYz80DjEfCvsqLgQ7m9BndpLDkjq1H6RidTpVP4itj7dXia+MdGYX1Tu+nQtXhd98YSM/WmD1C7Qxo1EXuxXux2Bu+iaKhoYW++qvrvqdtTwD9VD3fUtnrcDMWZLijAKsWMbaYmx/2XYqBz9mZusn7njxG/bvfufnVMEx7ccPk8ELGcR7PbzneeZ30bP49tqvdjsQCw+vnP9m33adYcbTfwW8eAytJbGykiOmhKtYelJIRh1vIGrpSWsURn4La48uds0eM4jp5wn89vg1Y4kcwVLegtydT5YCnXUIHPp6wI+zAkvKbVgLqHfxnYtcwhdKQhvnVYQJhsnZQNhby6Z38a+wHPIXEtYK2gw/IJubHw6XmIumadJvyJzHJkbqYgQsA6kUnEHC8FsZH4bxNw5ZG4lI3thwemZyJzTNOVV9BxH5k4YCNyVosqLztJ0z6G5ZH4bFOA5ZM6oEMQKOItrJmBtnUsQB7FFIvSyQkcSOhPpPLa5xHzVO/K0isnFid+9XE6oEEBgRriAmBxctqWBNdORjDm5RCCoI8JyXwhRzNiQNd5dxgmimNuvDA4vg1cj4DRN3BW8nISXdklGmRzBxgGMtJZY6wf+SD9zumfhfBBgst+8aKjQO/Lk6nRsxH7jsHnonfCNBlRzw4wxlAo1EBsdl1GcEiepCbHoSDsQ+QKuFIpyy6VV1kQFYMklMUbsrBqLgHheUwWnV037nt3vHclTYZsxsJJk71tlpBCYVuyq9/eZ3+0dyXFh4wXsJrytjXLKguFjIwJm8ibIq3V7gAbCqKUMolwD3mJoDN1xGQvQ8VUUas32arO4PcME2WcDwLQiu72r7iWc3e1FKbyRvJ4zlIQcodBcRH1niFaMXfVYxvnd3rH0HzZiGNUK1nv7BV8/4zYIMj3GIhTH18sjQVgL6z0QaXgZx/Ex+HTiaq6vX/YI2/MxuDeyXdhKK0VYoaG5vlBDmweYtT5u4eydqBNVKDzxc6gq57d4H3Rzh968g27u0NORqyZpTRxTXEnuY8S4nRQrcWIFcb5hnSlY2DYkTixxShoqtiZ9rP0uuIRA1YJzdpwbG4a37hInhhOrDJN8++qQ1WjCER7swtQoV+WYan0liVJO+KKSsC5pafA0B2d3ejiWcgiinLWg5kpLFyuH0YwItY+EcLeB5mzWgSBFOfCVI05Dj6MbhmpitdxVd6NIbyLlUNoRyqVw4QVZOWatBBXlGKQc7bBtTFYN0Q5BDHXOMqaNplH05pwiQiurqpfRPAcnRjrFHdvEeJH3UobUvmi1mMDTjcnnuxTdGNdxdFRR0aIqKggVbB/TzOI54EuSevsJcrGWTT6gpWjHuJ6jo9iC5DlghcyIqtUV5/AcTgminRDGGUfBIiBXdFifCdqnq0OtV5JRI9wyXZwfBzJrBfKxkQa0Q5Oi5s5P7OHh86a+8Lq4f1r7s5g6rif1FBcPGaB6imvX5pypRoF1Jdwpra2SzlEVwQyrgs7Ab1luNVfwlCDWiVTKWO/1qHZ6u14Zr4LO+pQqswI2JkDzTrcyJYml8KdVSBBjcZJ1ATgOR5yoRT+jYTFwXObB4jFcDcHoFWKx3S5GRbShJfl2KHYUGDCR2Gfq/FlItPyT0LoXEh/U0j543sXfv399rT5Ut3j4rqFF/cB54LTDo84UnO2wwVojMqmJr0cpXxtiNAzyaK8IjYJOXtB5NejEqgvBoodsLLMVFoIBGeUOBIWVopZSua0v1+O1M/E+ZKfJ4TkkGTAppA+7AxomKmePc8tJyENQReNZc7DWIczPp6le+DhmWAlNwrLJl3lGxHkfklbBeWvUG/B5UTjXivjx5gb+WJrYnmPl3Ji1xBhlnVTCl/Oj1J6wkuxaYPwKcDxuGe9RUbzYcGO09q6G2aq7HuJMga0hgq2o1Fw4Fi3xpNMETPuu9WpoGC2EIPvOburiik7zvKUxcN2HnDY7rgfgc6gu9MJ1w04eueNaWk2oVHDvRlLGkuEQ7V3bWIGJoeA29qXtOF2HCew+NLrZgZ2bwW6MuPMGdnskgpUfFsoSWDlSvwGK1iw0ru8CHrDn21ZypYx0PRldg3A9ehFuRlwfwehFwbG9wQiJZ9FROpsQjqOXzhDgGK3JsrO6jWFy3qUQbR3Z9+jstrbf5TlgAUhr+eah4YTS
nGwZPIZqH8REl2lPpyDiPDzJgvMzcN4YNmeOcyrJvt2GqYiFi5XokMaQLZ2SGj+DMY5iuCP16vN4eQ6RZdEPIZ+HpBu9cN4YRWeOcz9Fhdl9iBvhvLEneYA9h7hln1thIkqvaMvINOac5wjzizLnR9LWecM8js6jZZzjjNRGCQ0Oz6Wnt4QipGA8qgJZgHnI7HnPMR7MR69C5t6x2QfIomkLsDBxOlMgMymJZpRpJzdk5AhhSlviF2chhzwSDZlRiH+2vcZVpD8elEcvNE7Vo3kKJHNFX0dTJlKDcHsuRQpOjIVr7To6xgNfWv3Leyx1U5jxeaEtbUT+ByNU84hif5XaiXTzvx2Gz9wMzTHiByjAqmbD3mCReCkHEEnpZEWKTTcjADcahopucZCC+8gpZw0yFmnNrMEWFTSchgZryPFJE0ZY4lJKfx0L4Ih2WVUTNkg8gELjGechIS0y5T25/CKQ0G4XGCxQCQQ9IM8tIGQ2diEtzVyel3ig98slb0KDgljSLHGE7GJjzzmpVUVCOrk+tL4hctEtkUt/qV3a1jLzSY15FoxLxyfPJruwQeSRGYH7cYB/2h8dMiTwgPF73Ew20nsPyqK79cm9l/3T/X9/eXq5q7OAWd8Fgj3drkYLBAlBPJNayPBy8jIg2dfTjReCyzQxfXmudho15Rs11XHWaU49bbConXp69mjPsGgPP2/ySLsFfFf+6SD7tE9GHck/nWgU0MxA5bwOcrByQMw13Db0B8EZ49wOn9PACQusIfa198vmbXt28sNsPuzKYp5i+c8U+Ts/2k+luekjkq9ZCc4IdSJEAXVbwSkxghm6exHHYdPsUrinLaQBBm0yXKemk4SCG/cdBad9PNLoht2A4/2DppH6nDSeAX59W/k9wPeng5f78uvqYenP+H8=</diagram></mxfile>
2202.03229/paper_text/intro_method.md ADDED
@@ -0,0 +1,75 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Introduction
2
+
3
+ The capability of neural models to perform symbolic reasoning is often seen as a step towards the framework for unified AI, *i.e.*, building end-to-end trainable systems for tasks that need to combine low-level perception with high-level cognitive reasoning [\(Kahneman, 2011\)](#page-10-0). While neural networks are naturally excellent at perception, they are increasingly being developed for high-level reasoning tasks, *e.g.*, solving SAT [\(Selsam et al., 2019;](#page-11-2) [Amizadeh et al., 2019a;](#page-9-0)[b\)](#page-9-1), neural theorem proving [\(Rocktäschel et al., 2015\)](#page-11-3), differentiable ILP (∂ILP) [\(Evans & Grefenstette, 2018\)](#page-10-1), playing blocks world [\(Dong et al., 2019\)](#page-10-2), and solving sudoku [\(Palm et al., 2018;](#page-11-1) [Wang et al., 2019\)](#page-11-4). Our work follows this literature on solving combinatorial puzzles – in particular, the methods that do not assume explicit knowledge of rules, but instead learn to solve the problem directly from some of its solved instances, *e.g.*, Recurrent Relational Networks (RRN) [\(Palm et al., 2018\)](#page-11-1). Such models assume a fixed value-set, *i.e.*, the set from which variables are assigned values is assumed to be constant during training and testing. This is a significant limitation, since it may not always be possible to generate sufficient training data for similar large problems in which variables take values from a bigger value-set [\(Najafian et al., 2018\)](#page-10-3). Moreover, value-set invariance is a desirable goal in itself, since as humans we often find it natural to generalize to problems of unseen variable and value sizes once we know how
4
+
5
+ <sup>∗</sup>Equal contribution. Work done while at IIT Delhi. Current email: vidit.jain@alumni.iitd.ac.in
6
+
7
+ to solve similar problems of a different size, e.g., we may solve a $12 \times 12$ sudoku after learning to solve a $9 \times 9$ sudoku. We note that graph based models have been shown to generalize well on varying graph sizes, e.g., finding a satisfying solution of a CNF encoding of a CSP with 100 Boolean-variables, after training on CNF encodings of CSPs with only 40 Boolean-variables (Selsam et al., 2019). However, the model trained using CNF encoding of Boolean-CSPs cannot be used directly for a non-Boolean CSP in which variables take value from a different (larger) value-set.
8
+
9
+ In response, we study value-set invariance in combinatorial puzzles from the same domain. To formally define a similar puzzle with variables taking values from a different value-set, we make use of Lifted CSP (Joslin & Roy, 1997), a (finite) first-order representation that can be ground to CSPs of varying variable and value-set sizes. We note that even though we use Lifted CSPs to define value-set invariance, their complete specification is assumed to be unknown. Specifically, we do not have access to the constraints of the CSP, and thus neural SAT solvers like NeuroSAT (Selsam et al., 2019) cannot be used. During training, we only assume access to solved instances along with their constraint graph. We define our problem as: given solved instances and the corresponding constraint graph of an unknown ground CSP with a value-set of size k, can we learn neural models that generalize to instances of the same lifted CSP, but with a different value-set of size k' (typically k' > k)? An example task includes training a model using data of $9 \times 9$ Sudoku, but testing on a $12 \times 12$ or a $16 \times 16$ Sudoku. We build our solution using RRNs as the base architecture. They run a GNN on the constraint graph, and employ iterative message passing in a recurrent fashion – the nodes (variables) are then decoded to obtain a solution. We present two ways to enhance RRNs for value-set invariance.
10
+
11
+ Binarized Model: Our first model converts a multi-class classification problem into a binary classification problem by converting a multi-valued variable into multiple Boolean variables, one for each value in the value-set. The binarized constraint graph is defined as follows: if there is an edge between two variables in the original constraint graph, there are k edges between the Boolean nodes corresponding to the same value and the same two variables in the new graph. In addition, all k Boolean variables corresponding to a multi-valued variable are connected with each other. This model naturally achieves value-set invariance. At test time, a larger value-set just results in a larger graph size. All GNN weights are tied, and because all the variables in the binarized model are Boolean, the embeddings for binary values '0' and '1', learned during training, are directly applicable at test time.
12
+
13
+ Multi-valued Model: Our second model directly operates on the given multi-valued variables and the corresponding constraint graph, but introduces a value node for every value in the value-set. Each pre-assigned (unassigned) variable node is connected to that (respectively, every possible) value node. The challenge in this model is initializing value nodes at test time when k' > k. We circumvent this problem by training k' or more value embeddings upfront, randomly sub-selecting a k-sized subset during each learning iteration. This random sub-selection exploits the symmetry of value-set elements across instances. At test time, k' of the learned embeddings are used.
14
+
15
+ We perform extensive experimental evaluation on puzzles generated from three different structured CSPs: Graph Coloring (GCP), Futoshiki, and Sudoku. We compare two of our models with an NLM (Dong et al., 2019) baseline – a generic neural reasoner, which either fails to scale or performs significantly worse for most test sizes used in our experiments. We also compare our two models along the axes of performance and scalability and discuss their strengths and weaknesses.
16
+
17
+ # Method
18
+
19
+ A combinatorial puzzle can be thought of as a grounded CSP, and to formally define a puzzle from the same domain but with a larger value-set, we resort to the notion of '*Lifted CSPs*', which represent an abstraction over multiple ground CSPs of the same type. A lifted CSP does not include a specific set of variables and values; instead, it operates in terms of variable and value *references* that can be instantiated with all ground variables and values in a ground CSP. This makes them amenable to instantiating CSPs or puzzles with a varying number of variables as well as values. We define a Lifted CSP $\mathcal{L}_C$ as a three-tuple $\langle P, R, C \rangle$. $P$ is a set of predicates: a predicate $p \in P$ represents a Boolean function over its arguments, which are variable references. Similarly, $R$ is a set of relations over the value space – an $r \in R$ represents a Boolean function over arguments that are value references. A predicate (or a relation) with its arguments is called an atom. $C$ is a set of lifted constraints, constructed by applying logical operators to atoms – they are interpreted as universally quantified over all instantiations of variable and value references. Finally, a Lifted CSP uses a special unary function $\textrm{Value}$, whose argument is a variable reference and which evaluates to a value reference. As an example, a lifted CSP for Sudoku may have $P = \{\textrm{Nbr}\}$ for whether two cells are in the same row, column or box, $R = \{\textrm{Neq}\}$, representing that two values are unequal, and a lifted constraint: $\textrm{Nbr}(c_1, c_2) \rightarrow \textrm{Neq}(\textrm{Value}(c_1), \textrm{Value}(c_2))$.
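+
+ To make the grounding step concrete, here is a small hypothetical Python sketch for a $4 \times 4$ Sudoku-like grid with $2 \times 2$ boxes; the `nbr` helper and the flat cell indexing are our own illustrative choices, not from the paper.
+
+ ```python
+ def nbr(c1, c2, n=4, box=2):
+     # Nbr(c1, c2): cells share a row, column or box (cells indexed 0..n*n-1)
+     (r1, k1), (r2, k2) = divmod(c1, n), divmod(c2, n)
+     same_box = (r1 // box, k1 // box) == (r2 // box, k2 // box)
+     return c1 != c2 and (r1 == r2 or k1 == k2 or same_box)
+
+ # grounding Nbr(c1, c2) -> Neq(Value(c1), Value(c2)) over all cell pairs
+ ground = [(c1, c2) for c1 in range(16) for c2 in range(16) if nbr(c1, c2)]
+ ```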
20
+
21
+ A lifted CSP $\mathcal{L}_C$ yields a ground CSP $C$, given a set of variables $\mathcal{O}$, a set of values $\mathcal{V}$, and a complete instantiation of all predicates and relations over this set (e.g., in Sudoku, the number of cells, the possible values, and which cells are neighbors and which are not). The ground constraints are constructed by instantiating the lifted constraints over all variables and values. A (satisfying) solution, $\mathbf{y}$, of a CSP refers to a complete specification of the $\textrm{Value}: \mathcal{O} \rightarrow \mathcal{V}$ function, such that all the constraints are satisfied. We are often given a partial (satisfying) solution, $\mathbf{x}$ – an assignment of values to a subset of variables $\tilde{\mathcal{O}} \subseteq \mathcal{O}$ – and the goal is to output $\mathbf{y}$, such that $\mathbf{y}$ agrees with $\mathbf{x}$ on the subset $\tilde{\mathcal{O}}$.
22
+
23
+ Given a ground CSP C, the Constraint Graph, $G_C = (N_C, E_C)$ , is constructed by having each variable in the CSP represent a node in the graph and introducing an edge between two nodes $n_1^C$ , $n_2^C$ iff the corresponding variables appear together in some constraint. The edges in the constraint graph are typed based on the identity of the lifted constraint from which it comes. Note that there could be multiple edges between nodes $n_1^C$ , $n_2^C$ in $G_C$ , if these nodes appear together in more than one constraint. We embed the knowledge about relations between values in $\mathcal V$ in the form of another graph, called Relation Graph, $G_R = (N_R, E_R)$ , where there is a node for every value in the set $\mathcal V$ , and there is a (directed) edge between nodes corresponding to $v_l, v_l'$ depending on whether $r(v_l, v_{l'})$ is true or not, for every $r \in \mathcal R$ . Similar to $G_C$ , this graph can also have multi-edges between two pairs of nodes, if more than one relationship holds between the corresponding values.
24
+
25
+ **Problem Definition:** To achieve value-set invariance, our goal is to train a model $M_{\Theta}$ on training data from an unknown ground CSP C (with variables $\mathcal{O}$ and value-set $\mathcal{V}$ ) obtained from an unknown lifted CSP $\mathcal{L}_C$ , and test it on an arbitrary ground CSP C' from the same lifted CSP (with variables $\mathcal{O}'$ and value-set $\mathcal{V}'$ ), where $|\mathcal{V}| \neq |\mathcal{V}'|$ . Formally, we are given training data $\mathcal{D}$ as a set of tuples $\{((\mathbf{x^i}, G_{C^i}), \mathbf{y^i})\}_{i=1}^M$ , along with a relationship graph $G_R$ encoding relations between values in the value-set $\mathcal{V}$ . Here, $\mathbf{i}^{th}$ instance denotes a partial and corresponding complete solution for $C^i$ . We note that explicit form of the constraints in $C^i$ or $\mathcal{L}_C$ are not available, only the graphs are given to the model. Our goal is to learn model $M_{\Theta}$ , such that given graphs $G_{C'}$ and $G_{R'}$ , and a partial solution $\mathbf{x}'$ (for CSP C'): $M_{\Theta}(\mathbf{x}') = \mathbf{y}'$ , only if $\mathbf{y}'$ is a corresponding complete solution for $\mathbf{x}'$ . Note that in one of our models, we will additionally assume that $\max |\mathcal{V}'|$ , denoted as $k_{\max}$ , is known to us at training time, which we argue is a benign assumption for most practical applications.
26
+
27
+ We propose two models for value-set invariance: the *Binarized Model*, and the *Multi-valued Model*. In each case, we assume the training data is provided in the form $\mathcal{D} = (\{(\mathbf{x^i}, G_{C^i}), \mathbf{y^i}\}_{i=1}^M, G_R)$ as described in Section 3. Let $\mathcal{V}$ and $\mathcal{V}'$ denote the value-sets at train and test time, with cardinality k, k', respectively. For each model, we first present a high level intuition, followed by description of: (a) Construction of Message Passing Graph (b) Message Passing Rules (c) Loss Computation, and finally (d) Prediction on a problem with larger value-set.
28
+
29
+ Intuition behind our Binarized Model comes directly from the 'sparse encoding' of a discrete CSP into a SAT formula (de Kleer, 1989; Walsh, 2000), in which assignment of a value $v \in \mathcal{V}$ to any variable $\mathbf{x}[\mathbf{j}] \in \mathcal{O}$ is encoded by a Boolean variable that represents $\mathbf{x}[\mathbf{j}] == v$ . Such an encoding converts a single multi-
30
+
31
+ <span id="page-3-1"></span>![](_page_3_Figure_7.jpeg)
32
+
33
+ Figure 1: An example Futoshiki Puzzle of size $3\times 3$ and the corresponding graphs. A value of -1 indicates an unassigned variable. Black and red edges are Constraint and Relation edges respectively. The digits 5,7,1 in square boxes represent a random 3-permutation of $k_{\rm max}$ , used in multi-valued model for initialization of node embeddings.
34
+
35
+ valued variable into multiple Boolean valued variables. We convert a Constraint Graph (fig. 1) with nodes representing multi-valued variables (yellow nodes), into a Binary Graph (fig. 1) with Boolean nodes (blue nodes). This creates a $|N_C| \times k$ grid of Boolean nodes, with a row representing a variable, a column representing a value and a grid cell (a Boolean node) representing assignment of a particular value to a particular variable. Such a graph can easily represent relationship between the values as well (horizontal red edges), thereby encapsulating the information present in the Relation Graph (fig. 1). We use this Binary Graph for message passing.
36
+
37
+ **Construction of Message Passing Graph:** We denote the *Message Passing Graph (MPG)* by G = (N, E) with the set of nodes N and set of edges E, constructed as follows: **Nodes:** For each node $n_j^C \in N_C$ in the Constraint Graph (fig. 1, yellow nodes), we construct k binary valued nodes, denoted as $n_{j,1}, n_{j,2} \cdots n_{j,k}$ in N (blue nodes in Binary Graph). **Edges:** We construct two categories
38
+
39
+ <span id="page-3-0"></span><sup>&</sup>lt;sup>1</sup>There is an alternative encoding scheme called 'compact encoding'. It is discussed in the appendix
40
+
41
+ of edges in G. The first category of edges is directly inherited from the edges of the constraint graph $G_C$ (black vertical edges), with k copies created due to binarization. The edge type is the same as in the original constraint graph and is denoted by q. Formally, for every edge, $e^C_{(\mathbf{j},\mathbf{j}')} \in E_C$, where $e^C_{(\mathbf{j},\mathbf{j}')}.type = q$, we introduce k edges denoted as $e^q_{(\mathbf{j}\mathbf{l},\mathbf{j}'\mathbf{l})}$, i.e., there is an edge between every pair of nodes, $n_{\mathbf{j},\mathbf{l}}$ and $n_{\mathbf{j}',\mathbf{l}}$, $1 \leq \mathbf{l} \leq k$. We refer to them as $Constraint\ Edges$. The second category of edges encodes the information from the Relationship Graph $G_R$ into the MPG, with $|N_C|$ copies created, one for each variable. For every edge $e^R_{(\mathbf{l},\mathbf{l}')} \in E_R$ with edge type r, create an edge $e^r_{(\mathbf{j}\mathbf{l},\mathbf{j}\mathbf{l}')}$ with type r between every pair of binary nodes $n_{\mathbf{j},\mathbf{l}}$ and $n_{\mathbf{j},\mathbf{l}'}$, $1 \leq \mathbf{j} \leq |N_C|$ (e.g., red edges encoding the less-than relation between value pairs (1,2), (2,3) and (1,3)). We refer to them as $Relational\ Edges$.
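+
+ A compact sketch of this edge construction, assuming 0-based indices, flat node ids, and edge lists of (endpoint, endpoint, type) triples; all names are illustrative.
+
+ ```python
+ def binarized_mpg_edges(constraint_edges, relation_edges, n_vars, k):
+     # constraint_edges: [(j, j2, q)]; relation_edges: [(l, l2, r)]
+     node = lambda j, l: j * k + l                 # flat id of binary node n_{j,l}
+     edges = []
+     for j, j2, q in constraint_edges:             # k copies of each constraint edge
+         edges += [(node(j, l), node(j2, l), q) for l in range(k)]
+     for l, l2, r in relation_edges:               # |N_C| copies of each relation edge
+         edges += [(node(j, l), node(j, l2), r) for j in range(n_vars)]
+     return edges
+ ```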
42
+
43
+ **Recurrent Message Passing:** Once the MPG has been constructed, we follow recurrent message passing rules, with weights shared across layers, similar to RRNs (Palm et al., 2018) with some differences. For each node $n_{\mathbf{j},\mathbf{l}}$ in the graph, we maintain a hidden state $h_t(n_{\mathbf{j},\mathbf{l}})$, which is updated at each step t based on the messages received from its neighbors. This hidden state is used to compute the probability of a binary node taking a value of 1. Since we use sparse encoding, only the node with maximum probability amongst the k binary nodes $n_{\mathbf{j},\mathbf{l}}$, $1 \le \mathbf{l} \le k$, corresponding to the multi-valued variable $\mathbf{x}[\mathbf{j}]$, is assigned the value 1 at the end of message passing. We give the details of the message passing and state update functions in the appendix. Next, we discuss how the nodes are initialized before message passing starts, followed by the details of loss computation.
44
+
45
+ **Initialization:** Irrespective of the size of the value-set $\mathcal V$ or vertices $N_C$, there are 3 learnable embeddings ($u[0]$, $u[1]$ and $u[-1]$) for initialization: two for binary values 0 and 1, and one for value -1 representing unassigned nodes. All k nodes corresponding to an unassigned variable $\mathbf x[\mathbf j]$ are initialized with $u[-1]$, i.e., whenever $\mathbf x[\mathbf j]$ is NULL (yellow nodes with -1), $u_0(n_{\mathbf j,\mathbf l}) = u[-1], \forall v_{\mathbf l} \in \mathcal V$, where $u_0$ represents the initial embedding function. On the other hand, if $\mathbf x[\mathbf j]$ is preassigned a value $v_{\hat{\mathbf l}}$, then $u_0(n_{\mathbf j,\mathbf l}) = u[0], \forall v_{\mathbf l} \neq v_{\hat{\mathbf l}}$, and $u_0(n_{\mathbf j,\hat{\mathbf l}}) = u[1]$. E.g., the variable corresponding to the binary nodes in the 1st row has a preassigned value of '3'; consequently, the binary nodes in the 1st and 2nd columns of the 1st row are initialized with $u[0]$, and the binary node in the 3rd column of the 1st row, which corresponds to the assignment '$\mathbf x[1] = 3$', is initialized with $u[1]$. Lastly, the hidden state, $h_0(n_{\mathbf j,\mathbf l})$, of each node, $n_{\mathbf j,\mathbf l}$, is initialized as a $\mathbf 0$ vector, $\forall \mathbf j, \forall v_{\mathbf l}$.
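+
+ A PyTorch-style sketch of this initialization, assuming `u` maps {-1, 0, 1} to the learnable d-dimensional embeddings and `x[j] == -1` marks an unassigned variable; the interface is ours.
+
+ ```python
+ import torch
+
+ def init_binary_nodes(x, k, u):
+     # x: (n,) preassigned values in 1..k, or -1 if unassigned
+     # u: {-1: u[-1], 0: u[0], 1: u[1]}, each a learnable (d,) embedding
+     n = x.shape[0]
+     emb = u[-1].expand(n, k, -1).clone()   # default: every node gets u[-1]
+     for j in range(n):
+         if x[j] != -1:                     # preassigned variable
+             emb[j] = u[0]                  # all k nodes get u[0] ...
+             emb[j, x[j] - 1] = u[1]        # ... except the assigned value's node
+     return emb                             # hidden states h_0 start as zeros
+ ```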
46
+
47
+ **Loss Computation:** The Binary Cross Entropy (BCE) loss for each node $n_{\mathbf{j},\mathbf{l}}$ is computed w.r.t. its target, $\tilde{\mathbf{y}}[\mathbf{j},\mathbf{l}]$, which is defined as 1 whenever $\mathbf{y}[\mathbf{j}] = \mathbf{l}$ and 0 otherwise. At each step $t \in \{1 \dots T\}$, we can compute the probability $Pr(n_{\mathbf{j},\mathbf{l}}.v=1;\Theta)$ of classifying a node $n_{\mathbf{j},\mathbf{l}}$ as 1 by passing its hidden state through a learnable scoring function s, i.e., $Pr_t(n_{\mathbf{j},\mathbf{l}}.v=1;\Theta) = \sigma(s(h_t(n_{\mathbf{j},\mathbf{l}})))$, where $\sigma$ is the standard Sigmoid function. Here, $n_{\mathbf{j},\mathbf{l}}.v$ denotes the value that node $n_{\mathbf{j},\mathbf{l}}$ can take and belongs to the set $\{0,1\}$. The loss at step t is the average BCE loss across all nodes: $-\frac{1}{|N|}\sum_{n_{\mathbf{j},\mathbf{l}}\in N}\big[\tilde{\mathbf{y}}[\mathbf{j},\mathbf{l}]\log Pr_t(n_{\mathbf{j},\mathbf{l}}.v=1;\Theta) + (1-\tilde{\mathbf{y}}[\mathbf{j},\mathbf{l}])\log Pr_t(n_{\mathbf{j},\mathbf{l}}.v=0;\Theta)\big]$. Like Palm et al. (2018), we back-propagate through the loss at every step $t \in \{1 \dots T\}$ as it helps in learning a convergent message passing algorithm. During training, the objective is to learn the 3 initial embeddings u[-1], u[0], u[1], the functions used in message passing and state update, and the scoring function s.
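+
+ A sketch of the per-step loss, assuming `logits_per_step` collects the scores $s(h_t(n_{\mathbf{j},\mathbf{l}}))$ at every step; the names are illustrative.
+
+ ```python
+ import torch
+ import torch.nn.functional as F
+
+ def stepwise_bce_loss(logits_per_step, target):
+     # logits_per_step: list of T tensors of shape (n, k)
+     # target: (n, k) float tensor with target[j, l] = 1 iff y[j] = l
+     # Summing over t back-propagates through the loss at every step.
+     return torch.stack([F.binary_cross_entropy_with_logits(s, target)
+                         for s in logits_per_step]).sum()
+ ```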
48
+
49
+ **Prediction on a problem with larger size of value-set:** While testing, let the constraint and relation graph be $G_{C'}$ and $G_{R'}$ with n' and k' nodes respectively. Let $\mathbf{x}'$ be a partial solution, with n' variables $\mathbf{x}'[\mathbf{j}]$ , each taking a value from value-set $\mathcal{V}'$ of size k'. As described above, we create a graph G' with n'k' nodes, run message passing for T steps, and for each variable $\mathbf{x}'[\mathbf{j}]$ , compute the k' probabilities, one for each of the k' nodes $n_{\mathbf{j},\mathbf{l}} \forall \mathbf{l} \in \mathcal{V}'$ corresponding to the variable $\mathbf{x}'[\mathbf{j}]$ , which is assigned the value corresponding to maximum probability, i.e., $\hat{\mathbf{y}}[\mathbf{j}] = \arg\max_{\mathbf{l} \in \mathcal{V}'} Pr_T(n_{\mathbf{j},\mathbf{l}}.v = 1; \Theta)$ .
50
+
51
+ The multi-valued model differs from the binarized model by avoiding binarization of nodes, and instead explicitly adding *Value Nodes* in the message passing graph, one for each value in the value-set. The message graph consists of two components: (a) A Graph G = (N, E) to represent constraints inherited from the constraint graph $G_C = (N_C, E_C)$ (b) A Graph $\tilde{G} = (\tilde{N}, \tilde{E})$ to represent relations inherited from the relationship graph $G_R = (N_R, E_R)$. We refer to G as the *Constraint Message Passing Graph* (CMPG), and $\tilde{G}$ as the *Relationship Message Passing Graph* (RMPG). Message passing on the RMPG first generates the desired number of embeddings (up to $k_{\rm max}$), one for each of the value nodes. This is followed by message passing on the CMPG, which uses the embeddings of the value nodes
52
+
53
+ generated by the RMPG and computes embeddings for each variable node. Finally, the variable nodes are classified based on the similarity of their embedding with the embeddings of the value nodes computed by the RMPG. Learning to generate up to $k_{\rm max}$ embeddings from training samples with only $k (< k_{\rm max})$ values in the value-set is the main technical challenge that we address in this model.
54
+
55
+ Construction of CMPG: Nodes: For each node $n_{\mathbf{j}}^C \in N_C$ in the constraint graph, we construct a k-valued node, denoted as $n_{\mathbf{j}} \in N$. The total number of such nodes constructed is $|N_C|$. We refer to these as $Variable\ Nodes$ (yellow nodes in Multi-Valued Graph in fig. 1). Additionally, for each value $v_{\mathbf{l}} \in \mathcal{V}$ in the value-set, we create a node, denoted as $n_{\mathbf{l}}^v \in N$. The total number of such nodes constructed is $|\mathcal{V}|$. We refer to these as $Value\ Nodes$ (orange nodes). Edges: For every edge, $e^C_{(\mathbf{j},\mathbf{j}')} \in E_C$, where $e^C_{(\mathbf{j},\mathbf{j}')}.type = q$, we introduce an edge denoted as $e^q_{(\mathbf{j},\mathbf{j}')}$ with type q. These edges are directly inherited from the constraint graph. We refer to these as $Constraint\ Edges$ (black edges). Additionally, to indicate the pre-assignment of values to the variables in $\mathbf{x}$, we introduce new edges connecting value nodes to the appropriate variable nodes. Whenever $\mathbf{x}[\mathbf{j}] = v_{\mathbf{l}}$, add an edge, $e^a_{(\mathbf{j},\mathbf{l})}$, between variable node $n_{\mathbf{j}}$ and value node $n_{\mathbf{l}}^v$ (blue edges). If $\mathbf{x}[\mathbf{j}]$ is NULL, i.e., unassigned, then add k edges, $e^{\bar{a}}_{(\mathbf{j},\mathbf{l})}$, $\forall v_{\mathbf{l}} \in \mathcal{V}$, connecting the variable node $n_{\mathbf{j}}$ with all k value nodes $n_{\mathbf{l}}^v$ (e.g., green edges connecting orange value node '2' to all '-1' variable nodes). We refer to them as $Assignment\ Edges$.
56
+
57
+ Construction of RMPG: Nodes: For each value $v_{\mathbf{l}} \in \mathcal{V}$, create a node denoted as $\tilde{n}_{\mathbf{l}}^v \in \tilde{N}$ (purple nodes in Relation Graph in fig. 1). The total number of such nodes constructed is $|\mathcal{V}|$. We refer to these as *Value Nodes*. Edges: For every pair of value nodes, $\tilde{n}_{\mathbf{l}}^v$ and $\tilde{n}_{\mathbf{l}'}^v$, introduce an edge $\tilde{e}^r_{(\mathbf{l},\mathbf{l}')}$ with type r if $r(v_{\mathbf{l}}, v_{\mathbf{l}'})$ holds based on the relationship graph $G_R$, i.e., $e^R_{(\mathbf{l},\mathbf{l}')} \in E_R$ with edge label r (red edges). These edges are defined for relations that exist between values in the value-set.
58
+
59
+ **Achieving Value-set Invariance:** A key question arises here: why do we need to construct a separate RMPG ($\tilde{G}$)? Why not embed the relevant edges in the CMPG (G), as done for the binarized model? The answer lies in realizing that we represent each value in the value-set explicitly in the multi-valued model, unlike the binarized model. Hence, our model needs to learn a representation for each of them in the form of value node embeddings. Further, to generalize we need to learn as many embeddings as there are values in the largest test value-set, i.e., $k_{\text{max}} = \max |\mathcal{V}'|$. We achieve this by randomly sub-selecting a k-sized set from $\{1\dots k_{\max}\}$ and permuting the chosen subset for each training example in a given mini-batch, and then computing the 'relationship-aware' embeddings from this permuted subset through message passing in the RMPG. The 'relationship-aware' embeddings are then used to initialize the value nodes (orange nodes) during message passing in the CMPG. For instance, if the permutation obtained is $\{w_1, \dots, w_{\mathbf{l}}, \dots, w_k\}$, where $\forall l, 1 \leq w_l \leq k_{\max}$, then the embedding for the value node $\tilde{n}_{\mathbf{l}}^v$ in $\tilde{G}$ is initialized by the $w_{\mathbf{l}}^{th}$ learnable embedding (e.g., purple nodes for values '1', '2', and '3' are initialized by the 5th, 7th, and 1st learnable embedding, respectively). After message passing on $\tilde{G}$, the 'relationship-aware' embedding of $\tilde{n}_{\mathbf{l}}^{v}$ (purple node) is used to initialize the embedding for value node $n_{\mathbf{l}}^v$ (orange node) in G. This elegant process is able to train all the $k_{\max}$ embeddings by simply using the training data corresponding to $\mathcal{V}$, and the corresponding relationship information. Since these relationship-aware embeddings need to be pre-computed before they can be passed to the downstream constraint processing, we construct two different message passing graphs, one for computing relationship-aware embeddings and one for constraint handling.
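+
+ The random sub-selection itself is a one-liner; a sketch assuming `U` stores the $k_{\max}$ learnable value embeddings (0-based indexing, unlike the 1-based example above).
+
+ ```python
+ import torch
+
+ def subselect_value_embeddings(U, k):
+     # U: (k_max, d) learnable embeddings; draw a random k-permutation of k_max
+     # and use those rows to initialize the k RMPG value nodes for this example
+     perm = torch.randperm(U.shape[0])[:k]
+     return U[perm]
+ ```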
60
+
61
+ **Recurrent Message Passing on RMPG:** The rules of message passing and hidden state updates at every step t are similar to the RRN of Palm et al. (2018) and are defined in detail in the appendix. After updating the hidden states for a total of $\tilde{T}$ steps, the final embeddings, $\tilde{h}_{\tilde{T}}(\tilde{n}_{\mathbf{l}}^v) \ \forall v_{\mathbf{l}} \in \mathcal{V}$, are used as 'relationship-aware' embeddings for initializing the input features (embeddings) of the nodes in the CMPG G. We now discuss the initialization of the value nodes before message passing in the RMPG.
62
+
63
+ **Initialization:** There are a total of $k_{\max}$ learnable embeddings, $\tilde{u}[l']$ , $1 \leq l' \leq k_{\max}$ , out of which any k are randomly chosen for initializing the nodes in RMPG. e.g., $\tilde{u}[5]$ , $\tilde{u}[7]$ , $\tilde{u}[1]$ are chosen to initialize the purple value nodes '1','2', and '3' in Relation Graph in fig. 1. Formally, for each input $\mathbf{x}$ , select a k-permutation, $\mathcal{P}_{\mathbf{x}}$ , of $k_{\max}$ . Initialize the embedding of $\tilde{n}_l^v$ in $\tilde{G}$ with $\tilde{u}[\mathcal{P}_{\mathbf{x}}[\mathbf{l}]]$ , $\forall \mathbf{l} \in \{1 \dots k\}$ . Initialize the hidden state, $\tilde{h}_0(\tilde{n}_l^v)$ , $\forall \tilde{n}_l^v \in \tilde{N}$ with a $\mathbf{0}$ vector.
64
+
65
+ **Recurrent Message Passing on CMPG:** Message passing on CMPG updates the hidden state, $h_t(n_j)$ , of each variable node $n_j$ for a total of T ( $t \le T$ ) steps using the messages received from its
66
+
67
+ neighbors. The details are similar to message passing in binarized model and are discussed in the appendix. Below we describe the initialization of node embeddings followed by computation of loss.
68
+
69
+ **Initialization:** We initialize the embedding of the value nodes (orange nodes), $n_{\mathbf{l}}^v$ in G, using the final 'relationship-aware' embeddings, $\tilde{h}_{\tilde{T}}(\tilde{n}_{\mathbf{l}}^v)$, of $\tilde{n}_{\mathbf{l}}^v$ (purple nodes) in $\tilde{G}$. The variable nodes that are preassigned a value (non-zero yellow nodes) in $\mathbf{x}$ are initialized by the embedding of the corresponding value node, *i.e.*, if $\mathbf{x}[\mathbf{j}] = \mathbf{l}$, then $n_{\mathbf{j}}$ is initialized with the 'relationship-aware' embedding, $\tilde{h}_{\tilde{T}}(\tilde{n}_{\mathbf{l}}^v)$, of $\tilde{n}_{\mathbf{l}}^v$. The embeddings of nodes corresponding to the unassigned variables ('-1' yellow nodes) are initialized by the average, $(1/k) \sum_{v_{\mathbf{l}} \in \mathcal{V}} \tilde{h}_{\tilde{T}}(\tilde{n}_{\mathbf{l}}^v)$, of all 'relationship-aware' embeddings. The hidden state $h_0(n_{\mathbf{j}})$ of each variable node $n_{\mathbf{j}}$ is initialized with a $\mathbf{0}$ vector.
70
+
71
+ **Loss Computation:** For each variable represented by node $n_{\mathbf{j}}$, the ground truth value $\mathbf{y}[\mathbf{j}]$ acts as the target for computing a standard Cross Entropy Loss. The probabilities over $\mathcal{V}$ are computed as follows: at step t, a scoring function, s, computes a score, $s(h_t(n_{\mathbf{j}}), h_t(n_{\mathbf{l}}^v))$, for assigning a value $v_{\mathbf{l}} \in \mathcal{V}$ to a variable $n_{\mathbf{j}}$ based on the hidden states of the corresponding value and variable nodes. For each variable node, a Softmax converts these scores into probabilities over the values $v_{\mathbf{l}} \in \mathcal{V}$, i.e., $Pr(n_{\mathbf{j}}.v = v_{\mathbf{l}}) = Softmax(s(h_t(n_{\mathbf{j}}), h_t(n_{\mathbf{l}}^v)))$, $\forall v_{\mathbf{l}} \in \mathcal{V}$, where $n_{\mathbf{j}}.v \in \mathcal{V}$ denotes the value that node $n_{\mathbf{j}}$ can take. The loss at step t is simply the average over variable nodes: $L_t = -\frac{1}{|N|} \sum_{n_{\mathbf{j}} \in N} \log Pr(n_{\mathbf{j}}.v = \mathbf{y}[\mathbf{j}])$. To ensure that the multi-valued model learns different embeddings for each value in the value-set, we add an auxiliary loss term, corresponding to the total pairwise dot product (similarity) of any two embeddings, before and after message passing in $\tilde{G}$. We call it Orthogonality Loss. Its weight, $\alpha$, is a hyper-parameter.
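+
+ One plausible reading of the orthogonality term is sketched below: the sum of pairwise (off-diagonal) dot products of the value-node embeddings; we take absolute values so that negative dot products are not rewarded, which is our assumption, and the exact form used in the paper may differ.
+
+ ```python
+ import torch
+
+ def orthogonality_loss(E):
+     # E: (k, d) value-node embeddings; penalize pairwise similarities so that
+     # different values keep distinguishable embeddings
+     G = E @ E.t()                             # Gram matrix of dot products
+     off_diag = G - torch.diag(torch.diag(G))  # drop self-similarities
+     return off_diag.abs().sum()
+ ```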
72
+
73
+ **Prediction on a problem with larger size of value-set:** For a puzzle with a larger value-set, $\mathcal{V}'$, a bigger RMPG is created, whose k' nodes are initialized with the first k' (learnt) embeddings. Unlike training, we always choose the first k' embeddings to avoid randomness during testing. The prediction is made using the probabilities at the last step T, i.e., $\hat{\mathbf{y}}[\mathbf{j}] = \arg\max_{v_l \in \mathcal{V}'} Pr(n_{\mathbf{j}}.v = v_l)$.
74
+
75
+ **Relative Comparison:** In the binarized model, the constructed graph G has $k|N_C|$ nodes and at least $k|E_C|+|N_C|k(k-1)/2$ edges due to binarization. This increases the graph size by a factor of at least k. As a result, we soon hit the memory limits of a GPU while training the binarized model on bigger problems. The model also needs significantly more inference time due to its bigger size. On the other hand, the multi-valued model, while compact in its representation, needs to learn additional embeddings for a speculative size of the value-set at test time. This poses additional requirements on the model, both in terms of representation and learning, possibly affecting the quality of generalization. While this is a simple analytical understanding of the possible merits of the two models, we experimentally examine the impact of these issues on real datasets.
2203.15235/main_diagram/main_diagram.drawio ADDED
The diff for this file is too large to render. See raw diff
 
2203.15235/paper_text/intro_method.md ADDED
@@ -0,0 +1,93 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Introduction
2
+
3
+ Photo editing capabilities, long confined to 2D space, have recently expanded into 3D space. For example, predicting depth from a 2D image enables the composition of objects in an image [\[42,](#page-9-1) [58\]](#page-9-2). Object segmentation allows the projection of an image to a new view [\[27,](#page-8-1) [36,](#page-8-2) [53\]](#page-9-3). Lighting and ground plane estimation make it possible to relight objects and generate new shadows in an image [\[9,](#page-8-3)[14,](#page-8-4)[15,](#page-8-5)[55\]](#page-9-4). Such techniques for 3D-aware image editing have allowed the user to manipulate an image in a more intuitive manner — as if the object existed in 3D space — and opened new opportunities in downstream applications.
4
+
5
+ As a missing piece among the existing 3D-aware image manipulation methods, we focus on *3D-aware image deformation*. Unlike the aforementioned techniques, 3D-aware deformation does not just alter the scene information (e.g., camera parameters, lighting conditions) or modify 2.5D information; instead, it allows the user to directly manipulate the 3D geometry and appearance of an object. The topics most relevant to 3D-aware deformation are (1) human pose transfer [\[6,](#page-8-6) [29,](#page-8-7) [33\]](#page-8-8), which works only for human bodies, (2) novel view synthesis [\[13,](#page-8-9) [37,](#page-8-10) [52\]](#page-9-5), which is limited to altering the viewpoint of an image, and (3) 3D model-based manipulation [\[26\]](#page-8-11), which requires the exact 3D model of the object in an image. To address these limitations, we aim to enable 3D-aware image deformation with minimal restrictions on shape category and deformation type.
6
+
7
+ For 3D-aware deformation, it is *necessary* to reconstruct the object in a 2D image in 3D space; however, this alone is *not sufficient* in general. Deformation requires either surface or volume information [\[21,](#page-8-12) [49\]](#page-9-6). However, most existing
8
+
9
+ <sup>\*</sup> equal contributions, † corresponding author
10
+
11
+ <span id="page-1-0"></span>methods of image-based 3D reconstruction do not directly output a surface or volume [\[8,](#page-8-13) [46\]](#page-9-7), or produce a surface without properly accounting for intrinsic shape properties [\[12,](#page-8-14) [23,](#page-8-15) [28,](#page-8-16) [40,](#page-9-0) [41,](#page-9-8) [47,](#page-9-9) [50,](#page-9-10) [54\]](#page-9-11) – which can largely affect the deformation result. See the example in the red branch of Figure [1:](#page-0-0) the inaccurate topological prediction connecting the legs causes undesired visual artifacts in the deformation. Indeed, achieving topological correctness in 3D reconstruction is difficult by nature, as it involves both continuous and discrete quantities.
12
+
13
+ In this paper, given a 3D point cloud of the object in an input image (whose 2D-to-3D reconstruction is performed by PIFu [\[40\]](#page-9-0)), we propose to enable 3D-aware image deformation by learning an additional intrinsic geometric property: the shape Laplacian. The shape Laplacian is the *essential* information encoding the geometry intrinsics. In particular, bounded biharmonic weights [\[21\]](#page-8-12) – which have been widely used as a standard technique for deformation in computer graphics – compute the linear blending weights associated with deformation handles as the minimizers of the deformation energy defined using the shape Laplacian. In our framework, we utilize the *estimated* shape Laplacian of a 3D reconstruction to obtain bounded biharmonic weights that plausibly model handle-based image deformation.
14
+
15
+ To this end, we introduce a neural network that can predict the shape Laplacian of the underlying volume of a 3D point cloud reconstructed from a 2D image — without directly converting the point cloud to a volume. Considering that the deformation energy can be discretized with the standard linear FEM Laplacian $LM^{-1}L$ (where $L$ is a symmetric cotangent Laplacian matrix and $M$ is a diagonal lumped mass matrix), we design our network to learn the matrices $L$ and $M^{-1}$ from the supervision obtained from a ground truth 3D mesh. The elements in the inverse mass matrix $M^{-1}$ are predicted for each individual point, while the elements of the cotangent Laplacian matrix $L$ are predicted by taking *pairs* of the input points. We use a symmetric feature aggregation function for such pairs and also a weight module to enforce the output matrix $L$ to be symmetric and sparse. At test time, we recover the deformation energy from the predicted $L$ and $M^{-1}$ to compute bounded biharmonic weights with user-specified deformation handles. Since our method learns the shape Laplacian instead of the handle-dependent deformation weights, it can generalize well to arbitrary handle configurations.
16
+
17
+ In the experiments, we show our results of 3D-aware deformation on 2D character and clothed human images. We also showcase a user-interactive image editing scenario, where the user produces intuitive 3D deformations based on the specified control points. For quantitative evaluation, we test our method on a large-scale 3D point cloud dataset (i.e., DFAUST [\[4\]](#page-8-17)), on which our method is shown to produce more accurate deformation weights compared to alternative methods based on mesh reconstruction or point cloud Laplacians.
18
+
19
+ Our main contributions can be summarized as follows:
20
+
21
+ - We propose a method for 3D-aware deformation of 2D images, which can be applied with minimal restrictions on shape category and deformation type.
22
+ - We introduce a novel network architecture that can learn the shape Laplacian with several desired properties (i.e., positive semi-definiteness, symmetry and sparsity) from a 3D reconstruction. To the best of our knowledge, this is the first study to demonstrate that a learning-based approach can be effective in predicting the shape Laplacian of the underlying volume of a point cloud.
23
+ - We empirically demonstrate that our *learning*-based approach leads to more plausible deformations compared to the alternative cases of *calculating* the approximation of the shape Laplacian using mesh reconstruction or point cloud Laplacian methods.
24
+
25
+ # Method
26
+
27
+ Bounded biharmonic weights [\[21\]](#page-8-12) are linear blending weights that propagate affine transformations defined at arbitrary control handles to the other points in the shape. Given a tetrahedral mesh[1](#page-2-0) $\mathcal{M} = \{\mathcal{V}, \mathcal{F}\}$ representing the volume of a shape with the sets of vertices and faces $\mathcal{V}$ and $\mathcal{F}$, respectively, a control handle $\mathcal{H}$ is defined as a point ($\mathcal{H} \in \mathcal{V}$) or a region ($\mathcal{H} \subset \mathcal{V}$) on the mesh[2](#page-2-1). Specifically, given (1) the source shape $\mathcal{M}$ with n vertices ($\mathcal{V} = \{\mathbf{v}_i\}_{i=1\cdots n}$), (2) m control handles $\{\mathcal{H}_k\}_{k=1\cdots m}$, and (3) the affine transformations $\{\mathbf{T}_k\}_{k=1\cdots m}$ defined on each control handle, the new position of the i-th vertex $\mathbf{v}_i \in \mathcal{V}$ can be calculated using the following linear formulation:
28
+
29
+ $$\mathbf{v}_i' = \sum_{k=1}^m w_{k,i} \mathbf{T}_k \mathbf{v}_i, \tag{1}$$
30
+
31
+ where $w_{k,i}$ is the deformation weight associated with the k-th control handle $\mathcal{H}_k$ and the i-th vertex $\mathbf{v}_i$.
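+
+ Eq. (1) is plain linear blend skinning; a minimal NumPy sketch with illustrative array shapes follows.
+
+ ```python
+ import numpy as np
+
+ def deform(V, T, W):
+     # V: (n, 3) rest vertices, T: (m, 3, 4) affine handle transforms T_k,
+     # W: (m, n) weights w_{k,i}; returns v'_i = sum_k w_{k,i} T_k v_i
+     Vh = np.concatenate([V, np.ones((len(V), 1))], axis=1)  # homogeneous coords
+     per_handle = np.einsum('kab,nb->kna', T, Vh)            # T_k v_i for all k, i
+     return np.einsum('kn,kna->na', W, per_handle)
+ ```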
32
+
33
+ The deformation weights $\mathbf{w}_k = \{w_{k,1}, \cdots, w_{k,n}\}^T$ for each handle are computed as minimizers of the deformation energy, subject to several constraints for the desired properties of deformation (e.g., partition of unity, non-negativity). The minimization problem can be written as follows:
34
+
35
+ <span id="page-2-2"></span>
36
+ $$\underset{\{\mathbf{w}_k\}_{k=1\cdots m}}{\operatorname{argmin}} \sum_{k=1}^{m} \frac{1}{2} \ \mathbf{w}_k^T A \mathbf{w}_k \tag{2}$$
37
+
38
+ $$\text{subject to: } w_{k,i} = 1 \quad \forall i \quad \text{s.t.} \quad \mathbf{v}_i \in \mathcal{H}_k$$
39
+
40
+ $$w_{k,i} = 0 \quad \forall i \quad \text{s.t.} \quad \mathbf{v}_i \in \mathcal{H}_{l,l \neq k}$$
41
+
42
+ $$\sum_{k=1}^{m} w_{k,i} = 1, \quad i = 1, \cdots, n,$$
43
+
44
+ $$0 \le w_{k,i} \le 1, \quad k = 1, \cdots, m, \quad i = 1, \cdots, n,$$
45
+
46
+ <span id="page-2-0"></span><sup>1</sup>While it is also possible to compute the shape Laplacian from a *surface* mesh, we focus on the *volume* Laplacian in this work.
47
+
48
+ <span id="page-2-1"></span><sup>2</sup>The handle can also be defined *out of* the mesh as a skeleton bone or a vertex of a cage. Here, we only describe the case when the handle is defined as a point or a region on the mesh.
49
+
50
+ <span id="page-3-0"></span>![](_page_3_Figure_0.jpeg)
51
+
52
+ Figure 2. Architecture overview. Given a point cloud $\mathcal{P} = \{\mathbf{p}_i\}_{i=1\cdots n}$ (i.e., a 3D reconstruction of an RGB image $\mathcal{I}$ ), our method learns the shape Laplacian to compute the deformation energy matrix $A \in \mathbb{R}^{n \times n}$ of the underlying volume of $\mathcal{P}$ . The proposed framework mainly consists of three modules: (1) Feature Extraction Module, (2) Cotangent Laplacian Prediction Module, and (3) Inverse Mass Prediction Module. Feature Extraction Module first extracts per-point features $\mathcal{F} = \{\mathbf{f}_i\}_{i=1\cdots n}$ of $\mathcal{P}$ . Given $\mathcal{P}$ and $\mathcal{F}$ , Cotangent Laplacian Prediction Module estimates the cotangent Laplacian matrix $L \in \mathbb{R}^{n \times n}$ and Inverse Mass Prediction Module predicts the inverse mass matrix $M^{-1} \in \mathbb{R}^{n \times n}$ of $\mathcal{P}$ . The final deformation energy A can be obtained as $LM^{-1}L$ .
53
+
54
+ where $A=LM^{-1}L$ is the deformation energy matrix of the source mesh $\mathcal{M}$, $L \in \mathbb{R}^{n \times n}$ is the cotangent Laplacian matrix of $\mathcal{M}$, and $M \in \mathbb{R}^{n \times n}$ is a lumped mass matrix whose diagonal elements represent the volume associated with each of the vertices in $\mathcal{M}$. Both L and M are defined on the *volume* information of a shape and thus cannot be directly computed from a point cloud. Although it is possible to recover the volume using a mesh reconstruction method (e.g., [3,5,16,24,25,30,38]), this may introduce topological noise that results in erroneous deformation weights. To address this issue, we argue that *learning* the deformation energy A enables more accurate and robust deformation.
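+
+ Recovering A from the two predicted factors is then a sparse product; a minimal SciPy sketch under assumed interfaces is shown below.
+
+ ```python
+ import scipy.sparse as sp
+
+ def deformation_energy(L, m_inv):
+     # L: sparse symmetric cotangent Laplacian (n, n); m_inv: (n,) predicted
+     # diagonal of M^{-1}.  A = L M^{-1} L is positive semi-definite by
+     # construction whenever L is symmetric and m_inv is non-negative.
+     L = sp.csr_matrix(L)
+     return L @ sp.diags(m_inv) @ L
+ ```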
55
+
56
+ We now introduce our network that can learn the deformation energy matrix $A \in \mathbb{R}^{n \times n}$ of the underlying volume of a 3D reconstruction represented as a point cloud $\mathcal{P} = \{\mathbf{p}_i\}_{i=1\cdots n}$ . Instead of directly estimating the matrix A, we propose to learn the cotangent Laplacian matrix $L \in \mathbb{R}^{n \times n}$ and the inverse mass matrix $M^{-1} \in \mathbb{R}^{n \times n}$ such that the deformation energy can be later recovered as $A = LM^{-1}L$ . The first motivation behind this design is to automatically guarantee the positive semi-definiteness of the predicted A; it is non-trivial to enforce this property when directly predicting a squared matrix using a neural network. The second motivation is to better learn and enforce the symmetry and sparsity structure of the cotangent Laplacian matrix L to allow a more accurate prediction.
57
+
58
+ In what follows, we explain the detailed architecture of our Laplacian learning network, which consists of three modules: (1) Feature Extraction Module, (2) Cotangent Laplacian Prediction Module, and (3) Inverse Mass Prediction Module.
59
+
60
+ **Feature Extraction Module** Given a point cloud $\mathcal{P} = \{\mathbf{p}_i\}_{i=1\cdots n}$, Feature Extraction Module extracts a point cloud feature $\mathcal{F}=\{\mathbf{f}_i\}_{i=1\cdots n}$, where $\mathbf{f}_i\in\mathbb{R}^d$ denotes a per-point feature vector corresponding to $\mathbf{p}_i$. For the network architecture, we adopt Point Transformer [56], which can extract a permutation- and cardinality-invariant feature of a point cloud via self-attention operations.
63
+
64
+ **Cotangent Laplacian Prediction Module** Given the point cloud $\mathcal{P}$ and the point cloud feature $\mathcal{F}$, Cotangent Laplacian Prediction Module learns the cotangent Laplacian matrix $L \in \mathbb{R}^{n \times n}$ of the underlying volume of $\mathcal{P}$. Following the definition of the cotangent Laplacian, $L$ is desired to be a symmetric matrix whose element $L_{ij}$ is non-zero only if $\mathbf{p}_i$ and $\mathbf{p}_j$ are topologically connected by an edge. Since $L$ is highly sparse and the point cloud cardinality $n$ is typically large (e.g., on the order of thousands), it is inefficient to learn the relationship between all point pairs. Therefore, we use a Euclidean prior to select the initial point pair candidates that are likely to be locally connected and thus to correspond to *non-zero* entries of $L$. More specifically, for each point $\mathbf{p}_i \in \mathcal{P}$, we compute the $k$ nearest neighbor points based on Euclidean distance. We then couple each of the neighbor points with the source point $\mathbf{p}_i$ to form point pair candidates. We denote this sampling strategy by KNN-Based Point Pair Sampling (KPS). We note that using KPS not only reduces the inference time but also helps the network training by alleviating an imbalanced regression problem, as a significant number of zero-valued regression targets can be filtered out before training. In our experiments, we empirically set $k$ to 32.
65
+
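+ As a rough illustration, KPS can be implemented with an off-the-shelf k-d tree; this sketch (ours, with SciPy standing in for whatever neighbor search is actually used) returns the $(i, j)$ index pairs that become the candidate non-zero entries of $L$:
+
+ ```python
+ import numpy as np
+ from scipy.spatial import cKDTree
+
+ def knn_point_pair_sampling(points, k=32):
+     """Pair each point with its k nearest Euclidean neighbors."""
+     tree = cKDTree(points)
+     # Query k + 1 neighbors: each point is its own nearest neighbor.
+     _, idx = tree.query(points, k=k + 1)
+     src = np.repeat(np.arange(len(points)), k)
+     dst = idx[:, 1:].reshape(-1)         # drop the self-match in column 0
+     return np.stack([src, dst], axis=1)  # (n * k, 2) candidate index pairs
+
+ pairs = knn_point_pair_sampling(np.random.rand(1000, 3))
+ ```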
66
+ We now extract a feature for each point pair candidate as follows:
67
+
68
+ $$\mathbf{g}_m = (\gamma_1(\mathbf{p}_i, \mathbf{p}_j), \gamma_2(\mathbf{f}_i, \mathbf{f}_j)), \tag{3}$$
69
+
70
+ <span id="page-4-0"></span>where $\{\mathbf{p}_i, \mathbf{p}_j\}$ is the $m$-th point pair candidate, and $\gamma_1(\cdot)$ and $\gamma_2(\cdot)$ are *symmetric* functions used for the pairwise feature aggregation. Since the matrix $L$ is desired to be *symmetric*, the pairwise feature aggregations must be symmetric, producing identical features for the inputs $(\mathbf{p}_i, \mathbf{p}_j)$ and $(\mathbf{p}_j, \mathbf{p}_i)$ – thus guaranteeing the equality of the later predictions for $L_{ij}$ and $L_{ji}$. We empirically choose absolute difference and element-wise multiplication for $\gamma_1$ and $\gamma_2$, respectively. (Refer to Section 5 for the ablation study.)
73
+
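+ A minimal PyTorch sketch of this aggregation (the concatenation layout of $\mathbf{g}_m$ is our assumption):
+
+ ```python
+ import torch
+
+ def pair_features(p_i, p_j, f_i, f_j):
+     """Symmetric pairwise aggregation of Eq. 3: |p_i - p_j| and f_i * f_j
+     both commute in (i, j), so g_m is identical for (p_i, p_j) and (p_j, p_i)."""
+     gamma1 = torch.abs(p_i - p_j)  # (m, 3) coordinate aggregation
+     gamma2 = f_i * f_j             # (m, d) feature aggregation
+     return torch.cat([gamma1, gamma2], dim=-1)
+ ```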
74
+ Next, we estimate the entry of the cotangent Laplacian L that corresponds to each point pair candidate as follows:
75
+
76
+ $$L_{ij} = \alpha(\mathbf{g}_m) \odot \phi(\mathbf{g}_m), \tag{4}$$
77
+
78
+ where $i$ and $j$ are the indices of the $m$-th point pair candidate, $\phi(\cdot)$ is a function that outputs a real-valued scalar, and $\alpha(\cdot)$ is a function predicting a weight $W_{ij} \in [0,1]$ indicating whether the element $L_{ij}$ in the cotangent Laplacian is a non-zero value. The addition of the weight prediction is our key to improving the accuracy of the deformation. As previously mentioned, $L$ has non-zero elements only for the point pairs that are connected through edges; this indicates that $L$ is a very sparse matrix. Capturing its sparsity structure is essential, since it encodes information about the topology of the shape. To better model this sparsity structure, we introduce the function $\alpha(\cdot)$ and predict the weight indicating whether each element in $L$ is zero or not. We empirically find that this additional sparsity structure prediction significantly improves the quality of deformations, as shown in Section 5. We also remark that the architecture of our Cotangent Laplacian Prediction Module, which takes point pairs as input, has the advantage of learning well even from a scarce training dataset, since the number of training examples is $k$ times the number of sampled points.
79
+
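+ A sketch of such a two-branch head is given below; the MLP sizes and the sigmoid gate are our assumptions, not the paper's exact design:
+
+ ```python
+ import torch.nn as nn
+
+ class LaplacianEntryHead(nn.Module):
+     """Eq. 4 sketch: a value branch phi and a sparsity branch alpha."""
+     def __init__(self, in_dim, hidden=128):
+         super().__init__()
+         self.phi = nn.Sequential(nn.Linear(in_dim, hidden), nn.ReLU(),
+                                  nn.Linear(hidden, 1))
+         self.alpha = nn.Sequential(nn.Linear(in_dim, hidden), nn.ReLU(),
+                                    nn.Linear(hidden, 1), nn.Sigmoid())
+
+     def forward(self, g):      # g: (m, in_dim) pair features
+         w = self.alpha(g)      # W_ij in [0, 1]: is L_ij non-zero?
+         value = self.phi(g)    # real-valued magnitude
+         return w * value, w    # predicted L_ij and the sparsity gate
+ ```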
80
+ At test time, the entire cotangent Laplacian matrix $L$ can be computed by predicting the off-diagonal elements $\{L_{ij}\}_{i\neq j}$ in parallel via batch processing and calculating the diagonal elements $\{L_{ii}\}_{i=1,\cdots,n}$ as the negative sum of the off-diagonal elements in each row, following the definition of the cotangent Laplacian.
81
+
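+ A small NumPy sketch of this assembly step (ours), given the sampled index pairs and their predicted entries:
+
+ ```python
+ import numpy as np
+
+ def assemble_laplacian(n, pairs, l_vals):
+     """Scatter predicted off-diagonal entries into an n x n matrix and
+     set each diagonal entry to the negative sum of its row."""
+     L = np.zeros((n, n))
+     L[pairs[:, 0], pairs[:, 1]] = l_vals
+     # Mirror each entry: the symmetric pair features make the (i, j) and
+     # (j, i) predictions identical, so overwriting duplicates is harmless.
+     L[pairs[:, 1], pairs[:, 0]] = l_vals
+     np.fill_diagonal(L, 0.0)
+     np.fill_diagonal(L, -L.sum(axis=1))  # row sums become zero
+     return L
+ ```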
82
+ **Inverse Mass Prediction Module** Given the point cloud $\mathcal{P}$ and the point cloud feature $\mathcal{F}$, Inverse Mass Prediction Module learns the inverse mass matrix $M^{-1} \in \mathbb{R}^{n \times n}$ of the underlying volume of $\mathcal{P}$. For each point in $\mathcal{P}$, we first concatenate its 3D coordinate $\mathbf{p}_i$ and the corresponding feature $\mathbf{f}_i$ to generate a new per-point feature $\mathbf{f}'_i = [\mathbf{p}_i; \mathbf{f}_i] \in \mathbb{R}^{3+d}$. Then, the inverse mass of each point $\mathbf{p}_i$ can be predicted as $\omega(\mathbf{f}'_i)$, where $\omega$ is a function instantiated as an MLP. At test time, the entire inverse mass matrix $M^{-1}$ can be computed by predicting the diagonal elements in a batch and setting the off-diagonal entries to zero.
83
+
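+ A corresponding sketch of the inverse-mass head (the Softplus, which keeps the predicted inverse masses positive, is our assumption):
+
+ ```python
+ import torch
+ import torch.nn as nn
+
+ class InverseMassHead(nn.Module):
+     """Per-point MLP omega on [p_i; f_i] predicting the diagonal of M^{-1}."""
+     def __init__(self, d, hidden=128):
+         super().__init__()
+         self.omega = nn.Sequential(nn.Linear(3 + d, hidden), nn.ReLU(),
+                                    nn.Linear(hidden, 1), nn.Softplus())
+
+     def forward(self, p, f):   # p: (n, 3) coordinates, f: (n, d) features
+         m_inv = self.omega(torch.cat([p, f], dim=-1)).squeeze(-1)  # (n,)
+         return torch.diag(m_inv)  # diagonal M^{-1} with zero off-diagonals
+ ```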
84
+ **Loss Functions.** We train our network using supervision obtained from the ground-truth mesh corresponding to the input point cloud $\mathcal{P}$. The overall loss function for our framework can be written as follows:
87
+
88
+ $$\mathcal{L} = \frac{1}{|\mathcal{K}(\mathcal{P})|} \sum_{i,j \in \mathcal{K}(\mathcal{P})} \|L_{ij} - L_{ij}^{\mathrm{gt}}\|_{1} + \frac{\lambda_{W}}{|\mathcal{K}(\mathcal{P})|} \sum_{i,j \in \mathcal{K}(\mathcal{P})} \|W_{ij} - W_{ij}^{\mathrm{gt}}\|_{1} + \frac{\lambda_{M^{-1}}}{|\mathcal{P}|} \sum_{i=1}^{|\mathcal{P}|} \|M_{ii}^{-1} - M_{ii}^{-1,\mathrm{gt}}\|_{1}, \tag{5}$$
90
+
91
+ where $L^{\mathrm{gt}}$, $W^{\mathrm{gt}}$, and $M^{-1,\mathrm{gt}}$ are the ground truth matrices for $L$, $W$, and $M^{-1}$, respectively, and $\mathcal{K}(\mathcal{P})$ denotes the set of point pair indices sampled by KPS. For the weights of the loss terms $\lambda_W$ and $\lambda_{M^{-1}}$, we empirically choose 100 and 1, respectively.
92
+
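+ In sketch form (ours), with each term implemented as a mean L1 loss so that the normalizations in Eq. 5 are absorbed into the means:
+
+ ```python
+ import torch.nn.functional as F
+
+ def laplacian_loss(L_pred, L_gt, W_pred, W_gt, m_inv_pred, m_inv_gt,
+                    lambda_w=100.0, lambda_m_inv=1.0):
+     """Eq. 5 sketch: L1 terms averaged over the KPS pairs (L and W)
+     and over the points (diagonal of M^{-1})."""
+     loss_l = F.l1_loss(L_pred, L_gt)          # over sampled pairs K(P)
+     loss_w = F.l1_loss(W_pred, W_gt)          # sparsity gates
+     loss_m = F.l1_loss(m_inv_pred, m_inv_gt)  # diagonal entries only
+     return loss_l + lambda_w * loss_w + lambda_m_inv * loss_m
+ ```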
93
+ **Deformation Weight Computation.** At test time, once we predict the cotangent Laplacian matrix $L$ and the inverse mass diagonal matrix $M^{-1}$ to recover the deformation energy matrix $A = LM^{-1}L$, the linear deformation blending weights in Equation 3.1 for the given set of deformation handles are calculated by solving the quadratic programming problem. Note that, while the deformation blending weights are *dependent* on the given set of deformation handles, the quantities we predict ($L$ and $M^{-1}$) are *not*. Hence, our framework allows the user to choose an arbitrary set of deformation handles and move them freely without any restriction.
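+ As a simplified illustration of that solve (ours): if the bound constraints of the full quadratic program are dropped, each handle's weight field has a closed-form solution obtained by fixing the weights at the handle vertices and minimizing $\mathbf{w}^{\top} A \mathbf{w}$ over the free vertices. The full problem additionally enforces bounds such as $w \in [0, 1]$ and would use a proper QP solver.
+
+ ```python
+ import numpy as np
+
+ def blending_weights(A, handle_idx):
+     """For each handle h: minimize w^T A w s.t. w = 1 at h and w = 0 at
+     the other handles (bounds dropped; assumes A_ff is invertible)."""
+     n = A.shape[0]
+     handles = np.asarray(handle_idx)
+     free = np.setdiff1d(np.arange(n), handles)
+     W = np.zeros((n, len(handles)))
+     for col, h in enumerate(handles):
+         w_c = np.zeros(len(handles))
+         w_c[col] = 1.0  # Kronecker delta on the handle vertices
+         # Stationarity with constrained entries fixed: A_ff w_f = -A_fc w_c
+         w_f = np.linalg.solve(A[np.ix_(free, free)],
+                               -A[np.ix_(free, handles)] @ w_c)
+         W[handles, col] = w_c
+         W[free, col] = w_f
+     return W  # one weight field per handle, evaluated at every point
+ ```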
2204.02426/main_diagram/main_diagram.drawio ADDED
 
2204.02426/main_diagram/main_diagram.pdf ADDED
Binary file (94.1 kB).
 
2204.02426/paper_text/intro_method.md ADDED
@@ -0,0 +1,46 @@
1
+ # Introduction
2
+
3
+ ::: epigraph
4
+ Frustra fit per plura quod potest fieri per pauciora ("It is futile to do with more what can be done with fewer")
5
+
6
+ **William of Occam**, *Summa Totius Logicae (1323 CE)*
7
+ :::
8
+
9
+ Spurious correlations and dataset bias greatly impair generalization in deep neural networks [@agrawal2018don; @bolukbasi2016man; @he2009learning; @shrestha2022investigation]. This problem has been heavily studied. The most common approaches are re-sampling strategies [@chawla2002smote; @cui2019class; @he2008adasyn; @sagawa2020investigation], altering optimization to mitigate bias [@DBLP:journals/corr/abs-1911-08731], adversarial unlearning [@adeli2019bias; @grand2019adversarial; @ramakrishnan2018overcoming; @zhang2018mitigating], learning invariant representations [@arjovsky2019invariant; @choe2020empirical; @teney2020unshuffling], and ensembling with bias-amplified models [@cadene2019rubi; @clark2019don; @nam2020learning]. Here, we propose a new approach: incorporating architectural inductive biases that combat dataset bias.
10
+
11
+ <figure id="fig:overview" data-latex-placement="ht!">
12
+ <img src="figures/overview.png" style="width:90.0%" />
13
+ <figcaption><span data-acronym-label="OccamNets" data-acronym-form="singular+short">OccamNets</span> focus on architectural inductive biases, which is an orthogonal direction to tackling dataset biases compared to the existing works.</figcaption>
14
+ </figure>
15
+
16
+ In a typical feedforward network, each layer can be considered as computing a function of the previous layer's output, with each additional layer making the hypothesis more complex. Given a system trained to predict multiple categories, some of which are highly biased, this means the network uses the same level of complexity across all of the examples, even when some examples should be classified with simpler hypotheses (e.g., less depth). Likewise, pooling in networks is typically uniform in nature, so every spatial location is used for prediction, rather than only the locations that carry the necessary information. In other words, typical networks violate Occam's razor. Consider the [BiasedMNIST]{acronym-label="BiasedMNIST" acronym-form="singular+short"} dataset [@shrestha2022investigation], where the task is to recognize a digit while remaining invariant to multiple spuriously correlated factors, which include colors, textures, and contextual biases. The most complex hypothesis would exploit every factor during classification, including the digit's color, texture, or background context. A simpler hypothesis would instead focus on the digit's shape and ignore the spuriously correlated factors, which work very well during training but do not generalize. We argue that a network should be capable of adapting its hypothesis space for each example, rather than always resorting to the most complex hypothesis; this would help it ignore extraneous variables that hinder generalization.
17
+
18
+ Here, we propose convolutional [OccamNets]{acronym-label="OccamNets" acronym-form="singular+short"}, which have architectural inductive biases that favor using the minimal network depth and the minimal number of image locations during inference for a given example. The first inductive bias is implemented using early exiting, which has previously been studied for speeding up inference. The network is trained such that later layers focus on examples that earlier layers find hard, with a bias toward exiting early. The second inductive bias replaces the global average pooling before a classification layer with a function that is regularized to favor pooling over fewer image locations. We hypothesize that this is especially useful for combating background and contextual biases [@ahmed2020systematic; @singh2020don]. OccamNets are complementary to existing approaches and can be combined with them.
19
+
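+ To make these two inductive biases more tangible, here is an illustrative PyTorch sketch of an exit head that could be attached to a convolutional block: a 1x1 convolution produces per-location class scores (so the spatial extent used for prediction can be regularized), and a small gate scores whether the example can stop at this depth. The module design and layer sizes here are our assumptions, not the paper's specification.
+
+ ```python
+ import torch.nn as nn
+
+ class ExitModule(nn.Module):
+     """Illustrative early-exit head for one convolutional block."""
+     def __init__(self, in_ch, num_classes):
+         super().__init__()
+         self.cam = nn.Conv2d(in_ch, num_classes, kernel_size=1)
+         self.gate = nn.Sequential(nn.AdaptiveAvgPool2d(1), nn.Flatten(),
+                                   nn.Linear(in_ch, 1), nn.Sigmoid())
+
+     def forward(self, feats):            # feats: (B, in_ch, H, W)
+         cam = self.cam(feats)            # per-location class scores
+         logits = cam.mean(dim=(2, 3))    # pooled class logits for this exit
+         exit_prob = self.gate(feats)     # (B, 1): confidence to stop here
+         return logits, exit_prob, cam
+ ```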
20
+ - We introduce the [OccamNet]{acronym-label="OccamNet" acronym-form="singular+short"} architecture, which has architectural inductive biases for favoring simpler solutions to help overcome dataset biases. [OccamNets]{acronym-label="OccamNets" acronym-form="singular+short"} do not require the biases to be explicitly specified during training, unlike many state-of-the-art debiasing algorithms.
21
+
22
+ - In experiments using biased vision datasets, we demonstrate that [OccamNets]{acronym-label="OccamNets" acronym-form="singular+short"} greatly outperform architectures that do not use the proposed inductive biases. Moreover, we show that [OccamNets]{acronym-label="OccamNets" acronym-form="singular+short"} outperform or rival existing debiasing methods that use conventional network architectures.
23
+
24
+ - We combine [OccamNets]{acronym-label="OccamNets" acronym-form="singular+short"} with four recent debiasing methods, which all show improved results compared to using them with conventional architectures.
25
+
26
+ # Method
27
+
28
+ We compare [OccamNets]{acronym-label="OccamNets" acronym-form="singular+short"} with the vanilla empirical risk minimization procedure and four state-of-the-art bias mitigation methods:
29
+
30
+ - **Empirical Risk Minimization ([ERM]{acronym-label="ERM" acronym-form="singular+short"})** is the default method used by most deep learning models; it often leads to dataset bias exploitation since it minimizes the training loss without any debiasing procedure.
31
+
32
+ - **Spectral Decoupling (SD) [@pezeshki2020gradient]** applies regularization to model outputs to help decouple features. This can help the model focus more on the signal.
33
+
34
+ - **Group Upweighting ([UpWt]{acronym-label="UpWt" acronym-form="singular+short"})** balances the loss contributions from the majority and the minority groups by multiplying the loss by $\frac{1}{n_g^\gamma}$, where $n_g$ is the number of samples in group $g$ and $\gamma$ is a hyper-parameter (a minimal sketch follows this list).
35
+
36
+ - **Group DRO ([gDRO]{acronym-label="gDRO" acronym-form="singular+short"}) [@DBLP:journals/corr/abs-1911-08731]** is an instance of a broader family of distributionally robust optimization techniques [@duchi2019distributionally; @namkoong2016stochastic; @rahimian2019distributionally] that optimizes for the difficult groups in the dataset.
37
+
38
+ - **Predictive Group Invariance ([PGI]{acronym-label="PGI" acronym-form="singular+short"}) [@ahmed2020systematic]** is another grouping method that encourages matched predictive distributions across easy and hard groups within each class. It penalizes the KL-divergence between predictive distributions from within-class groups.
39
+
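+ As referenced in the Group Upweighting item above, a minimal sketch of that loss (ours; `group_counts` is a hypothetical tensor of per-group sample counts indexed by group id):
+
+ ```python
+ import torch
+
+ def group_upweighted_loss(losses, group_ids, group_counts, gamma=1.0):
+     """Scale each sample's loss by 1 / n_g^gamma, where n_g is the size
+     of the sample's group, then average."""
+     n_g = group_counts[group_ids].float()  # per-sample group size
+     return (losses / n_g.pow(gamma)).mean()
+ ```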
40
+ **Dataset Sub-groups.** For debiasing, [UpWt]{acronym-label="UpWt" acronym-form="singular+short"}, [gDRO]{acronym-label="gDRO" acronym-form="singular+short"}, and [PGI]{acronym-label="PGI" acronym-form="singular+short"} require additional labels for covariates (sub-group labels). Past work has focused on these labels being supplied by an oracle; however, having access to all relevant sub-group labels is often impractical for large datasets. Some recent efforts have attempted to infer these sub-groups. Just train twice (JTT) [@liu2021just] trains a bias-prone ERM model for a few epochs and uses it to identify the difficult groups. Environment inference for invariant learning (EIIL) [@creager2021environment] learns sub-group assignments that maximize the invariant risk minimization objective [@arjovsky2019invariant]. Unfortunately, inferred sub-groups generally perform worse than those supplied by an oracle [@ahmed2020systematic; @liu2021just]. For the methods that require them, which *excludes* [OccamNets]{acronym-label="OccamNets" acronym-form="singular+short"}, we use oracle group labels (i.e., for [BiasedMNIST]{acronym-label="BiasedMNIST" acronym-form="singular+short"} and [COCO]{acronym-label="COCO" acronym-form="singular+short"}). Inferred group labels are used for [BAR]{acronym-label="BAR" acronym-form="singular+short"}, as oracle labels are not available.
41
+
42
+ For [BiasedMNIST]{acronym-label="BiasedMNIST" acronym-form="singular+short"}, all the samples having the same class and the same value for all of the spurious factors are placed in a single group. For [COCO]{acronym-label="COCO" acronym-form="singular+short"}, objects placed on spuriously correlated backgrounds form the majority group, while the rest form the minority group. [BAR]{acronym-label="BAR" acronym-form="singular+short"} does not specify oracle group labels, so we adopt the JTT method. Specifically, we train an ERM model for a single epoch, reserving the 20% of the samples with the highest losses as the difficult group and the rest as the easy group. We chose JTT over EIIL for its simplicity. [OccamNets]{acronym-label="OccamNets" acronym-form="singular+short"}, of course, do not require such group labels to be specified.
43
+
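+ A minimal sketch of this JTT-style group inference (ours; `per_sample_losses` would come from the briefly-trained ERM model):
+
+ ```python
+ import numpy as np
+
+ def jtt_groups(per_sample_losses, hard_frac=0.2):
+     """Flag the hard_frac of samples with the highest ERM loss as the
+     difficult group (1); the rest form the easy group (0)."""
+     losses = np.asarray(per_sample_losses)
+     cutoff = np.quantile(losses, 1.0 - hard_frac)
+     return (losses >= cutoff).astype(int)
+ ```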
44
+ **Architectures.** ResNet-18 is used as the standard baseline architecture for our studies. We compare it with an OccamNet version of ResNet-18, i.e., OccamResNet-18. To create this architecture, we add early exit modules to each of ResNet-18's convolutional blocks. To keep the number of parameters in OccamResNet-18 comparable to ResNet-18, we reduce the feature map width from 64 to 48. Assuming 1000 output classes, ResNet-18 has 12M parameters compared to 8M in OccamResNet-18. Further details are provided in Sec. [\[sec:arch_details\]](#sec:arch_details){reference-type="ref" reference="sec:arch_details"}.
45
+
46
+ We report the means and standard deviations of test set accuracies computed across five different runs for all the datasets. For [BiasedMNIST]{acronym-label="BiasedMNIST" acronym-form="singular+short"}, we report the unbiased test set accuracy (i.e., $p_{bias} = 0.1$) alongside the majority and minority group accuracies for each bias variable. For [COCO]{acronym-label="COCO" acronym-form="singular+short"}, unless otherwise specified, we report accuracy on the most challenging test split: the one with seen but unbiased backgrounds. We also report the average precision score, which measures the ability to distinguish 100 anomalous samples from the in-distribution samples in the anomaly detection task of [COCO]{acronym-label="COCO" acronym-form="singular+short"}. For [BAR]{acronym-label="BAR" acronym-form="singular+short"}, we report the overall test accuracies. We use the unbiased validation set of [BiasedMNIST]{acronym-label="BiasedMNIST" acronym-form="singular+short"} and the validation set with unbiased backgrounds of [COCO]{acronym-label="COCO" acronym-form="singular+short"} for hyperparameter tuning. The hyperparameter search grid and the selected values are specified in Sec. [\[sec:hyperparams\]](#sec:hyperparams){reference-type="ref" reference="sec:hyperparams"}.
2204.03541/main_diagram/main_diagram.drawio ADDED