Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes. See raw diff
- 2003.10258/main_diagram/main_diagram.drawio +1 -0
- 2003.10258/main_diagram/main_diagram.pdf +0 -0
- 2003.10258/paper_text/intro_method.md +195 -0
- 2009.08061/main_diagram/main_diagram.drawio +1 -0
- 2009.08061/main_diagram/main_diagram.pdf +0 -0
- 2009.08061/paper_text/intro_method.md +102 -0
- 2010.11354/main_diagram/main_diagram.drawio +1 -0
- 2010.11354/main_diagram/main_diagram.pdf +0 -0
- 2010.11354/paper_text/intro_method.md +174 -0
- 2105.03801/main_diagram/main_diagram.drawio +1 -0
- 2105.03801/main_diagram/main_diagram.pdf +0 -0
- 2105.03801/paper_text/intro_method.md +65 -0
- 2106.04559/main_diagram/main_diagram.drawio +1 -0
- 2106.04559/main_diagram/main_diagram.pdf +0 -0
- 2106.04559/paper_text/intro_method.md +76 -0
- 2106.04876/main_diagram/main_diagram.drawio +1 -0
- 2106.04876/main_diagram/main_diagram.pdf +0 -0
- 2106.04876/paper_text/intro_method.md +259 -0
- 2106.05956/main_diagram/main_diagram.drawio +1 -0
- 2106.05956/main_diagram/main_diagram.pdf +0 -0
- 2106.05956/paper_text/intro_method.md +165 -0
- 2110.06553/main_diagram/main_diagram.drawio +1 -0
- 2110.06553/main_diagram/main_diagram.pdf +0 -0
- 2110.06553/paper_text/intro_method.md +73 -0
- 2112.01525/main_diagram/main_diagram.drawio +1 -0
- 2112.01525/main_diagram/main_diagram.pdf +0 -0
- 2112.01525/paper_text/intro_method.md +245 -0
- 2203.03691/main_diagram/main_diagram.drawio +1 -0
- 2203.03691/main_diagram/main_diagram.pdf +0 -0
- 2203.03691/paper_text/intro_method.md +76 -0
- 2203.03989/main_diagram/main_diagram.drawio +1 -0
- 2203.03989/main_diagram/main_diagram.pdf +0 -0
- 2203.03989/paper_text/intro_method.md +113 -0
- 2203.04115/main_diagram/main_diagram.drawio +1 -0
- 2203.04115/main_diagram/main_diagram.pdf +0 -0
- 2203.04115/paper_text/intro_method.md +121 -0
- 2204.03444/main_diagram/main_diagram.drawio +1 -0
- 2204.03444/main_diagram/main_diagram.pdf +0 -0
- 2204.03444/paper_text/intro_method.md +48 -0
- 2206.04384/main_diagram/main_diagram.drawio +1 -0
- 2206.04384/main_diagram/main_diagram.pdf +0 -0
- 2207.08822/main_diagram/main_diagram.drawio +1 -0
- 2207.08822/main_diagram/main_diagram.pdf +0 -0
- 2207.08822/paper_text/intro_method.md +136 -0
- 2209.06203/main_diagram/main_diagram.drawio +452 -0
- 2209.06203/main_diagram/main_diagram.pdf +0 -0
- 2209.06203/paper_text/intro_method.md +178 -0
- 2209.15172/main_diagram/main_diagram.drawio +0 -0
- 2209.15172/paper_text/intro_method.md +51 -0
- 2210.12152/main_diagram/main_diagram.drawio +1 -0
2003.10258/main_diagram/main_diagram.drawio
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
<mxfile host="Electron" modified="2020-03-18T14:32:00.566Z" agent="Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) draw.io/12.6.5 Chrome/80.0.3987.137 Electron/8.1.0 Safari/537.36" etag="E7zGm8VCknNRF96zp54o" version="12.6.5" type="device"><diagram id="YYWYc_Y2QdQs4yQhj9j2" name="Page-1">7Vxdk6I4FP01PrZFCF95nP7arZqZ3a7q3dmqeZmiJa3UoLEwttq/fgOCQBLbiBBo1YduvUCUc26Se89NGMC76fqP2J9PvpMARwPTCNYDeD8w2cv22L/EstlaAHTtrWUch0FmKwzP4TvOjEZmXYYBXlROpIRENJxXjSMym+ERrdj8OCar6mmvJKp+69wfY8HwPPIj0fpfGNDJ1urZRmH/E4fjSf7NwMiOTP385MywmPgBWZVM8GEA72JC6PbddH2HowS9HJftdY97ju5+WIxnVOWCFdps/lr9+NumS/fb/foX/rl8v3HdbTNvfrTM7jj7tXSTQxCT5SzASSvGAN6uJiHFz3N/lBxdMdaZbUKnEfsE2NsFjclvfEciEqdXQyN97Y7kINrM8hpGUX7mjMxYg7fZr8Exxeu99wl26DG/w2SKabxhp2QXOK4xNLKrNlVnWhUM2l5mm5TYc/IT/cxrxrvWC2DZmwzbY3B2zg9no2cYg7zl8wXZsXuAMjg/lMUhQy/S36dksvF//H748vbi/fv2E//6+vUGKIwZyW2HbLb65r/g6IksQhqSGTv0QiglU4ZOfsKXKBwnByjh0Z/486Sx6XqcTOHDF38RjoZxOpcWcJsF3lVS0ia2s7aZUBYy3lPQ97BVJbtCocCXhNW9FFoHRyPAHEsrfYfZY62wOAardpFyR2gAMuDAYR6NbaoolmCDjgQ1C7WFmnkYNRqH/mwcKcAWE+pnHeIGyQeUBnA0XTSEXgVH0zWHnim6IARD05Y4IRwC2BKgsO9uaELBDU1D9EPZ2Gu15YUKUXE+bibj2BF+2J4bwpJzZThCV8RR5n8AtgWk13fvg27/vA+pex9lKOF3kjR3O8dxyH4Ajsv2p8LYzSQDquDaUATXkk0xTmsB7GF0DwDlL+ZbqeE1XCdB7v44JwfTlYW5j49ZmLsH02MiWGALbgwkbrwbEMpI74yNQ60QwOJZ8CWRaQrk9nqkFMRdrrBtGQeCnHMQwnKEL4Ent8U4YoP4W7V5GWTZNzyRkH1xMdAY4kBjc8AvyDIe4exCsyTlcG2Zlsg2H5FRPx5jKrSV8ri7+RMEjSu1H1Er0HECtYKbtEwtUIjBL4ZbS4EPVW6h13m3BSrpwAVzW7vfSrjV3m+tK7eluJ5LhWoTywWwkM+V2mbVvrK6n1WejNqsah6HLXGO/QevqcAsTYzV5CMTckeMkTSr4xXeaRgEqTgW40X47r+kTSXkzpObSW/Pvh3Y90lbS0oy8RY0k4d4VlUIs93sRsvpnsQ/zLZSEEuc8M4CaLCrduUebIrpnlakbZWU4FSt5yTMXNfrVPCWw6YgkbWgeJ/Wy5ElVbwBKl6WAGzL6rccXQUtrVunZCNmlwKkXLdRKKS3oX+fBCSynE71bzmSOqqAp8Hm9dD/FBQInQr4abMOrE7U3SvgjoII8PkkcNcRHLkPEvg1KS+NNUAca+qKaZ4t0q05iXOuqfmH3NYW0yTc6hbTnGvtqpRsGgqEqJKLUPcd91q9+pjc2j1XQq72nquy5uRSyEWoGnzWlsERF8XqlsEdlUT+YlmtK4MLrOpeRaAgNJRYHUX+YhGOUu78mIrmj1aV73IN0RN6z7fdEN8y4c6y9VKuoohcKQcGQBxPVs0+7nY7crvHLTm4VMJlkRNAQxPVZF2Sakuaa5t7hVjsnLawnCRg
cVtYZDuQWtzDIuev9+UTySYWxxOB01zU8xTimr4V9STbWBDginqiQ3ZQ0/N6X1ORbGnxJJVmrTWVfDvSp6rpSfa0sMCo66Kep2NP1Wm4ibtaundAhUrIpynq8dtaAJCUmvVW9dBxufR5KyT8QkFgNLVSEPGlwpZDaHRcvnxZtApk1GVV9I+2aRWn47NYl8gvAAVmvp2kq3WJ6FJWgCJHMgVpRVplrUPfVoD2IFtECusI+pYtSpTkfmaLSKWQ37cVoJ0H66ijJyA0vQK0B9ki0vEMhKaXgHbvgD17CEKjS0B7kC0C47p4rdQDuIJ6/XSRr6jrTheBcV24tp/X2vkiT6v2fBE0tHP7PIqlkloFMPi8RlkLsIXGPN3b8hvau30e7EJPZBeYLLmwXWCZ6V9Yl2oW6TgdU93MxHseVDfbkaV+o5veDp/aOWK+wpC+TZ+/bQyT6DV98LYxREf6TTvPWUVAeM5qKhpkCJWVAkPic4BfSawQDbOPxSO2tyQXTyqHD/8D</diagram></mxfile>
|
2003.10258/main_diagram/main_diagram.pdf
ADDED
|
Binary file (57.3 kB). View file
|
|
|
2003.10258/paper_text/intro_method.md
ADDED
|
@@ -0,0 +1,195 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Introduction
|
| 2 |
+
|
| 3 |
+
Deep neural networks have become state-of-the-art in many competitive learning challenges. The neural network acts as a flexible function approximator in an overall learning scheme. In supervised learning, the weights of the neural network are optimized by utilizing a representative set of valid input-output pairs. Whereas neural networks solve complex learning tasks [@AlexNet] in this way, concerns arise addressing the black box character [@Gilpin2019; @StopExplaining]: (1) In general, a neural network represents a complex non-linear mapping and it is difficult to show properties for this function from a mathematical point of view, e.g., verification of desired input-output relations [@verifiedLearners; @Katz2017] or inference of confidence levels in a probabilistic framework [@Gal]. (2) Furthermore, the learned abstractions and processes within the neural network are usually not interpretable or explainable to a human [@StopExplaining].
|
| 4 |
+
|
| 5 |
+
With our approach, we address mainly the first concern: (1) We propose a neural network which predicts provably within a sample-specific constrained output space. *ConstraintNet* encodes a certain class of constraints, a certain type of a convex polytope, in the network architecture and enables to choose a specific constraint from this class via an additional input in each forward pass independently. In this way, *ConstraintNet* allows to enforce a consistent prediction with respect to a valid output domain. We assume that the partition into valid and invalid output domains is given by an external source. This could be a human expert, a rule based model or even a second neural network. (2) Secondly, we contribute to the interpretability and explainability of neural networks: A constraint over the output is interpretable and allows to describe the decision making of *ConstraintNet* in an interpretable way. Later, we model output constraints for a facial landmark prediction task such that the model predicts the facial landmarks on a region which is recognized as face and locates the positions of the eyes above the nose-landmark for anatomical reasons. Therefore, the additional input encodes the output constraint and represents high level information with explainable impact on the prediction. When this input is generated by a second model, it is an intermediate variable of the total model with interpretable information.
|
| 6 |
+
|
| 7 |
+
*ConstraintNet* addresses safety-critical applications in particular. Neural networks tend to generalize to new data with high accuracy on average. However, there remains a risk of unforeseeable and unintended behavior in rare cases. Instead of monitoring the output of the neural network with a second algorithm and intervening when safety-critical behavior is detected, we suggest to constrain the output space with *ConstraintNet* to safe solutions in the first place. Imagine a neural network as motion planner. In this case, sensor detections or map data constrain the output space to only collision free trajectories.
|
| 8 |
+
|
| 9 |
+
Apart from safety-critical applications, *ConstraintNet* can be applied to predict within a region of interest in various use cases. In medical image processing, this region could be annotated by a human expert to restrict the localization of an anatomical landmark.
|
| 10 |
+
|
| 11 |
+
We demonstrate the modeling of constraints on several facial landmark prediction tasks. Furthermore, we illustrate the application to a follow object controller for vehicles as a safety-critical application. We have promising results on ongoing experiments and plan to publish in future.
|
| 12 |
+
|
| 13 |
+
# Method
|
| 14 |
+
|
| 15 |
+
**Construction approach.** We propose the approach visualized in Fig. [1](#architecture_approach){reference-type="ref" reference="architecture_approach"} to create the architecture of *ConstraintNet* for a specific class of constraints $\mathfrak{C}$. The key idea is a final layer $\phi: \mathcal{Z} \!\times\! \mathcal{S} \!\to\! \mathcal{Y}$ without learnable parameters which maps the output of the previous layers $z\!\in\!\mathcal{Z}$ on the constrained output space $\mathcal{C}(s)$ depending on the constraint parameter $s$. Given a class of constraints $\mathfrak{C}\! =
|
| 16 |
+
\! \{\mathcal{C}(s) \! \subset \! \mathcal{Y} : s\! \in \! \mathcal{S} \}$, we require that $\phi$ fulfills: $$\begin{equation}
|
| 17 |
+
\label{central_prop_phi}
|
| 18 |
+
\forall s\! \in \! \mathcal{S} \; \forall z \! \in \! \mathcal{Z}: \phi(z,s) \in \mathcal{C}(s).
|
| 19 |
+
\end{equation}$$ When $\phi$ is furthermore (piecewise) differentiable with respect to $z$ we call $\phi$ constraint guard layer for $\mathfrak{C}$.
|
| 20 |
+
|
| 21 |
+
The constraint guard layer $\phi$ has no adjustable parameters and therefore the logic is learned by the previous layers $h_{\theta}$ with parameters $\theta$. In the ideal case, *ConstraintNet* predicts the same true output $y$ for a data point $x$ under different but valid constraints. This behavior requires that $h_{\theta}$ depends on $s$ in addition to $x$. Without this requirement, $z\!=\!h_{\theta}(\cdot)$ would have the same value for fixed $x$, and $\phi$ would project this $z$ for different but valid constraint parameters $s$ in general on different outputs. We transform $s$ into an appropriate representation $g(s)$ and consider it as an additional input of $h_{\theta}$, $h_{\theta}:
|
| 22 |
+
\! \mathcal{X} \! \times \! g(\mathcal{S}) \!\to\! \mathcal{Z}$. For the construction of $h_{\theta}$, we propose to start with a common neural network architecture with input domain $\mathcal{X}$ and output domain $\mathcal{Z}$. In a next step, this neural network can be extended to add an additional input for $g(s)$. We propose to concatenate $g(s)$ to the output of an intermediate layer since it is information with a higher level of abstraction.
|
| 23 |
+
|
| 24 |
+
Finally, we construct *ConstraintNet* for the considered class of constraints $\mathfrak{C}$ by applying the layers $h_{\theta}$ and the corresponding constraint guard layer $\phi$ subsequently:
|
| 25 |
+
|
| 26 |
+
$$\begin{equation}
|
| 27 |
+
f_{\theta}(x,s) =
|
| 28 |
+
\phi\big(h_{\theta}(x, g(s)),s\big).
|
| 29 |
+
\end{equation}$$
|
| 30 |
+
|
| 31 |
+
The required property for $\phi$ in Eq. [\[central_prop_phi\]](#central_prop_phi){reference-type="ref" reference="central_prop_phi"} implies that *ConstraintNet* predicts within the constrained output space $\mathcal{C}(s)$ according to Eq. [\[central_prop\]](#central_prop){reference-type="ref" reference="central_prop"}. Furthermore, the constraint guard layer propagates gradients and backpropagation is amenable.
|
| 32 |
+
|
| 33 |
+
**Construction by modifying a CNN.** Fig. [2](#cnn_modified){reference-type="ref" reference="cnn_modified"} illustrates the construction of *ConstraintNet* by using a convolutional neural network (CNN) for the generation of the intermediate variable $z\!=\!h_{\theta}(x, g(s))$, where $h_{\theta}$ is a CNN. As an example, a nose landmark prediction task on face images is shown. The output constraints are triangles randomly located around the nose, convex polytopes with three vertices. Such constraints can be specified by a constraint parameter $s$ consisting of the concatenated vertex coordinates. The constraint guard layer $\phi$ for convex polytopes is modeled in the next section and requires a three dimensional intermediate variable $z\!\in\!\mathbb{R}^3$ for triangles. The previous layers $h_{\theta}$ map the image data $x \!\in\!\mathcal{X}$ on the three dimensional intermediate variable $z\!\in\!\mathbb{R}^3$. A CNN with output domain $\mathcal{Z}\!=\!\mathbb{R}^{N_z}$ can be realized by adding a dense layer with $N_z$ output neurons and linear activations. To incorporate the dependency of $h_{\theta}$ on $s$, we suggest to concatenate the output of an intermediate convolutional layer by a tensor representation $g(s)$ of $s$. Thereby, we extend the input of the next layer in a natural way.
|
| 34 |
+
|
| 35 |
+
![Construction of *ConstraintNet* by extending a CNN. For illustration purposes, we show a nose landmark prediction on an image $x$ with an output constraint in form of a triangle, a convex polytope with three vertices $\{v^{(i)}(s)\}_{i=1}^3$. The constraint parameter $s$ specifies the chosen constraint and consists in this case of concatenated vertex coordinates. A tensor representation $g(s)$ of $s$ is concatenated to the output of an intermediate convolutional layer and extends the input of the next layer. Instead of creating the final output for the nose landmark with a 2-dimensional dense layer, a 3-dimensional intermediate representation $z$ is generated. The constraint guard layer $\phi$ applies a softmax function $\sigma$ on $z$ and weights the three vertices of the triangle with the softmax outputs. This guarantees a prediction $\hat y$ within the specified triangle. []{#cnn_modified label="cnn_modified"} ](architecture.pdf){#cnn_modified width="90%"}
|
| 36 |
+
|
| 37 |
+
In this subsection we model the constraint guard layer for different classes of constraints. Primarily, we consider output constraints in form of convex polytopes. However, our approach is also applicable to problem-specific constraints. As an example, we construct the constraint guard layer for constraints in form of sectors of a circle. Furthermore, we model constraints for different parts of the output.
|
| 38 |
+
|
| 39 |
+
**Convex polytopes.** We consider convex polytopes $\mathcal{P}$ in $\mathbb{R}^N$ which can be described by the convex hull of $M$ $N$-dimensional vertices $\{ v^{(i)}\}_{i=1}^M$: $$\begin{equation}
|
| 40 |
+
\label{convex_polytope}
|
| 41 |
+
\mathcal{P}\bigl(\{ v^{(i)}\}_{i=1}^M \bigr)\!=\!\bigl \{ \sum_{i} p_i
|
| 42 |
+
v^{(i)} : p_i \! \geq \! 0, \; \\
|
| 43 |
+
\sum_{i} p_i \!=\! 1 \bigr \}.
|
| 44 |
+
\end{equation}$$
|
| 45 |
+
|
| 46 |
+
We assume that the vertices $v^{(i)}(s)$ are functions of the constraint parameter $s$ and define output constraints via $\mathcal{C}(s) \! = \!\mathcal{P}(\{
|
| 47 |
+
v^{(i)}(s)\}_{i=1}^M)$. The constraint guard layer for a class of these constraints $\mathfrak{C}\!=\!
|
| 48 |
+
\{\mathcal{C}(s):s\! \in\!\mathcal{S} \}$ can easily be constructed with $z\!\in\!\mathbb{R}^M$:
|
| 49 |
+
|
| 50 |
+
$$\begin{equation}
|
| 51 |
+
\label{convex_phi}
|
| 52 |
+
\phi(z,s)= \sum_{i} \sigma_i(z)
|
| 53 |
+
v^{(i)}(s).
|
| 54 |
+
\end{equation}$$
|
| 55 |
+
|
| 56 |
+
$\sigma_i(\cdot)$ denotes the $i$th component of the $M$-dimensional softmax function $\sigma:\mathbb{R}^M \! \to \! \mathbb{R}^M$. The required property of $\phi$ in Eq. [\[central_prop_phi\]](#central_prop_phi){reference-type="ref" reference="central_prop_phi"} follows directly from the properties $0
|
| 57 |
+
\! < \! \sigma_i(\cdot)
|
| 58 |
+
\! < \! 1$ and $\sum_i \sigma_i(\cdot)\!=\!1$ of the softmax function. However, some vertices $v^{(i)}$ might not be reachable exactly but up to arbitrary accuracy because $\sigma_i(\cdot) \! \neq \! 1$. Note that $\phi$ is differentiable with respect to $z$.
|
| 59 |
+
|
| 60 |
+
**Sectors of a circle.** Consider a sector of a circle $\mathcal{O}$ with center position $(x_c, y_c)$ and radius $R$. We assume that the sector is symmetric with respect to the vertical line $x\!=\!x_c$ and covers $\Psi$ radian. Then the sector of a circle can be described by the following set of points: $$\begin{alignat}
|
| 61 |
+
{2}
|
| 62 |
+
\label{sector_circle}
|
| 63 |
+
\mathcal{O}(x_c, y_c, R, \Psi)\!=& \bigl \{ r \! \cdot \! (\sin \varphi,
|
| 64 |
+
\cos \varphi ) \! + \! (x_c, y_c) \! \in \! \mathbb{R}^2 : \nonumber \\
|
| 65 |
+
& r \! \in \! [0,R], \varphi \! \in \! [-\Psi/2, +\Psi/2 ] \bigr \}.
|
| 66 |
+
\end{alignat}$$
|
| 67 |
+
|
| 68 |
+
With $s\!=\!(x_c, y_c, R, \Psi)$, the output constraints can be written as $\mathcal{C}(s)\!=\!\mathcal{O}(x_c, y_c, R, \Psi)$. It is obvious that the following constraint guard layer with an intermediate variable $z \! \in \!
|
| 69 |
+
\mathbb{R}^2$ fulfills Eq. [\[central_prop_phi\]](#central_prop_phi){reference-type="ref" reference="central_prop_phi"} for a class of these constraints $\mathfrak{C}\!=\!
|
| 70 |
+
\{\mathcal{C}(s):s\! \in\!\mathcal{S} \}$: $$\begin{alignat}
|
| 71 |
+
{2} \label{phi_sector_circle}
|
| 72 |
+
\phi(z,s) =& \; r(z_1)\!\cdot \! \bigl(\sin \varphi(z_2),\cos \varphi(z_2)
|
| 73 |
+
\bigr)\!
|
| 74 |
+
+\!(x_c,y_c), \\
|
| 75 |
+
r(z_1) =& \; R \cdot \operatorname{sig}(z_1), \label{phi_sector_circle_2}
|
| 76 |
+
\\
|
| 77 |
+
\varphi(z_2) =& \; \Psi\cdot(\operatorname{sig}(z_2)-1/2). \label{phi_sector_circle_3}
|
| 78 |
+
\end{alignat}$$
|
| 79 |
+
|
| 80 |
+
Note that we use the sigmoid function $\operatorname{sig}(t)\!=\!1/(1\!+\!\exp(-t))$ to map a real number to the interval $(0,1)$.
|
| 81 |
+
|
| 82 |
+
**Constraints on output parts.** We consider an output $y$ with $K$ parts $y^{(k)}$ ($k \! \in \!
|
| 83 |
+
\{1,\dots,K\}$): $$\begin{equation}
|
| 84 |
+
y = (y^{(1)},\dots,y^{(K)}) \in \mathcal{Y}\!=\! \mathcal{Y}^{(1)} \times \cdots \times
|
| 85 |
+
\mathcal{Y}^{(K)}.
|
| 86 |
+
\end{equation}$$ Each output part $y^{(k)}$ should be constrained independently to an output constraint $\mathcal{C}^{(k)}(s^{(k)})$ of a part-specific class of constraints: $$\begin{equation}
|
| 87 |
+
\mathfrak{C}^{(k)}= \, \{\mathcal{C}^{(k)}(s^{(k)}) \! \subset \!
|
| 88 |
+
\mathcal{Y}^{(k)} : s^{(k) } \! \in \!
|
| 89 |
+
\mathcal{S}^{(k)}\}.
|
| 90 |
+
\end{equation}$$ This is equivalent to constrain the overall output $y$ to $\mathcal{C}(s)\! = \! \mathcal{C}^{(1)}(s^{(1)}) \! \times \! \cdots \! \times
|
| 91 |
+
\! \mathcal{C}^{(K)}(s^{(K)})$ with $s = (s^{(1)}, \dots, s^{(K)})$. The class of constraints for the overall output is then given by: $$\begin{equation}
|
| 92 |
+
\mathfrak{C}= \{\mathcal{C}(s)\! \subset \! \mathcal{Y} : s\! \in \!
|
| 93 |
+
\mathcal{S}^{(1)} \times \cdots \times \mathcal{S}^{(K)} \! \}.
|
| 94 |
+
\end{equation}$$
|
| 95 |
+
|
| 96 |
+
Assume that the constraint guard layers $\phi^{(k)}$ for the parts are given, for $\mathfrak{C}^{(k)}$. Then an overall constraint guard layer $\phi$, for $\mathfrak{C}$, can be constructed by concatenating the constraint guard layers of the parts: $$\begin{alignat}
|
| 97 |
+
{2}
|
| 98 |
+
\phi(z,s)=&\,\big(\phi^{(1)}(z^{(1)},s^{(1)}),\dots,\phi^{(K)}(z^{(K)},
|
| 99 |
+
s^{(K)})\big),
|
| 100 |
+
\label{parts_phi} \\
|
| 101 |
+
z=& \,(z^{(1)},\! \dots,\! z^{(K)}).
|
| 102 |
+
\end{alignat}$$ The validity of the property in Eq. [\[central_prop_phi\]](#central_prop_phi){reference-type="ref" reference="central_prop_phi"} for $\phi$ with respect to $\mathfrak{C}$ follows immediately from the validity of this property for $\phi^{(k)}$ with respect to $\mathfrak{C}^{(k)}$.
|
| 103 |
+
|
| 104 |
+
In supervised learning the parameters $\theta$ of a neural network are learned from data by utilizing a set of input-output pairs $\{(x_i, y_i)\}_{i=1}^{N}$. However, *ConstraintNet* has an additional input $s\!\in\! \mathcal{S}$ which is not unique for a sample. The constraint parameter $s$ provides information in form of a region restricting the true output and therefore the constraint parameter $s_i$ for a sample $(x_i,y_i)$ could be any element of a set of valid constraint parameters $\mathcal{S}_{y_i} \!
|
| 105 |
+
=\!\{s\!\in\!\mathcal{S}: y_i \! \in \! \mathcal{C}(s) \}$.
|
| 106 |
+
|
| 107 |
+
We propose to sample $s_i$ from this set $\mathcal{S}_{y_i}$ to create representative input-output pairs $(x_i, s_i, y_i)$. This sampling procedure enables *ConstraintNet* to be trained with standard supervised learning algorithms for neural networks. Note that many input-output pairs can be generated from the same data point $(x_i, y_i)$ by sampling different constraint parameters $s_i$. Therefore, *ConstraintNet* is forced to learn an invariant prediction for the same sample under different constraint parameters.
|
| 108 |
+
|
| 109 |
+
:::: algorithm
|
| 110 |
+
::: algorithmic
|
| 111 |
+
\
|
| 112 |
+
|
| 113 |
+
$\theta \gets \text{random initialization}$
|
| 114 |
+
|
| 115 |
+
$I_{batch} \gets get\_batch\_indices(\text{batch})$ $L(\theta) \gets \frac{1}{|I_{batch}|}\sum_{i\in I_{batch}}
|
| 116 |
+
l(y_i,
|
| 117 |
+
\hat y_i)+\lambda R(\theta)$ $\theta \gets update(\theta,\nabla_{\theta} L)$
|
| 118 |
+
:::
|
| 119 |
+
::::
|
| 120 |
+
|
| 121 |
+
We train *ConstraintNet* with gradient-based optimization and sample $s_i$ within the training loop as it is shown in Algorithm [\[pseudocode\]](#pseudocode){reference-type="ref" reference="pseudocode"}. The learning objective is given by: $$\begin{equation}
|
| 122 |
+
\label{objective}
|
| 123 |
+
\arg \min_{\theta} L(\theta) = \frac{1}{N}\sum_{i=1}^N l(y_i, \hat y_i)+\lambda
|
| 124 |
+
R(\theta),
|
| 125 |
+
\end{equation}$$ with $l(\cdot)$ being the sample loss, $R(\cdot)$ a regularization term and $\lambda$ a weighting factor. The sample loss term $l(y_i, \hat y_i)$ penalizes deviations of the neural network prediction $\hat y_i$ from the ground truth $y_i$. We apply *ConstraintNet* to regression problems and use mean squared error as sample loss.
|
| 126 |
+
|
| 127 |
+
In this section, we apply *ConstraintNet* on a facial landmark prediction task and a follow object controller for vehicles. The output constraints for the facial landmark prediction task restrict the solution space to consistent outputs, whereas the constraints for the follow object controller help to prevent collisions and to avoid violations of legislation standards. We want to highlight that both applications are exemplary. The main goal is an illustrative demonstration for leveraging output constraints with *ConstraintNet* in applications.
|
| 128 |
+
|
| 129 |
+
In our first application, we consider a facial landmark prediction for the nose $(\hat
|
| 130 |
+
x_n,\hat y_n)$, the left eye $(\hat x_{le}, \hat y_{le})$ and the right eye $(\hat x_{re},\hat y_{re})$ on image data. We assume that each image pictures a face. We introduce constraints to confine the landmark predictions for nose, left eye and right eye to a bounding box which might be given by a face detector. Then, we extend these constraints and enforce relative positions between landmarks such as *the eyes are above the nose*. These constraints are visualized in the top row of Fig. [3](#constr_visualized){reference-type="ref" reference="constr_visualized"}. The bottom row shows constraints for the nose landmark in form of a triangle and a sector of a circle. These constraints can be realized with the constraint guard layers in Eq. [\[convex_phi\]](#convex_phi){reference-type="ref" reference="convex_phi"} and Eq. [\[phi_sector_circle\]](#phi_sector_circle){reference-type="ref" reference="phi_sector_circle"}. However, they are of less practical relevance.
|
| 131 |
+
|
| 132 |
+
**Modified CNN architecture.** First of all, we define the output of *ConstraintNet* according to: $$\begin{equation}
|
| 133 |
+
\hat y = (\hat x_n, \hat x_{le}, \hat x_{re}, \hat y_n, \hat y_{le}, \hat
|
| 134 |
+
y_{re}),
|
| 135 |
+
\end{equation}$$ and denote the $x$-coordinates $\hat y^{(k_x)}$ with $k_x \! \in \! \{ 1,2,3 \}$ and the $y$-coordinates $\hat y^{(k_y)}$ with $k_y \! \in \! \{ 4,5,6 \}$. *ConstraintNet* can be constructed by modifying a CNN according to Fig. [2](#cnn_modified){reference-type="ref" reference="cnn_modified"} and Sec. [\[network_architecture\]](#network_architecture){reference-type="ref" reference="network_architecture"}. ResNet50 [@ResNet] is a common CNN architecture which is used for many classification and regression tasks in computer vision [@regressionStudy]. In the case of regression, the prediction is usually generated by a final dense layer with linear activations. The modifications comprise adapting the output dimension of the final dense layer with linear activations to match the required dimension of $z$, adding the constraint guard layer $\phi$ for the considered class of constraints $\mathfrak{C}$ and inserting a representation $g(s)$ of the constraint parameter $s$ at the stage of intermediate layers. We define $g(s)$ as tensor and identify channels $c \! \in \! \{1,\dots, dim(s)
|
| 136 |
+
\}$ with the components of the constraint parameter $s$, then we set all entries within a channel to a rescaled value of the corresponding constraint parameter component $s_c$: $$\begin{gather}
|
| 137 |
+
g_{c, w, h}(s) = \lambda_c \cdot s_c \label{constr_repr}, \\
|
| 138 |
+
w \! \in \! \{1,\dots,W \},\, h \! \in \! \{1,\dots,H \}.
|
| 139 |
+
\end{gather}$$ $W$ and $H$ denote the width and height of the tensor and each $\lambda_c$ is a rescaling factor. We suggest to choose the factors $\lambda_c$ such that $s_c$ is rescaled to approximately the scale of the values in the output of the layer which is extended by $g(s)$.
|
| 140 |
+
|
| 141 |
+
{#constr_visualized width="0.7 \\linewidth"}
|
| 142 |
+
|
| 143 |
+
**Bounding box constraints.** The bounding box is specified by a left boundary $l^{(x)}$, a right boundary $u^{(x)}$, a top boundary $l^{(y)}$ and a bottom boundary $u^{(y)}$. Note that the $y$-axis starts at the top of the image and points downwards. Confining the landmark predictions to a bounding box is equivalent to constrain $\hat y ^{(k_x)}$ to the interval $[l^{(x)}, u^{(x)}]$ and $\hat y ^{(k_y)}$ to the interval $[l^{(y)}, u^{(y)}]$ independently. These intervals are one dimensional convex polytopes with the interval boundaries as vertices. Thus, we can write the output constraints for the components with the definition in Eq. [\[convex_polytope\]](#convex_polytope){reference-type="ref" reference="convex_polytope"} as: $$\begin{alignat}
|
| 144 |
+
{2}
|
| 145 |
+
\label{bb_constr_1}
|
| 146 |
+
\mathcal{C}^{(k_x)}(s^{(k_x)})=&\, \mathcal{P}(\{ l^{(x)}, u^{(x)} \}), \\
|
| 147 |
+
\label{bb_constr_2}
|
| 148 |
+
\mathcal{C}^{(k_y)}(s^{(k_y)})=&\, \mathcal{P}(\{l^{(y)}, u^{(y)} \}),
|
| 149 |
+
\end{alignat}$$
|
| 150 |
+
|
| 151 |
+
with $s^{(k_x)}\!=\!(l^{(x)},
|
| 152 |
+
u^{(x)})$ and $s^{(k_y)}\!=\!(l^{(y)}, u^{(y)})$. The constraint guard layers of the components are given by Eq. [\[convex_phi\]](#convex_phi){reference-type="ref" reference="convex_phi"}: $$\begin{alignat}
|
| 153 |
+
{2}
|
| 154 |
+
\phi^{(k_x)}(z^{(k_x)},s^{(k_x)})&=&\,\sigma_1(z^{(k_x)})l^{(x)}+\sigma_2(z^{(k_x)})u^{(x)}, \\
|
| 155 |
+
\phi^{(k_y)}(z^{(k_y)},s^{(k_y)})&=&\,\sigma_1(z^{(k_y)})l^{(y)}+\sigma_2(z^{(k_y)})u^{(y)},
|
| 156 |
+
\end{alignat}$$ with $z^{(k_x)}, z^{(k_y)}\!\in\!\mathbb{R}^2$ and $\sigma$ the 2-dimensional softmax function. Finally, the overall constraint guard layer $\phi(z,s)$ can be constructed from the constraint guard layers of the components according to Eq. [\[parts_phi\]](#parts_phi){reference-type="ref" reference="parts_phi"} and requires a $12$-dimensional intermediate variable $z \!\in \!\mathbb{R}^{12}$.
|
| 157 |
+
|
| 158 |
+
**Enforcing relations between landmarks. []{#rel_pos label="rel_pos"}** We extend the bounding box constraints to model relations between landmarks. As an example, we enforce that the left eye is in fact to the left of the right eye ($\hat x_{le} \!\le\! \hat x_{re}$) and that the eyes are above the nose ($\hat y_{le},\hat y_{re} \! \le \! \hat y_n$). These constraints can be written as three independent constraints for the output parts $\hat y^{(1)}\!=\!\hat x_{n}$, $\hat y^{(2)}\!=\!(\hat x_{le},\hat x_{re})$, $\hat y^{(3)}\!=\!(\hat y_n, \hat y_{le}, \hat y_{re})$: $$\begin{alignat}
|
| 159 |
+
{1}
|
| 160 |
+
\label{rel_constr_1}
|
| 161 |
+
\mathcal{C}^{(1)}(s^{(1)}) = \,& \{ \hat x_n \! \in \! \mathbb{R} : l^{(x)} \! \le \! \hat
|
| 162 |
+
x_n \! \le \!
|
| 163 |
+
u^{(x)} \}, \\
|
| 164 |
+
\label{rel_constr_2}
|
| 165 |
+
\mathcal{C}^{(2)}(s^{(2)}) = \, & \{(\hat x_{le}, \hat x_{re}) \! \in \! \mathbb{R}^2
|
| 166 |
+
: \hat x_{le} \! \le \! \hat x_{re}, \nonumber \\
|
| 167 |
+
& l^{(x)}\! \le \! \hat x_{le}, \hat x_{re} \! \le \!
|
| 168 |
+
u^{(x)} \}, \\
|
| 169 |
+
\label{rel_constr_3}
|
| 170 |
+
\mathcal{C}^{(3)}(s^{(3)}) = \, & \{(\hat y_n,\hat y_{le},\hat y_{re}) \! \in
|
| 171 |
+
\! \mathbb{R}^3
|
| 172 |
+
: \hat y_{le},\hat y_{re} \! \le \! \hat y_n, \nonumber \\
|
| 173 |
+
& \,
|
| 174 |
+
l^{(y)} \! \le \! \hat y_n, \hat y_{le}, \hat y_{re} \! \le \!
|
| 175 |
+
u^{(y)} \},
|
| 176 |
+
\end{alignat}$$ with constraint parameters $s^{(1)}\!=\!s^{(2)}\!=\!(l^{(x)},u^{(x)})$ and $s^{(3)}\!=\! (l^{(y)},u^{(y)})$. Fig. [4](#rel_constr){reference-type="ref" reference="rel_constr"} visualizes the constraints for the output parts: $\mathcal{C}^{(1)}$ is a line segment in $1$D, $\mathcal{C}^{(2)}$ is a triangle in $2$D and $\mathcal{C}^{(3)}$ is a pyramid with $5$ vertices in $3$D. All of these are convex polytopes and therefore the constraint guard layers for the parts $\{\phi^{(k)} \}_{k=1}^3$ are given by Eq. [\[convex_phi\]](#convex_phi){reference-type="ref" reference="convex_phi"}. Note that $\phi^{(k)}$ requires an intermediate variable $z^{(k)}$ with dimension equal to the number of vertices of the corresponding polytope. Finally, the overall constraint guard layer $\phi$ is given by combining the parts according to Eq. [\[parts_phi\]](#parts_phi){reference-type="ref" reference="parts_phi"} and depends on an intermediate variable $z\!=\!(z^{(1)},z^{(2)},z^{(3)})$ with dimension $2\!+\!3\!+\!5\!=\!10$. Note that the introduced relations between the landmarks might be violated under rotations of the image and we consider them for demonstration purposes.
|
| 177 |
+
|
| 178 |
+
{#rel_constr width="90%"}
|
| 181 |
+
|
| 182 |
+
**Training.** For training of *ConstraintNet*, valid constraint parameters need to be sampled ($sample(\mathcal{S}_{y_i})$ according to Algorithm [\[pseudocode\]](#pseudocode){reference-type="ref" reference="pseudocode"}). To achieve this, random bounding boxes around the face which cover the considered facial landmarks can be created. In a first step, determine the smallest rectangle (parallel to the image boundaries) which covers the landmarks *nose*, *left eye* and *right eye*. Next, sample four integers from a given range and use them to extend each of the four rectangle boundaries independently. The sampled constraint parameter is then given by the boundaries of the generated box $l^{(x)},u^{(x)},l^{(y)}, u^{(y)}$. In inference, the bounding boxes might be given by a face detector.
|
| 183 |
+
|
| 184 |
+
{#foc width="0.95 \\linewidth"}
|
| 185 |
+
|
| 186 |
+
The adaptive cruise control (ACC) is a common driver assistance system for longitudinal control and available in many vehicles nowadays. A follow object controller (FOC) is part of the ACC and gets activated when a vehicle (target-vehicle) is ahead. This situation is visualized in Fig. [5](#foc){reference-type="ref" reference="foc"}. The output of the FOC is a demanded acceleration $a_{ego,dem}$ for the ego-vehicle with the goal to keep a velocity dependent distance $x_{rel,set}( v_{ego})$ to the vehicle ahead (target-vehicle) under consideration of comfort and safety aspects. Common inputs $x$ for the FOC are sensor measurements such as the relative position (distance) $x_{rel}$, the relative velocity $v_{rel}$ and the relative acceleration $a_{rel}$ of the target vehicle in the coordinate system of the ego-vehicle and the velocity $v_{ego}$ of the ego-vehicle.
|
| 187 |
+
|
| 188 |
+
**Modified fully connected network.** The FOC is usually modeled explicitly based on expert knowledge and classical control theory. Improving the quality of the controller leads to models with an increasing number of separately handled cases, a higher complexity and a higher number of adjustable parameters. Finally, adjusting the model parameters becomes tedious work. This motivates the idea to implement the FOC as a neural network $a_{ego,dem}\!=\!n_{\theta}(x)$ and learn the parameters $\theta$ in a reinforcement learning setting. Implementing the FOC with a common neural network comes at the expense of losing safety guarantees. However, with *ConstraintNet* $a_{ego,dem}\!=\!\pi_{\theta}(x,s)$ the demanded acceleration $a_{ego,dem}$ can be confined to a safe interval $[a_{min}, a_{max}]$ (convex polytope in 1D) in each forward pass independently. A *ConstraintNet* for this output constraint can be created by modifying a neural network with several fully connected layers. The output should be two dimensional such that the constraint guard layer in Eq. [\[convex_phi\]](#convex_phi){reference-type="ref" reference="convex_phi"} for a 1D-polytope can be applied. For the representation $g(s)$ of the constraint parameter $s\!=\!(a_{min}, a_{max})$ rescaled values of the upper and lower bound are appropriate and can be added to the input. $g(s)$ is not inserted at an intermediate layer due to the smaller size of the network.
|
| 189 |
+
|
| 190 |
+
**Constraints for safety.** The output of *ConstraintNet* should be constrained to a safe interval $[a_{min}, a_{max}]$. The interval is a convex polytope in 1D: $$\begin{align}
|
| 191 |
+
\label{a_constr}
|
| 192 |
+
\mathcal{C}(s)= \mathcal{P}(\{ a_{min}, a_{max} \}),
|
| 193 |
+
\end{align}$$ with $s\!=\!(a_{min}, a_{max})$. The constraint guard layer is given by Eq. [\[convex_phi\]](#convex_phi){reference-type="ref" reference="convex_phi"}. The upper bound $a_{max}$ restricts the acceleration to avoid collisions. For deriving $a_{max}$, we assume that the target vehicle accelerates constantly with its current acceleration and the ego-vehicle continues its movement in the beginning with $a_{ego,dem}$. $a_{ego,dem}$ is then limited by the requirement that it must be possible to brake without violating maximal jerk and deceleration bounds and without undershooting a minimal distance to the target-vehicle. Thus, $a_{max}$ is the maximal acceleration which satisfies this condition. The maximal allowed deceleration for the ACC is given by a velocity dependent bound in ISO15622 [@iso15622] and would be an appropriate choice for $a_{min}$.
|
| 194 |
+
|
| 195 |
+
**Training and reinforcement learning.** In comparison to supervised learning, reinforcement learning allows to learn from experience by interacting with the environment. The quality of the interaction with the environment is measured with a reward function and the interaction itself is usually implemented with a simulator. The reward function can be understood as a metric for optimal behavior and the reinforcement learning algorithm learns a policy $\pi_{\theta}$ which optimizes the reward. In our case, $\pi_{\theta}(x,s)$ is the *ConstraintNet* for the FOC. Instead of sampling the constraint parameter $s$ from a set of valid constraint parameters, exactly one valid $s$ is computed corresponding to the safe interval $[a_{min}, a_{max}]$. Thereby, deep reinforcement learning algorithms for continuous control problems are applicable. One promising candidate is the Twin Delayed DDPG (TD3) algorithm [@TD3]. Note that *ConstraintNet* leads to a collision-free training; training episodes are not interrupted.
|
2009.08061/main_diagram/main_diagram.drawio
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
<mxfile host="app.diagrams.net" modified="2020-06-03T23:25:16.818Z" agent="5.0 (Macintosh; Intel Mac OS X 10_14_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Safari/537.36" etag="G-SdiItv5lP8dSlOMrqE" version="13.1.14" type="device"><diagram id="TlHfH7Je2DXZtFfskpYl" name="Page-1">7L3XjutI0y34NN/lAWhF8pLeU6REq5sf9EY0oqf49JOpqt1fV/WcwdwcYAboDXSXiqJJZkasWCsyMus/ON8d8hS/KnPI8vY/GJId/8GF/2AYSjEX8AMeeX8dIRnq60A51dn3Sf89cK/P/Psg8n10rbN8/nHiMgztUr9+HkyHvs/T5cexeJqG/edpxdD+fOorLvN/HLincfvPo0GdLdXXUZpE/ntcyeuy+vNkFPn+pov/nPx9i7mKs2H/OvQ5Bxf/g/PTMCxfn7qDz1vYeX/65etG0v/m278aNuX98v/mAjp/Gv7zifpb8z8n/b9Y/n+a//lf5Nddtrhdv1/4u7HL+08PgHa/4Me6+3QVt+XTUoMOMuIkb+1hrpd66MH3ybAsQwdOaOEXXJw+y2lY+4wf2mH63AovPv/+dg+2rUt47TK8wNHPE9j59TWOyJ8j4HMWL/F/cPbrV0x69eV/ML72uettR3S5HFjwz7p7leiV4BOmgv/pC89G4CcnPRZUgSewoXW/ISo7zUR6ceCBdr9L7Qk+GOLOsvxhcqw2pjL8Knu8fKlCAhE1r521JXeyeshM/biTRBKyX/+Eg7j21ZLKaJvJYpnL6Jz05iUXkDoKblvUeRf4exL4SHSna1UpL+CcPZNnRm0ry+M5IcG1VhW81eKJXW0IXeXZ0la056N53W9i9Nc90+7W2XdtyJTbfq3pLcMz3OjT0+iY9+NNH1f3SRon+zZO9W2E4PoaPfOARKKwXMD1zZ97/+3+4iO0mrRrQXvaLam590OOLlGgbVnoMGqt/nX+n//+ej/wDn9vCzi2fr/vGmHMYuBVlfL0YTTsppZD+fM+HJEEx5qeLyQOHt3/831ur0cXbead2I23+h+M4+u/3Qn0PhiJiwN6MApvW9retgTbwVmuJ3muKPEGz3mx7JVuICGxlG1pt5zgmJZ05mrzzP/m6l/P+LTy+mTeceCvmQzuFDCr3Yj0d+suLq69Hp+evr0SjPj7HYR9g+N2fd7aFLPecch9Xf3VJ+AOf40zvKrZt1x4gVZxyyO8VXb5ugdNUQALZlmxFR3/RrA5ns0BndwTYgOXbA/yiR4S29/r6j3PjbOpiHNFkFQyzcrpikHAjZP3MQL8JAVEeR/ljVxVtyVo4EQ0L10zWTMa4dV3DgFul7O306o2e0QxLG7F96jHvZcoD0Ye2daKNj98gOtGuVinaXyBBmS99qafWasiPHAHkefY0lFF8FlVrpxGMc8kuTbrgy0PiaBzTWIiBuITJsnmTmWyQ5V+6jBYArCBw9RplCsmqHEqVJjsOjOHfiWrIYlGBsMuGTjlYj/ejXgHT4Gd8nkU+KGyJTgN/Aaa8H3IUTn22HU+MItSeOY/28ZK3Cn4P+/B8lxTeuIcUZwSEX+7D7ynznfy+useIq84Gx2S2QvEN6mOhCZ1ygJ8JBrYtdFdnJEITflplLK3Aw71q36piMJtjXaqwbssHegHbuKvl9sQ+z2D4bfJFvMlxm3xu01/2lfluyWIRE1zfzXs+7uMR8pa8RaqL//WXngKMAW19Iv9yf5p7/fNyniXuElejuNt/eiDQSk5Jz6J+87+en8t4/eqDqOZUsqffXA4sxrNfgFu9LPN1yp2OGEKlvP42S52CJ+gR2FH3dmfLWMdK+XLtgmJgVR+jI4oHOygRfQjBeerP950rSLnMIYgO/cfLePUIRTNu
77tDvuzNwVnidgSnQKCOMQfVuMZb1aVElpzfluzvHLO/ZhmL2v2n1ZDQKvJ1Y112J8joDiLqlaokZPEj3Z9HsOrUkhr7I92ga6RZzACx5SmafPDmnWOYD15BvYizv+wwxCOQYsZ14P4W8vAXWfP4GVDCneV/TkGwhCAMbigU0o7P1rm6BIJPkFoEctfXiYKIRwDwDo4/QqQ6Iev0cBHAluGcfHnKICRAaNwkfqU/tG28q5LhyDWEy2yP+2jRIyAVa0YXa/m7YftSjxd8lwATIf3/uG9ExyHmJKUiP5hvWWuSpyh1MYusj/HoUKMO69md3zNf3kVfFDN+fBBzs+2gZGY4EjEuKL8wjxAXIBf2ZXB/mgZ+NwiYBy0rD7n/KdfGRL0q8OnwY1+jsNFerHi9f8eCxyOnUDTKpX9jXgoRDyrqJtfiOfYIvSro4We/XMULlIlAB964PYvv6qr1DnUETzoG3T+e0eLR0teCJak+Y3FAHTY0t/2lv3RMvDQmACjIK/HDbN/xYkaeta/2PYvtv3/Edty5fFON0NZhyu4ttiwS77Z3ZmMOMVb/8AowTE2fMImfIIsCLZEyR5bftqMhj32dhPr3+3iAtYz3RnXx+Y3qqwIsKeaet7UX94hqHrNq4/c92TnZ29y1gC8UMLUt/cLVXjH91jnTY8vs/x1P8OBPb3cnsNPFBDZciTYsn357sP56bmiXAL7dOPm4fyDlfklaAK/9u/y5+ioFruqao3K75b7B7azYASkx7W9/bRpYDuZw3GTher8b8sJHBgP3qle/xoHkacgeiWjI/5EAlWcywFY25P0vfiXj0gYtFHcK7Wf+OmoD84DPqq9x2H/hSzGAm1qJT8Y+eNd3/xQ1nLY+tzz55iWbgx9MavN9heylE/JZ0Vz00dX/TmmbI1ABKO8h/5zTDlW1VdW1WLfw7xfNsINMPpiA+apv3FKikAT9ulFD79sBFgvsKvlNr+c6De678AWq8B9ebdf9ytDYFfxmd3L8ndUfMKIMPf80Py2kRXYFdpdQOf8sl9D1UEkfVx9X/5tIxmwK2AIKv9rTHnZ8QFWvtNxNH/bCAU921g/VvXjXc1yBPjWkr7z+G0jGMTEMGq0X+iiapwPooLxHp/v3zaysADFVlS+V7/YBM9PJc8HrW+1v20k5kD8tUar/TWmX5ggzvql0H/bCMITZW14ie7JvzF+hrboPy/+b/U0SCgrQtMe1N+R8fGJC9o+/lZ3jgHsynqsL9b5Zb/3N19WdeC2/m34ZSNhxH0MgX3+st/qKd0Ece7/xbh/Me5fjPv/OsYRVq+902dxwV4wh1S8UCZPrA73N8z7nWcBDdvdfAuXcAu/0j7FfKXOnOyLi7uclZFrv0bmC6mi7rH+69n/eva/nv0ve/mXvfyLcf9i3L8Y938c4+jr+ST/g3GBlc0ZIC1otuHT8WDIB/PbOlSD4wgLozImpBhwCY1v1jYX20PBVmImPIT/pz29yljW/u3/f2PMvzHm3xjzb4z5F+P+xbh/Me7/OMbtQrHc/mqbTkmPxu4AY7EVlITMJeip78RMYTEX6+ZHuHIWBfjmT2YGfHwLOwXLy1QG1cCp04XJt/F8zPiFubz/zBk+uEAtDZ/45IQmD/zw52U5cSZV7txluvnU3DxNLwiPfLPBtygsHyrwGnwEIGzvX++zc6a2KYB49dNXuzYsW1zwk1IxFG83tvsz8gYwGR3cgpxDn1zHvs7xtqaLDU1jJZlicMkOS7GmfdvmZKLf2BvWS23SNt2ox221umnpUzdbzLYldyr908sCRHgHeyvqE1i4+eTTtJh2ovBpzcd8w0bAXZqAIZssr4i8YRTLP7LiFtsn5I4neCysUxJarNgG5OsVdL2Hr3rC/2vGFtDgZ8+EhbVLS2tcbB/bb2idHG6Ih4YaIVv/amPZtIiS/jTqIh6seF1NbPjUAjj9LFxoLbwHHM4+lvB1iZ2r/xZPuUeLgqLVyO0QdWykpTzmjUmKr1acp
3FFspef6ZGoPcyJ/WD0z1lpniuded6XDMHm84hfA3VH6boU8WnmPC4RYBfGFhXvD/p2XvUrln5SgEhVwJd/msfaojRCPgRpNYloBgj1RKzye7ygp94zX4Weag5uwIFL4PiVzxo8sTVoI3y15EOhp9pdOmhHczWg5t7CYinZoBPM+rL85ilygpjW9VhC2xs8/cby1Pqc8DWOjNK4NQK4F9EdW0GgDrrsbXfBUz2ZH+/xsuH3gKLyR//xEgOaX8lzWHFeC/W7UsLa7chWerGP6ShrG5+myKuLQoPOaAWT82nyhId2Rq1jja8woN5EqmZzD401XG9m6chwNtmIpn5jaecJ1ULP9qKSGP1c2EMluEjavBmXtTZYKWYEL/2Iq0SIk1etu4wxDtW7l21DVR9C9OXpClewnjJvnaGLb4jWPNft0txV7vVCLYUBJ667ORylhNUe6g1bOohT14qFdQqs9azgbWaT5k3nPMBDhX25wpI7wx+dwitZytaOUEzZABpwrWfIFKsvAQLE1SZJvH2KnqswN+RN05m+liPSOrDnaVc0nRyOfeHCdsr6ya59W8IyN3CwpiLjlefUYRPnydhAFjH3me7kS+HWjayTA4SBMQRnpvwORtYeZlVt0DXMUxn+rkTPVDEjolfzjhWwLEqjaavR2kX+GedUqeZkQp7ldszw/tw5KsYc4+GAm/t0A1lH50HW0DLTEsA+JNSYzQMWv3IUWoKGtE0DO+SMPvjiNLDCBGG8+A4iWFl4/JtaShp1Swcd1f1874xv4Bi+QPhcqmHRduj6E3SQln98sFx1AI982rMfILWjCmxbav2D2HxRLl4iVqA0LZgsM98TYqkvRDQ1c+YzhdJIFRNUXzguwwolIXlR1qcm0ZgAyA7RcIdlqtLL9z9GOWt6OHwdYfb6KWL9A2vRw3teXNvcL2zVXO7xo3E+d9RhlOVXlDjvD2BHby7nyKgyBMmkxVoQ9y5ZZPEtWav15Z7XwturUxIkANfSLbC+xmmlZYaxEaX/qpUwSE6G1bkBZ/OPsb+T2HMg2lJ90xn1QDClutjP4iHlT9RdJ4WImKvfrhW44AH+m29fLG71tJW3gvDFTCbECGdKQ84/uiGqwzaQ1ZVana6W9RewNym/cQiueYivSDj8VVHhy2FqwKpGZGRp+oQRuNoNY4iqAzFMRDNJ52Ap0jqIcV+qOWtDWISh2OPThPB41MQ6dDRWzE7y8TkBDKJnuhxzf3RPHVYqJey8VrVtI/741mO+qZPYymD82tOxqnra2POxaphH3NQgfokp9H2LX0phqfFwoGRYXyQMt1V4eukjvuZ+YnMgEh0vJMURZdUOwVbs7TjIcrae3daJWVcXsxjdQVw3+RLyMCfMt6MSS0/X2Vfe4zOTLmkCHRD2QjzzN7akJZZzDrPQ1+z8xA9dYTNtkFcXs0nCthCmlwqDezBFshgwFk1XG50vlMUKUmAeeyPiiSB5ph8RmeYcLoV3vdnADtl3WCFbwOfdIee7XbQbgBEpEmAcaNBr/Hrk7cWzapxK2s2QtOsG7tKTQZAD5sDh+HQDP24qsGyWgr274Na9ht45IUHNskFGm3XFIXnfrvG25MyS4ZrxyImCBIyCM/WnEMTvMf7U1vBc5JRqNNvD7YLA2F2WgafG4BWlp3lLUtMJ1PhIg8mpXBZrZMrnYSyyHL58lUYJ/aGfZ1G5qo/JGnbBtVjozM55KjGkDhHBuB4vpo+PJ0i2SAu+mb7wdLusuRn6kE/oQRl+GMn17ZdCW+eMn7mvCPJmg71Lc+wqTwozcsxcQxhCQl7H+lfTD2rcmGFq1gvafDDeDGk2m3MFn2/RXRXAEfAu6wWyINDNE5V1shEK1NnW87RVtwX6O4VQwRUgHweVXwljad4AW4nUkn8fHNY/DcDmBFENk3fVPCTZ3kgSXldUFyt/2ZdtbBmTz97lw02h6rwYATdE8JHUwUC2LZTVwDrZcG1Wjqm7qwG4xmafx0jdN1vu1kKWn
LzaYazharbkWawUKQ8oTrGqaefDOzguryb2zGhdBo7KeUlAla/GCm3aogg9oL+UhEWLQAWC75moOVUP8ED+gz5shhkm5UwqSbTrRW3YQ9lkIGQHGt/Reh6tUswCeV74hcv3p4PJKZfSLrYWeXoFSFUeWSTRWs25SFJGIZsose9sAzTWWznpZ2Tk5i38xBq2QQI4AiGFNxSwdmhNqZ81i0dFnhbtt0Ksxtg+jux1LuWwFVvaJ6J16yMX6ttmN9baDg6f3N1PXSfiqHCdCknvIQ8rVkeRcUcYYA6HLPhwbxFrf3SXhNmo8mRewVX5ROy6gFmJ7SSPWwnrUMn9StyZwn6Op7Mmkn6BZCibVV62E51vBegFRVlG3VLbRc8G0N9HNgjU4J0XzWWqBDY8ekTsOll/gmbcb7IeERNaxI286V/qzGBEnFMmhWGIYQcvU3mqTXGKuHkgBkjMbj/nSaUjIadzKe4aDHTGl0f0LaTZRdNUqCvdaNNTdvs17+A9lIpnw0LsKy5EnQ/Psh3+uG17VaX3zOSN1e4nfWJnDDWRlWrLhZgMasv3D1eMtogtlw2WBKZsBI7wBJ0IXaA1Qg/fJzAloSRlNO01TQpdW87mtWiMCNuw/FsdqCBukj4grXBsOaISTfPWvcPxhIX31UN6KAV77xqGcHJbqdUhOLxsCitDJi7YVyaiDAnQywZzYbPeiQDZcYwDUiazqySsFYWHTdMw5kn1WGEe3abKSY/NcOnGYpwu08uygcLyb4oHK1vBWOJixorgvdvpW7lyEDlVFUYzWU+HFniryzZGrK/jy2NNz92ZzSOrRHtgIP6FLW1E3pzhnzinYTDOUTmSdh9LTUpD2uiyqTgsV8q1YgOJSp+i1CkzIfNB04j+8SLNYPY+/QurVDGG1O9fmZFbzbEBeJtAzc8Wcvx5ZuJR3CUEoF/8NdZFfLk2DfFUJ+syrUd6x+MPNZVZHHLdZNmz1gFBu/yyPe6qc2eWAbvWCY/FLap9leBe8eFuHRrq12jJeuK7rhdoYZKDGtnib46wN5PxdNuJ1NT7HpCVwF9nDe94+q+2cG9q3V+XlmQiNLq/V/c2f3o44+eyVsKJCuB9nVGEMtNpktk+b49zX3YtwBoCm3AW8UUaOiNRK27DB7ZpsN2nAhUn9xtArzesuzWntbggvaOzTelU1Rm+U1mliPcL2xvwto/JZulCedL7tehxWovIxciJ9KPY03JWo6mVUFgRoQJSPs+KBfVuux780drN0y+hN4pLbTm2zZLI8wKx4bkvr3lNJQdmZALArXWqPywNEcqLA/o132ncCabL7JH9dhmaJ1fmhNg1Kg4UNjWuVGKoiZugF/H5iUnWUTgcT21ka8OOjviVI0NBHA+XJ1+Ev7yC2Jlm5ypABs/kwUe+yg4dNtGqIp836dsHKypLHFKeC+BocOyjsVrq2PTkFvOxyEXBsuy6wZ1EoaT4/eqTJKFYW06Pmnhnd42VuESsmfEOOLt941UuaWP1gZ/xJOxoWdGBuZ3zoolGZUScIgO/7sZHqdgL05Iu8k+196Il6blyPuPRVPDMMfat7AahTspWzli4MJcQ73kQzba5A1piCQiaDAGq5DW2h9e6XG92V6YYQ9VEvyBxuboWRh9tExPT4/bJZ+q2BJ6nlP1AQCSTStWQMtXWN5nA5Vg+H/l29aYyQe6BQDaT1AfF9jSqCR2rl9lC1jlBluSTeEOCOCJzBSfr/vooE6mcs0MwUAJXUi9hb1/h8VGHUh5MrJ/ge6N5HkOlW+8+wRsgnmi6F+wOBgW0HNDum66yyoOU72la45ZppMRNMmJGomoyN3NE6GzLODZs657xB8F8iGA6QLAIIJjAbuUr6mWryGmSTN7XPTTOZnyNfX1isi6NdHTM5CdV9CM/xXONYxirawYkU6ukijHSxBEHSuI9aQLvEBSYU2zOTRlvYonINFfjNrLCYJ+GLgF+K54z6rjspzN1HhMN3t8/1insVzVlaO6ecSxvo
GTqm4WC3O5Gtr9r5wx4VpU/1fmwtprmNQTwAoPQAu1oAJMErJ3Lo/tV6MhRvVwf4/MCn2W2ng1iWgszkGQRM21lljTQFzVu0qyJKlx93QPxeVDDZJHnFT6re5yBCPw049mqXlEOdwEGMG3CEtVBYbh/tTWLrQxl26blurienRMX8vnF25wVUmggDuDCOJOkIQ912Gg5JWs6ug3zd0sBjH8gu9hl4Dn+m5mXwlMOH2WJzx0wni1rGWPnxwztb2KrpA+scLcjeaxnj3KflFynRg1YqQJxzhZvwCdXtZtpSBG8/TrVim7fJu1l2lXiFwPSziJcIMvZjU/SHjWNk/Gm7uKB39x7MWsQDQgjhq1ODiD8QIiMRL9cz0YycpieGts30yTD2wId4QT8machu98hPFjjGmQE3oRf3EHDeaeqYcqrJZodrhgIghQS3evlXCqYISOeCAsG3o9NI7plgw/GyLyQnudD9YXbn7xtPcN5CHTaojsBV+xMHVG93bhxYu8tAT6Ls+kmX6GnkHd6R6s7Vn+yjreR9RSz8mBuW0TZRgm1PJYPTxUHVPKbtDhknE5AnyacbBzNajdeOCVEhSXNde+hohkHwDTGqfowD/7O8bJgJZAe9miuVCiwz6kc+GAf0156P/PABrSZRULsJN/EvtiXPfogbWUCbTlNqYm0MFOrclE6AqGat12qvkF4MiK/ttsax4mEZ8bIoOKlC8F1GqC2au3DNdtZ7A4CKw8K0E3CaXHngUzv3fIemJhUBnFNaiVaqTeeMXs/f1L2HgIzunaxPMBLAKXyvkgeOQQLkcjGbkX8QacuTQcY2uoOsO471I9ev+QrYMorL5XhUlpCQzu5cWeheNdtrQWqDnQLHA1uobjumbO7EvHk4C/kxfHKAJFOrkj850CC3v/kEu4rSlL3B0uoK5sHUqErhwBnnOAdv+doTAEM1v0ow2AgK7ZMBZqjaWMlLx5SFrPe8QwWnkoLtOtw88A7XYJ+eZhsGdPiyW5UDUIU+6Rr0G3Rk+LHloVYWBDkHBgStia0PaEwfE5Wd2YTTl1dmCd0Dp64QuyciYOPXywCYkC3dzw4gtlfKdE+cIOiQHYbiHwf5uLO+xu1UO9r7qemr1S2wvsyXxQow9EtnBvKzYJUT77yXQKbtlQUhK8cE2rGKJPo/kRGPxfcF96jcKcDrvBh/ppGYLIaoWlo9u64why7dHHW1W+sR+uhhY+OLvbEIVvWy9E5rIxzoHtnZdhWRxhT4J3FPbzsvZKLCEwkVHClz7CsKafCFTltJJGRz+hXAChMCaztguKz5vGlDdelGVD4oZgKKBZQLlzWhZJ7kwhbppBbaUItO48z5RaGRHZQY2BPxto9uB7peqQOJyT5LSW+1iRp0VmCU3GbN8btFlunP9wwBZzqsykdBjQIyA5cPTXA1VPYFkWuLm7sZvCQWrYLzJG9mXJrmy+BSJcPsRb2hQWxwbIv4wsBpkWoHVcEJULufOkGvFZuCwigRgnXpYnOoU6vuERaT78Am1NaZsICBt1Y0HrIZKB4P1XAmuLPzEiB+i5giM0c1KrHEgMZGSwwxqW+Xmm74uP9ZkrMLEQvEIZKDyoHKVMs+dmVQNqpkSDikz6uyKymbZQC0hhYDktL0URFiC+nQNMzOHaeb9LM+7FyP/MG4mf2tHmNA9R0tiMN/kq8fHGxmdm+XyYJ2usi3ZjYKlVGCksGHL02NWjDNUG9LT5gVkv45M7Z4wG8MklXZpQBmeceia1N46i9rjbEqFFmLQDqTkGI0dBXb7oes1g4CsPGIeMMDIY6b/0dYN2wO+LsFQdcIA9zH4hXmx6gW5JFMuHrkjbnvk3RPb1oOzcC0qIRaaNpFHnC2t0dweFUEDb/yXhAwV9DwX8C0anKrcPe9ze9EfkjWcsZMe/OrsuEyIgNDETFK+xqGnflFIZJiVmbor+OYFDSEKDYCv3k4R4s4A8md2dCfFWuqTyIC6e9fEm2ro/9uPXcWq9q8FCBf
1J6DhRQ/FfWi4vUgzfS91vWpHEsSswJHRe7mfyd6be7511ASJcUGKv+m+WFGWrVYeNdix7xSCFrcWVpvjlkYotvDP2u9YNxh4+evdYh1OUwNULu1icGC6y3ebZjmze2n8vZGJ+Fs1PL6DJKfwdx68ZGcMVbcEx66WngDtOu6I8Y2XyGCuyey7lqryJkI6q4IYXShL0L+Qc0YM09XwD/Eo9vx1fxxtS+TEU1sGAOzZSwlwqQAedU55Av3Geqji0L5FYLbdWOUwFnIsj3q3RyfeJSpCfHe7Eq6jakAFsZEWB7vqStO5QsxHNDgukJwH7fnP1IIqODwuThtVSRUzqJMGCsJ6jw+OGx8TchUnbJQHPGDs/WheTckLvCQcUMZlDuHlx5Z21XLuVBzLvPksv4z10Xb3Va8uECREYrP+9lDBHp2qwoXn7mF7M9v/SXd+5CSGhCXr4BziazwBLgFEFnEgp48tB9Vjaa19Qd4CTNkiortPAJ6C4yBdFC1GRdJRaO8pXy7/zPjq6E0nqbD2J6/xq50Tr6LeTqzASRqeGiy/VYQu19v/mZREne1e+IB6Wgl1n+UpVPoCqDF+UDVSkcEXF9wITMSKbbxDFFWwL7oD4cUMtxfPvAbCkKL8iKL37OMuD54lefabezBUQS0QILBgMVqxXVE8q3A/zynje+KwLx8BSjloxwQFgir2no2KvqpC1RzyrPPQSes51C717gUlOoD7edRXpEKLi7zrpxdrSL6e364B08VXPH/OFIZbYYWAOzUYsqfTJGMKkIFOqGbyqWqYNcc49XKOauSlgf+X/hF47Y21kaypbKr6GV93l+gzBwEoBkfWBJcDK41vZKM6S9gzh17BmgViCQklJKG9dzT6X4XV2ApSacTtiee9OHMXlB3qVzNuydHp+EFearRQ+MlYk5xHu91LIGbBgK99VMWIj9puMcuQ2AYgLwSl0N9b04iW4Af86hP2/5cNWQK+uwjTjrWne/qjNQyQnaF8L7a07XIq7uc2gGotP9ej5HU46NSFsfDOgdTAd4T/lGMTtPwHQ3pzI44k2998XjMLHmd38VMNH0IrzbG6juQqjuEgprbLP29IoV+Gys6nv+unl3jYxHA7b9gsg7MBaPuFYvYFq5y9gKTD2tl4saDXZbOIA/3XO4UjXHXhUD7H02fJYzBOP9hng5ozfGhP2s1M5uKwfMVkxDrMHQdr1gii7A1BuutI7vkiIcU+0AHFS7mD7M8VhHONi9xbgHY1wvcu5TUVFgHlBQpRjUsm7Gd6YtXtPS0DR5uNfynxh316WoIqMU3aaUcXljRjRDiXREEfjcAYoGLRKhxy0RpgjjWmJLk2sgwzz26WID/tUcscpyiYGm6T4s8pA26azDVKsAvtBixD5HPX4NLeWgt0K4v/gSVkSVsCJKhIk7YBduLEVYd0nt4B0eUQSzbTegY7VBcZGjrq8itgUuKiIPA5zlzvDt/r7+m1ATNu37zzKVFxlFuTIcG1x0S4gfnS8mJXOzxLGGIHNjpCfPKbCySCWtzJcAIjOcrBJRY3kXfcifTRl/nAjNI4DnPL83X1k6uIp3zGuYx/P2WJZm+fRbmHSMS4mj9E5C4dQFu0sgpnKU52qXwgdtgcUBrxWzRj6rVFgDJOxmr91CfzdFGj3zYX8MVc21Nx902a4DxKQkibafb3YXTamt16i7gSEVr4WKwDyQkjjHPdwvOj4h1qc+jQQE8lC0cRxmgI4IwJXKMd/O0VcpNvsAmbtBF+/CHqhyM+ASCqM9+nj3JLG2y82FrJVY5q9842debcrTtIU2/7FTrKFPAGt7wMJsuVsCH/Wx9jPN87yQk5MPwLq8sXE9nN1he1DA8Dp2BWxBunKPqXDgvJtr5taKiUF/4NHnLAs6G9JTI34LE5skp3oIv3CYBjjsb9RQAg4YqID3WQnsdBvZ1UwFhKOmkLulnWxS+ut5ZMnZ9hkVPskrAO6H81SjLkQXJrpjWgvegIumBua9+BPJz
9xMvmecO1HcYuXAJSKmTS87HsZZ7mUC95CQITZCbTvxodMh+uFYMFYq62Vfj9UG8Wxhv/T8bc+rlyDpxcuKV0WXpnvxusKSGICGNpyjtCNvXIenc+VlyAoutj0S7/vy7IFivlqMfXwyqyE75eDql5TecobQSvFdeTAvueRvAPpqywpiNpODhzqHoQyoCPrQ++xXkXxA8243dP66KE/mJokRhjws+ZObLNWoeXqoJ3/Z5DVxXVWz74wH2dSIwWoLKsYGDfnkiodU0N6ej6YC4AcnP+0kAOtmmwU17TyNB4P5cB7YPMIdHoBV3nq/CA2UwYqw+eyQsUsRvxDpYAyZC5W1f1Qwx2TD2VVldoL9MyHIsR3CmhTWyZpG+i04Zt2Cuj1QO7pRB6yp4Hr2mSmfkp7nQ9OvBXnJDTIJvCdMSwiU+Zmv0r8Yv+e7OeeUXl0Cd72tVq9HgjU91EiCFRXKsMFsvV6jL1jPI/XXJHgWOQNQ/3nHFBqwtvNLm6Cswdu3K0Bsi27eGUo+HXTa8hQWlcn7PQZgfrwMDMqnbsXs81SszHrlyCe3QEC+nX5KY1y4Z0cqRh0pmW/3sFlUU7j1npub0gD+wEhbyfTvAyIVnHfvL18KhisLGG0KfLli2XVQUl5M+Vi0gK6CSMSdCT6pdSaWJhuwkwytMZNukrG/b4poGIgH40cEUB0zhbxQn1zZeMDel5LeQuvVpg5q99FNNeC8Jzvmvendjq69APCpa8hmXl/59lSPJEYEWp8Zg77nc64v57nFYcJ8IunQagaasp+P6wnztdUsNZyYJlcr2yGiAyF99Q8f+P1QBh1CLoJYoz7kEm472CLQEf+1VTsv0FkGbAWGwC4+A9Hh4cyglGfq1VUNKN7ZRkpfZG/a4yJXMMs4UUSyJNiaXmEuSSb0XjtSIEUVmrqd/kyHJuIerwGKEIjKq8qCvmapHjB0lX3O/uFQkPUAVQI94sp+wkLIUoLSHq4VAZYcffYuue437LprrBi9quuMTHXgA6XROo9nUz/xxQuA5wV/t1EIhTaMj7aELJMeQ+tnmCfkTgA0AZ4bMm+DMeOA6AQ6U8tDpKgefMedAkZc+QpA72D6X9n5FiiH/lJg7h3YLFPlaMPyOFTYCwro68jtTolsh5zDKo045QNZuPiepXpc+UTY8ybIwWIwekaHnU5f3xaO88VnLnxfVLXJqNzKvrlWWgOBFTXmdmqCaHkVBrP4V1VJaDoEpK4IbyxS/6iTa1ixdKrSdWpqHNKn+yTtimHnBFaYmtwPu8TG6t3IxPkVVXK1hkjoWA+6glHFlbTNL9+JFkkGQKMwr9DBGpQ755H2xFzvzGSLpj6H1ABlgsV8KpP3cIc9SdbvyhwCmSWFa6bz/EZcFw5hR88AOhZurlYFsMjHcd9QY+XAMJxO2NlxMVCPudoXv6pjm1vQpwArKGaxLTOvp2SdXO7FYnlbBKyP02AeRJNh2GIdreCuaiFHa3t55oqNLzt9pA/8gFug6EALoAbVowusl4T7mygzHT1GVeCIMgvay8gUW3XQ+Rk4MFeFQSXYE4kWAEuwyqeqVstzkgsArnEEtAckhNuBaZyI7pAZ3QLO3P/KtDAHUIJXSnq5PDeXt/ou63T06js6aNY9Rbmv+hF2AYI1xnfjUEGkC3YQD9v50L07zA8Z6KMISfQFFZHA3Q9oRvjHRxyYIfW7yY7PnJNKcGaSsvknUw11YTRMnUC0Jc9NUMXEWCTxmtMOfHuZtzG5IC1gNjbUfvRZj5XHfnSR7mJL79wxyeBgVHw8nLoCRiTzFjwTvQgR5HkB4E8Jjxm94+pSwnlF0jIweeRBDpnvGhA1Z+axhHSH2UWvf8VXoFnUgIf9VWOOxEuwBtaAuTDDE3LtyZfS1nRHNIfxu803nMy2sD8fK0pBDmYCvTCR+djv7+3pv8SvPeFAfxbLdy2qJTDZAgS8j+St9Mlfo757jzIlOyoYK9eUp7IvzBy/k4YFjj9zGE1IFSORd
hM7iDFSNPYXEmbamy6xkesIaelqHGZIMmsHcz8X30ah43c2yqzI5uKpY92AhV//mVkkoPo8Qp+QP/WEEH1gCcdpfz7i+FdDRjFu0gLwaDKApSObbioa4IUuocnaFL0YcjeAGtLkT+9VOHd97xB/AlO8HYiFTakDE8W8aj9eDue4WEADeV5sWUFfcmCnkEmhzX2DXfVJwO0qJQGwtNZ260wXM2T2lb9wnBWvLsGxoqMHC9T+dXcG991AYLU2BYHiyZa5qBpaPCowCca9ku8oFKqEkJl3yG8YsWGfW8RcynfvjNz3DNQkjDdh8DoOHQuAjIF5vo3tmb3h3kmDG5hEeL0sdRm7LW6dvCrcQ1YVN5jbkPapEPlKiYSXdXnb12FCQ8gYqoQvZw5Y5pXXuOAD6J9JldR96TDvAbngcSHK69gDDwg/s774qtSZokIZyNJNfszYC9YMG8U47EVCilkA/Q8Yi/onTs0wFq4TSzYnzFdLVnXHOtBoB+59NNhrOCANbYRs2jRmF7YVjJ0ANuFMyT31a17WcZyk+og8euCPZwn0lKQt7c0eFriCYv2iMmkYwZUtM0kEmf8omvkua0Hofyr0yxwR1eGd9eJxJ5yqIYdleV0iD4yExsuGcEHNRYW1hypUnZNJGCtScaf4tT5m/eztqNfXZtSjsziPwLh2KsIC1ix+s+Z2mfgjUHGqkfGEj54Ylwe6GJYBwFWLpcrMdi/vTxVeYFTNVsFcRQTuEMI7SJ87vAz+eHwyz185CgDwLLI/nYtWsOmzp6Baovf1LF5YM78llrg+4dP7GfVOOBOKaztgKuo2cYBL118rWUD4yJoUCdNThIWR8F0Q4gwogVXOfZ6GxwV5AlSCGB5d7PASktFme7xtS8w09737MQeIpFtxeq+Jxct6Pst1Gp+bzTNr0vfXmmNrT1ugUrwVkwErZ1CU4MtXNYHoyHKP5NzHizirCjYHgIsJbMoWtlQt1zB4amUAolsizS/Saeanx8/b4s3+DRoAf9mAQl2b7ZNHrT0d4z/p3i+M9D/5r0cRU37JRCprALf3kFQbAJ6e783g949dy5D5AbpvS9gCQSuJPnSVmPkbT/TAerKG3l5OjwQdSxrYOz8X684k+Grw2ade3HJ04/PY403dCKYzTRgdM3p2fSerxAjuzWkN3BVlQRwgvua28pYCvcnt9CPePoUbBtuza2xQXqtcPYRvDkAIOUK7XM1PjlaDMyBFPmvkZ97SFnkQOV0ZEJL6C5cVDDBNWYtZVYihMy33GNah0LTWAXXk5yXgESAQ7IcVnVWe+Iv9iiq5IxLfrXfQczWvttl0Yg0SlY8NjDwxn40c9QtUUzddQpurRC06TbvgfqtfPKXhy6dYYGEKl8VPub0/9hylolxwxsbjKMgiTKnmyPB9zPh0ayZAntyHv++lp8Ed0rKilZv9Ccaar5Z7gn1VmKLsVPT1G4pMNgEUn9sMBNYWKiXPYwoMOE+DbWkt4VP4/AcZwRJgQNJgBbTM8fezr1Hcp2lvxht5F5Y4VeFKpbtOhbGNhwEcxSNoHFi2HNSfWS+OWdKN+FTizFL7mQuSPlVD/IyApsuPdzOLzPMBsTyZer3mtIh/Y+JLWUbat8UMzgZUNeJE+ZkkF8ZV6a+IY1jEQQGhr0RSBWfxodIji+m2lo54eg2Qen7oB2FEKm3qkdzSaN3zDmdyLbZSZPdJdJcMUSF56216RyQ4yzjYn/UK20qXGiBpusEV/SoOXCJfPlE/sKj9Uzhn7uocSkx40PcaRfH3ETWvzNnTl6qW1tzOaDyU/Jv2yAhYI07vhoypfQz3fy4BLxFKco0BoHXxa1FTJkG6TlaV73zjfNcQF8hdpSWq82Suj0dW4G8yyyUKMg5YlwqUQI0XYphIn4URcw0ks4wfHgYCLnZDWAcjv6XVCAjT++xZ7Qwz2jD590ZEkLmClyi4x7AvFRzotZuFTsyZ9bwDHlrAmFVsQTi/UeczMyx5bcX5hBiUbzgaV
lkRmG3KsjFrlCrD3I3S5473Pr7yJvzwDMh3g+sL91bgegDVB0iuGDftJkrep1qNx8soZWAAxuvQkWjpuz5zOE+9JUq2Lc/TdGvF9Avha9vCvaK5envVig0pEEfyCJDOMAM1Rbp30YArITRtdBd3NVLirnzXo2M8MByaEvrTpXI1A7Ec6IqGwuteHcJaWKozf4Wf3Jpe57EkhczXzoIiD/DTo4Lh6YEI8tmT02fw6sHWzyqqt2QmjI585xHMTH7WGc7axFO56NkpZCe0B/M9MO8HWmhPD4O+jJ6qEDAA22+zgg5+v8yN22DI9V59FZgp7FbmYa3cKKzgaaJy4MaevJo555qCIeUzOLUWg+iQG+/XaH+tUgDxo9k/1QcPYC2mDaLPQDoIiJoe6JtkgkSuFLeyRGrcFpuDgiGWDSVr/8SoXVWVtjCkU8AhQgOYsMWwukBFcYmQrPRAP6gpthzq63u9KS5JAK/O2T1W8vU6a84o4fzCxVMim1Uc7dDeZ1fNQTbb0hBW3T1n82D5rL8SyrBUt7A9AEYVDaN1MJSsdAxsOa0RpmKf9Aj8h9FhG3U+2xeRlebrgLxnt1HOq5o6gdj8WQt6gvGcUbf3E6RsBbi+MWuBAJ/hHT5zOu5xVUUtEsTOFIUIbqOszg7TVNf+/fjeL1E3Dne+CsAPDx0m0LlAoJXAaUCvftUdbki2+TyHvednODF0ENfiozI/6++kXb0dE8UwzZDpVbJ10qLNzoIcb8FR4RpWhyM7mnN5qazoe+uhXvfhM9n+nH1UR3Izy9yFaBDtsx+tv1xNuJyEP8rKA9zxU7wIaNTM6fW03jN058Md7lYJzkkw0S7fzn1+2tVz4Yfu0yd3RGrr+dUv6aTfSSUOjkcZfFVgwrWppj6KcLnwpFLRpLHE4mn1lUy/1g3bvMjOqF5vFlLp24V6R5PuEtUyDlrCsc4hJCwmIuUL4Wd3CemjkYK7/Z6km3YS42h8Vk5diSXiQyknbRLEms9a0KxVfXEqX5Eait0BQQnOX3/zeanS5tuHz/Pnp9bjDZSSkTNqJPSwZGWCREw8o7xxh8s8m39WmVJc/H7mRm6DnqRfEVsyr/vlUpM5ENzaX7vdWhzgfTN4C+jd+La42QBnlUU4q1zLZ1A3u2GISpxLax75R9GkN8ZGzuftey32mN1DSZbgbquKiXAVYMwvqIQ1UvcnVOJpd/Qoib1dDtm6UZZhkjfGCkra+QCkzd7nF/WE09HXdKtgVgDGp9Yw8k77msueic6HFYoMH0jfC7F0AWfbx51SkPCagngL4gEsdFmcM+sWR73vsqZFfgEV3bqG1MUE1OQbBdiNs+LlxX24LcwCmeWmqk1roX4zODreXuC87RXEwCjqGqfxkO5T4+Ri/l0Awb8owrhw7cvWXe4QlelP6vtCh+Q2JXIO2P4E2f7yHVeCB6UZrBmFyn32TiZgmwRiGnmOX6tDdzgpIQyxBFh/e3n2N0XuFjEH8cZST/qW628WSL3IedD9k70H2xsziahWnFc9fa3hdrGw48nJxk8D8AXqWFvWuV00gIdMXJ69CcMxX7/DEa4Em4w58y33yeApTC9f5fnPujZjgBWULwPrl6w6BVtOSoV8pmwDNT1kywKpq4oP8CHP/TjSia/1yVVdOJUh3eK2dI9O9WqoN9LUAGLItlAgAx3jkFD5fLYOrWPH2hUvftptS4PVGeyTp6P38SStW7y+fOuBYHCxgmbERoqJfaVDdNXD0Lf9msvlT1msGMBMxXMHknAUhSTurehrpb9Tc4HX7ZCsCYdhn35l2MS6AHZNKyzn5GeVNeRjFFlxvnpCBxX6CGL1RQlKDF5/r64wIKVCD/danjank+dFX9gcN7/nojw0V8WoXmqzeTD+QF9hFh2ybrpwN8Lq5vJ7NX/E8592KMduiiv12MJBlKV5yM2pLGao5HAa1S8RzDYmIgFd+5TENxSg3BoaJbC5/hOVAef1a3mOV+TZqSCOGNudAyFwS13Rd
Ffy4iOgfw1YA89u8jxr2StZ6gHWeWsf9FDu3O1RTVKrpf4FiBlFvk/0uNhftvlhL2ngGQhZ2gAtpZT3tuKl+5SNEJ3TVgdgMjJcowcU0lICPYrjKN4D83+5S+PeamM7rQVmmiGTTvL86esiT98KOKledDV9U/jb04jK2Qq/1kJXOXGthlDDQPS+K7xsW8b7IsGdjfmdSz5r1RH15Y8PtjK4bhQ983bu3j6PtGDc42/sdK7CJX/Juen6ukZCis4DN5Q2L2B3OeWc9N2CqARE5WnOTlXBMHAdu9c7T+rqZX3DgC5t5RGtxokp8wYVP4TckIoVlDMiGAUwz9e7qyq1vLDYGa10aOoTofbmuE9Ug2sRZ9UCo9oKRN1YuOJm7RvOu6bbkr/PVMXZozJ3ZsZX7vnnqSuP7oW9j/jtmbwyNKSGMzs2EejimS+XchGFj4a782KbweRTALPSn1jYgBDd3u6JySUoL18/0yXua4CZiae9RTmeBE9WxciX/mdh/CxJrGwtKrEjvAVnQG5zWtSVD/jftOQ5+oVq99x5slG+v/zMIS1Y3xQMzHhSiARG4DsP9NceFTB9DXieLh1w/2aarWuYXPP+1FeYn8V63j6oEcy0vOBcOcnAGkZmQpQeLmYXuOi+c4xNqt9r5zHpBVq5xvdZ4SHS7M0rTY+ZU9O3/9/dt03nmMYzG93v2Y986+Vnx97FWbzcRv57maDtgAPhUur+KO5CuoXTEWGMEE4oEybMhQ6KNe8dgOMe6Ckmz1PmS22ZLHCTWz2NMDPtw2QSGtpyMQ9w7Tu36d95r8BTp8yU2efKfeUWpfwz+1Ns0AbXwLazGSLUfekgLQsdIIXfvMjEmUF+UoxwsoWDV7Q1U+TLhm8UUjfm4znzPUwg2iP1iSYjThF7yDSQ5EyhmoQtA3OcPep+yWw5EOeg92+Nl/CYJlx8dMXEk0VSd+5fxSN1QIyhd/h3kbjvxfwAuaU+AVIOqwKc66ZLszQlNj6PihtgjwYDT28FzTsLrMpiLmLw1THCzqn3gyiCl7SgNlzD0JPFPX87zoYi7r7Fw07u4/de8P3Md+YB12FWVql9alerhRKqJILp0BL2WdGS/ow+2V4KGyQ+L1etF4Uz+mt3kttuTHvb0GrrRRdU7K8Pr4ft64eb/0pmr7aZ49hfZn2w2LVAntUVezvHO/lr5xVrNyMsQ3nOloMFvhNQqUuTZxdRntgXzGKog9P2F7ttyR7NA0DJyGJ7ngW2/9mbA54EtRAcKKUemoGUQbwvNIrHDKBjGj5jtm7ubytgOsD7yg8HoLuc9wCZhf6QG54RMekmXDqVbioE+7MPRCTSYtew2D1Ik4xOOjTz/aBNjy3YQRyNIN9abvvy2WbhjU1sW81vd7dZVFKk3EUYi7fc+/e7NvOC8OKcf5nmilwsWNdNxh64nmYu6xlCTnRB3Rl7r9xnPrOLIgLRL3usFJe2L2lMGXBsLNFr+2cvGKN8pYoDNDUNowFaZooOddjFup7HcD+6D0hi/d/mZSlhkOZbNssJjF2Gc41csXABwkD/N5z8ax/96CkKpgwT9/AvWnGzdwAd4XtIVH8SLFhZmo7HOjKibtIyyrysWO+hCvXIZZZiail/riaLm5zE/FTMR0YJpDqLuRFGj6A12Ps0rWzHYvT5gFxaLhB6lBKZ3twPb90XuCZDFuLRlf+73w0cCwEzY2yQjXVFajU/75NGZ74cfVS2xFBJy5/4dtgM1blL9QheQvOgaRUy3VsMK4s+kUScy31pDjre7Ch/00JnHzXaVpN+TpFYL1J6l1FtFhJkFQ1wtYyXffxnF32jElRTi2bqZHTNv8CVooUceCeHFAwXcwrcAanBZ0kvkMUD423XC0wLXc3ZtpGnnNi8vcTv29f6BVXi5f2K9k3q8cqLQP1nUgAj9MuldvL+WiYRZLbznRQCYW+SqwNoJbichqviP4o14Z9OdD0x5m56TIwdCVzaEV60O+SSMKHXm5juqCfZlGxw7
uirrxQM9gi/D8lff9+AndNPUc34IFCgC+Y5gehF01XklAUBd3ZhACGvT5y97a+XdA00Mi+Z0dg0HpHLPzuz1KJty10JdYGL2tqHk92sRu5xXs2NUMjxqWNVIzmNnuWX3Qtq3BA56ySELKvzYroRubIMPdH9tXuL7ij2nAv9URjYmw5pl9tOM7w9YKUSfFEGMnGexB+EnTXdaohMQQBy+P4OqIRB8O/D8+HkQ9eQA57BP4wnkdTnbzNQlhLK1i6rQ9SYRaKNstok536nZOIMWuqu/bUjzMTl6s19x0M09WPK+XZWpJ4Zov6WrP7KJ/iNyhHniURUnZDEVTr6J3SEYKMOpiaiv3aioXmee4xNnRUBDFl4C8tHCxImFKWtGVqCpa62MlajpbT3VjgU1Vj5a6A87lJGKpHwmRh7CN07721nqRhW89o/fYWWY2RRQBenxCfXZuejiTyp1laBz0rp7MIFNHVgYw4J5JWBXf9kkT6ZkliVN5JArctFBjRLIiwd0U1Y7VI9HT+EE/Fl6tOzUSZSd8UlZHNdhLQpNL8hf9uBx3DuAU3DrOPLMromQLnXs/zMMHF0eZ1vt8gQ8cLmmHl1XQZThUV7GNtf+xvlO1/jLXDL4gRdDJ3m8T5yuzppYqsoNXPVSApgtqr1ai5kVaD7Imi0b0ppx8R+xHP+3z2ZEOjhsPrmcsv6Lg/sgzosQcq2pf1LPYaNESkSYKwd4MePfMvIvEmi0VChldyRwT7UPzs52cYmoJsgCdKDTnFHCSDzjdF+PkEQ2TBZf+VtP+PdSK+nUQicIIamNn6vaYS1E7XJvfGlRIpwJ31LyeYog3UV8yFADlVdGtLVN3adltK8OQfA+q4ywrfu/fcv7qjWFyKvhNm6NzwRrzDBOAPlSraGakwcYbX1qZqF31ILaJ3WvdCo+sv2xJo7fEFSAFMJOEA8JGuhBoJ5Lq6ywFkDIxa98cgiRQwn5YlhwsEQShs/Zbsz2pLq/9pBSmCbEs6Dbs2MjSRNfv4Yx1rDqbUGY5sn/Ks46TAHwUefQEo8dWXpszgbUik6OhF2cM1T3zga+fZ0g/q/2HuvZlex5n3s0/wuXUUUcEkUIBCgQLr5FzmIIJHFp/dq9pl555zX9pVddpXn1IS9dSS0Qq/u5+nV4WKIFHCzOE25/M00VXTSwbv3uJl17l3CjLRVblBUsNykVTQ/txcCkSx7Be/bNrDABQY6x8D6ZV8DQ/yV2wrxhsuqq8CI7c9hzhaIOoqsPZuZgDVAiJbwrbJcIltTDYZw1607ORe1b2DwVXPRk/culvRuePDR7S+pgFM3jkx/OvwA/CJDUJ3giuqIAXi0vXykc36CzD0jvH8nzPGuq9JymfSeMLSPai2s2l8+jpUfc/X17T22KTlRfZhn8krGkpAuhoXsq8b0oJNt/iiaKe7htQ4Kxbq/yTWPdovG8L92etR8nkIT+yavV/OgjYpEFI4JlParHfX0RFokz9SZYm/eDzgNWjrQ0AcutrRwwaA1H49sXPEv7dcANeZrDEvl8fpIRCI+MsFxlT2fXpmuOGJRhy3Exo8NWr/TheJu1KCN0cd5ADKzmlCujalgfanFMqVkDOc/Ne4QtquMqX6fsu1lJtT0yLlyGi7VywcsjJvYsxQYcfIEXG32s6WNU/YOr4+Ow1yE//L7Ev6nIpyNtsN045VOiZX2ryo7BhndLMHbib6YCt6davGVwQ6Cgiy/4JLX05M61fiXTmP8wmt/Vww7SxT/mYVBaCGJyVizoSyGew1Kuy6BIxU30HK+zDsZU6cacED6rAVUywLiFwjKQhjXvvXXccSC/8xWoOpR+M6W/LKLmrZdF3fNWOvXd8UE4Mm4qXnYCbFS/oqA8IYDiHuPjXGnp32hAMNlj/ugJZTpcFv0lP9TJzBEjIGMz7FEaYRvqK/odJmYQA9aTuJJnE3Ldw3HFmsn+zkaif2d1D7sr8ldSBmm/buq3fRSlF4e6Z6dyu9inRqOAN+83rEeu
5wp2Tli5x2mytfLWbvFkFgevq9/SYttQISosSzLHLhNxd5N0w2Yx809ykDFkpI87wLDVgy41BxZHlOafKbUw+fR5oXq0zTv7NL+o6LhZb2oiH5+ZnfdMl/qee/rRs/vsuWyOHfkw1FGhDUiAY+1aySGf1V3W9FQ8QKxSCuyDccI45Exnz7cYdXTE464hKDoJTAxdlRlTmE3qX0svaglf5/tqkwDSclYCzSbWZSnxi8rkZAFZqkIuPxFoCT2nnDke4VY2v/s83ILnK86pHZIpJ0oeWf/SQqbwc+dgrDzf/RoABGKY3JhlhztusoxA7uAmeZwXp8bjgbvrt8Dl/VNVkrWfBTp9G97eWSkuEX+vYR5M+zk00KsSfE9eVPlsdMJn+ta3o780zI0H0fyEM2ktL/7dEmF41wMRwgNsmTT/FnitswZZDG9JNZIipXVg25x7tlkJeHIX+sX5+cpqNC/WBfIs9qPsrxkiCwLU3VyupDzcW/3qDWgpgiDGIKptd+PSHpfl23SH4RnQ7mt977KkfrdpwIRph9NPs7hP2oDZpcBfMMvBXfZI1rTs4OfE1FjAcktBORvEhTEe3rczmVLNH5mjdRsO3iFjOU3vh2PqyCzEtFLVvrPWXcFmrV92BmXzWU2f/TNaN+WNS/IIN9jk9fDXbQgDIhqqJ3pXZIm+SszMpNokWICnr8HsDPBsGj2lrWYBsRLve4D0jeDw2UDFucgYOb1tP6zVqSk7MXpXCV0f7p+aOuR5b6dkyc4BzMEmjyLy6BXLZ+H3Pn+Ij8zYdeJ63FZ47ps6nvxqpBwIwgZmHJc5/X1NqxHCnX+lq6WxemzjWXn/MMu26KceOprcPjXk7lY2jtB+7u2IaW9lKA528qXnT+/NI1weZ7sHPYm9GR73aK8leiBy05QIZLNzv7AkDR3F9wM72BDjDFYBDj5KvDU/vUfzSmsfKlqUkG1UVxXb3vgOLI9OWTkMu97A5eiN58i7z6jPKOprubyu1n6iKfjpPQJf2PIuyDEWp5Oa5mP2XBNZi+OHp9a+jaAyvKVLhSi7Qjst/qT12KSL9LILTn1jW3lx679SMyumHRFTqVie/vGDpeP1pepf3kItW5fOb/qLGAT0gJb9qoMVnDz5rQkb4GDuwLtw/i7mpgWnEs/2PP3m5t+q5ZpBL7Hb7F9xJq6OUkQp28GHDyOYzx7F20wE5k2e8la/gyIZnpvY4h8bdCZZ9/tNPj8UZXsjMsjSz4aNHNsgy+3m4btMXXT139UDuUjgWBNLyDPNu0SX+9a167/uZ4+Z8NpXmeWT59aUHD8mYxrYrV8xlyc9eebVYoYWK/C4tC//FbF1Kj81Xr/FOLjPnizjWFjXF9vSflaTz916ili7nVsaqEbpouQL8U6Gocfpf2FIYSZ1f22aD23xhIXtvlTIn6NDGeGKdQ/66h28rmeX/0R2EXBR58OBx4oiyC7nVpY4Vwa1ul+ewgD/bYVKOu4lHW562HuCALzPr/kgaw5TjHHa4wo733nWPHMBXNdBhPjh/kQ/lY5FW47P5ME0mDRIcSWpa3MEo/lR/DjkL4qbUKfaxd9x/e7qCTBQLoMRkc4XNIZ0EVxSH0LXzu42xbuJy6H0U9f3f9eMZYljqjSLMrtBkhLt9DB4pMMfPg9k5tZvOSuwE4pzeVwdjrOt8/m+/n2IAq3lDU9Fnwq+mbegFsGeY/jM0bI6Sgnb6+gWyZ8+Aa4Oen1RmwIR5VpkiZDPLln+TwDmrVCKRvJD37oJPy0PCby6dz36Xc5tcNzYLsWkeBO2niklkXPLSW+BGKM6OHMik565JJQn8wQYvkavpcFPfWh41rloQXgfOLEj1n9ZR727+vLB/w8iUi+fzldCTLKCXMcylj5KGPq0qFgIMkJZv5afPU30SqaHYfp7cxSY258WZb5iV/jrEzl9ME1yIyTe+v6zxrIcI/1FILrj7C9Z+JeKWMVBWRMx0pnG2jii0GnVcEVVWZ6zooM2UR2pxRo3
Sn26TCj/RG/lHT67ijst1qzhHMZVm+9fd+9exbZxy5WnNLlxja3lpd/Wels3r6f/ewpriIYsubn54vlMkPBZWqKxLnpDUhsO+pI/oxvCJHWaxbcjMjQCqhh+6MaNuSO4K5AZN2bOxFMmOWDeTEwb4122p6JnqENxWb82s0ul1ozjaA8U8GyrH3eniy/vj57JVTj/XzDUvhWG1QXCTRooJaFZhjuaDUNA0FKppQi4O8QVfWfis7rNAqDSb2sIWW3H+UAVkxq2yvRLZG1FFtT67wHcXGX9M0R9RNpPpK/71uevVlRJBeHrj5kdqIWxDeCscCXm0c0nXI65Qhhestsmfgv3znQFh/P3VsCYBaiXE+fnf29FrBaREU8S0S27aY21WDVTtEDqU9kI+LbXYh3yTOgCqyFM9ySCYaiYPaA46QviSczpucXjs7lMxBd7A0m+kPZPgdfxzHzNDaZ5v3D3wF+cnEWroEywpUNF6FngupjH5cdESG6VvzYbvemHXepvAAUQAhuOzXTFi23p2hYRMcgEP2pRHIORy8dnDJhc7Mqrrj8AIX4vGzdmWw6zhv3qfqclRx7k0sSqjb3kKSPJf82lqMOWY+t3vZUb8QVW5sMF1M628saaR8s+5YTgITeBY0S24AKFc48rVuONI4xvaqX/eR6C3uh8YhucJ8IUae2bBmQbiO2bO52gN7HNUE7ZS45p+Lzef6HTeE1VRCP0NBoC64VKXmCodNzZxNwcdvTzBsdJD89nx6utSIee/R6IMK3fw7RauSRfWPLp2DEcYCs9sQFJ6DybJFtov7abmFOkT7ZRcQp1G/WAMOvSuZxRWKSLXi8JJ9n2mRnpYdsuH92XdUZnh3p0L1h+YP48OI5g3irPI4N3oPEemFeKgbGertlXfpEz2ylpjeTYOO+X0wksmF4kNwn76n7Y+ueTEnKSqEMcmxUgpCS4xbmR0v1K1onwNgGnAQ/b46F8inqbCBTXHqWhI4VZzvrwnZN/EdfZVkoFvDycoUlVTGOc15Zx3LNI0Ce+rJq+FS2RSeszsAX27Am4Eeh67IrmWifh4K0efMh8yc1kGCgSK+EA7PdBEbW3FienCmsvujJNOv77+lxXANneco+/A4H+FXTXMcl3BZ+Z6Il5IHulNC+WGfWSf4hXwerk+Miw7YbgVmmNM3ErwoSTfLApji9B8s3XXG6ttPao+4b7vEeCVFFme9OnLEz+x5fEQP0Uyi8K7Amw5zBDFYtHHKKeWpeLju06TrrHM1uvQZEoIWaF/zRr5bP+SGViPkdGk8jMt8PdNKeWrRAXq5uectRd3q5RDt3pbbBwVfY8XomEZf3I2Ar8VxdiSZmTBu8pvfmKKJMrnTAPk/ykJbV0ITu+UEH4NSppD96EAiiuEneZ68I6fbmPDpilFYeX25QMc07cbMpg+WXmtVVTiWOe9qyoPflchpF+BOuyOcHXKgoh1YEL4j8WSfITbqrGbGDR/rzzLf/qkyuBAoeeGTD3I/RFvTIJkE1mS5D4l82s8/NeP5DH/6K1k1XjwCvw15/lg+5bZwo/VlFXRRWlVKWYWE22NP5aZwMELZfIPgzTfTkv0DOKy256gb/XDvTJonPh2auQX/61I4okPwlt0dyuMwmOfyqznSCBBw+JhowJjmb84HrfSnuTjrou9Gf+9O1bhdaDDTtf0jpf0hkRBAawZZsmNBBRa8QBP7z0jsasm76x0uk/D+k2G7nrG+zCUKhsL8+cMJ+PvL9+Z2if/2+VulU/rxG//VamVVF+eux5F8fjMafF4q/nw3hVj/fiH5oNzFrmr8GcPxMYFX68xk2exnu64W7S/2/dvZ/48X/Vf+v/438Na2ombOft/28ME7f5tcLYxm94ceqjQr0fwGWoEqixojirLH7sZqqvkN/H/fT1LfoDQ38hRAlr2Lo5y4V+6YfjkeR+fHnH8/gm6qAz079G716fAM/vrME5o399Qr6OY2m6H9I/udXBJGghpVYuYJ1W7HLu
QDExl/vz1J+wu4RT/SfyyTyULdN7OYR6PWxq/71fsM0fhip5OTAC816V5od/WDIK3ovhPnqnwTaI/Bp+HaVEvNk3LTa6xLf6TI8c1V4p6nY53/+SBtldeWUnPEmPctFdsbHuDNPmYRVgXdbgvZ5gt9jz8WCO1tpanFC71nT88hpTXl9ioIUk3qjSc/5KlKrVlMXTeQLW9VfYf2+3+Tg72cm7a2173qfqrfVqtglJVPS6JLdaLlv+GU3qMNv7PzX2LWv4aPPV/ieeTQW+MWEPl//9ex/PF8O/WudtA0aT7PElfBFIPoUePqS+g6nVdrf7//r37/nh+bwz7Gg1+Zf850DgpsMsiwTkd2Mml+0oi9+f45Axd42J/sbi7yw/b9+zu0dtsFi3qnV+AJvEat/PAmtPtoJRL/1JvBvS9LclpgANvh4Ks+HrIiGKDyj87N4eAoWKemStNOOXtPj1pxtkfs/+fQf33GM0npx38hz5/SMnuRxs13L7K/RnR6k/g6Plb69Y4L65xOkdYF9s163JiGu38gXfj79syboCX/vM3yqXpdMeqNRCVPo30q7eN+9Os9BF/JyIzvujeIzMh09NrmHFJiqKxDprRCkapX8tlvERhX5SFyzVTW9TL2cKilQ+cMc8m/Vsiq7pBdKfLU0UKGvSAu4EgQa3kkPiXCuvmVyES1MPW17koM/qbjcUwRHbN7qyO2L3z9Vc795981TJHO3i/bp6V6o3Pytyj7eMn61r5za3c5wc6z92eqFX+WDES8y6HxGPeEHPAUT+zEkSHEWGPsu5WlJgXmQvstRsEbFaMzCNjXvtfShciE0qXCOy44dJ8htSbuDGgIA3Pe8z1X8FCHaBN6zkrDpGQcjO7L/dOz9gjbaXXwa95X9s33JalJKaVxSCDaa+N8bmEBOyl0ee08J1z9M1bWiC0UkElLL7781/ECPCDweIQL0VX82AdIYZZPQV4HUdAUHi5AbTSZ8at1hK0paMM36wtwaKe9dBadZKoYOCPMOvNa45RF6i1WdOFOOOKvuJokMIEdzKv4ctyIODjkvVhPr4rKK0eO3Jj0Cb69oXh2DFfgfTU4UsS8UwYvMdLgW/wAc/E+YPO98l+EmEW/hjxYjJf/Sw/5GLMQfRp2Tr/zFmlT7VCi/r8ZFERFkLP2IDX8Ftf3duoZnhT5aQXKin2LG/xgjJOo8lXTY2hfxeyMaiZVLAy7IOe0a/QEsnI8mOG5bq9Ee/A4nF8yRzeGEOdzH+WOVekcXvUhOB+71m4tOtG+SI9bDDXBTL/zRdKZ0ei0MXGI5H0Wh/9nwxeaj8wgJPESh/NeO9UUjwGL85wLiZ22P5Czt/s2Xu+T90TQILQb/VMJ+I0bij4ZTnCxIgPMm2Y5ev7cAci6y6DRtqUKkWPj7+taYg/H9CQOJ+/DP39eqh+YJ3rH4R2DmP9pBSeu5HIB8c70Q/L5asPi6Awvyrxz+K4f/yuG/cvivHP7fLIcSRS3SyppRtKg7PXYDfpq867Zhv63OgU4SVvGPWqZ+jB+BwTHiEQth2z0d3xeCwLzf1uZo3+74FT+1KsJtn3VLdq2aoz/f44NshJlD/YmUzKLX9FuzKXDR+RsakucCzbaiym19/T5bXbgheTKqy9T/iWxS/qWFLi7Wf5wRRxRrhMg82c1vf5wQ5yRoznf4yKqY/tk+78xH8qidTtVvTmU4c+KzaNSnbj1Pv6+HdjbhbMOMft9zJA7yhM72eE/e/4X8QiG4f2FGR2rEP2VPz/jnNWhoqRD+mCu0z2ugDtrjzxaLdwrpg834qr/vwdE+D0m5OWuf9r9aLGIg4/7LMv+QRkm7iOIzTG7u9b9aLPZIg2izliT67/pDdJ5Pvv9S1RDxv++pqCJBOLup01O/n3aZL3pKcJpS8bE/HT8WEgTzcyoFKMf8+4m4FefSEK25L/6QkSvfILnCxUdj/emkqnkkizCj2x8ychEM0FNyd0n+W8Y9hPTRjKrwz/Z5fqFIno5E+
0/5BdE+BOFJ/CEj8gya7c6W25/yGwqOcwjCu/+zxWIKcoUEof5TfqEg3o8g/FeLxRNoQqNS1T/l96Uo/CEIRPGnjCBBKBv/ebH+lF/tYvFPPbi5nPynjCBBkC5z7yT6n+0f5UDo72s9IJ32u4xIjo44V+q86Mef7R+DFckizOj5h4yIxR207aZ/uz/bPyLRRjwNzahv/5SRBskVXiHR/lN+QbTPP6L9p4wYSK4GedGSP/YUCcJTNkEQPv/quH913L867v/TOk4tKtaW1m02ABx33zD3P3g0xvU+/T5D/mgaYiwqw83dgCAS2Ql4PGb5bSeaeJyo8U9pRjMfZCHWbASonmXFtTcl+Pd0/3u6/z3d/yKYfxHMvzruXx33r477f1bH+duWqSVF7a/FZ06jP+CnNLtuwn/dbklFQql/J5rDrdLOVzVh5y8mrhac+C1A9teJKuKS52ZVKtlh3YJd+//Fiv5rNf61Gv9ajX+txr867l8d96+O+39Rx50Eg3lbib7+dX8oBBK9RCWE90Ooe9Oy+W4Y9NK9sMZ9RPk1Idn7r/NXio8NWiMsj/e+Q3K9YPlGw3mDe+Jmq10h/ulHsoP2JcpDHm/ryl3H2A4DCNnvVPqRPWVIfCE2sjSMmCIeP5U4lWyB0tlKSOUhJMP9JYV6/x98dbrv3EolNh5ZOcaF9Ls0f6Zmip5w19ft5i3wDCsW36cOkv/IuqTYbo8YrtZ/ulstVbhD9Jlff4aOyRlsPjXxQ38Pn0Fx4+jxOj2lrvxJzi5u2EUVqMx9GaE0Kpzys/qYy5DZ4o5WsNiy+ZptldPrfH5uRMrZA26bd4gk6l6RcefmBcvtBmf0I43ganU3Bq5q9epEvu/Qtg4ikmLWiHSLuN6I9UbKTHX3MdwIAm7pSkj3OptXqmCPBSH0ln8awR6V3FEefSyelUJi2aZo3w9rB4PU09bnI7lVybWJ7pcXiniZ3q+cGuq2t/qwcqOtYnZCx9mW01p2CTA9Mjv+JyHnwihIJ6UVObM/hVwdR1hko+IeZ/1DSiHESOmbejbmxVE8dWR4xafbankbJMNNE3a74+6H/rQb9xwi6YyZQUDZLY8/afwnk14SPD5SxkEiBh8slloUReoKsU5/ZEwdFyEVGDGKcEeeho9wPTHZSyRrnDTMgCprO/5l5QJhFJ3pRSi6+xMBsHjuqM1GapPDOHrB+5tzEqFPkFoM4XC1Jnm+RMQGjgvzNjjkQONc9j21XHJsaqImQp/tczjRP897jWKCHikn8b3aP9qWSjKkjrzpAFrEeXTfT4uHO4+dtSPx6PtQIiMNo2DM3WWpMx7Jn6Odakgh2/3kTpvkak8RejLLWr9OzaDbBmesm21irNebTvZt87aQiO7F8Rn26vzQkwjPzLwE7duvTAtLnHkkjX7iJQ8K9svAm1hRzsZkap8cd+MzudOs33+tRElt9/z68EepSaR30lbLfwqYydqmSArGWVhlYyPxvmVviScgRlwgqponuud+h7j9CyGes+5IWJc2sumiUA+90nldzJW8/MgU05s/Lc3dC38NIZ+CjBw1u/2MrVaIGm2C2clJ3X4S2b8EO6cSbR80R/tZOI+5si1aP+e13w4LxtXKBEe4tttDG9lR8n65Nj35W3K7FitLn3903MOT/RpLuMGzUpk+JAvp8FUm961kvmSrc4yjSOdliSCi/q38JMjMmMt+4J6ffHH9DaIT6VX3V+6oFrfsX7rHp6o6oT0fuvyQtPLhiMUwwrU9fqC/OrlMsunut5l5efUEWiaKhyfZ3A7M5kyYFnwT9gPVgsp3D624BOEjzNK7JaeaWAqM3B+ZsbmGSLyiH42uB2g/uARfxB8E5QzyWj6ueKyE0YJvecxuLBsRHOYG1+i2WR/iV3sDgeS25vKrZGgpDjf/29+/QR7lHlg5S8qgXYvp6OGIvYvhMlAG9lwSn1swjRK195r6OO2xNDytY+kxrtd09dbvya/q9bDH/Or1MtFAVppgXfsZG
p/AIhiC4G/thQqqvVSbXH2/bYN6Z9SCN4OF5ZEWkB2Gvz6s3EzqSQ0SM4biCeJHgRk6W+B8+ziBOlZv66fUrFMJshPcNdMD+3DVcyhVuc1HfrYRc7yANMC5g+TJLnJs63SSiLylAz3MyerAGwW5Xs6QmxkxOXe34Hu8zaLmYajC22c+Eo3DYShwMh59nN+67uKX9dEEq13xbjHmIj0aP0kU3weLNLD2dNhPqFz8kJTs2pn4vq2dpEy5/+2uhRnSm6djkHl6F7j1FZ2iq5usGOPsv1KjnnorUUErp8PwgnbD3ShUswUVLa4193Qx9lAc1UJQ/byeTeoogOy+6Xej5HTT4f0yP50XrPj4MKV1C3HI/fkeezC6s/RJTBd08iLVVr9dddpO3fHeDXcXjFxGkHj2ypcX2XWfh+qFXts1L8LGuiM7wzwHwrrlKmks0a0XeNYxpqdRtrOlKy0YS+Kx6uHOsC6aoxo966OV2+AJ0jkZb/j33OHvI6pXUzZxA7VoWZBBiD96wDTPSmr0ZhjzcEzjKyhPyKeJ4iOOagYFwj/VMHcytgNLX6+3ypiMd+Te+qY8eTPMQO89wsi86OHaSPY7zvOOGkSRQfQLYB6jigupaWAHrt8jPkftB/l8Hr24WdyC83N7YTMPFJMHxTqY6ERcjDCGJFCogwGFNu4pbkkjA1Zynh6VfiBZ3qmC5x7xj0ZXQLkM4ryK5xtEOTOgt8Zp5+aO5+wTlx5zg/RTjrs/7lxiZhOdn14ZYKFr+VxzVXlYGXu0L3ysd6vmxiHU8/e3KWEMzDlzZQwpHskRBvtNSxcXL+67aMk/JeSEyi4UpJUH5ilD06DyrjnXaBzRdM0lyWFama9pemhuO27W/tqXp/byrq4jYDUcWtebBeJhtcH40aij5wmreHH8FGkL/v6V/cfHeOKBkuyVRfykzxWZbIhnX2Iwc4KThZSroknTtci+36VIw8LZlWv8uoZz/vC8gaEaut2eSm+C9pKDVtPv10E2cSgKzDpTInnOXng76/unpryppv9x4kecC8aZmtU8a8ZNBmjAJzwa56wSXdg/kD4PHHuvs03V6sl0zf48jpcjxB4wU6aYMF7tr/HqMN7TbuLQZsbRVGojg5G58uOpU5LutnM0/w6PaoQSfQ9zyYu60z1+PLzC3/XItQHhmzwUPkT68/7wEAZ8GhL/efHeUeI4FGVntejJwnDpnCWas6trfZwixRF679vbUZ7BidfSI2HVhfPgVJGjpCqeEsNLexdckvjjo9Otowzuf0UJUpIliOf7Vsg2++DtBzZ5zmegfTf+fuUYGS/+vt/TfmoVgmRDYLo6lNk8Yuto6RA4xBbjuzCcQSKTtSn2xwXXJA3PPAxpcnq5VGDVrMoF9n0U6nTOB628+du7b/Y50/u1eenES0+FWUid+ywMV3rx9C5hXLzzE6Y8Cog6UEC0z9YS77AWuEEM7Zh2Rh3z2Erf5dPTiba5BedRyZXdDmTtSCnIj+zLMwUVO4bq0R6FsaXvWDQSMS4YezRuUwIl4IZT6tCBt164Kn1gcfHEZG385nBgrbTam7alH7Xt/PhNfMRvv5CjNcqAWg2EBOzLtQcLAvClKa/cEnuA0oSHeF1fo1BfCGXdIipWyO4OnzyeJK1PXheJpw3bgh+F/kbxvoqE98LtHGnuemR9afhcAvUqhO/i+mKVqymPwxhHv3whOWAtwC5vKXag4CwPefdvFRIWRq3wvjYeL92ODU/5/EjWD3uXzfBNX0GL95VxWjTQJH57Yznr+cPfdFK5SRemGtocMMZ5u2k2vak2p9VLU2OU7fPTy4tvHi5rFV356oPNupc5E+zHZGwhtpS6BZSLqAx/LhlLZRweA0Xy1C4nazuW9LQ+Oa6jaiTuihFO/Jld2wNfaB0pv9uz93DrA3fNRiU+yZQ0LVhpVUBquHyZkFgzSt5tMXwgejFuKimPcBJUTFLMFzFky+loFjf6LtHVM1H8nNsgcWWzz46y2R1ix
Q5SIdXLxB8nzr72ngXpJqwiFLej5L3Ai/FDGJTlFLjUSNcOdzvTnH4gwkouGvuVHdxAS8SKnqSdszp1SiUFrbrqR3x96hVlVEfrivnD2rR7Tm688KN5nLXQQkh4KZkayk0iJWsztzOeNO2MYYieIIJneMlS0QmlvkNdgxTtvbx02yIMQmNd6HpahIk9Ctyee6FPVr+ABO16vHv8+qi+Z8NGJ8siDdJ8JEZ3uZdQOzN/3B9D+Y3SIl3ssC5/WC/4UQw9GxPwzxmIixgFzakKzi61fcoV6hRVnSDQVm+0NCfeXPh/apj8ntju9k3A/1Pc0F4P5i3DCWi5GHxGvnzBAeg+hJrja0kxQ8DbGseXoFeyxzD0zemaPFRzmq2r9Kusrwuelg+2XnoH2OEg3EJJqTi0OnOd0ecpHC6ksGpLUPpOvWeIVvtfio3bo0cx4gV1gCBxOXUddjlcAyxU+o4z88590SEaVqQ4DQlRRpGbfSfeLeNM93oYM5OxOdBUBvFdKfpK0zNBsnfSbhKptfO5MInPN6UX4dCfqTZFKV9h6GFwdqgUoz/Ast9PnW1+FXl5IXm8mMvwIZ0Dm0R64g+nNx11OOT4CIV4ftzonr8YGV0rUGZFmYNH7MTNbQJMqCOpl6JdcnU0kJwyrkqu9W96Mxi9awZmyensTT08ge6HEPfnwH8syn39WYO+R2vwcs3b7WiytmGzvqU5g90o1XSJy8r3E9NKrkHfAYcTDynQP6/DL5pVSDPaHhHhd5PXzqWY3CvRJNkWL99svHzQ6V4jGXd8xGIQVnLPXvomsSe3sLuIrA511HkyJQ+s2uDKnwohm1fxrGrT/aSxTK3tY4nWiRE0d2J53Qdkc6ImW1IpdXry6ISfLjP/vMZ+18gLrIOGTkjNKlgTvGfNAEQnWLa4lixkuqkvA0aNl7LzncJbUkC509ssu7U5xed1++KmVODBamo58CaKde8pYvI8MPlifr0aaDP+oUTFzoeDt33w2JGNnNhiV4XIdyEQ+bIR18U+OLuYCCK5JFWy6NrGUy12Gl6n6/LERkhels4iMzkyGgGUxX9CWXxEwI/2Zt61rC2VaN7T6FZnhK3cb+tUJDu6ePhGuwQtBJ9K+GWIkeGhn6tbB8HXJOLtozvGWAX4OrUs17nJ913bbPtTBVVcZ5D2oU3TTTsi4ssHdmH8GtBmdV89/bzNsYLtFySUR3tGqQU2T3dxdv9JBvC4qlqx3aXfiYCZn85ClgFjDMASzyGmwsS/ADvCXof2dEdAEHEirBWc1bFHKxcGOMIyjvs4H+l/SDxK2BpWxY7znEOx7GXpszsr8OPzp6fnaD5wyo+hwgP3+QZTO/ZFOsp+uaZuQorqloOvz29AB8/2dxrM9Bfud2H8l9Jn/jKVsLLNEmQu2NSHjelQQAbjZ/QU2kZo2UlH4r3dwpdzLNllHRBWnADCPFkeml4jWWke7TTQArLZ0WVKhZX/jHLb8t5y+mol1GhsueBjftjTXh9Y5OYhAWw+ODnqT4SrRHxNvbPt449HsZpfZBpGWSlAIq7n8WvUaBWjC1RY/ymAW9wPT5IrVeyxh2BDzcfZR8K9tQLNujQiP2NgAKO7rjVi5mx35Fs0/QYrO2uqFhfHXJ6y+fbGqdSPBvQltHRA34rtp0EnpwTtbc2JJ5VDJ042z0vXQKMpYYxVgqBxDViHBji9gmZRyAZZxb0yn5dP+8nUMiZE/hUX/lMZJjoglu83TYnkRkHpGV300g7ajilPGIXjmWttnUir2ym8Jr7Ky8XkuERMeBHUjPPpqUuoo+1crIH3/z9tlW8ipQ98ELfPjHg8I2PtknvGv8jHAFJiaMzr0YZID4ngky7qKYcGcLP4LtSpcaUTq5EKHGzFMBgRmLqCLEYDJ/ad3zFo8YiJTaE2r+51MzVS7vifpyb/B0/1+Cq4MZDLufPXwjcNbm7fgwZsaAA2hL8Z1yhA1wRrTWzTpUruhFwfT7xcCeOrH0/U4Inv+Imei
Gap7LxtGPYDwdaXbP98e/IZ+5dz5ke+YRO1ZDJCj6CBsjfgJ25ezhS/HhzstF5dOgSldCbLHdc5Ae0GgDYihONtRWR71JF40/P3cVqWBYGaVDlaf4qc4II/LW8ILsuv7eBrwBDCLzElNCYXP5xp1Es1fe8zVJ4J69SPDQNB+n5uoFbbcu9OC/bjMiOPFmF0hcuIMK+WR5J4dHjpivc1S7nU1+/uURS4/rgNARk32plBhlcZCza3mzsUYk73eoRZCbAdkPFdyo9b9SrspoGKXCvR3NGWGMfNVejZMtwU2Kt02FdEBs7vZxOodOjlxDQ5cLcyamGKn/cKoa+zI4K6yG5qQIxp+JZ6mxWZ/Pv99AMd5ia2nID2pm4ogcqDNunfzvhc3iNfpJRuWNGti0I6BC/JcEoUAXwKn95BIFBWJVBXpX7ybojJKgh0iGXssm/wAJr+yLPjqJC9E5kPU2LHt4J/rAD4wAY5w2aIkXIFDlh1/2rxExj/O3r1Eh3efXwagrumio9CEYgXtXNIb5h3eGKj4G82Mu/wxBL9/PnvJyqaEtxEcW5Mr+Ln6CZFvo8bJrSXM+7ze4rvLh/okJ0cXrYHBftdy2d5pN+6iz/dCLcshpywwmrfLdKsxacFXEP4zvexOT7nH1nXd+7+PTgGb8I9aK0y3r3QFfFzd6fPFMQPJdWx+eg6+j56VWU5lOUAwfiweX5i2xnWXXPd9SCnD8j/Yq/Ej59fhCwnp+IGjVJE4RaT9vDpyZpj3BdJcMXHhcLn6yZqQbB1GNZ+4qNrs2FDl2qlRdxbqnBdlgqyQAwqxnQFdLHAKkF7tsOlfqAlAU0D/rb5xsXqo9E6UM1BWcN9SZIWysJer9SCDVJ9g7MtapvmiAXaqPQjolNSPyWAjwLD2jpjI3YMZJJggqCREC7YNEpGyIf4elG97guxwfmOog80KgHOEntM1znQ7PvBs0vCMbfa1QaxdQwzglzvK5YWW90Jy3m+RDEGLQ0vJ4Q+42RprvthZcyHqHbnxrCpDbxQHaM8BQKdQq04+1+B02+fVwsN15FVvkym+JF/tRWQnSBaLyENp57Bjm6wjgVlsfgp878eN8ekmj8HIv8rq8yE7EZfJ97QWvzMv8Hwkqt09dbCdM4C3eZr0LZfGASvYG3VI8kVOj5Sx719ZMAg1PXp3YkU6soMJ9Ag0dUCN/0oH8i4M7IbJfEvyLKvVLwZemhsd19tg7zg3ahLfMQny6s+BcM1gC6MgDS0OLDW9ijWVp/VHb0XXHTFMN1vKwKMirm0fjNC/TLp9EDnQnLukIun52nf/Nj6wO6YNPKSGyEFCPU2bMWOR//4cWhuzLHGyFpccGmEZuln/uItG3e2T6+m8DDdE2HCHm1ctCcSixXsHne6qYjLIjsGY492ma4SqRfE78Nm+hm/I6OqIyxnhcPREp6/zwb5P78qOypTu6+xK5TOy0Fkjkfjjwny1UHOovIrZ/G1RC/E30RMxI/tg3IQ7rM8pAMtBDI+jHK7ao1IwflBfymkPLu+zIcngnTaOEL6BtImPkadhf7hCjk0ONQkMX1chv10v7upMiiJFbZYyE7KZdVAJ9qayD/tgD9JpzuvqVfhQs35rwsTPJ3rnsUubHXokTLQO/bp3+sb4o6wn2bDSj9I/agmeWD1Hn1ly4e5Tq7zJgEnKQCpFteXip351caUoKRDX2yN/jl0D5qNpg4o0LN6ogd5ANY9rz7J0RVpCvZnpoqSwN3S//y0zZiZr5k1p9a0wzT1Q+oscsd934f3ZoO2YfDozRLYOi2oNd25vsEkPhtZ3PgR/0CFbPt5XPkg3TxwSltQCIMqwbsDhWa61/kTZlvO7tQUhAe75DcoGx3fAcq9ddBawckmSC7p3HCzGUMnm1Mpv99XaGOEThoiW58r+wimDXiNDrwmItVGRjty9JVL7s7mBQH2enLxVf1yNNfbosIHmstLZ5O10rd9+zIfDeiiSDj9az74kQf8yIgb+Y3ktzop8
h6rZ2/oac532abHLkfkShHb70kfAIV+z+n1M0utdzZ9D+JFJEeAcwKrzPU8JYvq2iJO/iXOWEZ4CZWinU0uNs+hT5jOI/rBbtBSweWYr4pruFg4/EpHfTCpn7xhSA7ujfNRXFYrelj8oWls/S19cqR1b6CwPVGlgo185QgMIenXwDYFRhhyAmIlEWDXHcZac4jeMpQt3HqJvxXagiHkefLuHtlNoURcx3w6709XzW6/iu87TUMEJQeto1JBH/LouF/xKCYFJb/OiUk1SPddHMkyzTDWF6w8zEKQ06cys5KnAfLPySHkal/dUwEFtbf6tNNxHr7PiAPhhqD46wtaQjWJCgjayyHyYIPbtB99W5RNnq1uetedXhbl0S1Emwqih/NzaZbQz/FywPkt1vRY7zCm/YRb2/q7dLa46+e4kNI0iHJ5Cq6OViQVwt7F5vhdu2G8GHMGN0WJvvBQOzPZalv+2l6Mm3Js0HBj1dI3iMpgSyTpXGkMNljGahSqbzaAv62jXy+G4C5sxh2XbComDwVXT8JQ548716yioEK8lUO74U0rRiPkJaUZy/Ka4qbpvddEjaB4mX/4cxEO0jRklst9enNX+3RS78XUnpKRqpG29oGjGe8b2lpQwm4lIhllfdp75FD+bQA38H147LdHcXgXBeq8noZ7zJCn6B52z9Wktk08VRe4L70grUu4QaA8L0grBmJF+84u2vFmcogDilTLSjiiykafXskLmx853YQRAfKZntnzBr5ZJKvpn7I647i+IWsncEIUPe7+87jnu1WPyea+3CWqLahSc3scF1fspAkON/EcZZsIABXQ+pa1haMXAkgCZ4KCuFbvVZtObQ7GWhh3IyvLml3nqnNjodyOtuIXQHtlYiKpuxFaKrFh98nseYCbqqQk+IzaYVbaxJsBtSF7BVd9ONrkjWknF2O2oxlfPbqI+RHfmUS6VdQy73GNT66NMZLypoIDi/VIqfMFOlemvsAY3wk3l7U80wPg7bvqspcjY3+EjH1Qlo97voqUovW7Ii/1BnLWvVc/m1+FpwCuEzY/IEHDdzvDrrWtCBl7VuUvD35QC5isSSntO77Ce4Z9P7VEhBHQHhS8nSMLKpki3OwfkitxGFz7lSzfQn6923DD5MlcRa9n431K7VPe3h9LTKQ4K6tyBc1RQGspCyIqC8MaFkGc5yt+PrE6Wv0UELJFzPntIgRINX4JxEsjtB6PgrWqwXeSn+NapWuq3u7qDb8+Wdc6EWezGbvFh0qKbzk4b8DuwdHElczdXlde25RLFTpQMH/QuynH8VxXh/JivynwUebi89uF+BMwoCfstnSW2Hrszd8j5K78eXzz1BV22lQoDbjHxQdsdp6F8kum4+ezFQcJQHt7xvU2cj7Cwtey/BFaItO/3nJYZEH5wXX4S/04FNxeqHqovK/vb/oGmW0alxBZiNScIVLzTQbCxvfPryQSdkNPKn7ci0xHo3uTr4T+ZPq4h5A/6XHTgdY09W3qTmtCPYTzUQ8hKjyBLF4YLJ/ffVxGZV65N3P0TUPqIC71vqzVs7m8fPyxXQ31+jiF84sRU8RCfuJmrqGrFDI/yaqJFB/aXqILqMaWlA4Dp2HQmNKhWj2RiMTTg3jqkqLzFwSIptMLK3LqchOQyWpaQG82GVIegJPLaWqP211ZLhASzB+WUZTIal23OnwEEs+BN3nGw0v/QjDo4QxKQTyOkK0hjtHcEBa58EoHpsJGWsQ/Pc5DuSuRDgvpkFH3FdR+0vQHPnfTsx7FVRBC4w5FBj3ua8WMhV0/q/ITd3N/CutW+aa0No4oDhDj17h5KZj9bTa+rNVuW9YJzmO28AYpfMwZZNOM6p5oPiTx4q7GFG7nCtvT5wr3XXeZ77OvQKnYVDyQDL4vWSj+JoPbBAYmvitom77CKb+PR1f3XKduoQ1I3ETP/y4rFz2+ogrjcXAPacj+4V02M237zPMUbmBe8r0UKbvfNtO9M+U1HDRkrOaNRzw5K
hnJ1GqlKMpFwz5f7LjU8pecUOGcbyo5RcHtAo3E380RSdHoWl1ekRI1ov3yq7hwHv746zCh97b4Nfbl84J4kqHuLFFThuZtDLe+rAoaL1emO0v9SXaPXRAyokvfbb7fuOFylv0iANYRpBt36ZWbKEH8wiU8dVO5Iq5r988AQ3qWJwqErILv95HT57kZwZOxfzzEwQgLPKGBBCNZOZ1IbojJXDUrfF6RnWvJ5uU2MyvMToNOdq1AEWthIdbtTUZPtDM96G3w7jJOHW0bH7ygPdo9cvz0ISkvNrM3+sEy9yPqZFrRDi/t4x5u36fD1bUyi8cVu8rnMAu7G7L7CS1MWBfkZ2Wvr3mymZzNAzXnn1jrXMRPV0PgUksj1PD1Ll5M9uCvASaMQYFmTbmCF+uReWyNiSXfz6VVJqIXoSck8AThUnhfZJ2bsG9LL8B3dqFlC3yhTEUVzZVMd0R+FHFeF3vnvhnAJwDbcJVPwFW/WJYmOriG8T0o/OvMIHg9ZaDvP4DpP23CbXL/OERzTeU5hxMKFpBMyOp+AuOzez4C4y7/jnX/CBEwkHylG2KdM9PS3ipuTj/+xZO4ueo9PrlCjPJqXBCLuPjXx9ohHvqASL7n3i0IgTqIf33h/iK1u+1DeIqUR98e4U5MpN/gGmol9J0xrA5BYonuiqzDb3EtJzCixwofNelrjzD3gW+9UfzhksKsNXAHCrHSzPhoQl5vRBdUxHLc8uBND1iwAx504Duloa/U+Q7f9EInk1gbTBS/XmbEbO5k7hHXZAjmMrP84dtH1uXMXWmNUjYRrw9lvHMf8imLloN4L5ghtckauawY8DwiK+TZBfrcUQ05L0ws/iqAd1VgzOilbw/3oj5I1NAalo6LfGH5C/D98vW433gNcQ+0ijt2/wTruUDc1bilH+Isp6UJLLZlGlpZf+LCDB3qIA+WTB+x7L98lJnJGA0IAETeZqRfJyBBSNkBIc8Ncmdj17lgJdh6qXArhIGWhcT9PUt8PPMbL/ebBnfxZDAkcSkgVjpzLQk/cbnb7umCfyCSFPZlmqIQYmKCqyOO+c69vvltU5qTenRl+vknItnEIApe/29PJU6kcF5WZaAi3qgVv/5ChBZ3t8GPT0PI4duVSQtnP1lOV63TucHPZ/2bt921guE4/+Rl8wM3Z7QCkkwVRni0zsgvhelvW7SfXkvxXMUGHOC2cJmC4x6hFs0xq2ecLMRoRajKmx5WqPIZ/xyPE5Et0/ML8/ZHHlYS7iVzgXjzvcUilNDlGVZzkMAw2hsXttIRs63xTaCDT98HikBAzaBpwlTx6iBozRx8/8XsuUucrJpvPuOBzq8VB/6hFG5EZnl9mfcnMhC4vnzus2A4IzppbF542ykxpOaB88deioYjVgd62IhG2T7vaLfcCQYuIuOjHNL97KZspkZ/Njbz9N2tYparMlYXwbgcaAJZvd0Rkc4lHewRsneY5r2zv+w3P0kQtx6UCEdf+3J+vl4isR1Lr4UEePyWD5PZJGifccYul59y96SIIWRFvMAN07xOgRscvYsDP5SynFkRWppNXzaHM8mm0d3i9yLxnNZDjF5iHmNgQiwDLXhaQLGdfgRdlG/iFVDcum7DntyrZBX5cxkLUvrWNXmqsrWVU8LXj64d3hBQS9g3zzt4tHHhfgkeUdKd8lUL58gfHLJDZ6uGWInb9Nnal1s0T958uW12IyOPqfTl59YKoeQAUQH9qudqsC/fvW2vrz9iPecXeE4FGR990TMsZ29fN59CnPYF94mXXAIRTCebZacKg3Wjcsa7Ig33hTsHdnkn1Tq6x03M7tnDVVoKqMJ1BbhcJE/YigRbNzRuWExieG/9TdMFH7IXTqcJ4wfMmaWPzQh5i6Bd5eJ3EWxYnQgvsz6CD/RMcCyon9WV/ipdVXCT84h71AxhTjriavC7rt7AOIFZNnQ66ms6TOzlccd+i3hG0EyEt9naZx7gYCfFGSPNzx0p8Mgxrogd7F9sGCB8C
msQDNXCiJC79Xa+GGJwI4PypRFrvnv6EQsYlvn1c0QCCj2yKzKyK6eKdtHmbhfNjb/RgZjrvIVD174Sojfoh4uk/ls+1+W0J8uUf0e4CxsMBizUpF5KoWTslT13WbhOJdnk860MvKPBw7ohtDT8kD/B8WPT+sxNAao9KJoz2dINJmfVjGQpIRCqBmFS8cXChiPLIEGbYjHsmA+yx9GR2SOt4ZOB1EzY4dptNOKl7Z7kiPMhjd7I1gDZ92kKckR0DokSRB3N+S1aTwRAvA2yRgL5cyvE/Mav9Wm1l4i3Di6sMTyWqjoav78dodUQ0V4rV8DW3UK83cK5GJnn7CxbXBiVu+ovoej4i3GgcaS9ER6ipId1nT+ug4Aesor3meZiiAu9/txRJR5Xlted6LXj/g3Jd4MTr8xz0TPhqlXBdLicI/5jeZr7bc1vTMhZYvG2wuGyHndS04hmlTYrOD1VhbWtoY3K7z9uKW1VP/MZEZanJz4et6feLIFfzzAUGPDV6wh24R1DAutdTm4eIWUNfhD13TEvlxDYQAPNUitKbRrM3NN0CljlbMldtOkbVLBLXGIJAhs65NSI0T1yeIP7PTrZgQ4hPg/2uDwnkbrbQWnGBy9uXkQAB2Wowm8piwXgmyL2w/vb7AUlkBR8K4g6F0Hb6k4lm+pIW7cpmpn3mwJX5L1tmbTUghHxtuscZ3gc38TReTFbdfIUX8Xz7uGfnIYVydFMB+snNrpMZKosrm8iexvYbb9e8jeXWqyOGGYCUYqedNo112jwYowltURy3ORIQlUKGtm5rczYn49V3yE6D9il5ys5Ynq54iDe1vZzc63R16/etajpt4L2nQkJaQKf5BGPhewGf9+/3t3bO6E+uXOgitpvWvWD6CLGro+NVHFuLjTxV6SyceWhg5SgHP64cDYUcr1i6qNfoAw8JEWo902/ycpzAx+DRpQK45UitJjWVPBpFZ8w825KakznwzegRIL+rtK3byee/fYufALhiuDXo4XTKytuIjUPklpvMcEaz5+MmycrxtXOVYn24HsTYsPOBPn8+P2b38Xr4DYfDHra5NEp/YkinvkztuRustOC3TfADAEUKs9y5F1oXZ+EzxHx4U8QsMNVoX5C2nWdh0Blxawjz38ifmiKPDAhvou8D+28MBYd8U8mQzTGwjFeAFom3SwtsOIz1iV7skfCUhRa6JNvMX+ijcWK4S0pj6da0x9xwz4XxY4oIXu2R4R/tx4ODMvc5XrUJPlg0vL11LyQdvXjTeDYpdwi5xHdj8wJpLoqOHWKoEqUZS+KMEJ9SA/qQ5IP7uMilgJOIveUl9S1OyHTCKcu1aT+r468q06pylpys6FinSUmjwzm+ENgx8J9iqb73by0qO1nt39NCqG/LA3Ln5MkG6XbKyTVMy0idt+LlQzXl13GEMIXFOgLbOBc9k1k66+33j9CSIiWNcsqdoFrgyMaBPEDrkh8dpzM00uCg8gpPx4TJaFe6G9J8Esyn42TeyUWMm35IsE+K7m2A6U9Ofl98njqyD1blf4vNEQZodfqlrbwK3x+ZWrG5XVfsLSLd7O1x2Sy48yaHvOtqPWnaiQpp4VVIxs7Xd+X+QIPGpbzEeNFYvkt47sX3G6xw6tvwNOKeLDx3Qs03JjqqSdXjPpP1kkzupEAMR/t5Qk4VhiUNzptQ659arjlR5gs28zPEddTqlpbw+3ISHy+3DYLsXq9MN/bEV3urY/oiGQhOCJ/D0rHfVm00Wx5/rPKptMnKtFscSlM2q+WQ5Zzr87QtlQjlBtr5Ox3+uqF4Q0v+VryzrcOeXYLEJ4TV2zW/pMh1SU377JrkXW2Z6ZAKkeYIYdxYqQvgsjOeDUqsbz/2ju+45vwRfLtLn2VVV/PJSPTyaMK13yVByNl8if/U4s0AaIpbMHHPO4VLrW23CD2KBlwZtTRkEyEaeLdJBjxvL7kyzheq3Hym/c7uV+F+PsTbx+jU3fZDetJyj6Xcle3fSTNZ/uK1
y1FSJiZ6YZEz9I0JaKjjnqc7yPg8JG+VNv4zkrz2OlYXviq3+NX41hFPkIMO3nFEap7UsLlixNy3QpHho1A86v2MSVKmrW1HaFCqfuRgpsnxYizA+X75LgSG+81ORM2pv9kGEE1czR7s0Vr8cIQPu0RviDcJyHLdpmh1TY6fQl+sjMNVgbPugLBFdYN8v+ET3ctHme4i65jQp0yThBukcBIzsTYynFrht50kXA2+5UpbPFtkT3d89G5EC3tc8PIZdRZHm5D0dx+5omY8hi8XcOibh/8ff3f23uzLleNZGH01/S69z70XUwS8MgogQCBkMTw0ot5EIMEiOnXf5moqrxF7XPcfe6xu31tL9u7NqVIJRGRMWcErJDY1I5DN3rxOj2GDvai5k4/b071LEFenxb92ITT8Iqd8wRyLeBcN0bh0tdFPVokAS4nmpyk6/4A6zB9M++sNgVy0GB9Xnx0jyol90s+Ek/Tz/7uycg6nSkvF2vkXNCutmkceJhPZbuiO3SonUQdvC0IJUI2aLZ/Nh045pc0SeR1t0+xGemeLNObdaRAe1Uhnku0elh8ciAFpyf3QJdbCVK/G2kF70dF4voh8ng4pPC8k3LOPR/6aqZoM5gOjuZpDKscgTTbYLSAQi3k59tcd5iLBQSrCrQ89K0skycvt6v8qm6zHEWEhsBd0wdvoDdZwDJdexbUB+Zuy1uSXCyTppbiuFCPt+Nxn1NPS9DPy53gHasyLQpHB5jR3fRCVN2rejvv0abxkdMF9rbt9IcaJqzr8KX9PPG+41/d181cPxXrpLvdqlAQ2jEJnUxjgLcFbIAKU+TrpRZ46h6arcnauNQDY7YPgJ0AQ0zgpD8u0sjjKpB3svfciKFdusS9SK63xoYZn+AI5csevGq2Fw6jiqXEwg/0Lji4B0psKxFr0dco0kGI2FfdYoUoR/SeFBVlW0luHOGxhxJ4McGuH/FDcfaggJc3DEQKixsus1hXdaDbJZLGKE5YnOVUMbvDow7WxZboaeOgkSQ5QMWomNtdFw+aRmlai2vMmz5uSe8SEaueDhktTPHYuLp1F9RdByvXOHMoYezrXvrheHkIktCKvSf1hJHdFIQ+ZveL0ajLTfp6Z7KnM0eaXDtprQCJDuggmNyPK9SwVpqiz9snG7s3QBmAWhrZGcuh9JizB9ipnfisP20EyW2IkOUdwBU1Bl2n43NIzkstmHOw7IdZJNrfPoajsZNriL4TIuy8cMiRnZPLZTgiG3uW7wlQAxWWAKsG5qb3Dq5ggX1gIejTnKACdVCGTimAv33MIK/B10l44W5UiSz5FxZTHc2nhEIdLAwtFGLnQ0qX2LmFIXL2JDHwltCQp9lyC1nmN5+jj6EUOSHAmz5tShmr+hv0klhSq+92KgEfadmRkt9UAY79BJ5OCG1vbDxWFbVj6s8OFoNRMls9bp1ZIBtH92+T8ziTnMw4h2U2gzleCG6OOUUGK9wfXY6j1yolQufrhvp1qfXETCsAmA2G4+WcqFD78Kp5HNKAx2C8NdxrtT3ULyCFKwdtf8rws+jLyrGveNEXkPkO/c5q2HPicAaug5tz3Y3pbug+o+SPW+Md0wiZM+oqkZ9ZUnRadigoKKli9gLL7iVXaJ2v2+AGIrKKqhH+BOySXdpo2RMyH98wiQJHEfavTgA8s4HDuiMXw4sygrUS9p1uKKNmE2sQswL5vMkbMbXi6saO7k8JW3uT03S+UQ6G0N67JVfzutcc2II6P/1tJlAwOxjpvTNhFJAfyXOw99LH7f6Wu7CDNSo3qXYbwMla93iiPvNknB87ZnCDbuyBJfK0zPQRw2hmD8ynOr8IrCKhN3n8GNhtYgrHOf6puASeCj3LozIAJbEbm2WAiC9v6ai55kEALE5WYByxzRSsnF+NCYCxJmClaF+fWI0tk8+d6nrVq+J8pWFsRb87tgeO0SnS8W0Y2+XsPtGjQhLUcc9bWbDNAlgUbk5qbGZa+sIpwlyEdrvHa/3RS
yNjd2T9DJSLPTFjbAvx5eiphqCmO8RBvI/u6blgZTxmGx7v1I1bXOnRHCPMyC6wlr8I/Tz1XVvinggxW/5iFSy5pXzJLfl1APNkx9bGL2RiZx1GAJqSYWx+DMEVU5s4uODMQt8YG7KLtKyqnFnfgR5ICWMlUfEozOC8YFB7RD2/3/bYDEXmnvsagKmfJEe8DNkyooUrHXqbna6TCW0LGyaseGC8A4hxpxb32TOqMzy7LcJjFxlPg48OGANbc1dCd4MTLgEuRQl5mLgEen3n8MHuFWCPCtKdK+BANrYYcRGybn/eb5/46RJNvwwZzhFrqYh3t8U4n6/U7XZwe227a5XCBT5UY9XA7uGH8eQfLozJ4kkqAB0cPxqLJY/4Ka4W91FhcPqqnr76W4htkhRF26JFoNpdbF+BmbyMDX4cHrNbhfTMUsKYnROzK52o0YeuPfWIe2e0W2ueaXkYoT4pDOBUBaytffZLGUZTcvbuROXuVQenqefrJZnQV/YwzYp11746a1SDwTyiavakaGuHD9Q697BuF/gnUcUOJv9o7p99MnjGq2EOFzZrF/u4h2YcTtNsSD/vkesovjeFZArzBh8zcXmiaI0rFXlt5NmhyS2xGpWzYjHmhlfTCJU9sOUusK7UDGz+6H66IAvFxR2WZB8RYa0Xu8zidmeNq1v74PJ0s0wUfmwpoU1g4Ibd3NWFSk/VsqCfT58NeMIedhtgmf3Unh3tXaUJcqJu19j9viugM0V6vgFX6hjtcd95yfDZTWYwPaASoHF1nupp3E6KpmKdZRHbWEtkHt4uTLstz4YfbRRceggIuz1yeEmZMb6YXFte/rh/H8HYA8xf7wEneidcBYag6oh+FAJLXQEcM3ZeLmG0zvJ7wKbdRsba6OWucsTJ5DjLVvsWuW23COA6/Vgg5yPea0IKuVJQ71ucLx9hcUlg3nmjI9T+nGoaAYMPj3NN6sq4vRXBVwcbwZA1le9k38KsGTYVEfVpcm3kbOfk4QROd/3Er1TuwMTBZBKcFU48yS/RT2ykcG8UX2jik3MinWYZVonmc3fQqL166wucqLQphzmAdIOmjzulpGw+ckd1jOlbiT9hcNPRSHij7mtU5yER83pz3FZPFw4nLqixj++CkqnscvOUiE/X4HztGLrIprhDYBXM55BFqougQRxoyI7/1DSkIY3zdtpM8ak49rusL9DjLOz0GOlhRSwL6+OMjLpaaogZMALh9OzztZ5bR5rQR1Cy+BjXnV+W2kc/J+L4gHar04qGOszbjTz3DMyK3OLoJoRNkgbVTMMAfn/am/QmFuID99mbBth3gyVXo8wi2IxN15uN/1BNeQq9x94jnXnpHtFwLjD2hvEwDbB20gbC+mMXrSIyCsOb/C6gztY1QPf6mbfioNRUP4TxpyPCYJcQ4RAjgSeAdrVe358LFUa9URyerGsYl/D1rHMUqJ+SCFbzqZt59P2lRH5THu3uPE+YxAq6rl5L3qUjeB+p5ENCONjiJurNPfLKw6LOTuV/6P2nJ0pZhuh+xjv8YHIX8qr4Lo6UgXzfiYGRF02nIx4Uh1J89uut7jjwPsBH3x9c7BMKmAjQ9bkOp+TJ4DIwLK7tBfoX2aGsaWMx1y1tLMxpJwozhlQKPm+2G7DeNlWJ22ffITID9usuxpQGV3RFJRG+QFlYqWVsjTg4XRPbW8at+ksW+fBqucMCuRdezr0pf+3JF3dJZYVYyJ/0u0mW5TKJVtbIjJ3Kw8AiIvAdySSv6DiR7YJsl/DqiMczrZPTZ4+v8xPeIpnTvIfb31Riv5RwugDrNdARjy2sBgvPQAzD0DucP/5c4rXtZnsjltN4uVaQTRNneH52eJLElqm7JLxDh1pa1nXzNNfKQWymS1IccHFzAKameuqre1OQz+heHjWdogL1Sk7AagjMz85O9Hh1Zv5InoVLl+bHrVe5sRW9RPmDYI2wCMbNluraY/MAlqMNTV127IwyNH/pZ2V5R89dXrCEpnKW7
FSyfhRFGbRW/xlPCGHygqW7TT5bUiTCt3zmafmM9rQvf/WNUnzRyvaQw2DOBGpF6D086RJ5YL7fXX3Pg5pcsZT+1B46Md6X1VOfr+odaMZsD4Nq5bbLdlX32R3FkEiBTKhtV0IKPSK33/dln+B5jX7cLQ3bJLpGdgRr5pqapA20UQk0sCgg0Z6fEl1gkwejgA0czzcyvj+K0ttBR6Is24tUH7X8MBvHDgqp/QXWJqQ+k9GaxGzHhw1fQ6ah1s5RM2py3BLyX6z8SDicOf98ADo7ZO+dJ/vHHLVuvBWoPP3EYCTNd1rrWmzI0BvdbbOlj70Tb43GbSnppsnBZ18rp0RYEo3mDUXql07POwqTTredD/1ELfUaHtFhxzTdPVMcv8X7c401Q+g+5xooXvLKfkq9HfATgBdhniwl4cZYc7qN+9h6TcBN3qTC6pS8a5QYqNUHtrO5TL1QcXVCm+PUp+HHfB1JPHHkyZ49/3yjz8fHHAVhd21REYrKG+XI7rlL9220Hxl4d4E9oMbMok1DbDS62tjNdqwfcTfSimh9dfNqAqXjo6C6jeU2MCRoO+pbHPFHf3vaUhrf+Xh0GYQa7rDINrjNRrA0RJTV6urj24s0Vfn9csXGjn3J+gaG6Z7a16wgNukYeVwwiCaCKzdeud/4vmPvkNhHm7QuiyN6iB5cHOBWTWQTShTLTWToh1XXttaxO/bZf0xlMVZr0lyMqzJDLCbfxz7fH++ukRWybSJ42LpwVCCbAzF0JQJoWj28SFt2tj1mx/u5DL+6nPHsEXDpgCbj3eamGBakDKR3nzv00XSwyhfi9RKcLGCwq7bry6S37LE/Jw576lDmly59A++I4k31wInR7gh9Rk7hNeezg3P0HtjZK0a0tC68f4w7rL7GsBtafov0x+NBb9BsfsB2JeLOexA3z8G/bOXHRQanUWu6DYLd+R3SEPsS0P+gdYIP7EvrtH2UmqOZD4Q8J90mFhH96psPok22Fq5+dc0jGbk7Sv3Grds09/WtCM/OGIgNhVHdzhEL5iD21uE4woqMJTwM3pY7Fuj82XsucRH5xCfk0S7ja1lTJqJ6Cq1UdjV6xocBwZteQHpDE/U0axRV/8RyEiieo0M4D7+jX5XEHztynR0wU9TLXErwvIcZ+dRF5EjT6hHupD9uLqJr9DR1GR/OvqEQH6r3lKwPty8bmkyuFo8BoX5/ooQFdGR8kOOTAG+2mMJYNcFVdaD49CDxvUcE7/o7O4S9kYb2S6fExKE40uV3kFF0TWFcaJekoXBg+1HbMk2+h1yBXqOrHG3ghSbGNrQvS4bDmRbeJu9LwwR8zCNSRBO6GPhQKWePXzhNYTomEltro/DKLah3OCzXs72K3gDLtaiJFwr9DApmbxGp1vGZi0hKEdIP08KeLBrtW6pT4frlhsGd7jkVjlybg09GxKHPSyjmPXcbkUafH7HnAR/Mrw59RzZiSoTIMBuRNxr+8IbKiwqKiMbowWhEizJYImy3D6dpUmV3Hgm6jxpAzh+6Fuqna110Yk9WO+xwn8Kq8K++5p+nR29VsBtn/jzr8eDDK8XimQSWjQYtY9j/ihg1OnXtsTZHWJJNVdoP3YtxMUzCc0Y+jkqckLtL3E++KpN+miFJwWBwiCf9aJyaiq5Xm3JLIzrSckh4/QNxLxuHUhyDv18045d+jeHI1gNaBane1J5MRmUoQwu1CWG/DLmFJf0l4hDmXFZQjYonZeSfF7JLKQb+1ct2VjfcBJ1Oe7uzUVelSlw3896Iid2579GgerkscGvXiYrijRWRswV5RCiZH2TMIwmKm9Ve8+MdtXGcJJUETc+7sj3eipRUe4SkrMXFaNxQvF8VdHPZuuPpQkyPwKbwR+epD34/lgRJ5JeAWu5NVA/reoeRqoc2gd3stw1MakEeyu1mzkmdBQ5Kd6qw9vBF/R0bJtfEcPmafuDk69Ovf5vNR2XbWdh3tn1vFOaeecfjNq2Q7byxpn1v77aPLqPgV
zrRyeZhZJON9BuSkrDgCnmo3sP4oWMoJTOcEGwyb6dScX+/V49tYHtG4x1LmOTrLgjrA/PTyvOOJa1or5iwe96Z3U5dTpJyItpTuTWxcLn/s5PHK/CJH8BZx/c7V+9xgkCtMd52r20/0R6hFey03R5IL9Vj5tP/hPZ4wgkhF+nwW/3mvqXPaHySO/sFWRaPsbsUtKVeivoKcdFFZHM3aANekESu1DnbvOxBMevNLLoHEVCBBHCEMpcI43sYYG+9MZ+t0gmqnxOb6H7oihZvnpYxNv4xirprrDst6RYwDlP2ZtzAQ4GrOryHcSLtwrMwx5s2u9SrZk3Nky4tuBIityIu5K02tV+6c7LJcVDsdPZ3Q8bHcfX00rzaHWpsPKiZQsv5pSYdoTtijo8+DhfFu+Ny0z+BuJ2v0uuWihhssz2+oZfLr/ckLrMHKm9z48d+uCSXjWDTz42LoEy0kQ+lPajeLka5w9a5pio7Rw8tHKDIXAxsDjsj7JXE6S7jLLLBouuNpqwe2SoWFMK3uzd3Syemrw61TnHhhBZPkOmVEgtUHyoh4GtlxFFIhK65AyXiGxd08rWl2WOasrPr6jDnl5SY0O5TikKQ1ibpgOAmOpKDqED6edxvyVhSqreOq+xGcPYF4dAzTh4bqsWKLR7OpMLVNhMgxojPoSyfOk/rIt7tNaSxenqXYVFcjyZBxchTCwShPbx6j3zoj5hQFCZCdtqWODw49EqzWXbUJnMP3IKYpA+bczicee6KDlpGPTdC8PAUd0uZBA1NLTcXWtVEgudB0XhjrLr2racsL1QJXTPQSRpg8u4BFVo2W/75WRSdwau6H2tAr9yfrd9vvC6hevNZtDBff/084CIMFFbjuVbP/iQi01vHWJbzhoPHC3v/DBP8jRiO/m4Dq7ki6xpZyQnJDklniZ0jQLa9HeuDe+lb4PQrdhRX5hA9IrsZlRwt6UC/tuop+XEO80G4J0ePFYeZijd5KjsWtqWyu6ewTnbXcCo0b9tCuQCnzynG8jjOoX627Ia6sVvlae21rC6faXa5fHS7e2U+jwI3TbuOt3LNrMnKtDxzf0OZJzkF96y7U3Z9mrlDYoXWKD+gkD5w3uVkcMqJGmRrxDm8Qi/eRXE2MjTiz6p64rf1R+nABlIl1nPjTL13C1aSsmVn41WfbaG0+CQHqunJc7y528uMIsU738lbuunMHXAORxxFfU2O0lLndrdKxooqCdvaQEuhofOot6DswtOZc5+lYPlIchrMAUg6ztrp5Xn4sUfvweduJwleqcbr7Q5Du3npB9o3NreLgGeclDrUWPjBy9BhRPJWvWaALtX5Alyg+9MQS6Grapre4UNtkQQntlSeKFbaHEuqU46HcY7vqtfGpDsN9EHaHmcjUE9j+3Z29hKwAHkfm5/3fU4tETEoFL1bprSyb1oKBnhfO27vPBZEEYxaW3mHK4YEzRnBmWavvmjerlmishZ/AVLyfqvS7UwLSaUwFvhmObp3xCEzxfRtlrfODcyj4+BdB8jR2kfhEvoi2FZOLGR0LURUUPNkxiiRCaMy4eWdtH1Xk5YUXO6mjZXahd3YZto0uCKKHiVBpO22vKVfCxMJH2Io5DIGg9+paA/aY+MoQg0v2Z6ZPpC5EfHeel7Pg6yZcndhh/h8VQFeHnfgYXn0Yzq8ykn7wEpN6Nr7s4JqwPeIgQSLkY0GzsnsBbxptfo1qT1Cs1ho/jtSfRaXm+JdVfkisXnqEFrfkFpNqXoAe028a5uHenruCVQ51ZbhIbI7i/vGxeLwPh3PmpxbBX0yEUirpvcIbvcc/Vl2de7QjiefxBLWJvEs0cmD0CZp0Aa2HmnQdxo9qNxVanPMoQOUncJ7uT3v+j3V2T19OHYst2dY5/ijHFwqAc3p+RCv9uYRiQMkj2lk8otKUavTAYU9+Wvqu4fj+dkwaDG6GbYzWhiZG0/n1I/7eYtSeHPcqXZtYfnCL2d3OFm2/0DnbqZTHHjW1aI383RrV
0v04XGl7ydlRuJhoPPtUbyRaHxfehrI+NlZ9VHPWlHKErFoW1mTW3imdjp1bjirP869L1nNFI0HDIf2GFxcvKgpOMOY/kBboetyL1Fc6m5ixy6Im9vsAztErTOCuVrjk8DKnUhGPebiukXD01Ht8ViY4dU6q2rjxoqRbVPlIpoRNDDL+1IPCzpQvTGlVt3MR43Y3x0PhaIb9hxhJcflRmBBQJbtPaDMue7ZStYGudbZFtsMlYtauxMt6fiJbfTixvp6ATMxW+VEN/gI88Y9niOeXhV0FM/5DPtUQ1qb0LRxNkUQ+RF3gFgOVBULF2nv+TtNUNTTWwd0jdGT6Mn6fmH5Fuuz4IT7497b9Nf7SwoGcb15uBT+4l2ZsXDt6Iw+0K+TnWD5mbb8O0D9EtytX0e5i++HJ3T69f0uilsnBweAfrJbjLglQFqmmV+E150lO7BMJePfu56LJ44bOGs77LAzvHrMmnef3N14K+0DJ9v49g5t8ubSnAKyQTfU/smkz9m1cFj0zXoXkiDgvu/9HqGfCeoIMD5I0XYsTFCIteecC96pw/LEQWGAy45zzxPwuu4DNqjWCBUIDmXUFo/p1naJaPeg6W7/bVpdHxhPDqPiOB1ujivGRQSpWsHwA8uMPewxtHpDkqvGkYr2vO70Q/jqfCbJNncic3XCYHNS4PePE3cyelXuwqvek/CiiBQy8HNLy5wXokORDrr4XElXSJ10ev1CHh/xph51Sn+O4XjtfExS4csyjHm5Hk+HDedI0t9w/m84MCcANyPAK+yiET7BMPT16O41UdX98AgX/oZz5biL6jLqGvBNyAfA31HkBTK9/o5hH38fsrBLX882+MezNMqS9GNZfPvxEIin5UHytTYUU69vhDb9yEVF8bmB5WcMycIXDBXdlOvthl77/B8z9XeG+0f+j79jH6/lFc/o9bHXg7abio8Hberd4Y9Z6SXgTxaiIAu8QvH8qNDrNuuyugK/9+uuq0vwgQL+gvWCW9LUzyrk6qJulqXwePnnhzWYIksgbFffwdPlG5j2HgXwvZHPJ+Dn0Ou8v+HM66+AZSvgyXLZlT2eBuSwS6D3xWjmJRUugM8YzAD/Ozw5Bmar+SMJq0SeC1VtzTwhEtO0RLCFH2KKwRSLGfygCAPDcKPKMvIjgKO0mdC9X8UUsQRUPZZa75ub1N3RmWtuCN9mXv/wI3Gs0i7YoUW4E5Joh7Z+pW4jHskc69Q75WUL/+5bV8QxqUzaJ1vwmSHctbRUpNoFcLOPy4XEX54aRwxSThwkjkn0vXxz87t5EpyvNYPyVOqmXIf703DMqD7EQ1ypglkp6cmdqPF4vm2UmZmUWZoUG8Bn6BwBMenYSQfg88+1f1hfcG0tD8oC7Kfo/Yyd3J2zdSy5D22DljLp6/Of/329H3iHH/cCnj0/3vfpYHSn4GkacBQwjpleSurkfR2W8IHOCOY74llu+d+vc7q7pdOrJjEoE0xNcNkPKwHsA0psDYBBxz71QXHqfQwmjs8X8XIWRE7h2Iu3uyRnS0Q8MeyDspvBM9kv1afO0f8F9Oo7ll0eb/TkWddnuAMrWfRTzwXqY3fbMy7f3QXTp7uPET+uwA89pNvxdioCTJs8m31Bv3ACVviiM4TKhz7i72BXbOfap1RP7qaVxzHk6KdQCMb1RDARHg4WcbuOlx2xawb00UmYudce5ROoh5MppQQ3m8fBsUqFxRh+xpg+Mm6RezCTRvWaVkIuXFA0Lj9yiX14HBpjjqEZEkcUWfER7ocdOvcdBUNFvnTmYYxH32+Ar74a+iEck2fAJ1E+3N0Qd2xYmP6DwDfsiFcU6GIHfLXhtXarD+dZR4hA31OOix3M86z6W8nlbWtf0JUIMIldyobcT1RHjZGDVKpGCjCmROPUKc5zC83We9iwrDHtlZvHtCtjx7lc1GTHq7j0/jy/CSIvBc9Tzb05GpLApidjYhx1DpJ3pXc0DGAKTZtdoqy+fyfqeVHBCJLI3ieSV
FMg3a5XLET39nUsnelSwfeBlkzWgbc9nMVNvomWejPwn+SdMa7Nlt43MCgFw603CbmW1u19NI2aZATUWntFeJr0O54ZR0igqzvM7k0R3PcxSkC8MNLBcpwLuVKtg8xwXFca7al8HxsDvo1J7m6anrv3sTWwpZF0wIAxtnPfRwElxsBxBbAAj+j7KBeVM5ik+Wtff+3rr339ta+/9vXXvv7a11/7+mtfv7avIl4aiG4Nx6BIOJgQ1gFxeryZSvsVzdhuCPwpfY2ty26iknl93z431sGS1uPs9gYbk5Kjm/v5fWwuZ1xvTGqOzw62hP8x2JVxm0G0UYYahSTYl6wPY9d0OZJxv79tJj6GkSM9njVo/Pb2lf7ooRjOyg+jfFHDfpLPbfnKxixGLtUNkd6E5Aa6BiQab4CXJJhJC9fRSXyLHDCBvwGvonGHVvvF4laZO9Or1NLb4kadjLuNHFfBImEHKcqgpi4LK2xzN+5Ka3CPeHOA8VngMiTGA8ZM5rmidRgQE1N3OBK/UNt0AIxeVXO4aaXAlGRWInZqSiG7X9beAx8LhlunaO+a/HIF8Ue+YO+QL7wLjEdm6wGnZWBf1Px4fj7Yr3f4WPcooIx07rXpGryHIAU+ISF3hoWsu+8DCBnjUHFmervP1ebwFp4GP8vwqp9AHre3/cpPYwjeYOENOxmlxfXQyXZoa3cXVPSFWflXWSA/OKOK/NtjPboZ4sSxW/JgRKb0vsckVAVRUc7VfriuB0/O2Ta5ycB3YtTVYE52KGsW3mk5N5O8wjDvsidzajfaVloP0xSIM/DbjjAALF+/YVh5MkblYpeEuq3Pv2cADFvFsa+/jTytRJSX5hv5SNYjYflUgxieg8Jej6YmjkBudJVrnmFg6n2X9+FWu4cA5pDN+huO7QuTHufnnR2l5G2XRwaBONa4MD6tB1H6qnLYnSqYfrgybxwHfq44D2AZLzeH93QrxHJY75oWXrA4McPNMLlw0FnyTFpG+rWKwAiMT6XZhuN2K4lx4EqwsqXi4Yr3BOg/J9l9x+zed3M6cFkKQI74epAvBGmT5F5Kwvu7uRKQwJWMcej7gFS4L+fSGgvI+4DWu8Ry11zGzPF9OKkgiIzkPI0kva0wkdxrGYBouPlOj1SAsQPpcUrY1fkyk7p2AUg3rwbeQhBFkg5uxq4GlppQVk1h0r0PEmUB0E5UhBfI2yDTg8kYhTOFt/Z9CC3Lsga3U26S5L7hHYJ4HACZQ2TFBScYN+EON4dZJYmE+nDYARAyWA+GBSDmxHGlkxjvMuKWAA0XVI6yGipsMECumOML5G2oMXdLBMAolau8D2tlANWZPxZvqex18pAONuB7DEa0T7flSMV7PKI2++W+DB3bKNn30Y+QGOsPO1LuxO06vcLWCpCqmhlijPG+a1ZxRFH5C+YvmD8RTMUv/VodG5pwvcqPEQsTt+zQ/gLPScu4Q9QdxbN6Wg3a5oyd2AhntcjeLTPpcGCM1HmB/HimRZYDWrwRfLV4Hz4NvubAGafLLJ4/v+BjvQLKUggi3N7fDILsIIhwXsnSAspSIRf928pbyKC3EEAQcyVLb2zqjcIsO+/WCsdliSBZ6gLypi8MA1j2h3E3y8b7IO8MgNyEqzpehneMy0ucG4CM8spqyKAshSDC8E49CNKm0mhtpHdZ6iRQli4gK1kqJ4E+RvvZW6bYlJvS2/Y+HkZ0u1lbhNxukHAND89xjNNj3/f8sNX55XYlP0+7NScxGSLe8yqKpSrbYlfdpdYDv0lojfP1mK+too51jUmslWgm3vV3csIgrfanfL+2ID0OS27KxaKrYbWPvOW49HacueO3AebinZGstiP124rbeUe48lJYMsG3AeaKxRid62H62i7mBmnLGWTAEN8GmO9awC8T2h6D1cB6NjkhTIoR4nj+ZpsDf2eL9NQevI/q2ECZESVLAGUW2MfTFrpIwCGkI5LEf7RMtMSi2Ofkn6LqO4aARc9fqjARVlg1MWi1/
wXzF8yfB2YzoPGwE0fgtCozQ9lJECeB8iZl+0T7j5T+fW7jt3CBqZNliHh8geIgUmPECxB7sFhYrkU9/F2xX6CFpK1lpuC78MHKNXRFdXxpXXruOgqJSEoQzimQucK2pOrLSzWIOlNN825Wr5cV9i7Q7+LqU3taaQgnaYgbZTlPNzssnwVitO9xzUe3MKv6THV3ufMXxfoUu9RL10v7BPpelMSpH8PUYZkmLJUEH72SSxnTFMQ2/tGl1+l10ZfsPU+kryigAD+sVhnDCXDY2psGrxYNvgn8i/UegWKYpUDnNCYyp5ocFwwqrF7KnzEM9U2j/uQErRCVBEb46MPT9YoP1+cJvSUNf/rLHjdMQVNUDFvXongA689Z8+ti48bvaGX4ZI44OY8+f7KkZMWlD6ifru4Bk1f6nID6vOJOOwDyphvMGkY+r/cjKq/0OQF9IxsYWquoY5LVwO/OrvdoFbsQhA0jGa2dOcKbDhYOAERmr2b4iFZ6XRBQXhpu10xi3un/SJLaFS2zg+2DfnzHJ/S75eF2AiDvMcIHpH3hnTtz5Xcfod8tDjc2e48x1dID+t2FZ7fmjycJ+N1HYCsqItCS77qcAyCccb1UXrWyM1JWMyZVFp1sFU871NJhd7reGqd6t1mUFNofgSwAkHdOO0C/27Vu5KV8lxZ8Av3uQGacbOV3HyAnyVZBvscAGeMMpAU3RnKy8rsfAOQmXmXsTrxLC8k4X9Sc+xd563gGnEt4ziSsY3WypMy6Cut1DdSl7lz3Opm/UMeG1NkRmJxpb9SRlJIx7g7lHNvTijo2jIqUBC6v/AoJ+iLy0kIgWtFnZK/GdJRLZ5a+7e+wO8sM6USriOAIJf5TK518RZ8NlATxgWku0Tt9xAHS56llTr6S5ptEOFj6jm/epTljXBEYF3lq04o+JtRE3EXH9vU7fVTAfmqeuaU6rehjclN6m4wjtk/eaRAYl0vb5G4pDe9nPzKgLT8lB/S9oFNgA3j2/dwp34rW4TgGh+WKfEy2b1deoIwPYczNz4NVzE1Inou+oNPt+9nPBRHYxUTp5uoq5nYDKsZVb3yXrmz2SgAe5YHA3Ex9P/v7G1PHORVnCLyAoGtzgCIR/iRp0p3XHgGwa42n8iRh6oLsyTmOIrtAoj1s9RHtq5u6qh5a/NUzv4yIO+li0Tb/Vl/Cz72jHaofTQM4zh8/C2FFonLxhqSfqN4/ae1LAgRsPx2kwkmJ9x2oi2Qq6sapV5JpiQh6Uul8jC3/4nx1kUxlrVweK8m0RAQ96emkK8mkLpKpfCjfJNMSEfSk6FtE8AgjgjL2WNFBgJIpy2RLit6xfz5ATLreATuspA8CtV6ZyaUUvHP+GUYES/fBoYeV1kMg51uZUkrveb0kh9HmzH1k37QeCjnfyvbfos05jDZn4SN7p3y6aL06u2b7FedPUOvtrKzLkncO6xatV2dutl9pvQlqvdJLOrN+l6vaovXqbJOJK603Qa1XevU3radBuVrU6ead6wDIFkabK69enaUT1Hp/KN7ShtdJPyjH7rSyoAEGBEcmq6VtnJTUp5RfWSVAJteufpm6eeUf3JZsQGKcvhXybyF9+mDs3r1ylhGWbEBijOtsgLyF9AEg37IBArRKhNoY11aJDOiDBgBkRR+WFY1JAta0sZI+x1oGWg9Vm2BcaT0W0geIbSdZ0SeC9PFx1b9M7/RhGUgfSVC/ZQMiSB8FgKyzAScBWiUHQf1mlUTJTbMBiPN+9gEIOPscuxPUN7wn5wfMjNLnA66uokYGPPsZy+4k9f3s24vFS887VF2dfQOefZPld294Bwe5WixeAPIt03SCFq+Z8t8s3qqW+esEQFYaQYDRoCT5A/EWzl597PKiocrVS8Pr+ONmoYyEr9vWcfw8k/jz/CXhYHjX/0v2/iV7/9LrC28F1R1eg5zKsbkYKwwA6VYeww2VPidp958neZ9MPaXzh4RTuYb1ydeh7+yYSFtJY/cEUbcl7JlyNZbvB
3RjLwdMgp6IAvM44hHtrXM0o0jC+fRGvSESdinlUj6xyiBT7dg2vLbKv4isbEzMQ/Svl3cO30myreJzz/enOcC1cN+RUCg94UXadc2KwCaKzeLhHIc4DKb0MPaTn6ZoD6/VRdrenYniQ/fxnD4XCnJUvSC9MXeapvOcpptAf6JdaKO0DnNU2kjjaA9XJ5iIzUkqoKZI0bGcOK89h71hXdRkm5DP47o6RB9yAoV9CkTpCGXlq/kxWBJO93lQkZCZ835wlw4Elqqa3zUHS/QBR4bblyDedzRLiKq4h11Iqu3SsWM/7UlUjckYcGOWVMSNumbJ9WmvpBgBpVjFjaqxkmIqzLGJeYJm13fPXNrCHFvFbdR3XIusCnNsYpOi6xwblGJjUHHyNxoDKSbVol8gKwtFglJsbHNGXkkxEUoxB4KszskR+uXu0J6Fw0qKCTDK6hCaU6z88iOMssrD8/x+wlnjLMAoq0eE36KsQIqptjxkxjut2UWKHVhrE66kWAZPQ+BLA7eyUMZFikGQtRSDObYyUITP1rJfUmxkHKu1jrz1fqGSASKJ5cQsUG7juy95EUZGMlvrub+855gkKMV4cQqk28pCuUAptjDKSk4RUEPud5PK3Ni3d2wvUEP+L/EWyRBRtZEogpBX2hO8v5bYZSSTVPrY7FBlRRsYNL8xF3W8rO75aEsEHICM63s+HMx/QhBhFTPRlvxnvcG+5T8ZGDNhLtK3/KcGaXNqt9h7jSKT1EDDiPkL5J02GiN5z1M7Ie+0ASC1zIi8el3fcVri5RDEXMkaCOJCkJP0ThsIokjeMXyaqzolBtJmELlvuXV+ya37x+5bbp2D5/4F8n7ueWC9NDdf7VaxjCW3zhqzaK7OPQpz6xBEWN/vWnLrAEQw3889BDGnXV59y60nS2SeNfJv2RUEnvvd3HzLriRLdmUBeT/3BsJARpn/MLylstdBLn+k4aJueXhZkU9onZ/buQrp+IyEORWHNImyEGd3h49vHIL2nLs6Y+wefpdEes3KEk1a9mRMsuPZ6bsXBPR9ngiKJYTdt2pMiwF8KbRaM6x8bQ0sxu2b7Ig8VniTFOZyaR/WDh3enwtHeC5YdNw732pMC2AjYVeF977rYHhfcOs0z8NaB5vDTvQPVaqt6w34pVYubK/ot7rZu8NyXXceynXNb4rA2OQROxDvsXmJZy43JrWwO3Fe13M6BKB3sffFdS0wm8D6PuVsUd/y/0IAeHrIu8e6ypMbRCAbivQefaszOBwZo3NOGHpZ6SzWgTxqDlj/PQYpMlL0lMhvtauQDczp7vj2mtZbeEb2thCml3fcJKYF6zMugA3WtA4hT/H37El8q8hVGOfaPiJghq9oDdnAZbHRXmXigEwrEuGAoTrnrWkN2aCtSafJ1nXGrGlwou8BNljTOk8OLyNRz/to863KOzEjNnI3VT/tZoni1rtUMjO5iYiEN9+wbkz6oK4sDoYQJHhvQCREVHnfCbDyg1lfvxHLETdwGkJO26JrSlzUnOJXp9c4shykQhqxoUm8cyLj2Oo8rN/PeMpAettGHnXrmmjAadDBH1a2Np8Y8Bw6dmJfnu+0AR7xrH6r2/lfxBLHwu53IhFr9/yplJvS2vSwIWEchbcts8bebpDxEI/hpQNYgpS3fT5s9Hmk+hwfd2v8Mw9E3FQlzDyrReZhti4j7+dygDZx4/DEJlm946GD5wVtD9F5hfXkBPSTNezHfF1tcPJgLqdxLnS1pgpMG3S3Y75TV+cyQ8SCl6ykI/WVvmH3DrQBwp36k0o8IGe6k4/Fq2gUdxyA/W2QqkSsapN4wwLyzJvwZXDgii+05AS0Ieaww6o6iUs6IDnRonGotSZQmDxQrjN5/wFzO5Yn9P0Y2Hci0lJiM9N7lI5tZKEwvsfjHWQ9MVkqBPE/oGQBFssFKJiVRQg8wDuw/DrNH0pjZZWdgLZquCP27VYGVJdqZu0K4hvOawLgPN3712/nNwEa7gTUJfWNupcLk
26JvPt20wQqGOVQpN231jE1tPLC4ISJa00qO4BbC6Bg1lYeVJe8dCwP5De5emvBqZiAulxrEahg0htQl+G3mxGeBU6ff6HkNa0hG5hT3mXP9d2pQZQZ6doBNsDWtD5Cy/gE2GB938OB1ep/QKsp2KfbpWqGqLbfbocwhFgk1TOWfSqnDv8mrSHbItnY3Q/tdDVGiWM3LEWnhe/twvfmj8T289tgDyCNplVk87o4V+zaCgPeL7KEnqtpxlrcWdfnCjb0YTmKiFcxAkaCa0C66wDu2z0fCfBX73HD9J33Ye6F9jVsXUHLigSUoP1xv/LiwbvFibJ0iIrpRkO/bH2ZtWGIUD/eMrRUAB+WTAqzzmdNT7dxmW9NJDrXzJrDm8Unho2WpudOHt/lBowmQCvzFrtksMI7m8hA+jdwEuOKI2rYkAXYjP4eM5HvNqOZ3g6j9C3XvYWxEf06hkm74onTYv9lrcGu6QG+Z4kqYAF+WVNkd4UyV6pWUQCAPx3KzxNddatoB5CfRS1zncmueYxLcigL29hVvstCGdCpEZLvtIVRK9oWMe4bbaFcKw8js9bc5y2M9sTuHBorSiUulFFOFnxrhSV8cN4ei/A1JRzOSm4H9APZb9JDh9JjpG/dd+lxAzzefiDhTRLAmA8a2I6ypsN1sdsa4VsVv75kE+hKQ79xcQF9oewvjvuTcRy8Y9t29BziLl4cT6R5RtgSPPRmPXCP1PzYO/R4m+DN2J5++LG8RVUoXPHXXeFZB8+oh0b19aUPuqVX70csfkLJfqMZyqwLVKxXIzH7MYElrTIdZ47x5ymqSlrIZqag2tZgsn4niFFDMQfqcZm4UREyS5e2iFkbMCSOqWdYSykZ7SUTv+VniyU/2z2xFU8JArCimfqaiiuL2IP5WcWeugx5p0C95AcZ55SJK43iQRw3/tiZq5o3ackPvkDe87M+0w87V05ObqVVDCzYAr+WT5eN0NzkJEmWjnWwaV1Td95Hezbkt+lhh6Lfe9iR2E962GHUb9XDbvvrPeyWRnRR+IGGIc26yLx7Afzt0Hiw91zaleBLeRT82HZNfYt+aFsnLv/8m/GH/lboI/9I6CORX0cf+rtyH/VHQt9mxX049R19GPIz/CG/Ff7oPxL+1uy3+Qn+fsp+2G+Fvs9v++/wF1Uh0zT1AP5W1VX0M3xZH/vHvvAUhUn0r2Lpxz6mP8HC57MmKoBW6t+X/xlmPr5BrzPwxV9EwDbE/7v5ORk+F2nrZxNEH3C/YPifWYrE3pfqvCaJum9LLcT6evX/D/RDf0K/bQEwxsb1stVfCLl9POvPX/y9zWbwlAEfwDb38Zdfggfx62z88uyclVELfqFFA/j/qS696keAbfL6E/iVxPj643MPMPK5bOPzMyvWAkep++/O3we/xVlRrB55H61fA8BUUfOTnrBlFobwa3564N9Fwv/CycbwFR/Q30828VPB+Fsd7J+15v13Msb/9SflDAJ95wzqJ12bf1/OwP90Ip/A13KaWtvD/6zI/8lSyO8s8ok/ksnzzWEh/zmTByd/M/7f/JHwh27/hybjb+bwoX9oh/lnLsvmZy7Lb4e/f8Jj/kH+BoXXtlnwRxfBXx70Bx22GP0pR/9VEYxvf3Wp31oE/8xr/zcaV96f1Lba0qvDvfk321afC//XjPGvkvlf56QX0a26abu/B14bwTWCuoF/xs8qWOK5fzZG+TuGv3PKF1v8wCn4zyNXvxWr/Krn/nuxys57Ag0D10I8+L1ZdX/CP+8v8flnZxXiJxbr78wqv+rL/16sYqZZ3AFsY8gvTPNn5w/yJzmE35k/fubR/xvNkfYf6J/UICFWVi72E2/jdzVI8F81SH6dNZD/Rdb424aDKG+KDJCPZO//AP/L/oZxcCP/wP9G8uDf/3v8f/6k7EOuNM9PjJSfO/u/Gfv8h6UXII/8OVkDW8fRfsIbv69k+VWj5PdmDexPyhpfXu+0YpV/G2v8LEL9n6p0sD+70sGQ9
3wG8ZPU5e+sdX4Wof9P5R/0z84/OPEufjY/CcL9zvzzT2Qofoiw1/eoOqdZ9TNqfIbY4RMAImZwJz9g7j834I4Sa2PyfxxwR5FfXeo3Drjj/0SZ1//vKUqg64P2P0+hkL+61G9MUeJnjumfjaIYuVa9/2OKrguQfrLUf0FRgGJv+uFjS9i3/W82vVmVOn1Ih18Y5LXiP8ku4K9NDZXuLx8HiipV6zCCn/g/</diagram></mxfile>
|
2009.08061/main_diagram/main_diagram.pdf
ADDED
|
Binary file (37.6 kB). View file
|
|
|
2009.08061/paper_text/intro_method.md
ADDED
|
@@ -0,0 +1,102 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Introduction
|
| 2 |
+
|
| 3 |
+
Gaussian smoothing, introduced by @cohen19 in [-@cohen19], relies on a "base classifier," which is a mapping $f: \mathbb{R}^d \rightarrow \mathcal{Y}$ where $\mathbb{R}^d$ is the input space and $\mathcal{Y}$ is a set of $k$ classes. It defines a smoothed classifier $\bar{f}$ as $$\bar{f}(x) = \underset{c \in \mathcal{Y}}{\text{argmax}} \; \mathbb{P}(f(x + \delta) = c)$$ where $\delta \sim \mathcal{N}(0, \sigma^2 I)$ is sampled from an isotropic Gaussian distribution with variance $\sigma^2$. It returns the class that is most likely to be sampled by the Gaussian distribution centered at point $x$. Let $p_1$ and $p_2$ be the probabilities of sampling the top two most likely classes. Then, $\bar{f}$ is guaranteed to be constant within an $\ell_2$-ball of radius $$R = \frac{\sigma}{2} \left( \Phi^{-1}(p_1) - \Phi^{-1}(p_2) \right)$$ where $\Phi^{-1}$ is the inverse CDF of the standard Gaussian distribution [@cohen19]. For a practical certification algorithm, a lower bound $\underline{p_1}$ on $p_1$ and an upper bound $\overline{p_2} = 1 - \underline{p_1}$ on $p_2$, with probability $1-\alpha$ for a given $\alpha \in (0, 1)$, are obtained and the certified radius is given by $R = \sigma \Phi^{-1}(\underline{p_1})$. This analysis is tight for $\ell_2$ perturbations; the bound is achieved by a worst-case classifier in which all the points in the top-class are restricted to a half-space separated by a hyperplane orthogonal to the direction of the perturbation.
|
| 4 |
+
|
| 5 |
+
In our discussion, we diverge from the standard notation described above, and assume that the base classifier $f$ maps points in $\mathbb{R}^d$ to a $k$-tuple of confidence scores. Thus, $f: \mathbb{R}^d \rightarrow (a, b)^k$ for some $a, b \in \mathbb{R}$ and $a<b$[^1]. We define the smoothed version of the classifier as $$\bar{f}(x) = \underset{\delta \sim \mathcal{N}(0, \sigma^2 I)}{\mathbb{E}}[f(x + \delta)],$$ which is the expectation of the class scores under the Gaussian distribution centered at $x$. The final prediction is made by taking an argmax of the expected scores. This definition has been studied by @SalmanLRZZBY19 in [@SalmanLRZZBY19] to develop an attack against smoothed classifiers which when used in an adversarial training setting helps boost the performance of conventional smoothing. The goal of this work is to identify a radius around an image $x$ within which the expected confidence score of the predicted class $i$, i.e. $\bar{f}_i(x) = \mathbb{E}[f_i(x + \delta)]$, remains above a given threshold $c \in (a,b)$[^2].
|
| 6 |
+
|
| 7 |
+
We measure confidence using two different notions. The first measure is the average prediction score of a class as output by the final softmax layer. We denote the prediction score function with $h: \mathbb{R}^d \rightarrow (0, 1)^k$ and define the average for class $i$ as $\bar{h}_i(x) = \mathbb{E}[h_i(x + \delta)]$. The second one is the margin $m_i(x) = h_i(x) - \max_{j \neq i} h_j(x)$ by which class $i$ beats every other class in the softmax prediction score. In section [4](#sec:conf_measures){reference-type="ref" reference="sec:conf_measures"}, we show that the expected margin $\bar{m}_i(x) = \mathbb{E}[m_i(x + \delta)]$ for the predicted class is a lower-bound on the gap in average prediction scores of the top two class labels. Thus, $\bar{m}_i(x) > 0$ implies that $i$ is the predicted class.
|
| 8 |
+
|
| 9 |
+
Standard Gaussian smoothing for establishing certified class labels essentially works by averaging binary (0/1) votes from every image in a Gaussian cloud around the input image, $x$. It then establishes the worst-case class boundary given the recorded vote, and produces a certificate. The same machinery can be applied to produce a naive certificate for confidence score; rather than averaging binary votes, we simply average scores. We then produce the worst-case class distribution, in which each class lives in a separate half-space, and generate a certificate for this worst case.
|
| 10 |
+
|
| 11 |
+
However, the naive certificate described above throws away a lot of information. When continuous-valued scores are recorded, we obtain not only the average score, but also the *distribution* of scores around the input point. By using this distributional information, we can potentially create a much stronger certificate.
|
| 12 |
+
|
| 13 |
+
To see why, consider the extreme case of a "flat" classifier function for which every sample in the Gaussian cloud around $x$ returns the same top-class prediction score of 0.55. In this case, the average score is 0.55 as well. For a function where the *distribution* of score votes is concentrated at 0.55 (or any other value greater than the threshold $c$), the average score will always remain at 0.55 for *any* perturbation to $x$, thus yielding an infinite certified radius. However, when using the naive approach that throws away the distribution, the worst-case class boundary with average vote 0.55 is one with confidence score 1.0 everywhere in a half-space occupying 0.55 probability, and 0.0 in a half-space with 0.45 probability. This worst-case, which uses only the average vote, produces a very small certified radius, in contrast to the infinite radius we could obtain from observing the distribution of votes.
|
| 14 |
+
|
| 15 |
+
Below, we first provide a simple bound that produces a certificate by averaging scores around the input image, and directly applying the framework from [@cohen19]. Then, we describe a more refined method that uses distributional information to obtain stronger bounds.
|
| 16 |
+
|
| 17 |
+
# Method
|
| 18 |
+
|
| 19 |
+
In this section, we describe a method that uses only the average confidence over the Gaussian distribution surrounding $x$, and not the distribution of values, to bound how much the expected score can change when $x$ is perturbed with an $\ell_2$ radius of $R$ units. This is a straightforward extension of @cohen19's [@cohen19] work to our framework. It shows that regardless of the behaviour of the base classifier $f$, its smoothed version $\bar{f}$ changes slowly, which is similar to the observation of bounded Lipschitz-ness made by @SalmanLRZZBY19 in [@SalmanLRZZBY19] (Lemma 2). The worst-case classifier in this case assumes value $a$ in one half space and $b$ in the other, with a linear boundary between the two as illustrated in figure [1](#fig:naive_worst_case){reference-type="ref" reference="fig:naive_worst_case"}. The following theorem formally states the bounds, the proof of which is deferred to the appendix[^3].
|
| 20 |
+
|
| 21 |
+
::: {#thm:naive_bnd .theorem}
|
| 22 |
+
**Theorem 1**. *Let $\underline{e_i}(x)$ and $\overline{e_i}(x)$ be a lower-bound and an upper-bound respectively on the expected score $\bar{f}_i(x)$ for class $i$ and, let $\underline{p_i}(x) = \frac{\underline{e_i}(x) - a}{b - a}$ and $\overline{p_i}(x) = \frac{\overline{e_i}(x) - a}{b - a}$. Then, for a perturbation $x'$ of the input $x$, such that, $\left\lVert x' - x\right\rVert_2 \leq R$, $$\begin{equation}
|
| 23 |
+
\label{ineq:naive_lbd}
|
| 24 |
+
\bar{f}_i(x') \geq b \Phi_\sigma ( \Phi_\sigma^{-1} (\underline{p_i}(x)) - R) + a (1 - \Phi_\sigma ( \Phi_\sigma^{-1} (\underline{p_i}(x)) - R))
|
| 25 |
+
\end{equation}$$ and $$\bar{f}_i(x') \leq b \Phi_\sigma ( \Phi_\sigma^{-1} (\overline{p_i}(x)) + R) + a (1 - \Phi_\sigma ( \Phi_\sigma^{-1} (\overline{p_i}(x)) + R))$$ where $\Phi_\sigma$ is the CDF of the univariate Gaussian distribution with $\sigma^2$ variance, i.e., $\mathcal{N}(0, \sigma^2)$.*
|
| 26 |
+
:::
|
| 27 |
+
|
| 28 |
+
The bounds in section [3.1](#sec:naive_bnd){reference-type="ref" reference="sec:naive_bnd"} are a simple application of the Neyman-Pearson lemma to our framework. But this method discards a lot of information about how the class scores are distributed in the Gaussian around the input point. Rather than consolidating the confidence scores from the samples into an expectation, we propose a method that uses the cumulative distribution function of the confidence scores to obtain improved bounds on the expected class scores.
|
| 29 |
+
|
| 30 |
+
Given an input $x$, we draw $m$ samples from the Gaussian distribution around $x$. We use the prediction of the base classifier $f$ on these points to generate bounds on the distribution function of the scores for the predicted class. These bounds, in turn, allow us to bound the amount by which the expected score of the class will decrease under an $\ell_2$ perturbation. Finally, we apply binary search to compute the radius for which this lower bound on the expected score remains above $c$.
|
| 31 |
+
|
| 32 |
+
Consider the sampling of scores around an image $x$ using a Gaussian distribution. Let the probability with which the score of class $i$ is above $s$ be $$p_{i, s}(x) = \underset{\delta \sim \mathcal{N}(0, \sigma^2 I)}{\mathbb{P}}(f_i(x + \delta) \geq s).$$ For point $x$ and class $i$, consider the random variable $Z = -f_i(x + \delta)$ where $\delta \sim \mathcal{N}(0, \sigma^2 I)$. Let $F(s) = \mathbb{P}(Z \leq s)$ be the cumulative distribution function of $Z$ and $F_m(s) = \frac{1}{m} \sum_{j=1}^m \mathbf{1}\{Z_j \leq s\}$ be its empirical estimate. For a given $\alpha \in (0, 1)$, the Dvoretzky--Kiefer--Wolfowitz inequality [@dvoretzky1956] says that, with probability $1-\alpha$, the true CDF is bounded by the empirical CDF as follows: $$F_m(s) - \epsilon \leq F(s) \leq F_m(s) + \epsilon, \forall s,$$ where $\epsilon = \sqrt{\frac{\ln{2/\alpha}}{2m}}$. Thus, $p_{i, s}(x)$ is also bounded within $\pm \epsilon$ of its empirical estimate $\frac{1}{m} \sum_{j=1}^m \mathbf{1}\{ f_i(x + \delta_j) \geq s\}$.
|
| 33 |
+
|
| 34 |
+
The following theorem bounds the expected class score under an $\ell_2$ perturbation using bounds on the cumulative distribution of the scores.
|
| 35 |
+
|
| 36 |
+
::: {#thm:exp_bnds .theorem}
|
| 37 |
+
**Theorem 2**. *Let, for class $i$, $a < s_1 \leq s_2 \leq \cdots \leq s_n < b$ be $n$ real numbers and let $\overline{p_{i, s_j}}(x)$ and $\underline{p_{i, s_j}}(x)$ be upper and lower bounds on $p_{i, s_j}(x)$ respectively derived using the Dvoretzky--Kiefer--Wolfowitz inequality, with probability $1-\alpha$, for a given $\alpha \in (0, 1)$. Then, for a perturbation $x'$ of the input $x$, such that, $\left\lVert x' - x\right\rVert_2 \leq R$, $$\begin{equation}
|
| 38 |
+
\label{ineq:cdf_lbd}
|
| 39 |
+
\bar{f}_i(x') \geq a + (s_1 - a) \Phi_\sigma(\Phi_\sigma^{-1}(\underline{p_{i, s_1}}(x)) - R) + \sum_{j = 2}^n (s_j - s_{j-1}) \Phi_\sigma(\Phi_\sigma^{-1}(\underline{p_{i, s_j}}(x)) - R)
|
| 40 |
+
\end{equation}$$ and $$\bar{f}_i(x') \leq s_1 + (b - s_n) \Phi_\sigma(\Phi_\sigma^{-1}(\overline{p_{i, s_n}}(x)) + R) + \sum_{j = 1}^{n-1} (s_{j+1} - s_j) \Phi_\sigma(\Phi_\sigma^{-1}(\overline{p_{i, s_j}}(x)) + R)$$ where $\Phi_\sigma$ is the CDF of the univariate Gaussian distribution with $\sigma^2$ variance, i.e., $\mathcal{N}(0, \sigma^2)$.*
|
| 41 |
+
:::
|
| 42 |
+
|
| 43 |
+
The above bounds are tight for $\ell_2$ perturbations. The worst-case classifier for the lower bound is one in which the class score decreases from $b$ to $a$ in steps, taking values $s_n, s_{n-1}, \ldots, s_1$ at each level. Figure [3](#fig:worst_case){reference-type="ref" reference="fig:worst_case"} illustrates this case for three intermediate levels. A similar worst-case scenario can be constructed for the upper bound as well where the class score increases from $a$ to $b$ along the direction of the perturbation. Even though our theoretical results allow us to derive both upper and lower bounds for the expected scores, we restrict ourselves to the lower bound in our experimental results. We provide a proof sketch for this theorem in section [3.3](#sec:cert_rad){reference-type="ref" reference="sec:cert_rad"}. Our experimental results show that the CDF-based approach beats the naive bounds in practice by a significant margin, showing that having more information about the classifier at the input point can help achieve better guarantees.
|
| 44 |
+
|
| 45 |
+
<figure id="fig:worst_case" data-latex-placement="t">
|
| 46 |
+
<figure id="fig:naive_worst_case">
|
| 47 |
+
<img src="figures/naive_worst_case.png" />
|
| 48 |
+
<figcaption>Naive classifier</figcaption>
|
| 49 |
+
</figure>
|
| 50 |
+
<figure id="fig:worst_case">
|
| 51 |
+
<img src="figures/conf_cert.png" />
|
| 52 |
+
<figcaption>CDF-based classifier</figcaption>
|
| 53 |
+
</figure>
|
| 54 |
+
<figcaption>Worst case classifier behaviour using (a) naive approach and (b) CDF-based method. As the center of the distribution moves from <span class="math inline"><em>x</em></span> to <span class="math inline"><em>x</em><sup>′</sup></span>, the probability mass of the higher values of the score function (indicated in red) decreases and that of the lower values (indicated in blue) increases, bringing down the value of the expected score. </figcaption>
|
| 55 |
+
</figure>
|
| 56 |
+
|
| 57 |
+
**Computing the certified radius** Both the bounds in theorem [2](#thm:exp_bnds){reference-type="ref" reference="thm:exp_bnds"} are monotonic in $R$. So, in order to find a certified radius, up to a precision $\tau$, such that the lower (upper) bound is above (below) a certain threshold we can apply binary search which will require at most $O(\log(1/\tau))$ evaluations of the bound.
|
| 58 |
+
|
| 59 |
+
We present a brief proof for theorem [2](#thm:exp_bnds){reference-type="ref" reference="thm:exp_bnds"}. We use a slightly modified version of the Neyman-Pearson lemma (stated in [@cohen19]) which we prove in the appendix.
|
| 60 |
+
|
| 61 |
+
::: {#lem:N-P .lemma}
|
| 62 |
+
**Lemma 3** (Neyman & Pearson, 1933). *Let $X$ and $Y$ be random variables in $\mathbb{R}^d$ with densities $\mu_X$ and $\mu_Y$. Let $h: \mathbb{R}^d \rightarrow (a, b)$ be a function. Then:*
|
| 63 |
+
|
| 64 |
+
1. *If $S = \left\{ z \in \mathbb{R}^d \mid \frac{\mu_Y(z)}{\mu_X(z)} \leq t \right\}$ for some $t > 0$ and $\mathbb{P}(h(X) \geq s) \geq \mathbb{P}(X \in S)$, then $\mathbb{P}(h(Y) \geq s) \geq \mathbb{P}(Y \in S)$.*
|
| 65 |
+
|
| 66 |
+
2. *If $S = \left\{ z \in \mathbb{R}^d \mid \frac{\mu_Y(z)}{\mu_X(z)} \geq t \right\}$ for some $t > 0$ and $\mathbb{P}(h(X) \geq s) \leq \mathbb{P}(X \in S)$, then $\mathbb{P}(h(Y) \geq s) \leq \mathbb{P}(Y \in S)$.*
|
| 67 |
+
:::
|
| 68 |
+
|
| 69 |
+
Set $X$ to be the smoothing distribution at an input point $x$ and $Y$ to be that at $x + \epsilon$ for some perturbation vector $\epsilon$. For a class $i$, define sets $\underline{S}_{i, j} = \{ z \in \mathbb{R}^d \mid \mu_Y(z) / \mu_X(z) \leq t_{i, j} \}$ for some $t_{i, j} > 0$, such that, $\mathbb{P}(X \in \underline{S}_{i, j}) = \underline{p_{i, s_j}}(x)$. Similarly, define sets $\overline{S}_{i, j} = \{ z \in \mathbb{R}^d \mid \mu_Y(z) / \mu_X(z) \geq t'_{i, j} \}$ for some $t'_{i, j} > 0$, such that, $\mathbb{P}(X \in \overline{S}_{i, j}) = \overline{p_{i, s_j}}(x)$. Since $\mathbb{P}(f_i(X) \geq s_j) \geq \mathbb{P}(X \in \underline{S}_{i,j})$, using lemma [3](#lem:N-P){reference-type="ref" reference="lem:N-P"} we can say that $\mathbb{P}(f_i(Y) \geq s_j) \geq \mathbb{P}(Y \in \underline{S}_{i,j})$. Therefore, $$\begin{align*}
|
| 70 |
+
\mathbb{E}[f_i(Y)] &\geq s_n \mathbb{P}(f_i(Y) \geq s_n) + s_{n-1} (\mathbb{P}(f_i(Y) \geq s_{n-1}) - \mathbb{P}(f_i(Y) \geq s_n))\\
|
| 71 |
+
& + \cdots + s_1 (\mathbb{P}(f_i(Y) \geq s_1) - \mathbb{P}(f_i(Y) \geq s_2)) + a (1 - \mathbb{P}(f_i(Y) \geq s_1))\\
|
| 72 |
+
&= a + (s_1 - a) \mathbb{P}(f_i(Y) \geq s_1) + \sum_{j = 2}^n (s_j - s_{j-1}) \mathbb{P}(f_i(Y) \geq s_j)\\
|
| 73 |
+
& \geq a + (s_1 -a) \mathbb{P}(Y \in \underline{S}_{i,1}) + \sum_{j = 2}^n (s_j - s_{j-1}) \mathbb{P}(Y \in \underline{S}_{i,j}).
|
| 74 |
+
\end{align*}$$ Similarly, $\mathbb{P}(f_i(X) \geq s_j) \leq \mathbb{P}(X \in \overline{S}_{i, j})$ implies $\mathbb{P}(f_i(Y) \geq s_j) \leq \mathbb{P}(Y \in \overline{S}_{i, j})$ as per lemma [3](#lem:N-P){reference-type="ref" reference="lem:N-P"}. Therefore, $$\begin{align*}
|
| 75 |
+
\mathbb{E}[f_i(Y)] &\leq b \mathbb{P}(f_i(Y) \geq s_n) + s_n (\mathbb{P}(f_i(Y) \geq s_{n-1}) - \mathbb{P}(f_i(Y) \geq s_n))\\
|
| 76 |
+
& + \cdots + s_1 (1 - \mathbb{P}(f_i(Y) \geq s_1))\\
|
| 77 |
+
&= (b - s_n) \mathbb{P}(f_i(Y) \geq s_n) + \sum_{j = 1}^{n-1} (s_{j+1} - s_j) \mathbb{P}(f_i(Y) \geq s_j) + s_1\\
|
| 78 |
+
& \leq s_1 + (b - s_n) \mathbb{P}(Y \in \overline{S}_{i,n}) + \sum_{j = 1}^{n-1} (s_{j+1} - s_j) \mathbb{P}(Y \in \overline{S}_{i, j}).
|
| 79 |
+
\end{align*}$$
|
| 80 |
+
|
| 81 |
+
Since we are smoothing using an isotropic Gaussian distribution with $\sigma^2$ variance, $\mu_X = \mathcal{N}(x, \sigma^2 I)$ and $\mu_Y = \mathcal{N}(x + \epsilon, \sigma^2 I)$. Then, for some $t$ and $\beta$ $$\begin{align*}
|
| 82 |
+
\frac{\mu_Y(z)}{\mu_X(z)} \leq t \iff \epsilon^T z \leq \beta\\% \text{ and }
|
| 83 |
+
\frac{\mu_Y(z)}{\mu_X(z)} \geq t \iff \epsilon^T z \geq \beta.
|
| 84 |
+
\end{align*}$$ Thus, each of the sets $\underline{S}_{i,j}$ and $\overline{S}_{i,j}$ is a half space defined by a hyper-plane orthogonal to the direction of the perturbation. This simplifies our analysis to one dimension, namely, the one along the perturbation. For each of the sets $\underline{S}_{i,j}$ and $\overline{S}_{i,j}$, we can find a point on the real number line $\Phi_\sigma^{-1}(\underline{p_{i, s_j}}(x))$ and $\Phi_\sigma^{-1}(\overline{p_{i, s_j}}(x))$ respectively such that the probability of a Gaussian sample to fall in that set is equal to the Gaussian CDF at that point. Therefore, $$\bar{f}_i(x + \epsilon) \geq a + (s_1 - a) \Phi_\sigma(\Phi_\sigma^{-1}(\underline{p_{i, s_1}}(x)) - R) + \sum_{j = 2}^n (s_j - s_{j-1}) \Phi_\sigma(\Phi_\sigma^{-1}(\underline{p_{i, s_j}}(x)) - R)$$ and $$\bar{f}_i(x + \epsilon) \leq s_1 + (b - s_n) \Phi_\sigma(\Phi_\sigma^{-1}(\overline{p_{i, s_n}}(x)) + R) + \sum_{j = 1}^{n-1} (s_{j+1} - s_j) \Phi_\sigma(\Phi_\sigma^{-1}(\overline{p_{i, s_j}}(x)) + R)$$ which completes the proof of theorem [2](#thm:exp_bnds){reference-type="ref" reference="thm:exp_bnds"}. We would like to note here that although we use the Gaussian distribution for smoothing, the modified Neyman-Pearson lemma does not make any assumptions on the shape of the distributions which allows for this proof to be adapted for other smoothing distributions as well.
|
| 85 |
+
|
| 86 |
+
We study two notions of confidence: average prediction score of a class and the margin of average prediction score between two classes. Usually, neural networks make their predictions by outputting a prediction score for each class and then taking the argmax of the scores. Let $h: \mathbb{R}^d \rightarrow (0, 1)^k$ be a classifier mapping input points to prediction scores between 0 and 1 for each class. We assume that the scores are generated by a softmax-like layer, i.e., $0 < h_i(x) < 1, \forall i \in \{1, \ldots, k\}$ and $\sum_i h_i(x) = 1$. For $\delta \sim \mathcal{N}(0, \sigma^2 I)$, we define average prediction score for a class $i$ as $$\bar{h}_i(x) = \mathbb{E}[h_i(x + \delta)].$$ The final prediction for the smoothed classifier is made by taking an argmax over the average prediction scores of all the classes, i.e., $\text{argmax}_i \; \bar{h}_i(x)$. Thus, if for a class $j$, $\bar{h}_j(x) \geq 0.5$, then $j = \text{argmax}_i \; \bar{h}_i(x)$.
|
| 87 |
+
|
| 88 |
+
Now, we define margin $m$ at point $x$ for a class $i$ as $$m_i(x) = h_i(x) - \max_{j \neq i} h_j(x).$$ Thus, if $i$ is the class with the highest prediction score, $m_i(x)$ is the lead it has over the second highest class (figure [\[fig:margin\]](#fig:margin){reference-type="ref" reference="fig:margin"}). For any other class $i$, $m_i(x)$ is the negative of the difference between the highest class score and the score of class $i$. We define the average margin at point $x$ under the Gaussian smoothing distribution $\delta \sim \mathcal{N}(0, \sigma^2 I)$ as $$\bar{m}_i(x) = \mathbb{E}[m_i(x + \delta)].$$
|
| 89 |
+
|
| 90 |
+
::: wrapfigure
|
| 91 |
+
r0.25 {width=".24\\columnwidth"}
|
| 92 |
+
|
| 93 |
+
[]{#fig:margin label="fig:margin"}
|
| 94 |
+
:::
|
| 95 |
+
|
| 96 |
+
For a pair of classes $i$ and $j$, we have, $$\begin{align*}
|
| 97 |
+
\bar{h}_i(x) - \bar{h}_j(x) &= \mathbb{E}[h_i(x + \delta)] - \mathbb{E}[h_j(x + \delta)]\\
|
| 98 |
+
&= \mathbb{E}[h_i(x + \delta) - h_j(x + \delta)]\\
|
| 99 |
+
& \geq \mathbb{E}[h_i(x + \delta) - \max_{j \neq i} h_j(x + \delta)]\\
|
| 100 |
+
&= \mathbb{E}[m_i(x + \delta)] = \bar{m}_i(x)\\
|
| 101 |
+
\bar{h}_i(x) &\geq \bar{h}_j(x) + \bar{m}_i(x).
|
| 102 |
+
\end{align*}$$ Thus, if $\bar{m}_i(x) > 0$, then class $i$ must have the highest average prediction score making it the predicted class under this notion of confidence measure.
|
2010.11354/main_diagram/main_diagram.drawio
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
<mxfile host="app.diagrams.net" modified="2021-06-08T23:03:30.321Z" agent="5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.182 Safari/537.36" version="14.7.3" etag="orP1wVN1Rf9byP1_nrcM" type="device"><diagram id="rKq5lZ1zRbW_x9L4xPfO">5VZNj5swEP01HLcyOJDkuGGTbg+VKkVVzi44YK2xqTGF9Nd3iM1XIdpIS057wn4zNuP35mEcHGb1V0Xy9LuMKXc8FNcOfnE8z0PYhUeDXAyCMcIGSRSLDeb2wJH9pRZEFi1ZTItRopaSa5aPwUgKQSM9wohSshqnnSUfvzUnCZ0Ax4jwKXpisU4NuvFRj79SlqTtm11kIxlpky1QpCSW1QDCeweHSkptRlkdUt6w1/Ji1h1uRLvCFBX6ngWeWfCH8NKezdalL+1hYQHwCpNdlTJNjzmJmkgF2gKW6ozDzIUhKXJD9pnVFPbfnRnnoeRSXTfCCAVBGAJeaCXf6FzEVkOVpvXNE7kdT9BhVGZUqwuktO0VYMut7a4n11t7X3yDVb1cK5uWDpRqMWIbJOm27zmEgaVxnlI8Q2nAdcOHhCMMuQ1+l7INPBXXNn+GhNU6r/sgjJLm+eN1f2p3ghrMZiY00Qzo02NxxpwLKeh/AlmIcJYImEZANwV814jBoPWfbSBjccxvdYOSpYgb7V/QUmpuJ2oGc2q6mxk5vQXkXL3vkA/64nDYIITmfNFFlmBy697pC3/GF/7HifQf5ItvAniB75OHTteKgWIYEv5WfD63bL2pW9BE4bX7IK8E73tFK0ZEcpOSgQYxU2AbJhsaC1k2xU9vlDCcd04XWYBV7G+mzpmy6s/dJwv4Zv0g3/wUvxgpPrFbcODfd7esFvILTPs/u2ts8IOM9/8A</diagram></mxfile>
|
2010.11354/main_diagram/main_diagram.pdf
ADDED
|
Binary file (9.66 kB). View file
|
|
|
2010.11354/paper_text/intro_method.md
ADDED
|
@@ -0,0 +1,174 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Introduction
|
| 2 |
+
|
| 3 |
+
Generating sparse neural networks through pruning has recently led to a major reduction in the number of parameters, while having minimal loss in performance. Conventionally, pruning methods operate on pre-trained networks. Gen-
|
| 4 |
+
|
| 5 |
+
*Proceedings of the* 38 th *International Conference on Machine Learning*, PMLR 139, 2021. Copyright 2021 by the author(s).
|
| 6 |
+
|
| 7 |
+
erally, such methods use an edge scoring mechanism for eliminating the less important connections. Popular scoring mechanisms include weight magnitudes [\(Han et al.,](#page-9-0) [2015b;](#page-9-0) [Janowsky,](#page-9-0) [1989;](#page-9-0) [Park\\* et al.,](#page-10-0) [2020\)](#page-10-0), loss sensitivity with respect to units [\(Mozer & Smolensky,](#page-10-0) [1989\)](#page-10-0) and with respect to weights [\(Karnin,](#page-9-0) [1990\)](#page-9-0), Hessian [\(LeCun et al.,](#page-9-0) [1990;](#page-9-0) [Hassibi & Stork,](#page-9-0) [1993\)](#page-9-0), and first and second order Taylor expansions [\(Molchanov et al.,](#page-10-0) [2019b;a\)](#page-10-0). More recent approaches use more sophisticated variants of these scores [\(Han et al.,](#page-9-0) [2015a;](#page-9-0) [Guo et al.,](#page-9-0) [2016;](#page-9-0) [Carreira-Perpinan &](#page-9-0) ´ [Idelbayev,](#page-9-0) [2018;](#page-9-0) [Yu et al.,](#page-10-0) [2018;](#page-10-0) [Dong et al.,](#page-9-0) [2017;](#page-9-0) [Guo](#page-9-0) [et al.,](#page-9-0) [2016\)](#page-9-0).
|
| 8 |
+
|
| 9 |
+
Further analysis of pruning has shown the existence of sparse subnetworks at initialization which, when trained, are capable of matching the performance of the fully-connected network [\(Frankle & Carbin,](#page-9-0) [2018;](#page-9-0) [Frankle et al.,](#page-9-0) [2019;](#page-9-0) [Liu](#page-10-0) [et al.,](#page-10-0) [2019;](#page-10-0) [Frankle et al.,](#page-9-0) [2020\)](#page-9-0). However, identifying such "winning ticket" networks requires expensive training and pruning cycles. More recently, SNIP [\(Lee et al.,](#page-10-0) [2019b\)](#page-10-0), [\(You et al.,](#page-10-0) [2019\)](#page-10-0) and GraSP [\(Wang et al.,](#page-10-0) [2019\)](#page-10-0) showed that it is possible to find "winning tickets" prior to training – but still having access to at least some training data to compute initial gradients. Furthermore, other work has shown that such subnetworks generalize well across datasets and tasks [\(Morcos et al.,](#page-10-0) [2019\)](#page-10-0).
|
| 10 |
+
|
| 11 |
+
Our goal is to identify sparse subnetworks that perform almost as well as the fully connected network without *any* training data. The closest methods that tackle the same problem with our work are SynFlow [\(Tanaka et al.,](#page-10-0) [2020\)](#page-10-0) and SynFlow-L2 [\(Gebhart et al.,](#page-9-0) [2021\)](#page-9-0). The authors of [\(Tanaka](#page-10-0) [et al.,](#page-10-0) [2020\)](#page-10-0) introduced the concept of "layer collapse" in pruning – the state when all edges in a layer are eliminated while there are edges in other layers that can be pruned. They also proved that iterative pruning based on positive gradient-based scores avoids layer collapse and introduced an iterative algorithm (SynFlow) and a loss function that conserves information flow and avoids layer collapse.
|
| 12 |
+
|
| 13 |
+
A branch of recent work focuses on the convergence and generalization properties of deep neural networks using linear approximations of the training dynamics [\(Jacot et al.,](#page-9-0) [2018;](#page-9-0) [Lee et al.,](#page-9-0) [2019a;](#page-9-0) [Arora et al.,](#page-9-0) [2019\)](#page-9-0). Under the infinite-width assumption, [\(Jacot et al.,](#page-9-0) [2018\)](#page-9-0) showed how
|
| 14 |
+
|
| 15 |
+
<sup>1</sup> School of Computer Science, Georgia Institute of Technology, USA. Correspondence to: Constantine Dovrolis <constantine@gatech.edu>.
|
| 16 |
+
|
| 17 |
+
<span id="page-1-0"></span>to predict at initialization output changes during training using the Neural Tangent Kernel (NTK). More recently, (Gebhart et al., 2021) decomposed the NTK into two factors: one that is only data-dependent, and another that is only architecture-dependent. This decomposition decoupled the effects of network architecture (including sparsity and selection of initial weights) from the effect of training data on convergence. The architecture-dependent factor can be thought of as the "path covariance" of the network and is referred to as *Path Kernel*. The authors of (Gebhart et al., 2021) show that the training convergence of a network can be accurately predicted using the path kernel trace. That work concluded with a pruning algorithm (SynFlow-L2) that designs sparse networks with maximum path kernel trace – aiming to optimize at least the architectural component of the network's convergence speed.
|
| 18 |
+
|
| 19 |
+
In this work, we first show that even though SynFlow and Synflow-L2 are optimal in terms of convergence for a given network density, they result in sub-networks with "bottle-neck layers" (very small width) – leading to poor performance as compared to other data-agnostic methods that use the same number of parameters. This issue is observed even at moderate density values. This is expected given the recent results of (Golubeva et al., 2021), for instance, showing that increasing the width of sparse networks, while keeping the number of parameters constant, generally improves performance.
|
| 20 |
+
|
| 21 |
+
We then present a method, referred to as *PHEW* (*Paths with Higher Edge Weights*), which aims to achieve the best of both worlds: high path kernel trace for fast convergence, and large network width for better generalization performance. Given an unpruned initialized network, and a target number of learnable parameters, PHEW selects a set of input-output paths that are conserved in the network and it prunes every remaining connection. The selection of the conserved paths is based strictly on their initial weight values – and not on any training data. Further, PHEW induces randomness into the path selection process using random walks biased towards higher weight-magnitudes. The network sparsification process does not require any data, and the pruned network needs to be trained only once.
|
| 22 |
+
|
| 23 |
+
We show that selecting paths with higher edge weights forms sub-networks that have higher path kernel trace than uniform random walks – close to the trace obtained through SynFlow-L2. We also show that the use of random walks results in sub-networks having high per-layer width – similar to that of unpruned networks. Further, PHEW avoids layer-collapse by selecting and conserving input-output paths instead of individual units or connections. We compare the performance of PHEW against several pruning before-training methods and show that PHEW achieves significant improvements over SynFlow and SynFlow-L2. Additionally,
|
| 24 |
+
|
| 25 |
+
we conduct a wide range of ablation studies to evaluate the efficacy of PHEW.
|
| 26 |
+
|
| 27 |
+
Let $(\mathcal{X}, \mathcal{Y})$ denote the training examples, $\mathcal{L}$ the loss function, and $f(\mathcal{X}, \theta) \in \mathbb{R}^{NK}$ the network's output, where N is the number of examples and K is the output dimension. Under the gradient flow assumption, and denoting the learning rate by $\eta$ , the output of the network at time t can be approximated using the first-order Taylor expansion,
|
| 28 |
+
|
| 29 |
+
$$f(\mathcal{X}, \boldsymbol{\theta}_{t+1}) = f(\mathcal{X}, \boldsymbol{\theta}_t) - \eta \, \boldsymbol{\Theta}_t(\mathcal{X}, \mathcal{X}) \, \nabla_f \mathcal{L}$$
|
| 30 |
+
(1)
|
| 31 |
+
|
| 32 |
+
where the matrix $\Theta_t(\mathcal{X}, \mathcal{X}) = \nabla_{\theta} f(\mathcal{X}, \theta_t) \nabla_{\theta} f(\mathcal{X}, \theta_t)^T \in \mathbb{R}^{NK \times NK}$ is the **Neural Tangent Kernel** (NTK) at time t (Jacot et al., 2018). Under the additional assumption of infinite width, the NTK has been shown to remain constant throughout training, and it allows us to exactly predict the evolution of the network's output. More recent work has shown that even networks with limited width, and any depth, closely follow the NTK dynamics (Lee et al., 2019a). For a constant NTK $\Theta_0$ , and with a mean-squared error loss, equation (1) has the closed-form solution:
|
| 33 |
+
|
| 34 |
+
$$f(\mathcal{X}, \boldsymbol{\theta}_t) = (\mathcal{I} - e^{-\eta \boldsymbol{\Theta}_0 t}) \mathcal{Y} + e^{-\eta \boldsymbol{\Theta}_0 t} f(\mathcal{X}, \boldsymbol{\theta}_0)$$
|
| 35 |
+
(2)
|
| 36 |
+
|
| 37 |
+
Equation (2) allows us to predict the network's output given the input-output training examples, the initial weights $\theta_0$ , and the initial NTK $\Theta_0$ . Further, leveraging equation (2), it has been shown that the training convergence is faster in the directions that correspond to the larger NTK eigenvalues (Arora et al., 2019). This suggests that sparse sub-networks that preserve the larger NTK eigenvalues of the original network would converge faster and with higher sampling efficiency (Wang et al., 2019).
|
| 38 |
+
|
| 39 |
+
More recently, an interesting decomposition of the Neural Tangent Kernel has been proposed that decouples the effects of the network architecture (and initial weights) from the data-dependent factors of the training process (Gebhart et al., 2021). We summarize this decomposition next.
|
| 40 |
+
|
| 41 |
+
Consider a neural network $\boldsymbol{f}: \mathbb{R}^D \to \mathbb{R}^K$ with ReLU activations, parametrized by $\boldsymbol{\theta} \in \mathbb{R}^m$ . Let $\boldsymbol{\mathcal{P}}$ be the set of all input-output paths, indexed as $p=1,\ldots,P$ (we refer to a path by its index p). Let $p_i=\mathbb{I}\{\theta_i\in p\}$ represent the presence of edge-weight $\theta_i$ in path p.
|
| 42 |
+
|
| 43 |
+
The edge-weight-product of a path is defined as the product of edge-weights present in a path, $\pi_p(\theta) = \prod_{i=1}^m \theta_i^{p_i}$ . For an input variable $\boldsymbol{x}$ , the activation status of a path is, $a_p(\boldsymbol{x}) = \prod_{\theta_i \in p} \mathbb{I}\{z_i > 0\}$ , where $z_i$ is the activation of the
|
| 44 |
+
|
| 45 |
+
<span id="page-2-0"></span>
|
| 46 |
+
|
| 47 |
+
Figure 1. Comparison of the path kernel trace of sparse networks obtained using various pruning methods as well as PHEW.
|
| 48 |
+
|
| 49 |
+
neuron connected to the previous layer through $\theta_i$ . The $k^{th}$ output of the network can be expressed as:
|
| 50 |
+
|
| 51 |
+
$$\boldsymbol{f}^{k}(\boldsymbol{x},\boldsymbol{\theta}) = \sum_{i=1}^{D} \sum_{p \in \boldsymbol{\mathcal{P}}_{i \to k}} \pi_{p}(\boldsymbol{\theta}) a_{p}(\boldsymbol{x}) x_{i}, \quad (3)$$
|
| 52 |
+
|
| 53 |
+
where $\mathcal{P}_{i\to k}$ is the set of paths from input unit i to output unit k. We can now decompose the NTK using the chain rule:
|
| 54 |
+
|
| 55 |
+
$$\Theta(\mathcal{X}, \mathcal{X}) = \nabla_{\boldsymbol{\pi}} f(\mathcal{X}) \nabla_{\boldsymbol{\theta}} \pi(\boldsymbol{\theta}) \nabla_{\boldsymbol{\theta}} \pi(\boldsymbol{\theta})^T \nabla_{\boldsymbol{\pi}} f(\mathcal{X})^T
|
| 56 |
+
= J_{\boldsymbol{\pi}}^f(\mathcal{X}) J_{\boldsymbol{\theta}}^{\boldsymbol{\pi}} (J_{\boldsymbol{\theta}}^{\boldsymbol{\pi}})^T J_{\boldsymbol{\pi}}^f(\mathcal{X})^T
|
| 57 |
+
= J_{\boldsymbol{\pi}}^f(\mathcal{X}) \Pi_{\boldsymbol{\theta}} J_{\boldsymbol{\pi}}^f(\mathcal{X})^T$$
|
| 58 |
+
(4)
|
| 59 |
+
|
| 60 |
+
The matrix $\Pi_{\theta}$ is referred to as the **Path Kernel** (Gebhart et al., 2021). The path kernel element for two paths p and p' is:
|
| 61 |
+
|
| 62 |
+
$$\mathbf{\Pi}_{\boldsymbol{\theta}}(p, p') = \sum_{i=1}^{m} \left( \frac{\pi_{p}(\boldsymbol{\theta})}{\theta_{i}} \right) \left( \frac{\pi_{p'}(\boldsymbol{\theta})}{\theta_{i}} \right) p_{i} p'_{i} \qquad (5)$$
|
| 63 |
+
|
| 64 |
+
Note that the path kernel, $\Pi_{\theta} \in \mathbb{R}^{P \times P}$ , depends only on the network architecture and the initial weights. On the other hand, the matrix $J_{\pi}^{f}(\mathcal{X}) \in \mathbb{R}^{NK \times P}$ captures the data-dependent activations and re-weights the paths on the basis of the training data input.
|
| 65 |
+
|
| 66 |
+
Convergence approximation: The eigenstructure of the NTK depends on how the eigenvectors of $J_{\pi}^{f}(\mathcal{X})$ map onto the eigenvectors of the path kernel $\Pi_{\theta}$ , as shown by the following result.
|
| 67 |
+
|
| 68 |
+
**Theorem 1** (Gebhart et al., 2021): Let $\lambda_i$ be the eigenvalues of $\Theta(\mathcal{X}, \mathcal{X})$ , $v_i$ the eigenvalues of $J_{\pi}^f(\mathcal{X})$ and $w_i$ the eigenvalues of $\Pi_{\theta}$ . Then $\lambda_i \leq v_i w_i$ and $\sum_i \lambda_i \leq \sum_i v_i w_i$ .
|
| 69 |
+
|
| 70 |
+
Given the eigenvalue decomposition of $\Theta_0$ , Theorem 1 provides an upper bound for the convergence in equation (2). $\Theta_0$ with eigenvalues $\lambda_i$ has the same eigenvectors as $e^{-\eta\Theta_0t}$ with eigenvalues $e^{-\eta\lambda_it}$ . Therefore, $\sum_i v_i w_i$ accurately captures the eigenvalues of $\Theta_0$ and it can be used to predict the convergence of the training process. Even without any
|
| 71 |
+
|
| 72 |
+
training data, the convergence can be effectively approximated from the trace of the path kernel:
|
| 73 |
+
|
| 74 |
+
$$Tr(\mathbf{\Pi}_{\boldsymbol{\theta}}) = \sum_{i} w_{i} = \sum_{p=1}^{P} \mathbf{\Pi}_{\boldsymbol{\theta}}(p, p) = \sum_{p=1}^{P} \sum_{i=1}^{m} \left(\frac{\pi_{p}(\boldsymbol{\theta})}{\theta_{i}}\right)^{2} p_{i}$$
|
| 75 |
+
(6)
|
| 76 |
+
|
| 77 |
+
The authors of (Gebhart et al., 2021) empirically validated the convergence predicted using the trace of the path kernel against the actual training convergence of the network.
|
| 78 |
+
|
| 79 |
+
The previous result has an important consequence for neural network pruning. Given a fully connected neural network at initialization as well as the target density for a pruned network, maximizing the path kernel trace of the pruned network preserves the largest NTK eigenvalues of the original network. Since the directions corresponding to the larger eigenvalues of the NTK learn faster, the sub-network obtained by maximizing the path kernel trace is also expected to converge faster and learn more efficiently.
|
| 80 |
+
|
| 81 |
+
The path kernel framework has been applied in the design of pruning algorithms that do not require any training data (Gebhart et al., 2021; Tanaka et al., 2020). SynFlow-L2 is such an iterative pruning algorithm that removes edges (parameters) based on the following saliency function:
|
| 82 |
+
|
| 83 |
+
$$S(\theta_i) = \theta_i \odot \frac{\partial \mathcal{R}(\boldsymbol{\theta})}{\partial \theta_i^2} = \theta_i \odot \sum_{p=1}^{P} \left(\frac{\pi_p(\boldsymbol{\theta})}{\theta_i}\right)^2 p_i \qquad (7)$$
|
| 84 |
+
|
| 85 |
+
The process of computing the previous saliency measure and eliminating edges with lower saliency is repeated until the required density is achieved. SynFlow-L2 maximizes the trace of the path kernel, and preserves the following data-independent loss function:
|
| 86 |
+
|
| 87 |
+
$$\mathcal{R}(\boldsymbol{\theta}) = \mathbb{1}^T \left( \prod_{l=1}^{L+1} |\boldsymbol{\theta}^{[l]}|^2 \right) \mathbb{1} = \sum_{p=1}^P (\pi_p(\boldsymbol{\theta}))^2 \qquad (8)$$
|
| 88 |
+
|
| 89 |
+
where $|\boldsymbol{\theta}^{[l]}|^2$ is the matrix formed by the squares of the elements of the weight matrix at the $l^{th}$ layer and L is the number of hidden layers.
|
| 90 |
+
|
| 91 |
+
<span id="page-3-0"></span>
|
| 92 |
+
|
| 93 |
+
Figure 2. Comparison of the number of remaining units at each layer. The network density is selected such that the method of Magnitude-Pruning after training is able to achieve within 5% of the unpruned network's accuracy (see text for justification).
|
| 94 |
+
|
| 95 |
+
We can also observe empirically in Figure 1 that SynFlow-L2 achieves the highest path kernel trace compared to other state-of-the-art pruning methods.
|
| 96 |
+
|
| 97 |
+
Another related pruning method is SynFlow-L1 (or simply "SynFlow") – proposed in (Tanaka et al., 2020). SynFlow is based on preserving the loss function $\mathcal{R}(\boldsymbol{\theta}) = \sum_{p=1}^{P} |\pi_p(\boldsymbol{\theta})|$ , which is based on the edge-weight products along each input-output path (rather than their squares).
|
| 98 |
+
|
| 99 |
+
In this section we analyze the resulting architecture of a sparse network that has been pruned to maximize the path kernel trace. As discussed in the previous section, SynFlow-L2 is a pruning method that has this objective.
|
| 100 |
+
|
| 101 |
+
Consider a network with a single hidden-layer, $f: \mathbb{R}^D \to \mathbb{R}^D$ , with N hidden units and D inputs and outputs. The incoming and outgoing weights $\boldsymbol{\theta}$ of each unit are initialized by sampling from $\mathcal{N}(0,1)$ . Let the number of connections in the unpruned network be M, and let m be the target number of connections in the pruned network, so that the resulting network density is $\rho = m/M$ .
|
| 102 |
+
|
| 103 |
+
The optimization of the path kernel trace selects the m out of M parameters that maximize:
|
| 104 |
+
|
| 105 |
+
$$\sum_{p=1}^{P} \sum_{i=1}^{m} \left( \frac{\pi_p(\boldsymbol{\theta})}{\theta_i} \right)^2 p_i \tag{9}$$
|
| 106 |
+
|
| 107 |
+
In Appendix A.1, we show that this maximization results in a fully-connected network in which only $n \leq N$ of the hidden-layer units remain in the pruned network – all other units and their connections are removed. In other words, the network that maximizes the path kernel trace has the narrowest possible hidden-layer width, given a target network density.
|
| 108 |
+
|
| 109 |
+
We also show (Appendix A.2) that this network architecture maximizes the number of input-output paths P: Given a target density $\rho$ , the maximum number of paths results when each hidden-layer has the same number of units, and the network is fully-connected.
|
| 110 |
+
|
| 111 |
+
Intuitively, the previous results can be justified as follows: with the same weight distribution across all units of a layer, increasing the number of input-output paths P results in higher path kernel trace. Maximizing P with a given number of edges m, however, forces the pruning process to only maintain the edges of the smallest possible set of units at each layer. So, the networks produced by SynFlow and SynFlow-L2 tend to have narrower layers, compared to other pruning methods that do not optimize on the basis of path kernel trace.
|
| 112 |
+
|
| 113 |
+
To examine the previous claim empirically, and in the context of convolutional networks rather than MLPs, Figure 2 compares the number of remaining units at each layer after pruning, using the VGG19 and ResNet20 architectures. The target network density in these experiments is the lowest possible such that the method of Magnitude-Pruning (that can be performed only after training) achieves within 5% of the unpruned network's accuracy. In higher densities there is still significant redundancy, while in lower densities there is insufficient capacity to learn the given task. For a convolutional layer, the width of a layer is the number of channels at the output of that layer. We find that both SynFlow and SynFlow-L2 result in pruned networks with very small width ("bottleneck layers") compared to other state-of-the-art pruned networks of the same density.<sup>1,2</sup> Further, with SynFlow and SynFlow-L2 all layers have approximately the same number of remaining units, i.e., approximately equal width. Note that for the purposes of this analysis (Figure 2), we do not include skip connections for ResNet20 – such connections complicate the definition of "layer width" and paths, but without changing the main result of Figure 2.
|
| 114 |
+
|
| 115 |
+
<sup>1</sup>In SNIP, the widest layers get pruned more aggressively, as shown in (Tanaka et al., 2020). Due to this, SNIP also leads to a decrease in width, but only at the widest layers.
|
| 116 |
+
|
| 117 |
+
<sup>2</sup>GraSP and PHEW are able to preserve the same width as the unpruned network for all the layers. The curves for GraSP (green) and PHEW (blue) overlap with the curve for the unpruned network in Figure 2.
|
| 118 |
+
|
| 119 |
+

|
| 120 |
+
|
| 121 |
+
Figure 3. The effect of increasing the layer width of SynFlow and SynFlow-L2 networks, while preserving the same set of parameters at each layer. The definition of the x-axis "Width Factor" appears in the main text.
|
| 122 |
+
|
| 123 |
+
Several empirical studies have been conducted to understand the effect of network width and over-parametrization on learning performance [\(Neyshabur et al.,](#page-10-0) [2018;](#page-10-0) [Du et al.,](#page-9-0) [2018;](#page-9-0) [Park et al.,](#page-10-0) [2019;](#page-10-0) [Lu et al.,](#page-10-0) [2017\)](#page-10-0). However, the previous studies do not decouple the effect of increasing width from the effect of over-parametrization. Recently, [\(Gol](#page-9-0)[ubeva et al.,](#page-9-0) [2021\)](#page-9-0) examined the effect of network width under a constant number of parameters. That work conducted experiments with layer-wise random pruning. Starting with a fully-connected network, the width of each layer is increased while keeping the number of parameters the same. The experiments of [\(Golubeva et al.,](#page-9-0) [2021\)](#page-9-0) show that as the network width increases the performance also increases. Further, the distance between the Gaussian kernel formed by the sparse network and the infinitely wide kernel at initialization is indicative of the network's performance. As expected, increasing the width after a certain limit without also increasing the number of parameters will inevitably cause a drop in both test and train accuracy because of very low per-unit connectivity (especially with random pruning).
|
| 124 |
+
|
| 125 |
+
We present similar experiments for SynFlow and SynFlow-L2 in Figure 3. For a given network density, we first obtain the layer-wise density and number of active units that result from the previous two pruning algorithms. We then gradually increase the number of active units by randomly shuffling the masks of each layer (so that the number of weights at each layer is preserved). The increase in layer width can be expressed as the fraction $x = (w' - w)/(W - w)$, where $W$ is the layer width of the unpruned network, $w$ is the layer width in the SynFlow (or SynFlow-L2) pruned network, and $w' \geq w$ is the layer width that results through the shuffling method described above. The maximum value $x = 1$ results when $w' = W$.
|
| 126 |
+
|
| 127 |
+
Figure 3 shows the results of these models on CIFAR-10/100 tasks using ResNet20 and VGG19. We can see that as the width increases so does the performance of the sparse network, even though the layer-wise number of edges is the same. Similar results appear in the ablation studies of [\(Fran](#page-9-0)[kle et al.,](#page-9-0) [2021\)](#page-9-0) using SynFlow. That study redistributes the
|
| 128 |
+
|
| 129 |
+
edges of a layer, creating a uniform distribution across all units in the layer – doing so increases the performance of the network (see Appendix [C\)](#page-15-0).
|
| 130 |
+
|
| 131 |
+
Summary: Let us summarize the observations of this section regarding the maximization of the path kernel trace – and the resulting decrease in network width. Even without any training data, pruned networks that result by maximizing the path kernel trace are expected to converge faster and learn more efficiently. As we showed however, for a given density, such methods tend to maximize the number of input-output paths, resulting in pruned networks with very narrow layers. Narrow networks, however, attain lower performance as compared to wider networks of the same layer-wise density. In the next section, we present a method that aims to achieve the best of both worlds: high path kernel trace for fast convergence, and large layer-wise width for better generalization and learning.
|
| 132 |
+
|
| 133 |
+
# Method
|
| 134 |
+
|
| 135 |
+
Given a weight-initialized architecture, and a target number of learnable parameters, we select a set of input-output paths that are conserved in the network – and prune every connection that does not appear in those paths. The selection of conserved paths is based strictly on their initial weights – not on any training data. The proposed method is called *"Paths with Higher Edge Weights"* (PHEW) because it has a bias in favor of higher weight connections. Further, the path selection is probabilistic, through biased random walks from input units to output units. Specifically, the next-hop of each path, from unit i to j at the next layer, is taken with a probability that is proportional to the weight magnitude of the connection from i to j. We show that conserving paths with higher edge weight product results in higher path kernel trace. The probabilistic nature of PHEW avoids the creation of "bottleneck layers" and leads to larger network width than methods with similar path kernel trace. Additionally, the procedure of selecting and conserving input-output paths completely avoids layer collapse.
|
| 136 |
+
|
| 137 |
+
In more detail, let us initially consider a fully-connected
|
| 138 |
+
|
| 139 |
+
<span id="page-5-0"></span>MLP network with L hidden layers and $N_l$ units at each layer (we consider convolutional networks later in this section). Suppose that the weights are initialized according to Kaiming's method (He et al., 2015), i.e., they are sampled from a Normal distribution in which the variance is inversely proportional to the width of each layer: $\theta_{i,j}^l \sim \mathcal{N}(0,\sigma_l^2)$ , where $\sigma_l^2 = 2/N_l$ .
|
| 140 |
+
|
| 141 |
+
First, let us consider two input-output paths u and b: u has been selected via a uniform random-walk in which the probability Q(j,i) that the walk moves from unit i to unit j at the next layer is the same for all j; b has been selected via the following weight-biased random-walk process:
|
| 142 |
+
|
| 143 |
+
$$Q(j,i) = \frac{|\theta(j,i)|}{\sum_{j} |\theta(j,i)|}$$
|
| 144 |
+
(10)
|
| 145 |
+
|
| 146 |
+
where $\theta(j, i)$ is the weight of the connection from i to j.
|
| 147 |
+
|
| 148 |
+
In Appendix A.4 we show that the biased-walk path b contributes more in the path kernel trace than path u:
|
| 149 |
+
|
| 150 |
+
$$\mathbb{E}[\mathbf{\Pi}_{\boldsymbol{\theta}}(b,b)] = 2^{L} \times \mathbb{E}[\mathbf{\Pi}_{\boldsymbol{\theta}}(u,u)] \tag{11}$$
|
| 151 |
+
|
| 152 |
+
As the number of hidden layers L increases, the ratio between the two terms becomes exponentially higher. On the other hand, as the layer's width increases the ratio of the two values remains the same. The reason that PHEW paths result in higher path kernel trace, compared to the same number of uniformly chosen paths, is that the former tend to have higher edge weights, and thus higher $\pi_p(\theta)$ values (see Equation 6). Empirically, Figure 1 shows that PHEW achieves a path kernel trace greater than or equal to SNIP and GraSP, and close to the upper bound of SynFlow-L2.
|
| 153 |
+
|
| 154 |
+
If the PHEW paths were chosen deterministically (say in a greedy manner, always taking the next hop with the highest weight) the path kernel trace would be slightly higher but the resulting network would have "bottlenecks" at the few units that have the highest incoming weights. PHEW avoids this problem by introducing randomness in the path selection process. Specifically, in Appendix A.3 we show that the expected number of random walks through each unit of a layer l is $W/N_l$ , where W is the required number of walks to achieve the target network density. Thus, as long as $W > N_l$ , every unit is expected to be traversed by at least one walk – and thus every unit of that layer is expected to be present in the sparsified network.
|
| 155 |
+
|
| 156 |
+
This is very different from the behavior of SynFlow or SynFlow-L2, in which the width of several layers in the pruned network is significantly reduced. Empirically, Figure 2 confirms that PHEW achieves a larger per-layer width, compared to SynFlow and SynFlow-L2. Additionally, the per-layer width remains the same as the width of the original unpruned network.
|
| 157 |
+
|
| 158 |
+
**Layer-Collapse:** Layer collapse is defined as a network state in which all edges of a specific layer are eliminated,
|
| 159 |
+
|
| 160 |
+
while there are still connections in other layers (Tanaka et al., 2020). Layer collapse causes a disruption of information flow through the sparse network making the network untrainable. SynFlow and SynFlow-L2 have been shown to avoid layer collapse by iteratively computing gradient based importance scores and pruning (Tanaka et al., 2020). PHEW also avoids layer collapse due to its path-based selection and conservation process. Even a single input-output path has one connection selected at each layer, and so it is impossible for PHEW networks to undergo layer collapse.
|
| 161 |
+
|
| 162 |
+
**Balanced, bidirectional walks:** Without any information about the task or the data, the only reasonable prior is to assume that every input unit is equally significant – and the same for every output unit. For this reason, PHEW attempts to start the same number of walks from each input. And to terminate the same number of walks at each output.
|
| 163 |
+
|
| 164 |
+
To do so, we create paths in both directions with the same probability: forward paths from input units, and reverse paths from output units. The selection of the starting unit in each case is such that the number of walks that start (or terminate) at each input (or output) unit is approximately the same. The creation of random-walks continues until we have reached the given, target number of parameters.
|
| 165 |
+
|
| 166 |
+
PHEW in convolutional neural networks: A convolutional layer takes as input a 3D-vector with $n_i$ channels and transforms it into another 3D-vector of $n_{i+1}$ channels. Each of the $n_{i+1}$ units in a layer produces a single 2D-channel corresponding to the $n_{i+1}$ channels. A 2D channel is produced applying convolution on the input vector with $n_i$ channels, using a 3D-filter of depth $n_i$ . Therefore each input from a unit at the previous layer has a corresponding 2D-kernel as one of the channels in the filter. So, even though MLPs have an individual weight per edge, convolutional networks have a 2D-kernel per edge.
|
| 167 |
+
|
| 168 |
+
A random-walk can traverse an edge of a convolutional network in two ways: either traversing a single weight in the corresponding 2D kernel – or traversing the entire kernel with all its weights. Traversing a single weight from a kernel conserves that edge and produces a non-zero output channel. This creates sparse kernels and allows for the processing of multiple input channels at the same unit and with fewer parameters. On the other hand, traversing the entire 2D-kernel that corresponds to an edge means that several other kernels will be eliminated. Earlier work in pruning has shown empirically the higher performance of creating sparse kernels instead of pruning entire kernels (Blalock et al., 2020; Liu et al., 2019). Therefore, *in PHEW we choose to conserve individual parameters during a random-walk rather than conserving entire kernels*.
|
| 169 |
+
|
| 170 |
+

|
| 171 |
+
|
| 172 |
+
Figure 4. Comparison of the Top-1 accuracy for sparse networks obtained using PHEW and other state-of-the-art dataindependent baselines. The mean is shown as a solid line while the standard deviation is shown as shaded area.
|
| 173 |
+
|
| 174 |
+
In summary, PHEW follows a two-step process in convolutional networks: first an edge (i.e., 2D-kernel) is selected using equation [\(10\)](#page-5-0). Then a single weight is chosen from that kernel, randomly, with a probability that is proportional to the weight of the sampled parameter. We have also experimented with the approach of conserving the entire kernel, and we also present results for that case in the next section.
|
2105.03801/main_diagram/main_diagram.drawio
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
<mxfile host="app.diagrams.net" modified="2021-01-20T21:10:41.063Z" agent="5.0 (Macintosh; Intel Mac OS X 11_1_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.141 Safari/537.36" etag="t3eH1LGz6PqLbAun1lLi" version="14.2.4" type="device"><diagram id="PIi5A72SrHZegtxBpqiX" name="Page-1">5Z1fc5s4FMU/jR/XI8RfPabJpvvSmc5kpt3dl47HVmO6xGQIaZx++oVY2PjKcQhFOb4kDxkjbAHnXqTfkWQ88c9v1h+L2e3yU77Q2USKxXriX0ykDCKv+l8XPG4KZBhtCq6LdLEp8nYFV+kvbQqFKb1PF/pu741lnmdlertfOM9XKz0v98pmRZE/7L/te57tH/V2dq2tgqv5LLNLv6aLcmlKvUjtdvyl0+ulOXQi482Om1nzZnMld8vZIn9oFfl/TvzzIs/Lzaub9bnOau0aXTafu3xm7/bECr0qu3xgFZz/G3388ePb8uzLr2/zr1/K1d0fnjDh+DnL7s0lm9MtHxsNivx+tdB1NWLif3hYpqW+up3N670PVdCrsmV5k1VbXvXye5pl53mWF0+f9T+cXYjLsCq/K4v8P93sWeWr6uMfzLF1Uer1s5flbcWqkkznN7osHqu3mA/4wuhrEiwxmw+taJmiZStOTdnM5Mf1tuKdhNULo+JrFG1q5quoPDVFJXdFvVNTNGauqFSnpWiScBc02Rd024zBclRwb0eppAquqDcyRbc5gmtIuUtKuyZ4knrcgdRKUomWNHTMT0LEl5eXbygpurf3Qp+7ourEbvswYK4o9aH4vikMuUtK+iY8k4aOOyf3ksqT65wcW1H3kvpEUh8uqWMz6l7SgEgawCVV3CUNiaQhWtLIsb93L2lEJI3gkjp2o+4ljYmkMVxS7t7Jp3YUbp4i7ubJJ+bJg7unyDHqv/ksHl5R7sNQlnmC+9GI+6STZZ7gfjTiPu1kmSe4H40ck757Sal5gvvRBuL4SkrNE9yPxuznnah5gvvR2DHpu5eUmie4H40dk757Sal5gvvR2PE8iXtJqXmC+9F4dFwKR/14dFwKR/14dFwKR/2m5eErKeVSOOono+NSOOono+NSOOono+NSOOono+NSOOon7Af1CZfCB/UT9oP6FEvhpJ+wN08US+Gkz/67JBaW4kmfvXmiWAon/aYx5yspxVI46Sv25oliKZz0FXvzRLEUTvqKvXkiEAXHUsXeO1GGgmOpYu+dKEPBsVSxN0+UoeBYqtibJ8pQeCxlb54oQ+GxlL15ogyFxlLJ/9kGVFJ0jy/ZP9zA+gIpuseXgr15omug0T2+FOzNE11rgu7xpWDvnuigPrrHl4K9e6KGFN/js3dP1JCiB6KkYO+eqCFFTzlLwd49UUOKXl0qBXv3RA0perx0WzNbSS33BEd9/s8xo+4Jjvqje2opHvW90bknOOp7o3NPcNT3Ruee4KjP/qGQlnuCoz77R0Bb7gmO+t7o3BMe9bm7Jwv14VzanABbSS3Uh3OpZO+eKOrDubRRkK+kFPXhXCrZuyeK+nAulezdE0V9OJdK9u6Joj6cSyV790RRH86lkrt7srgUD1Hs3RPlUjxEcXdPFpfCIaq5TfhKSrkUDlE+e/dEuRQOUT5790S5FA5RPnv3RLkUDlFBBy7Vq8VZ/TOuOylaEuaVuGlZX4+sBdfrtPy7Fn8qQ7P5j3ln/fpibQLztPFoNjZH1AvrZ2CJrtVZ5ffFXB+5IC98JgIthcMDCjdlhc5mZfpz/0QOyW6O8DlPq1Ns8YeaxrHY/QV78ZZCTFVk724OsLk8U+cutAcP03wHkPweU1NVOSuudWlV9ZQlW1l+J3E60He/xPGr27yVOVMhjmdPvfFZF2l1SboYX0YRVhg
shaSYeq16PGw2dTAe/bJJJK1kqqRT6h0nk+U4leiXPTKpG6BWFkbY9OlgsvqmjySNUeIsf2BNjG83MUHSCm7Ys4nxjzUxsZo2P4r4VmkSdjCO/dLkXbJOQNPG65knQXWfBUfyRO39vXHOdHDGr8mZk28MQqsx6BnV8IW7HxrVDua8T0vgIdoB44thGRMN1Q6ER6ECnTIdBh/4pMxmPgqWMvEhU9Mza+LXtDPxGydNh8lpPkmzWVwHS5pkwKRJTjlpOky/80mazZevYEmjBkwadcpJM/B4XNcUQAU2oDO9/YcyXqjIeeQGHvtiF7m4HjMfKHgH6nIev4EHn9jFL6o0j1otXzJQLF+o13Vcu/w48qjjGh7v0XrH9YV6ncf1nY3o2HEdqqekFTmPnHznkQuGihytyHnkBh48YRc5f6jI0YqcR27gEQx2kZNDRY5W5DxyAw8jsIscXTXVO3K0IueRG3hRFrfIqeEM4YGqnEfvnY/EJMfnafpG8oVqnUd1DCuUUClS34b7k3leVZL0HShQp7T4yOuyQqD3quQBv21E76cDS4/lgRSwuKXD4uNqs8jzsi10dZXLT/lC1+/4Hw==</diagram></mxfile>
|
2105.03801/main_diagram/main_diagram.pdf
ADDED
|
Binary file (1.68 kB). View file
|
|
|
2105.03801/paper_text/intro_method.md
ADDED
|
@@ -0,0 +1,65 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Introduction
|
| 2 |
+
|
| 3 |
+
Transformer-based models [@vaswani2017attention] are ubiquitously state-of-art across many natural language processing (NLP) tasks, including summarization. To achieve the best results, the community has trained ever larger transformer models on larger amount of data, and/or more task-specific optimization objectives [@devlin2018bert; @raffel2020exploring; @lewis-etal-2020-bart; @brown2020language]. In long document summarization, the input sequences could be more than an order of magnitude longer than the limits of these transformer models. Although the limits can be extended, training large transformer models on long sequences is expensive and may not be possible on a standard GPU card because of the self-attention mechanism that grows quadratically with sequence length.
|
| 4 |
+
|
| 5 |
+
To tackle the quadratic characteristic, recent works have modified self-attention mechanism and proposed variants of the transformer such that the quadratic complexity is reduced [@tay2020efficient; @kitaev2020reformer; @child2019generating; @beltagy2020longformer; @ainslie-etal-2020-etc; @zaheer2020big]. However, pre-trained weights of the modified models are not readily available. In contrast, standard models such as BERT [@devlin2018bert] or BART [@lewis-etal-2020-bart] have been trained on various target tasks, including text summarization [@liu-lapata-2019-text]. This allows practitioners to achieve good performance with less training time. Thus, we are interested in exploiting pre-trained models for long-span summarization tasks.
|
| 6 |
+
|
| 7 |
+
We study a range of design configurations empirically and theoretically in regards to memory and compute requirements as well as their performance. We propose that long-span dependencies can be handled by two complementary methods. Firstly, inspired by modified self-attention transformers, we exploit standard transformer models by constraining attention mechanism to be local, allowing longer input spans during training. Secondly, because abstractive summarization systems perform content selection implicitly [@nallapati-etal-2016-abstractive; @lebanoff-etal-2020-cascade], to reduce memory and compute requirements an alternative method is to perform content selection explicitly before the abstractive stage. We study content selection during two phases: training time and test time. At training time, we investigate methods to select data for training fixed-span abstractive models. At test time, we extend existing model-based selection methods, and we propose a multitask content selection method that ranks sentences through extractive labelling based module [@cheng-lapata-2016-neural] and attention based module [@see-etal-2017-get]. Ultimately, we explore the combined approach, consisting of local self-attention transformer and content selection for long-document summarization.
|
| 8 |
+
|
| 9 |
+
We conduct our experiments using a number of design configurations on the Spotify open-domain Podcast summarization dataset [@clifton-etal-2020-100000]. This dataset is challenging not only because of its long-span nature, but also because transcribed spoken utterances typically have lower information density [@li-etal-2019-keep; @manakul2020_interspeech]. Furthermore, we carry out experiments on arXiv and PubMed datasets [@cohan-etal-2018-discourse] to further demonstrate and verify the effectiveness of our approach as well as making comparisons to existing approaches. We highlight the strengths and weaknesses of our approach in different resources and tasks. The main contributions of this paper are:
|
| 10 |
+
|
| 11 |
+
- On local self-attention, we show how to exploit a standard transformer model for long-span summarization, and we show good design considerations based on empirical results.
|
| 12 |
+
|
| 13 |
+
- On content selection, we demonstrate the best selection method at training time, and we propose a multitask content selection (MCS) method outperforming baselines at test time.
|
| 14 |
+
|
| 15 |
+
- Our work has set new state-of-the-art results on Spotify Podcast, arXiv and PubMed datasets in the ROUGE scores. Furthermore, with a small-scale GPU card, our approach achieves comparable or superior performance to previous state-of-the-art systems.
|
| 16 |
+
|
| 17 |
+
<figure id="fig:architecture" data-latex-placement="!t">
|
| 18 |
+
<embed src="fig/architecture.pdf" style="width:99.0%" />
|
| 19 |
+
<figcaption>Overview of the combined architecture where we highlight different aspects of this work. <span class="math inline"><em>N</em><sub>0</sub></span> is the original document length, <span class="math inline"><em>N</em></span> is the input length to the generation system, and <span class="math inline"><em>M</em></span> is the summary length.</figcaption>
|
| 20 |
+
</figure>
|
| 21 |
+
|
| 22 |
+
# Method
|
| 23 |
+
|
| 24 |
+
In Table [\[tab:combine_podcast\]](#tab:combine_podcast){reference-type="ref" reference="tab:combine_podcast"}, a performance gain is obtained in all settings by adding MCS. By comparing different configurations with MCS, it can be seen that the gain from MCS in LoBART(8k) system is the lowest. This is because the average length is 5,727, meaning that many Podcasts inputs to LoBART(8k) do not benefit from content selection.
|
| 25 |
+
|
| 26 |
+
CUED-filt, the best single-model system in @manakul2020cued_speech, uses an attention-based content selection at both training and test time, and it is combined with fine-tuned vanilla BART. Our approach outperforms CUED-filt by improved content selection at both training time and test time as demonstrated by BART(1k)-ORC+MCS. Additionally, local self-attention allows training on longer sequences, and our LoBART(4k)-ORC+MCS system has yielded the best results. Lastly, even though LoBART(8k) requires more resource to train, it does not perform as well as LoBART(4k) due to its smaller attention window, and it also has a lower improvement when adding MCS.
|
| 27 |
+
|
| 28 |
+
::: table*
|
| 29 |
+
:::
|
| 30 |
+
|
| 31 |
+
To verify the effectiveness of our systems, we re-train BART(1k) and LoBART(4k) on arXiv and PubMed datasets. []{#section:arxiv_results label="section:arxiv_results"} Our training is different from Ext+TLM [@pilault-etal-2020-extractive] where their abstractive models are trained using inputs extracted from top two sentences in ROUGE recall for each target sentence without padding, similar to ORC$_\text{no-pad}$. Although in 1k setting, ORC$_\text{no-pad}$ yields %AgORC$_\text{no-pad}$ (defined in Section [5.1](#section:training_bart_with_cs){reference-type="ref" reference="section:training_bart_with_cs"}) of only 2.8% on arXiv (12% on PubMed), in 4k setting this is 39% on arXiv (71% on PubMed). Based on the best configurations on podcast data, we train BART(1k) and LoBART(4k) using TRC or ORC$_\text{pad-rand}$ content selection, and we train the hierarchical model on arXiv/PubMed for MCS.
|
| 32 |
+
|
| 33 |
+
**ArXiv.** In Table [\[tab:arxiv_pubmed_result\]](#tab:arxiv_pubmed_result){reference-type="ref" reference="tab:arxiv_pubmed_result"}, both BART(1k)+MCS and LoBART(4k)+MCS outperform all existing systems. To better understand the advantages of our approach, the following systems are compared: CTRLsum versus our BART(1k) baseline; LED and BigBird versus our LoBART(4k) system.
|
| 34 |
+
|
| 35 |
+
CTRLsum extends BART by conditioning it with extracted keywords $\mathbf{v}$ using a BERT-based model, e.g. $p(\mathbf{y}|\mathbf{X},\mathbf{v})$. Their BERT-based model uses sliding window allowing it to extract $\mathbf{v}$ in long sequences, but their BART is still limited to the first 1,024 tokens. As a result, it performs better than BART(1k), but worse than BART(1k)+MCS.
|
| 36 |
+
|
| 37 |
+
LoBART(4k) has a similar architecture to LED(4k) without the global attention pattern for special tokens. Instead, our LoBART(4k) benefits from knowledge transferred from CNNDM and the ORC$_\text{pad-rand}$ training-time content selection, which yields a larger gain when MCS is applied, i.e. the system trained with truncated data has a smaller gain when MCS is applied. Transfer learning comparison and additional results on the impact of ORC$_\text{pad-rand}$ are provided in Appendix [10](#appendix:additional_results){reference-type="ref" reference="appendix:additional_results"}.
|
| 38 |
+
|
| 39 |
+
Compared to BigBird, LoBART(4k) has a longer input span, e.g. 3,072 vs. 4,096. However, BigBird benefits from utilizing more recent summarization specific pre-training Pegasus [@zhang2020pegasus] which is better than our transfer learning. BigBird incorporates a global attention pattern similar to LED, and it also has a random attention pattern. Hence, LoBART without MCS performs worse.
|
| 40 |
+
|
| 41 |
+
Ultimately, we show that adding MCS to either BART(1k) or LoBART(4k) yields a significant improvement, resulting in state-of-the-art results in both settings. Moreover, although the gain from adding MCS is comparable to the gain observed in extending LED(4k) to LED(16k), the content selection method adds less training cost.
|
| 42 |
+
|
| 43 |
+
**PubMed.** Similarly, LoBART(4k)+MCS achieves state-of-the-art results shown in Table [\[tab:arxiv_pubmed_result\]](#tab:arxiv_pubmed_result){reference-type="ref" reference="tab:arxiv_pubmed_result"}. In contrast to the arXiv results, BART(1k)+MCS does not outperform LoBART(4k) nor BigBird, and the gain from MCS is not as high in both 1k and 4k settings.
|
| 44 |
+
|
| 45 |
+
Local attention yields better performance on PubMed, while MCS yields better performance on arXiv. To understand this discrepancy, a fine-grained analysis is conducted.
|
| 46 |
+
|
| 47 |
+
<figure id="fig:ablation_len" data-latex-placement="!ht">
|
| 48 |
+
<figure id="fig:ablation_len_arxiv">
|
| 49 |
+
<embed src="fig/arxiv_ablation_R1_gain.pdf" style="height:8cm" />
|
| 50 |
+
<figcaption>arXiv (Len:Avg=8,584, 90<span class="math inline"><sup>th</sup></span>%=16,108)</figcaption>
|
| 51 |
+
</figure>
|
| 52 |
+
<figure id="fig:ablation_len_pubmed">
|
| 53 |
+
<embed src="fig/pubmed_ablation_R1_gain.pdf" style="height:8cm" />
|
| 54 |
+
<figcaption>PubMed (Len:Avg=3,865, 90<span class="math inline"><sup>th</sup></span>%=7,234)</figcaption>
|
| 55 |
+
</figure>
|
| 56 |
+
<figcaption>ROUGE-1 score relative to that of BART(1k) system evaluated on different partitions by length.</figcaption>
|
| 57 |
+
</figure>
|
| 58 |
+
|
| 59 |
+
In Figure [10](#fig:ablation_len){reference-type="ref" reference="fig:ablation_len"}, we partition the test sets by input lengths, and we evaluate the performance improvement in each partition with respect to the BART(1k) baseline.[^9] The results illustrate that as the input length $N$ increases:
|
| 60 |
+
|
| 61 |
+
- The improvement of systems *with* MCS increases and subsequently plateaus out.
|
| 62 |
+
|
| 63 |
+
- The improvement of systems *without* MCS decreases once the input exceeds the length limit but then plateaus, suggesting that fixed-span systems without content selection perform worse once the maximum fixed-span is reached. For instance, below 4,000 input words, LoBART(4k) without MCS performs better than BART(1k)+MCS on both datasets.
|
| 64 |
+
|
| 65 |
+
Therefore, our MCS method is more effective on arXiv compared to PubMed because the average length of PubMed documents is less than half the average length of arXiv documents.
|
2106.04559/main_diagram/main_diagram.drawio
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
<mxfile host="app.diagrams.net" modified="2020-10-16T20:36:44.112Z" agent="5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.75 Safari/537.36" etag="0gWv8rnfgipfNjj5Npwb" version="13.1.3" type="device"><diagram id="pb0gtx3-ZykmVOhzZo7x" name="Page-1">7Rxtc5s2+Nf4bvsABwgM/ugkbru79NYt3bp+6skgg1pADOTY3q+fJMSr5dipje206bUNevSC9Ly/iIzAbbJ+m8Msek8CFI8sI1iPwN3IsqyJOWY/OGRTQkzPtEtImONAwhrAA/4PSaAhoUscoKIzkBISU5x1gT5JU+TTDgzmOVl1hy1I3H1rBkO0BXjwYbwN/YQDGpVQzzEa+DuEw6h6s2nIngRWgyWgiGBAVi0QmI3AbU4ILZ+S9S2KOfYqvJTz3uzorTeWo5QeMgGjGC+0ybvxOpn/jkhIF76hAVAu8wjjpTyx3C3dVCjIyTINEF/FGIGbVYQpesigz3tXjOoMFtEkZi2TPS5wHN+SmOSsnZKUDbqRb0A5ReudezdrjDBeQiRBNN+wIXKC5gCJxYqPKjSvGqKYRkWVqEWRsWHqliMZQjJDWL+gwRd7kCh7BvoqZm5hCwWMf2ST5DQiIUlhPGugN118NmPuCckkFr8iSjdSGOCSki6O0RrTf/h03ZGtz62eu7VcWTQ2VSNl521N4s3P7b5mmmhV88rz8UM9TTqGA7LMffQUsqT4wjxE9IlxlpoVchRDih+7+1BRVE79QDDbYc1Clt3lIMYY3SXKfclZPb6ot3EEqygEbRxTLjJE7LPhofG/S1J1aIXggikbwLC+bjrZU8h/PvgRSiDrvsfpN5RXi7I9luuWo54QanO/UJ9Agk2vJ8BjhQBbSvkdSnbdlyK7J5RB60AZLO32EUJ4FGmsnbJSZDA9QlaYpKQU+2zAB5gXHWkpV74SaenrKndbWGzjnMJSOzFXLyxXYOjAQAbM0p0OW9RO0ZlMmMpXPIUJ+1ssaBlvmO/4hAXbowH80uvkr8nDOfyF4Yb9ZSc1lE+/8ke+rCH2uIAJjjfl9ISkpBDi3RnSHEN5ihGjRy8QchhyOVQ4+XWrQrYj0M0gd/yZb8zhGHUY2faNNeuxFYN+1zJWs0xJ1LqH/8fxzQCSuE6F82pMef5qQIkd1pqKoSWh24NEKyyfK5/FabyW5nVi69U7qxnNRrubqDUzB5hiaEs7c6DQzxwu9EozrHX0UlMfgty6q8ZrI98Ol9R6JLAbQhib1gpeC16q86avRbxKrded41Yfk/caHrY20Ce7aNa0bwO7HCnHbbHudRrHvitp25d2Jav48tU6HmAd7UPDQHsQM6o5Xe4B9nmNqD1UHAgX3F69XcI8uNYgUDP7aZyLR4HOQOSYpsWK2zUDpowcxmydxTBlfEu48/IWpSiHlFxttK6Bno4dm9uEcse1Q3qegP0yMUitMTv6slGfw2vM8YEacwdNzxOz2y8mndKygdaljOChJD02F3ocSb0XR1Lz6oX0WK/mKIqOj7Z2pqeOfctV5hXgjyUqhLHjk2obN2/GG58i7LPYx/iIxA+fJCzG4JjABUdjxJ2ZHBffMFtoBN4caimZzaNdfipoTr6hXk1KUaaCMQ7Zfu98RnpmucENt6DYh/FUdiQ4CASLq+xvl+1PYIJ7npIGgA68SevPlj22VI6T4+iONZR+uGzY09IOja64Wv3gncmIi6nTPIeb1oCMRzNFa+VeUAR6QZE56dWSnzeePZQ7OG3kNH5x7Gboruc9x8UQrQ8oxwxZXAldigsv6nd4CivVI3wRwYw/4kTc37gRP6dFVl4B4UiDVWOB15z+tTq/h3MUfyAFLu3T3ZxQShKFvqecQ27
kG+4iSvmVkyk/mvVGQAsdxrG2yBHSArJKYwIDndmxups9hDylhv2Y45s1A0jhHBboC/hiG55p6V+z8ETmYtKTSNvSrW0b4bnbJmLi6PZQJmJyLofjTmJW7W38bL6D6/VKRaYifq9hbWawBoveX0wwcUJ1e3QZ/bC7LPaZc5jm2eKIhz/uBdMywi9p9cZid1TxMLuf3X5kDylMREnxz9/fc3uH/W8o54HFp3ezP2d8g8w48PybITZpjJyb8jAOb93NHniN8JFwKsWYbr6YFlPrxv1v73/ji5s/nUZhJNedcYfpah+wpVIcE+imta1Vhqu8bLsGV6pVWj6hx+Sn5RNqhm6M3WEdw70OnzOQotq6szDpsUKpQ4crt7xEBrENt88gHrhw5GAdWrcbhpM0t2vyPPMwk/fcSLj/Hsd9OhLeM36YSNg6voh4oP2tALgCvGNKnU2/JekCByj12w43VjjcP68TrpmAGUPQddQUNtO0xvUF9bPYTHBFlZpaC+5RiRdL21mHZkzARatvlipnUgpcgB+VCoELmiZlhmuEGC3oaMeFN9N9SlscrHOeXKXWFLJ0LqaoNMlHVMRwzy1Bnmvh5+5CXxwqWtcGZNADE66s0nmRPVfPvmTEfOKx3AKLqxRNLLeKCE/4NLGcKB3xglLKLEO4+VGxMU1IGo5EkUycP0fFMuZODQ9aVwIjTEGzH3OugGgOWQwrJnA7+SMiZINg3onXOS/wizYBKpg3EMjT54FwC35IFAgJKSKyEmSON6Oqhsqz2JLyBlmU4ET/IXFQq8/8O5Vh3yuKY5wVuxzGE9+4csxeAt/bdhWBq8jYgv5XAKdzKlTZ+z6O0mDKP7PlfncMiwL7ox3Xp7oXTuu+o124/bUsT435dt5KVTmXsGMDV2B1MyBj47AMyPZKVu9SntNfaejvP1TfMF5ZYS5LQ539Y+KayFpchpOQl+L4P9fTXM+1XffLsmA+hJYhksVIS1CqRcsEpoUWIRhweF4wl0srHkNNrngiMTcNt0NDTXkDVlGlMw1Pd4f6jhlY50oozIQP91chPLjXLIGlTUAvQ6pNFByh+thvsFId2J9eqn5PQ0Vv83m5pP3sBNS3qmPxHcprwkn50WjP0NiNwmix0kSRanIG/BUJYHdm4sSqZfeV+1dVo/QfVHcCVJnIwRSNffwvRXh2HXk0YBG53kcrB9Ha22tdeZsn7aZg3P+que2fWyerK4/kx48tz7j57BHM/gc=</diagram></mxfile>
|
2106.04559/main_diagram/main_diagram.pdf
ADDED
|
Binary file (76.7 kB). View file
|
|
|
2106.04559/paper_text/intro_method.md
ADDED
|
@@ -0,0 +1,76 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Introduction
|
| 2 |
+
|
| 3 |
+
Today a vast amount of knowledge is hidden in structured datasets, not directly accessible to nontechnical users who are not familiar with the corresponding database query language like SQL or SPARQL. Natural language database interfaces (NLDB) enable everyday users to interact with databases [\(Zelle and Mooney,](#page-7-1) [1996;](#page-7-1) [Popescu et al.,](#page-7-2) [2003;](#page-7-2) [Li and Jagadish,](#page-7-3) [2014;](#page-7-3) [Zeng et al.,](#page-7-4) [2020\)](#page-7-4). However, correctly translating natural language to executable queries is challenging, as it requires resolving all the ambiguities and subtleties of natural utterances for precise mapping. Furthermore,
|
| 4 |
+
|
| 5 |
+
quick deployment and adoption for NLDB require zero-shot transfer to new databases without an in-domain text-to-SQL parallel corpus, *i.e.* cross-database semantic parsing (SP), making the translation accuracy even lower. Finally, unlike in other NLP applications where partially correct results can still provide partial utility, a SQL query with a slight mistake could cause negative utility if trusted blindly, or be confusing to users.
|
| 6 |
+
|
| 7 |
+
The recent Spider benchmark [\(Yu et al.,](#page-7-5) [2018a\)](#page-7-5) captures this cross-domain problem, and the state-of-the-art methods merely achieve around 70% execution accuracy at the time of this submission [2](#page-0-1) . Meanwhile, generalization to datasets collected under different protocols is even weaker [\(Suhr et al.,](#page-7-6) [2020\)](#page-7-6). Finally, users generally have no way to know if the NLDB made a mistake except in very obvious cases. The high error rate combined with the overall system's opacity makes it hard for users to trust any output from the NLDB.
|
| 8 |
+
|
| 9 |
+
Our key observation is that our model's top-5 accuracy on Spider is 78.3%, significantly higher than the previous best single-model method at around 68%, and our own top-1 accuracy. Top-5 accuracy is the proportion of times when one of the top five hypotheses from beam-search inference is correct (in execution accuracy evaluation). For top-5 accuracy to be relevant in practice, a nontechnical user needs to be able to pick the correct hypothesis from the candidate list. To this end, we design a feedback system that can unambiguously explain the top beam-search results while presenting the differences intuitively and visually. Users can then judge which, if any, of the parses correctly reflects their intentions. The explanation system uses a hybrid of two synchronous context-free grammars, one shallow and one deep. Together, they achieve good readability for the most frequent
|
| 10 |
+
|
| 11 |
+
<span id="page-0-0"></span><sup>∗</sup>Equal contribution
|
| 12 |
+
|
| 13 |
+
<sup>1</sup> System demo at [https://turing.borealisai.](https://turing.borealisai.com/) [com/](https://turing.borealisai.com/); video at [https://vimeo.com/537429187/](https://vimeo.com/537429187/9a5d41f446) [9a5d41f446](https://vimeo.com/537429187/9a5d41f446)
|
| 14 |
+
|
| 15 |
+
<span id="page-0-1"></span><sup>2</sup><https://yale-lily.github.io/spider>
|
| 16 |
+
|
| 17 |
+
query patterns while achieving near-complete coverage overall.
|
| 18 |
+
|
| 19 |
+
Our system, TURING, is not only interpretable, but also a highly accurate cross-domain NLDB. Our semantic parser is based on the one in [Xu et al.](#page-7-7) [\(2020\)](#page-7-7), which does not handle value prediction like many other previous state-of-the-art models on Spider. Compared to previous executable semantic parsers, we achieve significant gains with a number of techniques, but predominantly by drastically simplifying the learning problem in value prediction. The model only needs to identify the text span providing evidence for the ground-truth value. The noisy long tail text normalization step required for producing the actual value is offloaded to a deterministic search phase in post-processing.
|
| 20 |
+
|
| 21 |
+
In summary, this work presents two steps towards a more robust NLDB:
|
| 22 |
+
|
| 23 |
+
- 1. A state-of-the-art text-to-SQL parsing system with the best top-1 execution accuracy on the Spider development set.
|
| 24 |
+
- 2. A way to relax usability requirement from top-1 accuracy to top-k accuracy by explaining the different hypotheses in natural language with visual aids.
|
| 25 |
+
|
| 26 |
+
As shown in Figure [1,](#page-2-0) TURING's interface has two main components: the database browser showing schema and selected database content, and the search panel where the users interact with the parser. Figure [1](#page-2-0) caption describes the typical user interaction using an example.
|
| 27 |
+
|
| 28 |
+
Behind the front-end interface, TURING consists of an executable cross-domain semantic parser trained on Spider that maps user utterances to SQL query hypotheses, the SQL execution engine that runs the queries to obtain answers, and the explanation generation module that produces the explanation text and the meta-data powering explanation highlighting. The next sections will describe the semantic parsing and explanation modules.
|
| 29 |
+
|
| 30 |
+
The backbone of TURING is a neural semantic parser which generates an executable SQL query T given a user question Q and the database schema S. We follow the state-of-the-art system [\(Xu et al.,](#page-7-7) [2020\)](#page-7-7), but extend it to generate executable SQL query instead of ignoring values in the SQL query, like many other top systems [\(Wang et al.,](#page-7-8) [2019;](#page-7-8) [Guo et al.,](#page-6-0) [2019\)](#page-6-0) on the Spider leaderboard.
|
| 31 |
+
|
| 32 |
+
At a high level, our SP adopts the grammar-based framework following TranX [\(Yin and Neu](#page-7-9)[big,](#page-7-9) [2018\)](#page-7-9) with an encoder-decoder neural architecture. A grammar-based transition system is designed to turn the generation process of the SQL abstract syntax tree (AST) into a sequence of tree-constructing actions to be predicted by the parser. The encoder $f_{\rm enc}$ jointly encodes both the user question $Q = q_1 \ldots q_{|Q|}$ and database schema $S = \{s_1, \ldots, s_{|S|}\}$ consisting of tables and columns in the database. The decoder $f_{\rm dec}$ is a transition-based abstract syntax decoder, which uses the encoded representation $\mathcal{H}$ to predict the target SQL query $T$. The decoder also relies on the transition system to convert the AST constructed by the predicted action sequences to the executable surface SQL query.
|
| 33 |
+
|
| 34 |
+
To alleviate unnecessary burden on the decoder, we introduce two novel modifications to the transition system to handle the schema and value decoding. With simple, but effective value-handling, inference and regularization techniques applied on this transition system, we are able to push the execution accuracy much higher for better usability.
|
| 35 |
+
|
| 36 |
+
Our transition system has four types of action to generate the AST, including (1) ApplyRule[r] which applies a production rule r to the latest generated node in the AST; (2) Reduce which completes the generation of the current node; (3) SelectColumn[c] which chooses a column c from the database schema S; (4) CopyToken[i] which copies a token q<sup>i</sup> from the user question Q.
|
| 37 |
+
|
| 38 |
+
There are two key distinctions of our transition system with the previous systems. First, our transition system omits the action type SelectTable used by other transition-based SP systems [\(Wang](#page-7-8) [et al.,](#page-7-8) [2019;](#page-7-8) [Guo et al.,](#page-6-0) [2019\)](#page-6-0). This is made possible by attaching the corresponding table to each column, so that the tables in the target SQL query can be deterministically inferred from the predicted columns. Second, we simplify the value prediction by always trying to copy from the user question, instead of applying the GenToken[v] action [\(Yin](#page-7-9) [and Neubig,](#page-7-9) [2018\)](#page-7-9) which generates tokens from a large vocabulary or choose from a pre-processed picklist [\(Lin et al.,](#page-7-10) [2020\)](#page-7-10). Both of the changes constrain the output space of the decoder to ease the
|
| 39 |
+
|
| 40 |
+
<span id="page-2-0"></span>
|
| 41 |
+
|
| 42 |
+
Figure 1: TURING system in action: the user selected database "Dog kennels"; the left and top panels show the database schema and table content. The user then entered "What is the average age of the dogs who have gone through any treatments?" in the search box. This question is run through the semantic parser producing multiple SQL hypotheses from beam-search, which are then explained step-by-step as shown. The differences across the hypotheses are highlighted. The tokens corresponding to table and columns are in bold. If there were more valid hypotheses, a "Show more" button would appear to reveal the additional ones.
|
| 43 |
+
|
| 44 |
+
learning process, but the latter change unrealistically assumes that the values are always explicitly mentioned in the question. To retain the generation flexibility without putting excessive burden on the decoder, we propose a conceptually simple but effective strategy to handle the values next.
|
| 45 |
+
|
| 46 |
+
Value prediction is a challenging but crucial component of NLDBs; however, only limited effort has been committed to handling values properly in the current cross-domain SP literature. Value mentions
|
| 47 |
+
|
| 48 |
+
are usually noisy, if mentioned explicitly at all, requiring commonsense or domain knowledge to be inferred. On the other hand, the number of possible values in a database can be huge, leading to sparse learning signals if the model tries to choose from the possible value candidates.
|
| 49 |
+
|
| 50 |
+
Instead of attempting to predict the actual values directly, our SP simply learns to identify the input text spans providing evidence for the values. As mentioned earlier, we introduce the CopyToken action to copy an input span from the user question, indicating the clues for this value. The ground-truth
|
| 51 |
+
|
| 52 |
+
CopyToken[i] actions are obtained from a tagging strategy based on heuristics and fuzzy string matching between the user question and the gold values. As a result, the decoder is able to focus on understanding the question without considering other complexities of the actual values which are difficult to learn. If the values are only implicitly mentioned in the user question, nothing is copied from the user question. We leave the identification of the actual values to a deterministic search-based inference in post-processing, after the decoding process. This yields a simpler learning task as the neural network does not need to perform domain-specific text normalization such as mapping "female" to "F" for some databases.
|
| 53 |
+
|
| 54 |
+
Given the schema, the predicted SQL AST and the database content, the post-processing first identifies the corresponding column type (number, text, time), operation type (like, between, >, <, =, ...), and aggregation type (count, max, sum, ...). Based on these types, it infers the type and normalization required for the value. If needed, it then performs fuzzy-search in the corresponding column's values in the database. When nothing is copied, a default value is chosen based on some heuristics (e.g., when there exist only two elements "Yes" and "No" in the column, the default value is "Yes"); otherwise, the most frequent element in the column is chosen. Searching the database content can also be restricted to a picklist for privacy reasons like previous works (Zeng et al., 2020; Lin et al., 2020).
|
| 55 |
+
|
| 56 |
+
Another benefit of this simple value handling strategy is the ease to explain. The details are presented in the Sec. 4.
|
| 57 |
+
|
| 58 |
+
Our encoder architecture follows Xu et al. (2020). The encoder, $f_{\rm enc}$ , maps the user question Q and the schema S to a joint representation $\mathcal{H}=\{\phi_1^q,\ldots,\phi_{|Q|}^q\}\cup\{\phi_1^s,\ldots,\phi_{|S|}^s\}$ . It contextualizes the question and schema jointly through both the RoBERTA-Large model similar to (Guo et al., 2019), as well as through the additional sequence of 24 relation-aware transformer (RAT) (Wang et al., 2019) layers. As mentioned in Section 3.1, tables are not predicted directly but inferred from the columns, so we augment the column representations by adding the corresponding table representations after the encoding process.
|
| 59 |
+
|
| 60 |
+
We use an LSTM decoder $f_{\text{dec}}$ to generate the action sequence A. Formally, the
|
| 61 |
+
|
| 62 |
+
generation process can be formulated as $\Pr(A|\mathcal{H}) = \prod_t \Pr(a_t|a_{< t},\mathcal{H})$, where $\mathcal{H}$ is the encoded representation output by the encoder $f_{\rm enc}$. The LSTM state is updated following Wang et al. (2019): $m_t, h_t = f_{\text{LSTM}}([\boldsymbol{a}_{t-1} \| \boldsymbol{z}_{t-1} \| \boldsymbol{h}_{p_t} \| \boldsymbol{a}_{p_t} \| \boldsymbol{n}_{p_t}], \boldsymbol{m}_{t-1}, \boldsymbol{h}_{t-1})$, where $m_t$ is the LSTM cell state, $h_t$ is the LSTM output at step $t$, $a_{t-1}$ is the action embedding of the previous step, $z_{t-1}$ is the context representation computed using multi-head cross-attention of $h_{t-1}$ over $\mathcal{H}$, $p_t$ is the step corresponding to the parent AST node of the current node, and $n$ is the node type embedding. For **ApplyRule**[r], we compute $\Pr(a_t = \textbf{ApplyRule}[r]|a_{< t}, \mathcal{H}) = \mathrm{softmax}_r(q(\boldsymbol{z}_t))$ where $q(\cdot)$ is a 2-layer MLP. For **SelectColumn**[c], we use the memory augmented pointer network following Guo et al. (2019). For **CopyToken**[i], a pointer network is employed to copy tokens from the user question Q with a special token indicating the termination of copy.
|
| 63 |
+
|
| 64 |
+
One of the core challenges for cross-domain SP is to generalize to unseen domains without overfitting to some specific domains during training. Empirically, we observe that applying uniform label smoothing (Szegedy et al., 2016) on the objective term for predicting **SelectColumn**[c] can effectively address the overfitting problem in the cross-domain setting. Formally, the cross-entropy for a ground-truth column $c^*$ we optimize becomes $(1-\epsilon)*\log p(c^*)+\frac{\epsilon}{K}*\sum_c\log p(c)$ , where K is the number of columns in the schema, $\epsilon$ is the weight of the label smoothing term, and $p(\cdot) \triangleq \Pr(a_t = \textbf{SelectColumn}[\cdot]|a_{< t}, \mathcal{H})$ .
|
| 65 |
+
|
| 66 |
+
During inference, we use beam search to find the high-probability action sequences. As mentioned above, column prediction is prone to overfitting in the cross-domain setting. In addition, value prediction is dependent on the column prediction, that is, if a column is predicted incorrectly, the associated value has no chance to be predicted correctly. As a result, we introduce two hyperparameters controlling influence based on the action types in the beam, with a larger weight $\alpha>1$ for **SelectColumn** and a smaller weight $0<\beta<1$ for **CopyToken**.
|
| 67 |
+
|
| 68 |
+
The goal of the explanation generation system is to unambiguously describe what the semantic parser understands as the user's command and allow the user to easily interpret the differences across the multiple hypotheses. Therefore, unlike a typical dialogue system setting where language generation diversity is essential, controllability and consistency are of primary importance. The generation not only needs to be 100% factually correct, but the differences in explanation also need to reflect the differences in the predicted SQLs, no more and no less. Therefore, we use a deterministic rule-based generation system instead of a neural model.
|
| 69 |
+
|
| 70 |
+
Our explanation generator is a hybrid of two synchronous context-free grammar (SCFG) systems combined with additional heuristic post-processing steps. The two grammars trade off readability and coverage. One SCFG is shallow and simple, covering the most frequent SQL queries; the other is deep and more compositional, covering the tail of query distribution that our SP can produce for completeness. The SCFG can produce SQL and English explanations in parallel. Given a SQL query, we parse it under the grammar to obtain a derivation, which we then follow to obtain the explanation text. At inference time, for a given question, if any of the SQL hypotheses cannot be parsed using the shallow SCFG, then we move onto the deep one.
|
| 71 |
+
|
| 72 |
+
Using the deep SQL syntax trees allows almost complete coverage on the Spider domains. However, these explanations can be unnecessarily verbose as the generation process faithfully follows the re-ordered AST without 1.) compressing repeated mentions of schema elements when possible 2.) summarizing tedious details of the SQL query into higher level logical concepts. Even though these explanations are technically correct, practical explanation should allow users to spot the difference between queries easily. To this end, we design the shallow grammar similarly to the template-based explanation system in [Elgohary et al.](#page-6-1) [\(2020\)](#page-6-1), which simplifies the SQL parse trees by collapsing large subtrees into a single tree fragment. In the resulting shallow parses production rules yield non-terminal nodes corresponding to 1.) anonymized SQL templates 2.) UNION, INTERSECT, or EXCEPT operations of two templates 3.) or a template pattern followed by ORDER-BY-LIMIT clause. Our shal-
|
| 73 |
+
|
| 74 |
+
low but wide grammar has 64 rules with those nonterminal nodes. The pre-terminal nodes are placeholders in the anonymized SQL queries such as Table name, Column name, Aggregation operator and so on. Finally, the terminal nodes are the values filling in the place holders. The advantage of this grammar is that each high-level SQL template can be associated with an English explanation template that reveals the high level logic and abstracts away from the details in the concrete queries. To further reduce the redundancy, we make assumptions to avoid unnecessarily repeating table and column names. Table. [1](#page-5-0) showcases some rules from the shallow SCFG and one example of explanation. In practice, around 75% of the examples in the Spider validation set have all beam hypotheses from our SP model parsable by the shallow grammar, with the rest handled by the deep grammar. The deep grammar has less than 50 rules. But because it is more compositional, it covers 100% of the valid SQLs that can be generated by our semantic parser. Some sample explanation by the deep grammar can be found in Table. [2.](#page-5-1)
|
| 75 |
+
|
| 76 |
+
Finally, whenever the final value in the query differs from original text span due to post-processing, a sentence in the explanation states the change explicitly for clarity. For example, "*'Asian' in the question is matched to 'Asia' which appears in the column Continent.*"
|
2106.04876/main_diagram/main_diagram.drawio
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
<mxfile host="app.diagrams.net" modified="2021-06-03T10:28:13.227Z" agent="5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.77 Safari/537.36" etag="_Jd9t_Q_QicukbbBUHMH" version="14.7.4" type="device" pages="2"><diagram id="PHMq1JG7C6zlA0abfhDB" name="Page-1">7R1bc6O6+ddk2j7EIwHi8hjn0tPp7s7OyZnptm/YVmx6MLiYbJz99ZW4Cn3C4AtgJ+zsTEAgAd/9Kt/o9+vd3yN3s/oaLqh/o6HF7kZ/uNE0rCOd/eEj7+nIrYFJOrKMvEV2Vznw7P2i2SDKRl+9Bd1WbozD0I+9TXVwHgYBnceVMTeKwrfqbS+hX33qxl1SMPA8d304+i9vEa/SUZugcvw36i1X+ZMxyq6s3fzmbGC7chfhmzCkP97o91EYxunRendPfQ69HC7pvKeaq8WLRTSI20y4D6L/OOZqdfem/fH74+b5q/tldZut8tP1X7MPzl42fs8hEIWvwYLyRdCNPn1beTF93rhzfvWNIZ2NreK1z84wO3wJg/g5m5uvTaOY7mpfGhegYEREwzWNo3d2SzZBN810SkY/2MrB+VZiA6OcWlYCKoieDboZCSyL1UsosYMMUAcATbt4oKEK0DTDhEArSFiEmdYZzPRDYIabYRaFsRt7YcBOHQ7iVRh5vxgcXT+D+Yvn+/ehH0bJ4rquPz3pOhvfxlH4Z8HLehfgJ3aVZh1dgzSrKcCfy8azQ98AwKYLJuay0zCKV+EyDFz/sRydVkm4vOdLGG4yJPyXxvF7JrPd1zisooiBK3r/wedPSH76b/Hawy5bPD17zzHH8CFgDiX/2mKOf9d+vDEwhK/RnO4BV4aG2I2WNN4nCJCaECLqM+L8WX2RsyOVAJa6e3xmA/+k/B0edxs32HIO0dDTazBPmEWmAqaTNvwwZvxFf4X8cdMNjTz2gjQSx7+Xg43SzNvRXJEfw6kzd24v+uFUQ9MmGqkyq43IxCLt+NV0Jl1xrKmQl6bPnjudRexoyY8+rAQ1sFNVYAhhgBHN7FOCWqMEFU3wRsmonygZk6l3UeS+CzdsQi+It8LK3/lASTeOJlmLtu6ImD94AjtI36FmOqqZXrxxqmqyaRIJFmA5niptICW+0dfI5fOm1PfoCzv4HoXMS0lZXUMPdM6ctOg00bFfYgtXNN0wyKIF5WXnmdawCuF2khixbNl5sBTOg0qw67bekRxx+raDl5G78BjAHryIzrM7qbuNhUsCwp7utak2HVT429iQhb89sPDP3ctR+reU/sYQ0h8X3mshjg0k4X6fNMeaRHlcnMsGXscCHcOAzNe7H2zg+e6Py5bZznlkNkaOhIT2QtvqKniBD4r4XLftjXEerSjlrzm0/IXRo38EXuwlhk7i7squkUbYO6OZl8qLCqIYaOIqNlzfW3JUzBmIEg+XA9Cbu/5ddmHtLRapTKdb75c7S5biaMpEEluXTG/IA1+LifFt6fz2EG3KTZUSXRZAl6Vil864xQDYetxtaMIdNejCDmfoT4EvgqTgLDYgvoosQj8Ig5GkoYMNhR3aA0J0wEAwXNuzvIPRn/swil438chC7IqJAQtBDdUzC1l9Wwh7/LoBjQeUG7BF7gND1Ki0UXe8ZANE9OC7HeuD5XK22Q3r27sybQfGyg2Ux8rrImyWTVpMa/DMGhYpviF1TIFzBr+FQEdxotnI5vRq8b/Vdbt2+pxrItHWYYLGNNvJ6bOjCNkCMV8D7ydhaYKWxwnqJmB0+gxEUMMMh+yd0RTsqJ3fxE3novrchVdYnduNG1T4wfzfaxhnlHj74q49nz35LlEh/k/KDZdEcua35TZRZjSlke+sOCl9Anvj9CE1xu1FJ0Z
9d0b9qTv/c5kIgfyWIAxoPxazHFe3kanKmapsZtPqLGWaf4hAUL/Tecg+dbSa2RXbkUQMJjpAWb9Ws8JI7jeujpAjRtaZEeAUAzXR9eRM4PoLKVk5T8JVqSYmSKpnsnV7ojvCv371BgwGPlOf135elRDvnuEdU/LF7NzQFGU0gfxudMbvMDI4ok7pR2vEgB4PLj2wRgx2p2VhsHDUsiLmsBROvAA1qyom6zOa22cESsU5jt6yoK+zKJQG44OP29hbu2lEF7LMmN8qEGqBwJMyRK+KKsou7fnwCSuvgsJ7fZ3laKr4m+Uw++uuOU8Fs+0mOWf/kWqBFbwVJmZUj/iNAYGteR8GL97CZXiHEz8D9TiOHI82FakCZftIZ6QzSLSP7rz4h3CcuTskOy2dHX5S8XV+iCfitD1OUhWx2k37sHYLV0dvW4aUFjKc4BOd1vEyTNGYEs9WSzzfcpfYriKbWNYxLvFlkEDK7YORAIxITb9/+wZF8Rf3ndfkArv5NDPt8JIvORkFBfQZZDLRJAvZsGF1v6o7DduddadppyFK1tIn2tcXijjTkfsKh0ecPoiQvUy92LZtTT81gXwayoZpRlTqxePMH2tEcws0wzhR0ZvyfDsNd9CzOaPIXKA5pZpKZBKTYIv2JTINXfJcDRvGYJUi0+gMMTAadJCu+/IhdZtjH6vbLKcrRLWoKWPLeJstbQa7u92k22MkUXCIhyJldW4P3DK1KmBJPiAA1oBwNbqCKozcHEb+tx/TKrelXsZLYIAWzXI0WNzxbV7Y2dx3t1tvXoU2gGpB6LVGnNPk78r4OLP/C1HUgIF87MRcr9wuyRw1iQlr6uDASoCW2EoTbEo9lR3niQ1VfVEH1DNxyMHteR1ST5vtLVBLC9KoCaD1Q48YI2kbAGLpkyT6r5vEMYs4wqHkibV8O4FyYbPIMfRFnjA21K78LY2f8+I3jDe7G1XdW32Z25keUMlUnLAOI5T7ugwHSJl0+kVXAzJGNinU0GSSH44A27NOwA22gyAkKQmYEquqhazuUrTrsqH2qTOVzViNeYi9tp1sGibVxGtEkTR3bCjtZSl8vk2rYFA2R9nKOIncAI2ETcQMqYav2+aBjup5SHheutC+XO4xnKBgpwZBcV0QxANBkEndVmL3uqBZlZHt4clGRVa8QsFZb2qf2EWPpCQXMZTVe0qJqnUlUVvswtjkEonV07ji+KCJWaSODy2eboOQy/Wc5V1KNULa7QMFVjL0cve52sVqPJNDG6GMHPvyg5QdRGfzewZJ/3y8fkysG6CP7aAGMB1sXibXfbf2p3WraamunWnVDgbXFyq35LDEsKFyo0Xd8BVAVc7sDA3VNmmdo+OS169MLUPGlxyZayuXLHkfbB6Gtu1OlKnirYtndatPYT6rQ3I6Q3mhZDOSxmqKAcoLM5g2lxdaQ3KKTawqzZnoSLPTdrSGlbpW4J2m+pQpl4IgcZUgmfVyDEEWnJGvWBZZJ/uZ7mEPdnKZ5K3XVM/2RN5AfOMjyduxnIljlP2qNliXuebS+3VM8ARmJ8dWSPWWQrKUIwRdQB9kTi89Syyx+rBNtvmYusduhY/ZVrfW7A7TU/IZSZvJHq9cMTLtibypJltt0rPE0RQE2xCsnqekyePG0XL2V97Em0bBhaO/8UNOIgjszlKkdnkPqMFpv9iuRb5SXUSIj2s8Xl25lr4lvxiE0TopKBUuv2W0wK9b6XsmF30aM9F4yz517gVL5Xwer77NQs/8chZ9rlz2gkVC8vw6El4tuchEcbB9YYvmywe0uOEtjBbVp4vTZ8VuLrcSzDVu4Gew1riZlB8TAfILb7vx3QzqXuB7woNf/NCNxReSg/+7NsmGSyKOkQBKApCRSf9Sl7GRdFOWnGkwFOS8yyyM43AtWwhlnPS0GlyEJrZcL533nIjBmj6b54kqZ5JCeOH9VDJGHRnVVjgomC5L57248+raIpPULKfOegrVqw0ZPdVYXUpQ8eZsOAHMNdG
dBeiOIAPQHVbk6szuDEy4Wcqor69dXI/6+pMTwEfS1yYGyhoKzX6Vdf1O5udS1ico5tY1QUhdfiSpd1GTq3R2jXKuUeVXprPNPNxSKmzYWNezwq7/zb5RYV+rvD6Hwm5VzXdJxDESQL3C/jmUwu6kTpFYcuWaTSYwtt6vElcVSIxK/AMqcSL/dpGJsIL6elbjqoKKUY1ftxQf1fgnJ4CPrcZtaatnvkW/Kkferx5X1fRcdOS8odWmTfD8MH19AfHzs//MgClX1zjKxpd+lXpeSjQq9Q8k00el/skJ4GMrdYxATbmBB9fqpqru7aq1+qjTW1AiqMEkF6DUVRVtEmjH/aG62x/KzH/p+eT9odhKve8PZZ6hGfpT9WEdjW7Qh2Vivbc+LOFZnfZhmapynR7JqabQ2zq89+TobW/bUnSLKnEz0zcX3qLiGFLc5egicbD/aN8dWHn7xDDbMqOJYzuHUe75NmdWMcE5abltx8Owv1dhwvKFT7hhswm2rtRhTVO/GzabMB/6CRFjOxgW6XaIG3YahdyfLIUsA9Uq/RVh/fH/</diagram><diagram name="Page-2" id="hLsIlT6B3yI5sc1SyKdn">7Vzbdps4FP0aP8YLJK6PsXPpTKedrvFcMn3pkkG2aTFiZDm28/UjgbiKxE5qDEncl8KREEL7nK2jLTkDOF5ubymKF5+Ij8MB0PztAF4NAIC6pfH/hGUnLbY0zGngpya9MEyCByyNWbV14ONVpSIjJGRBXDV6JIqwxyo2RCnZVKvNSFh9a4zmWDFMPBSq1n8Cny1Sq2Nqhf0DDuaL7M26JkuWKKssDasF8smmZILXAzimhLD0arkd41AMXjYu38jXv7+uoRvf/j5jvz+sv9/++uEibezmOY/kn0BxxF7c9Prjhf3tL2js3M+bO8v4MZ/dbeQj2j0K13K85LeyXTaAlKwjH4tG9AEcbRYBw5MYeaJ0w12G2xZsGcpiShhiAYn4rauJIkKDBxIxJCoIwywIwzEJCU0ahxDe3EDI7StGyY8cIWGZ8ccmshuHjoCsd48pw9sS/nJEbjFZYkZ3vIosvXDkCEjv1g1DjsCmcBY984BF2VFM6aPSP+d52wUG/ELC8AxIdAUB7HOPlreEsgWZkwiF14V1VGAkxrio8xshsUTmO2ZsJ8MTrRmp4saHle7uxPNDM7v9t1x2tZWNp3e7DE4OUglOLfl3bDhXZE09/MSQScgYonPMnqpnpBXFgD7pHRSH3I3vqxRydKSBEnyX1xNu+IhFH663MYpWIpaAdrOOvCSs6q7BOSkWl4xHIn4g4nWjGNOAdxDTsv1LYdwXw7NgizMif0lMT5Hn+F3GtG4ObbMa1rBk2xfZljtsK7ZhA91aIX/vaEr51VxcvTMC1t0qUqauKzAB65QEbHRMwJpulylYG1qOu4eGk7tSiPeIm80DudnuFTWbSqBOskhdcVquuIf131qkgMkoXqwSgC95Bd2Kt0VhEd2ylfX05Y1EUdYM/7ikpWrjwpr08hiM8jS7l0oANAzTP8DB5L2cYay8h8dnFwhq7GI4UGEXo4FcDLu1OcA653fP5RD70Pzu6CSSPHpJKdqVKsQkiNiq1PIXYSi8zjSsWvZh1JZmz3yAX6R9KLwu/5iXO6KtcNxnvKZIPDfCYYBn/OILJXyFnGYUQLvCHvH5/NJrPrFb5BNDqwGVyyP7kkpTgy3xidOQU7aaQc4p8gM+sFcBxZ6sidGKlYpKKN6MwQiM+pd4GtCoJ55Ox4mne54ZnjszZCrj/qnB6cXUkOuOBdPr7TO9rsp8ny7vuGFy+We/ydxtkcxtvcYAB5O5lWnExxf/dAWpltm8Z7Rsm6BOy1bHtKyrOt0vUcCCJFNKtLq6hANM3mdtGqRMUAGPDwOrIoTCYC7g8fhIJvKcGKzAQ+GlLFgGvp9yPeYLQzRNmhLQSbLh7ZqjgXkl2uL0viqUu640HdOsY2grGNoNEMLWwkoV3q63
MU7C6BEMdVdE/vsF0arFoWWqGOZ7ZacBsXfCnOM4r1eY01+nMqefpbm3Kc2Z3UtzWarRo/2ZfPnc0TTgKlM56DodU3WrMaF0HbPzZP4YirqtzOZqVn3i2bxJt0pxE0NyCANDKBhYS2JSC4MIX2Q9F6W846bKz0PVOZ4y8U9Le/NI7L8H54GwztRQG6p76c2qZ1ve4zZ4T1eqZ8+W0BbUq3gZQNU1mpZfrVF2foKuo8y9mrXnMmevlc3srNLe9DxDrWNlE1j1PSyzomzuf8DS7JqntSCFZt99dsUWXPGteKJ5Ek9UZ6wOPRG8MVfsx0mAn3dF4FgncEXYrSsONc2tuqNrvF5h7fCZW3sjTmrLT27XSY2GlP8Q3W+GlkG4SxeFCxzeY7EcS5eEtcWh1BHSwy7ypzCHyXa9PocdoikOR8j7MU9iNqsSkQh3KEkYTs3rdKvpiHaTtmS1JwECVV7+A3uEf9dZXXoMynyLNs/kLVXKPa26BLo+ZanMaMB9xYe4waEnMI8/pf2cG6ha8QSH4meGr4rAu4prpyYkwQYhyTLVsDZaC2tVND7jeTCejm0MQf2HUUDLbXtRbW/WVeXc86y7D00I+zbrQvWcoQLNmz695tbPhxvGgT86bE17zzYDykefVixYonS79HyS7Wk8QRVPEzTsm8KTxhhQ8MyPnJTOm4SPHkNBSxFk0XQVJ/di77SpgYVaVT0J0fSKD3wQeJtjEs0CH3FPUB98t/5kZflxzg9NR4714+yk8tvij1OkKkzxJz7g9f8=</diagram></mxfile>
|
2106.04876/main_diagram/main_diagram.pdf
ADDED
|
Binary file (27.4 kB). View file
|
|
|
2106.04876/paper_text/intro_method.md
ADDED
|
@@ -0,0 +1,259 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Introduction
|
| 2 |
+
|
| 3 |
+
The AES algorithm [@AES] is based on the key expansion function $f$, which operates on a random $256$-bit initial key $$\begin{equation}
|
| 4 |
+
f : \{0,1\}^{256} \rightarrow \{0,1\}^{1920}
|
| 5 |
+
\end{equation}$$
|
| 6 |
+
|
| 7 |
+
$f$ is computed in iterations, also known as rounds. In each iteration, $128$ bits of the expansion are calculated from the previous bits. The calculation consists of both linear and non-linear operations. The non-linear ones are called the Rijndael substitution box or S-box for short.
|
| 8 |
+
|
| 9 |
+
The Rijndael S-box function is described in detail in Chapter 4.2.1 of [@AES] and it is usually implemented as a look-up-table. It is composed of two transformations: (i) an affine transformation and (ii) the Nyberg S-box transformation [@nyberg1991perfect]. The Nyberg S-box transformation is a mapping of an input vector to its multiplicative inverse on the Rijndael finite field: $x \rightarrow x^{-1}$ in $GF(2^8)$. This transformation is known as a perfect nonlinear transformation and satisfies certain security criteria.
|
| 10 |
+
|
| 11 |
+
We will use the following notations:
|
| 12 |
+
|
| 13 |
+
1. Denote the expanded key bits as $\hat{w} := (w_0, .. ,w_{n-1})$, where $n$ is the size of the expanded key, which is equal to $1920$. Denote the byte $w_i,...,w_{i+7}$ as $W_i$, and the double word $w_i,...,w_{i+31}$ as $W'_i$, so $W'_i(j) = w_{i+j}$.
|
| 14 |
+
|
| 15 |
+
2. Let $S : \{0,1\}^8 \rightarrow \{0,1\}^8$ be the Rijndael S-box. We can extend the definition of $S$ to an input vector of $32$ bits, where the result is obtained by applying $S$ on each byte separately.
|
| 16 |
+
|
| 17 |
+
3. $c = c_1,...,c_{10}$ is a vector of fixed values that is defined in the RCON table which is given in [@AES]. This constant is used in the key expansion function $f$.
|
| 18 |
+
|
| 19 |
+
4. $R$ is the following rotation function: $$\begin{equation}
|
| 20 |
+
R(w_1,...,w_8,w_9,...,w_{32}) = (w_9,...,w_{32},w_1,...,w_8)
|
| 21 |
+
\end{equation}$$ which is used in the key expansion function $f$.
|
| 22 |
+
|
| 23 |
+
5. $k$ is the initial key size $256$, and $b$ is the block size $128$.
|
| 24 |
+
|
| 25 |
+
6. $\%$ is the modulo operator and $\oplus$ is XOR operator.
|
| 26 |
+
|
| 27 |
+
7. For each key index $i$, we denote the round number as $r(i) = \lfloor\frac{i}{b}\rfloor$, and the double word number as $d(i) = \lfloor\frac{i}{32}\rfloor$
|
| 28 |
+
|
| 29 |
+
The key expansion function is critical to understanding our method. Here we describe the constraints that this function induces on the key bits. For the $i$-th bit in the key, the constraints are given by:
|
| 30 |
+
|
| 31 |
+
1. $\forall i: k \leq i < n , i \% b \leq 31, r(i) \% 2 = 0:$ $$\begin{equation}
|
| 32 |
+
\label{eq:Sbox_with_rotation}
|
| 33 |
+
w_i = w_{i - k} \oplus S(R(W'_{d(i-32)}))(i\%b) \oplus c_{\frac{r(i)}{2}}
|
| 34 |
+
\end{equation}$$
|
| 35 |
+
|
| 36 |
+
2. $\forall i: k \leq i < n , i \% b \leq 31, r(i) \% 2 = 1:$ $$\begin{equation}
|
| 37 |
+
\label{eq:Sbox_without_rotation}
|
| 38 |
+
w_i = w_{i - k} \oplus S(W'_{d(i-32)})(i\%b)
|
| 39 |
+
\end{equation}$$
|
| 40 |
+
|
| 41 |
+
3. $\forall i: k \leq i < n , i \% b > 31 :$ $$\begin{equation}
|
| 42 |
+
\label{eq:XOR_equation}
|
| 43 |
+
w_i = w_{i - k} \oplus w_{i -32}
|
| 44 |
+
\end{equation}$$
|
| 45 |
+
|
| 46 |
+
Note that each equation contains three XOR operations between variables, and in some of the equations there is a XOR with a constant value.
|
| 47 |
+
|
| 48 |
+
A deep learning decoder for error correcting codes with a belief propagation algorithm was introduced in [@nachmani2016learning]. The decoding process uses the well-known belief propagation method and adds learnable weights to the algorithm. Specifically, they add weights to the edges in the Trellis graph. For a linear block code with $k$ information bits and $n$ output bits, the parity check matrix of the linear block code $H$ has a size of $(n-k) \times n$.
|
| 49 |
+
|
| 50 |
+
The deep neural Belief propagation algorithm that was introduced in [@nachmani2016learning] has an input layer of $n$ bits. In the architecture that is defined in [@nachmani2016learning] there are two types of hidden layers which are interleaved: (i) variable layer for odd index layer $j$ and (ii) check layer for even index layer $j$.
|
| 51 |
+
|
| 52 |
+
For notational convenience, we assume that the parity check matrix $H$ is regular, meaning that the sum over each row and column is fixed and denoted by $d_v$ and $d_c$ respectively. Each column of the parity check matrix $H$ corresponds to one bit of the codeword and obtains $d_v$ variable nodes in each variable layer. Therefore, the total number of *variable processing units* in each variable layer is $E=d_v \cdot n$. Similarly, each check layer has $E=(n-k)\cdot d_c$ *check processing units*.
|
| 53 |
+
|
| 54 |
+
During the decoding process, the messages propagate from the variable layer to check layers iteratively, where the input to the network is the log likelihood ratio (LLR) $\ell \in \mathbb{R}^{n}$ of each bit: $$\begin{equation}
|
| 55 |
+
\ell_v = \log\frac{\Pr\left(c_v=1 | y_v\right)}{\Pr\left(c_v=0 | y_v\right)},
|
| 56 |
+
\end{equation}$$ where $\ell_v$ is the log likelihood ratio for each received signal $y_v$ and $c_v$ is the bit that we want to recover.
|
| 57 |
+
|
| 58 |
+
Denote $x^j$ as the vector messages that propagate in the Trellis graph. For $j=1$ and for odd $j$, the computation in each variable node is: $$\begin{equation}
|
| 59 |
+
\label{eq:odd}
|
| 60 |
+
x^{j}_e = x^{j}_{(c,v)} = \tanh \left(\frac{1}{2}\left(l_v + \sum_{e'\in N(v)\setminus \{(c,v)\}} w_{e'}x^{j-1}_{e'}\right)\right)
|
| 61 |
+
\end{equation}$$ where $N(v)=\{(c,v) | H(c,v)=1\}$ is the set of all edges that connected to $v$ and each variable node indexed the edge $e=(c,v)$ in the Tanner graph. $w_e$ is a set of learnable weights.
|
| 62 |
+
|
| 63 |
+
For even layer $j$, each check layer performs this computation:
|
| 64 |
+
|
| 65 |
+
$$\begin{equation}
|
| 66 |
+
\label{eq:even}
|
| 67 |
+
x^{j}_e = x^j_{(c,v)} = 2\,\mathrm{arctanh} \left( \prod_{e'\in N(c) \setminus \{(c,v)\}}{x^{j-1}_{e'}}\right)
|
| 68 |
+
\end{equation}$$ where for each row $c$ of the parity check matrix $H$, $N(c)=\{(c,v) | H(c,v)=1\}$ is the corresponding set of edges.
|
| 69 |
+
|
| 70 |
+
Overall, in the deep neural network that is proposed in [@nachmani2016learning] there are $L$ layers from each type (i.e. variable and check). The last layer is a marginalization layer with a sigmoid activation function which outputs $n$ bits. The $v$-th output bit is given by:
|
| 71 |
+
|
| 72 |
+
$$\begin{equation}
|
| 73 |
+
o_v = \sigma \left( l_v + \sum_{e'\in N(v)}\bar{w}_{e'} x^{2L}_{e'} \right),
|
| 74 |
+
\label{eq:base_final}
|
| 75 |
+
\end{equation}$$ where $\bar{w}_{e'}$ is another set of learnable weights. Moreover, in each odd layer $j$ marginalization is performed by: $$\begin{equation}
|
| 76 |
+
o^{j}_v = \sigma \left( l_v + \sum_{e'\in N(v)}\bar{w}_{e'} x^{j}_{e'} \right)
|
| 77 |
+
\label{eq:j_final}
|
| 78 |
+
\end{equation}$$
|
| 79 |
+
|
| 80 |
+
The loss function is cross entropy on the error after each $j$ marginalization: $$\begin{equation}
|
| 81 |
+
\mathcal{L}=-\frac{1}{n}\sum_{h=0}^{L}\sum_{v=1}^{n} c_{v}\log(o^{2h+1}_{v})+(1-c_{v})\log(1-o^{2h+1}_{v})
|
| 82 |
+
\label{eq:loss}
|
| 83 |
+
\end{equation}$$ where $c_{v}$ is the ground truth bit.
|
| 84 |
+
|
| 85 |
+
<figure id="fig:arch" data-latex-placement="t">
|
| 86 |
+
<img src="figures/training_scheme.png" />
|
| 87 |
+
<figcaption>An overview of our method for neural cold boot attack. The input is the initial key of <span class="math inline">256</span> bits, which the AES key expansion function <span class="math inline"><em>f</em></span> expands to a <span class="math inline">1920</span>-bit key. The expanded key is corrupted by the cold boot model. The corrupted key is inserted into an S-box neural network <span class="math inline"><em>S</em><sub><em>n</em><em>n</em></sub></span> and a neural belief propagation decoder. The neural belief propagation decoder is constructed from a novel formalization of the AES key expansion function. Then the most accurate <span class="math inline"><em>n</em><sub><em>l</em></sub> + <em>n</em><sub><em>h</em></sub></span> bits are selected and inserted, together with the corrupted key, into a MAX-SAT solver. The MAX-SAT solver produces the corrected AES key.</figcaption>
|
| 88 |
+
</figure>
|
| 89 |
+
|
| 90 |
+
Our architecture contains two components: (i) a variant of the neural belief propagation decoder with neural S-box layers and (ii) a Partial MAX-SAT solver. The proposed model is depicted in Figure [1](#fig:arch){reference-type="ref" reference="fig:arch"}. The input to the neural belief propagation is the corrupted bits $l = l_0,..,l_{n-1}$ and it predicts an approximation for the original key $o = o_0,..,o_{n-1}$. Formally, the value of the $i$-th bit in the original key is $1$ with an approximated probability of $o_i$. The input to the Partial MAX-SAT solver is a CNF formula, part of which is defined by $o' \subset o$, where the probabilities in $o'$ correspond to bits whose values the network has high confidence in (approximately 99$\%$). The output of the Partial MAX-SAT solver is the estimation of the desired key.
|
| 91 |
+
|
| 92 |
+
In contrast to previous cold boot attack methods, the input of our method is a floating-point vector over $[0,1]$ instead of a binary input in $\{0,1\}$. In this way, one can better express the decay model of the memory. In practice, this input can be measured according to the voltage, the memory sector, or the amount of time that elapsed from shutting down the power to the time that the bits were read.
|
| 93 |
+
|
| 94 |
+
The belief propagation neural network is defined by a parity check matrix $H$. However, the equations in Eq. [\[eq:Sbox_with_rotation\]](#eq:Sbox_with_rotation){reference-type="ref" reference="eq:Sbox_with_rotation"},[\[eq:Sbox_without_rotation\]](#eq:Sbox_without_rotation){reference-type="ref" reference="eq:Sbox_without_rotation"} are not linear, and cannot be directly expressed with a parity check matrix $H$. To convert these equations to linear form, one can change the variables: namely, use $W_t^{s'}:= S(W_t^{'})$ instead of $W_t^{'}$ in the equations, and use the fact that Eq. [\[eq:Sbox_with_rotation\]](#eq:Sbox_with_rotation){reference-type="ref" reference="eq:Sbox_with_rotation"},[\[eq:Sbox_without_rotation\]](#eq:Sbox_without_rotation){reference-type="ref" reference="eq:Sbox_without_rotation"} are linear in $W_t^{s'}$. The S-box transformation is defined only on boolean vectors, but the belief propagation neural network uses fractional values. Therefore, to obtain a differentiable and continuous version of the Rijndael S-box, we first train a neural network $S_{nn}$ to mimic it: $$\begin{equation}
|
| 95 |
+
S_{nn} : x_1 , ... , x_8 \rightarrow y_1 , ... , y_{256}
|
| 96 |
+
\end{equation}$$ where $x_i \in [0,1]$ and $y_i \in [0,1]$. The network has three fully connected layers with $512$ ReLU activations. It is trained with a cross entropy loss function. An $argmax$ operation is performed on the output $y$, in order to get $z_1 , ... , z_{8}$, where it achieves $100\%$ accuracy. We can extend the definition of $S_{nn}$ to an input vector of 32 bits, where the result is obtained by applying $S_{nn}$ on each byte separately.
|
| 97 |
+
|
| 98 |
+
The S-box layer performs the transformation of the variables, which is essential to obtain a linear form. The neural S-box layer is constructed from a combination of neural S-boxes; the input to it is a vector $\hat{x} \in [-1,1]^{n}$ and the output is a vector $\hat{s} \in [-1,1]^{n+32\cdot13+1}$, which is obtained by concatenation of the input with an additional vector of size $13\cdot32$ (there are 13 additional rounds in the AES-256 key expansion, and in each round 4 S-boxes are calculated). The additional vector is calculated by applying the neural S-box on the corresponding bits of the input ($\hat{x}$). Formally, $\hat{s}$ is defined as follows:
|
| 99 |
+
|
| 100 |
+
1. $\forall i \in [0,n-1] : \hat{x}_i = \hat{s}_i$
|
| 101 |
+
|
| 102 |
+
2. $\forall i \in [n,(n+13\cdot32)-2] , i \% 32 =0:$
|
| 103 |
+
|
| 104 |
+
$(\hat{s}_i, .., \hat{s}_{i+31})=S_{nn}(\hat{x}_{si(i)}, .., \hat{x}_{si(i)+31})$ where $si(i)=k+b\cdot\frac{i-n}{32}-32$
|
| 105 |
+
|
| 106 |
+
3. The last bit in $\hat{s}$ is $1$ (namely $\hat{s}_{n+13\cdot 32-1} = 1$) and it is used as a bias factor.
|
| 107 |
+
|
| 108 |
+
So, in total each S-box layer is constructed from $4 \cdot 13$ neural S-box instances.
|
| 109 |
+
|
| 110 |
+
In order to predict the values of the AES key from the corrupted keys, a naive way is to search the key that is close to the corrupted one over the large space of the existing key.
|
| 111 |
+
|
| 112 |
+
However, when the decay percentage is high, the search space is extremely large, and by design, the key expansion function provides resistance against attacks in which part of the cipher key is known, see Chapter 7.5 in [@AES].
|
| 113 |
+
|
| 114 |
+
Instead, due to the resemblance of the AES keys and the key role of the XOR operation, we rely on network-based error correcting code methods that are suitable for block ciphers. Such codes often employ expansion functions, which are, however, linear.
|
| 115 |
+
|
| 116 |
+
To this end, by changing the variables and approximate the S-box outputs, we convert the key expansion constraints (Eq. [\[eq:Sbox_with_rotation\]](#eq:Sbox_with_rotation){reference-type="ref" reference="eq:Sbox_with_rotation"}, [\[eq:Sbox_without_rotation\]](#eq:Sbox_without_rotation){reference-type="ref" reference="eq:Sbox_without_rotation"}, [\[eq:XOR_equation\]](#eq:XOR_equation){reference-type="ref" reference="eq:XOR_equation"}) to parity check matrix $H$ (the specific values of the matrix will be defined in the following subsection).
|
| 117 |
+
|
| 118 |
+
The original neural belief propagation is explained in the introduction. The architecture consists of $L$ iterations, in which each iteration contains variable and check layers, and the input of each layer is the output of the previous layer (or the global input in layer $1$). However, to execute the variable transformation, we modify the architecture as follows: First, we add an S-box layer after each check layer. Second, we modify the layer structure: in Eq. [\[eq:odd\]](#eq:odd){reference-type="ref" reference="eq:odd"}, instead of using $l$, the $\log$ likelihood of the signal, we use $q$, the output of the marginalization layer of the previous iteration, after going through the S-box layer. In Figure [2](#fig:BPNN_with_Sboxs){reference-type="ref" reference="fig:BPNN_with_Sboxs"} we depict the architecture of the modified neural belief propagation.
|
| 119 |
+
|
| 120 |
+
<figure id="fig:BPNN_with_Sboxs" data-latex-placement="t!">
|
| 121 |
+
<img src="figures/BPNN_neural_sbox_variant_1.png" />
|
| 122 |
+
<figcaption>An overview of the modified neural belief propagation decoder. The input is the corrupted key of 1920 bits. Each layer of the neural belief propagation receives two vectors: (i) the output of the previous belief propagation layer (x); (ii) the output of the values from the marginalization layer, after going through the S-box layer (q).</figcaption>
|
| 123 |
+
</figure>
|
| 124 |
+
|
| 125 |
+
[]{#subsection:Tailoring_BPNN label="subsection:Tailoring_BPNN"}
|
| 126 |
+
|
| 127 |
+
Denote the S-box mimicking network output, given by a vector $W_i$, as $Z_i = (z_i, .. ,z_{i+7}) := S_{nn}(W_i)$. We denote the concatenation of $Z_i,Z_{i+8},Z_{i+16},Z_{i+24}$ as $\hat{Z_i}$. We can rearrange the constraints in Eq. [\[eq:Sbox_with_rotation\]](#eq:Sbox_with_rotation){reference-type="ref" reference="eq:Sbox_with_rotation"}, [\[eq:Sbox_without_rotation\]](#eq:Sbox_without_rotation){reference-type="ref" reference="eq:Sbox_without_rotation"}, [\[eq:XOR_equation\]](#eq:XOR_equation){reference-type="ref" reference="eq:XOR_equation"} as follows:
|
| 128 |
+
|
| 129 |
+
1. $\forall i: k \leq i < n , i \% b \leq 31, r(i) \% 2 = 0:$ $$\begin{equation}
|
| 130 |
+
\label{eq:NN_With_rotations}
|
| 131 |
+
0 = w_i \oplus w_{i - k} \oplus S_{nn}(R(W^{'}_{d(i-32)}))(i\%b) \oplus c_{\frac{r(i)}{2}}
|
| 132 |
+
\end{equation}$$
|
| 133 |
+
|
| 134 |
+
2. $\forall i: k \leq i < n , i \% b \leq 31, r(i) \% 2 = 1:$ $$\begin{equation}
|
| 135 |
+
\label{eq:eq_bb}
|
| 136 |
+
0 = w_i \oplus w_{i - k} \oplus S_{nn}(W^{'}_{d(i-32)})(i\%b)
|
| 137 |
+
\end{equation}$$
|
| 138 |
+
|
| 139 |
+
3. $\forall i: k \leq i < n , i \% b > 31 :$ $$\begin{equation}
|
| 140 |
+
\label{eq:sim_xor}
|
| 141 |
+
0 = w_i \oplus w_{i - k} \oplus w_{i -32}
|
| 142 |
+
\end{equation}$$
|
| 143 |
+
|
| 144 |
+
We define $x$ as the concatenate of $w$ and $\hat{Z}_i$ : $x = (w_0,..,w_{n-1},\hat{Z}_{k-32},\hat{Z}_{k-32+b},\hat{Z}_{k-32+2b}..,\hat{Z}_{n-32-b})$. By considering the XOR operation as the addition operator over $\{0,1\}^2$, assuming for simplicity that $R$ is the identity function, so $S_{nn}(R(\hat{W}_{d(i-32)})) = \hat{Z_i}$ and replacing $S_{nn}(W_{i})$ with $\hat{Z_i}$, one can transform equations [\[eq:NN_With_rotations\]](#eq:NN_With_rotations){reference-type="ref" reference="eq:NN_With_rotations"}, [\[eq:eq_bb\]](#eq:eq_bb){reference-type="ref" reference="eq:eq_bb"}, [\[eq:sim_xor\]](#eq:sim_xor){reference-type="ref" reference="eq:sim_xor"} to a matrix form using a matrix $H'$ and a vector $u$, such that: $$\begin{equation}
|
| 145 |
+
\label{eq:H_with_bias}
|
| 146 |
+
H'x+u=0
|
| 147 |
+
\end{equation}$$ where $u$ is a constant vector that consist of the RCON values $c_i$ and zeros.
|
| 148 |
+
|
| 149 |
+
$\forall i,j : 0\leq i < n-k, 0\leq j < (n+32(\frac{n-k}{b}))$ $$\begin{equation}
|
| 150 |
+
\label{eq:define_H}
|
| 151 |
+
H'(i,j)= \begin{cases}
|
| 152 |
+
1, \text{if $i \% b > 31$ , $j = i$ }\\
|
| 153 |
+
1, \text{if $i \% b > 31$ , $j = i+k$ }\\
|
| 154 |
+
1, \text{if $i \% b > 31$ , $j = i+k-32$ }\\
|
| 155 |
+
1, \text{if $i \% b \leq 31, r(i) \% 2 = 1 $ , $j = i$ }\\
|
| 156 |
+
1, \text{if $i \% b \leq 31, r(i) \% 2 = 1 $ , $j = i+k$ }\\
|
| 157 |
+
1, \text{if $i \% b \leq 31, r(i) \% 2 = 1 $ , $j=t(i+k-32)$ }\\
|
| 158 |
+
1, \text{if $i \% b \leq 31, r(i) \% 2 = 0 $ , $j = i$ }\\
|
| 159 |
+
1, \text{if $i \% b \leq 31, r(i) \% 2 = 0 $ , $j = i+k$ }\\
|
| 160 |
+
1, \text{if $i \% b \leq 31, r(i) \% 2 = 0 $ , $j = t(i+k-32)$ }\\
|
| 161 |
+
0, \text{otherwise}\\
|
| 162 |
+
\end{cases}
|
| 163 |
+
\end{equation}$$ where $t(i) = n+32 r(i)+i\%b$; this function replaces the $w_i$ values with their corresponding $z_i$ values. The first three cases correspond to Eq. [\[eq:sim_xor\]](#eq:sim_xor){reference-type="ref" reference="eq:sim_xor"}, the following three cases in the middle correspond to Eq. [\[eq:eq_bb\]](#eq:eq_bb){reference-type="ref" reference="eq:eq_bb"}, and the last three cases correspond to Eq. [\[eq:NN_With_rotations\]](#eq:NN_With_rotations){reference-type="ref" reference="eq:NN_With_rotations"}. $\forall i : 0\leq i<n-k:$ $$\begin{align*}
|
| 164 |
+
\label{eq:define_b}
|
| 165 |
+
u(i) = \begin{cases}
|
| 166 |
+
c_{ \frac{r(i)}{2} },&\text{if $i \% b \leq 31, r(i) \% 2=0$}\\
|
| 167 |
+
0, & \text{otherwise}\\
|
| 168 |
+
\end{cases}
|
| 169 |
+
\end{align*}$$ Note that without assuming that $R$ (in Eq. [\[eq:NN_With_rotations\]](#eq:NN_With_rotations){reference-type="ref" reference="eq:NN_With_rotations"}) is the identity function, but rather a rotation function, one can rewrite the same formulation with a single difference: applying a permutation to the vector $x$.
|
| 170 |
+
|
| 171 |
+
It remains to convert Eq. [\[eq:H_with_bias\]](#eq:H_with_bias){reference-type="ref" reference="eq:H_with_bias"} to a homogeneous form, by using the bias trick: instead of the XOR operation with the bias $u$ in Eq. [\[eq:H_with_bias\]](#eq:H_with_bias){reference-type="ref" reference="eq:H_with_bias"}, concatenate one bit with a constant value of $1$ to $x$. This bit is used as a bias factor, and by using $H$, a concatenation of $H'$ with $u$, we can formulate Eq. [\[eq:H_with_bias\]](#eq:H_with_bias){reference-type="ref" reference="eq:H_with_bias"} as follows: $$\begin{equation}
|
| 172 |
+
\centering
|
| 173 |
+
\label{eq:H_mul}
|
| 174 |
+
H[x,1] = H
|
| 175 |
+
\begin{bmatrix}
|
| 176 |
+
w_{0} \\
|
| 177 |
+
% w_{2} \\
|
| 178 |
+
\vdots \\
|
| 179 |
+
w_{n-1} \\
|
| 180 |
+
\hat{Z_{k-32}} \\
|
| 181 |
+
\hat{Z_{k-32+b}} \\
|
| 182 |
+
\hat{Z_{k-32+2b}} \\
|
| 183 |
+
\vdots \\
|
| 184 |
+
\hat{Z_{n-b-32}} \\
|
| 185 |
+
1
|
| 186 |
+
\end{bmatrix} = 0
|
| 187 |
+
\end{equation}$$ $\forall i,j : 0\leq i < n-k, 0\leq j \leq(n+32(\frac{n-k}{b})):$ $$\begin{equation}
|
| 188 |
+
\label{eq:define_H}
|
| 189 |
+
H(i,j)=\begin{cases}
|
| 190 |
+
c_{ \frac{r(i)}{2} }, \text{ if } i = (n+32(\frac{n-k}{b})) \text{ and } \\ j \% b \leq 31 \text{ and } r(j)\% 2 =0\\
|
| 191 |
+
1, \text{if $i \% b > 31$ , $j = i$ }\\
|
| 192 |
+
1, \text{if $i \% b > 31$ , $j = i+k$ }\\
|
| 193 |
+
1, \text{if $i \% b > 31$ , $j = i+k-32$ }\\
|
| 194 |
+
1, \text{if $i \% b \leq 31, r(i) \% 2 = 1 $ , $j = i$ }\\
|
| 195 |
+
1, \text{if $i \% b \leq 31, r(i) \% 2 = 1 $ , $j = i+k$ }\\
|
| 196 |
+
1, \text{if $i \% b \leq 31, r(i) \% 2 = 1 $ , $j = t(i+k-32)$ }\\
|
| 197 |
+
1, \text{if $i \% b \leq 31, r(i) \% 2 = 0 $ , $j = i$ }\\
|
| 198 |
+
1, \text{if $i \% b \leq 31, r(i) \% 2 = 0 $ , $j = i+k$ }\\
|
| 199 |
+
1, \text{if $i \% b \leq 31, r(i) \% 2 = 0 $ , $j = t(i+k-32)$ }\\
|
| 200 |
+
0, \text{otherwise}\\
|
| 201 |
+
\end{cases}
|
| 202 |
+
\end{equation}$$
|
| 203 |
+
|
| 204 |
+
In our experiments, in order to isolate the influence of the neural S-box component ($S_{nn}$) on the performance, we use additional architectures. In the first architecture, we do not use $S_{nn}$, and connect $l$ to the original (without any modifications) belief propagation neural network directly. In this setting we ignore the non-linear constraints, and use $H''$, a sub-matrix of $H$: $$\forall i,j : 0\leq i < n-k, 0\leq j \leq n$$ $$\begin{equation}
|
| 205 |
+
\label{eq:define_H''}
|
| 206 |
+
H''(i,j)= \begin{cases}
|
| 207 |
+
1, \text{if $i \% b > 31$ and j = i }\\
|
| 208 |
+
1, \text{if $i \% b > 31$ and j = i+k }\\
|
| 209 |
+
1, \text{if $i \% b > 31$ and j = i+k-32 }\\
|
| 210 |
+
0, \text{otherwise}\\
|
| 211 |
+
\end{cases}
|
| 212 |
+
\end{equation}$$ $$\begin{equation}
|
| 213 |
+
\label{eq:H''_0}
|
| 214 |
+
H'' l = 0
|
| 215 |
+
\end{equation}$$ This ablation uses only linear constraints so we term it \"LC\". The second ablation uses $H$, the full matrix, but does not contain neural S-box layers inside the neural belief propagation network (except before the first layer, which is necessary to express all the equations in linear form and can be calculated using the S-box transformation itself). This ablation uses the original belief propagation neural network architecture so we term it \"OBPNN\".
|
| 216 |
+
|
| 217 |
+
Note that the formulation of $H$ is also relevant for other variations of AES (i.e. $k$=$128$,$192$). Moreover, the same technique can be used to create deep architectures for side-channel attacks on additional ciphers, for example Serpent [@SERPENT].
|
| 218 |
+
|
| 219 |
+
Based on the $H$ matrix described in Eq. [\[eq:define_H\]](#eq:define_H){reference-type="ref" reference="eq:define_H"} (or the $H''$ described in Eq. [\[eq:define_H\'\'\]](#eq:define_H''){reference-type="ref" reference="eq:define_H''"} for the ablation experiment), we construct a neural belief propagation network, as described in [3.2](#subsection:BP){reference-type="ref" reference="subsection:BP"}. The network receives an input vector $x$ that combines $l$, adds an S-box layer before each iteration of the belief propagation network, and predicts the probabilities vector $o$.
|
| 220 |
+
|
| 221 |
+
Once we obtain the initial estimate from the neural network, we use a Partial MAX-SAT Solver to search for the corrected key. To run the solver, we define the following Conjunctive Normal Form (CNF) formulas:
|
| 222 |
+
|
| 223 |
+
1. $n$ variables, one per bit in the key: $v_1 ,..,v_n$.
|
| 224 |
+
|
| 225 |
+
2. Convert the bit-relations in Eq. [\[eq:Sbox_with_rotation\]](#eq:Sbox_with_rotation){reference-type="ref" reference="eq:Sbox_with_rotation"},[\[eq:Sbox_without_rotation\]](#eq:Sbox_without_rotation){reference-type="ref" reference="eq:Sbox_without_rotation"},[\[eq:XOR_equation\]](#eq:XOR_equation){reference-type="ref" reference="eq:XOR_equation"} that are implied by the key expansion function to a CNF formula by CNF factorization. The result is the formula $\psi_{AES}$, which consists of $217984$ clauses and $1920$ variables. Eq. [\[eq:XOR_equation\]](#eq:XOR_equation){reference-type="ref" reference="eq:XOR_equation"}, for example, which is of the form ($a \oplus b = c$), is replaced with the following clauses:
|
| 226 |
+
|
| 227 |
+
$$(1)\quad \neg a \lor \neg b \lor \neg c \quad \quad \quad (2)\quad \neg a \lor b \lor c$$ $$(3)\quad a \lor \neg b \lor c \quad\quad\quad
|
| 228 |
+
(4) \quad a\lor b \lor \neg c$$
|
| 229 |
+
|
| 230 |
+
With the other equations, the result is more complicated, and each equation has been replaced by numerous clauses. We then insert these clauses into the solver as a hard formula. This formula is identical for all of the keys, and is calculated once.
|
| 231 |
+
|
| 232 |
+
3. For each bit whose value is $1$ in the corrupted key, we supply a single clause that enforces this key bit to be $1$; we denote this formula by $\psi_{memory}$. Formally: $\psi_{memory} := \wedge_{i \in [n-1], l_i = 1 } v_i$
|
| 233 |
+
|
| 234 |
+
4. Consider the $n_h$ bits with the highest values in the network output $o$, and the $n_l$ bits with the lowest values. These are the locations for which the network is most confident. Let $t_h$ be the $n_h$-th highest value in $o$, and $t_l$ be the $n_l$-th lowest value in $o$; we take these as thresholds and define the formula: $\psi_{nn} := \Big(\wedge_{i \in [n-1], o_i \geq t_h} v_i \Big)\wedge \Big( \wedge_{i \in [n-1], o_i \leq t_l } \neg v_i \Big)$
|
| 235 |
+
|
| 236 |
+
We define $\psi_{AES}$ as a hard formula, and $\psi_{nn}$ as a soft formula. In the theoretical decay model $\psi_{memory}$ is defined as a hard formula, but in the realistic decay model it is defined as a soft formula.
|
| 237 |
+
|
| 238 |
+
There is a large number of Partial MAX-SAT Solvers, which operate with a wide variety of strategies. We select the WBO Solver [@wbo_solver] with the implementation of [@LogicNGgithub], which is based on the unsatisfiability method [@UNSAT], and other enhancements [@WBO1; @WBO2; @WBO3]. We select this solver for three main reasons: (i) We have the intuition that DPLL solvers will be more suitable for this problem than randomized SAT solvers, due to the large complexity of the search space (there are $2^{1920}$ vectors, and there are numerous clauses). This complexity makes us think that it is better to use a solver that scans the space in an orderly manner. We, therefore, decided to use CDCL solvers [@CDCL1; @CDCL2; @CDCL3], the most popular variation of DPLL solvers. (ii) It achieved the best results in different cold boot attack settings, for example [@Liaobbb]. (iii) In the early steps of this research we tried to insert the complete key approximation from the neural network into a CDCL solver. Empirically, we observed that inserting the complete key approximation from the neural network into a CDCL solver does not output the correct keys. Therefore, we decided to focus on a small number of bits. We choose the bits whose values the neural belief propagation is relatively sure of, so that, in total, the probability that more than a few bits in the subset are erroneous is very small (smaller than $1 \%$). Therefore, it was natural to use the UNSAT [@UNSAT] method, which is suitable for the problem, since the number of unsatisfied soft clauses is small with high probability.
|
| 239 |
+
|
| 240 |
+
The input of our architecture is the vector $l_1, .. ,l_n$, which represents the corrupted key. We use the fully-connected neural network $S_{nn}$ (which imitates the Rijndael S-box, and extends its functionality to non-binary values) to calculate $\hat{Z}_i$ for all $i \in \{k+bt-32 \mid 0 \leq t \leq \frac{n-k}{b}-1 \}$. In other words, the S-box layer is created by duplicating the fully-connected neural network $S_{nn}$ $\frac{n-k}{b}$ times, one instance per each calculation of $\hat{Z}_i$.
|
| 241 |
+
|
| 242 |
+
The combination of these variables defines the input for each layer of the neural belief propagation, denoted by $x$: $$\begin{equation}
|
| 243 |
+
\label{eq:aes_input}
|
| 244 |
+
x = [l_1,..,l_n,\hat{Z}_k,\hat{Z}_{k+b},..,\hat{Z}_{n-k},1]
|
| 245 |
+
\end{equation}$$ which is of size of $n+32 \cdot (\frac{n-k}{b})+1$. Note that in layers that are not the first layer, $l$ is replaced with the output of the marginalization of the previous layer.
|
| 246 |
+
|
| 247 |
+
The $H$ function as defined in Eq. [\[eq:define_H\]](#eq:define_H){reference-type="ref" reference="eq:define_H"}, is used as a parity check matrix for the neural belief propagation.
|
| 248 |
+
|
| 249 |
+
The neural belief propagation predicts the probability that each key bit is $1$. We denote these probabilities by $o = o_1,..,o_n$. Based on $l,o$, we define the following SAT instance, as described in detail in subsection [4.4](#section:SAT_SOLVER){reference-type="ref" reference="section:SAT_SOLVER"}.
|
| 250 |
+
|
| 251 |
+
1. Define $n$ variables, one per bit: $v_1 ,..,v_n$.
|
| 252 |
+
|
| 253 |
+
2. $\psi_{nn}$ a CNF that is induced by the neural belief propagation predictions.
|
| 254 |
+
|
| 255 |
+
3. $\psi_{memory}$ a CNF that is induced by the corrupted key
|
| 256 |
+
|
| 257 |
+
4. $\psi_{AES}$ a CNF that is equivalent to the key expansion constraints.
|
| 258 |
+
|
| 259 |
+
We run WBO solver on this instance. The output of our model is the assignment that is returned from the solver.
|
2106.05956/main_diagram/main_diagram.drawio
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
<mxfile host="app.diagrams.net" modified="2021-06-02T15:14:27.946Z" agent="5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.102 Safari/537.36" etag="HzWRXi5PCK1pJRp9kr3y" version="14.7.3" type="google"><diagram id="OmoxEXhtXE_FN__e-Z9V" name="Page-1">5Vptb6M4EP41kfY+JAIMNPnYpu2eVr1Vtb3VNZ9WbnDAW4OzxjTJ/vqzwQ4G8kJT0jRaKlV4sMdm5nlmxiY9MI6XnxmcR//QAJGeYwXLHrjuOY7nj8R/KVgVAmBZhSBkOChEdil4wL+REupuGQ5QWunIKSUcz6vCKU0SNOUVGWSMLqrdZpRUZ53DUM1olYKHKSSo0e0/HPCokA49o/ffCIeRntnW7xdD3VkJ0ggGdGGIwE0PjBmlvLiLl2NEpO20XYpxt1uerhfGUMLbDEA/Iv9r9Izv2RfCp/H328erf/vAV4vjK/3GKBAGUE3KeERDmkByU0qvGM2SAEm1lmiVfe4onQuhLYQ/Eecr5U2YcSpEEY+JeoqWmD8a9xOpauCp1vVSac4bK91IOFs9mg1jlGyWw/KWHjejCb+FMSZSkMIk7aeI4Zl6opZoF+M0/oBoNc2rLJ7SjE3RLpteKJxCFiK+o6OrNEqLG1Mo931GNEbiTUQHhgjk+KUKSaiQHa77lc4XN8r/r8GCWvcLJJmaakyTlwZASvdL/y0izNHDHOYmWYgYUHV1e/PPMCFjSijLZwEzT/4JecoZfUbGEz+/drnoBTGOljtNqp46I7cYosKTM1QqFiXZgSJwZPLcPZYThqckZEnCicnBMyeka7UkpGZAd4TMh14yBldGhznFCU8NzfdSYIDywquBcljDVaGxRNl6aW8A3uj8MkGJtYnWsQN4eeteYE2YCrGPj0ZdY3SMxgbcwNCqwA3Yo6qKYqVqVC2+dYA8bRAj70x6jk+4DP7Zk7gN5e1d39ZSMY3xYEeCsjpNULKtZnFOlrD8urOc0yYs7+L84sbrElZnxNf7l73E9z9UXajXbfBTExFrDn79NPjLYCduwc0TF49SgyEvrm44CnQR91GKStdpePAbSnGQQdJwkHhHXvVC1YYJTVDN4EoECQ4T0ZwKy8n8eiUthsVW+lI9iHEQ5PTf5PZqSAgJTNPXpOhRroILOtDEGKfDkNWFW3Wo1W71QcOt7ga36gOA7t0KGm59eMbzP8al3ZDVru4Age2d2KsnLcRftQMUjXo9fT67wvcqr11vdNLy2muW1w2AaVrfwSdE7mmKFeeeKOc03sB7TmupOo3gXCqLl6E8Bx7MCF1MI8j4IM3iGCfhj1mWTAu1JqtHe5z/kfK669Rrb7cRKuxNef1YocIDHydU2O8WKrbu4+ubNQMGVn51GT3ctsV852HmbZBxTxsNpEP2OWvN2R0s3xIXjsByZ+SfmOXefpcJWF/Kr12yVpN1Fp42d1Kt6KG5fdC+ejD0vCqXbfcgNrdk6X7yuS3JZ/jW2+BbLXtrKeDWoKUOdtcqirDTKAWaika1TGTVwHfsmsJvQPIbuvvegOUfsdsf1p164t2+N3z/eHHg+b0RHt4jXuzP6v6ZBhavnrMODSxgOPDcUXlVv0QB62Ig5iov71hhBzxmK8AW15AHq58T8vv2ybH62w8i0zlMKgj3f2XyhxU5cPszFVUuZZcysDjFEYIlz0f66qhDdiFoxksN5VFmm6nSHIZSi23Nlxu0jG96V7ac2viWkWusziKVtZlluGmSTzkthFukCpxgbp7NFpqrszUW8dY43sWH/UYI3sDp7VVcI9U2o/ImQh4QlUWz/BVPAfXyp1Dg5n8=</diagram></mxfile>
|
2106.05956/main_diagram/main_diagram.pdf
ADDED
|
Binary file (22.5 kB). View file
|
|
|
2106.05956/paper_text/intro_method.md
ADDED
|
@@ -0,0 +1,165 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Introduction
|
| 2 |
+
|
| 3 |
+
Normalization techniques are often necessary to effectively train deep neural networks (DNNs) [1, 2, 3]. Arguably, the most popular of these is BatchNorm [1], whose success can be attributed to several beneficial properties that allow it to stabilize a DNN's training dynamics: for example, ability to propagate informative activation patterns in deeper layers [4, 5]; reduced dependence on initialization [6, 7, 8]; faster convergence via removal of outlier eigenvalues [9, 10]; auto-tuning of learning rates [11], equivalent to modern adaptive optimizers [12]; and smoothing of loss landscape [13, 14]. However, depending on the application scenario, BatchNorm's use can be of limited benefit or even a hindrance: for example, BatchNorm struggles when training with small batch-sizes [3, 15]; in settings with train-test distribution shifts, BatchNorm can undermine a model's accuracy [16, 17]; in meta-learning, it can lead to transductive inference [18]; and in adversarial training, it can hamper accuracy on both clean and adversarial examples by estimating incorrect statistics [19, 20].
|
| 4 |
+
|
| 5 |
+
To either address specific shortcomings or to replace BatchNorm in general, several recent works propose alternative normalization layers (interchangeably called normalizers in this paper). For example, Brock et al. [23] propose to match BatchNorm's forward propagation behavior in Residual
|
| 6 |
+
|
| 7 |
+
Email: {eslubana, dickrp}@umich.edu, and hidenori.tanaka@ntt-research.com
|
| 8 |
+
|
| 9 |
+
<sup>\*</sup>Work partially performed during an internship at Physics & Informatics Laboratories, NTT Research.
|
| 10 |
+
|
| 11 |
+

|
| 12 |
+
|
| 13 |
+
Figure 1: Each normalization method has its own success and failure modes. We plot training curves (3 seeds) for different combinations of *normalizer* (see Table 1), *network architecture*, and *batch-size* at largest stable initial learning rate on CIFAR-100. Learning rate is scaled linearly with batch-size [21]. Curves for which loss reaches infinity are not plotted. Test curves and several other settings are provided in the appendix. The plots show that all methods, including BatchNorm (BN), have their respective success and failure modes: e.g., LayerNorm (LN) [2] often converges slowly and Instance Normalization (IN) [22] can have unstable training with large depth or small batch-sizes.
|
| 14 |
+
|
| 15 |
+
networks [24] by replacing it with Scaled Weight Standardization [25, 26]. Wu and He [3] design GroupNorm, a batch-independent method that groups multiple channels in a layer to perform normalization. Liu et al. [27] use an evolutionary algorithm to search for both normalizers and activation layers. Given the right training configuration, these works show their proposed normalizers often achieve similar test accuracy to BatchNorm and even outperform it on some benchmarks. This begs the question, are we ready to replace BatchNorm? To probe this question, we plot training curves for models defined using different combinations of *normalizer, network architecture, batch size,* and *learning rate* on CIFAR-100. As shown in Figure 1, clear trends begin to emerge. For example, we see LayerNorm [2] often converges at a relatively slower speed; Weight Normalization [28] cannot be trained at all for ResNets (with and without SkipInit [6]); Instance Normalization [22] results in unstable training in deeper non-residual networks, especially with small batch-sizes. Overall, evaluating hundreds of models in different settings, we see evident success/failure modes exist for all normalization techniques, including BatchNorm.
|
| 16 |
+
|
| 17 |
+
As we noted before, prior works have established several properties to help explain such success/failure modes for the specific case of BatchNorm. However, given the pursuit of alternative normalizers in recent works, these properties need to be generalized so that one can accurately determine how normalization techniques beyond BatchNorm affect DNN training. In this work, we take a first step towards this goal by extending known properties of BatchNorm *at initialization* to several alternative normalization techniques. As we show, these properties are highly predictive of a normalizer's influence on DNN training and can help ascertain exactly when an alternative technique is capable of serving as a replacement for BatchNorm. Our contributions follow.
|
| 18 |
+
|
| 19 |
+
- Stable Forward Propagation: In Section 3, we show activations-based normalizers are provably able to prevent exploding variance of activations in ResNets, similar to BatchNorm [5, 6]. Parametric normalizers like Weight Normalization [28] do not share this property; however, we explain why architectural modifications proposed in recent works [6, 7] can resolve this limitation.
|
| 20 |
+
- Informative Forward Propagation: In Section 4, we first show the ability of a normalizer to generate dissimilar activations for different inputs is a strong predictor of optimization speed. We then extend a known result for BatchNorm to demonstrate the rank of representations in the deepest layer of a Group-normalized [3] model is at least $\Omega(\sqrt{\text{width}/\text{Group Size}})$. This helps us illustrate how use of GroupNorm can prevent high similarity of activations for different inputs if the group size is small, i.e., the number of groups is large. This suggests Instance Normalization [22] (viz.,
|
| 21 |
+
|
| 22 |
+
GroupNorm with group size equal to 1) is most likely and LayerNorm [2] (viz., GroupNorm with group size equal to layer width) is least likely to produce informative activations.
|
| 23 |
+
|
| 24 |
+
• Stable Backward Propagation: In Section 5, we show normalization techniques that rely on individual sample and/or channel statistics (e.g., Instance Normalization [22]) suffer from an exacerbated case of gradient explosion [29], often witnessing unstable backward propagation. We show this behavior is mitigated by grouping of channels in GroupNorm, thus demonstrating a speed–stability trade-off characterized by group size.
|
| 25 |
+
|
| 26 |
+
**Related Work:** Due to its ubiquity, past work has generally focused on understanding BatchNorm [5, 4, 6, 9, 10, 7, 13, 29, 30, 31]. A few works have studied LayerNorm [32, 33], due to its relevance in natural language processing. In contrast, we try to analyze normalization methods in deep learning in a general manner. As we show, we can identify properties in BatchNorm that readily generalize to other normalizers and are often predictive of the normalizer's impact on training. Our analysis is inspired by a rich body of work focused on understanding randomly initialized DNNs [34, 35, 36, 37, 38]. Most related to us is the contemporary work by Labatie et al. [39], who analyze the impact of different normalization layers on expressivity of activations and conclude LayerNorm leads to high similarity of activations in deeper layers. As we discuss, this result is in fact a special case of our Claim 3.
|
| 27 |
+
|
| 28 |
+
# Method
|
| 29 |
+
|
| 30 |
+
We first clarify the notations and operations used by the normalizers discussed in this work. Specifically, we define operators $\mu_{\{d\}}(\mathcal{T})$ and $\sigma_{\{d\}}(\mathcal{T})$ , which calculate the mean and standard deviation of a tensor $\mathcal{T}$ along the dimensions specified by set $\{d\}$ . $\|\mathcal{T}\|$ denotes the $\ell_2$ norm of $\mathcal{T}$ . $\text{RMS}_{\{d\}}(\mathcal{T})$ denotes the root mean square of $\mathcal{T}$ along dimensions specified by set $\{d\}$ . For example, for a vector $\mathbf{v} \in \mathbb{R}^n$ , we have $\text{RMS}_{\{1\}}(\mathbf{v}) = \sqrt{\sum_i v_i^2/n}$ . We assume the outputs of these operators broadcast as per requirements. $\rho(.)$ denotes the sigmoid function. We define symbols b, c, x to denote the batch, channel, and spatial dimensions. For feature maps in a CNN, x will include both the height and the width dimensions. The notation c/g denotes division of c neurons (or channels) into groups of size g. When grouping is performed, each group is normalized independently.
|
| 31 |
+
|
| 32 |
+
**Normalization Layers:** We analyze ten normalization layers in this work. These layers were chosen to cover a broad range of ideas: e.g., activations-based layers [1, 40], parametric layers [23, 28], hand-engineered layers [3], AutoML designed layers [27], and layers [22, 2, 4] that form building blocks of recent techniques [41].
|
| 33 |
+
|
| 34 |
+
| Activations-Based Layers | |
|
| 35 |
+
|----------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------|
|
| 36 |
+
| $\mu_{\{d\}} = \mu_{\{d\}}(A); \sigma_{\{d\}} = \sigma_{\{d\}}(A)$ | |
|
| 37 |
+
| BN [1] | $\frac{\mathcal{A} - \mu_{\{b,x\}}}{\sigma_{\{b,x\}}}$ |
|
| 38 |
+
| LN [2] | $\frac{A - \mu_{\{c,x\}}}{\sigma_{\{c,x\}}}$ |
|
| 39 |
+
| IN [22] | $\frac{A - \mu_{\{x\}}}{\sigma_{\{x\}}}$ |
|
| 40 |
+
| GN [3] | $\frac{\mathcal{A} - \mu_{\{c/g,x\}}}{\sigma_{\{c/g,x\}}}$ |
|
| 41 |
+
| FRN [40] | $\frac{\mathcal{A}}{\text{RMS}_{\{x\}}}$ |
|
| 42 |
+
| VN [4] | $\frac{A}{\sigma_{\{b,x\}}}$ |
|
| 43 |
+
| EvoBO [27] | $\frac{\mathcal{A}}{\max\{\sigma_{\{b,x\}},v\odot\mathcal{A}+\sigma_{\{x\}}\}}$ |
|
| 44 |
+
| EvoSO [27] | $\frac{\mathcal{A}\,\rho(v\odot\mathcal{A})}{\sigma_{\{c/g,x\}}}$ |
|
| 45 |
+
| Parametric Layers | |
|
| 46 |
+
| $\mu_{\{d\}} = \mu_{\{d\}}(\mathcal{W}); \sigma_{\{d\}} = \sigma_{\{d\}}(\mathcal{W})$ | |
|
| 47 |
+
| WN [28] | $g\frac{\mathcal{W}}{\|\mathcal{W}\|}$ |
|
| 48 |
+
| SWS [23] | $g \frac{\mathcal{W} - \mu_{\{c,x\}}}{\sigma_{\{c,x\}}}$ |
|
| 49 |
+
|
| 50 |
+
Table 1: Operations performed by different normalizers. A denotes layer input; W denotes incoming neuron weights to a neuron.
|
| 51 |
+
|
| 52 |
+
1. Activations-Based Layers: BatchNorm (BN) [1], LayerNorm (LN) [2], Instance Normalization (IN) [22], GroupNorm (GN) [3], Filter Response Normalization (FRN) [40], Variance Normalization (VN) [4], EvoNormBO [27], and EvoNormSO [27] fall in this category. These layers function in the activation space. Note that Variance Normalization is an ablation of BatchNorm that does not use the mean-centering operation. Typically, given activations $\mathcal{A}_L$ at layer L, these layers use an operation of the form $\mathcal{A}_{\text{norm}} = \phi\left(\frac{\gamma}{\sigma_{\{d\}}(\mathcal{A}_L)}(\mathcal{A}_L - \mu_{\{d\}}(\mathcal{A}_L)) + \beta\right)$. Here, $\gamma$ and $\beta$ are learned affine parameters used for controlling quantities affected by the normalization operations (such as mean, standard deviation, and RMS) and $\phi$ is a non-linearity, such as ReLU. The exact operations for these layers, minus the affine parameters, are shown in Table 1.
|
| 53 |
+
|
| 54 |
+
2. Parametric Layers: Weight Normalization (WN) [28] and Scaled Weight Standardization (SWS) [23] fall in this category. Table 1 shows the exact operations. These layers function in the parameter space and act on a filter's weights (W) to generate normalized weights ( $W_{\text{norm}}$ ). The normalized weights $W_{\text{norm}}$ are used for processing the input: $A_{L+1} = \phi(W_{\text{norm}} * A_L)$ .
|
| 55 |
+
|
| 56 |
+
Stable forward propagation is a necessary condition for successful DNN training [36]. In this section, we identify and demystify the role of normalization layers in preventing the problem of *exploding* or *vanishing activations* during forward propagation. These problems can result in training instability due to exploding or vanishing gradients during backward propagation [36, 38]. Building on a previous study on BatchNorm, we first show that activations-based normalizers provably avoid exponential growth of variance in ResNets<sup>1</sup>, ensuring training stability. Thereafter, we show parametric normalizers do not share this property and ensuring stable training requires explicit remedies.
|
| 57 |
+
|
| 58 |
+
Hanin and Rolnick [38] show that for stable forward propagation in ResNets, the average variance of activations should not grow exponentially (i.e., should not explode). Interestingly, Figure 1 shows that all activations-based normalizers are able to train the standard ResNet [24] architecture stably. For BatchNorm, this behavior is provably expected. Specifically, De and Smith [6] find that to ensure variance along the batch-dimension is 1, BatchNorm rescales the $L^{\rm th}$ layer's residual path output by a factor of $\mathcal{O}\left(1/\sqrt{L}\right)$ . This causes the growth of variance in a Batch-Normalized ResNet to be linear in depth, hence avoiding exponential growth of variance and ensuring effective training. We now show this result can be extended to other normalization techniques too.
|
| 59 |
+
|
| 60 |
+
**Claim 1.** Similar to BatchNorm [6], GroupNorm [3] avoids exponential growth of variance in ResNets, ensuring stable training.
|
| 61 |
+
|
| 62 |
+
Proof. We follow the same setup as De and Smith [6]. Assume the $L^{\text{th}}$ residual path $(f_L)$ is processed by a normalization layer $\mathcal{N}$ , after which it combines with the skip connection to generate the next output: $\mathbf{y}_L = \mathbf{y}_{L-1} + \mathcal{N}(f_L(\mathbf{y}_{L-1}))$ . The covariance of layer input and Residual path's output is assumed to be zero. Hence, the output's variance is: $\text{Var}(\mathbf{y}_L) = \text{Var}(\mathbf{y}_{L-1}) + \text{Var}(\mathcal{N}(f_L(\mathbf{y}_{L-1})))$ . Now, assume GroupNorm with group size G is used for normalizing the D-dimensional activation signal, i.e., $\mathcal{N} = \text{GN}(.)$ . This implies for the $g^{\text{th}}$ group, $\sigma_{g,x}(GN(f_L(\mathbf{y}_{L-1}))) = 1$ . Then, for a batch of size N, denoting the $i^{\text{th}}$ sample activations as $\mathbf{y}_L^{(i)}$ , and using $(\mathbf{y}_L^{(i)})^j$ to index the activations, we note the residual output's variance averaged along the spatial dimension is: $\langle \text{Var}(\mathcal{N}(f_L(\mathbf{y}_{L-1}))) \rangle = \frac{1}{D} \sum_{j=1}^{D} \left(\frac{1}{N} \sum_{i=1}^{N} \left((\text{GN}(f_L(\mathbf{y}_{L-1}))^{(i)})^j\right)^2\right) = \frac{1}{N} \sum_{i=1}^{N} \left(\frac{1}{D} \sum_{j=1}^{D} \left((\text{GN}(f_L(\mathbf{y}_{L-1}))^{(i)})^j\right)^2\right) = 1$ . Overall, this implies $\langle \text{Var}(\mathbf{y}_L) \rangle = \langle \text{Var}(\mathbf{y}_{L-1}) \rangle + 1$ . Recursively applying this relationship for a bounded variance input, we see average variance at the $L^{\text{th}}$ layer is in $\mathcal{O}(L)$ . Thus, similar to BatchNorm, use of GroupNorm will enable stable forward propagation in ResNets by ensuring signal variance grows linearly with depth.
|
| 63 |
+
|
| 64 |
+

|
| 65 |
+
|
| 66 |
+
Figure 2: Activations-based normalizers ensure linear and stable forward propagation, verifying Claim 1. Activation Variance (Activ. Var.) as a function of layer number in a ResNet-56 [24] processing CIFAR-100 samples.
|
| 67 |
+
|
| 68 |
+
To understand the relevance of the above result, note that for G=1, GroupNorm is equal to Instance Normalization [22] and for G=D, GroupNorm is equal to LayerNorm [2]. Further, since the mean of the signal is assumed to be zero, the average variance along the spatial dimension is equal to the RMS $_x$ operation used by Filter Response Normalization [40]. Thus, by proving the above result for GroupNorm, we are able to show alternative activations-based normalizers listed in Table 1 also avoid the exponential growth of activation variance in ResNets.
|
| 69 |
+
|
| 70 |
+
We show empirical demonstrations of Claim 1 in Figure 2, where the average activation variance is plotted for a ResNet-56. As can be seen, for all activations-based normalizers, the growth of variance is linear in the number of layers. At the end of a Residual module, which spatially downsamples the signal, the variance plummets. However,
|
| 71 |
+
|
| 72 |
+
the remaining layers follow a pattern of linear growth, as expected by our result. We note our
|
| 73 |
+
|
| 74 |
+
<sup>&</sup>lt;sup>1</sup>The case of *non-residual* networks is discussed in appendix. In brief, most normalizers help avoid exploding/vanishing activations by enforcing unit activation variance in the batch, channel, or spatial dimensions.
|
| 75 |
+
|
| 76 |
+

|
| 77 |
+
|
| 78 |
+
Figure 3: Parametric normalizers witness exponentially growing variance, verifying Claim 2, but we can stabilize it by modifying the residual-path. We plot log activation variance as a function of layer number in a randomly initialized ResNet-56 [24], using CIFAR-100 samples, with Scaled Weight Standardization (SWS) [23] and Weight Normalization (WN) [28] for different architectures (simplified illustrations provided on top). (a) *Standard ResNet*: Both SWS and WN witness variance explosion in a standard ResNet model, as claimed in Claim 2. (b) *SkipInit*: SkipInit [6] multiplies the residual signal with a scalar $\alpha$ initialized as zero, thus preventing variance explosion in an SWS model at initialization. Meanwhile, by scaling the non-linearity after addition, a WN model continues to witness exploding variance. (c) *Non-Linearity on Residual Path*: Shifting the non-linearity to the residual path prevents variance explosion in both WN and SWS models.
|
| 79 |
+
|
| 80 |
+
theory does not apply to EvoNorms, which are designed via AutoML. However, empirically, we see EvoNorms also avoid exponential growth of variance in ResNets. *Thus, our analysis shows, all activations-based normalizers in Table 1 share the beneficial property of stabilizing forward propagation in ResNets, similar to BatchNorm.*
|
| 81 |
+
|
| 82 |
+
By default, parametric normalizers such as Weight Normalization [28] and Scaled Weight Standardization [23] do not preserve the variance of a signal during forward propagation, often witnessing vanishing activations. To address this limitation, properly designed output scale and bias corrections are needed. Specifically, for Weight Normalization and ReLU non-linearity, Arpit et al. [42] show the output should be modified as follows: $\mathcal{A}_{L+1} = \sqrt{\frac{2\pi}{\pi-1}}(\phi(\mathcal{W}_{norm}*\mathcal{A}_L)-\sqrt{\frac{1}{2\pi}})$ . For Scaled Weight Standardization, only output scaling is needed [23]: $\mathcal{A}_{L+1} = \phi(\sqrt{\frac{2\pi}{\pi-1}}\mathcal{W}_{norm}*\mathcal{A}_L)$ .
|
| 83 |
+
|
| 84 |
+
In Figure 1, ResNet training curves for Weight Normalization [28] and Scaled Weight Standardization [23] were not reported as the loss diverges to infinity. As we explain in the following, this is a result of using correction factors designed to enable variance preservation in non-residual networks.
|
| 85 |
+
|
| 86 |
+
**Claim 2.** *Unlike BatchNorm* [6], Weight Normalization [28] and Scaled Weight Standardization [23] witness unstable training due to exponential growth of variance in standard ResNets [24].
|
| 87 |
+
|
| 88 |
+
*Proof.* Using the correction factors above, both Weight Normalization and Scaled Weight Standardization will ensure signal variance is preserved on the residual path: $\operatorname{Var}(\mathcal{N}(f(\mathbf{y}_{L-1}))) = \operatorname{Var}(\mathbf{y}_{L-1})$ . Thus, using these methods, the output variance at layer L becomes: $\operatorname{Var}(\mathbf{y}_L) = \operatorname{Var}(\mathbf{y}_{L-1}) + \operatorname{Var}(\mathcal{N}(f(\mathbf{y}_{L-1}))) = 2\operatorname{Var}(\mathbf{y}_{L-1})$ . Recursively applying this relationship for a bounded variance input, we see signal variance at the $L^{\text{th}}$ layer is in $\mathcal{O}(2^L)$ . Thus, Weight Normalization and Scaled Weight Standardization witness exponential growth in variance.
|
| 89 |
+
|
| 90 |
+
More generally, the above result shows if the residual path is variance preserving, ResNets will witness exploding variance with growing depth. Prior works [43, 5, 8, 6, 7, 44] have noted this result in the context of designing effective ResNet initializations. Here, we extended this result to show why Weight Normalized and Scaled Weight Standardized ResNets undergo unstable forward propagation. Empirical demonstration is provided in Figure 3a.
|
| 91 |
+
|
| 92 |
+
In their work introducing Scaled Weight Standardization [23], Brock et al. are able to circumvent exponential growth in variance by using SkipInit [6]. Specifically, inspired by the fact that BatchNorm biases Residual paths to identity functions, De and Smith [6] propose SkipInit, which multiplies the output of the residual path by a learned scalar $\alpha$ that is initialized to zero. This suppresses the Residual path's contribution, hence avoiding exponential growth in variance (see Figure 3b). Interestingly, even
|
| 93 |
+
|
| 94 |
+

|
| 95 |
+
|
| 96 |
+
Figure 4: **Modified residual-path allows for successful training with parametric layers.** We plot train/test accuracy (over 3 seeds) for ResNet-56 architecture on CIFAR-100 with non-linearity located on the residual path. We see parametric normalizers can train effectively if scaled non-linearities are not located after the addition operation in a ResNet.
|
| 97 |
+
|
| 98 |
+
after using SkipInit, we find Weight Normalized ResNets witness variance explosion (see Figure 3b). To explain this behavior, we note that in the standard ResNet architecture, the non-linearity is located after the addition operation of skip connection and the residual path's signals (see Figure 3a). Thus, even if SkipInit is used to suppress the residual path, the non-linearity will still be applied to the skip connections. Since the scale correction for Weight Normalization ($\sqrt{2\pi/(\pi-1)}$) is greater than 1, this implies the signal output is amplified at every layer to preserve signal variance; however, since convolutions are absent on the skip path, signal variance never decays. Consequently, *variance is only amplified*, causing the variance to increase exponentially in the number of layers (see Figure 3b).
|
| 99 |
+
|
| 100 |
+
Training ResNets with Weight Normalization: The above discussion shows that for Weight Normalization, since the output has to be scaled-up to preserve signal variance, standard ResNets [24] witness exploding activations. This also hints at a solution: place the non-linearity on the Residual path. This modification (see Figure 3c) in fact results in one of the architectures proposed by He et al. in their original work on ResNets [45]. We verify the effectiveness of this modification in Figure 3c. As can be seen, the signal variance in a Weight Normalized ResNet stays essentially constant for this architecture. Furthermore, we show in Figure 4 that these models are able to match BatchNorm in performance for several training configurations. In general, our discussion here explains the exact reasons why architectures with non-linearity on residual path are better suited for parametric normalizers. Finally, we note that another ResNet architecture which boasts non-linearity on residual paths is pre-activation ResNets [45]. In their experimental setup for designing Scaled Weight Standardization [23], Brock et al. specifically focused on pre-activation ResNets [45]. This is another reason why the problem of exploding activations does not surface in their work.
|
| 101 |
+
|
| 102 |
+
Proper magnitude of activations is a necessary, but not sufficient, condition for successful training. Here, we study another failure mode for forward propagation, *rank collapse*, where activations for different input samples become indistinguishably similar in deeper layers. This can significantly slow training as the gradient updates no longer reflect information about the input data [4]. To understand this problem's relevance, we first show why the ability to generate dissimilar activations is useful in the context of normalization methods for deep learning. Specifically, given a randomly initialized network that uses a specific normalizer, we relate its average cosine similarity of activations at the penultimate
|
| 103 |
+
|
| 104 |
+
layer (i.e., layer before the linear classifier) with its mean training accuracy (= $\frac{\sum_{i=1}^{\text{# of epochs}} \text{Train Acc.[i]}}{\text{# of epochs}}$ , a measure of optimization speed [46]). Results for three different architectures (Non-Residual CNN with 10 layers and 20 layers as well as ResNet-56 without SkipInit) are shown in Figure 5. As can be seen, the correlation between mean training accuracy and the average cosine similarity of activations is high. In fact, for any given network architecture, one can predict which normalizer will enable the fastest convergence without even training the model. This shows normalizers which result in more dissimilar representations at initialization are likely to be more useful for training DNNs.
|
| 105 |
+
|
| 106 |
+
We now note another interesting pattern in Figure 5: LayerNorm results in highest similarity of activations for any given architecture. To explain this, we again revisit known properties of Batch-Norm. As shown by Daneshmand et al. [4, 47], BatchNorm provably ensures activations generated by a randomly initialized network have high rank, i.e., different samples have sufficiently different activations. To derive this result, the authors consider activations for N samples at the penultimate layer, $Y \in \mathbb{R}^{\text{width} \times N}$ , and define the covariance matrix $YY^T$ , whose rank is equal to that of the
|
| 107 |
+
|
| 108 |
+

|
| 109 |
+
|
| 110 |
+

|
| 111 |
+
|
| 112 |
+
- (a) Stable rank vs. group size.
|
| 113 |
+
- (b) Layer-wise Cosine Similarity.
|
| 114 |
+
|
| 115 |
+
Figure 6: The smaller the group size, the higher the rank of the activations, verifying Claim 3. (a) We plot stable rank of activations at the penultimate layer for random Gaussian inputs. As proposed in Claim 3, we find a perfect linear fit between stable rank and values of $\sqrt{\text{Width/Group Size}}$ for different group sizes. (b) Implications of Claim 3 on CIFAR-100 samples: by increasing group size (constant across layers), we see similarity of features at any given layer increases. This shows LayerNorm [2] cannot generate informative features, thus witnessing slow convergence (see Figure 5).
|
| 116 |
+
|
| 117 |
+
similarity matrix $Y^TY$ . The authors then show that in a zero-mean, randomly initialized network with BatchNorm layers, the covariance matrix will have a rank at least as large as $\Omega(\sqrt{\text{width}})$ . That is, there are at least $\Omega(\sqrt{\text{width}})$ distinct directions that form the basis of the similarity matrix, hence indicating the model is capable of extracting informative activations. In the following, we propose a claim that extends this result to activations-based normalizers beyond BatchNorm.
|
| 118 |
+
|
| 119 |
+
Claim 3. For a zero-mean, randomly initialized network with GroupNorm [3] layers, the penultimate layer activations have a rank of at least $\Omega(\sqrt{\text{width/Group Size}})$ , where width denotes the layer-width (e.g., number of channels in a CNN).
|
| 120 |
+
|
| 121 |
+
The intuition behind the above claim is based on the proof by Daneshmand et al. [4]. In their work, the authors extend a prior result from random matrix theory which suggests multiplication of several zero-mean, randomly initialized Gaussian matrices will result in a rank-one matrix [10]. The use of BatchNorm ensures that on multiplication with a randomly initialized weight matrix, the values of on-diagonal elements of the covariance matrix $YY^T$ are preserved, while the off-diagonal elements are suppressed. This leads to a lower bound of the order of $\Omega(\sqrt{\text{width}})$ on the stable rank [48] of the covariance matrix. Now, if one directly considers the similarity matrix $Y^TY$ and uses GroupNorm instead of BatchNorm, then a similar preservation and suppression of on- and off-diagonal matrix blocks should occur. Here, the block size will be equal to the Group size used for GroupNorm. This indicates the lower bound is in $\Omega(\sqrt{\text{width/Group Size}})$ .
|
| 122 |
+
|
| 123 |
+
We provide demonstration of this claim in Figure 6a. We use a similar setup as Daneshmand et al. [4], randomly initializing a CNN with constant layer-width (64) and 30 layers. A GroupNorm layer is placed before every ReLU layer and the group size is swept from 1 to 64. As seen in Figure 6a, we find a perfect linear fit between the stable rank and the value of $\sqrt{\text{width}/\text{Group Size}}$ , validating our claim empirically as well.
|
| 124 |
+
|
| 125 |
+
To understand the significance of Claim 3, note that the result shows if the group size is large, then use of GroupNorm cannot prevent collapse of representations (i.e., cannot result
|
| 126 |
+
|
| 127 |
+

|
| 128 |
+
|
| 129 |
+
Figure 5: Informative forward propagation results in faster optimization. We plot mean training accuracy (= $\frac{\sum_{i=1}^{\# \text{of epochs}} \text{Train Acc.[i]}}{\# \text{of epochs}}$ ) on CIFAR-100 vs. average cosine similarity at initialization. As shown, normalizers which induce dissimilar activations converge faster. Instance Normalization was removed due to training instability (see Section 5).
|
| 130 |
+
|
| 131 |
+
in informative activations). To demonstrate this effect, we calculate the mean cosine similarity of activations between different samples of a randomly initialized network that uses GroupNorm. We sweep the group size from layer-width to 1, thus covering the spectrum from LayerNorm (Group Size
|
| 132 |
+
|
| 133 |
+
= layer-width) to Instance Normalization (Group Size = 1). We analyze both a non-residual CNN with 20 layers and a ResNet-56. Results are shown in Figure 6b and confirm our claim that by grouping the entire layer for normalization, LayerNorm results in highly similar activations. *This explains the slow convergence behavior of LayerNorm in Figure 5*. Meanwhile, if we reduce the group size, similarity of representations decreases as well, indicating generation of informative activations. This shows use of GroupNorm with group size smaller than layer-width can help prevent a collapse of features onto a single representation. Importantly, this result helps explain why GroupNorm can serve as a successful replacement for BatchNorm in similarity-based self-supervised learning frameworks [49], which often witness representation collapse issues [50]. Similar to BatchNorm, GroupNorm helps discriminate between representations of different inputs, helping avoid a collapse of representations.
|
| 134 |
+
|
| 135 |
+
Taking the results of Section 4 to the extreme should imply Instance Normalization (i.e., Group Size = 1) is the best configuration for GroupNorm, but as we noted in Figure 1, Instance Normalization witnesses unstable training. To explain this, we describe a "speed-stability" trade-off in GroupNorm in the next section by extending the property of gradient explosion in BatchNorm to alternative normalization layers. Specifically, Yang et al. [29] recently show that gradient norm in earlier layers of a randomly-initialized BatchNorm network increases exponentially with increasing model depth (see Figure 8). This shows the maximum depth of a model trainable with BatchNorm is finite. The theory leading to this result is quite involved, but a much simpler analysis can not only explain this phenomenon accurately, but also illustrate the existence of gradient explosion in alternative layers.
|
| 136 |
+
|
| 137 |
+
**Gradient explosion in BatchNorm:** Following Luther [51], we analyze the origin of gradient explosion based on the expression of gradient backpropagated through a BatchNorm layer. We calculate the gradient of loss w.r.t. activations at layer L, denoted as $\mathbf{Y}_L \in \mathbb{R}^{d_L \times N}$ . We define two sets of intermediate variables: (i) pre-activations, generated by weight multiplication, $X_L = W_L Y_{L-1}$ and (ii) normalized pre-activations, generated by BatchNorm, $\hat{X}_L = \mathrm{BN}(X_L) = \frac{\gamma}{\sigma_{\{N\}}(X_L)}(X_L - \mu_{\{N\}}(X_L)) + \beta$ . Under these notations, the gradient backpropagated from layer L to layer L-1 is (see appendix for derivation): $\nabla_{\mathbf{Y}_{L-1}}(J) = \frac{\gamma}{\sigma_{\{N\}}(X_L)} \mathcal{W}_L^T \mathcal{P}\left[\nabla_{\hat{\mathbf{X}}_L}(J)\right]$ . Here $\mathcal{P}$ is a composition of two projection operators: $\mathcal{P}[\mathbf{Z}] = \mathcal{P}_{\mathbf{1}_N}^\perp[\mathcal{P}_{\mathrm{Ob}(\hat{\mathbf{X}}_L/\sqrt{N})}^\perp[\mathbf{Z}]]$ . The operator $\mathcal{P}_{\mathrm{Ob}(\hat{\mathbf{X}}_L/\sqrt{N})}^\perp[\mathbf{Z}] = \mathbf{Z} - \frac{1}{N}\mathrm{diag}(\mathbf{Z}\hat{\mathbf{X}}_L^T)\hat{\mathbf{X}}_L$ subtracts its input's component that is inline with the BatchNorm outputs via projection onto the Oblique manifold $\mathrm{diag}(\frac{1}{N}\hat{\mathbf{X}}_L\hat{\mathbf{X}}_L^T) = \mathrm{diag}(\mathbf{1})$ . Similarly, $\mathcal{P}_{\mathbf{1}}^\perp[\mathbf{Z}] = \mathbf{Z}(I - \frac{1}{N}\mathbf{1}_N\mathbf{1}_N^T)$ mean-centers its input along the batch dimension via projection onto $\mathbf{1}_N \in \mathbb{R}^N$ .
|
| 138 |
+
|
| 139 |
+
Notice that at initialization, the gradient is unlikely to have a large component along specific directions such as the all-ones vector ($\mathbf{1}_N$) or the oblique manifold defined by $\hat{\mathbf{X}}_L$ . Thus, the gradient norm will remain essentially unchanged when propagating through the projection operation ( $\mathcal{P}$ ). However, the next operation, multiplication with $\frac{\gamma}{\sigma_{\{N\}}(X_L)}$ (= $\frac{1}{\sigma_{\{N\}}(X_L)}$ at initialization) will re-scale the gradient norm according to the standard deviation of pre-activations along the batch dimension. As shown by Luther [51], for a standard, zero-mean Gaussian initialization, the pre-activations have a standard deviation equal to $\sqrt{(\pi-1)/\pi} < 1$ . Thus, at initialization, the division by standard deviation operation amplifies the gradient during backward propagation. For each BatchNorm layer in the model, such an amplification of the gradient will take place, hence resulting in an exponential increase
|
| 140 |
+
|
| 141 |
+

|
| 142 |
+
|
| 143 |
+
Figure 7: **Gradient norm vs. pre-activation statistics.** We see high correlation between gradient norm and inverse product of layer-wise pre-activation std. deviations.
|
| 144 |
+
|
| 145 |
+
in the gradient norm at earlier layers. Overall, our analysis exposes an interesting tradeoff in BatchNorm: Division by standard deviation during forward propagation, which is important for generating dissimilar activations [4], results in gradient explosion during backward propagation, critically limiting the maximum trainable model depth! Empirically, the above analysis is quite accurate near initialization. For example, in Figure 7, we show that the correlation between the norm of the gradient at a layer ( $\|\nabla_{\mathbf{Y}_L}(J)\|$ ) and the inverse product of standard deviation of the pre-activations of layers ahead of it ( $\prod_{l=L}^{10} 1/\sigma_{\{N\}}(\mathbf{X}_{l})$ ) remains very high (0.6–0.9) over the first few hundred iterations in a 10-layer CNN trained on CIFAR-100.
|
| 146 |
+
|
| 147 |
+

|
| 148 |
+
|
| 149 |
+
Figure 8: **Small group size increases gradient explosion, verifying Claim 4.** We use CIFAR-100 samples and plot layer-wise gradient norm for different models and batch sizes. As shown, Instance Normalization [22] undergoes highest gradient explosion, followed by BatchNorm [1], GroupNorm [3], and LayerNorm [2] in all settings.
|
| 150 |
+
|
| 151 |
+
**Gradient Explosion in Other Normalizers:** We now extend the phenomenon of gradient explosion to other normalizers. The primary idea is that since all activation-based normalizers have a gradient expression similar to BatchNorm (i.e., projection followed by division by standard deviation), they all re-scale the gradient norm during backprop. However, the statistic used for normalization varies across normalizers, resulting in different severity of gradient explosion.
|
| 152 |
+
|
| 153 |
+
**Claim 4.** For a given set of pre-activations, the backpropagated gradient undergoes higher average amplification through an Instance Normalization layer [22] than through a BatchNorm layer [1]. Further, GroupNorm [3] witnesses lesser gradient explosion than both these layers.
|
| 154 |
+
|
| 155 |
+
Proof. The gradient backpropagated through the $g^{\text{th}}$ group in a GroupNorm layer with group-size G is expressed as: $\nabla_{\mathbf{Y}_{L-1}^g}(J) = \frac{\gamma}{\sigma_{\{g\}}(X_L^g)} \mathcal{W}_L^T \mathcal{P}\left[\nabla_{\hat{\mathbf{X}}_L^g}(J)\right]$ (see appendix for derivation). Here, $\mathcal{P}$ is defined as: $\mathcal{P}[\mathbf{Z}] = \mathcal{P}_1^{\perp}[\mathcal{P}_{\mathbb{S}(\hat{\mathbf{X}}_L/\sqrt{G})}^{\perp}[\mathbf{Z}]]$ , where $\mathcal{P}_{\mathbb{S}(\hat{\mathbf{X}}_L/\sqrt{G})}^{\perp}[\mathbf{Z}] = (I - \frac{1}{G}\hat{\mathbf{X}}_L^g(\hat{\mathbf{X}}_L^g)^T)\mathbf{Z}$ . That is, the component of gradient inline with the normalized pre-activations will be removed via projection onto the spherical manifold defined by $||\hat{\mathbf{X}}_L^g|| = \sqrt{G}$ . As can be seen, the gradient expressions for GroupNorm and BatchNorm are very similar. Hence, the discussion for gradient explosion in BatchNorm directly applies to GroupNorm as well. This implies, when Instance Normalization is used in a CNN, the gradient norm for a given channel c and the $i^{\text{th}}$ sample is amplified by the factor $\frac{1}{\sigma_{\{x\}}(\mathbf{X}_{L,i}^c)}$ (inverse of spatial standard deviation). Then, over N samples, using the arithmetic-mean $\geq$ harmonic-mean inequality, we see the average gradient amplification in Instance Normalization is greater than gradient amplification in BatchNorm: $\frac{1}{N}\sum_i \frac{1}{\sigma_{\{x\}}^2(\mathbf{X}_{L,i}^c)} \geq \frac{N}{\sum_i \sigma_{\{x\}}^2(\mathbf{X}_{L,i}^c)} = \frac{1}{\sigma_{\{N\}}^2(\mathbf{X}_L)}$ . 
Similarly applying arithmetic-mean $\geq$ harmonic-mean for a given sample and the $g^{\text{th}}$ group, we see average gradient amplification in Instance Normalization is greater than gradient amplification in GroupNorm: $\frac{1}{G}\sum_c \frac{1}{\sigma_{\{x\}}^2(\mathbf{X}_L^{g,c})} \geq \frac{G}{\sum_c \sigma_{\{x\}}^2(\mathbf{X}_L^{g,c})} = \frac{1}{\sigma_{\{g\}}^2(\mathbf{X}_L)}$ . Extending this last inequality by averaging over N samples, we see average gradient amplification in GroupNorm is lower than that in BatchNorm. This implies grouping of neurons in GroupNorm helps reduce gradient explosion.
|
| 156 |
+
|
| 157 |
+
We show empirical verification of Claim 4 in Figure 8. As can be seen, the gradient norm in earlier layers follows the order Instance Normalization $\geq$ BatchNorm $\geq$ GroupNorm $\geq$ LayerNorm, as proved in Claim 4. Further, since increasing depth implies more normalization operations, we see gradient explosion increases as depth increases. Similarly, since reducing batch-size increases gradient noise, we find gradient explosion increases with decrease in batch-size as well.
|
| 158 |
+
|
| 159 |
+
**Speed-stability trade-off in GroupNorm:** Combined with Section 4, our discussion in this section helps identify a speed-stability trade-off in GroupNorm. Specifically, we find that while GroupNorm with group size equal to 1 (viz., Instance Normalization) results in more diverse features (see Claim 3), it is also more susceptible to gradient explosion and hence sees training instability for small batch-sizes/large model depth (see Figure 1). Meanwhile, when group size is equal to layer-width (viz., LayerNorm), gradient explosion can be avoided, but the model is unable to generate informative activations and thus witnesses slower optimization. Combining these
|
| 160 |
+
|
| 161 |
+

|
| 162 |
+
|
| 163 |
+
Figure 9: **Speed–Stability trade-off in GroupNorm** using 20-layer CNNs with batch-size 256 on CIFAR-100. We see increasing group size decreases gradient explosion (improved training stability) at the expense of high activation similarity (reduced optimization speed).
|
| 164 |
+
|
| 165 |
+
results demonstrates that the group size in GroupNorm induces a trade-off between high similarity of activations (influences training speed) and gradient explosion (influences training stability). To illustrate this trade-off, we can estimate training instability by fitting an exponential curve to layerwise gradient norms (measures degree of gradient explosion) and estimate training speed by calculating cosine similarity of activations at the penultimate layer at initialization (highly correlated with training speed; see Figure 6). Results are shown in Figure 9. We see increasing group size clearly trades-off the two properties related to training speed and stability, with a moderately large group size resulting in best performance. In fact, we see test accuracy is highest exactly at this point of intersection in the trade-off. This explains the success of channel grouping in GroupNorm and other successful batch-independent normalization layers like EvoNorm-S0 [27]. Interestingly, these results also help explain why in comparison to BatchNorm, which suffers from gradient explosion and exacerbates the problem of high gradient variance in non-IID Federated learning setups [52, 53], use of GroupNorm with a properly tuned group-size helps achieve better performance [52, 54].
|
2110.06553/main_diagram/main_diagram.drawio
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
<mxfile host="app.diagrams.net" modified="2021-10-11T11:15:47.093Z" agent="5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/94.0.4606.71 Safari/537.36" version="15.5.0" etag="XH4lY9eDijAsrNJD783b"><diagram id="lhzgWDLXVg8OaXvbXWHB">7VlLc6M4EP41HIfiYV7H2HFmDjtbqfVW7exRARmrIhAr5NjeX78tkMwzYyeBGW/V+GDULakF/fWnboThrrLjZ46K3VeWYGo4VnI03HvDcWzbcuEiNSelCaNakXKSKFWj2JB/sVJaSrsnCS47AwVjVJCiq4xZnuNYdHSIc3boDtsy2l21QKla0WoUmxhRPBj2F0nErtaGTtDov2CS7vTKtq+eL0N6sDJc7lDCDi2VuzbcFWdM1K3suMJUOk/7pZ738Erv+cY4zsU1E5x6wguie/VshuNTmLp8gkYqG1qxZWAS7liclBv8f/ZMd3wqK5DuYIC9KI5Np7byyHHBWYzLkuSpNgn3VVvtrgTq1upOZ1HnsCMCbwoUS/kA8QWDdiKjINnn8S+YC3x81Sf22dMQophlWPATDNETfAWOis5Ay4cGa1uH4q6F80LpkAqv9Gy6QQAaCoRxQNwfBciG4u2nOyFMGLGkLH7+CCic7fMEy0ewoPsCRFtC6YpRxqu5boJwuI1BXwrOnnGrx49D/LSdBlTXugJUZyZQFyOg9jyI8+RO7kwgxRQBS+Ku0/CRiG+t9t/S16anpPujcn0lnJRQL4KTtLttlWzPY9zh/xV+bPnJG3GT1nFMkSAv3RXHfKdWeGSkimIFU+R1UAqjnvfre1eT2vtaz84A7rBnSCCeYjEwVCF5fuqrwPVeZawmqM5fmoL2kI0tkr+X0HBFmaRa/lTKy3r9GcZuSJojOk7iV/j+EW5PwVTfdO2LXI1cUyfVqdnq3yxbvRtjq+1dYNm1dB2k3PnoGlyk6/vo9/W3x1tllO8FP6+gCd9c0GxR3PX/nySDat+xfscH+P+DZShv+//dFRDbigwdf1jdMwGSQfQTkYzejOSv0vQaUD3rClDnKk11sNxItruZ1LZwJkpti0s5crrUZtu/sByDwHeCabAcJNIZsRw7mXkrljks900jJoUWmlJs4KykC3jWD9fJ6jcDceDPBXH/DXRCiGc463GcsYRqmuZHUiikNtGNLEThtVKGHaCPISUuZQIkMaJ3qiMjSSKnLzmGm0NPlSkZW4V0XOVKb2l499LWXjD1imwPUm3OctzLy0o1QeIdbM3BMPG6IzHrTJF3pzwTMnu7dbj47n4thUfMoaquwPtf7OGLsJ9GbdMPrfOvZ/Baug9qr9CZje5jB0U9xMsdKmQz3nN6WnIUP8sN91LV2pS4VQ1LSfFFt4Hq6vOJ9Gy/vo2ih4coqiwIAIflKhgSwnGsZPCkhHeWM1h3hG+zEW7sWGeKt5XBng3Mtoy1Y0QrI/L1K0z5kSO4m9yA+7E1xYbsdI9/A2sYINFIfLhvjw8Qm69sNZubb5Xu+j8=</diagram></mxfile>
|
2110.06553/main_diagram/main_diagram.pdf
ADDED
|
Binary file (20.3 kB). View file
|
|
|
2110.06553/paper_text/intro_method.md
ADDED
|
@@ -0,0 +1,73 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Method
|
| 2 |
+
|
| 3 |
+
EEG-based emotion recognition is to classify the emotion states according to the EEG signal. As illustrated in Fig. [1](#fig: overview){reference-type="ref" reference="fig: overview"} the overview of the proposed EEG emotion transformers consists of three sub-modules which are feature preparing and re-organization, the innovative self-attention block and classification loss. The four gray blue blocks in Fig. [1](#fig: overview){reference-type="ref" reference="fig: overview"} are our contributions in this paper.
|
| 4 |
+
|
| 5 |
+
<figure id="fig: overview" data-latex-placement="ht">
|
| 6 |
+
<embed src="overview.drawio.drawio.pdf" />
|
| 7 |
+
<figcaption>Overview of EEG Emotion Recognition Transformer (EeT) </figcaption>
|
| 8 |
+
</figure>
|
| 9 |
+
|
| 10 |
+
The EEG emotion classification problem is to learn a mapping function $F$ that maps the EEG input to the corresponding emotion labels:
|
| 11 |
+
|
| 12 |
+
$$\begin{equation}
|
| 13 |
+
Y=F(X^{'})
|
| 14 |
+
\label{DKL}
|
| 15 |
+
\end{equation}$$ where $X^{'}$ denotes the representation of EEG signals. $F$ denotes the mapping function i.e., neural network transformations. $Y \in \{y_1, y_2, ... , y_n\}$ denotes the emotion classification labels. In this paper, the classification cross entropy is adopted as loss function, which is defined as the formula: $$\begin{equation}
|
| 16 |
+
L=-\sum_{c=1}^{C}y_{c}log(y^{'}_{c})
|
| 17 |
+
\label{DKL}
|
| 18 |
+
\end{equation}$$ where $L$ denotes the loss function of the task of EEG emotion recognition, $C$ denotes number of emotion classes, $y_{c}$ is the ground truth emotion label and $y^{'}_{c}$ is the predictors of neural networks.
|
| 19 |
+
|
| 20 |
+
Since the EEG signal is collected by a spherical EEG cap, we use a feature representation organization method to organize the signals into 4D matrices to keep as much spatial structure information as possible. We define $X=(E_{1},E_{2},...,E_{T})\in R^{C\times T}$ as an EEG sample collected in $T$ time stamps, where $C$ is the number of electrodes. $E_{t}$ denotes the EEG signal of $C$ electrodes collected at time stamp $t$. Here, we use the DE features denoted as $F_{t}=(D_{1},D_{2},...,D_{S})\in R^{C\times S}$ from $E_{t}$ as described in \[32\]. We set ${\delta[1-3Hz],\theta[4-7Hz],\alpha[8-13Hz], \beta[14-30Hz],\gamma[31-50Hz]}$ as the spectral band set $S$. To explore the interactions among spatial and temporal dimensions, we re-organize $F_{t}$ of the sample $X$ into 4D EEG representation. Specifically, the $s$th band feature $D_{s}$ from $C$ channels is transformed into a 2D map $D_{s}^{'}\in R^{V\times H}$. In other words, we reshape the 1D tensor $D_s \in R^{C}$ into 2D tensor $D_{s}^{'}\in R^{V\times H}$ $(C \leqslant (V \times H))$. When we do the same operation in each band, $F_{t}$ will be transformed into a 3D map $F_{t}^{'}=(D_{1}^{'},D_{2}^{'},...D_{S}^{'})\in R^{S\times V\times H}$. Finally we stack all the transformed 3D feature maps along the temporal dimension to get the 4D EEG representation $X^{'}=(F_{1}^{'},F_{2}^{'},...,F_{T}^{'}) \in R^{T \times S\times V\times H}$.
|
| 21 |
+
|
| 22 |
+
We divided the EEG feature of each second $D_i (i=1,2,3..S)$ into $G$ non-overlapping regions, just like the different brain regions in neuroscience. Here we regroup the $C$ EEG electrodes in the $V \times H$ matrices into region sequences, the size of each divided region is $P \times P$, so we get $G=VH/P^{2}$ regions. Each region is flatten into a vector $I(x)_{(p,t)}\in \mathbb{R}^{5P^{2}}$ with $p=1,2...,G$ representing spatial layout of EEG electrodes and $t=1,2,...T$ denoting the index over seconds. Then we linearly map each region $I(x)_{(p,t)}$ into a latent vector $z_{(p,t)}^{(0)}\in \mathbb{R}^{D}$ by means of learnable matrix $M\in\mathbb{R}^{D\times 5P^{2}}$: $$\begin{equation}
|
| 23 |
+
z_{(p,t)}^{(0)}=MI(x)_{(p,t)}+e_{(p,t)}^{pos}
|
| 24 |
+
\label{DKL}
|
| 25 |
+
\end{equation}$$
|
| 26 |
+
|
| 27 |
+
where $e_{(p,t)}^{pos}\in\mathbb{R}^{D}$ stands for a positional embedding added to encode the spatiotemporal position of each region. The resulting sequence of embedding vectors $z_{(p,t)}^{(0)}$ stands for the input to the next layer of the self-attention block. Note that $z^i$ is output of the $ith$ layer in self-attention block. $p=1,...G$ and $t=1,...,T$ are the spatial locations and indexes over time slices respectively.
|
| 28 |
+
|
| 29 |
+
Our Transformer consists of $L$ encoding blocks. At each block $l$, a query/key/value vector is computed for each region from the representation $z_{(p,t)}^{(l-1)}$ encoded by the preceding block: $$\begin{equation}
|
| 30 |
+
q_{(p,t)}^{(l,a)}=W_{Q}^{(l,a)}z_{(p,t)}^{(l-1)}\in\mathbb{R}^{D_{h}}
|
| 31 |
+
\end{equation}$$ $$\begin{equation}
|
| 32 |
+
k_{(p,t)}^{(l,a)}=W_{K}^{(l,a)}z_{(p,t)}^{(l-1)}\in\mathbb{R}^{D_{h}}
|
| 33 |
+
\end{equation}$$ $$\begin{equation}
|
| 34 |
+
v_{(p,t)}^{(l,a)}=W_{V}^{(l,a)}z_{(p,t)}^{(l-1)}\in\mathbb{R}^{D_{h}}
|
| 35 |
+
\end{equation}$$ $z_{(p,t)}^{(l-1)}$ need to do layer normalization before above operations. $a=1,2...,A$ is an index over multiple attention heads and $A$ denotes the total number of attention heads. The latent dimensionality for each attention head is set to $D_{h}=D/A$.
|
| 36 |
+
|
| 37 |
+
The variants of self-attention block include spatial attention (S), time attention (T), time attention after spatial attention (S-T), and spatial attention joint time attention (S+T). The spatial attention is to learn the spatial structure information. The time attention is to learn the different contributions of different time slices. The time attention after spatial attention is the concatenation of the two operations. The spatial attention joint time attention is to do the two attentions simultaneously.
|
| 38 |
+
|
| 39 |
+
In the case of spatial attention, the self-attention weights $\alpha_{(p,t)}^{(a,l)} \in \mathbb{R}^{N+1}$ for query brain region $(p,t)$ are given by: $$\begin{equation}
|
| 40 |
+
{\alpha_{(p,t)}^{(l,a)space}}=\sigma\left ( \frac{{q_{(p,t)}^{(l,a)}}^T}{\sqrt D_{h}} \right ) \cdot \left [ k_{(0,0)}^{(l,a)}\left \{ k_{({p}', {t}')}^{(l,a)}\right \}_{\begin{matrix}
|
| 41 |
+
{p}'=1,...,N
|
| 42 |
+
\end{matrix}}\right ],
|
| 43 |
+
\label{spatial}
|
| 44 |
+
\end{equation}$$ where ${p}'$ denotes the index of the brain regions. $\sigma$ denotes the softmax activation function. The formula is to calculate the contribution of different brain regions to emotion recognition at one specific time.
|
| 45 |
+
|
| 46 |
+
For the temporal attention, the self-attention weights $\alpha_{(p,t)}^{(l,a)} \in \mathbb{R}^{T+1}$ for query brain region $(p,t)$ are given by: $$\begin{equation}
|
| 47 |
+
{\alpha_{(p,t)}^{(l,a)}}=\sigma \left ( \frac{{q_{(p,t)}^{(l,a)}}^T}{\sqrt D_{h}} \right ) \cdot \left [ k_{(0,0)}^{(l,a)}\left \{ k_{({p}', {t}')}^{(l,a)}\right \}_{\begin{matrix} {t}'=1,...,T
|
| 48 |
+
\end{matrix}}\right ],
|
| 49 |
+
\label{temporal}
|
| 50 |
+
\end{equation}$$ where ${t}'$ denotes the index of the time slots. It is to estimate the weights of different time slots of the same EEG electrode to a specific emotion recognition.
|
| 51 |
+
|
| 52 |
+
Spatial-Temporal (S-T) attention is to do spatial and temporal attention one-by-one. Firstly, the spatial self-attention weights are calculated as Eq. [\[spatial\]](#spatial){reference-type="ref" reference="spatial"}. Then the output of the previous layer is recalibrated. After that, the temporal attention weights are learned via Eq. [\[temporal\]](#temporal){reference-type="ref" reference="temporal"} from the output of the spatial attention layer.
|
| 53 |
+
|
| 54 |
+
The spatial+temporal (S+T) attention is to do spatial and temporal attention simultaneously. The self-attention weights $\alpha_{(p,t)}^{(a,l)} \in \mathbb{R}^{NT+1}$ for query brain region $(p,t)$ are given by: $$\begin{equation}
|
| 55 |
+
{\alpha_{(p,t)}^{(l,a)}}=\sigma \left ( \frac{{q_{(p,t)}^{(l,a)}}^T}{\sqrt D_{h}} \right ) \cdot \left [ k_{(0,0)}^{(l,a)}\left \{ k_{({p}', {t}')}^{(l,a)}\right \}_{\begin{matrix}
|
| 56 |
+
{p}'=1,...,N \\ {t}'=1,...,T
|
| 57 |
+
\end{matrix}}\right ],
|
| 58 |
+
\label{DKL}
|
| 59 |
+
\end{equation}$$ Different from S-T Attention, which regards space and time separately, the S+T attention considers contribution of the spatial and temporal dimensions simultaneously.
|
| 60 |
+
|
| 61 |
+
The encoding $z_{(p,t)}^{(l)}$ at block $l$ is obtained by first computing the weighted sum of value vectors using self-attention coefficients from each attention head: $$\begin{equation}
|
| 62 |
+
{s_{(p,t)}^{(l,a)}}= \alpha _{(p,t),(0,0)}^{(l,a)} v_{(0,0)}^{(l,a)} + \sum_{{p}'=1}^{N} \sum_{{t}'=1}^{T}\alpha _{(p,t),({p}',{t}')}^{(l,a)}v_{({p}',{t}')}^{(l,a)},
|
| 63 |
+
\end{equation}$$ Then, the concatenation of these vectors from all heads is projected and passed through a multi-layer perceptron (MLP), using residual connections after each operation: $$\begin{equation}
|
| 64 |
+
{{z}'}_{(p,t)}^{l} = {W_{O}^{(l-1)}} \begin{bmatrix}
|
| 65 |
+
s_{(p,t)}^{(l,1)}\\ .
|
| 66 |
+
\\ .
|
| 67 |
+
\\ .
|
| 68 |
+
\\ s_{(p,t)}^{(l,A )}
|
| 69 |
+
|
| 70 |
+
\end{bmatrix} + z_{(p,t)}^{(l-1)},
|
| 71 |
+
\end{equation}$$ where $W_{O}$ is a learnable projection matrix applied to the concatenation of the per-head outputs $s_{(p,t)}^{(l,a)}$. $$\begin{equation}
|
| 72 |
+
z_{(p,t)}^{l} = MLP({{z}'}_{(p,t)}^{l}) + {{{z}'}_{(p,t)}}^{l}
|
| 73 |
+
\end{equation}$$ The ${{z}'}_{(p,t)}^{l}$ goes through the MLP layer to get the output of $lth$ layer.
|
2112.01525/main_diagram/main_diagram.drawio
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
<mxfile host="app.diagrams.net" modified="2021-11-17T07:53:55.597Z" agent="5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/95.0.4638.69 Safari/537.36" etag="OuHAF0SBwd7ESS5k7fbG" version="15.5.9"><diagram id="9hfVrF9we89Rz6WZRjqa" name="Page-1">7L3XtqRGtjb6ND3GORfSwJtLSHziEkjcHd57z9MfWKtKqpKqu9V7t/T33udfqqqVGUBEMM03TcwI/Q1+NDs/Bn2udHFS/w0C4v1vMPM3CIIAmLh+3S3HZwtKkJ8N2VjEn03grw1mcSZfGoEvrUsRJ9N3N85dV89F/31j1LVtEs3ftQXj2G3f35Z29fej9kH2ZUTg1wYzCurkd7c5RTznn60E+s3dQlJk+deRQeDLlSb4evOXhikP4m77pglm/wY/xq6bPz81+yOpb+J9pcvnc9zfufrLxMaknf/IAxvhccL8fkssvYrbGnSZNP8EIl/YsQb18uWVv0x3Pr7S4Jp5f39M62Snbpr+DaaTNv7ykYnqYJqK6GrM56a+GsDr49gtbZzcQwPXt7So60dXd+NHhzCDsCCHXu3TPHZV8vVK27XJR2Mwzl87/9L2Oackzn7Lll/f/kvT1C1jlPyjV0bBX6h/iW3SNck8HteT26/8/cre/BvWfm0bkzqYi/X7iQRfxCz7pbtfRtC74poiBHxVia+S/UUhcAz8Gce+7+XzHb48+C1Lf9MXjJM/k9/8oOhvuoa+7/cibJbMv+v3+vANIX5t+hCgf0WYvpLoHwnTmoxzcemXHIRJrXdTMRdde10Ku3numovVX2+g6iK7L8xd/71ofRXHZs9uzPk5DC7p+3n80P5fJQ36VqBuKfuqvh9dfOIMdMtmccnphwyh/6qgfnnZa77J/o/F8vfC9lUaCPh7lgFf8PIbYQR/JIzQ35e779j6r/MQ/OOAMC0NW9cXFN/U6JOxuOaQ3ARKPlv1X5voLS/mxOyDD93cLsZ9z9QwiKrsAzS0Za6Lm74f7b9hCIvd//1JtEfxP0T7r21/Au2h/zYYfxHNfw8S/3HU/USW/1zU/QnEv4ddAvgNF/8w5hK/xe8/BrIXg4Ljm9v6+4bpH9gJgvjhhH8VsM8e/80IDv9OAh9dWy5ZMCdXs9qNTVBf4BlfX5QkaH8nnpc6zr+B6x9J1rdi+KUp+IL30SVbH4jxW0PQFHF8D/NDKPleyv8d6IB8jw7oV0n8RmC/suhbgYX/NHT4Otp36IDVN7njYv2ODdiw3G4lfXPjpy+EpW4v+ZO2v1y/PmVffn/0k3YfIpgG0ff95Em9Jjczvn30B+Pdz//0aVzv4WCs338/2AXw/TJ/HfKixeeo38/kav54qa+tPxazf2JU/g1igJHfqyEOo78TA/gHuPWniQEEIv9tI/Gf5LH/H7MIxPfO8n/ZIFwd/YxA3wvJb12EP9nxhkDsn8vEf5Ljjf4Tv/vBUBDN/j1xG7s5+DL5n8h/E9zDKPwzjn/HRQxBf4bIP+QSogDwM4L+aRr/vwT4xfZ/Du7/BP3GYcTx39v/vxT4QewPKPkdQvV/9/W/JMmC8OvtwL9KFgj+jVdEgr9XEOJHQdOfFzVh+D+nyz8RmGDqP5OHabHfJvC3eMRxLPZ4/FHJ+kfs+4fmDvxhuPmnUQ5C/kC+5ivoF81HppT+p2akvi/Qv4Tz31Ax/fj5u5bmd0z4GJL62gp8bbk+x8EcXEjz+RXi+jb7G/QobFozNuDJZx11/ajmO2ff2fVJBK5/aOtBedfvRxF0FnZ9kH2gZl+2gbgLFJWxUBk5R5XbltFRRr07ShEpV6S4J8UI1DPOvOZrOyVDa/AcRhx33x1ZkRFX2/gBetjaGgMXBWUa4BZ6YTadpgMdaCcbh
whzISgdxdbIH62tTaEWvZ/k2vOIEy8KCnsaRGxH7cyt9qwTQEnbEsET9nq3i3Dc7YPSqs6UkjuTIvyw6rPVcBqCF/bqZUL3qIfoliAknnuwRVz61kx2OGVdsslVz3hEdg3A9qkHr+H39iXJVa/wyjVOpBzgFN0kPDgkvX5p6zaElno01hRwTQw52v7ersHF+x+92yTiTJlLkOn7e8sos8xwa6SKjU7pObTBwVzbjDed2rFDQ8PtvGqFAEjoI85g12DApDjkFfpumssqA6RydzeywnRLHNRZRI8EebIt0+vZc7gucYJFzSH8WEkoYGtLKTP7eSTcC5KVc5rioc8jo7xpU1ItEybjE7v7E65phQK/EkWfVxTfF44Kjk7utWqdT7Z6Gx6lZOu2lfB7DBopmculph1Oa4ocphf7aoyuv4W+4UQcda4e25ip7WLAdSmvUihiTp1Qmi+pgijtWUQqMcEYMGxRebLzuTUKAjhai72QDLBkD45AfTjHm7jVNiPK/SHfTt18yC9ICfytYdZzqE6W0ERYOO6Br9nEmm57kWhLyRttdJ/bYXaHtKNwL7tKB26+g/sTV6FFHZTG2F4OvQXVeT8o2gdPbGwJY3tUNgQFE1QQnx4yHiEsgXvulGjqRAri9tgTm3u7UlLOW5+DEntQdeNI12gV7RhbYMO8o+g7LAjUbN588sQ5cM3+DIS7nx6xbIiIST+fz14WqsySFNYHR0EnZyWJpmW1/B1cqe09QnDNW33ZaVxk0ssx6epyUyHahFJgRw1W3AFXzxwrmMmhtf1mfF3t4KVTlVW6BFqqWTYWKDrKT8UPEKY29o0vcg+KMC3pnJzYt81GnyMxsitdKuF04+I1gBuc2plDk754ucQ2imNguKMXyrCr5f2q7VBNiXT7eNzKLDOSEx3ylh9H64VxNZVQkDZEu+l6hnBtEWoyGDfoBtbkqbvHE1GQcMDNKLDzVNcjKZG9W4OWp04h71vP8vdcW0aRPJ7bxJcOH3uet8h0Ek5qPCq4JpghqPihEhBIc5qcBHaQNHgbQ5tr2CRsdqvZG+SWZpLpzvExIg4lGhrSCzEi9Zkmx+iim+fi2Esx+YX0oZ7HoktGYxcULck6d0cAkLwuLAk+xwDhAu96Ng3eZ7nxz9Za2P6Cf9qHEv9Aj9mqi2MGEdBikPXxdDjmsYx9Tyb8u9DiXgR9KzTZCYShWvEApkBBjL4UhUYcYRQPQ0r5waNiY6QWgeamNRiW9mpxWSSLzB6GPxSzpZ/ypFiUoVYiEcdfZ8LPe7NIsOs/aMmkimWW2fdmO/H1BI/kJTZb7jx02HSFHJObM7WXWn3jzGGueXsncOUEbmhys7z2Gv7SZC4MtAlIOU6apQ3w0ggg44a/UWPbL1not0N25r2N7tfYoWdLyeP9lOoEFx/4+ljCDJxOotmUG3tnNLG84AxR3bu+iWcMcE1+Pe1rAov59eggrjU7XrGCPbre0EF1ObiT0kvcSFqxdFGQoroypuMFcRf8gHw57xpvB+AiQbhdSGNsWnwU2pKvjyfRvnRn21BkGa0BTo8VcmHuYj53tJOhCe9rQNnB3bJ3vaKHFbyK7bcVWRC6AaRpL3gWH2BQz50MXNTjMNrJxJCLjQ6PrPwyLmERL6BXBHCFvhp8acwCH0/KDFtp3Bf/ljcjZfyTdJtEUWMjnxLvCdxv7fiwr3hYvHsy32xusAhctHqyQtCJUzXpeUE71Z+7dD6SOcoJJ3gCp+yg5/UQFN9MarqQc/06hXsRqG4Ic9bNHV0x5ZpivozQEkEWr7ZRQO/+wO82LCUVqPENcvl23K5N0Q44AYZsskPu/hQBcX53etmoNq5v34cNTFe/X0DftVFQYq5ZFuUBrpETCqrg3d2OfH69hY0RItRu6hxSZTF7Sahf5gq87e+kKe6zqdM3CWwTUlckZHgy3LLcEyIh53mgD2xpdQbe1vPgu9E5kfw1BiBOd/WuNesedLLGta7gJ917NLQYibVXtmi3b
a26HZaECtVAvI52sgFvaS3FTnmhH27Bq35u9CRhNA9zorQCBYyb/nXLcF0kpglueak+X1WlITPmQ7f1nQ54KiX9fAzxBqx6hbMbHkQ3/l+PPSO7tZPXk4+uQIojzsA4TEmayeyjH8OZVrV9nO24qVVuBYSav56hMOPF5Wu24UNQlCNS4IDqSW9iR3jdNehs5NIFI8nR89HUdOu2pnJz4WGLEj4rUBBI9bPHvwqcJF/XmH4wrqLp7aHNn7IfPlFBeB5Ssbjwm6CDFY7YpTpvM9vAJrJy6zVrGr/Rs2NDcVOKU4Unq8P1Ex+2eKCsJ6CKKtkV2Hu2cmTd8RGX9XUYSe6yl7Q5htCZ7Hcn55i2BpuVdDp0lZsmwsBGdXTj1BPUF8vxbxkvGeryIOBk18qbEXqfh742XebWDV6PPQXPrGlIReLNZK+nGMsGMa6652gG7+vpl/fa5ev30F2Pmtze9eg0lmq1HPY+Gor6de5pcnsano4a97zCp8sSlUVtSdXT+6vVpyaiQyjeNLRXLQhrV7uVHxcwEiSSQ6W6uZLCHW54uYPzPJOh6xReyDXh7LU++GS6rL28JMqhIrEDb3Ua9l5ri02ZV73suCElzSo8osoefa8ekn1QeRtTPRySmuvmCiY4TQM9p748BxkfNbowZN6aJTJT68tkkHEqDWk08iEQHj09onMKLPHyPrsrsG/v97ZSKsJxO/dtQaRYkL2knG72y1U3JReHBFASRl3VxDStVdke0mnenEYtG6Xe0AX2HUzzfTDeIYFNW43M+50YiipTneoIi9sl21qO1lSIi9ck9trLY6IlCR1VyF4Yx29AO4JJmDuEdVI1rX6/axrbGssPJH9Axy4XcF43PkTflUo9J4Aao2fbvdkRQgGAQ0zrBpPMIeYKpCp9uxsOFYyNcJA6jyfIpGD2yEKWc9mXxURKnCAhUHdKmElOshEK7rGGl9GP0MOGKyRoC+IAIm+SY1RqUINkhzZ81+iAhlCo9GAQec0bZ7x1XDVjAWbTBvi25PLU2SF0BPFXy2E6n9PQaFoqmg39FRw849tSuK0IB0kXHLKxF9u6Sh1ArkYFSsvuWX5HsD6B3Z4/n4fukJjU7QcbgOSpbghAa1die6yBLobBvR2HoABNeEOkHFZqahBvT7tJ2irTF0kB5Rtq8uf1EXAp+CDC05SgytQSWgNuz4+D28mO0SfPoo6OorcWmL5+hUdXRL1iMfFckOo0CY8YBQIY0p69OmZm6fahfbzO4UWioJVN5X1VxTYwRBR57H3IM1462pGQ97cxrQY3WJXXIb8QfVadVoHVGxlV1p9JEAi4OelRkVmTWQLuGSX8Hgg0WcQ9W7aZ6Y/lC2LwaxaSoFPVmbL5pINS0/ke6+PPF9eaOE2DMFcZ6HpHVeQdUNnbhD2nS1YvWLo8UP2srv5coTD7hZP4PEl6vkaqy8hc1AjawdivUDFUZW//hNlbUEnacnqGG1OgCJ6x18SzWS2prAGospf2nA9vRD5LVIrgDdDZ1rqefFgqEQubxMJldVqCQWiWKoI6kG8p874xRWL0neRf7fGwnBsSAS+A4iHSkpa9RMTxAdySxOZ+A3jAolZMQ7y8ZRIzJssi5WucxjqZXbott1JZlnxeY+kXbVTF4G+nqn+8cN8D3timv953WuHiU0s0d+QBC+5xIcBk9UO/l+bigWOC1BTs9zduYC50cZyTmhm5+HhxWmtJ94p/CiiIGaYTdKam2ShJJ+Hx0IjPKJGrXuhHzwXUxQP+up3t0mbu9AUd0soTVJJH3I8S/oLKpIkM2CLyQBBurQQRba+sJwjmIZ9KCJbFme9e7XiLLkBwm7apGQjNqH07VTyYqi1ssHXGb+NqL3uYTSdZheAMvSOmARhL/aLy+rql19buMOyD2iSDSm/kJD8oV9XW513GjTmo3MCQpY922bxsvryTCrxgcI5gTJ5r1AlFRwHPAQm4z9f3zmt8PHTBiyI9E6fJ876faT/uP3xXXUPBB
gKHXCgm2xS9zIjHdt+ixKztxfx1nScPiqEut/P6qzxTcmDv687HdU5d/daAPVeqKX7PY95GlJZeLte9DCHwSQfVx3gUW3NWZS6v5s9KV33ph/z5++UTiPj5a/Lom4TW1+zgd7ly+OevhRb//lwg/vtyNrVr73qS4IrAASqai/Vz7eB/7cI1AoC/4Q0B/z51TaJ/Zeqa+AOJxr80RftPFoNI5PvcP/h7Av7VuVroD2S5/+zs/08g8v2iyA/T/z8kDPJnqTwE/SXLIv+IKf9ROX3g9xU8/4GrIV+49h9FOfgPaNj/gtUQ/nV/P76shgyqe6dNro8c+f5lPSQsXaEy6AdnvYIX1byo55viK+rpUtdX3qV6mLe3nqL8FzW8KIte8RDHyKVVS9yA3slsgukCnbNCwiYExqVppi9CZ+4cYJpxK6uJyHgH4vTgCxqKI9ntzI7nbieavDexLogwl3itvOOkfieDFYaLFOshpeEKtQQklOmdjob320vTVmNy7+eR6rr19m+JQD+d27scMwLvsqpNy5XHCcBmFUwfJMzWL+/LJMInMICVMguSQ9EPIyzrO9B81pyqAgq3hQt/98+dAaDauGbkluCYj3d+36SKuPN+5vQyPrq3jfYeGQu0c9HL9OGjSZpwnwZdAeYrbOHKy32z1pZmq+ZSDppJ8zrvntebjDx6Hu98wfWifz27bRvsu289qe8XiX0xcoYFoFpzWmzVAdg2qxZBgI48Qe7oQ9xPDjifvdNZH8mTGrOpbR4vF3QOhVHcbDVRBKkeXuNkxa6NBw5zpy0wE5c/FjistsgN9/bChRoRJQA1u8TbXxhhEfBMvZogQJaGMd3p6cFlMFl6r0Wz5Fmc5cugrKHxRUT9IdwZ0g2enTp4gpyrL2a1huEQkCa4YZ7ZaPKmj/6BQGHqBlgn+5rtjUi/rc2isMU7rP0h95NVnEeuINMnZhmRszvBe9xHocLtoEJHwRXx9v1SXaTB2SzrYudBXLEHazZgBbX2q0f9JpdKKe1W8Q26OcmS0HIndEU5Q7ZKAyNjz9+gVa8xysHMyiNvp1YtrwhqK5bJjYewEjXMNneAmXks7/P9hjppcAtgVwO8ZcT7FcuHiXtreoBlPkPVq2JSDXsnRVPT4vl0ILMu3CDmtebdI3YisJUjka/7weMBdFpda0+rcqz24iQakPtsRhoR5sZkXm8CRG17kiOm+zEx31KrBLPJvatRWLJo6XxMmC7uag9zqenzcmaN7tbxJkIaZLxu3VGjrhkcIHLdfbPlWJhTKz5RnBYM31jHYQ9REeit4w1ta9xyWuJmIEtu8NPD5yYemqQES2agsPVljI0ZBSUTs0susB1aFzuHRwCqKjwacrGCz+8RBL0NK7SlJY3tPKVEfuUpK4FKlDzX+IHhbs2rhtHnt17urqXhsxzly/VtFo2RlDgQYyPY1O+Eg03EvqNjLR12jva6Ytsa6p/xM7+X56w7JM36QWZbTXoeblGJ9rGwV2iWigTUS8Ho1oc0N2Z7LPHCDzJs05h8qtapx+JLvkj1zXWOb2XwRT6MfM3WwD6VJqCQc7zMJPcYxnPxEXKgx1GNjPGpY3DeFLi2S6dUFJoAVWJ8BspsJMO+YI3oDoyW/3oN3l8bWGMd3657XpZmUKkdsGv7jnGdCVIRjK8z6JeJHziIvPjSOtUORGJvFRPUspt3GefIjC1hOH/cAJgXUnLf1ANqYD10TLVf8ziPD83mAXdBNV6GMJAJ/JoJyHZYYlZ8QnYUIo14BY/VeONAF+n7ai7ywrs6mAF5FgLVWqkj3liNrNVSUsDQ4e7GwO5z9kKq7npGAb2E4eo0mHLe0hgTUpuX3sQM32PuRAX5ZKc2q1VoXDdaAFltwncigffqyISsmj7XyRnEDR1EIdv8S8wAlQliJm4xkCz9SvG6kVxZ2HJYc8JGxhQCshZ7uXSOCNxUM3y/B99PXlCAVBP5fJ3FkgDF84Vh75KPGaG3ibPzgHcBcsNi2+rbA
uYR0Nhr3iT5bhN0fWeqZ6MCufa41zV7WAvcIPqIZcEmdmeKckegd7Uxx2JlC8avbMheDkHCROei1L0CtsNYh0/DBuWh461kjNZsHARx3Wrki6R9QElqZ29f+1CuIj6NWJfzi0L61OsDeKcJrXLUZeCU1HuSbmP/sleVq7TtwcCsd9FDacYpVIqy+ki0VGz41JOLQYXcv2g98JeBukWRsqznnRK3sATVHf8ZARaGwYkZzM+USjmkyMdxeGPeneRWNdV5sRioMryY7LsS8ZEhSQ93mM7ZpsvnnSFj7zTX+ZhIxh9xqe81s5d0psBZ19/C9hKEEUyKAn2Unc6r4AUGr3spIscDLzMj9FFRDHx3I0pXK1QM2CUCt25CtZxC0ftyAtN3iZF5VQbKEHuq70/HG49qdFMD//obllpengda3s7Fc6oxFuJd5/Y8aBow7BwIeQ7w3YxyeTCP+bzw23lO3Zks4o+siPaR9bgMWASRU+wafQghlMZQtw+hMZv8kezYP3ryrjtCWKSUL7mTrb/n2d1uzsW9+443RDaxEOdR86a+jHtEPNrKzaU4744bXh8+EmW+bc14og9PFG8f7U9xcL/2831Z8ddteN+4vzjygwj9z4ui/sA+wD87vET/UHD58w8yF7/EpX8CYX5UfPlXhZfkHwmSfkiQP6/cjoT+ZwSYxH9egPkHROl/S4BJvz4DTGZ9xMx6F+E11a/hJY4LlUoz7HflduK7pICMVjrqLVKqSFlP6jKLzEsGNiNcYDCWvfKG/zXi7lUAuIdjEYEnIrrxHlzX5vQXBq5bKioxC9WkdpBug4pYn2s1wxBVoyPbPipFqSZmHPHYWoWoljTzn+u9FiCs2720I4bO+c6DPRh1SW6RnWWXjGfRFqgPwlLfuAAcL4Z/jls6PYFmdC5rLdFJqikZpzzOVgGuLrtgAAfFSzcMEblcM9717/pL99gmekiINs/Nsumx5nunQU7NPW+vhQukiELCu7iBA9+HHa92tr/ouOqPquvdERgNueIZZUUXrDCv96Rv74If8wci3n7JZS1lqkTSSJib3jBXOHcKl9L4j2GVZo3NjI9H0UFIhSZ1PCMJbnnb/pNI+lgMOmsVx5j0j3WbcF7aKA6RIJU911uEaTyrAda+ixPEezkJpiLsMYD+vd6H9qTbJ7SDY2eT7pHTtd6uX/PYx5x4o0FCc9UdugO1PRwoflrYahFmD4Efq32mN9FpDhKKeq9lztJdlkXX20t+9+50bq+EuMO68eLAQObxzdvm9dyCu8RCj3hoRF+ptb66+QWAoPvcvuv/XqG+703Y8vLFhmn7qEF5Ar07XIFMeK9f19pyOzuc5MiXyyUBgsKXq3XctC3nTJxkkj4q715A547JVRJOBO50w9dnqIuiZ85/zPHFXwQYRMekX61q2NJ+5tHInUJuqvRapLlpFxvb0a3/xi0r8qBS4RJ2WU+UVN0rlvGkJ3H4b7kJ+QqTfVZPyoHblI0FI+5lCOrztduIe95OMGpXj1jN/V9HUOback+gfkZ2sU6xpmH6W95W+DksfcXAYLVFtUU4e3kvxDFZl5FkbFtVOeyC62Ju9G50uH4R9eujbOLSdE4WS+8NEqQUVBYE8i0M2hPRK4J0Kw033XVRyIY8RT117TcqYpMB3qmOGuakkkTfuvQW9EXUj0OEODXVTadnydnoSdIO9adFH7a+h7x+RyLq40UeS3GMKFZf7+bufB1uVbikgb42Qu4FiGtcWjM7ODWtaPnoUTM+tdmFD2ab1CiclmExx3hzHGKvL+16Iq2z4ihNOPwiQa5tSXfNGvGUtegcllcdWaurrv0g1a/Fz+PPJwKtBBkfBZ54e+v0yT3Q5tbB91rEHaa957V6hUuCHJSrsHrttb4h3fWREnRWmNlA1l3wAWJlp0YbddgFWXNf3qJ7yrYqjNaovdCDurBEbjzXTp/mNblShDDhTjgZSHyrldC22TbYRDnAHrl/1P+mVvyCAV0ZmMy9E1wTm
L01bgStndRdfIsIRqfxNtzgPUUYsFAE/C6z7J7VRaE08qQrMNXvVV6hEEJS29VVgBDES2VHmS589ZS7HJZ+apI/5vQQRJJA32EpTjB8IJO3SkIecDnjNP46bLSxvaci6hSarIi0nczN8Q1zdFLuLozgJsB47mHrOQMhcfLxrJC7xnaTsGJ6vZ/dnbArF33jLoWpYnM7NS49KBUJo3uxOBOM+9dQqGovMGZzzeUxnUwjoqaDPErmRkj8FLz3fgweuIyuLVEhTVCopzsSpWW6twlEM4w8YRQemJC2QJF9VonWQpfc433FPZvrKWk5xGt48Jhn6T3Rhe3ZXzErgt15m9C21+mmBsE/nwNEW+VAmo7CGXcM5KClzL82pQ1regRpUnHpXRMEh9MUaO1oKEWkF4YzTGQ5VZryoURuGrDfxdhTQc93ym3XIuAdi1LC3RWYiID3V0jMRQpHT/hdxPWGlqJUH+UUChsy4Vks3NUri4jidbI7kB2LFPJWKJf15VKZ1ZkQD7PBgu2NgkCGVmYxlGRmDCF/DxrLEIXZd84w3NPIGupxzfjOgRKb0wtbLHPdCkUlZp9e4dqfpQ0IzJiM43t4ZUQerJcDML/Su6Twztw4kttbCHOB666cDDTX55HU8TN6V3TPxrceF2/L1EREL8tIUw1GTE1qq198dMOsVrtlgPT7c39dvso7YV5hlC3PMfPtA1wEzJdE3IrQQXudAEcDww2vsDPtnNYdoHtLqHraDvriMwilee6cX080lbq2jNgk2Qc+2erYhJ9kaZwzmriHh9Y9qtJ59HwXsQre9dUWLQvgFaqqGQ7y8gYdH7gvWMtt0g7NvXPTzI77Bz6VLfsmqalyh0w5gU7mMD1r8sSYUspdYvOuxeOB6DZFD4TgbTiU04DCYvlVpp371AS7IIQ+6Zs+NZTYwGLEvWtKEzwDH4AYVo/bkXn7J8Z/VK+JZSUwy5bosJ+tDP1O7nnFMDNwmaUTfUgard5ZLP9qlDcaN8wNC6PCAUl1wRYKR3qvP/onS7e7GW+VbI0blgXj2wdLb/J3IUq0QA/9bTEKu8Cq8Kxh4MUDshkRb1pIRVkFXnXaKRjwMRvt+UBdcX/m6Rlkw4M/Cyfs8mfq+WBf0KCPocdHT3AF0hFr2ZK/FGxP5q3TiJcdQjTjUMzOJaDumZXzUDeYaLYQT4ZE93jELy++3rK9XWnX8Q/ab9QuPH3xw1mjbVgu3s6bazxY5AAL0x/Hc1q26UAPXMx0jYKM8nl/X6i12HHDZqUgyKjFRFihi770oXF+3lpNHwEf9fxg0mVNqUF+3pCAYS2BAb+SMKKNBoEpR+/CTFbGTEamGOAgaxuyC++1cF9AtB4jc39Mk20o6YVhd+6pTBFTW3XEjd8JCEKvJcEeDJPpgMBuxO1sPBgN+9ilv+hk+lTF0AhZymtJ03T9KyQstLBQzPaW58DSgJLtNGx4LPK+pI9CPIh3hCrXqwp6TBt1J+eYRa3EcmjUUnxsnXN9hh6OVfQirpsPZoqNT80dwE2JNLKObBzc2MneFVfBZ6NFWPQuqIYvD0RMl2zdZg5WXF9ijiGLE//Nb92LPmdRVIcMt/ZRE0DkMVhEPLyfiBrxn71HIA5eKI1F8WynSe8VvcwrUPTsWnhraPQRzUhnlLMCFwjCJiOvwDlzGVG0J5bx8pbBDUB3BGEQTzBmH88gwxu5UDfEsWI5GtEr3BVdn0ZxDjI2DrocqtuURDcF3opAi0KDX5zydxwnHxt22LwKnVb3USXDtf+8MIeDpIf3kehBP9JCUWOfIbSDMV+vYUYhCsMgiE5NH4U7n4U+5XV9vQKUz6Ichnm014SC/JsRrRDygRjiDv9Ff47OSFvYRIsHkbOZOf0j+8h6ScYbZcdKyrLsI7f0300v/Z3w9pdThn7Gf1+g87t0ytetc7+pz8H+pAgYRP/AWv1fvHf9KzWIn5HPgb6cQYb8jGLEf2sf+1eWwt8f0kT+fpcx+vtDf/7Rzvc/v
skd/M1hJRjyXzxpCgJ/t8n924n/ZQdM/ejsg8+Nw1N/n0byg63J0Se/723JYxb+PxdBrj/X+MA3n/7fv0Gf1VfAx0bmNGiK+vh85uooaPqPizCM/GZL9PdXvu/k193QIHTvhv7u2ucs74vtx5Eq31/evjD9vo58zvPjYp3MczL+dL1qVLTZD5//exu/v738def9fR34ZmofF+cxaKf06vRr923yyw1bN8bfj/7t478e4fTTb2gOIcQvtP5IdX/9jH5D+biY+jr4QvXio2bvV6rUXTB/O6Hfbi3/wcE0n2IRjj/YYf4pK/+nt5jjv8No9AfLABj0o/zun3e8yNee//8E0T8h0M84CPz6A2N/HUb/BP6bMBr+zUtg6O9fAv/GDv2l0A2BP1o2+L/Q/X+h+9E1/fJxutgX3P7kTBtO/Teo/R+M4z+BIP4z9F9GcuTPcrbxP1DP+J90KNA/PY3z75wK9O2Vb44G+jeV04PA927vD2vBf7SOSP551fR/4ASw/w2M/fcds4p+tXa/HNoI/zEm/nna+fX4sf8gP+vPPniZ+MclMSj0z89j/fe4XL8b6as4/KsuF0b8Hbn6qwJi4l9IqPzPPq33dyr8A4f8rz2tl/gX9gv8Faf1/vfU95+f4Pt/1fffHhQh/8Jh2/+DK4KKj4qg88uWkw7dq7sStGHZXyqC4jJuZ63COHkHqQdJ0TNFxcwDpGiSeQAUD2zCi5JfW/yiZoXeVDxN0k7oYO29bqVms6ZOkNgKr/d2ka6KdpyrnQi/6yP8OE3X5kQaU4bCjLkPmRDy8cSJ00s+14k2NPFQgsygbedR2xMdr91qX52cyRbtVTG6F1zMLgOAVeEPmQk5IjRkgoDcAPRxzBDO3OVIJ6Ij9zJTyjOpjqiTYiy28+AaiZAcK+RGzuTCkw1YFRntjVR9sQk69gJPtego6ePoFX1iJE/BhB20jar547MZM1M7hHtJ3VNQnrkXMT0fpWaB1jXQIj+Xbu5dPmB8vsdd21no2fXxABS+uPqFTvOT2yPsUtyVUiRGzJUVvFjn2J09on2/biTdeKD+CQmCLnKrWPhCxz4k7tQCe6QfJwvxYz8XYNFT89XdgyfcAXm3BLRFfoQyQqEmzkEPizEGXhmqtK29OhV7uSot3TrF6CLIkCSgfayGNyw0QuZFNkB0Tpcba4cNGTZkY6Szs7v4WaS7/J3qn7wrKG2TnIyRB90Yrap2nvY1G66hAZ2Rg1mX7iroeJVdAUNMDY/8Si7G84RHdFwYR65KmOUnmBYWkQsXCVevl0FqZXVWgbxV2kQJsUcsX3g4w32gBJTtKPk5sk5hkzeoEysrZRXrPmuk8efi5f3HfdSQ3KheDxgL4PnICy1QD91onDsMvLhGbtLymYq0OPrD22zSlaDT8Fx4tFXflgw0G4kJ+jjnYCLZzsFyjgTxbOmBIqy8QHJjTlN5Eq4WbtZYPh5p3oJLXS5iba0PIbKeQsJq3n2QGa1U27EQ4ZQCoWIV74NMiPTNpaYKeSRccLLDy65HCpMaQrGi5YX8Atxe7TngGlHmMhek9I1N++LemkTT7gFC5Qh6PmRw8rTFu9LF376bvlfjvVQ4pI39PjXUFWsOOo54IKpH1d+162YAV56IpzBMw6BBPURlfErCrSf7IflHCXcP6Jr1XWrQXjxzNCJKJlHl22OKrDVAn1z9cAgjeDo49UnqgoqzZvMfoUvfQsHHEdcirAW7OengWQ6zjxnOmYs26SoFXN0adBZvUhGbjfEUc8ZPHRMCPpZO7yMFRrGlxZCZn4wN+Me5dbuGDglGZcWLzxFepnfPZcuvi9TPMsvsZSMcMquf8F3rPqAIqhTr+65nG1O4OUpRO57eDkk/4E0h3tkV+kxYlXFJ5FMWgLuCIksHMzVY8yL0XuemsYk8Ez0L+1IGaBOomZyWh5bhOrzSrHIIpR62KtnVZQvSw3buxiKxVfNxNk4HkK6bchpzy9Bz5B6yE7OYYD9v6b/1n9LKU
PEFPpd1+C496REp13kaa0q4yUyBrtlwYStFSSjI64t505iDV0JgchRSFGUfIDUVRF+KebJnZZcxGWYwEOFmjzSaxGHFS307rhqhXn5hBph7c3yf63EANw69xcvsKcgltvGkZ9JdIXSXxL0unGAcBk/LrImY0vO3t4Lp+RR+aGAKvQ3XmlEf946JVpeW0XH3loXeq3wMMKMaYnt+tyNL21M66SkSQzi9wkEfUTi3YmEluNCbKqsCE5ja72JmO9ELQoExXgAYxKVPDFjeqIiLI8sJBh8kbG3Nj7kv1A5zsQ4wPjCV9vH3GfOaOZvhWCXqRc7NAVvPU+TJ6GxAMRWy4OLOM+zYSnaClqGyF5IWkNQqQkRKG4CRduJCegyPQMTi8E0jpesswhoQUOimrjI9FWuxD0luQ7Wg2TRRTwugjE1Zn3DygYuUAN21KxMBFA8DYRD5LgoExXWUSVYIwwPWDlOTNYs27lowH0XG2kSbnop9Fde2okSHhUvE9j4VYxNaeeUXycrp9eGOy1hDQ2F1lhOIFmes6cU6Mjo+NMIVWUHlCf6u0LzzcBx5PDzOnW9reORA7hAbv7EPdZCzhD72yRWpUoSdxzJLD8GAQS/JW0cGQ55svd5YorsMomhy97WLJ4gwG1hyCTYfNlHHJUIJw10QV5O40JUkN09aP3Gvk80rNKqCjI1iHrhQLG2V+UlYk/0QB7Q7M3eMzvjhow+puFjkXCjP3xatQ6Qs0tORNqplox8yKDo9Z9gP14AhR56IIWSNLhbj19IXhrCc5/NlOIRTEvUlUDRU6ouQoHS50yEPn9hUOiZrlnMQql15uSAUxrjpqs7B9ppATSqk7KSGND2XQd81XAbTSr8tts4w6Dw1S1YZsiJc8tiYE4GJRULBMlLCYBgtO5X73mXJsKepN8+XikeVOL7JtrS09vTYRo33YHQ2LteWqZ/P5zpBjn7iQZukLwIn/NBrjhy6fmK9TW6NKoPofN7lJsrlhaxOgPnxBcmLqb7vfbGlu4hyVVUfxkvuilsZb4zrJGGYXU+CZgfA+dV+XLdyamPuiQFld4VcuQf3yaE0XsTOvLFPSzYJ9D63JkRG91LCy7jgh53PJJ1DztkT43XlklsLivlG4kL/eGGYIxl2UsFOWJU2LQ/sWIMyJRWBCcvto3ZGhOZMB3N1ohoEMrNYICDkCfSBHln5ebR2rXMmkcc4A6ChWBaE9p0kubZDQhxiGtma92EyuOxcHqCbYR0hUkGbSiSDZ+PDZpnU2KmXf+E4ItqJPSOIc6q4qMSd762vfjKDoXUPILcZE38ol1v/9GDTNs56NG+8xK2a70pbDm8Iaqk0U0JdD8ZnfVsDgHz6qUGgm5f58MhaIf0EVwySHDpDivqJv2SXXpU2p+376bu2cBVDmKBros5B5qCDk3AN3r+sAP58M9qDgpyZHmjs4hmB+WgsdYrSgItlu8XsjB4TA5mE2qUhP/aYssvzfQY+A/WMSXomo/KhlKT9++VgvgeHkmjwRPUxn4cuq6Y6Hw0+hg2LgxefPDCiY8/SHtfYNVG6FI7Jt/2mvWQIbNmGofmSbN2bU/VNFTrwjoTtci7VzRiyUhPY2a109rbHuKvxyVDXrnG/YV2QQbMlopXjWcnWzQ1eNGlZO7cAUuAODlXsrSksWMDuOvecQ3Wly3m6BS0gGkcvA1yxduw4psycI0yf0daMS3+abl+erV4IxJK0L/IQvUTVXSO7R+2EKcqDgUJtUW+n1Xn1H/5Kn5jE+FRohN8zqpjvysY1g/WFnEnC9u5q+g5LeBkcJpbdZgweHZgiFeecTA6MGfMoEC6mFbjbHQbqwLLPu4BXzb4RCdHfWvcFAm7npvEFUuhjxgAnENaJf1whlBO2dxTiMIiCvKGO0VpHytQu1zgaOKTi6XmBwuxtRH+KBL0iFkjQU3qyM8Y9z7UWRgGahcrR32II+qp9V4wiI6AFtbWLYaQ/yePG8yi99z+uS9Y5yisWFtMC9lDbb
y+HMz3uvmWBHwHgvIbgnBjZemFuiPkAPZxswiqTMaluC62Xr3oCPDTS3lxlHH7PqFGpS+sov1p17HF2jsDrfrGHXY0bQTxFdMbVA1euIwPO7eQYAI/pQpM1b4Od14dPVjlOESkpmCTlKUBSro85f8fP0r+99LntZsd+D/KhVdTHZgO6B5KHbsaj/notyXiQUSjG9BU3PSduQo0WV7ZqOfmjv142r4PLmpnOScggCqBdl722lvMw48goJJ65NVi43i5wpz1l5gDFzk8cwsaBItlXcSkDqvFMJ/aoqO8PLBSSy9MinB3w0vdCE7uDv+yqFAwCahDHKQeCeZCwMe18rjqH1WebWIeLLFh4MLJxVeI0FFoapY44dzklgXBXUMt6CResRryHjQx9jJNIoFHNbeg6VzfvHQ+V4YAhYMhrAqXMkflC7kLO2mYNT9HAywGQnIAo+dbYdfSuSfTRBGugVKiP58I1wJKqBKN4rDnBXNuRs11ohzFDdpXiM/t0bsFTlLf3Rp1hJ0cD5Xp0SrZLl14XOgn7Xbk4GE6YXYFA9ljlcwrR0kAtQmtkFJPG937X8L5j6aVIosSKqbZNht+YmMtleEvzJ+vwNaJdthdFyBj8lRps/VGK/oTPIp1Pv66P2qYc7HzkJ5c29ch8SHS7PDoMDr0KWC5eaml9Yr10RCxgPOORFM7zdgROgt6aOTdt+TKgeQwEPjX1HBZzJ2InUkJGpFBHx2uUJkMwgCWO0PgAHWHdCbUzJpBXQ60uDwyBqWSluLf8aJezojg+Gu4pircduG20OHzacJFgfaFvMij0cLapBf7eEDJeDhnmCQUe2vkR4MOUGR70IJO7cvgu6Z0g8PCPh1+D75q/4gDHv/zKqDkc5+me2RN9FPjbvS0EdB+XKVERB3JyAoMgut6BhUBVTBYDh04vYCkWtgY8oUYExE//RrLipd4vu47LMocRPKUbjindhwc0B12PHTU+aYbv62emILIdSWhoJJ2UANH2HnNNvwHg9l7zbfROOfYTXfYZolKQvrX5Z+c8XveOjqmeLrKAoH95CFVBNuId6d4RZB41YHhXSpca0OrsMAkMPmOimtVEe+kAnmwZnrnbcG+UCUX1gLoEt/2aznL3EblTCBl2N4VhMRmQKVxvoLKpzuDF053raL58+celQw+1/tBvVlbvIXfhecHi6mZeOpkjQEr1iSKPc8W9ZjW7Hq0bbDqwmtPadx35N7AhEysicrYylHM7qI/R8G6fRPCYqYI7AwfUAO5KkTKcIK1G1nB/y3VL5oWBe78H5D4bniggFYCwWBRChNOsPNEMw2b1cJ7HeoBeVgsmtjWuXCSCwJBOIJWnm/HLW4bnbEd3OkFSAwiynopf++jlybmnrzla9QBvm/HF0zdA84kqxpmZ98hJEWwfo2Ks5YxQl2UZk4eoeAl4Ncb1ZotuTKOdTYz+Ybv1dOniKO14KKatISYSZlej5k+7KOFDUlmM+OmbX3hF20Wc5yJFPEwlxWQzVv17C0viT0X1FO/Elq/V2iR02KbvbHnh9/Fe6ieJ+iFJVbFu9u7yFJn5XYvgHdMqtqwy17ihEaKU5aiFuPGXj/Yu+BHEVmOxqbHOsXN3DmB7POo7MSfbj6TwvQVcuZwdpy3iGfxlkLLmvZ9PFZPHO3WzaUQ5EZkz7kWWBQJFRipgzSs6QAYxMNMTlqOpe3dSOkDP+5y/FNifY35ohBkc2sYhzR2iCJtyRybgI65q4d7QRLaW/rrPucblZ+BITZkwFemfRHXZwSqJs4o4ZwBWdp45g2FxxaY121t8Gs+YrAsEuTTBBY+VtZiH9hev+tCqblNUK3uH5yuHLazoCjMku6/k0vgjwQDSwpk2fxzzHTmCTdahSgukHEF3Y7wZ+DmJdoDCoRjVTHznncZ6xq5o5QS6NpHGK9D1uwFcwZodle12FZ1cJ3n2PTMKshJHs2PDIUqDxUYc2oEZOfOMSyklXgNVi+eb6ut99bojw1JntIufy
RUuNKQgvsY96mLKSja9SfNihQ/oEKRXQiLbCfN6r9J3zMumxrEhQ037VA+uaMPG7bHVzcZge9VJsDhl4H1M81a+k/s8BYCWtLTtMrHskoMSQDVB6sUQivJylABCHZ6z6Mr97RzSBjFFZGdgNQQ6eUOBDe9KibXVPN5xY8dHE+RDnoh22vFCMREtwIhSpsmeyicVKwaAnYVhjTqsirNUg6mdZ45GSYayLp0AG/0VveABT+JnidhDfh9V84z4dodBjrs3NsXNnrRfZvWxA7DXkJX1/ZYUwX1W9twiiiWCZ5afIzHQjUNo4gq/E2FibU0PrfDvVKD3GtOVtTVz7qw9AFjygZ+PJKNQaw36TPUsf81nd+aSnA4EG1DsVblk2YM1i0igcO6g1zP1Tl7P712stMS/30cEBCfTbyWUnURdoi1aVkB04c55BhJRglD5fl6GZZ+Jzs6IeRUfD0m+50+K7zOTtewB6+9NygF2aIQZPqlQOR4NaKYG6ln1jTaIBG2laQhWqFjZa7ZrT5eJ8YrLNi4E6ER0IPgxd/mAFm+4UNZ3W7APwaUu+w4UaC1OBKCHPNtGyynBAh35/cG9X5GUGhNudRNeSC4eaqcnEzmLPaO1FVkNwh75FBGiJQOqMKruE/JLrl7QNdRE7iMLLmeusI4LmH6ZU6DPZ1UtkwYBPmsDU7LmylEBL6/A0/zpOQ8YTyuRtZemMF+gCMkiTgP5+x2+VxfQ0d32sznS4o04LwSJd6Un1RkzO6Z46DCcM8LLG303PqdXIHvKTFtsOCEbYhcf1s6jDYA5+9SOIUuWccV+eYsNJurMXAoZeLL2Gu/dFK91RGqfkeh4pOqQiISeu5wPUX4IUO/e/t4qeNsDewOy/s7TMNDT6mYzh+tWIb9t9HzsIVzxE8zHJycLt1SeJOlZTnnsR5LMvgYFWt54oI+Ilpg4xsITmzOdvKw+cgfe2d3ZecDhKhJegpdJzw2zvp+OMduAiEPOHbC9z90/Fn7H9Ux66EdP8kruXbEt35r6e0khzZ36PG473w09Gmdy4SMjzkcluVcMTrLrGL2WCDcekY9i4YnhjJ/6Dc/dh0hlpciNlzgjVhQyoBDv2cd8Qq7RE0llulwRpKBWJkB2jOK9KagPvQXhYttSMl1NuHNPxdoyUgpczXw+Xaw5AWI1RC2jJcSL7/ylonnLHcdd5E0pwdZWnDJh3TAzCWHFey8xiaGXiT2CU4m0zjvtjyydY1pEkKy+ecgLF17cixaH9DJYMw1s8YK5uVdDLiFjJU1dTaa7/8erXO3aA6sUiN638tmG0gWAVXlFv0KYHFCmnQbMHi3AJY1N30Zk0dOGrZeMIoW1MZLZ6/EXw4TKtmGXON9W3ds9f+c30oCjVuEmY+Z3//L+rzhLOpe4xD+8OYq3Hk0lxOQ+szAndm1V8kMGn3f+hW6XlcHFM2M8MZF5Cdnpzzm4vbi3Cg1ML1GMC2iV0BU1ZTB/ubu286fSKWQ56zw0PE794d3ZNU6E7dd26xWmY6psucGrLFWhs+uXd3lJJd0LLEYdYCu/nonUgyZDUwozGbgkQ4/HGQicq3eHCMTFfJh4QHBBVYTb5o+pskxe98JZ1YAuwJYdlvLMpAVCJfYb5Y2BeX0WKOW7rbSrfPwIrui/KXaiHYjdFIwTHpCgjqS6cr2XyELxGqxGtwJKHTrqik66AmCS1KIhISlwFm7xmLNqMrxKNhQvyz2+307yOqUABcGCM4XuzcIrOAyOwqitBCXGxZlT8cbY6ANf38ThwJd9BtTjPgutpU0gep07VYglwBiMNaiFSoZQrKuyvKna8vL1OPVULyf8oxxYL77EtBPN+VSDUUxAXMMuVoZtQZpvZSZNXzd9MZhO4Xmsm/Hm2qQ0QJVqLJBlebmO01K+7o8XY6YW7kIw+cWS7/fZ+k9SwUogsvg760ABCEpnCP+Gumm73h09OdWGTObjFOUpk8XudBI/9owXo6itxWAbJJBD0U4Je14u+l5c3
tr4BI9mGg+61+dK3WXt8hD68fQBg2vl2xfqYDFV/G2ukablxUjINn5nG5cTy8KH3w4koLrWFswhGEr4/1V3HVvP4kj0jeYAJi7BZEw2YNiRbHLOTz+Sv7/T9PSZ9azshY2QVOFW6Vap9x+Sxj4My8gRh6sN7FNEYHsv6S73Evfq3RTJh4cYb6yKFo725/0VueLcurcatRmKz+2zJpCPMVDU48ReZZx6lZ1dSuIFYVqPVo2N5TccehAe8szhNxiOypwa0GCYxH9HK6qSEuzcJVVS2uqv3DuHhwSbWM3sJJR6Sd4b6YY9TIDjJg6nI/6tIrn9gRakVJLHNhDxLnhIFdtLHBuT/twzU7cppXzvRdPJouLXcc2Y+LCbw51GXHlgB4znXrYILGHEm9ftlXoA+hFslcJk6g3fzEq8Ha+Q8MTmhJrWk1w2F6tPznJ11J/u3tr4o5GWZc2bzoku9gMGj2qnaVVc6yQgVMdLCc+qqmrtgJFXEWPAn8nr2QimjhSIQznAu4a7Sza5YD/Rt3v4C8o2rnebNFz2JFT+DMkaiyN9uWK0BsRWhCWfPHOmM+DMFal+BMEWXwIuPC/dPMNRiAbnAKgq4Wm6YN/2A8sr8Cs07QnRZPqy4qdKlN511T8eNcAvKCvHAiGPR77d7fymyq6Fe64x3aVTqRvndEHQFgNRcwsE3ksRq293psSWWc5KTbhuo0lLsxzspfLxNrHBKf88i7ddaOfmd8QuvFqSV3ix55Tev0++qcpSW3fMplRzOg5pyZV3KTwpG1uCEXcmy6r0aGamaztczW2cwC92PTRgaDo/J/PQo+V+diniAExL2Emy8xyIdruegKf06/Iev1h8NOc2QpRuinh80jMw49Qs3amw50m8da3ivedCAZb9RCtXbHfuJXp0g1KPcU+bFCCxPquHt4PC5lkCoWqXniFh93nNVCAmCnQr6u3QT8Ys1+wDj/nRTd2X6KN/3LJpqP3VnTjuq0BiymBCl4X2w82z+3Aw38973HS3dRcOySC3UMHOvEBemr6NfUo4WUevGoQYR+LESX1MCboWmJuTYqdluEuU01BQGmpa68FsPVc13aOGUfXMgbD7WGAa73kN+GdNkWpm6aU97iNW2Dcrju25J0XCBCBlQybtbO7qU+iEkwknriralch9ZMmXw5/TegDCTgb0s+7Y7Fld9xgr5TpbGmk/JIwFzoP8XaNdWDo5xEbT4gLM567HJKlJL0kX92EQLuCNxc9Iv39fSKs2o+C2lpLW0kLkWtKsVnKJSiY+BJQ0j7YH8bVklm/7OcorMn0DG0Fw6+a2H+MD2AjXTwDuDfm+3aiIg/Fs4ToEwdVEcDh9YjlHG3UmeZTBcWX8Y9AVVIDtzp5oksa73qSlZ9yjZGJkYSdeLkpSYnDsrmK8TfeCV7P06IDqtYm1LLbLjvqGMgYTDXXZZk6ciUS7rrt7kEd4vGPg45eJAaP090wLh+8LH2i7HVpUr8tIXi8SNUa4qiu83UYoECNU2iU5rDUlW+sy0yYKt4g15oJOvAw2kuPeyqatR/ZQNH7AdMdWj+SuUamcsOM+sRp3BpNG6HwydUy7OWCgmJQW2KThg5Hqz4xaMKMteap0XeXqcnUeFIroiScGQHUc0L0SxsPdxYiH2Sfve7DPuCp/HloQk4/xyqPN8Tok9eIgj0flFWdRxOXPqaILpQ2nO7Md8ktuQfR+CZjo5UWakvg9qtzSH+x06RI7F+NVvqtEPVcsxL7rMAMIF/F1CzuLAKtyEXQJ9MHYlr7Ubh/BvefLiwdrTmKkPCJ9+aZmZHYTJ3i8YidWLbMmBVctl+lhyz5AyRc80qIsy7VOH/VJYQgyCqr/qIRaRwGVT5R3TZM5yyi6DBujck9WTPa7kXyQ2vX5tTaZvdpyO8WwxdSGxvZWBHc2bib1camzX+PASCUv7hdMDUaFMapek5FSZ91mAQ0t4Qn0jXY683Tf1u7XB2oVKM2KcEY5as6xxVZlHanBVlfZc
aeehbDH5gxn8kjpwwShcBVvAgZcgaVIO1IzwCw1opxFbAEiQAcAPfmQUkpVoaBztgM1jYILGqRCRcqvEG0eSpCPRj2x84tY5bTOgWjanIkNSGeXDslJV42/1vLrQyQukx32tlu6M7gaMA/3wKHtn7366jOnXXL0sLpdO4XbVaCorfgbF9UPaaniE1+DtyedHgfsALcRObN0Z4A9Au1zx5y1IYUTKPZMmCLVF/FmCLAQezHrB5sI7qBOmyCLDcboz7/tUSaTAwg2XgNdSAJQ1NvDGqvo0Q9iQN1WjwjMq3SAuM2HwGH7gm524cvPKABrtEX6d9e9viFDonTz9/4CSHSqoXWazoqk9zSH+b/dPLHbEZxVg1vame9mp1iBiPb9zPhnyX0ZXNKrq1Jr2Py1iUSAWSTMq91SIwQ+pgkdaTE90L5ZPVwzCrYGZp2+Dn0aaT+WDxb/jDetCXh4wDYevMDl0f3k4ZkoTE5uwA/tLSq0KuTRcEUfb/r9kd6dNqo/mNkosOdDot/geI8LjKc0IPasorCKS1KeHJ+zp4HOEmCTlG8bgkrJl3x+jfi9uEnXbbfb8i+2EQA/UYpCPfhUq4PaOvYAMSQ8BXgYnYbE65Q9dOHNap4hnjDJYF39HyPmOpWV1WsYxQ9hZdRcC3cSR8kj060RtyfEm2/j7mZ/G4+IURcxQKx+zaZQBBZX1NgppmkjOKRDC4pzpY8V/Tgw1+Po4ufI+2FejLbxv+OstVFRZqL5Ld75O1N/pFLheSIaa4VXrSETl+EDHB3ivpi5wJ9hWQmDHPI+chm8w7LhqrHRZd6pfjE4Z/bEwY8/N5cqnGB9ayaA77/vID3ay229p7Bn5FDZLIbxsp3zDUCjp1v4wyubeh4/G5SJrOhUr2KGmfV7LTBTo+vzK+57LtNADJ3PPIFR8RqHDcPhV57tfHV/rgHCPaiq4isg/qS4FOhVRl8Z87IW4VcJavLsJS8pUi4iynq1W1IvK8Xzh42icfdJhS1N7rOoYCiWistsaZimhkEymDqBuP6H6T4RzdiCI0yrNL6eQfB8SJgkBOu8B5AsqEo0G4R+et/pPDE2Hlgs5WTjA8RU6jczo+llg8kdHT5kq7jJaNlUuFLr/tZrRuvitMJH65vdYWYMIS912UbIItgain/PEI05HrB9dzeoLPYc8VTBEoWIvJS3RyZcXQu5f3DfrGiiu/Qqj690pngKmHQYpywnzCyi2SGqdVjZhGoSVVNp/pAN+Fc6cqNSBoVLHxtGTBcDwk2Wc7YXp9NlIBv5pOhhdNvYjH6OXPsetRgeAKCfgoqc2hGGn/H1dcGKmvjeYpU3rznIoIdewmqureUrQfRmLlVGZcwreGK7L80DYYjL5h5ZHDMHPXUCs3//hHxw2BCLeyOOADHB8Zki5pXqlCrVmZ+s9/stIvLPlC5HuAZZwTDzCD3tfCBuMlt8fG4YJK5C7kUnn0ITdSPkNDhDa/W1gSdYQuAetg8xHaknWUJ0ExKvT2MUy22yuovuM9wDPv5NjgAz7+nMlBQV+MCGChHw5jcllNc9mvFtQgmA96hWFr0esx+vBTywvdKrn/D8tubibDjMIviBsEetq6CeCUyuqlZWdYtFbE/r7LNPmpvFbLlRuie6241opi+P8nt6UxR5/nkm+DtBj2P1crTMXoQAAh+TjuldmZXz6s8Hk+sz1u9UBjmjSqpEdusI8ATN+ajidD+LG1dWmIYrV0SpYrg+36/0kx9p/VbEyeJCrXZexZPxMTHDTMoHcDK54X00pramP+cf+2GIJcThkAeLzZQPYmQHMmmPxToi1RDY9whCXRhUwvhICC5IM/vAUyO5vt2jZ1z6XDqlOOTd+ryRPMSE58N7v9/CN6q2wi1M72+evJ1h7Y3fQ7IvT7ewF+qqMYUG+A3o85fzQqaN2GOscGO9OuAwBTjWLCTYxtlfnojNFyfnmhhaaAp8GHDpPjwnoX27eLVi9xnmZPaxMF8tYEG7GSExToJ8v
ksgv0/3AFijOHe/q30XlcbXPgyMXjGsN9ufxPZpLkIDFXNqoVNpxpyr2MH5PRYzsWQAYgE25Sr4XEM9HYXrFhif6xpY/XriD8j28yXty8qFXrJvimJoe7kgxEbCgGL+4hcp9ZO39FTAeVRPVf5jTEr1TVyIGcF3xytCT0uqqr5UGlfUskQ4nRvMwKOegm56Mb/yCWebeZ+GsA7H2QOL2Tuz7ixOK1b9TSdt6ykkwncPzkxKhcGWXGGxvU/nPIImERtZwKWoEQCuSC5V8lIWlXBeyASAYAu3Cw69znB4QuIDiHcpETwewssGMQatK4l24JQvPxoyb4QwUKmUZ69HIf++g2c6mpoVmImQoffyFBCiDbL2zCKO+9ts+sjQ1vMZuDfksL/+cqxd6FM08N2qTlVnoh+p/11WL0UBrm0vWU4wuijDOJ7kB6egOaRDghQeN5iOMuUX5GbvdBeAKAuFrLZz/pTBtYxK9Xng4Vf+qi9/WzBFfW52lz+BJWccriiiMvvq56jspP02u1xEZKX1V2eCbwjgO9hJF75tFOxvjQkWwbHFL5EY+MP+gL8Ky+4OqyI5GoXt2jjagVYdZjMdHaIvqVmbMhqhtq/W8OWYg1A+anWR0ts5eBBFR71s8fxm2vsvK9wuYCcoTkRJ4g7xeV6sENn6fRWVEWSVG19p8U+wazrYBgfsD4zx9FAL5GcdS7lRTH+d+SqoM3xmGeGvIuAPMSiNVv16dWRA56f6grsErAAwNLqjfv8ZzmavQDZ3xHVg0+CbBcP39DtVgt2RYH7AuKBTdcTPdym/Xth4iWcDXu0rIRn03x78jmVVf38kbXUdt/Lxn283zNKN31KkhRGOHYZB2P1IyY+9+dseH/U+4BQyWMpY1gB7fpTsYCcGxY6cEamlI5pZ9J+UOIirYgC/tgKNAFv51cz88prSXHCGJmVEQuOboFXJl4389t/wkja2W2qW35oDv7D4pzbgew4AFum31TkWI9LcS46xVLjmzocHouWrFP35F7vanMV7zmCrFQSXLz6WQMjxj78S2u8SAKRhAnhneedXs0aWSG4heAkbtvN7Z3Yco1ldkAz5submn2fidvHATS1fHcGEsZDfvd0EiifU+/pKpmgEWGX6PvWWNj/4sQ5gXlVE/E9lNczDPO/W5UxOeHgx/kFLotvv2PuUBv85N/B+OFGXguCnoqFj5fkFFqEBOzd8LZGlohsWIepPVcUpzCDy0TZRmuWxP7TrfDvKG6ctepLLzTqL408aNxxf/v83QhELSM7q7jyZjGX+6mkkpEll9XPNCC3frpTKaSm/qxaoZyeAD8AJm394F0jX/M3Tlups+et5+CmloG0imuWjef2yQ2ePq8/BZce09shLF/xYwcLWfgRxJXVwjX5YxEIS59r1qejVHLAUV/o7EwAzWUIzyRtd0xI2eppMGYXkbs+HWcD8DvgJXD0Fcx9YWO4gPioq4tvib5kj9v0U3elwJo4EVs/D1Vw9PuT1XjGxqb5FIz9eC01K/jcmevng0adZv59C8N5yNfK2Pz+jFdjk1XWJZHWRR+yfHh51QL3w8B1WIJA2lJJ9SS7GmC0EcvR53ZPeV0wwojWU6q8KEhFW5cwKrbtX+0Pjhtkh76ndzG+dhQIrhcDyfLn6v2p2pjyDG96GDEDPQDxhoQxL4zdUgPMttfFb8GTf/tqDLAlEJJaMIpEX8+0fcxREiCb+0ZMs6pDPS2Lq5GaDz6LI5OXXf9Q2x1Dm7ROU1qFp2jZdLNvDySLkz52B/0/9y/7XVWvkf9xWRxG3fzH/rbbzvzRPwLB/Ibd/Lh/8h/pOGPL3sAfFH7V8sMJd77Mc/uLf</diagram></mxfile>
|
2112.01525/main_diagram/main_diagram.pdf
ADDED
|
Binary file (21.7 kB). View file
|
|
|
2112.01525/paper_text/intro_method.md
ADDED
|
@@ -0,0 +1,245 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Introduction
|
| 2 |
+
|
| 3 |
+
Symmetry is one of the most powerful tools in the deep learning repertoire. Naturally occurring symmetries lead to structured variation in natural data, and so modeling these symmetries greatly simplifies learning [1]. A key factor behind the success of Convolutional Neural Networks [2] is their ability to capture the *translational symmetry* of image data. Similarly, PointNet [3] captures *permutation symmetry* of point-clouds. These symmetries are formalized as *invariance* or *equivariance* to a group of transformations [4]. This view is taken to model a general class of *spatial symmetries*, including 2D/3D rotations [5–7]. However, this line of research has primarily focused on real-valued data.
|
| 4 |
+
|
| 5 |
+
We explore complex-valued data which arise naturally in 1) remote sensing such as synthetic aperture radar (SAR), medical imaging such as magnetic resonance imaging (MRI), and radio frequency communications; 2) spectral representations of real-valued data such as Fourier Transform [8,9]; and
|
| 6 |
+
|
| 7 |
+
<span id="page-0-0"></span>
|
| 8 |
+
|
| 9 |
+
Figure 1. An image is a function from the domain $\mathbb{R}^2$ to the codomain $\mathbb{C}^N$ . Image transformations like rotation and translation act on the domain, mapping points in $\mathbb{R}^2$ to other points, while leaving the underlying function values intact. Previous works like [5,6] aim to produce architectures invariant to domain transformations. Co-domain transformations like color distortion or complex-valued scaling, on the other hand, act on the function values only.
|
| 10 |
+
|
| 11 |
+
3) physics and engineering applications [10]. In deep learning, complex-valued models have shown several benefits over their real-valued counterparts: larger representational capacity [11], more robust embedding [12] and associative memory [13], more efficient multi-task learning [14], and higher quality MRI image reconstruction [15]. We approach complex-valued deep learning from a symmetry perspective: Which symmetries are inherent in complex-valued data, and how do we exploit them in modeling?
|
| 12 |
+
|
| 13 |
+
One type of symmetry inherent to complex-valued data is complex-valued scaling ambiguity [18]. For example, consider a complex-valued MRI or SAR signal $\mathbf{z}$ . Due to the nature of signal acquisition, $\mathbf{z}$ could be subject to global magnitude scaling and phase offset represented by a complex-valued scalar $s$, thus becoming $s \cdot \mathbf{z}$ .
|
| 14 |
+
|
| 15 |
+
A complex-valued classifier takes input **z** and ideally should focus on discriminating among instances from differ-
|
| 16 |
+
|
| 17 |
+
<span id="page-1-0"></span>
|
| 18 |
+
|
| 19 |
+
Figure 2. Our method learns invariant features with respect to complex-scaling of the input. All examples are from CIFAR 10 with our LAB encoding, undergoing multiplication by a unit complex number. ( $\mathbf{b}$ , $\mathbf{e}$ ) tSNE embedding trajectories from DCN [16] and our model. Each color represents a different example. Embeddings form tight clusters for our model, and irregular overlapping curves for DCN. ( $\mathbf{c}$ ) Visualization of our complex-valued embedding of LAB information. The $L^*$ channel is visualized as a grayscale image, and the complex-valued $a^* + ib^*$ visualized as a color image. ( $\mathbf{d}$ ) Model confidence of the correct class for a single example. Higher confidence means larger radius. DCN predictions are highly variable, while our model is robust to complex-scaling and thus constant. ( $\mathbf{f}$ ) Accuracy under complex-scaling and color jitter. Red bars represent complex-rotations sampled from different rotation ranges. Blue bars represent color jitter (as used in [17]). Our method maintains high accuracy across complex-rotations and color jitter, whereas DCN and Real-valued CNN fail. SurReal [18] is robust, but has low overall accuracy. Our method combines high accuracy with robustness. ( $\mathbf{g}$ ) Average accuracy under different rotation ranges, comparing DCN with phase normalization (dotted blue line) and without phase normalization (solid blue line) against our method. The color encoding has a complicated phase distribution, and phase normalization fails to estimate the amount of rotation, resulting in poor accuracy. Our method is thus more suitable for complicated phase distributions.
|
| 20 |
+
|
| 21 |
+
ent classes, not on the instance-wise variation $s \cdot \mathbf{z}$ caused by complex-valued scaling. Formally, function f is called **complex-scale invariant** if $f(s \cdot \mathbf{z}) = f(\mathbf{z})$ and called **complex-scale equivariant** if $f(s \cdot \mathbf{z}) = s \cdot f(\mathbf{z})$ .
|
| 22 |
+
|
| 23 |
+
We distinguish two types of image transformations, viewing an image as a function defined over spatial locations. Complex-valued scaling of a complex-valued image is a transformation in the co-domain of the image function, as opposed to a spatial transformation in the domain of the image (Figure 1). Formally, $I: \mathbb{R}^D \to \mathbb{C}^K$ denotes a complex-valued image of K channels in the D-dimensional space, where $\mathbb{R}$ ( $\mathbb{C}$ ) denotes the set of real (complex) numbers. Some common (D,K) are (2,1) for grayscale images, (2,3) for RGB, and (3,6+) for diffusion tensor images.
|
| 24 |
+
|
| 25 |
+
- 1. **Domain transformation** $T: \mathbb{R}^D \to \mathbb{R}^D$ transforms the spatial coordinates of an image, resulting in a spatially warped image I(T(x)), $x \in \mathbb{R}^D$ . Translation, rotation, and scaling are examples of domain transformations.
|
| 26 |
+
- 2. Co-domain transformation $T': \mathbb{C}^K \to \mathbb{C}^K$ maps the pixel value to another, resulting in a color adjusted image $T'(I(x)), x \in \mathbb{R}^D$ . Complex-valued scaling and color distortions are examples of co-domain transformations.
|
| 27 |
+
|
| 28 |
+
Complex-valued scaling thus presents not only a practical setting but also a case study for co-domain transformations.
|
| 29 |
+
|
| 30 |
+
Existing complex-valued deep learning methods such as DCN [16] are sensitive to complex-valued scaling (Fig 2b). A pre-processing trick to remove such scaling ambiguity is to simply normalize all the pixel values by setting their
|
| 31 |
+
|
| 32 |
+
average phase to 0 and magnitude to 1, but this process introduces artifacts when the phase distribution varies greatly with the content of the image (Figure 2g). SurReal [18] applies manifold-valued deep learning to complex-valued data, but this framework only captures the manifold aspect and not the complex algebra of complex-valued data. Thus, a more general, principled method is needed. We propose novel layer functions for complex-valued deep learning by studying how they preserve co-domain symmetry. Specifically, we study whether each layer-wise transformation achieves equivariance or invariance to complex-valued scaling.
|
| 33 |
+
|
| 34 |
+
Our contributions: 1) We derive complex-scaling equivariant and invariant versions of common layers used in computer vision pipelines. Our model circumvents the limitations of SurReal [18] and scales to larger models and datasets. 2) Our experiments on MSTAR, CIFAR 10, CIFAR 100, and SVHN datasets demonstrate a significant gain in generalization and robustness. 3) We introduce novel complex-valued encodings of color, demonstrating the utility of using complex-valued representations for real-valued data. Complex-scaling invariance under our *LAB* encoding automatically leads to color distortion robustness without the need for color jitter augmentation.
|
| 35 |
+
|
| 36 |
+
# Method
|
| 37 |
+
|
| 38 |
+
In this section, we discuss details of the architectures used in our experiments.
|
| 39 |
+
|
| 40 |
+
**CIFARnet architectures**: For CIFARnet architectures, please refer to Tables 6-10. Please note that our replication of wFM [18] uses the $(\log \mathrm{mag}, \sin\theta, \cos\theta)$ encoding for the manifold values, and uses the weighted average formulation.
|
| 41 |
+
|
| 42 |
+
MSTAR architectures: For DCN, please refer to [16],
|
| 43 |
+
|
| 44 |
+
<span id="page-11-3"></span>
|
| 45 |
+
|
| 46 |
+
| Method | Accuracy |
|
| 47 |
+
|-----------------------------------|--------------------|
|
| 48 |
+
| Division Layer | <b>67.17</b> |
| Conjugate Layer | 66.73 |
|
| 49 |
+
| Euclidean Distance | 67.17 |
|
| 50 |
+
| Manifold Distance | <b>68.54</b> |
|
| 51 |
+
| GTReLU (r=0) | 67.17 |
|
| 52 |
+
| GTReLU (r=0.1) | <b>68.14</b> |
|
| 53 |
+
| GTReLU (r=1) | 49.15 |
|
| 54 |
+
|
| 55 |
+
Table 4. Ablation test results for our Type-I model on CIFAR 10 with LAB encoding. We find that Division Layer, Manifold Distance, and a GTReLU threshold of r=0.1 perform the best.
|
| 56 |
+
|
| 57 |
+
and for the downsampling block, see Table 12. Our SurReal replication is based on Table I in [18], and our model is based on the SurReal architecture (see Table 14). We use the same real-valued ResNet as the real-valued baseline (see Table 13). In order to pass the complex-valued features into the real-valued ResNet, we convert complex features to real-valued using the $(\log \mathrm{mag}, \sin\theta, \cos\theta)$ encoding, treating each as a separate real-valued channel (resulting in 15 real-valued channels from 5 complex-valued channels).
|
| 58 |
+
|
| 59 |
+
**CDS-Large**: For the model architecture, please see Table 11. We train this model with SGD, using momentum 0.9, weight decay constant $5 \times 10^{-4}$ , using a piece-wise linear learning rate schedule starting at 0.01, increasing to 0.2 by epoch 10, then decreasing to 0.01 by epoch 100, 0.001 by 120, 0.0001 by 150, and staying constant until 200. To ensure fair comparison, we use horizontal flips and random cropping augmentation as used in [16]. All models are implemented in PyTorch [53].
|
| 60 |
+
|
| 61 |
+
This code uses a version of the Tangent ReLU nonlinearity originally proposed by SurReal. However, the implementation we used had flipped the phase gradient mask
|
| 62 |
+
|
| 63 |
+
<span id="page-12-0"></span>
|
| 64 |
+
|
| 65 |
+
| Code | Forward Pass | Magnitude Gradient | Phase Thresholding gradient (value) | Phase Thresholding gradient (mask) |
|
| 66 |
+
|----------|--------------|--------------------|-------------------------------------|---------------------------------------|
|
| 67 |
+
| Original | ✓ | ✓ | ✓ | Mask = 1 − (0◦ ≤<br>Phase ≤ 180◦<br>) |
|
| 68 |
+
| Correct | ✓ | ✓ | ✓ | Mask = 0◦ ≤<br>Phase ≤ 180◦ |
|
| 69 |
+
|
| 70 |
+
Table 5. Gradient computation bug in GTReLU computed the wrong mask for phase thresholding gradients. The values were correct, but the mask was flipped. As a result, the phase gradients for phase thresholding stage were enabled for points in the lower half of the complex plane rather than the upper half of the complex plane.
|
| 71 |
+
|
| 72 |
+
for phase thresholding. Specifically, the forward pass was correct, and the magnitude gradients were correct, but the backward pass for the phase thresholding allowed phase gradients through for θ < 0 angles rather than θ > 0 angles. The forward pass of phase thresholding in both cases is still x+, but the backward pass mask was previously 1{x < 0} rather than 1{x ≥ 0}. See Tab. [5](#page-12-0) for details.
|
| 73 |
+
|
| 74 |
+
Result: Surprisingly, the flipped phase gradient mask increases the accuracy for MSTAR (especially for smaller dataset sizes). The true gradient mask, in contrast, has better convergence and stability (and higher accuracy for CIFAR experiments). This difference highlights the strength of each type of phase mask. For completeness, we have included results for the original ("flipped" phase gradient mask) and the true gradient mask.
|
| 75 |
+
|
| 76 |
+
Impact on phase manipulation: GTReLU has multiple mechanisms for controlling the phase of each input. Since the learned scaling factor (i.e., the stage before thresholding) and phase-scaling (i.e., the stage after thresholding) control the phase in a learnable manner, the model can still manipulate the input phases regardless of the phase gradient mask. This explains the small effect of the bug in most settings.
|
| 77 |
+
|
| 78 |
+
PGM mainly decides which input phases change individually (i.e., by backpropagating individual phase gradients) and which phases only undergo global phase shift/scaling (i.e., through phase-scaling and learned scaling factor). The correct PGM allows 0 ≤ θ ≤ 180 phases to change individually while the rest undergo global shift/scaling. The flipped phase gradient mask does the opposite, allowing phases clipped to 0 to get individual gradients.
|
| 79 |
+
|
| 80 |
+
While the exact orientation in the complex plane (e.g., 0 ≤ θ ≤ 180) is meaningless due to learned scaling and CConv layers, clipped input phases are *statistically* different from non-clipped inputs since clipping concentrates the values on the positive-real line. If the original phase was noisy (e.g., in MSTAR), the flipped PGM may eliminate the impact of noisy phase inputs on the backpropagated gradient. Our current understanding is that the increased MSTAR accuracy for the "flipped phase gradient mask" may come from this increased phase robustness. In contrast, the individual phase control offered by the correct PGM may allow for better convergence and accuracy for CIFAR models.
|
| 81 |
+
|
| 82 |
+
Table 6. SurReal CIFAR Model Architecture
|
| 83 |
+
|
| 84 |
+
<span id="page-13-0"></span>
|
| 85 |
+
|
| 86 |
+
| Layer Type | Input Shape | Kernel | Stride | Padding | Output Shape |
|
| 87 |
+
|-----------------|--------------|-------|---|---|------------------------------------------------|
|
| 88 |
+
| Complex CONV | [3, 32, 32] | 3 × 3 | 2 | 1 | [16, 16, 16] |
|
| 89 |
+
| G-transport | [16, 16, 16] | - | - | - | [16, 16, 16] |
|
| 90 |
+
| Complex CONV | [16, 16, 16] | 3 × 3 | 2 | 1 | [32, 8, 8] |
|
| 91 |
+
| G-transport | [32, 8, 8] | - | - | - | [32, 8, 8] |
|
| 92 |
+
| Complex CONV | [32, 8, 8] | 3 × 3 | 2 | 1 | [64, 4, 4] |
|
| 93 |
+
| G-transport | [64, 4, 4] | - | - | - | [64, 4, 4] |
|
| 94 |
+
| Distance Layer | [64, 4, 4] | - | - | - | [64, 4, 4] |
|
| 95 |
+
| Average Pooling | [64, 4, 4] | 4 × 4 | - | - | [64, 1, 1] |
|
| 96 |
+
| FC | [64] | - | - | - | [128] |
|
| 97 |
+
| ReLU | [128] | - | - | - | [128] |
|
| 98 |
+
| FC | [128] | - | - | - | [10] |
|
| 99 |
+
|
| 100 |
+
Table 7. DCN CIFAR Model Architecture
|
| 101 |
+
|
| 102 |
+
| Layer Type | Input Shape | Kernel | Stride | Padding | Output Shape |
|
| 103 |
+
|-----------------|--------------|-------|---|---|------------------------------------------------|
|
| 104 |
+
| Complex CONV | [3, 32, 32] | 3 × 3 | 2 | 1 | [16, 16, 16] |
|
| 105 |
+
| CReLU | [16, 16, 16] | - | - | - | [16, 16, 16] |
|
| 106 |
+
| Complex CONV | [16, 16, 16] | 3 × 3 | 2 | 1 | [32, 8, 8] |
|
| 107 |
+
| CReLU | [32, 8, 8] | - | - | - | [32, 8, 8] |
|
| 108 |
+
| Complex CONV | [32, 8, 8] | 3 × 3 | 2 | 1 | [64, 4, 4] |
|
| 109 |
+
| CReLU | [64, 4, 4] | - | - | - | [64, 4, 4] |
|
| 110 |
+
| Average Pooling | [64, 4, 4] | 4 × 4 | - | - | [64, 1, 1] |
|
| 111 |
+
| Complex-to-Real | [64, 1, 1] | 4 × 4 | - | - | [128] |
|
| 112 |
+
| FC | [128] | - | - | - | [128] |
|
| 113 |
+
| ReLU | [128] | - | - | - | [128] |
|
| 114 |
+
| FC | [128] | - | - | - | [10] |
|
| 115 |
+
|
| 116 |
+
Table 8. Our (Type-E) CIFAR Model Architecture
|
| 117 |
+
|
| 118 |
+
| Layer Type | Input Shape | Kernel | Stride | Padding | Output Shape |
|
| 119 |
+
|------------------------------|--------------|-------|---|---|------------------------------------------------|
|
| 120 |
+
| Econv | [3, 32, 32] | 3 × 3 | 2 | 1 | [16, 16, 16] |
|
| 121 |
+
| Eq. GTReLU | [16, 16, 16] | - | - | - | [16, 16, 16] |
|
| 122 |
+
| Econv | [16, 16, 16] | 3 × 3 | 2 | 1 | [32, 8, 8] |
|
| 123 |
+
| Eq. GTReLU | [32, 8, 8] | - | - | - | [32, 8, 8] |
|
| 124 |
+
| Econv | [32, 8, 8] | 3 × 3 | 2 | 1 | [64, 4, 4] |
|
| 125 |
+
| Eq. GTReLU | [64, 4, 4] | - | - | - | [64, 4, 4] |
|
| 126 |
+
| Average Pooling | [64, 4, 4] | 4 × 4 | - | - | [64, 1, 1] |
|
| 127 |
+
| Equivariant FC | [64] | - | - | - | [128] |
|
| 128 |
+
| Invariant Prototype Distance | [128] | - | - | - | [10] |
|
| 129 |
+
|
| 130 |
+
Table 9. Our (Type-I) CIFAR Model Architecture
|
| 131 |
+
|
| 132 |
+
| Layer Type | Input Shape | Kernel | Stride | Padding | Output Shape |
|
| 133 |
+
|--------------------|--------------|-------|---|---|------------------------------------------------|
|
| 134 |
+
| Econv | [3, 32, 32] | 3 × 3 | 2 | 1 | [16, 16, 16] |
|
| 135 |
+
| Division Layer | [16, 16, 16] | 3 × 3 | - | 1 | [16, 16, 16] |
|
| 136 |
+
| GTReLU | [16, 16, 16] | - | - | - | [16, 16, 16] |
|
| 137 |
+
| Econv | [16, 16, 16] | 3 × 3 | 2 | 1 | [32, 8, 8] |
|
| 138 |
+
| GTReLU | [32, 8, 8] | - | - | - | [32, 8, 8] |
|
| 139 |
+
| Econv | [32, 8, 8] | 3 × 3 | 2 | 1 | [64, 4, 4] |
|
| 140 |
+
| GTReLU | [64, 4, 4] | - | - | - | [64, 4, 4] |
|
| 141 |
+
| Average Pooling | [64, 4, 4] | 4 × 4 | - | - | [64, 1, 1] |
|
| 142 |
+
| Equivariant FC | [64] | - | - | - | [128] |
|
| 143 |
+
| Prototype Distance | [128] | - | - | - | [10] |
|
| 144 |
+
|
| 145 |
+
Table 10. 2-Channel Real-Valued CIFAR Model Architecture
|
| 146 |
+
|
| 147 |
+
<span id="page-14-0"></span>
|
| 148 |
+
|
| 149 |
+
| Layer Type | Input Shape | Kernel | Stride | Padding | Output Shape |
|
| 150 |
+
|-----------------|--------------|-------|---|---|------------------------------------------------|
|
| 151 |
+
| CONV | [3, 32, 32] | 3 × 3 | 2 | 1 | [16, 16, 16] |
|
| 152 |
+
| ReLU | [16, 16, 16] | - | - | - | [16, 16, 16] |
|
| 153 |
+
| CONV | [16, 16, 16] | 3 × 3 | 2 | 1 | [32, 8, 8] |
|
| 154 |
+
| ReLU | [32, 8, 8] | - | - | - | [32, 8, 8] |
|
| 155 |
+
| CONV | [32, 8, 8] | 3 × 3 | 2 | 1 | [64, 4, 4] |
|
| 156 |
+
| ReLU | [64, 4, 4] | - | - | - | [64, 4, 4] |
|
| 157 |
+
| Average Pooling | [64, 4, 4] | 4 × 4 | - | - | [64, 1, 1] |
|
| 158 |
+
| FC | [64] | - | - | - | [128] |
|
| 159 |
+
| ReLU | [128] | - | - | - | [128] |
|
| 160 |
+
| FC | [128] | - | - | - | [10] |
|
| 161 |
+
|
| 162 |
+
Table 11. Our CDS-Large Model Architecture
|
| 163 |
+
|
| 164 |
+
<span id="page-15-1"></span>
|
| 165 |
+
|
| 166 |
+
| Layer Type | Input Shape | Kernel | Stride | Padding | Output Shape |
|
| 167 |
+
|--------------------------------|---------------|-------|---|---|------------------------------------------------|
|
| 168 |
+
| Econv | [3, 32, 32] | 3 × 3 | 1 | 1 | [64, 32, 32] |
|
| 169 |
+
| Conjugate Layer | [64, 32, 32] | 1 × 1 | - | - | [64, 32, 32] |
|
| 170 |
+
| Econv (Groups=2) | [64, 32, 32] | 3 × 3 | 1 | 1 | [64, 32, 32] |
|
| 171 |
+
| ComplexBatchNorm | [64, 32, 32] | - | - | - | [64, 32, 32] |
|
| 172 |
+
| CReLU | [64, 32, 32] | - | - | - | [64, 32, 32] |
|
| 173 |
+
| Econv (Groups=2) | [64, 32, 32] | 3 × 3 | 1 | 1 | [128, 32, 32] |
|
| 174 |
+
| ComplexBatchNorm | [128, 32, 32] | - | - | - | [128, 32, 32] |
|
| 175 |
+
| CReLU | [128, 32, 32] | - | - | - | [128, 32, 32] |
|
| 176 |
+
| Eq. MaxPool | [128, 32, 32] | 2 × 2 | - | - | [128, 16, 16] |
|
| 177 |
+
| ResBlock(groups=2) | [128, 16, 16] | - | - | - | [128, 16, 16] |
|
| 178 |
+
| Econv (Groups=4) | [128, 16, 16] | 3 × 3 | 1 | 1 | [256, 16, 16] |
|
| 179 |
+
| ComplexBatchNorm | [256, 16, 16] | - | - | - | [256, 16, 16] |
|
| 180 |
+
| CReLU | [256, 16, 16] | - | - | - | [256, 16, 16] |
|
| 181 |
+
| Eq. MaxPool | [256, 16, 16] | 2 × 2 | - | - | [256, 8, 8] |
|
| 182 |
+
| Econv (Groups=2) | [256, 8, 8] | 3 × 3 | 1 | 1 | [512, 8, 8] |
|
| 183 |
+
| ComplexBatchNorm | [512, 8, 8] | - | - | - | [512, 8, 8] |
|
| 184 |
+
| CReLU | [512, 8, 8] | - | - | - | [512, 8, 8] |
|
| 185 |
+
| Eq. MaxPool | [512, 8, 8] | 2 × 2 | - | - | [512, 4, 4] |
|
| 186 |
+
| ResBlock(groups=4) | [512, 4, 4] | - | - | - | [512, 4, 4] |
|
| 187 |
+
| Eq. MaxPool | [512, 4, 4] | 2 × 2 | - | - | [512, 1, 1] |
|
| 188 |
+
| Fully Connected | [1024] | - | - | - | [10] |
|
| 189 |
+
|
| 190 |
+
Table 12. DCN Down-sampling Block for MSTAR
|
| 191 |
+
|
| 192 |
+
<span id="page-15-0"></span>
|
| 193 |
+
|
| 194 |
+
| Layer Type | Input Shape | Kernel | Stride | Padding | Output Shape |
|
| 195 |
+
|------------------|---------------|-------|---|---|------------------------------------------------|
|
| 196 |
+
| Complex CONV | [1, 128, 128] | 3 × 3 | 2 | 1 | [12, 64, 64] |
|
| 197 |
+
| ComplexBatchNorm | [12, 64, 64] | - | - | - | [12, 64, 64] |
|
| 198 |
+
| Complex CONV | [12, 64, 64] | 3 × 3 | 2 | 1 | [12, 32, 32] |
|
| 199 |
+
| ComplexBatchNorm | [12, 32, 32] | - | - | - | [12, 32, 32] |
|
| 200 |
+
|
| 201 |
+
Table 13. MSTAR Real-valued Model Architecture
|
| 202 |
+
|
| 203 |
+
<span id="page-16-1"></span>
|
| 204 |
+
|
| 205 |
+
| Layer Type | Input Shape | Kernel | Stride | Output Shape |
|
| 206 |
+
|----------------|---------------|-------|---|----------------------------------------|
|
| 207 |
+
| CONV | [2, 100, 100] | 5 × 5 | 1 | [30, 96, 96] |
|
| 208 |
+
| GroupNorm+ReLU | [30, 96, 96] | - | - | [30, 96, 96] |
|
| 209 |
+
| ResBlock | [30, 96, 96] | - | - | [40, 96, 96] |
|
| 210 |
+
| MaxPool | [40, 96, 96] | 2 × 2 | 2 | [40, 48, 48] |
|
| 211 |
+
| CONV | [40, 48, 48] | 5 × 5 | 3 | [50, 15, 15] |
|
| 212 |
+
| GroupNorm+ReLU | [50, 15, 15] | - | - | [50, 15, 15] |
|
| 213 |
+
| ResBlock | [50, 15, 15] | - | - | [60, 15, 15] |
|
| 214 |
+
| CONV | [60, 15, 15] | 2 × 2 | 1 | [70, 14, 14] |
|
| 215 |
+
| GroupNorm+ReLU | [70, 14, 14] | - | - | [70, 14, 14] |
|
| 216 |
+
| AveragePool | [70, 14, 14] | - | - | [70] |
|
| 217 |
+
| FC | [70] | - | - | [30] |
|
| 218 |
+
| ReLU | [30] | - | - | [30] |
|
| 219 |
+
| FC | [30] | - | - | [10] |
|
| 220 |
+
|
| 221 |
+
Table 14. Our MSTAR Model Architecture
|
| 222 |
+
|
| 223 |
+
<span id="page-16-0"></span>
|
| 224 |
+
|
| 225 |
+
| Layer Type | Input Shape | Kernel | Stride | Padding | Output Shape |
|
| 226 |
+
|------------------|---------------|-------|---|---|------------------------------------------------|
|
| 227 |
+
| Econv (Groups=5) | [1, 100, 100] | 5 × 5 | 1 | 0 | [5, 96, 96] |
|
| 228 |
+
| Eq. GTReLU | [5, 96, 96] | - | - | - | [5, 96, 96] |
|
| 229 |
+
| Eq. MaxPool | [5, 96, 96] | 2 × 2 | 2 | - | [5, 48, 48] |
|
| 230 |
+
| Econv | [5, 48, 48] | 3 × 3 | 2 | 0 | [5, 23, 23] |
|
| 231 |
+
| Eq. GTReLU | [5, 23, 23] | - | - | - | [5, 23, 23] |
|
| 232 |
+
| Division Layer | [5, 23, 23] | 3 × 3 | - | - | [5, 21, 21] |
|
| 233 |
+
| Complex-to-Real | [5, 21, 21] | - | - | - | [15, 21, 21] |
|
| 234 |
+
| CONV (Groups=5) | [15, 21, 21] | 5 × 5 | 1 | - | [30, 17, 17] |
|
| 235 |
+
| GroupNorm+ReLU | [30, 17, 17] | - | - | - | [30, 17, 17] |
|
| 236 |
+
| ResBlock | [30, 17, 17] | - | - | - | [40, 17, 17] |
|
| 237 |
+
| MaxPool | [40, 17, 17] | 2 × 2 | 2 | - | [40, 8, 8] |
|
| 238 |
+
| CONV (Groups=5) | [40, 8, 8] | 5 × 5 | 3 | - | [50, 2, 2] |
|
| 239 |
+
| GroupNorm+ReLU | [50, 2, 2] | - | - | - | [50, 2, 2] |
|
| 240 |
+
| ResBlock | [50, 2, 2] | - | - | - | [60, 2, 2] |
|
| 241 |
+
| CONV (Groups=5) | [60, 2, 2] | 2 × 2 | 1 | - | [70, 1, 1] |
|
| 242 |
+
| GroupNorm+ReLU | [70, 1, 1] | - | - | - | [70, 1, 1] |
|
| 243 |
+
| FC | [70] | - | - | - | [30] |
|
| 244 |
+
| ReLU | [30] | - | - | - | [30] |
|
| 245 |
+
| FC | [30] | - | - | - | [10] |
|
2203.03691/main_diagram/main_diagram.drawio
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
<mxfile host="app.diagrams.net" modified="2022-02-28T09:55:12.028Z" agent="5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.102 Safari/537.36" etag="FbXQ-5ctwn5pkNhWnn3_" version="16.6.4" type="google"><diagram id="bpAs2NTR309R4TqBo1PY" name="Page-1">7V1bc9u2tv41njnnQRxciAsffYmT0yZpGjfpbl/OUBItK5Ell5YTu79+AxIhgSRIgeINkuVkxhZJgeT6PqwbFoAzfHn//DYOH+4+LMbR7AyB8fMZvjpDaIAwEr/kkZf1EYaD9YFJPB2vD8HtgZvpv1FyECRHn6bj6DF14XKxmC2nD+mDo8V8Ho2WqWNhHC9+pi+7XczSd30IJ8kdwfbAzSicRbnL/pyOl3froxyx7fF30XRyp+4MafJ+96G6OGni8S4cL35q98JvzvBlvFgs13/dP19GMyk8JZf1A10XnN08WBzNlzZf+Lb49OXt093///bhy2z0x5vJ/M9vbwcQqoZ+hLOn5J2T512+KCHEi6f5OJLtwDN88fNuuoxuHsKRPPtTwC6O3S3vZ8np28V8ebmYLeLVd7EP8RU+F8cfl/Hi+0aIqyuns5l25dX1NXqDxfFx+Hi3uVu8WIbL6WIuPgZAfM6/tnqBKF5Gz9qhRAxvo8V9tIxfxCXqbEC5xxIEE14OOPKQvz70cws0Anh97E4DmdKEK2FCrsnmFlv5iz8SCCrAwTA1oEFny0SuK6IrgdF/niR3LtZCHuNQP6TBpw7KBgaPqx52Li6A8OFZ/wadrH/78j+5FLJcDm/P2MUfArW5uP6MXYnD8n/yQMO4zm1oeC+JMx8+yl/6DZto/8P0eTqfyEdev0/SpABlLUb1FHWIvpPQt7cRHY12EfpuEU//FQ8VynbBpl2tnTELhgAkr56oSAib6guQepSk+gKCxAMo1xc4yXcFdaz5nsCZoSck3JS6dTgUVPm8QnhLlwRW8mYsCWs4cRnLZ18ZBuNpq5ZzRxOOZcgkCZ1mTDibTiTuI4FXJMC9kDhNhbE5T07cT8dj+fWLOBIMD4erpiTyD4vpfLmSMREPcSXbelouHhMu5EgzX8yjDB/VoQLtrLFS3m96rxk88W7JkY2pg6gZ+iHIgId5mn4c5LgXgDz3gta0MOe23CNvxF8fJdmEApveC09FuDpm6uQ1ns5FM1t309H+9kfH0Sa0H2fcozhDP5KjH6QG/vH2+Bfs9smi+fh8RR18NZwtRt/zTth1eD+dyZd6F81+RBJDgwnJG7Gc+WHn8p/BvBH5TxwXj3I9nalbb21oKUjRWDneBRBp4icG6atjcTQTpPiRdtdNkCR3+CRZqjMABBnjh9NtPC6e4lGUfE33rzMtiZAl2xLyCAy2PzzdsOj6k2iZa1igGr5olyXdyv4NYOLaWj9o5gvij/UzbBm8gaUGqQOLQONE6kZILcPQnFUFQSkbrWmOSaZhmNGDDfHa9BKCqaaXsH5WyFJfaIvpqNh9WBlcdnHzffqwzVpIm7YNVU4mOmOiAw+mTTQkggb5AMVopVFrVjrAJ5gbg1mAh3K93QWYA+bvhPl9+BLJZMXHRXxfjHCl5EI2S1CERBM9jPkg5wT7gcdo3lro6QDWJQrcpFNP3kM73oNP02SgmTbsXWLVeYpaKvAVmjLFEKIdvXeVcVTDFut+fJQqWic5aSp1yAMvjS9EwAP6D8xrb96S9v717+nL79/ex++ur77cTK9+/Pzl6sNANXxSG62rDch5JjDYV23InHRXasPMGpNnd2JNK8YGANwMaxDAO1pqmTVQjZmcaNO+smEBaYY2kKMdLbVNG2gabTjRph1tEzTm2vZNG2TSNp3HpYWIVwj9QQBz8SfBHuon/vQvgqffPn/++v
nv73/H32/+Df/86g9OuevuNDvxc7nrvZU7Y7msUosddQl+nfwy+j/8+2P4183k5oX98ul2YFNgdSDcEZSJX/6jf/hLfvCI+nj1rJ+8ekk+pepfnCcgJxkbAdGe7AtyQyW+FfWqDwFmHhkm3pH1k2W+UHtg5Ldv39FgPhm++/b3z/OA/PZpOL4bIFNJT6YnTATjHuyNyia3nqRWzvTaSvNwAsiP+ENoHBrTsxbAQDgIhZFqoADKbN9NlYBGYdkY5XI8akvV91jaG4L+RjKpKh7PUENGUX0RmpPGwJT8K5Chrg/H0W34tCqg0SQrE1cm0ZajV5+waa4OlArRxKoEmC6O8toqzQsA2S1WezcSGA2XIdM5nsbJIBq+isLHpRGNHUSwdkqVx7dTzK0J2aL/vxIhtzeqBC0SfcciZDUBw07mMKgvc7PRsbZrdSVS3YqlBIW5l7dgxCAqn7anaqGFBRPtTB8eo92xe/j4sJ4iczt9lu64gawGZmYCBQDOWXBdw+kosozFsKA0LtSACzS5Z+2pDQsDeLSouAODzeBeXRe5MT9OPWwiPqP0sMk9Ru1pF6vURKc8vjy/ZJcsb1ZtjcGGEtbaJR04D2gOFdQ1KOgEigMo2AwBvw4UXpzBpEI435++d0rBO+eo1GZx5fjHPQVvEQG9ElD6RMEiGf1KUHBHwVtUQvSv4M1hKXFC39tMHDwsUvMCvA5H32ObGtTXAUqfKBxfrFuza/SPCXVOXXWf3VSv7E5ajTmnrwDgF5eXVqhUG40t7inpbN0g6BsT57RX95hA7hgm6ISJWs2te0zM6tc6gMgJpLEKIwQgca3CyCwsC+tbN9oqclEOr8LI+H42rn3tAqNy8OoXGKWHvbstMDJL1cLY9VWVUU4DF8uLzE9sYbteh4ibKC4yP7HDtUUNi9iR0iJsPT5UUyCHVVlkfjfnRobqht64cvq7z8Ii8ys4NzLUISjuoGA9TaF5sRxYWZH55WzmszqaaS0nxCGMQZjfwLlEa1+Q9IeB71xatedu0T8iFnFm/4reIc2uHKajobBflPU7GM3uH3CdYrOQ9IiBc5M0eu4W/SNiEd32r9mdKCQyy8+5QLQuo0kBWoej6A+4XrFZSHrE4Ngi27rdon9EnAtsu09jqld2JoFGnIt0KxRGVBttdbCAyAzJIdcPNQRJf/VDZkicC6a7h8Sx8iGy/wBic+VDiGwrW7Z70rhXPkTaj7FIczGWk+VDxCLMaqd8iBQ5ftX5mgmU+i8fIhaRklOFF6Ry2WXf5UPEIhB6HSJurXyIWEQ2RyJiR8qHqEXYUqV8qFAgR1A+RB2MJ+rF3Rv0D7d8SPWWVwmKOyhYjMjV9IsLxXIM5UP02EbTNoQ42LEHahHqvQ5IesTg2Ibk6naL/hFpv060AUXvkmY/tuEzevCFoe7NwO8Lkv4wcG++fc/don9E2i8Mbc6Fd7B8yL2Z8TUZzYqGdw5G0bNjqxPdG5IeMTi2yLZut+gfEecC2+7TmOqVnUmgMeci3bqFEYWjrYWgsDQofRdGMOeKT7uHBAO3IHEumO4eEgKdggTaTyjLSaS5+iHIfA8ADCmGhDDM0iZ3k/FPCYl5fhD4JGABAVw9tC4z3oAFNu6OaKFY9t2fFJwVVc1YEhJWmAejiZMTIX/tx7BPaWviNE70zWwKO51HYdz8drCDdQFSdl/J21s0Gm00inZmTIeU0IxmeVw8STFe3C3i6b+L+TKcKSDFB+3LPsRX+LwdKJUJ9iBhPPAZptxHfsYii5DaFx2HINnNxGUGn0k4UsH6y6vr1Jya1JaVPHWTkr0qa7HCqJfSrHj75v2Xzjih9hrNcYKufupy4lZYva/Jw8i/350V7TzaFFew7yEAscKS4wPmiikb2acGGYcRvzVqEDri0fDWXQ1CoYd8fwMrzWxZf1C0MG5HSlfWVAo2RQD6z9NCnRg8rvYnPhcXQPTwvD258mwUGtuDEpcxDvVDdLL+rZFQoM4uPrz/pHGQJp
b9ev046ksZaq4YnHZ9Z9OJ5M1IwB+Jp7mQ+E5H4ew8OXE/HY/l1y/iSLxL4oZJQiW78op2ycUZuZJtPS0Xj8l+zDnCJgV2hpq7DDPXXoPBl9F2eyatEncAkUcAID5AwA8gYwFLO9oISBfHpwEDPuBUzU3UqWvyvNsrljRlN9d8ufgojkvaTO+jR/HnWEJl1luHQI6N2sqQY3ofTjbxmni3kn22K6SGgNRRKegxNGWHusTauKfNCeu6WAvbizzOHQObFcczJ7BrdGxESX9Im5fT5ja5+Pn4PI4XP8Wn4Wwx+p4GVUr0OryfzuRLvYtmPyIJacZybiHSs1s535Kdy38lsYt4lOvpbOvFKpfXnPBKEIrGk6gUH72i2yB9dSyOZoIjP6JU4yZIkjt8kqRNwZ9OZiKaaUN40fEoSr62RdbQEvfLW1qG8SRa5lpacWTz4la0MS9ZbjVacKJNI7TBPmuINlgNHbZPm9+D919uRqOPt58/3PzzNboZozeXA452s6b9mZ4w3QuxEoI+ICo9ML6d+anS/imdjNuK+ixUcqVgv3wK0ipdGNFVunAcPt5tWs0OKORziSwYAnMXKoff3j3CXoD0pG5mgX8RMOmnTcE8N41F8AbsqTlgL3aTtyH0+XIpBCUlW5jMOWTfaXfE3BA71Mz37XYPefhpS96UEf2SIEll8b42n8Ary/Wm8nFG27lJ/mbQXf9k4GStwpnWy9AwUxwb0MRSCbQFqE391tbNSfpJ/14OyHk5j8K0LzPPKQCMX/6TXLD68Jf84BH18epZP3n1UugxlTJg7Z5YdBtHHK4Boh6jW6uiMN8kkIFH+fa0WuCkqjdGNyU/m8URkAe09SY4a8s5Mw9i1o4E04zbzXDrAaoN89N+fEUeOkIviGkG9n3d+QIntajdlvljtcHyQejKirxay/UA9FtdwvGMgabI42yrCDFpi29G8St67c23QnW1yvftVld8FJlrLIac+AR0Z2D75pVSbIh5PtLsVzpHxeTaP9pqSsF+JJR3oYzkzK+6C/ZYyV3a5qRNrflenGzBhIJswH+gtAPYI2wLuM/2JFZBO0rbkQx9u02dGUPL9ej740M43w69V6wOgOnqgKJR/5todisjV3Ip/69HgMrSGQV1AZvD6WduKBAOrCNhYzqrKBIuqVlpKPIVoYCfSnOlmQey1DvLxsXMMH+DNVDtaHw/m40Ci+bptSXaymlgxjzs53uzms6HPeBrgKCcxOVyQAahixBO2YDm5W7h8BRkz8txrC3MAfQ9CmHg0wAgSnxO00ZZeDyMoYBCWa2HAUf5RB1mHpQ9EPlEcB1Ss2wZlIkpyDAHsIn1Ac1yMSV9dFVroVaZVKtrVw4Mw9H3yUqnDZLaK3lJPBn+D5JLWwvRSo2K5KLK6m8C/teklVO6ff86LqGzfz1SXb3JWtr2gcq6egA8H3OKoAi4fQYJ8dNjEgPqEcTEw3FEYcAxz2troTtERwgA9X1GsM9MZMeBFwSAi/Mcchrw1shuvXezw8pcTqUABt8sgcSnLmpzizUgKmnzxjZ9Rh7mHBAMAoZ9AlUXOUhdbqyrPjJd/vtJl6d7VGO6XJxmGkkhxE7r8oqjVb1k8FPjUOoJVge1xP6+ebP08ED0PF1uGhF/a22IT9sm5AfVQp18iD7OVepY6gnjUq3lSIaFBR7UEiOZvB7xPaZb1z2TyxDx1OhXwKvcpSD7IogWvmiXJaUQhW/KYdkzQJJ+BhDUeaPy1sQf64dvNItk3M6ojiVywpQWV1gU2D27WSsHbg73qLaSdirwMQRy8FnOZU4TVniGjAQUBYgRCDdzMx21hlabFPU+nr2foTNXc5iMn/rbxvhVJJYj1knqbEwx97HoOz4I0qPrnHoiEAFC9/CAQwYy4Zz10Gc6ooSg27FOq72dTlw+eC7LwU1rMtMaQ6i9splbr9WdA7GxlDUMMrXw5jJwU/Voa3XfFoPHh1n3XXnNYVn3nXKNM/MWDIW+3dZ5H4ZzsU
+xHHZLIfrQ04Mkki2Z3Lt2DpQ31HKxHDT19eyg+7uXhyj+MH2OSlZ9eI0TBaqrE5nS1nIHWW1iqU5amzdgtcOtS2VDB6ZM6moLSj2s53F2FPC2rDusNpw57sJHR3ilFz5q4488baQYMm4juVfhI9kWPqLsTUy5xuYpaV7tRKnQxhKL0tj9UW2VmTrzp3LzpHQdivJ9YBiO+Bib+oB4KZ+M27RmCHtcV0eZCliI8+bMNG+qxQV+ITSpqBMd2nFuiAiWtDL8zC62TtDBIuAfPcU/NmjVCZ328m2UAVT5KO1zOozKW8fDtFlYxFW0eGiKl85EszVZPim9CdvHZFUd0sO47BkoNj1D4fug0rb8krZqj+cV9CyLnNGpZ3XrDVLocS1nATP6uGBUuLI7yEj5bYIuepfsQKVPUal/QeHFlrZmdHLb7WE2VddOxeZ7lbA41oXkVBxtxoO/70BHQTvbpUDTgRmy6iNNxfHGYowGnVgHShCqbzZIPUNNzHagyjeG0h3MhDEv1GMxLNskgg2t6t0aesIkYaKp73Sh2Gpwsxw9CDzTltq4rcSrcQXTDrpgs0twt98tA+5RrV/ibGGQe8A2PZZaDmyTq2XfJE8IW0NT+lFlWtZBOKsNkFSJbCxz3rtVb3c1K245axx6iBeXrDRUcIW6XQyHtldwZUc4C41ymAVXCXl3FstD5hTLRdiecUWBR7U52yrjvUfZu5Xy7WogB1kQ334T1moGq3IRFwUe1hYcCdJuie97WLdyhjx499P+bBJ2BbVvOwB7rfP+ChKjTS+i6l6B/4ZKr2C+WwHI1nNo91ZWhULeo3v5nBDkUyJEzDlMdy+IPUAZ51KscusP1fv0alShrYDofwgEnIJAvWtqEE9cEsBARN7Eh6KztTeiZ7HYW5Pda0wiPvZNvYmjIbaPnG216oZZVUpWAQ5kz/YDDgFGaXxXm7sEhAi9KvoPo5vzuvviCWgBJpxgTIB6hFRBKxE9i8qTBIkerAxeC/i2mJssVZ+dZUbaxt1RXC0C6UZxVRXmxUXkTSRGOurErvZhs+Wy8ICc8eRXK3iwA3PlLWxglRU8NoC9Vk++NA9wzI48LQD+CB15swBqLEJUV8avxY83S8UigDoMN76cV0fvxZvXg2lxeNoNJ75l1N1E1WL5zQN04bvpwK7234Lc7gHMMj3GBZ0MY1TlWeadKzo5NpolV3TS6uIyE5ZIhtD7jmVl22lskaZ0l/YrPVT68naWWSKmyDCZ3XsfLu+GwzN28Xm1nvabj2o+yzDWFigK76WSng8f5S/xrVhSIFz1MmnVc9/Itmu4wnyno5tUXLGP21sWQjymJyEyhkX1zF1buTaxJIH5jfYfWWxuVQ2mSoDUqhoqZty5qgZsQC5GJW2z2shhLqtRefxPOEeGTdXaWzrDjIfN0gcf3n86LXxQH3AE0zorO1U0MCgt1uVO47zuplGurnzgu+HvbcKeDIDWKx8Io5edhJNpquWKpMBiGOPIVztwhEsbh5qmtlhi6YJPHzS22gEA2x06OM/eBWjVd9m3aLtKDpgya6f57b3YMJi3YV3PbwcWY1RHPwvXLS2FAg/reyalOYNpM/PbcelNSCfz22HZM/jGBa0L3weUttXD7FtgMTJ46lnd2n8SlCZCQFPz23H5bTqa3w7Ln6Li/HZQ3lr3PSxob2ZVzwtZOtZrhG7Vdi0F/o6oau+FLDsOzzZrB59c4fZdYUigx7Wd6WFmHq4DrrDNwrhViuIKhVR9slAulcGz6z/kpNfWagLmXFiLU1Y6L3TbwY7dlO8NhY5X5WhtTYfaCKjhVi/Q7FbWcG11STfTw82YtVjV7kSdU1NYYlkOna+jdgrLFksS+1u5oSkACe0VMXO1g8lunaodnKp2qOxqkvLoW1WmtVDtID7GCxkTbMMU0afvPizGkbzivw==</diagram></mxfile>
|
2203.03691/main_diagram/main_diagram.pdf
ADDED
|
Binary file (80.9 kB). View file
|
|
|
2203.03691/paper_text/intro_method.md
ADDED
|
@@ -0,0 +1,76 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Introduction
|
| 2 |
+
|
| 3 |
+
Attention-based architectures, such as the Transformer [@vaswani2017attention], have accelerated the progress in many natural language understanding tasks. Part of their success is a result of a parallelizable training scheme over the input length. This improves training times and allows for larger volumes of data which makes these models amenable to pretraining [@radford2018improving; @devlin2018bert]. Therefore, many current state-of-the-art models are fine-tuned extensions of large pretrained Transformers [@bommasani2021opportunities].
|
| 4 |
+
|
| 5 |
+
However, these models come at a significant computational cost. They require considerable resources for pretraining and fine-tuning, which induces high energy consumption [@strubell2019energy] and limits access to research [@bommasani2021opportunities]. Subsequently, @schwartz2020green argue the need for **[Green AI]{style="color: greencolor"}**. They propose a cost evaluation of a result $R$ as following: $$\begin{equation*}
|
| 6 |
+
Cost (R) \propto E \cdot D \cdot H, \label{eq:cost}
|
| 7 |
+
\end{equation*}$$ where $E$ is the computational cost measured in floating point operations (FPO) of a single example, $D$ is the dataset size, and $H$ is the number of hyperparameter configurations required during tuning.
|
| 8 |
+
|
| 9 |
+
<figure id="fig:model-architecture" data-latex-placement="!h">
|
| 10 |
+
<embed src="images/HypermixerV5.pdf" />
|
| 11 |
+
<figcaption>The figure outlines a general model layer consisting of a token mixing component and a feature mixing component (MLP). For token mixing, MLPMixer uses an MLP with a <em>fixed</em> size, maximum input length <span class="math inline"><em>N</em></span> and <em>position-specific</em> weights. In contrast, HyperMixer generates an appropriately sized MLP based on the <em>variable</em> size of the input in a <em>position-invariant</em> way, similar to the attention mechanism. When using attention as token mixing the whole layer is equivalent to a Transformer encoder layer.</figcaption>
|
| 12 |
+
</figure>
|
| 13 |
+
|
| 14 |
+
To achieve the cost reduction for **[Green AI]{style="color: greencolor"}**, this paper proposes a simpler alternative to Transformers. We take inspiration from the computer vision community, which has recently seen a surge of research on Multi-Layer Perceptrons (MLPs). Most prominently, MLPMixer [@tolstikhin2021mlp] is a simple architecture based on two MLPs: one for token mixing and one for feature mixing. However, the token mixing MLP learns a *fixed-size* set of *position-specific* mappings, arguably making MLPMixer's architecture too detached from the inductive biases needed for natural language understanding, in contrast to Transformers [@henderson2020unstoppable].
|
| 15 |
+
|
| 16 |
+
In this paper, we propose a simple variant, [*HyperMixer*](https://www.youtube.com/watch?v=7Twnmhe948A&ab_channel=Kontor.TV) (Figure [1](#fig:model-architecture){reference-type="ref" reference="fig:model-architecture"}), which creates a token mixing MLP dynamically using hypernetworks [@ha2016hypernetworks]. This variant is more appropriate, as it learns to generate a *variable-size* set of mappings in a *position-invariant* way, similar to the attention mechanism in Transformers [@vaswani2017attention]. In contrast to Transformer's quadratic complexity, Hypermixer's complexity is linear in the input length. This makes it a competitive alternative for training on longer inputs.
|
| 17 |
+
|
| 18 |
+
::: {#tab:my_label}
|
| 19 |
+
Transformer MLPMixer HyperMixer
|
| 20 |
+
----------------- -------------------- ------------------ ------------------
|
| 21 |
+
Complexity $\mathcal{O}(N^2)$ $\mathcal{O}(N)$ $\mathcal{O}(N)$
|
| 22 |
+
Pos. invariance
|
| 23 |
+
Variable-length
|
| 24 |
+
|
| 25 |
+
: Properties of models under consideration. $N$ denotes the length of the input sequence.
|
| 26 |
+
:::
|
| 27 |
+
|
| 28 |
+
Empirically, we demonstrate that HyperMixer works substantially better on natural language understanding tasks than the original MLPMixer and related alternatives. In comparison to Transformers, HyperMixer achieves competitive or improved results at a substantially lower cost $Cost (R) \propto E \cdot D \cdot H$: improved inference speeds (E), especially for long inputs; favorable performance in the low-resource regime (D); and efficient tuning for hyperparameters (H). We attribute HyperMixer's success to its ability to approximate an attention-like function. Further experiments on a synthetic task demonstrate that HyperMixer learns to attend to tokens in a similar pattern to the attention mechanism.
|
| 29 |
+
|
| 30 |
+
In summary, our contributions can be enumerated as follows:
|
| 31 |
+
|
| 32 |
+
1. A novel all-MLP model, HyperMixer, with inductive biases inspired by Transformers. (Section: [2](#sec:method){reference-type="ref" reference="sec:method"})
|
| 33 |
+
|
| 34 |
+
2. A performance analysis of HyperMixer against competitive alternatives on the GLUE benchmark. (Section: [4.3](#sec:results:peakperformance){reference-type="ref" reference="sec:results:peakperformance"})
|
| 35 |
+
|
| 36 |
+
3. A comprehensive comparison of the **[Green AI]{style="color: greencolor"}** cost of HyperMixer and Transformers. (Sections: [4.4](#sec:results:time-per-example){reference-type="ref" reference="sec:results:time-per-example"}, [4.5](#sec:results:low-resource-performance){reference-type="ref" reference="sec:results:low-resource-performance"}, [4.6](#sec:results:ease-of-tuning){reference-type="ref" reference="sec:results:ease-of-tuning"})
|
| 37 |
+
|
| 38 |
+
4. An ablation demonstrating that HyperMixer learns attention patterns similar to Transformers. (Section: [4.7](#sec:results:locmix-attention){reference-type="ref" reference="sec:results:locmix-attention"})
|
| 39 |
+
|
| 40 |
+
# Method
|
| 41 |
+
|
| 42 |
+
In machine learning, the inductive biases of a model reflect implicit modeling assumptions which are key to facilitate learning and improve generalization on specific tasks. In NLP, well-known models with strong inductive biases include: recurrent neural networks [@elman1990finding], which assume the input to be a sequence; and recursive neural networks [@socher2013recursive], which assume a tree-structure. While both these inductive biases are reasonable, empirically, Transformers have been more successful in recent years. Furthermore, we reiterate the arguments of @henderson2020unstoppable for inductive biases in language and apply them to our model design.
|
| 43 |
+
|
| 44 |
+
@henderson2020unstoppable attributes the Transformer's success to two concepts: *variable binding* and *systematicity*. Variable binding refers to the model's ability to represent multiple entities at once. This is arguably challenging in single-vector representations such as recurrent neural networks. However, Transformers represent each token with its own vector which accounts for variable binding as each token can be interpreted as an entity. Systematicity refers to the model's ability to learn generalizable rules that reflect the structural relationship between entities [@fodor1988connectionism]. Transformers achieve systematicity through the attention mechanism which is a learnable set of functions that determines the interaction between entities. The mechanism *modulates*, for every position in the sequence, how to functionally process any other position. Moreover, these function parameters are learnable and shared across all entities.
|
| 45 |
+
|
| 46 |
+
A general layer of MLPMixer is shown in Figure [1](#fig:model-architecture){reference-type="ref" reference="fig:model-architecture"}. Similarly to Transformers, each token is represented as a vector of features, which undergo (non-linear) transformations in multiple layers. MLPMixer employs two MLPs at each layer, one for *feature mixing* and one for *token mixing*. The feature mixing component is applied to each token vector independently, which models the interactions between features. The token mixing component is applied to each feature independently (i.e. its vector of values across tokens), which models the interactions between spatial locations or positions. This could be interpreted as a global attention mechanism which is static and position-modulated. Practically, this is achieved by transposing the dimension representing the features and the dimension representing the positions. Each vector $\textbf{x}^T_{i} \in \mathbb{R}^{N}$, representing feature $i \leq d$, of some input of fixed length $N$, is input into MLP1, which has the following form: $$\begin{equation}
|
| 47 |
+
\operatorname{MLP1}(\textbf{x}^T_{i}) = \textbf{W}_1 (\sigma (\textbf{W}_2^{T} \textbf{x}^T_{i})),\label{eq:mlp1}
|
| 48 |
+
\end{equation}$$ where $\textbf{W}_1, \textbf{W}_2 \in \mathbb{R}^{N \times d'}$, and $\sigma$ represents the $\operatorname{GELU}$ non-linearity [@hendrycks2016gaussian]. Finally, to facilitate learning, layer normalization [@ba2016layer] and skip connections [@he2016deep] are added around each MLP, respectively.
|
| 49 |
+
|
| 50 |
+
The token mixing MLP assumes an input of fixed dimension, which is necessary as the parameters need to be shared across all examples. However, unlike images, textual input is generally of a variable dimension. Therefore, to apply MLPMixer to texts of variable length, a simplistic approach is to assume a maximum length (e.g. the maximum in the dataset). Thereafter, all inputs are padded to the maximum length and masks are applied in the token mixing MLP. This model is able to do variable binding, since each token is represented by its own vector. However, this model lacks systematicity because the rules learned to model interactions between tokens (i.e. the MLP's weights) are not shared across positions.
|
| 51 |
+
|
| 52 |
+
HyperMixer includes systematicity into the MLPMixer architecture through the use of hypernetworks. They are used to generate the weights $\textbf{W}_1, \textbf{W}_2$ of MLP1 (Equation [\[eq:mlp1\]](#eq:mlp1){reference-type="ref" reference="eq:mlp1"}) dynamically as a function of the input. Let $\textbf{x}_{j} \in \mathbb{R}^{d}$, $j \leq N$, where $N$ is the (variable) dimension of the input, represent token $j$. We use the following parameterized functions:
|
| 53 |
+
|
| 54 |
+
$$\begin{equation*}
|
| 55 |
+
\begin{aligned}
|
| 56 |
+
h_1, h_2 &: \mathbb{R}^{N \times d} \rightarrow \mathbb{R}^{N \times d'},
|
| 57 |
+
\end{aligned}
|
| 58 |
+
\end{equation*}$$ to generate $\textbf{W}_1$ and $\textbf{W}_2$, respectively.
|
| 59 |
+
|
| 60 |
+
Theoretically, $h_1$ and $h_2$ could be any function, including sophisticated networks that consider non-linear interactions between tokens, such as the attention mechanism. However, this would defeat the purpose of our model, which is simplicity. Therefore, we choose to generate the rows of the weight matrices from each token independently via another MLP. Concretely, a hypernetwork function can be defined as
|
| 61 |
+
|
| 62 |
+
$$\begin{equation*}
|
| 63 |
+
h_i(\textbf{x}) = \left(
|
| 64 |
+
\begin{array}{c}
|
| 65 |
+
\operatorname{MLP^{\textbf{W}_i}}(\textbf{x}_{1} + \textbf{p}_{1}) \\
|
| 66 |
+
\vdots \\
|
| 67 |
+
\operatorname{MLP^{\textbf{W}_i}}(\textbf{x}_{N} + \textbf{p}_{N})
|
| 68 |
+
\end{array}
|
| 69 |
+
\right) \in \mathbb{R}^{N \times d'},
|
| 70 |
+
\end{equation*}$$ where $\operatorname{MLP^{\textbf{W}_1}}, \operatorname{MLP^{\textbf{W}_2}} : \mathbb{R}^{d} \rightarrow \mathbb{R}^{d'}$ are themselves multi-layer perceptrons with GELU non-linearity. $\textbf{p}_j \in \mathbb{R}^{d}$ is a vector that can encode additional information such as the position. In practice, we use the same absolute position embeddings as in Transformers [@vaswani2017attention].
|
| 71 |
+
|
| 72 |
+
Intuitively, for each token $\textbf{x}_{j}$, $h_1$ decides which information to send to the hidden layer of $\operatorname{MLP1}$, where the information from all tokens is mixed, and $h_2$ decides for each token how to extract information from the hidden layer. Note that, even though $h_1$ and $h_2$ only consider one token at once, non-linear interactions between tokens are still modeled through the hidden layer of $\operatorname{MLP1}$.
|
| 73 |
+
|
| 74 |
+
In order to reduce the number of parameters and operations in the model, and thereby the complexity, we found it useful to tie $h_1$ and $h_2$ by setting $\textbf{W}_2 = \textbf{W}_1$.
|
| 75 |
+
|
| 76 |
+
In comparison to the MLPMixer defined in Section [2.2](#sec:methods:mlpmixer){reference-type="ref" reference="sec:methods:mlpmixer"}, the use of hypernetworks overcomes two challenges. Firstly, the input no longer has to be of fixed dimensionality. The hypernetwork generates a token mixing MLP of appropriate dimension as a function of the input. Secondly, the hypernetwork models the interaction between tokens with shared weights across all positions in the input. Hence, systematicity is ensured.
|
2203.03989/main_diagram/main_diagram.drawio
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
<mxfile host="app.diagrams.net" modified="2022-01-15T22:03:12.973Z" agent="5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.110 Safari/537.36" version="16.2.7" etag="Xf6xrU1zwbw3T0a8Ptcm" type="google"><diagram id="fTUZh4PqgZYoXqPYVtMZ">7VxLd5s4FP41PmdmEY5eCLFM02a6aE97Thczs8Qg20yw8WA8SebXj2QQIAnHhIBNOs6ihYsQ5n7ffehKYobv1k+/ZcF29TWNeDJDIHqa4Y8zhDBB4l8peC4EkHhuIVlmcVTKasGP+F9eCkEp3ccR32kN8zRN8nirC8N0s+FhrsmCLEsf9WaLNNGfug2W5RNBLfgRBgm3mv0eR/mqkDLk1fLPPF6u1JMh9Ysr60A1LjverYIofWyI8KcZvsvSNC+O1k93PJHKU3op7rs/crX6YRnf5F1uKJH4J0j25buVvyt/Vi8rfuJWHob7ufjvw+MqzvmPbRBK2aPAV8hW+ToRZ1AcztP9JuLRl3klCMKHZSal3/Z5Em94KY+C7OGb6CbOJQuAA1xdiA5S2XKRbvKSA7443eVZ+sDv0iTNDj8QA84AY+JKWnXHgDjN0jzI43Qj75PnizhJGrfNAcecyg6Lvok4LLXBs5w/HdUorHASBOfpmufZs2hS3uDj4o6S2zeQFOePNVGUklcNjhBW0rOk5rLquEZPHJQAtoOJT4OZFfCUGJzA8pTi7+99H2NxJcjCsh2z9Xx//wFTqsGDwDC6pkDTNQa2qkGLriHy365scrWcoS2HOIBogEKKHeSezX7c7pAmabr9Eq/jvIMV6cqLAs4Woa48CxgaMj5f2MAMoGXmGSpGln5Jm82Qt+uXntYv30S3MkBLq0mC3S4OdWXW/gscSL9bVc5MnnwP8pxnm4MEAcMEIKhUraK2bMGf4vyPg8m45dmfZffy+ONT8+S5PCl+N49UmtAdhYaaXWCrWckynggL/Efvvk335RO+p7F4cAUyIrpn9Kjewy7dZyEvb2qmByf6UWRR/eRBtuS51c+BCNVLd+KGd+XGubiBgcOQX//pCYvHnMY13/N6MgfDMzGHvSbrAaf9dZWSy7ZLSbTyePfA83BVnrRzp+nn6Zy61Pb/i8UChaHm2ulArh36VNP5DbRcO3RbXLv7dtfuTxMEhuZF7mkGYZeziIwDAkGOewoGFzsjAaGGyFND4hLm4OkuqMKlAQT2RoIBThOGSxgE9g2DaAUCjmYRHWoc/xuL0MdXtAWGkUB4VW3iJ7cHegoENhIIHWoWfQtECL8w1NXrQ8dHu8ssiGKhRnVtk8qix3F4NRIYSMnfUldJIBhq+KzHFM9GD1Jiw+ehAeDrUJ+4wvcyfB7T4fPPCF+H8scVvhPw6YkEs2tX48FnVyjEe4jBK01kEXC3n4vDpTyMlUx02RBbYB9u14Oahk2p/yaspShI4qUsZIRCbVzIP0ilxmGQ3JYX1nEUJcdqznqcNUOkPG+WiAG58/2hhkZ65PMs9FBL2UONaN4Enl0kSIK5nJW8otcZPQYuhZ5dXdgF623Cf/n1CgskvgaLiy1cSAsu3tthQXapYR6I8HBFxUaF+S1j3rFwsWsPfBOm0dVcDsDoGSCE+IzI2KWIr85nHkRVHNoGGw0g+vdeLgU5aOSmmDG8FQ2Yg+Xf9qluUQcqK6Slx0JaLT08txA7izR7DLLoyhaZTerDPUjsJQa0pYQ4wGR0y2qOJN3trqg0FnUo5+qd0YQ71FAuOlFJDlpWU5U3wEE+nb00XylPvvMsFqqQHHgfc5gIOT4F9Z9e12SuQ8TYXv0xwxw7z2EinWfjzWF2WXpyAVZNnQbVukoFUM9lDmY/7OU58AFxf09LYuDLbmQj3lq5IE8J
Sh+EQSWp7z6cabe/NzcEqZ5M+lUgej0DPYexhkPTC10yS8UurK5COBojp74QpySX4qQIb11pCRExaUlJH1q+ImIW+M+0cdlk6QuR25+/YkgCjO4wcwCrw7BHRiNthzVAFyWtD2daTgZdtytrCcNN1kIH93KlfTmLJsZZkdfrq9d0zjG/H39lQonRUf8rHDtgjYeOlwjapc5gucz4MsinW77pNxdvzhiNOV7U0SS+49kDRtpWvYamQ+y1L6NtTVZRe5FvPZMbldSL1+UcqYJQqMCq8Hyb/8VDaUK7RgWn6OhdzVU0dhnc+ANNEBprKyAELaWBkSo2au3M1KJQHUvcZiSpA0d7GDkSHSYTCcxBmyqYvXXw547m3DGyCCI8e7yTllVa8jxTRrziwbUEKw1TX54OwRmLfbjDqrWfYEeQMdakLfptWxaIB1DwK3bRvWMFGxRWw64TBFYLV9+k4LbCopZ6nJxr8ttmmYxpq9fMNb3DTGVM96YHH+i1JSy4hR1DJCwTrT42Br+HBdJayQb2KtkcSWaK2K6bynQSHGD4jd4ZjtmRO1olBreVD3V/o77moFwKbPEuxRcjaLCWtrmZ77ZXF9J9Ry8jjt+yqByP40PaSm9XwMcFnBobapjjwUbtCtj5xVjod9j3d8HCK3QgQLPGxl2H+CcKrz3LpJMLHcRxSYMRSOMLgY7brKH23OMr4oo2T+obX28AxtrP4cIMaauoXZ53xz+wcewLGvUUgdd9qrULIf2JMdL8VgulVZB6Ne2QUeSTAc+1ZgROsE5wI3huNNvKBrsXXsAFxlNdg8RFj70pPdHqoXKlmhvtPHsFIZtps1eAVIIB8vcm5VUtoZnTT88xQyMVx31zemM1flXFHJj2ZnVUbSEdjPaohfan50YWi8Onp6xE8jYKtrlo/q5TxzEmRtQdxkqp8WdFyLm/ShZ5/hyAmRVzFwtOjX3VQ32HDBofO8H2Hgy1+EH7ZtUAe2NIWwW1i/3MCVzY9vMjFCFjL3C5GtDL3/VT+zvOYEDTXBvaWNziYT05YGSoMdZ04jYQqQuoF+gZ7hT6zKFuvSAZ9lxxLB6j9WtNsvQeQonT+quiRfP626z4038=</diagram></mxfile>
|
2203.03989/main_diagram/main_diagram.pdf
ADDED
|
Binary file (30.4 kB). View file
|
|
|
2203.03989/paper_text/intro_method.md
ADDED
|
@@ -0,0 +1,113 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Introduction
|
| 2 |
+
|
| 3 |
+
Recent development in Natural Language Processing (NLP) heavily benefits from a high level of maturity of open-source frameworks, such as Fairseq [\(Ott et al.,](#page-8-0) [2019\)](#page-8-0) or HuggingFace Transformers [\(Wolf et al.,](#page-8-1) [2020\)](#page-8-1). Thanks to the standardized interfaces, these libraries allow for immediate experimentation with the most recent research results, practically fostering the speed of further progress in the area. While their use is seamless for countless conventional use-cases of transformer models and fine-tuning to a specific end-task [\(Devlin et al.,](#page-7-0) [2019;](#page-7-0) [Radford](#page-8-2) [and Narasimhan,](#page-8-2) [2018\)](#page-8-2), divergence from this framework requires feasible, but elaborate and complex customizations, increasing the risk of logical errors and complicating the reproducibility of experiments. A characteristic group of problems
|
| 4 |
+
|
| 5 |
+

|
| 6 |
+
|
| 7 |
+
Figure 1: Overview of AdaptOr's objective-centric training framework: Objective 1) registers its compatible head on top of the shared model, 2) performs specific input encoding, and 3) computes the loss value based on its output. A Schedule implements a specific sampling curriculum and AdaptOr aggregates and propagates objectives' losses and performs optimization.
|
| 8 |
+
|
| 9 |
+
requiring significant changes to the standard pipeline are multi-step and multi-task adaptations.
|
| 10 |
+
|
| 11 |
+
This paper introduces the AdaptOr library, which aims to simplify the more complex training processes that can be more easily described by their training objectives. AdaptOr challenges the conventional *model-centric* framework, where data and task selection are constrained by the requirements of the selected language model architecture. Instead, it introduces an *objective-centric* training pipeline, with Objective as the central abstraction of the process.
|
| 12 |
+
|
| 13 |
+
The AdaptOr framework aims to help NLP researchers and practitioners engage in projects that include any of the following:
|
| 14 |
+
|
| 15 |
+
• Multi-objective training: when training a language model on more than one task or data set, including languages, AdaptOr can signif-
|
| 16 |
+
|
| 17 |
+
<sup>1</sup><github.com/gaussalgo/adaptor>
|
| 18 |
+
|
| 19 |
+
icantly simplify the custom code base that needs to be implemented. Even if the objective is custom, the user can avoid adjustments to other parts of the training pipeline.
|
| 20 |
+
|
| 21 |
+
- Custom data schedule: when users need to perform dynamic data sampling, AdaptOr allows them to implement a custom Schedule (see Figure [2\)](#page-3-0), leaving the data and model adjustment logic intact. This simplifies systematic experimentation and reproducibility, and minimizes the risk of errors.
|
| 22 |
+
- Objectives design & evaluation: AdaptOr exposes top-level declaration of training objectives, which enables easy experimentation with custom objectives. Objective-level monitoring can provide custom behavioural insights and allows for pruning less promising experiments earlier in the lengthy training process, saving computational costs.
|
| 23 |
+
- Robustness evaluation: The objective-centric paradigm provides an easy robustness estimation by evaluating on out-of-distribution samples. In the standard *sequential* adaptation scenario, objective-centric evaluation exposes characteristic flaws of adaptation, like exposure bias or catastrophic forgetting.
|
| 24 |
+
|
| 25 |
+
This paper is structured as follows: Section [2](#page-1-0) provides an overview of recent work demonstrating the potential of multi-objective training in domain and task adaptation. Section [2.4](#page-2-0) also describes other software frameworks applicable for similar use cases. Section [3](#page-3-1) describes the design of AdaptOr, showing the users how to confidently integrate novel objectives and schedules. In Section [4,](#page-4-0) we describe and implement a set of non-trivial, yet promising domain adaptation experiments using AdaptOr and collect their results. As AdaptOr remains under active development, we close in Section [5](#page-6-0) with an outline of the upcoming features. We welcome contributions of novel objectives and schedules.
|
| 26 |
+
|
| 27 |
+
This section provides an overview of recent work that demonstrates the potential of multi-objective training and schedules that motivated the design of AdaptOr. Our overview consists of a nonexhaustive list of applications that AdaptOr aims
|
| 28 |
+
|
| 29 |
+
to make more accessible for practical use and in future research.
|
| 30 |
+
|
| 31 |
+
Multi-task training has a long history in both traditional machine learning [\(Caruana,](#page-7-1) [1997\)](#page-7-1) and in deep learning [\(Crawshaw,](#page-7-2) [2020\)](#page-7-2). This section describes examples of multi-task (i.e. multi-objective) training, outlining its benefits and potential.
|
| 32 |
+
|
| 33 |
+
Under some circumstances, multi-task training enhances distributional robustness of neural models. [Tu et al.](#page-8-3) [\(2020\)](#page-8-3) demonstrate this on adversarial data sets, exposing common heuristic biases of the language models [\(McCoy et al.,](#page-8-4) [2019\)](#page-8-4). Enhanced model generalization can also be achieved by introducing one or more latent tasks that do not directly correspond to the end task but reflect specific desired properties of the model. One of a few studies in this direction is Sharpness-Aware Minimisation of [Foret et al.](#page-7-3) [\(2021\)](#page-7-3), performing multi-objective training on image classification using cross-entropy and a novel, sharpness-aware objective, reflecting the model's monotonicity on the local neighborhood. In context of Neural Machine Translation (NMT), [Wang and Sennrich](#page-8-5) [\(2020\)](#page-8-5) incorporate Minimum Risk Training (MRT) objective [\(Ranzato](#page-8-6) [et al.,](#page-8-6) [2016\)](#page-8-6), optimising an arbitrary sequence-level measure of outputs. In composition with the traditional token-level cross-entropy objective, MRT improves distributional robustness.
|
| 34 |
+
|
| 35 |
+
By aggregating multiple objectives, [Xie et al.](#page-8-7) [\(2019\)](#page-8-7) show that combining sentence classification objective with maximizing representation consistency to augmented samples fosters data efficiency.
|
| 36 |
+
|
| 37 |
+
The intuition on the benefits of multi-task training presumes that by optimizing the training by multiple cost functions, the final model is less prone to the weaknesses of a specific task [\(Col](#page-7-4)[lobert et al.,](#page-7-4) [2011\)](#page-7-4), possibly reflecting on higher-level, task-invariant properties of language [\(Bengio](#page-7-5) [et al.,](#page-7-5) [2013\)](#page-7-5).
|
| 38 |
+
|
| 39 |
+
Exposing a model to training samples in a systematic schedule, also referred to as a *curriculum*, can lead to an improvement of the accuracy of the final model [\(Bengio et al.,](#page-7-6) [2009\)](#page-7-6). While the positive effects of more complex schedules based on sample "difficulty" with transformers remain to be explored, multiple studies show the potential of confidence-based sampling to improve accuracy
|
| 40 |
+
|
| 41 |
+
and generalization. Biased samples can be identified, according to the model's confidence [\(Pleiss et al.,](#page-8-8) [2020;](#page-8-8) [Swayamdipta et al.,](#page-8-9) [2020\)](#page-8-9) or using Bayesian methods such as the Product of Experts [\(Hinton,](#page-7-7) [2002\)](#page-7-7). Then, they can be either eliminated [\(Bras](#page-7-8) [et al.,](#page-7-8) [2020\)](#page-7-8) or downweighted [\(Utama et al.,](#page-8-10) [2020\)](#page-8-10).
|
| 42 |
+
|
| 43 |
+
More complex scheduling methods are applied in training NMT models. [Bengio et al.](#page-6-1) [\(2015\)](#page-6-1) use a decay schedule to sample from both references and the previous outputs of an NMT model, minimizing the discrepancy between training and inference. [Zhang et al.](#page-8-11) [\(2019\)](#page-8-11) successfully use the same sampling strategy in a sequence-level objective. The results of [Lu et al.](#page-7-9) [\(2020\)](#page-7-9) underline the potential of sampling in NMT training, suggesting that the accuracy of transformers on reported MT benchmarks can be outperformed by simpler RNN models by combining objectives in a decay schedule.
|
| 44 |
+
|
| 45 |
+
Despite the reported improvements, we find that custom scheduling strategies are rarely used. We attribute this to their complicated integration into the standard training process. To foster the research and applicability of scheduling methods, AdaptOr makes the implementation of custom scheduling strategies easy, comprehensible, and reproducible.
|
| 46 |
+
|
| 47 |
+
Objective-centric frameworks are well-suited for domain adaptation techniques, where AdaptOr provides support for combining traditional end-task objectives with unsupervised adaptation or auxiliarytask objectives in a user-selected schedule. The goal of domain adaptation is to maximize performance on a specific data domain, often denoted as the *adapted* or *target domain* [\(Saunders,](#page-8-12) [2021\)](#page-8-12).
|
| 48 |
+
|
| 49 |
+
Perhaps the most common adaptation approach using pre-trained language models is to continue pre-training on unsupervised samples of the adapted domain [\(Luong and Manning,](#page-7-10) [2015;](#page-7-10) [Lee](#page-7-11) [et al.,](#page-7-11) [2019;](#page-7-11) [Beltagy et al.,](#page-6-2) [2019\)](#page-6-2). This approach has been successfully extended in various directions. For instance, [Gururangan et al.](#page-7-12) [\(2020\)](#page-7-12) show that adapting to a shared task on a different domain can enhance the accuracy of the eventual application. If supervised data is sparse, other auxiliary tasks, described earlier in Section [2.1,](#page-1-1) can be used as concurrent objectives [\(Xie et al.,](#page-8-7) [2019\)](#page-8-7).
|
| 50 |
+
|
| 51 |
+
In cases where larger volumes of data for a given task are available in a different language, adaptation using cross-lingual transfer can be considered. Pretrained language models show that cross-lingual
|
| 52 |
+
|
| 53 |
+
transfer works well with large-data unsupervised objectives [\(Lample and Conneau,](#page-7-13) [2019\)](#page-7-13), but it can also be applied for low-resource supervised objective, such as very low-resource translation [\(Neubig](#page-8-13) [and Hu,](#page-8-13) [2018\)](#page-8-13).
|
| 54 |
+
|
| 55 |
+
If even unsupervised target-domain data is sparse, another option is to subset arbitrary unsupervised sources to automatically identify samples of adapted domain, by applying domain classifier [\(Jiang and Zhai,](#page-7-14) [2007;](#page-7-14) [Elsahar and Gallé,](#page-7-15) [2019\)](#page-7-15). If the boundary between the training and the adapted domain is known, an auxiliary objective can minimise a discrepancy of representations between the training and possibly low-resource target domain [\(Chadha and Andreopoulos,](#page-7-16) [2018\)](#page-7-16).
|
| 56 |
+
|
| 57 |
+
Despite the possibilities, adaptation can also introduce undesired biases. In the scope of NMT, adaptation can cause problems of "catastrophic forgetting", when the model experiences performance degradation on the originally well-performing domains [\(Saunders,](#page-8-12) [2021\)](#page-8-12), or "exposure bias", when the model overfits the non-representative specifics of the target domain, such as the artifacts of data collection [\(Ranzato et al.,](#page-8-6) [2016\)](#page-8-6). Additionally, by normalizing a single type of bias, such as lexical overlap [\(McCoy et al.,](#page-8-4) [2019\)](#page-8-4), the model might degrade its accuracy on other domains [\(Utama](#page-8-10) [et al.,](#page-8-10) [2020\)](#page-8-10). Addressing multiple biases concurrently [\(Wu et al.,](#page-8-14) [2020\)](#page-8-14) can mitigate this problem.
|
| 58 |
+
|
| 59 |
+
AdaptOr allows the knowledgeable user to construct a reproducible and robust adaptation pipeline using native multi-objective evaluation. Covering multiple domains in separate objectives, AdaptOr can expose the above pitfalls, without the need to implement complex separate evaluation routines.
|
| 60 |
+
|
| 61 |
+
# Method
|
| 62 |
+
|
| 63 |
+
The *Adapters* architecture [\(Houlsby et al.,](#page-7-17) [2019\)](#page-7-17), having only a small set of parameters, might be a good fit when performing adaptation of a transformer with modest hardware or data. Recently, the AdapterHub library [\(Pfeiffer et al.,](#page-8-15) [2020\)](#page-8-15) makes training and sharing of Adapters convenient. Compared to AdaptOr, AdapterHub does not provide support for more complex adaptation cases, such as using multiple objectives, scheduling, or extended evaluation. However, since both libraries build upon the HuggingFace Transformers library [\(Wolf](#page-8-1) [et al.,](#page-8-1) [2020\)](#page-8-1), their close integration is feasible.
|
| 64 |
+
|
| 65 |
+
If the robustness of models to heuristic shortcuts [\(McCoy et al.,](#page-8-4) [2019\)](#page-8-4) is the primary goal, the
|
| 66 |
+
|
| 67 |
+
```
|
| 68 |
+
1 class ParallelSchedule(Schedule):
|
| 69 |
+
2 def _sample_objectives(self, split: str) -> Iterator[Objective]:
|
| 70 |
+
3 while True:
|
| 71 |
+
4 for objective in self.objectives[split].values():
|
| 72 |
+
5 yield objective
|
| 73 |
+
```
|
| 74 |
+
|
| 75 |
+
Figure 2: AdaptOr provides a convenient base for implementing custom sampling schedules. ParallelSchedule in the figure demonstrates an implementation of the schedule sampling the update objectives in rotation. Further, the sampling can be easily conditioned on the state of Objectives such as the recent outputs, loss, or metrics evaluations.
|
| 76 |
+
|
| 77 |
+
Robustness Gym library [\(Goel et al.,](#page-7-18) [2021\)](#page-7-18) provides a comprehensive evaluation over an extendable set of different kinds of heuristic biases. Robustness Gym provides much deeper evaluation compared to AdaptOr Evaluators, and could be integrated as an AdaptOr Evaluator. Unlike Robustness Gym, AdaptOr enables an evaluation of robustness also on generative tasks, with specified out-of-domain data sets.
|
| 78 |
+
|
| 79 |
+
This section describes the structure and functions of the AdaptOr framework. We introduce its primary components bottom-up. Figure [3](#page-4-1) depicts the relations of these components and compares user interaction with the traditional model-centric pipeline.
|
| 80 |
+
|
| 81 |
+
A LangModule instance provides a management of inputs, outputs and objective-specific model components, referred to as *heads*. Once an objective with given LangModule is instantiated, an objective-compatible model is either initialised, or given by the user (see Section [3.2\)](#page-3-2) and the parameters of this model are merged with the parameters of the previously-registered objectives.
|
| 82 |
+
|
| 83 |
+
The merge works as follows: If no previous objective was registered, then the model of the given objective is considered a base model. The models of the second- and later-registered objectives are then merged with the base model: first, pairs of PyTorch modules of the same name in the base and the new model are identified. If the dimensions and weights of these modules match, the respective module of the newly-added model is replaced with a module of the base model.
|
| 84 |
+
|
| 85 |
+
In the case of pre-trained transformers, the weights of heads are initialized randomly by default, resulting in a registration of a distinct head for each objective and sharing the remaining parameters. Users can control which parameters (not) to merge by explicitly setting their respective weights
|
| 86 |
+
|
| 87 |
+
as (non-)equal.
|
| 88 |
+
|
| 89 |
+
It is possible to use LangModule with any PyTorch module that uses a HuggingFace tokenizer, compatible with the given neural module. Therefore, LangModule is also suitable for other models such as recurrent networks.
|
| 90 |
+
|
| 91 |
+
Objectives are the primary component of AdaptOr's training pipeline. Most importantly, an Objective serves two functions: sample encoding and loss computation. By implementing these and choosing the type of a model's head, AdaptOr users can define and experiment with novel training objectives. If they additionally provide an explicit definition of the Objective's model (the objective\_module attribute), the new objective does not even have to comply with common model heads; shared parameters of the given objective\_module would still be merged with the given lang\_module.
|
| 92 |
+
|
| 93 |
+
If no objective\_module is given, the Objective will request that a LangModule assigns the Objective a module of the Objective's default compatible\_head (see Section [3.1\)](#page-3-3).
|
| 94 |
+
|
| 95 |
+
Additionally, every Objective instance performs its own logging, evaluation, and state updates, such as its convergence, based on an evaluation of the given val\_evaluators, or draws a progress bar, based on the state of its sample iteration. However, the training flow is guided by a Schedule (see Section [3.3\)](#page-4-2). Objectives can implement custom data sampling, but if possible, we recommend doing so in a custom Schedule instance.
|
| 96 |
+
|
| 97 |
+
Since data encoding is also objective-specific, Objectives expose a higher-level user interface of data inputs than other frameworks: instead of encodings, users provide an Objective with a texts\_or\_path and a labels\_or\_path containing raw texts and respective labels. AdaptOr provides an implementation of standard Objectives for sequence and token classification and sequenceto-sequence tasks. When implementing a custom
|
| 98 |
+
|
| 99 |
+
<span id="page-4-1"></span>
|
| 100 |
+
|
| 101 |
+
Figure 3: A comparison of interaction with a model-centric HuggingFace Trainer (left) and objective-centric AdaptOr (right): While in model-centric approach, user resolves text processing, sampling and encoding compatible with selected model of specific objective, objective-centric approach delegates these functionalities to Objective instances. Explicit definition of Objectives and Schedule on AdaptOr's user side makes otherwise complex multi-objective and custom-schedule experiments transparent and reproducible.
|
| 102 |
+
|
| 103 |
+
Objective, note that sampling and encoding are performance bottlenecks on current high-end GPUs.
|
| 104 |
+
|
| 105 |
+
Schedules control the training flow through the interfaces provided by HuggingFace Transformers library. Primarily, they deliver 1) a set of standard stopping strategies based on the state of the Objectives and 2) an IterableDataset instance, constructed by sampling Objectives according to a sampling strategy implemented in its \_sample\_objectives. A Schedule also ensures that outputs of distinct lang\_modules' heads are delivered to the respective Objectives for loss computation.
|
| 106 |
+
|
| 107 |
+
This relatively complex sampling framework provides a very simple interface for custom Schedule implementations (see Section [2.2\)](#page-1-2). For instance, a pre-defined ParallelSchedule is implemented with three lines of code (see Figure [2\)](#page-3-0).
|
| 108 |
+
|
| 109 |
+
An Adapter is customization of the HuggingFace Trainer with only minor adjustments. Specifically, Adapter redirects loss computation to a Schedule, which further distributes outputs to corresponding Objectives and extends native training logs with logs of Objectives' Evaluators. Furthermore, Adapter adjusts persistence of the models so that a model of every head can be reloaded without the use of AdaptOr, by simply using HuggingFace Transformers' AutoModelForXY.from\_pretrained.
|
| 110 |
+
|
| 111 |
+
Based on the actively-developed HuggingFace
|
| 112 |
+
|
| 113 |
+
Transformers library, AdaptOr allows its users to benefit from all other native features of HuggingFace Transformers, such as the support for the most recent models, custom logging platforms, or distributed parallel training. Furthermore, it can simplify integration with other custom libraries (see Section [2.4\)](#page-2-0).
|
2203.04115/main_diagram/main_diagram.drawio
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
<mxfile host="app.diagrams.net" modified="2022-01-26T22:41:26.425Z" agent="5.0 (Windows)" etag="MxK2iQ-rkiFtwt5qMW8T" version="16.2.6" type="google"><diagram id="l9sr0xo-PvOBueZWncCr" name="Page-1">7Vxbc6O4Ev41fjQF4v6YmydbNbtn6mTr7OxTSgZhs4MRC3LizK8/EoirZBvbYDvJeGpiELqh/vpTq1vyRL9bbb6kMFn+jn0UTYDqbyb6/QQATXNM+sVS3ooUy9CLhEUa+jxTnfAU/kQ8UeWp69BHWSsjwTgiYdJO9HAcI4+00mCa4td2tgBH7VYTuOAtqnXCkwcjJGT7K/TJskh1zEbuRxQulmXLmsqfrGCZmSdkS+jj10aS/jDR71KMSXG12tyhiA1eOS5FudmWp1XHUhSTPgV+Pn//ArK5+VsSff/PI/nr71mQTS2jqOYFRmv+xry35K0cgiVZRfRKm+i31Tuo9MaH2RL5/AZG4SKm1x7tDUppwgtKSUjH8YY/WIW+z2pkdSSs5tVmwSCjFFICCs0b4TW5YbesdgYYLW+HgQmwqxgTjw2qBnTWJMv5iKBfZgxwTGZwFUas6J/hiuIGqH+gV/r3v3gFY56Fg0wzWWdIin9UkmWvGIRRdIcjnOYvryPNN5Fd5Ww8cS1bhxZ9wkeQvi/abJWNVkmcqgrCK0TSN5qFFwAlariWAJffv9aY0xyetmzirUQX5DhfVHXXUKAXHA0HIEOTAMOKCJNH+NICiPXvmmE4H9tplg/uDc0AQLKpH9KrBf+OyszH13KPMoYqXhd9v6K6dhM0Oe9qO1WSdFpfcEAf3cHYD31IGOSKSueppD8ndVNa46+iZy3aocYUr2O/4sDXZUjQUwI99vSVkhtNa5Bnh1mCAFmeJ2MW33bn6pGENgAZaW0u0lVNAZZIR6aEjioeG5yOLH3/RLWg4ki2DgC3EOA86k74vVna0NojYznCsMhGRR9rUMrGdg1KOd2Gq9zSaQKyO0UTnDRSv8I5ir7hLCQhZk/nmBC8ohki9uAWej8WOfxbkGYfmiVv7CZLCosstw/KmyDcMIW55f25XxLCTLkbNhJg5vmxoYRUVEFIFStVPNoiVUFIIP1i6VQDZshfe5D1akpZd5p5S2oQThfRW7KcAmPKOHoGqF0zu4/hs0//e0sYxvnVEkXhJr+iSkcLPy9QjOjb0ukOOEoSLwSTAIiKO5upal/17KWOOwAvYrGBNf2sWJMZBL+w9qGx1pkQLgY98At6nxR64MLQs8xTl0GaNcQyqFsLtWZKQdWJuuqwf2JrM4T8OUUyLUcxzsZ0uDXK+xiBbylOcEY1UrZU6/nSl3GLwKY/pHaHMAdI5Q45fKkAZOuOuWMapipwg2zt4ngoX7sEUZg88jxDLD2c9trDMORrD5krxBqNA9zr5ICilifECudOiI+g1oK6fpyXGop4qHqRtoXTn2VSRHvJl8BM0xMcxiRXGPN2Yt6zutYEZ5wlBJKIcYw6bFAmdcilJx81OIVT9zD+VNDxYUhMWFfCIeZYHNLDgq19SWzcD/Ml+RA5gdSXZHkOmgcDWnG7B97oOrJNceANGXvr43mOTmVv+v5DOLKltfwWT5/CiFr8Y5LamC/w5KUIxSFdUpzK04O6VgOQmycpJpCv66Zubdk0NceaW6bVTz8y2j59068oYKDNI1IDqIxudbjKshRgijaPayumxOoBZerwEaAescHC5SpdTZ7AN6e7ag2nGq1qWE2ldCI3htWSWZKOYo82qD382AdBvz3yqqgMt+Devb3dOpWPubgvELTDn64Ax7Q1o/rbnjuA4mpW42OIvgBTaRa3NYmGbKtmeNH2mORJGsJ4kZtiH1euEmNLFhYZU81AD2/1SdwlHfQRCc1Sr5TQQA/Un5vQZL7aURVib+DwQxEd6DGHnZXork3e10CAYxtv5y
ZAG4gEaCmOGIE/OwHKQgSfjgA/laUHZD6Fz0WA128BOhIZDbHfj37DFRNhPM/Y1+/riITTG0IFPl8TRMs9sDYLv8M2F8h7cemO7zE03bb7w5SEfKWcPtrOV3ByvGdkj+H/QirBXw7D9+Yw1OxhNMZwDMXRBVNIshlRc4HiAAnvqoprjaQ9titIBvkL9MRvcUqWeIFjGD3UqbfeOn2pQh9tQdYFvmK2cSfP8g8i5I2Hmxi7tYWLNiH53rj+m1WlmPzufsNrzm/KqDod+/Tte55RVfUyoSiZB7Ty+7psftcq/A2lFAQ5Wbf3ARwRnnfEeb0Kj7UR6aMArnP1EIP2GYFpeZ7i3otgloVemTwLo2q4Yr/MxJmfpvDntY+bCXE3YKnM8Tr10A5wlPt0aA8WaKd9IdeAFEVUPV/aHRkcwqV1egUQVg+A8HiAa8CII6SJIbWNoRpoNYxEs2MXbmtdbOshNfuPUcT3iF9e9Buz5SZbo6um1rGBin7xUrUWUNHAt0Y2biJubcdWQbsd1W5Wt79f7fz0ouhBrZLVmByvpaUshjPT1NLorhO5+T2SLXQThyvIXvRPuliMTtwWcoX2j7CI6RhEAx1YsdSuY8hVZAFUQ7SEzLHMoHKPwxXMIUebQRUXczNIc6/LDhrEuuk/LQ05j5RHmc8yjxxK/07pPC7dc07nZLFA/9qu/OPQv2NfjYIdYqS1FAxMhrBvPozd92kV7Dz20vYT3adZMY9rhqu9Rsz1mysd62Sgvag2cBRNdNdIIlfMXSMJa5gjesmNHmHC7AcqzhlU3mWUPrygwsm8+/iDEOXY4XXOjxzp9DPhZ68ah7IEL/jWU1vyw154TaIwpq2XPxIiPZvRPYKBgyD0kOLh2EMJyRTaKxnmzuQntyiM2vOsYeiKa6n1RwybAas6P9HyndsVJIeHVI946F4x7j2+JwqvF774kTbhsM3PdcoEvUrWRW0ymF9I8EzK5daySvCSY3FatWu0JWmzKjy8pO0BJN1il+2UX4tOJmc5RmRC3Kv0Xph5+DmDAVIShJMIPScRnZayZ7Kkc0P2nJ/OzM9uupfDhKPpSsfoNkxbccU5BQDFlqx7ATU/RyMAWeS1O6d0Rp39nEumwNwfkilzBNNnrY0HmTCPwsiBU4cAmAuJ3HLcSsErkYtH46i+y1hAU+yxvOX2MZ6O4dZe5TKqWn9poLzfswLruDfMPWuvI36MpV8Ihz7Rbtg/6aKI9b274mstY1vLvwGXUKDnCsq6xApKOGfkHOah3uvSMN1d+cdZodkiT74Hl8aFFKOF+60hI+sYj8qAWmT11KIzBYy4o2HogFFXXa4yYGTrF1evS80hRwdURSU7XlcupQNmd+OwYfcLmwpVmd2fJyhDSmVFBR2crE7ddkzV2a1O2s78I6lTSYEXnK2uzZTb5d1uq9IO87V8vsV8ldiDF7D6ik2Swyur5rSQrHd3eQ6kYaI9aA1qP5rOBUJitswfc9jPXziD/PyFtJYvswi//oHI9Obr5Ow/6zHme6EYpZDg9LIbWg+jNEtqQai27iIpeXU8LXPN94OBzr93nBq6o1ebU5uxESDZAu5aitqZgXu4Neht/avbhe7Vv12uP/wf</diagram></mxfile>
|
2203.04115/main_diagram/main_diagram.pdf
ADDED
|
Binary file (36 kB). View file
|
|
|
2203.04115/paper_text/intro_method.md
ADDED
|
@@ -0,0 +1,121 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Introduction
|
| 2 |
+
|
| 3 |
+
Biological sequences like proteins and DNA have a broad range of applications to several impactful problems ranging from medicine to material design. For instance, design of novel anti-microbial peptides (AMPs; short sequences of amino-acids) is crucial, and identified as the first target to tackle the growing public health risks posed by increasing anti-microbial resistance [AMR; @2022amr]. This is particularly alarming according to a recent report[^1] by the World Health Organization, which predicts millions of human lives lost per year (with the potential breakdown of healthcare systems and many more indirect deaths), unless methods to efficiently control (and possibly stop) the fast-growing AMR are found.
|
| 4 |
+
|
| 5 |
+
Considering the diverse nature of the biological targets, modes of attack, structures, as well as the evolving nature of such problems, diversity becomes a key consideration in the design of these sequences [@mullis2019diversity]. Another reason for the importance of being able to propose a *diverse* set of good candidates is that cheap screening methods (like in-silico simulations or in-vitro experiments) may not reflect well future outcomes in animals and humans, as illustrated in Figure [1](#fig:ddpipeline){reference-type="ref" reference="fig:ddpipeline"}. To maximize the chances that at least one of the candidates will work in the end, it is important for these candidates to cover as much as possible the modes of a *goodness* function that estimates future success. The design of new biological sequences involves searching over combinatorially large discrete search spaces on the order of $\mathcal{O}(10^{60})$ candidates. Machine learning methods that can exploit the combinatorial structure in these spaces (e.g., due to laws of physics and chemistry) have the potential to speed up the design process for such biological sequences [@bayes4DD; @bayesDD2; @das2021accelerated].
|
| 6 |
+
|
| 7 |
+
<figure id="fig:ddpipeline" data-latex-placement="t">
|
| 8 |
+
<embed src="figures/DrugDiscoveryLoop.pdf" style="width:100.0%" />
|
| 9 |
+
<figcaption>Illustration of a typical drug discovery pipeline. In each round, a set of candidates is proposed which are evaluated under various stages of evaluation, each measuring different properties of the candidates with varying levels of precision. The design procedure is then updated using the feedback received from the evaluation phase before the next round begins. Because the early screening phases are imperfect, and the ideal “usefulness" of the candidate can be ill-defined, it is important to generate for these phases a <span><em>diverse</em></span> set of candidates (rather than many similar candidates who could all fail in the downstream phases).</figcaption>
|
| 10 |
+
</figure>
|
| 11 |
+
|
| 12 |
+
The development process of such biological sequences, for a particular application, involves several rounds of a candidate ideation phase (possibly starting with a random library) followed by an evaluation phase, as shown in Figure [1](#fig:ddpipeline){reference-type="ref" reference="fig:ddpipeline"}. The evaluation consists of several stages ranging from numerical simulations to expensive wet-lab experiments, possibly culminating in clinical trials. These stages filter candidates with progressively higher fidelity oracles that measure different aspects of the *usefulness* of a candidate. For example, the typical evaluation for an antibiotic drug after ideation would consist of: (1) in-silico screening using approximate models to estimate anti-microbial activity of $\mathcal{O}(10^6)$ candidates (2) in-vitro experiments to measure single-cell effectiveness against a target bacterium species of $\mathcal{O}(10^3)$ candidates (3) trials in small mammals like mice with $\mathcal{O}(10)$ candidates (4) randomized human trials with $\mathcal{O}(1)$ candidates. These oracles are often imperfect and do not evaluate all the required properties of a candidate.
|
| 13 |
+
|
| 14 |
+
The biological repertoire of DNA, RNA and protein sequences is extremely diverse, to support the diversity of structure, function and modes of action exploited by living organisms, where the same high-level function can be potentially executed in more than one possible manner [@mullis2019diversity]. Moreover, the ultimate success of candidate drugs also depends on satisfying multiple often conflicting desiderata, not all of which likely can be precisely estimated in-silico. This fact, combined with the overall effect of the above aggressive filtering and use of potentially imperfect oracles, needs to be addressed in the design phase through the *diversity* of the generated candidates. Diverse candidates capturing the *modes* of the imperfect oracle improve the likelihood of discovering a candidate that can satisfy all (or many) evaluation criteria, because failure in downstream stages is likely to affect nearby candidates (from the same mode of the oracle function), while different modes are likely to correspond to qualitatively different properties.
|
| 15 |
+
|
| 16 |
+
This setup of iteratively proposing a batch of candidates and learning from the feedback provided by an oracle on that batch fits into the framework of active learning [@aggarwal2014active]. Bayesian Optimization is a common approach for such problems [@rasmussen2005gaussian; @garnett_bayesoptbook_2022]. It relies on a Bayesian surrogate model of the usefulness function of interest (e.g., the degree of binding of a candidate drug to a target protein), with an output variable $Y$ that we can think of as a reward for a candidate $X$. An acquisition function ${\cal F}$ is defined on this surrogate model and a pool of candidates will be screened to search for candidates $x$ with a high value of ${\cal F}(x)$. That acquisition function combines the expected reward function $\mu$ (e.g., $\mu(x)$ can be the probability of obtaining a successful candidate) as well as an estimator of epistemic uncertainty $\sigma(x)$ around $\mu(x)$, to favour candidates likely to bring new information to the learner. There are many possible candidate selection procedures, from random sampling to genetic algorithms evolving a population of novel candidates [@bayes4DD; @belanger2019biological; @moss2020bayesian; @swersky20amortized; @bayesDD2]. An alternative is to use Reinforcement Learning (RL) to maximize the value of a surrogate model of the oracle [@angermueller2019model]. RL methods are designed to search for a single candidate that maximizes the oracle, which can result in poor diversity and can cause candidate generation to get *stuck* in a single mode [@bengio2021flow] of the expected reward function. Additionally, as the final goal is to find *novel* designs that are different from the ones that are already known, the generative model must be able to capture the tail ends of the data distribution.
|
| 17 |
+
|
| 18 |
+
In settings where diversity is important, another interesting way to generate candidates is to use a generative policy that can sample candidates proportionally to a reward function (for instance, the acquisition function over a surrogate model) and can be sampled i.i.d. to obtain a set of candidates that covers well the modes of the reward function. A sample covering the modes approximately but naturally satisfies the ideal criterion of high scoring and diverse candidates. GFlowNets [@bengio2021flow] provide a way to learn such a stochastic policy and, unlike Markov chain Monte Carlo (MCMC) methods (which also have this ability), amortize the cost of each new i.i.d. sample (which may require a lengthy chain, with MCMC methods) into the cost of training the generative model [@Zhang2022GenerativeFN]. As such, this paper is motivated by the observation that GFlowNets are appealing in the above Bayesian optimization context, compared with existing RL and MCMC approaches in domains such as small molecule synthesis.
|
| 19 |
+
|
| 20 |
+
In this work, we present an active learning algorithm based on a GFlowNet generator for the task of biological sequence design. In addition to this, we propose improvements to the GFlowNet training procedure to improve performance in active learning settings. We apply our proposed approach on a broad variety of biological sequence design tasks. The key contributions of this work are summarized below:
|
| 21 |
+
|
| 22 |
+
- An active learning algorithm with GFlowNet as the generator for designing novel biological sequences.
|
| 23 |
+
|
| 24 |
+
- Investigating the effect of off-policy updates from a static dataset to speed up training of GFlowNets.
|
| 25 |
+
|
| 26 |
+
- Incorporating the epistemic uncertainty in the predicted expected reward to improve exploration in GFlowNets.
|
| 27 |
+
|
| 28 |
+
- Validating the proposed algorithm on three protein and DNA design tasks.
|
| 29 |
+
|
| 30 |
+
We consider the problem of searching over a space of discrete objects $\mathcal{X}$ to find objects $x \in \mathcal{X}$ that maximize a given usefulness measure (oracle) $f: \mathcal{X} \mapsto \mathbb{R}^{+}$. We consider the setting where this oracle can only be queried $N$ times in fixed batches of size $b$. This constitutes $N$ rounds of evaluation available to the active learning algorithm. The algorithm also has access to an initial dataset $D_0=\{(x^0_1, y^0_1), \dots, (x^0_n, y^0_n)\}$, where $y^0_i=f(x^0_i)$ from evaluations by the oracle.
|
| 31 |
+
|
| 32 |
+
The algorithm has to propose a new batch of candidates $\mathcal{B}_i = \{x_1^i, \dots, x_b^i\}$, given the current dataset $\mathcal{D}_i$, in each round $i\in \{1, \dots, N\}$. This batch is then evaluated on the oracle to obtain the corresponding scores for the candidates $y_j^i=f(x_j^i)$. The current dataset $\mathcal{D}_i$ is then augmented with the tuples of the proposed candidates and their scores to generate the dataset for the next round, $\mathcal{D}_{i+1} = \mathcal{D}_i \cup \{(x_1^i, y_1^i), \dots, (x_b^i, y_b^i)\}$.
|
| 33 |
+
|
| 34 |
+
This problem setup is similar to the standard black-box optimization problem [@audet2017derivative] with one difference: the initial dataset $D_0$ is available as a starting point, which is actually a common occurrence in practice, i.e., a historical dataset. This setup can also be viewed as an extension of the Offline Model Based Optimization [@trabucco2021conservative; @trabucco2021designbench] paradigm to multiple rounds instead of a single round.
|
| 35 |
+
|
| 36 |
+
# Method
|
| 37 |
+
|
| 38 |
+
As discussed in Section [1](#sec:intro){reference-type="ref" reference="sec:intro"}, searching for a single candidate maximizing the oracle can be problematic in the typical scenario where the available (cheap, front-line) oracle is imperfect. Instead, we are interested in looking for a diverse set of $K$ top candidates generated by the algorithm, $\mathcal{D}_{\text{Best}}= \text{TopK}(\mathcal{D}_K \setminus \mathcal{D}_0)$. We outline the key characteristics that define the set of *ideal* candidates.
|
| 39 |
+
|
| 40 |
+
- **Performance/Usefulness Score**: The base criteria is for the set to include high scoring candidates, which can be quantified with $$\begin{equation}
|
| 41 |
+
\text{Mean}(\mathcal{D})=\frac{\sum_{(x_i,y_i) \in \mathcal{D}} y_i}{|\mathcal{D}|}
|
| 42 |
+
\end{equation}$$
|
| 43 |
+
|
| 44 |
+
- **Diversity**: In addition to being high scoring, we would like the candidates to capture the modes of the oracle. One way to measure this is
|
| 45 |
+
|
| 46 |
+
$$\begin{equation}
|
| 47 |
+
{\small\text{Diversity}}(\mathcal{D}) \hspace*{-1mm}=\hspace*{-1mm} \frac{\sum\limits_{(x_i,y_i)\in \mathcal{D}}\sum\limits_{(x_j,y_j)\in \mathcal{D}\setminus \{(x_i,y_i)\}}\hspace*{-1mm} d(x_i, x_j)}{|\mathcal{D}|(|\mathcal{D}| - 1) }
|
| 48 |
+
\end{equation}$$ where $d$ is a distance measure defined over $\mathcal{X}$.
|
| 49 |
+
|
| 50 |
+
- **Novelty**: Since we start with an initial dataset $\mathcal{D}_0$, the proposed candidates should also be different from the candidates that are already known. We measure this *novelty* in the proposed candidates as follows: $$\begin{equation}
|
| 51 |
+
\text{Novelty}(\mathcal{D}) = \frac{\sum_{(x_i,y_i) \in \mathcal{D}} \min_{s_j \in \mathcal{\mathcal{D}}_0} d(x_i, s_j)}{|\mathcal{D}| }
|
| 52 |
+
\end{equation}$$
|
| 53 |
+
|
| 54 |
+
All three metrics are applied on the TopK scoring candidates, i.e., for $\mathcal{D}=\mathcal{D}_{\text{Best}}$. It is important to note that either of these metrics considered *alone* can paint a misleading picture. For instance, a method can generate diverse candidates, but these candidates might be low scoring and similar to the known candidates. Thus, a method should be evaluated holistically, considering *all* the three metrics.
|
| 55 |
+
|
| 56 |
+
GFlowNets [@bengio2021flow; @bengio2021gflownet] tackle the problem of learning a stochastic policy $\pi$ that can sequentially construct discrete objects $x\in\mathcal{X}$ with probability $\pi(x)$ using a non-negative reward function $R:\mathcal{X}\mapsto \mathbb{R}^{+}$ defined on the space $\mathcal{X}$, such that $\pi(x) \propto R(x)$. This property makes GFlowNets well-positioned to be used as a generator of diverse candidates in an active learning setting. In this section, we present our proposed active learning algorithm based on GFlowNets [@bengio2021flow]. We only present the relevant key results, and refer the reader to @bengio2021gflownet for a thorough mathematical treatment of GFlowNets. Figure [2](#fig:al_setup){reference-type="ref" reference="fig:al_setup"} provides an overview of our proposed approach and Algorithm [\[algo:multi_round\]](#algo:multi_round){reference-type="ref" reference="algo:multi_round"} describes the details of the approach.
|
| 57 |
+
|
| 58 |
+
<figure id="fig:al_setup" data-latex-placement="htbp">
|
| 59 |
+
<embed src="figures/gfn_al.pdf" />
|
| 60 |
+
<figcaption>GFlowNet-AL: Our proposed approach for sequence design with GFlowNets consists of three main components: (1) GFlowNet Generator <span class="math inline"><em>π</em><sub><em>θ</em></sub></span> (green box), which generates diverse candidates with probability proportional to <span class="math inline"><em>R</em>(<em>x</em>)</span>, which is defined by the proxy, (2) Proxy (blue) which consists of a model <span class="math inline"><em>M</em></span> that can output a mean prediction <span class="math inline"><em>μ</em></span> and uncertainty estimate <span class="math inline"><em>σ</em></span> around <span class="math inline"><em>μ</em></span>, along with an acquisition function <span class="math inline">ℱ</span>, which combines the mean and uncertainty predicted by the model, and (3) Dataset <span class="math inline">𝒟<sub><em>i</em></sub></span> (yellow) which stores all the available candidates up to round <span class="math inline"><em>i</em></span>. In each round, the model <span class="math inline"><em>M</em></span> is first trained on <span class="math inline">𝒟<sub><em>i</em></sub></span>. The generative policy is then trained with reward function <span class="math inline"><em>R</em> = ℱ(<em>M</em>.<em>μ</em>, <em>M</em>.<em>σ</em>)</span> and data <span class="math inline">𝒟<sub><em>i</em></sub></span>. A new batch of candidates <span class="math inline">ℬ<sub><em>i</em></sub></span> is then sampled from <span class="math inline"><em>π</em><sub><em>θ</em></sub></span>, evaluated with the Oracle <span class="math inline">𝒪</span> (red) and then added to <span class="math inline">𝒟<sub><em>i</em></sub></span> to obtain <span class="math inline">𝒟<sub><em>i</em> + 1</sub></span>. This process repeats for <span class="math inline"><em>N</em></span> rounds of active learning. </figcaption>
|
| 61 |
+
</figure>
|
| 62 |
+
|
| 63 |
+
::: algorithm
|
| 64 |
+
:::
|
| 65 |
+
|
| 66 |
+
We assume the space $\mathcal{X}$ is *compositional*, that is, object $x\in\mathcal{X}$ can be constructed using a sequence of actions taken from a set $\mathcal{A}$. After each step, we may have a partially constructed object, which we call a state $s \in \mathcal{S}$. For example, @bengio2021flow use a GFlowNet to sequentially construct a molecule by inserting an atom or a molecule fragment in a partially constructed molecule represented by a graph. In the auto-regressive case of sequence generation, the actions could just be to append a token to a partially constructed sequence. A special action indicates that the object is complete, i.e., $s = x \in {\mathcal X}$. Each transition $s{\rightarrow}s' \in \mathcal{E}$ from state $s$ to state $s'$ corresponds to an edge in a graph $G= (\mathcal{S}, \mathcal{E})$ with the set of nodes $\mathcal{S}$ and the set of edges $\mathcal{E}$. We require the graph to be directed and acyclic, meaning that actions are constructive and cannot be undone. An object $x \in \mathcal{X}$ is constructed by starting from an initial empty state $s_0$ and applying actions sequentially, and all complete trajectories must end in a special final state $s_f$. The fully constructed objects in $\mathcal{X}\subset\mathcal{S}$ are *terminating states*. The construction of an object $x$ can thus be defined as a trajectory of states $\tau = (s_0 {\rightarrow} s_1 {\rightarrow} \dots{\rightarrow} x {\rightarrow} s_f)$, and we can define $\mathcal{T}$ as the set of all trajectories. $\text{Parent}(s) = \{s': s'{\rightarrow} s \in \mathcal{E}\}$ denotes the parents for node $s$ and $\text{Child}(s) = \{s': s{\rightarrow} s' \in \mathcal{E}\}$ denotes the children of node $s$ in $G$.
|
| 67 |
+
|
| 68 |
+
@bengio2021gflownet define a *trajectory flow* $F:\mathcal{T}\mapsto \mathbb{R}^+$. This trajectory flow $F(\tau)$ can be interpreted as the probability mass associated with trajectory $\tau$. The *edge flow* can then be defined as $F(s{\rightarrow} s') = \sum_{s{\rightarrow} s' \in \tau}F(\tau)$, and *state flow* can be defined as $F(s) = \sum_{s \in \tau}F(\tau)$. The flow associated with the final step (transition) in the trajectory $F(x{\rightarrow} s_f)$ is called the terminal flow and the objective of training a GFlowNet is to make it approximately match a given reward function $R(x)$ on every possible $x$.
|
| 69 |
+
|
| 70 |
+
The trajectory flow $F$ is a measure over complete trajectories $\tau \in \mathcal{T}$ and it induces a corresponding probability measure $$\begin{equation}
|
| 71 |
+
P(\tau) = \frac{F(\tau)}{\sum_{\tau \in \mathcal{T}}F(\tau)} = \frac{F(\tau)}{Z},
|
| 72 |
+
\end{equation}$$ where $Z$ denotes the total flow, and corresponds to the partition function of the measure $F$. The forward transition probabilities $P_F$ for each step of a trajectory can then be defined as $$\begin{equation}
|
| 73 |
+
P_F(s'|s) = \frac{F(s{\rightarrow} s')}{F(s)}.
|
| 74 |
+
\end{equation}$$ We can also define the probability $P_F(s)$ of visiting a terminal state $s$ as $$\begin{equation}
|
| 75 |
+
P_F(s) = \frac{\sum_{\tau \in \mathcal{T}: s\in\tau}F(\tau)}{Z}.
|
| 76 |
+
\end{equation}$$
|
| 77 |
+
|
| 78 |
+
A *consistent flow* satisfies the *flow consistency equation* $\forall s \in \mathcal{S}$ defined as follows: $$\begin{equation}
|
| 79 |
+
\sum_{s'\in \text{Parent}(s)} F(s'{\rightarrow} s) = \sum_{s'' \in \text{Child}(s)} F(s{\rightarrow} s'').
|
| 80 |
+
\end{equation}$$
|
| 81 |
+
|
| 82 |
+
It has been shown [@bengio2021flow] that for a consistent flow $F$ with the terminal flow set as the reward, i.e., $F(x{\rightarrow} s_f) = R(x)$, a policy $\pi$ defined by the forward transition probability $\pi(s'|s) = P_F(s'|s)$ samples object $x$ with probability proportional to $R(x)$ $$\begin{equation}
|
| 83 |
+
\pi(x) = \frac{R(x)}{Z}.
|
| 84 |
+
\end{equation}$$
|
| 85 |
+
|
| 86 |
+
GFlowNets learn to approximate an *edge flow* $F_\theta:\mathcal{E} \mapsto \mathbb{R}^+$ defined over $G$, such that the terminal flow is equal to the reward $R(x)$ and the flow is *consistent*. This is achieved by defining a loss function whose global minimum gives rise to the consistency condition. This was first formulated [@bengio2021flow] via a temporal difference-like [@sutton2018reinforcement] learning objective, called *flow-matching*:
|
| 87 |
+
|
| 88 |
+
$$\begin{equation}
|
| 89 |
+
\mathcal{L}_{FM}(s; \theta) = \left(\log \frac{\sum_{s'\in \text{Parent}(s)} F_\theta(s'{\rightarrow} s)}{\sum_{s'' \in \text{Child}(s)}F_\theta(s{\rightarrow} s'')}\right)^2.
|
| 90 |
+
\label{eq:fm_objective}
|
| 91 |
+
\end{equation}$$
|
| 92 |
+
|
| 93 |
+
@bengio2021flow show that given trajectories $\tau_i$ sampled from an exploratory training policy $\tilde{\pi}$ with full support, an edge flow learned by minimizing Equation [\[eq:fm_objective\]](#eq:fm_objective){reference-type="ref" reference="eq:fm_objective"} is consistent. At this point, the forward transition probability defined by this flow $P_{F_\theta}(s'|s) = \frac{F_\theta(s{\rightarrow} s')}{\sum_{s'' \in \text{Child}(s)}F_\theta(s{\rightarrow} s'')}$ would sample objects $x$ with a probability $P_F(x)$ proportionally to their reward $R(x)$.
|
| 94 |
+
|
| 95 |
+
In practice, the trajectories for training GFlowNets are sampled from an exploratory policy that is a mixture between the GFlowNet sampler $P_{F_\theta}$ and a uniform choice of action among those allowed in each state: $$\begin{equation}
|
| 96 |
+
\bar{\pi}_\theta = (1-\delta)P_{F_\theta} + \delta \cdot \text{Uniform}.
|
| 97 |
+
\end{equation}$$
|
| 98 |
+
|
| 99 |
+
This uniform policy introduces exploration preventing the training from getting stuck in one or a few modes. This is analogous to $\epsilon$-greedy exploration in reinforcement learning.
|
| 100 |
+
|
| 101 |
+
@malkin2022trajectory present an alternative objective defined over trajectories with faster credit assignment for learning GFlowNets, called *trajectory balance*, defined as follows:
|
| 102 |
+
|
| 103 |
+
$$\begin{equation}
|
| 104 |
+
\mathcal{L}_{TB} (\tau;\theta) = \left(\log \frac{Z_\theta \prod_{s{\rightarrow} s' \in \tau}P_{F_\theta}(s'|s)}{R(x)}\right)^2,
|
| 105 |
+
\label{eq:trajbal_objective}
|
| 106 |
+
\end{equation}$$ where $\log Z_\theta$ is also a learnable free parameter. This objective can improve learning speed due to more efficient credit assignment, as well as robustness to long trajectories and large vocabularies. Equation [\[eq:trajbal_objective\]](#eq:trajbal_objective){reference-type="ref" reference="eq:trajbal_objective"} is the training objective we have used in this paper.
|
| 107 |
+
|
| 108 |
+
When generating sequences in an auto-regressive fashion (appending one token at a time), as in this paper, the mapping from trajectories to states becomes *bijective*, as there is only one path to reach a particular state $s$. The directed graph $G$ then corresponds to a directed tree. Under these conditions, the flow-matching objective is equivalent to discrete-action Soft Q-Learning [@haarnoja2017reinforcement; @buesing2019approximate] with a temperature parameter $\alpha=1$, a uniform $q_{\mathbf{a}'}$, and $\gamma=1$, which obtains $\pi(x)\propto R(x)$. While the trajectory balance objective in [\[eq:trajbal_objective\]](#eq:trajbal_objective){reference-type="eqref" reference="eq:trajbal_objective"} asymptotically reaches the same solution, our results [and that of @malkin2022trajectory] suggest it does so faster.
|
| 109 |
+
|
| 110 |
+
::: algorithm
|
| 111 |
+
:::
|
| 112 |
+
|
| 113 |
+
In our active learning setting, the reward function for the GFlowNet is obtained by training a model from a dataset $\mathcal{D} = \{(x,y)\}$ of labeled sequences with input object $x$ and observed oracle reward $y$ and we would like to make sure that the GFlowNet samples correctly in the vicinity of these $x$'s (especially those for which $y$ is larger). We can observe that the flow-matching objective (Equation [\[eq:fm_objective\]](#eq:fm_objective){reference-type="ref" reference="eq:fm_objective"}) and the trajectory balance objective (Equation [\[eq:trajbal_objective\]](#eq:trajbal_objective){reference-type="ref" reference="eq:trajbal_objective"}) are *off-policy* and *offline*. This allows us to use trajectories sampled from other policies than $\pi$ during training, so long as the overall distribution of training trajectories $\tilde{\pi}$ has full support. These trajectories can be constructed from the $x$'s in a given dataset by sampling for each of them a sequence of ancestors starting from terminal state $x$ and sampling a parent according to the backward transition probability. In the auto-regressive case studied here, there is only one possible parent for each state $s$, so we immediately recover the unique trajectory leading to $x$ from $s_0$. This provides a set of offline trajectories.
|
| 114 |
+
|
| 115 |
+
Inspired by work in RL combining on-policy and off-policy updates [@nachum2017bridging; @guo2021text], we propose incorporating trajectories from the available dataset in the training of GFlowNets. At each training step we can augment the trajectories sampled from the current forward transition policy with trajectories constructed from examples in the dataset. Let $\gamma \in [0, 1)$ denote the proportion of offline trajectories in the GFlowNet training batch. As we vary $\gamma$ from $0$ to $1$, we move from an online setting, originally presented in [@bengio2021flow], to an offline setting where we learn exclusively from the dataset. Relying exclusively on trajectories from a dataset, however, can lead to sub-optimal solutions since the dataset is unlikely to cover $\mathcal{X}$. Algorithm [\[algo:inner_loop\]](#algo:inner_loop){reference-type="ref" reference="algo:inner_loop"} describes the proposed training procedure for GFlowNets which incorporates offline trajectories.
|
| 116 |
+
|
| 117 |
+
We hypothesize and verify experimentally in Section [5.4.1](#sec:mixingresults){reference-type="ref" reference="sec:mixingresults"}, that mixing an empirical distribution in the form of offline trajectories can provide the following potential benefits in the context of active learning: *(1) improved learning speed*: it can improve the speed of convergence since we make sure the GFlowNet approximation is good in the vicinity of the selected interesting examples from the dataset $\mathcal{D}$ *(2) lower bound on the exploration domain*: it guarantees exploration around the examples in $\mathcal{D}$.
|
| 118 |
+
|
| 119 |
+
Another consequence of a reward function that is learned from a finite dataset $\mathcal{D} = \{(x,y)\}$ is that there will be increasing uncertainty in the model's predictions as we move away from its training $x$'s. In the context of active learning, this uncertainty can be a strong signal to guide exploration in novel parts of the space and has been traditionally used in Bayesian optimization [@angermueller2019model; @swersky20amortized; @jain2021deup]. @bengio2021gflownet hypothesize that using information about the uncertainty of the reward function can also lead to more efficient exploration in GFlowNets. We study this hypothesis, by incorporating the model uncertainty of the reward function for training GFlowNets.
|
| 120 |
+
|
| 121 |
+
This requires two key ingredients: (a) the reward function should be a model that can provide an uncertainty estimate on its output, and (b) an acquisition function that can combine the prediction of the reward function with its uncertainty estimates to provide a scalar score. There has been significant work in developing methods that can estimate the uncertainty in neural networks, which we employ here. In our experiments, we rely on MC Dropout [@mcdropout] and ensembles [@deepensembles] to provide epistemic uncertainty estimates. As for the acquisition function, we use Upper Confidence Bound [@ucb] and Expected Improvement [@ei]. With the experiments of Section [5.4.2](#sec:uncertaintyresults){reference-type="ref" reference="sec:uncertaintyresults"}, we study the effects of these choices and observe the improvement provided by incorporating the uncertainty estimates.
|
2204.03444/main_diagram/main_diagram.drawio
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
<mxfile host="app.diagrams.net" modified="2022-03-16T12:17:44.361Z" agent="5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/99.0.4844.51 Safari/537.36" version="17.1.2" etag="QW0PgXS66X8v66rSs1fx" type="google"><diagram id="cfwoFyBaLu1ay3RNAGRA">7V3dk5s4Ev9rpir7EAoJkMTjfOXuqpLd3CZ1t3lkbMbDxjY+zGxm9q8/yYCtL4MMAuwJk4fYAoTpX3eru9XdXHm3q5d/ZNHm6VM6j5dX0J2/XHl3VxACAhH9j428FiPY94uBRZbMiyH3MPAl+Tsur6xGn5N5vC3HiqE8TZd5shEHZ+l6Hc9yYSzKsvSHeNpjupwLA5toEQs/gw18mUXLWDntv8k8fypGCcSH8X/GyeKpujNAYXFkFVUnlxNvn6J5+oMb8u6vvNssTfPi0+rlNl4y4ol0+XDk6P6HZfE6N7kAFhf8FS2fy2crf1f+Wj3sIkufN1fezWOyXN6myzSjw+t0TY/elBfHWR6/6AgfPSxlOh5+GNg/LuWTOF3FefZKTykneg9c6BAScn/FFCXLQM8vf+qPAwLArTjkiSN/4LqO5x7+yuuikgkW+3sf6EQ/lKTSk81rJhul2noes/NdSqkfT0kef9lEM3b0B5UKOvaUr+j8d0Ci7RX0EL6+CelPuUnpFUnOHhmzWbZ5ln6PuROJy/6xCdJ1/iFaJUt27n/ibB6tozqAeCBgLRA+QgLhke9gDhTsKSiEGgw8tzvRfYXodxFlsWgbK8SnYrVhH2evy4SikHnNEDwUeH182A9Es++LHYq/Ped0mrgc3xbqCAQ6QHwfU+wE5JCrAny7++sVt0rkXisYHeAeYENYgQ1rYCMWYAsU2L5myWYZ51cQLXNG6Ix+WrBPn5J1sl4oaDYJj56GBTaVfgY6tPD9NbrXwHN3fU8+3NrBAXgiEAF2QsRpIw8pSBCkUWLICVB3MJBC3HhOl7bya5rlT+kiXUfL+8PojajJDud8TNNNSdk/4zx/Ldfp6DlPRYDilyT/g/v8jU3lBOW3u5dy5t2XV+7L5zhL6BPGWTW2pk/7B/+Fm4l9PUy1+1bNpXKCLJ11UsgIVI89pWf6nM1iQU3lUbaIK/D0HJLFyyhP/hJn74IuVkTt38+UhtRUGktBSoL14cN9QBfyWlHUY3FAbLfe2VeQgGpIunbt/wJVLvvSkGQMoexbKDqwe3np5zShMx4UaSAaIoDoAatmLGSynOQAxnWWRa/caRt2wrbmtkDiExI4IWeXYsLP3ny5F550OT7lcvqheDzDyYCvm2xPmkKFKRTcsfMeVyMOD7WmGx25fl6sKN9QvkjXihScZkKftxUgMy8CTdom9DVWgAVtUzHkZAP0ZwNg1QYI+1GK5DSlaE+kAdAaG6/dpFha5o3NhwEcYSg5wgA3i3BPnjBQww3nKsIXIa6hKq7gSHjKus0O1FjS53Sb7O7xVmNKiijJwYnhgkrAH1OUDuLzjZcerSg1ikRbWbMqSoFGlLyhREkNNf0aL3b3eLf9xao0UZPxhjKxsmCdgzRh1+GWpSEjtAApAIwhXcMuHjqO7xrwaeWpQinSSLTe3TFP86SrRUezjfWIz4MxGCdw0lqEZHtmGEBscIcCoI8ChxP7MAhEPAOtVujBMSCKEvg93sUE37B/7xFRehBu2iVDOh0cWNDB4ZgGTTvf4GCifKvmOBvfoPJzBf3eVYJNwYSXE6s5AqZ7XmB6Y4IJRgHzIjxwoLOiBgMGNputb8VZ8KU9AETGSufQJtEUGQHs8a9YFlFFGfS/Z5YRdXMw0bihInfgX6towTY6XSm7gP6MYrriNKvA7uORkhl5j9i/0RAWDT8MR0NYzdhRyF9tROeU1vHfK
bv0ZsPF/Pfj3EZAM0ovcZU1CHY6tNxg8u5CRvt5ksWzciCOtowtomWyYF9nlMi7W5y3uRlQ7wzhQxIJEBD3QweAA+IkVBCvWIRHnPKJDftTk++jLnvr+TVLw2QEX0bbbTI70U/bC1jfe9YczQKNlFRjHV23AEm+GnJczlnDEixHdrbVaYnkEco3GWanCBpEhRokOtpuijzenWjX8INh0Ls/7YtdcX2Vt+uhIou+hq98G9pXzQZ6y2SHAtkBGI/uavzjLdPdF+nujUd3NclkcLo3Jq+FlpLXsCvtoMlJScPRvbIUObrfRLPvD6xEQKY/feBct9RLdQWaUgPFQGPkS2bR8ro8sErm853HrMNY8qKHkg4kLQa4MXdeBxK0AZKarTGUcGi834HXYAQd6PHCMZxsGEQW3hDZxTUY+aOR3aA+5w2RXVyCERqN7Ab+ft9kH20FRmQ0sht43CeT/cRA13DKHIDx+LsPR/aMCC27UONxdB+u6xkRWvKZoDsaofvwVc+I0JKTBMczBA2c0ykqKoYvcSBqfoKckMsSJS2johifNK29OKivOsof4ih/zqifjKIVk6v1w3ZT7GlJlbHXi0W2S2V822UyGItrEHabdjKIhv8836m6BXSqNzfwmU+ivULSfbqZJSvXAgKEeAIC1JvwuV0C4isI6Fou2FCZVX+H8yF/U5ivB/LTFQtxISOiqRbvjf62m1x0pr9gSgxDf+CGDuFWB1/VQL3R38CbnkwGcW0nctIrcojPRVylCU1NBoJ9edoQcj0csJHJcGo+N1F3hTUPYysj26/p0+F+TLdqD4GWhX1Bv+GwECrBX1RvQujSX2wU2vom1RCTAIs8H2KR57Hjc+ufYY2/Mm0IkMPJqxvINwkCLpnGrAz+VHEOXZNHsybOBiEUjvnKXbajnOepnHe0VkJctB/c2Iu1uXBuTFxCLpBXK9tM34vBnDlbM1gXJ1QX8RGyLg8MUqVYsgPvi64u1/QEQDYvav7l1yxKqG/qfk1W1J91v8zidZQlqWkaJoP5Y/QQL6XokvG2b8ZqTMqGc4wNS2rRyYObq+Cuhn+59EDYaq/GpNtdIY9HVy3XcQkQTU/PikJ9D0StEzhY0qHp4+M27hzbmOJbJ0ODArGlYbt+Nuq0oetgbmHB8k1cxB2UfrWltQ6xtHJicGNby11QE1tzP0Ub1Xo9j1SUI6zbvNTuFVWV52xtW1PanGjuctFXzkqgxt/u4u2MOiV5mk2AmkY0KhE/A0ChCujUwfO4KAYOAJzTIuIIMXFczmuBCoy9NfRUA4Njtxls2zaoqetg7yj6+t7EQ6CohhfL/k2uuv30OYvfb7J0FlOrjTVolY+f7EK4OheimiWpBt49Rdl8Z2gV9eO32h/H1HYWsWebZemmOMtxnF841yORb2G3Kuys99Tkrn++GzqY2wVXN8EBYAVEKtsh6Lg1Vq4x5wUKuc+1qPliG9AFmkLnQuJVPuncgg7suaViMb+247q9LfVADbH+BL0moK8QHErdPRSZ7qvGMxilkcvbbUVXLcuC4KKOgmsMpkE62FSw20Zmfc+TAyK+S8XYUwzDYap0gyled3qDJeALpry85gHH5cyqsGX4zveChrtA1Xizv7IiNaA2VXVVGITq+iumiQSKLPcVU0E9FHa1SNQ5EvU64odbACHYgXBUTAB2oBkINlJ1kBrY+jlACEAtCOGgIPRQ9HUZIHh1IEAwKAg9lIBdBgi1qzb0BgVhasFy+masVGbGDC0e0LabsfJruOqntWi/qZGRqdhAqs6vxGIvpbDBiOux2AAZ5Gv1nG1tZkBLgBSksgMI9urMCfoAjg+VowNkYiODsMikT6UqKxc76DiWkGLJvwXPb6deMUQNdwlq+mhZSnbBoMWj2kp8QacFcsbI8wzxnYvxBbJylTJX//7YRl6tNuypx0qAzAejJIFiNbhjKQk03rKrpxxQbQ5oIatvNgcUG4SipmVSatHoEuHVNyJQDdFe4z6QXii+S
lu5iYuVo5YXyYAuksTkxrYWxqqS+HwWxlpv6LhJfc7MKyor3G2drMLroVSPLyak0rsKd+lcrdNF4zW1sG6ZHeXr1tbfHh932W/HFtNjOVk/+6qL653Oi191TysTPiM9t4/xXJie6+oP7GeVXnon8iBpk0LVj5bThZN703JrQyVncANQl2T6BtQiXy0AOqvJI9mBb0VN/pyZgsBBchTVaygb6CtTEBtEuKfksjYw+x6F2ZUdqb3BTAD1/pTw5DB5ZngKnbfIM4MOJoqIVgK8a8t4ONqyC9ouIZGvEg2Uu/jcXXp7WyBW49dTnlkJkZpn5rFdFTVcNECeGdGFjCV8ziWxYx9AkaCygEl92hkMg4bFta+UD3JBaYB9wlObkAZDPBY8cILnqilVDYbhWPBcUOqgBh5bLc3rk9g8F4wFz9QJ7/SYVVi3UHkgdDCXSwNbWpEIdLmLPSuSqCGqKdtNznarT66CTljf57XH5DcydcqzvHPMRM9GNVL91jHzEjWhhf63jvU3trV1TC6npvVyC9TLMb7OFXetc9XzD3QDpV6LgBa7K2rpe6CZ2XUgOVnw2nApGYVLK+4CIndBM/bSLXr6DUiD/Q7DfUpzBhWY8UiyVtfgHgnrkkUBdAKuW62ZWdaGeQzSRt9cc29cvY6tvjaqrzTy0CCENlk5chq52LhXzSDT2ECnZ5H7Ne9J3t2Ei5R3zds98hsAMng0WxZOqIYLp3aJbVRKfUTL8wPH54wBs/cN2QjXh3ACeIjCToy0/cmHQFiNWU7dMU83CQA6T/lVQ54TupbR9VHQ0HuxN3TPsEC7bdhAZAFTe1L0tQrBGs3GZBnnR80/Vr9nIdszBLU3wVh4lcCIOe2hGout6QuabnOjxqBKA07jLFFtEue7Rwr3e9bukz6uu4qj9S8NKaNXQhPQLp7ueYf5MZZKiIDvYJ2PSzTyJPlPrVTbae8dmZzcnQIawslFZ+DkBoM6uWru4a8xxXxXovorg/GBMhR0v9DB2VMvSuGYoDfVQnRTAVW2CZ+V5MtUFlJ+dcrAhp0TqrI/YIiaM16+8ceOWDLmtgu/kVGtlyb2jPWGncBVY4qfs3ieFAnNynL8e/yYrONVvFsfLazDFjqB18576AS+LiT3/aHF9yr602FPnubU8jit2fdP1wM8lJKI2dtYNGtKk4GADhux3bgWjKEXWso3UeW7FLtBBFwN5B0E/GLeySinMYdiu0xNq6W+XsoIqlT2S+C9vXoXme8IsbsyH/2apUwNH+wtyjJPn9I5K4K7/z8=</diagram></mxfile>
|
2204.03444/main_diagram/main_diagram.pdf
ADDED
|
Binary file (24.8 kB). View file
|
|
|
2204.03444/paper_text/intro_method.md
ADDED
|
@@ -0,0 +1,48 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Introduction
|
| 2 |
+
|
| 3 |
+
The task of coarsely estimating the place where a photo was taken based on a set of previously visited locations is called *Visual (Image) Geo-localization* (VG) [37, 42, 86] or *Visual Place Recognition* (VPR) [21,44] and it is addressed using image matching and retrieval methods on a database of images of known locations. We are witnessing a rapid growth of this field of research, as demonstrated by the increasing number of publications [2,10,14,22–24,28,30,36, 37,42,44,46,57,60,72,73,76–79,81,87], but this expansion
|
| 4 |
+
|
| 5 |
+
| | Vanilla | Resize<br>(80%) | Data augm.<br>(brightness = 2) | Pred. refinement<br>(nearest crop) | PCA<br>(2048) | CRN [37] |
|
| 6 |
+
|-----|---------|-----------------|--------------------------------|------------------------------------|---------------|----------|
|
| 7 |
+
| R@1 | 63.4 | 64.3 | 68.6 | 67.0 | 56.6 | 68.8 |
|
| 8 |
+
|
| 9 |
+
Table 1. Example of how results can be influenced by little train or test time changes to the VG pipeline. Recall@1 for a ResNet-18 with NetVLAD trained on Pitts30k and tested on Tokyo24/7. Results are thoroughly discussed in later sections.
|
| 10 |
+
|
| 11 |
+
is accompanied by two major limitations:
|
| 12 |
+
|
| 13 |
+
i) A focus on single metric optimization, as it is common practice to compare results solely based on the recall on chosen datasets and ignoring other factors such as execution time, hardware requirements, and scalability. All these aspects are important constraints in the design of a real-world VG system. For instance, one might gladly accept a 5% drop in accuracy if this leads to a 90% decrease of descriptors size as the resulting reduction in memory requirements enables a better scalability. Similarly, computational time and descriptor dimensionality are crucial constraints in real-time applications, given a target hardware platform.
|
| 14 |
+
|
| 15 |
+
ii) A lack of a standardized framework to train and test VG models. It is common practice to perform direct comparisons among off-the-shelf methods that use different setups (*e.g*., data augmentation, initialization, training dataset, *etc*.) [37, 67, 85], which can hide the improvement (or lack thereof) obtained by algorithmic changes and it does not allow to pinpoint the impact of each individual component. Table 1 shows how some simple engineering choices can have big effects on the recall metric.
|
| 16 |
+
|
| 17 |
+
Although previous benchmarks for VPR [85] and the related task of Visual Localization [58, 65] offer interesting insights, they do not address the aforementioned issues. For these reasons, we propose a new open-source benchmark that provides researchers with an all-inclusive tool to build, train, and test a wide range of commonly used VG architectures, offering the flexibility to change each component of a geo-localization pipeline. This allows to rigorously examine how each element of the system influences the final results while providing information computed on-the-fly regarding the number of parameters, FLOPs, descriptors dimensionality, *etc*.
|
| 18 |
+
|
| 19 |
+
Using our framework, we run numerous experiments aiming to understand which components are the most suitable for a real-world application, and derive good practices depending on the target dataset and one's hardware availability. For example, we find that ResNet-50 [29] provides a good trade-off between accuracy, FLOPs and model size, and that Visual Transformers can successfully replace the CNN backbones and achieve better geo-localization performances when trained on larger datasets. Furthermore, we observe that partial negative mining and reduced resolution yield an important decrease in computations without significantly compromising the performance, or even yielding gains in some cases.
|
| 20 |
+
|
| 21 |
+
The benchmark's software and models are hosted at <https://deep-vg-bench.herokuapp.com/>.
|
| 22 |
+
|
| 23 |
+
# Method
|
| 24 |
+
|
| 25 |
+
This section describes the VG pipeline used in our benchmark (*cf.* Fig. 1) and our experimental setup.
|
| 26 |
+
|
| 27 |
+
The VG task is commonly tackled using an image retrieval pipeline: given a new photo (*query*) to be geolocalized, its location is estimated by matching it to a database of geo-tagged images. A VG system is thus an algorithm that first extracts descriptors for the database images (offline) and for the query photo (online), then it applies a nearest neighbors search in the descriptor space. The orange blocks in Fig. 1 show that a VG system is built through several design choices, including network architectures, negative mining methods, and engineering aspects such as image sizes and data augmentation. All of these choices impact the behavior of the system, both in terms of performance and required resources. We propose a new benchmark to systematically investigate the impact of the components of VG systems, using the modular architecture shown in Fig. 1 as a canvas to reproduce most VG methods based on CNN backbones and to develop new models based on Visual Transformers.
|
| 28 |
+
|
| 29 |
+
This abstract model contains several components that can be modified, both during training and test time: the backbone (Sec. 4.1); feature aggregation (Sec. 4.2); mining training examples (Sec. 4.4); image resizing (Sec. 4.6); data augmentation (Sec. 4.5). We conduct a series of tests focused individually on each of these elements, to systematically show each component's influence. Due to limited space, we only summarize here the results of some experiments, while detailed results and additional experiments on pre/post-processing methods and predictions refinement, effect of pre-training and many other aspects are provided in the Appendix.
|
| 30 |
+
|
| 31 |
+
The code of the benchmark follows the modular structure shown in Fig. 1, where each component can be modified. We further provide scripts to download and format a number of datasets, and to train and test the models, making it easy to perform a large number of experiments while ensuring consistency and reproducibility of results. Our codebase allows us to easily reproduce the architectures used in a wide range of works [2, 26, 37, 42, 60, 63, 71, 78] and commonly used training protocols [2, 42, 78]. More details on the software are provided in Appendix B.
|
| 32 |
+
|
| 33 |
+
We use six highly heterogeneous datasets (see Tab. 2 and maps in Appendix A), which together cover a variety of real-world scenarios: different scales, degree of inter-image variability, different camera types. For training, we use Pitts30k [2] and Mapillary Street-Level Sequences (MSLS) [78] datasets, as they provide a small and large amount of images, respectively. While Pitts30k is very homogeneous, *i.e*. all images share the same resolution, weather conditions and camera, MSLS represents a wide range of conditions from very diverse cities. Regarding MSLS, given the lack of labels for the test set, we follow [28] and report validation recalls computed on the validation set. To assess inter-dataset robustness, we also test all models on four other datasets: Tokyo 24/7 [72], Revisited San Francisco (R-SF) [13, 40, 74], Eynsham [16] and St Lucia [47]. Further details on these datasets, such as their geographical coverage, are included in Appendix A.
|
| 34 |
+
|
| 35 |
+
In all experiments, unless otherwise specified, we use the metric of recall@N (R@N) measuring the percentage of queries for which one of the top-N retrieved images was taken within a certain distance of the query location. We mostly focus on R@1 and, following common practice in
|
| 36 |
+
|
| 37 |
+
| | # train/val<br>datab./queries | # test<br>datab./queries | Dataset<br>size | Database<br>type | Database img. size | Queries<br>type |
|
| 38 |
+
|------------|-------------------------------|--------------------------|-----------------|------------------|--------------------|-----------------|
|
| 39 |
+
| Pitts30k | 20K / 15K | 10K / 6.8K | 2.0 GB | panorama | 480×640 | panorama |
|
| 40 |
+
| MSLS | 934K / 514K | 19K / 11K | 56 GB | front-view | $480 \times 640$ | front-view |
|
| 41 |
+
| Tokyo 24/7 | 0/0 | 75K / 315 | 4.0 GB | panorama | $480 \times 640$ | phone* |
|
| 42 |
+
| R-SF | 0/0 | 1.05M / 598 | 36 GB | panorama | $480 \times 640$ | phone* |
|
| 43 |
+
| Eynsham | 0/0 | 24K / 24K | 1.2 GB | panorama | 512×384 | panorama |
|
| 44 |
+
| St Lucia | 0/0 | 1.5K / 1.5K | 124 MB | front-view | $480 \times 640$ | front-view |
|
| 45 |
+
|
| 46 |
+
Table 2. **Summary of the datasets:** "panorama" means images are cropped from a 360° panorama (including undistortion); "front-view" means that only one (forward facing) view is available; "phone" means photos were collected with a smartphone. "panorama" and "front-view" images were taken with car-rooftop cameras. \* Variable resolution.
|
| 47 |
+
|
| 48 |
+
the literature [2, 9, 10, 28, 37, 42, 52, 53, 78], use 25 meters as a distance threshold, but we also investigate how results change varying thresholds and top-N (cf. Appendix D.5). For reliability, all results are averaged over three repetitions of experiments. To avoid overloading the tables, standard deviations are shown in the Appendix, where the reported experiments are a superset of the ones in this manuscript. Training is performed until recall@5 on the validation set does not improve for 3 epochs. Given the variability in datasets size (see Tab. 2), we define an epoch as a pass over 5,000 queries. We use the Adam optimizer [38] for training, as in general it leads to faster convergence and better performance than SGD. Following the widely used training protocol defined in [2], we use a batch size of 4 triplets, where each triplet is composed of an anchor (the query), a positive and 10 negatives. Following standard practice [2, 10, 28, 42, 77, 78], at train time, the positive is selected as the nearest database image in features space among those within a 10 meters radius from the query and negative images selected from those further than 25. Due to the size of each dataset, we use full database mining when training on the Pitts 30k, and partial mining when training on the MSLS (cf. Sec. 4.4 for details).
|
2206.04384/main_diagram/main_diagram.drawio
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
<mxfile host="app.diagrams.net" modified="2022-03-17T10:59:35.534Z" agent="5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/99.0.4844.51 Safari/537.36" etag="5QwfZFWWExZUeMn6BUXj" version="15.5.2" type="google"><diagram id="C5RBs43oDa-KdzZeNtuy" name="Page-1">7VnbctowEP0az7QPzdiWL/gxXErTeyZpk/Qlo7GF7UZYRCgB8vWVYvkmHEMpDoT0AUZaSWtpz9HZNWigN54PKZxEX0iAsGbqwVwDfc00Hdfj38KwSA1Ad1JDSOMgNemF4Sx+QKnRyKx3cYCm0paaGCGYxZOq0SdJgnxWsUFKyaw6bURwUDFMYIgq2xCGMx9itDTtIg5YlFo7plvYP6A4jLInG4488Bhmk6WLaQQDMis9Cww00KOEsLQ1nvcQFrHL4nJxsrjAn2+c4cfT6S380f10/vXnu3SX7/9mSX4EihK2XdcgdX0P8Z2Ml2b33gwS/xryxlt5cLbIoknJXRIg4dHQQHcWxQydTaAvRmecPtwWsTGWwyOSMEkIg0erG2I4FWjqvD1llNzkiIjZeXjF8JonlpG5R5SheQksGYEhImPE6IJPkaNAzBdLcjZL1swKbrjSFJVoYUsblGwMc89FxHlDBr0eAONcv3oAJ9+Ob379Ph2e9Nxbz5GY1QEwPUgAzCzgEgDTttYCwGoLAGMZADXmKAmOhRLxni8iGPvVMBeYiMihgOuPXEwoi0hIEogHhbXLQ0oXl9l00bkSnSM76/bn5cH+YiNMGKQhYqupJ/bbiFwJGbvhalCEIYvvq7JbB5d8wncS82PkxDBshRiuVXUxJXfUR3JVWecURxZQHDmKozQwS44e2ZMfe3NCmbU3uv4280vDqlxKr2WPYEK5JSGJIMwoxlgxQRyHiSAk5wLi9q64gjFPe8dyYBwHwSPb6jSiyti27rrhKEhYy3cd1DAKtHXX67NdHx1qtrMsFYDOEgBODQCGbraEgLU/amuukls0j9mlfKhol0Sa94pForOZRKeStlqiV0o52CspNxUpB2r1tK6U5y8RGX89xVHLUm7X6sXoidLssMU8r8oatORZxdz5LyX/KCVNb4T7IiVKCQEM78hWktPahaGp+DL1IzXRtSwo7v5w1m3vbeMl8Cr7WSvjAuhsRipgqLkOrMUojjFclKZNxIRp04b12g0/mTq9xvm8ke5gq/Tu1ObL+tr6wLNlR9WtHb/6ZDR9VunZIOs9k1yV02WTXL2wyhu4TpV2nrdhrux4zY62JGuWYSs52W5fpoz63wH4p48wg3xkdK25Xb7tHtTc/iuUL0s3KrCY3q7laxeV0+6L9v1SF1tNauaG7/W2ZSr0akddbKDS2GveV/P8ltSovmriHxbjAInjvkIFsnVQhSKrf7evQLxb/Imbolr8Ew4GfwA=</diagram></mxfile>
|
2206.04384/main_diagram/main_diagram.pdf
ADDED
|
Binary file (7.91 kB). View file
|
|
|
2207.08822/main_diagram/main_diagram.drawio
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
<mxfile host="Electron" modified="2022-04-28T02:08:45.598Z" agent="5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) draw.io/17.2.4 Chrome/96.0.4664.174 Electron/16.1.0 Safari/537.36" version="17.2.4" etag="qG-g_jRRm3AT4MlOb8Yv" type="device"><diagram id="HRgmrsm4Ucn5YnzruTnL">7VtZb9s4EP41BtqHGiIp6njM1e5iG7TYYLHHmywxtlBZcnUkdn/98tRJx3JCpWnitEik4Ygiv284MxxJM3Sx3n7Kg83qOotIMoNWtJ2hyxmENoA2/cMkOyFBHvCEZJnHkZCBRnAT/yBSaElpFUek6CiWWZaU8aYrDLM0JWHZkQV5nt131W6zpHvXTbAkA8FNGCRD6d9xVK6kFFhW0/AbiZcrdWukWtZBrS0ExSqIsnsh4jroaoYu8iwrxdF6e0ESBp8CRnT0cU9rPbKcpOWYC6C44C5IKjk5Oa5yp2abZ1UaEaZvzdD5/Souyc0mCFnrPSWYylblOqFngB7exklykSVZTs/TLKVK51FQrPjlrL0o8+xbjZpDJXIAJC/Jdu8kQA0NtSqSrUmZ76iKvMBxIZi7WFwmjeoDgI4E/b7hyLek1qpFD7SlYiDtYlnfoUGOHkjw9ECi1wEk8OewB2S9Og/h6PgGcLSPwREcxrGLE+ohO4PoFrN/tWarxeE/7IosLVty8WMGcRfYzhx5fdNFQ9NFnjVH7hB14BhAHWtQdxJ6h/MFPViWfLJCwMDo8OF8rzLV8KHgzvqMKkC82TaNqpff05IsSU7bP8cpCfhBsOOCd5+urq/fq9vQEYs7de9Oxa0R9QyD0lDq2O+tI83SCpJ4mdLTkNJGB4POGakx9fhnsmEdRxG7jdbcugvbhFXQFTaHfasAGA+sAiqtjkkYsAjn8DokaXTGoinDLQmKIg4PLT06/Xz3D8Np7nhACf7lAsv2lOByK6EUZ7v22VeSx3QyjKQW2iTqRewyyJek7LjmEfC3gMXWEFcly0kSlPFd9446sOUdvmYxXzOKXYTw3EU9di23x1qRVXlI5LXtED6mOx/3ehOADHrjZlAjMMoy3L2+otgEqdY1hGK1MbeQLxfBOzo2+p/eydIevWeHjFqL+5TbYB0nO3H5Okuzgq++jkrjdiyd15lB3M9HMZ0ck/JUqz5Tk8V8ulRyyY7ZwDCbJqawHdIFta6yuEd1A5tuBNJ1C/vF8KYC5p0xnyM7aTcI34kVGepiAYxSELDRszOuCjyGXVup3XvtuanmRZbeZUlVxlmqZgOtj1VCWWJHMu8mET/7g+QpR7weHL9GjVA3eq4wmBvUTEj4fKbBHQ8TA37a9vxMyh09n3jt/TtiEQGYSMYAJuxGAdYo4wBrbEUC1sJjAZOraMCEVm/wMiqMMZa6qbaTxlFhtvgbTcvHTT+7pgFB3GoQzq25qt0mvVyry6aNepBavmwNoW/I/LS25rawu8ak3mAx1gFeuJE9Mf41JX+OD9Aw+QPuMPkDDpxrMu5+yHhMoPeeK/U7i6JTcidYH26yALA1rGMN5yaSO3+K5G5EJgbwC0vFHN/XVA7qQs6xqZi2Ox/DqVIxZQsPEVmsgg07DKvFPhNv0boQ1v55UQuC8NuSr4EvVZnEbEFxeRTk377QbuJyJ9J33BVCLh3nhL2QhKHOCS88bGNTeyrkoCE37rC2AdSGobPqoIFlB8Bhuo4oC42Dbt9CfZoP8zwNmGgIpq3ZnpootwE4QdgCurB1Q2MEy+asdZCWcVGwdOgUxaCLLY2rg5rlZCJgAV15dRq6VxQCRjfZbijwnN5fnG02bPkQBWBThWLnOdnXFYUfZN9k3vonAzNOl7w+Qfc71tW2zIOwFKKC7wwfNJFFrjGao23J7MY
nwsSLbF3g8OACOYaeJ7iU/4GZuN7QTKCryXQ9E6ZzdGXbqOmQMEspwpWylpha0l0cVUHS8jCHAsrbtR/Hs8fZDwKaPYIR+xlRB39VJQnXdQaIO5rEztMhbuRZFNAVmKfanb7snajje91+nrQRdexebwY3oroq0qvnzEXImduGKNN2NiFjI2pAp9KBWk1o6BSxeuTWznYtTenARBiCp0LPEa9UAPcnszWiznNiS7k9a1hJwrqy3GRsPWsp6dcvJxhxqJoX9rA/UflA+27eJARfvZqakRE/rKkQTkfy0TWix5J8LevAJ5KF+7YGDzOnI/mnVnM+Jlkg6zgbmbtzNtZ8xVvv/ipE4+f/WB9VGpeH3mZ8w5UdHw+dA9LYjW1PVNnRvuH4bLb0leZvM/bZAv11y+zqZCn7LcUeeBiIn9NS3mRBygWuN1ihtoO6/Yyubmh7A73ezJU3tK81vQHOPGvoVdXjs+M50/QGHXsyzp7ztaSXzRnAj60ianuzJisjolNh6ogNERpGMl2qPFWlA+nqUk/dI8G9qTLLb0hasDetTzsl6ELNs0xNGgNsrzaSJ5E9RVnL1ZGtPuC6yNabqgzEVwH9tJXMl/PZ8Ksvk9ntyzCVPJMQoMsPvql6me2NfGUYmXgCi6YomGlN55J8r1g55UfHak7kHyTf9l0j3M/k1xmt+N98l4Gu/gc=</diagram></mxfile>
|
2207.08822/main_diagram/main_diagram.pdf
ADDED
|
Binary file (21.5 kB). View file
|
|
|
2207.08822/paper_text/intro_method.md
ADDED
|
@@ -0,0 +1,136 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Introduction
|
| 2 |
+
|
| 3 |
+
Recently, deep learning models have evolved to deliver acceptable performance in many areas such as computer vision, natural language processing, and speech recognition. However, the number of parameters and computational complexity of most deep learning applications have increased significantly. This ever-increasing computational complexity makes the deployment of deep neural networks harder on edge devices. Furthermore, the energy required to train a deep learning model increases proportionately with computational complexity. Thus, the training cost of these large and complex models also increases significantly on the high-end cloud servers. Although quantization techniques are commonly used to accelerate *inference* of deep learning models, here we target a fully integer training pipeline *(i.e. forward propagation, back-propagation and SGD)*. Unlike recent proposals in quantized back-propagation, we directly change the number representation of floating-point values. Furthermore, we show empirically and theoretically that our proposed integer training methodology is effective in training deep learning models with *integer-only arithmetic*.
|
| 4 |
+
|
| 5 |
+
Despite the fact that accelerated training using integer back-propagation seems intriguing, there are some challenges associated with integer gradient computation. For example, (i) gradients must be scaled correctly in order to be adapted to the limited dynamic range of the integer number (e.g. `int8`: $[-128,127]$). (ii) The numerical error of the gradient must be ***unbiased*** in order to preserve the convergence trajectory of the training algorithm. A small numerical error can be accumulated through the course of training and change the convergence behaviour. (iii) The integer training must be *distribution independent*, i.e. the training method should not depend on the distribution of the gradients, weights, or training data.
|
| 6 |
+
|
| 7 |
+
We propose a novel integer training method designed to address aforementioned challenges (i), (ii) and (iii) simultaneously. Having a linear dynamic fixed-point mapping coupled with a non-linear inverse mapping is the key to full integer training.
|
| 8 |
+
|
| 9 |
+
To this end, we make the following contributions:
|
| 10 |
+
|
| 11 |
+
- A hardware-friendly integer training method is proposed based on extracting the maximum floating-point exponent of the tensors as the *scale*. We propose linear fixed-point mapping of tensors, while the corresponding inverse mapping for floating-point is non-linear. Our proposed method addresses all the previously mentioned challenges: (i) it computes the scales dynamically, and moreover the scale does not need to be adjusted in the course of training; (ii) it provides an unbiased estimation of the gradients and consequently, its convergence trajectory closely follows the floating-point version; (iii) our proposed representation mapping does not depend on the distribution of the training data, weights, or gradients.
|
| 12 |
+
|
| 13 |
+
- We study the optimality gap of our proposed integer training algorithm and show it is analogous to its floating-point counterpart in the course of training (**Theorem 1**). Our analysis of stochastic gradient descent with our proposed fixed-point gradients shows the original floating-point optimality gap is only shifted by a negligible amount (**Remark 3**).
|
| 14 |
+
|
| 15 |
+
- Our proposed method effectively performs all operations required in modern neural networks using *integer-only arithmetic*. For instance, the computation of linear layer, convolutional layer, batch-norm and layer-norm, residual connections and also stochastic gradient descent (including gradients, momentum, weight decay, and weight update) are all performed in integer arithmetic. To the best of our knowledge, this is the first time that *back-propagation* of a batch-norm and the computation of stochastic gradient descent (SGD) are performed in integer arithmetic with negligible loss in the accuracy for large datasets such as ImageNet.
|
| 16 |
+
|
| 17 |
+
The rest of this paper is structured as follows. Section [2](#sec:related-works){reference-type="ref" reference="sec:related-works"} reviews some previous works in the field of quantized back-propagation and quantifies the similarities and differences with our *representation mapping* method. Section [3](#sec:methodology){reference-type="ref" reference="sec:methodology"} discusses our integer training methodology in detail. Theoretical aspects of our integer training method on SGD are studied in Section [4](#sec:sgd-analysis){reference-type="ref" reference="sec:sgd-analysis"}. Experimental results supporting our integer training methodology and convergence theory are presented in Section [5](#sec:results){reference-type="ref" reference="sec:results"}.
|
| 18 |
+
|
| 19 |
+
# Method
|
| 20 |
+
|
| 21 |
+
Common quantization methods, which use division and clipping techniques (refer to Appendix [7.6](#appendix:quantization_review){reference-type="ref" reference="appendix:quantization_review"}), are inefficient for back-propagation. Therefore, we decided to go one step deeper and change the number format directly. Here, we propose a hardware-friendly *number representation mapping* using the dynamic fixed-point number format where the scale is defined *per tensor*. In this approach, each tensor can be represented by its `int8` version while it is multiplied by a shared scale, as opposed to other methods that allow multiple shared scales for different partitions of the tensor (see @darvish2020pushing [Figure 4]). One shared scale per tensor makes the computations easier in the computing hardware (e.g. CPU or GPU). However, the training algorithm diverges if the representation mapping is not executed properly. To tackle this problem, we suggest two subtle changes: (i) we propose to perform fixed-point mapping of tensors in a ***linear*** fashion while the inverse mapping is ***non-linear*** as explained in Sections [3.1](#sec:lin-quant){reference-type="ref" reference="sec:lin-quant"} and [3.2](#sec:nonlin-dequant){reference-type="ref" reference="sec:nonlin-dequant"}. This is the key to success of our algorithm since a linear fixed-point mapping allows monotonic conversion of floating-point format to fixed-point, while a non-linear inverse mapping allows preserving information. (ii) We suggest to use stochastic rounding in the back-propagation in a way that preserves the expected value of the tensors as well as their vital statistics, such as the mean. Stochastic rounding in conjunction with representation mapping is crucial to keep the integer training loss trajectory close to its floating-point counterpart.
|
| 22 |
+
|
| 23 |
+
<figure id="fig:quant" data-latex-placement="!t">
|
| 24 |
+
<p><embed src="figs/quant2.pdf" style="width:50.0%" /> <embed src="figs/dequant2.pdf" style="width:40.0%" /><br />
|
| 25 |
+
<span>(a)</span><span>(b)</span><br />
|
| 26 |
+
</p>
|
| 27 |
+
<figcaption> (a) Linear fixed-point mapping, (b) Non-linear inverse mapping. </figcaption>
|
| 28 |
+
</figure>
|
| 29 |
+
|
| 30 |
+
Our proposed integer training method is based on manipulating the floating-point number format. This is a very simple and effective way of converting floating-point values to fixed-point values as opposed to other commonly used methods that involve division operation. Our method essentially uses *shift* and *round* operations to convert floating-point to fixed-point format, see Figure [1](#fig:quant){reference-type="ref" reference="fig:quant"}(a).
|
| 31 |
+
|
| 32 |
+
In this method, a tensor comprising $n$ floating-point numbers $(f_1, f_2, \ldots, f_n)$ is converted to a fixed-point tensor. First, as shown in Figure [1](#fig:quant){reference-type="ref" reference="fig:quant"}(a), sign, exponent, and mantissa of each floating-point number are extracted using the *unpack to integer* function. For instance, $f_1$ is unpacked to $s_1,e_1,m_1$ where $(s,e,m)$ is used to denote (*sign*, *exponent*, *mantissa*). We simply find the *maximum* element of $e_1,e_2,\ldots,e_n$ i.e. $e_{\mathrm{max}}=\max(e_1,e_2,\ldots,e_n)$ to extract the shared scale of the tensor. Subsequently, the original mantissas are shifted to the right according to the difference of their exponent and $e_\mathrm{max}$, to get the individual mantissas. For instance, $m_1$ is shifted to the right by $e_\mathrm{max}-e_1$ which is denoted by $m_1 >> (e_\mathrm{max}-e_1)$. By shifting mantissas to the right, we intentionally push the small values to the *sub-normal* region, where the most significant bit of mantissa is not 1 in binary format i.e. $(1)_2$, see @zuras2008ieee. Pushing the mantissas to the sub-normal region is performed to align their exponents with $e_\mathrm{max}$ and this unifies the scale for fixed-point values in a tensor. In the next step, to create `int8` mantissas, the 24-bit single floating-point mantissas (i.e. 23-bit mantissa + 1 implicit hidden bit) are further rounded to 7-bit mantissas to construct signed `int8` values, i.e., 7-bit unsigned integer and 1 sign bit. As an example, let us assume the shifted mantissa is $(0.01011001010101010100000)_2$, then it is going to be randomly rounded to either $(0.010110)_2$ or $(0.010111)_2$ based on a probability (Refer to [7.1](#appendix:stochastic-rounding){reference-type="ref" reference="appendix:stochastic-rounding"} for details).
|
| 33 |
+
|
| 34 |
+
Our proposed fixed-point mapping method is performed by pushing mantissa values to the sub-normal region and rounding them stochastically. Hence, the inverse mapping must be performed using a floating-point alignment module that normalizes the mantissas. A *normalized* floating-point mantissa is required to start with the most significant bit $(1)_2$. For instance, an alignment module converts $2^{127} \times (0.0101)_2$ to $2^{127-2} \times (1.0100)_2$. The alignment module is a very well-known logic circuit that is available in the commodity hardware using a Leading Zero Anticipator (LZA) block [@schmookler2001lza]. Coupling a linear fixed-point mapping with a non-linear inverse mapping in this way keeps the number format close to floating-point. Moreover, combining them with stochastic rounding results in an unbiased fixed-point variant that forces the trajectory of the integer training to closely follow the floating-point counterpart.
|
| 35 |
+
|
| 36 |
+
Figure [1](#fig:quant){reference-type="ref" reference="fig:quant"}(b) shows the inverse mapping unit. This unit receives a tensor of mantissas and a single value of shared exponent computed by an integer layer. Then a tensor of exponents is reconstructed by repeating the value of the shared exponent. The size of this tensor is equal to the mantissa tensor's size. Moreover, the mantissa tensor is rounded and its sign is extracted. Note that the rounding is used here because the output of the previous layer might have some excessive mantissa bits; this normally happens in matrix multiplication and convolution operations. Then the sign, exponent, and mantissa tensors are sent to an alignment unit that shifts the mantissa and adjusts the exponent in order to normalize the floating-point number format [@zuras2008ieee]. Finally, the result is packed and carried out to the next layer.
|
| 37 |
+
|
| 38 |
+
When the fixed-point mapping of floating-point values is completed, the fixed-point values are used to perform the *integer-only* layer computations. For instance, let us consider a linear layer which consists of a General Matrix Multiplication (GEMM) operation, but, the idea can be generalized to other types of layers. Figure [2](#fig:mixed){reference-type="ref" reference="fig:mixed"} demonstrates the layer-wise integer computations of a linear layer where the shared exponents and integer mantissas are treated separately. As shown in the Figure [2](#fig:mixed){reference-type="ref" reference="fig:mixed"}, in order to perform an integer-only GEMM operation, we need to multiply scales $(2^{e_\mathrm{max1}} \times 2^{e_\mathrm{max2}} )$. This multiplication performs an integer addition operation on the exponents $(e_\mathrm{max1}+e_\mathrm{max2})$. Furthermore, the integer mantissas are sent to an integer GEMM module to compute the output mantissa tensor. Also note that in our implementation, when the mantissa tensor is in `int8` format, multiplication is in `int16` format and accumulation is in `int32` format.
|
| 39 |
+
|
| 40 |
+
<figure id="fig:mixed" data-latex-placement="!t">
|
| 41 |
+
<p><embed src="figs/int-computation2.pdf" style="width:60.0%" /> </p>
|
| 42 |
+
<figcaption> A fully integer linear layer. </figcaption>
|
| 43 |
+
</figure>
|
| 44 |
+
|
| 45 |
+
Here we provide an intuition of how our proposed representation mapping approach works. Let us denote ${A}$ as a tensor and ${\hat{A}}$ as its fixed-point version in the way that is introduced earlier. In addition, random variables $A_i$ and $\hat{A}_i$ are $i^\text{th}$ element of those tensors. Also note that $A_i$ and $\hat{A}_i$ are different *representations* of the same real number, but one is in floating-point format and the other is in dynamic fixed-point format. Thus, one can relate $A_i$ and $\hat{A}_i$ with a random error term $\delta_i$ as $\hat A_i = {A}_i + \delta_i$ . Since in our training method we used stochastic rounding [@connolly2021stochastic], $\mathbb{E}{\{\hat{A}_i\}} = A_i$, or equivalently $\mathbb{E}{\{\delta_i\}} = 0$ (see Appendix [7.1](#appendix:stochastic-rounding){reference-type="ref" reference="appendix:stochastic-rounding"}). In other words, the fixed-point value is on the average equal to the floating-point value. Note that here we consider single precision floating-point values as a surrogate of real values.
|
| 46 |
+
|
| 47 |
+
**Linear and convolutional layers:** For these two types of layers, both forward and backward propagation computations are based on inner products. Thus, it is easy to see that our proposed fixed-point inner product $\hat{C}_{ij}= \sum_{k} \hat{A}_{ik}\hat{B}_{kj}$ is an unbiased estimator of the floating-point inner product $$\begin{equation}
|
| 48 |
+
\mathbb{E} \{\hat{C}_{ij}\}= \mathbb{E} \left\{\sum_k \hat{A}_{ik}\hat{B}_{kj} \right\} = \mathbb{E} \left\{\sum_k ({A}_{ik} + \delta^A_{ik}) ({B}_{kj} + \delta^B_{kj}) \right\} = \sum_k {A}_{ik}{B}_{kj} = {C}_{ij} .\
|
| 49 |
+
\end{equation}$$
|
| 50 |
+
|
| 51 |
+
**Residual connections:** A residual connection involves element-wise addition of two tensors. Each fixed-point element $\hat{C}_{ij}= \hat{A}_{ij} + \hat{B}_{ij}$ is an unbiased estimator of its floating-point version $$\begin{equation}
|
| 52 |
+
\mathbb{E} \{\hat{C}_{ij}\}= \mathbb{E} \{ \hat{A}_{ij} + \hat{B}_{ij}\} = \mathbb{E} \{ ({A}_{ij} + \delta^A_{ij})+({B}_{ij} + \delta^B_{ij})\} = {A}_{ij}+{B}_{ij} = {C}_{ij} .\
|
| 53 |
+
\end{equation}$$
|
| 54 |
+
|
| 55 |
+
**Batch-norm:** A batch-norm layer is defined as $$\begin{equation}
|
| 56 |
+
\hat{\omega}\frac{{\hat A} - \hat\mu }{\sqrt{\hat\sigma^2 + \epsilon}} + \hat{\beta}\text{.}\
|
| 57 |
+
\label{eq:batchnorm}
|
| 58 |
+
\end{equation}$$ For this type of layer, it is important to compute signal statistics such as mean $\hat\mu$ and variance $\hat\sigma^2$ correctly in integer arithmetic. Thus, the fixed-point mean $\hat{\mu}$ is also an unbiased estimator of the true mean $\mu$ $$\begin{equation}
|
| 59 |
+
\mathbb{E} \left\{\hat{\mu} \right\}= \mathbb{E} \left\{ \frac{\sum_{i=1}^N \hat{A}_i}{N} \right\} = \frac{1}{N} \mathbb{E} \left\{ \sum_{i=1}^N ({A}_i+\delta_i) \right\}=\mu.\
|
| 60 |
+
\end{equation}$$ Then the following derivation holds for fixed-point variance $\hat{\sigma}^2$ $$\begin{equation}
|
| 61 |
+
\begin{gathered}
|
| 62 |
+
\mathbb{E} \{{{\hat\sigma}^2}\}= \mathbb{E} \left\{ \frac{\sum_{i=1}^N (\hat{A}_i - \hat \mu)^2}{N} \right\} =
|
| 63 |
+
%\frac{1}{N} \mathbb{E} \{ \sum_{i=1}^N ({a}_i + \delta_i - \mu)^2\}=\
|
| 64 |
+
%\frac{1}{N} \mathbb{E} \{ \sum_{i=1}^N ({a}_i^2 + \delta_i^2 + \mu^2 + 2a_i\delta_i -2\delta_i\mu_i - 2 a_i\mu)\} \\
|
| 65 |
+
\frac{1}{N} \mathbb{E} \left\{ \sum_{i=1}^N [({A}_i - \mu)^2\ + \delta_i^2] \right\} = \sigma^2 + \sigma_\delta^2 ,\
|
| 66 |
+
\end{gathered}
|
| 67 |
+
\end{equation}$$ where $\sigma^2$ is the true floating-point variance of the batch and $\sigma_\delta^2$ is the variance of the noise introduced by the linear mapping to fixed-point. Note that the error variance $\sigma_\delta^2$ is rather small and can be integrated to $\epsilon$ in the denominator of equation [\[eq:batchnorm\]](#eq:batchnorm){reference-type="eqref" reference="eq:batchnorm"}.
|
| 68 |
+
|
| 69 |
+
The generic equation of weight update in the $k^\text{th}$ iteration of SGD is $$\begin{equation}
|
| 70 |
+
{w}_{k+1}= {w}_{k} - \alpha_k g(w_k,\xi_k),\
|
| 71 |
+
\label{eq:sgd}
|
| 72 |
+
\end{equation}$$ where $g(w_k,\xi_k)$ is the estimated gradients of random samples of the batch generated by the seed $\xi_k$, and $\alpha_k$ is the learning rate. We make the following common assumption in the sequel.
|
| 73 |
+
|
| 74 |
+
**Assumption 1 (Lipschitz-continuity).** The loss function $\mathcal{L}(w)$ is continuously differentiable and its gradient $\nabla \mathcal{L}(w)$ satisfies the following inequality, where $L > 0$ is the Lipschitz constant $$\begin{equation}
|
| 75 |
+
\mathcal{L}(w) \leqslant \mathcal{L}(\bar w) + \nabla \mathcal{L}(\bar w)^\top(w-\bar w) + \frac{1}{2}L|| w- \bar w||^2_2; ~~~ ~~~ \forall ~ w,\bar w \in \mathbb{R}^d.\
|
| 76 |
+
\end{equation}$$ **Assumption 2.** (i) $\mathcal{L}(w_k)$ is bounded. (ii) The estimated gradient $g(w_k,\xi_k)$ is an unbiased estimator of the true gradient of the loss function $$\nabla \mathcal{L}( w_k)^\top \mathbb{E}_{\xi_k}\{ g(w_k,\xi_k) \} = ||\nabla \mathcal{L}(w_k)||^2_2 = ||\mathbb{E}_{\xi_k}\{ g(w_k,\xi_k) \}||^2_2,$$ and (iii,a) there exist scalars $M \geqslant 0$ and $M_V \geqslant 0$ such that for all iterations of SGD $\mathbb{V}_{\xi_k} \{g(w_k,\xi_k)\} \leqslant M + M_V || \nabla \mathcal{L}( w_k)||^2_2$ .
|
| 77 |
+
|
| 78 |
+
Note that here we define $$\mathbb{V}_{\xi_k} \{g(w_k,\xi_k)\} := \mathbb{E}_{\xi_k} \{||g(w_k,\xi_k)||^2_2\} - ||\mathbb{E}_{\xi_k} \{g(w_k,\xi_k)\} ||^2_2.$$
|
| 79 |
+
|
| 80 |
+
Also from *Assumption 2. (ii) and (iii,a)*, the second moment bound can be derived $$\begin{equation}
|
| 81 |
+
\mathbb{E}_{\xi_k} \{||g(w_k,\xi_k)||^2_2\} \leqslant M + M_G || \nabla \mathcal{L}( w_k)||^2_2 \; ~~~~~ \text{with}~~ M_G := 1+M_V.
|
| 82 |
+
\end{equation}$$
|
| 83 |
+
|
| 84 |
+
**Effect of gradient variance on convergence:** The quality of the estimated gradients $g(w_k,\xi_k)$ directly affects the convergence of the SGD. The effects of the first and second moments of the gradient have already been studied on *real numbers* in the literature.
|
| 85 |
+
|
| 86 |
+
**Lemma 1.** Suppose *Assumption 2* is true, then we have $$\begin{equation}
|
| 87 |
+
\mathbb{E}_{\xi_k}\{\mathcal{L}(w_{k+1})\} - \mathcal{L}( w_k)\leqslant -(1-\frac{1}{2}\alpha_kLM_G)\alpha_k ||\nabla \mathcal{L}( w_k)||^2_2 + \frac{1}{2} \alpha^2_k LM. \\
|
| 88 |
+
\label{eq:sgd-moments}
|
| 89 |
+
\end{equation}$$ ***Proof:*** See @bottou2018optimization [Lemma 4.4].
|
| 90 |
+
|
| 91 |
+
Inequality [\[eq:sgd-moments\]](#eq:sgd-moments){reference-type="eqref" reference="eq:sgd-moments"} shows the effect of gradient variance bounds, $M$ and $M_G$, on each iterate of SGD, and shows the greater the variance, the more deterioration in the quality of SGD steps. The first term, $-(1-\frac{1}{2}\alpha_kLM_G)\alpha_k ||\nabla \mathcal{L}( w_k)||^2_2$ contributes to the decrease of the loss function while the second term, $\frac{1}{2} \alpha^2_k LM$, prevents it. Upon choosing the correct gradient estimates, the right hand side of inequality [\[eq:sgd-moments\]](#eq:sgd-moments){reference-type="eqref" reference="eq:sgd-moments"} is bounded by a deterministic quantity, and asymptotically ensures sufficient descent of the loss $\mathcal{L}(w)$. Note that the expectation $\mathbb{E}_{\xi_k}$ is taken over random samples with seed $\xi_k$.
|
| 92 |
+
|
| 93 |
+
When performing representation mapping in the back-propagation, the quality of the gradients deteriorates. Thus, there is a need to consider the effect of fixed-point mapping variance. Then, *Assumption 2 (iii,a)* should be modified accordingly.
|
| 94 |
+
|
| 95 |
+
**Remark 1.** Note that *Assumption 2 (i), (ii)* still hold after fixed-point mapping because of stochastic rounding, i.e. the fixed-point gradient remains an unbiased estimator of gradient (refer to Appendix [7.1](#appendix:stochastic-rounding){reference-type="ref" reference="appendix:stochastic-rounding"}).
|
| 96 |
+
|
| 97 |
+
**Assumption 2 (iii,b).** When the gradients are in fixed-point format i.e. $\hat{g}(w_k,\xi_k)$, there exist scalars $M \geqslant 0$ , $M_V \geqslant 0$, $M^q \geqslant 0$ and $M^q_V \geqslant 0$ such that for all iterations of SGD $$\mathbb{V}_{\xi_k} \{\hat{g}(w_k,\xi_k)\} \leqslant M + M^q + (M_V+ M^q_V )|| \nabla \mathcal{L}( w_k)||^2_2 .$$
|
| 98 |
+
|
| 99 |
+
If $\Tilde M := M + M^q$ and $\Tilde M_V := M_V+ M^q_V$, then *Assumption 2 (iii,b)* takes the exact form of *Assumption 2 (iii,a)* i.e. $\mathbb{V}_{\xi_k} \{\hat{g}(w_k,\xi_k)\} \leqslant \Tilde M + \Tilde M_V || \nabla \mathcal{L}( w_k)||^2_2$. However, here we separated $M^q$ and $M_V^q$ to emphasize the effect of fixed-point mapping on the true gradients.
|
| 100 |
+
|
| 101 |
+
**Remark 2.** If *Assumption 2 (iii,b)* holds true, inequality [\[eq:sgd-moments\]](#eq:sgd-moments){reference-type="eqref" reference="eq:sgd-moments"} can be transformed to its fixed-point version $$\begin{equation}
|
| 102 |
+
\begin{gathered}
|
| 103 |
+
\mathbb{E}_{\xi_k}\{\mathcal{L}(w_{k+1})\} - \mathcal{L}( w_k)\leqslant -(1-\frac{1}{2}\alpha_kL(M_G+M^q_G))\alpha_k ||\nabla \mathcal{L}(w_k)||^2_2 + \frac{1}{2} \alpha^2_k L(M+M^q) \\
|
| 104 |
+
\text{with}~~ M^q_G := 1+M^q_V.
|
| 105 |
+
\end{gathered}
|
| 106 |
+
\label{eq:sgd-moments-q}
|
| 107 |
+
\end{equation}$$ Inequality [\[eq:sgd-moments-q\]](#eq:sgd-moments-q){reference-type="eqref" reference="eq:sgd-moments-q"} shows the effect of ***added*** representation mapping variance with bounds, $M^q$ and $M^q_G$, on each iterate of SGD. This observation shows that fixed-point mapping degrades the convergence of SGD unless its variance bounds are relatively small, or controlled by the learning rate. As an example, refer to Appendix [7.2](#appendix:lin-var-q){reference-type="ref" reference="appendix:lin-var-q"} for analytical derivations of $M^q$ and $M^q_G$ for the back-propagation of a linear layer involving a fixed-point inner product.
|
| 108 |
+
|
| 109 |
+
**Assumption 3 (Strong convexity).** The loss function $\mathcal{L}(w)$ is differentiable and strongly convex. We recall that strong convexity for differentiable functions is equivalent to the following inequality with some constant $c > 0$ $$\begin{equation}
|
| 110 |
+
\mathcal{L}(w) \geqslant \mathcal{L}(\bar w) + \nabla \mathcal{L}(\bar w)^\top(w-\bar w) + \frac{1}{2}c|| w- \bar w||^2_2; ~~~ ~~~ \forall ~ w,\bar w \in \mathbb{R}^d.\
|
| 111 |
+
\label{eq:convex-1}
|
| 112 |
+
\end{equation}$$ A strongly convex function has a unique minimum point at $w_*$ with the loss value $\mathcal{L}_* = \mathcal{L}(w_*)$.
|
| 113 |
+
|
| 114 |
+
**Theorem 1.** Suppose *Assumptions 1, 2(i), 2(ii), 2 (iii,b), 3* are all true, then a SGD method running with fixed-point gradients i.e. $\hat g(w_k,\xi_k)$ and a fixed learning rate $0<\bar\alpha \leqslant \frac{1}{L(M_G+M_G^q)}$ satisfies the following bound for its optimality gap with the minimum loss $\mathcal{L}_*$ at the $k^\text{th}$ iteration $$\begin{align}
|
| 115 |
+
%\begin{gathered}
|
| 116 |
+
\mathbb{E}\{\mathcal{L}(w_{k}) - \mathcal{L_*} \}
|
| 117 |
+
\leqslant& \frac{\bar\alpha L(M+M^q)}{2c} + (1-\bar \alpha c)^{(k-1)} \left(\mathcal{L}(w_1)-\mathcal{L}_*-\frac{\bar \alpha L(M+M^q)}{2c} \right) \nonumber\\
|
| 118 |
+
\xrightarrow{k\xrightarrow{}\infty} &\frac{\bar\alpha L(M+M^q)}{2c}.\
|
| 119 |
+
%\end{gathered}
|
| 120 |
+
\label{eq:sgd-convex}
|
| 121 |
+
\end{align}$$ ***Proof.*** See Appendix [7.3](#appendix:proof-sgd-q){reference-type="ref" reference="appendix:proof-sgd-q"}.
|
| 122 |
+
|
| 123 |
+
**Remark 3.** Note that when ${k\xrightarrow{}\infty}$, $\frac{\bar\alpha LM}{2c}$ is the original optimality gap (i.e. $\mathbb{E}\{\mathcal{L}(w_{k}) - \mathcal{L_*} \}$) with *real* gradients, see @bottou2018optimization [Theorem 4.6], and then, the optimality gap is also increased by $\frac{\bar\alpha LM^q}{2c}$ due to fixed-point representation. Here we argue that by keeping the variance bound $M^q$ relatively small via choosing the correct number format, we can theoretically achieve the original performance. On the other hand, the optimality gap is related to $\bar\alpha$, which means smaller learning rates lead to a smaller optimality gap.
|
| 124 |
+
|
| 125 |
+
**Remark 4 (Local convexity).** Strongly convex loss is not a realistic assumption in large deep learning models. However, local convexity is a more realistic assumption i.e. $\mathcal{L}$ is convex around the minimum point ${w}_*$. Hence, inequality [\[eq:sgd-convex\]](#eq:sgd-convex){reference-type="eqref" reference="eq:sgd-convex"} still holds around the minimum point ${w}_*$ of a locally convex loss. We essentially train our ImageNet ResNet18 classification in the same way: first we train the network with a fixed learning rate until reaching a certain optimality gap; then the learning rate is reduced to a smaller fixed value in order to shrink the optimality gap.
|
| 126 |
+
|
| 127 |
+
<figure id="fig:loss" data-latex-placement="ht">
|
| 128 |
+
<p><embed src="figs/3dsurface_fp.pdf" style="width:30.0%" /> <embed src="figs/3dsurface_dfx.pdf" style="width:30.0%" /> <embed src="figs/loss.pdf" style="width:30.0%" /><br />
|
| 129 |
+
<span>(a)</span><span>(b)</span> <span>(c)</span><br />
|
| 130 |
+
</p>
|
| 131 |
+
<figcaption> (a) Floating-point loss landscape, (b) Fixed-point <code>int8</code> loss landscape c) Training loss trajectory comparison. <strong>Integer training setup:</strong> <code>int8</code> linear layer, <code>int8</code> convolutional layer and <code>int8</code> batch-norm layer. </figcaption>
|
| 132 |
+
</figure>
|
| 133 |
+
|
| 134 |
+
**Empirical evidence:** Figure [3](#fig:loss){reference-type="ref" reference="fig:loss"}(a) demonstrates the locally convex loss landscape of ResNet18 training on CIFAR10 dataset using floating-point computation. Figure [3](#fig:loss){reference-type="ref" reference="fig:loss"}(b) shows the same loss landscape in the fixed-point `int8` format. We perturbed the weights around the ${w_*}$ in $x$ and $y$ axes using Gaussian noise and evaluated the loss $\mathcal{L}$ in the $z$ axis to acquire Figures [3](#fig:loss){reference-type="ref" reference="fig:loss"}(a) and [3](#fig:loss){reference-type="ref" reference="fig:loss"}(b). Comparing these two figures supports our assumption of local convexity in both integer and floating-point tests. Figure [3](#fig:loss){reference-type="ref" reference="fig:loss"}(c) shows a comparison of the loss trajectory for floating-point and integer training. The fixed-point gradients are unbiased estimators of the true gradients, so the trajectory of the integer training closely follows the trajectory of its floating-point counterpart.
|
| 135 |
+
|
| 136 |
+
**Remark 5.** We implemented an integer weight update; hence, the computations of equation [\[eq:sgd\]](#eq:sgd){reference-type="eqref" reference="eq:sgd"} are also performed in integer arithmetic. It is shown in Appendix [7.4](#appendix:wu){reference-type="ref" reference="appendix:wu"} that the integer weight update with stochastic rounding is an unbiased estimator of the true weight update.
|
2209.06203/main_diagram/main_diagram.drawio
ADDED
|
@@ -0,0 +1,452 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
<mxfile host="app.diagrams.net" modified="2023-05-26T18:31:46.678Z" agent="Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36" version="21.3.4" etag="rXHp0A1r-bmQFnVGlcbi" type="google">
|
| 2 |
+
<diagram name="Сторінка-1" id="nC6kD3jU2D91glZRwtfH">
|
| 3 |
+
<mxGraphModel dx="1509" dy="714" grid="1" gridSize="10" guides="1" tooltips="1" connect="1" arrows="1" fold="1" page="0" pageScale="1" pageWidth="827" pageHeight="1169" math="1" shadow="0">
|
| 4 |
+
<root>
|
| 5 |
+
<mxCell id="0" />
|
| 6 |
+
<mxCell id="1" parent="0" />
|
| 7 |
+
<mxCell id="2" value="" style="rounded=0;whiteSpace=wrap;html=1;shadow=0;glass=0;labelBorderColor=none;fontSize=8;strokeColor=#666666;strokeWidth=1;fillColor=#E6E6E6;fontColor=#333333;" vertex="1" parent="1">
|
| 8 |
+
<mxGeometry x="255" y="555" width="93" height="135" as="geometry" />
|
| 9 |
+
</mxCell>
|
| 10 |
+
<mxCell id="3" value="<font style="font-size: 8px;">Nuisance flow<br></font>" style="text;html=1;strokeColor=none;fillColor=none;align=center;verticalAlign=middle;whiteSpace=wrap;rounded=0;dashed=1;labelBorderColor=none;fontSize=6;" vertex="1" parent="1">
|
| 11 |
+
<mxGeometry x="255" y="558.04" width="93" height="8.48" as="geometry" />
|
| 12 |
+
</mxCell>
|
| 13 |
+
<mxCell id="4" value="FC\(_2\)" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
|
| 14 |
+
<mxGeometry x="280" y="622.2439024390244" width="40" height="16.95121951219512" as="geometry" />
|
| 15 |
+
</mxCell>
|
| 16 |
+
<mxCell id="5" value="FC\(_1\)" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
|
| 17 |
+
<mxGeometry x="300" y="658.2317073170732" width="40" height="16.95121951219512" as="geometry" />
|
| 18 |
+
</mxCell>
|
| 19 |
+
<mxCell id="6" style="rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=0.25;exitY=0;exitDx=0;exitDy=0;entryX=0.75;entryY=1;entryDx=0;entryDy=0;endArrow=classicThin;endFill=1;strokeWidth=0.85;endSize=4;startSize=4;" edge="1" source="5" target="4" parent="1">
|
| 20 |
+
<mxGeometry relative="1" as="geometry" />
|
| 21 |
+
</mxCell>
|
| 22 |
+
<mxCell id="7" value="<font style="font-size: 5px;">\(+ \xi_x\)</font>" style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];fontSize=5;labelBorderColor=none;" connectable="0" vertex="1" parent="6">
|
| 23 |
+
<mxGeometry x="-0.0994" relative="1" as="geometry">
|
| 24 |
+
<mxPoint y="1" as="offset" />
|
| 25 |
+
</mxGeometry>
|
| 26 |
+
</mxCell>
|
| 27 |
+
<mxCell id="8" value="" style="rounded=0;whiteSpace=wrap;html=1;shadow=0;glass=0;labelBorderColor=none;fontSize=8;strokeColor=#666666;strokeWidth=1;fillColor=#E6E6E6;fontColor=#333333;" vertex="1" parent="1">
|
| 28 |
+
<mxGeometry x="530" y="550" width="80" height="60" as="geometry" />
|
| 29 |
+
</mxCell>
|
| 30 |
+
<mxCell id="9" value="<font style="font-size: 8px;">Target flow \(a\)</font>" style="text;html=1;strokeColor=none;fillColor=none;align=center;verticalAlign=middle;whiteSpace=wrap;rounded=0;dashed=1;labelBorderColor=none;fontSize=6;" vertex="1" parent="1">
|
| 31 |
+
<mxGeometry x="531" y="551" width="79" height="14.55" as="geometry" />
|
| 32 |
+
</mxCell>
|
| 33 |
+
<mxCell id="10" value="<font style="font-size: 8px;">\(X\)</font>" style="ellipse;whiteSpace=wrap;html=1;aspect=fixed;fillColor=#f8cecc;strokeColor=#b85450;strokeWidth=1;fontSize=8;" vertex="1" parent="1">
|
| 34 |
+
<mxGeometry x="227" y="676" width="20" height="20" as="geometry" />
|
| 35 |
+
</mxCell>
|
| 36 |
+
<mxCell id="11" style="edgeStyle=none;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;entryX=0.25;entryY=1;entryDx=0;entryDy=0;endArrow=classicThin;endFill=1;strokeWidth=0.85;endSize=4;startSize=4;" edge="1" source="12" target="4" parent="1">
|
| 37 |
+
<mxGeometry relative="1" as="geometry">
|
| 38 |
+
<Array as="points">
|
| 39 |
+
<mxPoint x="290" y="654" />
|
| 40 |
+
</Array>
|
| 41 |
+
</mxGeometry>
|
| 42 |
+
</mxCell>
|
| 43 |
+
<mxCell id="12" value="<font style="font-size: 8px;">\(A\)</font>" style="ellipse;whiteSpace=wrap;html=1;aspect=fixed;fillColor=#dae8fc;strokeColor=#6c8ebf;strokeWidth=1;fontSize=8;" vertex="1" parent="1">
|
| 44 |
+
<mxGeometry x="227" y="644" width="20" height="20" as="geometry" />
|
| 45 |
+
</mxCell>
|
| 46 |
+
<mxCell id="13" style="rounded=0;orthogonalLoop=1;jettySize=auto;html=1;endArrow=classicThin;endFill=1;entryX=0;entryY=0.5;entryDx=0;entryDy=0;strokeWidth=0.85;endSize=4;startSize=4;exitX=0.75;exitY=0;exitDx=0;exitDy=0;" edge="1" source="5" target="16" parent="1">
|
| 47 |
+
<mxGeometry relative="1" as="geometry">
|
| 48 |
+
<mxPoint x="330" y="680" as="sourcePoint" />
|
| 49 |
+
<mxPoint x="359" y="672" as="targetPoint" />
|
| 50 |
+
<Array as="points">
|
| 51 |
+
<mxPoint x="330" y="634" />
|
| 52 |
+
</Array>
|
| 53 |
+
</mxGeometry>
|
| 54 |
+
</mxCell>
|
| 55 |
+
<mxCell id="14" style="edgeStyle=none;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=0.5;exitY=1;exitDx=0;exitDy=0;entryX=0.5;entryY=0;entryDx=0;entryDy=0;fontSize=12;startArrow=none;startFill=0;endArrow=none;endFill=0;strokeWidth=0.6;dashed=1;dashPattern=1 1;" edge="1" source="16" target="25" parent="1">
|
| 56 |
+
<mxGeometry relative="1" as="geometry" />
|
| 57 |
+
</mxCell>
|
| 58 |
+
<mxCell id="15" value="\(\mathcal{L}_{\pi}\)" style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];fontSize=8;" connectable="0" vertex="1" parent="14">
|
| 59 |
+
<mxGeometry x="0.019" y="2" relative="1" as="geometry">
|
| 60 |
+
<mxPoint y="1" as="offset" />
|
| 61 |
+
</mxGeometry>
|
| 62 |
+
</mxCell>
|
| 63 |
+
<mxCell id="16" value="\(\hat{\pi}_A (X)\)" style="rounded=1;whiteSpace=wrap;html=1;labelBorderColor=none;fontSize=8;strokeWidth=1;strokeColor=#666666;" vertex="1" parent="1">
|
| 64 |
+
<mxGeometry x="359" y="624" width="30" height="20" as="geometry" />
|
| 65 |
+
</mxCell>
|
| 66 |
+
<mxCell id="17" style="edgeStyle=none;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;entryX=-0.001;entryY=0.439;entryDx=0;entryDy=0;fontSize=8;endArrow=classicThin;endFill=1;strokeWidth=0.85;startArrow=classicThin;startFill=1;endSize=4;startSize=4;entryPerimeter=0;" edge="1" source="81" target="22" parent="1">
|
| 67 |
+
<mxGeometry relative="1" as="geometry">
|
| 68 |
+
<mxPoint x="243" y="589" as="sourcePoint" />
|
| 69 |
+
<mxPoint x="265" y="580" as="targetPoint" />
|
| 70 |
+
</mxGeometry>
|
| 71 |
+
</mxCell>
|
| 72 |
+
<mxCell id="18" style="edgeStyle=none;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=0;exitY=0.5;exitDx=0;exitDy=0;entryX=0.996;entryY=0.444;entryDx=0;entryDy=0;fontSize=6;endArrow=classicThin;endFill=1;strokeWidth=0.85;startArrow=classicThin;startFill=1;endSize=4;startSize=4;entryPerimeter=0;" edge="1" source="72" target="22" parent="1">
|
| 73 |
+
<mxGeometry relative="1" as="geometry">
|
| 74 |
+
<mxPoint x="360.25" y="589" as="sourcePoint" />
|
| 75 |
+
<mxPoint x="334.5" y="587" as="targetPoint" />
|
| 76 |
+
</mxGeometry>
|
| 77 |
+
</mxCell>
|
| 78 |
+
<mxCell id="19" style="edgeStyle=none;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;entryX=0.5;entryY=0;entryDx=0;entryDy=0;dashed=1;dashPattern=1 1;fontSize=8;startArrow=none;startFill=0;endArrow=none;endFill=0;strokeWidth=0.6;exitX=0.75;exitY=1;exitDx=0;exitDy=0;" edge="1" target="26" parent="1">
|
| 79 |
+
<mxGeometry relative="1" as="geometry">
|
| 80 |
+
<mxPoint x="409.75" y="598" as="sourcePoint" />
|
| 81 |
+
</mxGeometry>
|
| 82 |
+
</mxCell>
|
| 83 |
+
<mxCell id="20" value="\( \mathcal{L}_{\text{NLL}}\)" style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];fontSize=8;" connectable="0" vertex="1" parent="19">
|
| 84 |
+
<mxGeometry x="0.0404" y="-1" relative="1" as="geometry">
|
| 85 |
+
<mxPoint y="-6" as="offset" />
|
| 86 |
+
</mxGeometry>
|
| 87 |
+
</mxCell>
|
| 88 |
+
<mxCell id="21" value="" style="group;rounded=1;arcSize=26;" connectable="0" vertex="1" parent="1">
|
| 89 |
+
<mxGeometry x="265" y="569" width="70" height="46" as="geometry" />
|
| 90 |
+
</mxCell>
|
| 91 |
+
<mxCell id="22" value="CNF<br>" style="rounded=0;whiteSpace=wrap;html=1;verticalAlign=top;" vertex="1" parent="21">
|
| 92 |
+
<mxGeometry width="70" height="46" as="geometry" />
|
| 93 |
+
</mxCell>
|
| 94 |
+
<mxCell id="23" value="\( \theta(X, A) \)" style="rounded=1;whiteSpace=wrap;html=1;labelBorderColor=none;fontSize=8;strokeWidth=1;dashed=1;strokeColor=#666666;" vertex="1" parent="21">
|
| 95 |
+
<mxGeometry x="12.40506329113924" y="22.41190476190476" width="45.189873417721515" height="15.333333333333334" as="geometry" />
|
| 96 |
+
</mxCell>
|
| 97 |
+
<mxCell id="24" style="edgeStyle=none;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=0.5;exitY=0;exitDx=0;exitDy=0;entryX=0.5;entryY=1;entryDx=0;entryDy=0;fontSize=8;endArrow=classicThin;endFill=1;strokeWidth=1;endSize=4;startSize=4;" edge="1" source="4" target="23" parent="1">
|
| 98 |
+
<mxGeometry relative="1" as="geometry">
|
| 99 |
+
<mxPoint x="295" y="619.24" as="targetPoint" />
|
| 100 |
+
</mxGeometry>
|
| 101 |
+
</mxCell>
|
| 102 |
+
<mxCell id="25" value="<font style="font-size: 8px;">\(A\)</font>" style="ellipse;whiteSpace=wrap;html=1;aspect=fixed;fillColor=#dae8fc;strokeColor=#6c8ebf;strokeWidth=1;fontSize=8;" vertex="1" parent="1">
|
| 103 |
+
<mxGeometry x="364" y="670" width="20" height="20" as="geometry" />
|
| 104 |
+
</mxCell>
|
| 105 |
+
<mxCell id="26" value="<font style="font-size: 8px;">\(Y\)</font>" style="ellipse;whiteSpace=wrap;html=1;aspect=fixed;fillColor=#d5e8d4;strokeColor=#82b366;strokeWidth=1;fontSize=8;" vertex="1" parent="1">
|
| 106 |
+
<mxGeometry x="399.25" y="670" width="20" height="20" as="geometry" />
|
| 107 |
+
</mxCell>
|
| 108 |
+
<mxCell id="27" value="<font style="font-size: 6px;">\(+ \xi_y\)</font>" style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];fontSize=8;labelBorderColor=none;" connectable="0" vertex="1" parent="1">
|
| 109 |
+
<mxGeometry x="397.0048988914343" y="662.0004449914576" as="geometry">
|
| 110 |
+
<mxPoint x="11" y="-2" as="offset" />
|
| 111 |
+
</mxGeometry>
|
| 112 |
+
</mxCell>
|
| 113 |
+
<mxCell id="28" value="" style="group;rounded=1;arcSize=26;" connectable="0" vertex="1" parent="1">
|
| 114 |
+
<mxGeometry x="540" y="564" width="79" height="50" as="geometry" />
|
| 115 |
+
</mxCell>
|
| 116 |
+
<mxCell id="29" value="NF" style="rounded=0;whiteSpace=wrap;html=1;verticalAlign=top;spacingTop=-4;" vertex="1" parent="28">
|
| 117 |
+
<mxGeometry width="60" height="40" as="geometry" />
|
| 118 |
+
</mxCell>
|
| 119 |
+
<mxCell id="30" value="\( \beta_a \)" style="rounded=1;whiteSpace=wrap;html=1;labelBorderColor=none;fontSize=8;strokeWidth=1;dashed=1;strokeColor=#666666;" vertex="1" parent="28">
|
| 120 |
+
<mxGeometry x="17.5" y="19.276785714285715" width="25" height="16.66964285714286" as="geometry" />
|
| 121 |
+
</mxCell>
|
| 122 |
+
<mxCell id="31" style="edgeStyle=none;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;entryX=0;entryY=0.5;entryDx=0;entryDy=0;fontSize=8;endArrow=classicThin;endFill=1;strokeWidth=0.6;startArrow=classicThin;startFill=1;endSize=4;startSize=4;" edge="1" source="84" target="29" parent="1">
|
| 123 |
+
<mxGeometry relative="1" as="geometry">
|
| 124 |
+
<mxPoint x="520" y="589" as="sourcePoint" />
|
| 125 |
+
<mxPoint x="533.5000000000001" y="588.89" as="targetPoint" />
|
| 126 |
+
</mxGeometry>
|
| 127 |
+
</mxCell>
|
| 128 |
+
<mxCell id="32" style="edgeStyle=none;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;entryX=0;entryY=0.5;entryDx=0;entryDy=0;fontSize=8;endArrow=classicThin;endFill=1;strokeWidth=0.6;startArrow=classicThin;startFill=1;endSize=4;startSize=4;" edge="1" source="29" target="101" parent="1">
|
| 129 |
+
<mxGeometry relative="1" as="geometry">
|
| 130 |
+
<mxPoint x="593" y="590.89" as="sourcePoint" />
|
| 131 |
+
<mxPoint x="625" y="589" as="targetPoint" />
|
| 132 |
+
</mxGeometry>
|
| 133 |
+
</mxCell>
|
| 134 |
+
<mxCell id="33" style="edgeStyle=none;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;entryX=-0.002;entryY=0.626;entryDx=0;entryDy=0;fontSize=8;startArrow=none;startFill=0;endArrow=classicThin;endFill=1;startSize=4;endSize=4;strokeWidth=0.85;entryPerimeter=0;" edge="1" source="34" target="90" parent="1">
|
| 135 |
+
<mxGeometry relative="1" as="geometry" />
|
| 136 |
+
</mxCell>
|
| 137 |
+
<mxCell id="34" value="<font style="font-size: 8px;">\(a\)</font>" style="ellipse;whiteSpace=wrap;html=1;aspect=fixed;fillColor=#FFFFFF;strokeColor=#6c8ebf;strokeWidth=1;fontSize=8;" vertex="1" parent="1">
|
| 138 |
+
<mxGeometry x="505" y="661" width="20" height="20" as="geometry" />
|
| 139 |
+
</mxCell>
|
| 140 |
+
<mxCell id="35" style="edgeStyle=none;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;entryX=0.466;entryY=1.023;entryDx=0;entryDy=0;fontSize=12;startArrow=none;startFill=0;endArrow=none;endFill=0;strokeWidth=0.6;dashed=1;dashPattern=1 1;exitX=0.5;exitY=0;exitDx=0;exitDy=0;entryPerimeter=0;" edge="1" source="93" target="100" parent="1">
|
| 141 |
+
<mxGeometry relative="1" as="geometry">
|
| 142 |
+
<mxPoint x="652.4999999999998" y="641" as="sourcePoint" />
|
| 143 |
+
<mxPoint x="653" y="597.9999999999999" as="targetPoint" />
|
| 144 |
+
</mxGeometry>
|
| 145 |
+
</mxCell>
|
| 146 |
+
<mxCell id="36" value="\(\mathcal{L}_{\text{CE}}\)" style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];fontSize=8;" connectable="0" vertex="1" parent="35">
|
| 147 |
+
<mxGeometry x="0.019" y="2" relative="1" as="geometry">
|
| 148 |
+
<mxPoint x="2" y="-1" as="offset" />
|
| 149 |
+
</mxGeometry>
|
| 150 |
+
</mxCell>
|
| 151 |
+
<mxCell id="37" style="edgeStyle=none;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;fontSize=8;startArrow=none;startFill=0;endArrow=classicThin;endFill=1;startSize=4;endSize=4;strokeWidth=0.85;entryX=-0.006;entryY=0.904;entryDx=0;entryDy=0;entryPerimeter=0;" edge="1" source="39" target="90" parent="1">
|
| 152 |
+
<mxGeometry relative="1" as="geometry">
|
| 153 |
+
<mxPoint x="571.0000000000002" y="697.0000000000001" as="sourcePoint" />
|
| 154 |
+
<mxPoint x="540" y="690" as="targetPoint" />
|
| 155 |
+
</mxGeometry>
|
| 156 |
+
</mxCell>
|
| 157 |
+
<mxCell id="38" style="edgeStyle=none;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1.008;exitY=0.601;exitDx=0;exitDy=0;entryX=0;entryY=0.5;entryDx=0;entryDy=0;fontSize=8;startArrow=none;startFill=0;endArrow=classicThin;endFill=1;startSize=4;endSize=4;strokeWidth=0.85;exitPerimeter=0;" edge="1" source="91" target="95" parent="1">
|
| 158 |
+
<mxGeometry relative="1" as="geometry" />
|
| 159 |
+
</mxCell>
|
| 160 |
+
<mxCell id="39" value="<font style="font-size: 8px;">\(X\)</font>" style="ellipse;whiteSpace=wrap;html=1;aspect=fixed;fillColor=#f8cecc;strokeColor=#b85450;strokeWidth=1;fontSize=8;" vertex="1" parent="1">
|
| 161 |
+
<mxGeometry x="484" y="675" width="20" height="20" as="geometry" />
|
| 162 |
+
</mxCell>
|
| 163 |
+
<mxCell id="40" style="edgeStyle=none;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;fontSize=12;startArrow=none;startFill=0;endArrow=none;endFill=0;strokeWidth=0.6;shape=link;strokeColor=#FF0000;width=-2.4637681159420293;" edge="1" parent="1">
|
| 164 |
+
<mxGeometry relative="1" as="geometry">
|
| 165 |
+
<mxPoint x="678.5" y="634" as="sourcePoint" />
|
| 166 |
+
<mxPoint x="665.5" y="634" as="targetPoint" />
|
| 167 |
+
</mxGeometry>
|
| 168 |
+
</mxCell>
|
| 169 |
+
<mxCell id="41" style="edgeStyle=none;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;fontSize=12;startArrow=none;startFill=0;endArrow=none;endFill=0;strokeWidth=0.6;dashed=1;dashPattern=1 1;entryX=0.5;entryY=1;entryDx=0;entryDy=0;" edge="1" source="95" target="103" parent="1">
|
| 170 |
+
<mxGeometry relative="1" as="geometry">
|
| 171 |
+
<mxPoint x="721.5229999999999" y="676.2600000000001" as="sourcePoint" />
|
| 172 |
+
<mxPoint x="672.25" y="686" as="targetPoint" />
|
| 173 |
+
<Array as="points">
|
| 174 |
+
<mxPoint x="735" y="670" />
|
| 175 |
+
</Array>
|
| 176 |
+
</mxGeometry>
|
| 177 |
+
</mxCell>
|
| 178 |
+
<mxCell id="42" value="<font style="font-size: 14px;">Target flow</font>" style="text;html=1;strokeColor=none;fillColor=none;align=center;verticalAlign=middle;whiteSpace=wrap;rounded=0;shadow=0;glass=0;labelBorderColor=none;fontSize=6;fontColor=#000000;" vertex="1" parent="1">
|
| 179 |
+
<mxGeometry x="455" y="701" width="305" height="19" as="geometry" />
|
| 180 |
+
</mxCell>
|
| 181 |
+
<mxCell id="43" value="" style="endArrow=none;html=1;rounded=0;fontSize=14;fontColor=#000000;strokeColor=#000000;strokeWidth=2;" edge="1" parent="1">
|
| 182 |
+
<mxGeometry width="50" height="50" relative="1" as="geometry">
|
| 183 |
+
<mxPoint x="455" y="700" as="sourcePoint" />
|
| 184 |
+
<mxPoint x="760" y="700" as="targetPoint" />
|
| 185 |
+
</mxGeometry>
|
| 186 |
+
</mxCell>
|
| 187 |
+
<mxCell id="44" value="" style="endArrow=none;html=1;rounded=0;fontSize=14;fontColor=#000000;strokeColor=#000000;strokeWidth=2;" edge="1" parent="1">
|
| 188 |
+
<mxGeometry width="50" height="50" relative="1" as="geometry">
|
| 189 |
+
<mxPoint x="455" y="710" as="sourcePoint" />
|
| 190 |
+
<mxPoint x="455" y="670" as="targetPoint" />
|
| 191 |
+
</mxGeometry>
|
| 192 |
+
</mxCell>
|
| 193 |
+
<mxCell id="45" value="" style="endArrow=none;html=1;rounded=0;fontSize=14;fontColor=#000000;strokeColor=#000000;strokeWidth=2;" edge="1" parent="1">
|
| 194 |
+
<mxGeometry width="50" height="50" relative="1" as="geometry">
|
| 195 |
+
<mxPoint x="760" y="710" as="sourcePoint" />
|
| 196 |
+
<mxPoint x="760" y="670" as="targetPoint" />
|
| 197 |
+
</mxGeometry>
|
| 198 |
+
</mxCell>
|
| 199 |
+
<mxCell id="46" value="" style="endArrow=none;html=1;rounded=0;fontSize=14;fontColor=#000000;strokeColor=#000000;strokeWidth=2;" edge="1" parent="1">
|
| 200 |
+
<mxGeometry width="50" height="50" relative="1" as="geometry">
|
| 201 |
+
<mxPoint x="175" y="701" as="sourcePoint" />
|
| 202 |
+
<mxPoint x="440" y="701" as="targetPoint" />
|
| 203 |
+
</mxGeometry>
|
| 204 |
+
</mxCell>
|
| 205 |
+
<mxCell id="47" value="" style="endArrow=none;html=1;rounded=0;fontSize=14;fontColor=#000000;strokeColor=#000000;strokeWidth=2;" edge="1" parent="1">
|
| 206 |
+
<mxGeometry width="50" height="50" relative="1" as="geometry">
|
| 207 |
+
<mxPoint x="175" y="710" as="sourcePoint" />
|
| 208 |
+
<mxPoint x="175" y="670" as="targetPoint" />
|
| 209 |
+
</mxGeometry>
|
| 210 |
+
</mxCell>
|
| 211 |
+
<mxCell id="48" value="" style="endArrow=none;html=1;rounded=0;fontSize=14;fontColor=#000000;strokeColor=#000000;strokeWidth=2;" edge="1" parent="1">
|
| 212 |
+
<mxGeometry width="50" height="50" relative="1" as="geometry">
|
| 213 |
+
<mxPoint x="440" y="710" as="sourcePoint" />
|
| 214 |
+
<mxPoint x="440" y="670" as="targetPoint" />
|
| 215 |
+
</mxGeometry>
|
| 216 |
+
</mxCell>
|
| 217 |
+
<mxCell id="49" value="<font style="font-size: 14px;">Nuisance&nbsp;flow</font>" style="text;html=1;strokeColor=none;fillColor=none;align=center;verticalAlign=middle;whiteSpace=wrap;rounded=0;shadow=0;glass=0;labelBorderColor=none;fontSize=6;fontColor=#000000;" vertex="1" parent="1">
|
| 218 |
+
<mxGeometry x="175" y="701" width="265" height="19" as="geometry" />
|
| 219 |
+
</mxCell>
|
| 220 |
+
<mxCell id="50" value="" style="rounded=1;whiteSpace=wrap;html=1;shadow=0;glass=0;labelBorderColor=none;fontSize=6;strokeWidth=0.5;arcSize=3;" vertex="1" parent="1">
|
| 221 |
+
<mxGeometry x="780" y="535" width="180" height="182" as="geometry" />
|
| 222 |
+
</mxCell>
|
| 223 |
+
<mxCell id="51" value="<p style="line-height: 90%;"><font style="font-size: 12px;">FC</font></p>" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
|
| 224 |
+
<mxGeometry x="793" y="559" width="35" height="20" as="geometry" />
|
| 225 |
+
</mxCell>
|
| 226 |
+
<mxCell id="52" value="<font style="font-size: 8px;">Fully-connected subnetwork</font>" style="text;html=1;strokeColor=none;fillColor=none;align=left;verticalAlign=middle;whiteSpace=wrap;rounded=0;shadow=0;glass=0;labelBorderColor=none;fontSize=6;fontColor=#000000;" vertex="1" parent="1">
|
| 227 |
+
<mxGeometry x="840" y="561" width="110" height="16" as="geometry" />
|
| 228 |
+
</mxCell>
|
| 229 |
+
<mxCell id="53" value="" style="group;rounded=1;arcSize=26;verticalAlign=top;spacing=0;" connectable="0" vertex="1" parent="1">
|
| 230 |
+
<mxGeometry x="791" y="585" width="40" height="30" as="geometry" />
|
| 231 |
+
</mxCell>
|
| 232 |
+
<mxCell id="54" value="<font style="line-height: 1.1;"><font style="font-size: 6px;">NF</font><br></font>" style="rounded=0;whiteSpace=wrap;html=1;verticalAlign=top;align=center;spacing=-4;" vertex="1" parent="53">
|
| 233 |
+
<mxGeometry width="40" height="30" as="geometry" />
|
| 234 |
+
</mxCell>
|
| 235 |
+
<mxCell id="55" value="<font style="font-size: 5px;">\( \theta \)</font>" style="rounded=1;whiteSpace=wrap;html=1;labelBorderColor=none;fontSize=8;strokeWidth=1;dashed=1;strokeColor=#666666;" vertex="1" parent="53">
|
| 236 |
+
<mxGeometry x="12.669113924050635" y="15.996071428571433" width="12.658227848101266" height="10.001785714285717" as="geometry" />
|
| 237 |
+
</mxCell>
|
| 238 |
+
<mxCell id="56" value="<font style="font-size: 8px;">Normalizing flow, parametrized by \(\theta\)</font>" style="text;html=1;strokeColor=none;fillColor=none;align=left;verticalAlign=middle;whiteSpace=wrap;rounded=0;shadow=0;glass=0;labelBorderColor=none;fontSize=6;fontColor=#000000;" vertex="1" parent="1">
|
| 239 |
+
<mxGeometry x="840" y="592" width="110" height="16" as="geometry" />
|
| 240 |
+
</mxCell>
|
| 241 |
+
<mxCell id="57" style="edgeStyle=none;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;fontSize=12;startArrow=none;startFill=0;endArrow=none;endFill=0;strokeWidth=0.6;dashed=1;dashPattern=1 1;" edge="1" parent="1">
|
| 242 |
+
<mxGeometry relative="1" as="geometry">
|
| 243 |
+
<mxPoint x="797" y="677" as="sourcePoint" />
|
| 244 |
+
<mxPoint x="827" y="677" as="targetPoint" />
|
| 245 |
+
</mxGeometry>
|
| 246 |
+
</mxCell>
|
| 247 |
+
<mxCell id="58" value="\(\mathcal{L}\)" style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];fontSize=8;" connectable="0" vertex="1" parent="57">
|
| 248 |
+
<mxGeometry x="0.019" y="2" relative="1" as="geometry">
|
| 249 |
+
<mxPoint y="3" as="offset" />
|
| 250 |
+
</mxGeometry>
|
| 251 |
+
</mxCell>
|
| 252 |
+
<mxCell id="59" value="<font style="font-size: 8px;">Losses</font>" style="text;html=1;strokeColor=none;fillColor=none;align=left;verticalAlign=middle;whiteSpace=wrap;rounded=0;shadow=0;glass=0;labelBorderColor=none;fontSize=6;fontColor=#000000;" vertex="1" parent="1">
|
| 253 |
+
<mxGeometry x="840" y="670" width="110" height="16" as="geometry" />
|
| 254 |
+
</mxCell>
|
| 255 |
+
<mxCell id="60" style="edgeStyle=none;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;fontSize=12;startArrow=none;startFill=0;endArrow=none;endFill=0;strokeWidth=0.6;shape=link;strokeColor=#FF0000;width=-2.4637681159420293;" edge="1" parent="1">
|
| 256 |
+
<mxGeometry relative="1" as="geometry">
|
| 257 |
+
<mxPoint x="817" y="692" as="sourcePoint" />
|
| 258 |
+
<mxPoint x="804" y="692" as="targetPoint" />
|
| 259 |
+
</mxGeometry>
|
| 260 |
+
</mxCell>
|
| 261 |
+
<mxCell id="61" value="<font style="font-size: 8px;">Gradient blocking</font>" style="text;html=1;strokeColor=none;fillColor=none;align=left;verticalAlign=middle;whiteSpace=wrap;rounded=0;shadow=0;glass=0;labelBorderColor=none;fontSize=6;fontColor=#000000;" vertex="1" parent="1">
|
| 262 |
+
<mxGeometry x="840" y="685.21" width="110" height="16" as="geometry" />
|
| 263 |
+
</mxCell>
|
| 264 |
+
<mxCell id="62" style="edgeStyle=none;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;entryX=0;entryY=0.181;entryDx=0;entryDy=0;fontSize=8;endArrow=classicThin;endFill=1;strokeWidth=0.6;startArrow=classicThin;startFill=1;endSize=4;startSize=4;entryPerimeter=0;" edge="1" source="87" target="90" parent="1">
|
| 265 |
+
<mxGeometry relative="1" as="geometry">
|
| 266 |
+
<mxPoint x="522.9999999999999" y="649" as="sourcePoint" />
|
| 267 |
+
<mxPoint x="552" y="632.94" as="targetPoint" />
|
| 268 |
+
</mxGeometry>
|
| 269 |
+
</mxCell>
|
| 270 |
+
<mxCell id="63" style="edgeStyle=none;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=0.993;exitY=0.167;exitDx=0;exitDy=0;fontSize=8;endArrow=classicThin;endFill=1;strokeWidth=0.6;startArrow=classicThin;startFill=1;endSize=4;startSize=4;entryX=0;entryY=0.5;entryDx=0;entryDy=0;exitPerimeter=0;" edge="1" source="91" target="94" parent="1">
|
| 271 |
+
<mxGeometry relative="1" as="geometry">
|
| 272 |
+
<mxPoint x="596.0000000000002" y="633.98" as="sourcePoint" />
|
| 273 |
+
<mxPoint x="618" y="650" as="targetPoint" />
|
| 274 |
+
</mxGeometry>
|
| 275 |
+
</mxCell>
|
| 276 |
+
<mxCell id="64" value="<font style="font-size: 5px;">\(+ \xi_x\)</font>" style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];fontSize=5;labelBorderColor=none;" connectable="0" vertex="1" parent="1">
|
| 277 |
+
<mxGeometry x="297.0489802515803" y="619.9978955288949" as="geometry">
|
| 278 |
+
<mxPoint x="514" y="85" as="offset" />
|
| 279 |
+
</mxGeometry>
|
| 280 |
+
</mxCell>
|
| 281 |
+
<mxCell id="65" value="<font style="font-size: 8px;">Noise regularization</font>" style="text;html=1;strokeColor=none;fillColor=none;align=left;verticalAlign=middle;whiteSpace=wrap;rounded=0;shadow=0;glass=0;labelBorderColor=none;fontSize=6;fontColor=#000000;" vertex="1" parent="1">
|
| 282 |
+
<mxGeometry x="840" y="699" width="110" height="16" as="geometry" />
|
| 283 |
+
</mxCell>
|
| 284 |
+
<mxCell id="66" style="edgeStyle=none;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=0.5;exitY=0;exitDx=0;exitDy=0;fontSize=12;startArrow=none;startFill=0;endArrow=none;endFill=0;strokeWidth=0.6;dashed=1;dashPattern=1 1;" edge="1" source="96" parent="1">
|
| 285 |
+
<mxGeometry relative="1" as="geometry">
|
| 286 |
+
<mxPoint x="691" y="686.0000000000001" as="sourcePoint" />
|
| 287 |
+
<mxPoint x="748" y="624" as="targetPoint" />
|
| 288 |
+
</mxGeometry>
|
| 289 |
+
</mxCell>
|
| 290 |
+
<mxCell id="67" style="edgeStyle=none;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;fontSize=12;startArrow=none;startFill=0;endArrow=none;endFill=0;strokeWidth=0.6;shape=link;strokeColor=#FF0000;width=-2.4637681159420293;" edge="1" parent="1">
|
| 291 |
+
<mxGeometry relative="1" as="geometry">
|
| 292 |
+
<mxPoint x="716" y="676.5" as="sourcePoint" />
|
| 293 |
+
<mxPoint x="716" y="663.5" as="targetPoint" />
|
| 294 |
+
</mxGeometry>
|
| 295 |
+
</mxCell>
|
| 296 |
+
<mxCell id="68" style="edgeStyle=none;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;fontSize=12;startArrow=none;startFill=0;endArrow=none;endFill=0;strokeWidth=0.6;dashed=1;dashPattern=1 1;" edge="1" source="93" parent="1">
|
| 297 |
+
<mxGeometry relative="1" as="geometry">
|
| 298 |
+
<mxPoint x="687" y="650" as="sourcePoint" />
|
| 299 |
+
<mxPoint x="722" y="621" as="targetPoint" />
|
| 300 |
+
<Array as="points">
|
| 301 |
+
<mxPoint x="722" y="648" />
|
| 302 |
+
</Array>
|
| 303 |
+
</mxGeometry>
|
| 304 |
+
</mxCell>
|
| 305 |
+
<mxCell id="69" style="edgeStyle=none;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;fontSize=12;startArrow=none;startFill=0;endArrow=none;endFill=0;strokeWidth=0.6;dashed=1;dashPattern=1 1;" edge="1" source="100" parent="1">
|
| 306 |
+
<mxGeometry relative="1" as="geometry">
|
| 307 |
+
<mxPoint x="681" y="589" as="sourcePoint" />
|
| 308 |
+
<mxPoint x="722" y="604" as="targetPoint" />
|
| 309 |
+
<Array as="points">
|
| 310 |
+
<mxPoint x="722" y="584" />
|
| 311 |
+
</Array>
|
| 312 |
+
</mxGeometry>
|
| 313 |
+
</mxCell>
|
| 314 |
+
<mxCell id="70" value="" style="group;spacing=0;" connectable="0" vertex="1" parent="1">
|
| 315 |
+
<mxGeometry x="359.25" y="579.5" width="80" height="20" as="geometry" />
|
| 316 |
+
</mxCell>
|
| 317 |
+
<mxCell id="71" value="<p style="line-height: 60%;">\( \sim \hat{\mathbb{P}}(Y | X,A) \)</p>" style="rounded=1;whiteSpace=wrap;html=1;labelBorderColor=none;fontSize=8;strokeWidth=1;strokeColor=#666666;align=right;spacing=2;spacingTop=-2;" vertex="1" parent="70">
|
| 318 |
+
<mxGeometry x="16" y="1.5" width="60" height="17" as="geometry" />
|
| 319 |
+
</mxCell>
|
| 320 |
+
<mxCell id="72" value="<font style="font-size: 8px;">\(\hat{Y}\)</font>" style="ellipse;whiteSpace=wrap;html=1;aspect=fixed;fillColor=#d5e8d4;strokeColor=#82b366;strokeWidth=1;fontSize=8;" vertex="1" parent="70">
|
| 321 |
+
<mxGeometry width="20" height="20" as="geometry" />
|
| 322 |
+
</mxCell>
|
| 323 |
+
<mxCell id="73" value="<font style="font-size: 8px;">Outputs with corresponding distributions</font>" style="text;html=1;strokeColor=none;fillColor=none;align=left;verticalAlign=middle;whiteSpace=wrap;rounded=0;shadow=0;glass=0;labelBorderColor=none;fontSize=6;fontColor=#000000;" vertex="1" parent="1">
|
| 324 |
+
<mxGeometry x="838.5" y="647" width="111.5" height="16" as="geometry" />
|
| 325 |
+
</mxCell>
|
| 326 |
+
<mxCell id="74" value="" style="rounded=1;whiteSpace=wrap;html=1;labelBorderColor=none;fontSize=8;strokeWidth=1;strokeColor=#666666;" vertex="1" parent="1">
|
| 327 |
+
<mxGeometry x="804" y="646" width="31" height="18" as="geometry" />
|
| 328 |
+
</mxCell>
|
| 329 |
+
<mxCell id="75" value="" style="ellipse;whiteSpace=wrap;html=1;aspect=fixed;fillColor=#FFFFFF;strokeColor=#000000;strokeWidth=1;fontSize=8;" vertex="1" parent="1">
|
| 330 |
+
<mxGeometry x="789" y="645" width="20" height="20" as="geometry" />
|
| 331 |
+
</mxCell>
|
| 332 |
+
<mxCell id="76" value="" style="ellipse;whiteSpace=wrap;html=1;aspect=fixed;fillColor=#FFFFFF;strokeColor=#000000;strokeWidth=1;fontSize=8;" vertex="1" parent="1">
|
| 333 |
+
<mxGeometry x="789" y="620" width="20" height="20" as="geometry" />
|
| 334 |
+
</mxCell>
|
| 335 |
+
<mxCell id="77" value="<font style="font-size: 8px;">Inputs / connections</font>" style="text;html=1;strokeColor=none;fillColor=none;align=left;verticalAlign=middle;whiteSpace=wrap;rounded=0;shadow=0;glass=0;labelBorderColor=none;fontSize=6;fontColor=#000000;" vertex="1" parent="1">
|
| 336 |
+
<mxGeometry x="840" y="621" width="83" height="16" as="geometry" />
|
| 337 |
+
</mxCell>
|
| 338 |
+
<mxCell id="78" style="edgeStyle=none;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;entryX=0;entryY=0.75;entryDx=0;entryDy=0;fontSize=8;startArrow=none;startFill=0;endArrow=classicThin;endFill=1;startSize=4;endSize=4;strokeWidth=0.85;" edge="1" parent="1">
|
| 339 |
+
<mxGeometry relative="1" as="geometry">
|
| 340 |
+
<mxPoint x="812" y="624.9200000000001" as="sourcePoint" />
|
| 341 |
+
<mxPoint x="833" y="624.9200000000001" as="targetPoint" />
|
| 342 |
+
</mxGeometry>
|
| 343 |
+
</mxCell>
|
| 344 |
+
<mxCell id="79" style="edgeStyle=none;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;entryX=0;entryY=0.75;entryDx=0;entryDy=0;fontSize=8;startArrow=none;startFill=0;endArrow=classicThin;endFill=1;startSize=4;endSize=4;strokeWidth=0.85;dashed=1;" edge="1" parent="1">
|
| 345 |
+
<mxGeometry relative="1" as="geometry">
|
| 346 |
+
<mxPoint x="812" y="634.9200000000001" as="sourcePoint" />
|
| 347 |
+
<mxPoint x="833" y="634.9200000000001" as="targetPoint" />
|
| 348 |
+
</mxGeometry>
|
| 349 |
+
</mxCell>
|
| 350 |
+
<mxCell id="80" value="" style="group" vertex="1" connectable="0" parent="1">
|
| 351 |
+
<mxGeometry x="180" y="579" width="68" height="20" as="geometry" />
|
| 352 |
+
</mxCell>
|
| 353 |
+
<mxCell id="81" value="\( \sim N(0, 1) \)" style="rounded=1;whiteSpace=wrap;html=1;labelBorderColor=none;fontSize=8;strokeWidth=1;strokeColor=#666666;align=right;" vertex="1" parent="80">
|
| 354 |
+
<mxGeometry x="16" y="1.5" width="48" height="17" as="geometry" />
|
| 355 |
+
</mxCell>
|
| 356 |
+
<mxCell id="82" value="<font style="font-size: 8px;">\(Z\)</font>" style="ellipse;whiteSpace=wrap;html=1;aspect=fixed;strokeWidth=1;fontSize=8;dashed=1;" vertex="1" parent="80">
|
| 357 |
+
<mxGeometry width="20" height="20" as="geometry" />
|
| 358 |
+
</mxCell>
|
| 359 |
+
<mxCell id="83" value="" style="group" vertex="1" connectable="0" parent="1">
|
| 360 |
+
<mxGeometry x="456" y="574" width="68" height="20" as="geometry" />
|
| 361 |
+
</mxCell>
|
| 362 |
+
<mxCell id="84" value="\( \sim N(0, 1) \)" style="rounded=1;whiteSpace=wrap;html=1;labelBorderColor=none;fontSize=8;strokeWidth=1;strokeColor=#666666;align=right;" vertex="1" parent="83">
|
| 363 |
+
<mxGeometry x="16" y="1.5" width="48" height="17" as="geometry" />
|
| 364 |
+
</mxCell>
|
| 365 |
+
<mxCell id="85" value="<font style="font-size: 8px;">\(Z\)</font>" style="ellipse;whiteSpace=wrap;html=1;aspect=fixed;strokeWidth=1;fontSize=8;dashed=1;" vertex="1" parent="83">
|
| 366 |
+
<mxGeometry width="20" height="20" as="geometry" />
|
| 367 |
+
</mxCell>
|
| 368 |
+
<mxCell id="86" value="" style="group" vertex="1" connectable="0" parent="1">
|
| 369 |
+
<mxGeometry x="458" y="639" width="68" height="20" as="geometry" />
|
| 370 |
+
</mxCell>
|
| 371 |
+
<mxCell id="87" value="\( \sim N(0, 1) \)" style="rounded=1;whiteSpace=wrap;html=1;labelBorderColor=none;fontSize=8;strokeWidth=1;strokeColor=#666666;align=right;" vertex="1" parent="86">
|
| 372 |
+
<mxGeometry x="16" y="1.5" width="48" height="17" as="geometry" />
|
| 373 |
+
</mxCell>
|
| 374 |
+
<mxCell id="88" value="<font style="font-size: 8px;">\(Z\)</font>" style="ellipse;whiteSpace=wrap;html=1;aspect=fixed;strokeWidth=1;fontSize=8;dashed=1;" vertex="1" parent="86">
|
| 375 |
+
<mxGeometry width="20" height="20" as="geometry" />
|
| 376 |
+
</mxCell>
|
| 377 |
+
<mxCell id="89" value="" style="group" connectable="0" vertex="1" parent="1">
|
| 378 |
+
<mxGeometry x="540" y="640" width="60" height="50" as="geometry" />
|
| 379 |
+
</mxCell>
|
| 380 |
+
<mxCell id="90" value="" style="rounded=0;whiteSpace=wrap;html=1;shadow=0;glass=0;labelBorderColor=none;fontSize=8;strokeColor=#666666;strokeWidth=1;fillColor=#E6E6E6;fontColor=#333333;" vertex="1" parent="89">
|
| 381 |
+
<mxGeometry width="60" height="50" as="geometry" />
|
| 382 |
+
</mxCell>
|
| 383 |
+
<mxCell id="91" value="<font style="font-size: 8px;">Nuisance flow</font>" style="text;html=1;strokeColor=none;fillColor=none;align=center;verticalAlign=middle;whiteSpace=wrap;rounded=0;dashed=1;labelBorderColor=none;fontSize=6;" vertex="1" parent="89">
|
| 384 |
+
<mxGeometry width="60" height="50" as="geometry" />
|
| 385 |
+
</mxCell>
|
| 386 |
+
<mxCell id="92" value="" style="group;spacing=0;" connectable="0" vertex="1" parent="1">
|
| 387 |
+
<mxGeometry x="617.25" y="638.5" width="94" height="20" as="geometry" />
|
| 388 |
+
</mxCell>
|
| 389 |
+
<mxCell id="93" value="<p style="line-height: 60%;">\( \sim \hat{\mathbb{P}}(Y | X,A=a) \)</p>" style="rounded=1;whiteSpace=wrap;html=1;labelBorderColor=none;fontSize=8;strokeWidth=1;strokeColor=#666666;align=right;spacing=2;spacingTop=-2;" vertex="1" parent="92">
|
| 390 |
+
<mxGeometry x="16" y="1.5" width="78" height="17" as="geometry" />
|
| 391 |
+
</mxCell>
|
| 392 |
+
<mxCell id="94" value="<font style="font-size: 8px;">\(\hat{Y}\)</font>" style="ellipse;whiteSpace=wrap;html=1;aspect=fixed;fillColor=#d5e8d4;strokeColor=#82b366;strokeWidth=1;fontSize=8;" vertex="1" parent="92">
|
| 393 |
+
<mxGeometry width="20" height="20" as="geometry" />
|
| 394 |
+
</mxCell>
|
| 395 |
+
<mxCell id="95" value="\(\hat{\pi}_a (X)\)" style="rounded=1;whiteSpace=wrap;html=1;labelBorderColor=none;fontSize=8;strokeWidth=1;strokeColor=#666666;" vertex="1" parent="1">
|
| 396 |
+
<mxGeometry x="617.25" y="660.23" width="30" height="20" as="geometry" />
|
| 397 |
+
</mxCell>
|
| 398 |
+
<mxCell id="96" value="<font style="font-size: 8px;">\(A\)</font>" style="ellipse;whiteSpace=wrap;html=1;aspect=fixed;fillColor=#dae8fc;strokeColor=#6c8ebf;strokeWidth=1;fontSize=8;" vertex="1" parent="1">
|
| 399 |
+
<mxGeometry x="738" y="640.5" width="20" height="20" as="geometry" />
|
| 400 |
+
</mxCell>
|
| 401 |
+
<mxCell id="97" style="edgeStyle=none;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=0.5;exitY=0;exitDx=0;exitDy=0;entryX=0.5;entryY=1;entryDx=0;entryDy=0;fontSize=8;endArrow=classicThin;endFill=1;strokeWidth=1;endSize=4;startSize=4;" edge="1" source="98" target="30" parent="1">
|
| 402 |
+
<mxGeometry relative="1" as="geometry">
|
| 403 |
+
<mxPoint x="575" y="607" as="targetPoint" />
|
| 404 |
+
<mxPoint x="575" y="622" as="sourcePoint" />
|
| 405 |
+
</mxGeometry>
|
| 406 |
+
</mxCell>
|
| 407 |
+
<mxCell id="98" value="<font style="font-size: 8px;">\(a\)</font>" style="ellipse;whiteSpace=wrap;html=1;aspect=fixed;fillColor=#FFFFFF;strokeColor=#6c8ebf;strokeWidth=1;fontSize=8;" vertex="1" parent="1">
|
| 408 |
+
<mxGeometry x="560" y="615" width="20" height="20" as="geometry" />
|
| 409 |
+
</mxCell>
|
| 410 |
+
<mxCell id="99" value="" style="group;spacing=0;" connectable="0" vertex="1" parent="1">
|
| 411 |
+
<mxGeometry x="619" y="574" width="96" height="20" as="geometry" />
|
| 412 |
+
</mxCell>
|
| 413 |
+
<mxCell id="100" value="<p style="line-height: 60%;">\( \sim \hat{\mathbb{P}}^{\text{A-IPTW}}(Y [a]) \)</p>" style="rounded=1;whiteSpace=wrap;html=1;labelBorderColor=none;fontSize=8;strokeWidth=1;strokeColor=#666666;align=right;spacing=2;spacingTop=-2;" vertex="1" parent="99">
|
| 414 |
+
<mxGeometry x="16" y="1.5" width="80" height="17" as="geometry" />
|
| 415 |
+
</mxCell>
|
| 416 |
+
<mxCell id="101" value="<font style="font-size: 8px;">\(\hat{Y}^a\)</font>" style="ellipse;whiteSpace=wrap;html=1;aspect=fixed;fillColor=#d5e8d4;strokeColor=#82b366;strokeWidth=1;fontSize=8;" vertex="1" parent="99">
|
| 417 |
+
<mxGeometry width="20" height="20" as="geometry" />
|
| 418 |
+
</mxCell>
|
| 419 |
+
<mxCell id="102" value="<font style="font-size: 14px;">Legend</font>" style="text;html=1;strokeColor=none;fillColor=none;align=center;verticalAlign=middle;whiteSpace=wrap;rounded=0;shadow=0;glass=0;labelBorderColor=none;fontSize=6;fontColor=#000000;" vertex="1" parent="1">
|
| 420 |
+
<mxGeometry x="780" y="535" width="180" height="24" as="geometry" />
|
| 421 |
+
</mxCell>
|
| 422 |
+
<mxCell id="103" value="<p style="line-height: 1.1;"><span style="font-size: 8px; background-color: rgb(255, 255, 255);">One-step bias correction \(a\)</span></p>" style="text;html=1;strokeColor=none;fillColor=none;align=center;verticalAlign=middle;whiteSpace=wrap;rounded=0;shadow=0;glass=0;labelBorderColor=none;fontSize=6;fontColor=#000000;spacing=0;" vertex="1" parent="1">
|
| 423 |
+
<mxGeometry x="710" y="607" width="50" height="18" as="geometry" />
|
| 424 |
+
</mxCell>
|
| 425 |
+
<mxCell id="104" value="<font style="font-size: 8px;">\(a\)</font>" style="ellipse;whiteSpace=wrap;html=1;aspect=fixed;fillColor=#FFFFFF;strokeColor=#6c8ebf;strokeWidth=1;fontSize=8;" vertex="1" parent="1">
|
| 426 |
+
<mxGeometry x="737" y="570" width="20" height="20" as="geometry" />
|
| 427 |
+
</mxCell>
|
| 428 |
+
<mxCell id="105" style="edgeStyle=none;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;fontSize=12;startArrow=none;startFill=0;endArrow=none;endFill=0;strokeWidth=0.6;dashed=1;dashPattern=1 1;entryX=0.5;entryY=1;entryDx=0;entryDy=0;" edge="1" target="104" parent="1">
|
| 429 |
+
<mxGeometry relative="1" as="geometry">
|
| 430 |
+
<mxPoint x="747" y="606" as="sourcePoint" />
|
| 431 |
+
<mxPoint x="758" y="601" as="targetPoint" />
|
| 432 |
+
</mxGeometry>
|
| 433 |
+
</mxCell>
|
| 434 |
+
<mxCell id="106" style="edgeStyle=none;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;fontSize=12;startArrow=none;startFill=0;endArrow=none;endFill=0;strokeWidth=0.6;shape=link;strokeColor=#FF0000;width=-2.4637681159420293;" edge="1" parent="1">
|
| 435 |
+
<mxGeometry relative="1" as="geometry">
|
| 436 |
+
<mxPoint x="716" y="654.5" as="sourcePoint" />
|
| 437 |
+
<mxPoint x="716" y="641.5" as="targetPoint" />
|
| 438 |
+
</mxGeometry>
|
| 439 |
+
</mxCell>
|
| 440 |
+
<mxCell id="107" style="edgeStyle=none;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;entryX=0.5;entryY=1;entryDx=0;entryDy=0;endArrow=classicThin;endFill=1;strokeWidth=0.85;endSize=4;startSize=4;" edge="1" source="10" target="5" parent="1">
|
| 441 |
+
<mxGeometry relative="1" as="geometry">
|
| 442 |
+
<Array as="points">
|
| 443 |
+
<mxPoint x="320" y="686" />
|
| 444 |
+
</Array>
|
| 445 |
+
<mxPoint x="247" y="684" as="sourcePoint" />
|
| 446 |
+
<mxPoint x="290" y="669" as="targetPoint" />
|
| 447 |
+
</mxGeometry>
|
| 448 |
+
</mxCell>
|
| 449 |
+
</root>
|
| 450 |
+
</mxGraphModel>
|
| 451 |
+
</diagram>
|
| 452 |
+
</mxfile>
|
2209.06203/main_diagram/main_diagram.pdf
ADDED
|
Binary file (65 kB). View file
|
|
|
2209.06203/paper_text/intro_method.md
ADDED
|
@@ -0,0 +1,178 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Introduction
|
| 2 |
+
|
| 3 |
+
Causal inference increasingly makes use of machine learning methods to estimate treatment effects from observational data [e.g., @van2011targeted; @kunzel2019metalearners; @curth2021nonparametric; @kennedy2022semiparametric]. This is relevant for various fields including medicine [e.g., @bica2021real], marketing [e.g., @yang2020targeting], and policy-making [e.g., @huenermund2021causal]. Here, causal inference from observational data promises great value, especially when experiments for determining treatment effects are costly or even unethical.
|
| 4 |
+
|
| 5 |
+
The vast majority of the machine learning methods for causal inference estimate *averaged* quantities expressed by the (conditional) mean of potential outcomes. Examples of such quantities are the average treatment effect (ATE) [e.g., @shi2019adapting; @hatt2021estimating], the conditional average treatment effect (CATE) [e.g., @shalit2017estimating; @hassanpour2019learning; @zhang2020learning], and treatment-response curves [e.g., @bica2020estimating; @nie2021vcnet]. Importantly, these estimates only describe averages *without* distributional properties.
|
| 6 |
+
|
| 7 |
+
However, making decisions based on averaged causal quantities can be misleading and, in some applications, even dangerous [@spiegelhalter2017risk; @van2019communicating]. On the one hand, if potential outcomes have different variances or number of modes, relying on the average quantities provides incomplete information about potential outcomes, and may inadvertently lead to local -- and not global -- optima during decision-making. On the other hand, distributional knowledge is needed to account for uncertainty in potential outcomes and thus informs how likely a certain outcome is. For example, in medicine, knowing the distribution of potential outcomes is highly important [@gische2021beyond]: it gives the probability that the potential outcome lies in a desired range, and thus defines the probability of treatment success or failure.[^1] Motivated by this, we aim to estimate the ***density*** of potential outcomes.
|
| 8 |
+
|
| 9 |
+
<figure id="fig:cond-inter-counter" data-latex-placement="tbp">
|
| 10 |
+
<div class="center">
|
| 11 |
+
<div class="minipage">
|
| 12 |
+
<img src="figures/cond-inter-counter" />
|
| 13 |
+
</div>
|
| 14 |
+
<div class="minipage">
|
| 15 |
+
<p><span class="math display">$$\begin{align*}
|
| 16 |
+
X := & U_X; \quad U_X \sim \text{Mixture}\big(0.5 N(0, 1) + 0.5 N(b, 1) \big) \\
|
| 17 |
+
\pi(x) & = \frac{N(x; 0, 1)}{N(x; 0, 1) + N(x; b, 1)} \\
|
| 18 |
+
A := & \begin{cases}
|
| 19 |
+
1, & -U_A < \log \big( \pi(x) / (1 - \pi(x))\big)\\
|
| 20 |
+
0, & \text{otherwise}
|
| 21 |
+
\end{cases}; U_A \sim \text{Logistic}(0, 1) \\
|
| 22 |
+
Y := & U_Y + \begin{cases}
|
| 23 |
+
X^2 -1.82 X + 2, & A = 1 \\
|
| 24 |
+
2.18 X + 1.5, & A = 0 \end{cases}; \quad U_Y \sim N(0, 1)
|
| 25 |
+
\end{align*}$$</span></p>
|
| 26 |
+
</div>
|
| 27 |
+
</div>
|
| 28 |
+
<figcaption>Motivating example showing the densities of observational, interventional, and counterfactual distributions of outcome <span class="math inline"><em>Y</em></span>. These are simulated via the structural causal model on the right (here: <span class="math inline"><em>N</em>(<em>x</em>; <em>μ</em>, <em>σ</em><sup>2</sup>)</span> are densities of the normal distribution; and <span class="math inline"><em>b</em> = 3</span> is a covariates shift, which regulates the probability of treatment assignment). Potential outcomes have different distributions but the same mean <span class="math inline">𝔼(<em>Y</em>[0]) = 𝔼(<em>Y</em>[1]) ≈ 4.77</span> and the same variance <span class="math inline">var (<em>Y</em>[0]) = var (<em>Y</em>[1]) ≈ 4.06</span>. Here, <span class="math inline"><em>Y</em>[<em>a</em>]</span> is the potential outcome given treatment <span class="math inline"><em>a</em></span>. <strong>(a)</strong> Interventional distributions. <strong>(b)</strong> and <strong>(c)</strong> Observational and counterfactual distributions for the same outcomes. As shown here, the observational, interventional, and counterfactual distributions can be substantially different.</figcaption>
|
| 29 |
+
</figure>
|
| 30 |
+
|
| 31 |
+
An example highlighting the need for estimating the ***density*** of potential outcomes is shown in Fig. [1](#fig:cond-inter-counter){reference-type="ref" reference="fig:cond-inter-counter"}. Here, we simulated outcomes according to a given structural causal model (SCM). The potential outcomes $Y[a]$ can be sampled by setting the binary treatment to a specific value in the equation for $Y$ (cf. Appendix [8](#app:background){reference-type="ref" reference="app:background"}). At the same time, by filtering for only the (un)treated population and applying the same equation with a counterfactual treatment, we obtain counterfactual outcomes $Y[a] \mid A = a'$. We observe that the potential outcomes have the same mean (i.e., $\mathbb{E}(Y[0]) = \mathbb{E}(Y[1])$) and the same variance (i.e., $\operatorname{var}(Y[0]) = \operatorname{var}(Y[1])$). Hence, the ground-truth ATE equals zero. Nevertheless, the distributions of potential outcomes (i. e., $\mathbb{P}(Y[a])$) are clearly different. Hence, in medical practice, acting upon the ATE without knowledge of the distributions of potential outcomes could have severe, negative effects. To show this, let us consider a "do nothing" treatment ($a=0$) and some medical treatment ($a=1$). Further, let us consider an outcome to be successful if some risk score $Y$ is below the threshold of five. Then, the probability of treatment success (i. e., $\mathbb{P}(Y[1] < 5.0) \approx 0.63$) is much larger than the probability of success after the "do nothing" treatment (i. e., $\mathbb{P}(Y[0] < 5.0) \approx 0.51$), highlighting the importance of treatment.
|
| 32 |
+
|
| 33 |
+
In this paper, we aim to estimate the ***density*** of potential outcomes after intervention $a$, i. e., $\mathbb{P}(Y[a] = y)$. From this point on, we refer to this task as ***interventional density estimation*** (IDE). Estimating the density of interventions has several crucial advantages: it allows us to identify multi-modalities in the distribution of potential outcomes; it allows us to estimate quantiles of the distribution; and it allows us to compute the probability with which a potential outcome lies in a certain range. Importantly, traditional density estimation methods are **not** applicable for IDE due to the fundamental problem of causal inference: that is, the counterfactual outcomes are typically never observed, and, hence, a sample from the ground-truth interventional distribution is also inaccessible. Efficient IDE is also significantly more challenging than an efficient estimation of the averaged causal quantities. The reason is that density is a functional, infinite-dimensional target estimand, and, hence, standard efficiency theory is **not** applicable.
|
| 34 |
+
|
| 35 |
+
:::: table*
|
| 36 |
+
::: center
|
| 37 |
+
:::
|
| 38 |
+
::::
|
| 39 |
+
|
| 40 |
+
Existing literature offers either *semi- or non-parametric* methods for IDE.[^2] Examples are kernel density estimation [@kim2018causal] and kernel mean embeddings of distributions [@muandet2021counterfactual]. However, both methods have a crucial limitation: estimated densities could be unnormalized or even return negative values (which, by definition, is not possible). Furthermore, both methods neither scale well with the sample size nor with the dimensionality of covariates. As a remedy, @kennedy2021semiparametric introduced a theory for efficient *semi-parametric* IDE estimation, rendering fully-parametric modeling possible. However, the authors did not provide a proper, flexible instantiation of the theory: the solutions proposed in [@kennedy2021semiparametric] are either (i) non-universal (e. g., limited to the exponential family) or (ii) not proper density estimators (e. g., the truncated series estimator).
|
| 41 |
+
|
| 42 |
+
Here, we propose a *proper fully-parametric* method. Different from semi- and non-parametric methods, our fully-parametric method has several practical advantages: it automatically provides properly normalized density estimators, it allows one to sample from the estimated density, and it generally scales well with large and high-dimensional datasets. However, to the best of our knowledge, there is no fully-parametric, deep learning method for IDE. To achieve this, we later make a non-trivial extension of the theoretical results for semi-parametric IDE estimation from [@kennedy2021semiparametric] adapted to fully-parametric IDE estimation.
|
| 43 |
+
|
| 44 |
+
In this paper, we develop a novel, fully-parametric deep learning method: ***Interventional Normalizing Flows*** (INFs). Our INFs build upon normalizing flows (NFs) [@tabak2010density; @rezende2015variational], which we carefully adapt for causal inference. This requires several non-trivial adaptations. Specifically, we combine two NFs: a (i) nuisance flow for estimating nuisance parameters, and a (ii) target flow for a parametric estimation of the density of potential outcomes. Here, we construct a novel, tractable optimization objective based on a one-step bias correction to allow for efficient and doubly robust estimation. In the end, we develop a two-step training procedure to train both the nuisance and the target flows.
|
| 45 |
+
|
| 46 |
+
Overall, our **main contributions** are the following:[^3]
|
| 47 |
+
|
| 48 |
+
1. We introduce the first proper fully-parametric, deep learning method for interventional density estimation, called *Interventional Normalizing Flows* (INFs). Our INFs provide a properly normalized density estimator.
|
| 49 |
+
|
| 50 |
+
2. We extend the results of [@kennedy2021semiparametric] and derive a tractable optimization problem with a one-step bias correction for efficient and doubly robust estimation. This allows for an effective two-step training procedure with our INFs.
|
| 51 |
+
|
| 52 |
+
3. We demonstrate in various experiments that our INFs are highly expressive and effective. A major advantage owed to the parametric form of the target flow is that our INFs scale well to both large and high-dimensional datasets in comparison to other non- and semi-parametric methods.
|
| 53 |
+
|
| 54 |
+
# Method
|
| 55 |
+
|
| 56 |
+
**Notation.** Let $\mathbb{P}(Z)$ be a distribution of a random variable $Z$, and let $\mathbb{P}(Z=z)$ be its density or probability mass function. Let $\pi_a(x) = \mathbb{P}(A = a \mid X = x)$ denote the propensity score. Further, $\mathbbm{1}(\cdot)$ is the indicator function; $\mathbb{P}_n\{f(X)\} = \frac{1}{n}\sum_{i=1}^n f(X_i)$ is the sample average of a random variable $f(X)$; and $\mathbb{P}_b^\mathcal{B}\{f(X)\}$ is the average evaluated on a minibatch $\mathcal{B}$ of size $b$. For readability, we sometimes highlight random variables and the corresponding averaging operator in [green color]{style="color: ForestGreen"}. Furthermore, $\mathbb{P}(Y \mid X, A)$ is the conditional distribution of the outcome $Y$.
|
| 57 |
+
|
| 58 |
+
**Problem statement.** In this work, we aim at estimating the ***interventional density*** from observational data, namely $\hat{\mathbb{P}}(Y[a] = y)$. To compare the goodness-of-fit of different estimators, we evaluate the distributional distance between the ground-truth interventional density and the estimated density. Such distributional distances include, e.g., the average log-probability and the empirical Wasserstein distance.
|
| 59 |
+
|
| 60 |
+
We build upon the standard setting of the potential outcomes framework [@rubin1974estimating], where $Y[a]$ stands for the potential outcome after intervening on treatment by setting it to $a$. That is, we consider an observational sample $\mathcal{D}$ with $d_X$-dimensional covariates $X \in \mathcal{X} \subseteq \mathbb{R}^{d_X}$, a treatment $A \in \{0, 1\}$, and a $d_Y$-dimensional continuous outcome $Y \in \mathcal{Y} \subseteq \mathbb{R}^{d_Y}$, drawn i.i.d. We consider $d_Y = 1$ if not stated explicitly. We assume the treatment to be binary, but note that our INFs also work with categorical treatments. We denote $\mathcal{D} = \{X_i, A_i, Y_i\}_{i=1}^n \sim \mathbb{P}(X, A, Y)$, where $n$ is the sample size, and $i$ is the index of an observation. For example, in critical care, the patient covariates $X$ are different risk factors (e.g., age, gender, weight, prior diseases), the treatment is whether a ventilator is applied, and the outcome is the probability of patient survival. The covariates $X$ are also called confounders if $\mathbb{P}(Y[a]) \neq \mathbb{P}(Y \mid A = a)$.
|
| 61 |
+
|
| 62 |
+
**Identifiability.** To identify the interventional density, we make the following identifiability assumptions with respect to the data-generating mechanism of $\mathcal{D}$: (1) *Positivity:* For some $\epsilon > 0$, $\mathbb{P}(1 - \epsilon \ge \pi_a(X) \ge \epsilon) = 1$. (2) *Consistency:* If $A = a$ for some patient, then $Y = Y[a]$. (3) *Exchangeability:* $A \perp\!\!\!\!\perp Y[a] \mid X$ for all $a$. Note that these assumptions are standard in the literature [@kim2018causal; @muandet2021counterfactual; @kennedy2021semiparametric]. Under assumptions (1)--(3), the density of interventional distribution $\mathbb{P}(Y[a])$ can be expressed in terms of observational distribution with back-door adjustment, i.e., $$\begin{align}
|
| 63 |
+
\label{eq:backdoor}
|
| 64 |
+
\begin{split}
|
| 65 |
+
\mathbb{P}(Y[a] = y) & = \underset{X \sim \mathbb{P}(X)}{\mathbb{E}} \big( \mathbb{P}(Y = y\mid X, A=a) \big),
|
| 66 |
+
\end{split}
|
| 67 |
+
\end{align}$$ where $\mathbb{P}(Y = y \mid X, A)$ is the conditional density of the outcome. For more details on the potential outcomes framework and identifiability, we refer to Appendix [8](#app:background){reference-type="ref" reference="app:background"}.
|
| 68 |
+
|
| 69 |
+
**Plug-in estimator.** A straightforward approach for IDE [@robins2001comment] is the following: first, one estimates the conditional outcome distribution, $\hat{\mathbb{P}}(Y \mid X, A)$ (here, any method for conditional density estimation could be used). Then, one takes a sample average over covariates $X$: $$\begin{equation}
|
| 70 |
+
\label{eq:plugin}
|
| 71 |
+
\hat{\mathbb{P}}^{\text{PI}}(Y[a] = y) = \textcolor{ForestGreen}{\mathbb{P}_n}\{\hat{\mathbb{P}}(Y = y \mid \textcolor{ForestGreen}{X}, A = a)\}.
|
| 72 |
+
\end{equation}$$ This estimator is an unbiased but inefficient estimator of interventional density, which is known as *semi-parametric plug-in estimator*. Semi-parametric IDE, unlike, e. g., semi-parametric ATE estimation, is highly problematic. For large sample sizes, the semi-parametric estimator requires averaging over the full sample for each evaluation point. Motivated by this, we aim to develop a proper fully-parametric estimator.
|
| 73 |
+
|
| 74 |
+
[]{#sec:fully-parametric-ide label="sec:fully-parametric-ide"} In this section, we introduce a theory for fully-parametric estimation of interventional density. First, we provide a theoretic background, as introduced in [@kennedy2021semiparametric]. Here, we describe a projection parameter as a solution to the moment condition and then we list two estimators, i. e., *covariate-adjusted (CA) estimator* and efficient *augmented inverse propensity of treatment weighted (A-IPTW) estimator*. Second, we elaborate on the A-IPTW estimator and translate it into an optimization objective, which constitutes one of our contributions.
|
| 75 |
+
|
| 76 |
+
We start by defining a parametric model, $\left\{g(y; \beta_a) \mid \beta_a \in \mathbb{R}^d\right\}$, where $\beta_a \in \mathbb{R}^d$ are the parameters of the estimator, and $g(\cdot; \beta_a)$ is a density, i. e., $\int_{y \in \mathcal{Y}} g(y; \beta_a) \mathop{}\!\mathrm{d}y = 1$. For IDE, we approximate the interventional distribution $\mathbb{P}(Y[a])$ with a distribution from our parametric model. We aim at minimizing the distributional distance (specifically KL-divergence) between $\mathbb{P}(Y[a])$ and $g(\cdot; \beta_a)$ via $$\begin{align}
|
| 77 |
+
\label{eq:kl-min}
|
| 78 |
+
\begin{split}
|
| 79 |
+
\hat{\beta}_a = & \mathop{\mathrm{arg\,min}}_{\beta_a}\operatorname{KL} \big(\mathbb{P}(Y[a]) \;\big\Vert\; g(\cdot; \beta_a) \big) \\
|
| 80 |
+
%\argmin_{\beta_a} \int_{\mathcal{Y}} \log{\bigg(\frac{\mathbb{P}(Y[a] = y)}{g(y; \beta_a) }\bigg)} \, \mathbb{P}(Y[a] = y) \diff y \\
|
| 81 |
+
= & \mathop{\mathrm{arg\,min}}_{\beta_a} \underset{Y^a \sim \mathbb{P}(Y[a])}{\mathbb{E}} \big( - \log g(Y^a; \beta_a) \big) ,
|
| 82 |
+
\end{split}
|
| 83 |
+
\end{align}$$ where $\hat{\beta}_a$ are called *projection parameters* as they project the true interventional density onto a class $\{g(\cdot; \beta_a); \beta_a \in \mathbb{R}^d\}$.
|
| 84 |
+
|
| 85 |
+
**Covariate-adjusted estimator.** Let the $d$-dimensional random variable $T(Y; \beta_a) = - \nabla_{\beta_a} \log g(Y; \beta_a)$ denote the score function. Following @kennedy2021semiparametric, the projection parameters can be equivalently expressed under mild conditions[^7] as a solution to the *moment condition* $m(\beta_a) \stackrel{!}{=} 0$, where $$\begin{align}
|
| 86 |
+
\label{eq:moment-cond}
|
| 87 |
+
\begin{split}
|
| 88 |
+
m(\beta_a) & = \underset{Y^a \sim \mathbb{P}(Y[a])}{\mathbb{E}} T(Y^a; \beta_a) \\
|
| 89 |
+
& = \underset{X \sim \mathbb{P}(X)}{\mathbb{E}} \Big( \mathbb{E} \big( T(Y; \beta_a) \mid X, A = a \big) \Big).
|
| 90 |
+
\end{split}
|
| 91 |
+
\end{align}$$ Here, the moment condition is the expected score function of the potential outcome. Throughout the paper, we assume that the moment condition has a unique solution, and, therefore, the minimization task in Eq. [\[eq:kl-min\]](#eq:kl-min){reference-type="eqref" reference="eq:kl-min"} and the root-finding task in Eq. [\[eq:moment-cond\]](#eq:moment-cond){reference-type="eqref" reference="eq:moment-cond"} are equivalent.
|
| 92 |
+
|
| 93 |
+
In practice, we have neither observations from the interventional distribution nor counterfactual outcomes. Therefore, we cannot use the ground-truth $\mathbb{P}(Y[a])$ but, instead, must use the plug-in estimator distribution from Eq. [\[eq:plugin\]](#eq:plugin){reference-type="eqref" reference="eq:plugin"}. Specifically, we can obtain a plug-in estimator of projection parameters, i. e., $\hat{\beta}_a^{\text{PI}}$, either by minimizing a cross-entropy loss or by solving the moment condition, both of which are equivalent: $$\begin{align}
|
| 94 |
+
\begin{split}
|
| 95 |
+
& \hat{\beta}_a^{\text{PI}} = \mathop{\mathrm{arg\,min}}_{\beta_a} \underset{\hat{Y}^a \sim \textcolor{ForestGreen}{\mathbb{P}_n}\{\hat{\mathbb{P}}(Y \mid \textcolor{ForestGreen}{X}, A = a)\}}{\mathbb{E}} \hspace{-0.5cm} - \log g(\hat{Y}^a; \beta_a) \label{eq:projection-plugin} \\
|
| 96 |
+
\Longleftrightarrow \, & \hat{m}^{\text{PI}}(\beta_a) = \underset{\hat{Y}^a \sim \textcolor{ForestGreen}{\mathbb{P}_n}\{\hat{\mathbb{P}}(Y \mid \textcolor{ForestGreen}{X}, A = a)\}}{\mathbb{E}} \hspace{-0.5cm} T(\hat{Y}^a; \beta_a) \stackrel{!}{=} 0.
|
| 97 |
+
\end{split}
|
| 98 |
+
%\label{eq:moment-eq-plugin}
|
| 99 |
+
\end{align}$$ Then, we can define a *parametric covariate-adjusted* (CA) estimator as $\hat{\mathbb{P}}^{\text{CA}}(Y[a] = y) = g(y; \hat{\beta}_a^{\text{PI}})$. By choosing a sufficiently expressive class of densities for both $g$ and the conditional density estimator $\hat{\mathbb{P}}(Y \mid X, A)$ (e. g., normalizing flows), CA can be shown to consistently estimate the interventional density (see Appendix B.5 in @kennedy2021semiparametric).
|
| 100 |
+
|
| 101 |
+
**Augmented inverse propensity of treatment weighted estimator.** In the following, we aim to develop an efficient estimator of the projection parameter $\hat{\beta}_a$ from Eq. [\[eq:kl-min\]](#eq:kl-min){reference-type="eqref" reference="eq:kl-min"} or, equivalently, the moment condition $\hat{m}(\beta_a)$ at fixed $\beta_a$ from Eq. [\[eq:moment-cond\]](#eq:moment-cond){reference-type="eqref" reference="eq:moment-cond"}. For this, we make use of semi-parametric efficiency theory [@van2003unified; @kennedy2021semiparametric]. We provide a background on efficiency theory in Appendix [8](#app:background){reference-type="ref" reference="app:background"}.
|
| 102 |
+
|
| 103 |
+
@kennedy2022semiparametric showed that the efficient influence function $\phi_a(T; \mathbb{P})$ for the functional $\mathbb{E} ( \mathbb{E} ( T \mid X, A = a))$ equals $$\begin{align}
|
| 104 |
+
\label{eq:eif}
|
| 105 |
+
\begin{split}
|
| 106 |
+
& \phi_a(T; \textcolor{BrickRed}{\mathbb{P}}) = \frac{\mathbbm{1}(A = a)}{\textcolor{BrickRed}{\pi_a(}X \textcolor{BrickRed}{)}} \Big(T - \textcolor{BrickRed}{\mathbb{E}(} T \mid X, A=a \textcolor{BrickRed}{)} \Big) \\
|
| 107 |
+
& \, \, + \textcolor{BrickRed}{\mathbb{E}(} T \mid X, A=a \textcolor{BrickRed}{)} - \textcolor{BrickRed}{\underset{X \sim \mathbb{P}(X)}{\mathbb{E}} ( \mathbb{E} ( T \mid X, A = a))}.
|
| 108 |
+
\end{split}
|
| 109 |
+
\end{align}$$ Here, we use [red color]{style="color: BrickRed"} to show the nuisance parameters of $\textcolor{BrickRed}{\mathbb{P}}$ that are influencing the functional. We emphasize that the nuisance parameters (i. e., the propensity score and conditional expectations/probabilities) can be either known or estimated.
|
| 110 |
+
|
| 111 |
+
The efficient influence function in Eq. [\[eq:eif\]](#eq:eif){reference-type="eqref" reference="eq:eif"} allows us to construct an efficient estimator of the moment condition. Following [@kennedy2021semiparametric], we transform the plug-in estimator $\hat{m}^\text{PI}(\beta_a)$ from Eq. [\[eq:projection-plugin\]](#eq:projection-plugin){reference-type="eqref" reference="eq:projection-plugin"} into an efficient estimator with the help of a *one-step bias correction*. In our case, the bias-corrected moment condition has the following form: $$\begin{equation}
|
| 112 |
+
\label{eq:one-step-corrected}
|
| 113 |
+
\hspace{-0.2cm}
|
| 114 |
+
\hat{m}^\text{A-IPTW}(\beta_a) = \hat{m}^\text{PI}(\beta_a) + \mathbb{P}_n\big\{\phi_a (T(Y; \beta_a); \hat{\mathbb{P}})\big\} \stackrel{!}{=} 0,
|
| 115 |
+
\end{equation}$$ where $\hat{\mathbb{P}} = \{\hat{\pi}_a(x), \hat{\mathbb{P}}(Y \mid X, A)\}$ are the estimated nuisance parameters of $\mathbb{P}$. We call the solution of the bias-corrected moment equation $\hat{\beta}_a^{\text{A-IPTW}}$ an *augmented inverse propensity of treatment weighted* (A-IPTW) estimator of the projection parameters. Then, the estimated interventional density is $\hat{\mathbb{P}}^{\text{A-IPTW}}(Y[a] = y) = g(y; \hat{\beta}_a^{\text{A-IPTW}})$.
|
| 116 |
+
|
| 117 |
+
Previously, @kennedy2021semiparametric proposed to directly solve the bias-corrected moment condition, i. e., a system of nonlinear equations, yet which is generally much harder to solve computationally. In contrast, we develop an optimization objective that can be directly incorporated into a loss of a deep learning density estimator. For that, we transform the bias-corrected moment condition into the following tractable optimization task (see Appendix [9](#app:aiptw-optim){reference-type="ref" reference="app:aiptw-optim"} for all details).
|
| 118 |
+
|
| 119 |
+
We first note that the plug-in estimator of moment condition $\hat{m}^\text{PI}(\beta_a)$ can be rewritten as $$\begin{align}
|
| 120 |
+
& \hat{m}^{\text{PI}}(\beta_a) = \underset{\hat{Y}^a \sim \textcolor{ForestGreen}{\mathbb{P}_n}\{\hat{\mathbb{P}}(Y \mid \textcolor{ForestGreen}{X}, A = a)\}}{\mathbb{E}} T(\hat{Y}^a; \beta_a) \\
|
| 121 |
+
&= \int_\mathcal{Y} T(y; \beta_a) \, \textcolor{ForestGreen}{\mathbb{P}_n}\{ \hat{\mathbb{P}}(Y = y \mid \textcolor{ForestGreen}{X}, A = a) \} \mathop{}\!\mathrm{d}y \\
|
| 122 |
+
% &= \textcolor{ForestGreen}{\mathbb{P}_n}\bigg\{ \int_\mathcal{Y} T(y; \beta_a) \, \hat{\mathbb{P}}(Y = y \mid \textcolor{ForestGreen}{X}, A = a) \diff y \bigg\} \\
|
| 123 |
+
& = \textcolor{ForestGreen}{\mathbb{P}_n}\Big\{ \hat{\mathbb{E}}\big(T(Y; \beta_a) \mid \textcolor{ForestGreen}{X}, A = a \big)\Big\},
|
| 124 |
+
\end{align}$$ where the last equality follows from the definition of the conditional expectation. Then, we notice that the last term of the influence function, $\textcolor{BrickRed}{\underset{X \sim \mathbb{P}(X)}{\mathbb{E}} ( \mathbb{E} ( T \mid X, A = a))}$, is, in fact, non-random and can be brought out of the sample average in Eq. [\[eq:one-step-corrected\]](#eq:one-step-corrected){reference-type="eqref" reference="eq:one-step-corrected"}. Furthermore, after switching from $\mathbb{P}$ to $\hat{\mathbb{P}}$, this term exactly coincides with $\hat{m}^{\text{PI}}(\beta_a)$, so that the one-step bias-corrected equation is simplified to $$\begin{align}
|
| 125 |
+
& \hat{m}^\text{A-IPTW}(\beta_a)
|
| 126 |
+
= \underset{\hat{Y}^a \sim \textcolor{ForestGreen}{\mathbb{P}_n}\{\textcolor{BrickRed}{\hat{\mathbb{P}}(}Y \mid \textcolor{ForestGreen}{X}, A = a \textcolor{BrickRed}{)}\}}{\mathbb{E}} T(\hat{Y}^a; \beta_a) \\
|
| 127 |
+
& + \textcolor{ForestGreen}{\mathbb{P}_n}\bigg\{ \frac{\mathbbm{1}(\textcolor{ForestGreen}{A} = a)}{\textcolor{BrickRed}{\hat{\pi}_a(}\textcolor{ForestGreen}{X} \textcolor{BrickRed}{)}} \Big(T(\textcolor{ForestGreen}{Y}; \beta_a) - \underset{Y \sim \textcolor{BrickRed}{\hat{\mathbb{P}}(}Y \mid \textcolor{ForestGreen}{X}, A = a \textcolor{BrickRed}{)}}{\mathbb{E}} T(Y; \beta_a) \Big) \bigg\}.
|
| 128 |
+
\end{align}$$ After taking the antiderivative with respect to $\beta_a$, we yield the following optimization objective $$\begin{align}
|
| 129 |
+
\label{eq:aiptw}
|
| 130 |
+
\hspace{-1.2cm}
|
| 131 |
+
& \scriptsize
|
| 132 |
+
\hat{\beta}_a^{\text{A-IPTW}} = \mathop{\mathrm{arg\,min}}_{\beta_a} \Bigg[ \underbrace{\underset{\hat{Y}^a \sim \textcolor{ForestGreen}{\mathbb{P}_n}\{\hat{\mathbb{P}}(Y \mid \textcolor{ForestGreen}{X}, A = a)\}}{\mathbb{E}} \bigg( - \log g(\hat{Y}^a; \beta_a) \bigg) }_{\text{cross-entropy loss}} \hspace{-0.3cm} \\
|
| 133 |
+
& \scriptsize
|
| 134 |
+
- \underbrace{\textcolor{ForestGreen}{\mathbb{P}_n} \bigg\{ \frac{\mathbbm{1}(\textcolor{ForestGreen}{A} = a)}{\hat{\pi}_a(\textcolor{ForestGreen}{X})} \Big(\log g(\textcolor{ForestGreen}{Y}; \beta_a) - \underset{Y \sim \hat{\mathbb{P}}(Y \mid \textcolor{ForestGreen}{X}, A = a)}{\mathbb{E}} \big( \log g(Y; \beta_a) \big) \Big) \bigg\}}_{\text{one-step bias correction}} \Bigg] .
|
| 135 |
+
\nonumber
|
| 136 |
+
\end{align}$$
|
| 137 |
+
|
| 138 |
+
Unlike the plug-in estimator ($\hat{\beta}_a^{\text{PI}}$), the A-IPTW estimator achieves efficiency and possesses a double robustness property. Here, formally speaking, we still mean efficiency with respect to the moment condition, i. e., $m(\beta_a)$. This way of defining efficiency is particularly useful when the solution to the moment condition in Eq. [\[eq:moment-cond\]](#eq:moment-cond){reference-type="eqref" reference="eq:moment-cond"} is non-unique, e. g., due to the usage of parametric deep learning models. In this case, we can informally define the so-called efficient estimation of the projection parameters with respect to the equivalence class. All the parameters $\hat{\beta}_a^{\text{A-IPTW}}$, which fall into this class, will satisfy the efficiently estimated moment condition, Eq. [\[eq:one-step-corrected\]](#eq:one-step-corrected){reference-type="eqref" reference="eq:one-step-corrected"}.
|
| 139 |
+
|
| 140 |
+
<figure id="fig:tar-norm-flow" data-latex-placement="tbp">
|
| 141 |
+
<div class="center">
|
| 142 |
+
<img src="figures/INF-scheme" style="width:94.0%" />
|
| 143 |
+
</div>
|
| 144 |
+
<figcaption>Overview of <em>Interventional Normalizing Flows</em>. Our INFs combine two normalizing flows, which we call “<span>nuisance flow</span>” and “<span>target flow</span>”. The <span>nuisance flow</span> estimates the nuisance parameters, i.e., the propensity score <span class="math inline"><em>π̂</em><sub><em>a</em></sub>(<em>X</em>)</span> and the conditional outcome distribution <span class="math inline">$\hat{\mathbb{P}}(Y \mid X, A)$</span>. The <span>target flow</span> utilizes them to estimate the projection parameters <span class="math inline"><em>β̂</em><sub><em>a</em></sub><sup>A-IPTW</sup></span>. </figcaption>
|
| 145 |
+
</figure>
|
| 146 |
+
|
| 147 |
+
In the following, we describe our *Interventional Normalizing Flows*: a proper fully-parametric method for interventional density estimation via deep learning. First, we describe all the components of our architecture and, then, introduce an efficient estimation using one-step bias correction.
|
| 148 |
+
|
| 149 |
+
In our INFs, we combine two normalizing flows, which we refer to as (i) *nuisance flow* and (ii) *target flow* (see Fig. [2](#fig:tar-norm-flow){reference-type="ref" reference="fig:tar-norm-flow"}). The rationale for this is based on our derivations in Section [\[sec:fully-parametric-ide\]](#sec:fully-parametric-ide){reference-type="ref" reference="sec:fully-parametric-ide"}, according to which a fully-parametric IDE requires two models: (i) one for the estimation of nuisance parameters, and (ii) one for the subsequent optimization of the learning objective with respect to projection parameters. Accordingly, both NFs in our INFs have thus different objectives: (i) the nuisance flow estimates the nuisance parameters (i.e., the propensity score and the conditional outcome distribution); and (ii) the target flow uses the estimated nuisance parameters to estimate the projection parameters.
|
| 150 |
+
|
| 151 |
+
**(i) Nuisance flow.** The nuisance flow has three components: two fully-connected (FC) subnetworks and a conditional normalizing flow parameterized by $\theta$. The first FC subnetwork (FC$_1$) takes the covariates $X$ as input and, then, outputs a representation $R \in \mathbb{R}^{d_R}$ together with a propensity score $\hat{\pi}_a(X)$. The second FC subnetwork (FC$_2$) takes the representation $R$ and the observed treatment $A_i$ as input and, then, outputs the parameters of flow, conditioned on $X$ and $A$, i. e., $\theta(X, A)$. Together, FC$_1$ and FC$_2$ form a so-called hypernetwork [@ha2017hypernetworks] for the conditional normalizing flow, which allows us to learn the conditional outcome distribution via back-propagation.
|
| 152 |
+
|
| 153 |
+
Let $\mathcal{L}_{\text{N}}$ be the loss of the nuisance flow. Here, we combine a conditional negative log-likelihood ($\mathcal{L}_{\text{NLL}}$) and binary cross-entropy loss for the propensity score ($\mathcal{L}_{\pi}$), i.e., $\mathcal{L}_{\text{N}}(\hat{\mathbb{P}}, \hat{\pi}_a) = \mathbb{P}_n \{\mathcal{L}_{\text{NLL}} + \alpha \mathcal{L}_{\pi}\} \text{ with } \mathcal{L}_{\text{NLL}} = - \log \hat{\mathbb{P}}(Y = Y \mid X, A); \; \mathcal{L}_{\pi} = \operatorname{BCE}(\hat{\pi}_A(X), A)$, where $\alpha > 0$ is a hyperparameter. In general, conditional normalizing flows are prone to overfitting when trained via a conditional negative log-likelihood. To address this, we later employ noise regularization [@rothfuss2019noise] in the conditional density estimation.
|
| 154 |
+
|
| 155 |
+
**(ii) Target flow.** The target flow uses the outputs of the nuisance flow and then learns the interventional distribution. We first describe the naïve variant of the target flow without one-step bias correction (we introduce this later in Section [5.2](#sec:bias-correction){reference-type="ref" reference="sec:bias-correction"}). Different from the conditional normalizing flow in the nuisance flow, the target flow is a non-conditional normalizing flow, parameterized by $\beta_a$. Specifically, we consider two separate normalizing flows, that is, one for each potential outcome (i.e., $a = 0$ and $a = 1$, respectively).[^8]
|
| 156 |
+
|
| 157 |
+
To fit the target flow, we must solve the moment condition from Eq. [\[eq:projection-plugin\]](#eq:projection-plugin){reference-type="eqref" reference="eq:projection-plugin"} or, equivalently, minimize a cross-entropy loss: $$\begin{align}
|
| 158 |
+
\label{eq:target-ce}
|
| 159 |
+
& \mathcal{L}_{\text{CE}}(\beta_a) = \underset{\hat{Y}^a \sim \textcolor{ForestGreen}{\mathbb{P}_n}\{\hat{\mathbb{P}}(Y \mid \textcolor{ForestGreen}{X}, A = a)\}}{\mathbb{E}} - \log g(\hat{Y}^a; \beta_a) \\
|
| 160 |
+
& \scriptsize = - \int_{y \in \mathcal{Y}} \log g(y; \beta_a) \textcolor{ForestGreen}{\mathbb{P}_n}\{\hat{\mathbb{P}}(Y = y \mid \textcolor{ForestGreen}{X}, A = a)\} \mathop{}\!\mathrm{d}y, \nonumber
|
| 161 |
+
\end{align}$$ where the latter integration is performed numerically with quadrature ($d_Y = 1$) or Monte Carlo ($d_Y > 1$) methods.
|
| 162 |
+
|
| 163 |
+
To provide an efficient estimation for the parameters of the target flow, we augment the cross-entropy loss (Eq. [\[eq:target-ce\]](#eq:target-ce){reference-type="eqref" reference="eq:target-ce"}) with a one-step bias correction. To evaluate the bias correction term, we need to compute a conditional cross-entropy loss: $$\begin{align*}
|
| 164 |
+
& \mathcal{L}_{\text{CCE}}(X; \beta_a) = \underset{Y \sim \hat{\mathbb{P}}(Y \mid X, A = a)}{\mathbb{E}} \hspace{-0.4cm} - \log g(Y; \beta_a), \\
|
| 165 |
+
& = - \int_{y \in \mathcal{Y}} \log g(y; \beta_a) \hat{\mathbb{P}}(Y = y \mid X, A = a) \mathop{}\!\mathrm{d}y.
|
| 166 |
+
\end{align*}$$ Finally, we obtain the loss of the target flow ($\mathcal{L}_{\text{T}}$), which is now suitable for our A-IPTW estimation from Eq. [\[eq:aiptw\]](#eq:aiptw){reference-type="eqref" reference="eq:aiptw"}. We thus yield $$\begin{equation}
|
| 167 |
+
\label{eq:tar-aiptw}
|
| 168 |
+
\scriptsize \mathcal{L}_{\text{T}}(\beta_a) = \mathcal{L}_{\text{CE}}(\beta_a) + \textcolor{ForestGreen}{\mathbb{P}_n} \bigg\{ \frac{\mathbbm{1}(\textcolor{ForestGreen}{A} = a)}{\hat{\pi}_a(\textcolor{ForestGreen}{X})} \Big(- \log g(\textcolor{ForestGreen}{Y}; \beta_a) - \mathcal{L}_{\text{CCE}}(\textcolor{ForestGreen}{X}; \beta_a) \Big) \bigg\}.
|
| 169 |
+
\end{equation}$$
|
| 170 |
+
|
| 171 |
+
<figure id="fig:poly-normal-results" data-latex-placement="tbp">
|
| 172 |
+
<embed src="figures/polynomial-normal-results.pdf" style="width:95.0%" />
|
| 173 |
+
<figcaption>Results for synthetic data based on the SCM from Figure <a href="#fig:cond-inter-counter" data-reference-type="ref" data-reference="fig:cond-inter-counter">1</a>. Reported: mean over ten-fold train-test splits. Some runs for MDNs resulted in the <span class="math inline">log-prob<sub>out</sub> = −∞</span> and, thus, are not shown. </figcaption>
|
| 174 |
+
</figure>
|
| 175 |
+
|
| 176 |
+
**Training.** To train both components in our INFs, we make use of a two-step training procedure. Specifically, we first fit the nuisance parameters using the nuisance flow. Then, we freeze the parameters of the nuisance flow and fit the target flow. We additionally employ the exponential moving average (EMA) of the target parameters with a smoothing hyperparameter $\gamma$ to stabilize the training for small minibatch sizes [@polyak1992acceleration]. We show the full algorithm in Appendix [10](#app:algorithm){reference-type="ref" reference="app:algorithm"} and further implementation details in Appendix [11](#app:implementation){reference-type="ref" reference="app:implementation"}.
|
| 177 |
+
|
| 178 |
+
**Inference time.** One main advantage of our nuisance-target model is that the target flow has constant inference time (e.g., during the evaluation phase). Hence, contrary to state-of-the-art baselines, the inference of our INFs does [not]{.underline} depend on the dimensionality of covariates (or representation) or on the size of the training data. This is a major advantage over semi-parametric plug-in estimators. For a detailed runtime comparison, we refer to Appendix [18](#app:runtime){reference-type="ref" reference="app:runtime"}. To this end, our method offers great scalability, such as required in medicine.
|
2209.15172/main_diagram/main_diagram.drawio
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2209.15172/paper_text/intro_method.md
ADDED
|
@@ -0,0 +1,51 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Introduction
|
| 2 |
+
|
| 3 |
+
Text to image generation has seen major recent advances with the release of DALLE [\(Ramesh et al.,](#page-10-0) [2021\)](#page-10-0) and diffusion models such as DALLE2 [\(Ramesh et al., 2022\)](#page-10-1), CogView2 [\(Ding et al., 2022\)](#page-9-0) and Latent Diffusion [\(Rombach et al., 2022\)](#page-10-2). A natural next step is the task of generating 3D objects from text input. However, supervised methods relying on paired image-text data are less suited to text to 3D generation as large-scale paired text and 3D datasets are less available. Thus, the regime of little to no 3D data training supervision is beneficial. While this might seem daunting, recent work in text to 3D generation showed promising results without using large-scale datasets by bridging the gap using guidance from pretrained vision-language models such as CLIP [\(Radford et al., 2021\)](#page-10-3).
|
| 4 |
+
|
| 5 |
+
At the same time, advances in differentiable neural rendering and the development of NeRF [\(Milden](#page-10-4)[hall et al., 2020\)](#page-10-4) now allow for direct optimization of a 3D representation to match input images. Combining these approaches with CLIP guidance, we can generate 3D representations from text directly without paired text–3D data, by optimizing the similarity of the text to the rendered images. Work that leverages CLIP for text to 3D generation can be grouped by the amount of 3D data required. The first set of methods train a generative model on a 3D dataset, and then optimize a mapping network from text to the latent space of the generative model using CLIP guidance and differentiable rendering. The second set of methods utilizes no 3D or text supervision and has access to only the pretrained CLIP model. We refer to this latter regime as *pure CLIP guidance*. Given the scarcity of text–3D pair datasets, we focus on this regime.
|
| 6 |
+
|
| 7 |
+
A prominent example of the pure CLIP guidance regime is Dream Fields [\(Jain et al., 2022\)](#page-10-5) which uses Mip-NeRF [\(Barron et al., 2021\)](#page-9-1) and CLIP to guide the 3D optimization process for every new input text prompt. Unfortunately, this approach requires significant computational resources and exhibits poor quality generation with low-density artifacts when using direct voxel grid optimization (see appendix of original paper). We also find that the quality of the results in Dream Fields is largely attributable to the LiT [\(Zhai et al., 2022\)](#page-11-0) guidance model. When using the vanilla CLIP models as in our work, results are far worse. Optimizing the CLIP similarity is also prone to adversarial examples where generated images with high similarity according to CLIP have little perceived resemblance to the text description for a human [\(Liu et al., 2021\)](#page-10-6). Recent text to 3D methods use image-based augmentations as regularization to prevent these issues. However, there has been no systematic study of which of these regularizations matters and how much. In addition, there are several possible design choices for the NeRF and CLIP modules, including the use of explicit voxel grids without any neural networks vs implicit neural representations. We systematically compare these and other factors that impact generation quality, and show that it is possible to generate highly detailed 3D representations with voxel grids alone.
|
| 8 |
+
|
| 9 |
+
Our main contributions are: 1) We conduct a systematic study of augmentations and their effect on text to 3D generation results with pure CLIP guidance; 2) We compare different CLIP backbones for guidance as well as model ensembles for finer 3D object detail; 3) We compare the regularization effects on geometry of explicit vs implicit voxel grids; and 4) We demonstrate generation of high-resolution grids using CLIP guidance only.
|
| 10 |
+
|
| 11 |
+
# Method
|
| 12 |
+
|
| 13 |
+
**Model architecture.** For our NeRF model, we implement two variations of the voxel grid representation: $Vox_{Exp}$ , an explicit voxel grid representation, and $Vox_{Imp}$ , an implicit version that uses MLPs to predict the density and color. $Vox_{Exp}$ explicitly models the two voxel grids consisting of the density $V^{(density)} \in \mathbb{R}^{1 \times N_x \times N_y \times N_z}$ and color $V^{(rgb)} \in \mathbb{R}^{3 \times N_x \times N_y \times N_z}$ . $Vox_{Imp}$ is an implicit coordinate-based MLP voxel grid representation with positional encodings (Mildenhall et al., 2020) of the grid vertex coordinates formulated as $V^{(PE)} \in \mathbb{R}^{L \times N_x \times N_y \times N_z}$ , where L is the channel size after positional encoding of grid vertex coordinates. Separate density and color MLPs are applied on the positional encodings to obtain the density and color predictions. We base our voxel grid model implementations on DVGO. However, note that our positional encoding feature grid $V^{(PE)}$ is fixed and not learnable like the DVGO feature grid $V^{(feat)}$ for color. Our overall model illustration can be found in Fig. 2. The trainable parameters for the explicit model are the parameters of the explicit grids and the bias term in the softplus activation of the density value. For the implicit voxel grid the trainable parameters are the density and color MLPs and the bias term in the softplus function. During training we also add progressive scaling of the voxel grid resolution as in DVGO.
|
| 14 |
+
|
| 15 |
+
**Augmentations.** We study the impact of combining three augmentation schemes: Background augmentation (BackAug) from Dream Fields Jain et al. (2022), Diff augment (DiffAug) Zhao et al. (2020), and perspective augmentations (PerspAug) from Text2Mesh Michel et al. (2022). BackAug consists of alpha compositing checkerboard, texture, or Gaussian noise backgrounds onto the image. DiffAug contains several image augmentations, including color jittering, image translation, and
|
| 16 |
+
|
| 17 |
+
cutout. Liu et al. (2021) used it for text to image generation and showed it prevents adversarial generations. PerspAug denotes random perspective transformations applied to the image.
|
| 18 |
+
|
| 19 |
+
**Losses.** We combine the CLIP and transmittance losses introduced in Dream Fields, with losses from DVGO to reduce noise and promote smoothness. In addition, we introduce a spherical prior loss term to encourage a coherent object. For a model parameterized by $\theta$ , the CLIP loss (Eq. 2) enforces the cosine similarity between the NeRF generated image $I(\theta, p)$ for a camera pose p and the input caption $x_T$ to be high in CLIP space. The transmittance loss (Eq. 3) prevents the scene from being overcrowded by applying a loss when the average transmittance is over the threshold $\tau$ , where $\text{Tr}(\theta, p)$ is the transmittance image.
|
| 20 |
+
|
| 21 |
+
<span id="page-4-0"></span>
|
| 22 |
+
$$\mathcal{L}_{\text{CLIP}}(\theta, \boldsymbol{p}, x_T) = -\text{Enc}_I(I(\theta, \boldsymbol{p}))^{\top} \text{Enc}_T(x_T)$$
|
| 23 |
+
(2)
|
| 24 |
+
|
| 25 |
+
<span id="page-4-1"></span>
|
| 26 |
+
$$\mathcal{L}_{Tr} = -\min(\tau, \text{mean}(Tr(\theta, \boldsymbol{p})))$$
|
| 27 |
+
(3)
|
| 28 |
+
|
| 29 |
+
To encourage centered objects and uniform size, we introduce a spherical prior (Eq. 4) where the probability is 1 for coordinates q within a sphere of radius 1. We calculate the KL divergence between the spherical prior and the density voxel grid with the sampled point coordinates q from grid vertices (Eq. 5). This loss serves the same purpose as ray shifting in Dream Fields. However, it is not trivial to shift and scale the voxel grid directly. Therefore, we promote centering and uniform size through this loss instead.
|
| 30 |
+
|
| 31 |
+
<span id="page-4-2"></span>
|
| 32 |
+
$$P_{\text{sphere}}(\boldsymbol{q}) = \begin{cases} 1, & \text{if } \|\boldsymbol{q}\|_2^2 \le 1\\ 0, & \text{otherwise} \end{cases}$$
|
| 33 |
+
(4)
|
| 34 |
+
|
| 35 |
+
<span id="page-4-3"></span>
|
| 36 |
+
$$\mathcal{L}_{\mathrm{KL}_{s}} = \sum_{q} D_{\mathrm{KL}}(P_{\mathrm{sphere}} \| \alpha^{(\mathrm{post})}(q, V^{(\mathrm{density})}))$$
|
| 37 |
+
(5)
|
| 38 |
+
|
| 39 |
+
In our model, we enable the ensembling of different CLIP models by adding a second similarity loss using a different CLIP model as shown in Eq. 6, where $\text{Enc}_{I_2}$ and $\text{Enc}_{T_2}$ are the image and text encoders for the second CLIP model respectively.
|
| 40 |
+
|
| 41 |
+
<span id="page-4-4"></span>
|
| 42 |
+
$$\mathcal{L}_{\text{CLIP}_2}(\theta, \boldsymbol{p}, x_T) = -\text{Enc}_{I_2}(I(\theta, \boldsymbol{p}))^{\top} \text{Enc}_{T_2}(x_T)$$
|
| 43 |
+
(6)
|
| 44 |
+
|
| 45 |
+
Following DVGO, we also add a total variation loss $\mathcal{L}_{TV}$ to reduce noise and promote smoothness, and a background entropy loss $\mathcal{L}_{\sigma}$ to encourage density values to be either 0 or 1. Our complete loss is shown in Eq. 7, where the $\lambda$ s are the weights for the different loss terms.
|
| 46 |
+
|
| 47 |
+
<span id="page-4-5"></span>
|
| 48 |
+
$$\mathcal{L}_{\text{Total}} = \mathcal{L}_{\text{CLIP}} + \lambda_{\text{Tr}} \mathcal{L}_{\text{Tr}} + \lambda_{\text{TV}} \mathcal{L}_{\text{TV}} + \lambda_{\text{KL}_s} \mathcal{L}_{\text{KL}_s} + \lambda_{\sigma} \mathcal{L}_{\sigma} + \lambda_{\text{CLIP}_2} \mathcal{L}_{\text{CLIP}_2}$$
|
| 49 |
+
(7)
|
| 50 |
+
|
| 51 |
+
We find that during optimization, it is important to schedule the $\mathcal{L}_{TV}$ and $\mathcal{L}_{KL_s}$ loss terms so that they are turned off toward the end of the optimization. Please see Appendix A.1 for more discussion and details of other hyperparameter values.
|
2210.12152/main_diagram/main_diagram.drawio
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
<mxfile host="app.diagrams.net" modified="2022-09-24T19:40:48.448Z" agent="5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/105.0.0.0 Safari/537.36" etag="5MdGDifC5l7p5_0xCE4N" version="20.3.6" type="google"><diagram id="KmEHNfg37X-dL-7dM44d" name="Page-1">7PxHk6xKtDUI/po37DK0GAKBDLQKYIbWWvPrC897n/isrNuq5+/YscwIcFxttdb2Tf4HyvWXuMRTpY1Z3v0HAmXXf6Cf/0AQGEGx9xe4cv9zhYT+vVAudfZvo/++4NRP/u9F6N+re53l6//RcBvHbqun//NiOg5Dnm7/x7V4Wcbz/2xWjN3/OeoUl/+OCP33BSeNu/z/0exXZ1v1z1UK/x+tpbwuq/8cGYb+vdPH/9n43wtrFWfj+T8uofx/oNwyjts/n/qLyzuwef+5L/88J/x/uftfE1vyYft/80DBaFf30XZfyVSzRuovPh7/H5T4p5sj7vZ/V/zvbLf7P7dgPeu+i4f3G7tu8bL9KySUfC8U47AJcV93QLxqvI3vtWXchywHo8Lvt3hJ/30ARkAP/3am1qDDDwT6qLuOG7txeb8P4984oFtn3JcUtKm27RU2gqPM++NdHvgBGqz/VzmOZZfHU73+X+nY/91I17+mQvHPpN6P/zmt/+d+/buFR75s+fU/Lv27f2I+9vm2vJ1A/95FYPqfR/5V5n9V+fxvxUDQfzut/odS0Oi/+vivLpb/1fF/i+v98K/E/v+RHvX/QnpVPIGPdf+n6SxYbf3qtxoneWeOa73V4/DeT8Zte/cQZTtwg43TtvwT5H9KJsuLeO+2/9ED09UleHIbJyDndfrH/or6AsJn/wZk/vMq9J9XQFfxFv8HyvzzFRGmofwPhKt91rBP6CuWI/P+0x2v4r3y/RSBr6LFMeH7m2OOJ9He33XEd7zl21iwwxmLnumV/YojI739PxA2yHdK7EX+VA7PHOXIa1uT6OiLdNp304WtvByn5eVSDgWm1LmEsGJNy8msq5Glw439laHwyoxNUK9pKGxAM1xZlqeLbjcjc1J7b4tFf7j5wa18IFOMrDFfjeFMhvswXMMwzcmUJzOezP/e+997/3vvf+/9773/vfe/9/5/36uukiVtDi1oLibf6ItudHxFK5LYU4sk+BFAtGCJJcU7b/w19UhV4heYIaymgJD/BnY2lHz4xSkvKmILM1CFG0tVNCAwKN80LBvluIbietvJnZy+ca/s1WmwsjSzgw3TxdBoRUNvqgPNjUt4Zz5Lkfj2RBPItiS36u/IoxNYmpvUMhN73KHBViQIPczJEyX8Oycmnb/3RsYMokkPDR6l7kh7UkRvEqRRI70YsJHZqkfouWxq3yfoYqEWfmyxVQydZEnJagDYAocKVbBKPjCEE5ElWji5DArfDn9pPDSGOEa3RmTGvRa5wbHZ89w3pwULPMPEUhvNiMViU/5mqGz5+pu55b7Azgi5tlYqpk2KDQ0XYsGSRNc0zdA3uWGaKNnhUVa8owMAhR45vD3vBwN/fwjlUQrMGGkeSRaV5Nr7oRCK+EKqgpZNgtmV9XuvC3e+TwfmhjcspauJXp9hNmUZjcLLnucv0mNJSKdpisKb9/Pb9sCdRte/5OnYLn+nhiShax+owbLUdX3s5oFCEE6xp3X+tDu0sHF5W7zPvfIV5LQojtrxYZqmLcvC3AU6o4LV4Snw6eztH2rbYUkBrhZogLL//h9ggUKAHioRZ5BLfIouedtS+L+3GRwbTGldgQbl7BzZIVkk2e/qGe+GH/VMKIgKFqvDmWjbNUXbF9n0YRgnSZfa972ubfgE6JFlYkbUEb/z/ctLEhsdc2ZEAkrT4sXRK3aANeyAfTwPus
OvTb0bpZP8uc7sj5+0XWAUaviaWLULXowz1FRVHx4aA6LYDZskPnAT/CJif1b16+G5MU8ThUcbQmblay46mH2reyYnnk6Sc+cvpVXz1VtxS199RIqGc/KU0pYoAusGiiq3w7CUzu/rMtn3Ko25v0hlo4RLwFn/bcPFZZXHL51g9dxNu0JFz7NcB1QiSTKnpMPv7yTbEeLHWYJri8hAtSP/jtV7Er0hz7tFMAw2+vzAWeleuP8xdebRaPaVB5u+OvJcn/phfKStQvkr/4yyPNfkoF/xsY8l38WPrd7RhS5+f7SvcxCACtm2MgGxYWcFJP6RFc+Aw885xyfo12ok0FpX3c8sI2gbmO/FUYIrCvsN7tSj2fY7ArCpz3a8LUmCwI7XFMw/h9LleVsghthQNDvQl1JNMni8FKrjT3tZLQUaU72fBhb699LPC4f0IwIj/lYfFB0zuvJsvtQT/YdEJt39bjuC1lZizXI3P+sWPPvfSsgm1ZqBJI/jaDfTLAIveqTiW1iV4koK/C5PACPVQPUV6hlzl/OWmqYJbwVcRRWaC3Nt+mOSSPE6AqAB4xLDL/FFhJjxMxLuPwiqfxMbGIBZwPTHWpU8JLta+9ypSrwPLP78/syyogiAzG9bVPWU14IP539xZQtguDhYmllf72FMLljjIpR+Sd54egRnwk5DjhVRgQ9MRe0L6PHY/T+LOxIDKz549NrJ0lHXeEYvWWe9sKK3964uhTKnOVrGVM+OkhR2AYMtQ/ezplyCPsKXNUkKf/5xGgJkIxfKqN6AGy1R7T7LhO3KaPj0Vd8WYrjVh22CURf3wYh7Pr/JTSrvJCcBsbZtV4EeRcdRC0CPMJluEbTnB6JwlgDs5Uw/TN6OjgJZZ2VxXqekSaF/4pFFlR2p9+Mw5dzhpw6obgL1YLIUvuqYiUNtJo0H5jTXXYcy4pZOGHIQ2g6hlSsP3u8LZLytWYx5lH805r9d0woDg4kZOS+h6xPWs7KwVq+IusXZA9xTsW/EnqZ1MFFBagqttiybXcLt8LAc67/uSzRRSKHHjzcCW5unwgVphUSKN0HSis1BQbgsfn8yQW0oxFSFRG+NwrDzYkfcnFwapwi3HRjOMuOO7rRgI8k1pGmSjakN+UJFZJoSeqxrTH+2zEk+NyUH2qDlG3lsMue+QY01twR4abLw5bML5ZJ0f3i3NzQt7QjFT83roRFyGQaznLY7Kr/KzxAgXyKlTgZqpwlsWC9CWuTRwiM7BlT/UAXC//0sV9/m8VuKFYoMNxz+jmS/taIDVkEuTRNUyOYzjn83qnPkWXIqqtVay3bk7wpAWJsn/gsyAHdKLzjRygb/RlXBBoFODL/W58Z7w4/H2I+A22MPY/uxpY0K1k73mk+jYzoa409X0epLf09jQAmCIMmdKt/GXwsRVU39zsw9s8sxuWEnNPYBG8WaRVImwR73uF7wHUlop+yVylXhS2Y76k7dJptgmUb6jxxhM0qOOHX8pyPcIXie06FVZvi9t36QozZKVhAf4Nue8ktoEX92dybMTQa/SiXgwdVuRPYnB0XpFs4uK27qIhrHp86dA9qJuRpaQAjFB4ErHrJAcTOHmu1TaIKTstsbD/kYudzviAiJZQaK7oiCZ9YFuqwjlRpjFCnKjX6duTCl1x6pMeMuIIK7tHOjrqqbaC/HC8In/LTa/SnmxTE8ZFQ4wrnjWis3oilmag+PFpd+u8LZFlkxbzw32Tcw1RLQggQ6kgU7jq8PZnox21FMLtAqQrgX1j/WFxsUTdcpGV12qjntdqfLw4A+N4j4ahay8wdqfuEFU90xybiX0B4zM45lTimZaC2xOcbAUGcbUF6NH4Qk1yxeDMPwUC+60fvMMRaW72dXtIoZbHzU1mQGzCmfRL7lvqrgXbdzXcNe91qq2o3xy4A/IdEgtZZKMf2QuUZEhp3TkXaI3j733ABrpF//tqKCq+Uzy9KGBvJMPzJu6A7Jk84R2wVB4EiTlxRep1n+lt1/ug
h2yw6m4gvYa3/fjR4UmS9qa1CPGTgVC8JKAX7RVWhCwRfD5VVQrigQv1sO3JqA7HICrNNKCwLLzmu9mtdtsO+6eOZTGZ+B/H1LR1uG907HbuvEKfnr9vkPEbQEUgixLUlzyeMUv1lndLZwpolnAjW5Vq82VTXQNItG9Xg6XUISAZysux5FkefXMNF7m2exMfX3RaLcyHqxzxpOtFZajR7Hb0uIHWALw/vzTAAYI/ZK7xRakfx2j4jlqvOwlz1J+kTIAFAe9tRx197NtduF4cng0bi+hIhnT4r8eMV6DL50dhoVmJg8+l8MW7L6AY76+Wc3U1JfXxrBIoWpIjo9OjYQ3rDmxYI/6D/b3XuqZFU4pjFjwjfoJvge3fMr1q4nnt9ot3ep3wgulZBNVYEorl0DwUCYXilP8Lighw+TQkdxiKpLdPSEkm4vqVGW1yC3KEcyjRSuXMyaxDQgUGdq/yIfijKhng+54IQvfPl6ild2NT1+LZvzmtiidlH6XffaeVw6nQ83gInHtspa5tXvD4GJ8AthJL7/hb8EiT8idXO0EbR9uR4THPnuTV3tZwJsIiNst4lmBLJXztKIaTV12GWRk4ZxcaxCS8Q831ZN/zi6s7mL1nYSbZzwQCjH5AABbBddU7afO4wbY04nDzef73CPbttrmQvwyrG1A9qyfNo/xKFB331N5QkrCgd59WSBh14pO0+f+fuN+12RySJVoH86AOBIlZ6KFlKqSj6nXPz4b5pWKA6pGTlJQxk+MXqdyaWy/pbvis5CA6o4NUreLU6tv7KsVEqgYqigpaNhcKU+tyVG4J4tWEni5A8YJbQ7i+5IDcaTtASaQfyxrPe/zwCBFiJlc/ydGc0vI0ib4WCZ7tZRCATB1tzGTntjruEpGz+uPR88jlJu84rULI74KWcPWc+Lz1vYNh/yqj2gftJzoiYwQ/71WXJUFO7HlFTdDa3yH8V0Oo0l8Y6+T2pqik/i+j8opZq6ICCrxSGhVvFW9VkSFZqFXnV/1ER2qa9wy1dzFVVz0yIlcviUQUBU8wkGG9w84T6mZv+zsmswAVR4rDXjwMGEAI0XYAGLAzR2MmOrcDTKzztyl3z5gcr0laqMU+fNBwM3uRHgS5NRFJqkGUbJO2bBKsUvD4yxhxNWI3FvHn4nvFKjNwPEfQ4AxA+IV6AzfwEk2xKXDFLsJLIBVgggKWIBhyG5GTkPPBM7YWCLgV6+tOF6OvxU89cLF/cxq502iMCNps54yHCAQcq9PxhYh4qWRWa+EKw8ozgr3iYoNPlxq7ZyLaIie3uvxxrbV99guVKn3xOtaMwxYAKHgmvE7ESRw95txEsC0+xowExobUqDBH3H4MUmdfPhwVQJk99RiSp6V+PfWbMNTklA1w2L30Q3pVx3G0QAwAXPLpRnTuMaR4sDbZAd0gJqBiuniUprGm+g1W7LRkNuyFsXv5Jgke7Vc2oJoqmtuPZ1SX4sNKXPniDpEYW+9GhNZ2QAyJ0XCCiOQeoG+bMiDrtd1kkxGnNCgZV83VMpK1THwop+OWip4MJvI6oD0zN0bHMiSp3LYlyRTKpcyTlGqwAY5wLETDaEKuyj5iq3nDTkt+lkCU8+taLAapw84ROwEV/DHDpzzE2wfP4sS0NMU9UjcPlFRoEGRJ1lijd7nG0qpqUjBEclns4BJM9d3iquqyAs8yJRNcQYxYCi/3he4XJiUZNq9WfFNV/pmCcILc1ynBl939ARcp11IgDiRmoZM7FyPX4NDfO2d8/Z2fWAoaajvWzLshr3ps8jgCU5aTY02dFuKqdD6ESfe3uEtky56n8mKhsqIz4vqGJVSdAQi8JQgM0d2jmAJ6C/AJv98aKXnVb5/gUMs9rwDyuU8KJME2A0uO3e8B3szWOVVklbL83OSdk4L8ykIwv6mmt0E35Pjr8X1//8XwKpJpaj0tEaUUeLL8J5ST1MfMhIaSwHgXqfXQ2CSNkReBIzg9nIi40v1q1kmLVZH/nXMiANe6
Ks5bi/xjxYk9ErR2NOui9gDaoE+INcfFvE6K/mgfmfYJpRkhzDODQwSSxSyC6IxuufQS57sSzpqY3HPPhFniJ9m2NPaZLsm5YVX+v4RC7PGV1AzHEp+iFfKk992ncffYnBjKCBQ7YrUwAYHqlZMiq+tyYh3kOoT/9oICclz+/T35YBBD0YGYnbt1q10lkWa+cz8YFB8hENnrKdMVtqWKsAMBAN7e91M4O3Dyb8fbHiBsxrok7tSmCpKefXAy1PWe4j6HauQ/bFNOMb2GbqqdjBI0aPSHHjjKf+iA7gclIctJzQ38/B6y8QsCuWH3y7rPj7YQK0Iwsri/n5bmV6+S+KBg4rcZ2GIYhOBxcnKDC5OTu8yPuoivkS9fd7YRp/9Ocy5KxOIoeJxnRhWvsKVhnTqjaDrAom1W6qulkc554laFzuHDhSAKEUuGuwRrIJ9NWiBZxUE4WTGexFjQ3TehpXJgdi1CwXnRrrgTnFSD8VTfUBQ2JRptjhKWq98mXKQy0BX6bgvCLU4HZcfJMJKEGtIMcZS/lMsbjVspGnKYW70SB2VbheU8P2xhPrJYsyjsazd0oylo0cBZfrFpFM4hfLeKFstpcRtTQOBkA/HMiMyETNdaf2EMM/caau+Ws/14wdqAOagfSt/Swd1h9N/R6S0dzHgI5A8ikgJ4gI5yAYlklRNUw26wW4ZWOrDIMTospypi/VwGpCoo1kIADwoPlffpWWNMJZBg2Md1SdyTcRkNEjZvt01MHB0b9kfl2NeX5Yxb0mZT1wYbiJWfI0pHHZ/HZ0jQqDeyCG9FPIBBw12ZexJBXlKQm/4X+gCFWK8eeajwCsF8cBIjdEClX+skbwcm/VX1BcOkyWsLQfhiCJa9KSuICGyf0Hch3Z65ze32X+a2gLySBNOfAX3YkpDxWzEzQjGl643FMtA0XEsIYN/aBwl8UUauSQeQDH9NW/7SP/ZS1+GNU/mcB9RKsDNQlgwz3roSCb/KghisExDq6WQxMWP1kXFdKEgy8D29cLBzKbvZTdHzGMa5OHf/DZnKWqeXl83aREPMd4SvjMBWlCuf7gRbr4ouqPcXP5xJDVLpV58d4bM0G4IxENbNDc+Ms5XB1XEbbeuYUVjzaDcDVGuHqzOF/x71yIYLijSRh4oYBMY/AhqGpl7SxACP2m0gvuJpcB5FdHOCU7yURVl2Mtyglwlmv+oZiKHdhfiice2xiYHlz4mS+uxrTFJW1UfXxZIC5Sq1NOrj06y9zrjWwwvzjhIzDOSTzQl/MB/La1eWsW9xOiduOwe9Y6VnE6oUjjBK3R60duqT0gSwZ0iD2/C1WXoeWHLySygHmTrDY8lm4Qr02GYbZ8rlY12+LoUJ3JH6TQR5C6vJ6nSIJQi1qqhJm36y7xf4WFK/4lkPxjNm4BvJG5k+BfDVJqLzsEgebkUBIEm49WaOZK+GKnJ64UCSumBLpW64OiS1qhKFmSXkjkO9+Jr9HwJaLyou4/U0q0489jAV/c1He+1E+S2Z38y6pK8jMWq0BEAaFNSL58IVpXSZkgZdfDrwM0qlGahNScrTd+RgjIhgJrC0gbhfZ01JQWiTfz+P3ALVp7WX9rTv2AvmwP5FPkrbZcH/b0dYTT/Oi0vLJdD3NdLMqfLG/wq6x7+4mNsiZSua4n2fnFuvZriQ8xYVhGUsnNidX+6SkhnGAduOoEOTztLwd6uC92UxSAAk2EbTHzY8uB6Fozch0GACI03wYgh9D/kA8/zU54zvBIEFFiGNS9W0/TsPag3PqkkLi6aJqibNedUNdUOzgf6jiJZ37pqudZoSiHxJHDEQR7BLleDOKpU0HJfByRbZR7rM/I29G0Fie2yYmMsPiY02DXVnh6l7NmamR97VSA7B08GrO2G8VRhMo96JTH02a5zRZOXv6cYLoV01rBr264ZqbcjfNLLK+TmIoLlogTo05IzqudzqRMDRmhVCNj9zWP5RcbwPid80DdSFpXVAn/HJ1ayeM0ZF1tz5
si6c/YERLhrmcSieKYpoD4nyZ+/NZwG5PH1cOF2EcQBkclkAkAbZo8pTnyOJMRlkLqs9HD8XptzRA7uRAJkkwSVHAVcWbVCWw1H+3ScgMSu7Zu0fxlp8jTcs2tF70S8RL6I8CyDBBmHAoEERgw+AwPwI47gwaC6/AuFZfpGn3ARBxgGVgdpQVDK5XwB/GPnH4y2Ip7vrzDNn9e8t3gX0NsbtrTdzC65UQLHApGu9TWXdgTlmt4FrGt0vglGkRa2ei2egNa8vM3rnHVtg/4JUsr0Wrl9sN6Uw+CWXJ9rG2BZuwTFZsEfEBUypwsTKZD18pAZKpjy2fd+fo8o6LekqWgT+hKDrshPMCMj39SeawCdXrhOog0s4v4saiouxI7ID96u1EaxpJI5auPz2OCBikVwJr5C7dQITEiQQR6g/2c8xsVEuw1TyN/zG19lKuwztb6goMH3eXevX0CAxkV+nLdZuhsV49Z8tVqdaW9brMfh4k59jrt9h6sYzfbYesPyPS/u0sMOTmUvPrOL2jF6wc3L4muh6wCRkxp7V59Zv13cRuW2tW00lals0zMqxCWjHS5v1ESG/02WyfnXNmPU9mcaVWVnHVYtkJOdo10Q9f0o9msT3Vi55tTptX3vdgOJ/v264lyYNpi4TTTJ8C+A8LE/YOeGE39g7dYQMbHChYi7DMmSzBXR7h1fJNfRc0OxM+RaP+f2Cdgpy+wRuaHaf+XBeVfKPNDkQ9WC/Sh/Njt08v5F3jDOyE6ZFxKLzynNGi30mE2eaKOjuuZSpir9LmW7sEJUt0aoVveWHxQAsQPrNwr+2G6nNVRqLZgc5Bw+8cisb4xaIX4vjhITYNXITq6zt1I1z0RK+5pBdhEIFFoltlvlVw2uT6UiCmRB+uu6t7l9EbfkAlumULFaVyYqAUxjvb8rNMrLR+GlkK+FUkNzest51SovPLWKJvFRujYFROcipVy5uvSi9wQQpLTpTw8YDo5RgAdmQfkd0CbKR5T0lS9CAPjYkJl25z6CleRoUkmd2/HENA2C8zjGaV13ofPcOE/SHUN6B1glwGo+/g4hrbjdIkSM7S1z/aS1LfdbNY2OMZh1r/TVjCmRqf6LKP2OADw/DFTZ7K5MLU23L0CP5R503NAOiSSATZ6rZjGJV8Y2k/0U8yBH9rR+VEYHK1GzhcyJeAy7kGymRd/KQo+9xMrp9B2hhb01IeXBqTpV/ryFs6GOp33pesNOyjS84V0fZz43J5uBnkLbMJ01k2xhzFKhQD5Pz3BBXZ0/e/ngzKT1zEKKhMIJFl5zqL/lXilL4p9NTAY9xmfDcw03ISrqZD/9Uk8NNMPu5/eDFOaedHUlxs1YLmupiLBrnpLPP7S6bms6FgY+ZN43z2vp2VuPnTo9cPtaIsxndEy5/SF1cxmmXtfPy+E/P3AFn2w2/xzJrLoHD1LpS5byTE2eTNqkd0526gSaCXMQ9bUlDfNd7pLRhIrjgCbymuvuR8IYpus88Uiy9mAHqldviDOCjbhmKDZgF4oNimZzLRJ2Lo2LusRsxxhhWTEN+zcBy8epWhrTt1zB66p63PMScUptX5YZGacBZuahc9UzKp46UKOSo/2P1+xvd6WefiMuZhecvs3wxP+L06tW5lR/pLe4dk2fIn+0C/gTfvP96GbrKI1KhEr11UqCKQPKz9NBdJXJY3pWZbN6WDb9JXR6vhzE0bgyQqB4mx6YO0MP5tFTxtsfGBLuvYv3Wem0gqp75HliD0+QO97JobLRUFd8DvryO/7ji5qqfEHVPO8/hx5yHSSMWFFkJByvNaRukWmNputwSEs674cgPdp5rZtHRwdu873bDUzstVwfaewGMOH1jItEBp1AZbjZ4KL0zY4oKOD/p1fPH7ilj/Nrw724AOijbDEOenx93hxk1dGUR3LlmVTBId93EUfhn90ECS9ga1nZhfwD6OjrXxpB4dpsP2hOXy+QWqbHU0cAN0Jfr1ZDlvBHjPlG/oR/SUwWjguz2
jz7rb4CJm1fBVexxQMvaNMlzJrI6YqAGCZmUOQ6efLvYu4LO6573Z5WaRFE6+fHtGr8VKBoXqB81p55x7oua++E7ygR+x+GhIkNFnn2NOHFerm/NIUfSWE+YM8ZK4j1nlKif90L7bxxy3hpEOEPWnV4doJXhhHuMr+QWtImbLb21jpxbeFdbz+OcigMSqr026yNBaM5i8pE0CM4LNZULxrEfRyiGojtT5HuxteVFMr5/iSgMP2gqPNkNKoQ4yvZGvhV1cSJlr+V9m/o9gCsS4pxC7cN4NNWpU+wp5nSY384Cl+F4vqoPBiyGhWeLRGsy3gSfg20VBloz9Eazu2ryQWgXa7O8zNqCt23J9fEJnc7xhAy+2wI2vwgsPyivcbKGECEN/Nx4dnqYck5hgbfKf2GqALorXDra8xAmLF6hmk67D2ANdQcd1ZJvXpL+L7XD09AQyjPf3e6JHiQqQ9JFzgynEWMeR0J5JMdOxQMOMP/dhr+0YVMvEb68TrsA19GfaEXrJUtYgvaX6D33NyMlSdLPpkkvOXE/WMU8Nf5uW2q+J0/ufeOynu5xGwzylHRpfxFUf62YSjuH3fY/03S7XsRRiazTxZpbpDWj0yrVw+wuiuLASQ6PIudQvPL6wLQOr4505Yikhjgqgd5lC1q75angw/uaWwKP494wJxacOYi/CNGBqSPNrFXAIyZ29PJiJobYactn9/shw7BEAxvVDA5t7+wh8vRGZTfmksypsqOFp6MUyAM7GKk+P4WIcY7Y+SmBsbdVRSKcNvEO0dMVaDNjOsMGlszE3tAMj2t74Ey1Ije9K0KRfosjjRaKa7xd5xfc/y6QPLoYiAWAFCsfvYp/e5U486BTelBBkX8vbb7YK32owC4xFTbvNBL+wcCY55IqEy8gv1Ka1q4qGmtnylJxzjHAdON005+TyN8I0BGr6QAuVYB4cANprTqx/D1exfFNaEbL867KuA0tarSJFOBBu+EbOp/JC9/xK/a1Xvws9gMPyDqvuj16+TVYxh9c8p3FwEsAGorWQJ5E4qn5DoX2fjoy9TOJ/46ZctXmz5ebw8fv0qskr6LBTlyfsDCPUq2q5hVDbJyE0vhESw1eFg57OHKSF8Jtuiq9IB4D6bPueEpR8U+aExMW+TxvGMTDQvb2ZrxSQOO8eNaT6HVZXKHdBNuBganZiGYwJyoKVi3WlIJX7jN6kr4J0/DHBEzodWl3G2Hn7OC1NVVpTli5v6kUtXyhthm3wwGrRsghIducVb3v0Ce3ajcijPiHgadRutFK5Vzu2Fv7jdOjL7ly74FuBgtaoCzTi0fK+SNy62TaY4o+vpHqXrucN/98RHcTGmFG2ZEa7k0BtvXh4sAtwn8cALOKMtaAMFVqE1l2PvP0V4GSRGhMCpD3ubDGX9qqj9xtF8KhRJOCC+0Zd6UzdB90u8lr7dDH9JtQ/9GiPPrsK1+IVJ3c3bj/EDvEeKRuzLtuz4zQOgL1XIdTEpVRXK7rCzC8pH8H+TwZdNlKs9teg/qRmji8piw/Bbv442XQWKJlhabmbTDutVgpO7uU9Fc/R1psr+l4vRJi8BZKesF3iy3PSqsPORpm+Z0ohmIFklgxj/uqh+vchboA2sGrllAGRwZOBfqx4tlynf4h4/gnxbTftLW76Dbc9xMoIBVHBi7JWp1eFr6sM6+6CUrrgxsWmN6EJKYiSaZS7pGyW6qz9w04OwHwSO0+g87vJcMVXgzeGEACNWnazYZetbtswuRTkfqUKKI8k5NuR4yoEuxP0JMafsvuLajAk8j9qqLar/nbwHT4XS2rBeMdjB4jRXE3dHmqJH2hz+8ucU1nbCwCu8kTbgEWnyR9Uw/Q00kLTZXZH7jVq1aP28KGel+JzvtgAuLES9EC4zxtE0bLE5FyTc1VJle/+dZX5ZvIoXcx8hFT3X6ZjFyOI14/W5F6pil7JTiIH47vSRwHnksD0sFN5PBcV1B/3CWqMXYOk3mTVtKvMcO9n8CYEWaz
79qLBCWPEzN1POuI4e8tLEa6xscyVeaL5ugSSja2iRU8isk9BbOJwOiRz5fAWKGxBB87NuD1ZI4oP2Vlt/UesRpB0ozV6OCyzYaPUCOs+EDajTQJKtW7R4vsNtriKihtTfFbf4p8Zb7GwVs8vvWmetlpWV+4V5av9LZhLqGp+KvrEzYZDCi86lAOZhbs7A68317lUNyLVWTq9VxeXlkoGx51FAZOyIli1RWEUsgo1WIo7GbVifK31sy+jRU+28kI3L+rD9zMcK5zOuMqAUqA82huGiob/i+yCNpOvtZv+t+WJihZPL+Jc9zA/HEXl1rGaGi9jiXhvkc4VoRRo4iBAKtYmtXZ18+f4a50e16tj3iXwqFbnZQh7v4dgan2jmAHvxsHJuLLzfjA8yYhHWyMDqRC4cAhJBfgDpN3n1nOTPw1tYlGmNgqrZYsqf7hLj7f8ijoy3fTVeHSN/Fu+xo7JGlW5CKHWf1DUvpmnq2viMmWbRUQHp9RdGTgIQLTzYfq4j6g124a2XYMD9xAcfK6W0edRfzZ9VgWPwsBb7FoSFLMrN6jyDu4PVuNNcfcJ8LqwT1wlzVAcZzqElxu+irBrhdFwbftRrtKfrOPgipqE0ZQiJMrFGLEbV82974jfdmlwD2LW8zdr3Jyh4/1E4FRKDG2KwCo9lZQlvOvWfo5y3TJKqW947JH1CxrhyJ/btnxZl1M1PBajIFPBweylHUHBzj/1efXF35Km2+6uIXvsdHsJKviR0dFTqYN6Ib1qVCai5s6SU1YjiB4llrArG4fqsD6LRt4gRE0Fa0RAnBRjOeTjV+4iOi3MML40szwGp/532wsydO834b/2eYaClNKzfr5thOe5/0lrgmGJEiGr8GNHkiaxDE/rMwXzkfIhj976XZ2bF1YIXAENG3KFPbE7mJulJK+XER84ej51+xSFDL48aeyXVP/3Lij9uLTqpVs/5oFx36Avsj7P+Kt8jXHAJmFS6Gycle19b9+Itvhx5nQ2lzoOzI1hgrP62EKN/M+aIdi3Q3ChuM+gjee1f8QM7fPPQj/GWts/2nA9iJixdRW2R9Xcs9eTZoUs9ZMVfZINjRNt2jq+kF0wqTyRaTq2Whgg4Bq3zsZOaefTHz6RMgrXmx+2drP6JfDXat2L68e6KVdAOfEf1dUrzbn+g0PwZ/mo40+SvKDmCYkwMD08dZCPFYWrdAqed2pyTqcmrqCA9Jkf7nCtNK+Eiv+hpFNKUgmAa4xiYH5v5M0c020/qF5Q+/D6yz4CXRNnzDGUZMg8flzjMoyFirQy+rqPqdH3VEKxPwjar39/KEyy3TPZ027VRlTK0t8aWEZC+moGwHnWzwkH9l1WP7Vvxl3WzmruKuU3Ps0n9QxP0ecj9+6jux95olwy1WWZqR/rH0RdBCSiiFbJhAjW4bTWPdo+a8w7djOHmpLFb0X6ogHzWGORz8XKdpvPhdhwFzLPgQPkw5Gn+01kKNJEbfRQxt8vq9x+iLYz3mIogvXBxUnVtMrP9r8yqFfJuEOiphxU/25f0hjWFtmiurKQX/k5q5LNWAqtK6BLs98sZ+SKa2ahqf3UrQlixli6s3Wy6q6NsqT2/eJ62iLlyJPl7FXxhyjARtlAft59fVwIfm4EINOAhy1+bo40fOj2fB39u8r9TLNB8Z1Hl+9mH8nTgKB+lffTb+IUbj4JcxqCTl/mMxinIFKQ40da7wxPRl/vBRJAogbi04RQhzbYjRt7Qu7U0aX1x35l/HKmtxeiKBh7NLuE27ddxSf2jpD+U7Sk2UU2jzgSlnsjW9D9v/FlpkXpk9gXs7CjMjcfAlCTKfIOVNse/hlCZ81/dHoV6feBenRxofJu/1EOnofKPPsUgZ5z5QfAgLDOpWlj35rx1BDw8ZvO6fVw6UFBWCJPz2HI+DSoLFfuR9eyn0fyBto31rTYTCPX15wM2geG4n7EncjCzZZCeckLad+We/aybIEaCAte/yratQzVM1PhytYjKncHend
xtf4uqyp2PB0pYfF6giKGR1RZ8c7CU/0ZPJz4axM7614FI9lSV7BtfHYfz8wj7HhyhwEDXpGgugv077+WtaS3m+BheFPOSWLmSJ4F+OH/8mozUFd/9+tKi3t5PB1100sbCmBCuGFZRxQC7/zzxtHuNWzivT8hrh3OS54XXsib4qq2w06uSMWczLZDtNOpOFh9N2JbpPq7enc0vd+VC9UDVv7QS4qRmj3XYg9MJHuG3BLERzm1AvRpRR4g6onInzz5k+lUnT2kRMoRX2G/3gSsZ6O/U8KShD+hK+GbAXczTJSme0bTAjgV1LXOvs75Tbt5NHOWFHhYMH/Cc5cLd+BDn3NM/GSEe2B5uA60WM/QJ0Th0K5EOtZ40P21eRPMBRSvlsVH0PxkdAf4v/d/S5Pw8jid6UVbFD4WZkERRTdN/z6Vs29jXeGkuMi0d+riP8u+n7Xth1YoEyWjjgRpjigxdJqcPn7lcBpEkigZlBA95725/tSl3sP5UmKTEskT2mDKQpW//mYQtk4RICAOLUTFb2F7wghRFpQtkfleXXA4o3Zg2W4Q+t9VdeMFe9b8aQB1N0+C0AI0vEO99MmgrxXWiz2RQlRp7/2StNN4UGxKqfLydtOBhi41oLxGH1AQrBk0OGSS1qftD1hEXegkxBaAA8FVnILzVT7nXU4+b28WDqmMZVnHRXAeNAJ+dVNhUe6WBin0mBCICc/q9YJ6YJEI9tayNpAqUHZj9k60cHeE60n2/YpGdz31F627BJDN8PAwVnUVwo1Fe73z9aO5BKi4PQ096+k1mRH3w0ewPvmc2E7Fn95ejqZZk4Fx2+8u4lZZhnIrUXegFm0zIER+6rUR7uwurcveVChgGDjhfmt+BDAIVPlD7Ogzt1wzFGWtvLPLxnstiqKceo6lu3yOL8yBRTHDm0QSYbD0p4A93gWXz8XcuuYcwqYNLYyvOr8p0UV/w/CSFB7D0585BKIdGQJDpMiKkw8mUrbKewiFbaZ6PEnt+vmnmOzOd4cbcfcCrcUpgAt32UZ8NbhfS7O6pAP7p5WyUcfC6fuU2iNnXj9NiMrakUroeV0IlAp4nPEPG5H29re+IjmktqX3JQsmdMHU/87j5w+4J+0hqkTn6SpXWg0Z7Jmk5lxRrsgJP8BdW1CJfNaGUGpVUv5CwEl1/jVACw7JWHR+sRxaRfdHjxVqzzbO+P6oxPjtEuqlEtx8RaVZjYjL7D1M4mzpYldW8BeL3m23QYB4z6yZon+UtPAxRmf4bEqlnk5B5sllKbUxfNAoiVZvShmNWK0JGm90IT8l9vnIRWH5IS35SN+ZaE9THHr5fidSmytQ89NZOr0718uFZFpJPY9KNj81Mkn2hX/cGNUMtTlrKN/6Gh/BGSF5nft0SQrThqpeUFOClmy7knTT4hgFSkcVwUaxiKsuoHtrM3zd6htXYc8kbjggruPTZR+RZe2kzGUQp/UvFX8+DJDEJgRTDC7evvSg8UCJ1zk/eII8HllsVyxCOpBV3KMWixtQo+u680sE8DRjz51YxZiTX6vUjqi71OARm/hF+eUNSGI7j1z+oRfgdBQxtdZLHjyRiWqRmXO1jKpWgKAXS3BCwV8UWqMpUfoZvZg5ihdgv3sqYPQHFL4qXoOj1wuj+neAvt9rWjYsQ9x8/eNJk7fd95CfuOq+s7zrtqxAr/YHx4uaY23Y+FWuPIkdcuX2dI3mztBrN+GbYoRIpotN2bPU7xAPxKHa/+ZAZml6QlVyuXiH0R8iWFYeGTfGf7wg1oc9r6+5yW1wJSRynFGpgvGaaAbC6ICiyzEayfGzDmxTtIkzbuNHmfbg2gHQOkHdsWor5p2hZaCTx7k1JfmpA64RDsYccToAnbIfjEfwsTUFmCVZefL97YxmZuUQK8ei5mJk6kMAh/t/bX30xiMppyF83HCEyhbS0tnseHImDYkUVxSseI9lW7mVWDXIOWh3rcyeRC/HRzzw1V0v8ESuUYTf7WgXxjCoyPKlN+K
P87FLmckaMfracmZRgciBnYnMlPwdRWzd0ksZWM3xh8EqrQYOXB5C8rduapf1h5qEHvC/H0JAZNm3h2OzyqJayU1nwFb4J2fn+qlmjk8o7zlq3Nxx38S5uBmcXEI8TIpw3y/pjwJEPwQN0yP5VcpFbcaKIyFDsqSe0PX6BN1vXdc8pDqiVZxp8wkKjK7dCkcZaULqFvaQ6RtH0VkU3nyL633s86Uw7CcsactmeDZFa+t/qSHAqzxb9Cbx1xLFqmsUzaxFTpeU6egA+qjjowD/VnbmOCS8/mibu+MvEEvKVxEx0qDBvBri2SmWKGJFBx0J0FV2nRvTl0gDypSvxfXUpq7hE9L4irmwi59cP0ArFLKIfLyxL4XIWhsYZEW5i4XB6vSuImck8ps/KMeESUUXE8M4/e27flefwRtqtS00HYLvP8fG5sx10SWFZiUBvI5VQFIICY2TLoevXw9c3FTgCqN9zV9O6bWQZkq9aypZWh3agSdthcq+yikx10COFO/8ggNKUBvpXN0Ysr1dg2QwEdH8BcnnNJLCxNTGl25MtSQbv7gohcJXp0c7ENxa1pOhTjj5BCuoaZdTEi1xmXCE72XoGEbiwV7wtl1p6Y2IEolP6jE1ZkFYZqe+y7qviSpIbsweDkce83RALpKf/bBypu66Efq/2qRWgJAvWxoVsL8SDJIX6z9TBa12gihxsc4fCRb4cMnZeC7lCTNko7U85g7GPbvVnfZ5NfuJXa+vB1TBLNEbUMRoGVlHroL2idp5Q2o2AaH8gg1rULiUNz3la+41jn7x1hVtE4MEbuZBcM/kn2oC9tWbepcMyiVkdYs+RsstROp9WJMjD0H9BqX9QT+Xyvuj8C+KEJbfJ9BLwvFBYJ5+WL+erx9L5B5HTDfFSkHE/wFtMN5BMQoAy6/hahmHoNs8MujsSPlNXzCQpm0SL4hK6AIogJH/n7GtAVw8yVHshFx0GeY2qdhXrv2sgzOTB8WlVY503MtQVcdwdH2+277a+aUQn1Cu+Rge3fWtsXOuMyJfHtAMCMh0M7xnmYoxRo41koN/r8k2qOcF2uEdGxCO57O8l3he3gWNvH5ab2vx2WsjumI1HvD4YRBvU7ty2F9XPcnA3JFb9IzFWNWy81AzhMc5klyGlU1VnNPZ8lfP69RkbKAhsd+JHANPG3Hg8fobj5VOkybSlzYipkuS/vGfdJaLp9C/DdEpym2SFt27fXqz7l6xxi2h7OVsPqQc7Mihr+b8ASlsJ7Uw3WZ7r2l48RBla3X0zhrABIcgDey8zLOC9muH00cgUq1ULeXzhIDQm7Tu2N3rjl1ag2frJQK/POrkUM2PzKnVIcTzwcSPSyrVjHMXx7zcSNude3+CnqrR49Fq1HBj7UTBqnUhmlu7L/35bHBwLL0Mbko1mEXtFooeRI6jmi/HXo/7H286NtI7eMFnWnLpN1Sq+Cyl3glVN2FxLcxyHaRBYDhSIff6eirIOxI85fH04Idr2zTTfTxVM3/E4utjLtp6KOR1kLYA320F5lt3NpCGKwTwXcWGgbm/Y+dmR4C8EaNtfRbeyf+NGydo6BMJfTge1ifH5IjRPPKLCoUqgdkv8lTNSb6e1dG9F78HfHBPy7HEKi4+PiINEyPrmpYXyzWWZCMmOBOEuUttR0OgHTsvfi6Uo/m3pXgdZ7bCM+bk45yJhKOBm2MUVe5Uftdpwxl3DybbesSZKHBs5qxsIsKLuktsTutcvKgz70d85+j6ekMxFCbRVMzjHKk5xqN+AXm+jS+IoZaaNfHKcpKIEMW+BBf/V/0c+8RIDe7H42weJly1BafJDqvgwdql30lbDOHCtKR6HSw0RnDfVfmupuXBoOPS9fIDTgt3h0zCUTnRYCKr2MCkn4OcW91uszCOsJwiId5Y8of8RnFvaw4SuNzEmkmXSL9ds1mcrh0ZIHDFO6DZtZz02akDxUL4mwaz8BSXhEHHQo+kKiXDUw3uED3DF337f/3gCfoCG6OSSFK
iKEs5f2NqQ2Joniv6V8dlL54S4Qf8me1WkePQBdf3qwdn9VHfKreS8Kmqg8592RhvF7aKFMV/TEEkrQs5OjNvPBLRNxEBpkHfMTUa1CPExHKizJ10pPCel5TgkvlIuHTRfO5Jwei5V0IoEBbBUhAx4Z+8bWMoLd30TJVL9C9gye8E6igfxZz0vCR8mRa+WRXQuce+lpCaDGik6VAdpruGnGAWqbeMeGp44TtDl//2RkEtscWd6PShNHNsNsEVLYQ2gclK/U6nkErB+P79XOw0LiwMEvFL1NCAN4U/xYUYLBQLj7QSpL5/U14ZnOvYeUIkmaGPYpCYvgPyg8C0q89HFvdEWcFYPB95C4hkC3l5cUAO4bfwBPSU/z5zNOE/TdVaj7QUc4Kr6ejZs9NRkLPbBjXYkiCD1rItgFzyCInD0yA8j3EYC0V421oQWUzrOs9IVauyu0YvO9LVb7YoZz2LCIyxyh5aLPI1kAZrwcRLe0WVnymzFm75ez5dcl9BNerW5ytrp65DBAkQl2lvE0JloDmhPEPCxy5YKddDMyEHAoYac8LH77g02OSH02/VVNRVikuyF6/wLLT5x5socdwz2Fp+An2SqDwfEkEbguJy6ZKqw9r4ziABd8jmox7aZaaDMEYdU7Phsv9QzLXLz/l4E+Z3yZHxTxGbKwNM/GNYvbZ7dn3hEuL4fvA9yTyrCIZNyOdJXv1/e+frcoiVDX/ep1AyBJMRMa0KiH020Grg5d/hMPzz53b8Cbi9cItuBawvEKkYXruFxuybzr3IoysL52BnyE0LZdPZgjh84azU7E9lziZasoLvJT2/EKTbu8Au/X2u8G7TjeAxSpR4E8z5ZBlknz901x7k1Si2NWcO3HlSnbrBlG+eXJacpKKr+iSVXOMMjJ3HZ2G25RcLCVPas7OIXFE9w3wT9v5l7jyS7kWhJdDU9hxZDaK01ZtBay4vVfyBZr/st4dNoxmLSshI3EOHH3Y8I565SG1iHxJijBX/x7KnFDae9qpl94XQ+bGQyWTYhTykg9U4jS8KdWO/orftf3XczgeEppzlqI0He6s/6FDNc06Bd9IrSmOulqUZbns8zd+k9fucOxmen+LMHCU9u2OpLdPOWHBKNSQY7wV0PsJxq9BKBVbDESfnhJIcvKhFPJQc/ky2Xwqp3supCsiQMMOixZguqyI+dTchktRQbCa8FB+4m1corIArS9COMHH39spNfUJKG75ON1rl4nyUpfCmUBmdGGRvNbm5cSLu7axrCe1sQ7MvCA/BjHzpfCHuLegG7aeelXeDI+J+x/uxxSZ75ldbCcV/1JjfQQgz89eifZx0r/Rif2xwxB/xc6XcqNrWguVfybZaYYx5pVaeVMA3SVB3ImX91LYkXi+3UgQw75MZh4UpUBtRFEBktOmZyVeqNE2Uz5p/sPjbneglTbGeoacGgI1BeGY2S+IWoWp4bSO26vwIQq4lTTlpMSDlsGxpdwfFaPsHbkvfGgJd010BkiBVfLFGDGHOcmStVfYIvE+Q4+PqxqIlZTw1CHDJg2grazOytSzZDGkiR8qD7Go1bhqbn9KP/41MdM9GLly7I3yr3uStzuMDNaHgHjcSATBTVJvd5dBEOcuWZoQ8Oc8RyNfChF3FKL6t08qJOB8rR92CQn7ctMZ8fuCk33OIxACZkPLz04V99Rzd+DIIme1MRu7Vv27T12BN9F36bjUtnFCaj6f0LwtHkKwD6lf8vU7yZh/8jje0RvP5zEHgOf2NGr4o16sH/bRgO05I02vO8KIj/8Vv5/N8fpSUVnppLc+cig+qXKL+1oFZoYrBZn/KdPpb6K3/ItJ1jW//7pvPaSbbMdmtXAt/8HMHFBIe8C+mCkA0Xm3VfiO0FBVWoD7RyI0Q0/obGFH/Rx45boIzVMfg+7xeFeyaPL+/nTekXG7qJ7t84whhG9ROIvqi+cJ1tGR7BpRNnc/5i8vmzTbGyznHQh2mg2OINwTGOlTLFq6ELQI8vPT
hYnpINbLsjkCDmHH6htNim9XNv4edOQ4sREXus79qrYM9OTar4HH1GGV7m5mxb/65/lVb7iPpkZpdLwdUMJymIXuQM+1O7WVOMPHuqAFyLFUscsHI99zwMw4hS6ET5tq69MM4RELR1VMhqyHamv77Ahy+yretZkOGtwWrGvpPsfYStxV/qgvpw6UzTFCZC0qzMqALSsMMXE+l8zb2iH1cC17b6PbofxzGuyo4BNlK6ZLxHXzg5QTa7fxxjMoJcRYw939U2r+RfKXBIviRzizUTqrVsBGp2sSb192wv6uw/1Wdw0N5TbM2dM5Kr7qTzGbZXWfjkbt/PoKSvqOr/pDuXVzNOR5ltWLVUa2sLz6nTJlGuhaYbMHGd57gwHhDdIO9FUrndHc1C/ogHZ5pj10HEsyHM07ic88hgt1cgOkJWtbWwTzBfZeAD4pynEp7PbTy33ALmsp4W92D1QwHTZIagShgBLHG3u6uDAhd6KLD6oSi/3+CURLdZZ6mhPrR7iUBK3GlPRdYAbmkYGLYJQrnsKPRSy3E1LYuiOWc86lGZttuSevqEJul2eR5iC3ZeWuKiJaLmr5b63+/RlKP609xWETHeeNBj7GjM4VU+BdrhRxxNHTokIgnThIKb7wh+agGX77HVR3aWnvSuQaBXHlZyg+3rQjmSOxpZsYl5w4lM+vnNbtda299huVizFrUAvr/5X5bEVqF8eYraaojhtEFK/whCYTH9sHTRXDxv+Mu96RiN+2K3SMnhHaWXetBC9Vr8VUD4F2B6xCM93KhCLwXC8TTk5YZKsur06+yjaUYykqGSWUddLtMhgimhA5SBOyHIppr7xZPujFTx89dz9ik3J2vrXRo3XpX1PhnSPkRns9XGk7WzUiTVtgKt76n87AM/S7YXrAElJJkA1rwekJDJH+PGhBd+zg1Jll3MPhZmR0FFnOv89eja012oVzKWGkHdvy72/SNDq309bkLLuc8+RpEUF5sSql/RZyCEtIYcJlLw02kkg0GUVPB1OT5GKrH+p9oeU7v4Dy1R+qWSy7K2guPUPiqBOKm0XZh/CzGPUHkIoUapaVSe6kXRWscyl9g9XEQ1PpO8fyk+PO6wR5nkNF3HfiJHDtskVYUviX5PW6Ug6Q5I+f6zWZPLs1uZcC2qwxF1DAbqTFZA/Xupl0jxFfyU4a5FfP0lpOAeHbP812xuRLCIiTAqKrgoYRZncPtMwWC4LZ+FoPRmWUyhI3A3pkTbjRmYW0RtVOd5rv+JZRvdm5Zr/+QRLueGwwpf2bt3SiTahorYWvIpStX58l86RjmQ/Lzal07gQKnXgKl5TXYp2ZfvH35/k/FE5pjFH2SErPlJG/hiG376dF0IKKQ9PqtXLpIEfpFRY+r+N27dPNX11lGfFnfyIeD5eoI1TQ4+Y5Zndo/idePcwmZIFNMK2M8g5g+SOrILs0WwrIkvEomLLQecMztZZdkxUjvjIl9JNNmTpnUBle0/w2osSf2l6OfadBJZMyd1CFjfU0QVtv/OLxD9ua20tzXrY4N6p19KO0+lexJ/1qFpzrNcEEs34nIEGLbXWr+55xdWV+kQLvP8K65iUc7CcElNrIstZM5Fy0pziEb6HbJy93FlF/5sF6Ql+MwbrFqcHlRAgvmfwZEdTJQFFaCizFyIKMeWIT5Jx2Cd/or3HCizAQtKQSB8Afe5SiyGXxesQ4fUBvydUJA79rhSscBYuhBvl//XMXle1rgX0sO7XWwO39f9LlK0U3Cye/z1i9FaQ/A3hNBxnfi8DO7FashfZ/ALHN3mTqWVDLC3pGUL48CE55ZeaVFmbUfYpKIzDnUsLGa0feTT+PQlu5gmHIlkQZFhYYe0BxhW5v2NjrNRH6gaqvmaYEMNc49MXUvb6Kyg+Xvl9D5wpC9YwF/toquxl4DT8Il0ZGY9h5jHc6VHu5znXJ4bY1fg0zyowxB9mir89yIx8KNl73mkFMgb5QHZs4IY55r/sYTfcVdPvbGFicjGNl
TmsMqjGKQf91HOqB4lY2aohAe7KTNAbngWGpH4xVDlylM1IC4UkubTzrbCYLtplsQtX0414/JtgX3UYmKJFNj4ziXvOPDCjh+gSaGBSha8m3mms2AkYYr5r3krjrcUePkcRCNv8LbsRhm7OvI0RwMGF7HrdaJ9G3HP6Vu25JA+JybHOZzRXqqS7J/r5+yQmaOdUtePW9kpqs2/oIyqTgY3o4jo+GIrddzJZtgSolfQZC8mRDMrd2PJUTp8UoIC7jJnwINlIdJ7mkCUpYb2axo3u0BDr7uKPkUC7W8gpRS+oMrBpjZytqZHIwpYk51yP7IXyIbWIocKpOvj5h+efNEOzO9wJUVhzurlvLOkxe8CeMVjlXEhV9lPytVwecbO6F74EdafY3xmjJR6YKAMMU9za8heLFqTxeo7IEqSedB70qjV1QJ2/V8PpHgRdhq6jnVgxOXku8gymCICum+Z4hpaO3pfV9Wtk5TUQJUKTTSuYFzTY0HWNIXY1K7shE2BDwEUkdV2SptRPunPG0V+UDDXbdt3fOg2vs4ymepUFNuuz6h6P5/LpKPPwUH5+rqiUkeb/aXuD40c6vFGkM2kcunPTfKFx9EK1lIu2HfpzdxRYl5cKgqcLPncpV9Kq48kGx8I2ecLAxz4Li+JBR34FZIt5PgRP92Eu/juaPugKKmiWsSzN+Cj3H/ICT/+R0M6ik359UEUTqGlHK/18t61K2kERYeqn6utCCYfynA8LM+qqI0s2/0HqERPPW5iTxBbEzRjuJVzHIailMAhd8k6d87EwPUmyvqYAtFYBVDu/c0qJbKpuYSdh6X7CwL6b+uIEljBNLZfJEm+r81Vsi5Bf2nNdHlT0Cv2Sq6R9mtRyzYkybJUIXsg/yzr6tFv4+60Lm0xts74zHw/6TT2Dm9I+ZjEoHVSD08L1gcchqepfBBTyzDEIvMtAdO75VLP2m5LL1MUugGdk6TDKM1sfBznOYEKn0q7kNiZee7pkv819/QLllYxSb+RVgS0+e0c9iKLh5P6XdoipNYXSX2b81GkN9Ivym8jSRCGSW/6DLId/s22ZiX28ljNNrgV8w0OnSzqprlJ2Nzb5e1+KJWz227VlazWrgxB55GX3jOcARKnpiugnc2W9OecEbuH27bmlGQXMcCtMrP6lfU26rSrkkhxVnZnceVoLpUGznenAlx+YHJwl9ALNlqRUg+0BrGJz1edKaJNg4KYzlPU33DL4+5gzFDq2In5X6+v/YJmh9fQeu3zsXky3iM7RR8pShHoMq7qbBtD0TTXFlbWXIb/uaGEBFMWx71xqQk5W6AsMZCq3pmO36ZztaFVuRSkBvcF32+D5kHzL59BwmfaOLXsF4cMSUtDi3EZ28yV6Q0tfW0HNNPGyHqjBBkr8/KqTJl2pGEC/toTQJumrPv47HGbJBxDqdrqyMvBfd4DfWZJCvYpQ4j750Vmwjc1WbpeHK80lakc4ill22hG0gjCERR05lEgCTv3GhOFP5859gqW9EfI02rXhRjhWhmNPbCPP3b9sHoBjsYQ6Zdim7UaZbcWg5LKrNUiGibAbkWOiq2qcH/5a35JQkKkd1WR4JWCze62I+YLTnGPSm6//sQgCH1NAIoqHOBOhAntm/QsSpYspTAQzx5uxo0bl5DZzkRbNbFbn1jUBFNZe2qt0ldHF7yFLvlUA9+kMR6LOFFc/uasOsar2PnjXTWdWQfoR2mqkGyqMqJQJ6XS41JMK9ExrVSN+JclPNFVhbjOifBXZ/jBaV2QymO9kLccv2oP9FWqE/hJMxW0f5mALztxfwwCLIcLU0Fr+d5IjN733Zssadsz/h4dY20EX+O4O80h0UdaVEcuaropz6qtJwCVyBKEKGle+S17CUCP4O2IT2ZLU5cXodNIuZUvFUq8jPgAFy1FJ88wLpGZSaE3JKchaCQ04e23MZBOAiwB2neCvpQwuV+cNEMPg51Nx1gJZE5Ip/qZ93MPRwX6pu+KthsKprrWNYP/81
/vIG8OXjruEq9KRGPTrpeCcyz2yTET3e9rMKuhlW9vwBVIouYrQ/p1ykoO9KuzDRFm6BYAyKvJw9trndTk+apBkQ3x05hV8l82x7b9FIi4nEwNTn/tOns/ug/68wql0C5FsziEc4yVAP6yAiRl/FqvBf0Uw+B+WNbd+cXZXxpXN/7ySh9k93JEomwAFfH6SADVh3uxBa6RojP8i3zBKz5H0/JEMnCUgpT+z//MQFV80HiOk+MAktDz/XKrX//NfmOTrFgrwSuhFWG7tHoJCUVmrllRpa53veRTgydzvT9s18Cd8rm0vrHbxCQ8lDbXxjd7mC7sJmttMNh+5d9gyrs+n8xbEXi1ZwBPlcKP83ToiiYQphmptTQcWeP//Bu8dt8IHj21t9CR7st33du8bBX2v0m6r0hHDl3sgG9C+kYTzPISnI/dUqJ50mIq/pZyyCxuGcX5yw/I19dhwf8C7AVZ25s7BYQBIeqqLVT7W633ahzHtmEN9z1gbEffGR0ZqDDMfPvLEfBb6nRzYwVZfmOAueav0/6KEwN7iXadkLH31/K9SSR0Yg8C+g9b3ev64nU54VhZzuUaEYHCe5uBbw2qewnFuXXHvCr9hODhkYf0olxI+t1L3vuN3HdLewK/Y0gcw0kmkOlihHmVi+TspEzpeFNvgyy9u1Gwk2MJAghHMSEIwzAq3QrWlUG3PvV9OjjsYpf562XLsEeiHxMKzv4rvyF5dKFZTVhyyHo4c8VP0zSKyzmub+wrrVVa82GRyABSJwq+zwtLMG2ds4yFxxEl5OD0zdddcZBb7heCw4S1s17O+OlH3oYeBc3aLRgPABGj6wbEuP/aKLtsGdaPnBCzYP7xFaiV4RgN7Hnc3cwX5cJnPf+JM+xT3dXh+qgbT4eDFPayJsuCbd/a3wmz87pdxDequWq12xP18ofLmiiZmtls3P0I0hypkanBdPl343jWT5mlWr4Np1S1yBPXH6p1uCMtX+Qv0xTG/QNO51gznd8AvwIroty7vzq1fKxvgBKPw98ufoEP24eRj5/OOyi8XgBQHV1WCpd46nB8EcVxnWBOCb7xO/9rFpaayKiPduFZoezlNOKJhFtMeWmlcH71kUs8Z18hnpNQbjONhAnvYzJu20m29Ea1Hgftv2Ebnzmiz/62/oimogCdDeB3754H8i9HNn6IQbmhcgfUosE7XuCgqKQPzkYbMafxDj1/o8WtrDTNf2WjZ+EjLB/BzMMG7iVi16NWA/xb67k8LNnCffibw0xPOMRd2d0Q9UODI0Nh3se7BoYIKwY/4zSVSW8YlT0W7uZ2H2fEBC6guLaLOEVqN1LXzRBDlQOwgPeTLMootCiukKXidkpH3EkXh6SPWOeNKdNSOqfvqsiC4fg6YwiR6AAmKlocjFjIZCy20kCOF+EkfjGOyG3UqxugOwKV71Bvm2LM5sYziT0W7zMZYLFWbgBhN9zIs2Sn71j5BMFD1MI4KaWeuyD4C+xH43zzqtXjuvkT3027suhQZp8//iZ+/Rf0v9+i5I7yfGnopKOM0dKtLdAN9WIV+2XMqrJSl14W+aKrvA+ajUHFWccLqWdQiR8c2t87appmjIGlLP/+NuQkjWEdbf1YSb9/2B1vT2yOD0omIJiH9hZ3zBu0QE1nFrdNpBAQK+d26XuDmNGgu7hjrwnSgyc/xjBANyiNMWPD8GoP8FSjD2OCcH8Ff07X0L0WMt390SKh22zplYWW0E4Wj8SFrrwxZdBZODImphWDS1goDO+u1Av/anmL+29K/PlXnwZFmiK0AX8fZxkGwoMTiPVvu1dFIsPd/ke9JlfT3HZ6ta1S/nFs+mo1WmIejjG606fp94RB5zlkBEHmv0lvoib6NY8X2ZItczJrc8icQRMhAUfJVYLTcl8RoUMSbmZJ99N1llwKzs1q4S6zE3XVE3ZXcNpfq7OsEHLkxe8AOKs+eKPJvyI9pLKRgpBK74+n/JRzQntP7RxrdKv7MFnX+uDREp99s2
mcs2SXflJPoQ/g+wblFh7pXY1KbDEmI55tncbjg3nHvjoADTT/kZWojpwGugiMPRlpz3kc9aNl+uUChdw8r7BAMVAg71c7C7VhlIqiaGpJVdkN1pwgjV/vuVl05Etd8qIAfAZyOgoWLyAMRFFy6MqZCf9Xp+KSkm4l87i4dpiHMnWhLIniJqZgS6iqu0xKuR9O3pJM8eIoLr6lcVKf8XmszjfJj1fJ2CxsGLR9111KV7XcHWmhXwB26x91NfPOuNkuafVwvHoWy0HhAWpN1tI4HHnJxpWyLQz181mehoX4cHDtwLmVOTOSpl7nuke/HXPJzyEkJ0zAW/RhnhpJVFRz2pibxuhu6aeL4T2FiXbAmYlljDD5NzBQ749F02wyFrNa1GK0NMgQhkrhOJyvRAPeYtmMyesRd93FZUmLm4izLqL2xT76IYZQ+CVHIpHlhuf495T4L51/zt3+LAL9Y0+Gc6JsM61QvdLB1ZxrKXVHXmuVol0Sz1Bm8AvT/4fkX1KsMH73jX/V9SDWbdWw7d1Y9Z+xtgDX9H6tVdC4Dr6Nencekh1xTa+7oQlOUo+z83N/cIwHTvkz1VpCe2kfOSIjZzlWpWGElQqd88bTmtlyVfeOtvCjMBYFWAavf/8ZcmGlSggFwFjuGNWtvNjGqc9+qTHJqdGwY29MGrlpHh52lFaG195jBHMn6aewOzOLCg+DQCXjcgiXMMvYDxOLjKYl7wJEX2Z10LDwqZn0lfT8SuxS5fNvDPML1ohvUL0n5n9jpYvgu43mAruEApkK6cO7wnKlV5D2ipl8FjlUEZE+jpUjbYzaAfifsBgozLRCK9WbDqCsuRwBg6VmDfK447dOhsESJDdrKzkfQ8j6e58Eh/+qO5Cqu1aaANDbfo/siDYg+Ps5f1nHoEaCvbfCC0KeU+T/bs7RGNoWWHfuQPdna1LY3qfVzWZMm07IS9jo/yQjpr2VnLa62hdf96vDeo6I4KZ6PETnqB8tW2uB7kj5P01gtrD9PRU1MGAc+nBO4MFeHtCIEoc3ZugKUHj/58lB77roF8QYV0z8l8nc/5O8a1ZaE9tsj9Qf8sQPUgtk3M1wIzPjW98bKMtB05MkzC9uBkchZtgz2y99m2oGQppWgs8OAwCXNURg6EFxawxdRtamIgYWx7bUX9IYmsqIFa+EKKPLqU6MpHKzz2hmLj5b8IeyeGILPenTrQjZ11Rd0CMheWfxoLZQtw8+cqZyS/R4mVAajmhRHxJhCcKPw2zF4k8DnJecIjYWuzq/o7qKEMNfqc52b+4ZWe9GvZyosq9Dg4WIvDS6cAMp6ZG/OTEXugjgrR1SNH/2wah0E53ro2ZV6LfFAuyzeS+EE2mXsyFQmbuvQOj7Tg+YmZ5Lv/XsuLvemLNwDejWXQ/E5UpsqqJ6D7NmG4pkR1T8sz+8L6diI4RCsbnBEkiTHK/oL8GIv2S53xXFUyv9+FVhNw6UIa/zyxUTj/6eYTbfkF5oZNRIjs2+suCV5icoraSsLn5sfKQIAtDQHB69o2pECdENy49boEtAQ399DTdfdYDIvCz53ZOsEU0DAVImnT0s45bOUQGpvt0slEe9AafbIK3lq2DwV/Esm5V04F1re1dp9Yn23wdJbrHSly/b9GtE/kEOy0SPNLax0+OZOyGa2BTSlvmyiHzzd3kF2dI6qnw/u3BOnWy4TIEC3A6xhVmcdkQ+lfJKCFoqgmlab9+TAj8ffRAsZM+7MB8YGEPBZt46c2ukTN6H3F8ceXN4J1KJ2L/oJ4z+X9cS4orKArx4MN31B5Po50bzqhBXbkQG2Q+lcaYLTff/Nzdo0ffMaE6rZaiQKbYOVeK23oicHC8d0UuOylwkESdQRZ02cRNVkI6C7WtOGWAfF3wPnZNzfyjm2NGVh1el6b4S/Vet7c36IpJTL5i6FrgnJcmLLqUWgQk4/BY5yyKLuKlnZOTh2g7sGKVgGA5o56X+ekNfmFunJujCdx1BlTkoY3ZiORdzrs
rqKTDxaVkW00o9HK95j6hxk7+olPtNAkSKZzlRLhmE1oS50UAh6H0REpvO4RbJokYysbMctsFAHL2sn+rovv/4bOir7tq38LKsN+wA4Ic1hNjPMaNZyte0/qBxYM6YSbZ2su5w+Nnbfz2JtbtHA8ugniDnBkCuEH6+QXkr3ki6jWYQ+b34y59n+Q7BiHDhlmoo1ebHKkyGjl2reJBOcQCDRHKgAWEz+El4OiJk/leY64XqVHyiYJZ5VsX98Px8+cZXn8wfxRIv/S6cTM0E8FSea7tUtaioxrtVD5c4P/mJ4ji6LKNaUSzhzhaqKlBsLu3DUQYQ2dd2ZTLlgEeOlbgT+jYWEV0xeS/qFcFO/zrwS1dE8WlP/e8s4AdBuQoP+gkq916SYuD39Pbvnp+IhA3NKmZt54oHPM6AAn7z5g4CTBwji1MLOAuAwYWB6nxsIFZn2RiANqo1+NevVE1YI4czEip8C+p6L+ffFjKTzZ0DeUzQJqlRkyfYEfdeyXYGGXOH5wgWcbAGoPNfSImpXihj3/+yy8VmMEQOg6p7xUE1DV0Dw+cU3gYBugJZtsnMkY/dOU2IWe7XHwdphVclzmeOvG93eWw9b0sd2TUTDdtv3JR6Dbxpb7NCqtbTepDXLKFzHZLSjh3+N8lcHSIuq2f9R6iLt/ctuHPeC5SMcS8KXAl11HZVU+jUtQFxLoAkibJzWZp7VqORVTrA/XnCTJkvz6UHtyjikxl8zFQ4GjHvVqW38yVxp6brJ2oE9wqKVJHenTMJGZhRbe8yxHzp/2bnQWp8Md+eHWYHgnJ9rR0m57nq4ZrBkDiB0K3oin8Mn+WvBEhyyGn7I0NSl1dLyk3X66uOU7ViS5aIQqBRPBb64z5nQdSyeCi2V2GGM9Y2PUXmLfcPw5/ihOVo8fjy/TKnCgrzhILMdYo7qcc+3YhqeDnPZg5g2RacP2nccVwjYKcwrB/MCOoTpw5WWodj7mv6UQ0PsXCSX+vo77qBYDDsusbqGGgGiykXkYF2+gJF7q7uTtAoIIV8JpB/t+385Wu9N3h8T3y8chFwfRTAl1f3bIl6Ha+m21pc0L56pvzGlp7SVy36y9JPXFjia1uYOLk86NwgX5MC4XTeyxUIfGI7aR2aoGeQ4+9eLa9S3VZCvL9LxhZmLynp1cHnugkgs/0KXi6SX/JGmwPUIW+Q531XbvnGRNWi+Zy5869mU/hzoQKTBwiEAPB6gWq5NoBgTZxhQSpYDDBz4X5c+W2PlEOqHtutzTDyRM2UGrK1xg8FALIain8iS/RasKc1EcBLjoms+GcIUGuRS3ldaaNBDjiFflVRk62LSxOi3ByXPyVwEA9pnlF6VFL7pKmnlAvFJkPg7YCZfj5JRXVG0dQTn3Cw3KTfOXG8wAat7q8P0xHFxJSrIR8MiOKrU5Aeyq4Fs4HBszLmN947wBUGGmGz8JY51q7XmyHiCUDHW4XZTtC8i/dKSvK5R3+AFreotSYlCcwH2fPXDP7feOPPfIGAV2cB7ybDgCZwNuDIQWi9jwankGsPGcgf+s/5PzebGBcgo4iVjvKH0jVaJcq02GtI6LBkrTpip5DaqxCW5ilIVhK/BvZ4FJeiJn+QZxGEFj7CllsvTPhDuyH80OWkYIWkis5u0AayKOltMqQxy7d1HDE0hOJebVrhMFKupY2VN6vgpdludu7dPAtnY+ukePCMbDHoUzuR5Kg3PqXYx8ECIApwIEj5ido+wtJRC+VtL+u990FFvO8oJ/XXk9Jj9F0Nh5IHIvG1sJyr06tkuCqqHtjGl7dYdN3Gp3XGXEyjir/KgUOyQi3G7G8zuyhH8VJcTf0FuIy/B8GqeSMvVpd8M6oojA2VJcg2RGo8KA1cNVT6sqxB0LlvYOZK6Y+YZYExYeD+PdV6uFJnUNbG+lN/qE1aWpzbcdIohfwDRMdvXvaUelqdazFrzWfGrhZV0OpWARSCZjbV1XWx5bT7/Xf6p/fhUxf+ge2kcrAApEeiMh
pJzYDMzGl9oOiy6VrtGNdLUVlXDbswg7bOYs6TBgl20LqmyExMOpB9jklb2LZmgNU3+jCUDxkJA4RLjmKdhfkMupHyrhbd+3PfjzFa5PFls0+9BG9zl9SEzt4YSiYfh4sdKVB2r/1sTEt9w9LJHRiyyS6UGOh9BtGQqjVOslS0SABhQlLVyCzyNdtVMh2lhu00crg3Sga4P8cDFbAnZ4Vmkxm/qh4vggI4Mf1LEwiUuQXKX9MAucklsFi55Y4uZjMNXfI95qZjIDuqrc2KKO6NHlzy8sOwruq2EgVONAL59cgVj0m+Iwve/OeTrvS0icE0fwbhc3y3mlQhb7ZeLVbSRq5TGcjfgGVakK7ttDwcsF/w6j4UCL5YvO1QBcpVS5jvg0jrHKA1ciVdCVRfX83QW2qTT6fz1xOodPwPCIgVnLgAoiL6Z2h66VVlg2g+FGlAHbq7LmWsJoTqqmMt67c79IrR6DnmgmEjmoxCKDrEkrOuCas20IZHly+b2GYD5SdTodpa9U9QkRYElAw1He0ns0AAvnF8B1YOAvYrOd/IFmyzZmiAqSnXffR5MVgsugrEV8r3Ze0mI5pFz2CM8pY8fCpfMScLUnTxg8YD4oOFUT9NVGKsB99dB38dzAYzOZBL8iXCqEVlaea2Qrev57azG2jxFrIBgBO6UEXvJL39LIpSdT6zkzqSbhqbakrpfpg9mcB7dNuvhjhco7obgOYzSM7Aq2/WpH6V625gngR2yxh7xJwvz5lvFq0rjbG4MonPQak81tUn6uwgUfj27THftFKXHYBZiEllVN1tthb6bQmO1o8fyxiOA6F0GpsdeqghSwBqlb9y9BQbN6OoioYMoBLaD4RYClhHyQckvoQzORNPj+f5xz8q/n39qYov0Jiv6AVio77au8FIHobhdgdQOy0Z0K+3ihzeFxK4i4L4G672XRY6rfJhXO4RMJezaMr39VDowDNE4HqyE5RI0r8RIUCBkU9o4MaZGtix70Md6fnsRt2IlXCwusI9D7sHpQUxs+MoflaapTuAsHaNK4NZR2MURkpkHnzg30LP36yoz+XCfuTxRAdbdjjWeQuuMt00JnZrmFwC9MbgftwkA8d25cuvnGQvT47xQdcG89I2l+RIdhEamgsVA2/N3Mpb67OerC8tz09pDPtn8aVLG3xNh6FED3Ab5J9Ce1OeO/j0cQbXJ2M3bMn67+aw7nDIxCHiXUlDX6f++egfD1iUZIzT959Hj2k7ifx9ZuqgMyw8AjF1EFDx15WQ4uRhILD4NMOf/of+2tJw45d+HHjjK2QY1I8+QQoEwkWnFHfkneRq9iaGcHl71ekqh8UY7WhtrsnSAn0+Nl/apdm5Vdne5UqGDJ/TvMZPEiLTF8uhHvBZJtiWJdg/B0QpaGjf1ZUZ1h1z4FJ/gN01/0bOcqKZw+QyJG/Ai72lLPf0uI+0naD21K0Ls+cVmo81BypzXEQbaQyt5mRwaoLT2vAdusg1hGGyC2xP35Aws6ltuGW4QA279cXcnO7mJmITKt8IJP+wYSKS9neGLSKJW/RXhQEVmIaP8Kwbdwje4WV8UyHhVnu01EOehr7H2GQGMC/CcYXHYKwrPUdAU55ZwHbBcc/XvfD69ZW9ULSpQ5ed0nKGJdlgCpGu24GzfIeuWquzlyJVns7EdTkJ+i/bcldGAiz/upyXRMcAnU535SI5a40l2WA9ABbSJ1htU3w/wIA/tjOvMQblxrebYxP+QjUA/0WCxCYz/MPgGsh2LSqBsmbThTPdS6uBkWdBQ5RSWtm5WQSXSTQfrcswT5YVuwHIAk/Nze/LY0+bVd/s8Cw4PpXx5dOD4Y/AOjUY1PtvP31NgwGiBt3ffb42/zKuF0NLTP/QBbM/5xmVwVVd1gSRZxRh8bl4pSNNCJkN5+doPzs0QrZ+/84dtPUAE2dkxWI0MmsVYi0SFyNELIWidcgCVuF5KIWezz0ciQfIpFEVqoGmUSPnx1NMYLurSTJQGQzXCo
HfEyATnvrx6SZbAYlbGIoR37aZ5WpfW8/FkOCiqppGUyTcRzOpx8lozqTpn7BK7QKwpgvUDecu3Ohuy59jzIw6dYigphJGciUN9oP8J/u/nnZ+4Dp+Q5brRI5ehKeWLXdDIj7lMH42BG1RbGUJ1RgeIs8xzAVFMxVYRjl7EI3zEgaD9mNjYC0hSQki/d0EdiT8j0jK+M/7x6iMzY/cnzLHFlkmM5OADG9g/Cqokn1FSHYMctrlmOeSf/PtqFK0MCJ1xPftxP3FDX3ipcKak8foDG1pjDCe9F804eavsrQ2vMiodvl9mkRt+9Iklr/rSzcD31fw5zIvJHWltd4O1IPBw6RNy9BEzu60caPJKJzpUZ7858/O5fE3Yz/2//KiN+7p94PEzAdTGXa6GuSK2asKU5MRbciVWZyH0bYtPyA8TyFTfqhkL0bn8dnBXAGS+oxih/TUxcFyWRkrFucejfm4LxY+H8SXSWj+yrZtslju5yySrYl6K4S9C2SVe6l/Sf1+vOjqRdHcKBn5BU0itxc4pPD4vMcfzCGsXH4pBTZ59QvIaXaW20AiV4s7mBLeiN+EtYvtVa5aZOnh53PTWeLiI4bBy0beoR643BR4KSCZNc0b7Cuaz6I04hSP9+JgVRtw7W/KL7oQu2Kzk3+HG4h4IFsgKRwctDxyM1MY8HcNOv8N+zTtMNoL2GSJ2zeH0dRPM+10psSX4JzYoutj2xnybrpW/qMlO8tG//ZghIIr+cCOA2X5lysVBVXc0WsFqmoPU3Eg9XervLicI7tCYDKJu96RMwH/AoStJXTfX28sVwQtHS4i/DPcBzHY23IT0YF8IVMktaZwofZgAghFFwYm42AFF+jz553CdnTqKXfO3//MAzg5+Ds/xiTt4lHHU6/hGQPB8/LbHfBJYn83uXh97ATvPm4mP7aRLKX+7i5xOc3Pj/4l6dijscfvN+K0a7DkywjOUjMLrCnhZRxLhEyJRQegNRHy5eMmr7j55qD9MW/Fbp9I7iJxj5dMs6oKxFevFM17+sRQiVZskjdw3JS90M7w08p/efxEiHEFC06w/xvTLRfrYC7G6DRig6NHKwra1sw9svuq9Ryol3zJDIEho/8KFDL4msH0bg+w07dB6plV9iyUUkuW+Svm/wiSHwd48VXacByR2L5RAwXnPQ1VJFezLCHosE79uDDUytniNvsODHpqLer+FAak/htM9xFme5xSZSn1csgNBONEAEQta4fIjAl3FcDiR8duBBtnXDmwJCPU2U8DEEDLb445QrXs6AfAhPPol5HN90IpUnDbX02VvnAf0PYHDniziBt+5zZ7CnkAZlkIu9CcHQ0cnqlor3BaxUj06l/E3T+4h30EKwj5uL++f546QM6qmOv7gQ5qviSxVcR3eYrnKhIaBp9msWHV70hQ+3IWKzMfeRa2bONMkQMTiKx+b2TIpV9goCo9SJxk9mCERpi3VzHc72EyvQd4MAJ53ETV//VeWJdzIiD1Jb8BLtp/SFbydx+aIRRc/pgmQNr5yzfQ90MMiHk2NA/UuuaKG/mgPFGOP0Z5kypcGjM2xSFgt8lxFk/8V2NGTmRWLKOfvIfQCfVxlzMhWaakwOZFCMNE1HPR3V4qG5OnUU/LsgvnifcvZ1z8EQeb4hjGxSgIAyvFI0bfINTTvUDL87IVdeAcup3AcIoCDbQn5X7gtQmDZS1G8DxrNOJMK0diyeKeeR/B6ZliCrs1RD/WIoR/mxo/EhnwarVxoZzYXTwaQ/O/q2VT/GWyhmF2S3YWBAB+HNcLWtPCws91/2ouCpUYjjE6sHHdkYQEdvxbi6JR0AOIRUPecizaMnJXpPuPXl4A8Eu8rplBj4ii8fNmayBgva2QWXC/goF/eanv0z8EVrDqhmZVBjSaUROyM08cm7XUR/9x2CI/hjgB9fSHXoFk9OxwOl4vO7yz7SPJNoT/FeWClvJuj7ajTwcyqhwuWDkf+wbxk12NchXFE/AuiRPvwvLA8s
I0hDfijojURAFyYhiIklHZ+XM2HE2CvuoMBXvCYjMHrtcZz+TiOcq1WEGDhI5ADtQCkupxXSkYxOUuZMIMp8Kq1f2xRPG2KAspA4Ww9Ef42X1gVt/NIgF1kkngLzmf1D5g7nO6pIFjBAwJRDuZXg5dAGR0FAL44YBy4828KkcLHvl1p0Dv/4wXylQTOVnA+Hi48oUEH3IhdEmxGTBqArKZfsnWqV6/dzCN6+koWI+QkBn5z599fXNe2C1df5ym/Ar1OChWb4KYLoyAhs0D6XZNCdAdFAB0HBvUVTxPBMMIkmAxFW5Nm82E7iSz87fCVtck+Zav2DFQlaCGzS6f25e6iJFpGywFbZTEjlRWu+eRrbTIjhYubYT9gPM9+ZFpkRPjHe2Sr5e93NTzq3Ow5m/uwfP7brRz0VAJHaDtq0Y8PaHqIfDdvbDrulmYr/6RjSsm7LEw7YCuXfjfiO0z8T1VJPKURgeCswoHFoAtxIeu/n2IVaSofP24byEYKmuGI4k7ABU1xEMr0PiOEr1Q+99VBpUVylGk9Hdg2ogxJ9zlJLa7+FS/cm7m3KYccK3jfPPkR61yCAb3Q8hWSJfWT5FxQLszuDxpWOuQWX3ucqJYnZFVcqFHzZ9/CJ3zM26L9yQ+rCVZXdexGCiNdPDNsqi+qtMY0SRSd/bSVEoTGQTjl0fWDvvuUl+hG+EDmxaMkpYMHAjp+j4X0IhsBY8xqfwElKt43ok+vyfwmjOP1TKJbM/C4qu19RB/LPyvrFWAX6CcuaNztAT9wQkpzF8m+LYOZocZDXgXMIGP4PZJL4yz6SjOSypN9Yobrk1XVUBuNNCYNWtVef7+h0cpfjQvGLwRt758wcuI0W10n/OeihVtbsQ6h8niBaJMP2tImrVEWKt2G1u9/pxx8I1K79fNuZpU2FkLKPkb2fQPCFAyWMnd5xE4UV1sVSAWbsX9wVpeFRf0GM3kt8g/gh3nuKbL1AObDqziFNSjqmjO/xlpDv8eSI5/uPvKT9/0gTSeyDU4u8IHQ9I+GMKDXo3M/poCmj/UIhf3T4BW7EP+bnFdQgs9YzNxsk/ggy57QmYr1rd5cHN+mT0xcOJHUxiJbt7Pk7DwMZCpr8MmXML6cyQj3v/1NsAFWMFm6NYguMkjqbUHuwEkELVenz1s8jeL5evuSgQ0/RY3NEj8MwdciJxhJmyFeYLw33b6bvw5+PAiwhluFPyGzfEsCo7E/01zthsxcqVAPc7sNEdgXREppVDwePwUjCid5nsNZnoToGXdT5tNTD3yI0swurd+OgyTO7R74u1wZArlQKQ5e5aJRugBwAjkvOMUalG+FH5rEfxgf7sE+yRzbMTxaz6F0hdKITrJtDwC2JmVf6B+2vgMJQnC0lF31iZZKzjtvWLnizp7EhRGQkDgK9BCNu/x3WjS8woaxZHsTCCk3OCjz2+muAXYxFL07HRKbMXekR3/u3wcFGzygO5cmGB8B8HcCMHupYJqM2UuxPhwZqynB7yxY/56XOmXVd/B0hVvyNhD9QxG8AcXwtWMSXIYw+OPT/Sb5eeZ1nnFR5HRYWTQPguFQXyps8lGIvH6V15fapU8EJQkkwE/60lZ/6ZlblPEC4biy7n+csVANG7cN786KfpKo6uoyyDwOmYGHUvqq0Bk1L6p5hwoMleOUDZFyW3YjU/5CxmMf2+GJqYukYxGxZYhGSp/ddtzBHZCIYkntAW/5RBMlt3qOHjGrIO7ygzhM6yS5TxgccRnYJRxwN0PNef+RqbPywsdq4wNm2IwDumD309LzOQ/o4tKy50fTWYgeQOp9JoufpOvB8Jyl5hpr6TTevYNt8Ufyy034yAzshBL81wK9ISe3zeDIg9Fs/1gdVR6iWL6H5Tfwhr8+ZWGd5t5j8KE/Chw7Ptnhdnp9wjB5CR85cAf/YPs0N7LgGIEKUm1UH/WIUwNFfFPZPgr2BjDOiu9XrUGKyarwhsGHPTHl+bfLoiYbLKpgY+d6RoIJnRJIa
LSqiRjHUdPMKcf8a7umxKdZpemqQcMofNGWb6S8e89pS9wv1TRn7EyCcHgUnKgHP3YbYU6TYrjWOj5wf9K0aCdlG0Px5Av80YvpJ97Lgh6kD9Aj9+Xm9t2ORJSzteSQJNRVOrc6NU8DYmlc5cazXsIIWUsB8i8VHOqg5DJqkc/TxTb20Re5Fn7/9tDRefYsfeLeIA2Mpr+S9XBT9tUJH+XdRil+eJ/IHPX0QNHOokXrGaJmbVQxejQ615pTQvhereesmFAoYlhcUm6yxET+GXF8Ysvu6bBZVslDsVGTA7MN8wEOLlHfyPLU9NXSRAs8a6PXi1P3KIdQmFlA8mdO5/iI1C6gILsGzMhdc4sooZPAM0mwyv+m4i/tG3hTq348qOsKKNSHGxlN8hsH1O5SgKNeAlIqygD2w1tBXRaLjuqo4uwz/3p+UsQdHDL9uBguQbDyMC7/2qWPpapF4+DmwHLLetA0MNNS72lCNP62TRLVoj4x9a/jDPnhp9gEi005aWfahJw+68CmVej1G38HEfpv9t6oONP/b1MAvExfn5C5QPcHa2AAhTXXf38LkIY6wGPE0ibKiXOOIQHfi+nreY/Ao04yCqRJye3AC+mvs/attZAVvMNcFITBbiZM0j7Cr987YnLkpCLkoDdv4vMRaKFlD01jBNODEwDXAg3gY8gfXvsZQXwBZOWgmBxLK4/4I319vnhPsvE0TGoOBqyiA8lIHoa1cBQtG9fi/zXNPw3J2/GTWYuTvFXGJORRigs+BHJuf4AYl/ln1Hcq66Y+3d228/FzpLUF2S8IoJtOP7mn+SG2e8eGOmRQ8meRlozJTcSdb3bs/WBJnXmCKbvulND9Hh33FfSznzFVu9u43VYmePPWHem9EV7CHsMr1IbKtOh0ocfJi68XPm9BHYzv9fyDdCmb/VObe8p2rhUlmU8pTd2tpxF7Iomz893Bubmqqqin4BhkppLfoQhANJ9tcZndb6dt+BkT0h/CcyFOVUOIr5Slu/TJkVJ/g57XCtX6ayoi4CV//F/19L6cBomgV9vGME5wAMpsO8FL7X9hBVh1m4JFZl08MCc9+JKz5IgptMSfmvYAD2Ei89efZXq2BurTgjna/CsJfRpSBOv02+LBHMChd7D6c1C9gu18hTCBvvMOI19aYbnv4TwFE+j4YcihGh6FplvICy/s64I9iJZ/UUUbzNJSkaXK3AqVWe/XWIpK20TQqSDb3DvhC/VGF+gqJm0s+WBc4USx6MWzZ6GgbnuvkmvvBkUUhw2T+4GCeEX+ekdzqwxmFdqI6tJ5yC+dLB2fFYOG3yuBb8k/3DpwRkQJoPRXbbsg2VtDIMmzNOlIxHPqPszr1SluNqEP2HDH6cljpZEWmpLS4iOwmzzjZTLj7AzOY2QVAjf5TIWjCx2AcMSE/791WQXx1iusgav/x9779UDOZNsB/6aC+w+aEBXNI/03nu+LOi9t8Vfv2T1N3euNJIg7Gq1EDCNRjc9WZmREedERkSeaI9ka3F1f9Utl3N+hb6BbTQloetO/FVY8K0zRxlMj/PidlZ3PnyvL3nE+CdWg6Na7XXh3mZCBY8F8SuocCOgnIALg7jq5/PThNuhOPe9fCZFmwFcIXKMbhPOzdV9WKIFvo/XExQw5ZAAF19YPBa9EOfnc0/cx4zKlYTtEMdhGPO6yVFiOT9LX2F7Ih8bA9V171cqvANhDC1BpsPKMdcpwZ094LDYb6J1M5Vf7M3WKwnAzCyIbfK6Za16942MHe4bJin37T7t6YXdxP6JQQiNPOheA6JNQHyneTAqShldpJC7UHJYACH9WSA85ykU2RU4I2uHwG7M6p7u43Y9JxTFjr3WNj/4t36haHhBr5qrblHL+YNFv5euPw2wt2EdVFecZ2qwcYmRZ2M6Xn6n5qxWWrWayBg3VbyBN+MgDIL3+o0JZFgQiO0Bf5QxsGyTHxDQSvDED+4VJDWaKyV0I7kGtWmmJErivDk/YtprXteRXQJQwn
0I6OsTxSMM0K8sKbt332qoQEfCturzzn+Bnv9eT5VCHeOuRO1JQ+slYug5u8BJv5W9ohyQlxHIVJzZ9aqONKzB+DM8RixIQ8IX1OSi6d36jBBIRAaU0R0RYWW2HNw7ibD5yqPlUfSs2NSPp689JTDRb/jmjPeDJ8wXQU1OWZ16XQYM87Y54MAonh9/r5ogZxAGtd5rz/ObbBv9NUBRG7526a1P1PpYSOaxpZ1TopZEMySNzOgxq/nQqJ6XoVS76vbxTohO1gW/AHL4l8OOhzd0xOtwTfqJtQ3UTBGoPkjImrWpO/cOX5Br/Eq6yuKNjkVf2pDYcIoGAyRob6278E1i4swOtgCXHvqEZIdCzNyDgARcyzGICI463CNmAvKIrpRc5HCLbHvSzeZtT51upV8bk8zOdjoGWYYOeCFCOTM8foQVxgcjhWpg1VPII+1fi1X56jDyIUUxItF+yW1vMgzlN+I0+h7DKlSSCSdtfIWw2+qANFSKSbUxDynMPWkyKLO0cXqPUIWQ+1ZKAoLfX4XeATNQBtQAaFbCtAkNH+9efokhY7JY6ih9Ln99WXJOzsck7iZ2l1SWkFNvLWYbVrU6xejcxusUu4lW7nk0W0xksMTHzKQv3B3TtqxT3mEam2tXV6f9zysZDeErxjmLTOFiq1cDAw5KIkpGAPsz8PEJNwVqtyCdtR6lIc5B1hcl6btgh3xLCSeY7AiMYPPlavDn+hlZUJ6luZydm3zmd9OzYXbpsf1bfdZmuba2m4NLmntY0Fdw3pVBucpRZd8jBGv10XPo6/GNHv8I+5K9UxAtViToOCU77H6vNDv6w8oAiYGGPA5WIIKwvg1SK647RaWiLyTA5tY9vVzPb0w2VwTj4KD8Z9oCAUdiFVgCvoHwyemXERXRdliy+NGa2LLNwyzYsLX5Z+jBFuWVUMxQl+o6FkUucQt4ZNUr4PZprhJGsA4JxAf2tQXwSxisjpgJqnUfDpkisqZr1sCx2TGNRz+pGybIe/f9uYvXvtWiIdqBPj1b6J/KeGcPNMu5DwIMHoF+bGLTXAV7Q0hxHjdI62HMNOmEq9zIPAArWerG7C39NHUJftr8r7lzyVKL1tQLf8WmOGtvoMvcAa1/hg2dR/0vjz0SfRE2lN7FTS3mmpFmjquqvdn2OLpVe7hmZqPOG4xLx8EEsUEn8/U+R/Kbq0V9sVKurAhk+jCvtk+hRuoZflMaj+95nAl4Lxl93cErFALS2eBDQyza6FSrMk+2EQV3vJK7DaJ0pslP4psWTWkPsYHAamAsprUZO+WEWZ6tfCVRxN7XqyLr9v1q1P4tiZayrkGj9uhIH/giXovu3c7SJbPgZf0MXY9of4Bt9CKU9SZkYWqXA08mI4P0+2Wzk96ijY1OuwmyZgDXylXJZQk0OnpsdQNdCBILsJXsMX9BOjA43K70hyAUE0Wghx/ncBwi0hEt95cXc03BmEEUyNOyWz81LB66WbpPI4c51fobHreHOOL7heJ3TPih4JADabjz53FUSbgZXufjiN+h6vqpSgAjq7Q8nDz0fEb3uERaURHxq57nV9teZP0Q2ZxMn44qijm5b2Jy5+35khycNYdrhW/m3nE5Od6JLZVuUQ6yQkhlLccqDHR4fFstCK0tmcFiE+w1iKVV8AKxY4xlzhIbcI6lDDSdiLTzGj2V+cL7/qtP5ReasX4OdlzSDRxKud7xUVn3nEEt9itJ9alfLIF9y0hnZnOn2igAAQJn2ysjcOeDQdtUiJgjFpNcf0aHrWMnxxkRxCJ03F2SLDx/U8b1/oTh5/p05wTSOCXKVELgF3yKjF3f7ypxFOlGowZn31m2BACyBZsLegxBVkOD/H5Bv/nH0B4uDB3s54QqW7dGwfB7LboglhMj7b52k0nK0rXh81WggdkjKA9+EEKHu9RoZekDZs1D7XDQqOYepaEeFfNFMLE+KDDM9nraAFp9J8AQwDI3mxYaD7emzFhxNGOpborAOGTMafparyaQ2i
UJojUz0y5d9aJH/xPPSLu91erPYKkF/q2ML/zWacqKd9ZtI2AgLgY0Thah/kjco2s9ej3jSs5WuNitgjs/w/ySuwcBVib3feelKTX+fgstP6lRMUFGCy5cSLqvHyk33bdWuJRejkDtUM+jCnI6RVpKg/GzuLz+wbP/7P6vACVoEUUcO96+YyD06d6Tg8D81tf5rrpf7PJQ4hXBMtM4XGPe3lHV8wjJ+6T12Fa9dOEXvSwkEdQL3SrNFPp1zTq0SDHP6KMwgCPHMCpVZA5EQ6KUKmuH6oNb2PfY+kGZK40TaxKf7LcazBuwZ7ScZ3fLREIaswGPhv4O6yz3c53b4nTyBrt2FBQ8nMPWleWAA332CE3JDBs/BuwbdeLF/SpXDEZuA1YQZZewNDlnct1nlaspGqPCGxYqNigeY/bUz0UySg6GOUjPuspmfMvcP78eGXm1vUXEvLBAQ8rL4e0F2UkOfJcUpoajlDxaiRMqRSoCYBQrVIx1GQ8VthCcIgFCBECw9DKa593hWpdrCnWLh5kiN26KnBX6ZNcGPk7iMiLqFz3M8LAHO9hvtXi35KkTzCzwtDNoSDTofjr5NpwHs8KXLI2/8ORuU4ts8l2CW1Lmq3Jle8qcIwpOVGNy5QTAux6p3WufMD+ir+yqUG3abk6x6+v9Ew3/xD4C34XhMyxv66NzQ4XYnk0L5UaookQZGrc9nXyeYhzKnlPHEySJ2W5JC/154JO1aVQ0WG2sMJ9b9RNTvricd+JPYIoQQ1XrOX2YEPAVF7aCBF4+wS+Q8khexvF+/0ZmujoGDo1x6nEkP9Yuv5PJPbKgzFrHMmXnQPYtXh8Kr312sMHRL7qTx6Eg4sZSfbzxTCVruegm6GcfQbgku4rGGQ0wmFuqw1qBCLoi3MfoiA2v3QNqhV2yN70Jd7c9D5tlwGbsqgZ6yDqMbotmyzpAcaxkq+vyQMZ2/4R/nCwhgQMjuIOQEveSjXwczRXN4COCz1tGMbomB3mMHDpdthDNZ0dlL0BH3StGv6uHmvx4XHeQkBRvlG8Hj5fLrVq+VBIsUbPAo3HFu3U3Qi2LfuRGiG64Mt1cL9b9hNMv8A5VFJQuQnyE2+XBcigzlz/DAX55TlOcL/4OKVwMGl58rPZttvMlleqWI5gzCpI0QXK9ljPvBsVEpoePwrYWkO7F5+NeJm5xr0bV9iZazhUM/NaSMWnVfI0+le1Z7Pp23N3zEUWwrCYs3VlUGtLOuRHgMUC+/HZeAJCNHdFmqk4h0bNrAUtgVhVjka9Ddnxu7X0akATfR3RWIKYJA4R/yxsfsgIToqIM+/mZbnPat024VAc1heRe4C0wTw7kLwRfH9TWF5DI+kg0aUgKtJI1sWNA9o/Bk+htkEJW4WnfNk3ScwLbrY95pS3YYU/ZNSW29r4S7YsZGL3etRVjc1asPUeo01xkv8z8iD9n8ecbIrxBuc2mziMrh2gJlDBJbMfEv1TzWmwASEBXs3aA6cR1cIwBxlNByq6+6CyhtxZLs3wFdvs5l4dJO6K4MzFdtSpQwfjimrRcxxnMtlPzFgjk027EKufhM9dId9zDJbZI5sQQMMBl4mOx03QxbBs2AgCWgUjyaQjd9BRDi1v5w7PFNuW0RYkda6Mt9mZr2Uymx0iQkkjUiKwe1rsAGxBJVPHoxOfqCcMRQ3OSsE4lmL6fu6Xd0LnlvJBbVdAKfD09FeomhI5egg+aSMI04FlV68Jw73Lt+kLWbzghh8zVWQKx3J9CmgK1gbd07rsfPUg5DOHxkTDgNAe09RTT4kDDHDP1XjyspgwbJKFKpwlFgne5/WtYK4uPWgec4AuGsRC+ISI69B5evmN+yIdWzcXCE9aFXT9AO2d5ntPE9ygfNK/7Al7QKAJUpagvbd91AGSWxjLtqkV20VbQ38VUKbPUg+9ojoUXz9frmdF5xNWFZjKtU5IBFkju1wfNuzeaCEkPuGuZVK4oqvEO0982MQ3sV7nwmGqvzkzr1h7NS8
7qsqRwQdE8B07LOEis40TSobxeOFL86KIsXjENM19wpfMYZUQa/NDDXBJ8S4UrtCpuagzB8FgeJLWYDKQgw5BJ6kuIYpXZIF+0SCkOmX+IbRwIFM4xQrOdfYLIFCdr1sc5LE1IMnbEkTesw3wrYHB0laF+xkyZ+kZRDmPXLguIb996g+uqspoJ4+V2b5tB1ChLM2XA/1Vk5R7ps4VAkPM5BmtwHcgvTuJqe1FHcyVoBX+3i9tUuBwm7ntu6BsUMOGxHuqtGyuHUhK64E0lqhKEe7pufCYSJo1v3T7q61hwINXzQhzRgJ/xmj/ajMNe8j99d8CAgHXUzt8aG0L4RtxTE1VffSYRIX4AH7FmeiJaqKvFqarE1Xfil1Ieff5vf2LZMUybNLSvbvW0Pnb5ikh/XYd1fqNTx2eBjFX9OkUfY7X4hbW9EvM2ZpsWZq+GDaFpqktFCmX3PskJrwVEEnYdDNcF4Dx6K12tS7hF5s7p8mZHo43OFwOwjTQVBD7qLovs1Wg/ShIOkyQaxWqcCK86rbFtfPm7pfCdiG9BFoocOcYZ8WBjHMtFEGJG8k3KIBdNe4fu/BYpAHfL2uZRXZsXu53/jJVV9ew6qDkbUNEDjtMFhGn020qiJ2rLWhJfUrGwkA+a7+41669a7tkxyy+eMJXGCIdWVvShdpvpEygspK2njz/TMvCtKIRB07GQ71PXVOKsR1dStCZjtwVEiSQ1ZPrUsx0qwvZkw7nSfV2TD1R3EMr+LadlIdYH2ewdjAKG6SlvSzBg783jgiuymepcJgGx2sS8rKx7itPaKX0TWtpDk6csNp0vTYZVMznB1Do0rYCYE6fi2kIoLL0BEgEtKHN4mCf9Ti1wI/iy+Rb31XeNFc46BvEMDAvOtmL3VBu1YTLokK/aewpLnjBNeoxQxeag81kKeEs9H5g/r4SarKMHKal2M27qnWFpSF+gIy0CNlgvln5lXcQ8t4536JDvK1/f14ZeTGIlbBIqNVSjovZI+uwt60bVOiGMu3DJny1BJtYirCn9StvwvDVf21/kKB4h7svRtX7P5DTPwjIDZQnBYKoVncJ6c0szSxrX77iyjRJSNFRWqTw4s8riN9niyx27K0sFCskuZov2ZrPUX39bDjylOt2HrjNzuZrq62Y0zBm1uVsxFA75TT3JiYq1OqcBiEqwniWmfJV7gDt27mc8ffvLKAgOOMpw6SBNXQ97OSjRbq1VfG6jKJrCoLJgRK6XlIoGJxsoLltl8vvwN5u2JPfNk2crmmMM3SkL89sC40QGCdQZmo+io7TN1ztG0yL3wVcz2eHZBX28u2mp8y2bynKhVRDu8dRrfqDYoWG/MYWvBJ1vQXcu19PVCHZWIsrxEBfC6WRGjVud/2TKyn5qubpbujP3jHTZSjhjBiaMWO2sgPzkv9xEKZ/p4uEcy6S3paadcQCHGLkRTs4n4DIczc/aB8aQPybqsQuQf0Lb6csmtEqK3us2Jw4J6CJjz1VnkMKlZ5VoHPvL9OMYKya7u+1fQnH2McNQ+GPzq/PiIMO8n69vQ2j4Fm2EkyXQCfyj+jywTe/BD6Um6XOrMPT8RcWdS5evs3jXtsV0Xze6uroY6ym1iYh0JPIMieDdLgyOlXy4676TAVlCe4Z31WuNK2dPosUzrLxahY1UYLfLi+fHARnf3D7eHDkaPTZR32TrpFuZCyypdw+1Vz6nlfWTR+CtogVbquMbm6Elj5vuXoHvfCMlWCyCV/a3Zc1kfScP6uorrRLkzJwhikuIREwq+sFsC24ui3wSt+5h8E4JJiY30dyDtKVylx3rUd5C9oid2iQBqhFNTQyNzcp8w6YhOwXSFsoI9lFjwnVCsZuDAGOqT/DqzD14aN3odsmYj/MvxCTGCg+LLvT+TYZwCoou8WOm38QSFSKxUJvk8bJEFcQtpkyWLLFBZ2zgZPoWhI+GL/FG7hmSeXdqMLgLAFu2vhXDqKEnFH0e6hfX/KmtxZ0qCEScf+WxKJ
2hijOMJ2kXylYtaXXrrCsX/Iu3YYp3ruCMUo3xkIAQslgGDuFqN0l6HXThI2HzuCVXtdn1AoW3WrElxwCay9cFKTNSUc2qd3gA8cae3pt/xQKxfNDwnSgDjjvUrjJqlE9w3r2r3AI+y9T1Fvh1zQsFXiu6/vneIWYXjp/ukSS/FrCMjYrpvl3L/EWvvSOtgzmgv8Wn9h2c3ri84hcL8yqpWAERLIjhXH0xebIGp6pGNcMtKF8P6zfHQzEQ5zaqadpkg7P0YCKnA220AM4V9TN6gKvLfwck4ErB4HF1SX7TsFrxUKP8VCO1ZnPs7ppBOZmvif/8HlWFMFeUrmlE8RloPnB4TS8SrudJxdQEIPFajJjvq/2Zo01Et9YafYszO+EjkVT9SELdwaAbAgoqCR2m39pYxsPy2oV+uPBXyNGqLHgeyeKsHFxwwTXP2D/C9c30Z1yy0buMNPWGgYD5jQfxG7VHGl4Odp35x6261ugbpdMLlFa4xpj5AFKaZrV16qkQDweKrnykou8kQ/q82KFRDXur9gRwR3DCnB2bf+e6Tkpz36+3e1zA3om4qiHCv76eqMfUyfIrB9WMdTxgTzXFCY0pHui9KDNjM7Nv6MpU6AbqrrVktZf5r0OqECqNG7QSjSx5b1kmKoMTZ8fdw14TkJRryB+CZYLxAHBhfMM4/6xwdp+qdTbcB0+wvOWWYIWX4gPjjhlZYlkP9/mnCDPEywusGE18+VKgXZQJBLD5jiqzSTqX1MBqHtaDC/G+9+CYkZ5fnJtUpaTTtp/6OcfUfna+AoSKDFOb18WT3tMDuszZgt18rLNi0cJj4g/KBW40Ssjl6dbHlCFv3JyKY7p+slS/3zL8kSUiD5ZXQXP7M5bij5753yMOjnthvrtteMY7ZWWERSx+pk6B4VlpcrrOTUxs57Il4wWkC/5rmdwMqJAFFqgQod83ZVDg6i01TtLUyeR+HbHekJQuhiEtddr2KkWBlxZNkhz4GNZFRbc7mxrZMALBo2669rtCoggDLsUdXJjcXVIKO03aVFy5CTvZ82ZQtK761ZXNALbGHgqvpHVkHxeoqnA9dsc4G5jIfH6KgQhlNrexXHBTXTpgo1oM+Ewc9FIZjangcoViFSDAr47rPZPWOXsIqpORPftUFXuxj85OV/Q83YP8mFB5kL1PFm/RIes4NckJz8tjT5a5Yni5e9RfndUKDWdmXtBMGXpgp71g4HDow6lZ8t5MoVMzZKM/uWKesFUGnfKwfGMV6V6/6S+ShHRVcfDkr9IpBz9M6og2QVE6/mWIr3auQhsWS+tkKBJN0ctl38Ru48JgpBtEbrIeU3RaibKUTctwFE1I0+WjVm6TfEDT39yhvyvH0gNCDP7yKuE2lyl7hmqVSVvZq6F5ByXgkw6Zgsr3Bi/MOYDZtgdPZw0joY6+BM/RKzOm6DO7Hm2GLm1i6382KtZTjTNsWesBGw8/K0c/UCpTMCHGwF45gvhwioIDQL4N9GHj7dxceHDVkWuOA1/LcbUEmBrfxTcqJDEx3CgXmXbsN4AlG2CtBZlhjl3D9Jrm4/7Kft8x2pqlRV48Ize9ksAeEsXahBfoH/UCi1zNzjJAIH77/eB6HocE5it6bK3I6lMfiRG3beM3dPw6dOuS0OaKUvGhFBUPGXzhSl6c8Pu4kXG58smKmITi3MhOFO+IPHCHaPD0vh/LLZKm9Gj01Y/mCME9gGUt8z2zX0UXMCdcNZ3lE0pj7rv/FrBo4Mo/aPrT12TS3238jF1+LjguiZZsMo8SUgEwnRSUQx6m0h5N35h3U5jH0Kp4Dewsf6KwRTKqi1DzblBv+S7KHt+J5NJP9Um0PkKyBxUYFcxx9gMT2j7akYO7rUixkZMBFRm+K3xDORfI8smJaPbuIZadNbRZHwubIBJ/HXqhpcMr7XmWuynrm/rYrLX+V9VezFK3psA+CkRqXT5/wV/9J5SB09jQlVXo+0yBUPp6zDHztgO0JI6CWC/7CLBWMJ
IAUWlJ08usahiTIYyTe9e3CN2DboYrN+Z8+EURourFjFirXNMuMp/KzFV+nBiSN73asraccct8rCHpdE3z9Go76EVSwx51JOSvAR4s1qFCel8+X9nL1df/4en3pyYJovnamCis5yWqvjioZx2UJ4tuz677bbjv1l9c3Che2T9A5jC5zBMXXVR2KGKHbFdc3zmBOdxq9njYcd/xz+GAcoMY2bXaM1m8KR5+3zFYjsYox8NF480WIqe01oMmQjOaLDWuC9ckTLTfCfkKjHeza/Z6GbqhhVJte3gKYdPu6/uz5a4DDaq2qoiSWd+rR1NSRGdlEn44M8e+I/H+jOlJXqMLSMerR9rOllPShQWKcLMMJh3TL46jAElcZDZS8o28P9uPTn/Kr8ixSAjuY/hJ1bc2IPWd37xC6P7lHIPdC8U7LnXyIz4OBVBegPNhMWkbP7FKy6LsveONQ0c7FLKHDby9bT1q0b2c06exPcyQS/XoW091Q22vjwTVU2kRSYbkwLJcGgrh9tVDdXFgb64CEKlSDrsYUHpzDi7+PWpZSI5pcs5y9rKO3iAGhBtRo5mr30oTDUNaKcXPJbG4mXyyFPkROBmgHuNE2blsfvBIYIdBweRoBbtJUeEsDXAys8mSW9eAO9eZhwKRA3l6iLnQXKBeWCZeQ91JKYN3OU5ZaUQUnedk1pd0lSGVcZq8VKYdznHcxdrCJhhh4L+V9cLGyxP3GQc7SGoebp56zq+sDRTl7e//sDpNOstlnSM/R+yZF6id57at73wmDlOunprrFwr31+m8M1C7YGLJkzj3WJJIVMnrFDBqEhxkmvVpOY8jCKlO9NpZeqm0e17vfYeFT2CIG1VPitNsL3XRfaLozVJOY6wRqR1UKX5xQt8GrQVweP5AW2cJXe5p8B5AOvke8S/0Z6TzhzC8UkgliM/9IkvXxMr9U0m4/Ff4DEvweuNXCR0N15dISaeCqSKLvg3cLDav1paWt9bI69Rhr3HATZ6Muy80vPmqtrfPxComJnQXmy6pD0P6mOxPecfUTnBp/DW1jQjXXIn3cG96Hh1569RRyXC2IBgngM58cy3H1+/P6pgk4TCoERxhRbxtj0fkn2HdkAMXCpVU9oaiAdaWtDNUGpaol/lvBhjfL9YTXMBxa3OzXCtXQJsOM8exGmBBZ1VbfrWIWwvODHOU0rUe7/zhwM9gEkcQVJnGc+n+U0SAR3iGP8WXPvGPWkniPc+5uQ0xJOS/2hS8I0qKmPmGV/QhaLfpzM5iDdy0LDUdrOHH+3Dy8zZuis3Y1b9XfFCtqQ+mA9r25IuyVTvoQhosUAl7LTZZqF6dryGnSerSR6a8b2inbMcLgeks39ywQyu2PXWLJDOsKM3x8Ej8lVtQvxVqObW1XSw9wjTh+m90K/ehrjz1dX5e4f3RHpWJ8liuEPqbJk4FWla9lZ+oTZMerRcwmrEfMJKiKHp7XcSdCOevvp7ZxKtnueHlIEkubjM8tLYuXxFPF8jr8sGFX2jmQSIP7Mho82IxX7+sgQDDqLle1xrFbdtsiOampV9+cPKBZaKHUINXVUYIyPoNtPkEgvdqvfe/MIBM9YFgxfftk5fH+AuvPBOvKWYLtWf9KjTfmADKkn8GgOyz9x1xh2JgAm5E7Cy2B3oiDw4wP1uvrMdfhAmdmBNI2bH5bzEyO686VB3N/ZdBl5cZRwH40sm0pRiiVq/9FSQwUz740W7HgNq9C6jgZQZPW7EAnPLlAqBaIiEdi0NvuGazpXt9FgnfQ5LKyb2o6VlVObPtmZQgzA/yWB4uBl5fw8/HQDveOtzN7QhzfBYIEo6w82lgD2JqkPF72noR5NIsi9s3+c5OXKlZ5ZoXTPcYb3YKrEmcVBsG9TcsloJmLA7XjzRykNmjbuFesPcrTqVcjBQqs8wBuZFxsbJYZBxf03k/j4HTba1TyxhVet2ikxzdhwAPcQbyKFRyzYlTQ7RA7U2/XAf2wBwuT7DrWQbNsV0YCy9s46
z66oz2NLZn+kesWZ9fISKuczxDJdKWdmhXqpWK1rFw6+8DbmAHDXq0bYfvwwsZHql2E+yh9oiRI3JkgpcYy584taHaAFTQbOJOy4dv5423+dV6Rn8BAH0Nd0qRi1i++Xf/y2QtOuPn285ex4awlsrVRUc926apnz3MsbvNAIz3jGT7ZHYXpk2Mz8dUbCMg03zCsOn8lRMGlAHXmuOlFIGIhTBIY5K2rG4pfysfcBPLSamLewMLVnt66A7wRilFvxXkInm5jd43gi8AZZpzLxhR2JCD0C8uH4GQ+SjyYpsfkVCHQ8la9jQELEDSADsUy55S5pW+YqY+hxVCJHUQpQZJIuwIx8955iXwsVZ9dcfKm0GRpwV7P3Qkue8Kk+YsL+1qOZECpJazaBcqaGlOdyvmdd94lhz7Ih4ZshyR22n1TCedVSgkbg6YPCPUD7lrg16mDavzEfJtxuZoTG+5XZIvyYk9c7qveSlVdyX+wnvdkQioOx50pwc3Q8tShQ7iuR4GLb6gBbzrEQbytTvK5rKt80i/Rt4B4sANgAFZCz4jPergSdSwaSreogydoAJikPozRbpWTbIxKl63NPPvgq9U7MPlr1ZeRCWexj5ClYV3Vms0dEYzM/J4g3ohXhNsBahSWAKv3Ctw6bl2F4kQrCB6q8Xjbg5b88lVHQhfurOixIGHD67j6D/cIjdwIVrTZtgmx8FgT0lOilNteaxDHu0lM+pwUwVGP776ESibvq40WQM+mfXYE696TXQ5Bx5wZ/yDXp2BrY3t6L3qRjAUVz5vFLjV4lDofnExNgPRPmFVtu0EFLXIiBFPHNsPJu5Yehyyl2HRXhQADSQZODf+q4hU/mvXp0GBnnUbUm8ukQyymDnSYcKzHTxYnwWtogc23/TGoYsjrxkB4oSqQYGWnFbjbIwyHicx5KBjFRbRd2hBEqWyw5armqGesaUm8Vu1xiox32LFHp+9/3qL7XotkUMaRKzmj8nFGvaFRg+N1ZXRVQxFrNMsoTSiKBdRrhrhA5dQSfKEK1vOV7Y79r7zUKzYYfidhcqEPSuMlJkbw+XUhwe9zzTxzMDsN1dgiKIvsNwNwGrLIwzxaGbm5IgiiQGC1sxpRjFYF2pUnFafXwQ8q0i68WJcHZjRr8VdbxoddRfy+1xfxvwtN5ow/K1VPj2jLkslffzVnAn7V/KCaH5BHCu1Pb3dr+j4+DFwTfsmUBQhHHIrNrBrWdesIaWaBGfvRAfF1mnppatfS7D/MUSCJCUnNSn3zT4pRLdHFq1MwxbgRpebo8doaUxYsqqkxGNw7UJz+IKFZkW6HUtXvdEdTWXMc4Ef8vd+WvsC4CJL6Pdp35s7zmS/iuqk3wYcn4EtM4oTvYP0AoBqEbJVq2OXyUMo7nYwfWeyJi8ndiwwgdsZrvjVAfBHXucz27QHfTtt5RUOtk3d662rXrRPBdgGiZgHAFYSr/5h0A+7TyGng844n9W3jZAgC/Scoz9t8VzlmppeJ0jcxnHM1TzC9r9cARR4WGl7oqGDEUJjGLclSKYUFAa14CsNUtbIiTfSlUXThkyXpqBALQgD8OfAami6Mt1qkx0u84Fm5cRYRtOgBI5xb3b7xuk/b7i+r8bN/RzdcR1E3ny8jsR+P7pGez8oXOGAL4lUzDhdhdg1v14ggTMU5C8eReeme9Bk/nZ98ajchjd8jO3rPXuQYl2KLCaTtjOVtCHOWWW9ZBkByZuycFmET8YEp9LjNjbBvWi+DMQBkcqGOIghWDYyMJmfzp0wpGAqOK958FPJWebXHRYE+NZo3tDG+tqC82uotMoX7/xAB8XFCvp6UnSKvQee/pO7GICUrMezo5iNBzeA29OaML6a3Ax+td8aQi5naX55erZnT64eKCwhoZpKOSAYmjD8TqNVy4htEppuUgKNltFWbYh0Ek5kREsdKw/KbwrjtrwRRA6yLUup/moHnUzRfLiD+3m83gmMTuGNgNmsXuEeaylqKf40ZZZv0KFAs4
f1cOS2F5W75zDUyNCFa7YfQIUcKmNxdcf12VrtZ9m/n8Q60ev3/yx9ACKNiCSrC8FGen9w+0Mg9Tf1j3fYdjY49BMdJaj9to2lsY2QtY4gQonUAtgndbcYRHAIwqa5B1/2umYY2odp/snDTJe2fChBt6rWmZIIIdE4mYP9B6sFqPJOmwkoiXIfWdLyb6QbBd6/yaFJwPb5PAHDUhjC8aCUN2AD+3x/Md8gh8RwJnlfL3HqDRhQtr0091oEnY6l5YMMRE9EKZRcBHqzVIFTiZ9PNJ8B39WHjgCldz2wt1rqJTeWo5v1poyQqbq36+T+1jlDH8ZD4XUeKjuGTrlCxph8lSTgcrVJjsTOScBdJncw83nzQbk1xoQnhgWHMyEJBA+wohFU5sOG0LxsdV4ULtB0MNIAG7oE/DV378iRR/lVsYBcVTP/ADYlZGwhkhIGwvlxV3+Jewpk2awn5bKfk6YfvYH6j10vextd74G9Jr/F1ctDzZCWY1tZXxYBb2VDvnMqYbObaEPbloEIl7hwLjYxQYBhw+L9gpJh523UewnHB3nix/x5rMIKw9rFOqQBCR7cw0H8xX8a1F8WOcYWAZOtPpswEwm7GSCOAARRr0bMca0F0G63+8zRujeqVst/1WUGEz9L5h21Xa4MlEmefdhqbj4v3Zm2g2+J3TGPxoP3CMK3knkr8qMIQMoGkg2/xDh24R/bj4qkx8h6DH1+HgwrZXeQOpSYQ2o0UTAePuaYtx4w5vui/rSYpPkNyLnh8BEGn2ikk+/EAB48RVQZ7bqWI+dbb5RKu6znLxvXc0uZkLwVCbzU8IH1U28wQ3T0Ps1PWNa8LuCuP45DQN1bf80Ul/EiRY7N+UAodn2Z8QD7kfodLos9SJZwKhzI/GCZv9nBfozJa7OHt0GfHUsYWITdB5DwzOo4kkdWdnSnIqmfyjkauOiWTEL6VClVES4iX3Xf0fDNnbkuQ593MxtDngR31k0OOqTJlk9TyLAsxceAMX59WyRiSgICObLldDMu+D7INY5+5DRDwOG+v12RQETxqmenE/cPXCV7HD+IAhtdGR29Thke7EC2bJRU8on2o359JQsTXlqp+1TEEZ/vPtjyWmhQSMdiXRoPoJkHRXnjAI5F/0yD5fNZpmlUevQwEUNBPLVgwWhtvm/JPJszksSfVq/C+XUy57A3F3VpibCkc98BzN1Oi3lpQl8RZn0qrKwkr1AkYvFzsO9fjSxLV3kYLFur8J1sSvzBA5V470oYlEEiLqYbJKJAIOycUOPtKIwt6ogQqEExMPSmnVMc3+u6rVxTTh6TGn1gAIPp88AmAkSXP8urcrF3PbKjDdCjRFBmXuDwt4ZfvDNkgmNj5LcL79/ZhPzx4URWshUKH/UT937ilelFzq4XMcLYje8Hqi/xJ/xledJDiPfJ3hk7tqVEESRasi6N5C1O4YAN+xqhLekveP3MPKy0ug7sqj/UlcAQSK+/DiGudqDrL+qFyr7Jzsc2Kw8rgqZXQUQXK1/l56WYiZMLTWrHut4K7YB8KeMBuD6UBhNzmGnbNuTM1rJDqecu3tCSWzNG+NWQ6i2owdd6B3+S/ShBi/KkGzJCh/Ytwn4LnGgQjmLEWDyf+879E6Qzg9sqOF0uoGCASM0eigSbztQyM/uio/0ny/Ck8X7VIyeQ7ETPRGM3izZ0wwsR16r8QTE5C2PKyQX5x/x2St4vipeteiJRl/K+BcgZy2XXLyFD+WeCZEVT9D782NVGfEIwWB5Uc+1KEEXh5OlGly/ZDkwYtEXGWNwJ09k3fg+P2aR0yykpX6I+5w6MdGa5N3HlKjRQMRE4zlI945VShsGbwIEldHFIg9x/84xiecn3gXP8nxeF4ZKNV+m5cShIquftbko6luF6IEVzAOzcyieLdGVC6dYpFr7rmPjoJKNKsdpqrMxNfNDMYz9F6/JO8VfgPuge9G6Rna+gZNkbur9AFgU42ezZ38Csgwyl0Jd9Vt/tutTXO5tkuY
AZj1bfizcW4+gF5rt6kvf8Hnefd0i59wN68FkF0645Umy/8MdodphBazCowsjHHCb8jRPmOMKiv4NTtyKu4zrkajv6rr7sSqNuZMsCdKfnRof3m3EUrLu2UzKpt3mE6gu2qdYbYhSjKRKIF9XXRMBtu+NVTD2Tymlm9I5NmPZay5q5hnUbRHy7Y+wQFby3M3BDrFReL6weUYSPEXrqWSzBbVi86gwxQI8e0lFJsbZi76UD/0xNu7zN5S6qYD92FKzkgUrCSjCuGmBqq8eMvuQx3bfkbaGwwm2ynWg3EwmcMqN2XgOzFW/bd0AqNh6I25EbsBTfKwb1S6XpH2hiMnWsvoqsluNY10zlWuu+HsF7g85rd55C7Ra4SkcgO6yiJbwQQ7Rs3R2ZoecIthEByGpCz3DroZ437IUteopFiy0zJiP+ozQm7LAF4BOOq6hwyMdmte9XG+w9V3z5s6O3Ew03woqulBExunx6DEfgt4bXqlfphi99yYA8YLJsN1bwXG7yovjd6wHlPeRXpvRsj01+OtF7M5GrVsGi0WJynLhiTrBeZzb2kEFTB0CFGCPjEmcmh5BpW1xhSx8tyfQjJu+veeM4nkjKSH6o2FCQB/96a8f4g2Uf6y2/QqGuPQgQWosIvOkqw6GjPh1OyRMxcPjRcZZLdD/iYUKGJ9CRLuChMmg8G2xzsG0BZEarQn7yg3wVb3c733L3pzrvHWU+11fXrOQQwEd/xjgG58IcMlL3q/rQfW4CgdvgoUUHNFPEMV2GEgarZ7ftC5bbd9VQDlmDLPHWfNm8FDEXvkrS8eMxfd2UwGRkyfKu2E59WyKI9r0UxNr13rRI/I3gegvwcx1oS8qnQeMo62nXEpeeecQ2B+fAetp6gh30ecMoORpZC1qRz2KSD8Z4H/75Ks/uE50xdMDDNMzQ4pwErCHEXSukEPk70Q3mgyAe491JB7l77zsDD1yxyNXWCws4xyg6CRnF+MuL5z4ng1IRLoE+VHvQfrUcH8SLfl8I0nSQAg0HfBtiQwVnTZuDG4cbZvySpF/uGmpOhCDRx4WK7dOc3WvUTagSlRPha0lMbNPkYkzYbhgZu7w6sN9CLe8syRgMxiUUOiGsHuZ0y8XqI2N+lvKGMgkIlHpIofB1qhzku1QzFeL1NCO32WA1qrgx9lsvW2d3MZDIZYEmuFuesXyE9xa+U3LUA/lzVo3ofG/UnMYmR3m4ytEr9ZXsCm1hX/BU2U9kafNlbSrJhNTPw5xrOKFcxzGvLLdpmerD9m5/MijOv6RflvchLhWc/MokaQRWWYtNjbCSoqhUwavx8AjYRADu5UB4EsA7f2YuAeYTTAxlp2CjqdGwfxFAT+evFO51yvefG+VuRvE3vniGCHalF0rl5qdHC6l6tBwzy77ervlpWXcBYW8QEWbEuZJmwthDDjDzuj8VGtNGBZgbl6dkLjjeBRJ3IyR/POcxnChseugyw9/O5/maRNDEhWuBSUC92afprRwJQqk9A0XC9uKCAlOcMKi1wytmod9plqtpMOp9nmTEbh6VV65qAUCEOBWQh33QTzPQCDtp9QsBPQyelEDaGXBSCCCoyTqxR08kRMQZgj2YRJ1KYy4Edp49KcHH3qVIzbWoZqAOHiXCBnk/zDQyFcjM+nPCvsqnKPoQgZiQEPYzmtCifgz77CyTHMPph//JhuLcb4ELan8Y314ZtywWtOIlGDcNiVuDzZRpMk4YGDWgD1TSXinP2XKHvE8Sk2sj1gmpRyDUTnw0p0DQYFwm8/3r81X05bzB/sN9swaUL8HeVKB5s6snnylaTi6y973Z5YCzUy9dvW2Kyo8j3Lg7kCzjG1wd9bndHKgNvRmEFHdoWoJpijxYOZhnEKF/cX8OjU3qECgpFPdpbB50+RS8Ed/kHQAOF0952AycgBhWoIPJIUoQwqSgAPrV3EfnHXtqEEUuGKdhEYTZdKaK8zOlAwJwKpufECNbwEL1dLXxTrPs66sT6s9heoOHGW
ad5xnVwJn/FnCfy2thKDcYyY2vFFuSL2XHsUaceij00x6hQBBklZ/f6jgelvNCUxPMjPrYXekss/TOVSEDkQxgk98EBWuGnJl/Wpr43i7VXbxs5R71jGCO/Gr6Zjlh3sBJWtU+KD6Usx9Hj5ZA89OMp8FeSJlWJ5JfCJnzWnMrwwRCdJuRzUSUiq60ca+T+zj6U/k1IV/tEeNm9Y0TSWqlWuTrl4lsMTWfP9ak4GQ+iGsyYWiIr1PBOx02LpsGFAK3ctLX+a/ZvPy8TdB8HHsbiULXF1yqcnp6wse8wS4VUzDHnPQynO4AItVd9fB4vcv8NZ4hLPCJTHxXcUozyGkfwgjebLztVp4Fbu0CRlmyCGtaU+lTlWq+Omw3i2/0m2c1ccTwVgvhJWFj+Wfsky5zP3A9fB9Pri/xP8tvKz5Sz6ysGvOxg4DETRCPofl8qh5++rm1PtsjAF9+Nh5M+CtNEtXZGTO26tXRbJav7g1bUqiOB77xdBmNcXhjKmNHtVSs7mUuVExRsClAbNntLo1DyLvMQUgRCxoxxPO7sDGsO3umf6GYavFxMSZvaVMKRW0NkFG4Kh3QrOIXM1kg23QUGufGn94pJ9dkJT2jnn8VjO3NtNQ48HSrZ2y+k6yOqGkEQmx8qNSWVBWqcMfZy9luD+v8NibhVUDJZfUe3hXO16Ee0mL0ZvR9zQN5G8lhaiAMMbppKd+NLcU4T5Ny4Ccl1pJKKpTKFGWBQd/UDpadayQZl0Rh1SC6+lMLGNGiBn+3QpjG4F5vO8KrFM1DJUn4hOu74MxtOAR4P3hkz4T5iut7jbJ7WoeSWlGVgRC9St7gpmBeiE9AS7SvN328+B2RYBtnoj65EL0KVg8H4TLiZWUNixqUzqKRbfZAY7/he5w5ydWrJvYKs92FF9+aYfFsdck7d2TjBLgDPef5L1vTtPsZK/BP723f+Zh5FvgV5ttvXXOb3Re4WU9GUzy1uZXkvJS5CyJ3HTS0iVepFy+WVad0PiXLySL7eR6oLTCupf0tvSQJ20CEqrpGDlskgUcjvb5CiyspqqTe1YRD0gzJd51mn2R5kuNL6l/n/nXuX+f+de5f5/517n/fcwhNVvjL9auHkZnMheP4ST5/KMlyP+zSSmVZ/hvM/P5SaVdPRrxVz149rPn2f0DE397JuYcgA+i/b0HYv2/+tfV/PvcW47BxcV933+duJd7Gv47Z476k+XOs2rZpfS5+K0p8Xrfp512uddjWv5XjWHZ5PNXr39Kx/51I19+lXPHnic/m35/5b9DDdx5St+TD9n41BBWkenWMuntSphg1VMuf8fhPMPrnwiNftvz6cyH4HoLZf4Pp/s2M7/NteR4N/P3s588d3z+7EPS3z1/POOvs1ybPHvH5G0z8OVrldVn99QkI9rfX4fkejtc/h8p/f8EbjPbntc9Gf9F51/39K37bEFBnf+4h9QkkWI75jP8J9/4v/Gw2gftP0F8/JO72/K+3/e3ZV519eVAUsG5x+RbpB4pxef5N4/2N7nleBeRFkafbn+14yN4j19TFQ7zVb0gBkFZxPfx5+Lp9u78e/rTW86Mecemfr2TAZ3PdlrHN6bF7XgAzwzjkb9fWXfdfHIq7uhye3S4v3ie8LV+ncUf+dbivs+x9CXVW9ZbbU/yTinOJp+fYMu5Dlr/NALxvrOJsPP/aKbt4Xf/+KW2+pdVfJ/73lzjwH9L0l9TB4N8+/yR0IIj8DQX/Wepg4P+9yP1XfwoC/ZNY5FmZ23/tjstWjeU4xB37j6P/RRf+4xplHKe/uq/Jt+1r1/f7kHj/Ne5/kLP8qrfgvf1pgj974X84w1x/Pfm38/37zvD83v9w07sb/sdz/7jtt/f9/112/urH9e8v+m/2Afznwi1eyvy/J3fIX3L39tB/V+qWvHuG/pH/Z9/xP1924H/SV/8kTOtZ948iyn/KJV62v0QCxv5bPfPPyubvj1Dqd/fXp/8QQPB/dW/+D4z5vwY48k+jGw
KAfx7aIPD5/6p/kP9K/6Ddq7HXKR7+s45C533cfr/7P2yV7//ga4L+cZgfx9e+kK/G715r9I/rgWJ52xWN+1fPD8k6/XlM9+c09+elf576/+BD2KHs6rV6rvbrtp7yrI7/mw//l6X7n2Pp/n2I/w9bOuK/sHL/DKxA4H+pgfv8DyipKp7ezbqPX8X6772sxEneGeNa/2AUzCTj9n+3d0XLicJQ9Gt8JBMSEshjO93O7Ex31unD7mMHkVZnKbiIbf37zQWCkKRVqrRs275YQUC9J/ece+5VC/nO0vMEdpyH0Z+7MsYKM/P4NtwkNpwUQIzn4XoFMo1e3C6fABfn5QXP1FastjRhhiBDd3KzSrJwjh4l7u8B9yjLodv4uFsHlzE0J4vF5h48fuholNNVzUpxEknRzgNB6wc40pWpaPXkWHejVXr3EZDoeh0kur6BRJ+jwCLxfYxU8j45HDl9D731H2igk0ub+tBptpTPpQEFIbiLikCLdCXC6qO0YDdP44h0pEqHV2gm8Uwk9wskC6/lUX1al49XQrmqZmrTh7CIKDEYffifgz5kOO+zdN3iEOgARzBjPN3KfJNWDJFmIKW6JGLfP1IW6V+5a/lCIFPXMw/5zARla/PpcRkYuCQg1KdhDqYQDht9XrtFKyk4a/vocVksav9Iytti+yWX3w9evtuFF0MBE62/wMx/xJL/WICIGAhqDB9JWSZMuvRk4a8WPZEx6JfDA9ocoiJKTAqjxKI6XTwUhzH3gACOncNkMkhLFgOekVSWwhA0/C9vrjez7U2W3sBPB6z1+86VOqCmJLP14VctDqGaHtUtZP4x9jua9Xg4h3HexaRJYS6liFlQyTBiJ2h32F+I2e+gwGEXYRHOwpLGaqq6/gY/O4HPpt9Ne2mE3PUBAEM7gOFeg4K2namaB29i5LAD3GbIRavxBiDK0lTmuXCmnm/vwJDuQjYtDRdzxAMzLAFHQ7UBmM1mfoZeom2ylAslp8+so9aynVVL6mrWbGjI5uemSCqtUK7vWigwbVlPCJ2HcXAbGTlA7sHl3wihcrjNpUfdb+vGt0XAJ/FYzSIZ3jgQc5fyFRR3ebz+m9zESbxahKmqkrlXFsn2/eOsknuA8EkpWuT5HsOsWldYm7Cg1EhUnot8IrAgwncJ564lZ3kUYU8QH3NMsfA8zIfC7wCm3F4HzlL1jLSkYYHmgTCzpLGZcv5gUsBmylWdwIWrdxjhPZvYOoyww6nI4wxkv1ym1r4jNEl+L7atXmN1yu5l4HNl7oANyEjGL84/li7t7ZY4BGnKVNlzbSjy5kHdSmYoNJpWnBH/OJ2f5XnpZ0XgZS2jLhq6cdqNukzagy67uZexjrrs7d60TVObjVVvO7LJw5jmwQityVO1o4wmj3kiX7x8ooG7RSrR9oZWSVF9EGcdyGpmsKwDWV9oU+JGazST4HVo008kgqHAZp0+dQ+p43qOc3VVkObq2hSQSYAjFUWelhoslpqwzLkIOhALeYe49B+yBiu/jAK+Luw6DqPCWUZZWpdeUObL2qu7/d1LrpcXX48hdhSQDgSJqYU4RsK3ZL6hUMhNS1cJ5PnywSrDQRo7tcoFHV7br6YO3zsyuDOInKhCKpxvmUpYh4n1lAzs5l/y4nM1KV/O2E/qpulpJhn7P62/m7ovC46hrBOSPZeG9gi8u6MuPsbe0D3Rsgy6Di3FqDFFSmPEpAlsGR2gpJlMO/2nTA5oB36JyvcuYZS3f6yo1E90MlEp7+YZpK3dw+WyX/zI5jE84h8=</diagram></mxfile>
|