Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes. See raw diff
- 2006.11197/record.json +32 -0
- 2010.09345/main_diagram/main_diagram.drawio +1 -0
- 2010.09345/main_diagram/main_diagram.pdf +0 -0
- 2010.09345/paper_text/intro_method.md +80 -0
- 2104.05670/main_diagram/main_diagram.drawio +0 -0
- 2104.05670/paper_text/intro_method.md +19 -0
- 2104.07555/record.json +32 -0
- 2106.00660/record.json +32 -0
- 2106.02796/main_diagram/main_diagram.drawio +1 -0
- 2106.02796/main_diagram/main_diagram.pdf +0 -0
- 2106.02796/paper_text/intro_method.md +217 -0
- 2106.06499/main_diagram/main_diagram.drawio +0 -0
- 2106.06499/paper_text/intro_method.md +147 -0
- 2108.00230/record.json +32 -0
- 2201.01819/record.json +32 -0
- 2203.06074/record.json +32 -0
- 2203.08195/record.json +32 -0
- 2203.08734/main_diagram/main_diagram.drawio +1 -0
- 2203.08734/main_diagram/main_diagram.pdf +0 -0
- 2203.08734/paper_text/intro_method.md +66 -0
- 2204.06260/main_diagram/main_diagram.drawio +1 -0
- 2204.06260/main_diagram/main_diagram.pdf +0 -0
- 2204.06260/paper_text/intro_method.md +76 -0
- 2205.00320/main_diagram/main_diagram.drawio +1 -0
- 2205.00320/main_diagram/main_diagram.pdf +0 -0
- 2205.00320/paper_text/intro_method.md +9 -0
- 2205.05861/record.json +32 -0
- 2205.07177/main_diagram/main_diagram.drawio +1 -0
- 2205.07177/main_diagram/main_diagram.pdf +0 -0
- 2205.07177/paper_text/intro_method.md +51 -0
- 2205.11438/record.json +32 -0
- 2205.15376/record.json +32 -0
- 2207.10040/main_diagram/main_diagram.drawio +0 -0
- 2207.10040/paper_text/intro_method.md +157 -0
- 2207.10553/main_diagram/main_diagram.drawio +1 -0
- 2207.10553/main_diagram/main_diagram.pdf +0 -0
- 2207.10553/paper_text/intro_method.md +68 -0
- 2209.12561/record.json +32 -0
- 2210.06170/main_diagram/main_diagram.drawio +1 -0
- 2210.06170/main_diagram/main_diagram.pdf +0 -0
- 2210.06170/paper_text/intro_method.md +208 -0
- 2210.08410/main_diagram/main_diagram.drawio +1 -0
- 2210.08410/main_diagram/main_diagram.pdf +0 -0
- 2210.08410/paper_text/intro_method.md +161 -0
- 2211.03295/record.json +32 -0
- 2211.09394/record.json +32 -0
- 2301.05434/main_diagram/main_diagram.drawio +0 -0
- 2301.05434/paper_text/intro_method.md +119 -0
- 2303.16268/main_diagram/main_diagram.drawio +1 -0
- 2303.16268/main_diagram/main_diagram.pdf +0 -0
2006.11197/record.json
ADDED
|
@@ -0,0 +1,32 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"arxiv_id": "2006.11197",
|
| 3 |
+
"month": "2020_06",
|
| 4 |
+
"year": 2020,
|
| 5 |
+
"conference": "ICLR",
|
| 6 |
+
"title": "Abstract Diagrammatic Reasoning with Multiplex Graph Networks",
|
| 7 |
+
"arxiv_url": "https://arxiv.org/abs/2006.11197",
|
| 8 |
+
"source": {
|
| 9 |
+
"paper_dir": "/home/zling/lzl/ICML2026/Build_Dataset/data/2020_06/main_diagram_database/2006.11197",
|
| 10 |
+
"tex_dir": "/home/zling/lzl/ICML2026/Build_Dataset/data/2020_06/tex_files_extracted/2006.11197",
|
| 11 |
+
"paper_md": "/home/zling/lzl/ICML2026/Build_Dataset/data/2020_06/main_diagram_database/2006.11197/paper_text/paper.md",
|
| 12 |
+
"metadata_json": "/home/zling/lzl/ICML2026/Build_Dataset/data/2020_06/main_diagram_database/2006.11197/metadata.json",
|
| 13 |
+
"intro_method_from": "/home/zling/lzl/ICML2026/Build_Dataset/data/2020_06/main_diagram_database/2006.11197/paper_text/paper.md",
|
| 14 |
+
"intro_method_from_kind": "markdown"
|
| 15 |
+
},
|
| 16 |
+
"files": {
|
| 17 |
+
"main_drawio": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2006.11197/main_diagram/main_diagram.drawio",
|
| 18 |
+
"main_png": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2006.11197/main_diagram/main_diagram.png",
|
| 19 |
+
"main_pdf": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2006.11197/main_diagram/main_diagram.pdf",
|
| 20 |
+
"intro_method_md": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2006.11197/paper_text/intro_method.md",
|
| 21 |
+
"paper_pdf": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2006.11197/paper.pdf",
|
| 22 |
+
"latex": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2006.11197/latex_source"
|
| 23 |
+
},
|
| 24 |
+
"status": {
|
| 25 |
+
"copy_drawio": "exists",
|
| 26 |
+
"copy_png": "exists",
|
| 27 |
+
"diagram_pdf": "pdf_exists",
|
| 28 |
+
"intro_method": "exists",
|
| 29 |
+
"paper_pdf": "exists",
|
| 30 |
+
"latex": "exists"
|
| 31 |
+
}
|
| 32 |
+
}
|
2010.09345/main_diagram/main_diagram.drawio
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
<mxfile host="app.diagrams.net" modified="2021-06-02T21:45:42.672Z" agent="5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.66 Safari/537.36" version="14.7.3" etag="xOTyONpDbIc1YYKFbL-y" type="google"><diagram id="Sg5OcvY4vEHv1vMEeHYW">7V1tc6M2EP41nsl9uBvQC+CPiZNrO3PXydxdp+2nDgHZpoctF5PE7q+vAAkss5xJYxCxfblJwiKEWO2zq109OCM8WWx+SvzV/DMPWTxCVrgZ4dsRQjamVPzIJNtC4rpeIZglUSgbVYKv0b9MCi0pfYxCttYappzHabTShQFfLlmQajI/Sfiz3mzKY/2uK3/GaoKvgR/Xpb9HYTovpB61KvnPLJrN1Z1tS55Z+Kqx7GI990P+XIjyNvhuhCcJ52nx22IzYXGmPKWXoqOPDWfLgSVsmba5ABUXPPnxo3w2Oa50qx6WheLZ5SFP0jmf8aUf31XSm4Q/LkOW9WiJo6rNJ85XQmgL4d8sTbdyIv3HlAvRPF3E8qwYbLL9I7v+A1WHf8ru8oPbjXa0lUfFWLMB6krlj0kgRViah5/MmNTIuK4ku1S9sFnGF0zcRTRJWOyn0ZPeuy+NZ1a2q/QrfpEqhtWNAXU7sRjEzZSLsezq3fnnkasT79e55q5FA9tbbaqTmfH6gX7Bt2ghsIGsX9mz+P6FL/zl7gXOLPs54cun9+KBrCvxbZL16xQ/sTx8pwYmHqkYW3FhzTz0yX+eRyn7uirG9CzQr0/0NIrjCY95kl+Lp17AgkDI12nCv7OdMw8eJdQqzyiUoXIETyxJ2abR6hsmVF1AJB6l/7E9efxcodkeS9l8B8kSTa+yATIIyLWADq1DB5uDDj0EnZcC4eVYUwgg2X86yZy5iAkj92Yzcm+luAvUlEHCgqDi5P8AcNHsS8hniR9GYr524ZV/dQWvMk4reFl1eFEAXfQI6HIGga4OA5pbR6VtEJbuoCIayiOaCmXojEMacnoMad4gQNcCPOM6eFxz2BkfP6SpVEnJbCh+FfdYr7KLW+CTQlHwC/v02w6eis70G3QJM2aHlLkQzMaOi30HhFm02E3MRLCWorINORIWnT0sIgCLUAA8BhZV7D3dCGirlH83BDrmYKzGc8HxieMYkz5xPIzSTBs8AjUWu72yj4/Hg1WWAePxs7+55zx7nCu5fEXvDOJToNMLCYRPDz1gZ2D4BPLM7vAJ1XFemPnQo2Q+2fJbqERYitfWJITOU33e/TiaLcXvgdA9E1N8k81MFPjxtTyxiMKwcClMPIH/kHeVGdSKR8s0VyS9GdHbrC/hRSRi7OPMNKKONtO20v7OTDtdTTRUeRqmI3YAR+wZdMRQMcZIceBj5kivHORKn2ojr59qgPjneZD7xA4e47CvagDus8BtQzWhYQLGqwNGacAIYKBCyltZuVwyifYrFdJrRQCqMw0Sj+ppNTwazCQQVEsxGMDsbIWXxy/rrMIX6bOYjToo5xyyEqGcxp3G6dXm3cs2F9/a6r6cSjnd1K5Pd2ez3abMsgyvM9ZQpsDYX6+jQNduHQOFA7tniTAMofAMcNFyJk5mYUh0J51kEZT8JJXHdr4lmztE4NoqFtWcaAtdt9x7be1D5R3uM/vYSdQw1Reejqd3IZ+uuKqaplpHeD+33+9Iqni/IzFR/nanmbTfxgE33acyn6LHyphKnbazr+ZiVOvl1g9oCNO/NCLCLyP3FnIX+mLsjbsLNCb6lCk64W508LryF8Og77T3GlbN4zQ6p/rmdMc7ORDFiJpc7x0kGfXPz1MsBoecEZuBeLpXtt0+F4DD4BD1CPE2UAV4RyaZtIeJRwMulVw/zdQmj4QzvmzyNLgCaumuQPiGuivoqnQyEGbTwFwBwKKiB
imIh3lU/VZpqCsJiIiedJVmP0gjD0BmV0EaD4PmNCxkYoAZhQ0iEw+slCZy5ft1dCWSZvemljcXqbNArJV3b2WN59HJF9/IHlkf2irEuCsUt6m+nR2KAT4VJgZRPJi31or4mmXpZ7AJsh9eicpLewmvzeymH6c+U38Rxdti1sUpf7HKtYFxlg6kcpKX+SQncpL1Nrn/zU2kvV+fX2nevPDcpT8/5VoocfR6OwG8d2kQRzeSYTCjTqMWigHylqpHGvH6zeSty8bF//XoOlih9yI727jAw2BlDWypBRDAiEHGJO6AAPbKhAkOq6edE7k1oNbpxsjqCqjDoGsNC6iKEKQBFZkDKoHKT0Zyom9JtVeIisTI03YKUU9ZUhDAO4XdZknuHncIAW8GdPdRHlB96+yhigCoGixfqPG8yZ3CC6m6GfreXuUSUQD6Xe0MEqgodvbQJwD0Db6ZTZqrWAajtOJwn3OUHvdYyyRnV6ZqA1Wg3EQM8nnIwXcFL1H6JKI0VuuxXqL0pegFQB8qehl865EcLHqZ2V8cn/Y7wvsx2XYBYHYWk5s5WwPcXwzPdn/Rdffe5xnXjaSz/UUKFdnO3Xur9yO0fUKDH2JHoRrYW1m4fWL+9+1l9fYC9rXVI/uaQpW7k8Z/E7/gvfUBlYIGjkF+VN5p1Jp4QCG6mcGXsOhButnFoZyOQ8GoT4dysB7YqWk9tLEr5PyANFNbbD681LDe2gLUHusLUOBd364YM7SDj5zv01pmI0Vln49yOnsQJcFIEdvPz5TI3qv+gCmhroju1Gx589W2FJ6fuXikL88jDqu/+FN8xkT1d5Pw3X8=</diagram></mxfile>
|
2010.09345/main_diagram/main_diagram.pdf
ADDED
|
Binary file (21.1 kB). View file
|
|
|
2010.09345/paper_text/intro_method.md
ADDED
|
@@ -0,0 +1,80 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Introduction
|
| 2 |
+
|
| 3 |
+
Interpretability in machine learning systems [17, 39, 47] has recently attracted a large amount of attention. This is due to the increasing adoption of these tools in every area of automated decisionmaking, including critical domains such as law [26], healthcare [54] or defence. Besides robustness, fairness and safety, it is considered as an essential component to ensure trustworthiness in predictive models that exhibit a growing complexity. Explainability and interpretability are often used as synonyms in the literature, referring to the ability to provide human-understandable insights on the decision process. Throughout this paper, we opt for interpretability as in [16] and leave the term explainability for the ability to provide logical explanations or causal reasoning, both requiring more sophisticated frameworks [18, 20, 49]. To address the long-standing challenge of interpreting models such as deep neural networks [48, 10, 9], two main approaches have been developed in literature: *post-hoc* approaches and "*by design* methods".
|
| 4 |
+
|
| 5 |
+
Post-hoc approaches [7, 45, 40, 50] generally analyze a pre-trained system locally and attempt to interpret its decisions. "Interpretable by design" [3, 1] methods aim at integrating the interpretability objective into the learning process. They generally modify the structure of predictor function itself or add to the loss function regularizing penalties to enforce interpretability. Both approaches offer different types of advantages and drawbacks. Post-hoc approaches guarantee not affecting the performance of the pre-trained system but are however criticized for computational costs, robustness and faithfulness of interpretations [60, 29, 5]. Interpretable systems by-design on the other hand, although preferred for interpretability, face the challenge of not losing out on performance.
|
| 6 |
+
|
| 7 |
+
Here, we adopt another angle to learning interpretable models. As a starting point, we consider that prediction (computing yˆ the model's output for a given input) and interpretation (giving a human-understandable description of properties of the input that lead to yˆ) are two distinct but strongly related tasks. On one hand, they do not involve the same criteria for the assessment of their quality and might not be implemented using the same hypothesis space. On the other hand, we wish that an interpretable model relies on the components of a predictive model to remain faithful to it. These remarks yield to a novel generic task in machine learning called Supervised Learning with Interpretation (SLI). SLI is the problem of jointly learning a pair of dedicated models, a predictive model and an interpreter model, to provide both interpretability and prediction accuracy. In this work, we present FLINT (Framework to Learn With INTerpretation) as a solution to SLI when the model to interpret is a deep neural network classifier. The interpreter in FLINT implements the idea that a prediction to be understandable by a human should be linearly decomposed in terms of attribute functions that encode high-level concepts as other approaches [4, 19]. However, it enjoys two original key features. First the high-level attribute functions leverage the outputs of chosen hidden layers of the neural network. Second, together with expansion coefficients they are jointly learnt with the neural network to enable local and global interpretations. By local interpretation, we mean a subset of attribute functions whose simultaneous activation leads to the model's prediction, while by global interpretation, we refer to the description of each class in terms of a subset of attribute functions whose activation leads to the class prediction. Learning the pair of models involves the minimization of dedicated losses and penalty terms. 
In particular, local and global interpretability are enforced by imposing a limited number of attribute functions as well as conciseness and diversity among the activation of these attributes for a given input. Additionally we show that FLINT can be specialized to post-hoc interpretability if a pre-trained deep neural network is available.
|
| 8 |
+
|
| 9 |
+
- We present FLINT devoted to Supervised Learning with Interpretation with an original interpreter network architecture based on some hidden layers of the network. The role of the interpreter is to provide local and global interpretability that we express using a novel notion of relevance of concepts.
|
| 10 |
+
- We propose a novel entropy and sparsity based criterion for promoting conciseness and diversity in the learnt attribute functions and develop a simple pipeline to visualize the encoded concepts based on previously proposed tools.
|
| 11 |
+
- We present extensive experiments on 4 image classification datasets, MNIST, FashionM-NIST, CIFAR10, QuickDraw, with a comparison with state-of-the-art approaches and a subjective evaluation study.
|
| 12 |
+
- Eventually, a specialization of FLINT to post-hoc interpretability is presented while corresponding numerical results are deferred to supplements.
|
| 13 |
+
|
| 14 |
+
# Method
|
| 15 |
+
|
| 16 |
+
Predictor Fig. 5 and 6 depict the architectures used for experiments with predictor architecture based on LeNet [35] (on MNIST, Fashion-MNIST) and ResNet18 (on CIFAR10, QuickDraw) [22] respectively.
|
| 17 |
+
|
| 18 |
+
Interpreter The architecture of interpreter g = h ◦ Φ and decoder d for MNIST, FashionMNIST are shown in Fig. 5. Corresponding architectures for QuickDraw are in Fig. 6. For CIFAR-10, the interpreter architecture is almost exactly the same as QuickDraw, with only difference being output layer for Φ(x), which contains 36 attributes instead of 24. The decoder d also contains corresponding changes to input and output FC layers, with 36 dimensional input in first FC layer and 3072 dimensional output in last FC layer.
|
| 19 |
+
|
| 20 |
+
The choice of selection of intermediate layers is an interesting part of designing the interpreter. In case of LeNet, we select the output of final convolutional layer. For ResNet, while we tend to select the intermediate layers from the latter convolutional layers, we do not select the last convolutional block (CBlock 8) output. This is mainly because empirically, when selecting the output of CBlock 8, the attributes were trivially learnt, with only one attribute activating for any sample and attributes exclusively activating for a single class. The hyperparameters are much harder to tune to avoid this scenario. Thus we selected two outputs from CBlock 6, CBlock 7 as intermediate layers. The layers in the interpreter itself were chosen fairly straightforwardly with 1-2 conv layers followed by a pooling and fully-connected layer.
|
| 21 |
+
|
| 22 |
+

|
| 23 |
+
|
| 24 |
+
Figure 6: Architecture of networks for experiments on QuickDraw with network based on ResNet [22]. Conv (a, b, c, d) and TrConv (a, b, c, d) denote a convolutional, transposed convolutional layer respectively with number of input maps a, number of output maps b, kernel size c × c and stride size d. FC(a, b) denotes a fully-connected layer with number of input neurons a and output neurons b. AvgPool(a, a) denotes the output shape a × a for each input map. Notation for CBlock is explained in the figure.
|
| 25 |
+
|
| 26 |
+
QuickDraw. We created a subset of QuickDraw from the original dataset [21], by selecting 10000 random images from each of 10 classes: 'Ant', 'Apple', 'Banana', 'Carrot', 'Cat', 'Cow', 'Dog', 'Frog', 'Grapes', 'Lion'. We randomly divide each class into 8000 training and 2000 test images.
|
| 27 |
+
|
| 28 |
+
Input pre-processing. For MNIST, FashionMNIST and QuickDraw, we use the default images with pixel values in range [0, 1]. No data augmentation is performed. For CIFAR-10 we apply the most common mean and standard deviation normalization. The training data is generated by randomly cropping a 32 × 32 × 3 image after padding the original images by zeros (size of padding is 2).
|
| 29 |
+
|
| 30 |
+
| Variable | MNIST | FashionM | CIFAR10 | QuickDraw |
|
| 31 |
+
|--------------------------------------------|-------|----------|---------|-----------|
|
| 32 |
+
| Nepoch<br>– Number of training epochs | 12 | 12 | 25 | 12 |
|
| 33 |
+
| β – Weight for Lof | 0.5 | 0.5 | 0.6 | 0.1 |
|
| 34 |
+
| γ – Weight for Lif | 0.8 | 0.8 | 2.0 | 5.0 |
|
| 35 |
+
| δ – Weight for Lcd | 0.2 | 0.2 | 0.2 | 0.1 |
|
| 36 |
+
| η – Relative strength of `1-regularization | 0.5 | 0.5 | 1.0 | 3.0 |
|
| 37 |
+
|
| 38 |
+
Table 3: Hyperparameters for FLINT
|
| 39 |
+
|
| 40 |
+
Tab. 3 reports the setting of our hyperparameters for different datasets. We briefly discuss here our method to tune the different weights.
|
| 41 |
+
|
| 42 |
+
We varied γ between 0.8 to 20 for all datasets, and stopped at a value for which the Lif loss seemed to optimize well (value dropped by at least 50% compared to the start). For MNIST and FashionMNIST,
|
| 43 |
+
|
| 44 |
+
| | η = 1 | η = 2 | η = 3 | η = 5 |
|
| 45 |
+
|--------------|-------|-------|-------|-------|
|
| 46 |
+
| no entropy | 92.7 | 90.4 | 91.2 | 84.2 |
|
| 47 |
+
| with entropy | 91.2 | 90.7 | 90.8 | 82.9 |
|
| 48 |
+
|
| 49 |
+
Table 4: Fidelity (in %) variation for η and entropy losses for QuickDraw. δ = 0.1 is fixed
|
| 50 |
+
|
| 51 |
+
the first value, 0.8 worked well. For the others, γ needed to be increased so that the autoencoder worked well. Too high γ might result in failed optimization due to exploding gradients.
|
| 52 |
+
|
| 53 |
+
The variation of β was based on two indicators: (i) The system achieves high fidelity, for eg. at least 90%, so too small β can't be chosen, (ii) For high β, the attributes become class-exclusive with only one attribute activating for a sample and result in high Lif . Thus, β was varied to get high fidelity and avoiding second scenario. β = 0.5 worked well for MNIST, FashionMNIST. For QuickDraw, we needed to decrease β because of second scenario.
|
| 54 |
+
|
| 55 |
+
The system is fairly robust to choice of δ, η. Too high `<sup>1</sup> regularization results in loss of fidelity (Tab. 4). These values were mostly heuristically chosen, and small changes to them do not cause much difference to training. We kept the effect of entropy low for ResNet because of its very deep architecture and high computational capacity of intermediate layers which can easily sway attributes to be class-exclusive.
|
| 56 |
+
|
| 57 |
+
In our case this optimization problem for an attribute j is:
|
| 58 |
+
|
| 59 |
+
$$\arg\max_{x} \lambda_{\phi} \phi_{j}(x) - \lambda_{tv} TV(x) - \lambda_{bo} Bo(x)$$
|
| 60 |
+
|
| 61 |
+
where TV(x) denotes total variation of x and Bo(x) promotes boundedness of x in a range. We fix parameters for AM+PI for MNIST, FashionMNIST, QuickDraw as λ<sup>φ</sup> = 2, λtv = 6, λbo = 10 and λ<sup>φ</sup> = 2, λtv = 20, λbo = 20 for CIFAR10. For each sample x<sup>0</sup> to be analyzed, we analyze input for this optimization as 0.3x<sup>0</sup> for MNIST, FashionMNIST, QuickDraw and as 0.4x<sup>0</sup> for CIFAR10. For optimization, we use Adam with learning rate 0.05 for 300 iterations, halving learning rate every 50 iterations.
|
| 62 |
+
|
| 63 |
+
The models are trained for 12 epochs on MNIST, FashionMNIST and QuickDrawm and for 25 epochs on CIFAR-10. We use Adam [30] as the optimizer with fixed learning rate 0.0001 and train on a single NVIDIA-Tesla P100 GPU. Implementations are done using PyTorch [44].
|
| 64 |
+
|
| 65 |
+
Number of runs: For the accuracy and fidelity results in the main paper, we have reported mean and standard deviation for 4 runs with different seeds for each system. The conciseness results are computed by averaging conciseness of 3 models for each reported system.
|
| 66 |
+
|
| 67 |
+
Compared to f, Ψ, h and d have fewer parameters. For networks shown in Fig. 5, the LeNet based predictor has around 800,000 trainable parameters, interpreter g contains 70,000 parameters, decoder d contains 3000 parameters. For networks in Fig. 6, ResNet based predictor contains 11 million parameters, interpreter g contains 530,000 parameters, and decoder d contains 4.9 million parameters (almost all of them in the last FC layer). In terms of space, FLINT occupies more storage space according to the decoder, but is still of comparable size to that of only storing predictor.
|
| 68 |
+
|
| 69 |
+
Training time In terms of training time consumption there is lesser difference when f is a very deep network, due to all networks Ψ, h, d being much shallower (lesser number of layers) than f. For eg. on both CIFAR-10, QuickDraw, FLINT consumes just around 10% more time for training compared to training just the predictor (BASE-f). The difference is more pronounced on with shallower f where Ψ, h, d also have comparable number of layers to f. Training BASE-f on MNIST consumes 50% less time compared to FLINT.
|
| 70 |
+
|
| 71 |
+
We compare the average training times (for four runs) for SENN and FLINT in Tab. 5. Each model is trained for the same number of epochs, on the same computing machine (1 NVIDIA Tesla P100 GPU). It is clear that SENN requires significantly more time to train. This is primarily because of gradient of output w.r.t input being part of their loss function. Thus the computational graph for a forward pass is twice as big as their model architecture and followed by a backward pass through the bigger graph.
|
| 72 |
+
|
| 73 |
+
| Dataset | SENN | FLINT |
|
| 74 |
+
|--------------|-------|-------|
|
| 75 |
+
| MNIST | 2311 | 518 |
|
| 76 |
+
| FashionMNIST | 2333 | 519 |
|
| 77 |
+
| CIFAR-10 | 10210 | 1548 |
|
| 78 |
+
| QuickDraw | 10548 | 1207 |
|
| 79 |
+
|
| 80 |
+
Table 5: Training times for FLINT and SENN (in seconds)
|
2104.05670/main_diagram/main_diagram.drawio
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2104.05670/paper_text/intro_method.md
ADDED
|
@@ -0,0 +1,19 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Introduction
|
| 2 |
+
|
| 3 |
+
Despite decades of research on modeling human motions [\[4,](#page-8-0) [5\]](#page-8-1), synthesizing realistic and controllable sequences remains extremely challenging. In this work, our goal is to take a semantic action label like "Throw" and generate an infinite number of realistic 3D human motion sequences, of varying length, that look like realistic throwing (Figure [1\)](#page-0-0). A significant amount of prior work has focused on taking one pose, or a sequence of poses, and then predicting future motions [\[3,](#page-8-2) [6,](#page-8-3) [22,](#page-8-4) [71,](#page-10-0) [74\]](#page-10-1). This is an overly constrained scenario because it assumes that one already has a motion sequence and just needs more of it. On the other hand, many applications such as virtual reality and character control [\[28,](#page-8-5) [61\]](#page-10-2) require generating motions of a given type (semantic action label) with a specified duration.
|
| 4 |
+
|
| 5 |
+
We address this problem by training an action-conditioned generative model with 3D human motion data that
|
| 6 |
+
|
| 7 |
+
<span id="page-0-0"></span>
|
| 8 |
+
|
| 9 |
+
Fig. 1: Goal: Action-Conditioned TransfORmer VAE (ACTOR) learns to synthesize human motion sequences conditioned on a categorical action and a duration, T. Sequences are generated by sampling from a single motion representation latent vector, z, as opposed to the frame-level embedding space in prior work.
|
| 10 |
+
|
| 11 |
+
has corresponding action labels. In particular, we construct a Transformer-based encoder-decoder architecture and train it with the VAE objective. We parameterize the human body using SMPL [\[46\]](#page-9-1) as it can output joint locations or the body surface. This paves the way for better modeling of interaction with the environment, as the surface is necessary to model contact. Moreover, such a representation allows the use of several reconstruction losses: constraining part rotations in the kinematic tree, joint locations, or surface points. The literature [\[40\]](#page-9-2) and our results suggest that a combination of losses gives the most realistic generated motions.
|
| 12 |
+
|
| 13 |
+
The key challenge of motion synthesis is to generate sequences that are perceptually realistic while being diverse. Many approaches for motion generation have taken an autoregressive approach such as LSTMs [\[16\]](#page-8-6) and GRUs [\[49\]](#page-9-3). However, these methods typically regress to the mean pose <span id="page-1-0"></span>after some time [\[49\]](#page-9-3) and are subject to drift. The key novelty in our Transformer model is to provide positional encodings to the decoder and to output the full sequence at once. Positional encoding has been popularized by recent work on neural radiance fields [\[50\]](#page-9-4); we have not seen it used for motion generation as we do. This allows the generation of variable length sequences without the problem of the motions regressing to the mean pose. Moreover, our approach is, to our knowledge, the first to create an actionconditioned *sequence*-level embedding. The closest work is Action2Motion [\[21\]](#page-8-7), which, in contrast, presents an autoregressive approach where the latent representation is at the *frame*-level. Getting a *sequence*-level embedding requires pooling the time dimension: we introduce a new way of combining Transformers and VAEs for this purpose, which also significantly improves performance over baselines.
|
| 14 |
+
|
| 15 |
+
A challenge specific to our action-condition generation problem is that there exists limited motion capture (MoCap) data paired with distinct action labels, typically on the order of 10 categories [\[31,](#page-9-5) [63\]](#page-10-3). We instead rely on monocular motion estimation methods [\[38\]](#page-9-6) to obtain 3D sequences for actions and present promising results on 40 fine-grained categories of the UESTC action recognition dataset [\[32\]](#page-9-7). In contrast to [\[21\]](#page-8-7), we do not require multi-view cameras to process monocular trajectory estimates, which makes our model potentially applicable to larger scales. Despite being noisy, monocular estimates prove sufficient for training and, as a side benefit of our model, we are able to denoise the estimated sequences by encoding-decoding through our learned motion representation.
|
| 16 |
+
|
| 17 |
+
An action-conditioned generative model can augment existing MoCap datasets, which are expensive and limited in size [\[48,](#page-9-8) [63\]](#page-10-3). Recent work, which renders synthetic human action videos for training action recognition models [\[65\]](#page-10-4), shows the importance of motion diversity and large amounts of data per action. Such approaches can benefit from an infinite source of action-conditioned motion synthesis. We explore this through our experiments on action recognition. We observe that, despite a domain gap, the generated motions can serve as additional training data, specially in low-data regimes. Finally, a compact action-aware latent space for human motions can be used as a prior in other tasks such as human motion estimation from videos.
|
| 18 |
+
|
| 19 |
+
Our contributions are fourfold: (i) We introduce ACTOR, a novel Transformer-based conditional VAE, and train it to generate action-conditioned human motions by sampling from a sequence-level latent vector. (ii) We demonstrate that it is possible to learn to generate realistic 3D human motions using noisy 3D body poses estimated from monocular video; (iii) We present a comprehensive ablation study of the architecture and loss components, obtaining state-of-the-art performance on multiple datasets; (iv) We illustrate two use cases for our model on action recognition and MoCap denoising. The code is available on our project page [\[57\]](#page-9-0).
|
2104.07555/record.json
ADDED
|
@@ -0,0 +1,32 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"arxiv_id": "2104.07555",
|
| 3 |
+
"month": "2021_04",
|
| 4 |
+
"year": 2021,
|
| 5 |
+
"conference": "EMNLP",
|
| 6 |
+
"title": "Data-QuestEval: A Referenceless Metric for Data-to-Text Semantic Evaluation",
|
| 7 |
+
"arxiv_url": "https://arxiv.org/abs/2104.07555",
|
| 8 |
+
"source": {
|
| 9 |
+
"paper_dir": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_04/main_diagram_database/2104.07555",
|
| 10 |
+
"tex_dir": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_04/tex_files_extracted/2104.07555",
|
| 11 |
+
"paper_md": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_04/main_diagram_database/2104.07555/paper_text/paper.md",
|
| 12 |
+
"metadata_json": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_04/main_diagram_database/2104.07555/metadata.json",
|
| 13 |
+
"intro_method_from": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_04/main_diagram_database/2104.07555/paper_text/paper.md",
|
| 14 |
+
"intro_method_from_kind": "markdown"
|
| 15 |
+
},
|
| 16 |
+
"files": {
|
| 17 |
+
"main_drawio": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2104.07555/main_diagram/main_diagram.drawio",
|
| 18 |
+
"main_png": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2104.07555/main_diagram/main_diagram.png",
|
| 19 |
+
"main_pdf": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2104.07555/main_diagram/main_diagram.pdf",
|
| 20 |
+
"intro_method_md": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2104.07555/paper_text/intro_method.md",
|
| 21 |
+
"paper_pdf": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2104.07555/paper.pdf",
|
| 22 |
+
"latex": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2104.07555/latex_source"
|
| 23 |
+
},
|
| 24 |
+
"status": {
|
| 25 |
+
"copy_drawio": "exists",
|
| 26 |
+
"copy_png": "exists",
|
| 27 |
+
"diagram_pdf": "pdf_exists",
|
| 28 |
+
"intro_method": "exists",
|
| 29 |
+
"paper_pdf": "exists",
|
| 30 |
+
"latex": "exists"
|
| 31 |
+
}
|
| 32 |
+
}
|
2106.00660/record.json
ADDED
|
@@ -0,0 +1,32 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"arxiv_id": "2106.00660",
|
| 3 |
+
"month": "2021_06",
|
| 4 |
+
"year": 2021,
|
| 5 |
+
"conference": "ICML",
|
| 6 |
+
"title": "Markpainting: Adversarial Machine Learning meets Inpainting",
|
| 7 |
+
"arxiv_url": "https://arxiv.org/abs/2106.00660",
|
| 8 |
+
"source": {
|
| 9 |
+
"paper_dir": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_06/main_diagram_database/2106.00660",
|
| 10 |
+
"tex_dir": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_06/tex_files_extracted/2106.00660",
|
| 11 |
+
"paper_md": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_06/main_diagram_database/2106.00660/paper_text/paper.md",
|
| 12 |
+
"metadata_json": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_06/main_diagram_database/2106.00660/metadata.json",
|
| 13 |
+
"intro_method_from": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_06/main_diagram_database/2106.00660/paper_text/paper.md",
|
| 14 |
+
"intro_method_from_kind": "markdown"
|
| 15 |
+
},
|
| 16 |
+
"files": {
|
| 17 |
+
"main_drawio": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2106.00660/main_diagram/main_diagram.drawio",
|
| 18 |
+
"main_png": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2106.00660/main_diagram/main_diagram.png",
|
| 19 |
+
"main_pdf": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2106.00660/main_diagram/main_diagram.pdf",
|
| 20 |
+
"intro_method_md": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2106.00660/paper_text/intro_method.md",
|
| 21 |
+
"paper_pdf": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2106.00660/paper.pdf",
|
| 22 |
+
"latex": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2106.00660/latex_source"
|
| 23 |
+
},
|
| 24 |
+
"status": {
|
| 25 |
+
"copy_drawio": "exists",
|
| 26 |
+
"copy_png": "exists",
|
| 27 |
+
"diagram_pdf": "pdf_exists",
|
| 28 |
+
"intro_method": "exists",
|
| 29 |
+
"paper_pdf": "exists",
|
| 30 |
+
"latex": "exists"
|
| 31 |
+
}
|
| 32 |
+
}
|
2106.02796/main_diagram/main_diagram.drawio
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
<mxfile host="app.diagrams.net" modified="2020-08-04T09:40:30.969Z" agent="5.0 (Macintosh; Intel Mac OS X 10_15_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.89 Safari/537.36" etag="CrxVzQ52qTnkw3Wugxp8" version="13.3.8" type="device"><diagram id="0D80gqDUd5TodILnSBM2" name="Page-1">3Vhbb5swGP01SO3DJC6BNI/NZRetlTp1ah8rF1zwZGxknFt//T7HBgqmSdesyZYXZB/fzznfZwsnmOSrLwIV2TVPMHV8N1k5wdTx/ZEXwlcBaw1EwVADqSCJhrwGuCXP2ICuQeckwWWro+ScSlK0wZgzhmPZwpAQfNnu9sRpe9UCpdgCbmNEbfSeJDLT6EXoNvhXTNKsWtlzTUuOqs5mijJDCV9qaNMnmDnBRHAudSlfTTBV3FW86Ik+v9Jab0xgJt8yAF8Xd+43uqYjfPWd3aVJenP36ULPskB0bg5sNivXFQM4AUJMlQuZ8ZQzRGcNOhZ8zhKslnGh1vS54rwA0APwF5ZybdRFc8kBymROTateUy306tkqEvlcxHjLgcz+JRIpllv6DWsFwLmY51iKNYwTmCJJFu19IOOhtO7X0AwFw/QfsO5brF8RhpEAbMZiCCFVcvyIwgnGj1CJUlU6c8LJ2T18zs8BQjmQO2aPZVEz+EK1tibLjEh8W6ANdUsI1D7+F1hIvNqugM1YNSAwvjdx7l+Y+rKJGs841M1eREzkfhDJnndq3t7Xs2boDSewcq1c2FEu8DqK6Fgyozqi1Nt4v05DS6Yfc8Qk8CmO7urg33P14NRcPfgQVw+P7GpbpjrFT7FJ8b35/WeV3zuNR4+Fbp44fiyEbwgFllyqVyDUYorKksTv87HNyotThz2HrrA9bTzY4WIdXZaLrXm61/OhwyGypQKvrx6IcrulGphUtnVClKRMiQiyQOgEY2VlAs/0S9OQkyTRqQ2X5Bk9bqZSgVGoI20OGY6dcKrmgmxW6sT2l0IjCtuRMbIjo88j/kcFxqiXbXg5zpwhkDCRkOWHU7X/ExHAHwx2KjA6pAKe/cTvk0Dxr/YwVt9wsoADFyWhnJ2EKgPP3amKFx1Ulp4rA2QBRF27FD9JVX67Spu6UBs/1+X/X7Qo6twUri1afS3tKRpUm/8f+qppfiIFs98=</diagram></mxfile>
|
2106.02796/main_diagram/main_diagram.pdf
ADDED
|
Binary file (11.5 kB). View file
|
|
|
2106.02796/paper_text/intro_method.md
ADDED
|
@@ -0,0 +1,217 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Introduction
|
| 2 |
+
|
| 3 |
+
Autoencoders are an effective method for representation learning and dimensionality reduction. Given a centered dataset $x_1, x_2, \ldots, x_n \in \mathbb{R}^d$ (i.e., $\sum_i x_i = 0$ ), an autoencoder (with *latent dimension* $k \leq d$ ) consists of an *encoder* $f: \mathbb{R}^d \mapsto \mathbb{R}^k$ and a *decoder* $g: \mathbb{R}^k \mapsto \mathbb{R}^d$ . The goal is to select f and g from prespecified classes $\mathcal{C}_f$ and $\mathcal{C}_g$ respectively such that if a random point x is picked from the data set then g(f(x)) is close to x in some sense, for example in mean squared error. If $\mathcal{C}_f$ and $\mathcal{C}_g$ consist of linear mappings then the autoencoder is called a *linear autoencoder*.
|
| 4 |
+
|
| 5 |
+
Autoencoders have achieved striking successes when f and g are selected through training from the class of functions realized by multilayer perceptrons of a given architecture [HS06]. Yet, the canonical autoencoder formulation described above has a notable failing, namely that for linear autoencoders, optimal choices of f and g do not necessarily identify the principal components of the dataset; they merely identify the principal subspace [BK88, BH89]. That is, the
|
| 6 |
+
|
| 7 |
+
<sup>\*</sup>This research was supported by the US National Science Foundation under grants CCF-2008266, CCF-1934985, CCF-1617673, CCF-1846300, CCF-1815893 and the US Army Research Office under grant W911NF-18-1-0426.
|
| 8 |
+
|
| 9 |
+
components of f(x) are not necessarily proportional to projections of x against the eigenvectors of the covariance matrix
|
| 10 |
+
|
| 11 |
+
$$\boldsymbol{K} \stackrel{\text{def}}{=} \frac{1}{n} \sum_{i=1}^{n} \boldsymbol{x}_i \cdot \boldsymbol{x}_i^{\top}, \tag{1}$$
|
| 12 |
+
|
| 13 |
+
which we assume without loss of generality is full rank. Thus, linear autoencoders do not recover Principal Component Analysis (PCA). The reason for this is that both the objective (the distortion) and the constraint (the dimensionality of the latents) are invariant to an invertible transformation applied after the encoder with its inverse applied before the decoder. It is desirable for linear autoencoders to recover PCA for two reasons. First, from a representation learning standpoint, it guarantees that the autoencoder recovers uncorrelated features. Second, since a conventional linear autoencoder has a large number of globally optimal solutions corresponding to different bases of the principal subspace, it is preferable to eliminate this indeterminism.
|
| 14 |
+
|
| 15 |
+
Autoencoders are sometimes described as "compressing" the data [San12, BK88, LZW+21, Bis06], even though f can be invertible even when k < d. We show that by embracing this compression-view, one can obtain autoencoders that are able to recover PCA. Specifically, we consider linear autoencoders with quantized (or, equivalently, noisy) latent variables with a constraint on the estimated number of bits required to transmit the quantized latents under fixedrate coding. We call this problem *Principal Bit Analysis (PBA).* The constraint turns out to be a strictly Schur-concave function of the set of variances of the latent variables (see the supplementary for a review of Schur-concavity). Although finding the optimal f and g for this loss function is a nonconvex optimization problem, we show that for any strictly Schur-concave loss function, an optimal f must send projections of the data along the principal components, assuming that the empirical covariance matrix of the data has only simple eigenvalues. That is, imposing a strictly Schur-concave loss in place of a simple dimensionality constraint suffices to ensure recovery of PCA. The idea is that the strict concavity of the loss function eliminates the rotational invariance described above. As we show, even a slight amount of "curvature" in the constraint forces the autoencoder to spread the variances of the latents out as much as possible, resulting in recovery of PCA. If the loss function is merely Schur-concave, then projecting along the principal components is optimal, but not necessarily uniquely so.
|
| 16 |
+
|
| 17 |
+
Using this theorem, we can efficiently solve PBA. We validate the solution experimentally by using it to construct a fixed-rate compression algorithm for arbitrary vector-valued data sources. We find that the PBA-derived compressor beats existing linear, fixed-rate compressors both in terms of mean squared error, for which it is optimized, and in terms of the structural similarity index measure (SSIM) and downstream classification accuracy, for which it is not.
|
| 18 |
+
|
| 19 |
+
A number of variable-rate multimedia compressors have recently been proposed that are either related to, or directly inspired by, autoencoders [TAL18, TVJ+17, BLS16, TOH+16, TSCH17, RB17, HRTC19, AMT+17, BMS+18, ZCG+18, ATM+19, BCM+20]. As a second application of our result, we show that for Gaussian sources, a linear form of such a compressor is guaranteed to recover PCA. Thus we show that ideas from compression can be fruitfully fed back into the original autoencoder problem.
|
| 20 |
+
|
| 21 |
+
The contributions of the paper are
|
| 22 |
+
|
| 23 |
+
- We propose a novel linear autoencoder formulation in which the constraint is Schur-concave. We show that this generalizes conventional linear autoencoding.
|
| 24 |
+
- If the constraint is strictly Schur-concave and the covariance matrix of the data has only
|
| 25 |
+
|
| 26 |
+
simple eigenvalues, then we show that the autoencoder provably recovers PCA, providing a new remedy for a known limitation of linear autoencoders.
|
| 27 |
+
|
| 28 |
+
- We use the new linear autoencoder formulation to efficiently solve a fixed-rate compression problem that we call *Principal Bit Analysis (PBA).*
|
| 29 |
+
- We demonstrate experimentally that PBA outperforms existing fixed-rate compressors on a variety of data sets and metrics.
|
| 30 |
+
- We show that a linear, variable-rate compressor that is representative of many autoencoder-based compressors in the literature effectively has a strictly Schur-concave loss, and therefore it recovers PCA.
|
| 31 |
+
|
| 32 |
+
**Related Work.** Several recent works have examined how linear autoencoders can be modified to guarantee recovery of PCA. Most solutions involve eliminating the invariant global optimal solutions by introducing regularization of some kind. [OSWS20] propose a loss function which adds k penalties to recover the k principal directions, each corresponding to recovering up to the first $i \leq k$ principal directions. [KBGS19] show that $\ell^2$ regularization helps reduce the symmetry group to the orthogonal group. [BLSG20] further break the symmetry by considering non-uniform $\ell^2$ regularization and deterministic dropout. [LNP19] consider a nonlinear autoencoder with a covariance loss term to encourage finding orthogonal directions. Recovering PCA is an important problem even in the stochastic counterpart of autoencoders. [LTGN19] analyze linear variational autoencoders (VAEs) and show that the global optimum of its objective is identical to the global optimum of log marginal likelihood of probabilistic PCA (pPCA). [RZM19] analyze an approximation to the VAE loss function and show that the linear approximation to the decoder is orthogonal.
|
| 33 |
+
|
| 34 |
+
Our result on variable-rate compressors is connected to the sizable recent literature on compression using autoencoder-like architectures. Representative contributions to the literature were noted above. Those works focus mostly on the empirical performance of deep, nonlinear networks, with a particular emphasis on finding a differentiable proxy for quantization so as to train with stochastic gradient descent. In contrast, this work considers provable properties of the compressors when trained perfectly.
|
| 35 |
+
|
| 36 |
+
**Notation.** We denote matrices by bold capital letters, e.g. $\boldsymbol{M}$, and vectors by bold small letters, e.g. $\boldsymbol{v}$. The $j$th column of a matrix $\boldsymbol{M}$ is denoted by $\boldsymbol{m}_j$ and the $j$th entry of a vector $\boldsymbol{v}$ by $[\boldsymbol{v}]_j$. We denote the set $\{1, 2, \cdots, d\}$ by $[d]$. A sequence $a_1, a_2, \cdots, a_n$ is denoted by $\{a_i\}_{i=1}^{n}$. We denote the zero column by $\boldsymbol{0}$. Logarithms without specified bases denote natural logarithms.
|
| 37 |
+
|
| 38 |
+
**Organization.** The balance of the paper is organized as follows. We describe our constrained linear autoencoder framework in Section 2. This results in an optimization problem that we solve for any Schur-concave constraint in Section 2.1. In Section 3, we recover linear autoencoders and PBA under our framework. We apply the PBA solution to a problem in variable-rate compression of Gaussian sources in Section 4. Section 5 contains experiments comparing the performance of the PBA-based fixed-rate compressor against existing fixed-rate linear compressors on image and audio datasets.
|
| 39 |
+
|
| 40 |
+
Throughout this paper we consider $C_f$ and $C_g$ to be the class of linear functions. The functions $f \in C_f$ and $g \in C_g$ can then be represented by d-by-d matrices, respectively, which we denote by W and T, respectively. Thus we have
|
| 41 |
+
|
| 42 |
+
$$f(x) = \mathbf{W}^{\top} x \tag{2}$$
|
| 43 |
+
|
| 44 |
+
$$g(x) = Tx. (3)$$
|
| 45 |
+
|
| 46 |
+
We wish to design W and T to minimize the mean squared error when the latent variables $W^{\top}x$ are quantized, subject to a constraint on the number of bits needed to represent the quantized latents. We accomplish this via two modifications of the canonical autoencoder. First, we perturb the d latent variables with zero-mean additive noise with covariance matrix $\sigma^2 I$ , which we denote by $\varepsilon$ . Thus the input to the decoder is
|
| 47 |
+
|
| 48 |
+
$$\mathbf{W}^{\top} x + \varepsilon$$
|
| 49 |
+
(4)
|
| 50 |
+
|
| 51 |
+
and our objective is to minimize the mean squared error
|
| 52 |
+
|
| 53 |
+
$$\frac{1}{n} \sum_{i=1}^{n} \mathbb{E}_{\varepsilon} \left[ \left\| \boldsymbol{x}_{i} - \boldsymbol{T} \left( \boldsymbol{W}^{\top} \boldsymbol{x}_{i} + \varepsilon \right) \right\|_{2}^{2} \right]. \tag{5}$$
|
| 54 |
+
|
| 55 |
+
This is equivalent to quantizing the latents, in the following sense [ZF92]. Let $Q(\cdot)$ be the function that maps any real number to its nearest integer and $\varepsilon$ be a random variable uniformly distributed over [-1/2,1/2]. Then for X independent of $\varepsilon$ , the quantities $Q(X+\varepsilon)-\varepsilon$ and $X+\varepsilon$ have the same joint distribution with X. Thus (5) is exactly the mean squared error if the latents are quantized to the nearest integer and $\sigma^2=\frac{1}{12}$ , assuming that the quantization is dithered. The overall system is depicted in Fig. 1.
|
| 56 |
+
|
| 57 |
+

|
| 58 |
+
|
| 59 |
+
Figure 1: Compression Block Diagram
|
| 60 |
+
|
| 61 |
+
We wish to constrain the number of bits needed to describe the latent variables. We assume that the jth quantized latent is clipped to the interval
|
| 62 |
+
|
| 63 |
+
$$\left(-\frac{\sqrt{(2a)^2\boldsymbol{w}_j^{\top}\boldsymbol{K}\boldsymbol{w}_j+1}}{2},\frac{\sqrt{(2a)^2\boldsymbol{w}_j^{\top}\boldsymbol{K}\boldsymbol{w}_j+1}}{2}\right],$$
|
| 64 |
+
|
| 65 |
+
where a > 0 is a hyperparameter and the covariance matrix K is as defined in (1). The idea is that for sufficiently large a, the interval
|
| 66 |
+
|
| 67 |
+
$$\left(-a\sqrt{\boldsymbol{w}_{j}^{\top}\boldsymbol{K}\boldsymbol{w}_{j}},a\sqrt{\boldsymbol{w}_{j}^{\top}\boldsymbol{K}\boldsymbol{w}_{j}}\right)$$
|
| 68 |
+
|
| 69 |
+
contains the latent with high probability, and adding 1 accounts for the expansion due to the dither. The number of bits needed for the jth latent is then
|
| 70 |
+
|
| 71 |
+
$$\log\left(\sqrt{4a^2\boldsymbol{w}_j^{\top}\boldsymbol{K}\boldsymbol{w}_j+1}\right) = \frac{1}{2}\log\left(4a^2\boldsymbol{w}_j^{\top}\boldsymbol{K}\boldsymbol{w}_j+1\right). \tag{6}$$
|
| 72 |
+
|
| 73 |
+
We arrive at our optimization problem:
|
| 74 |
+
|
| 75 |
+
$$\inf_{\boldsymbol{W},\boldsymbol{T}} \frac{1}{n} \sum_{i=1}^{n} \mathbb{E}_{\boldsymbol{\varepsilon}} \left[ \left\| \boldsymbol{x}_{i} - \boldsymbol{T} \left( \boldsymbol{W}^{\top} \boldsymbol{x}_{i} + \boldsymbol{\varepsilon} \right) \right\|_{2}^{2} \right]$$
|
| 76 |
+
subject to $R \geq \sum_{j=1}^{d} \frac{1}{2} \log \left( 4a^{2} \boldsymbol{w}_{j}^{\top} \boldsymbol{K} \boldsymbol{w}_{j} + 1 \right).$ (7)
|
| 77 |
+
|
| 78 |
+
Note that the function
|
| 79 |
+
|
| 80 |
+
$$\{\boldsymbol{w}_{j}^{\top}\boldsymbol{K}\boldsymbol{w}_{j}\}_{j=1}^{d} \mapsto \sum_{j=1}^{d} \frac{1}{2} \log \left(4a^{2}\boldsymbol{w}_{j}^{\top}\boldsymbol{K}\boldsymbol{w}_{j} + 1\right)$$
|
| 81 |
+
|
| 82 |
+
is strictly Schur-concave (see Appendix A for a brief review of Schur-concavity). Our first result only requires that the constraint is Schur-concave in the set of latent variances, so we will consider the more general problem
|
| 83 |
+
|
| 84 |
+
$$\inf_{\boldsymbol{W},\boldsymbol{T}} \frac{1}{n} \sum_{i=1}^{n} \mathbb{E}_{\boldsymbol{\varepsilon}} \left[ \left\| \boldsymbol{x}_{i} - \boldsymbol{T} \left( \boldsymbol{W}^{\top} \boldsymbol{x}_{i} + \boldsymbol{\varepsilon} \right) \right\|_{2}^{2} \right]$$
|
| 85 |
+
subject to $R \geq \rho \left( \left\{ \boldsymbol{w}_{j}^{\top} \boldsymbol{K} \boldsymbol{w}_{j} \right\}_{j=1}^{d} \right)$ (8)
|
| 86 |
+
|
| 87 |
+
where $\rho(\cdot)$ is any Schur-concave function.
|
| 88 |
+
|
| 89 |
+
Expressing the objective in (8) in terms of K, the optimization problem reduces to
|
| 90 |
+
|
| 91 |
+
$$\inf_{\boldsymbol{W},\boldsymbol{T}} \operatorname{tr}(\boldsymbol{K}) - 2\operatorname{tr}\left(\boldsymbol{K}\boldsymbol{W}\boldsymbol{T}^{\top}\right) + \operatorname{tr}\left(\boldsymbol{T}\left(\boldsymbol{W}^{\top}\boldsymbol{K}\boldsymbol{W} + \sigma^{2}\boldsymbol{I}\right)\boldsymbol{T}^{\top}\right)$$
|
| 92 |
+
subject to $R \ge \rho\left(\left\{\boldsymbol{w}_{j}^{\top}\boldsymbol{K}\boldsymbol{w}_{j}\right\}_{j=1}^{d}\right)$ . (9)
|
| 93 |
+
|
| 94 |
+
Since T does not appear in the rate constraint, the optimal T can be viewed as the Linear Least Squares Estimate (LLSE) of a random x given $W^{T}x + \varepsilon$ . Therefore, the optimal decoder, $T^*$ for a given encoder W is (e.g. [Kay98]):
|
| 95 |
+
|
| 96 |
+
$$T^* = KW(W^{\top}KW + \sigma^2 I)^{-1}. \tag{10}$$
|
| 97 |
+
|
| 98 |
+
Substituting for T in (9) yields an optimization problem over only $\boldsymbol{W}$
|
| 99 |
+
|
| 100 |
+
$$\inf_{\boldsymbol{W}} \operatorname{tr}(\boldsymbol{K}) - \operatorname{tr}(\boldsymbol{K}\boldsymbol{W}(\boldsymbol{W}^{\top}\boldsymbol{K}\boldsymbol{W} + \sigma^{2}\boldsymbol{I})^{-1}\boldsymbol{W}^{\top}\boldsymbol{K})$$
|
| 101 |
+
subject to $R \ge \rho \left( \left\{ \boldsymbol{w}_{j}^{\top}\boldsymbol{K}\boldsymbol{w}_{j} \right\}_{j=1}^{d} \right)$ . (11)
|
| 102 |
+
|
| 103 |
+
This problem is nonconvex in general. In the following subsection, we prove a structural result about the problem for a Schur-concave $\rho$ . Namely, we show that the nonzero rows of W must be eigenvectors of K. In Section 3, we solve the problem for the specific choice of $\rho$ in (7). We also show how this generalizes conventional linear autoencoders.
|
| 104 |
+
|
| 105 |
+
The following is the main theoretical result of the paper.
|
| 106 |
+
|
| 107 |
+
**Theorem 1.** For Schur-concave $\rho: \mathbb{R}^d_{\geq 0} \to \mathbb{R}_{\geq 0}$ and R > 0, the set of matrices whose nonzero columns are eigenvectors of the covariance matrix K is optimal for (11). If $\rho$ is strictly Schur-concave and K contains distinct eigenvalues, this set contains all optimal solutions of (11).
|
| 108 |
+
|
| 109 |
+
*Proof.* Let the eigenvalues of K be $\{\sigma_i^2\}_{i=1}^d$ with $\sigma_1^2 \geq \sigma_2^2 \geq \ldots \geq \sigma_d^2$ . Let the eigendecomposition of K be given by $K = U\Sigma U^{\top}$ where U is an orthogonal matrix whose columns are the eigenvectors of K and $\Sigma$ is a diagonal matrix with entries $\{\sigma_i^2\}_{i=1}^d$ .
|
| 110 |
+
|
| 111 |
+
We first prove that the optimal value of (11) can be achieved by a W such that $W^{\top}KW$ is a diagonal matrix. Let $\widetilde{W} = WQ$ where Q is the orthogonal matrix obtained from the eigendecomposition of $W^{\top}KW$ i.e.,
|
| 112 |
+
|
| 113 |
+
$$\boldsymbol{W}^{\top} \boldsymbol{K} \boldsymbol{W} = \boldsymbol{Q} \boldsymbol{\Lambda} \boldsymbol{Q}^{\top},$$
|
| 114 |
+
|
| 115 |
+
where $\Lambda$ is a diagonal matrix formed from the eigenvalues of $W^ op KW$ . Note that
|
| 116 |
+
|
| 117 |
+
$$\operatorname{tr}\left(\boldsymbol{K}\widetilde{\boldsymbol{W}}\left(\widetilde{\boldsymbol{W}}^{\top}\boldsymbol{K}\widetilde{\boldsymbol{W}}+\sigma^{2}\boldsymbol{I}\right)^{-1}\widetilde{\boldsymbol{W}}^{\top}\boldsymbol{K}\right)=\operatorname{tr}\left(\boldsymbol{K}\boldsymbol{W}\boldsymbol{Q}\left(\boldsymbol{\Lambda}+\sigma^{2}\boldsymbol{I}\right)^{-1}\boldsymbol{Q}^{\top}\boldsymbol{W}^{\top}\boldsymbol{K}\right)$$
|
| 118 |
+
$$=\operatorname{tr}\left(\boldsymbol{K}\boldsymbol{W}\left(\boldsymbol{Q}\boldsymbol{\Lambda}\boldsymbol{Q}^{\top}+\sigma^{2}\boldsymbol{Q}\boldsymbol{Q}^{\top}\right)^{-1}\boldsymbol{W}^{\top}\boldsymbol{K}\right).$$
|
| 119 |
+
|
| 120 |
+
Since $Q\Lambda Q^{\top} = W^{\top}KW$ and $QQ^{\top} = I$ , the objective remains the same. We now show that the constraint is only improved. Denoting the eigenvalues of $W^{\top}KW$ by $\{\nu_j\}_{j=1}^d$ , we have
|
| 121 |
+
|
| 122 |
+
$$\rho\left(\left\{\widetilde{\boldsymbol{w}}_{j}^{\top}\boldsymbol{K}\widetilde{\boldsymbol{w}}_{j}\right\}_{j=1}^{d}\right) = \rho\left(\left\{\boldsymbol{q}_{j}^{\top}\boldsymbol{W}^{\top}\boldsymbol{K}\boldsymbol{W}\boldsymbol{q}_{j}\right\}_{j=1}^{d}\right) = \rho\left(\left\{\nu_{j}\right\}_{j=1}^{d}\right).$$
|
| 123 |
+
|
| 124 |
+
Now since the eigenvalues of a Hermitian matrix majorize its diagonal elements by the Schur-Horn theorem [HJ13, Theorem 4.3.45],
|
| 125 |
+
|
| 126 |
+
$$\left\{ \boldsymbol{w}_{j}^{\top} \boldsymbol{K} \boldsymbol{w}_{j} \right\}_{j=1}^{d} \prec \left\{ \nu_{j} \right\}_{j=1}^{d}.$$
|
| 127 |
+
|
| 128 |
+
Since $\rho$ is Schur-concave, this implies
|
| 129 |
+
|
| 130 |
+
$$\rho\left(\left\{\boldsymbol{w}_{j}^{\top}\boldsymbol{K}\boldsymbol{w}_{j}\right\}_{j=1}^{d}\right) \geq \rho\left(\left\{\nu_{j}\right\}_{j=1}^{d}\right) = \rho\left(\left\{\widetilde{\boldsymbol{w}}_{j}^{\top}\boldsymbol{K}\widetilde{\boldsymbol{w}}_{j}\right\}_{j=1}^{d}\right).$$
|
| 131 |
+
|
| 132 |
+
Therefore, if $\rho$ is Schur-concave, the rate constraint can only improve. This implies an optimal solution can be attained when W is such that $W^{\top}KW$ is diagonal. If $\rho$ is strictly Schur-concave, the rate constraint strictly improves implying that the optimal W must be such that $W^{\top}KW$ is diagonal. This implies that
|
| 133 |
+
|
| 134 |
+
$$\operatorname{tr}\left(\boldsymbol{K}\boldsymbol{W}\left(\boldsymbol{W}^{\top}\boldsymbol{K}\boldsymbol{W}+\sigma^{2}\boldsymbol{I}\right)^{-1}\boldsymbol{W}^{\top}\boldsymbol{K}\right)=\operatorname{tr}\left(\boldsymbol{W}^{\top}\boldsymbol{K}^{2}\boldsymbol{W}\left(\boldsymbol{W}^{\top}\boldsymbol{K}\boldsymbol{W}+\sigma^{2}\boldsymbol{I}\right)^{-1}\right)$$
|
| 135 |
+
$$=\sum_{i=1}^{d}\frac{\boldsymbol{w}_{i}^{\top}\boldsymbol{K}^{2}\boldsymbol{w}_{i}}{\sigma^{2}+\boldsymbol{w}_{i}^{\top}\boldsymbol{K}\boldsymbol{w}_{i}}.$$
|
| 136 |
+
|
| 137 |
+
Note that minimizing the objective in (11) is equivalent to maximizing the above expression. Perform the change of variable
|
| 138 |
+
|
| 139 |
+
$$\begin{aligned} \boldsymbol{w}_j &\mapsto \left(\frac{\boldsymbol{K}^{1/2}\boldsymbol{w}_j}{\|\boldsymbol{K}^{1/2}\boldsymbol{w}_j\|}, \|\boldsymbol{K}^{1/2}\boldsymbol{w}_j\|^2\right) \quad \text{if } \boldsymbol{K}^{1/2}\boldsymbol{w}_j \neq \boldsymbol{0} \\ &= (\boldsymbol{y}_j, y_j). \end{aligned}$$
|
| 143 |
+
|
| 144 |
+
The assumption that ${\pmb W}^{\top} {\pmb K} {\pmb W}$ is diagonal and the normalization in the definition of ${\pmb y}_j$ implies that
|
| 145 |
+
|
| 146 |
+
$$\boldsymbol{Y} = [\boldsymbol{y}_1, \boldsymbol{y}_2, \cdots, \boldsymbol{y}_d]$$
|
| 147 |
+
|
| 148 |
+
is a matrix whose nonzero columns form an orthonormal set. Rewriting the objective in terms of the $(y_i, y_i)$ , we have
|
| 149 |
+
|
| 150 |
+
$$\sum_{i=1}^{d} \frac{\boldsymbol{w}_{i}^{\top} \boldsymbol{K}^{2} \boldsymbol{w}_{i}}{\sigma^{2} + \boldsymbol{w}_{i}^{\top} \boldsymbol{K} \boldsymbol{w}_{i}} = \sum_{i=1}^{d} \boldsymbol{y}_{i}^{\top} \boldsymbol{K} \boldsymbol{y}_{i} \frac{y_{i}}{\sigma^{2} + y_{i}} = \sum_{i=1}^{d} \boldsymbol{y}_{i}^{\top} \boldsymbol{K} \boldsymbol{y}_{i} m_{i},$$
|
| 151 |
+
(12)
|
| 152 |
+
|
| 153 |
+
where $m_i = \frac{y_i}{\sigma^2 + y_i}$ . Observe that under this new parametrization, the constraint only depends on $\{y_i\}_{i=1}^d$ . Without loss of generality, we assume that $y_1 \geq y_2 \geq \cdots \geq y_d$ , implying that $m_1 \geq m_2 \geq \cdots \geq m_d$ . We now prove that for given $\{y_i\}_{i=1}^d$ , choosing the $y_i$ along the eigenvectors of K is optimal.
|
| 154 |
+
|
| 155 |
+
Denote the diagonal elements of $\mathbf{Y}^{\top}\mathbf{K}\mathbf{Y}$ by $\{\lambda_i^2\}_{i=1}^d$ and let $\{\lambda_{i,\downarrow}^2\}_{i=1}^d$ denote the same diagonal elements arranged in descending order. Denote the eigenvalues of $\mathbf{Y}^{\top}\mathbf{K}\mathbf{Y}$ by $\{\mu_i^2\}_{i=1}^d$ where $\mu_1 \geq \mu_2 \geq \cdots \geq \mu_d$ . Again invoking the Schur-Horn theorem, the eigenvalues of $\mathbf{Y}^{\top}\mathbf{K}\mathbf{Y}$ majorize its diagonal entries
|
| 156 |
+
|
| 157 |
+
$$\{\lambda_i^2\}_{i=1}^d \prec \{\mu_i^2\}_{i=1}^d$$
|
| 158 |
+
(13)
|
| 159 |
+
|
| 160 |
+
Substituting $\lambda_i^2 = \boldsymbol{y}_i^{\top} \boldsymbol{K} \boldsymbol{y}_i$ in (12), we have
|
| 161 |
+
|
| 162 |
+
$$\begin{split} \sum_{i=1}^{d} \lambda_{i}^{2} m_{i} & \stackrel{(a)}{\leq} \sum_{i=1}^{d} \lambda_{i,\downarrow}^{2} m_{i} = \lambda_{1,\downarrow}^{2} m_{1} + \sum_{i=2}^{d} \left( \sum_{j=1}^{i} \lambda_{j,\downarrow}^{2} - \sum_{j=1}^{i-1} \lambda_{j,\downarrow}^{2} \right) m_{i} \\ & = \lambda_{1,\downarrow}^{2} m_{1} + \sum_{i=2}^{d} m_{i} \sum_{j=1}^{i} \lambda_{j,\downarrow}^{2} - \sum_{i=2}^{d} m_{i} \sum_{j=1}^{i-1} \lambda_{j,\downarrow}^{2} \\ & = \lambda_{1,\downarrow}^{2} (m_{1} - m_{2}) + m_{d} \left( \sum_{j=1}^{d} \lambda_{j,\downarrow}^{2} \right) + \sum_{i=2}^{d-1} (m_{i} - m_{i+1}) \sum_{j=1}^{i} \lambda_{j,\downarrow}^{2} \\ & \stackrel{(b)}{\leq} \mu_{1}^{2} (m_{1} - m_{2}) + m_{d} \left( \sum_{j=1}^{d} \mu_{j}^{2} \right) + \sum_{i=2}^{d-1} (m_{i} - m_{i+1}) \sum_{j=1}^{i} \mu_{j}^{2} \\ & \stackrel{(c)}{\leq} \sigma_{1}^{2} (m_{1} - m_{2}) + m_{d} \left( \sum_{j=1}^{d} \sigma_{j}^{2} \right) + \sum_{i=2}^{d-1} (m_{i} - m_{i+1}) \sum_{j=1}^{i} \sigma_{j}^{2} \\ & = \sum_{i=1}^{d} \sigma_{i}^{2} m_{i}, \end{split}$$
|
| 163 |
+
|
| 164 |
+
where inequality (a) follows from the assumption that $m_1 \geq m_2 \geq \cdots \geq m_d$ , and (b) from the definition in (13). Since $\mathbf{Y}$ 's nonzero columns form an orthonormal set, the eigenvalues of $\mathbf{Y}^{\top}\mathbf{K}\mathbf{Y}$ , when arranged in descending order, are at most the eigenvalues of $\mathbf{K}$ from Corollary 4.3.37 in [HJ13], and therefore (c) follows.
|
| 165 |
+
|
| 166 |
+
This upper bound is attained when $y_i = u_i$ for nonzero $y_i$ , where $u_i$ is the normalized eigenvector of K corresponding to eigenvalue $\sigma_i^2$ . To see this, note that when $y_i = u_i$ , $\lambda_i^2 = \mu_i^2 = \sigma_i^2$ . From the definition of $y_i, w_i = K^{-1/2} u_i \sqrt{y_i} = u_i \frac{\sqrt{y_i}}{\sigma_i}$ . Therefore, for a Schur-concave $\rho$ , the set of matrices whose nonzero columns are eigenvectors of K is optimal. We now prove that for a strictly Schur-concave $\rho$ , if K has distinct eigenvalues, this set contains all of the optimal solutions W.
|
| 167 |
+
|
| 168 |
+
We know that for a fixed $y_1 \geq y_2 \geq \cdots \geq y_d$ , (implying a fixed $m_1 \geq m_2 \geq \cdots \geq m_d$ ) the upper bound $\sum\limits_{i=1}^d \sigma_i^2 m_i$ is attained by the previous choice of $\boldsymbol{y}_i$ . Note that if all nonzero $m_i$ are distinct, equality in (b) and (c) is attained if and only if the nonzero diagonal elements of $\boldsymbol{Y}^\top \boldsymbol{K} \boldsymbol{Y}$ equal the corresponding eigenvalues of $\boldsymbol{K}$ . This implies that, if all nonzero $m_i$ are distinct, the upper bound is attained if and only if $\boldsymbol{y}_i = \boldsymbol{u}_i$ for nonzero $y_i$ . Therefore, it is sufficient to prove that for the following optimization problem
|
| 169 |
+
|
| 170 |
+
$$\sup_{\{y_i \ge 0\}} \sum_{i=1}^d \sigma_i^2 \frac{y_i}{\sigma^2 + y_i}$$
|
| 171 |
+
subject to $R \ge \rho\left(\{y_i\}_{i=1}^d\right)$ , (14)
|
| 172 |
+
|
| 173 |
+
any optimal $\{y_i\}$ must be such that the nonzero $y_i$ are distinct. Firstly, note that since $\sigma_1^2 > \sigma_2^2 > \cdots > \sigma_d^2$ , we must have $y_1 \geq y_2 \geq \cdots \geq y_d$ . Assume to the contrary that for an optimal $\{y_i\}_{i=1}^d$ there exists $1 \leq j, \ell < d$ such that $y_{j-1} > y_j = y_{j+1} = y_{j+2} = \cdots = y_{j+\ell} > y_{j+\ell+1} \geq 0$ , where $y_0$ is chosen to be any real number strictly greater than $y_1$ and $y_{d+1} = 0$ . Take $\delta > 0$ small. Denote a new sequence $\{y_i'\}_{i=1}^d$ where $y_j' = y_j + \delta, y_{j+\ell}' = y_{j+\ell} - \delta$ and $y_i' = y_i$ for $1 \leq i \leq d$ with $i \neq j$ and $j + \ell$ . Since $\rho$ is strictly Schur-concave, the constraint is strictly improved,
|
| 174 |
+
|
| 175 |
+
$$\rho\left(\{y_i'\}_{i=1}^d\right) < \rho\left(\{y_i\}_{i=1}^d\right).$$
|
| 176 |
+
|
| 177 |
+
Since $\sigma_j^2 > \sigma_{j+\ell}^2$ , the objective is strictly improved for sufficiently small $\delta$ ,
|
| 178 |
+
|
| 179 |
+
$$\sum_{i=1}^{d} \sigma_i^2 \frac{y_i}{\sigma^2 + y_i} < \sum_{i=1}^{d} \sigma_i^2 \frac{y_i'}{\sigma^2 + y_i'},$$
|
| 180 |
+
|
| 181 |
+
as desired. $\Box$
|
| 182 |
+
|
| 183 |
+
As a consequence of Theorem 1, encoding via an optimal W can be viewed as a projection along the eigenvectors of K, followed by different scalings applied to each component, i.e. W = US where S is a diagonal matrix with entries $s_i \ge 0$ and U is the normalized eigenvector matrix. Only S remains to be determined, and to this end, we may assume that K is diagonal with nonincreasing diagonal entries, implying U = I. In subsequent sections, our choice of $\rho$ will be
|
| 184 |
+
|
| 185 |
+
of the form $\sum_{i=1}^{d} \rho_{sl}$, where $\rho_{sl}: \mathbb{R}_{\geq 0} \to \mathbb{R}_{\geq 0}$<sup>1</sup> is (strictly) concave, making $\rho$ (strictly) Schur-concave (see Proposition 9 in Appendix A). Therefore, (11) reduces to
|
| 186 |
+
|
| 187 |
+
$$\inf_{S} \operatorname{tr}(\boldsymbol{K}) - \operatorname{tr}(\boldsymbol{K}\boldsymbol{S}(\boldsymbol{S}^{\top}\boldsymbol{K}\boldsymbol{S} + \sigma^{2}\boldsymbol{I})^{-1}\boldsymbol{S}^{\top}\boldsymbol{K})$$
|
| 188 |
+
subject to $R \ge \rho_{sl}\left(\left\{s_{i}^{2}\sigma_{i}^{2}\right\}\right)$ , (15)
|
| 189 |
+
|
| 190 |
+
where the infimum is over diagonal matrices S. To handle situations for which
|
| 191 |
+
|
| 192 |
+
$$\lim_{s \to \infty} \rho_{sl}(s) < \infty, \tag{16}$$
|
| 193 |
+
|
| 194 |
+
we allow the diagonal entries of S to be ∞, with the objective for such cases defined via its continuous extension.
|
| 195 |
+
|
| 196 |
+
In the next section, we will solve (15) for several specific choices of $\rho_{sl}$.
|
| 197 |
+
|
| 198 |
+
Given a centered dataset $x_1, x_2, \cdots, x_n \in \mathbb{R}^d$, consider a linear autoencoder optimization problem where the encoder and decoder, $\boldsymbol{W}$ and $\boldsymbol{T}$, respectively, are $d$-by-$k$ matrices where $k \leq d$ is a parameter. The goal is to minimize the mean squared error as given by (5). PCA corresponds to the global optimal solution of this optimization problem, where $\boldsymbol{W} = \boldsymbol{T} = \boldsymbol{U}_k$, where $\boldsymbol{U}_k \in \mathbb{R}^{d \times k}$ is a matrix whose columns are the $k$ eigenvectors corresponding to the $k$ largest eigenvalues of $\boldsymbol{K}$. However, there are multiple global optimal solutions, given by any encoder-decoder pair of the form $(\boldsymbol{U}_k\boldsymbol{V}, \boldsymbol{U}_k\boldsymbol{V})$, where $\boldsymbol{V}$ is an orthogonal matrix [BH89].
|
| 199 |
+
|
| 200 |
+
We now recover linear autoencoders through our framework in Section 2. Consider the optimization problem in (15) where $\rho_{sl}: \mathbb{R}_{\geq 0} \to \{0, 1\}$ is a concave function defined as
|
| 201 |
+
|
| 202 |
+
$$\rho_{sl}(x) = \mathbf{1} \left[ x > 0 \right]. \tag{17}$$
|
| 203 |
+
|
| 204 |
+
Note that this penalizes the dimension of the latents, as desired. Note also that this cost is Schur-concave but not strictly so. The fact that PCA solves conventional linear autoencoding, but is not necessarily the unique solution, follows immediately from Theorem 1.
|
| 205 |
+
|
| 206 |
+
**Theorem 2.** *If $\rho_{sl}(\cdot)$ is given by (17), then an optimal solution for (15) is given by a diagonal matrix $\boldsymbol{S}$ whose top $\min(\lfloor R \rfloor, d)$ diagonal entries are equal to $\infty$ and the remaining entries are $0$.*
|
| 207 |
+
|
| 208 |
+
*Proof.* Let $\mathcal{F} \stackrel{\text{def}}{=} \{i \in [d] : s_i > 0\}$, implying $|\mathcal{F}| \leq R$. Since $\boldsymbol{K}$ and $\boldsymbol{S}$ are diagonal, the optimization problem in (15) can be written as
|
| 209 |
+
|
| 210 |
+
$$\inf_{\{s_{\ell}\}} \sum_{j \in [d] \setminus \mathcal{F}} \sigma_{j}^{2} + \sum_{\ell \in \mathcal{F}} \frac{\sigma^{2} \sigma_{\ell}^{2}}{\sigma^{2} + \sigma_{\ell}^{2} s_{\ell}^{2}}$$
|
| 211 |
+
subject to $R \ge \sum_{i=1}^{d} \mathbf{1} \left[ s_{i} > 0 \right].$ (18)
|
| 212 |
+
|
| 213 |
+
<sup>1</sup> "sl" stands for single-letter
|
| 214 |
+
|
| 215 |
+
Since the value of $s_{\ell}$, $\ell \in \mathcal{F}$ does not affect the rate constraint, each of the $s_{\ell}$ can be made as large as possible without changing the rate constraint. Therefore, the infimum value of the objective is $\sum_{j \in [d] \setminus \mathcal{F}} \sigma_j^2$. Since we seek to minimize the distortion, the optimal $\mathcal{F}$ is the set of indices with the largest $|\mathcal{F}|$ eigenvalues. Since the number of these eigenvalues cannot exceed R, we choose $|\mathcal{F}| = \min(\lfloor R \rfloor, d)$.
|
| 216 |
+
|
| 217 |
+
Unlike the conventional linear autoencoder framework, in Section 2, the latent variables $W^{\top}x$ are quantized, which we model with additive white noise of fixed variance. Therefore, an infinite value of $s_i$ indicates sending $u_i^{\top}x$ with full precision where $u_i$ is the eigenvector corresponding to the $i^{th}$ largest eigenvalue. This implies that PCA with parameter k corresponds to W = US, where S is a diagonal matrix whose top k diagonal entries are equal to $\infty$ and the d-k remaining diagonal entries are 0. Therefore, for any R such that $\lfloor R \rfloor = k$ , an optimal solution to (15) corresponds to linearly projecting the data along the top k eigenvectors, which is the same as PCA. Note that, like [BH89], we only prove that projecting along the eigenvectors is one of possibly other optimal solutions. However, even a slight amount of curvature in $\rho$ would make it strictly Schur-concave, thus recovering the principal directions. We next turn to a specific cost function with curvature, namely the PBA cost function that was our original motivation.
|
2106.06499/main_diagram/main_diagram.drawio
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2106.06499/paper_text/intro_method.md
ADDED
|
@@ -0,0 +1,147 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Introduction
|
| 2 |
+
|
| 3 |
+
We consider the following question: *How should an intelligent agent act if it has epistemic uncertainty over its objective function?* In the fields of reinforcement learning (RL) and optimal control, researchers and practitioners typically assume a known reward or cost function, which is then optimized to obtain a policy. However, even in settings where the reward function is specified, it is usually only a best approximation of the objective function that a human thinks will lead to desirable behavior. Furthermore,
|
| 4 |
+
|
| 5 |
+
*Proceedings of the* 38 th *International Conference on Machine Learning*, PMLR 139, 2021. Copyright 2021 by the author(s).
|
| 6 |
+
|
| 7 |
+
human-designed reward functions are also often augmented with human feedback. This may also result in reward uncertainty since human feedback, be it in the form of policy shaping (Griffith et al., 2013), reward shaping (Knox & Stone, 2012), or a hand-designed reward function (Hadfield-Menell et al., 2017; Ratner et al., 2018), can fail to perfectly disambiguate the human's true intent (Amodei et al., 2016).
|
| 8 |
+
|
| 9 |
+
Reward function ambiguity is also a key problem in imitation learning (Hussein et al., 2017; Osa et al., 2018), in which an agent seeks to learn a policy from demonstrations without access to the reward function that motivated the demonstrations. While many imitation learning approaches either sidestep learning a reward function and directly seek to imitate demonstrations (Pomerleau, 1991; Torabi et al., 2018) or take a maximum likelihood (Choi & Kim, 2011; Brown et al., 2019) or maximum entropy approach to learning a reward function (Ziebart et al., 2008; Fu et al., 2017), we believe that an imitation learning agent should explicitly reason about uncertainty over the true reward function to avoid misalignment with the demonstrator's objectives (Hadfield-Menell et al., 2017; Brown et al., 2020a). Bayesian inverse reinforcement learning (IRL) methods (Ramachandran & Amir, 2007) seek a posterior distribution over likely reward functions given demonstrations, but often perform policy optimization using the expected reward function or MAP reward function (Ramachandran & Amir, 2007; Choi & Kim, 2011; Ratner et al., 2018; Brown et al., 2020a). However, in many real world settings such as robotics, finance, and healthcare, we desire a policy which is robust to uncertainty over the true reward function.
|
| 10 |
+
|
| 11 |
+
Prior work on risk-averse and robust policy optimization in reinforcement learning has mainly focused on robustness to uncertainty over the true dynamics of the environment, but assumes a known reward function (García & Fernández, 2015; Tamar et al., 2015; Tang et al., 2020; Derman et al., 2018; Lobo et al., 2020; Thananjeyan et al., 2021). Some work addresses robust policy optimization under reward function uncertainty by taking a maxmin approach and optimizing a policy that is robust under the worst-case reward function (Syed et al., 2008; Regan & Boutilier, 2009; Hadfield-Menell et al., 2017; Huang et al., 2018). However, these approaches are limited to tabular domains, and maxmin approaches have been shown to sometimes lead to
|
| 12 |
+
|
| 13 |
+
<sup>1</sup>EECS Department, University of California, Berkeley <sup>2</sup>CS Department, University of New Hampshire. Correspondence to: Zaynah Javed <zjaved@berkeley.edu>, Daniel Brown <dsbrown@berkeley.edu>.
|
| 14 |
+
|
| 15 |
+
incorrect and overly pessimistic policy evaluations (Brown & Niekum, 2018). As an alternative to maxmin approaches, recent work (Brown et al., 2020b) proposed a linear programming approach, BROIL: Bayesian Robust Optimization for Imitation Learning, that balances risk-aversion (in terms of Conditional Value at Risk (Rockafellar et al., 2000)) and expected performance. This approach supports a family of solutions depending on the risk-sensitivity of the application domain. However, as their approach is built on linear programming, it cannot be applied in MDPs with continuous state and action spaces and unknown dynamics.
|
| 16 |
+
|
| 17 |
+
In this work, we introduce a novel policy optimization approach that enables varying degrees of risk-sensitivity by reasoning about reward uncertainty while scaling to continuous MDPs with unknown dynamics. As in Brown et al. (2020b), we present an approach which reasons simultaneously about risk-aversion (in terms of Conditional Value at Risk (Rockafellar et al., 2000)) and expected performance and balances the two. However, to enable such reasoning in continuous spaces, we make a key observation: the Conditional Value at Risk objective supports efficient computation of an approximate subgradient, which can then be used in a policy gradient method. This makes it possible to use any policy gradient algorithm, such as TRPO (Schulman et al., 2017a) or PPO (Schulman et al., 2017b) to learn policies which are robust to reward uncertainty, resulting in an efficient and scalable algorithm. To the best of our knowledge, our proposed algorithm, Policy Gradient Bayesian Robust Optimization for Imitation Learning (PG-BROIL), is the first policy optimization algorithm robust to a distribution of reward hypotheses that can scale to complex MDPs with continuous state and action spaces.
|
| 18 |
+
|
| 19 |
+
To evaluate PG-BROIL, we consider settings where there is uncertainty over the true reward function. We first examine the setting where we have an a priori distribution over reward functions and find that PG-BROIL is able to optimize policies that effectively trade-off between expected and worst-case performance. Then, we leverage recent advances in efficient Bayesian reward inference (Brown et al., 2020a) to infer a posterior over reward functions from preferences over demonstrated trajectories. While other approaches which do not reason about reward uncertainty overfit to a single reward function hypothesis, PG-BROIL optimizes a policy that hedges against multiple reward function hypotheses. When there is high reward function ambiguity due to limited demonstrations, we find that PG-BROIL results in significant performance improvements over other state-of-the-art imitation learning methods.
|
| 20 |
+
|
| 21 |
+
# Method
|
| 22 |
+
|
| 23 |
+
We model the environment as a Markov Decision Process (MDP) (Puterman, 2005). An MDP is a tuple $(\mathcal{S}, \mathcal{A}, r, P, \gamma, p_0)$ , with state space $\mathcal{S}$ , action space $\mathcal{A}$ , reward function $r: \mathcal{S} \times \mathcal{A} \to \mathbb{R}$ , transition dynamics $P: \mathcal{S} \times \mathcal{A} \times \mathcal{S} \to [0,1]$ , discount factor $\gamma \in [0,1)$ , and initial state distribution $p_0$ . We consider stochastic policies $\pi: \mathcal{S} \times \mathcal{A} \to [0,1]$ which output a distribution over $\mathcal{A}$ conditioned on a state $s \in \mathcal{S}$ . We denote the expected return of a policy $\pi$ under reward function r as $v(\pi,r) = \mathbb{E}_{\tau \sim \pi_{\theta}}[r(\tau)]$ .
|
| 24 |
+
|
| 25 |
+
We are interested in solving MDPs when there is epistemic uncertainty over the true reward function. When we refer to the reward function as a random variable we will use R, and will use r to denote a specific model of the reward function. Reward functions are often parameterized as a linear combination of known features (Abbeel & Ng, 2004; Ziebart et al., 2008; Sadigh et al., 2017) or as a deep neural network
|
| 26 |
+
|
| 27 |
+

|
| 28 |
+
|
| 29 |
+
Figure 1. The pdf f(X) of a random variable X. $VaR_{\alpha}$ measures the $(1-\alpha)$ -quantile outcome. $CVaR_{\alpha}$ measures the expectation given that we only consider values less than the $VaR_{\alpha}$ .
|
| 30 |
+
|
| 31 |
+
(Ho & Ermon, 2016; Fu et al., 2017). Thus, we can model uncertainty in the reward function as a distribution over R, or, equivalently, as a distribution over the reward function parameters. This distribution could be a prior distribution $\mathbb{P}(R)$ that the agent learns from previous tasks (Xu et al., 2019). Alternatively, the distribution could be the posterior distribution $\mathbb{P}(R \mid D)$ learned via Bayesian inverse reinforcement learning (Ramachandran & Amir, 2007) given demonstrations D, the posterior distribution $\mathbb{P}(R \mid \mathcal{P}, D)$ given preferences $\mathcal{P}$ over demonstrations (Sadigh et al., 2017; Brown et al., 2020a), or the posterior distribution $\mathbb{P}(R \mid r')$ learned via inverse reward design given a humanspecified proxy reward r' (Hadfield-Menell et al., 2017; Ratner et al., 2018). This distribution is typically only available via sampling techniques such as Markov chain Monte Carlo (MCMC) sampling (Ramachandran & Amir, 2007; Hadfield-Menell et al., 2017; Brown et al., 2020a).
|
| 32 |
+
|
| 33 |
+
We are interested in robust policy optimization with respect to a distribution over the performance of the policy induced by a distribution over possible reward functions. Consider a policy $\pi$ and a reward distribution $\mathbb{P}(R)$ . Together, $\pi$ and $\mathbb{P}(R)$ induce a distribution over the expected return of the policy, $v(\pi,R)$ , $R \sim \mathbb{P}(R)$ . We seek a robust policy that minimizes tail risk, given some risk measure, under the induced distribution v. Figure 1 visualizes two common risk measures: value at risk (VaR) and conditional value at risk (CVaR), for a general random variable X. In our setting, X corresponds to the expected return, $v(\pi,R)$ , of a policy $\pi$ under the reward function random variable R, and the objective is to minimize the tail risk (visualized in red).
|
| 34 |
+
|
| 35 |
+
Given a risk-aversion parameter $\alpha \in [0, 1]$ , the $VaR_{\alpha}$ of a random variable X is the $(1 - \alpha)$ -quantile outcome:
|
| 36 |
+
|
| 37 |
+
$$VaR_{\alpha}[X] = \sup\{x : \mathbb{P}(X \ge x) \ge \alpha\},\tag{1}$$
|
| 38 |
+
|
| 39 |
+
where it is common to have $\alpha \in [0.9, 1]$ .
|
| 40 |
+
|
| 41 |
+
Despite the popularity of VaR, optimizing a policy for VaR has several problems: (1) optimizing for VaR results in an NP-hard optimization problem (Delage & Mannor, 2010),
|
| 42 |
+
|
| 43 |
+
(2) VaR ignores risk in the tail that occurs with probability less than (1 − α) which is problematic for domains where there are rare but potentially catastrophic outcomes, and (3) VaR is not a coherent risk measure (Artzner et al., 1999).
|
| 44 |
+
|
| 45 |
+
CVaR is a coherent risk measure (Delbaen, 2002), also known as average value at risk, expected tail risk, or expected shortfall. For continuous distributions
|
| 46 |
+
|
| 47 |
+
$$\operatorname{CVaR}_{\alpha}[X] = \mathbb{E}_{f(X)}[X \mid X \leq \operatorname{VaR}_{\alpha}[X]].$$
|
| 48 |
+
(2)
|
| 49 |
+
|
| 50 |
+
In addition to being coherent, CVaR can be maximized via convex optimization, does not ignore the tail of the distribution, and is a lower bound on VaR. Because of these desirable properties, we would like to use CVaR as our risk measure. However, because posterior distributions obtained via Bayesian IRL are often discrete (Ramachandran & Amir, 2007; Sadigh et al., 2017; Hadfield-Menell et al., 2017; Brown & Niekum, 2018), we cannot directly optimize for CVaR using the definition in Equation (2) since this definition only works for atomless distributions. Instead, we make use of the following definition of CVaR, proposed by Rockafellar et al. (2000), that works for any distribution:
|
| 51 |
+
|
| 52 |
+
$$\operatorname{CVaR}_{\alpha}[X] = \max_{\sigma} \left( \sigma - \frac{1}{1-\alpha} \mathbb{E}[(\sigma - X)_{+}] \right) , \quad (3)$$
|
| 53 |
+
|
| 54 |
+
where $(x)_{+} = \max(0, x)$ and $\sigma$ roughly corresponds to the $\operatorname{VaR}_{\alpha}$ . To gain intuition for this formula, note that if we define $\sigma = \operatorname{VaR}_{\alpha}[X]$ we can rewrite $\operatorname{CVaR}_{\alpha}$ as
|
| 55 |
+
|
| 56 |
+
$$CVaR_{\alpha}[X] = \mathbb{E}_{f(X)}[X \mid X \le \sigma]$$
|
| 57 |
+
(4)
|
| 58 |
+
|
| 59 |
+
$$= \sigma - \mathbb{E}_{f(X)}[\sigma - X \mid X \le \sigma] \tag{5}$$
|
| 60 |
+
|
| 61 |
+
$$= \sigma - \frac{\mathbb{E}_{f(X)}[\mathbf{1}_{X \le \sigma} \cdot (\sigma - X)]}{P(X \le \sigma)}$$
|
| 62 |
+
(6)
|
| 63 |
+
|
| 64 |
+
$$= \sigma - \frac{1}{1 - \alpha} \mathbb{E}_{f(X)}[(\sigma - X)_{+}] \qquad (7)$$
|
| 65 |
+
|
| 66 |
+
where $\mathbf{1}_{x}$ is the indicator function that evaluates to 1 if x is True and 0 otherwise, and where we used the linearity of expectation, the definition of conditional expectation, and the definitions of $\operatorname{VaR}_{\alpha}[X]$ and $(x)_{+}$ . Taking the maximum over $\sigma \in \mathbb{R}$ gives us the definition in Equation (3).
|
| 67 |
+
|
| 68 |
+
In Section 4.1 we describe the Bayesian robust optimization for imitation learning (BROIL) objective, previously proposed by (Brown et al., 2020b). Then, in sections 4.2 and 4.3, we derive a novel policy gradient update for BROIL and provide an intuitive explanation for the result.
|
| 69 |
+
|
| 70 |
+
Rather than seeking a purely risk-sensitive or purely risk-neutral approach, we seek to optimize a soft-robust objective that balances the expected and probabilistic worst-case performance of a policy. Given some performance metric $\psi(\pi_{\theta}, R)$ where $R \sim \mathbb{P}(R)$ , Brown et al. (2020b) recently proposed Bayesian Robust Optimization for Imitation Learning (BROIL) which seeks to optimize the following:
|
| 71 |
+
|
| 72 |
+
$$\max_{\pi_{\theta}} \lambda \cdot \mathbb{E}_{\mathbb{P}(R)}[\psi(\pi_{\theta}, R)] + (1 - \lambda) \cdot \text{CVaR}_{\alpha} \left[ \psi(\pi_{\theta}, R) \right]$$
|
| 73 |
+
(8)
|
| 74 |
+
|
| 75 |
+
For MDPs with discrete states and actions and known dynamics, Brown et al. (2020b) showed that this problem can be formulated as a linear program which can be solved in polynomial time. However, many MDPs of interest involve continuous states and actions and unknown dynamics.
|
| 76 |
+
|
| 77 |
+
We now derive a policy gradient objective for BROIL that allows us to extend BROIL to continuous states and actions and unknown transition dynamics, enabling robust policy learning in a wide variety of practical settings. Given a parameterized policy $\pi_{\theta}$ and N possible reward hypotheses, there are many possible choices for the performance metric $\psi(\pi_{\theta}, R)$ . Brown et al. (2020b) considered two common metrics: (1) expected value, i.e., $\psi(\pi_{\theta}, R) = v(\pi_{\theta}, R) = \mathbb{E}_{\tau \sim \pi_{\theta}}[R(\tau)]$ and (2) baseline regret, i.e., $\psi(\pi_{\theta}, R) = v(\pi_{\theta}, R) - v(\pi_{E}, R)$ where $\pi_{E}$ denotes an expert policy (usually estimated from demonstrations). In Appendix A we derive a more general form for any performance metric $\psi(\pi_{\theta}, R)$ and also give the derivation for the baseline regret performance metric. For simplicity, we let $\psi(\pi_{\theta}, R) = v(\pi_{\theta}, R)$ (expected return) hereafter.
|
| 78 |
+
|
| 79 |
+
To find the policy that maximizes Equation (8) we need the gradient with respect to the policy parameters θ. For the first term in Equation (8), we have
|
| 80 |
+
|
| 81 |
+
$$\nabla_{\theta} \mathbb{E}_{\mathbb{P}(R)}[v(\pi_{\theta}, R)] \approx \sum_{i=1}^{N} \mathbb{P}(r_{i}) \nabla_{\theta} \mathbb{E}_{\tau \sim \pi_{\theta}}[r_{i}(\tau)]. \quad (9)$$
|
| 82 |
+
|
| 83 |
+
Next, we consider the gradient of the CVaR term. CVaR is not differentiable everywhere so we derive a sub-gradient. Given a finite number of samples from the reward function posterior, we can write this sub-gradient as
|
| 84 |
+
|
| 85 |
+
$$\nabla_{\theta} \max_{\sigma} \left( \sigma - \frac{1}{1 - \alpha} \sum_{i=1}^{N} \mathbb{P}(r_i) \left( \sigma - \mathbb{E}_{\tau \sim \pi_{\theta}} [r_i(\tau)] \right)_{+} \right)$$
|
| 86 |
+
(10)
|
| 87 |
+
|
| 88 |
+
where (x)<sup>+</sup> = max(0, x). To solve for the sub-gradient of this term, note that given a fixed policy πθ, we can solve for σ via a line search: since the objective is piece-wise linear we only need to check the value at each point $v(\pi, r_i)$ , for each reward function sample from the posterior since these are the endpoints of each linear segment. If we let $v_i = v(\pi, r_i)$ then we can quickly iterate over all reward function hypotheses and solve for $\sigma$ as
|
| 89 |
+
|
| 90 |
+
$$\sigma^* = \underset{\sigma \in \{v_1, \dots, v_N\}}{\operatorname{argmax}} \left( \sigma - \frac{1}{1 - \alpha} \sum_{i=1}^N \mathbb{P}(r_i) \left[ \sigma - v_i \right]_+ \right). \tag{11}$$
|
| 91 |
+
|
| 92 |
+
Solving for $\sigma^*$ requires estimating $v_i$ by collecting a set $\mathcal{T}$ of on-policy trajectories $\tau \sim \pi_\theta$ where $\tau = (s_0, a_0, s_1, a_1, \ldots, s_T, a_T)$ :
|
| 93 |
+
|
| 94 |
+
$$v_i \approx \frac{1}{|\mathcal{T}|} \sum_{\tau \in \mathcal{T}} \sum_{t=0}^{T} r_i(s_t, a_t).$$
|
| 95 |
+
(12)
|
| 96 |
+
|
| 97 |
+
Solving for $\sigma^*$ does not require additional data collection beyond what is required for standard policy gradient approaches. We simply evaluate the set of rollouts $\mathcal{T}$ from $\pi_\theta$ under each reward function hypothesis, $r_i$ and then solve the optimization problem above to find $\sigma^*$ . While this requires more computation than a standard policy gradient approach—we have to evaluate each rollout under N reward functions—this does not increase the online data collection, which is often the bottleneck in RL algorithms.
|
| 98 |
+
|
| 99 |
+
Given the solution $\sigma^*$ found by solving the optimization problem in (11), we perform a step of policy gradient optimization by following the sub-gradient of CVaR with respect to the policy parameters $\theta$ :
|
| 100 |
+
|
| 101 |
+
$$\nabla_{\theta} \operatorname{CVaR}_{\alpha} = \frac{1}{1 - \alpha} \sum_{i=1}^{N} \mathbb{P}(r_i) \mathbf{1}_{\sigma^* \ge v(\pi_{\theta}, r_i)} \nabla_{\theta} v(\pi_{\theta}, r_i)$$
|
| 102 |
+
(13)
|
| 103 |
+
|
| 104 |
+
where $\mathbf{1}_x$ is the indicator function that evaluates to 1 if x is True and 0 otherwise. Given the sub-gradient of the BROIL objective (13), the only thing remaining to compute is the standard policy gradient. Note that in standard RL, we write the policy gradient as (Sutton & Barto, 2018):
|
| 105 |
+
|
| 106 |
+
$$\nabla_{\theta} \mathbb{E}_{\tau \sim \pi_{\theta}}[R(\tau)] = \mathbb{E}_{\tau \sim \pi_{\theta}} \left[ \sum_{t=0}^{T} \nabla_{\theta} \log \pi_{\theta}(a_{t} \mid s_{t}) \Phi_{t}(\tau) \right]$$
|
| 107 |
+
(14)
|
| 108 |
+
|
| 109 |
+
where $\Phi_t$ is a measure of the performance of trajectory $\tau$ starting at time t. One of the most common forms of $\Phi_t(\tau)$ is the on-policy advantage function (Schulman et al., 2015) with respect to some single reward function:
|
| 110 |
+
|
| 111 |
+
$$\Phi_t(\tau) = A^{\pi_{\theta}}(s_t, a_t) = Q^{\pi_{\theta}}(s_t, a_t) - V^{\pi_{\theta}}(s_t).$$
|
| 112 |
+
(15)
|
| 113 |
+
|
| 114 |
+
If we define $\Phi_t^{r_i}$ in terms of a particular reward function $r_i$ , then, as we show in Appendix A, we can rearrange
|
| 115 |
+
|
| 116 |
+
terms in the standard policy gradient formula to obtain the following form for the BROIL policy gradient which we estimate using a set $\mathcal{T}$ of on-policy trajectories $\tau \sim \pi_{\theta}$ where $\tau = (s_0, a_0, s_1, a_1, \ldots, s_T, a_T)$ as follows:
|
| 117 |
+
|
| 118 |
+
$$\nabla_{\theta} \text{BROIL} \approx \frac{1}{|\mathcal{T}|} \sum_{\tau \in \mathcal{T}} \left[ \sum_{t=0}^{T} \nabla_{\theta} \log \pi_{\theta}(a_t \mid s_t) w_t(\tau) \right]$$
|
| 119 |
+
(16)
|
| 120 |
+
|
| 121 |
+
where
|
| 122 |
+
|
| 123 |
+
$$w_t(\tau) = \sum_{i=1}^{N} \mathbb{P}(r_i) \Phi_t^{r_i}(\tau) \left( \lambda + \frac{1-\lambda}{1-\alpha} \mathbf{1}_{\sigma^* \ge v(\pi, r_i)} \right)$$
|
| 124 |
+
(17)
|
| 125 |
+
|
| 126 |
+
is the weight associated with each state-action pair $(s_t, a_t)$ in the set of trajectory rollouts $\mathcal{T}$ . The resulting vanilla policy gradient algorithm is summarized in Algorithm 1. In Appendix C we show how to apply a trust-region update based on Proximal Policy Optimization (Schulman et al., 2017b) for more stable policy gradient optimization.
|
| 127 |
+
|
| 128 |
+
Consider the policy gradient weight $w_t$ given in Equation (17). If $\lambda = 1$ , then
|
| 129 |
+
|
| 130 |
+
$$w_t(\tau) = \sum_{i=1}^{N} \mathbb{P}(R_i) \Phi_t^{R_i}(\tau) = \Phi_t^{\bar{R}}(\tau)$$
|
| 131 |
+
(18)
|
| 132 |
+
|
| 133 |
+
where $\bar{R}$ is the expected reward under the posterior. Thus, $\lambda=1$ is equivalent to standard policy gradient optimization under the mean reward function and gradient ascent will focus on increasing the likelihood of actions that look good in expectation over the reward function distribution $\mathbb{P}(R)$ . Alternatively, if $\lambda=0$ , then
|
| 134 |
+
|
| 135 |
+
$$w_t(\tau) = \frac{1}{1 - \alpha} \sum_{i=1}^{N} \mathbf{1}_{\sigma^* \ge v(\pi, R_i)} \mathbb{P}(R_i) \Phi_t^{R_i}(\tau)$$
|
| 136 |
+
(19)
|
| 137 |
+
|
| 138 |
+
and gradient ascent will increase the likelihood of actions that look good under reward functions that the current policy $\pi_{\theta}$ performs poorly under, i.e., policy gradient updates will focus on improving performance under all $R_i$ such that $v(\pi,R_i)\leq\sigma^*$ , weighting the gradient according to the likelihood of these worst-case reward functions. The update rule also multiplies by $1/(1-\alpha)$ which acts to normalize the magnitude of the gradient: as $\alpha\to 1$ we update on reward functions further into the tail, which have smaller probability mass. Thus, $\lambda\in[0,1]$ allows us to blend between maximizing policy performance in expectation versus worst-case and $\alpha\in[0,1)$ determines how far into the tail of the distribution to focus the worst-case updates.
|
| 139 |
+
|
| 140 |
+
- 1: Input: initial policy parameters $\theta_0$ , samples from reward function posterior $r_1, \ldots, r_N$ and associated probabilities, $\mathbb{P}(r_1), \ldots, \mathbb{P}(r_N)$ .
|
| 141 |
+
- 2: for k = 0, 1, 2, . . . do
|
| 142 |
+
- 3: Collect set of trajectories $\mathcal{T}_k = \{\tau_i\}$ by running policy $\pi_{\theta_k}$ in the environment.
|
| 143 |
+
- 4: Estimate expected return of $\pi_{\theta_k}$ under each reward function hypothesis $r_j$ using Eq. (12).
|
| 144 |
+
- 5: Solve for $\sigma^*$ using Eq. (11).
|
| 145 |
+
- 6: Estimate policy gradient using Eq. (16) and Eq. (17).
|
| 146 |
+
- 7: Update θ using gradient ascent.
|
| 147 |
+
- 8: end for
|
2108.00230/record.json
ADDED
|
@@ -0,0 +1,32 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"arxiv_id": "2108.00230",
|
| 3 |
+
"month": "2021_08",
|
| 4 |
+
"year": 2021,
|
| 5 |
+
"conference": "ICML",
|
| 6 |
+
"title": "Pure Exploration and Regret Minimization in Matching Bandits",
|
| 7 |
+
"arxiv_url": "https://arxiv.org/abs/2108.00230",
|
| 8 |
+
"source": {
|
| 9 |
+
"paper_dir": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_08/main_diagram_database/2108.00230",
|
| 10 |
+
"tex_dir": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_08/tex_files_extracted/2108.00230",
|
| 11 |
+
"paper_md": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_08/main_diagram_database/2108.00230/paper_text/paper.md",
|
| 12 |
+
"metadata_json": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_08/main_diagram_database/2108.00230/metadata.json",
|
| 13 |
+
"intro_method_from": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_08/main_diagram_database/2108.00230/paper_text/paper.md",
|
| 14 |
+
"intro_method_from_kind": "markdown"
|
| 15 |
+
},
|
| 16 |
+
"files": {
|
| 17 |
+
"main_drawio": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2108.00230/main_diagram/main_diagram.drawio",
|
| 18 |
+
"main_png": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2108.00230/main_diagram/main_diagram.png",
|
| 19 |
+
"main_pdf": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2108.00230/main_diagram/main_diagram.pdf",
|
| 20 |
+
"intro_method_md": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2108.00230/paper_text/intro_method.md",
|
| 21 |
+
"paper_pdf": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2108.00230/paper.pdf",
|
| 22 |
+
"latex": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2108.00230/latex_source"
|
| 23 |
+
},
|
| 24 |
+
"status": {
|
| 25 |
+
"copy_drawio": "exists",
|
| 26 |
+
"copy_png": "exists",
|
| 27 |
+
"diagram_pdf": "pdf_exists",
|
| 28 |
+
"intro_method": "exists",
|
| 29 |
+
"paper_pdf": "exists",
|
| 30 |
+
"latex": "exists"
|
| 31 |
+
}
|
| 32 |
+
}
|
2201.01819/record.json
ADDED
|
@@ -0,0 +1,32 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"arxiv_id": "2201.01819",
|
| 3 |
+
"month": "2022_01",
|
| 4 |
+
"year": 2022,
|
| 5 |
+
"conference": "AAAI",
|
| 6 |
+
"title": "Proxy Learning of Visual Concepts of Fine Art Paintings from Styles through Language Models",
|
| 7 |
+
"arxiv_url": "https://arxiv.org/abs/2201.01819",
|
| 8 |
+
"source": {
|
| 9 |
+
"paper_dir": "/home/zling/lzl/ICML2026/Build_Dataset/data/2022_01/main_diagram_database/2201.01819",
|
| 10 |
+
"tex_dir": "/home/zling/lzl/ICML2026/Build_Dataset/data/2022_01/tex_files_extracted/2201.01819",
|
| 11 |
+
"paper_md": "/home/zling/lzl/ICML2026/Build_Dataset/data/2022_01/main_diagram_database/2201.01819/paper_text/paper.md",
|
| 12 |
+
"metadata_json": "/home/zling/lzl/ICML2026/Build_Dataset/data/2022_01/main_diagram_database/2201.01819/metadata.json",
|
| 13 |
+
"intro_method_from": "/home/zling/lzl/ICML2026/Build_Dataset/data/2022_01/main_diagram_database/2201.01819/paper_text/paper.md",
|
| 14 |
+
"intro_method_from_kind": "markdown"
|
| 15 |
+
},
|
| 16 |
+
"files": {
|
| 17 |
+
"main_drawio": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2201.01819/main_diagram/main_diagram.drawio",
|
| 18 |
+
"main_png": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2201.01819/main_diagram/main_diagram.png",
|
| 19 |
+
"main_pdf": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2201.01819/main_diagram/main_diagram.pdf",
|
| 20 |
+
"intro_method_md": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2201.01819/paper_text/intro_method.md",
|
| 21 |
+
"paper_pdf": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2201.01819/paper.pdf",
|
| 22 |
+
"latex": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2201.01819/latex_source"
|
| 23 |
+
},
|
| 24 |
+
"status": {
|
| 25 |
+
"copy_drawio": "exists",
|
| 26 |
+
"copy_png": "exists",
|
| 27 |
+
"diagram_pdf": "pdf_exists",
|
| 28 |
+
"intro_method": "exists",
|
| 29 |
+
"paper_pdf": "exists",
|
| 30 |
+
"latex": "exists"
|
| 31 |
+
}
|
| 32 |
+
}
|
2203.06074/record.json
ADDED
|
@@ -0,0 +1,32 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"arxiv_id": "2203.06074",
|
| 3 |
+
"month": "2022_03",
|
| 4 |
+
"year": 2022,
|
| 5 |
+
"conference": "ECCV",
|
| 6 |
+
"title": "TAPE: Task-Agnostic Prior Embedding for Image Restoration",
|
| 7 |
+
"arxiv_url": "https://arxiv.org/abs/2203.06074",
|
| 8 |
+
"source": {
|
| 9 |
+
"paper_dir": "/home/zling/lzl/ICML2026/Build_Dataset/data/2022_03/main_diagram_database/2203.06074",
|
| 10 |
+
"tex_dir": "/home/zling/lzl/ICML2026/Build_Dataset/data/2022_03/tex_files_extracted/2203.06074",
|
| 11 |
+
"paper_md": "/home/zling/lzl/ICML2026/Build_Dataset/data/2022_03/main_diagram_database/2203.06074/paper_text/paper.md",
|
| 12 |
+
"metadata_json": "/home/zling/lzl/ICML2026/Build_Dataset/data/2022_03/main_diagram_database/2203.06074/metadata.json",
|
| 13 |
+
"intro_method_from": "/home/zling/lzl/ICML2026/Build_Dataset/data/2022_03/main_diagram_database/2203.06074/paper_text/paper.md",
|
| 14 |
+
"intro_method_from_kind": "markdown"
|
| 15 |
+
},
|
| 16 |
+
"files": {
|
| 17 |
+
"main_drawio": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2203.06074/main_diagram/main_diagram.drawio",
|
| 18 |
+
"main_png": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2203.06074/main_diagram/main_diagram.png",
|
| 19 |
+
"main_pdf": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2203.06074/main_diagram/main_diagram.pdf",
|
| 20 |
+
"intro_method_md": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2203.06074/paper_text/intro_method.md",
|
| 21 |
+
"paper_pdf": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2203.06074/paper.pdf",
|
| 22 |
+
"latex": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2203.06074/latex_source"
|
| 23 |
+
},
|
| 24 |
+
"status": {
|
| 25 |
+
"copy_drawio": "exists",
|
| 26 |
+
"copy_png": "exists",
|
| 27 |
+
"diagram_pdf": "pdf_exists",
|
| 28 |
+
"intro_method": "exists",
|
| 29 |
+
"paper_pdf": "exists",
|
| 30 |
+
"latex": "exists"
|
| 31 |
+
}
|
| 32 |
+
}
|
2203.08195/record.json
ADDED
|
@@ -0,0 +1,32 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"arxiv_id": "2203.08195",
|
| 3 |
+
"month": "2022_03",
|
| 4 |
+
"year": 2022,
|
| 5 |
+
"conference": "CVPR",
|
| 6 |
+
"title": "DeepFusion: Lidar-Camera Deep Fusion for Multi-Modal 3D Object Detection",
|
| 7 |
+
"arxiv_url": "https://arxiv.org/abs/2203.08195",
|
| 8 |
+
"source": {
|
| 9 |
+
"paper_dir": "/home/zling/lzl/ICML2026/Build_Dataset/data/2022_03/main_diagram_database/2203.08195",
|
| 10 |
+
"tex_dir": "/home/zling/lzl/ICML2026/Build_Dataset/data/2022_03/tex_files_extracted/2203.08195",
|
| 11 |
+
"paper_md": "/home/zling/lzl/ICML2026/Build_Dataset/data/2022_03/main_diagram_database/2203.08195/paper_text/paper.md",
|
| 12 |
+
"metadata_json": "/home/zling/lzl/ICML2026/Build_Dataset/data/2022_03/main_diagram_database/2203.08195/metadata.json",
|
| 13 |
+
"intro_method_from": "/home/zling/lzl/ICML2026/Build_Dataset/data/2022_03/main_diagram_database/2203.08195/paper_text/paper.md",
|
| 14 |
+
"intro_method_from_kind": "markdown"
|
| 15 |
+
},
|
| 16 |
+
"files": {
|
| 17 |
+
"main_drawio": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2203.08195/main_diagram/main_diagram.drawio",
|
| 18 |
+
"main_png": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2203.08195/main_diagram/main_diagram.png",
|
| 19 |
+
"main_pdf": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2203.08195/main_diagram/main_diagram.pdf",
|
| 20 |
+
"intro_method_md": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2203.08195/paper_text/intro_method.md",
|
| 21 |
+
"paper_pdf": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2203.08195/paper.pdf",
|
| 22 |
+
"latex": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2203.08195/latex_source"
|
| 23 |
+
},
|
| 24 |
+
"status": {
|
| 25 |
+
"copy_drawio": "exists",
|
| 26 |
+
"copy_png": "exists",
|
| 27 |
+
"diagram_pdf": "pdf_exists",
|
| 28 |
+
"intro_method": "exists",
|
| 29 |
+
"paper_pdf": "exists",
|
| 30 |
+
"latex": "exists"
|
| 31 |
+
}
|
| 32 |
+
}
|
2203.08734/main_diagram/main_diagram.drawio
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
<mxfile host="app.diagrams.net" modified="2022-02-24T13:43:30.072Z" agent="5.0 (Windows)" version="16.6.3" etag="YyiTKry81gL27h3gXkmH"><diagram id="VqyGpv_fJaaKGblq52SX">7V1bk5s2GP01+xiN7hKP2SRtX9pmJg9tHqlNvEywcTG76+2vrzBgJAFeGcOufKmbGRBCCJ3z6bsJ7R35tNz+moXrh9/TeZTcYTjf3pHPdxgjivFd8T+cv5QlglUFiyyeV5Wagm/xf1FVCKvSx3gebYyKeZomebw2C2fpahXNcqMszLL02az2I03Mp67DRfVE2BR8m4VJ1Kr2VzzPH8pSiUVT/lsULx7qJyMelFeWYV25anjzEM7TZ62IfLkjn7I0zcuj5fZTlBSDV49Led8vPVf3HcuiVe5yQzXuT2HyWL1b1a/8pX5ZdYMaV3Vy//wQ59G3dTgrrjwraFXZQ75M1BlSh+FmXQ72j3gbqfbvf6Sr/JdwGScFzL9FyVOUx7OwulChinB1/ilN0mz3REIlR0HRYtW5KMujbe8Lov2wKb5F6TLKsxdVpbqBcAG4VBAEGDIcyPL+inecYyBIgAikhAqJeXn1ucEUYYA5lDxgnGGk6lbj86DD21MnrFi22HeqAUIdVFh040KuGheqcNnhUY8q8wYYetXAMAWMMaLe4MIuHhcsARYSUabGjXBaTxHeI8MdkJkr9VqdbqLFUg3Jl6boPlrNPxZaW12dJeFmE89MvLL0cTUvgNqdnQxWtI3zv1UZrI6/F8eAVWeft9qlzy/1yUqNyt9NxeL0e9Wh3Ulz2+6svm+TZ+nPqJ8yxcgYhNmkj9ksMpR3HmaLKDfEwIFWGjMYbDOhLsuiJMzjJ7MTXXSonvA1jdWD96xFUM3kDBKCEGbKQJLYYC0mAdDphpj5gPJtqzZ1M6b1GKgUBpLqn3pSQAJqPiYAUkjCIOYCq8cIYj6mHMHWYxTpwhet2rqosOl/WUIY0F4VmZ3gXAL9VcnBNzrcFhO0vy11UPa8Ec897k4SK85EYuHuP11i91L6XRfSVyRWl1dNfKeQWNaWWOKZxCr1z2igyCoFRQIj0wBA2FIzloJwFlhMgKQIsoAroaQBRMZjBAI1swvTXL6DuHYr1GHiShkDRFesbFR5lQ7yek4C+YYqlN4EcjqBHEDl4EblY6is85Z6xluibBTceCvStobM0MtAw09az+CWcCja6nYSnYq3dWTy/GwmpLPa1ctBd29nM3V4Odc5RQcUaExH0OQ6luZDHLl+rNFEkaknLKE+xsU52BTl1vvQUU0mhLxVNIME8p2dGN90D+MAK/oFUCgNIbgVdUAEA9k2x4+3mRAGlblfCabxGInM5IIl9iMqH9wiM9mSFp/zaJubDDZpsEpXhR76ESeJVRQm8WJViIFCN1Ll90UkU7E7+VhdWMbzedIXWW3kBLrKCWzLyZixVBxAc96CamotSzR+9szqOl0JBPyAXTwkXIraqZ8bmIfAJAwCEVABUSC5UIhZhiZkXa7L++PcziRtfsbrG9D9QAdWLNnAmTEM9NkW+YJzOzN1k+eDMGMIihA9g0FAiKiVcw0zVR5fA5Bo57neCeZ2muuCYd7Vq94KjoC5MtbAPnRa2FKmb8MgAnvBLsw26gvq7VQJ2qJLRX0M4aYB0EBS0nwmQLdj7PGqhfN5rzBQkqf0q/azDCnaNpExAhJpP9wGo6fKSWC0o8RdUnfRaDDmCxq4Hfrs0nwXjQaH0Bc02oGt9DG/LjQo90Y23CIzZ44GtNAwLXaPZMNlVa1z0NfZpDo0/LPH7GkfQn7LVGP54gfjvLXNowd6mwXxnkR6lUMIDO6ZngMn0GzRNbaLAwgCqP1M7RtgYAWNxwvnOq0xvm6aekM/VGQa9GmqZ+47OrVwuF0mxIT8c1lLfd38M6bJwHdOmtRB01CSyikpeVtEPoEux54R9xVdzu
TAPO0rupxOSdx2gPDqHACPQhXtKN71oYG8ccfaYbzODOiZwyEpEFj/eSocxGUJ43Wbfd5oSoQ4QFLTaJbXO9jtONzupG4HcVmS56WNN2zF3tvYeP45J9icDi2HmRMweDkeJoAT7WfF6O1Q0IjUxedK3UGTKQKQUp28EBAkXiHw7uxrlMVqXItMtiurawYbrPZuPhYMINHoY9H+9BPDod4LJIBprMaWP6/8bhrYMjPSam8UMFOkLJ9f1GHQsdZlE5cQva+SxPAAw8SSIzUAA+SoaHuIbHVpDO9kCwugKww72RfIwRpD3aob0cgMDOzTiGMLFRKm8W7t5AClKVRv3sAEYj1qSuPsPrdwEUXRFkXvIsuEWm6nyTsx0OsgwpRDy0MmE+baiEuuw099MwJvOR/TajO0iPSMunZUzHI7EAJDP0EVHOgWEhGWULi6zMeqEYEPxZW0N+rt+MEGGB5ZBbikcBpRqta/9saWzNCQJqFplj+ki3QVJrqQ9lDWG3oSbFo1koqBc6nVkKhV79ibZ6DuDvf2C3X3azR6uWxac7X0opbTKNs5L1eC2U0pICea4Cjp63Rv30hf30ajWddeK2pmI9uPOxrwpFjoP4+f1OGiOFT48XBZJCpW/2zWuzrF1hpOZa73dtXrK9v175/sUPdcy6bo8ojdm+LVbt07pcu37l1o9/bcU9OnNveZM2NzaKnlM/o66lBAoVIMJ30dVW9LUeuv2hXWt/Gs0xjmF0/9FoFz/rdr76US1uKVDcz4v49pfeHDZjc4HwuaovW2uVjT4I+02Ky7YUjZWk2R8yUDbJNh76Vnaa7stXRleMynfSIpzT0LBBRtbgQTcYN2rQ24ceNkbigUR2IHtNhRx+LehB1dmXsXS/jY8y7D5+g2VD8GdGXa23qU6hhD5mgXOrY0Rm/8JYHjKJwwfq8j8S5D6hEsp7bxZm16CqUnMAxpw9Hd8Bg9t5lhkpoj4H4dc90ZC8glzXUXBMO1Q3lr4x2htENu3T7zK19O9Pq0I7im0oppkfYOfPvtk42/RVMHHU5yTXHbNWX3ux0xjh7yYsOf46e5YmMaF425+xDp6OYVkMffVH5l82q1XeyEfb7UMMwI1K63dK2pXbdgBF3YREGX4zazuLKEt7UAWzI5NN9ttSQomyjdbe8ezF5bxcn7ejZWsrteaXujWOe6fEpagNWbmB+9xt9qq8DSbmskmmHW2+3e3rHe3o1Gta51mGMlGNTFP9dRtgu6X1uyYQwtZyee3jK10LVq8JZ48ifx9MFmx4SJJ3Xa/GXkcpJp/r40+fI/</diagram></mxfile>
|
2203.08734/main_diagram/main_diagram.pdf
ADDED
|
Binary file (20.1 kB). View file
|
|
|
2203.08734/paper_text/intro_method.md
ADDED
|
@@ -0,0 +1,66 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Introduction
|
| 2 |
+
|
| 3 |
+
<figure id="fig:teaser" data-latex-placement="t">
|
| 4 |
+
<table>
|
| 5 |
+
<tbody>
|
| 6 |
+
<tr>
|
| 7 |
+
<td style="text-align: center;"><embed src="img/teaser_arch.pdf" style="height:2.7cm" /> <embed src="img/teaser.pdf" style="height:2.7cm" /></td>
|
| 8 |
+
</tr>
|
| 9 |
+
</tbody>
|
| 10 |
+
</table>
|
| 11 |
+
<figcaption> (<strong>left</strong>) Our search method generates architectures from points in an architecture representation space that is iteratively optimized. (<strong>right</strong>) The architecture representation space is biased towards better-performing architectures with each search iteration. After only <span class="math inline">48</span> evaluated architectures, our generator produces state-of-the-art performing architectures on NAS-Bench-101. </figcaption>
|
| 12 |
+
</figure>
|
| 13 |
+
|
| 14 |
+
The first image classification network [@2012AlexNet] applied to the large-scale visual recognition challenge ImageNet [@2009ImageNet] achieved unprecedented results. Since then, the main drivers of improvement on this challenge are new architecture designs [@2014VGG; @2015GoogleNet], [@2016Inception; @2016ResNet] that, ultimately, lead to architectures surpassing human performance [@2015ReLU]. Since manual architecture design requires good intuition and a huge amount of trial-and-error, the automated approach of neural architecture search (NAS) receives growing interest [@2017EvolutionaryNAS; @2018LearningNAS; @2019NB101; @2020NB201; @2020NBNLP; @2021HWNNB]. Well-performing architectures can be found by applying common search practices like random search [@2012RandomNAS], evolutionary search [@2017EvolutionaryNAS; @2019EvolutionaryNAS], Bayesian optimization (BO) [@2018BONAS; @2020BONAS; @2021BANANAS], or local search [@2020LocalSearchNAS] on discrete architecture search spaces, such as NAS-Bench-101, NAS-Bench-201, DARTS and NAS-Bench-NLP [@2019NB101; @2020NB201; @2018DARTS; @2020NBNLP]. However, these methods are inefficient because they require evaluating thousands of architectures, resulting in impracticable search times. Recent approaches avoid this problem of immense computation costs by either training surrogate models to approximate the performance of an architecture [@2018DARTS; @2019ProxyNAS] or by generating architectures based on learned architecture representation spaces [@2019VAENAS; @2021SVGe]. Both methods aim to improve the query efficiency, which is crucial in NAS, since every query implies a full training and evaluation of the neural architecture on the underlying target dataset.
|
| 15 |
+
|
| 16 |
+
This trade-off between query efficiency and resulting high-scoring architectures is an active research field. Yet, no attempts were made so far to leverage the advantages of both search paradigms. Therefore, we propose a model that incorporates the focus of promising architectures already in the architecture generation process by optimizing the latent space *directly*: We let the generator learn in which areas of the data distribution to look for promising architectures. This way, we reduce the query amount even further, resulting in a query efficient and very effective NAS method. Our proposed method is inspired by a latent space optimization (LSO) technique [@2020Reweighting], originally used in the context of variational autoencoders [@2014VAE] to optimize generated images or arithmetic expressions using BO. We adapt this concept to NAS and pair it with an architecture performance predictor in an end-to-end learning setting, so that it allows us to iteratively reshape the architecture representation space. Thereby, we promote desired properties of generated architectures in a highly query-efficient way, i.e. by learning expert generators for promising architectures. Since we couple the generation process with a surrogate model to predict desired properties such as high accuracy or low latency of generated architectures, there is no need in our method for BO in the generated latent space, making our method even more efficient.
|
| 17 |
+
|
| 18 |
+
In practice, we pretrain, on a target space of neural architectures, a GNN-based generator network, which does not rely on any architecture evaluation and is therefore fast and query-free. The generator is trained in a novel generative setting that directly compares generated architectures to randomly sampled architectures using a reconstruction loss without the need of a discriminator network as in generative adversarial networks (GANs) [@2014GAN] or an encoder as in variational autoencoders (VAEs) [@2014VAE]. We use an MLP as a surrogate to rank performances and hardware properties of generated architectures. In contrast, previous generative methods either rely on training and evaluating supernets [@2021SGNAS], which are expensive to train and dataset specific, or pretrain a latent space and search within this space directly using BO [@2019VAENAS; @2020Arch2vec; @2021SVGe], reinforcement learning (RL) [@2021GANAS] or gradient based methods [@2018NAO]. These methods incorporate either GANs, which can be hard to train or VAEs, which are biased by the regularization, whereas our plain generative model is easy to train. In addition we enable backpropagation from the performance predictor to the generator. Thereby, the generator can efficiently learn which part of the architecture search space is promising with only few evaluated architectures.
|
| 19 |
+
|
| 20 |
+
By extensive experiments on common NAS benchmarks [@2019NB101; @2020NB201; @2020NB301; @2020NBNLP; @2021HWNNB] as well as ImageNet [@2009ImageNet], we show that our method is effective and sample-efficient. It reinforces the generator network to produce architectures with improving validation accuracy (see [1](#fig:teaser){reference-type="ref+label" reference="fig:teaser"}), as well as in improving on hardware-dependent latency constraints (see [4](#fig:feasibility){reference-type="ref+label" reference="fig:feasibility"}) while keeping the number of architecture evaluations small. In summary, we make the following contributions:
|
| 21 |
+
|
| 22 |
+
- We propose a simple model that learns to focus on promising regions of the architecture space. It can thus learn to generate high-scoring architectures from only a few queries.
|
| 23 |
+
|
| 24 |
+
- We learn architecture representation spaces via a *novel generative design* that is able to generate architectures stochastically while being trained with a simple reconstruction loss. Unlike VAEs [@2014VAE] or GANs [@2014GAN], no encoder network nor discriminator network is necessary.
|
| 25 |
+
|
| 26 |
+
- Our model allows sample-efficient search and achieves state-of-the-art results on several NAS benchmarks as well as on ImageNet. It allows joint optimization w.r.t. hardware properties in a straightforward way.
|
| 27 |
+
|
| 28 |
+
# Method
|
| 29 |
+
|
| 30 |
+
Intrinsically, Neural Architecture Search (NAS) is a discrete optimization problem seeking the optimal configuration of operations (such as convolutions, poolings and skip connections) in a constrained *search space* of computational graphs. To enable benchmarking within the NAS community, different search spaces have been proposed. The tabular benchmarks NAS-Bench-101 [@2019NB101] and NAS-Bench-201 [@2020NB201] provide both an exhaustive covering of metrics and performances. NAS-bench-NLP [@2020NBNLP] provides a search space for natural language processing. In addition to these tabular benchmarks NAS-Bench-301 [@2020NB301] provides a surrogate benchmark, which allows for fast evaluation of NAS methods on the DARTS [@2018DARTS] search space by querying the validation accuracy. NAS-Bench-x11 [@2021NBX11] is another surrogate benchmark. It outputs full training information for each architecture in all four mentioned benchmarks. NAS-Bench-Suite [@NBSuite] facilitates reproducible search on these NAS benchmarks.
|
| 31 |
+
|
| 32 |
+
Early NAS approaches are based on discrete encodings of search spaces, such as in the form of adjacency matrices, and can be distinguished by their *search strategy*. Examples are random search [@2012RandomNAS; @2019RS], reinforcement learning (RL) [@2017ReinforcementNAS; @2018ReinforcementNAS], evolutionary methods [@2017EvolutionaryNAS; @2019EvolutionaryNAS], local search [@2020LocalSearchNAS], and Bayesian optimization (BO) [@2018BONAS; @2020BONAS]. Recent NAS methods shift from discrete optimization to faster weight-sharing approaches, resulting in differentiable optimization methods [@2018ParameterSharingNAS; @2018DARTS; @2018ONESHOT; @2019ProxyNAS; @2019SNAS; @2020RobustDarts]. Several approaches map the discrete search space into a continuous architecture representation space [@2018NAO; @2019VAENAS; @2020Arch2vec; @2021SVGe] and search or optimize within this space using for example BO (e.g. [@2020Arch2vec]) or gradient-based point operation [@2018NAO]. In this paper, we also learn continuous architecture representation spaces. However, in contrast to former works, we propose to optimize the representation space, instead of performing point optimization within a fixed space such as e.g. [@2018NAO]. A survey of different strategies can be found in [@2019NASSurvey].
|
| 33 |
+
|
| 34 |
+
All NAS approaches are dependent on *performance estimation* of intermediate architectures. To avoid the computation heavy training and evaluation of queries on the target dataset, methods to approximate the performance have been explored [@2021HowPP]. Common approaches include neural predictors that take path encodings [@2021BANANAS] or graph embeddings learned by GNNs [@2019NASPredictor; @2020NP] as input. WeakNAS [@2021WeakNAS] proposes to progressively evaluate the search space towards finding high-performing architectures using a set of weak predictors. In our method, we integrate a weak expert predictor with a generator to yield an efficient interplay between predicting for high-performing architectures and generating them.
|
| 35 |
+
|
| 36 |
+
*Graph Generative Models* Most graph generation models in NAS employ variational autoencoders (VAE) [@2014VAE]. [@2018NAO] uses an LSTM-based VAE, coupled with performance prediction for gradient-based architecture optimization. Note that [@2018NAO] optimizes the latent point in a fixed latent space while our approach optimizes the latent space itself. [@2019VAENAS] use GNNs with asynchronous message-passing to train a VAE for BO. [@2021SGNAS] combines a generator with a supernet and searches for neural architectures for different device information. [@2020Arch2vec] facilitates [@2019GIN] with an MLP decoder. [@2021SVGe] proposes smooth variational graph embeddings (SVGe) using two-sided GNNs to capture the information flow within a neural architecture.
|
| 37 |
+
|
| 38 |
+
Our proposed model's generator is inspired by SVGe with the aim to inherit its flexible applicability to various search spaces. Yet, similar to [@2020Arch2vec], due to the intrinsic discretization and training setting, SVGe does not allow for backpropagation. Recently, [@2021GANAS] facilitates GNNs in a GAN [@2014GAN] setting, where the backpropagation issue is circumvented using reinforcement learning. In contrast, our proposed GNN generator circumvents the intermediate architecture discretization and can therefore be trained by a single reconstruction loss using backpropagation. Its iterative optimization is inspired by [@2020Reweighting], who proposes to use a VAE with weighted retraining w.r.t. a target function to adapt the latent space for the optimization of images and arithmetic functions using BO. Our model transfers the idea of weighted retraining to NAS. It uses our plain generator and improves sample efficiency by employing a differentiable surrogate model on the target function such that, in contrast to [@2020Reweighting], no further black-box optimization step is needed. Next, we describe the proposed generator network.
|
| 39 |
+
|
| 40 |
+
<figure id="fig:generator-training" data-latex-placement="t!">
|
| 41 |
+
<embed src="img/generator-training.pdf" style="height:3cm" />
|
| 42 |
+
<figcaption>Representation of the training procedure for our generator in AG-Net. The input is a randomly sampled latent vector <span class="math inline"><strong>z</strong> ∈ ℝ<sup><em>d</em></sup></span>. First, the input node is generated, initialized and input to a GNN to generate a partial graph representation. The learning process iteratively generates node scores and edge scores using <span class="math inline"><strong>z</strong></span> and the partial graph representation until the output node is generated. The target for this generated graph is a randomly sampled architecture. </figcaption>
|
| 43 |
+
</figure>
|
| 44 |
+
|
| 45 |
+
*Preliminaries* We aim to generate neural networks represented as directed acyclic graphs (DAG). This representation is in line with the cell-based architecture search spaces commonly used as tabular benchmarks [@2019NB101; @2020NB201]. Each cell is a DAG $G=(V,E)$, with nodes $v \in V$ and edges $e \in E$. The graph representations differ between the various benchmarks in terms of their labeling of operations. For example in NAS-Bench-101 [@2019NB101] each node is associated with an operation, whereas in NAS-Bench-201 [@2020NB201] each edge is associated with an operation.
|
| 46 |
+
|
| 47 |
+
**Generative Network** Commonly used graph generative networks are based on variational autoencoders (VAE) [@2014VAE]. In contrast, our proposed network is a *purely generative* network, $p_G$ (see [2](#fig:generator-training){reference-type="ref+label" reference="fig:generator-training"}). To generate valid graphs, we build our model similar to the graph decoder from the VAE approach SVGe [@2021SVGe]. The generator takes a randomly sampled variable $\textbf{z} \sim \mathcal{N}(0,1)$ as input and reconstructs a randomly sampled graph from the cell-based search space. The model iteratively builds the graph: it starts with generating the input node $v_\textrm{0}$, followed by adding subsequent nodes $v_\textrm{i}$ and their labels and connecting them with edges $e_{(j,i)}, j<i$, until the end node $v_\textrm{T}$ with the label *output* is generated. Additionally, we want to learn a surrogate for performance prediction on the generated data and allow for end-to-end training of both. To allow for backpropagation, we need to adapt several details of the generator model. We initialize the node-attributes for each node by one-hot encoded vectors, which are initialized during training using a 2-layer MLP to replace the learnable look-up table proposed in SVGe. The output of our generator is a vector graph representation consisting of a concatenation of generated node scores and edge scores. It is important to note that the iterative generation process is independent of the ground truth data, which are only used as a target for the reconstruction loss. Note that the end-to-end trainability of the proposed generator is a prerequisite for our model: It allows to pair the generator with a learnable performance predictor such that information on the expected architectures' accuracy can be learned by the generator. 
This enables a stronger coupling with the predictor's target for the generation process and higher query efficiency (see [4.4](#sec:ablation_studies){reference-type="ref+label" reference="sec:ablation_studies"}). In contrast, previous models such as [@2021SGNAS; @2021SVGe; @2020Arch2vec] are not fully differentiable and do not allow such optimization. Our generative model is pretrained on the task of reconstructing neural architectures, where for each randomly drawn latent space sample, we evaluate the reconstruction loss to a randomly drawn architecture. This simple procedure is facilitated by the heavily constrained search spaces of neural architectures, making it easy for the model to learn to generate valid architectures without being supported by a discriminator model as in generative adversarial networks (GANs) [@2014GAN]. An evaluation of the generation ability of our model and implementation details are provided in the supp. mat. [\[supp:sec_generator_ability\]](#supp:sec_generator_ability){reference-type="ref+label" reference="supp:sec_generator_ability"}.
|
| 48 |
+
|
| 49 |
+
**Performance Predictor** This generative model is coupled with a simple surrogate model, a 4-layer MLP with ReLU non-linearities, for target predictions $C$. These targets can be validation or test accuracy of the generated graph, or the latency with respect to a certain hardware. For comparison, we also include a tree-based method, XGBoost (XGB) [@XGB] as an alternative prediction model. XGB[@XGB] is used as a surrogate model in NAS-Bench-301 [@2020NB301] and shows high prediction abilities. The input to XGB is the vector representation of the architectures. Since this method is non-differentiable, we additionally include a gradient estimation for rank-based metrics [@RankbasedGradients]. This way, we are able to include gradient information to the generator. Yet, it is important to note, that this approach is not fully differentiable. This comparison will allow us to measure the trade-off between using supposedly stronger predictors over the capability to allow for full end-to-end learning.
|
| 50 |
+
|
| 51 |
+
**Training Objectives** The generative model $p_G$ learns to reconstruct a randomly sampled architecture $G$ from search space $p_D$ given a randomly sampled latent vector $\textbf{z} \sim \mathcal{N}(0,1)$. The objective function for this generation process can be formulated as the sum of node-level loss ${\mathcal{L}}_V$ and edge-level loss ${\mathcal{L}}_E$: $$\begin{equation}
|
| 52 |
+
\label{eq:generator_loss}
|
| 53 |
+
{\mathcal{L}}_G(\tilde{G},G) = {\mathcal{L}}_{V} + {\mathcal{L}}_E;
|
| 54 |
+
\;\tilde{G}\sim p_G(\textbf{z});\;G\sim p_D, \
|
| 55 |
+
\end{equation}$$ where ${\mathcal{L}}_V$ is the Cross-Entropy loss between the predicted and the ground truth nodes and ${\mathcal{L}}_E$ is the Binary-Cross Entropy loss between the predicted and ground truth edges of the generated graph $\tilde{G}$. This training step is *completely unsupervised*. [2](#fig:generator-training){reference-type="ref+label" reference="fig:generator-training"} presents an overview of the training process. To include the training of the surrogate model, the objective function is reformulated to: $$\begin{equation}
|
| 56 |
+
\label{eq:search_loss}
|
| 57 |
+
{\mathcal{L}}(\tilde{G},G) = (1-\alpha){\mathcal{L}}_G(\tilde{G}, G) + \alpha {\mathcal{L}_C}(\tilde{G},G),
|
| 58 |
+
\end{equation}$$ where $\alpha$ is a hyperparameter to trade-off generator loss $\mathcal{L}_G$ and prediction loss $\mathcal{L}_C$ for the prediction targets $C$ of graph $G$. We set the predictor loss as an MSE. Furthermore, each loss is optimized using mini-batch gradient descent.
|
| 59 |
+
|
| 60 |
+
**Generative Latent Space Optimization (LSO)** To facilitate the generation process, we optimize the architecture representation space via weighted retraining [@2020Reweighting], resulting in a sample efficient search algorithm. The intuition of this approach is to place more probability mass on high-scoring latent points, (e.g. high performing or low latency architectures) and less mass on low-scoring points. Thus, this strategy does not discard low-scoring architectures completely, which would be inadequate for proper learning. The generative model is therefore trained on a data distribution that systematically increases the probability of high-scoring latent points. This can be done by simply assigning a weight $w_i$ to each data point $G_i \sim p_D$ , indicating its likelihood to occur during batch-wise training. In addition, the training objective is weighted via a weighted empirical mean $\sum_{G_i \sim p_D} w_i~\mathcal{L}$ for each data point. As for the weights itself, [@2020Reweighting] proposed a rank-based weight function $$\begin{align}
|
| 61 |
+
\label{eq:weights}
|
| 62 |
+
\begin{split}
|
| 63 |
+
&w(G;p_D,k) \propto \frac{1}{kN + \textrm{rank}_{f,p_D}(G)}\\
|
| 64 |
+
&\textrm{rank}_{f,p_D}(G) = \vert\{G_i : f(G_i) > f(G), G_i \sim p_D\} \vert,
|
| 65 |
+
\end{split}
|
| 66 |
+
\end{align}$$ where $f(\cdot)$ is the evaluation function of the architecture $G_i$; for NAS-Bench-101 [@2019NB101] and NAS-Bench-201 [@2020NB201] it is the tabular benchmark entry, for NAS-Bench-301 [@2020NB301] and NAS-Bench-NLP [@2020NBNLP] it is the surrogate benchmark prediction. Similar to [@2020Reweighting], we set $k = 10^{-3}$. The retraining procedure itself then consists of finetuning the pretrained generative model coupled with the surrogate model, where loss functions and data points are both weighted by $w(G;p_D,k)$.
|
2204.06260/main_diagram/main_diagram.drawio
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
<mxfile host="app.diagrams.net" modified="2021-10-09T00:13:08.005Z" agent="5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/94.0.4606.81 Safari/537.36" etag="wB5fv1zGdGXibGIwvPBb" version="15.4.3" type="google"><diagram id="GivCoD3KeIWZvgs8L_di" name="Page-1">7V1tc5s4EP41mWlvJh4k8fqxTdJ07qXXa27mmo8YZJspBhdwbPfXn3gzkhDGJkAwdj7ERhLLot19eKRdkht0t9w+BuZq8ZdvY/cGSvb2Bt3fQKgBlfyOG3ZpA9JR2jAPHDttAkXDk/MLZ41S1rp2bBwyAyPfdyNnxTZavudhK2LazCDwN+ywme+yV12Z8+yKUtHwZJkuLg37z7GjRdqqQ61o/4yd+SK/MlCNtGdp5oMzweHCtP0N1YQebtBd4PtR+m25vcNuPHf5vKTnfaro3SsWYC865oTd9O/fp677+enR0u9XK/+P7fOv20zKi+musxvOlI12+Qxgz/4QTyQ5slwzDB3rBn1cREuXNADyNYwC/8d+chBpmTmue+e7fpAIQDPdwpa1H0n1THVFViTSg7dO9J20SRMlO3qOj7Lv91v6YJcdlO8/u5nQXwcWPnDT2T1ie04bOZu1R+wvcRTsyIBNYXAlM+KCsnXeFmDXjJwX1mHMzO/me3H7K3z1HaIxlLIQkeVMThYhKpJYEen9ZGfRBq4RpEBOUGQGcxyVBBHrmjtq2CoeEB5QGHEKS9JJenHjyZdUg/yIskHRlDjzCY4NBY6tulHsnX6iXOHh6s+1n3fchgkCfSADgLHaFp3k2zz7pKXMTIsVssDuC44cy6RPbXqxD5a/DokwMuATNqN1QJAQquZyRfq8abii9CGTlKrEqhmuTK/6+jNz6bi7VIO9YBKXSObuhO0pK/qdUiO9JKvGNOBbSvpyqBPhbdy+WTgRflqls7whTxgWe0zXmXsxMBEMwEEj6KmEkRccECUOAkTWi3gHh0p6TAEI0AUIgpRqsKDC43Tv1xrCekggIuoc7SunfCCIDOuA9FhEVntCZF7hOkTm9eoFkY1XIzIEIpD8gteB6ZaR5guONn7wozkEBf7as7Gd0Y0aJGoBRoDKWkXOj2kYgQK3Vzmvag1GQOv0EJ5D+ANjohnUjy42y6loEMs1ICuL53oVgNBWDIJqWmQ7L2fCiv4l/uQlc5iTnPU074viFU1BRYqOGp5EmpMJaJeNtAEKmjyRNMZpkKGVcMEAE10TEIzOkAFd0byGFA4PzeVLRHOZoG5LAC4PAMCrWdRIAHxc8I10ONFZ90M5n6D8X0MTTekRvuEVvmsMpwwOvvNNhcuCbxVJE10qfkA7UB6LfWMoh/r4ofzjyNi4orP+hwy1jAqSaKevK1RAohXd6FGBX6LrFSukFpboSOVkdQwLqPrRfD6w8IR/rrFHrgCl55pY70Wfi9gpUGTWcWGZZwLUKzZd7IKTwSbOLE2xKVl9Akqu8rY4pV6icVM6WrU13Ni2KR2tFNuzaeXW4zbOExKLBru0yAOhfcNzNj45KAo9kqMdffQVBw65sRgtk0Yu7WibWJ8J046qpePp7JBvpdN5YEKMQfkgnydSmi6CEODWuOC4FdDJ6UlJrHCn6UZZtFy+8qj2edS4WBRSuRWeiEWJ9n06Y1Gy6EF7deQuHHl02xWqyjozEmxi9rokkJtWJh1NLaDBUIuJBPSh04s8mToQfgEN9nGNlIb8Qub5hd4Nv0BQrHAl74EH+Uh5vGG8AX8RbQ+PPFIGEgByWwGgStIkT7XtSwDJRNPbA51EhAKB8A6qFFWgtE9AHhkUimHUnNJNXOS2ODIuPN/DdUExPg/WUEtLRE2qCYWWHFYDByG2djzn4B25nqgckmLib
8yrfyO/3s2TRPltFKyjxfub014W6IbH8tGlYN2WRdGlwykihLWTWtr9W1h0/Kki5ouqY+1VzFcR5XVaxa0RVN7zZssL00/HLVgjqOMtVKX1dc457I5DbtYJr2hmvpIgrd8dcKV18n0W5oszVyrFTbkMEzQaWjMuwTSoIh42Ww+PJMGtGXcMNZTf8MYM7NjmF5Hb1tj1DMzfsn6zuhtVxAVHjxAyt66ECkeXjt4a4gVpeq8YoMLLNB9Z5mtVxZSo8YvnRCyN75zYnqmXOoYCqsPwPi5wR5o6AewuGdTKyzVD6vcVJ/Uiq5fUvKD0tQhfEtQ3wl9ouTyYaFwxbFNYj2UNCNbHkAavg/XRJaA1HgRACdf7raZQT1vZd5M+sM1wkbzLdPBlpYFACqjIlp6MJ0CrEdQ1gojW/VfTH0wjqe2YXqsrWezY9LnnVb/v2PFrUs4SJ2fjFfmIKuF8MMCNuA0XYJQJOZCMHoE7V2ggJmR22AZqRKAoE0OutyPU9+uvfkwp2nx5y2j8eAbG5F9IFlpS7pNKaUPIaJ7V85SnUs3Tm3V50vaep+uH6Z8P3x7X28Xshzf/5yW8N8Bt7Z8PaBzAVmrjOHiD+fQduS2ioZR/vE9PE0U51ZEKjXs8P1iaLtW3yYwad8pSmhKXXByRML0NSTw73rx8Zhz0t1lMx31UWKd9jmcnHhd3SrkuSU8UmF44I7JyqUkMxA7nBzZ7xf2JU9P6kZVxcLMB46LAeCKgbGRflGxOeIzrusr6aqd27NRK9q7RQyoWkf29ZWBw2Lu//9cTSZklkkpeD0khrqgOR27hqSWELtGublchUURFERhQqo4Ntq8UHkw3HyGwPkjyIRVxwnTzocJ0iqIlHyAOmLy3JmYkKmwkKnIGAG9XW3ZlSyEEwvFAIFCkYUFg9ZuR17A5n7C5QuB4bNlCkn/AAKj0B4DksPiPGulqt/i3JOjhfw==</diagram></mxfile>
|
2204.06260/main_diagram/main_diagram.pdf
ADDED
|
Binary file (12.8 kB). View file
|
|
|
2204.06260/paper_text/intro_method.md
ADDED
|
@@ -0,0 +1,76 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Introduction
|
| 2 |
+
|
| 3 |
+
In recent years, sequence-to-sequence automatic speech recognition (ASR) models, such as CTC [@kim2017joint], LAS [@chan2016listen], RNN-T [@graves2013speech], and Transformer [@mohamed2019transformers], have achieved impressive performance and dominate the leader-board. However, these models still suffer from mismatches between their training and testing stages.
|
| 4 |
+
|
| 5 |
+
The first mismatch is between the training objective and the testing metric. Most sequence-to-sequence models are typically optimized by the cross-entropy (CE) criterion, which corresponds to maximizing the log-likelihood for each frame [@prabhavalkar2018minimum]. However, during testing, the metric to evaluate a trained model is the task-specific criterion, such as the word error rate (WER), not log-likelihood. This discrepancy might lead to sub-optimal performance in terms of WER. The second mismatch is caused by the widely used teacher-forcing [@williams1989learning] during training, which maximizes the log-likelihood of the current token given a history of the ground truth. The dependence on ground truth leads to exposure bias [@ranzato2015sequence], where the model can see the ground truth history during training. However, the ground truth is unavailable in testing so that the current prediction has to rely on its past predicted tokens. As a result, the incorrect predictions in the early time steps might result in error accumulation [@rennie2017self].
|
| 6 |
+
|
| 7 |
+
There have been some previous works to alleviate the mismatches in the context of sequence-to-sequence models. The sampling-based method according to minimum Bayes risk (MBR) has been successfully applied to CTC model [@shannon2017optimizing] and RNN-T model [@weng2019minimum]. Furthermore, minimum WER (MWER) training has been proposed in attention-based model [@prabhavalkar2018minimum] and RNN-T model [@guo2020efficient], which builds MWER loss by sampling methods. However, these works only focus on the word error number of the entire utterance, while ignoring the distinction of each token in a sequence.
|
| 8 |
+
|
| 9 |
+
Another alternative approach to address the mismatches is based on Reinforcement Learning (RL) since the sequence generation in ASR can be viewed as a sequential decision process in RL [@tjandra2018sequence]. The main idea of the RL-based method is to maximize the accumulative reward along all the time steps, and the customized reward function can build a direct link between the training objective and testing metric. Different from MWER training, a particular definition of reward function in RL is able to consider the impact of each token prediction on the entire generated sequence. In other words, MWER training is a case of RL that only considers the final reward of whole sequential decisions. In addition, RL-based methods have demonstrated their effectiveness in other sequence generation tasks, such as machine translation [@williams1992simple; @bahdanau2016actor] and image captioning [@rennie2017self].
|
| 10 |
+
|
| 11 |
+
In this paper, we present a RL-based optimization method for sequence-to-sequence ASR task called self-critical sequence training (SCST). SCST associates the training loss and WER using WER-related reward function, which considers the intermediate reward at each token generation step. Furthermore, SCST utilizes the test-time beam search algorithm to sample a set of hypotheses for reward normalization. As a result, the high-reward hypotheses that outperform the current test-time system are given positive weights, while the low-reward hypotheses are given negative weights. In conclusion, the proposed SCST optimization method pushes the training procedure much closer to the test phase. The experiments on clean and noisy datasets show that it brings a relative improvement of 8.7% and 7.8% in terms of WER, respectively.
|
| 12 |
+
|
| 13 |
+
In this section, we briefly introduce the general sequence-to-sequence ASR system from a decision-making perspective, and then explain the mismatches in this system.
|
| 14 |
+
|
| 15 |
+
Given a sequence of acoustic features $X = (x_1,x_2,...,x_N)$, the neural network in an ASR system is expected to predict a sequence of tokens $Y = (y_1, y_2, ..., y_T)$, which corresponds to the input acoustic sequence. Instead of directly outputting the tokens, the network usually predicts a probability distribution of each token: $P_\theta(y_t | y_{t-1}, y_{t-2}, ... , y_0, X)$, where $t\in [0,T]$ refers to the $t$-th prediction. Accordingly, despite different structures of the network, two parts of information are considered in the $t$-th prediction: 1) The sequence of acoustic features $X$ and 2) previously generated tokens $Y_{t-1} = (y_{t-1},y_{t-2}, ... , y_0)$. Therefore, the sequence generation in ASR can be viewed as a sequential decision process along with $T$ time steps. Given the ground truth sequence $Y^* = (y^*_1, y^*_2, ..., y^*_T)$, most sequence-to-sequence models apply the cross-entropy criteria to calculate the loss function: $$\begin{equation}
|
| 16 |
+
\mathcal{L}_{ce} = \sum_{t=1}^T - \log P_\theta(y_t^* |\ y_{t-1}^*, y_{t-2}^*, ... ,y_0^*,X)
|
| 17 |
+
\label{CEloss}
|
| 18 |
+
\end{equation}$$
|
| 19 |
+
|
| 20 |
+
The CE loss in Eq. ([\[CEloss\]](#CEloss){reference-type="ref" reference="CEloss"}) aims to maximize the log-likelihood for each token, while we typically use WER to evaluate the performance of the trained model. However, WER is a non-differentiable task metric that cannot be directly utilized in the training loss. Therefore, the current training strategy suffers from this mismatch between training and testing. Furthermore, according to the teacher-forcing algorithm, the prediction of the current token relies on the ground truth $Y^*$. When the ground truth is unavailable during testing, the incorrect predictions at earlier time steps will accumulate errors through subsequent time steps.
|
| 21 |
+
|
| 22 |
+
In this section, we first illustrate how to model ASR task as a reinforcement learning problem. Secondly, the self-critical sequence training (SCST) method is proposed for ASR optimization. Finally, we discuss reward shaping with regards to sequence-to-sequence ASR.
|
| 23 |
+
|
| 24 |
+
We first build the connection between the reinforcement learning formulation and the ASR system. Basic reinforcement learning is modeled as a Markov decision process (MDP) which contains a tuple of ($S$, $A$, $P$, $R$) in successive time steps $t\in[0,T]$. The environment offers a current state $s_t\in S$. The agent takes $s_t$ into account and generates an instant action $a_t\in A$ which interacts with the environment. The $P_t$ denotes the transition probability from $s_t$ to $s_{t+1}$, and the $r_t$ refers to the reward which is the feedback signal from the environment. In reinforcement learning, the objective of training is to maximize the expected cumulative reward $R$: $$\begin{equation}
|
| 25 |
+
\mathbb{E}_{a_t\sim A} \ R = \mathop{\max}_{a_t\in A} \sum_{t=0}^T r_t
|
| 26 |
+
\label{reward}
|
| 27 |
+
\end{equation}$$
|
| 28 |
+
|
| 29 |
+
<figure id="f1" data-latex-placement="h!">
|
| 30 |
+
<embed src="strcture.pdf" style="width:49.0%" />
|
| 31 |
+
<figcaption>The sequential decision model of ASR. Three temporal time steps are presented from right to left. For each time step, the neural network takes previous prediction <span class="math inline"><em>Y</em><sub><em>t</em> − 1</sub></span> and acoustic features <span class="math inline"><em>X</em></span> as input, generates a current token <span class="math inline"><em>y</em><sub><em>t</em></sub></span>, then updates the hypotheses sequence and calculated the reward <span class="math inline"><em>r</em><sub><em>t</em></sub></span> by comparing with ground truth <span class="math inline"><em>Y</em><sup>*</sup></span>.</figcaption>
|
| 32 |
+
</figure>
|
| 33 |
+
|
| 34 |
+
For the sequence-to-sequence ASR task, it can also be viewed as a sequential decision model as shown in Fig. [1](#f1){reference-type="ref" reference="f1"}. The whole encoder-decoder neural network can be viewed as an agent. In each time step $t$, acoustic feature $x_t$ and previous prediction $Y_{t-1}$ are concatenated as current state $s_t$. The output token is the action $a_t$ that will update the generated hypotheses sequence. After comparing it with ground truth sequence $Y^*$, a reward $r_t$ of this time step is calculated. Therefore, we define the training loss function to be the negative cumulative reward: $$\begin{equation}
|
| 35 |
+
\mathcal{L}_\theta(X,Y^*) = - \mathbb{E}[R(Y,Y^*)] = \sum_Y P(Y|X,\theta) R(Y,Y^*)
|
| 36 |
+
\label{rlloss01}
|
| 37 |
+
\end{equation}$$
|
| 38 |
+
|
| 39 |
+
where $R(Y,Y^*)=\sum_{t=0}^T r_t (Y_t,Y^*)$ refers to the reward of a hypotheses sequence that considers each time step from 0 to $T$. This is the difference between reinforcement learning and MWER training, where the latter only calculates the word error number of the entire hypotheses $Y$.
|
| 40 |
+
|
| 41 |
+
In this part, we introduce a self-critical sequence training (SCST) approach and explain how it optimizes the ASR system using the N-best list.
|
| 42 |
+
|
| 43 |
+
In order to calculate the gradient $\nabla_\theta \mathcal{L}_\theta$ of Eq. ([\[rlloss01\]](#rlloss01){reference-type="ref" reference="rlloss01"}), the REINFORCE algorithm in [@williams1992simple] is employed to compute the expected gradient of a non-differentiable reward function as follows: $$\begin{equation}
|
| 44 |
+
\nabla_\theta \mathcal{L}_\theta = - \mathbb{E}_{Y^n\sim P(Y^n|X,\theta)}[R(Y^n,Y^*)\nabla_\theta log P(Y^n|X,\theta)]
|
| 45 |
+
\label{rlloss02}
|
| 46 |
+
\end{equation}$$ where $Y^n$ = ($y_0^n$, $y_1^n$, \... , $y_T^n$) is a hypothesis sequence sampled from the current model. Instead of using other sampling methods, we directly use the N-best list [@bahdanau2016actor] hypotheses computed by Beam search decoding [@sutskever2014sequence] for the input utterance. Therefore, the number of samples is equal to the beam size $N$, and we denote the $n$-th hypothesis as $Y^n\in \text{Beam}(X,N) = (Y^1, Y^2, ... , Y^N)$. Furthermore, we introduce the baseline in [@sutton2018reinforcement] to normalize the rewards of all the hypotheses in the N-best list: $$\begin{equation}
|
| 47 |
+
\begin{small}
|
| 48 |
+
\nabla_\theta\mathcal{L}_\theta = -\frac{1}{N}\sum_{Y^n\in \text{Beam}}^N \nabla_\theta log P (Y^n|X,\theta) \ [\ R(Y^n,Y^*)-\Bar{R} \ ]
|
| 49 |
+
\label{final_loss01}
|
| 50 |
+
\end{small}
|
| 51 |
+
\end{equation}$$ where $\Bar{R}$ is the baseline, and we define it as the average reward of hypotheses in $\text{Beam}(X,N)$. Subtracting $\Bar{R}$ from $R(Y^n,Y^*)$ does not change the expected gradient, but importantly, it can reduce the variance of the gradient estimate [@rennie2017self]. In order to simplify the calculation, we assume that the probability mass is concentrated on the N-best hypotheses only, thus the SCST loss $\mathcal{L}_{scst}$ could be approximated as: $$\begin{equation}
|
| 52 |
+
\begin{small}
|
| 53 |
+
\mathcal{L}_{scst} \approx -\sum_{Y^n\in \text{Beam}}^N log\hat{P} (Y^n|X,\theta) \ [\ R(Y^n,Y^*)-\Bar{R} \ ]
|
| 54 |
+
\label{final_loss02}
|
| 55 |
+
\end{small}
|
| 56 |
+
\end{equation}$$ where $\hat{P}(Y^n|X,\theta) = \frac{P(Y^n|X,\theta)}{\sum_{Y^n\in \text{Beam}} P(Y^n|X,\theta)}$ represents the re-normalized distribution over the N-best hypotheses, and $R(Y^n,Y^*) = \sum_{t=0}^T r_t$ has a temporal structure along the sequence. Eq. ([\[final_loss02\]](#final_loss02){reference-type="ref" reference="final_loss02"}) shows the central idea of the proposed SCST, which is to baseline the REINFORCE algorithm with the reward obtained by the current model using its inference mode. Accordingly, in an N-best list, the probability of hypotheses with a higher reward than the average will be boosted, while the hypotheses which achieve a lower reward will be suppressed. Therefore, in order to pursue a higher reward, the SCST loss forces the trained model to explore the better WER performance using its inference mode.
|
| 57 |
+
|
| 58 |
+
In practice, the initial parameters of the model are trained using the CE loss $\mathcal{L}_{ce}$ in Eq. ([\[CEloss\]](#CEloss){reference-type="ref" reference="CEloss"}). When $\mathcal{L}_{scst}$ is employed, we retain the $\mathcal{L}_{ce}$ with a smaller weight $\lambda$. This operation is helpful to stabilize training against the sudden detachment from teacher-forcing.
|
| 59 |
+
|
| 60 |
+
Reward shaping plays a significant role in almost all RL-related tasks. In this part, we discuss the set of reward functions $R(Y^n,Y^*)$ in the sequence-to-sequence ASR task. Since $R(Y^n,Y^*)$ is the medium building connection between training loss and testing metric, it is typically defined based on edit-distance which is directly related to WER.
|
| 61 |
+
|
| 62 |
+
**Reward I**. Intuitively, the simplest reward function is to set rewards as the negative edit-distance between a hypothesis and the ground truth, which is equal to MWER training. We denote the edit-distance between sequence $a$ and sequence $b$ as $ED(a,b)$, and the reward function for the hypothesis $Y^n$ is shown as follows: $$\begin{equation}
|
| 63 |
+
R_{\uppercase{\romannumeral1}} (Y^n, Y^*) = -ED \ (Y^n,Y^*)
|
| 64 |
+
\label{reward1}
|
| 65 |
+
\end{equation}$$ where $Y^n$ denotes the $n$-th hypotheses, and $Y^*$ denotes the ground truth sequence.
|
| 66 |
+
|
| 67 |
+
**Reward II**. Since Reward I only focuses on the entire utterance of a hypothesis, it ignores that the reward has a temporal structure along the sequence of predicted tokens. Therefore, we define the intermediate reward for each hypothesis $Y^n$ at each time step $t$ as $r_t(Y_t^n,Y_{t-1}^n,Y^*)$. Furthermore, we also retain the probability history of each token $P_t(y_t|X, \theta)$, then multiply it by the reward for each token to calculate the temporal reward: $$\begin{equation}
|
| 68 |
+
\begin{aligned}
|
| 69 |
+
&R_{\uppercase{\romannumeral2}} (Y^n, Y^*)= \sum_{t=0}^T \ r_t(Y_t^n,Y_{t-1}^n,Y^*)*P_t(y_t|X, \theta)
|
| 70 |
+
\\
|
| 71 |
+
&r_t(Y_t^n,Y_{t-1}^n,Y^*) = - [ED \ (Y_t^n,Y^*) - ED \ (Y_{t-1}^n,Y^*)]
|
| 72 |
+
\label{reward2}
|
| 73 |
+
\end{aligned}
|
| 74 |
+
\end{equation}$$ where $P_t(y_t|X, \theta)$ is the probability to predict the token $y_t$ at time step $t$. The $r_t(Y_t^n,Y_{t-1}^n,Y^*)$ is calculated to indicate whether the current new sequence $Y_t^n$ reduces the edit distance compared to previous sequence $Y_{t-1}^n$.
|
| 75 |
+
|
| 76 |
+
We notice that the reward may end up with a special case where the higher-reward hypotheses have more error words than lower-reward hypotheses. However, since the hypotheses in one N-best list are usually similar to each other, this case occurs with an extremely low probability, which means a higher reward can be approximated as a lower WER.
|
2205.00320/main_diagram/main_diagram.drawio
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
<mxfile host="app.diagrams.net" modified="2022-04-07T19:58:42.434Z" agent="5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/99.0.4844.74 Safari/537.36" etag="u_nN8jfDrHp9ZTOGt2gM" version="17.4.1" type="google"><diagram name="Page-1" id="edf60f1a-56cd-e834-aa8a-f176f3a09ee4">5Vpdd6M2EP01Pts+4MOHwc6jHSdOe7KbbZKeNE97ZKMAu4AIiNjeX1/JSAYJgsmCcZq+JGgkjYzmzp3RiIFxHmwWMYjcz8iG/kBX7c3AmA90XdONEflHJdtMMtaYwIk9mw3KBXfeT8iEKpOmng0TYSBGyMdeJApXKAzhCgsyEMdoLQ57Qr64agQcWBLcrYBflj54NnaZVFPVvOMKeo7Llp6YrGMJVj+cGKUhWy9EIcx6AsDVsKGJC2y0LoiMi4FxHiOEs6dgcw59uq18x7J5l6/07n9yDEPcZELob/+eo5tQXaiKZa3xw+f5tTLOtLwAP2VbMdAtn+ibLcmDQx+44AmRdchr4C3bNes5RbxDSXY2nZIBmhFt8k6u5R5tvBXpPUdxlCZcJ/m1mVpxKSIuLK8Lq+rkrQgsSGO2dj0M7yKwoj1rgkwic3Hgk5a2n1ncILZnLzDGcFMQsQ1bQBRAHG/JENarW2wKw7U2Zu11ASXcwG4RIEwGGDCdvercQuSBGekNBuMeI1hM3qHQnlKnIK2VD5KEbLywMTu8QrqKSlpw4+F/6PPQZK1HNo4+zzeFYfMtb4TkXQqTaPOx2JdP27X4vFdNkqA0XsHDQMUgdiCu2x82ENqCx5ctXLCgWWFALouhD7D3IvJElVXZCl+Rt3MTBiCDEyEDkG5KKrIXZ7OK3ntAkaFKirKdKSnagWz/2i1wZ5VwN59+vS9jj+z7NViS+CBADvieE1I8EsPDmAioD3qEgKesI/Bsm+qYxZBQCVju9FHIRPSNdu9ozgbmnPENCyGaXgmqes+RvX8fVtiqAj9XsYKiDg1NE5lB7wQuilGllM9HT08JbGvd0S1+UL5b43j5jM0/Ly+/WP5M4eCSDHnHmijGLnJQCPyLXCrRSD7mGqGImf07xHjLbAVSjERQtGIeTeCdnIY6Zx7NaEg9xqQh9bwNJITKwbYwgDnEqxgyeezhTMFi2GvMIo83NVWCWPYLuqUToy7x+NUk4wsZRPMMZY4C4IVk3NQGESYY5bpjPvQahE5Ks0JdZelsg9QjR7zWS/ph8mgmk/6h9MPqIP2oJgrzP0cUam9E0TRH6Z4o2qWU/R4CKpxTrfHGN58P+nZS+YzwDpx0fFInLbho7rDVTtrC2c6OEmzLebcuRseRLlmtu7y72paTU9pS+1C2lDOd3m15VkG0JzBu74d3/uKnSqFLSBiZEhLOjoaE2g2pCLntImwpcN9C4O/Croe3X2MURDgpJ8PVcTWTeVxwhwY6eVv1j0/B7vcBcoAPHWpVRP+4XviDyl34iaygPqW+PxwOyWQQ0JgbLpNIXMmrWF0M9pJrJC6I6GMUoxVMksPBPS/O3qTY90LYXdA3LPGkbnAAFYP+uM+gf9oT/BviRP/0ozdNzJvWDjvPy8dXgWvdzoLnxc/FQr2ffvv2+JfCQdmCI/QqjrgiaCxzACeJpt7YQ6GvbPE65HdS19Mt6xh1Pal4q8iJxxEre6dJMX4hf3wfvFC5ie+LF/Qj8cI1Wn8kWuDI74YXJvpEDPid8IIi3SLsC/GdEkMt+I9f9bmJIK32PEBa2b2HG9yquuOiYJk2SP66SPKk8+LELOd4o6rL39HrOGhVqePHo+PbbA6XHkiy/P7iOSUYR9SGv8Hnof57G/u9qyT+TPTp0cQqG1ituRzu3sBNKgQf8HafA/t
ghj7SGobifq73Tak6aBiWqKLp9b45li7hJpKiIxckRh0cNpoVJPYVAT/nmd0dAIsMWefBskTTYgFVasxslGaxvuwgfYQRkWbMcUUcqbog6IJm6vLZGmPzL/S4ObVay7a9rx1UfBf2Zov3+V2YfDHb63dhlSatygx6DxyNAkCrE1ZPtD4WPbbkic1pXb7A7/fGoZxOvN+Ptmph/b/6Zos088+Cs+H5Z9fGxb8=</diagram></mxfile>
|
2205.00320/main_diagram/main_diagram.pdf
ADDED
|
Binary file (28.4 kB). View file
|
|
|
2205.00320/paper_text/intro_method.md
ADDED
|
@@ -0,0 +1,9 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Introduction
|
| 2 |
+
|
| 3 |
+
Pretrained language models (LMs) have been a foundation of NLP given recent performance achievements; however, there is a growing concern related to inherent societal and harmful biases in these models. Due to historical biases embedded in training corpora, it is unavoidable for the language models to absorb, reproduce, and even amplify such undesired biases [@Schick2020Self-DiagnosisNLP].
|
| 4 |
+
|
| 5 |
+
@Gehman2020REALTOXICITYPROMPTS:Models showed that pretrained LMs generate toxic text even when conditioned on innocuous prompts. One of their proposed debiasing techniques is Domain-Adaptive Pretraining @Gururangan2020DontTasks, or DAPT, on a non-toxic corpus. @Schick2020Self-DiagnosisNLP proposed a self-debiasing approach that uses only a handful of templates that contain the definition of undesired attributes. DAPT is a data-based approach where internal weights are updated with an additional phase of pretraining. On the other hand, self-debiasing is a decoding-based approach that does not require additional resources. The difference between the two debiasing paradigms is a trade-off between the computational cost and the quality of debiasing.
|
| 6 |
+
|
| 7 |
+
In this study, we propose to ensemble the data- and decoding-based approaches by using a toxic corpus as a detoxifying strategy. Our study attempts to invalidate the belief that only non-toxic corpora can reduce the toxicity of language generation. We use GPT-2 [@Radford2018LanguageLearners] as our primary language model and OpenWebText (OWTC; [@Gokaslan2019OpenWeb]), a large corpus of English webtext, as our training corpus. We measure the toxicity of each document using PerspectiveAPI[^1] and collect non-toxic and toxic corpora that satisfy our toxicity requirements.
|
| 8 |
+
|
| 9 |
+
Our results demonstrate that using the toxic corpus indeed reduces the toxicity level of text generated from pretrained language models, which can be further improved by ensemble with the non-toxic corpus.
|
2205.05861/record.json
ADDED
|
@@ -0,0 +1,32 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"arxiv_id": "2205.05861",
|
| 3 |
+
"month": "2022_05",
|
| 4 |
+
"year": 2020,
|
| 5 |
+
"conference": "CVPR",
|
| 6 |
+
"title": "Learning Multi-View Camera Relocalization With Graph Neural Networks",
|
| 7 |
+
"arxiv_url": "https://arxiv.org/abs/2205.05861",
|
| 8 |
+
"source": {
|
| 9 |
+
"paper_dir": "/home/zling/lzl/ICML2026/Build_Dataset/data/2022_05/main_diagram_database/2205.05861",
|
| 10 |
+
"tex_dir": "/home/zling/lzl/ICML2026/Build_Dataset/data/2022_05/tex_files_extracted/2205.05861",
|
| 11 |
+
"paper_md": "/home/zling/lzl/ICML2026/Build_Dataset/data/2022_05/main_diagram_database/2205.05861/paper_text/paper.md",
|
| 12 |
+
"metadata_json": "/home/zling/lzl/ICML2026/Build_Dataset/data/2022_05/main_diagram_database/2205.05861/metadata.json",
|
| 13 |
+
"intro_method_from": "/home/zling/lzl/ICML2026/Build_Dataset/data/2022_05/main_diagram_database/2205.05861/paper_text/paper.md",
|
| 14 |
+
"intro_method_from_kind": "markdown"
|
| 15 |
+
},
|
| 16 |
+
"files": {
|
| 17 |
+
"main_drawio": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2205.05861/main_diagram/main_diagram.drawio",
|
| 18 |
+
"main_png": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2205.05861/main_diagram/main_diagram.png",
|
| 19 |
+
"main_pdf": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2205.05861/main_diagram/main_diagram.pdf",
|
| 20 |
+
"intro_method_md": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2205.05861/paper_text/intro_method.md",
|
| 21 |
+
"paper_pdf": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2205.05861/paper.pdf",
|
| 22 |
+
"latex": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2205.05861/latex_source"
|
| 23 |
+
},
|
| 24 |
+
"status": {
|
| 25 |
+
"copy_drawio": "exists",
|
| 26 |
+
"copy_png": "exists",
|
| 27 |
+
"diagram_pdf": "pdf_exists",
|
| 28 |
+
"intro_method": "exists",
|
| 29 |
+
"paper_pdf": "exists",
|
| 30 |
+
"latex": "exists"
|
| 31 |
+
}
|
| 32 |
+
}
|
2205.07177/main_diagram/main_diagram.drawio
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
<mxfile host="app.diagrams.net" modified="2022-04-28T07:34:52.730Z" agent="5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/100.0.4896.127 Safari/537.36" version="15.5.8" etag="jbYtuVD6itsY2WVLiRxu"><diagram id="CYG8-R96zUILAOIgP0km">7V1td+K4kv41OWf3Q3z0/vKx0z1zZ8/O7PRu37Nz70canIQZAlkg3Z3761cyNtiWbAkjCwPOzOkEYySj56lSqVRVusMfX378bT15ff5tNUsXdwjMftzhT3cIQSig+qWvvO+uMCp3F57W81l+0+HCl/m/0vwiyK++zWfppnLjdrVabOev1YvT1XKZTreVa5P1evW9etvjalHt9XXylPcIDhe+TCeL1Ljtj/ls+7y7KhA/XP8lnT89Fz1Dln+/l0lxc97w5nkyW30vXcI/3eGP69Vqu/vr5cfHdKEHrxiX3ed+bnh3/2DrdLn1+QDafeDbZPGWf7f8ubbvxZdNl7MPeszUq+listnMp3f44Xn7slAXoPrzcbXc/jx5mS80mH+fvyhgEPiv9Lv6939WL5NlfkuOIsTqtXq49fs/1CuQQLm/8E914R4kAMLiyifNGLB/9V5+9Tldq7626Tq/uHvudPZUxWg7WT+l+VAgQszxgftRV3RNV6rJ9bu6peAqSQAACCNKBcSSFi3kxIWC50B+P/CA5peeSxQork1y5j3te9p3/nk1V8+075lAmUAgDz+VjhHCsNriZvW2nqZ5I2XEXe0yR8O7ETQaVpyYvJdue9U3bNq/D+C1vgArN6n+2LVavCohcriUMdjOZnztbL4I4hLRD3FVu0AywcDuB9baJSCRHOx/cD8s1g/RM4vJyOLzs5gy3guLdbuaxbSh3YzFvVA365k1f6PALKYjiwfAYtmPLtbtnonFuud4LGZuFq9Xb8tZOsvJ8v15vk2/vE6m+t3vavlTpfTe3tf3PmnS539v/kq30+fixXa9+mu/tOgoCo/zxeJ39SDzrf4MBCC/9nG1WK2zR8ef6E/iE9l3WHrnQf4EPtA9+b+l6236oyP9sUDGnAkYSwAyWI+YhfaQ4kSKZuqXwG7Fkg8AS2SC8Jj96CbWk9lcjWnx3nK1TOvQ/5IuvqXb+XTiBFz3PZtsnrNvA20Y5/ITBGMIYCJEHWMLwBgmEJkYY4pOB1gMBeBThdXArkaZmQCAYxumHygABOQd2EgWQp4JTzgtqWDsxh1ji2AHAV0OAHQLRGq00XTajQwGqjP2lVHWoNMDIMoBaZtUIeA2ZS2JCSkiKIiuLjx9/cBagbKMcYCxRFjNbIwCijnDhABeNfaQupAAYipGnkjKpJCIcAyhQJbB3QvRSSMLLSPLFtuch5UhZv/3tireuN9kDP2gboDk9cfhTfXXk/79eZ3O5tPtfLVUVyYvevyXXzevuzsWuzt/3nWx+4ABqBr0bRW1yWL+pCTk01QNvrbHHzQ0avZbfMjfeJnPZvrjD+tUPd/ka9aURjK3I1W79OGOftJtvW1Xm1zKakJH2qbZAJwQmdHKMYYCI0kFqlECmnwQFn2JQsCPToefKfgzVGsM+CNdTNUTDRbuI9QwC2QW0daVn1JzMh7yNu9oIOS3qxH0g4+QJQwzyiUkhAtWE3ZFA9M66gtymysxFOTPo6CXMAcJhlRiRgSBSqzrcs5M0ElfoNs8b4FAXz0+zqfzyWJE/mDvMWXwSsKlUAY046S+nwalCb3NyxEEepu7KhD039fz7Xz5NCLvjfx+bRoDeZtzKxDy03SdvqyW7yP03nYdohHtOpvbK5i+H0H3tOvUIi+eXWdzegWC/JfVqOTLzk6UMCqAIBRBiAGqucYwNFG3bd2FQB3ZfGKBUP/PEfVjUCcRUQ/gr2tC/ctPn0fQD
6DLhJGSk662rYF5vDUcsnnpaog8rVdvr41fOw99zUf9rhTJ6r+lZ4Y6AU5NkxaZYxDEU20N59uxdTb/ZiW+Zul9TkzN/D/fNtv543vGD1CXCtwgFUUnX9dewuVqRn3N7HEbBOuoXYz6TpEX63fcacaZJZAIiill2S9hmjE44QgLInH+ywJ4CLw9At+GOlrv1WE4w+B5xFsNfPDY+QYvdJhP/MGT5xu80HE18QcPovONXuiglTOMHj3f6IWO/jjD6ImzjR62LSgva/T2gSJnGD3bwuzCRo+cb/Q8VjhDHz1L2Has0fPIdBr46GFwvtELsC1urvrA425wD9dsngtzmUgfPv76RbtBhuqJCc0LnkgkKaICQP2r5lszSNGXYw0H2CcPxoI/iq43b1879rxrCJZolLVlOCOum1wOlU0T04FlDU0OQbAAu/HDIxgaCdZCMMrj0cu55X8sUwI4OzevuskTGgrGU+zk6e5ZR/qW6Csi0jdA2MLwtCMZtWObrwNGnX8DREkMj2F0ZFgbw0hMhhGbA+3iGcZunWEkIQxhgoHc/arGdUIelWIBwj+GRzE+UqyNYgjEs8OIzRF78QQTI8FaCYYjEixA3tnwCCZHgrUSjEWdJAfm0Ndxlbfl0Meg9FNkoO13erDBhL6c+sQjDKr/wEmSSAo5AkosIOC0VqkOCJkIeCi2YKriLLGoKATAJbXug4FENX0Y8wBj5xEFFaBk10bXr1g+3e1qpagGvxzYudlO1gU9NTv/fHt5zV8egnxbK3HtCNCIzT1niRAlbFAFm3tUhWZf2yR4Za57iBPJBZFaizJKZfVB6lrRtzBXA3P2rSrOEKPOR+ASXV2ewfg+1XEIVlrRI1ZtpHgcirNeKM5a1WIUhrOjGM4CM9wjnnBkeM8MD0vpGqPBPnYqKqtrpBaCYet3CkZkj9DOkchxVDWsm+ZhiA3r1D4DrWGd2O3aGqKw6pradhBGlp+H5UQm5cco6tsFJj0dAOl3z9DOdPtoBCO+R/T0SPw4xJe9aHd5fp7Lo3S7DKza0cjwoTAcgRiqHcHzUz5/hlaiN4xGMOLHOYZlJL4P8XEU4pMBEN/6DI0iYB+XYCIQ5wyXUQR8zHoexawX5xeB/Blaid8wGsGIH+fYl5H4PrpfxiC+YyMy0p7SUbrfPi7BRGDcRh2OCLAo5g8/vwjYn6FRBOzjEkwEfLZZF4v560a9cKUmTzavuzNcH+c/dDrzwyGx2f+8jiMrinlHxjgqxArzzEvArYdpIJoU9WMrMTGHy6eEdlCfXcEbBaThdJOeAfHZ3bpVQPAZAGE+GzE3CoiO8IsPiM8GwY0CAtk5AEEjII2AyHMA4uNnvVFAMMcJjQ6Ij9fvRgFRa4IzSIizxEu3EP9/VbJIbqTwigtzhS+hEuJ8OV47EQHww8GAETJCmLP2yqnI+xa6AInAGNsOzLOQCDlJdJ1FK1zcEgmgJW4ZW57xUo1Y8KorUYh1q9VQTlNaECPbpNUbt5wlUQbJLXfNk5FbFm6xuNxyFkMZJLfc1U5uk1sioazErRq1ZFRqcZvrbPjUcpc5uVVqtdla6icqt5zlTwbJLXd9k9vkFm9TW4jiqNSy+TOHTy1d8Hzkll1vNXMLcxCxHgU/qjy3HhGHM7A3b99ssnneP8RkPS2CRjKU5ovFx9Vitc4eGM9oKmYkv/67jj7ZvhdPsV2v/kr/yAdWt/uarlX/ijdf9mEqhxtLjQr0gFmo81i5sZFIE2Ae2qWTX6gJfD0LohPwRx1hNBjga0g/Pj6i6dROCBfwut3Pk61Cfpk1rtSbDfdP7IHRULgLtAe0zSVkO6xNZ0IVB6ieBHyzp7l9GnjMYdMTgXpr8vKaDQrGWtK2OZTLDMp1Pt9U7zEnjedO3mnbpNH7o4Pdv10mxFf7V3t1frWaODoEsHqKQkUcO0hdVV5gm0wHEAyuT/BjpSivmozoaqzMEBNk248JsBnDm13ygxCRbr73UUSuWkQIjCoizZsLg
xCRbrsIo4hctYgwHlVEmvdIBiEi3TZDRhG5ahGROKqING/1DEJEuu3pjCJyzSKiy/pHFBHRvGU1CBHptjc1ishViwhBUUXEI/7+hsHQEfgRwWjeq5rNv1llXm/o3Od7OFpK820cx8ld1WtN+2B2bXHMOWLMpi2SJCmpgep2lWtzKxuG9r2tI49A9aDoerWdbOcr7emWoXgHu/AOSgvvZIJoAOo1l+8fxFR5xZ7tqkdynCpLUqJTBNvddmZV9d6Uc/PpA4OQkCt2bI8S0lVC1DwSUUIGvjt6xX7tUUK6SojEMSVk4JujV+zWHiWko4QoEYkpIQPfG71ir/YoIV0lhKCYEjLwrdErdmqPEtJVQpiIKSEeVZduGAtl8aJoWMjmLbjRpX1dLm3SgXY9erRlc07ZIObJK/ZoVx0t4zxZEhLGhuOvK6qoDlVCrtijPUpIVwmJ6q+TA98VvWKP9ighHSUkrr9ODnxX9Io92qOEdJUQQmNKyMB3Ra/Yoz1KSFcJieqvkwPfFb1ij/YoIV0lRJKYEuJx0sbtYoGVxRvRo928/zZ6tK/Lo8070K5Pj7bPaSKnHzlVH/yq5lBjt37/h36R0OLlP8vvffpReVXUkEl/zLe7jwlO8tfZ53QxsN3Lwyf1i/IHPxd1hio6xTjaancEU34JkWPxVngCABBGlAqIJSXVWp5QFCUQg59vhQFKJNuf6gQKPbUvq83Uo+EDEWtk8j3fSuhvyEvdVHuBtNZuoCOtMMDm+UgUtD4qrSfm1AuNOz7vSLBwfpyd9HFRlBdr+MDdqUdvQeCsxulW/NCm+P+7Ue3XNY2Svl8nX9NFVcGErCfXOg38ki6+pbrlNmU/XS2X6XSb96KuAVMp7MA1tcI6XagZ5VtVxfjL9P4jOQeKcyqKFlaPj5t0W2PF8TzwyN+rKvCupcgqpcQqFl7VGNTavF5pDEwwmHSbfZqqkZWaf/ikdVr+2WqBM/UTxhZghgIqDsopz/1F2YpKpcE68F2mfljo6uEcOmIuAcz6crvhf1pPZvP0AM1ytUy7kSEAkILI9v0lYsCKLHM8apnj/UGNc4R6N4MOeht0NCsbeDDopFquBrLodpZNzaKrmHmFYBxRSBC0mnnahdKTmWeYBUY1Sl9LTkLQ6p3gZH8MS2BjzvgOmDtsIVpfN/HQtlCcY9jrYtR1JVQVvwbiXy7HUYPpe/xqhXvZ4MEJTlsniPCWvMdZ6k+KM6+Nk5+PkeuYFEWrpwMwky0QWuiCSZBp0eNo7QAjAtoPvyYiYVQAQaj6rmoJKypjYtadZbbxCGMmOA8j8nPyVfc7OtY9/913fXi2UuP+9YZ3RGv2B6EE49JZ3FWpuI9XaBwC0+f8+/kH/Ij5MigmBEEigDIjgESoOrFhU1HZQAkjlqY79oZBwVk5foQZpcWRC+9VxRgDk2JSGjHJMOGCQsmQlIDBKigyJiimj+qGQSHaHsdECMyhqIICUUxUTI/SiIoVFRoTFdMldMuoNM/0UMRExfQw3DAqzVM9srgV+gPFeeyze1lEbFtgD/c/f/g42GXOEaiTk1EvcCUmrrb1bhhcnYc6d1nu7uD7j2uB9nSBLqC1BDX0B20vnoyhOybOoqdZIkXJd8Gq8RcYxFTUtoi5Gigu/7xlj7mLx3725QDN7hkMz/slONkxrp/XWXcz+TrZlcmbAHJw/NbCdHTomqgFTTb42bv4vqHpQjmRGSfv5dyVd3IOGzteezmlLVEu78pbooQcvyV6YCu6bLZyKRMg0D7srMZdKPn+QNBOEW11G1Qeiud23hs6hdXIdEKFic36z5uLzco1RCOp70GCaFGZtWvAVtiILGSLyDrXbs3HX78M1iw63uZppQKBCcMcYygwklSgmpYB3O+oyCD7NcU0VuLAb2+L7fz++3ypQ6sQ+LDdqu+l49UHgwPpJMXH1DwnSi+XEKrPP2bkFbRNQEFsUmT62f42WT6pK7+tZm9qAG8GFqLP4z4SFtAbLEedrHtsO
GslfrEc2BhgGK2WCJHG2CkzTFqiRGGQyAnkEUsy2tNXbk8jShJRs35Osaez9k7NDDnJojIdgv9r0vpqTF7UHpekTF64b3AgJq9HeuowtTbUMYaEAoo5w4QAXk+84ubsh3giKSt514gp1DiMGdmcanraDsff15Pl5nG1ftFZqODXyXuWjdpxmVDNxyiC+0u5APklf1G0saMlf7kxRaFRQM2I+ODJoxBqr8Yh165I1Dl41kDChGlaWS1eZDhIuvHJ5m4b+XQZfOJUaaoSn+qzNFLrX9Pc7JVPuNnR1aH6RWtCe+WWO2vhC5I7TfI3dj3rd5aKmJNF6b3v+VjoNwnI62gsUrUkX99vdK6ZWggan2xI4j+8p5b2GZb6TVA8S/bOthCPotWMvxqo1XpW7XH/wdl887qY5GM0Xy7mxWceF6vJttZQe/Z+awmPAAn5ZUlYaBPsYTL96ykTsJrIBpADBOvuY2g6dmwZW0FmZBy0qOzI+JHxXRhvCUjrj/GmK3Nk/Mj4yIwXMXV80KKvI+NHxndgPLKEgvfH+KBFXEfGj4zvwngaU8cHLco6Mn5kfBfGW5Kw+mN8nKTq1hGADCWg2YUFpKkBIAcJK8Un2grsIJBAXrqFhBgujy2NSDupx+5a4obIYM99yFO3lHy3GiFJJC2hVqTT9hBmioMHIF87mDlGVdmTRFSLICCRKNxKWxy1kjPeVKAJA4duWH0r3dFNSKYMJyD5qpgi21V0IKI4egnIE2Lb+Rh5cjJPWA1BzHohiqubkEzxKNA4MuV4pkCH1deRKZwe101Iptg87SNT3EyBCSoRRdaKJ0MiE1g2NDsWT4YsQaVmivKfnr2E5EmAqpEjTwyeIAgSXAaY98ETVy8heRKgLOLIE1OfiBg8cfUSkifDCRq/LJ64VsckioXi6iYkUzx8iCNTjp95WAwLxdVLSJ6MztM+eIJBdU7oZeJxdBKSJeNBStdzkFKT3BQfwdZtnP3Oj30KK++LUVPocIAzlfYpeiMLb4+FvG5J4Xi8o8E9yEGK4hiHqwj0gNkhRb/b7FmkrkGGdbUZzjghUDBQq0p0z0giSsdgQROPMBmUkNEEoiZ1BOsA+8+sJCnP38XJL/tCkv2ZXPRy3cytlEE1iiAgZFLkqAfnRWtvx7IBCZ4IgDBEJPuHupsOSQg0EiIIIRIJsCCYi+yfat8QiqTjnjcSIuGAQcp0BgurHRZmazkkOS7XhXxB5BCgH2rU2w1JjMv1GV8QMbqeiOMihueJOJ2IcblO4sshxr58cmBiGO2GJMbl+oQHRQw13QPOBKfZP/Xz3LAwznPz54ZMGGyuQ2BpOiQ9LtcVfEH04CDpGnvpooel6ZD0uNzI3EHRo3VawRhUwu47ejcck4yjl5Ckudwg3UGRplWnYB1MW4q6xr2oF0cvAUnDTH/rL+l6dXd7BR/V6j+R5FDwEdY3Y6JWfGTNFTXG/Zfz7b8E4BkFOAFNK5wSt9TchfoiF3LPFH0nvRFGE1BX7EWdubKICYuIYRFiFDy8fQFGYTfYraETVjVSZJ+f9h19HFeLxfx1k7rlZbJ5Vd9WvXic/9AlwkrFwozqhH7FwkIeb7+Ds5Fv2gojQDCg/UEUc8RQTQirdemkLe+aJpJZZPJw+TSwnCc3dat2/q96Nbvd2B5uhC2TxNvX4hIu6+TDZbcCH2Z99dOZFeOIW+bhR6qK4TE1RENK6Wyyed4/xGQ9zd/LWitVKbxDeEZTMSP59d91svz2vXgIY+vetfN+olq4r2Wf31uMPOuR3kFmIA8vUO/zsPSbh20MD1Kcm/n4Oq5kiuIXP0U5S6WeOEVZJhbfGqzStsIgHusLj7nsAietdq7dH2qdxpjGuOlmiK7pqBICwKiEWPLsqM6qbNlOGYaGTFqUIA8xPj5xStehBHdUuGQlWJzu2p8SbK21VHKqgERgjG16z6JPtRnjUHoNrpZL14VNlIui+eJ4GTy+YjxldjtOB37xTgfet9OhH2VGb1aXO
dazJAHmErY/7TaA8mkEmytYmzHX3yD4BHNcib5rz2S4BH3nPDxmkPoO3ay+c2ee9ifYHhEV59gni6rdhMfS/Vq0m7x07SaawwaGrN3cW0vXqt0aGBdFsG1ujNg+Oap9cqXcQLeis8cDJCGST4VP/s91qLod+Bet6pqr8w9Z1bFbVXVNjIui6nzSdK5DsNuNyWyz13QBHvStdVuob0m2ORGGL8m360/34Jh7Hu9P2AcaVRF3EG4nqkJcfFSF6D2qohcFSG5V/zkYF9kHL2/HQdM67Fl8R9m4qUo50GeaR7dtZO8OmcABU7RRTq88YMrNrWY/hW/AZBh5t/ltjksN+vNts50/vmdjYxy7hfMTq5pmgK9rr6nE1Ywjhad6EPmRmTwBEJdAHxzOmdIkDKlfdW3CLMYcShimUmCU/+qLAB5uqqEPn+WMr2jD5xGeMvDhsy4lYg2fhy9l6MNnOSk62vAdldAxzOETZxw+j8X9wIfPluwZbfg83AJDHz5yxuHzCAgY+vDxsw0fAh4r1YEPHwZnHD6PJIGhDx8+4/DZ1k0XNnznW3UgYFt1HOdwgDXPYkffx8a2GXPb/gnNGw50sXjIsP5V5Q2gLDG92n15KBDoaWveBjwagW9bZwsaFfiekglswB9bjuC2gIeQR4S9pw18G+zuTaebhp3EhN22Fu8Jdne+z03DzuOq+Z5yKGzAu+Plbhl4NeNGBb6nQAUb8PzWgQe6WoGAVEhdjZDUgMdRgYc2z01PwIsR+DbgWcQZHvYUvWCDXY6wt8EuY8Ju85T1BLtHFO0t445RXD0/bCffzXv5UFsUEo7r5YOh4ygagwsrpQUrkYaWo/6qtQYfqf7vrlNMYlN5wlLzLPvJP1u6vvsJg7qQIKEtsWeWYAZYbBVVQC8K3p6GevATb4Ic0Lh7hhOqzGPGdZ1+rXCVFsaS1hQxgGohZkb5hSk6z4qIioN5f+jt2NryTOcelI/zNJsuTsFzFJRXIE7eS7flOq/5a0ja+DUO/Nq1emDbcRXrERxqyRELO5C9dGiIZFUEo6S55MNtjkSUGeZmklhyNC84iQXBy0xiQU6T8jqTWBoZF0Owkc2dFFuH2wqrWHT4vnZp6MrH++8y/NreNaM6hTOa8jtbme+jy3lLxvEH73LeOc6NoN7zqiF1b1GDfVWyRgV8NzBdOWC4gOkKOT0dg5yubrVSTiPjokxXHp6Oc1evts1clCQSl+QQWcSJBDhmRj3d7Wg+cvGabyy3cWGqr4FyUVTfzdQ2vQbJvszaprday9nFuLh1JBAaQLXTXT3Ao6yc3sYDezgprkX1XXrhU4Qvs/DprVYDbGRcFMH28NNEKRB25HJOVJdzhWkWfjmHb6YOas6Fi9Z8l1kH9XaXc02Ui6L6bsdRcwWSfZmOmlstC+hiXOTlHI4SH+KwcsCAlnO3E0yCLz6YBF9mMIk7PvlaVd8Zg0mIh5+md7+VGI6iIzdzlmwO/SUrOmLzRQxf0bmTM69U0TUxLopgN4es+BZ+zcexGWILak083OY8XGY8XNd56EE5ZmNbkiSNfHGxq62gbE6uI2s6eaguL+I4pg8AqyketeQDKKEls6eIuqgQLURoOfGIdwmT2dOSzJOhES6Z5yypO5hKI8ZUqfykOBS6rDUg3JcnLwMKWRDV4eH36dtGwjrgtpa8hARPoLkotp3xRYIw+6jqq8FCbt3MFtN0Og3E7K+CEmoG5EIA/Kc56pzm+sPIdFxMj1lrDUFRZT+6ifVkNk8P+mW5Wqa9GsQNuBUJWhXp44kUJeu3yLaJYsqYzpjnEeMgGBdW4gBAHkBACeG2SYcmLOKkQz0cFOOkU/DlPJMONZ0mtknnrzs0KqVjsBvaxFNkPjsmnhHnbjgPZ/KhHjEsUbzCorFyQ8M8JEkigTTy9yuLQkQSzEoDG6KyAz1q1X+709SOWWeapsx1vG2aQqP+OhK8wc1TpqPCNk+NQHcEekATlU+GjaOgTqVAUuchPaV6jtXRa
RYmClMuhxDRNq9iANS8ekCzNjn61tEhHJ/QS0NJnS4Vb6hP5MrIkCp2sBU7KIIwhHB3L/3RwsO9MtKiAhilrYoDySC0oKJVcTh6CcgQ5uF4GhlSw86hOFgQhlCH4mB9Kg7mESDU+2agBKZflljXw71ZXsz0BZnicR2BUqwdjQsIlGI2f07IQKmivu1ubA83Qq+AqPn9X86QqIZYlksPiXJw6x7t4xqKokwooowPoXQLOEnXBalNxW4n8YddfB0H1nviT0lxHam2LlA/tfPBDMVCvSmjAaTkMAhaFyd2vURsQxIi1ZjdTlYOu/isHHaZWTlKxT24rbMrDVh3sK6wxQrTDMYzzbjNOzGGs19BODuhNoPXdNn0FsDOm6vBjNS6aGpR5jBfgM186Y9oPi4kh4f1+p2qBJBW0Dgx3J3eWzCOpgXb5xf04EnlHkFGI/wUtu+HnAC/q+me4fdwLo3wM9Sb9Lua7hl+H5+aQulL/nK13j6vnlbLyeKnw9WHalTOn28vrznWOjft8JFfV6vXnDJ/ptvte36TXiP0RagdCvmXLTI2duNXjAAPSr1GTh17EBFB2DBAMZYFzxq1CTCTBcyP3Z18hBH3yEK7COIo0Nfv/9BPkNDi5T/zB8pefPpReVWcC+FDuCICrko4MUzCMdIeK+TBPdwaK9wLDZvPby+vGp4dq4kO+VqOpUWV2RW3XecY4HKMYqPv7y7A2teysY2psG72IHN6La6dtipprgPdEdij4kyvGF2KWpef0YFudsg2ulKnuwBc7VZYP339N6UJ1f+qK1D669/1n5lrInNEPOaQfMjuYJOX1+xNjEmjc6N6T7W5+lmxlfd2z6vfXK7WL5NF9e3v+Qjq98nuibM3F2pmS9f3Gx3Qv3yyfr7JwVN+e66IucybB6VHy97crifLzaNqtGh+me5v+L5az6q9lz/+dTL96ymj/X1t9DOzKh/1LLem+JtqDOoOH1sEuHv/ziraA2bHyIADAxrExmf747WUy1ZiyKuTIRfkDbQ5mrXBZ3MB9rWNUeT6nHNTV+hQ9ZYjgrlZfkynPGBzSJCUiZAhhsWnCtlR5wR3XqMU5wSj6ooFcFJZs6gL0LFuyV59Tteq42x3r20xU9mFE+2Fyu4l1cVBISecAUyJqNb/udeVU8o/ZswCpYmEAqqPCgg4lcKEVstK+afFAKktUQoeKWqUbB8heLUFX6+Noh7mQEBBKIRC7ssNeXYT0IMj0EjSKyMpZYksBWgD0QtHHb2EpGiALYaRosOg6D7Sh9JKJ1X3lJAJKR3fLjoeA59N8JYv69dLSAYH2CUZGTwMBuePAaFMMGlsIJCSdfQSkqI+OzkjRS+KooQluOyp65iq5eKoo5uQJPXZNRpJehEk9bMEoAAJ5CVyddezbaaAq5uQHA5Q7GDk8DA4XCzXdUJByffUz4LL0UtIigaotjBSdFgUxTyB/diotqZDkjFAjYeRjMMgo9+cj3i1A9jLlO/oJSCDZYAaFCODh8HgvYPe3CMK5vs3mw5JxnFTyp+MkCQUC44AYhJwSWQ1HfEe1shYRNmdVZ9SUJSx7KIx0cHApP2REFlI6JP/aA20aE0xqtxyZw0lIK8/7tqCL5ojL1xhF20xF80BF+3RFi2hFtlbs/nmdTHJx2i+XMyLzzwuVpNtraH2fKq+gyJ+SRffUp0aqt5YTL6mi4d9jEitRmJOl1MCJZDOk6A1yxVZEoj7ipKQHttYPRSX7aCaawUsUzijKVfXJ+tpfhsu6lwe6seatTDR/kqpMck4/sACQSp4Qmu2IygSHx35uwiKEJh6bOwMA9NqETOjQinKygsfADbrA9vx1c1+nmgFuMzaVurUhvon9sBoINQJokY4O4SmIENugT1IJRk5vL0S66jvzAk/U6h9zHmirHGtLqmAWGJcLRLMQKKulsJVTCsoUBYdqOlvVrNRvHMma5ac2VJIa2d4uxY90wWxRNISX0grXSxGcxi6oHpcZFe6UMPlFYwu6
uV6pW2ww+1qUnj+bTVL9R3/Dw==</diagram></mxfile>
|
2205.07177/main_diagram/main_diagram.pdf
ADDED
|
Binary file (55.4 kB). View file
|
|
|
2205.07177/paper_text/intro_method.md
ADDED
|
@@ -0,0 +1,51 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Introduction
|
| 2 |
+
|
| 3 |
+
Named entity recognition (NER) is one of the most important and fundamental research topics in natural language processing (NLP), which recognizes named entities (NEs), such as person, location, disease from raw text. NER has attracted substantial attention in the past decades owing to its importance in downstream tasks, e.g., knowledge graph construction [@bosselut2019comet], question-answering [@pergola2021boosting], and relation extraction [@he2019classifying].
|
| 4 |
+
|
| 5 |
+
In the early stage, the popular methods for solving NER are some traditional machine learning methods, e.g., Hidden Markov Model (HMM) [@morwal2012named] and conditional random field (CRF) [@mozharova2016combining], which require extracting features manually, making the process inefficient and time-consuming. With the breakthrough of recurrent neural networks (RNN) in NLP, Long short-term memory (LSTM) [@hochreiter1997long] and Gated Recurrent Unit (GRU) [@GRU] have become mainstream methods for this task and have achieved promising results since neural networks can automatically extract features from the sequence and also take each token's position information into consideration [@lample2016neural; @chiu2016named; @huang2015bidirectional]. Nevertheless, RNN fails to perform well with long sequences due to the gradients exploding and vanishing. In recent years, Transformer-based models [@vaswani2017attention] have become mainstream methods because these models are able to capture long-term dependencies with the help of multi-head attention and thus provide better global context information, especially for long sequences [@biobert; @yangxlnet]. However, these Transformer-based models usually are insensitive to the local context since the representation of each token is computed by the canonical point-wise dot-product self-attention [@li2019enhancing; @huang2021missformer]. Besides, although some studies [@shaw2018self; @BERT; @liu2019roberta] have been proposed to inject position information into Transformer, they are still inadequate to help Transformer obtain appropriate position information [@huang2020improve; @qu2021explore]. In other words, the self-attention mechanism is effective in overcoming the constraints of RNN from the perspective of long-sequence context information extraction, but is inferior to RNN in terms of local contextual and position information extraction. 
Yet, both long-term dependencies and local context information are essential for the NER model to correctly identify entities.
|
| 6 |
+
|
| 7 |
+
Thus, to alleviate the shortcomings in RNN and Transformers while maintaining their respective strengths, in this paper, we propose a novel Hero-Gang Neural model to leverage both global and local contextual information to improve NER. In doing so, on the one hand, we utilize a Transformer-based sequence encoder (i.e., Hero module) to extract effective global contextual information with the help of the self-attention mechanism. On the other hand, a multi-window recurrent unit (i.e., Gang module) is applied to extract local features from multiple sub-sequences under the guidance of the extracted global information. Afterward, we propose to use multi-window attention to elaborately combine global and local contextual features. The performance of our proposed model significantly outperforms the strong baseline models on several NER benchmark datasets (including both general and biomedical domains) and achieves new state-of-the-art results on some datasets.
|
| 8 |
+
|
| 9 |
+
# Method
|
| 10 |
+
|
| 11 |
+
[ NER is usually performed as a sequence labeling problem. In detail, given a sequence of $X =x_{1},x_{2},...,x_{N}$ with $N$ tokens, we aim to learn a function that maps the input sequence into another one with the corresponding labels $\hat{Y}=\hat{y_{1}},\hat{y_{2}},\hat{y_{3}},...,\hat{y_{N}}$ of the same length. As summarized in Figure [1](#fig:architecture){reference-type="ref" reference="fig:architecture"}, the Transformer-based models (e.g., BERT [@BERT], XLNET [@yangxlnet]) are regarded as the Hero module to model the entire sentence for global sequence information extraction and the Gang module is responsible for local and relative position information extraction. Afterward, we employ the multi-window attention to elaborately combine these different features (i.e., features extracted from the Hero and Gang modules), which is then used to predict labels for each token. Therefore, the aforementioned process can be formulated as: $$\begin{equation}
|
| 12 |
+
\setlength\abovedisplayskip{6pt}
|
| 13 |
+
\setlength\belowdisplayskip{6pt}
|
| 14 |
+
\hat{Y} = f(X, \text{H}(X), \text{G}(X)),
|
| 15 |
+
\end{equation}$$ where $\text{H}(\cdot)$ and $\text{G}(\cdot)$ refer to the Hero and Gang modules, respectively, and the details of them are presented in the following subsections. ]{style="color: black"}
|
| 16 |
+
|
| 17 |
+
[ [The role of the Hero module in our proposed model is similar to that of the leader in a team, who is responsible for providing guidance, offering instructions, giving directions, and assigning sub-tasks to fellow memberships. Therefore, the Hero module is required to have a comprehensive understanding of the task, including overall and local progress.]{style="color: black"} Thanks to the characteristics of the multi-head self-attention mechanism, Transformer is powerful in modeling long sequences and can provide more effective global information than other counterpart models, and it has already achieved promising results in the NER task [@luo2020hierarchical; @beltagy2019scibert]. Thus, we employ a Transformer-based encoder as our Hero module to obtain the global context information $\mathbf{z}_{i}$ for each token $x_{i}$ by $$\begin{equation}
|
| 18 |
+
\setlength\abovedisplayskip{6pt}
|
| 19 |
+
\setlength\belowdisplayskip{6pt}
|
| 20 |
+
[\mathbf{z}_{1},\mathbf{z}_{2},\cdots,\mathbf{z}_{N}] = f_\text{H} (x_{1},x_{2},...,x_{N}).
|
| 21 |
+
\end{equation}$$ Herein, $f_\text{H} (\cdot)$ refers to a pre-trained Transformer-based sequence encoder (e.g., BERT [@BERT] and BioBERT [@biobert]). ]{style="color: black"} [The features $\mathbf{z}$ are then input to the Gang module for extracting local contextual features and their corresponding relative position information.]{style="color: black"}
|
| 22 |
+
|
| 23 |
+
[As introduced in the previous section, although pre-trained models are able to provide effective global contextual representation, they lack the ability to extract local features and relative position information. [Thus, we propose a multi-window recurrent module, named Gang, to enhance local information extraction.]{style="color: black"} Recurrent structures (RS), such as LSTM, GRU, and the traditional RNN, are effective in extracting both local and relative position information from the sequence, owing to the characteristics of the recurrent mechanism. To better emphasize the local features of each word without being disturbed by long-distance information, we construct a sliding window with a fixed length to generate shorter sub-sequences, where each sub-sequence includes several consecutive elements in $\mathbf{z}$. An additional advantage of this operation is that, in comparison with the whole sequence, the sub-sequence is much shorter so that it is easier for the RS to model. ]{style="color: black"}
|
| 24 |
+
|
| 25 |
+
[ In detail, for a single sliding window with length $k$ and each hidden state $\mathbf{z}_{i}$ from the Hero module, the corresponding sub-sequence is $\mathbf{z}_{i-k},\mathbf{z}_{i-k+1},...,\mathbf{z}_{i},...,\mathbf{z}_{i+k-1},\mathbf{z}_{i+k}$ that includes $2k+1$ consecutive tokens. This sub-sequence of length $2k+1$ contains rich local contextual information of $x_{i}$, and thus we utilize an RS to encode it for obtaining local semantic and relative position information. To extract local information in both directions, we utilize a bidirectional structure to encode this sequence span, where the forward RS computes a representation $\overrightarrow{\mathbf{h}_{2k+1}}$ from left to right, and the backward RS computes a vector $\overleftarrow{\mathbf{h}_{1}}$ for the same sub-sequence in reverse.]{style="color: black"} [We concatenate the $\overleftarrow{\mathbf{h}_{1}}$ and $\overrightarrow{\mathbf{h}_{2k+1}}$ as the local feature $\mathbf{h}_{i} = [\overleftarrow{\mathbf{h}_{1}}, \overrightarrow{\mathbf{h}_{2k+1}} ]$ for token $x_{i}$, and then we can obtain local features for each token in sequence $X$ in a similar way, denoted as $\mathbf{h}=\mathbf{h}_{1},\mathbf{h}_{2},\cdots,\mathbf{h}_{N}$.]{style="color: black"}
|
| 26 |
+
|
| 27 |
+
[In practice, we need to consider two situations. First, each token might have multiple levels of local information, such as phrase-level and clause-level, which may affect the understanding of the current token. Second, since different tokens or the same token in various contexts might have different relationships with their surrounding words, we need to consider more sub-sequences with varying lengths for obtaining more comprehensive local contextual information. Therefore, we propose to utilize multiple sliding windows with different window sizes to extract richer local features to alleviate the above issues. We assume that local features $\mathbf{h}^{1}, \mathbf{h}^{2}, \cdots, \mathbf{h}^{M}$ are extracted from different groups of sub-sequences, whose corresponding window lengths are $k^{1}, k^{2}, \cdots, k^{M}$. This process can be formulated as: ]{style="color: black"} [ $$\begin{equation}
|
| 28 |
+
\setlength\abovedisplayskip{6pt}
|
| 29 |
+
\setlength\belowdisplayskip{6pt}
|
| 30 |
+
\mathbf{h}^{1}, \mathbf{h}^{2}, \cdots, \mathbf{h}^{M} = \text{Gang} (k^{1}, k^{2}, \cdots, k^{M},\mathbf{z}),
|
| 31 |
+
\end{equation}$$ [ where $M$ is the number of sliding windows and $\mathbf{h}^{j}$ is a group of local features extracted from the corresponding sliding window with length $k^{j}$. The process is similar to the task assignment in the team, where different members are responsible for their own sub-tasks. ]{style="color: black"}]{style="color: black"}
|
| 32 |
+
|
| 33 |
+
[We obtain global representation $\mathbf{z}$ from the Hero module and multiple local features $\mathbf{h}^{1}, \mathbf{h}^{2}, \cdots, \mathbf{h}^{M}$ from the Gang module. Next, we apply the multi-window attention to effectively combine global contextual information and local features. In doing so, two types of attention methods are proposed in our model: MLP-Attention and DOT-Attention, respectively.]{style="color: black"} **MLP-Attention** [We first concatenate these local features with global information and obtain the intermediate state $\mathbf{m}$ by a fully connected layer.]{style="color: black"} $$\begin{equation}
|
| 34 |
+
\setlength\abovedisplayskip{6pt}
|
| 35 |
+
\setlength\belowdisplayskip{6pt}
|
| 36 |
+
\mathbf{m} = \text{MLP}([\mathbf{z},\mathbf{H}]),
|
| 37 |
+
\end{equation}$$ [where $\mathbf{H}=[\mathbf{h}^{1},\mathbf{h}^{2}, \cdots, \mathbf{h}^{M}]$ and $\mathbf{m}$ have the same dimension as $\mathbf{z}$. MLP represents a fully connected layer. Then $\mathbf{m}$ is used as a query vector and $[\mathbf{z},\mathbf{H}]$ serves as the key and value matrix.]{style="color: black"} [The final token representation can be computed by]{style="color: black"} $$\begin{equation}
|
| 38 |
+
\setlength\abovedisplayskip{6pt}
|
| 39 |
+
\setlength\belowdisplayskip{6pt}
|
| 40 |
+
\mathbf{s} = \text{softmax}(\mathbf{m}([\mathbf{z},\mathbf{H}])^{\top})[\mathbf{z},\mathbf{H}].
|
| 41 |
+
\end{equation}$$ **DOT-Attention** [Instead of using a fully connected layer to generate a query vector, in this approach, we directly regard $\mathbf{z}$ as the query vector and $\mathbf{H}$ as the key and value matrix. We can obtain the final local feature by]{style="color: black"} $$\begin{equation}
|
| 42 |
+
\setlength\abovedisplayskip{6pt}
|
| 43 |
+
\setlength\belowdisplayskip{6pt}
|
| 44 |
+
\mathbf{u} = \text{softmax}(\mathbf{z}(\mathbf{H})^{\top})\mathbf{H}.
|
| 45 |
+
\end{equation}$$ [ Since $\mathbf{u}$ is a weighted sum of different local features without considering global information, we use the sum of $\mathbf{u}_{i}$ and $\mathbf{z}_{i}$ as the final representation for each token $x_{i}$. Thus, the final representation can be obtained by]{style="color: black"} $$\begin{equation}
|
| 46 |
+
\setlength\abovedisplayskip{6pt}
|
| 47 |
+
\setlength\belowdisplayskip{6pt}
|
| 48 |
+
\mathbf{s} = \{\mathbf{z}_{1}+\mathbf{u}_{1},\mathbf{z}_{2}+\mathbf{u}_{2}, \cdots, \mathbf{z}_{N}+\mathbf{u}_{N}\}.
|
| 49 |
+
\end{equation}$$
|
| 50 |
+
|
| 51 |
+
[After obtaining the final representation from MLP-Attention or DOT-Attention, $\mathbf{s}$ is sent to the corresponding classifier implemented by the softmax function to predict the distribution of labels for each token in $X$.]{style="color: black"}
|
2205.11438/record.json
ADDED
|
@@ -0,0 +1,32 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"arxiv_id": "2205.11438",
|
| 3 |
+
"month": "2022_05",
|
| 4 |
+
"year": 2022,
|
| 5 |
+
"conference": "NAACL",
|
| 6 |
+
"title": "Contrastive Representation Learning for Cross-Document Coreference Resolution of Events and Entities",
|
| 7 |
+
"arxiv_url": "https://arxiv.org/abs/2205.11438",
|
| 8 |
+
"source": {
|
| 9 |
+
"paper_dir": "/home/zling/lzl/ICML2026/Build_Dataset/data/2022_05/main_diagram_database/2205.11438",
|
| 10 |
+
"tex_dir": "/home/zling/lzl/ICML2026/Build_Dataset/data/2022_05/tex_files_extracted/2205.11438",
|
| 11 |
+
"paper_md": "/home/zling/lzl/ICML2026/Build_Dataset/data/2022_05/main_diagram_database/2205.11438/paper_text/paper.md",
|
| 12 |
+
"metadata_json": "/home/zling/lzl/ICML2026/Build_Dataset/data/2022_05/main_diagram_database/2205.11438/metadata.json",
|
| 13 |
+
"intro_method_from": "/home/zling/lzl/ICML2026/Build_Dataset/data/2022_05/main_diagram_database/2205.11438/paper_text/paper.md",
|
| 14 |
+
"intro_method_from_kind": "markdown"
|
| 15 |
+
},
|
| 16 |
+
"files": {
|
| 17 |
+
"main_drawio": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2205.11438/main_diagram/main_diagram.drawio",
|
| 18 |
+
"main_png": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2205.11438/main_diagram/main_diagram.png",
|
| 19 |
+
"main_pdf": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2205.11438/main_diagram/main_diagram.pdf",
|
| 20 |
+
"intro_method_md": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2205.11438/paper_text/intro_method.md",
|
| 21 |
+
"paper_pdf": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2205.11438/paper.pdf",
|
| 22 |
+
"latex": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2205.11438/latex_source"
|
| 23 |
+
},
|
| 24 |
+
"status": {
|
| 25 |
+
"copy_drawio": "exists",
|
| 26 |
+
"copy_png": "exists",
|
| 27 |
+
"diagram_pdf": "pdf_exists",
|
| 28 |
+
"intro_method": "exists",
|
| 29 |
+
"paper_pdf": "exists",
|
| 30 |
+
"latex": "exists"
|
| 31 |
+
}
|
| 32 |
+
}
|
2205.15376/record.json
ADDED
|
@@ -0,0 +1,32 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"arxiv_id": "2205.15376",
|
| 3 |
+
"month": "2022_05",
|
| 4 |
+
"year": 2022,
|
| 5 |
+
"conference": "NEURIPS",
|
| 6 |
+
"title": "Reinforcement Learning with a Terminator",
|
| 7 |
+
"arxiv_url": "https://arxiv.org/abs/2205.15376",
|
| 8 |
+
"source": {
|
| 9 |
+
"paper_dir": "/home/zling/lzl/ICML2026/Build_Dataset/data/2022_05/main_diagram_database/2205.15376",
|
| 10 |
+
"tex_dir": "/home/zling/lzl/ICML2026/Build_Dataset/data/2022_05/tex_files_extracted/2205.15376",
|
| 11 |
+
"paper_md": "/home/zling/lzl/ICML2026/Build_Dataset/data/2022_05/main_diagram_database/2205.15376/paper_text/paper.md",
|
| 12 |
+
"metadata_json": "/home/zling/lzl/ICML2026/Build_Dataset/data/2022_05/main_diagram_database/2205.15376/metadata.json",
|
| 13 |
+
"intro_method_from": "/home/zling/lzl/ICML2026/Build_Dataset/data/2022_05/main_diagram_database/2205.15376/paper_text/paper.md",
|
| 14 |
+
"intro_method_from_kind": "markdown"
|
| 15 |
+
},
|
| 16 |
+
"files": {
|
| 17 |
+
"main_drawio": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2205.15376/main_diagram/main_diagram.drawio",
|
| 18 |
+
"main_png": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2205.15376/main_diagram/main_diagram.png",
|
| 19 |
+
"main_pdf": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2205.15376/main_diagram/main_diagram.pdf",
|
| 20 |
+
"intro_method_md": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2205.15376/paper_text/intro_method.md",
|
| 21 |
+
"paper_pdf": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2205.15376/paper.pdf",
|
| 22 |
+
"latex": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2205.15376/latex_source"
|
| 23 |
+
},
|
| 24 |
+
"status": {
|
| 25 |
+
"copy_drawio": "exists",
|
| 26 |
+
"copy_png": "exists",
|
| 27 |
+
"diagram_pdf": "pdf_exists",
|
| 28 |
+
"intro_method": "exists",
|
| 29 |
+
"paper_pdf": "exists",
|
| 30 |
+
"latex": "exists"
|
| 31 |
+
}
|
| 32 |
+
}
|
2207.10040/main_diagram/main_diagram.drawio
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2207.10040/paper_text/intro_method.md
ADDED
|
@@ -0,0 +1,157 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Introduction
|
| 2 |
+
|
| 3 |
+
In long-range imaging systems, atmospheric turbulence is one of the main sources of distortions that causes geometric displacements of the pixels and blurs. If unprocessed, the distorted images can have significant impacts on all downstream computer vision tasks such as detection, tracking, and biometric applications. The atmospheric turbulence effects are substantially harder to model and mitigate compared to the commonly seen image degradations such as deconvolution, as the turbulence is an entanglement of pixel displacement, blur, and noise. As a result, a dedicated image restoration pipeline is an essential element for long-range computer vision problems.
|
| 4 |
+
|
| 5 |
+
Image processing algorithms for mitigating the atmospheric turbulence effect have been studied for decades [@Anantrasirichai2013; @mao_tci; @Lau2017; @Lau2021; @Milanfar2013; @Yasarla2021ICIP; @Hirsch2010; @Lou2013; @Nair2021ICIP; @Li_2021_ICCV]. However, many of them have limitations that prohibit them from being launched to practical systems: 1) Many of the existing algorithms [@Anantrasirichai2013; @mao_tci; @Lau2017; @Milanfar2013; @Anantrasirichai2013; @Hirsch2010] are based on the principle of *lucky imaging* that requires multiple input frames. These methods often have a strong assumption that both the camera and the moving objects are static, which can easily become invalid in many real applications. 2) The conventional algorithms are often computationally expensive, making them unsuitable for processing large-scale datasets to meet the need of the latest computer vision systems. 3) Existing deep learning solutions [@Yasarla2021ICIP; @Nair2021ICIP; @Lau2021] are not utilizing the physics of the turbulence. Many of them are also tailored to recovering faces instead of generic scenes. The generalization is therefore a question. 4) The algorithms may not be properly evaluated due to the absence of a widely accepted real large-scale benchmarking dataset.
|
| 6 |
+
|
| 7 |
+
To articulate the aforementioned challenges, in this paper we make three contributions:
|
| 8 |
+
|
| 9 |
+
1. We present a comprehensive benchmark evaluation of deep-learning based image restoration algorithms through atmospheric turbulence. We tune a sophisticated physics-grounded simulator to generate a large-scale dataset, covering a broad variety of atmospheric turbulence effects. The highly realistic and diverse dataset leads to exposing shortages of current turbulence mitigation algorithms.
|
| 10 |
+
|
| 11 |
+
2. Realizing the existing algorithms' limitations, we introduce a novel physics-inspired turbulence restoration model, termed *TurbNet*. Built on a transformer backbone, *TurbNet* features a modularized design that targets modeling the spatial adaptivity and long-range dynamics of turbulence effects, plus a self-supervised consistency loss.
|
| 12 |
+
|
| 13 |
+
3. We present a variety of evaluation regimes and collect two large-scale real-world turbulence *testing* datasets, one using the heat chamber for classical objective evaluation (e.g., PSNR and SSIM), and one using real long-range camera for optical text recognition as a semantic "proxy\" task. Both of the new testing sets will be released.
|
| 14 |
+
|
| 15 |
+
# Method
|
| 16 |
+
|
| 17 |
+
Consider a clean image $\mathbf{I}$ in the object plane that travels through the turbulence to the image plane. Following the classical split-step wave-propagation equation, the resulting image $\widetilde{\mathbf{I}}$ is constructed through a sequence of operations in the phase domain: $$\begin{equation}
|
| 18 |
+
\mathbf{I} \rightarrow \text{Fresnel} \rightarrow \text{Kolmogorov} \rightarrow \cdots \text{Fresnel} \rightarrow \text{Kolmogorov} \rightarrow \widetilde{\mathbf{I}},
|
| 19 |
+
\label{eq: Kolmogorov}
|
| 20 |
+
\end{equation}$$ where "Fresnel" represents the wave propagation step by the Fresnel diffraction, and "Kolmogorov" represents the phase distortion due to the Kolmogorov power spectral density [@Kolmogorov1941].
|
| 21 |
+
|
| 22 |
+
Certainly, Eqn. [\[eq: Kolmogorov\]](#eq: Kolmogorov){reference-type="ref" reference="eq: Kolmogorov"} is implementable as a forward equation (ie for simulation) but it is nearly impossible to be used for solving an inverse problem. To mitigate this modeling difficulty, one computationally efficient approach is to approximate the turbulence as a composition of two processes: $$\begin{equation}
|
| 23 |
+
\widetilde{\mathbf{I}} = \Big( \;\; \underset{\;\; \text{blur}}{\underbrace{\mathcal{H}}} \;\; \circ \underset{\text{geometric}}{\underbrace{\mathcal{G}}} \Big) (\mathbf{I}) + \mathbf{N},
|
| 24 |
+
\label{eq: main}
|
| 25 |
+
\end{equation}$$ where $\mathcal{H}$ is a convolution matrix representing the spatially *varying* blur, and $\mathcal{G}$ is a mapping representing the geometric pixel displacement (known as the tilt). The variable $\mathbf{N}$ denotes the additive noise / model residue in approximating Eqn. [\[eq: Kolmogorov\]](#eq: Kolmogorov){reference-type="ref" reference="eq: Kolmogorov"} with a simplified model. The operation "$\circ$" means the functional composition. That is, we first apply $\mathcal{G}$ to $\mathbf{I}$ and then apply $\mathcal{H}$ to the resulting image.
|
| 26 |
+
|
| 27 |
+
We emphasize that Eqn. [\[eq: main\]](#eq: main){reference-type="ref" reference="eq: main"} is only a mathematically convenient way to derive an approximated solution for the inverse problem but not the true model. The slackness lies in the fact that the pixel displacements in $\mathcal{G}$ across the field of view are correlated, as are the blurs in $\mathcal{H}$. The specific correlation can be referred to the model construction in the phase space, for example [@chimitt_chan_sim]. In the literature, the shortcoming of the model in Eqn. [\[eq: main\]](#eq: main){reference-type="ref" reference="eq: main"} is recognized, although some successful algorithms can still be derived [@Anantrasirichai2013; @Milanfar2013].
|
| 28 |
+
|
| 29 |
+
The simultaneous presence of $\mathcal{H}$ and $\mathcal{G}$ in Eqn. [\[eq: main\]](#eq: main){reference-type="ref" reference="eq: main"} makes the problem hard. If there is only $\mathcal{H}$, the problem is a simple deblurring. If there is only $\mathcal{G}$, the problem is a simple geometric unwrapping. Generic deep-learning models such as [@Yasarla2021ICIP; @Nair2021ICIP] adopt network architectures for classical restoration problems based on conventional CNNs, which are developed for one type of distortion. Effectively, their models treat the problem as $$\begin{equation}
|
| 30 |
+
\widetilde{\mathbf{I}} = \mathcal{T}(\mathbf{I}) + \mathbf{N},
|
| 31 |
+
\end{equation}$$ where $\mathcal{T} = \mathcal{G}\circ\mathcal{H}$ is the overall turbulence operator. Without looking into how $\mathcal{T}$ is constructed, existing methods directly train a generic restoration network by feeding it with noisy-clean training pairs. Since there is no physics involved in this generic procedure, the generalization is often poor.
|
| 32 |
+
|
| 33 |
+
Contrary to previous methods, in this paper, we propose to jointly estimate the physical degradation model $\mathcal{T}$ of turbulence along with reconstruction of clean image from the degraded input $\widetilde{\mathbf{I}}$. Such formulation explicitly forces our model to focus on learning a generic turbulence degradation operator independent of image contents, along with the reconstruction operation to generate clean output. Moreover, our network training is assisted by high-quality, large-scale, and physics-motivated synthetic training data to better learn the key characteristics of the atmospheric turbulence effect. The detailed model architecture will be presented in the following subsection.
|
| 34 |
+
|
| 35 |
+
{#fig:overall_architecture width="\\textwidth"}
|
| 36 |
+
|
| 37 |
+
CNNs have been *de facto* choice by most of the previous image restoration algorithms, yet they are limited by two primary issues: 1) The convolutional filters cannot adapt to image content during inference due to their static weights. 2) The local receptive fields cannot model the long-range pixel dependencies. A key characteristic of the atmospheric turbulence effect is the "lucky effect\" [@Fried78], meaning that **image regions** or **frames** with less degradation will randomly occur due to the distortions being spatially varying. Previous restoration methods treat turbulence restoration as a regression problem using CNNs but ignore the fact that turbulence is highly location adaptive and should not be represented as static fixed kernel applied to all locations. It is not difficult to see that applying static weight convolutions to regions with drastically different distortions will lead to sub-optimal performance.
|
| 38 |
+
|
| 39 |
+
The *self-attention* mechanism proposed in recent work [@vaswani2017attention; @Wang2018NonlocalNN; @dosovitskiy2020image] can be a powerful alternative, as it can capture context-dependent global interactions by aggregating information across image regions. Leveraging the capability of multi-head self-attention, we propose the ***TurbNet***, a transformer-based end-to-end network for restoring turbulence degraded images. Transformer-based architecture allows the creation of input-adaptive and location-adaptive filtering effect using *key, query,* and *weight*, where *key* and *query* decide content-adaptivity while *weight* brings location-adaptivity. Our design, as shown in Figure [1](#fig:overall_architecture){reference-type="ref" reference="fig:overall_architecture"}, is composed of several key building blocks:
|
| 40 |
+
|
| 41 |
+
Our proposed network consists of a transformer-based backbone that has the flexibility of constructing an input-adaptive and location-adaptive unique kernel to model spatially- and instance-varying turbulence effect. Inspired by the success of [@ronneberger2015u; @wang2021uformer; @zamir2021restormer] in various common image restoration tasks (e.g., denoising, deblurring, etc.), TurbNet adopts a U-shape encoder-decoder architecture due to its hierarchical multi-scale representation while remaining computationally efficient. As shown in Figure [1](#fig:overall_architecture){reference-type="ref" reference="fig:overall_architecture"} (b), the residual connection across the encoder-decoder provides an identity-based connection facilitating aggregation of different layers of features. Our backbone consists of three modules: input projection, deep encoder and decoder module. The input projection module uses convolution layers to extract low frequency information and induces a dose of convolutional inductive bias in the early stage, improving the representation learning ability of transformer blocks [@xiao2021early]. Deep encoder and decoder modules are mainly composed of a sequential cascade of Multi-head channel attention (MHCA) based transformer layers. Compared to prevalent CNN-based turbulence mitigation models, this design allows content-based interactions between image content and attention weights, which can be interpreted as spatially varying convolution [@cordonnier2019relationship].
|
| 42 |
+
|
| 43 |
+
The primary challenge of applying conventional transformer blocks to the image restoration task comes from the quadratic growth of key-query dot product interactions, i.e., $\mathcal{O}(W^2H^2)$, for images with $W \times H$ pixels. To alleviate this issue, we adopt the idea of applying self-attention across channels instead of the spatial dimension [@zamir2021restormer], and compute the cross-covariance across channels, generating the attention map. Given *query* ($\mathbf{Q}$), *key* ($\mathbf{K}$), and *value* ($\mathbf{V}$), we reshape $\mathbf{Q}$ and $\mathbf{K}$ such that their dot-product generates a transposed-attention map $\mathbf{A} \in \mathbb{R}^{C \times C}$, instead of the conventional $\mathbb{R}^{HW \times HW}$ [@dosovitskiy2020image]. Overall, the MHCA can be summarized as: $$\begin{equation}
|
| 44 |
+
\mathbf{X'} = \mathbf{W_p}\ \text{Attention} (\mathbf{Q, K, V}) + \mathbf{X}
|
| 45 |
+
\end{equation}$$ $$\begin{equation}
|
| 46 |
+
\text{Attention} (\mathbf{Q, K, V}) = \mathbf{V} \cdot \text{softmax} \bigg\{ \frac{\mathbf{K \cdot Q}}{\alpha}\bigg\}
|
| 47 |
+
\end{equation}$$ where $\mathbf{X'}$ and $\mathbf{X}$ are input and output feature maps, $\mathbf{W_p^{(\cdot)}}$ is the 1 $\times$ 1 point-wise convolution, and $\alpha$ is a learnable scaling parameter to control the magnitude of $(\mathbf{K \cdot Q})$ before applying softmax.
|
| 48 |
+
|
| 49 |
+
To further enhance deep features generated by the transformer backbone, TurbNet uses the reconstruction block. The primary job of the reconstruction block is to take the deep features produced by the transformer backbone for the turbulence-degraded input image $\widetilde{\mathbf{I}}$, and further enrich them at high spatial resolution by encoding information from spatially neighboring pixel positions. Next, the enriched features pass through an output projection module with 3 $\times$ 3 convolution layers to project them back to a low-dimensional feature map corresponding to the reconstructed clean image $\mathbf{J}$. The design of the reconstruction block is very similar to that of the encoder block with MHCA, with the introduction of a Locally-Enhanced Feed Forward Network (LoFFN) [@wang2021uformer].
|
| 50 |
+
|
| 51 |
+
<figure id="fig:LoFFN" data-latex-placement="h!">
|
| 52 |
+
<embed src="images/LoEFFN.pdf" />
|
| 53 |
+
<figcaption>Locally-Enhanced Feed Forward Network (LoFFN) used in the image reconstruction block and the turbulence degradation block.</figcaption>
|
| 54 |
+
</figure>
|
| 55 |
+
|
| 56 |
+
Precisely, the work of Reconstruction module can be summarized as: $$\begin{equation}
|
| 57 |
+
\underset{\substack{\text{Deep Features of degraded} \\ \text{Input Image $\widetilde{\mathbf{I}}$}}}{\underbrace{\mathbf{F_{\widetilde{I}}}}}
|
| 58 |
+
\rightarrow \text{Reconstruction Module} \rightarrow
|
| 59 |
+
\underset{\substack{\text{Reconstructed Clean} \\ \text{Output Image}}}{\underbrace{\mathbf{J_{\widetilde{I}}}}}
|
| 60 |
+
\end{equation}$$
|
| 61 |
+
|
| 62 |
+
In TurbNet, the turbulence degradation module learns the physical turbulence degradation operator $\mathcal{T}$ from the input synthetic training data. The primary job of turbulence degradation module is to take clean reconstructed image $\mathbf{J_{\widetilde{I}}}$ corresponding to degraded input image $\mathbf{\widetilde{I}}$, apply the learned degradation operator $\mathcal{T}$, to construct back the **re-degraded** input image $\mathbf{\widetilde{I}_\mathcal{T}}$. This formulation enriches the training set by incorporating additional latent degradation images ($\mathbf{\widetilde{I}_\mathcal{T}}$), in addition to synthesized degraded images ($\mathbf{\widetilde{I}}$), during the training process. Additionally, this module facilitates self-supervised learning without the availability of ground truth. The architecture of this module is the same as Image Reconstruction Block with LoFFN.
|
| 63 |
+
|
| 64 |
+
Precisely, the work of Degradation Block can be summarized as: $$\begin{equation}
|
| 65 |
+
\underset{\substack{\text{Reconstructed Clean} \\ \text{Output Image}}}{\underbrace{\mathbf{J_{\widetilde{I}}}}}
|
| 66 |
+
\rightarrow \text{Degradation Operator $\mathcal{T(\cdot)}$ } \rightarrow
|
| 67 |
+
\underset{\substack{\text{Re-degraded} \\ \text{Output Image}}}{\underbrace{\mathbf{\widetilde{I}_\mathcal{T}}}}
|
| 68 |
+
\end{equation}$$
|
| 69 |
+
|
| 70 |
+
TurbNet optimization requires the joint optimization of the reconstruction operation and the turbulence degradation operation. Given the synthetic training pair of degraded input $\mathbf{\widetilde{I}}$, and corresponding ground truth image $\mathbf{I}$, we formulate the following two losses: $$\begin{equation}
|
| 71 |
+
\underset{\text{Supervised Reconstruction Loss}}{\underbrace{\mathcal{L}_0}}\ \ \ \ = || \mathbf{J_{\widetilde{I}}} - \mathbf{I} ||_1
|
| 72 |
+
\end{equation}$$
|
| 73 |
+
|
| 74 |
+
$$\begin{equation}
|
| 75 |
+
\underset{\text{Self-supervised Reconstruction Loss}}{\underbrace{\mathcal{L}_1}} = || \mathbf{\widetilde{I}_\mathcal{T}} - \mathbf{\widetilde{I}} ||_1
|
| 76 |
+
\end{equation}$$ where $\mathcal{L}_0$ is responsible for constructing a clean image $\mathbf{J_{\widetilde{I}}}$ given the degraded input image $\widetilde{\mathbf{I}}$, and $\mathcal{L}_1$ helps to ensure that the degradation operator $\mathcal{T}$ can reconstruct the original input $\widetilde{\mathbf{I}}$ from the reconstructed clean image $\mathbf{J_{\widetilde{I}}}$.
|
| 77 |
+
|
| 78 |
+
Eventually, the overall loss $\mathcal{L}$ to train TurbNet can be summarized as: $$\begin{equation}
|
| 79 |
+
\mathcal{L} = \alpha \times \mathcal{L}_0 + (1 - \alpha) \times \mathcal{L}_1
|
| 80 |
+
\end{equation}$$
|
| 81 |
+
|
| 82 |
+
As shown in Figure [1](#fig:overall_architecture){reference-type="ref" reference="fig:overall_architecture"}(a), TurbNet utilizes a U-shape architecture built upon transformer blocks to extract deep image features. As suggested in [@xiao2021early], an initial convolution-based input projection is used to project the input image to higher dimensional feature space, which can lead to more stable optimization and better results. After obtaining the feature maps, TurbNet jointly learns the turbulence degradation operator ($\mathcal{T}$) along with the reconstructed image ($\mathbf{J_{\widetilde{I}}}$), in contrary to general image restoration methods [@wang2021uformer; @Liu2021SwinTH; @Chen2021PreTrainedIP; @zamir2021restormer] that directly reconstruct the clean image. This design facilitates spatial adaptivity and long-range dynamics of turbulence effects, plus a self-supervised consistency loss.
|
| 83 |
+
|
| 84 |
+
With a pre-trained TurbNet model $\mathcal{M(\cdot)}$ using the synthetic data, the TurbNet design allows an effective way of generalizing $\mathcal{M(\cdot)}$ on unseen real data (if required) with the help of the degradation operator $\mathcal{T}(\cdot)$ in a self-supervised way. Starting from model $\mathcal{M(\cdot)}$, we create a generalization dataset by incorporating unlabelled real data with the synthetic data to fine-tune $\mathcal{M(\cdot)}$. For input images with no ground truth, $\mathcal{M(\cdot)}$ is optimized using Equation (9), while for input images from labeled synthetic data, $\mathcal{M(\cdot)}$ is optimized using Equations (8) and (9). Note that we incorporate synthetic data into the fine-tuning process to mitigate the issue of catastrophic forgetting during generalization.
|
| 85 |
+
|
| 86 |
+
Training a deep neural network requires data, but the real clean-noisy pair of turbulence is nearly impossible to collect. A more feasible approach here is to leverage a powerful turbulence simulator to synthesize the turbulence effects.
|
| 87 |
+
|
| 88 |
+
Turbulence simulation in the context of deep learning has been reported in [@Yasarla2021ICIP; @Nair2021ICIP; @Lau2021]. Their model generates the geometric distortions by repeatedly smoothing a set of random spikes, and the blur is assumed to be spatially invariant Gaussian [@Lau2021_sim]. We argue that for the face images studied in [@Yasarla2021ICIP; @Nair2021ICIP; @Lau2021], the narrow field of view makes their simplified model valid. However, for more complex scenarios, such a simplified model will fail to capture two key phenomena that could cause the training of the network to fail: (1) The instantaneous distortion of the turbulence can vary significantly from one observation to another even if the turbulence parameters are fixed. See Figure [3](#fig:illustration){reference-type="ref" reference="fig:illustration"}(a) for an illustration from real data. (2) Within the same image, the distortions are spatially varying. See Figure [3](#fig:illustration){reference-type="ref" reference="fig:illustration"}(b).
|
| 89 |
+
|
| 90 |
+
{#fig:illustration width="7cm"}
|
| 91 |
+
|
| 92 |
+
In order to capture these phenomena, we adopt an advanced simulator [@Mao_2021_ICCV] to synthesize a large-scale *training* dataset for atmospheric turbulence. The clean data used by the simulator is the *Places* dataset [@zhou2017places]. A total of 50,000 images are generated, and the turbulence parameters are configured to cover a wide range of conditions. The details of the simulation can be found in the supplementary material. We remark that this is the first attempt in the literature to systematically generate such a comprehensive and large-scale training dataset.
|
| 93 |
+
|
| 94 |
+
Our real benchmarking dataset consists of two parts: the *Heat Chamber Dataset* and the *Turbulent Text Dataset*. Although this paper focuses on single frame restoration, both our benchmarking datasets contain 100 static turbulence degraded frames for each scene. We believe that by doing so, researchers in the field working on multi-frame reconstruction can also benefit from our dataset. Both datasets will be made **publicly available**.
|
| 95 |
+
|
| 96 |
+
**Heat Chamber Dataset.** The *Heat Chamber Dataset* is collected by heating the air along the imaging path to artificially create a stronger turbulence effect. The setup for collecting the heat chamber dataset is shown in Figure [4](#fig:heat_setup){reference-type="ref" reference="fig:heat_setup"}. Turbulence-free ground truth images can be obtained by shutting down the heat source. The images are displayed on a screen placed 20 meters away from the camera.
|
| 97 |
+
|
| 98 |
+
{#fig:heat_setup width="80%"}
|
| 99 |
+
|
| 100 |
+
We remark that while similar datasets have been collected in [@Hirsch2010; @Anantrasirichai2013], our data has a clear improvement: we use a long path and more evenly distributed heat so that the turbulence effect is closer to the true long-range effect. The captured images have a better anisoplanatic (spatially varying) effect such that an almost distortion-free frame is less likely to occur compared with the datasets in [@Hirsch2010; @Anantrasirichai2013]. In addition, our dataset is much larger in scale. It contains 2400 different images, which allows for a better evaluation of the learning-based model. Sample images of the *Heat Chamber Dataset* can be found in Figure [5](#fig:heat_sample){reference-type="ref" reference="fig:heat_sample"}.
|
| 101 |
+
|
| 102 |
+
<figure id="fig:heat_sample" data-latex-placement="h!">
|
| 103 |
+
<table>
|
| 104 |
+
<tbody>
|
| 105 |
+
<tr>
|
| 106 |
+
<td style="text-align: center;"><img src="./images/dataset_samples/heatchamber_sample/heat1.png" style="width:12.0%" alt="image" /></td>
|
| 107 |
+
<td style="text-align: center;"><img src="./images/dataset_samples/heatchamber_sample/heat4.png" style="width:12.0%" alt="image" /></td>
|
| 108 |
+
<td style="text-align: center;"><img src="./images/dataset_samples/heatchamber_sample/heat3.png" style="width:12.0%" alt="image" /></td>
|
| 109 |
+
<td style="text-align: center;"><img src="./images/dataset_samples/heatchamber_sample/heat2.png" style="width:12.0%" alt="image" /></td>
|
| 110 |
+
<td style="text-align: center;"><img src="./images/dataset_samples/heatchamber_sample/heat5.png" style="width:12.0%" alt="image" /></td>
|
| 111 |
+
<td style="text-align: center;"><img src="./images/dataset_samples/heatchamber_sample/heat6.png" style="width:12.0%" alt="image" /></td>
|
| 112 |
+
</tr>
|
| 113 |
+
<tr>
|
| 114 |
+
<td style="text-align: center;"><img src="./images/dataset_samples/heatchamber_sample/gt1.png" style="width:12.0%" alt="image" /></td>
|
| 115 |
+
<td style="text-align: center;"><img src="./images/dataset_samples/heatchamber_sample/gt4.png" style="width:12.0%" alt="image" /></td>
|
| 116 |
+
<td style="text-align: center;"><img src="./images/dataset_samples/heatchamber_sample/gt3.png" style="width:12.0%" alt="image" /></td>
|
| 117 |
+
<td style="text-align: center;"><img src="./images/dataset_samples/heatchamber_sample/gt2.png" style="width:12.0%" alt="image" /></td>
|
| 118 |
+
<td style="text-align: center;"><img src="./images/dataset_samples/heatchamber_sample/gt5.png" style="width:12.0%" alt="image" /></td>
|
| 119 |
+
<td style="text-align: center;"><img src="./images/dataset_samples/heatchamber_sample/gt6.png" style="width:12.0%" alt="image" /></td>
|
| 120 |
+
</tr>
|
| 121 |
+
</tbody>
|
| 122 |
+
</table>
|
| 123 |
+
<figcaption>Sample turbulence degraded images (top) and corresponding ground truth (bottom) from our <em>Heat Chamber Dataset</em>. The <span class="math inline"><em>D</em>/<em>r</em><sub>0</sub></span> is estimated to be around 3. </figcaption>
|
| 124 |
+
</figure>
|
| 125 |
+
|
| 126 |
+
**Turbulence Text Dataset.** Due to the nature of the problem, it is extremely difficult, if not impossible, to capture ground truth clean images in truly long-range settings. Therefore, we adopt the idea of using the performance of high-level vision task as an evaluation metric for image restoration [@Li2019dehaze; @ICCV17aodnet]. Specifically, we calculate the detection ratio and longest common subsequence on the output of an OCR algorithm [@tian2016detecting; @OCR1] as the evaluation metrics. The terms will be defined in section 5.4.
|
| 127 |
+
|
| 128 |
+
There are several advantages of using text recognition: 1) The degradation induced by atmospheric turbulence, the geometric distortion and the loss of resolution, can be directly reflected by the text patterns. Both types of degradation need to be removed for the OCR algorithms to perform well. 2) The OCR is a mature application. The selected algorithms should be able to recognize the text patterns as long as the turbulence is removed. Other factors such as the domain gap between the training and testing data will not affect the evaluation procedure as much as other high-level vision tasks. 3) An important factor to consider when designing the dataset is whether the difficulty of the task is appropriate. The dataset should neither be too difficult such that the recognition rate cannot be improved by the restoration algorithms nor too easy making all algorithms perform similarly. We can easily adjust the font size and contrast of text patterns to obtain a proper difficulty level.
|
| 129 |
+
|
| 130 |
+
The *Turbulence Text Dataset* consists of 100 scenes, where each scene contains 5 text sequences. Each scene has 100 static frames. It can be assumed that there is no camera and object motion within the scene, and the observed blur is caused by atmospheric turbulence. The text patterns come in three different scales, which adds variety to the dataset. We also provide labels to crop the individual text patterns from the images. Sample images from the dataset are shown in Figure [6](#fig:text_samples){reference-type="ref" reference="fig:text_samples"}.
|
| 131 |
+
|
| 132 |
+
<figure id="fig:text_samples" data-latex-placement="h!">
|
| 133 |
+
<embed src="images/dataset_samples/text_sample/text_samples.pdf" style="width:10cm" />
|
| 134 |
+
<figcaption>Data collection site of the <em>Turbulence Text Dataset</em>. The distance between the camera and the target is 300 meters. The <span class="math inline"><em>D</em>/<em>r</em><sub>0</sub></span> is estimated to be in range of 2.5 to 4 (varies due to the temperature change during the collection process). The collected text patterns are in 3 different scales. </figcaption>
|
| 135 |
+
</figure>
|
| 136 |
+
|
| 137 |
+
::: {#table:syn_psnr_ssim}
|
| 138 |
+
+------+----------------------------+-----------------------+-------------------------+---------------------------+--------------------------------+-------------+
|
| 139 |
+
| | TDRN[@yasarla2020learning] | MTRNN[@park2020multi] | MPRNet[@zamir2021multi] | Uformer[@wang2021uformer] | Restormer[@zamir2021restormer] | **TurbNet** |
|
| 140 |
+
+:=====+:==========================:+:=====================:+:=======================:+:=========================:+:==============================:+:===========:+
|
| 141 |
+
| | **Synthetic Dataset** |
|
| 142 |
+
+------+----------------------------+-----------------------+-------------------------+---------------------------+--------------------------------+-------------+
|
| 143 |
+
| PSNR | 21.35 | 21.95 | 21.78 | 22.03 | 22.29 | **22.76** |
|
| 144 |
+
+------+----------------------------+-----------------------+-------------------------+---------------------------+--------------------------------+-------------+
|
| 145 |
+
| SSIM | 0.6228 | 0.6384 | 0.6410 | 0.6686 | 0.6719 | **0.6842** |
|
| 146 |
+
+------+----------------------------+-----------------------+-------------------------+---------------------------+--------------------------------+-------------+
|
| 147 |
+
| | **HeatChamber Dataset** |
|
| 148 |
+
+------+----------------------------+-----------------------+-------------------------+---------------------------+--------------------------------+-------------+
|
| 149 |
+
| PSNR | 18.42 | 18.12 | 18.68 | 19.12 | 19.01 | **19.76** |
|
| 150 |
+
+------+----------------------------+-----------------------+-------------------------+---------------------------+--------------------------------+-------------+
|
| 151 |
+
| SSIM | 0.6424 | 0.6379 | 0.6577 | 0.6840 | 0.6857 | **0.6934** |
|
| 152 |
+
+------+----------------------------+-----------------------+-------------------------+---------------------------+--------------------------------+-------------+
|
| 153 |
+
|
| 154 |
+
: Performance comparison of state-of-art restoration baselines with respect to TurbNet on synthetic and *Heat Chamber* dataset.
|
| 155 |
+
:::
|
| 156 |
+
|
| 157 |
+
[]{#table:syn_psnr_ssim label="table:syn_psnr_ssim"}
|
2207.10553/main_diagram/main_diagram.drawio
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
<mxfile host="app.diagrams.net" modified="2022-06-02T03:56:47.638Z" agent="5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/102.0.5005.63 Safari/537.36" etag="cQP9blXWvn_uR9-FtkF0" version="18.1.2" type="google"><diagram name="Page-3" id="s-Gj2U2n1-xarc2_In8l">7Vztc6I4GP9r/LgOEMLLR2vb7cxtezvXmb3ep5sIUbkicSFWu3/9JRKQACq+gLFb7UxJSKI8z++X5HkxPTCcrb7GaD59JD4Oe4bmr3rgtmcYumkYPf6n+e9pje2aacUkDnzRaFPxHPzColITtYvAx4nUkBIS0mAuV3okirBHpToUx2QpNxuTUP7UOZqIT9Q2Fc8eCnGl2d+BT6dprWPYm/oHHEym2SfrlpvemaGssRg4mSKfLAtV4K4HhjEhNL2arYY45MLL5PLkvsxA8Ne7S+6dVfKwtH58//ElHez+kC75I8Q4oucdWij3DYULIS/xrPQ9EyD2mTxFkcR0SiYkQuHdpvYmJovIx/xjNFbatPlGyJxV6qzyP0zpuwAHWlDCqqZ0Foq7YxLRezQLQo6xBxy+YRp4SNwQvYAmykMSkphV+HiMFiGTx01D8QgxJmQRe3hHOyBQiuIJ3jmeaMgFVMCakP5XTGaYxu+sQYxDRIM3GZBI4HqSt9vojl0I9R2gSrBflRtFcakvpwHFz3O0FsaSsf9wpejscW8SGpPXnFzgYIW84Zji1U4JZnc1q29YaS8xHQEg2LnckNty7b4pWDwtcNuwtZZkb9bI3lpjM5mjSFKC9XPBJ4y1FL8kazEOWAOgzVdryWX32dWE/39EyStTmaE9MrAHXx4w4oUBpUy2AYmyzxnFWYdDatjDpl8wq+4QMBmPe0yHFnCBXwulcRCGhZbjscdebUIMOn1XQpgJqwiDrt6HdhVhugFbQhjcz+496kHJPF1fx8GKq7Qs2pHnQ+TVqcd2EISoW6a7mqQEy6xRglZVAGyL4dbvJf98qlRF/vbvJX8AFZO/07r8keX7GNXK3/KAZ3cqf6grJn/3ovIfgmHH8nfcvlt8qaWNDB3dmkl4FdAX3r0PRemfwp3blRh5XXjPChF73kInXsx78cKm27qU9WuymTPgVpPsVKTsNdLEfLTXSDPFgOcz0tZdB3GM3gsN5iSIaFIY+TuvKOAZQAnBtlky1kvtXVPb1Z5dpN9gA+H8UU5AdUbT32SSsSy1J5kGzpkT1TE2XB84depArsVMn07VYQLF1ly9zqWSGs581mtk1htbzPpb7BEfxwVDPB1yryGu7TfEK8p0LZsxr4l9jXUfYrtT+zr3FJfsa72qaas1Tdc5cMoLfOQPuD+albwQJUngyVKXfSXJK6betLCYFoQMh/xdpyhr/aqqxdCH2oCvtt4ifsthUNXmsat9/c4i30zU7izOvKCDLDhwNrdrw2njsIW/slKbJUvN1EogTZ9c9Nqx5ANrz0CpZCoDnW31b+Bi+uRAqxzQG4ceXKU4wHa3fU2X0KuXfe1NaWA61bE6JoKivr4JZ5wAnrwfyCOTWpl1PkqmOR8J+74B5UJ1DgfvAUu8bfYteY13L72TU9R9eC0qNaB6Km3fI/mhVQqAeiq9rJOzcyez6SqngizXRTUX0LWwCvK1z9nmUrq4dvWKMi/mtt69sd62id9s2ztyW09JHPxibVH2NCEa4ZCJwcdx1j8iEW7BGBDYuRoXtyHbr+AwD7fcvB0HdzaZfc5uRzrModKzW4MMuEv4y69Fu6au3nakgV92X1ZaOYEMW17tnt633ZFW71JqS+LlLCeoXVrep6eZqSzvclbT5eV9uqtJZXmXs5guLm9wunmjsrzLEdTLy7suo+DACOq2xOh7vE6LvifxEsX8
6gnTJYlfDw+pKpLb7HkYjsdtLjfQ7juGhJA86CQFX0Hfhl2ipMG2/CoiT2c0dPdFqw7DyF5jU6h7r62ZbcgUCTw58nx3dNDJKc+b8jAtB5xAA8PlE/+t4t9qin+oFP7ZQOVgqVUGb1MOGK6cogjL2TZts+Aqc3COxe952HNmFthNWWCpxYJq+oFVBm9jFpQSdWF5SWmbBVeZhfOhWJDl3e6lQWZtqUIDs0qDYzdEbCyZBk7HNGjgGPmkQas0cNQCN6yCu4zJpuAu/xgDdr3fb5CNpB64W/9Jj1o5jWwjXQGceyTgLP3CgGuQK6Ue4D7UbJr9tqq4qfgXvwwev/18tmea9sfT+HEWkT+zE1lUYYFdYYF9bIK7VTYwu2ZBg/Qy9VjwoRLczSoJdsYNFGGBydcCDWrZSyYEMI8kBBtWGsjoeJOdfe8TojMm3BKdeeoNQc+1mwZjKF5RmWkoDCYRpyFDHmYMueFxjsBD4UDcmAW+nyaoYfZ90Gg9FMesSFRi48KbHrzlYy0oSUSQpsJIkb1VJGNWVXP61NYg0KnHUh1yCpKEGrPmZ5L2Di6c//Cj04N8xjYY5UcX5cchPd+x9t9IkhSgNao51+h8P6dsfDpZd4Fep5TIUBfG0806EJzhfKLabcvpU4neHAMoeT1F+wdG+CF/71zIa3K+YuQHWNoQjNbvXinrqy2ElA8w6TIVoBYgp08SnwA5K0AumHu2y/D5BIgiALG6S5Zjxc0xqunudnMYLbj7Hw==</diagram></mxfile>
|
2207.10553/main_diagram/main_diagram.pdf
ADDED
|
Binary file (19.9 kB). View file
|
|
|
2207.10553/paper_text/intro_method.md
ADDED
|
@@ -0,0 +1,68 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Introduction
|
| 2 |
+
|
| 3 |
+
The study of behavior in multiple interacting agents is an element of diverse scientific and engineering applications, from designing safer autonomous vehicles , to simulating realistic behavior in virtual worlds , to understanding the biological underpinnings of neurological disorders . Across disciplines, there is a need for new techniques to characterize the structure of multi-agent behavior with greater precision, sensitivity, and detail. Towards this, many works have made efforts to discover behavioral representations with unsupervised or self-supervised learning, often without manual annotations. To evaluate progress in learning behavioral representations, we introduce a new benchmark of multi-agent behavior in mice and fruit flies, two common model organisms from neuroscience and biology.
|
| 4 |
+
|
| 5 |
+
Historically, animal behavior has been investigated by collecting video recordings and producing manual frame-by-frame annotations of animals' actions, a costly and time-consuming process that has been a bottleneck in behavior analysis. New advances in machine learning and computer vision have more recently enabled the automated tracking and quantification of animal behavior, promising to overcome the annotation bottleneck in behavioral experiments . In scientific domains, the promise of these automated methods has fueled a new interest in long-term, high volume recording of animal behavior.
|
| 6 |
+
|
| 7 |
+
Still, extracting behavior annotations from behavioral videos or tracked animal poses is not trivial. Due to issues such as cost and subjectivity in obtaining supervised behavior labels, many research groups have studied unsupervised or self-supervised methods to identify animal behavior from trajectory data, without human labels. However, due to a lack of standardized benchmark datasets or metrics in this emerging field, each group typically develops and evaluates methods on its own in-house data: there is no consensus as to what constitutes a "good" unsupervised representation of animal pose and movement. As a result, it is difficult to evaluate progress in unsupervised and self-supervised representation learning methods for behavior analysis.
|
| 8 |
+
|
| 9 |
+
To address this challenge, we have developed a novel dataset and benchmark for representation learning of multi-agent behavior. We take a utilitarian approach that is informed by current common objectives of automated behavior analyses performed in biology and neuroscience. Specifically, our benchmark scores the "quality" of a learned representation of animal behavior in terms of its performance on a gauntlet of downstream tasks, modeled after common scientific applications (Figure ). Similar evaluation methods have been used in other domains for evaluating visual representations and neural mechanistic models .
|
| 10 |
+
|
| 11 |
+
We present the 2022 Multi-Agent Behavior (MABe22) Dataset, a dataset of tracked social behavior trajectories using common model organisms in behavioral neuroscience: groups of the vinegar fly {\em Drosophila melanogaster} and triplets of laboratory mice {\em Mus musculus}. Our initial benchmarking efforts include both standard baseline methods as well as novel methods solicited from the Multi-Agent Behavior Challenge hosted at CVPR 2022, using MABe2022. Improvements of learned representations on our benchmark correspond to representations that are more discriminative on common behavior analysis tasks, and evaluating on multiple settings and organisms tests the general applicability of the representation learning method.
|
| 12 |
+
|
| 13 |
+
\centering
|
| 14 |
+
\includegraphics[width=\linewidth]{figures/MABe_2022_Intro_2.pdf}
|
| 15 |
+
\caption{Overview of MABe2022. We aim to benchmark representation learning for behavior analysis using model organisms across different experimental setups from behavioral neuroscience. Our evaluation procedure consists of a broad range of hidden tasks, such as biological variables (ex: strain), environmental manipulations (ex: optogenetic stimulation), and annotated behaviors (ex: aggression), used to evaluate behavioral representations. In comparison, past behavior analysis datasets usually focus on a specific experimental setup with specific behaviors of interest.}
|
| 16 |
+
|
| 17 |
+
# Method
|
| 18 |
+
|
| 19 |
+
Our dataset is intended for development and evaluation of new representation learning methods for multi-agent behavior. The agents in our dataset are common model organisms in behavioral neuroscience: mice and flies (Figure ). We curated 968 30-second clips for flies at 150Hz and 5336 60-second clips for mice at 30Hz, sub-sampled from a larger repository of video data. Agents' postures and movements in each clip are provided in the form of trajectory data: we track a set of anatomically defined keypoints on each agent (Figure (e)), and also track identity across time. Pose estimates are derived from top-view video using either an HRNet-based approach for mouse or FlyTracker for flies .
|
| 20 |
+
|
| 21 |
+
For each dataset, we constructed a collection of hidden labels, with 13 for mice and 50 for flies (Figure ). These labels include manual or semi-automated behavior annotations as well as experimental setup in a particular video that we expect to have an effect on animal behavior. Examples of tasks include biological variables (e.g. animal sex or strain), experimental manipulations (e.g. optogenetic stimulation of a population of neurons known to elicit a behavior), or environmental variables (e.g. light cycle, habituation to an environment, or time of day). These hidden labels are defined either at the sequence level (one label per sequence, as in the case of animal strain) or at the frame level (one label per frame, as in the case of behavior annotations).
|
| 22 |
+
|
| 23 |
+
For the purpose of establishing a benchmark, we defined a "good" learned representation of animal behavior as one from which we can decode these biologically meaningful hidden labels. Specifically, given a learned representation of each frame of a dataset, we trained a linear classifier to predict, for each frame, the value of each hidden label given the representation of that frame. Importantly, hidden labels are not used during training the representation learning model itself.
|
| 24 |
+
|
| 25 |
+
\centering
|
| 26 |
+
\includegraphics[width=\textwidth]{figures/data_fig.pdf}
|
| 27 |
+
\caption{Task Subset Label Distribution. Task label distribution on a subset of hidden tasks, where the top row corresponds to mouse and bottom row corresponds to fly. Note that the task distribution for fly is computed only for frames where the task is applicable (ex: frames from other lines are ignored for classifying pC1D activation). The full task set is available in the Appendix.}
|
| 28 |
+
\vspace{-0.1in}
|
| 29 |
+
|
| 30 |
+
To encourage exploration of different representation learning methods, our evaluation procedure does not have requirements on the method or form of the learned representation, aside from placing an upper limit on the dimensionality of the representation of each frame (128 for mice and 256 for flies, where there were more agents present).
|
| 31 |
+
|
| 32 |
+
Data Description. The mouse dataset consists of a set of trajectories from three interacting mice, recorded from an overhead camera in an open field measuring 52cm x 52cm, with a grate located at the northern wall of the arena giving access to food and water. Animals were introduced to the arena one by one over the first ten minutes of recording, and were recorded continuously for four days at a framerate of 30Hz and camera resolution of 800 x 800 pixels. Illumination was provided by an overhead light on a 24-hour reverse light cycle (lights off during the day and on at night); mice are nocturnal, and thus are most active during the dark. Behavior was recorded using an IR-pass filter so that light status could not be detected by eye in the recorded videos.
|
| 33 |
+
|
| 34 |
+
The pose estimation model is based on HRNet with an identity embedding network to track long-term identity (see Appendix). Similar mouse datasets have been used for studies in neuroscience, pharmacology, and biomechanics, for example in quantifying gait differences across strains , effects of pharmacological manipulation , and the relationship between neural activity and behavior . For similar datasets in single animals, representation learning methods have been shown to produce behavior motifs that agree with human-identifiable animal actions , thus increasing quantitative precision and resolution and reducing human effort for behavior analysis. These models can also help create data-efficient classifiers for supervised behavior analysis .
|
| 35 |
+
|
| 36 |
+
Tasks. The representations on the mouse dataset are evaluated on a set of 13 tasks that capture information about animal background, environment, and behavior. These tasks were selected based on their relevance to common scientific applications such as identifying the behavioral effects of differences in animals' genetic backgrounds or experimenter-imposed changes in their environment. In this dataset, we examined capacity of learned representations to determine animal strain, as well as environmental factors such as whether room lights were on or off (a proxy for day/night cycles, which modulate animal behavior). We also included two tasks to predict the day of the trajectory relative to the start of recording (animal behavior changes across days as they habituate to a new environment ), and the time of day of the trajectory (animal behavior changes over the course of a day, driven by circadian rhythms.)
|
| 37 |
+
|
| 38 |
+
A learned representation of behavior should also be rich enough to recapitulate human-produced labels of animals' moment-to-moment actions. Therefore our evaluation tasks include detection of specific behaviors, such as bouts of chasing between mice, or periods of huddling. These behaviors were annotated programmatically, using heuristics generated by trained human experts: for example, chasing is defined as periods when two animals are within a distance D, moving at a speed of at least S, for a duration of at least T frames, with no fewer than G "gap" frames in that duration that do not meet distance and speed criteria. A full description of each behavior is provided in the Appendix.
|
| 39 |
+
|
| 40 |
+
The majority of tasks are binary classification problems, such as distinguishing between two strains of mice or detecting the presence or absence of a given behavior. The two exceptions, the day of recording task and the time of day task are regression problems. Because we observed occasional identity swaps between mice in the tracking dataset, behavior-based tasks were not animal-specific, but rather were based on detecting whether a given behavior was occurring at all.
|
| 41 |
+
|
| 42 |
+
Data Description. The dataset consists of trajectories of groups of $\approx 10$ {\it Drosophila melanogaster} interacting in a 5cm-diameter dish. The trajectories were derived from 96 videos of length 50k-75k frames, collected at 1024x1024 pixels and 150 frames per second. The flies' bodies and wings are tracked using FlyTracker and landmarks on body were tracked using the Animal Part Tracker (APT) for a total of 19 points (Figure ).
|
| 43 |
+
|
| 44 |
+
Similar to mice, flies are also often used as a model organism in neuroscience , genetics , pharmacology , and biomechanics studies. Unsupervised methods to study latent structure in fly behavior have provided insights into the organization of fly movements and stereotyped structure of behavior. Learned representations can also lead to more data-efficient classifiers .
|
| 45 |
+
|
| 46 |
+
As the brain controls behavior, a good representation of behavior should change with neural activity. Thanks to its tractable genetics, precise neural activity manipulations are straightforward for Drosophila. We thus chose to perform experiments using optogenetic (light activated, via Chrimson) and thermogenetic (heat activated, via TrpA) activation of selected sets of neurons. We chose neurons (and the associated GAL4 lines) previously identified as controlling social behaviors including courtship, avoidance , and female aggression . For thermogenetic experiments, neural activation is constant and continuous for the entire video. Our optogenetic experiments consisted of activation for short periods of time at weak and strong intensities interspersed with periods of no activation (see Appendix). We combined these neural manipulations with genetic mutations and rearing conditions. Specifically, we selected populations of flies with the norpA mutation which induces blindness , and either raised groups of flies together, or separated by sex.
|
| 47 |
+
|
| 48 |
+
Tasks. The representations on the fly dataset are evaluated on a set of 50 tasks. Many of these tasks differentiate which populations of neurons are activated, and how they are activated. For example, Task 5 compares groups of 5 female and male flies for which courtship neurons targeted by the R71G01 GAL4 line to all other fly types. Task 31 compares how neurons were activated -- it compares strong and weak activation of aIPg neurons, which regulate female aggression. Besides neural activation, tasks also differentiate flies based on sex, how the flies were raised, which strain they are from, and genetic mutations. A full list of tasks and the types of flies used is in the Appendix.
|
| 49 |
+
|
| 50 |
+
Each task is based on binary classification. For some frames, the task was irrelevant, and we indicated these frames by setting the task annotation to nan, meaning no data. These frames were not used in evaluation for the task. When comparing across fly lines, we only used frames during activation periods, and frames outside of activation periods were set to nan. For comparing behavior during activation (lights on) periods to not activation (lights off) periods for a given line, the task was nan for all other lines. Videos were either of $\approx 5$ males and $\approx 5$ females or $\approx 10$ females. Mixed sex flies were either raised together or separately. A full list of the types of comparisons is in the Appendix.
|
| 51 |
+
|
| 52 |
+
Besides biological differences, we also include tasks based on manual annotations of the flies' behavior for the following social behaviors: any aggressive behavior toward another fly, chasing another fly, any courtship behavior toward another fly, high fencing, wing extension, and wing flick. We annotated behaviors sparsely across all videos with human experts using JAABA , with the goal of including annotations in a wide variety of flies and videos.
|
| 53 |
+
|
| 54 |
+
We develop an initial benchmark based on standard sequence representation learning methods, as well as include top performing methods on our dataset from the MABe Challenge at CVPR 2022. We outline our evaluation procedure on the set of behavior analysis tasks (Section ), describe benchmark models (Section ), then present results on the set of hidden tasks (Section ).
|
| 55 |
+
|
| 56 |
+
[c]{0.65\linewidth}
|
| 57 |
+
\centering
|
| 58 |
+
\vspace{-0.151in}
|
| 59 |
+
[H]
|
| 60 |
+
\centering
|
| 61 |
+
\includegraphics[width=\linewidth]{figures/MABe_evaluation.pdf}
|
| 62 |
+
|
| 63 |
+
\hfill
|
| 64 |
+
[c]{0.35\linewidth}
|
| 65 |
+
\caption{Evaluation Setup. Learned representations from an input sequence are used in a linear evaluation protocol to predict labels across the MABe2022 hidden tasks. We evaluate predictions against groundtruth task labels in each frame, using MSE for regression and F1 score for classification.
|
| 66 |
+
}
|
| 67 |
+
|
| 68 |
+
\vspace{-0.2in}
|
2209.12561/record.json
ADDED
|
@@ -0,0 +1,32 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"arxiv_id": "2209.12561",
|
| 3 |
+
"month": "2022_09",
|
| 4 |
+
"year": 2025,
|
| 5 |
+
"conference": "NEURIPS",
|
| 6 |
+
"title": "Understanding Data Influence in Reinforcement Finetuning",
|
| 7 |
+
"arxiv_url": "https://arxiv.org/abs/2209.12561",
|
| 8 |
+
"source": {
|
| 9 |
+
"paper_dir": "/home/zling/lzl/ICML2026/Build_Dataset/data/2022_09/main_diagram_database/2209.12561",
|
| 10 |
+
"tex_dir": "/home/zling/lzl/ICML2026/Build_Dataset/data/2022_09/tex_files_extracted/2209.12561",
|
| 11 |
+
"paper_md": "/home/zling/lzl/ICML2026/Build_Dataset/data/2022_09/main_diagram_database/2209.12561/paper_text/paper.md",
|
| 12 |
+
"metadata_json": "/home/zling/lzl/ICML2026/Build_Dataset/data/2022_09/main_diagram_database/2209.12561/metadata.json",
|
| 13 |
+
"intro_method_from": "/home/zling/lzl/ICML2026/Build_Dataset/data/2022_09/main_diagram_database/2209.12561/paper_text/paper.md",
|
| 14 |
+
"intro_method_from_kind": "markdown"
|
| 15 |
+
},
|
| 16 |
+
"files": {
|
| 17 |
+
"main_drawio": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2209.12561/main_diagram/main_diagram.drawio",
|
| 18 |
+
"main_png": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2209.12561/main_diagram/main_diagram.png",
|
| 19 |
+
"main_pdf": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2209.12561/main_diagram/main_diagram.pdf",
|
| 20 |
+
"intro_method_md": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2209.12561/paper_text/intro_method.md",
|
| 21 |
+
"paper_pdf": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2209.12561/paper.pdf",
|
| 22 |
+
"latex": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2209.12561/latex_source"
|
| 23 |
+
},
|
| 24 |
+
"status": {
|
| 25 |
+
"copy_drawio": "exists",
|
| 26 |
+
"copy_png": "exists",
|
| 27 |
+
"diagram_pdf": "pdf_exists",
|
| 28 |
+
"intro_method": "exists",
|
| 29 |
+
"paper_pdf": "exists",
|
| 30 |
+
"latex": "exists"
|
| 31 |
+
}
|
| 32 |
+
}
|
2210.06170/main_diagram/main_diagram.drawio
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
<mxfile host="app.diagrams.net" modified="2022-05-19T14:40:20.996Z" agent="5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/101.0.0.0 Safari/537.36" etag="CzlyXK7t9Ytwz4UJRbIJ" version="18.0.7" type="google"><diagram name="even better vertical fracs" id="fwE8MhNm8QmNq7_HjLff">xLvXruS8lib4NOdyAHlzKRtyIe/v5L338fQjZv6nuwrVg54GpjCJ3BvaCokhLnJ9ZpH6F8oN92dN5vo75UX/LwTK73+h/L8QhISJ9zc48fw9gaLI3xPV2uR/T8H/84TT/Iq/J/F/Th5NXmz/6bp9mvq9mf/zyWwaxyLb/9O5ZF2n6z9fVk79f/7SOamK/3LCyZL+v54Nmnyv/55FSPx/npeKpqr3f/cO+vvBkPz72n9a2Ookn66/p/5cgwr/Qrl1mva/R8PNFT2I3L+jcrSTyKWr9+glNT6JdFPR8X/9bV38P7nlf/RgLcb9/9um/xnJM+mPf8L1T1/359/xW6djzAvQCPQvlL3qZi+cOcnAp9c7X95z9T7071/we1g2fc9N/bT+uRcVRZGj6ff8/8se/NPTs1j34v4Pwf+nR59iGop9fd5L/pmL8L+H8fpfDG39H4YVxv85mfwznar/0db/jNl78E/Y/g9CiP6fhBD+34cwT7b6f1zbJ2nRs0nWVX/a+Hdox2ks/hfRxkWERv87ov3Pp9jfG/6JPUKh/zX2BP1fY4/9d4Ue++8M/f/PwcWQ/0Vwqf8aXJT4bwou/r8P7ouIMzhshj8YzILONi/yamDWmtPW7M00vp+n075Pw//zdH7DW/759x/aYPqmAvfuExikZJv/ckPZ3GCE2D9fyfz7LPTvM+9xnuzJv1Dm75+IOI/VvxCu8VnDviD1U03M+093vFrwqvcott5foscxETjPa9nkgQMjtMVAst0UiaEcEZ/YYvUotK/0Q0Ppp29kKe6zUZ9TBPtprXd8natKJBvKpC+hPfT+H6/VBvqJ3/EQo6hUadA6F7JyEH7fo819f2nCJTDDfL2PwIaG0AuWb2OjgdBprLZGbewHcoc0Td6/4/hi0e9uD8lIoPxtsjfQH4MZ4iPcvdN09SbGgu0XvKDX/p3piZoN4TuQoqFMyacGP16PoufE/TmeQn1iPv6fw0fweFXQeQvSea2emY8NTs8PZPEqBE4rnMb23N9G+h+kFChlkiVom4q8+F8IuxIWeq/vdGSbFW2703zhQQwI9DBp8GlXnCS97iU6/mjYOIkjp5BMqsNy/pnreyUJpisilne+vJejzGRBnT6wt6mDjobJSpmi/X4iGwb6YFLetGHxqzayc4c1N7WJb/KS3Cnd1ZdSa8FjLO4SqOdKEMU7BVgpNdZtRafJub6DvvuUcKTvw3WDkYL20YEDgTLvWAOzZlmNw1xvkTaCjrn9kbe2M2GdiLS5sdGLi+nv+Zjt2au7DV+FSMeLiuj4feZm4wTdycOUXmdkWN6cFZFinrFLssDz6zkt2r+QmvlLdfGGwbt6Invoi/eJxCdu402xZ3CxUaBRLbY+yUfxT824QYIuTu0osnIrTOp+OlOLKg+TIXnJTe0h0zuVW+YK2MefeWE5L0L7vd/2/g8l2rgI78snDEYwv+MWTT8y5VmWR5/h7qOMIUbtTPzTMfjQLuaxrByu677g/HKLCOq6OxX4YtUpeoeIfeiMdnnS771moRUc4uPwNlBGflvPnAyE0BdRPpaZbs2IGluIz3nRUgnz2Rv3yYYlSaez5cuOzmIp2BkShY39fc76uBKLG60uppvTJKMwkJzhOXA0V8qzqN2JVzJnzxKSrmz4M3q4RbjwKTPV7krga8nihMJXfCJsBWE0vvonEjkl9faL
V2DxU4brwjveYe1S5MFmn1jf+M4y7+gp9jyktgvkMZprEacTopAXLuSrwEUPdfb3sv2w9DujRO0xJQnSCTdgukU086aQKqE/BUPysHY1eSWoydZhoPWbpeSaX6odYCU0CYboUfRqcrP/0Y5g4nGWTLTbtduQxN1pdbUb9mX24l5QZs2YhPvy0/oe89Xrn+r5SnYZqZxC2Urq7oJd70WiUzeWPrEt1BREZ+A1xXR1A1usVqUL//QMCkZzS1dsAknIPU7BKXqqTIY6YL5KQBanel0OKYS3HEOPiTklOXr6lDyN1NtkTwRVB9Z3Motu8+JiVO+hwa0g+EKfvopirXPDQooCg+52qFAaiBWGXaH3QCbWgSbmzhbb+sWLI94NswseMvk1XnhYsI/wjK1H2Z37RcPLb0/c9weW59OmEfoz80r+SBOMxkNrry0/l6JaNpSyLqXQhcUcjGwR7t96Z7Kx9sbUz7hK0cMuITIuH5lpWP2bbYK5wCD8wGNzUTOjh9iZMaL+5S4W+W6HIyKGJJ4iHTAP9HlnqNy+IaqIzGa+UzW9eNItXml/pzoapPZgP5qpUFoSMmglLGlFf2KAu7ShVB/PTL0uMY4zEB9lqi2qirUn9LuFOSZfT+QzjRLrI0L9C2xsYFGSAHosEwZUN4WHWVo6fMCZ5e0xXPEJv80xDOAiLF6o6d5PeMe5TRFkFZczv18LZ8+Rzyd7CO2paND5mCnAtmiXU9l/DxK4bEswU6XGnFIEDUjTPF7uYWPaqJ5eqr4yQiwRfeG/l+cHxP0txr59v51oQ+Bb9IB1uTnKlo9bjNoROz6tYKg5ovfMNKHl5bi6xu+48I5bMFyH709eORv7MGr17VF5a+1ZTQB8T26LGQqYzygN5+Yyirlmwx12tw9nBt4GsElO2oDJWgZ0udAXzjbOaTxANnVFiuAkIrTLi1xE+XFZ1IJg5sUgYzY/7vne7LUlHiEYVg31Sy/iV6iurXM1Me1bhl05NghZomP7YkosfrCOiSCymX4cQDTy72JspyaYlA1YKUNmX3fk04otcUb9MbT0Dn5y0XE8p8fX5Wo2CAgJJONGOiEfUkvd3/fzcZPN6ETO0WnbYcpPh/sSgHfS7dpbUj6G/0abHc2tOKYVWwmakwTmaSeUmJCGnIna6/pOaC6Kb967uPEVaWx69hYT3BVhW8ttr3yjHjP5O+dP6HiLuksD3/Aza1XSkf8q79Z2WMH9ETJ8OdYIPyvDl0Wm3xZ/lRECXPZ5I8seM/tKUDcAx/P7U37fX04L97CoLon2rLofEkYO0e7RLGX4nVmhOzGdmU49anBca4PPRPSJLoR3CC8O2WM3qiK+64+5IziFJK8xnjkyno3y+WckT3kpnK9dX8fs0rQucsvdQ+Sh6Pz12PyZ+XHYzITrR1eAZUF+1+9N9CunXxoQLTB5hqYqNZs45L47ZvlnTbbz9esvWQK1UdrVgZctgUjlb6Jhhf0VZqiwHuQXIk6sZw3v4Uu57IbA8LDttq+RiekQKvVrXymgMIgEzd3H1eInd+wSBAUOed/DXhwSUzKSUWbXNRCm1pnm25mn7sR3vV1lTHK2lzyXfSrDC4lenZXdzgst0Xtxljy9Jfb1YgWhaVaGTqibWTq2VfQhHGVaYuJz8uU/0yqRFucxnRHxxvuFLB5XGjmS14wsS+69YI73MhBC2LFMPlo6bm73jSssF2e9Z+G2Y2zOgdnpWNDyU3mLW3vTDEPaXwadcrieWgJdN7rplFtkA4biRWtJ1sYrSy9eSVX8rIeL+q7S5805XbgwziMd1reUO8grsUJrR3phkJLaKdAJNIvKGqVP25H8iG/JdV5IqCrrFdAr4N/BBNjjvqj9pa1Y1toZpHUzfFzOjl5GY6CAwYTeJyQhBBCWNdf4VIuhJhuQEn4TiUKc/gBercRy9OgX30uG/n4TbcDJeMw/s9cVW7nH9lZat1WM1ui/eLu9bLfjxCVDrKOuxMFr/Etz7A6xarfkdGDw
PyL5ExJxVz+2Or4HC7+ITYEoiEU4YSU0S9EkPMhrW+tZe0zAzF1UqgizNW5A8khgXLcVYsesKTcXqbALKgOfx9VWvbeDXZqprPDCMzqAwPW8vLJV/LimcgDFt/9BtbCWQiWK5kthxgq7f5k/l8n5KowOhHZKuTOZq/Xu2LkZNuMu5+FOJxVDMWPU7/f+02ZAstYodCrKjz581fBYoaKvwDyNd87vm2QpYhIa32MFdAn4dGSf3O9QKlKV+cU04bPRKVn68I0tp/UGGy4g3/MrrTBOaTFOz9vuxGJ6m0QMUlhmHa+mnjEcqeJesUTCQ46ZSsqZnBIMbDnVu4fpvU4UzJjoR4V9ulu1JAHCM/7zkrdfqkQ9+a86Qux3giTo70y0JBlcrb0iN4WLxrcQBhfsNeyOs1ZpyUNzG4YBBq6HD4sgmxJn8Ux8VyZY1BaCtBkiQoGO9c/DHXhHaGwpkOa+nODjnUAeqR2ss/h2SHdJgj5zbqzyA8OvvSzgKNFkykFvN+gBOGO1tsJ1sRFAocMACFu7fGDvh/T7VZBojb8XB4cPmNBJf1T3AN92EwHnvCn7GZjDCmu/HOiUTVhEQxaoe1VHtYrLFiZ9BHXk7aMppPu36p3gmeEyGNw3t/BonFiet80FHUV1Juuq64mfv0re8bgYTPYS+WZF2WBFTao0G3ofyrw4j/g97zzPKkDoWvb+biqTUbgwbIRixumiP2SgSmnSBJcARAmnUdg/W9ngfvKM3jLS8YsRx5utUln2w1q56xLzPsV1mMHczDeDV2KfNImxrSNwMVYV4ADr1+/2gvyWGrzru+eC1MLQ161k8kD6EKRGIN23B9jJ+PyTkohrgmSpV4CbhQXqDXYPqQvrNelOZE02S3Y1qHH/qcYX/BT7WV9xI/6koWVG2VE3iQ4X0b5wureWkNltjFzjG9hHMXzU/LzbnxwoIlRBS/UxcOglNxZ4yvDQle4+pFjX1WRuCLvZFFlePyvVxxqYUkZSWgCSIIK3GSP1SDWEErVpOfIKXLFhFSYz24LbIU3i7aoLuGasX61Xy1k9qyazQn9xtYIsoxdRz8/HDLC4hQvcINoEMVHI9BITiL23Mt4Re4nHvZJ4BhBujySEndecNBV92DoKO/dgbN+uL0XtK0o+e11wDSK2H+e6OHm6Qit5CH0RBcRriZcOaiQuFY2aPDwamk7xMNQZh8Vg5r4ArRQpfhQ7kWGdb28yLQkwIOftcDyWCQpOqMr1sw/cZY3bE6VVzTIk8TKpqaU3M0gYgogqGwtZFRksRDbWvfBMkV4bYbCNxy6vr/tAeTHNsbOuz6+6PgBi5xZB01Fmhk/VSMO+0WZ9o+GbQQaG7wltFjOtylDt7FHkZb9Zfe3zNl1YLXWckls2CrX9d9a619qjBlOrerDp/HT/mFnEf8QIuWDMjcB7/VTgQr1DGAFz8FUXZ+Qd7hGjKYGQYrWr/iTXPB1ZGBXu/pkDCnlIERQ7FofFF5mLzw20M1lhGp99HMiv0lMKfy/WrVkkZUW4QgI/W+n2FQDceIxnra4kNz2Ls93L3qhJQJsucbNlbUs+qzvM3ouaexrNkICHP/JdAmm7VVxbjMIYnF6UF5DSIfa3w2pc0ySbQtO41Hzvt3bwClFunUspaX8iQDSPb+APhuXydua20y4pzyyelNs6TFqN62uIgPaY0c0v6Uz5x984rxI8Lr+OaftOO1QXy43bDLR/VIoqCORFPMLDuty6XO9tfRMczp7lWG0/x8EEcbLyBJ022wtbGKddOIEud8MHjnwBFKzW5FffCi2FixpGufdOVo7bDaR7cMzdqLu6VCtQWEfe/ZsYfosPiFUUWPF6neH48ioBXxnIEyx656/P1+90BIm7Mn+0p30dVLskxgPcTIlhpGGJ31mppc9dwvv00HQ4OysMDNJrw1granXieO9Hh19uN0dLp3AvO9CSfaY5ryBJWD6hjiizoogS6T+V3KEOX8+oUCkGixlbSfqbyB6lim6Z
/jYoJFCHixux+/LJrALRx5LfuBN2yM4JakQ9F2ZtI2HL8h1QzbXGn5HZMQrV4leXCaGjg1dGk1CP5SpQGLFOGm4JtDpG4/ql5j8s/FP76n1uMYCVm4vJpBnJmib7fpndvPvV/bCQPcNeAK1X5JBLODC2KO+ZWPeHFemyqFdTRN1/6guRY+yXJwxTcjpwjgPrVv3IU8xA24SAI006ZUST9DrOZD6kov3zcbyqg5wpigMk1W66lO0gVBBQCDmJ17y9qqMKdakQJmkXgBTq7a4mQcSrPI/ciPixjBHLpJNNJHFMntLAbDSJ8pp2c6hRCSS7oUsJ/R66NIVGN+wXEzJNELdcH1+/PWBfBtOPMnu86ulTQws1aq6QIwoVLybLfufXwTQSGgi78d2KYqWWYJdPJ0Ho4fO9EvclqiQF2v7na+ZEeCWYxiymv6a4RL2DPqduLAqXv28445U9JhznWVVcyBorqoxw8q4jXhrt9igI8dYhbFVeab4CJMTjK2iYZsTqmXY8rMds5wMcoGQ3Bek5Bzq761Nu3E/zfy1/fw3fU7Ss4SzNFla2y42TVU4M2caJaFPUup+SidPE08RvOdwvQI2TqrzNHYj03efylImnPWZ9smxg6onHwy4sKm5vFlSp0GUqogZrXDNtPk4zcHC2/RDdjpl5zwhUij1C3Aw/WAkTDCjJ9TNZZQTZIQnqPRgcGshoFk70ZpBbf/cDnjqRcZKrVd4L889ElXinR2FJLb86z8ED+NUsNt/suYIdUsRIt6avuprRknONZC0x+BZW0oMVRKTbJraBC/qlbbGvumpzNM+irkCg68tJKHPqD6lhbmfjJjpiPMgUv6e6ZqXtu96rm6oblsIBv3tY2vDrk8OyA+aXy9XsVNuCn/nS1d7+mYKU01vNQwTabMCM31+QGw47rMH4hy1/jqgMDZ9MmCdgC5KJpNQJUPndziVoco3CNMnz0RvbaL0Ul1PwAlazHZFZe9rrmtce+vP6EhGi8MEc4HshRaxvoHQKtEtSVdrxDSXY4aib9D8N/yQRVltSDPeL7DmD6JLc7UO+mDnb2FXiUyyjmTVGMpcEbaapk/nopEsTATpY9xN3pvnbIMhPZ/B1/6cGm1EL+gr3lHvdpIC6kpI3rr2odzravWK8oiNa/Sh1F6qQTCs84ii7Ic334QQW1LhanzHQ0WyBx5ezuV6HHd8y9yTv9VjvhRDz2V2nfIjvcAVTr6xbQInPXdMLU3SgEQ84DqMKU9k70wKtil4BwM7RCq89/wqvd9AKydG4cTcLdVVQLy0KpCcOUpCL9ZF0qWfs74H1Uz5hD+qtUkDva6eqPTQX0tiWxOOs57K5lVyFlTlzzfeYYv8DeYINfBDkmT8D19oGOrvZsEA9s3jgm/yjXRFaz87wy89ejkbt+PEmx4RZeciANk+a76Ypigu9Gr4/ocVwI1Hi8FLPvOWCvtxw2XK5/+DxNaebrx6FN44eTDS9a0yGKr1weQTh1ZqLvNi3gp5crIcFJsyN5+Z6YdU3cbwyolWemUfO3ETgS1MWC2J6PbUjBvffxKB5xvAtrLAgnNp1BbXm0r9FvkB7t63gVdm3dt8UpdfIxYLpjr4Lh8K3YiRKvJ1VBVB7vpegr0q0oofn1Ufw15qLCtEnzZN+ut4LHnG8uXJRlyFphvhzhsfrC8qJ9LlKIgPvpWMV2GOic3ZbpA89BETW2cpuu7thy9hK4gB2Bo+jm4OA/d/ktfuRuOu+VsFvFbr2p38iMRm7w4iYpCr6vvYbDYO+S5bZ+H691ggPN2QzQ1zuXz9NpsDsfk3hw4Xel4017+v/Sg/eCePFX8fH7MC4kGgJA4pZToAt14r7c3O/A+0vth3xQeY0t/8BKQufb36DWusqmNZVk1tSSzqRB0ZA7fLkK/PZqmTjUGxr9F4r/Uz45XsWTwrC9I9JiOjbX6oez2MoPYJfeeSYnzblsJFiXtghHhPPyMxw6BT+T9P+PHa82qxJz98wL4LvXAGK
e4FFCA1QnsDL2BthgyTdZCkp6B5ZTCxvKby/en2M6VPW8KpDQmlR2Im4jeLjQH4QqD6h3pwBJygi6jKZhDrbLjVkw0/JmuU6Kl3TOMY3XGu93ZarFeF1By+JNFoByYqQOcLUQ9927zFZZ6+p4u1M0fQpehUpB+oLJ1pP70DSSjoRFM2d1hhEA0b+djz2fVS5yd2nyuCTC/Bzya9zzp1XL9/toFAHarYr4u6KthgfnkkOxeGUwg+cY1rchHEe3zYn37k04TSR3ehzUecz/yNqupaPPu8ciqnaZZ185eEHumcfJunHKMLhRqWnrwEXa6KEJXWMqulxV/ab/j5ATi84RrqnVWqjTNvBAqaguQAErddDyT30s4P6gwNcda14caq1XDDNB5jK3Fmox7IZrxqz8yzeP19N8hxozt/5xrdZGHiRg/4YVY0poYU7H5sE8nWj2srTkEZ6rtM2obSGjP3h/TJ2i9GrFToI/ap/Hd3v0K1gN2hvd3svsk/G1hdRL5Y1+/iZjHelDwkZawOLytrfsQc0skBOUC7+OotYCPspsBAlSF9yFM1sNK1XStbBD9vBQGG+jCDHzpZ+UUn6kmfc8j3WeE8BFrdsTOLfVluJsmWP7utEHK3z1roBAhbeWcywulossPb5U4QP3ga99s2LTm45RHqKqWdwnL2f6jo64evb2pgZb758ckN6tczPFg4+/mL1EcNxsP0IgedybyC3c+HX3zD1dSXDSCvC1ZY2oP4v6Hzge0u17XO44ETje9Rxez1pagCEJIkgM6ewT9U4xPfJd9fAujMA7Kp8YSqNLx2p1aBnMTcgO3s8XsxqaxpwG/z5rJ0I+yuSxya2iylCi5U3vG4UzuY16aAlybGfAM+ht+GZAQ86Vjb2Rheo0NdySKbzch/b8GKXc/CNbd9OggFL4Dr7wn268VValGg/Cp0Cl1ki+Dr5j2YC/XuA1b0k8WZYBHwydq2Rlk14FtVMSitJvxx5lrHNv/ZlXBP5uB+V0qWPXWE9oiiFpL4OQFigFEu4qoq80fvEu8TUd7syOu5FHUwqjwSWntMuEJIdUnVWNbJnv0zdp3qYGIRt+ZG9C0rqdnKCaiJLTHAn5k+DGXjKH7XyuvuPh5+tEQUiqmwgwe9dQHAhV2FH+LOyzfY5/LLA7Qy0jZBraFKjvPbrpBbpz1n8+lMeoef9UiJSSseb0awrjtVHiycN9J8C4gBWsCizO0M07+nE90zR1Izp43Kd2MCztn0fJ96+fBJlTjrROXApeYTvL4T4uZNufuI/PazuctbnCuHsYhI++U914PHXZtLTFVh3I412II66wLVch7n2WqvxZGDwxcovCexb6696bKdRD7qbnIrv+Xp3YYsr2Xdf8LLsgME9q+stLg7g7mNjpYxOzku67QVgxdiOVxE7yxEbHy8upUbF6MjWPppoURv6lI0UO18/mxKrC7STVJtM2Ep/CENgW9jw/oheeQLL/jrwS8f31PyEu+nQs1ed5LD5pdUvdCuE9uotv7F8hg/tAYogBG8J3udATU4xdyoHRu30YUXdkyY9VwfpoaTPCA/UetreJ5z32rNzjjvRZKAeE4z7/vglczQXS5y6Nk7k9yoxePY/XUzFc+nAqrR9c2kQDFIQpJca3+4ymftRbJfQyaPWVoGcYXx89vgdCyB1zbAfp13NiyZz/iyBgGRAxvhWTeXC+o83lN6anmRp5ly358r1SnfZIxvLz8MXIN1QguVU2XqIeYo1GSQC1Jzw3CgUx6BydTlYPAeyqu4WZVHiSgH2GmSSuiXumzLFhYMS9KK/35jOIKi+7intkz+qg6gUBWXPX5OopsfNjQu3BwunUaj7tcb0iGZzx00zCcaREaQ/3n5PyrVRFMTMfpwDION1nHRC+xKz7jGi5mMXEDDfQIeEz8wiCeGRL0+WY89sysPpzTkeerJ11l43O2p7zikx7eMH4OV6omaNCf4SEsf+w+XcCJzo53QfXj19Hu7zZsd1278GSdxj
WPVcqJ9jRBLAQq09vza8V7LfHOaoggnUtODZILvJ50nmozvVJmfgmnTWSb8Fci8QsBmEzD5T5u+4VPSwTSvuNZDDDvOdns9p7D4JMeIcqVnb8LwqUSMbsmc+69OM237Pu7/ZoACaLMfLEndFNezB665+scqNR0xHTmYCwCaXSFA4QDKglWD8Ewc/UGsuSh9gHpE3tkGVqek+LRm/GjgJ2v3XosKeB9aZufRthn5i2UDlRbMfmt4rhZez3pps6FH1lbONUdFMgcoDkiaTNb7WLHGJKAGgEPXa4sxg7eMbZaGr7zOeKUo1xdIAOJIhUv3yQyn7is8e3Ua70lOqokrWM6fBPKdSxKw7wxRo4PIFUAhbFC9JlTahg++ZTDwe8scsOdH2Mb3e/6JTaqGuMDPdKLfB1HTpiRcXlraF03pFUMqdjHVVd2ZKlW1UWb55xgTep/dduJ8PxR6MPtJfFjuQJOWe2BbCZKdfdg33+G/enFKUu4YM1l1QlCJoCjgisCq8gRBP7JI95TL9QQ48/5U8HIbIp4TbbppnMR/GboIl337l9aBH6ciVpR4d4hf2Oy+Pgx6lt6BWkoQbh2/0g/peGr0ejY+n6xc19Qra3Mk5drL6WlJQbDZ7t36adX8IJ5hw0dq//YCqRTcoKtafmyog6eRfDDAFIB+PWAr3ue2vRhte5A5bcwyN+OeFr4QXKXYXd8UO2DvMwsb/xLbb+CMKanDe/I7Mbqzv8Nsv/Ox0vD5dmCMKLEZxnr9cv6fTsKZ+DXUUdNT0WNCt6pfcL27CcXdGmHrIzsxuLs7vPzvlKLvQDMftoicdOrQqj9P55opZ/mRAHEkvjzDQLwgoMgZXcB1xeFCUVHrby/kPrYENh15emOnk4pBCJbkt7nlo6EnHgnJ/jqN/VnrlVvN51wGlgCSqtq5SqrPG8uLxCZHiB5OHO9C/vABpwpWHtoey1dsB7/2IS1CXsb6/SLPizlfPmXahIh7UDmgm860zCJJ9+ZwxejMDqKMWlRqE35EMNvOMYwaoMli2hOz19pdeFOQlfw+cmOgCYpyMF6c9ddxe5rv5LkAaTpq+PfB5E28wSet7JBYIVOf18mPaLGqMgEmQPsAhUB30wV+i98TIhtTLOzjXn4XEHZs2ihu0n7kXUIt7a5PmJvp7NQOriFYsPeJr7eQeU4jndagLPt608riw6x17LE61mFHTccJSDx4IJcAiD1bPWvin/mCPAkvQk+wHhm1P7Jw8q4eAjAw6YF26gtgyOvWIe13fiRxFmy/QfCMh8JZhop6bEXzlCWnKQXUUwVnd81WbPFBZBxObnYcKDXrIGVfkH0ZtQJiB3kf/uE8YKoM/6xxTR3sU8TGk5KJz/Q6kP7Ppz5a7ZW8vBGaMSk1bDH1tdOMfJkwgSOQSsAxBcd7bhWLmzVhseSkxX29b3UPBDdEwjWkgEXgeRH7XXa6ean01+Zher0wpft93jCmxvgVnql06Ybhf0aJagavGRWmoh60sNiDwVAKv+ipYYxR3E8oa2gJDVIWDhgbL+hDjoh1o/FPwQSyEpX+l7X6OTOofCPy6I3OH4gxnv7CAwMqWR4P/xoIlzLV/5//uqyb5AIOAbnmfYSjwF4jnFg5yfvMq5LuvMZGsx38jnb/wdjWLGyRI/Uo2Qjth2ZXwMkznGgTzR3zaFf8+pBLBIQuImdAXg7XV8MA6VMyjRqLPB2RM2smh5bS8bYtPmkZyyzig0H46ef5ov5kgJZzbfDGal9MzFJxlyl/Z07MIxWJU6qRIgsVNVoKLvQNpbCOT9xE7V8YIDSF/f/ow80fA1c6X1ovxKom7pvNidtr6Jg/38OBRN5fq8v3XN2dPUSedn3s7LHx20WeeWBsoSN6YOxTArird/gV2n5Dp57f1zg2qDPl+IllKn+UTZyNq2OZKBKAQmMNlitgZOn5+06szuU260spfB8ClWd3YBAYaZNNUurPccmh6BcnlasjNYIC2AjpS4mFLlhnIq56h7XEi
y/4eZTR3Xvm3GAedxnsMVobgdlB/ifJhmrZwHVv8eQQHIeigNmsEuZzn4HyHedoLUhKf8zW2oJXpw87I3dUn+HAnU/DOG0/94jpb8fq9qKvv+mRVM51mvse0fOIoJ5/TCqyVM8WUH1zJx13pFKHE6bcJT7XjRm0kddqg9IF4fn6gnvNV2buWO+/suW1mou4j17LwiifuVHkJo9SCBJu9xbS/ZEoDttotUJFhkzWgBYMn4kVd+nYxJOFB9gAWdcfCBni51b8kicJ8nDZeMcris3xUKrYeNOqAdPvJoBT8AlbpPUS6yDHke7zSa0IfLejK8bbVWGJ7/0KhiyvfpLyYa7DtJTPWTtqAFDwCfh14gqGM7c8/gN0ZIR/O3oGxgzrsUBLvJcnP7St03AMAi8acl/C3232jUvfgA1zuea7mDuqHwM2b82Jecas1xhEhYxqyln/qEh031zvY3MMGaJPST29Ma26hNYHX5UMskUT+kI2BwwPkVImR7AQmNMofFfQrGEUGYZj98zFv8u5tEt9F3vHBOsn4PSClFCCG3ShYjg4Lq8nFwkq6lR8ayX2FHXahP+QSp3/8umy7reuqDqNHAYxZyzOxNiNyITehPPpdZlWNCHEstpwBjkTY+aIEK1MUaeAjKObXhAlk2J5Ze84Z9jt+pOT5zm0oAW42zukExPdcL412ZoToXz9AvhlkHPjTP+mhjswIhuvrkEnNlZ/ZA8u+UebqiLCs4tYOQbP2auSwQvgJ3lFOVLZhHGu8cPDd5ZfjwqAANcpDZ783hYbC1leWJkS6stv+Giaep/aqXfbRt4+16YGXaoUV7ZstsAEKkSwNzNg3JeAPpCtR6m11OUc9sWjJ+aenUvIafbRlb2NWV45TUrr1Wh/i24bmadPLYaDDprMZZZC6LHNceQM1gczAgUhKN4L1ml7zgV/M73zSTOdVtsdOjyrM/KiVn4s7BrQrMpwNOaoIBRi0/ZD4ZUkCRwPil9CV7D5x4FXC2ytrgxRhpEvw5OcAQFr8s/Fc7l5Zy8HOh7Ojlcu4z2UuQiPmzUo0Z36Dotqrrap4oW2i5Oty4p2rHT3tmwAQexayw9Vp+RWRoxKdyLl3Gz8kBUpLkyeU+EE2Du327SXUl/PquGZ7GSMOcAqyBL9SX3UtwaIkHA5EHXcgyFTcNxpekAWITfmTKign5P02ciH0VdvMgTUwoEvwEVl0rKkRWoBjN9j9wdgnpJdde79qmnEkVqk8H1JhZb9tYZlCJW8aW6cVb4H+7Ghy6DHmSp9nFh/Y+qT9NgchkILZW7om0v6QGYREsvTwarlrAt/XWFGjhvX0Z1vHr/RcsKUdlJNau1I/FbSpGregTGO1G/tMtTr5pf9pzyHtRaPVX51ZPEtMl3HpeE1w+zfNEM54GFNFFjaFgn0cnpsnKJFPjjL3WHPYsqooctoAsVmhX/CY+ccJc/P6vkgv8tmGbQp4w4UVmivagOy37uaVaRWzWC+PPq9Dl7BFfalcpmGHlOa0MuSM9LxAPbF2xx+yWR0XhBruyOMWVbm/f/oO1DgBwxPa5dsSibKVeV3F5BiabYN+s3ydnmCPIkcYRyC9klHlVMpWMu3Dxu21tGg0jb7MQRTNv9n8tOeaSki1Qgd5eN0Y9UgrIcRm1K8+30Glwd3cUdao/EBADyg/gzAntw1itemvrmIda2tK6zNWZ/EP+QDtDBbWJbCTLgKJJoLb2n0Zwvvv7hX1CmaGfZE1SD7U1NC31J2mdoTMXY4Z8gG3TMaFD64YYP38+LWDHKyuGwhUh/UOr2kyI3vCN25aDIBS39YzB+xismDMbOLnA0oItu78cXxWJVPCywCpsvqMjbDhemyT+BW+IsSW85moZyBJEJz4mlfmZMtJguEsxIDSCar6PrF/o4SsXMfH4Q6PLiidnCeCJ1lRubpxZ4TtEMrtgbpK6ej4tMxW39KhhCg03g/7SURu5fYZblRasSjutLUQPfBerOgqUMFmPnaUpd52FFQK
xwPv4JBm0cCB2+dkrosaDIdVrMjhL54SPb+YUGxrgzKXGCd555S3GW+ExGdmOKYmO5u4bACy98EotIBnTRKVLzOWO4c7NNYQZp6eXBl369DL7YHHdOeeNl8biGZeE/yK6Lio/IkcqMjTxOC+ZZ3lbEbR2mCIKw+8JsOeAFPtShlO6JKD1j/vmpme5nTqw6i1bohAISSJ97o7612S5zHzizPP7depaog/6v4dHRlXOqvU6A01xLdSvTmFGA2hHrlP419lCDNghsP0rxog0FL8s/BijsR1RtfhSnRKsA9YQHzZtgPOGj3JcJO6P+90kK9NBUoMLNkWZRSS2EMDFL5acpbIhqLSUOyBwWOllaMRQIuoSx9/dmnRA0DfEhjy/R0IvIyYc83EkSLsffiE18XfbCA+J5VuoAafEn1PHqY55eXrwedzwtIeAtzFdbNaGn+wh71WE7H7EBrJBUzHa1kDVNEZM21cn/sUyXIoxxLvCLkou861YpP+aIjD37SqhWZGeNVOv27lpPlZ8vTi13rmGnWhR3jnH7iHMaf+qBX7tcOpSmkn7H/ucdHK4TlIeG3VTu9L3Y6gtvjppg0nu9dg+I+C2GmUuED10b91HwRyCbAnLYwy4eHaei3DIlVrXjm1PvM10bx8Q5blZW8xfb+Ka3I01rl/PPdqS1X0PEaS5cw3LRHpRjKdbRQRnxr+aLrvxAsoG2lEqGbN4+OEC5nf/jYJBoPBB4Yolfq2Nm6unfifARdjzlQ57ZjYHbUsxkzUlenux1JV1mEmsHuQPAC4IkBlF+Qv/9Jh+YrAK7yqkIPrZpEdhzujZRH5n89yjPW0DoM6us2u+xyPUtfoSwuGDzVCRKWwPIVXnIHbWaUvvCfJVlvu5gx9AetHPzA/zAqEKYtwvhgeIpVB0at6VKnRHFg2ZHa6vh/FQKOPaz1OpmyiZopXdHFbQIocm7ED1RiMGwtFIOQlqtpSxSQiLirDHsozDvRkkd6vY0n6m1Q8sVfwQPwlM7ZfrrMn9dWrgm0TIebhb0L6n9C37HCeWPLy3GcOKfHL2KCe+rbDX2CYZ86igJAOF9v8kVIJ3lkqP2Q9XYlyq49vHZEaeRbVEc/ASBIiylPm+qg8k6iZhNGajorN0a69oovKd4MwrkFCz0j58XB5+DY4rejp9upKAz5RFSFuWySZ/ZrK9NrFz2euI7kh1d6rqslhGqawSgYGCoDF6UM8cwXglnNNW9e/LFgtzM+Vd4tlZ6EMvMSlytL6IMwITcRvLjfvvCVB3foj/nlb2xrz9CUSlq4rqJwlotGyOI23+KSbPv9pqUn4tPke7C5k542jUjr0G+Q0vAWDm39upnQg6w+UOv9ONLSkN3L5ix/0gb76+e/5vBS7TgKzM9fIZ6DElQcexqx/x+fVYzXojkKIFnawZMo2lYnL1Re+sIFJxYDiBDlzwJse7Flu1PfCqjnzDFkJ+nDwKppjZFDzg92gurByb2wnfL0qfgbuM4rhr+frz4G/fXYkFJSKgWyiffc188oeaN0z/AXFHVQ65GgvGaYwywYyO7p8biMrt+irgc2tZoht/tXiftXtuPYSH7vshQK9DMEY55yQwFKLkRwnTR84f0US9xLhujZfQzquRF2g5VWTv2Zd/BzL52xDtZzL/QOZqTD3hbLtvJ02KlYg5mCqBIWNBc/iLbj3X/4SQsBCxYlbmKWxdrSwy7f5CpPzPseGLx0jyCdkMZYW5TZQCSa0JXZ9HwKS9LTaGB/ah7eM936rTX2En8fgaadSXUtWqx/9zMEOzTQ3COHim4XDmPR1FJ/X9rj+Al2T/Sj+YnksM6tZ/LspouIyKrwmj3eoZf8ZWsZtLB/nBBr27vPsuSt8YZPJazvEXmLCxnGObXXDXI0Wp6Q1I4Iw2XrNV3cKU3r+ihcWO1uW54/E6uiTFUBk9oXxapZ8QP96HtRbONLy/BIr03r58kGi3tnQc8ddyW+UvHHT3waykrpDvrlwbfEFbmIQs1rR
18e/qOHMb+qIIcTmr0TWMSfqYCcr/9QmW71dE2NGNIBhw3hDJ+xRnucn68Ftx92bpKbvDlnZjL9b4mpsA1b9MFdphDuADOWkQdGrebXiMjPmQZkI5aktdpgh6QPZdGiG4f/hVg2uDQKfNBKsJ4KNk4z4Gv5S3TUpeScn4DeL+DS8h7Rykao2sjm/tSCRlw9T6AFbGlKXxzGxxRkd4BP285rUNf17T8Pe0FZuW7hMKchH/Sbnj0Rpsw2ehFwOmoGFD+qxBL6FYKMQ27FcwvCK9pdAb/9Zl2b7U40CFZQWgyfsxpVHxRWXyZiAqD4T8XSMFMJ6W96Z07S6YCiZHh101tLnOab1KYr12R/StpRIo20bVLstUkIFWTnEIp+UyepN5+XmIHJGzHcmdl+TYf5Oqj9pMZp/393BLxRJ4VvQv0RaAKpfaWyKvD9Vrvo2BfW2GLqX2sQrcMdavAz4koDZjUeTKCbqSRUS6EeivwxKhnQ58gnrfcebSXMc7EgifHSZHbZMme/K10XQv4ToZCHmKwKdViV23AbTrstA0jqABGMkTaXZ5Mas6wYVw6aXZ9ViW8naqTL2WBasVzsXMreLwKVGMTGSC4khbf48JP7skgXxz9WZR397R253oUGNJPNBl1VzxKUk2KAtCVzOXshACxqhz0Y7V+rmUx4Cbwpf2uXZuV+DcgHLORxf65NhyfbHHYB70b6N9OelKGAOPjlcvnMPmDySBFpLCdsvB+o7zMEkhxVaL1MyIlKNaCpfWMPScnVaHEHc4GJpqV/RZWGgtYGFm2AOP3Axl6saZtN6F+TmWhzJMSR2dcELM503ced3zkJU+IGVet2Ff9jUfr/E+wiOXc4RcBCLrNICk/e4QsiKPLxy5Rbp0/hIm//D17A0msDysuVngEcAe+Hv7wmKsOLwi0frW2CESjZu4TYoRDCBW7DHxVqd1IFOnijdlwtKyGdfjkDaqoekNhvank+ZIeY5rJ63tH0gaLagf86fAcFZ2C4LLOd1LtsCfsvM0+ix2VVrytB6uqz+/sBmTh7upnAnbwuFCXa0AHKg08jWysHQd0ZorNpQq4gNffd6Dj6v5B1R2We1TR+ks1LfMp/cpSIWFXtAx+2cW+WbGiN+Z0B4GhsrD/qVLmxyrfMZSt30Fl2qJJeueWJI80tEdEiZAHn4bFZry1BX22krqbiLbrzb/vyAZaYvSTRbgIunlrQio9yvfgEimXYhv6DMEsdf6aNq4E2BqME1BNTjGwSGPenTEyUxztASSc5zVac/h/v4CcHn5YGrP4ROovVmvg8sQGCVamlOajwnZmXV6DaPUW8VIds8th6/GxX6j9scf6lu5Mg5xp4SpIR+VnwxW7wq+H9dAhqU2UlWqVaDp7TwszB2dhRdHllHz4wx6z5BJe5zei7xuB+/DL27vv/sLgYbxZU/5QeBQX+eGC1Zx7VMLck9gXzjHBXxAWg0pExcVuhpgJH8hC2G63kXRzqXCFGwzCzy+vs0gXTu+SNiKa5yoLRqiMTiXSOsvK6/KIoXSEC9YZ6fB3E/CXyLF4d8WFr7krL2OtumCl1vL632UDxLAVvFECqDKe6WFb6e7xvWqwIwwa/ypuk5/Di0lmrAM+5hqJpA05l8epz/3TEdo/f16shrvPvcKjyZGomHCPISUpTgR9shtDfe3jVk8zqSWcwvcxr0aX9eieR4cSYq24kyXQVUT7jjCtjKP2FIc71OBgrvTOrL/M7OC2CYNnlv7ho1LheG6hCS/tJq1r2U5rXbzyOSAexN6n21bymKcyqTSK712U9yI/1VDKNX23XdtL5JgVuBmwNl57kupYOlIASmCrTNUPaok2N2dBXs/qqWPnN6JhzBlyfLlZKJtx/94Q3gFZJnN89HgtOsoV3FPXwS+2SsEsdPrRcFG/ZqCrywnOWX77zYzKQxrmB8PiZ2V0Wd0PXKnQkD3pdtEhFNNy201Qt7PJYHgrqxFuCP2ds6jKepOUS0c1G99WkMmq8S
b7qdoeIn8c8IEY+XqpCPMKWsK6+9U06izBo3XYAef/X069MUUF8bC3xJqcnzOQX1ZurM5eVRrLWh90s7vJJtFIdkfdV0MrEp12ypX8ZNgiSPbbTEux8+5YGxZeQ+fmsHJpMpom/q2H+sTpcYilKu9zLTJfXk5DiFqOf00b8OdRM5j8wyZ0CtibowSFT21MBEs4xUPJ38z2VsH8eJGQRvTYsovmI+UUAMpDK5yMqk3t5n9D2xMP9sx/PaZ7Gf1XEAG+Bq0C8KvreN2q5lchVK21lY5rAxeBv2JeHqZvLco/9s4gyrSPPfiIouFdX6gnmHiMpJYXTtt5xd1yBGtYMxAUbn8fPMaK0+w8qB6TSNUG3O8YIrtIlm+yU+muc6tN3X1jCWf5YycbYYRdEtEDS0RRTd6POdvtytGPS09q1NGUqMmkLwRukR6b6WGvP2LW4owD5bFiq6IqlwlcTldZa+kyiXIY1nnj8QKec1x61LYti2Z7x3p5m9it5mvtrSQj3nrAEhbIm+XhkQOyvz9c9N9I4EVxpHgZ931n/bMydprhXhh5D4nsGy1kKkbGDn7Eg3zZwWMLM//fwOYTw4dj5YoUIRefL5OCWMJZq4TJp+eBxsLwxjTs35ZUzqSh09t3dTt/XzzePfK87Hef/zzhzlDrH5ChVoeSf38OocxcZb9duMiu6buvQJFY48uvcm1okCG1f/rGHZHmGYqt/OxehRNgDPMPDiP2s5LLJMeczX1Cs09DhdKsEDFj8BbxKyqm/BJ7HZbhdB/c01/j7542ahojsPnAMBtRpKxHHSUih7+ocQhXPzC23TZNLx5sb0S42BBm2iRA43q2z6bScwO+Li/Ygn4E7V8Taso+Gvi1Llj9yshm9eAWJYmLTsmvPitwcVSuyM355o0CgT/7yp4iqt8zrHV8LgNs8mYGXZj9jq0b7YT2W5EEV2SfHHRuvwJCeU/5u299iRnFmyhJ/mAv+/mAYZQbmk1lpzR621jqcfen63gZ7p2XYBVRkVGZkk3c2OnWNubg5Lo566I8hJjdwZe3A0Ks5flVguMxWyTwGkj66AbPC3+bJLE/g78Vg/LOh13e2mSJVB9rT3JdOqFhcaeNZhtiKBgQssmPr61nbjSWZHNxlRTzf6GBi63yLRaZBw6qMMecBbSu83N0FKh7AOkPtcweKLcqJ+6rb378B8CcYm+NSFzopQzKeAMvCkEDX+1JTZsAvsvEouV/1So2dys0Pp61+fUmUxGpIMuFtlx+Dpsnv5exnGOFsjhZB0TYKxGKW1oE/P+yCV4E2JYJDGiX4y3agRbKdiy/B56Tj034wTfnse2w7WPcRx3c68bIqB+EeRP0/F/a2LfKGVPIliwhlyZ9BXMKg3Lr2hWyunkjSywQyrE8giGgY0gg+/o1Vi32j0QaTHGXdC5paxsB4FC7mps2uwLtGPt4cEdeFwCdC29ZFGaUaELJxSMKI8A6JD3dMRI0X9OdHLARvrULAuFZuFn01GFq0lWkgPzwMgqvHvxIKnWBDh62zPFzQFIp57uQ4ZPf/Whs97o4huR4QlhYZvi42AgU5f2URXVAXzqo6rpil0cpNs89Xl81rGGvCj0/vrbMOrjQMeoMQOsLT8NQ2GA/wHDbgAJNHK78wyIXR4Ko4TxBAW6BeVbIXKBoLpFfcluHdyNfmgzmsN9sDRs/PLbbyK8i/519llS0+UJLqAtXeDF4/n1dPzw6xNBnqgMDT4kQV76MSx6MnLo9DA0wYHFQwejwG6v0kjJhpUtPLo728xBn1tATIDkHgBmQG1f7YZcwrDVC2+UPZnxJt9ywso/KJsPNRWiAKerLpB1hunc7ZEfTCB5JiLUW5RAPj3NCf77Cxw/XVAtsuksoXueb17baJtdlh5yaPUvMZ6PQ1/oAOv/35z4u4whfnHpfAqk/DMN29I+6P0ES0X7W1/79JznxC2soQPiMigFwcJXh7KgYTPOdcd0aKeubzR4R1J+t4ybum+iSIW0l8T
i1bWY7cLedl8dkXPUqx21hcP27R5MV/hO6gUuftBUdLCIS6SkNRiB110XpMG6eZRzF1f/jTi8h1wgGzTYqJ3jZYvsSrN4n5OPZ4tRdENPPJ+kFaQhvL0ycyq6SRqVhlkgyIXkhutP1NTZLFYJD2Fn8CYJphcFeRlpEunn/RVf39/e1eBZKyI8ES8C/Feihcg+/5FHGV07Kz8qtxQCpUMosfv5/7mXgZ7a/YsynrkjW6ZbQdp6t8KlSPVD9sHJiKNONFIZokfjJHP5LV2K/B5voJC1OkGBjDv9czXX6jNvXK2WFOgdDkg5RDFSB408MDlEKPAOBm+k0le7W8+Ud2P9RbskngHIShwscEARy98+jwaD8EbEnPSN9AXAZTL5VMMb+AN4A4fykBwrcoRfYv6QoPJK1Y36NTrE+5xM+bRHdMoWz6axj+FtTvgfdjvklHoVympuFgOiak3lTjt3HgpjgoOjIGadT6kE6QG9t2AAmiQNnt8BFaDNPRfeHw1TP8gEe5dTaIdq50iNHjoNB6dkBcLnju2cXivygY/zxJW6Ks1BiG7FdeRusHRjofNLBUFcMmUmBX1wdekuB4OIp207X2k/50dByjwzCNy0LZKKgH9nKAfiOo3gwk4/rDCtfe9k6hQFzfIF+T5yWzYrFaBQTH8UaVNLEkC1mFFoSC/ZDvOXzN/1O2ZyU7VuaY1RjzAxLbpszcqiN+/4b2W7C/1dMVSyzbWPEiuczVu787x/nsFmQ8AQRYTBG4baH60fEfqd5YmWG7DmOT7l3nCer2Pj9r52JwvPtzrv6j5ajPFwlofSjOG5dvobXTKM4C1d4R3+8S1UbRgTBHDWJn7oKDSrB9mKEE4ArGTLv+iWZwbr8+ymN8gvyFZV6+5Y9RpUWYfjZjgeyaLQ4xxbHeYXULCtlFbEKT5VgwjIBn02fZkNPLsWX6ofXQ6ny17JmtqBL8IBGDr7qk9qR4H1NPxEm8etNiytbLkiX7/7SrRFPGWCKRB3+jEOy7ANqjB+3AoxYdJ2pNdUoOx7KWbZR2i4+yP1areBqjPYdmH/NXMi4V12771dnUrrWvbkRGu7+hVDGaAxallzlzbf1Dv2vmS65shdeVMlkLRa2j5iFU/d0Gymef8HNaqo331QfnB64uQiiwvtuEMXLXLETG9itXEVyXGs0PLUrik5EpcnsT3RSkhxr9mXdHpdkQhZM0Ru2h9hmPZ70esdHYIu5DtdWwfgpVdmIn0fhjfgOnAZomptNQJ8in+3cUDeXEpjF7LFwBiEAULwVNvI15wO4jCiYwLBg/XbDdZUmbI/uFXse8OYgZyQfQOLXmzKpTc2uP+V51uJEE3L/crrnvtg/YiXRam0de1I6aDv9iIwVPQrUAnB+qSpOfShs9SqwgEitDwX+EbKD/cn7NRt8MhwzeS36/s/BLt4R1Hq3ZD4U6ZJ81y+vI85YvNK3dc/Tsj3xNyP+jdrOivOZUkuDq/zJyrbcvVsZgw43fsQn+RFxiK7bdc5YHIeS8i3mbg0ofyrfTDZW0b9J/gqaNEyC+aRqY6loO+yuiFfzw7+xlk9Wso2xLT+uMYX2X7WOLzAkMNGYoa+sH95YVx89nc8EJq/lVEDyu+C/GezHsgi1OnK5YFqEhp69+Ce8W1iosZVVdp3NIXHKDtBRzsxqVZLdYqOKhwPfpH/hAfUi1VItusl6kAnSN7yKMNrihrRjgn1I8/iYSqvbk8ZRQtMaz4YCQhioDCKEf4QSlAz1JSKuFD+KgjRNZM8l31H1smD2JGEk6CDoXZy2bKEFjH+qWBLZzoN8YJFrq+/gukpAaWqP5owV38tQItcUCfoqQENTqd+bfNjaxB0Ad3eSn+iGeAfrX337Jw1wCywIv8Fy0AwRDB9yaA1ykoJAC3gIizfY4maYPvmyHelGCrEP/POkkV/q3VoFmKA+MOwWdaMGWgCotX/xq6lBYTpBYdgIxLlv4JoeNE889UxO18gM0/fOtKTFTaGAMt30P5
BB+RsX3opyvOlMCRmkPqkSMLrjKbFvzJzuXnMh4YS3sZk/xIQWLKtd858UES9USEk2SGv6QpXhG7+XFCbMPHsvJPmvZ0wnw/gF34ToDBesEbPHVJ2C525ldZ6GjX8b/zMkBRCm9xRIpmPL//VcIbplj8TkBL76hJYMAsaUL/LGA0WRP09+Kpv+p70yDCkGKBnpifqqqyA8xIuSplQoPMe4Caf6nnb4GrbPFbyb6pqPWutd/hl5ch1TxFV2Dij+9jexRzV03CPTi6n48hfgqfRCoQ1Wpqp66HDkiV+3VNrdqhgwK22wLxfauzLIqhnhy88T3PGf/ksrrEwlf1FxSG/pnFmGFMgdFOftojlHQIW9p+tZb94Q1FgF8D6clOwvAhldlc9wEGvzJ8cu5ZEfWXAN3niBMZcJ/xJHwur14pFOCXWXC+lTXwyJOuUarf6OVFobFiHSevoF6PM4gvHwZ8XbYigv/pNPwuvy2NhnICqghoUMIaefkn8Bg7a36qliEulwxIQtpmyVqNCinhSZLAAbYaTz4vYeyA0VrHzR+2s4xlQ6K8XHzho1nvbgFW68Lg4/NHA1H3bxofrER6gy7YnKkDmgqo3MHv82OnfNR4Df8CEiz8po90JNFH4Z+/7YPwFQ0lILZf2omLF7OHqHBIt8SKZeUnuTUPbix8bHH/iuBot+KlpLjCp1nhgRDqdsXH/FXeYegEU5e5+CHG/B67Ip02h+To/Eujydt6ffXiNa9++a6k8RcVHRT1GpMF3SSH/XRp4roQbCkyTD0fBsNhUD2GleYYlXr1tzfOSpHikAfRnMKR5ZkIg7wQhNXR9HkZWBzDQ0FeOUhxqK9CtG1DCiou3+TcFnRdC9HgDcbZX/MQxrsONOTjUFuVw72OQ0UrAvSH57cy8ARnovpGJtneogaNA1vqpqPro5EqH4Th8vF6RRNFqPBua9HcdFgTOzAbqliHDoPYoB3cZAYqmIl9f8cE7GaaGvi7U7mCV3+rZ6d56g8Wb7E7GP07y//sWLKnIy5ntvzApg3ABhjV5X/d1+B5Gda+WPGyw0ntfTp4udCDUoHHF6B1Ka3Z5NxRz54LeHcAhlzMLZfHeuzrLsxK5/liQND5EIoPrlt/1X1d8HadTbXUcmCn5lfYC1soK8W/e+QnMe3liAsr/61lzvJPzhUdcXdinECLB31R/iBrUGgHADH/K6fvsghcs0PKRJkqLCAqD7Gj1TzNRIOCBF8hApmjyUH/nfDlDf3SrsQIowby5U7aWEFJDrfcYG5WNZXwxJa/YwpAnhqtrEh71YPyu1XhB3AADQ6z+REjW3W8NwqTD2zzrws5XuHUV4ilvsVEGVh545UL6ftkoygLsaW1XZFpzh+NpnmwLKfVRRSisuLdQ+OCPQlBPy1od3KhZhD9LT8hXwDkQ28f1gy8+Os8wWFND7FzOOVzoG57sYqefJtLFKtTopzYXzZADD891gyzBHocqpXEUlRcfUZKa/aP+ocVSgSy0IuJUIKjupl7S5SO+a5+TrLvURvLoF+MWAftbqJScfRjeX7SYn5I/wEkcN973qj/GiO67iciNufn2mVEOw0J88rmdfdF/o1Tr06Bset4ioqJ7PmkZ0beBA1HpTVn73MKH/H9LhE001iadnE6rTwIBkEUN3Gck+4Fwk2M8AauHxVNTcmlnl/IBjLzuT/mzJNu0QmL7aHsYZx/dBbeRSX6kK2uCuF+WVGHZc7Ltn6fTSUZuzr/NheCUKrlZfqpskosyNjWFsn9+Ux9p5VHCZH54ucWqQv/GzyPnnvXI87tRkWZepncyrwWw1CkBqJcZsmr0S3SttzcuZ96OnctFmv5kd9auYliCQcsQRsgd09UKtqqyK52ryCOATtXCqd3WscC8bxfm7UvITuP3YWRSapZzuVj1NPyWSlRBRXFinoaH1goTlRaidLMp+DzsSilYjpiJxnUVC0k4vjob+EAn2EZ828Yxo/ZFEfPgMRehRuVKODTdZXc6YP7
HRbuC+f4bibG7wZeJk6Kn9im5XgA1OOg5flmErb9USnNVewVB14k1Cip3l8RMaQD4PArZagAbTVmEJ6UNzR4Vl5e+0nCZDRNvsuTrzWCyPzBwFogLowFwKJBj2hz/trHfgdk/SU5lQ/X7pAjYN7tn5IUgn3+1izYwNWUut9K2HvtXqCCeBax+7PKoae3yJe+hTekj9RxqxRd64tOZxUQDu7z0dQuW8ktYlTfdV/Si3vWeo9x7+5eof0ZJy+26ov/nV1Y7fr9qBq6wtrS38b7DNhQaCcJ4IDmWfe7elYVUnHX3HSz1A/jp4MmkSg8D17ExJ9L+nSS40htJUsTVcgcEysXT0SsrGb1lS68Ix1Nu2MdMpVem9HFpfNKGT3PtZD7Ss/ZbU3+o+QRdP4W38IPnVSD4yvV4bx4gylgFRUrYLxHJWMoM+uJcQ19p+XT5jVRNEELwJlkqvpR1hOLPnBwSnQUipWVK7VQTe8MlnVfnxaHyJGFiH8ZDLRhUoiH+x3DXrvuyTEbGVyAthtvvE3uHteBp8QKOZ+nN8rjD80ZumDaFfZTSEKG1ytrks+QCUbcyheai5HmutrC5itYis3DY11DW7ovdCxfxYNq8chdUG7rhpg5D6xIJj7hWBDua3/l0qv1F26+z1lWGd/DWrbi+9BP2drBOp8MFoqODDujlqvSEP2j6ELW09weRZgQZFwva/C02eitbp3EdRzH4K/NJp4duvuXO26AwweqWfKr0eUn3X2TffVwtf9B4ChlbkqOfuYstUfKHiHfOun47IvGOO+JQ15FnuLD+attw1ABkEey1bR1g4I4tNYnSIcBonYlSMWJYkPRKcvIMUHjV+f8/D5T5IV/AIcVVnOuQf8RWrrFtHG/h3ZcL9CZmV/YX2qGK5VM5v1nZt5DTAfjtC3GXBSIri7H8wG7+ocUqbwl9SuNpMiXRQ99tZCEmWivgb7Ri4zCh+szBpZ4GYtsvsVqsIxNk69v6Q9ekFYlkY+S8A4drm8MJZIX76A0L9y/AveYTEjQMYUPwCo5oH2AjqB3+oZs0vP1qrPdd1hmbJW+4stWQPoLBusX/EsEfhSPkFN2cTJCLa5PQx0dm86LjVxd07BXfAb8hofbBuQoZpt+3G+5KSeq6fgxe4b+BT94pHOP4r8E75m8KPF1AB/1ZYJQLy2PQpIR4jKb/pLKKtUnq/Srmtqqiiqeg1bo2loHB4gPeo9sWRG+Qc+cQmPqP7nKy/2ZPjjFFJUoKI6EM6pDDPq52HaIzm88LQk9z73K9PfQOnz49s0bA4MASNBdNhrPbFwF9313h0qFqvGMtKL08nCZ9ILDvxNGsfEYrSj3atIWTDZeMI7TnItyxH97G6vsVBAQylt5susD6eCsGf2CWTVn82KPVMPEhV6g30idiChFL7u7qIOzxEwJPD8f7F1BgR0fayoQvZ2h2bbMN1SpISVDNqjqZhhW1u8zd/x6wOGXpy3L8K2F0D79ZYYkozuzlzUz6PuAtXt/wFJ/rS5L0TWn7QUANKeAMS4Y2xhCiiVRMsjgQ+l9X+i01YjCftbd9HKNZSsdVMXmIg1u0avZRqFCRpNJpKw2dXHgNMojQZkHxFqadOms8Re8syvmT9Zndml1kAZKk8ZsVmxfCH+6txjVyrOgAPBOpSWzjsmV1fxZIz5R6ap0jD58n7uODnLgyQC4LDfEGuZQnLrLJCR1FORIYDXGX0HKn8bmynM5WdYUcZH/OtzTB1Lk4csnTnvMmpxKIXqe4+mYHRZj5uAdnsYV+/217dcyqjuyJ3mPWmaTUmXeLGZLLjHI5J77ujSB4HzEbXRT8eyu2tQLbhR1MpsMlDhYHoVurrv/OCJV5LGh503LHQSm4ASTUxpKZdFAskftJJ1fLmU3Q1tzBj0FWhStbIDyrcYx20tS+rFU4W/PwrdKxy6s4HrYyzWihtNp/8K/1tdV/mooN2Q5y6Lym9IOuBTvA1jBMKTsxNr6zrnod01w3RxOL/ZFui3R
Ca9cxJcyhlBYSdFpYAjwSxhv1JqKaZ6S26LKvyZkPVWkwBlygCFU86P2LXpelstFCb9eZSyUTJg9eUrXzBTF9VLaF+2yNFOxjHrnFSz5+5ZJGO7rXRHN0noN6yJ/nuymNiNnREl8Fv5FXXNS0Gj0FccOSeMuFR4EM35SnVf3WvqynxqOYB0hMLDfFmnJxeNBsA99QAtVa9AhtB7k6UJsw0gowZkrwpctHxycITwXN5eIX46zprOiHGO03CCpYY8WU+I81fkRSIzN4qeXcIKYLHWKyWD8SlOgvrJQjxMUL+ZBOKPcoPMKg7G/cnxe/hLP3WYbccvwmsGwRFG2ZFXuRWUjFpjwbSOibLc7HyZ/vc4kPGN8SlaU/FKszxs/28SJyPpzP69U+XX2Q51UpSP7pXSXJ/k4YXyy72aoGxucPr1bKwcWSG7ptRFLCftFosSuYiIypRyW7X1WXAvuSlGQJ/pdhM6euatRvc/MZnNsoxxvbgi9Qr8XlhIbGu1cfupX1Pm/sz0miqOzVf4nkQ+JWrMJfomLymVW2FUFfh5/h++IEHKKo/XP8bfr11nQBiGOKBHCt6IwMqd2rqdlFheYWb0KAprWPfg8sv+3/yj6xC4rizaSOeQydr3p5UH8pXqLcwbikIuN+Bz1Wn9XZechk7eYtHi1r8uYFCXDaZPm8VWqCYejKo5BTGgoLUanNmmNuNMFTISmzBxS1pCvzv15KcGAmy8CUuePuWEOTlQTkQy6bAwYxGj28CTIzmqQHns8oadoaNZKAH26KxEgozZ8t+FqR2kLVIO/kG+Qv8Gq3nbAYXd934qovzQMeY7ZRfbYpmQdPWhvQNwQVFTBNUhZfZNQVzrQ2l5exSzK5VZKUTa/ZI/uFvPBBeSvjDoouqX7vPeAaYMp+RYV+nD/WeKf8jlFIl8Cijub0VoxpOAYSKJkEFaOO1kXN7CVa29Zz/o4v/Wmusgm+nVwk28dDYdjYMGmdilrNkbpw38ZMg/sAbV5cSZL9xIX6woi6UHJMPCw+4gDSgVLTlyNmLO5Iw6iqiBxSTfqPaXcUBG2x7efMtcP2QZZvffubEMmDedFrc+eP+90fJdEPTS5rdisWhUDnU8Cb4uqaX8ccdkldSu38UoBi2Ia8Y32OvZ0+2QrHyShmNXYFujHY/tPyk/8G7+EzgH3AGtEoCmbHeFcBOmWVVyfKsQIjsvYwd5c0JSCh621cbN19HqkxC9DQNC/TkfRwMUQVS6YE/l2mLw+N68LBiHnkw/9SL3IoVkxGf+l83NbPABrgN2iDyrxuSi+1tXYxaZAgqEZuqY4koZubSxuDLzKoTzzIK0XYn5f7u9oDB26y2JmP0WUmHbt+dmeXxJETUvqfF6Q9wdYgi2qY80F1ER+QtQibQ97/pppsflzbshngbX0pTNN9gaAhs5BmY646p1Dj9NPwbpjm33qHXytIUryZ704lTw+FW6gKkpjXHY5BrT1D3ejhXbxzEQ547+WqrOZ0ElH98mRu/4lUq88SUWkDLOyryYv4W+QrBPLLCTvEDo+UCVu+J1+KdT5oRs0bG3wV9vvLIZEQGWK/dLMnmJJ7kYaZxo3RX5IKtfrzoWhnzMl1TnKGTjhmt78U2crMCpzH1A5A8flIHAfDHhpTZpEfxRS/4g4yzkbx2/Md1YkfSg+KEVfHxVsEBGyofaqpSRTYPUxZexYsp8H1i1DuY3dAIvIdxCc5caatY4nZs7LVyx7JF0y7HMCpRqmXAEiSHYf836+AeHG/koZkkfRLqbH2zLw7zq8DfyXZ9SHoW7nb1sWLJZEsDSfpyR+L0k/vpfWfT7mL8wGaIWMaGTaZTlgo2I0rKcuH4Y1rKYmyvtLoH3mcvQXMH8oXW+fFieTK1RPjlB7LCCp12ePFCymUyzkGUJP2zw05vjFSyf46acIPbrqqxavPjd6WfIrQo8hgmliKtQuaHFqMA2I678LMYPMI4/DL36U+lfJKYwgl5Cm9ra+NoHLnjT7xRt+
IrmDoq4xI7Ji5WLB1Cr1Q32tzMdV9s3UwA8zmEBammApdBLjW1EK5ZyYY6LUCWsm6icgPCTxfTJnwoMrJ8Wg6vti8NrGhTWQS/TvWUpecgppqFx1zV/+H0W+/lRR2dSuf9lAIC/878TCms6qLCqwB2021iOcgQzRYJ1qJAuaSqW0Cq036oAqMT6hYRyH3aTz1Ioq28SviOmX+dyn/82cyjF0fhhl9qjWaWqI0XVQKSRol8F3fb++QaG6uc4H96mgqYD8skYkZeNni6vaRV7IshTgPA7mNX82ikWT66zU6XLVXUVM9SslGFGkXviZVsYORQikzR0BHT8g17hDkjSmzkzmyMEr85qOVTbE8aarnz2oWaXqJkp+VT5D7fxG7y0XbS+ku+K3gYsSm2dl2aDfCGka/9Cn7rsK80Bn8+kRfsKJjZPH96XGESLX/3s1IQdk7x+juRxs7Q9oX3JKeOdJQhTeYwoPYB5ifyjKUWnrHcPoHmjXhI/LNc/Qy5uPGimfgMyYR2TiDyxQw8uOvH2uL+y9P/BQ3cBujUspSVzaIdPI7vAFqpm9Bez8KpZrbrn0YJXq3aAHE82ZwhjqnxqaTAcUV02YT9HPYEfUk05KJJ3UfJLn11qsdhOH6AkuSnR0Oqbzh3xKusFdIxUcn6/NykkPGbV8gU9H6YIEiGK49C6+i2U4rr+xjgL2XfpIg1SA3YtPjC9IhxcgSWX7OYcYykSrm41NKJyAJJOLVkM5/8lAMnu+j0bLLzqHMDvbP+1L4xZrDwVqD3ziFUYTYHyhfjMtRiyp3FyLVeERJtYhn8xBrOf3Wy9tl7zprzJTNoopUERGjHV7pfJpeVni/lPg35WpG4MXQDF+UEP4LSWOFmKDn+VepgqVuWOX4V0qBp3BW6/b34TBGC1YkGmX/PNpfzBt1fjJiRxNPUlGTtbrFrwViv5noViZoZ7rDdNz6aIRJTh4f3QZ9jGD+wr32hD+FJHLEF9VhNGIa/1+i2I8HivNxtY2kOTBWIpaqUzsFQD7mr46dbRAxy/0yZWKOtZJQYw5mAfjQ9pvCGR+P823c5kKQhIXI0dStVz/rblTH5jy8ah+ki0lbfFX/YCkOfekwbk5E5tHpiLnsQ42TPACT8fp3/kk/iZ9FrvSHipTO5D1DrHwl0aE+cvasuSFurm9EBXlTiWwEVWs60Uvnhljg6torvNZYu54eqdEy88sUXyFrfkqmIljWkp72cMF9vdC8AvTk4RTlah8rTW+ae6M2ts7vN+WRpq8NhopY7ZICsLKRUq/FjKZ5h3hWcof9j0UM+tfKdfRp+N+Yv6O1izohdAy9+Id1aGcDgaqurpOeNUu5kWKtFIVq3xf5n5/VGsXn9s9fgriGjBUycMR5bu50tSHCAPIo6Zk+9UIpavS+nukTcsDqhrDdKM69/elaaOKFPE03a9AMytd0lTk3R/W1etpo4CzESO57KF1+jzKCA5ZiTTazkJHL3T19XEScy5K6OwRub24am0l/owRxW2cn10SK7PHa7IayDxAr/qmfND0hqcqzdNe/ZBGt8AwbOwugVh7AsbD6ofJ+B5vzA3MrZ4ouOeWO3mm80cYRvNwp4FyN9WiqETqQWvC871WJr6vjb6GlgOjq/VCfLZua7DDjg6DS+wE6d7n6w0oUq5+hDJSf++Eh4A7+QbMnS0IZdOrAo4evztBju25lt1uUTB/pmJ9Dihaqb6y9968dEy6RIBMaICqxfoqOUqkqPnSlooyR8+/eDSlopXi1qSXOJb/Z/dTqIEcylyIDFhTpe99DUCRUqK9SunLMw5FsbEwnw4pHWrsv/wKwrzJQ/Fk8V9foS9OyRhlmjX0bDOKKq5LiKUXr5wD6ZfONP1nIIeRooyxvg5yhSytqyeSl10v+VkUzTasc3hbrQ3fL32N1quMpBuHoMhQxutlmwYde5Rs85HEpO9oQa8PUnR1VC1hDHjYX6+2WSnbKBmcMvkL9ZcvfQr+y8sDfZYIv6Gn+q8j
odksC0pRrSMfEkVzGGTQtlOxWEUtPBlS+S9jFuZcLwpSKZoU2Xj/IZkqUfpg9InAiuGB+heuqjvjWJohURD63rm7hYvk6icsgb6zEqV172DJDZFTTkpRkdioxzioSi2Hli8dd7gEWrJT5ooxA8U2ZFu8YZqitxtaqSvG9sdRTLZzRLDNmOhb2KQ9/LWlsuHa/bhoGevbybIkbeBr2FFeZZWYTnZZwocrGCrs7g/zNHelRsjKgEXn1DrZuJjNu7sqCnmwH4omskNdjLzOTEqDI3Gmc8sJqUtG82n+8Mowdljl3p9rPdgRaApkGDfaSKvgqq5QyhuYEvu/fa3uXc0j7naU8JnE8XuUf2mJDpkz7afQWQP9bq6pGFKqBLTqzo992oQBbJOlXrPEqmqtfP4Np5yprBYdRxQPTYRC0UzlZYxgchf1I+NDYBUGN0LTWhxuVwWlR15vHn3PsDW3nU3EYGu5uhTbq69IgSjSq6jJFmR29HtVlD/TVrGIAZxarbp2Pj4NwcbHUp4Eb2/y4Beo+e2rKHHgU8cAmZY9h3Kljsb/ZrIqPSTiGJF+PUjRUypfaDqgc4agxWiiXtZ5pLWkozumrxK5pLvlFCHfU5aNnaIlBJRWQ2qJWJQVS0nz9NngRBST0cxOlS8jIAGWHXVQGX1DlfrLPzSE/1CyABEFPgbfiDBS5JpBqnldLpZy3RfKHGoxVzl3TAp/Z4zufi9hGShqBRXGkoJWj30HLG8/Ctavi2UF1mu+lC6glDB+qaOiwNaT8Gtu8Qd6kNg8LtTrLpV9KJ3jgfAG8syjEMcFS1FXZVmRprxq4+J7fyCP25J8yeADdNhoojq/i4szAuL6u0UNB0VhF0VBsQI4vDfCatL6RMEu4dfuZ0QRj+OyGuvVwJS9oNzAUrLCu1QVTuyzUGAZthdQPvtB8/5Tv1TJkgoPvTExCy+a3gWHZcnkY4t/nVe7uAtEJV4h3U6dhntZyBtimdeP50uVN0auLNmr3/+YavYoT6ZQqHLQLsvQTiWwDFsWvHFF5bG9gPCYXDy9MTxeN4KVOgYVLj57vSz7oB6FIpqciS8NoL1/2GYq1MElZACuk+d2wleIvMScBjs+nDSjJVBaSN8ZYwSF+sIeH+DJtUKiLbHHn+eqroe9FgW6Cl5GHFTgQNrjqzIPRgktrZ4IBndYaDI9LY2tGXJ+p5DklA50zlE0LLEmpHemc7YXpYCEiUUbX6puVwVd122iejgmbHWibI07y8Svo3RXUMLksBNaARN4p4kKLYsqf1mjJmePvcBgxRq5q2+cCCZduZFXXaSU8/IAU4epcLko7VtRO/U1a+6xaH56hH3yWd1WSj15wwr30T46Zbw2XVKxRv35N9P8itFaLc2LWJ1hKZ1B+e4oCQvWKOquclZYiac+Xn2ZNfxsIKxKR7/x29bMTUpdblHvOLHtLCEc40uWAunOTnHJRZnYIRe3C32IOhKS2Uo1LIfyQ1a4LdOe5+X319enXprzXvzZaLPahovKWDqoKL6gsogSl8vyLuoTB9iUpHT8E5RJFVRmIyjz0Hda5ioZpqjqDr1OBFtHCd/39wBjerbzJ1b4+w1dmJrfgPU5afA1XD8qm1FHWlKsrHhwDhyZCAT23dPUeB3uG2t6znsdJ6hY5qbkPGM+BuWQLakWNz7b80QLEKpPCm0zbvPXvHMf4qiiDETiXlOdV0rwLD6oqeMS+IWhk0vcMmk0o9+3hLqykY9KEtnCf8WvGz4Gxcv3WRkuIXSM7LMQQyFzqylUHUj0c1A0dmnuuKFYiAPGRsZasCgrhIoV1Vws/z77E9Xg8MGXpO6X4lWXP1F0UbEsexee5oN+Mnz+nLoQsOpf4d87B1Uma/xPZtt1DjHDHNbOj9jiol6m98YBkYEpQzCmxDA9afSn70EqfXQtxzsZzSX+c83MWC8V+9PLYbZafP7a5fo6sMXSWyO6yiXoaJ2janh8pH/w9PfaPq8EyfFO7xv7qTp8JVbFsQSb
a+rrXnaOqH67ZoW63xLE/GgkojvcEjzpXpA+ovL3ZwxC4glK1Fbd0smppZZ0VLZc0/dn+753ZeVKqxLkeEigclP8O6F6w1bRLxXh/jvcGuuu2PrnyOs40yXq+Tvn2nAaKnb+3pbsTJ+o6+/t47XDOABvt5JdEqawsWcHxvNgu7/TwynH8w1bQZlIksDp5f9zZ8LD6H9A//UP/H+cEQ/a7fxfR8TDH/S/HxEPQ+h/wOT/0DHx2H87Jt5phqNP/o5+/0D/Qpn/758v6dTn2zO8X/6F0/e/QFUoePv//2+nyr9j8944Xe/DeyMs/L7c9nXqiv88LX6cRnDYfNn0/f/1VvLvM+OzdxqK9f9xmPzQ5Dm4DH3VzV44c5KBa15rAs6Y/zuSHhwu/3ec/Drtyb+Pr/9fJHijnMbdeRHzfYP8H5zy//XF/wP9P6aZRP/zjf8y0e8t/fd5Rv+H5hj/b3P83yZtq5MZvGyGpCr+y8irSVr05rQ1/x7LdNr3aXg/0INv0EnWVX/j/p8z+a/Pt/z78/+YvX0C05Rsc5GBJy6bG8wW/XdJ6j/fhf7znfd1nuzJv77UP//98PNY/evDND5t2BekCNWfM+uOV3MvG6Wo+A26FO8xVATeZ9Vs8sALI7T5QLTd9BND+Yd/YovWo9C+UoGEUqFvJDHus1Gf0w/yU1vv0JyrSkQbykQNUx9y/6+fVQfyicG6TRSVCgl+OxPSUhC+8Zba3Pcflbs4apiv9xbo4PF6zvJtZDQ+ZIKm7aFgu/HxxdgATSt5d96Pl4KmbksG7fbqVZBKtV7WUlWUbDauqq/IvTUzw5J7/aCvnKjqv0YLW94eGtLuporl3XmJXHZ/OzPkqferkElpUxkS3hqcflkh+J4gIJLagPdYjeNv8N49Mv4HB6D4A/+U+Bny8j/5PQgFx8Hx5QR2B/J10qLjRADGun1NDRQmn98QZOz/Dq09Dzd9PQMhiRPkT1sgbAmiP26Cwv/JuPIx2Ar6+WuRz2b2ZSfZJcmCLlYS5IMlRVpTs8iRaNjE+JeZTGMmMIr+6kDzPzf2gMQsMSblWUu1O7dOp5/z1oRuSyCHFIpQGa0wPrWHET3RIDugfCvCJfiyvrPHNtm0e7Zy7l4afrtv1z8V69qkZ//EwK4f6HmlWrBDRCUQ80IRFi6idmqzvaE0SFthB2SNIl24qkLMyl8NeMO0fBZfn1s+4M8Mltq8FJctCQcH19AWMisUgVoQNhIHCm3TdIfqF4EFBSGtHzJuN6lEZwskoEm+7vhjON4OZDdC0YJWAnVqrKU99M3pei2fYGM2RcZo4khZ7+KRKTHsautlpjjBn6fe3qCXD9shsJFsf/0Q66q8jk1xFnQoHidQJjKpYJx6kCqfiUfCFDdp5AxpBCp7XIIO+97ShUZ6mKzKliu0o2PiuDi7aKw3OKxws4VTS9PKD2UG9bGCJvKx1S4JAduF3d8c/uupj3r/bY169TgzZT4vDaPydedqOV5t8BO13PCwZraHZ5j4WBMIcNH7Jfweh8rqmORsOYlFjvYqMK5ewlI7KPvdeoXlxyi8V5EB9iP/RkXcvCbWPv6uCc0LAWTHFKJPF3YE5z+tmN3WgLBBcc+k8lmWK8QF+xJNTSd+XJWlZOYJFOdN1eo1f5cdpzeE+21NX3Ze/hlEWdBTHdmrwHrILCbBmrzRfMb21RJiEeYfIDik9U5svBVrNuxQa1k6Xc17vlvhye6ieTznZxRRrBVb9mypgmD5hlbIArECosLYjdSS9Zoll3eINp+vYayxnDEcDiQISAmNGsE0KaPqpMkfJz+bm6AhpE7ZQeWm/ldpEf/2ZLp1dhm23+WyGQd8UJGsv9JOhcq8aFB5NEtJCXd/oPJa8VlXv83C4unOpWZyS+bNcBshDrFsMsHG7YAx+mNNbMZayhwOkcbjUafybC9kp9sVhSwXlgQhJQRJQE0vJgXxSRVXq3jO1uQT6oS8k/0dXQ/8PF0KhUnPm9skZ4KahkiJ
uuEHP3aezRNPQyEVgvhajVF37O8FdN1FnCkiCEZhpFbE2gJDG/X1vCohka/rgkb5/Nqw3087zzFW0D0d+OHZ4at5Bb9FrWn8V9kTLOdQWYrJZqfTrWH04AoV0JHjmRMxZmGVfrod6NXED1Uri0Y8ev0VmN58fxHHd8Jc/X4daVGT8cXDtKvCzOmZ76uSbj7Ch2pVurOqT6hdzVd7WqkHf5oXZ5oujiXbpuLUUz+gHrwbL2qTn0bwgxeDh14u053CRaICIMIP4lIAhND7eotZIlfPYUkr61nIlS4827c1IOeNWSWKmpUhO0TN5dPp2ndnLBpGM0+dYt9DM272RuG0DmVDbCOzEiJsnEG9/o5oAyQXD73x/awlwHKJhTqCbTtaRmhzU4TNERsKd0rF/8qusjO3N6T003AF9PGpXZOOdUdkb0+zTfnrqsyXIL9kjKHekN6n4OWB6B7OPOWmYgVTnd36N62KbYFGr7Sd+Sgdr8PIulevR0tKXp52sk/Vy/1PIpFzIV2SI7ocz37S6fWR2TXHUiG/j/Pz+bzPBVnP479dpxDOxTisjmEmQZ3fc3FYLw7JZ68lZxGqZNiy3lTcTFUv9Unb3fU0v3QHCO9qvJJIdawCbbyDrHRejB+F+SK8oGSQ7WnBkGcfpbZci/C28ZcdpNGB07H4D77+5mi3OdvTicFL4vxlEM1qnNOt5EH4haFoPG1B6qMbLJl/sNdXGaZqVJu0uAgaKJiH52c1wuwKddc0w16UAntx0B9B5JS6hzyh1S0LLkWqA9r/6KLUH235w4wXJ5cET8axl+UiMq6/ullfrOj0QaYgjTbRS9qFbq93Sn9vJGCWZedddeIwCQJNV3g6CVc2x293EbGr3gid8N843wU1dMOp/cStXIGVKrDng2ZKpnnnpJM/5r/+afsDAzayjCIknZ/qtfX77vUerbehmJpEkJmTNFD8650/aBVT4psy2npw9y++g+RvPfALo271GFiA3Ww3PGfxdXwASTfI99sFvdNCxMGxBguypvCB/0XRpDtRKIxV3e8nT9ey1haVTRhxm4kA1ZDNVwx7g/U9FGlptgGEAPdYVTsL5yjXvf7x3uBEO66UPjfbP+ABhKfaVb9wK8nhjN4nIH54wYL6O0heVAp/nQ8bNMEBSfhrkU8b/X5pBmDviyreX0WfxBFIgFXv3TNgXsKKXfXQ6Hw9u7R0zO72Z4CM7KuEF9BhYVIAPM1rlJlejxwKcT1Gb8wLF+uj7Rrn3WXSU/OWSjS5B5NByFsZ8+jkX8/29+/Qu6E4c3L0jBNSjNBnwdmaz36oxIUvCLenIX6JZ0TlG/+6nvmLMOCVFJ35f5uqJuzzulm30yq1Bed6JiReqNXYENWXYE61qXilu9H6n0lWv1ly78+k1LhSB53dR94440hmY2b63hEKJ4HcZNEwTeo4gyXBL3pAHBq0QikH4BgNYcYxoter+BGq+ymQsjNmxdSefapZwPQsm4+QDQcLHTufpCiyQzOphO3ykIMitFz8jcFMyExyXrwIdomnAXGvJ06fFIhi+YvDD7KwK/NLScXCXp+W9/XucP+l5IZVwn4R9nThZP1qByHCTMShdIgIaCWti/uuvhd68qnXdmp0xY8qUZ83JNKqTX9etjGWnMqs6j6tHo8ZQ7LF/J0N9/IhGaFg0pSJhhZq+S9lXjVoFkEH3161nJGxg5jifnl1LkxfEv0t+HzcmY9X7064xw5C14X/+0iq5grjMdm7nmJyQNtgA6MMSkgauFSOkTGIBz8q0zOqnLwddnSHdSuwSPYfBAY023aU/n4pJNk26MBDTcjsg9C+lGo+sZmg0keahqfH7JgonpescGh4eIeavHiAK94b8EBeFBmckKls5BY8XMrHxyovhWgSyjNyKrfSYML5W3lBbTL0Q35iNiakICDlJ+n55IsxVf4DGz1p8xKLgH+Vwv2UUXa9bMyvPaa/W7CdtJlOBcXf0VqT4Yj9N5qkfuqbKr38bOfbx5au
W3gdJbAXp2Kq1pOaSM8xO0iSEjSPht8oaiJHywz1n44Bcx0p+LOdqlSz+GNP1A+XqLRI8p8JArh1fgLiwsZslm20gtZjOlmcvQWXc71nTDHl6PgVced5Dq2Cn0HVePl7ee33ZclN72IKC3/FZaEYfYM9V8QVR3dCQrofnDuUP9PZAHU9ZshCQpUUvpgtuKJaOtbpuq/+Tm7KmR7QpKZdH3iiHpQi68pOE2d5zRSgzsfD1N/uhitlF56/lF/tlXkpOHYmGhTZVn7l/UhlulUswHLbWbQiREeth/spFQuisA5kAPluBqw+BC+JvLywN1/VMgyvQ6Dy4d2CFdJLKIcgZcBTOap5ylU6EnGLkFpyJYPPz2wHGR7f0MvoebdoA76R6m8KLUL9emlCdv06/G7aVVYXAXhgUeO2WvYo/JqA4nteJv/O5nAvPvN1jJ+CVxI9Y9QVr08Z9YtnZj7N41UK5PRQWikx7WDPXJqdG2aXROcssqMdXWif3UwRi/wOhS/hCjwIdyzVP8vrP4n2d/JM9Ymfs5l+KPsH8hewAYtnlJizB1v6oLnJLzzuDcp2fFpuLEEDBL/s+B9lRAyx3YaqRLdRArr5xJkrSgzUQMsooWiq0X8HVZaUcM0vNQKlMqH1jGuurLnOKzy+dcp4IkoaQLosQ17YHe7tua23ljMXa3ZjBKz5Zw1R8+VQ+yiGVAOsO64TKODx78abb0SYTikbG2Tw/3Zh/32Awy8zuV8k/co0M7JFCdEt8sq6megFDgRz/gfNf2uAkKk02YKlMVMii/p4wR3/gqC52xztWLPWXS5GJiJ6kpQ1MSYBIKm1tOH7EOf0sS6drLF+0UR9xNVGfCI0s6EsZYGyO1jgBCjf17lTjmuAhVM9kRc3xNfmnqsohoKavtEA1RvzXd+I0t9Y60t8bwuBBMBb6ryQrz9REoD8ghdHm+yGDlhz70OONucDTn+lb9hb7eT1eeFEnIAN3YgAXxW0edl+6iQYiheT+BKFi8+LeFetTG/gxTS0luuJZdlaDZSmy/ewzQMgJe4/uBFr98VZpzs0D++eTUrFXjpSxqslf4FAtIUMZlxe1BJlNWgEwtXs7ipSPnBVa4opC1TTHd1I/AQGGu2RkBznomM7KWW36HPsy9Xvo7VJIn7LvPbcv21ZfMj57+M9GyktRqJVX3eoCVB9/Lemz9CvEFe0YLHwPg+54yJRxY7tIM1BP3vZAt5eH0aTtd1U86iHjc00jBT8ZwuMOei2Q3JnbiI9EkBcbGSvGxDJS/jF8sTSDqRIEOLlGEaUS+oNheMLRa/eJzkdujWjgxGmI2HH9nNSPao5rZUvBg/BLhdgD4HzGjixKTq85UZRhwXr3D7Rvv4CzNF5hR8v3rxeI8WOvY/NzeKS8+ccqHEeSIyXuZ0iNNxJxUQjlbLT5q2jymK2SvqZeH4PKu8ssZ4qBQKpoP1QWY1UkXB9EcHPOfiS7LJtBalxPGd5AwP2DsGllbhMMCLjTa5EFpEO/7VRpnZHeRZ0KF1fbuGL4V87ajdd72RPvw1X9H5IxViDmruIulvfn70+v3FTnfXVZavARBg7u1si2b4bRL8CxDdaRualJrqVAV46Hu/9p/cRRm64rBP2zi1IzPE+mgn9bjb/Hqzh2hUwMQ4zhdkgfU7nwhb9s6+h7At3T2uhG87AENdjQYa89aw/TIJrgcm636v2B3m9/Veyn7NJ50vj/3G+jeBprZSf1gxGRi0v3qNs4QMjLSCXbaMsXDWYUTiev8zj7SJ4Ef2L04pPqYfCJL7oyqr4jE+od6IS8Y4hg1AyvfKxVJj6pZwezoeTqH2nIU1X8D1xDvva8JHYL0dJaKjpt6bmuVytipE2abvM5YeWoA581XDJMekaUSefC5SqJRhxJuyGJKbnjeY4c97Kv/MM9KqkGlIyUYJ5EmZeMY8Yu97l2b0P9dn/Zuk6Fh1VduDX3D05LAGTg8lph8k5x69/9Jm3mnBmbOiWSlWSWm23KvGS
nmq5SYBd8qEhZfPG3XZltobl/MUB8ydKCbfFH/S9YEmIkyN8/c7xAvW2POBfF1iq17oop0Dt8Xe3keLgiT5mt2YP1k6PSCsPwd91NID0Q/3oeaLK6tRHle9VZW0mjCJJHFiKCjoCKY47nZlfFPLY0vdww1eCSaDlzdYjSSp/Z+C9jvvaX0wbmUGjGt3t4hFXO48+aJz3GQVKDbpuAUsgKKenVQcvG6XhcToLM73vKG2OcN1qaBxVOws3N/sboi6tfIVojzhjN6/i70oLoKJSHGBTqGSAu84BEW7pATi91+cXIwYDqPptj3HKWwEWyj23NvvQCaVAe24iRi78/Zp5C9BI5hFymdlVx4FX344AIQMlm+8w7P1tNg9yzD/0yQsUnHIzp3x8AkKKm0IFPf4DAMAbRMST0rCA3BdO1jpwzuDoxsTltXiXMsu7CHWZ26DgktdGGt4kMiw+91dv8/PgotWf+ZV8OubWIx5SNETUYMCj+xccSJ2fW111ilzuFLuZiujsNjxLlrzFrsJbYAZeOXWpUSFm7RlPYlQUfnjwRkDaF+7snkhN11y1Xgw8dRkI8gTkgxl0OXkcNvgFKMZK6RPNH4qAfbZ9eb+Mezd/Iz/tuZziXCP5Lme0H9q/GSsaUkc1NAg1mJrCtEjdHr5vCuPkHT4KEw42fU0TJwrdiCUeTPH1jjkP9DaA+lo8uL8WykK+yk2mJCxwnEiYsucXhCrclVOMXHTUqEt3Y8NLAEzentnIj8QbouqB8w4EbSrGBdlZ7gM/rmtdWJ5/+7WORmIHc+BkMjMwUr7e8KfjsxyrsgvieH8qZdlpqnqTAJmQWbwjpjTzAK6T6mvF0SvRl7a1+HuM9nwRtbTqRGLsDRY48RMzktld1suwxuCVfeMqCEr44P4qyDk4luPX+sQtRMn8XnYwpjKwlJGcD8IrFZ36w7QtfT+7Z0KiMSS5lm8//GpfQfqDPsIhy/P0O9Z8OPENmeiCDZRNCw4mGjzrT806U0Vyj418BaWMgdiQthBIzCfBzDmpfbWlMjl2tMpT4dCEY2jBVfaGebzSMtH5OFgLxcunvXb+moSV8Vy0FYUET2jnD8RVxeQJUfk1JC1D/y7CXvpD+0aBvn8CymuVC2vsNOD7Eogl1GyHMqKcTkx0swRIdpKSbgd9QuiAZYbCG60j8lXW4fqtZxw+MqYnap56ZgG3mlrfHVhUZcdIQ5jpVcc2SFFVbd/XmuLOzHB5AxpIZmpXeagr7fNtslDelqgOSHQlkN/ZOuHQU0ElKworUSlVDAoZZdUQBQam2BzJ0D3eR6xo6SzSyZq1P+6e5EXzLpjK9eBYRB5CQjmHVwjwGsAFNkvtvBe+12iYGsnV+/JFcJYFBY8gsQd/v7LIh9CQ2nzdzsiPUWKB/aAudVNh+W3c8QfRgaJ3+Da6ZfxtYPx3cifUNU44FUJGCJ53DPcAXDq4/lqTaSJIvXx43+RHjz+SZTaAVSGMdAqNdwToNBNoJuElUWO+zdYeM8kPAbNJDjDKWEEPhGO603nftFPmAT8mGI4ZGOeGfUDci9MD1eC3YHT8dgLlobYvYwzpzTMf8GViZUNcApvbmGAubFHFkWnlDZuPBci+PUmojEUw5GT2nQOcEQNAW1ttKEivrmP8pugP7K8vtbNV72aDyOkOJlrucTUceEzWfVPRe4+xUnFRya9HF84FfAqCUAbVK2b6yxa+PqmYuOVKvHNwKMiRTSZFVnYSfvyTT6o8HLFOw7/TRowLwrzuSBPWg1OLbxIOVLmLrJET7VGxyOk9MO2XpwawvTcwwVJlYh2iCXNSi3vlF/PE91s1072aWt4/f3ccJbY6CkzX4+X4YvQ5fgvx5OTV110U6MG2faS7feHepgR5O6zW2Gu8AyOjM+3EiHoEg+WFbfSCTu9GtjEJlVYqsj43+tnmQK+iiihsrRS/yu3VV6uwajVuU/KS00erS5NtpvH8kIsMc22i+nieG6+XQUM7Ldx6WkvoaPdd
V2JhY6GnOH2uqsH2OTTuN42D9PLSvQxz6tVDvQVtn33AnkqWeoDnPzH4YAfIUvl/c+QNs+sX9x5mDKWMX7ZJblTut3t/8+Jmi74ok97jcappd82Cu8uB2kuzj15JPQY+4MRxRasrcW1KAyYUMD2Ww9wXpxWL+mlNMPyrBk4kvSJTCXWEypU6s9UqWpS9pmkijpfDSicJACXruFT+yvCR9kVE3xNYg5sI9BEVbsKHt/9LZF99vTVD993BBKWCXF3Hgh82RcNo4fc8G0690O0wALU5Ac7hQA+2pY9C2juILXeW7SpjuWJ/OBUtHOx3PvT9Lsa2jfwBNf/GelnCiyBEy2Q/QoX9lZ1Ht6u/ikNn010gW/cSxegS3Riu2x4qxp51+YmzOmGn4sfR4dgNuglACjunPP+Srq/pVWrYNfu9vCze7BNmNm1ehT6worMD1z4whef8xS3qB1fk1EovJte4VvKKJvls1qZqcAzGiAmEk2HgwheWTJvcXCeKFNCVEe8u43Pw13XxpPj9dy1UkBK/hVAbJL+PK5oJmjNtj6G7aCu/98J84kPxP3E1eiYxmw/mI3VnG7Lrn+R4KeX34RXvI8QbFQICwO7wkJtQC6pu7cY2N2Ocf4jpNGzqq65PHdhHLLl6LeFqee58PSsz80z5gzvXrj/iJ6KxBaCoULsqNzKuCXomGN2iT5mjold+2ciwghpJCG1CM/WLrNYhu4Kzd91fVT1X69it58pz8ulVZVn2xglr+mO/ERVbYXSuBZuXKH5sqrrpXgBO/hEz6eRcydGLUGmlJSIGj2HPJKtfamrtclgS4lacenJDnX2uwF2+Ts4MnYy4QQX9rXfCgHVvwom1jPWwvuVdLAeYVsLaMoplOslbiO7B8MWNl6SuLJGhEtArhQBeRlk+8A2HfnWKyhKsolhr/vghaiaQ/27ZGhY0a4SlTgMjFpWUdV9FA0WQPoHxg0Jl8sdcE+rWdUs3wE6/P6Rtnjxl7QOJfQz6yH5b2tv1XLjOoy8/wH6fvwNBsQJ07Dx0lfepCyYYX2IgaVLL9goYMcy7Ir+EZlesv0bxT8e3OaXeg3vUUF4xWmbHwX8Px7z8JaO/gaVMfHCBOgubb9zw5RqjuEv6G+P1kGwP3phGRVgbbAB/a8imu242UDoG8QDvTG9jLqWWRiaMSx0nTOHISph/qyOO4dZ6zFTx+PbV7nL4JLPuyJ+wwBG4uTxn+6oXdiN4kSSOY5FbDNHa1/WhDf1QoDW3KGU7kBfp9EbtoeajIhoRYHfH53Xo30sofcXlMg93dyYmJm2HJyUrhcbPNv6Cvve9TjCInwd4i2Pa6odr9znWFiRQHQq2RduZ5rjnShBr1enlYQybeEWxC92VWMAsHa/K7CdAhksbS01WXa50YWuNGzT7QAxEyqnMvQrBRlhi/RFi3aq81zmO83JwUNuItySVBPUjKvh0ysaT4wlZimGtpgmJEWFAgJMn9UtM1JyKQAeIuYJSm+riqmkiIsa5if6Eo9xzdqdwrqGUQTmtN6kBolW0t+oypb6frGxB4qaGnCpdDHorZlgbqeIKLS7bLdpQ+2V7WUonQvzBvg2CXm66UPu3uxR/1HN+mewZjUGbowB12Kg55fopFJv5EUH4irpo1tI76CxGkCCfMdtZ5xi7263vWltrycEwFd2j4qR1ycMy8wlo1mF+Xfr8Noi+NqtS6E8qj53cGG8cGlroIWlwmE4w4MvyRexf4RTRiOzvwLGdGZZKhrnANCXFQrvjFyk/jnFu2zXkAyrCpg8NBV1InqPaS/Zfx8nIfIMb2316jrqAMRAIzKFz9PyDnyZq7JbX8SUXVT04A63Ydwj1av03ATOaliO47nxbDjGalPCbP5Ni0h28A+wR1pI5VI4vf0L5mcGakNiFO3u/g2oxZoa0PV6HEKr1yKTSotsg3grJ0ePqEm7B/gtHL10sOT8ZaSYiELDY1SrlkY/lzZM5WM0xHGAcyuN6IieSRLFrlUK24wxf
vCDlCRCFeYA8JRjBv3PKAVXl4w2K4Qf8eacJ0aCQmj/nr3+EiE8p+OlZfDlRPAMDKh9LxMaG8ocaRcoWiW1MP8q0dQJ3Ald2ve550uAVWvKmBs1Ib+Y4dJtvoeBwTMxWuffR4EmjPv34/AnggUQ7cNdz9Gi0vhOc/isFAaCMEJwJM+kqswcdSCooNTwz+VOffetzGML5iG63R4t9f5Cwve9NSCIddbLD7xAx+E0Zsbz/K2joSNYHV7LmRYC4eSVANdlUp3+m75xb+bwwaa9wNJYei+MnSKaEfUwdbiQZCELXGyPLZ6lF4/VKts0pgq/X1WVlCaxaas4cFzQ2XHZI/7S6wLZi5HgizEnCHFaAgVQYLdmr8zjVTBBLB40fdc381meYtaHhKSqs+ED1u0lgnYZUWzZhXZBkkUgVZXqHZ51K1ArzEjN+/BR4mn7m5eys15FmvmKxxXbC50FMweanC7YmRXPPWU8CbDkyYDaUc2GY5ljqbjLrGdJl/wy4/OCv8pY1Qnw+xOaMYDoildQnJZadM4rCBf+1//yuOIRmlileIuqVyxmxamSJN9aohX+J/o9ry1JamtbWnZG9fUanX/nFX7fvHdQbd4x2UfywukAH4QsS764ztUELHJMp2lSEVBB2O6aFCLNrwj6c4kwJFrudpfqa93EfYy+A9UOmhVAwVj5r5rCUu7gfSfEYjHLaV1WNehzLMpVBR7wWEbmkh4DS7FKXP/nx7XPQM3hyIcSV+uH7Qohld90NQfUrfZFv/+Ic84Y4YWVWzbQy5juwA7V3vR6UXXqhHleC42PKlfuPXoDDxqw6H1HUeUE/0eUsHqp/DEu+SNiqcfN3+Xj7pzlV3jAaL04vLaQJkuozHTfm4ZwVQX2gaRWDPwPgBQNM1UiV+3lQAnmdlul0AYrModjs2Qidb/JaLXx1kcunNSxeV+GE288X1y9vr39Vo/wzLaOIjabrEIQOIvSmaWAnU5rHSASPQ6S6KehwjxayJlpKvnVXhDj+vnHA+GOxQkhk8fPFeOenAAMrpckRtsS23JHHNTz/tHP9iwxXEi7Z6TJQZOKjYjQat1vhSa/7gdhDDjBImcNm9i+Gj4K+h8E/ENlsQYVhP5JR+2WTBEXJ3C3wr1jKLOPHgDPV3QO3Dp8yu/I3ucezhswfJzWYcVbz5QnKbzLXdt8NWVaMEIueFkur1QApVDzUAvyBMzHg55pZ7LD0giPGuPPR89azNkuYGmx7adK0J88WjXIB31qrPwMp7hSOOa0ghA9B0LEvkXt9C0efcZgJ5CgaXP2XwxYG7uFI029XygPIg1ASBFUQSmIHAxnhQD4YoBccplsHLSJejgY4bxQDLb50dlpqGmODc//GyFXOS9RLjdMePfQ5C2S+K6vUoBprvS6zVM5FdqV/VQMyNb3x+VtKoLFe1RGJsYRwG5Ah4SpIzI+JqDstib3rihHKllRtiBqbgrI/Dld/QRMORlvWra96MFJX9T6NBv7JFL+PyUUw7zaOE+FeYAqr7PzsbTTF3XYCGIZPbEIKo72W1KTbV1Y7GcQkuJY0G6XO8P4tuHEOGYZVN9k894liifHyGWH64UZ2CLGL531iOLLN4A7Y/ifgllR2XCYlVAfZV6PlQ749okRuMNAUSLla9dtOy5ei/US0KtM7o223QCPTaJ819NOL2jOb/l7iI6H6ub++IY/tovjlnzCc4q9AkMY5kCvxRGtuFHws5mPAZbdzVdAEU+7fvbntJXJWS/vLEpyD2T3mz6UjckKa1IaaW4EzI/N/dZlpatdcMu324fTykAJMqvODd0FATqQwX6a7wlWDvE/zG6DNKP/mikoJ1VkNAkgv6kT7/eCbciCM+tGGE2qeJrn+7bTJvXZ9IL4XLS8IVeCAYlfLzh1z4V/FtM/q7Ss42ChEz+1lCYmSt8m0t3kU6DLuTkIbAj0FJP9uchd9wlXCKEp6v6ykUFM6V435+HP/KyZs0Sgy9cRh6WPaDsFpkEEBDSUX
74wcQRsB9RNxr+WjpRK3b+Bl9QfK4cA0EB8b1VuGdoUgmPSInG6DNaQwf0IHzxfIED5ie9p+08O4IGoeG4fK79HJ7x4QM+1s0Nep5dsECidFSmW1Gu34UMXr+EHSGjbCI4CPCJ8qybQrzT9b6n0UqLPoA1d1PuiU8v3WVFBRkMe3f7Yz2LpSQgE9p344aJ7e0INak0/WL8E/mqf8LP876DWzWYrzO//RPR5/Se4FHFDaDF94WcDxTaczttc40iHUDoWaCtHSCjt+niEYi3DiC1l/Y7M1K6aXFaInZ0v0xGXxPfcrJ1wxmNdk+XhfB8r4n67p9mBVoplp5x4qCeL/dEq+f9cGcpEcNBwDa0w7kiAP9KOStQ6Vxnb+b+1FwbKBv3AvrqQNplZaO07nwXMR1aVrLpU+Rk63tcqvtQOYObFPEWPd0BdyfTFmP76kS4ifGpFdhg4YS4IS+YV2zG7NOl5rzG+LCr3eyP9KcS2HZmwg/VHFaUbOJSAMLz8bUPYMO0ZtbiwdENO8nXsjTfnCXf1lY+Pk5fW1cafxcbsqGSuvHH5pPb37cjZy0WJ0UkoWM9yyfSc5mdaoBbLgmZO0AU+Sqer8XOOFsofAv+aUFg9ZWHi67eP5vbZLMvX5MYh9XhUBt1ny7waZ0AwSqfyymZbNUdDFwyUGpegmKS1aTY10Z6pdoVAyOe3c5kytOsM5Gd+XduNA1xvjwPeHnPh6PMhPtb26ifz0AorP4TOUlAg5+Xdk84dAY7qAW6K9xJWYSlNf4qq/4389Tji1N9hcUXVhn0Cx1Z5XUr/uEvezrV+yMlFS/wu7qk4WTu8E7SuBSACyxp664LK1qVzAlXvh2+qWMaWCZnG9b6lc5yHB5VlQQ9Oe1ubj0q4EmaerXUXOsrm35MKkQdUjIlaBu1MERIjMTvRtdzb5uSKfkJUIElW0VucfK46eoMqb9hULWkS9SvU9NUiOj6GJaL1W1M+oc64crJicNCJB0lao9yFkUsEOiX3pouQrX2FeLKV93LB/7WFNUMyNjWwbw9IZW76LURr1Ulr2JWGVxRASaIkk9s1jSzvHnCBrXwr8DXFerE9iqkffrxIAffHlRd8GpzI6lciw7a5XNXbISvGqau+/LXv1yKsBUIOTlNprpn2WQvfrDIZimWBgo9md01GXmybr1Cy5dBGDvAC4hFbRrGHV1ciDfyArJsSBlbjChYCx35EKgcZ9Ngc09L+/ZrjfG+8WZ/g4oZcpmbUnmQCO6wIa2+Mfa0APN3jad93gh/7MZXMLgjqb8uYfV7oLdOpJcY0tN5avYOpFccGMMEo1O7HOngy8KNrsQXYBL0TdVvwdaXs//Bti/TeiVLhjd3cWwwwkeMnvFYSx1lrnSIuB/ONUzoCG3HEhHF/mJXs1Ut9P+CLvqxjC7pVQIEHP7iDWzx5Oes6iVZApQDbQ+vchE+0pgg/WPT3Dh8tULCL6SdhfYuzBO6Pyhaa/qFpz0DK3wFqs1mM3109BmuDhKcS8Iasp0O79PlZ2HM9W4bM3bFXBFSoa7cKQ8mbVgQsRykSuyBuUO/VM/A+sF5cj7z8fUUjnB0qkzUW547lrxt/VlJ0QlUatqdGg5eND0XjAX622peud1MwHn2uIUVad/jZF85dV5S9poiHRnB6v8PoNt0g3fF/pO8qAFFdz97ws70VnhnwJAdjeZV/56/panH5BdWF2wuyyVguC6uW+ig+TmjUo4l8j7VaR+Vx5wEHePJq5RURbOr3ErLoxfdpfthDJ1R2v3CEvnPMGI2NEtM9nqnrowwe54p0D9q4+GFoIbBJw48/sPQcrtD1EiRj9cUXEem5Yc7RfQ9UY5ptzLz9WxpGhXvczZJlkBOq9XhN+/eK8w5MU7f8s6cdhPVw27KYss8GDLgqav48eKYSv8fqfiHCJYVHejRVDphQ5TBxE5viienXepBT3leMOoKXPHlQVX0pTdfsBz4SqqtL5qld8eGe6zfK/uvsI9BIcXq32+ysZ
4wGj1wV/qrDW23lsTuLkrQUb7CllGr9voD+r56UpBmz2bw5Rs2yRr0gnVEIzZyNoHrdlQCSj15lTtsmUUicuHvNVxggVeFdR5nMo+bQs7wJCZFVcu6xgJz8CHxjJrzK3+5NbvdZPfxmS75zu4hcpxO/HfrXgzfkfmvIwnCRUHQxmNNOVGSf0EYVPHRTGdvYKC/w31hxGfUyGXIIs4cDBqct2ycNh6j95ZlSq24llM4TZXBPflkDvO10WvloIEZ4hKvp+wDWmgq2/Ar32GwGHnHt9rQndCYKbUC4Ig60zjQemeSvxoAHTIDRQ2Vt2tp8lOwQ2/8wurrslyn8trn+HYpQ3Vst+yjgyu/w3eUacpWEbynYKGlCYPTrJbHtpFsBoIaFt4WEnk7+3Dk4aqHscun7a91AcjguzJ7J+NLVMM7kAskAwrEcTvpEPhD2LAbR+bdm5rtqQpSkoYOeEF2yUW7nAzlz+CaJMBsX3QSTQiM4172c6xa12O/4MWKTdH9YqlMw10Vca3fe5O+ZENJFsqpMBN+vkmC+8iNzkQDG85Pe8bpLWptvZSfMNO4uYqGoUEEV3M6X2dyG5VauK84UHqRNlb7+AceYx1ZF+GhhXoVYF8pLq/kUzaNWDNbc4rKCo5vSfrHOo4jMg9Hac/GLpGsgNgsvPBu+yHQiPGEO21p6rH9DpZ5tQQgS/xAMLmJdZ1SJ1WVsN9OvJ4MtP3+SZ9sISsdaOUxn/y7uT4StOw/hoykUWzC/qf4M/ts3Q2qsP3oVTzJ/KAkdnlkePSpn66itD4Y1lSPzkWGuDgJgkaaDzaljKKmb7CWr3hBIH5RbLTy8N34asRecroCc6tvWVOfo2LXbi1ulJmLIXzWLyW/qSbKrl4oPPX3BiuZjKjnvdVfkrF6xjPMcL8XLUvfxTd2RfYCDUGJyV7sZO45fAgiCZ0BRs7JOf/Pr8BWUsrzRL71XJF+pNzx/AhRZCDCJEQGj2+6gKhEEcjt6mLQNm5UKI59WnUhzlcId9R65AygA5ik/v3lAlNcUU+ZCpjktBEVV09a6oWtXYkIPGRkEGeeoCRhywgLQ7J2+cUGIwXUWgvp8fkkiaoUr7xfFESRt3R9R/06RlRFZkuuNtTTGIAf/pRtMqO4ioFDtrFMhb6GEaVtNn544GfDYPX1bQtbCq/LpueBwSoaHP2AJmrulch2fyxmA4hJow4q2h0JbE0vgzk76qVd+1n9hfPV22B2gIe2EDzoqpOBC+koAG/dZ9FLxU8+/wIxVqFUzxQuywP3DR9T1mS8G68BZkjhw+5ddu+4OGOXHc05vyOqCytiqvhYXI7BpnisJ1giPweQT+EXhw15xrenfb9uPHKxSySr1ErfekSL4hZzYgU7CDNKu8q5uSQv98XviN4BZCwba7NW1LnO0wQ/wePtSH/oU5I+hkBF7R4/7rcIY0/t21foeS0c7rObJYsZEjLgbiqXxfS8pVg/ECnUgf0s9a++dz+/FG2I3Ojsd+Gc0U/WF798SLudjVKyWn1XxpZdgaP5YhhgCd2u2AO+UuambNsHpzo523/ZeQdSXOtGvJ56vV47CokDkrgZyn5PuJuZrgQTNJ2t0Qk2kDDpbbhgRcsKZ58Lp1/+gfNwSxjmMD78yJgE7Mooq0aeGjFRs9apGtJCidVncG61zLWXmfeMsbVatvL7+jTUsX+36+htzVxKK4hRuqJBJ1opraCWkkoZ9XejmkXyF2X7R7FRMHwJyssfFzunIOCbLYBmoIq96RVY93/jVn9LiRrrwl4ONcWkUrjuLM+Y6cF+2lhsLvFLBgPIvHyey0eqX66DwplVBxys8EftojVo0DRG8Z+/4oE0B6Rr3re6ZExaR5tUr3J8Ik7NvvIB5wHLT248amzOYP7cHOmQlIHLvX2u1qv9NzHNHmpjhNWhQET8CPRu4E6/ozl5uPxurw+61SX0up+aHNDna15SkTo7/eMNCVJGVmRkLOa2Q/SNGQfgxeopNQ/gej7jZV
ysvLOrNksgQbTRLNO77qKq2Uk98bBXibvFGaIfcnfX5LGdn2sDS5WnNWacSUvUr4FyvE2euImIarxQVoEzRENgKXNYzHYWi08LxQYL7cgdSODtWTPzwTdqwj6ch1N9VOmSxIK6jfPSO+SHiMklkz+6tSEMbtvw2Gbi9O5q/ovEyxfmVhO1aROe0HdUKh7YBR56yfB1C5vLJBG9rHDratA7Sj2U1nccDVQGxGn/uF1XNOZqsuxBb7W3DRSV7qG1Ow9kqdW3P3WENM5++YL0sQa7WEKj7Mo1BtcRUnh+CJpkRQRdhKICGrSciTtDBcY7VnVtlkXkEM7rB4CRuXwNdxQYNN2i/nzYEfG+YkaRWxDdew44T6mp9IvnGx7hJk3IMPRKHKh1ShURbcz7kDh/jt3/JKsYKGMEMWly8FgCI9qBTuCuHzJ2fPMxRwLMEgQKX2vOxaNjCqNpGgwBxYo8W8BxrkBeKio9NfIj+26SpPZjt4y6TxlVL8izmvK6tMWqVnomaq2hx/12B+UeHGssdcx3Rzz/moFunTvF9mfQM8roubbKd7HJT3Gcr6vDsVWhTbKUXK+szj9ZnMn0F9tuWy/Hv860awX/XmBejH60AjRjYOLvXuQ5DNUIcxqQlWzkSWlaKgv86+8KO5CJ3s4gfR0r/rdegPdZhUwWHnlFeCciAfJRarjrEGm8DQronIwlvZuYDAmyrjpAm84RuaBgn3i7MU4HW/adlT6leXH1Ubx/PScyxE+MfdrpdE/mUIPjIcDR9GT6WsGUsxii9+Afptu75wWMTGyLPOYFYOpjZmP1dD1MHuWHaNE9DGEx47Guc8m87qgvBiodlbu6ZV+PmYW6qnvUOAHmXEBU/wk6upsW0Vu8apMCLqLx0P5GQtJRqFzDh9HeC2I2S6o0JBl+Nwwgs76obWa11F0qdHnrPd6mmHj8ZjICbpcFBBF1b5r18KtRW7TWRPLXz4gP7uEQiJWJpDtiXvoI4/d10RJMSc93fIzn09pusMOy5z+sP/tRs4nwi2gfyc6cfgiQ+cIpF2IhGKCzb6/LC/fE627PnAjbcPHXvTFRLC8Q3QfveduVSQTpTIwF422flSCJ7X/aSYcqTU7yGhDrUFZNPALTTsiw0iNd6QT+AxLtmv2MnxhpTlG/GJwLTM6JkaXc5Ynbmoryvhy0T25svCOXT4M5uz4cYLfsaMX9AaOsD1SQBYK0L6q3HY9123quy8RlBIObksfiiwSFzZf0csNqTdFqECqIrkA+JIoo2ewkqtg2t51I+QMJ+cwUdqQ6ESIagzbaSJL8slxeKixTXqJ79Q3Av7lc0u8MuN/LuIeLXhpvx60dJPfuWW0185dyfMGwu6eOJychi3UODy61vN2j1sgg+U1RoF/SQMMCj+MZ8Q6cv1ekgaXfb3GZmToLEZaTrX1BMqqXANNQdvF3so52T1FXgu4d6YzA07jUkC7rKBWBgKsHOkf/DNuKNlN9sWmwezy+cFB8j5Mhmr3T/AKekHtQv0ub8QUXVAaAsARJVwVV5eIRLznBJaIIO2CJTk1OeIw2p1fGetRTiUxohS4HQn36hGU/KLsJkOIkA0DUF7oPXtPNMcIguznp12GXhYloikMUJJkCCowa9qgbag6jlkrkcS4UFhw4581cFn2Rpt1xzUovhxv+qbYF5Y4unnX0bvBhYA1lIDa3kr+KS9lG2Y0vChaKGRuvruD5vz0kRXPZbyiyECtAlN3ZROF3abvOSN5QGYgypAHHykq6mkrVqbFbOabpgLqIvmtAD0NzCrOfmbq3wI1cQNffZ6tbJR+/YVexMl/IUhUkg3iZn/W73IftmN9pdyQjI4NaK67+JZIta2ZH/VquFFAYaTXrSd5j22gHQ0e2DLA57PRWt8VXMSrdawU6CoLX7q9nMON1w7PLV+omlDjAn5ubNtGeZ0YBHx/MB+6eNvUx2z6gI++32hlv5UQRgS2SLXY+X0w+VV0V/3kgWjnk9maP/yIf+0wvaW
4SWBCYfezFd1uC9Z8v7qvb/FYQ8yt45JLIh7u9QfoicIB4HmdrYyc96sc4My8YAbU2R2pWE47RmCoeABHGcid3FctbuNdCdcYx/VvHzYlMQBW5c82CShJKZqvdK4HSa2pcsjf3N/9RRrTVxjqYf7hvQR0j7oleoiNhnbMVmdGi/Q8oF4oSPeiNspf1djbsDNhjwW2m9+/l1oSbwRh/J/sAfthQEKAmQsZC/lTdrvDnX8N7ddqh0caSODzMi9YwAlgmK0hF0SaUmm/V/+PmSvZBm7fvOiQOACRkNiYzRf5G/Asf4O7137l729oFe8AdySJHSoRiInUvA7ASzVvnvi65bvjn5GT7tLYXx3ks2l58Km1zhpJT+kByvG5fi7hRqFCP3zI291wPyzAtUSpXYdULldafwRFvA79KD9iWpsbGU29zIjwy04AiCPTgLHHl6i7guvb88zjSinN+yY6hXimL1ME4A0RqgcYCHKgJRv0BiXFOpw7e+4yX7S5rb+YiKixqW6o373RDieuR5oXm8DmwMKUiH4GHLEorx8HQhYfPekiKBAvcAR8/qPe05umClLPn9En9ZBRxcCH3Sek9sogcrh7E0k5ZQCr+FRus0a+Eh7dMLDpnZpQmpsnvJ5GJQG+nzrDcciNkMcSP+oWRzCwOiHwlI5mA8nIR6gjkxiPTEAr+G7clsIh5gyuIBEmYa6czIzKCRNA4ZZ4bI+oxrRtaKD1uQtLZ3Q6NjCCVuOame9I+0gYC18WvDbtJYu/YvckDU4dR21kGUCTtnYR/la5p6E0whY448QMQYSj76gIMpbMvdOn6FJG7TKm3WrhpNTr+I7/fYgep6M/r10W3z0VvkhgrUdH28j2zRvzXIu9IJ+XiZMBsELMi2up6N5mhf0oTW6ZQz8HnXqIg7vQ2WUdck+CVq3jufuBPVD7v2OKo97Ttj+dzAY4KIl+FqOmAmBWb/fag+Eh5Nq/ndvMUfA2QU5ah1T7ZeKi2845CMhN+DKH6GoCcsU5VcYC2rn8GBdh5QuttAJQ+uWIRh1sxRDbT9ZAtcNozfywBgj4NYHik2E2eOXth9eJc+HfVLH8P36ktgdC6IlM61of2Udmh8QT2YJx0d4uwWtpe7nb1wQuW8wnxGNzF2ez1nPMRWCeUst1KHOQ6QLnb8WPYGnDNwny4IKJOZLkrRCWcvq1o/vzxuX1v6Ff5g6ou+lUzdz+vyNtV+bB2dQ2aHA/iZd1OYrccBaQcD8QQrAamoK/MWk3OsM15WQLDvoeXNAc4NQNCN40BAA9svPY++X524TBlnjhiA+aBw1uwQ/kX6CoS5t5Jbrjs5IBpuRU+waU+MnCnHwXTBq3MzqWL1haa+tCnKSGINZg+pSTwx/gwqx3fbFCQ9l+k/HIEF54VywExxcoxBIpXSZkCogSrGvqmCpr0fPy2q9tDZsP+nno644X5E5PDhhJxwgdy/fS46mU162ESs6i/fAMd7A+6tuz+qT1+8DvdR2ZRwqXNoOfTzFcYes+54bue/Huauf9RLiaaHBmu4ACKpT+3DQG+XNTche8nxjPbJ/yBydthNHb/zGwxEW/sgYWNMQFrxk4ZCwHOj9VJJ93JF5L6Zl/VjPKgumgcAvTRpdxOaR9lgs4fnVY7DjYqXkcBugjJy7dgSgotdsQnZUzk6dGmOo+XGjekoCMLyO/SkcWZl851mTXbvhNjUOyPUIrKFqG+SLBjzbObQCxgLENDjyxzmbkX4Xv8Gckeg2IY0Scf7ZwmJfvOW2lVRT4TEfhEmnT5HLPIjtj2/J59+lypXnOR2Ym8WGfyfy9xgCR5Ywu3HCzGayvoW98d2plJbs3JOC7psjXmQ4ucC1l4H06wUqJ0LQ2cc0MLEGMKW2yHKxa9/X01q+repKEqnLtVZx+jemOqB78QpbfX8j+Q+Os2RxXcNMO1ioyZJIyAGTtTuWp0kjXrTIS1z9nb5Duw46Ncje3PwkJhlt30mvV2Ix/CWoAe93bfl3h7fI
jQmd8VWarxk7sn7dCPIeRhPU/V43jIoOZFNBc2uQ9B6dbZ+Uh2hUsuWwk7/3pEGcSNEKlsARwc/fkf476fg3XKk/ph5PbZ7HlYAqmgzKiqVBxLPrsPBF8pOG2QpKVBv4HvE4a2dtEuGQzo7shNvxPD99+mFgnmIDS5B+cSVy+M1Fi608D/UZnOFwXxrI2M/RWEMc4WU8gApB0vegPQrQPVoV8D+S+611fFea1lZtSVWvaMB07YOi2atdsVX4XK2qesHLf56jAtwsWigeIyKyA4lDcDM2e2z+Ul2nuog5/bg05gIEHNWAtldcuR3IDJXZXasF8Z7fNv08Cii9ObwPBMAIN90Zrwk2KztgZUDsjt011Io3quFrqzVX3Xp25+AKBE68DJntZ333TXLIUKrvkfH6bg4gv0vSIH3JpudfVolqJ+/h22mJDUe/s47yv9a22MdSJLMb0vHlvRbdFxATf/MW2/Rcg71AUmtoVOHqhyRkOUb4hPHad88d/aDyZVlewcLmluIg87ZgDvqhVoZ5vjH1a3G1Fj59mUQ7aYcYUBMH2urBqomKw09a4eGQBR+fYWjj1aupXWa9QKuXCaOd7igIusilg3SOG/BeXQ56wCwbtTG0lKfW+6BE/YfjJH+wEPwCq6agrakcL7k3/b+BwUc+4fHftdoVQMn5B3gQdbRMquvzJ5XAvhVaZzhcYT/fBxLVT+sraMgsQRuC6QYhD+DJxTP+FZgmYJJu0U1O7qC3gQNGZnloi8gp0V3jH4bWYref6itE+aRJ4GCQ0Iv7vkpCmC9ds3DVM4G0zX3Y1YOXl7jiVVfNUBJIOzU8MJpcAJZbeNtmXw3ACXQTHO4oseQo2Dcega0GzFHdNtWhly+6TDkAyjl/Hkj5iZr4eVfkDfJ7UfAy7nWygC5+zD23Ha87/LjHo5Q7jVjf0vx1IS2H22MnZ+AbBXUur4z6gODhER/oxFMce5DeQWp9OAibpBHsH7SY4ZSEfbLOxeSoIBqR4O7JmlJrPywu/wjRm9vipyjbeodODPIpcu00rJJeSsbO1099be79XzzwvBROH0x/w1xn2sg46QFSdlK64xjZeOQ4qqfiROBQw6taF6FcKPomhjgFXfdNR6OuZtdjwxWE0r089vitXPH6vCwWIo78RIYcX76h7XWo6y/f+T6K+VL595+ABYypV7hh68o55zdSOFDDJQHp+/mBMUWbvMjLIGGl3HDWq7WCgGzN5PaMv8rOUexgoBtD/91hyY1P+MrRIawJ4qqwe98V+N2bdY8ovaP50n6XYkEfTTkeOYc6N/65GGNUP2dcBRyWo0MtiOAMAMsR3G/RDVa4f/mgRYJdEnoITGTaU3jS5i4NlENHxEVMiPXOpuEyj6KNwT77oTJpsAgadg0m5NX1ie/wkbqEdDy4Lh+AyOmrvF4Lz155s4hyJRWZ5IQDHT+zBnmz7Iu1B00LAq1EhacaWKM6h+PlhtT527kFfOo0+bI58HVOA3KRS/35aIOFW87mcGHcqg+/q0ysY8Nf/xtSlNcK6vxjZrCfIi1yRFSFMerkXx40rdQbbdKin/6VQviE83FzK+YcxFQCb8SeNdd0XC+XIz4vf3GJaCF9X/54QIbBT79RfPko4m+kbeIY2swNyAxGUA3No84IctZmbdsJiYubP9ZQFeZB/o5fp/4mrd3m2c/4F9k8acj52WDf6GsF07cFyoLPXg/sDxEn5D+LWC4PMdQeXVGMpjI3BNvPQPEp3T26Mc6z+ZSCbD8qTEClUOiu9z0/i0RQiNQXpjOw1td9FVmenNQt0bsFbtYFh1On4W+OBRDjwMeHgaIPIn9eqPzRwA4Aejmjg1S3160BPMkBlKv9CWuFFtTjX26EDF8Z0l79hmWSvJrvrjC6N9qvT35c99+uUIs14jaOwJY/Za6zn1NVxzeopX8MGuwRfQcyGTMTBtIY0l/laP6Srj5r/cGzxwuxPrfoI26kc/iJhm59ji14zaAdljcE3ONuFxp/
h5Q2pZXphnOmJZOat5sVIDHRnlnkeA1pp2E7bbDR0geZvtGSLBI761g3GtQ3eME4xHzhqe3rynbY+1xVYcZdhBtoUmV7sEi/SyUcjEmWWfO6F7dV92U+0YwaLwpTKLZRsoD7XvTTsQ9VASopEP/OYW02t5/dZdwpTM98wnwHuKnsfjMq3B5su/y3+EcBN/R0qVOq4vbPYYzPMmm2/00/wPO0kGW/VkX+eRb1J4RdF38evNSFk/H6ptN6HMYNNFubIVEE1fnr5wSLC+r4dqikFlUh/SzmFhRYidadN4iDte/CwqZkXxdAyGluSJNjs2nSuVb5u75/M2DpJvUj+Kcpp0caOUADjEjBnYDrnnZp7oD765Pmv9hnKngqAImQGR9k/sC+O9U0U4LUx/l5pPJvq/8D0yK/BGIkQLEB6eqKh3q4NZn5uX7RnjXAY6+XAjZ/+OoZKQ3h9iKHYES67q/nE5o7aLnHy3s3flLaKQcyhSWV6uSCofdR5QI0XCtgm9XIJhKCNC0rQ6j0l9AlevsuvEbLeOX8eATjMOKEtkqjiij67c5Nvy9i4JpXi8PQUZeDXPabkjMOumW/dsObc+hNHeYv+Ae3i71XrMZf4H2PEHp/eklgFTMcmQ8JZCI7Vq3N4B5jjUiwbZxAvVHoN7Ar36TmEagG1Wxq/nGYnzX/9vnlMK4/P0WrOpWe5bHjuJKb6vxiq9M1j5MNVKGw5C/H8mlOEZzfMCoffAeFvLLt11ouJNeBugCbfQeT3elqxtdHyvU8p/Po7I/U+eQhJv7av1yr9nCVVSQK2CsN79+6NRTJfn07PWjAA+XiXYlHOF82FlkkFn2ckLc+i83b5W/yzyB1O6QT5N/s19+OmEn6UL6bPu//6gWIt+T3bddFUEenl6Uwxnk0elivTPu7sxiO70PlpK8bWvzVWlP5OY/J+evkCU1SquYuEUTe9x0II3+3eyRRRip+xqkBV1PH86JhRn20CtiVTOY5B6EFBWXPP4daQ0glITeE4+cjVstTZWotS9IpM4pBWMt3jxPkLpanzxLkNBcGHO2F6r8E6nnPCsagMkhBDCaRAUYO1FSZHcnfh7P733oB/QS+zQoLBC9AxlDxGir4uwvzMgFt+p0QLa8UUE5o8vzNXvtbanWJ66TwvZfZe3mdUt9deoBUed+Qlp3B+tw2n/xN9JOOGkrHBZql748WOlbrla9Tdxwz71aAOYs5TJkdgjTIPVbgIyIS+obWpSYBkUsuJ9C8OjuYgXRiZNd5N3RIM1bGJs9m9Bod4IEaiKVWOFZ11xlJumCXQ7EIwf9NOH1K4+54NeRi8pvETp05KrItSRFCNfXiA/BhMsUQX9TAzX3UxKuQ8XPCSm0HpW4vejbefSHGoMwqLwN3X6eDZd9/NwDOhO6ABOy38tLxNRY3xfHqlxZcD/36cXJxHOmmIsAzjQodtMGgZrZtrqUUzTKeq6UmN2yoFJxeSOTrN4GhkUK+UF6gksHLU1Y7pDd75zwCyuYV+vjm877Gfe6wS09wM26vnqL1mq/bXxR1JdjSgnDYL0JKkM0NAe0ro4PHzmVn2ecO0y0zYW+P5L/xKrZ7So7D+PL3FNFJlxzfmPhii90wpAI5ucxXoGF9yMnuqFroTbpeHVFyM16dVUqb3OtX84N/K1jiv8wI8dNARoUFjPQl5Hg0kozGefB9nhS5PBtBYx6DLs+YCrsYAcM8xLSy8eJkYXRhVgSzrQNn3WCYX+zGApCZAG5EBjWzN1y08rVivrh+/CC4s8NnbMwljaIRlHJsyBt8W4/Sl3lQCoeNC4Z5k1eOnjUyKGhpW0o+CzPjtTvhAcc/O5SRSpNPpzGUWmjso8O8CkDuVAZ4Af2xNWfWsH1khHr+m8VReAgPWi0UN1ZXwTZnsIhOv8/WHf3w4NCiBLvlU4SsQe5J8bigXeVoeXO7oxaUm+JK4XXcuvZlNU5tBoG1vwYovHDIEuKwLvO/vwdDSsEx8mrBu+whEUdqRDC9
NHXGkx32Rd268lr0CJObudpZ0DYt/upSYs/pjT260s5xnxl5bl4o+kaTyeKWnmHBZEvhYICz98EmOhcvLKkHapad7VuMosv3jzqKKl8fStArJp1qfmKpFlYgfuYmrSd5lKDcaV3bFwnfeAOtJpqJyhN0ZyXm5DDFofDJmVhD7imy/g72psbYOSewSCfWU6XWZzmAjQIqbq8Cy+zI9cj4ohhvdBucwhuwSamAoHkflEoGlS1h8TqECuF8crUaEneDIj+kXSc5iZZ62LGZ4KD7RGlW+Jf2bpiEoZ1lqNoiUpUbtx1ksei/2sD70z3T1eXoBbjc1MO49uUDLi4WZBp6taTyuuobV1+U6hiqhTCvC8L+u6gd00sOZ4VD9mocL5fd5sky/0o9AWNAEnCK3Eg3RNuav96qjs0RmsI3whNrsmqxsCgaNN6wph+jC1/9dXHpEVXgmbqgAeCubDozzMiVe8lhEU+SYvcy7YpOwZNNiMa5Ya1ObkD3+1inmHHHqUIZaeTyOOtkunYRX+dJeQGPXjJes3vdGZ+Gg0uAhywXv5xweUPd2NRjTBtL2rGA4wwo4dw1tavrrlnuFbIRM2qG1IDjoCLTIYelMoNS8ETWM444/eaIj6mFx+MgIm1RoTxExeGXJysrCPPVFxurTLaBnaUmB+OnCA7qlaPdUk7J4iquWtv6CRc1fEn7CQXCr6S2oEtsJ9F9mU0+6J0mc+4WGYmNXW21FCrchp9hn3rswbRPI/O6zmo/adyeKJiRuSCZlw65rVYZohdol3oG5m3WcTUvxGuO/r/2vqzJbVtp+7d8F6e++CIuUlxEXpIUKW5auJO6SVGUuO879etfQDN2xjNjJ048jnOOXalouIEE0Hj66UYD7WzcaokIwg01kQT0IySITZMkdKgkjVHPMk+Eong7DlLHcMsgJDPHlHSpt9PoKIaBWewQb0fdCNlkd7cNDjJiOnp2Vdt2D6vsbgGzKZmJi/J+q5/Y5JCdAkPW7hH4AfR6j2y5qsmsS3r1rjShrvWlXuZPbh5rrNKrh2AJNMGJrkJVekgunYAJhPcyrLPT1OpuUEv4lC2n7NGMDm1V7/WiNnmSck2Xgl9hCA2CrovEI/cA4SZJ03QnGxxq06Gk4lo5IHencw4dnDeAKwLPSseuSe8TXyKHD5qa5ceYUzmBcbIyXzvWxA3XiE/EHvX9bDh3+llOBfJyUC28Uhe2mC71aRoaMg0gWxCDdFPkNYSxtcCqJIyP6ZniyE2rpCuglyFxzhZLJhMimN7hrpjWI5Z1dJHahmWPi9lxZLZWk7Njs4Cc8gtiswGGlwdzK4mAiUHHjBnJV2WcdACj1Ha3VxJuSASuSkLBFs9nKe23KEdWvRtaBeBSWGv3Nk9BD7Qp6+us1jXvshVqyS7XR/1gx8cV7XWKy3JJPiRzPiU82Z2UvZnUanzC3Dlyc9OPBknDPcBRT/petYQZC0lHFxefEddKNpJNwIcFXobQReVgpCOna/ogYDwnQRuFZR1e9s8bNEXSsOw3giNJKseyopIbVM9qLsJRRe8XQ5Gq8WqI+ZqT7n0KqcSBoxrCk29QnS28VRc5HErchDHy7R4yBAUsrdkSWV009wy+B1I6QC/hXhHCncVITHagc41x85uL5h7t1VK85WeZDI+sOkGz2oq2lJ0RNYBWk6eQYieUMYKkB90Kjg+fseGoSrX6burSVO+MvVIZ6IkYDkZjbPTNsRKk0LKNKmOL42SZtpZpUd0lts/IW0mWZUGNHPPCp/Ko6xIwAe1kykVBF9JS53t0BiQTkCVgNwxnwTuJMrTEjuCNZ89USxYJam3fGLHeIhGHoXLoksi4WFmTUgfoUXJoWSjQbOSYDsbgJJP12CzAOjhPfZoBZb2zMmmHYIzPSYZxmwZbd02AHfWtIwMT2idrYIde7a1v4BExJwncmtBTlqOzOdYBmY0d7cFSrTNvcFaPWx6mQc+lANgqYG7nDSlrLL8zCgDKLG9yS61mzAov
9/lU1CKuCb2dTuRVowEukbvWl6yhqVPOWWsMtrdAGdiEKmM7n/StxScqXO8tcGOX85oQJVnD9GI3ZbxXeE2l8qAusNq1cowum6bqzRRAwC1oRRhNPsUTYTT8jZQE5AyehAqmboSTM1Iz9Cn12GLx0bxWtVXBazOpakqcSxo/CdEseUgYAb5kNNkw9ieAPAjpBHrmsXtp2u8vLR6pSzwsJ0Jb3Ku3tqkiuttSJ/q4hxsPspVacTBifCRjLaxXo+GNmFCrhMoYdVRNim0REsMbrWpVN1xBEyupdKIphoBfeh4ShEtLWNIecCUYryeiswZEi7UCYGlbB2AbUIUk1axLtiSSabktIizP9rJaGAPMTSeUy02RrFUid7qBNltXZEX8cDTxEjDEFTXLdqozJll3THcC3SHpm9kyN9V8KGNmVJLbPruFOQcskUG/c2bK3CKy1kcGhrD+xQXGN7armWtiQLd/ng3q2K5GEyhH2pD6SDXpsDmnFdBDVEAv2zG3d1eFGiSICk1WzbiFaYluxMqFVWMkyM84ri51otctejMXCTTRBejKVWFNY08SF9ZwaaGVy8a+r5EqEWQV1pO0G/wGdcIg8B3EEWsecms/4ieRmRu/bLpak0/0VVFBGx5rcQ4wINfa4Sr4bianvQL0gpvp0MvMehwkgJG8Gs8ykWDFdVU3x8wHpsu2SGdcSKrcRp0TU+z8pt25NYJ3jED42EnKWtl0zxnbIoVS3pRkbx8re0Dmc2Zc2pS778mXclqh6Yl768uUqxrHT9CdkdFtFYtdvbKK3fY8ycQ2jKMR8GLWyU+7QRpXU73blpxEBZnSxW6IrS7Q9DtEI4D3WzPKJKerSmLeoDbJLIHkkCqi2ykZdc6/CHCPz049eZ2llRtgnT1B0eHUbX1XAy/xI5RQQ3erVIVcXeEwnpGLe7ivdIBfbnJGtrudCV1zJe4WOzNnWlUxCcFKEmcFBmPw5jalGGH2srqVL1xFGIEGX9MXN9m3Vbbb+JJx6+7h4kpb8kuTivnCZcxGs9CZ6YkUqdWqqmOvFUGzwtWGe0RW76lBkvwwrSR2yAnmRg43kdbPABOwvc7YrMtY3ulydG/xma7Is5TJzCYT2CDajhvmUgCzysNwwgniQ7xtxs0EcdQCzHRDBr6ipCfbFk9AQ9L26ZBjvUkOHaAgG8uQFYVryay/ZqrgMIZ4ofjjGOQJE0UcUuJHhJ6ZsFGS5cjripzNyn2THM4jHY+ohYk8mj2+EQgNm+YlR0Rsa+vo7RSAZlNpgULPpHXH6ayQuhHJi9BNLSOBVBfcyESCzTNCLoGqNMyJs+75Js/nrWxoWY9arpxPfbbNGH2EFxRiUFhKyRvFwHsjZU60Etq5HyCJWVYU6UucdrRt5qjJxsQ3YsW3iR1lkra1GsaohHiv+GTTLlXqPjIZmPSgXxm6crWZheSHWl2trwaJDXCeZt/qjKuFskvAoxVPuLUq4oO6HG+BzkcM3FfDG3f0en8g2WmPjlEO7A87T8OE0ZfrIYpGRb7tJqPI9+EFS8+CMKdVlSmtJhzv+8LEUOzYQ7ElXIvCy6An6q15uZKmzrHATjZYUbY2XT6x2hyYSYFxjYVi6o3uBZki1hDx62Eyag2IgOlh8T1Q7lTy7LzXXbi6Rch37qkmArMdU1NnDU62u7ugucGgHkSdHLacVFUSz2SyxAf3iDYIbzt/awGi2TQ+KSyAl+RDyiMoNsbaqNT0zqsd+EYNk62cjs9uscPuSRf9RU3aZbfjuBwmNmXC4VI3BhINFgV6mXEsYFcmOsPX7p1H06a7G0c9FU5uKyYFAucE8pPaOOXaScVUVK4ch9ema8wiQBlg+vrZiA19y59u0O3J7mWbAeByQ7ZTzF0T9MwJCWPnZXHO1NYQal4hkKSnIktnHClmLkols/NGmlZqAZT7Pd/jXlUgszXGkNiLMo7voRzXidsL9tZby9GN3DlNe48FOwsjne9H7NCySm5TsZQVq0wT0bDzUz0k
Ide75Tinbnl/J3M9Kg4odkAmISSPjV4mhsTpvMzRCECNkrnnBfGNWTGQEdDupuvO2qZBB4XBd2tbT4qNZpDFKG0Uo7zJwLLSj5yfTVUoq3zsyHvGWh1vZ7rUOvV4UkzoJb1PbEy6wMcCA7TudADjVjUpLNmV5KGXDM8+QPeUUJy2oegISXct3QlRk3i8FSd9ifYNcrgYLqGEfHuaIc+EUt9X9411jQ6tou2VifbKlKlYsFFnw5iujCXwpCpYBsMQSlSKqBfL2lHe1wJxAWpD9TrNPG7qGUa93bI6zQUFYmwMgGG3zcxVKM2EluQus7NYz2F2m9Bw1nSVrvAu9WWZU2/QsuBXqlGf+no7TLo3Z9aMLyaw/YZ2IXe6HvAiwaj+werPqZLLYVTTC8CsmrhciATwhCbEyRYLF0wI5FyyhPNmPHkxd/DVLpJKbS3oKnmQVUIcT60Xq1IRbtSdvzdczTia8XIAn25WiFWjqZxA31MDY6iWPLAZuMyctS1NsLij3cn8st34N+qi9OkSOcdjguNtDV6SMIMhyMclrNeJs/Yaz2DtCyaZ5VhlNXqCWZ92JYygiGnmUtsMaUGnTGAS9+XR/YjlrGWniSXoFSU0KsfY0mleOkk/I7QQs/j4oGvtWCTQsqpKmSoXhbvnt2eLoWnW+93qmAcwW92KbA5ncXC04V6LpAxq+A6Zq9hKLxEPNODpCHViCqesvNiLxMT3BszIezC+WGDVrAPIHqiTpImGIAT4nEjFqEgtQYyam6/6xscNsUYpNcErETHbYdzvgXlS0BbgM9u1QSCGYGMUGzTUePa3g7hvgquAVtbIM1YsploGCRMKAJ4SqUHuF+aeLa/2CnUx9dpwdQDbbAUGCsmEsmZIDoIBilkV2KhDf0XhNxmAgGDxtqOUnykbuniUYvTJJQizqlYmWF6XU5hA7Ozs6s68ua67zhH2l1iW9DUtyqcHzlDwu8RWcM2nWajBQI+st9JSzpQLzCkfHZxrEZMmr4z0PZHfLYrhz4bC1guaSLmNtDCqBxaW+IXNhoK/09ZpNCjdijtYCRhwEK47A3rO1fsCjKVhuPq+Uz+PWO1kZ5KvHibxAqdTcmHIZhkwXiqTOFIk5IMaZHJvwIRkQlXUwKZZvMilDikfWCKhSaetousp2fUJkoX5zOvWqVNWK2UpFXMz09rI7dq7EmYT5GAggWVv7tBfCLFMlvU55caSHVIi6dqlc7IFT7E4PAaRHljyIlm1YeyvEeocoG8iV6tIispwloBxeWXq4YjiWTse24gB5+nUUzQw5tsqkksnWNZ0KJzE1cDCkA/eircXX4wRKi+AyEyVDZdX+wxWtziSrfFM6Y2qJiVuB2c/8kyTz1VswScTbnPchFfyOq85bKF5n+1uhysfb4giIWJJbm9enq85yPAZQINkkkdWZ56gbWsi3YzoLNuSBbipVdP7x71RqKkWrNooJbbW5raLk/NDYiRlUsP5ytfHnNLdcJbNZpszxRqoC4iPW47qoJxIVQEscNo+OOdgyQjCDsODlPJwO9WZpXO4565STLd9j8jSdgYkQPLYwNBYzzx0dBa2Mw7TGbWrG5ciBHp1zcTaVw5SnmHYNlNdSDSPbteTw6bKqWyHEjkZ0IcpxjvhNnRpLa6YK0zwIri7Y6pEN3/PN/IwrjQY1njGMBe7J/TMl5LEkJqlSHkJW2zZqhxBtBpNCEBHOzxSb5plEPXFCRM+aERkPuQpX9OFsbN3GWgr0Fpzvo80udoW66vu5iMYxhzNoQWCJiOoIoFcF7ypzvQZyZfkbg6mcm/BlZTyhAG2auu8ChCONm3PVjRlJ7G43Aln6Xrfs8NoO++Cj9Bx0RGCdlTWIV8SOC5TgNGYWcKcgUV6VVpgJB/FSLlS5WFidtwN1e1o21NrQFLmTo7sLVJsbmMa2cK41kNzEoSGqyHndEdLkKFv+LhN9/sITbOaE/TLMdDcW2ijfWDsZt+a7z1giAmP96ZZAPYrrnpg/UlR
Yp8dYIcy8t667/s3z6zKB9OieOJeJfyTzoWFp3OXVUghI1wB76GJ1mcRKBC2Khj+ErMAG8h0hoJRt8xpe9ZZpAE6gptV9upg9MVRmgXGlAEVPua6J1W0UDBImMeko3OKKG8ICii5y4Drol2a52Czjc9y2gTx5ApKsPcJ3cD9iyYLPCDroBxgdDmbjtGyyM9yeVEY00qYyVGNYqgMiBfLdcvhyWzpS4FWsZIIqtRD51Ixa7bB7IWKvWzhUnChPLGe0Oo6Y2gwoVs1A6AQ7J2U1Qneb+XdzLgCd0kyWnNP2l4v3CKbhLjeaazt7CCISSzQ86x/4nuZ0I5ToN2ZP9LAPRrZW0JlGkaaRnIMq5kRwJCdrw7Q6LagXXupUiJO9xmDKyY/mYCo4apIb/RCQ+pdGPMWLcNd2XYkajkArk4VYrKQ7jRBMuOrM7DiBHR7A6KWZ40MvtMogR204aJ8dm7T0UYHaphJq5AgGkdCyxlUT1xd0QkpIYALY0QXoe4Lfv3RdZbcK9KVw2rmbIaAodrE5SDxR16e+yawmhPbW43Gaco95xh03h2tFcOg2ygHEu9EPOiL22RJEuoZMS8fkUtXG4ps+Dh1ldkRb7JhxmtRkdYSwiWCRaU9hcQ5Vp8OYWPqS3vr28xrLXMv8ImrUaQhTbi8CsvoqhgA+Ih7cMQIxEKleVPcnxYcjF93GyAukGhgCdigTXlrJsEXWAQwK5gc013WkbCoYZStcgrnEPICOONrxwpl51eHMrD2mjjIfUdDutVGm/ZujF81Q8Qdzfo2Q+6qNKjK+r1nHivF0rZR3TFMs5M4id1CX/hlWvEnUUq0mD5febSlR1MQLoipDN5ObIVZ5LPjwc33+wN0tyXNid7Oa3VEgKFfTIllsUfW3kLYS1Dwdtqw+tMiY8dy6Q9JKLpK2OWnVJR3DkO4oP1knOT1JT7kuWJZNUbiY99Ks83srwN7sTeFf2YJbG5mRhStYLu70U4vnEz0eK6UsBg1wOzyiCf2DkYZKb8KNMCVD5l/tmwkt5SNkzCxbnoMZ+zZROMiQPTwjhdON6PztkEime4W7ojMRkSyM0QoITKkLThmxyWyvdzTi24TJtnMdsTA7XskPJVCBU40Hno8kU6DAQRKhinLhVNidaoAaGIM6IO3lUXSiKNYyTiDiXaglbfMqKY7oENmonPkeM5iI4DLUtjE6b1GjRqDLFmgA09oD+OjWBMYlxXTNIrNSLQ/pQVsb/Ryab0qE3sY0rlNYKA12zm85DVyvW6CjcuyQTAhulEyjsCzJpdNjFQA+I23qrc1NNmMmFbu2pkzrrC302JjeJSE35g4sGq8LqyrKPZepazIaz4dTnR3Zh78V/nVw8qgMQAfHUKVrgru5Dm5tiEg492dRVKOAevjYHZlEWg01tQriAan2LFxlT+leSHJns9HiZbTos7hDRP4SuIpamLuFGE2yiLf1irJsjaz0e1J8IAyrqZIyY5VljG1KgUbExDZdS3ocpTKMd5042nLwq8uykSrNVqwmJNk58cMd6Ezw/IiQPHusZxUIcAQBzbgDt6g6uCNhjDr5W5VAzM33pm5AbWyfYoLj96zWN7Wp1Ce9pfiRlx5VgTMxCYV0sVDdN4cAx1atqy9UAffTA+g7UCbWzPGB6cJ9DzMMb3DtS/lnaaO1CYPtlRbeG07TDBhNmNY9kFXCM6TJJgv/O0ysaM4/Z5++m/9SVp2KMrPkrKjK+KVrOwI8R6l3ygzO/UiM7tUXq71FfwPNMaK9AuYMr08d/W9oci8f8xkDx4KPqZcJ5uh6u83YOgVQcLw6Skyuv8S3C/gmeyeYX2FgMN3H4qDFs29xMc7n6eGB80Nz8d9Aaq2QcGfXd9W2fVDyveyKmHG+DDJ82en/MfE7wGozLV9JSN8kVwu8DXsFCf91aj9AL5zan1Y3XteeZgh/p4THn6ikdzgdfoNhQZbvSdeyAXcav+5WKyQNxIJ+oVIbP6OQCDImcLpLwjEXRiiK0Bi
BP0pGV+AE5J4j5AvZIOi31PUdxQPFHkhH49d+etD7+VVBP745bE7H092SVT4D38GCfQEIOFv/1lDIThX+aVbCvADjqf/rDfgv1+eCsHXoc0T8fhwEhbwa3fvIKBgkXU9P4jhawL57HvAcR9fex9+FqCRa/aX87v735+X0XtbPFyCa6n+xvesuGfD4Z9sifnrmuDdu4/f/qOP2fVbjNlHHb8i1i+VPIZ/z+GKvhiuR7/1QeHX9qGLfnn4+Zzc/5b9a3ryTdGX+IS8oX+Wvb1dx65+duy36VjkR+tZ7KWGfUTarvbLV5H9USdAVG+j8y/Ig/L48PPuQSHc4T/0iyRfHm4Vr/l4hf3z5Prv6oF+VA+P5x/eCS+UVVv4+ZNro98mPvgFnez3Q3vt/uC+wK8/d8v02MLwIo4gD1fyaw/E6VdQ+yApo5dPVm0d++VjkauHc1CIf32UR3j6o0h+uJY8UNr7ReRDVe9X+hYUFoLyP7zpLtsPIlFNn75mqtrLpx/2sSxQl3OWgOJgmQ8j6NdHoXr5zss1qFq/T6ry1z5Ogqy8do8vSsqkTz7U9Pm9T3rli/c9kY9P7gvzyu+fV/OSdHXuLx9uzxNwYYX8v6Soq7b3y/7zpOEOKHf4WLN7nf+VgUzhfjKB5OTO9P3igQlC8YUUEUqo8vTEcxPgQei/bAK8BidP8Cv3z9ec9YMsumMM95FBYQIhrGjsDfGF/ARdVthLBv86ISDeCl3wL/D3b8Jbn5oAn6P+r/H+b/L2P206vLRL39x0+P/vvlk1/9AC+Rrb4a2bYn7RBj+Nhz9l8K8+xQ7iFexA1t+TmRA/mclPZvJfwUzYLzATcGP1eK0M++UJRfmCo/LfzVI+tW5Xa+ol0uD092Qp5E+k+Yk0/xVIw31EGkhrfqmGtvt+uCEIHP2W3hP8U9/JGn2/ejl9hZLf1cBZf8HAucN4/dsjmD/YKf8Opkm9JdNc0Z/2I42/MguJY9+zF1/OTP8+uq5z/Z+nBmX0LeaSvqNB+Fv20yT8SpPwXzNO39QiXH/qq15RL33V9Guu6o9BBd9+nL4MF/h9nHZDAUdmcu+ZDx2uPEj8p4EEr47p/ykn0W/JT0z4iQl/KSzkE0zAEOIV3Y0i3xcWVq9EiTzvpmt5Ydq2mn5v/ic99mlbXuekdx+vwL89eB58/MPRZn5y22b5cFCCinx8CB54T6/8/tD96MNTX9dNHWDzwfVL7fBIRnu/ja5fLPARR6+X6PrFbn/SqcQrYWEfzrXXHFg949OyXu/mxzccq+Q+8ucPLkTsmVSRnxbxUPPHp34XlhcFYegfFPTQMi8Kukvdx2r/DUH87HTHpx6f5+on/l9TPz8EJf2sb+dPx0z98436VWrsM36An9rto3aj3z+LvKHw17Tb+9f025tpt9fmQf66dvu6xvtBNAS6fjY7hTyLUf+zGmK1xr5c0FtriC/6iz54ib4GDqnPo9Oz8Ol/yQh/U98T9ixCgsZf8yGuXpt9eEP++opZ+wNE4T5V2OhXRSQ/atr8UvUdPPpBwoqfVkj52gq98CH8EFX6mkjpb8WyfrnH2RTPm6T+sgC9ex2QggcZQb78tPLu1Vtg7d99s3r9C7DxTdnPs2lZ5CXzeS0yFXur+A/sNav+n2fcr6Lin/EgvYTFH8cb9ioyfmfb7K8B7I9oYH07hP0DWPz7oPr6Eru/V4HXcfrhywDs/ecPFgq8+wnFz5cJfE8sFquLg66TX49NpPo3sc+XxPr1NSgGljF+n9pesS8cXF+Yi/lr/q8Pvqz7W3984SDfUjie+eBXH1ZdPZGO9Sum+LdwULwqHC8XfT0D7s8gyZ8PjCJ/Bkb9twdGPdHfX7+y+40mOsm/wGb+ibjBn8Pjv354fPO4wefWx4vgwJ9y/FOO3xLm/84c2tvOh/0U/Z+i/+ND+LtvvyQECsWTeG6SFMC///yjId8rDH1mCb/c4wJ/JeD7zbySq1esHdAbyk+nBb5+T3w6u4ZS9IvO
Ij6zg9Cbza2t/qJ1+mfiP/6iC+QPw1HfNh7o0YfyL9316i1FePU8/mOFUK/EN65XrwWAYMjbTRB/dn0CjBqof3vc5A7GDvwrYOhNpvg/LichX+myD/P+335G/3WX2Csd9s1Q55+fdX7VwfIP7lX2PSL5vrjX5N+flX81KO/fvDvhm86PIMhHFvE7Ur8khuj7DxtXfQLU6NuN+9cief7HYvU+bDDzd0P1UOz5JNi3CtUDh20FB/HvtwNhjnfV5Qrv+D8=</diagram></mxfile>
|
2210.06170/main_diagram/main_diagram.pdf
ADDED
|
Binary file (95.4 kB). View file
|
|
|
2210.06170/paper_text/intro_method.md
ADDED
|
@@ -0,0 +1,208 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Introduction
|
| 2 |
+
|
| 3 |
+
::: wrapfigure
|
| 4 |
+
r0.5 {width="50%"}
|
| 5 |
+
:::
|
| 6 |
+
|
| 7 |
+
We begin with a motivating example: Consider the task of inferring the mass of an exoplanet $\boldsymbol{\theta}_o$ from the light curve observations $\boldsymbol{x}_o$ of a distant star. We design a computer program that maps hypothetical mass $\boldsymbol{\theta}$ to a simulated light curve $\boldsymbol{x}$ using relevant physical theory. Our simulator computes $\boldsymbol{x}$ from $\boldsymbol{\theta}$, but the inverse mapping is unspecified and likely intractable. *Simulation-based inference* ([sbi]{.smallcaps}) puts this problem in a probabilistic context [@sisson2018handbook; @Cranmer2020]. Although we cannot analytically evaluate it, we assume that the simulator is sampling from the conditional probability distribution $p(\boldsymbol{x}\, | \,\boldsymbol{\theta})$. After specifying a prior $p(\boldsymbol{\theta})$, the inverse amounts to estimating the posterior $p(\boldsymbol{\theta}\, | \,\boldsymbol{x}_o)$. This problem setting occurs across scientific domains [@cole2021fast; @alsing2018massive; @brehmer2018constraining; @hermans2020towards; @lensing] where $\boldsymbol{\theta}$ generally represents input parameters of the simulator and $\boldsymbol{x}$ the simulated output observation. Our design goal is to produce a surrogate model $\hat{p}(\boldsymbol{\theta}\, | \,\boldsymbol{x})$ approximating the posterior for any data $\boldsymbol{x}$ while limiting excessive simulation.
|
| 8 |
+
|
| 9 |
+
::: wrapfigure
|
| 10 |
+
R0.5 {width="50%"}
|
| 11 |
+
:::
|
| 12 |
+
|
| 13 |
+
Density estimation [@bishop; @papamakarios2017masked; @papamakarios2019normalizing] can fit the likelihood [@drovandi2018approximating; @papamakarios2019sequential; @alsing2019fast; @lueckmann2019likelihood] or posterior [@blum2010non; @papamakarios2016fast; @lueckmann2017flexible; @greenberg2019automatic] directly; however, an appealing alternative for practitioners is estimating a *ratio* between distributions [@izbicki2014high; @Cranmer2015; @thomas2016likelihood; @Hermans2019; @Durkan2020]. Specifically, the likelihood-to-evidence ratio $\frac{p(\boldsymbol{\theta}\, | \,\boldsymbol{x})}{p(\boldsymbol{\theta})} = \frac{p(\boldsymbol{x}\, | \,\boldsymbol{\theta})}{p(\boldsymbol{x})} = \frac{p(\boldsymbol{\theta}, \boldsymbol{x})}{p(\boldsymbol{\theta}) p(\boldsymbol{x})}$. Unlike the other methods, ratio estimation enables easy aggregation of independent and identically drawn data $\boldsymbol{x}$. Ratio and posterior estimation can compute bounds on the mutual information and an importance sampling diagnostic.
|
| 14 |
+
|
| 15 |
+
Estimating $\frac{p(\boldsymbol{x}\, | \,\boldsymbol{\theta})}{p(\boldsymbol{x})}$ can be formulated as a binary classification task [@Hermans2019], where the classifier $\sigma \circ f_{\boldsymbol{w}}(\boldsymbol{\theta},\boldsymbol{x})$ distinguishes between pairs $(\boldsymbol{\theta}, \boldsymbol{x})$ sampled either from the joint distribution $p(\boldsymbol{\theta}, \boldsymbol{x})$ or the product of its marginals $p(\boldsymbol{\theta}) p(\boldsymbol{x})$. We call it [nre-a]{.smallcaps}. The optimal classifier has $$\begin{align}
|
| 16 |
+
f_{\boldsymbol{w}}(\boldsymbol{\theta},\boldsymbol{x}) \approx \log \frac{p(\boldsymbol{\theta}\, | \,\boldsymbol{x})}{p(\boldsymbol{\theta})}.
|
| 17 |
+
\end{align}$$ Here, $\sigma$ represents the sigmoid function, $\circ$ implies function composition, and $f_{\boldsymbol{w}}$ is a neural network with weights $\boldsymbol{w}$. As a part of an effort to unify different [sbi]{.smallcaps} methods and to improve simulation-efficiency, @Durkan2020 reformulated the classification task to identify which of $K$ possible $\boldsymbol{\theta}_k$ was responsible for simulating $\boldsymbol{x}$. We refer to it as [nre-b]{.smallcaps}. At optimum $$\begin{align}
|
| 18 |
+
g_{\boldsymbol{w}}(\boldsymbol{\theta}, \boldsymbol{x}) \approx \log \frac{p(\boldsymbol{\theta}\, | \,\boldsymbol{x})}{p(\boldsymbol{\theta})} + c_{\boldsymbol{w}}(\boldsymbol{x}),
|
| 19 |
+
\end{align}$$ where an additional bias, $c_{\boldsymbol{w}}(\boldsymbol{x})$, appears. $g_{\boldsymbol{w}}$ represents another neural network. The $c_{\boldsymbol{w}}(\boldsymbol{x})$ term nullifies many of the advantages ratio estimation offers. $c_{\boldsymbol{w}}(\boldsymbol{x})$ can be arbitrarily pathological in $\boldsymbol{x}$, meaning that the normalizing constant can take on extreme values. This limits the applicability of verification tools like the importance sampling-based diagnostic in Section [2.2](#sec:diagnostic){reference-type="ref" reference="sec:diagnostic"}.
|
| 20 |
+
|
| 21 |
+
The $c_{\boldsymbol{w}}(\boldsymbol{x})$ term also arises in contrastive learning [@gutmann2012noise; @van2018representation] with @ma2018noise attempting to estimate it in order to reduce its impact. We will propose a method that discourages this bias instead. Further discussion in Appendix [8](#apndx:mutual-information){reference-type="ref" reference="apndx:mutual-information"}.
|
| 22 |
+
|
| 23 |
+
There is a distinction in deep learning-based [sbi]{.smallcaps} between *amortized* and *sequential* algorithms which produce surrogate models that estimate any posterior $p(\boldsymbol{\theta}\, | \,\boldsymbol{x})$ or a specific posterior $p(\boldsymbol{\theta}\, | \,\boldsymbol{x}_o)$ respectively. Amortized algorithms sample parameters from the prior, while sequential algorithms use an alternative proposal distribution--increasing efficiency at the expense of flexibility. Amortization is usually necessary to compute diagnostics that do not require samples from $p(\boldsymbol{\theta}\, | \,\boldsymbol{x}_o)$ and amortized estimators are empirically more reliable [@hermans2021averting]. Our study therefore focuses on amortized algorithms.
|
| 24 |
+
|
| 25 |
+
We design a more general formulation of likelihood-to-evidence ratio estimation as a multiclass problem in which the bias inherent to [nre-b]{.smallcaps} is discouraged by the loss function and it does not appear at optimum. Figure [\[fig:conceptual-hyperparameters-to-c2st\]](#fig:conceptual-hyperparameters-to-c2st){reference-type="ref" reference="fig:conceptual-hyperparameters-to-c2st"} diagrams the interpolated performance as a function of hyperparameters. It shows which settings recover [nre-a]{.smallcaps} and [nre-b]{.smallcaps}, also indicating that highest performance occurs with settings distant from these. Figure [\[fig:loss-diagram\]](#fig:loss-diagram){reference-type="ref" reference="fig:loss-diagram"} shows the relationship of the loss functions. We call our framework [nre-c]{.smallcaps}[^1] and expound the details in Section [2](#sec:methods){reference-type="ref" reference="sec:methods"}.
|
| 26 |
+
|
| 27 |
+
An existing importance sampling diagnostic [@Hermans2019] tests whether a classifier can distinguish $p(\boldsymbol{x}\, | \,\boldsymbol{\theta})$ samples from samples of $p(\boldsymbol{x})$ weighted by the estimated ratio. We demonstrate that, when estimating accurate posteriors, our proposed [nre-c]{.smallcaps} passes this diagnostic while [nre-b]{.smallcaps} does not.
|
| 28 |
+
|
| 29 |
+
Taking inspiration from mutual information estimation [@poole2019variational], we propose applying a variational bound on the mutual information between $\boldsymbol{\theta}$ and $\boldsymbol{x}$ in a novel way--as an informative metric measuring a lower bound on the Kullback-Leibler divergence between surrogate posterior estimate $p_{\boldsymbol{w}}(\boldsymbol{\theta}\, | \,\boldsymbol{x})$ and $p(\boldsymbol{\theta}\, | \,\boldsymbol{x})$, averaged over $p(\boldsymbol{x})$. Unlike with two-sample testing methods commonly used in machine learning literature [@sbibm], our metric samples only from $p(\boldsymbol{\theta}, \boldsymbol{x})$, which is always available in [sbi]{.smallcaps}, and does not require samples from the intractable $p(\boldsymbol{\theta}\, | \,\boldsymbol{x})$. Our metric is meaningful to scientists working on problems with intractable posteriors. The technique requires estimating the partition function, which can be expensive. We find the metric to be well correlated with results from two-sample tests.
|
| 30 |
+
|
| 31 |
+
We evaluate [nre-b]{.smallcaps} and [nre-c]{.smallcaps} in a fair comparison in several training regimes in Section [3](#sec:experiments){reference-type="ref" reference="sec:experiments"}. We perform a hyperparameter search on three simulators with tractable likelihood by benchmarking the behavior when (a) jointly drawn pairs $(\boldsymbol{\theta}, \boldsymbol{x})$ are unlimited or when jointly drawn pairs $(\boldsymbol{\theta}, \boldsymbol{x})$ are fixed but we (b) can draw from the prior $p(\boldsymbol{\theta})$ without limit or (c) are restricted to the initial pairs. We also perform the [sbi]{.smallcaps} benchmark of @sbibm with our recommended hyperparameters.
|
| 32 |
+
|
| 33 |
+
# Method
|
| 34 |
+
|
| 35 |
+
The ratio between probability distributions can be estimated using the "likelihood ratio trick" by training a classifier to distinguish samples [@hastie2009elements; @sugiyama2012density; @goodfellow2014generative; @Cranmer2015; @thomas2016likelihood; @Mohamed2016; @Hermans2019]. We first summarize the loss functions of [nre-a]{.smallcaps} and [nre-b]{.smallcaps} which approximate the *intractable* likelihood-to-evidence ratio $r(\boldsymbol{x}\, | \,\boldsymbol{\theta}) \coloneqq \frac{p(\boldsymbol{x}\, | \,\boldsymbol{\theta})}{p(\boldsymbol{x})}$. We then elaborate on our proposed generalization, [nre-c]{.smallcaps}. Finally, we explain how to recover [nre-a]{.smallcaps} and [nre-b]{.smallcaps} within our framework and comment on the normalization properties.
|
| 36 |
+
|
| 37 |
+
@Hermans2019 train a binary classifier to distinguish $(\boldsymbol{\theta}, \boldsymbol{x})$ pairs drawn dependently $p(\boldsymbol{\theta}, \boldsymbol{x})$ from those drawn independently $p(\boldsymbol{\theta}) p(\boldsymbol{x})$. This classifier is parameterized by a neural network $f_{\boldsymbol{w}}$ which approximates $\log r(\boldsymbol{x}\, | \,\boldsymbol{\theta})$. We seek optimal network weights
|
| 38 |
+
|
| 39 |
+
$$\begin{equation}
|
| 40 |
+
\boldsymbol{w}^{*} \in \mathop{\mathrm{arg\,min}}_{\boldsymbol{w}} - \frac{1}{2 B}
|
| 41 |
+
\left[ \sum_{b=1}^{B} \log \left(
|
| 42 |
+
1 - \sigma \circ f_{\boldsymbol{w}}(\boldsymbol{\theta}^{(b)}, \boldsymbol{x}^{(b)})
|
| 43 |
+
\right)
|
| 44 |
+
+ \sum_{b'=1}^{B}\log \left(
|
| 45 |
+
\sigma \circ f_{\boldsymbol{w}}(\boldsymbol{\theta}^{(b')}, \boldsymbol{x}^{(b')})
|
| 46 |
+
\right) \right]
|
| 47 |
+
\end{equation}$$
|
| 48 |
+
|
| 49 |
+
$\boldsymbol{\theta}^{(b)}, \boldsymbol{x}^{(b)} \sim p(\boldsymbol{\theta}) p(\boldsymbol{x})$ and $\boldsymbol{\theta}^{(b')}, \boldsymbol{x}^{(b')} \sim p(\boldsymbol{\theta}, \boldsymbol{x})$ over $B$ samples. [nre-a]{.smallcaps}'s ratio estimate converges to $f_{\boldsymbol{w}^*} = \log \frac{p(\boldsymbol{x}\, | \,\boldsymbol{\theta})}{p(\boldsymbol{x})}$ given unlimited model flexibility and data. Details can be found in Appendix [5](#apndx:other-sbi){reference-type="ref" reference="apndx:other-sbi"}.
|
| 50 |
+
|
| 51 |
+
@Durkan2020 train a classifier that selects from among $K$ parameters $(\boldsymbol{\theta}_1, \ldots, \boldsymbol{\theta}_K)$ which could have generated $\boldsymbol{x}$, in contrast with [nre-a]{.smallcaps}'s binary possibilities. One of these parameters $\boldsymbol{\theta}_k$ is *always* drawn jointly with $\boldsymbol{x}$. The classifier is parameterized by a neural network $g_{\boldsymbol{w}}$ which approximates $\log r(\boldsymbol{x}\, | \,\boldsymbol{\theta})$. Training is done over $B$ samples by finding
|
| 52 |
+
|
| 53 |
+
$$\begin{equation}
|
| 54 |
+
\label{eqn:nreb-loss}
|
| 55 |
+
\boldsymbol{w}^* \in \mathop{\mathrm{arg\,min}}_{\boldsymbol{w}}
|
| 56 |
+
\left[ - \frac{1}{B} \sum_{b'=1}^{B} \log \frac{ \exp \circ g_{\boldsymbol{w}}(\boldsymbol{\theta}_{k}^{(b')}, \boldsymbol{x}^{(b')}) }{ \sum_{i=1}^{K} \exp \circ g_{\boldsymbol{w}}(\boldsymbol{\theta}_i^{(b')}, \boldsymbol{x}^{(b')}) } \right]
|
| 57 |
+
\end{equation}$$
|
| 58 |
+
|
| 59 |
+
where $\boldsymbol{\theta}_{1}^{(b')}, \ldots, \boldsymbol{\theta}_{K}^{(b')} \sim p(\boldsymbol{\theta})$ and $\boldsymbol{x}^{(b')} \sim p(\boldsymbol{x}\, | \,\boldsymbol{\theta}_k^{(b')})$. Given unlimited model flexibility and data [nre-b]{.smallcaps}'s ratio estimate converges to ${g_{\boldsymbol{w}^*}(\boldsymbol{\theta}, \boldsymbol{x}) = \log \frac{ p(\boldsymbol{\theta}\, | \,\boldsymbol{x}) }{p(\boldsymbol{\theta})} + c_{\boldsymbol{w}^*}(\boldsymbol{x})}$. Details are in Appendix [5](#apndx:other-sbi){reference-type="ref" reference="apndx:other-sbi"}.
|
| 60 |
+
|
| 61 |
+
Our proposed algorithm [nre-c]{.smallcaps} trains a classifier to identify which $\boldsymbol{\theta}$ among $K$ candidates is responsible for generating a given $\boldsymbol{x}$, inspired by [nre-b]{.smallcaps}. We added another option that indicates $\boldsymbol{x}$ was drawn independently, inspired by [nre-a]{.smallcaps}. The introduction of the additional class yields a ratio without the specific $c_{\boldsymbol{w}}(\boldsymbol{x})$ bias at optimum. Define $\boldsymbol{\Theta}\coloneqq (\boldsymbol{\theta}_1, ..., \boldsymbol{\theta}_K)$ and conditional probability
|
| 62 |
+
|
| 63 |
+
$$\begin{equation}
|
| 64 |
+
p_{\textsc{nre-c}}(\boldsymbol{\Theta}, \boldsymbol{x}\, | \,y = k) \coloneqq
|
| 65 |
+
\begin{cases}
|
| 66 |
+
p(\boldsymbol{\theta}_1) \cdots p(\boldsymbol{\theta}_K) p(\boldsymbol{x}) & k=0 \\
|
| 67 |
+
p(\boldsymbol{\theta}_1) \cdots p(\boldsymbol{\theta}_K) p(\boldsymbol{x}\, | \,\boldsymbol{\theta}_k) & k = 1, \ldots, K
|
| 68 |
+
\end{cases}.
|
| 69 |
+
\end{equation}$$
|
| 70 |
+
|
| 71 |
+
We set marginal probabilities $p(y = k) \coloneqq p_{K}$ for all $k \geq 1$ and ${p(y = 0) \coloneqq p_{0}}$, yielding the relationship $p_{0} = 1 - K p_{K}$. Let the odds of any pair being drawn dependently to completely independently be $\gamma \coloneqq \frac{K p_{K}}{p_{0}}$. We now use Bayes' formula to compute the conditional probability
|
| 72 |
+
|
| 73 |
+
$$\begin{equation}
|
| 74 |
+
\begin{aligned}
|
| 75 |
+
\label{eqn:cnre-posterior}
|
| 76 |
+
p(y=k \, | \,\boldsymbol{\Theta}, \boldsymbol{x})
|
| 77 |
+
&= \frac{p(y=k) \, p(\boldsymbol{\Theta}, \boldsymbol{x}\, | \,y=k)/p(\boldsymbol{\Theta}, \boldsymbol{x}\, | \,y=0)}{\sum_{i=0}^{K} p(y=i) \, p(\boldsymbol{\Theta}, \boldsymbol{x}\, | \,y=i)/p(\boldsymbol{\Theta}, \boldsymbol{x}\, | \,y=0)} \\
|
| 78 |
+
&= \frac{p(y=k) \, p(\boldsymbol{\Theta}, \boldsymbol{x}\, | \,y=k)/p(\boldsymbol{\Theta}, \boldsymbol{x}\, | \,y=0)}{p(y=0) + \sum_{i=1}^{K} p(y=i) \, p(\boldsymbol{\Theta}, \boldsymbol{x}\, | \,y=i)/p(\boldsymbol{\Theta}, \boldsymbol{x}\, | \,y=0)} \\
|
| 79 |
+
&= \begin{cases}
|
| 80 |
+
\frac{K}{K + \gamma \sum_{i=1}^{K} r(\boldsymbol{x}\, | \,\boldsymbol{\theta}_i)} & k=0 \\
|
| 81 |
+
\frac{\gamma \, r(\boldsymbol{x}\, | \,\boldsymbol{\theta}_k)}{K + \gamma \sum_{i=1}^{K} r(\boldsymbol{x}\, | \,\boldsymbol{\theta}_i)} & k=1, \ldots, K
|
| 82 |
+
\end{cases}.
|
| 83 |
+
\end{aligned}
|
| 84 |
+
\end{equation}$$
|
| 85 |
+
|
| 86 |
+
We dropped the [nre-c]{.smallcaps} subscript and substituted in $\gamma$ to replace the $p(y)$ class probabilities. We train a classifier, parameterized by neural network $h_{\boldsymbol{w}}(\boldsymbol{\theta}, \boldsymbol{x})$ with weights $\boldsymbol{w}$, to approximate [\[eqn:cnre-posterior\]](#eqn:cnre-posterior){reference-type="eqref" reference="eqn:cnre-posterior"} by
|
| 87 |
+
|
| 88 |
+
$$\begin{equation}
|
| 89 |
+
\label{eqn:classifier}
|
| 90 |
+
q_{\boldsymbol{w}}(y = k \, | \,\boldsymbol{\Theta}, \boldsymbol{x}) =
|
| 91 |
+
\begin{cases}
|
| 92 |
+
\frac{K}{K + \gamma \sum_{i=1}^{K} \exp \circ h_{\boldsymbol{w}}(\boldsymbol{\theta}_i, \boldsymbol{x})} & k = 0 \\
|
| 93 |
+
\frac{\gamma \, \exp \circ h_{\boldsymbol{w}}(\boldsymbol{\theta}_k,\boldsymbol{x})}{K + \gamma \sum_{i=1}^{K} \exp \circ h_{\boldsymbol{w}}(\boldsymbol{\theta}_i,\boldsymbol{x})} & k = 1, \ldots, K
|
| 94 |
+
\end{cases}.
|
| 95 |
+
\end{equation}$$
|
| 96 |
+
|
| 97 |
+
We note that [\[eqn:classifier\]](#eqn:classifier){reference-type="eqref" reference="eqn:classifier"} still satisfies $\sum_{k=0}^K q_{\boldsymbol{w}}(y = k \, | \,\boldsymbol{\Theta}, \boldsymbol{x}) =1$, no matter the parameterization.
|
| 98 |
+
|
| 99 |
+
We design a loss function that encourages $h_{\boldsymbol{w}}(\boldsymbol{\theta}, \boldsymbol{x}) = \log \frac{p(\boldsymbol{x}\, | \,\boldsymbol{\theta})}{p(\boldsymbol{x})}$ at convergence, and holds at optimum with unlimited flexibility and data. We introduce the cross entropy loss $$\begin{equation}
|
| 100 |
+
\begin{aligned}
|
| 101 |
+
\ell(\boldsymbol{w})
|
| 102 |
+
&\coloneqq \mathbb{E}_{p(y, \boldsymbol{\Theta}, \boldsymbol{x})} \left[ -\log q_{\boldsymbol{w}}(y \, | \,\boldsymbol{\Theta}, \boldsymbol{x})\right] \\
|
| 103 |
+
&= - p_{0} \mathbb{E}_{p(\boldsymbol{\Theta}, \boldsymbol{x}\, | \,y=0)} \left[
|
| 104 |
+
\log q_{\boldsymbol{w}}(y =0 \, | \,\boldsymbol{\Theta}, \boldsymbol{x})
|
| 105 |
+
\right]
|
| 106 |
+
- p_{K} \sum_{k=1}^{K} \mathbb{E}_{p(\boldsymbol{\Theta}, \boldsymbol{x}\, | \,y=k)} \left[
|
| 107 |
+
\log q_{\boldsymbol{w}}(y = k \, | \,\boldsymbol{\Theta}, \boldsymbol{x})
|
| 108 |
+
\right] \\
|
| 109 |
+
&= - p_{0} \mathbb{E}_{p(\boldsymbol{\Theta}, \boldsymbol{x}\, | \,y=0)} \left[
|
| 110 |
+
\log q_{\boldsymbol{w}}(y =0 \, | \,\boldsymbol{\Theta}, \boldsymbol{x})
|
| 111 |
+
\right]
|
| 112 |
+
- K p_{K} \mathbb{E}_{p(\boldsymbol{\Theta}, \boldsymbol{x}\, | \,y=K)} \left[
|
| 113 |
+
\log q_{\boldsymbol{w}}(y = K \, | \,\boldsymbol{\Theta}, \boldsymbol{x})
|
| 114 |
+
\right]
|
| 115 |
+
\end{aligned}
|
| 116 |
+
\end{equation}$$ and minimize it towards $\boldsymbol{w}^* \in \mathop{\mathrm{arg\,min}}_{\boldsymbol{w}} \ell(\boldsymbol{w})$. We point out that the final term is symmetric up to permutation of $\boldsymbol{\Theta}$, enabling the replacement of the sum by multiplication with $K$. When $\gamma$ and $K$ are known, $p_{0} = \frac{1}{1 + \gamma}$ and $p_{K} = \frac{1}{K} \frac{\gamma}{1 + \gamma}$ under our constraints. Without loss of generality, we let $\boldsymbol{\theta}_1, \ldots, \boldsymbol{\theta}_{K} \sim p(\boldsymbol{\theta})$ and $\boldsymbol{x}\sim p(\boldsymbol{x}\, | \,\boldsymbol{\theta}_{K})$. An empirical estimate of the loss on $B$ samples is therefore $$\begin{equation}
|
| 117 |
+
\label{eqn:nrec-empirical-loss}
|
| 118 |
+
\begin{aligned}
|
| 119 |
+
\hat{\ell}_{\gamma, K}(\boldsymbol{w})
|
| 120 |
+
&\coloneqq - \frac{1}{B}
|
| 121 |
+
\Bigg[ \frac{1}{1 + \gamma} \sum_{b=1}^{B} \log q_{\boldsymbol{w}} \left(y = 0 \, | \,\boldsymbol{\Theta}^{(b)}, \boldsymbol{x}^{(b)} \right) \\
|
| 122 |
+
&\phantom{\approx - \frac{1}{B} \Bigg[ } + \frac{\gamma}{1 + \gamma} \sum_{b'=1}^{B} \log q_{\boldsymbol{w}} \left(y = K \, | \,\boldsymbol{\Theta}^{(b')}, \boldsymbol{x}^{(b')} \right) \Bigg].
|
| 123 |
+
\end{aligned}
|
| 124 |
+
\end{equation}$$
|
| 125 |
+
|
| 126 |
+
In the first term, the classifier sees a completely independently drawn sample of $\boldsymbol{x}$ and $\boldsymbol{\Theta}$ while $\boldsymbol{\theta}_K$ is drawn jointly with $\boldsymbol{x}$ in the second term. In both terms, the classifier considers $K$ choices. In practice, we bootstrap both $\boldsymbol{\theta}_1^{(b)}, \ldots, \boldsymbol{\theta}_{K}^{(b)}$ and $\boldsymbol{\theta}_1^{(b')}, \ldots, \boldsymbol{\theta}_{K-1}^{(b')}$ from the same mini-batch and compare them to the same $\boldsymbol{x}$, similarly to [nre-a]{.smallcaps} and [nre-b]{.smallcaps}. Proof of the above is in Appendix [6](#apndx:proof){reference-type="ref" reference="apndx:proof"}.
|
| 127 |
+
|
| 128 |
+
[nre-c]{.smallcaps} is general because specific hyperparameter settings recover [nre-a]{.smallcaps} and [nre-b]{.smallcaps}. To recover [nre-a]{.smallcaps} one should set $\gamma = 1$ and $K=1$ in [\[eqn:nrec-empirical-loss\]](#eqn:nrec-empirical-loss){reference-type="eqref" reference="eqn:nrec-empirical-loss"} yielding
|
| 129 |
+
|
| 130 |
+
$$\begin{equation}
|
| 131 |
+
\begin{aligned}
|
| 132 |
+
\hat{\ell}_{1, 1}(\boldsymbol{w})
|
| 133 |
+
&= - \frac{1}{2 B}
|
| 134 |
+
\Bigg[
|
| 135 |
+
\sum_{b=1}^{B} \log \frac{1}{1 + \exp \circ h_{\boldsymbol{w}}(\boldsymbol{\theta}^{(b)}, \boldsymbol{x}^{(b)})}
|
| 136 |
+
+ \sum_{b'=1}^{B} \log \frac{\exp \circ h_{\boldsymbol{w}}(\boldsymbol{\theta}^{(b')}, \boldsymbol{x}^{(b')})}{1 + \exp \circ h_{\boldsymbol{w}}(\boldsymbol{\theta}^{(b')}, \boldsymbol{x}^{(b')})}
|
| 137 |
+
\Bigg] \\
|
| 138 |
+
&= - \frac{1}{2B} \left[ \sum_{b=1}^{B} \log \left(
|
| 139 |
+
1 - \sigma \circ h_{\boldsymbol{w}}(\boldsymbol{\theta}^{(b)}, \boldsymbol{x}^{(b)})
|
| 140 |
+
\right)
|
| 141 |
+
+ \sum_{b'=1}^{B}\log \left(
|
| 142 |
+
\sigma \circ h_{\boldsymbol{w}}(\boldsymbol{\theta}^{(b')}, \boldsymbol{x}^{(b')})
|
| 143 |
+
\right) \right]
|
| 144 |
+
\end{aligned}
|
| 145 |
+
\end{equation}$$ where we dropped the lower index. Recovering [nre-b]{.smallcaps} requires taking the limit $\gamma \to \infty$ in the loss function. In that case, the first term goes to zero, and the second term converges to the softmax function.
|
| 146 |
+
|
| 147 |
+
$$\begin{equation}
|
| 148 |
+
\begin{aligned}
|
| 149 |
+
\hat\ell_{\infty, K}(\boldsymbol{w})
|
| 150 |
+
&= \lim_{\gamma \to \infty} \hat\ell_{\gamma, K}(\boldsymbol{w}) \\
|
| 151 |
+
&= - \frac{1}{B}
|
| 152 |
+
\Bigg[ \sum_{b'=1}^{B} \log
|
| 153 |
+
\frac{\exp \circ h_{\boldsymbol{w}}(\boldsymbol{\theta}_K^{(b')},\boldsymbol{x}^{(b')})}{\sum_{i=1}^{K} \exp \circ h_{\boldsymbol{w}}(\boldsymbol{\theta}_i^{(b')},\boldsymbol{x}^{(b')})}
|
| 154 |
+
\Bigg]
|
| 155 |
+
\end{aligned}
|
| 156 |
+
\end{equation}$$
|
| 157 |
+
|
| 158 |
+
is determined by substitution into [\[eqn:nrec-empirical-loss\]](#eqn:nrec-empirical-loss){reference-type="eqref" reference="eqn:nrec-empirical-loss"}. Both equations are obviously the same as their counterparts.
|
| 159 |
+
|
| 160 |
+
In the limit of infinite data and infinite neural network capacity (width, depth) the optimal classifier trained using [nre-c]{.smallcaps}(with $\gamma \in \mathbb{R}^{+}$) satisfies the equality: $$\begin{align}
|
| 161 |
+
h_{\boldsymbol{w}^*}(\boldsymbol{\theta},\boldsymbol{x}) & = \log \frac{p(\boldsymbol{\theta}\, | \,\boldsymbol{x})}{p(\boldsymbol{\theta})}.
|
| 162 |
+
\end{align}$$ In particular, we have that the following normalizing constant is trivial: $$\begin{align}
|
| 163 |
+
Z(\boldsymbol{x}) & := \int \exp\left(h_{\boldsymbol{w}^*}(\boldsymbol{\theta},\boldsymbol{x})\right) p(\boldsymbol{\theta})\,d\boldsymbol{\theta}= \int p(\boldsymbol{\theta}\, | \,\boldsymbol{x})\,d\boldsymbol{\theta}= 1.
|
| 164 |
+
\end{align}$$ This is a result of Lemma [1](#lem:optimal=log-ratio){reference-type="ref" reference="lem:optimal=log-ratio"} in Appendix [6](#apndx:proof){reference-type="ref" reference="apndx:proof"}. However, practitioners never operate in this setting, rather they use finite sample sizes and neural networks with limited capacity that are optimized locally. The non-optimal function $\exp(h_{\boldsymbol{w}}(\boldsymbol{\theta},\boldsymbol{x}))$ does not have a direct interpretation as a ratio of probability distributions, rather as the function to weigh the prior $p(\boldsymbol{\theta})$ to approximate the unnormalized posterior. In other words, we find the following approximation for the posterior $p(\boldsymbol{\theta}\, | \,\boldsymbol{x})$: $$\begin{align}
|
| 165 |
+
\label{eqn:normalizing-constant}
|
| 166 |
+
p_{\boldsymbol{w}}(\boldsymbol{\theta}\, | \,\boldsymbol{x}) &:= \frac{\exp(h_{\boldsymbol{w}}(\boldsymbol{\theta},\boldsymbol{x}))}{Z_{\boldsymbol{w}}(\boldsymbol{x})} p(\boldsymbol{\theta}), &
|
| 167 |
+
Z_{\boldsymbol{w}}(\boldsymbol{x}) & := \int \exp\left(h_{\boldsymbol{w}}(\boldsymbol{\theta},\boldsymbol{x})\right) p(\boldsymbol{\theta})\, d\boldsymbol{\theta},
|
| 168 |
+
\end{align}$$ where in general the normalizing constant is not trivial, i.e. $Z_{\boldsymbol{w}}(\boldsymbol{x}) \neq 1$. As stated above, the [nre-c]{.smallcaps}(and [nre-a]{.smallcaps}) objective encourages $Z_{\boldsymbol{w}}(\boldsymbol{x})$ to converge to $1$. This is in sharp contrast to [nre-b]{.smallcaps}, where even at optimum with an unrestricted function class a non-trivial $\boldsymbol{x}$-dependent bias term can appear.
|
| 169 |
+
|
| 170 |
+
There is no restriction on how pathological the [nre-b]{.smallcaps} bias $c_{\boldsymbol{w}}(\boldsymbol{x})$ can be. Consider a minimizer of [\[eqn:nreb-loss\]](#eqn:nreb-loss){reference-type="eqref" reference="eqn:nreb-loss"}, the [nre-b]{.smallcaps} loss function, $h_{\boldsymbol{w}^{\ast}} + c_{\boldsymbol{w}^{\ast}}(\boldsymbol{x})$. Adding any function $d(\boldsymbol{x})$ cancels out in the fraction and is also a minimizer of [\[eqn:nreb-loss\]](#eqn:nreb-loss){reference-type="eqref" reference="eqn:nreb-loss"}. This freedom complicates any numerical computation of the normalizing constant and renders the importance sampling diagnostic from Section [2.2](#sec:diagnostic){reference-type="ref" reference="sec:diagnostic"} generally inapplicable. We report Monte Carlo estimates of $Z_{\boldsymbol{w}}(\boldsymbol{x})$ on a test problem across hyperparameters in Figure [11](#fig:partition-function){reference-type="ref" reference="fig:partition-function"}.
|
| 171 |
+
|
| 172 |
+
[sbi]{.smallcaps} is difficult to verify because, for many use cases, the practitioner cannot compare surrogate $p_{\boldsymbol{w}}(\boldsymbol{\theta}\, | \,\boldsymbol{x})$ to the intractable ground truth $p(\boldsymbol{\theta}\, | \,\boldsymbol{x})$. Incongruous with the practical use case for [sbi]{.smallcaps}, much of the literature has focused on measuring the similarity between surrogate and posterior using two-samples tests on tractable problems. For comparison with literature, we first reference a two-sample exactness metric which requires a tractable posterior. We then discuss diagnostics which do not require samples from $p(\boldsymbol{\theta}\, | \,\boldsymbol{x})$, commenting on the relevance for each [nre]{.smallcaps} algorithm with empirical results. Further, we find that a known variational bound to the mutual information is tractable to estimate within [sbi]{.smallcaps}, that it bounds the average Kullback-Leibler divergence between surrogate and posterior, and propose to use it for model comparison on intractable inference tasks.
|
| 173 |
+
|
| 174 |
+
Assessments of approximate posterior quality are available when samples can be drawn from both the posterior $\boldsymbol{\theta}\sim p(\boldsymbol{\theta}\, | \,\boldsymbol{x})$ and the approximation $\boldsymbol{\theta}\sim q(\boldsymbol{\theta}\, | \,\boldsymbol{x})$. In the deep learning-based [sbi]{.smallcaps} literature, exactness is measured as a function of computational cost, usually simulator calls. We investigate this with [nre-c]{.smallcaps} in Section [3.3](#sec:benchmark){reference-type="ref" reference="sec:benchmark"}.
|
| 175 |
+
|
| 176 |
+
Based on the recommendations of @sbibm our experimental results are measured using the Classifier Two-Sample Test (C2ST) [@friedman2003multivariate; @lehmann2005testing; @lopez2017revisiting]. A classifier is trained to distinguish samples from either the surrogate or the ground truth posterior. An average classification probability on holdout data of 1.0 implies that samples from each distribution are easily identified; 0.5 implies either the distributions are the same or the classifier does not have the capacity to distinguish them.
|
| 177 |
+
|
| 178 |
+
<figure id="fig:importance-sampling-diagnostic" data-latex-placement="hbt">
|
| 179 |
+
<figure>
|
| 180 |
+
<img src="figures/importance-diagnostic-posterior" />
|
| 181 |
+
<figcaption>Posteriors</figcaption>
|
| 182 |
+
</figure>
|
| 183 |
+
<figure>
|
| 184 |
+
<img src="figures/importance-diagnostic-likelihoods" />
|
| 185 |
+
<figcaption>Likelihood and reweighted marginal generative models.</figcaption>
|
| 186 |
+
</figure>
|
| 187 |
+
<figure>
|
| 188 |
+
<img src="figures/importance-diagnostic-roc" />
|
| 189 |
+
<figcaption>Receiver operating characteristic.</figcaption>
|
| 190 |
+
</figure>
|
| 191 |
+
<figcaption> The figures visualize the importance sampling diagnostic on ratio estimators trained using <span class="smallcaps">nre-b</span> and <span class="smallcaps">nre-c</span>. (a) Both methods produce satisfactory posterior estimates that agree with <span class="math inline"><em>p</em>(<strong>θ</strong> | <strong>x</strong>)</span>. (b) <span class="math inline"><em>p</em>(<strong>x</strong> | <strong>θ</strong>)</span> is shown along with <span class="math inline"><em>p</em>(<strong>x</strong>)</span> samples weighted by <span class="smallcaps">nre-c</span> <span class="math inline">exp ∘ <em>h</em><sub><strong>w</strong></sub>(<strong>θ</strong>, <strong>x</strong>)</span> and <span class="smallcaps">nre-b</span> <span class="math inline">exp ∘ <em>g</em><sub><strong>w</strong></sub>(<strong>θ</strong>, <strong>x</strong>)</span>. Each plot corresponds to a different <span class="math inline"><strong>θ</strong></span>. Despite high posterior accuracy, the <span class="smallcaps">nre-b</span> estimates are distinct from <span class="math inline"><em>p</em>(<strong>x</strong> | <strong>θ</strong>)</span>. (c) Two classifiers’ <span class="smallcaps">roc</span> curves, each trained to distinguish <span class="math inline"><em>p</em>(<strong>x</strong> | <strong>θ</strong>)</span> samples from <span class="math inline"><em>p</em>(<strong>x</strong>)</span> samples weighted by the corresponding <span class="smallcaps">nre</span>’s <span class="math inline"><em>r̂</em></span> estimate. The classifier failed to distinguish likelihood samples from the <span class="smallcaps">nre-c</span> weighted data samples, but successfully identified <span class="smallcaps">nre-b</span> weighted samples. <span class="smallcaps">nre-b</span> accurately approximates the posterior, but fails the diagnostic. <span class="smallcaps">nre-c</span> produces an accurate posterior surrogate and passes the diagnostic. </figcaption>
|
| 192 |
+
</figure>
|
| 193 |
+
|
| 194 |
+
An accurate likelihood-to-evidence weight transforms the data distribution into the likelihood by ${p(\boldsymbol{x}\, | \,\boldsymbol{\theta}) = p(\boldsymbol{x}) r(\boldsymbol{x}\, | \,\boldsymbol{\theta})}$. Since [nre]{.smallcaps} necessitates simulator access, we can test the ratio estimator by training a classifier to distinguish unweighted $p(\boldsymbol{x}\, | \,\boldsymbol{\theta})$ samples from weighted $p(\boldsymbol{x}) \hat{r}(\boldsymbol{x}\, | \,\boldsymbol{\theta})$ samples, where $\hat{r}$ implies an estimate. Indistinguishability between samples implies either that the approximate ratio is accurate for parameter $\boldsymbol{\theta}$ or that the classifier does not have sufficient power to find predictive features. Issues with classification power can be detected by assessing the classifier's ability to distinguish $p(\boldsymbol{x})$ from $p(\boldsymbol{x}\, | \,\boldsymbol{\theta})$. The performance can be visualized in a receiver operating curve ([roc]{.smallcaps}) or measured by the area under the curve ([roc]{.smallcaps}[auc]{.smallcaps}). This diagnostic has been used for ratio estimators before [@Cranmer2015; @Hermans2019] but it comes from training models under covariate shift [@shimodaira2000improving]. It is particularly appealing because it does not require samples from $p(\boldsymbol{\theta}\, | \,\boldsymbol{x})$.
|
| 195 |
+
|
| 196 |
+
@Durkan2020 do not mention this diagnostic in their paper, but due to its intrinsic bias [nre-b]{.smallcaps} does not fulfill the identity necessary for this diagnostic to hold at optimum. The unknown factor that depends on $\boldsymbol{x}$ implies ${p(\boldsymbol{x}\, | \,\boldsymbol{\theta}) \neq p(\boldsymbol{x}) \exp\circ g_{\boldsymbol{w}}(\boldsymbol{x}\, | \,\boldsymbol{\theta})}$. We provide empirical evidence of this issue in Figure [1](#fig:importance-sampling-diagnostic){reference-type="ref" reference="fig:importance-sampling-diagnostic"}. Although [nre-b]{.smallcaps} accurately approximates the true posterior, it demonstrably fails the diagnostic. Given the limited options for verification of [sbi]{.smallcaps} results, this presents a major problem by significantly limiting the trustworthiness of [nre-b]{.smallcaps} on any problem with an intractable posterior. In Appendix [6](#apndx:proof){reference-type="ref" reference="apndx:proof"}, we show that the unrestricted [nre-b]{.smallcaps}-specific $c_{\boldsymbol{w}}(\boldsymbol{x})$ bias means approximating $p(\boldsymbol{x}\, | \,\boldsymbol{\theta})$ with normalized importance weights will not solve the issue.
|
| 197 |
+
|
| 198 |
+
Selecting the surrogate model most-similar to the target posterior remains intractable without access to $p(\boldsymbol{\theta}\, | \,\boldsymbol{x}_o)$. Nevertheless, practitioners must decide which surrogate should approximate the posterior across training and hyperparameter search. Unfortunately, the validation losses between different versions of [nre]{.smallcaps} and different $K$ and $\gamma$ settings are not comparable. A good heuristic is to choose the model which minimizes the Kullback-Leibler divergence *on average* over possible data $p(\boldsymbol{x})$. In Appendix [8](#apndx:mutual-information){reference-type="ref" reference="apndx:mutual-information"}, we prove the relationship between $I(\boldsymbol{\theta}; \boldsymbol{x})$, the *mutual information* with respect to $p(\boldsymbol{\theta}, \boldsymbol{x})$, our models' variational bound $I_{\boldsymbol{w}}^{(0)}(\boldsymbol{\theta}; \boldsymbol{x})$, and the average [kld]{.smallcaps} $$\begin{equation}
|
| 199 |
+
\label{eqn:average-kld-is-mi}
|
| 200 |
+
\mathbb{E}_{p(\boldsymbol{x})} \left[ \textsc{kld}(p(\boldsymbol{\theta}\, | \,\boldsymbol{x}) \, \Vert \,p_{\boldsymbol{w}}(\boldsymbol{\theta}\, | \,\boldsymbol{x})) \right]
|
| 201 |
+
= I(\boldsymbol{\theta}; \boldsymbol{x}) - I_{\boldsymbol{w}}^{(0)}(\boldsymbol{\theta}; \boldsymbol{x}),
|
| 202 |
+
\end{equation}$$ $$\begin{equation}
|
| 203 |
+
\label{eqn:define-mi-0-in-paper}
|
| 204 |
+
I_{\boldsymbol{w}}^{(0)}(\boldsymbol{\theta}; \boldsymbol{x})
|
| 205 |
+
\coloneqq \mathbb{E}_{p(\boldsymbol{\theta}, \boldsymbol{x})} \left[ \log \hat{r}(\boldsymbol{x}\, | \,\boldsymbol{\theta}) \right] - \mathbb{E}_{p(\boldsymbol{x})} \left[ \log \mathbb{E}_{p(\boldsymbol{\theta})} [\hat{r}(\boldsymbol{x}\, | \,\boldsymbol{\theta})] \right].
|
| 206 |
+
\end{equation}$$ The non-negativity of all terms in [\[eqn:average-kld-is-mi\]](#eqn:average-kld-is-mi){reference-type="eqref" reference="eqn:average-kld-is-mi"} implies $I(\boldsymbol{\theta}; \boldsymbol{x}) \geq I_{\boldsymbol{w}}^{(0)}(\boldsymbol{\theta}; \boldsymbol{x})$; that means the model which minimizes $-I_{\boldsymbol{w}}^{(0)}(\boldsymbol{\theta}; \boldsymbol{x})$ best satisfies our heuristic. We propose to approximate $-I_{\boldsymbol{w}}^{(0)}(\boldsymbol{\theta}; \boldsymbol{x})$ with Monte Carlo using held-out data as a metric for model selection during training and across hyperparameters. The expectation values average over $p(\boldsymbol{\theta}, \boldsymbol{x})$, $p(\boldsymbol{\theta})$, and $p(\boldsymbol{x})$. We can sample from all of these distributions in the [sbi]{.smallcaps} context. Since the second term in [\[eqn:define-mi-0-in-paper\]](#eqn:define-mi-0-in-paper){reference-type="eqref" reference="eqn:define-mi-0-in-paper"} computes the average log partition function, our metric can compare [nre-b]{.smallcaps}-based surrogates to [nre-c]{.smallcaps}-based ones. However, the metric comes with the normal challenges of estimating the log partition function which can be very expensive or high variance in some cases. We go into more depth in Appendix [8](#apndx:mutual-information){reference-type="ref" reference="apndx:mutual-information"}, including mentioning the relevance to Neural Posterior Estimation [@papamakarios2016fast; @lueckmann2017flexible; @greenberg2019automatic; @Durkan2020]. While the application to [sbi]{.smallcaps} is novel, bounds on the mutual information have been broadly investigated for contrastive representation learning and mutual information estimation [@gutmann2010noise; @gutmann2012noise; @gutmann2022statistical; @belghazi2018mine; @van2018representation; @poole2019variational].
|
| 207 |
+
|
| 208 |
+
For a candidate distribution to qualify as the posterior, integrating over data must return the prior. A measurement that follows from calibration to the prior is called expected coverage probability. Expected coverage probability can be estimated with samples from $p(\boldsymbol{\theta}, \boldsymbol{x})$ and any amortized [sbi]{.smallcaps} method. Although important, ability to compute this metric does not distinguish [nre-c]{.smallcaps}. We refer the interested reader to @hermans2021averting. We note that popular sequential techniques generally render this diagnostic inapplicable, with exceptions [@miller2020simulation; @miller2021truncated; @cole2021fast].
|
2210.08410/main_diagram/main_diagram.drawio
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
<mxfile host="app.diagrams.net" modified="2022-05-18T09:56:14.086Z" agent="5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/101.0.4951.54 Safari/537.36" etag="wk1UZEaojUE_WAMZr69w" version="18.0.2" type="device"><diagram id="44ZEIqMaInlNNZ-rveip" name="our-index">7Z1fc6I6FMA/jTP3PnQHCP98rLrb9m577badbbsvdxCiMgXjAlbtp79EiELUipSELLbd2UKIAc/5JSc5nCQt0PUXF4E1Hd8gB3otRXIWLdBrKUpb1eP/ccIySdBJwihwnSRJ3iTcu28wTZTS1JnrwDCXMULIi9xpPtFGkwm0o1yaFQRons82RF7+rlNrlN5R2iTc25YHt7I9uk40TlJNLZP7ErqjMbmzLKVXfItkTosIx5aD5pl7ga8t0A0QipIjf9GFHpYdkUtS0Lc9V9cPFsBJVOQD9rNxd2FK6K0zCX9dg7Nvl4//nJmpNl4tb5Z+4/RpoyURAZw451iS8ZntWWHo2i3QGUe+FyfI8WGAZhMH4rtI8RlcuNETPv6ipWfPaT583FtksvWW5GQSBcvMh/Dpc/ba5mOrM/K5MArQy1ov+B7bQiHSR7PAhu9IgsBlBSMYvSexJB90RllCUpFfQOTD+AnjDPMNLQSWcQYUkhZAz4rc1zxtVgrtaF3c+g63yI2/mSKl9UtV03LS6iWTc1JE8r3TT2XpoAqSlXxBik4VlAhmq6D4IPO1N0kr+I4AUS7AoefFtT4+6czHbgTvp9ZKofO43ckDaYXTpCkYugvMJc3JCrK9pLzCIIKLd3WbXgV6XmZE9hnVqztUT6soq+WcWI+VocJShkPX87rIQ8GqIDDU8C9OR5Mok578rGWeuaKvfthpg64Jes3aACetDUUSTB0qR3U4FjSH9k6x2yYcDNmJnW7Gaxe7xlPsGjQddZfYTWUAmNJuCib2It265jY+tGGuXR3GSatjq5NatzpMnuoYQt3eaQscoz2QdgxjqhK7JpotaJ90LdBF6xERF8up6kM0m61wsxIOHFozL77aGQWW48KNRiZoAlkOCnISX/eaMhJXdkhcYSZxboagLonrokmcmw2oS+KyJJjIAbdmvjaRq6KJXGm8yE3RRM7N3Vab8RTNegJuLrXaRC6a+QTc3Gl1iRwIZz65udJqE7lw5rPxAyEgnPls/EhIFc58Nn4opIpmPklD11yRa6KZT5VpYIcQIhfNfKqNH31qoplPtfGjT10086k2fvSpC2c+Gz/6NIQzn40ffRqimU+tSMNyVEj0/nAknsHS+4Kl9urwYMh02hwcDJkmbyAEiZkGbYo4o2TMNB3XssUk45hprUh7/EnqMaQaYpEqU4BpJUmlw0IBXRBrUiufZvIOqRVTl7d7oGRd4MAuEItduitVemaKXjO7RbzRNbNbhsjivHNgVxOL3araXXpSCnd2i7j1P9n9GLt6M9kFNfcZ9CKe4waxy4FUpaGk1txD0It4fz9G6nreWJ3jsPystmpaWbUgu4pY864bMzLT2Xu7BGZX4cNuWyh2FVARuzV3bnX27i+h0GUPqmCO2spArb2RZe/+EorUOjoIgnVuq2O37s4te/fXhl3mHPJ13RZmVxWLXTrWo6zrll4EhDu77N1fzNgVlFSxHLWVtbL0Ah68SSWvoBmSul5uoMYeArUYQjU9BLMou2K5EJoyDDOU00KXA6hivVHQtKpArZtU9n5aoUito5El8f6isEvH0NLLWhZml6oEgPP6mAZ7Z5fA7H7MT1uYXbEiaDSDYrd0nCLtQqALYs0ue/eXwOxyanfFciHsnaFzNLtUJQB0QazZZe/+EopdDqSK5ULQ6GjwdllSaRcCXRBrUjnEekn4t2Z
SyUJy5UktutS7IhSojZm2QOrJJ6hVgSpWv5UGtWyDSnd/Zc6Ycog/rAPTOjbNEKtzqtIrsUolCaU7ubwbUuWT0KoIFatTqlLhhTLd9hUllO4zAM6Esve4npipF+vVAL2okky3gIWjXA7NFmMNKoeptKcFqlgzaVXqZatMN4SFQT0U+M0aVA6zEU8LVLGCs+n3THLpuEE6vICz09/8dEdVDCppeUQhdd8mO0eTum8FmOpJ1S7bD48vwXhggR+L/q+Xs9fb30W2HsTL2bi25V1bA+jdotCNXDSJLw1QFCEfs0k298SIOFY4XlNree4IZ7VjLmCQ55sUe57midB0a/ijJKVP8ZP4ixHeZPWLj+yX2fSLbwX4jz0LvGUnWK36s11JpNVP68jFfgIUWel3PJPNd2MLj1hOWcvpeT2HKsOrQQIMq17q5+qm/Xgn/Qx/fH39/vTWfzLO29+KaJ6Ifi3kF1xHD620tGm48NnQc6eX5Lj8kkvvvgQvv+DSrn1Pdi24VMW+J53L8PuP5evg6aL/CJ56t3fz4c1OLegrseC9YnLq0H/PELlwFq42Cz6PMyjmdLG5GB+N8N+v11fn9/HVq1gVC1Jk/IRJqUmeLX3HQozyuszXqFQzWTWmSVsVna7dvus4+DY74cmbOvyI6V7Iss6k8snaltJlfUfdo/vJZbT+fW4qZz+9dmBIF4vFvR39e/1fFVqXYq2v1E0pftVKh3+sxpWKmltAhyyB7Yq+q3/ATOW73JUVqbylqPG/v1paF++7HaugZXSeW0bv7+TCJwtt/SALOiMWpmDck3/rrm/d3KrRs381vnyrqvpvgdD1ZmGsgD+s9leg4a3tzJSaNVxRbS9U1btJVU+KHwT5rCeHwroTXQMKct+d/Or3OqrR9h/6D3f3wyf7cGUfWnaehG48dnRjUSrSv3CeZeBDNiLl42OlaJ27fv+hpfUyXA3oe/wZrDEwNPSbK3W7n8kVPS59DuhPo2UIo8/eRqp0XeEEQXwaIKyVjUMIe0hukANxjv8B</diagram></mxfile>
|
2210.08410/main_diagram/main_diagram.pdf
ADDED
|
Binary file (28.4 kB). View file
|
|
|
2210.08410/paper_text/intro_method.md
ADDED
|
@@ -0,0 +1,161 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Introduction
|
| 2 |
+
|
| 3 |
+
Many real-world problems require making accurate predictions from a large number of potential output choices. For example, search advertising aims to find the most relevant ads to a given search query from a large corpus of ads [26, 14], open-domain question answering requires finding the right answers to a given question from a large collection of text documents [8, 29], and product recommendation requires recommending similar or related products from a large product catalog, based on past searches and interactions by users. eXtreme Multi-label Classification (XMC) is a popular framework for solving such problems [4], which formulates these problems as a multi-label classification task with very large number of labels; here each output choice is treated as a separate label. A label $\ell$ is often parameterized by its *one-versus-all* classifier vector $\mathbf{w}_{\ell}$ and the relevance between label $\ell$ and input $\mathbf{x}$ is formulated as $\mathbf{w}_{\ell}^T \phi(\mathbf{x})$ , where $\phi$ is an encoding function which maps an input $\mathbf{x}$ to its vector representation.
|
| 4 |
+
|
| 5 |
+
<sup>\*</sup>This work does not relate to Hsiang-Fu's position at Amazon
|
| 6 |
+
|
| 7 |
+

|
| 8 |
+
|
| 9 |
+

|
| 10 |
+
|
| 11 |
+
Figure 1: Traditional partition-based index vs ELIAS index; here an arrow from a cluster to a label denotes the assignment of the label to the cluster, arrow width indicates the weight of the assignment. (*left*) Existing partition based XMC methods use a shallow balanced tree as the index structure with a label uniquely assigned to exactly one cluster; moreover, they initialize the clusters over pre-defined features and keep them fixed throughout the training procedure. (*right*) ELIAS generalizes the tree based index to a sparsely connected graph-based index and learns the cluster-to-label assignments end-to-end with the task objective during training.
|
| 12 |
+
|
| 13 |
+
Evaluating $\mathbf{w}_{\ell}^T \phi(\mathbf{x})$ for every label $\ell$ in an XMC task can get computationally expensive since the number of labels could easily be upwards of millions. To reduce the complexity, most existing methods employ a search index that efficiently shortlists a small number of labels for an input query and the relevance scores are only evaluated on these shortlisted labels. The quality of the search index plays a pivotal role in the accuracy of these methods since a label $\ell$ outside the shortlist will be directly discarded, even if it can be correctly captured by its classifier vector $\mathbf{w}_{\ell}$ . Moreover, the label classifier $\mathbf{w}_{\ell}$ is a function of the quality of the index as during training, the label classifiers are learned with negative sampling based on the search index. Therefore, how to improve the quality of the search index becomes a key challenge in the XMC problem.
|
| 14 |
+
|
| 15 |
+
There are two main formulations of the search index: 1) partition-based approach [25, 31, 7, 18, 32] and 2) approximate nearest neighbor search (ANNS) based approach [16, 9, 13, 10]. In partition-based approach, labels are first arranged into a tree-based index by partitioning the label space into mutually exclusive clusters and then a ML model is learned to route a given instance to a few relevant clusters. In an ANNS-based approach, a fixed, black-box ANNS index is learned on pre-defined label embeddings. Given an input embedding, this index is then used to efficiently query a small set of nearest labels based on some distance/similarity between the input and label embeddings. Both of these approaches suffer from a critical limitation that the index structure is fixed after it's initialized.
|
| 16 |
+
|
| 17 |
+
This decoupling of the search index from the rest of the ML model training prevents the search index from adapting with the rest of the model during training, which leads to sub-optimal performance.
|
| 18 |
+
|
| 19 |
+
To overcome this challenge, we propose a novel method called ELIAS: End-to-end Learning to Index and Search, which jointly learns the search index along with the rest of the ML model for multi-label classification in large output spaces. In particular, as illustrated in Fig. 1, ELIAS generalizes the widely used partition tree-based index to a sparsely connected weighted graph-based index. ELIAS models the discrete cluster-to-label assignments in the existing partition based approaches as soft learnable parameters that are learned end-to-end with the encoder and classification module to optimize the final task objective. Moreover, because ELIAS uses a graph-based arrangement of labels instead of a tree-based arrangement, a label can potentially be assigned to multiple relevant clusters. This helps to better serve labels with a multi-modal input distribution [22].
|
| 20 |
+
|
| 21 |
+
Through extensive experiments we demonstrate that ELIAS achieves state-of-the-art results on multiple large-scale XMC benchmarks. Notably, ELIAS can be up to 2.5% better at precision@1 and up to 4% better at recall@100 than existing XMC methods. ELIAS's search index can be efficiently implemented on modern GPUs to offer fast inference times on million scale datasets. In particular, ELIAS offers sub-millisecond prediction latency on a dataset with 3 million labels on a single GPU.
|
| 22 |
+
|
| 23 |
+
# Method
|
| 24 |
+
|
| 25 |
+
The multi-label classification problem can be formulated as following: given an input $\mathbf{x} \in \mathcal{X}$ , predict $\mathbf{y} \in \{0,1\}^L$ where $\mathbf{y}$ is a sparse L dimensional vector with $y_\ell = 1$ if and only if label $\ell$ is relevant to input $\mathbf{x}$ . Here, L denotes the number of distinct labels - note that $\mathbf{y}$ can have multiple non-zero entries resulting in multiple label assignments to input $\mathbf{x}$ . The training dataset is given in the form of $\{(\mathbf{x}^i, \mathbf{y}^i) : i = 1, ..., N\}$ . XMC methods address the case where the label space (L) is extremely large (in the order of few hundred thousands to millions). All deep learning based XMC methods have the following three key components:
|
| 26 |
+
|
| 27 |
+
**Deep encoder** $\phi: \mathcal{X} \to \mathbb{R}^D$ which maps the input $\mathbf{x}$ to a D-dimensional dense embedding through a differentiable function. For text input, a popular choice of $\phi$ is the BERT [11] encoder where each input $\mathbf{x}$ is represented as a sequence of tokens.
|
| 28 |
+
|
| 29 |
+

|
| 30 |
+
|
| 31 |
+
Figure 2: Illustration of ELIAS's search procedure: an input $\mathbf{x}$ is first embedded by the text encoder $\phi$ to get its embedding $\phi(\mathbf{x})$ . Only a few (beam-size) clusters are shortlisted based on cluster relevance scores $\hat{s}_c \sim \hat{\mathbf{w}}_c^T \phi(\mathbf{x})$ . All potential edges of shortlisted clusters are explored and assigned a score based on the product $\hat{s}_c * \hat{s}_{c,\ell}$ ( $\hat{s}_{c,\ell}$ is the normalized form of the learnable edge weight parameter $a_{c,\ell}$ between cluster $c$ and label $\ell$ ). Top-K paths are shortlisted based on their assigned scores and the final label relevance is computed as $\sigma(\mathbf{w}_\ell^T \phi(\mathbf{x})) * \hat{s}_{c,\ell} * \hat{s}_c$ , here $\sigma$ is the sigmoid function. If a label $\ell$ can be reached from multiple paths then the path with maximal score is kept and rest are discarded.
|
| 32 |
+
|
| 33 |
+
Search Index $\mathcal{I}: \mathcal{X} \to \mathbb{R}^L$ shortlists K labels along with a score assigned to each shortlisted label for a given input $\mathbf{x}$ . More specifically, $\hat{\mathbf{y}} = \mathcal{I}(\mathbf{x})$ is a sparse real valued vector with only $K \ (\ll L)$ non-zero entries and $\hat{y}_\ell \neq 0$ implies that label $\ell$ is shortlisted for input $\mathbf{x}$ with shortlist relevance score $\hat{y}_\ell$ . As illustrated in Figure 1, many partition based methods [18, 33] formulate their index as a label tree derived by hierarchically partitioning the label space into C clusters and then learn classifier vectors $\hat{\mathbf{W}}_C = [\hat{\mathbf{w}}_c]_{c=1}^C (\hat{\mathbf{w}}_c \in \mathbb{R}^D)$ for each cluster which is used to select only a few clusters for a given input. More specifically, given the input $\mathbf{x}$ , the relevance of cluster c to input $\mathbf{x}$ is quantified by cluster relevance scores $\hat{s}_c = \hat{\mathbf{w}}_c^T \phi(\mathbf{x})$ . The top-b clusters based on these scores are selected and all labels inside the shortlisted clusters are returned as the shortlisted labels, where $b(\ll C)$ is a hyperparameter denoting the beam-size.
|
| 34 |
+
|
| 35 |
+
**Label classifiers** $\mathbf{W}_L = [\mathbf{w}_\ell]_{\ell=1}^L$ where $\mathbf{w}_\ell \in \mathbb{R}^D$ represents the classifier vector for label $\ell$ and $\mathbf{w}_\ell^T \phi(\mathbf{x})$ represents the *label relevance score* of label $\ell$ for input $\mathbf{x}$ . As explained above, $\mathbf{w}_\ell^T \phi(\mathbf{x})$ is only computed for a few shortlisted labels obtained from the search index $\mathcal{I}$ .
|
| 36 |
+
|
| 37 |
+
ELIAS formulates its label index as a specialized weighted graph between a root node $\emptyset$ , C cluster nodes $\mathcal{C} = \{c\}_{c=1}^C$ and L label nodes $\mathcal{Y} = \{\ell\}_{\ell=1}^L$ . As illustrated in Figure 2, all cluster nodes are connected to the root node and all label nodes are sparsely connected to few cluster nodes. ELIAS parameterizes the cluster-to-label edge assignments by a learnable adjacency matrix $\mathbf{A} = [a_{c,\ell}]_{C \times L}$ , where the scalar parameter $a_{c,\ell}$ denotes the edge importance between cluster c and label $\ell$ .
|
| 38 |
+
|
| 39 |
+
Note that $\mathbf{A}$ can be very large for XMC datasets and using a dense $\mathbf{A}$ will incur $\mathcal{O}(CL)$ cost in each forward pass which can be computationally prohibitive for large-scale datasets. To mitigate this we restrict $\mathbf{A}$ to be a row-wise sparse matrix i.e. $\|\mathbf{a}_i\|_0 \le \kappa$ where $\|.\|_0$ represents the $\ell_0$ norm, $\mathbf{a}_i$ represents the $i^{th}$ row of $\mathbf{A}$ and $\kappa$ is a hyper-parameter which controls the sparsity of $\mathbf{A}$ . During training, only the non-zero entries of $\mathbf{A}$ is learned and the zero entries do not participate in any calculation. We defer the details of how we initialize the sparsity structure of $\mathbf{A}$ to Section 3.4.
|
| 40 |
+
|
| 41 |
+
Existing partition based XMC methods can be thought of as a special case of this formulation by adding additional restrictions that 1) each label is connected to exactly one cluster node, and 2) all cluster-to-label connections have equal importance. Moreover, existing methods initialize the cluster-to-label adjacency matrix A beforehand based on clustering over pre-defined features and keep it fixed throughout the training procedure. ELIAS overcomes these shortcomings by enabling the model to learn the cluster-to-label edge importance.
|
| 42 |
+
|
| 43 |
+
ELIAS trains the entire model, including the deep encoder $\phi$ , the search index parameters $\hat{\mathbf{W}}_C$ , $\mathbf{A}$ and the label classifiers $\mathbf{W}_L$ in an end-to-end manner. We now describe the details of the forward pass of ELIAS.
|
| 44 |
+
|
| 45 |
+
**Text representation**: An input $\mathbf{x}$ is embedded by the encoder $\phi$ into a dense vector representation $\phi(\mathbf{x})$ . In particular, we use BERT-base [11] as the encoder and represent $\phi(\mathbf{x})$ by the final layer's CLS token vector.
|
| 46 |
+
|
| 47 |
+
Query search index: Recall that the goal of the search index $\mathcal{I}$ is to efficiently compute a shortlist of labels $\hat{\mathbf{y}} \in \mathbb{R}^L$ , where $\hat{\mathbf{y}}$ is a sparse real valued vector with $K (\ll L)$ non-zero entries and $\hat{y}_\ell \neq 0$ implies that label $\ell$ is shortlisted for input $\mathbf{x}$ with shortlist score $\hat{y}_\ell$ . Similar to existing methods, ELIAS achieves this by first shortlisting a small subset of clusters $\hat{\mathcal{C}} \subset \mathcal{C}$ based on cluster relevance scores defined by $\hat{\mathbf{w}}_c^T \phi(\mathbf{x})$ . But unlike existing methods which simply return the union of the fixed label set assigned to each shortlisted cluster, ELIAS shortlists the top-K labels based on the soft cluster-to-label assignments and backpropagates the loss feedback to each of the shortlisted paths. More specifically, ELIAS defines the cluster relevance scores $\hat{\mathbf{s}}_{\mathcal{C}} \in \mathbb{R}^C$ as:
|
| 48 |
+
|
| 49 |
+
$$\hat{\mathbf{s}}_{\mathcal{C}} = [\hat{s}_{c}]_{c=1}^{C} = \min(1, \alpha * \operatorname{softmax}(\hat{\mathbf{W}}_{C}^{T} \phi(\mathbf{x}))). \tag{1}$$
|
| 50 |
+
|
| 51 |
+
Here hyperparameter $\alpha$ is multiplied by the softmax scores to allow multiple clusters to get high relevance scores. Intuitively, $\alpha$ controls how many effective clusters can simultaneously activate for a given input (in practice, we keep $\alpha \approx 10$ ).
|
| 52 |
+
|
| 53 |
+
Given cluster relevance scores $\hat{\mathbf{s}}_{\mathcal{C}}$ , we define set $\mathcal{C}_{topb}$ as the top b clusters with the highest cluster relevance scores, where b ( $\ll$ C) is the beam size hyperparameter. In the training phase, we further define a parent set $\mathcal{C}_{parent}$ to guarantee that the correct labels of $\mathbf{x}$ are present in the shortlist. More specifically, for each positive label $\ell$ of $\mathbf{x}$ , we include the cluster with the strongest edge connection to $\ell$ in $\mathcal{C}_{parent}$ . The shortlisted set $\hat{\mathcal{C}}$ is defined as the union of these two sets and the selection process can be summarized as follows:
|
| 54 |
+
|
| 55 |
+
$$C_{topb} = \arg \operatorname{top-}b(\hat{\mathbf{s}}_{\mathcal{C}}), \text{ where } b(\ll C) \text{ is the beam size,}$$
|
| 56 |
+
(2)
|
| 57 |
+
|
| 58 |
+
$$C_{parent} = \begin{cases} \{ \} & \text{during prediction,} \\ \bigcup_{\ell: y_{\ell} = 1} \{ \arg \max_{c} (a_{c,\ell}) \} & \text{during training} \end{cases}$$
|
| 59 |
+
(3)
|
| 60 |
+
|
| 61 |
+
$$\hat{\mathcal{C}} = \mathcal{C}_{topb} \cup \mathcal{C}_{parent.} \tag{4}$$
|
| 62 |
+
|
| 63 |
+
After shortlisting a small subset of clusters $\hat{\mathcal{C}}$ , all potential edges of shortlisted clusters are explored and a set $\hat{\mathcal{P}}$ of explored paths is constructed, where $\hat{\mathcal{P}} = \{\emptyset \to c \to \ell : c \in \hat{\mathcal{C}} \text{ and } a_{c,\ell} > 0\}$ . Furthermore, each path $\emptyset \to c \to \ell \in \hat{\mathcal{P}}$ is assigned a path score $\hat{s}_{\emptyset,c,\ell}$ , where the path score $\hat{s}_{\emptyset,c,\ell}$ is expressed as the product of cluster relevance score $\hat{s}_c$ (defined by Eqn. 1) and edge score $\hat{s}_{c,\ell}$ which quantifies the probability of label $\ell$ getting assigned to cluster c and is defined in terms of the learnable edge weight parameter $a_{c,\ell}$ as follows:
|
| 64 |
+
|
| 65 |
+
$$\hat{s}_{c,\ell} = \min(1, \beta * a_{c,\ell}^{norm}), \text{ where } a_{c,\ell}^{norm} = \frac{\exp(a_{c,\ell})}{\sum_{\ell'=1}^{L} \exp(a_{c,\ell'})}, \text{ and } \hat{s}_{\emptyset,c,\ell} = \hat{s}_c * \hat{s}_{c,\ell}.$$
|
| 66 |
+
(5)
|
| 67 |
+
|
| 68 |
+
Defining edge scores $\hat{s}_{c,\ell}$ in such a manner allows modelling the desired probability distribution of label assignment to a cluster, where a few relevant labels are assigned to a particular cluster with probability 1, and all other labels have probability 0. Hyperparameter $\beta$ controls how many effective labels can get assigned to a cluster; we choose $\beta \approx L/C$ . Figure 3 empirically confirms that the trained model indeed learns the desired edge score distribution with most of the probability concentrated on a few labels and the rest of the labels getting assigned low probability. Moreover, this formulation also prevents labels with high softmax scores from overpowering edge assignments because as per Eqn. 5, a relevant label $\ell$ for cluster c gets positive feedback for $a_{c,\ell}$ only if $a_{c,\ell}^{norm} < 1/\beta$ ,
|
| 69 |
+
|
| 70 |
+

|
| 71 |
+
|
| 72 |
+
Figure 3: $a_{c,\ell}^{norm}$ (edge weight) distribution averaged over all clusters of trained ELIAS model on Amazon-670K dataset
|
| 73 |
+
|
| 74 |
+
otherwise $a_{c,\ell}$ does not participate in the calculation of $\hat{s}_{c,\ell}$ . This allows clusters to learn balanced label assignments. Note that, because of the assumption that **A** is a row-wise sparse matrix, Eqn. 5 can be computed efficiently in $\mathcal{O}(\kappa)$ instead of $\mathcal{O}(L)$ time.
|
| 75 |
+
|
| 76 |
+
Since there can be multiple paths in $\hat{\mathcal{P}}$ which reach a particular label $\ell$ , ELIAS defines shortlist score $\hat{y}_{\ell}$ for label $\ell$ by the maximum scoring path in $\hat{\mathcal{P}}$ that reaches $\ell$ , i.e.
|
| 77 |
+
|
| 78 |
+
$$\hat{y}_{\ell} = \max_{c'} \{ \hat{s}_{\emptyset, c', \ell} : \emptyset \to c' \to \ell \in \hat{\mathcal{P}} \}. \tag{6}$$
|
| 79 |
+
|
| 80 |
+
Finally, only the top-K entries in $\hat{y}$ are retained and the resulting vector is returned as the shortlist for input x.
|
| 81 |
+
|
| 82 |
+
Evaluating label classifiers: label classifiers $[\mathbf{w}_\ell]_{\ell=1}^L$ are evaluated for the K non-zero labels in $\hat{\mathbf{y}}$ and the final relevance score between label $\ell$ and input $\mathbf{x}$ is returned as $p_\ell = \sigma(\mathbf{w}_\ell^T \phi(\mathbf{x})) * \hat{y}_\ell$ , here $\sigma$ is the sigmoid function.
|
| 83 |
+
|
| 84 |
+
ELIAS is trained on a combination of classification and shortlist loss where the shortlist loss encourages correct labels to have high shortlist scores $(\hat{y}_\ell)$ and classification loss encourages positive labels in the shortlist to have high final score $(p_\ell)$ and negative labels in the shortlist to have low final score. More specifically, the final loss $\mathcal{L}$ is defined as $\mathcal{L} = \mathcal{L}_c + \lambda \mathcal{L}_s$ , where $\lambda$ is a hyperparameter and classification loss $\mathcal{L}_c$ is defined as binary cross entropy loss over shortlisted labels
|
| 85 |
+
|
| 86 |
+
$$\mathcal{L}_c = -\sum_{\ell: \hat{y}_{\ell} \neq 0} (y_{\ell} \log(p_{\ell}) + (1 - y_{\ell}) \log(1 - p_{\ell})), \tag{7}$$
|
| 87 |
+
|
| 88 |
+
shortlist loss $\mathcal{L}_s$ is defined as negative log likelihood loss over the positive labels
|
| 89 |
+
|
| 90 |
+
$$\mathcal{L}_s = -\sum_{\ell: y_\ell = 1} \log(\hat{y}_\ell). \tag{8}$$
|
| 91 |
+
|
| 92 |
+
Previous sub-sections described the ELIAS framework for learning the index graph along with the ML model in an end-to-end manner. Although, in principle, one can optimize the network with the given loss function from a random initialization, we highlight a few key challenges in doing so: 1) Optimization challenge: because of the flexibility in the network to assign a label node to various clusters, it becomes hard for a label to get confidently assigned to only a few relevant clusters. As a result, the model is always chasing a moving target and for a given input it is not able to be sure about any single path; 2) Computational challenge: the full cluster-label adjacency matrix $\bf A$ can be very large for large datasets and will incur $\mathcal{O}(CL)$ cost in each forward pass if implemented in dense form. To address these challenges we train the ELIAS model in two stages. In the first stage, we only train the encoder $\phi$ , cluster classifiers $\hat{\bf W}_C$ , and label classifiers ${\bf W}_L$ keeping $\bf A$ fixed and assigned based on traditional balanced partitions. We then utilize the stage-1 trained model to initialize the sparse adjacency matrix $\bf A$ . In the second stage, we take the initialized $\bf A$ and rest of the stage 1 model, and jointly train the full model $\phi$ , $\hat{\bf W}_C$ , ${\bf W}_L$ , ${\bf A}$ .
|
| 93 |
+
|
| 94 |
+
Stage 1: In stage 1 training, similar to existing partition-based XMC methods, we partition the label space into C mutually exclusive clusters by performing balanced k-means clustering over pre-defined label features. The adjacency matrix induced by these clusters is then used as fixed assignment for $\mathbf{A}$ . Keeping $\mathbf{A}$ fixed, we train the rest of the model (i.e. $\phi, \hat{\mathbf{W}}_C, \mathbf{W}_L$ ) on the loss described in Section 3.3. More details on clustering are provided in Section C.1 in the Appendix.
|
| 95 |
+
|
| 96 |
+
Initializing A: As highlighted before, to overcome the $\mathcal{O}(CL)$ cost associated with a full adjacency matrix $\mathbf{A}$ , we want to restrict $\mathbf{A}$ to be a row-wise sparse matrix. In other words, we want to restrict each cluster to choose from a candidate subset of $\kappa$ labels instead of the whole label set. Intuitively, in order for the model to learn anything meaningful, the candidate subset for each cluster should contain approximately similar labels. To achieve this, we utilize the stage 1 model to first generate an approximate adjacency matrix $\mathbf{A}'$ and then select the top- $\kappa$ entries in each row of $\mathbf{A}'$ as non-zero entries for $\mathbf{A}$ . More specifically, we first identify top-b matched clusters for each training point $\mathbf{x}^i$ by computing the cluster matching matrix $\mathbf{M} = [m_{i,c}]_{N \times C}$ as:
|
| 97 |
+
|
| 98 |
+
$$m_{i,c} = \begin{cases} \hat{\mathbf{s}}_c^i & \text{if } c \in \mathcal{C}_{topb}^i, \\ 0 & \text{otherwise} \end{cases}$$
|
| 99 |
+
(9)
|
| 100 |
+
|
| 101 |
+
where $\hat{\mathbf{s}}_c^i$ represents the cluster relevance score and $\mathcal{C}_{topb}^i$ represents the set of top-b clusters for $i^{th}$ training point $\mathbf{x}^i$ . After computing $\mathbf{M}$ , we define the approximate adjacency matrix $\mathbf{A}' = [a'_{c,\ell}]_{C \times L} = \mathbf{M}^T \mathbf{Y}$ , where $\mathbf{Y} = [\mathbf{y}^1, ..., \mathbf{y}^i, ..., \mathbf{y}^N]^T$ . The element $a'_{c,\ell}$ essentially denotes the weighted count of how many times the cluster c got placed in top-b positions for positive training points of label $\ell$ . Finally, the top $\kappa$ elements in each row of $\mathbf{A}'$ are selected as the non-zero parameters of $\mathbf{A}$ , i.e.
|
| 102 |
+
|
| 103 |
+
$$a_{c,\ell} = \begin{cases} \text{random(0, 1)} & \text{if } \ell \in \arg \text{top-}\kappa(\mathbf{a}_c') \\ 0 & \text{otherwise} \end{cases}$$
|
| 104 |
+
(10)
|
| 105 |
+
|
| 106 |
+
We choose a large enough $\kappa$ to provide the model enough degree of freedom to learn cluster-to-label assignments. In particular, $\kappa \sim 10 \times L/C$ works well across datasets without adding any computational burden. For efficient implementation on GPUs, we store matrix ${\bf A}$ in the form of two tensors, one storing the non-zero indices and the other storing the values corresponding to those non-zero indices.
|
| 107 |
+
|
| 108 |
+
Stage 2: In stage 2 training, we initialize $\mathbf{A}$ as described above, and $\phi$ , $\hat{\mathbf{W}}_C$ from stage 1 model. We then train the full ELIAS model (i.e. $\phi$ , $\hat{\mathbf{W}}_C$ , $\mathbf{W}_L$ , $\mathbf{A}$ ) end-to-end to optimize the loss defined in Section 3.3
|
| 109 |
+
|
| 110 |
+
State-of-the-art XMC methods like XR-Transformer [33] and X-Transformer [7] utilize high capacity sparse classifiers learned on the concatenated sparse bag-of-word features and dense embedding obtained from the deep encoder for ranking their top predictions. Because of the high capacity, sparse classifiers are able to represent head labels more elaborately than dense classifiers. Moreover, bag-of-words representation is able to capture the full input document instead of the truncated document that the deep encoder receives.
|
| 111 |
+
|
| 112 |
+
To compare fairly with such methods, we explore an enhanced variant of ELIAS represented by ELIAS ++, which additionally learns a sparse ranker that re-ranks the top 100 predictions of ELIAS. In particular, the sparse ranker takes the concatenated sparse bag-of-word and dense embedding input features and learns sparse linear classifiers on the top 100 label predictions made from the trained ELIAS model for each training point. Because these sparse classifiers are only trained on 100 labels per training point, they can be quickly trained by parallel linear solvers like LIBLINEAR [12]. We use the open-source PECOS<sup>2</sup> [32] library to train and make predictions with the sparse ranker.
|
| 113 |
+
|
| 114 |
+
During prediction, the top 100 predictions are first made by ELIAS and then the learned sparse ranker is evaluated on these top 100 predictions. We empirically observe that the scores returned by ELIAS and sparse ranker are not well calibrated across different label regimes. As shown in Figure 4, the sparse
|
| 115 |
+
|
| 116 |
+

|
| 117 |
+
|
| 118 |
+
Figure 4: True label's score distribution of sparse ranker and ELIAS $^{(d)}$ over different label deciles on Amazon-670K dataset. $1^{st}$ decile represents labels with most training points while $10^{th}$ decile represents labels with least training points
|
| 119 |
+
|
| 120 |
+
ranker underestimates scores on tail labels while ELIAS scores are more balanced across all label regimes. To correct this score mis-calibration, we learn a simple score calibration module which consists of a standard decision tree classifier<sup>3</sup> that takes both of these scores and the training frequency of the label as input and predicts a single score denoting the label relevance. The score calibration module is learned on a small validation set of 5,000 points. More details on the sparse ranker are in Appendix Section C.2.
|
| 121 |
+
|
| 122 |
+
The time complexity for processing a batch of n data-points is $\mathcal{O}(n(T_{\text{bert}} + Cd + b\kappa + Kd))$ where $T_{\text{bert}}$ represents the time complexity of the bert encoder, C represents the number of clusters in index,
|
| 123 |
+
|
| 124 |
+
<sup>&</sup>lt;sup>2</sup>https://github.com/amzn/pecos
|
| 125 |
+
|
| 126 |
+
<sup>&</sup>lt;sup>3</sup>https://scikit-learn.org/stable/modules/generated/sklearn.tree. DecisionTreeClassifier.html
|
| 127 |
+
|
| 128 |
+
Table 1: Performance comparison on extreme classification benchmark datasets. Bold numbers represent overall best numbers for that dataset while underlined numbers represent best numbers for dense embedding based methods. Methods which only use sparse bag-of-word features are distinguished by $^{(s)}$ superscript, dense embedding based methods are distinguished by $^{(d)}$ superscript and methods that use both sparse + dense features are distinguished by $^{(s+d)}$ superscript
|
| 129 |
+
|
| 130 |
+
| Method | P@1 | P@3 | P@5 | PSP@1 | PSP@3 | PSP@5 | P@1 | P@3 | P@5 | PSP@1 | PSP@3 | PSP@5 |
|
| 131 |
+
|------------------------------|-------|-------|-------|------------|-------|-------|-------|-------|---------|-------------|-------|-------|
|
| 132 |
+
| | | | Amazon-670K | | | | | | LF-AmazonTitles-131K | | | |
|
| 133 |
+
| DiSMEC <sup>(s)</sup> | 44.70 | 39.70 | 36.10 | 27.80 | 30.60 | 34.20 | 35.14 | 23.88 | 17.24 | 25.86 | 32.11 | 36.97 |
|
| 134 |
+
| $Parabel^{(s)}$ | 44.89 | 39.80 | 36.00 | 25.43 | 29.43 | 32.85 | 32.60 | 21.80 | 15.61 | 23.27 | 28.21 | 32.14 |
|
| 135 |
+
| XR-Linear(s) | 45.36 | 40.35 | 36.71 | - | - | - | - | - | - | - | - | - |
|
| 136 |
+
| Bonsai <sup>(s)</sup> | 45.58 | 40.39 | 36.60 | 27.08 | 30.79 | 34.11 | 34.11 | 23.06 | 16.63 | 24.75 | 30.35 | 34.86 |
|
| 137 |
+
| Slice <sup>(d)</sup> | 33.15 | 29.76 | 26.93 | 20.20 | 22.69 | 24.70 | 30.43 | 20.50 | 14.84 | 23.08 | 27.74 | 31.89 |
|
| 138 |
+
| Astec <sup>(d)</sup> | 47.77 | 42.79 | 39.10 | 32.13 | 35.14 | 37.82 | 37.12 | 25.20 | 18.24 | 29.22 | 34.64 | 39.49 |
|
| 139 |
+
| $GLaS^{(d)}$ | 46.38 | 42.09 | 38.56 | 38.94 | 39.72 | 41.24 | - | - | - | - | - | - |
|
| 140 |
+
| AttentionXML <sup>(d)</sup> | 47.58 | 42.61 | 38.92 | 30.29 | 33.85 | 37.13 | 32.55 | 21.70 | 15.64 | 23.97 | 28.60 | 32.57 |
|
| 141 |
+
| LightXML <sup>(d)</sup> | 49.10 | 43.83 | 39.85 | - | - | - | 38.49 | 26.02 | 18.77 | 28.09 | 34.65 | 39.82 |
|
| 142 |
+
| $XR$ -Transformer $^{(s+d)}$ | 50.11 | 44.56 | 40.64 | 36.16 | 38.39 | 40.99 | 38.42 | 25.66 | 18.34 | 29.14 | 34.98 | 39.66 |
|
| 143 |
+
| Overlap-XMC $^{(s+d)}$ | 50.70 | 45.40 | 41.55 | 36.39 | 39.15 | 41.96 | - | - | - | - | - | - |
|
| 144 |
+
| ELIAS (d) | 50.63 | 45.49 | 41.60 | 32.59 | 36.44 | 39.97 | 39.14 | 26.40 | 19.08 | 30.01 | 36.09 | 41.07 |
|
| 145 |
+
| ELIAS $++^{(s+d)}$ | 53.02 | 47.18 | 42.97 | 34.32 | 38.12 | 41.93 | 40.13 | 27.11 | 19.54 | 31.05 | 37.57 | 42.88 |
|
| 146 |
+
| | | | Wikipedia-500K | | | | | | Amazon-3M | | | |
|
| 147 |
+
| DiSMEC <sup>(s)</sup> | 70.21 | 50.57 | 39.68 | 31.20 | 33.40 | 37.00 | 47.34 | 44.96 | 42.80 | - | - | - |
|
| 148 |
+
| $Parabel^{(s)}$ | 68.70 | 49.57 | 38.64 | 26.88 | 31.96 | 35.26 | 47.48 | 44.65 | 42.53 | 12.82 | 15.61 | 17.73 |
|
| 149 |
+
| XR-Linear(s) | 68.12 | 49.07 | 38.39 | - | - | - | 47.96 | 45.09 | 42.96 | - | - | - |
|
| 150 |
+
| Bonsai <sup>(s)</sup> | 69.20 | 49.80 | 38.80 | - | - | - | 48.45 | 45.65 | 43.49 | 13.79 | 16.71 | 18.87 |
|
| 151 |
+
| Slice <sup>(d)</sup> | 62.62 | 41.79 | 31.57 | 24.48 | 27.01 | 29.07 | - | - | - | - | - | - |
|
| 152 |
+
| Astec <sup>(d)</sup> | 73.02 | 52.02 | 40.53 | 30.69 | 36.48 | 40.38 | - | - | - | - | - | - |
|
| 153 |
+
| $GLaS^{(d)}$ | 69.91 | 49.08 | 38.35 | - | - | - | - | - | - | - | - | - |
|
| 154 |
+
| Attention $XML^{(d)}$ | 76.95 | 58.42 | 46.14 | 30.85 | 39.23 | 44.34 | 50.86 | 48.04 | 45.83 | 15.52 | 18.45 | 20.60 |
|
| 155 |
+
| $LightXML^{(d)}$ | 77.78 | 58.85 | 45.57 | - | - | - | - | - | - | - | - | - |
|
| 156 |
+
| $XR$ -Transformer $^{(s+d)}$ | 79.40 | 59.02 | 46.25 | 35.76 | 42.22 | 46.36 | 54.20 | 50.81 | 48.26 | 20.52 | 23.64 | 25.79 |
|
| 157 |
+
| Overlap-XMC <sup>(s+d)</sup> | - | - | - | - | - | - | 52.70 | 49.92 | 47.71 | 18.79 | 21.90 | 24.10 |
|
| 158 |
+
| ELIAS (d) | 79.00 | 60.37 | 46.87 | 33.86 | 42.99 | 47.29 | 51.72 | 48.99 | 46.89 | 16.05 | 19.39 | 21.81 |
|
| 159 |
+
| ELIAS ++(s+d) | 81.26 | 62.51 | 48.82 | 35.02 | 45.94 | 51.13 | 54.28 | 51.40 | 49.09 | 15.85 | 19.07 | 21.52 |
|
| 160 |
+
|
| 161 |
+
d is the embedding dimension, b is the beam size, $\kappa$ is the row-wise sparsity of cluster-to-label adjacency matrix A, and K is the number of labels shortlisted for classifier evaluation. Assuming $C = \mathcal{O}(\sqrt{L})$ , $\kappa = \mathcal{O}(L/C) = \mathcal{O}(\sqrt{L})$ and $K = \mathcal{O}(\sqrt{L})$ , the final time complexity comes out to be $\mathcal{O}(n(T_{\text{bert}} + \sqrt{L}(2d+b)))$ . Empirical prediction and training times on benchmark datasets are reported in Table 6 of the Appendix.
|
2211.03295/record.json
ADDED
|
@@ -0,0 +1,32 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"arxiv_id": "2211.03295",
|
| 3 |
+
"month": "2022_11",
|
| 4 |
+
"year": 2024,
|
| 5 |
+
"conference": "ICLR",
|
| 6 |
+
"title": "MogaNet: Multi-order Gated Aggregation Network",
|
| 7 |
+
"arxiv_url": "https://arxiv.org/abs/2211.03295",
|
| 8 |
+
"source": {
|
| 9 |
+
"paper_dir": "/home/zling/lzl/ICML2026/Build_Dataset/data/2022_11/main_diagram_database/2211.03295",
|
| 10 |
+
"tex_dir": "/home/zling/lzl/ICML2026/Build_Dataset/data/2022_11/tex_files_extracted/2211.03295",
|
| 11 |
+
"paper_md": "/home/zling/lzl/ICML2026/Build_Dataset/data/2022_11/main_diagram_database/2211.03295/paper_text/paper.md",
|
| 12 |
+
"metadata_json": "/home/zling/lzl/ICML2026/Build_Dataset/data/2022_11/main_diagram_database/2211.03295/metadata.json",
|
| 13 |
+
"intro_method_from": "/home/zling/lzl/ICML2026/Build_Dataset/data/2022_11/main_diagram_database/2211.03295/paper_text/paper.md",
|
| 14 |
+
"intro_method_from_kind": "markdown"
|
| 15 |
+
},
|
| 16 |
+
"files": {
|
| 17 |
+
"main_drawio": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2211.03295/main_diagram/main_diagram.drawio",
|
| 18 |
+
"main_png": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2211.03295/main_diagram/main_diagram.png",
|
| 19 |
+
"main_pdf": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2211.03295/main_diagram/main_diagram.pdf",
|
| 20 |
+
"intro_method_md": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2211.03295/paper_text/intro_method.md",
|
| 21 |
+
"paper_pdf": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2211.03295/paper.pdf",
|
| 22 |
+
"latex": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2211.03295/latex_source"
|
| 23 |
+
},
|
| 24 |
+
"status": {
|
| 25 |
+
"copy_drawio": "exists",
|
| 26 |
+
"copy_png": "exists",
|
| 27 |
+
"diagram_pdf": "pdf_exists",
|
| 28 |
+
"intro_method": "exists",
|
| 29 |
+
"paper_pdf": "exists",
|
| 30 |
+
"latex": "exists"
|
| 31 |
+
}
|
| 32 |
+
}
|
2211.09394/record.json
ADDED
|
@@ -0,0 +1,32 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"arxiv_id": "2211.09394",
|
| 3 |
+
"month": "2022_11",
|
| 4 |
+
"year": 2022,
|
| 5 |
+
"conference": "EMNLP",
|
| 6 |
+
"title": "ConNER: Consistency Training for Cross-lingual Named Entity Recognition",
|
| 7 |
+
"arxiv_url": "https://arxiv.org/abs/2211.09394",
|
| 8 |
+
"source": {
|
| 9 |
+
"paper_dir": "/home/zling/lzl/ICML2026/Build_Dataset/data/2022_11/main_diagram_database/2211.09394",
|
| 10 |
+
"tex_dir": "/home/zling/lzl/ICML2026/Build_Dataset/data/2022_11/tex_files_extracted/2211.09394",
|
| 11 |
+
"paper_md": "/home/zling/lzl/ICML2026/Build_Dataset/data/2022_11/main_diagram_database/2211.09394/paper_text/paper.md",
|
| 12 |
+
"metadata_json": "/home/zling/lzl/ICML2026/Build_Dataset/data/2022_11/main_diagram_database/2211.09394/metadata.json",
|
| 13 |
+
"intro_method_from": "/home/zling/lzl/ICML2026/Build_Dataset/data/2022_11/main_diagram_database/2211.09394/paper_text/paper.md",
|
| 14 |
+
"intro_method_from_kind": "markdown"
|
| 15 |
+
},
|
| 16 |
+
"files": {
|
| 17 |
+
"main_drawio": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2211.09394/main_diagram/main_diagram.drawio",
|
| 18 |
+
"main_png": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2211.09394/main_diagram/main_diagram.png",
|
| 19 |
+
"main_pdf": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2211.09394/main_diagram/main_diagram.pdf",
|
| 20 |
+
"intro_method_md": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2211.09394/paper_text/intro_method.md",
|
| 21 |
+
"paper_pdf": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2211.09394/paper.pdf",
|
| 22 |
+
"latex": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2211.09394/latex_source"
|
| 23 |
+
},
|
| 24 |
+
"status": {
|
| 25 |
+
"copy_drawio": "exists",
|
| 26 |
+
"copy_png": "exists",
|
| 27 |
+
"diagram_pdf": "pdf_exists",
|
| 28 |
+
"intro_method": "exists",
|
| 29 |
+
"paper_pdf": "exists",
|
| 30 |
+
"latex": "exists"
|
| 31 |
+
}
|
| 32 |
+
}
|
2301.05434/main_diagram/main_diagram.drawio
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2301.05434/paper_text/intro_method.md
ADDED
|
@@ -0,0 +1,119 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Introduction
|
| 2 |
+
|
| 3 |
+
Image enhancement and restoration have been a critical area of research using both traditional digital image processing techniques[@geman1984stochastic] [@besag1991bayesian], and the recent deep learning frameworks[@nah2017deep][@qin2020ffa][@zhang2017beyond]. The goal of image restoration is to recover a clear image, whereas image enhancement is to improve the quality of the degraded image. In this study, we perform recovery of the clear image from the hazy version while performing low-light image enhancement using a single convolutional network, which could further be applied to tasks such as search and rescue operations using object detection.
|
| 4 |
+
|
| 5 |
+
Using deep learning algorithms for image recovery has many benefits, the most important being that it can generalize to different variations in the images captured. Hence, we observe that deep learning-based methods on most benchmark datasets often outperform traditional methods significantly. However, there are still challenges that the researchers have to tackle for image restoration. Publicly available datasets containing a variety of degrading factors that model real-world scenarios are few. Hence, most previous works have focused on removing one type of degradation with a specific intensity level. From the perspective of computational complexity, recent deep learning methods are computationally expensive, and thus they can't be deployed on edge devices. Moreover, image restoration has been a long-standing ill-posed research problem, as there are infinite mappings between the degraded and the clear image. Thus, the existing methods still have room for improvement in finding the correct mapping.
|
| 6 |
+
|
| 7 |
+
In this work, we focus on developing an end-to-end lightweight deep-learning solution for the image restoration task. Our major contributions are listed below:
|
| 8 |
+
|
| 9 |
+
- Taking inspiration from Non-linear Activation Free Network (NAFNet) [@chen2022simple] and Level Attention Module [@zhang2021benchmarking], we propose a novel algorithm - Low-Visibility Restoration Network (LVRNet), that can effectively recover high-quality images from degraded images taken in poor visual conditions (Figure [1](#fig:abstract){reference-type="ref" reference="fig:abstract"}).
|
| 10 |
+
|
| 11 |
+
- Due to the lack of available datasets that exhibit a combination of adverse effects, we generate a new dataset, namely LowVis-AFO (abbreviation for Low-Visibility Aerial Floating Objects dataset). We use AFO [@afo] as our ground truth dataset and synthesize dark hazy images. The data generation process has been elaborated in Section [4.1](#dataset){reference-type="ref" reference="dataset"}.
|
| 12 |
+
|
| 13 |
+
- Benchmarking experiments have been provided on the LowVis-AFO dataset to help future researchers for quantitative comparison. Along with that, LVRNet surpasses the results obtained using previous image restoration techniques by a significant margin.
|
| 14 |
+
|
| 15 |
+
- We perform extensive ablation studies to analyze the importance of various loss functions existing in current image restoration research. These experiments are discussed in detail in Section [5](#res){reference-type="ref" reference="res"}.
|
| 16 |
+
|
| 17 |
+
<figure id="fig:arch" data-latex-placement="ht">
|
| 18 |
+
<embed src="figures/method2.pdf" style="width:80.0%;height:200pt" />
|
| 19 |
+
<figcaption><strong>Model architecture of the proposed LVRNet.</strong> Starting from the top-left: The input image is passed to the pre-processing convolution layers where feature maps are learned and passed to NAF Groups (here we have used 3 groups). The features extracted from each group are concatenated (or stacked) along the channel dimension and sent as input to the Level Attention Module (LAM). Finally, we pass LAM’s output to CNN layers for post-processing, adding the original image through residual connection and extracting the restored image at the bottom-left.</figcaption>
|
| 20 |
+
</figure>
|
| 21 |
+
|
| 22 |
+
# Method
|
| 23 |
+
|
| 24 |
+
In this section, we provide a detailed description of the overall architecture proposed and the individual components included in the network.
|
| 25 |
+
|
| 26 |
+
Like the group structure in [@qin2020ffa], each group in our network consists of $K$ NAF Blocks [@chen2022simple] with a skip connection at the end as shown in Figure [3](#fig:naf){reference-type="ref" reference="fig:naf"}. The output of each group is concatenated, passed to the level attention module to find the weighted importance of the feature maps obtained, and post-processed using two convolutional layers. A long skip connection for global residual learning accompanies this.
|
| 27 |
+
|
| 28 |
+
To keep this work self-contained, we explain the NAF Block [@chen2022simple] in this subsection. The NAF Block is the building block of the Nonlinear Activation Free Network, namely NAFNet [@chen2022simple]. To avoid over-complexity in the architecture, this block avoids using any activation functions like ReLU, GELU, Softmax, etc., hence keeping a check on the intra-block complexity of the network.
|
| 29 |
+
|
| 30 |
+
The input first passes through Layer Normalization as it can help stabilize the training process. This is followed by convolution operations and a Simple Gate (SG). SG is a variant of Gated Linear Units (GLU) [@dauphin2017language] as evident from the following equations [\[glu\]](#glu){reference-type="ref" reference="glu"} and [\[sg\]](#sg){reference-type="ref" reference="sg"}
|
| 31 |
+
|
| 32 |
+
$$\begin{equation}
|
| 33 |
+
\label{glu}
|
| 34 |
+
GLU(X, f, g, \sigma) = f(X) \odot \sigma(g(X))
|
| 35 |
+
\end{equation}$$
|
| 36 |
+
|
| 37 |
+
$$\begin{equation}
|
| 38 |
+
\label{sg}
|
| 39 |
+
SimpleGate(X, Y) = X \odot Y
|
| 40 |
+
\end{equation}$$
|
| 41 |
+
|
| 42 |
+
and a replacement for GELU[@hendrycks2016gaussian] activation function because of the similarity between GLU and GELU (Equation [\[gelu\]](#gelu){reference-type="ref" reference="gelu"}).
|
| 43 |
+
|
| 44 |
+
$$\begin{equation}
|
| 45 |
+
\label{gelu}
|
| 46 |
+
GELU(x) = x \phi (x)
|
| 47 |
+
\end{equation}$$
|
| 48 |
+
|
| 49 |
+
In Simple Gate, the feature maps are divided into two parts along the channel dimension and then multiplied as shown in Figure [4](#fig:sg){reference-type="ref" reference="fig:sg"}. Another novelty introduced in this block is Simplified Channel Attention (SCA). Channel Attention (CA) can be expressed as:
|
| 50 |
+
|
| 51 |
+
$$\begin{equation}
|
| 52 |
+
\label{ca}
|
| 53 |
+
CA(X) = X \otimes \sigma(W_2 max(0, W_1 pool(X)))
|
| 54 |
+
\end{equation}$$
|
| 55 |
+
|
| 56 |
+
where $X$ represents the feature map, $pool$ indicates the global average pooling operation, $\sigma$ is Sigmoid, $W_1, W_2$ are fully-connected layers and $\otimes$ is a channel-wise product operation. This can be taken as a special case of GLU, from which we can derive the equation for Simplified Channel Attention:
|
| 57 |
+
|
| 58 |
+
$$\begin{equation}
|
| 59 |
+
\label{sca}
|
| 60 |
+
SCA(X) = X \otimes W pool(X)
|
| 61 |
+
\end{equation}$$
|
| 62 |
+
|
| 63 |
+
Once we have extracted features from all the NAF Groups, we concatenate them and pass them through the Level Attention Module (LAM) [@zhang2021benchmarking]. This module learns attention weights for features obtained at different levels.
|
| 64 |
+
|
| 65 |
+
In LAM, each feature map is first reshaped to a 2D matrix of the size $K \times HWC$, where $K, H, W,$ and $C$ are the no. of NAF Groups, height, width, and no. of channels of the feature maps respectively. We find a correlation matrix of this 2D matrix by multiplying it with its transpose matrix. Finally, we multiply the 2D matrix with this correlation matrix and reshape it to $K \times H \times W \times C$ tensor. Inspired by residual learning, this tensor is substituted for residual and is added to the original concatenated feature maps. The resultant features are then reshaped to $H \times W \times KC$, passing through $1 \times 1$ convolution operation to get the $H \times W \times C$ feature map. This is passed through some post-processing convolutions to get the final enhanced output. We include its architecture diagram in the supplementary material for a better understanding.
|
| 66 |
+
|
| 67 |
+
![Simple Gate as represented by Equation [\[sg\]](#sg){reference-type="ref" reference="sg"}. $\otimes$ denotes channel-wise multiplication](figures/simplegate.pdf){#fig:sg width="40%"}
|
| 68 |
+
|
| 69 |
+
Four loss functions, namely, reconstruction loss, perceptual loss, edge loss [@edgeloss], and FFT loss[@fftloss], have been used to supervise the task of image restoration.
|
| 70 |
+
|
| 71 |
+
The total loss L is defined in Equation [\[tloss\]](#tloss){reference-type="ref" reference="tloss"}, where $\lambda_1 = 0.04$, $\lambda_2 = 1$ and $\lambda_3 = 0.01$.
|
| 72 |
+
|
| 73 |
+
$$\begin{equation}
|
| 74 |
+
\label{tloss}
|
| 75 |
+
L = L_s + \lambda_1 L_p + \lambda_2 L_e + \lambda_3 L_f
|
| 76 |
+
\end{equation}$$
|
| 77 |
+
|
| 78 |
+
The restored clear output image is compared with its ground truth value in the spatial domain using a standard $l_1$ loss as demonstrated in Equation [\[L1loss\]](#L1loss){reference-type="ref" reference="L1loss"}. We use $l_1$ loss instead of $l_2$ loss as it does not over-penalize the errors and leads to better image restoration performance [@zhao2016loss].
|
| 79 |
+
|
| 80 |
+
$$\begin{equation}
|
| 81 |
+
\label{L1loss}
|
| 82 |
+
L_s = \frac{1}{N}\sum\limits_{i=1}^{n}\parallel x_{i}^{gt} - NAFNet(x_{i}^{dark, hazy})\parallel_{1}
|
| 83 |
+
\end{equation}$$
|
| 84 |
+
|
| 85 |
+
In the above equation, $x_{i}^{gt}$ refers to the ground truth clear image, and $NAFNet(x_{i}^{dark, hazy})$ denotes the output of our proposed NAFNet when a dark and hazy image is fed to the network.
|
| 86 |
+
|
| 87 |
+
To reduce the perceptual loss and improve the image's visual quality, we utilize the features of the pre-trained VGG-19 network [@vgg] obtained from the output of one of the ReLU activation layers. It is defined in Equation [\[ploss\]](#ploss){reference-type="ref" reference="ploss"}, where $w_{ij}$, $h_{ij}$, and $c_{ij}$ refer to the dimensions of the respective feature maps inside the VGG-19 architecture. $\phi_{ij}$ denotes the feature maps outputted from the jth convolutional layer inside the i-th block in the VGG network. $$\begin{equation}
|
| 88 |
+
\label{ploss}
|
| 89 |
+
L_p = \frac{1}{w_{ij}h_{ij}c_{ij}}\sum\limits_{x=1}^{w_{ij}}\sum\limits_{y=1}^{h_{ij}}\sum\limits_{z=1}^{c_{ij}}\parallel \phi_{ij}(I_{gt})_{xyz} - \phi_{ij}(I_{out})_{xyz} \parallel
|
| 90 |
+
\end{equation}$$
|
| 91 |
+
|
| 92 |
+
To recover the high-frequency details lost because of the inherent noise in dark and hazy images, we have an additional edge loss to constrain the high-frequency components between the ground truth and the recovered image.
|
| 93 |
+
|
| 94 |
+
$$\begin{equation}
|
| 95 |
+
\label{eloss}
|
| 96 |
+
L_e = \sqrt{(\nabla^{2}(I_{gt}) - \nabla^{2}(I_{out}))^2 + \epsilon^2}
|
| 97 |
+
\end{equation}$$
|
| 98 |
+
|
| 99 |
+
In Equation [\[eloss\]](#eloss){reference-type="ref" reference="eloss"}, $\nabla^2$ refers to the Laplacian operation [@laplacian], which is then applied to the ground truth and the predicted clean image to get the edge loss.
|
| 100 |
+
|
| 101 |
+
To supervise the haze-free results in the frequency domain, we add another loss called Fast Fourier transform (FFT) loss (denoted by $L_f$ in Equation [\[floss\]](#floss){reference-type="ref" reference="floss"}. It calculates the loss of both amplitude and phase using the $l_1$ loss function without additional inference cost.
|
| 102 |
+
|
| 103 |
+
$$\begin{equation}
|
| 104 |
+
A_{x_{i}^{gt}}, P_{x_{i}^{gt}} = FFT(x_{i}^{gt}),
|
| 105 |
+
\end{equation}$$
|
| 106 |
+
|
| 107 |
+
$$\begin{equation}
|
| 108 |
+
A_{x_{i}^{out}}, P_{x_{i}^{out}} = FFT(x_{i}^{out}),
|
| 109 |
+
\end{equation}$$
|
| 110 |
+
|
| 111 |
+
$$\begin{equation}
|
| 112 |
+
\label{floss}
|
| 113 |
+
L_f = \frac{1}{N}\sum\limits_{i=1}^{n}(\parallel A_{x_{i}^{gt}} - A_{x_{i}^{out}} \parallel_{1} + \parallel P_{x_{i}^{gt}} - P_{x_{i}^{out}} \parallel_{1})
|
| 114 |
+
\end{equation}$$
|
| 115 |
+
|
| 116 |
+
<figure id="fig:dataset-img" data-latex-placement="ht">
|
| 117 |
+
<embed src="figures/dataset.pdf" style="width:90.0%" />
|
| 118 |
+
<figcaption><strong>Visual illustration of a few sample images from our dataset.</strong> Columns 1 and 3 show original images taken from AFO Dataset <span class="citation" data-cites="afo"></span>, whereas Columns 2 and 4 show their corresponding images generated as explained in Section <a href="#dataset" data-reference-type="ref" data-reference="dataset">4.1</a> simulating low-visibility conditions. </figcaption>
|
| 119 |
+
</figure>
|
2303.16268/main_diagram/main_diagram.drawio
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
<mxfile host="app.diagrams.net" modified="2022-11-19T02:38:10.655Z" agent="5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/107.0.0.0 Safari/537.36" version="20.5.3" etag="2QTatTCxC-X-xz_r8KJR" type="device"><diagram id="Q28-paovCBosxBUSdCQy">7VzRkps2FP0aP64HEGD8uOtdpw/pTKZpp+1ThwXZpsEWxXLWm6+vMBIISWBsC4gT9iGBiyTgnnuupCPhCVhsjx9SP9n8ikIYTywjPE7A88SyZp5F/s0M77nBdozcsE6jMDeZpeFz9A1SIyt2iEK4rxTECMU4SqrGAO12MMAVm5+m6K1abIXi6l0Tf03vaJSGz4EfQ6nYn1GIN7nVc7jSv8BovWF3Ng16ZeuzwrSJ/cYP0Rt3L/AyAYsUIZwfbY8LGGe+Y37JG1rWXC0eLIU73KYCBeKrHx/ou9Hnwu/sZVN02IUwK29OwNPbJsLwc+IH2dU3gi6xbfA2ppdXURwvUIzSU12wWq2sICD2PU7RF8hdCd1X13HJFfoAMMXwWPsSZuEaElIQbSFO30kRVsGl7qXhZDLY3kpwbI/aNhwwzObTeFgXTZcuIwfUa2oPAslhMCTBQk9RijdojXZ+/FJan0qXGuSsLPMRoYQ68l+I8TuNfP+AUdXN8Bjhv7LqU4ee/c1deT7Slk8n7+xkR96Mq5SdFrWyk7La6YzVk6AzyN9yWUCXvW41ptEhDajJpuz00zVkWNqt8U1h7OPoa7X5W8CypXD/iAitiWkRR8mDKWF5S/CHPvRWyuB3Aw++rrIaaIcpyGZXZAAyGQqC8GRwNZDBGcnQQAZXQYbZcGRwm8mgtydoIMPCe3ladkOGIsMPQYbZSIYGMngKMsyHI4PXTAa5l7+/nsE2BiTDfCRDAxnYxIZnwwUAa2cDe546Otg/AB0ca0A6mPJQczA+GC35cDMbDKMlGywFGwacNbDnaZolk4l8kh1iEuzwG8qqPiUwjcjNYMrbP5XG8zQ5QqZ+ZOf7LxAHm7Me5tm1Q7s8cjBxCdoRy8PckEJpT++RRUUYpTCgZQkoGRN0sA0IIzHbdCS22QqyzXSQ7Tuao7clW7XzMa6hW+vORzFJtwacl5jyLP2O6WZ6zXzTwC3TcyrcshQ92UzBLVsHt4aZ8uscpBkGz1aeqwVz1WxtwS1FT8Z6E53UOlV9TFP/nSuQoGiH91zLnzJDGTZzYfzjClJxc3F73lxcnG0I5clB/rxllBUv3i7w3DGpNyX1mSKpDzi/Zs8zJvW2AyZjwKTujdxq4tZc5hYYcrY+H7l1mRJmDcctpiiM3FIPmNh6Ps8tMBy3WBsjt9rKavaA3Goh1FwkVDZ7vdjDULfrwd8n+faTE5qaxgWCbAlm7YQU05vargYXg5/Axe6wLm6hf9y7i8XZYd8udn4CF9vDuvg7mp+3XfGrjHKmVjHqaT/QWSzyblhYrgoO6dcilPZkbIMfs22IxBDE/n4fBcy8jOK6RTIHeqF9eoaQVaY9PLHQem1VKZU40IEs1TpWWogDGulYrirWrEN2QUdLyHiFntYTHa+Z0lfCdhBuStgtl4D8KSjWklXcpMa6aFajg3YK3aAL2l2lBgOxu7AtPr7k8qCx/M36rtVC1Lj3nCCO5XvOCeAaKWL4nKBNl7g9l9ycEoBC7vhuUoItzoPsWXNKkFLIbKIzJYAWWsy9pwRx1N53SpA9emcpYXb/OQH0mBMkEjvCRh3Abs2ayB+V1mrKBpY3BYZjFH9utd0ZuWyVV9lyF7tN/vbSbS7NYdLrOGeGNU5j+dtzWL/62SA5zHGHzWEq/cyNMSV1xdfufwfELjzkSvgjKWC6yfHkC3adHK2z/3+H2wSlfhxnr/vHLkEoJl6zjCX08S
GFe3Yj8oz5vfJqEsTEuVjAsQIdzR0KOd+Po3Wm5QcEjNNCQgZVFPjxI72wjcLwlJZVgSNI/3zsaADenFe1/0Ll47fUuo6MvOVOHVsD8ipZrwvkP424V3A3hTUft2TyG/+BrIy8a+pBXv6YSRvy1YZy24cYvWbb4fMrrymzZ7vja2NBLHzHQZM9b/3u7puCaQ4qsWSyFScukFxF5wE0rB8ClQ6pLYpImNurf57zgzvGXgPGkhgK7KkCZ9UgAWhKGPIHXyPUXUAtaVz9Q61SEkeo9UMtaRe9Q82aHqHuGGppitc/1CoxcIS6gwQufrnVP9SWAmrB92vihKT2bekPAfmvrLhxqRccU9CFZqqprUrUsDQMTO1+VaNyO6EgggrD/kwsPW3sVIR0q8jLka11+oPg9Ac57EzVXkI9Pm+xDYtTe0tJmHOzwMzajbFt/VxR2vOnkaTkNv7lHOgo/Mds10rJxRfVhP6cwmtVG2grJAuK7JlWa3TjK6Rau0M1if+avr/uJYYr3HfncobijrwpTSUTaRIIbZVMpJ/TF+XO++K0bU0Nj2OfAMqVnD7TqkZOd6jwME1wJLUjyMGeMmP3RHmV0KOb8jXbPn8Myrve1OUXaoUluispf6ZVjZTvUP55xDj1AzySvcTVcZSpnP/6pzuyOyr9Ry/ZazeI/Bhkn8+66N/PtKqP7E6HqtBvMIHxSPUSVU89E+MX6rRRnZyWv4qcB0b509Lg5X8=</diagram></mxfile>
|
2303.16268/main_diagram/main_diagram.pdf
ADDED
|
Binary file (17.4 kB). View file
|
|
|