Add files using upload-large-folder tool
This view is limited to 50 files because it contains too many changes.
- 2003.12039/main_diagram/main_diagram.drawio +1 -0
- 2003.12039/paper_text/intro_method.md +27 -0
- 2012.15115/main_diagram/main_diagram.drawio +1 -0
- 2012.15115/main_diagram/main_diagram.pdf +0 -0
- 2012.15115/paper_text/intro_method.md +95 -0
- 2103.03027/main_diagram/main_diagram.drawio +0 -0
- 2103.03027/paper_text/intro_method.md +52 -0
- 2106.07175/main_diagram/main_diagram.drawio +1 -0
- 2106.07175/paper_text/intro_method.md +106 -0
- 2110.03888/main_diagram/main_diagram.drawio +1 -0
- 2110.03888/main_diagram/main_diagram.pdf +0 -0
- 2110.03888/paper_text/intro_method.md +46 -0
- 2110.08387/main_diagram/main_diagram.drawio +0 -0
- 2110.08387/main_diagram/main_diagram.pdf +0 -0
- 2110.08387/paper_text/intro_method.md +122 -0
- 2110.10668/main_diagram/main_diagram.drawio +0 -0
- 2110.10668/paper_text/intro_method.md +17 -0
- 2112.08078/main_diagram/main_diagram.drawio +1 -0
- 2112.08078/main_diagram/main_diagram.pdf +0 -0
- 2112.08078/paper_text/intro_method.md +121 -0
- 2203.14250/main_diagram/main_diagram.drawio +1 -0
- 2203.14250/paper_text/intro_method.md +72 -0
- 2204.11752/main_diagram/main_diagram.drawio +1 -0
- 2204.11752/main_diagram/main_diagram.pdf +0 -0
- 2204.11752/paper_text/intro_method.md +161 -0
- 2204.14017/main_diagram/main_diagram.drawio +1 -0
- 2204.14017/main_diagram/main_diagram.pdf +0 -0
- 2204.14017/paper_text/intro_method.md +89 -0
- 2208.09215/main_diagram/main_diagram.drawio +0 -0
- 2208.09215/main_diagram/main_diagram.pdf +0 -0
- 2208.09215/paper_text/intro_method.md +182 -0
- 2210.01953/main_diagram/main_diagram.drawio +0 -0
- 2210.01953/paper_text/intro_method.md +86 -0
- 2212.04755/main_diagram/main_diagram.drawio +530 -0
- 2212.04755/main_diagram/main_diagram.pdf +0 -0
- 2212.04755/paper_text/intro_method.md +84 -0
- 2302.01520/main_diagram/main_diagram.drawio +1 -0
- 2302.01520/main_diagram/main_diagram.pdf +0 -0
- 2302.01520/paper_text/intro_method.md +133 -0
- 2303.10971/main_diagram/main_diagram.drawio +0 -0
- 2303.10971/paper_text/intro_method.md +102 -0
- 2303.15027/main_diagram/main_diagram.drawio +1 -0
- 2303.15027/main_diagram/main_diagram.pdf +0 -0
- 2303.15027/paper_text/intro_method.md +0 -0
- 2304.06976/main_diagram/main_diagram.drawio +0 -0
- 2304.06976/paper_text/intro_method.md +120 -0
- 2305.11553/main_diagram/main_diagram.drawio +1 -0
- 2305.11553/main_diagram/main_diagram.pdf +0 -0
- 2305.11553/paper_text/intro_method.md +79 -0
- 2305.15925/main_diagram/main_diagram.drawio +52 -0
2003.12039/main_diagram/main_diagram.drawio
ADDED
@@ -0,0 +1 @@
<mxfile host="www.draw.io" modified="2020-07-16T18:09:12.514Z" agent="5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36" etag="s3gAV618SbvgIFHs5AKT" version="13.4.3" type="device"><diagram id="BlZfnAgwjy0eONEpCxrb" name="Page-1">7VZdj9owEPw1PILyAfTuEQK0lXrS6ei1Ul9OvsSJLeys6yyE66/vOnEIFE5qpVJVannxenbtdWbGIYM40fu3lhlxBxlXgyjI9oN4MYii8CYKaXDIS4vc3k5aoLAy80U9sJbfuAcDj25lxquTQgRQKM0pmEJZ8hRPMGYt1KdlOajTroYV/AxYp0ydo59lhqJFbyZBj7/jshBd5zDwGc26Yg9UgmVQH0HxchAnFgDbSO8Trhx5HS/tutUr2cPBLC/xZxYME4HJ3VJsFo/i/sv4Pcze4NDvsmNq6x/YHxZfOgbo3MaFUjdUzXfcoiSCPrBnru6hkiihpPwzIIKmAuUSc5ZuCgvbMktAgW22ivPmd7THTMnCrUUwhLLKtBLmcs/p1POm5axDgw6hWCDSipl72mhVyRK5GslqhFs+0qYYZZzgrVHAsooipgqwEoV+osaVOy5hiCwV2pEXraJxOKXhYbb6+PTwKaHQ+W8VODpzBfXIlAV192TR4fn+VRXCg7Z0KThojvaFSvyCSezt4O/D2E/r3lyTzjHiyFi9qb2hi8PWveYUeNl/wQLRfws0FqBL7S1wRfnDv039+IL6U0Vt5zk4Xo5sMP26hS4xrJo3NbEfRGOz75MUFW6cSyVLziwVPJqKaUPTotva0drs3tae+Y34dbhATc+yCCms0MKGdz4qoXQ2zKVSP0DMuyklPbi9YDMts8y1mddCIl8blrqeNf15EdaY1dmuMdrvkDs61Ts81zsOLukdXEnu8ZXkTqDcEUv/tNjT8R8Tm6b9h0OTO/r8ipffAQ==</diagram></mxfile>
2003.12039/paper_text/intro_method.md
ADDED
@@ -0,0 +1,27 @@
# Method
<figure id="fig:A1" data-latex-placement="h">
<embed src="figures/Architecture.pdf" />
<figcaption>Network architecture details for the full 4.8M parameter model (5.3M with the upsampling module) and the small 1.0M parameter model. The context and feature encoders have the same architecture; the only difference is that the feature encoder uses instance normalization while the context encoder uses batch normalization. In RAFT-S, we replace the residual units with bottleneck residual units. The update block takes in context features, correlation features, and flow features to update the latent hidden state. The updated hidden state is used to predict the flow update. The full model uses two convolutional GRU update blocks with 1x5 and 5x1 filters respectively, while the small model uses a single GRU with 3x3 filters.</figcaption>
</figure>
<figure id="fig:Convergence" data-latex-placement="h!">
<embed src="figures/UpsampleModule.pdf" />
<figcaption>Illustration of the upsampling module. Each pixel of the high-resolution flow field (small boxes) is taken to be a convex combination of its 9 coarse-resolution neighbors, using weights predicted by the network.</figcaption>
</figure>
<figure id="fig:Upsampling" data-latex-placement="h!">
<img src="figures/UpsamplingFigure.png" />
<figcaption>Our upsampling module improves accuracy near motion boundaries, and also allows RAFT to recover the flow of small, fast-moving objects such as the birds shown in the figure.</figcaption>
</figure>
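For readers implementing this module, the following is a minimal PyTorch sketch of the convex upsampling step, assuming the full model's 8x factor and a mask head that outputs 8x8x9 weights per coarse pixel; the function name and tensor layout are ours.

```python
import torch
import torch.nn.functional as F

def convex_upsample(flow, mask, factor=8):
    """Upsample flow [N,2,H,W] -> [N,2,factor*H,factor*W] as a convex
    combination of each pixel's 3x3 coarse-resolution neighborhood."""
    N, _, H, W = flow.shape
    # mask: [N, factor*factor*9, H, W] -> weights over the 9 neighbors
    mask = mask.view(N, 1, 9, factor, factor, H, W)
    mask = torch.softmax(mask, dim=2)            # convex weights sum to 1

    # gather each pixel's 3x3 neighborhood (flow magnitudes scaled by factor)
    up = F.unfold(factor * flow, kernel_size=3, padding=1)   # [N, 2*9, H*W]
    up = up.view(N, 2, 9, 1, 1, H, W)

    up = torch.sum(mask * up, dim=2)             # [N, 2, factor, factor, H, W]
    up = up.permute(0, 1, 4, 2, 5, 3)            # interleave the sub-pixel grid
    return up.reshape(N, 2, factor * H, factor * W)
```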
**Photometric Augmentation:** We perform photometric augmentation by randomly perturbing brightness, contrast, saturation, and hue. We use the Torchvision `ColorJitter` with brightness 0.4, contrast 0.4, saturation 0.4, and hue 0.5/$\pi$. On KITTI, we reduce the degree of augmentation to brightness 0.3, contrast 0.3, saturation 0.3, and hue 0.3/$\pi$. With probability 0.2, color augmentation is applied to each of the images independently.
**Spatial Augmentation:** We perform spatial augmentation by randomly rescaling and stretching the images. The degree of random scaling depends on the dataset. For FlyingChairs, we perform spatial augmentation in the range $2^{[-0.2, 1.0]}$, FlyingThings $2^{[-0.4, 0.8]}$, Sintel $2^{[-0.2, 0.6]}$, and KITTI $2^{[-0.2, 0.4]}$. Spatial augmentation is performed with probability 0.8.
**Occlusion Augmentation:** Following HSM-Net [@hsm], we also randomly erase rectangular regions in $I_2$ with probability 0.5 to simulate occlusions.
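A compact sketch of this augmentation schedule is shown below (FlyingChairs-style scale bounds; the eraser rectangle sizes are illustrative assumptions, not values from the text).

```python
import numpy as np
from PIL import Image
from torchvision.transforms import ColorJitter

class FlowAugmentor:
    """Sketch of the schedule above, with FlyingChairs-style scale bounds."""
    def __init__(self, min_scale=-0.2, max_scale=1.0):
        self.jitter = ColorJitter(brightness=0.4, contrast=0.4,
                                  saturation=0.4, hue=0.5 / np.pi)
        self.min_scale, self.max_scale = min_scale, max_scale

    def color_transform(self, img1, img2):
        if np.random.rand() < 0.2:   # perturb each image independently
            return (np.array(self.jitter(Image.fromarray(img1))),
                    np.array(self.jitter(Image.fromarray(img2))))
        stacked = np.concatenate([img1, img2], axis=0)  # same jitter for both
        img1, img2 = np.split(np.array(self.jitter(Image.fromarray(stacked))), 2, axis=0)
        return img1, img2

    def sample_scale(self):
        if np.random.rand() < 0.8:   # spatial augmentation probability
            return 2 ** np.random.uniform(self.min_scale, self.max_scale)
        return 1.0

    def eraser_transform(self, img2):
        if np.random.rand() < 0.5:   # simulate occlusions in I_2
            h, w = img2.shape[:2]
            x0, y0 = np.random.randint(0, w - 100), np.random.randint(0, h - 100)
            dx, dy = np.random.randint(50, 100, size=2)  # illustrative bounds
            img2[y0:y0 + dy, x0:x0 + dx] = img2.reshape(-1, 3).mean(axis=0)
        return img2
```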
<figure id="fig:Convergence" data-latex-placement="h">
<p><embed src="figures/updates_graph.pdf" /> <embed src="figures/convergence_graph.pdf" /></p>
<figcaption>(Left) EPE on the Sintel set as a function of the number of iterations at inference time. (Right) Magnitude of each update $||\Delta \mathbf{f}_k||_2$ averaged over all pixels, indicating convergence to a fixed point $\mathbf{f}_k \rightarrow \mathbf{f}^*$.</figcaption>
</figure>
2012.15115/main_diagram/main_diagram.drawio
ADDED
@@ -0,0 +1 @@
<mxfile host="app.diagrams.net" modified="2020-12-16T13:24:25.623Z" agent="5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36" etag="zc3HofyPk4rho2DD2j9s" version="14.0.1" type="device"><diagram id="38cWNcsq3AymZE2LnnE_" name="Page-1">7V1Nd5s4FP01XiaHLwm8bPPRLppzOpPMNF1io9i0GFwsN/b8+hEGYUCKIQZLoJKThRECm/uuHvc9icfEvFntPsXuevkQeSiYGJq3m5i3E8PQLcOYJP+at09bbNNOGxax72Wdjg2P/n8oa9Sy1q3voU2pI46iAPvrcuM8CkM0x6U2N46j13K3lygof+vaXSCm4XHuBmzrN9/Dy7TVAdqx/TPyF0v6zbqW7Vm5tHPWsFm6XvRaaDLvJuZNHEU4/bTa3aAgAY/ikh53/8be/IfFKMRNDniIn8N/8Kt2/+2nsXv4DNBfe3Cla9Ps1+E9vWTkEQSyzSjGy2gRhW5wd2z9GEfb0EPJeTWydezzJYrWpFEnjT8QxvvMnO4WR6RpiVdBthftfPycHZ58/p58vgbZ1u2usOt2TzdCHO+fixvpUQag28fjDlv0QM/dLA8/NvnmDY6jn7kpjUOLG+MPCVVIQxiFiLbd+0GQn9Cr9CAthf2sLTLzbKJtPEcnDZANEPKNC4RP9syIlNin8CWZtT+haIXIZZMOMQpc7P8uM9jNBsIi73fkCvmQ0eU91KFDdKRO/6mjd02dw6Hkwtx9ocM68kO8KZz5a9JAOmS3Axs46Rmzm4FJDFByWTX9ATArtE1/wZHE+aW04bUhhddvcNTuAUfPGXMieD2dXoTWLA+hVuKhMTXLp0ivKTvqSFDR44P+rsuOj/Qrf7vBNsNzYkB3RVj+McAJufLNBc55UBhL5ZHyuvQxely7B0a8Eg1ZHhUvhIQ3URDFh2NNDyDHs3KeF/Y4xsyE8BTvfqMYo91JmtC9etncppltvx4VIG1aFsQfbev8VmswkD8t0RCBteqBhSKBNRlgb10/2A8QWkPrGbSA5yZSB7F2wxK88Nc2CYI+kkvFV27gL8KJ+YH0CNALPu4lnw4O5W63jtFmQ89Gflx6QjhUd2PAnpkOckxXFURBQCJwVI+ou1mnYfmLv0usIAfiHFIKscVCrHMg1i8Fsa0exKBnEDvqQWz3DOLppXz8tTre3Zz2TExSNnRvtXIckJiuGgqoYlTL6JtR2agsiecHiGz1NiIdWTb40oeIq9M3XNnYy98MEFjQt2SBbiknfKAJ+iV8dF50O3CMQd8wVi8MhXbfMOYFSaMWfJdR7d7dALhhmdIZegs6cjGn8YByKXoeskKTkQYb2aiSo5ePLRvbjEn6Zkl6+bZj46ehy6Nqlt6yWYyFyiNDvVCqmqaXj7F6oVQ1Ty8fY14oNSbqTyfq5WtK3iThGJy9y6jVRL18o7IRtxqJevnIsmGvCol66biabGirRKJePrDcJVJK52mgBiRjziohNfI0PGTFLqVk1YoqeRr52F4sTa98nka+7XjZ+IHHt5U8DdRZjIXGtxYrUQaPMegbxrznPgaOsd03jI1L+XmF8zTSNaXFy8SPeZpWeRr5RmVT/2rkaeQjy4a9KuRp5OPKhrZK5GmkAws4yidx2sNDtsHjl2KRZZ0s9nEwxHwMqD6bw8HWEoqtevOqsG9z10DFJap9w1i9RyVhdaZZOsbjMuDWRq0uA5Y+vQQ4iUclZIt0ZGn+uIAs2mB3FviHSjwDRLjq9DkI20IRVi/ZCC27hLH0ZCM01MMY9g1j9RbpQqdvGPMW6Y7ipZV4kZ5zgZzoVQnxIh9ZNmYN3ZUKKRfpU/06faZdTkVU+rlNRdS6YpPvrCY5D9zNxp9PigUl2TKV+ik+1JaLhFmcI6FaZLuByMZnf0czwn736ktyKSdGpP7uEfnizNF8zhuRMwckU0QXWe6XBw3FESnU29lNRC6Pr7yKw/mY+l4cUjXj6zopZ1oYY3rNCDtsfUWxTwBA8aSmmGot6WFT0hdMBDgWom0tK6nqtlViiFm1/BuVVHknutb16fGvdFodmte0iZ45hYo586Sjsqh2E6k/YKb1hEC2AbohEDnRNX0uip5L08RSpsnjhSNlWvucqpmN6XmUISe6NsD0LadjCPY4Oq30M8pNgXKTlnAZ3PsabDakVk1vWhrnoXyxerPJjPCAXXot6ynL+uL7K3ozn9noVm+ami5cbzaZFx8w03pCoKrePJtAqd7U8j+jfFrTEcueJtWRR/Z0LT0tOkHVsfS0TFsofXQ66TNKT5HSs2mm8wLve2qnitRPdQKLM/kgVHrS9eiquvR66dnvVGc+PdWt9CTKQbT0dJqszhkw03pCoKr0PJtAp6UnsIFY9hgjeyRIT6hZF5GewLaE0kendaEHKz2HJzwv9J7F974ecVp93aFz+vWI1f5mNgS4r0dkea9p/MNFMX3Kquaa+kAaWs2Q5/nhghkibRS15yLnhauo4dxBs5duFLVtV6zLWTygC32ESqfl0UdXI8zVXGDW5DxfA8tstIzSq1Xr+1snXsVa72vo4cLuqhovevxznA1v5kiws6EVEEZnI9DZdJ4nO9PZmGVNnS1bfdvZVPpnbD7T2dDDxTkbXgLhz3E2vFyhaGVjD8XZ9NJtGBdxG92FIG0jq7oXz9dEVs0v5LJvqqd2KviZmzhKyh1qHzAmdvejUAF/YuryI6Xp6E9a+JOmi2a69idNw4zW0RNsFT01v5AL+xN2CZ2K/sSy5AdDbEmwnvoThYIh5yJeqG0wBDXFgyF2KZaKTgU40oMewxidiiIZFtYL6BW6wQqNGk93nusQepX0OcM7wdsfj8F2fb/+9eV+Hzz9+/zqL67YRM1N4PorWvlgFtOiB0/xltCllZvqwOkQulWeQHXY0Mg5MenexulwAWQj0Cd3llQ4rAJ4s4z8ecuVcF0gqGv1CPKWLdCUQfcQsg9WDouDls7KaaEcZB8zHBaAALDSQSiADUppni3Isvu05x5qh3UG4rRSYzCvhFGUXzwQzeqNswGKZDOOktI5x1sPueblQ+ShpMf/</diagram></mxfile>
2012.15115/main_diagram/main_diagram.pdf
ADDED
Binary file (26.9 kB)
2012.15115/paper_text/intro_method.md
ADDED
@@ -0,0 +1,95 @@
# Introduction
Verifying whether a given fact coheres with a trusted body of knowledge is a fundamental problem in NLP, with important applications to automated fact checking [@vlachos-riedel-2014-fact] and other tasks in computational journalism [@cohen2011; @flew2012promise]. Despite extensive investigation of the problem under different conditions including entailment and natural language inference [@dagan2005pascal; @bowman-etal-2015-large] as well as claim verification [@vlachos-riedel-2014-fact; @alhindi2018your; @thorne-vlachos-2018-automated], relatively little attention has been devoted to the setting where the trusted body of evidence is structured in nature -- that is, where it consists of tabular or graph-structured data.
Recently, two datasets were introduced for claim verification over structured tables [@chen2019tabfact; @gupta-etal-2020-infotabs]. Common to these datasets is a conception of the task wherein each claim can be either verified or refuted given a single associated table. While highly useful for the development of models, this *closed* setting is not reflective of real-world fact checking, where it is usually not known which table to consult for evidence. Fact verification systems therefore need to operate in an *open* setting, where evidence must first be retrieved from a large data source.
<figure id="fig:example_query" data-latex-placement="t">
<img src="media/table_example.png" style="width:40.0%" />
<figcaption>Example query to be evaluated against two retrieved tables. Named entities represent a strong baseline for retrieval, but ultimately a more complex model is required to distinguish highly similar tables.</figcaption>
</figure>
<figure id="fig:model_overview" data-latex-placement="t">
<img src="media/table_bert.png" style="width:90.0%" />
<figcaption>A diagram of our model, using the joint reranking-and-verification approach described in Section <a href="#section:loss" data-reference-type="ref" data-reference="section:loss">4.1</a>.</figcaption>
</figure>
In this paper, we investigate fact verification over tables in the open setting. We take inspiration from similar work on unstructured data [@chen2017reading; @nie2019combining; @karpukhin2020dense; @lewis2020retrieval], proposing a two-step model which combines non-parametric, retrieval-based memory with a neural reader operating on top of retrieved tables. Drawing on preliminary work in open question answering over tables [@sun2016table], we perform retrieval based on simple heuristic modeling of individual table cells. We combine this simple heuristic with a RoBERTa-based [@liu2019roberta] joint reranking-and-verification model, performing fusion of evidence documents in the verification component as suggested for question answering by @izacard2020leveraging.
We evaluate our models using the recently introduced TabFact dataset [@chen2019tabfact]. While initially developed for the closed domain, the majority of the queries are sufficiently context-independent that the claims can be understood without knowing which table they were constructed with reference to. As such, the dataset is suitable for the open domain as well. Our models represent a first step into the open domain, achieving performance exceeding the previous closed-domain state of the art (outside of @eisenschlos-etal-2020-understanding, which includes pretraining on additional labeled data), with even larger gains when operating in the closed domain. We demonstrate significant improvements from including multiple tables, with verification accuracy increasing as more tables are used. We furthermore present results using a more realistic setting where tables are retrieved not just from the 16,573 tables present in TabFact, but from the full Wikipedia dump. Our contributions can be summarized as follows:
1. We introduce the first model for open-domain table fact verification, demonstrating strong performance exceeding the previous closed-setting state of the art.
2. We propose two strategies with corresponding loss functions for modeling table fact verification in the open setting, suited respectively to high verification accuracy and to identifying whether appropriate information has been retrieved for verification.
3. In addition to our open-domain performance, our model achieves a new closed-domain state-of-the-art result.
4. We report the first results on Wikipedia-scale open-domain table fact verification, using all tables from a Wikipedia dump as the backend.
Formally, the open table fact verification problem can be described as follows. Given a claim $q$ and a collection of tables $T$, the task is to determine whether $q$ is true or false. This corresponds to modeling a binary verdict variable $v$ by $p(v|q,T)$. This is in contrast to the closed setting, where a single table $t_q \in T$ is given, and the task is to model $p(v|q,t_q)$. Since there are large available datasets for the closed setting [@chen2019tabfact; @gupta-etal-2020-infotabs], it is reasonable to expect to exploit $t_q$ during training; however, at test time, this information may not be available.
We adapt to our setting the two-step methodology often adopted for open-domain tasks over unstructured data [@chen2017reading; @nie2019combining; @karpukhin2020dense; @lewis2020retrieval]. Namely, given a claim query $q$, we retrieve a set of evidence tables $D_q \subset T$ (Section [3](#section:retrieval){reference-type="ref" reference="section:retrieval"}), and subsequently model $p(v|q,D_q)$ in place of $p(v|q,T)$ (Section [4](#section:model){reference-type="ref" reference="section:model"}).
We begin by designing a strategy for retrieving an appropriate table or subset of tables with which to answer a given query. For question answering over tables, @sun2016table demonstrated strong performance on initial retrieval of tables using entity linking information, following the intuition that many table cells contain entities. We take inspiration from these results. In their setting, claim entities are linked to Freebase entities, and string matching on the alias list is used to match entities to cells. To avoid reliance on a knowledge graph, we instead use only the textual string from the claim to represent entities, and perform approximate matching through dot products of bi- and tri-gram TF-IDF vectors.
We first pre-compute bi- and tri-gram TF-IDF vectors $z(c^1_t), ..., z(c^m_t)$ for every table $t \in T$ with cells $c_t^1, ..., c_t^m$. Then, we identify the named entities $e^1_q, ..., e^n_q$ within the query $q$. For our experiments, @chen2019tabfact provided named entity spans for TabFact as part of their LPA model, and we reuse those[^2]. We compute bi- and tri-gram TF-IDF vectors $z(e^1_q), ..., z(e^n_q)$ for the surface forms of those entities. To retrieve $D_q$ given $q$, we then score every $t \in T$. Since we are approximating entity linking between claim entities and cells, we let the score between an entity and a table be the *best* match between that entity and any cell in the table: $$\begin{equation}
score(q,t) = \sum\limits_{i = 1}^n \max\limits_{j = 1}^m z(e^i_q)^\intercal \cdot z(c^j_t)
\end{equation}$$ That is, we compute for every entity the best match in the table, and score the table as the sum over the best matches. To construct the set of evidence tables $D_q$, we retrieve the top-$k$ highest scoring tables. Our choice of bi- and tri-gram TF-IDF as the retrieval strategy was determined empirically -- see Section [5.1](#section:retrieval_performance){reference-type="ref" reference="section:retrieval_performance"} as well as Table [\[table:retrieval_performance\]](#table:retrieval_performance){reference-type="ref" reference="table:retrieval_performance"} for experimental comparisons.
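As a rough sketch, this scoring can be implemented with scikit-learn. We assume character-level (2,3)-grams here, since the text contrasts this variant with word-level TF-IDF; the helper names are ours.

```python
import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer

# Fit one vectorizer over all cell strings so entity and cell vectors
# share a vocabulary (assumption: character-level n-grams).
vectorizer = TfidfVectorizer(analyzer="char", ngram_range=(2, 3))

def fit_cells(tables):
    """tables: list of lists of cell strings; returns one sparse matrix per table."""
    vectorizer.fit(s for t in tables for s in t)
    return [vectorizer.transform(t) for t in tables]

def score_table(entity_strings, cell_matrix):
    """Sum over entities of the best-matching cell (the equation above)."""
    E = vectorizer.transform(entity_strings)   # (n_entities, vocab)
    sims = E @ cell_matrix.T                   # entity-cell dot products
    return sims.max(axis=1).sum()

def retrieve(entity_strings, cell_matrices, k=5):
    scores = np.array([score_table(entity_strings, M) for M in cell_matrices])
    return np.argsort(-scores)[:k]             # indices of the top-k tables
```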
::: table*
  Retrieval strategy                 H@1    H@3    H@5    H@10
  ---------------------------------- ------ ------ ------ ------
  Query-level word TF-IDF            41.7   54.2   59.0   65.3
  Query-level (2,3)-gram TF-IDF      34.7   45.5   50.2   56.8
  Entity-level exact match           48.2   57.9   64.2   67.3
  Entity-level word TF-IDF           56.0   65.6   74.1   81.2
  Entity-level (1,2,3)-gram TF-IDF   62.3   75.2   80.1   86.1
  Entity-level (2,3)-gram TF-IDF     69.6   78.8   82.3   86.6
:::
To model $p(v|q, D_q)$, we employ a RoBERTa-based [@liu2019roberta] late fusion strategy. Given a query $q$ with a ranked list of $k$ retrieved tabular documents $D_q = (d_q^1, ..., d_q^k)$, we begin by linearising each table. Our linearisation scheme follows @chen2019tabfact. We first perform sub-table selection by excluding columns not linked to entities in the query. Here, we reuse the entity linking obtained during the retrieval step (see Section [3](#section:retrieval){reference-type="ref" reference="section:retrieval"}), and retain only the three columns in which cells received the highest retrieval scores. We linearise each row separately, encoding entries and table headers. Suppose $r$ is a row with cell entries $c_1, c_2, ..., c_m$ in a table, where the corresponding column headers are $h_1, h_2, ..., h_m$. Row number $r$ is mapped to *"row $r$ is : $h_1$ is $c_1$ ; $h_2$ is $c_2$ ; ... ; $h_m$ is $c_m$ ."*
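A minimal sketch of this linearisation (the helper name and argument layout are ours):

```python
def linearise_table(query, headers, rows, keep_cols):
    """Flatten a column-filtered table into the textual form described above.

    headers: list of column names; rows: list of lists of cell strings;
    keep_cols: indices of the (up to three) entity-linked columns retained.
    """
    parts = [query]   # the query is prepended to the table linearisation
    for r, row in enumerate(rows, start=1):
        cells = " ; ".join(f"{headers[c]} is {row[c]}" for c in keep_cols)
        parts.append(f"row {r} is : {cells} .")
    return " ".join(parts)

# e.g. linearise_table("the claim text", ["Name", "Year"], [["A", "1999"]], [0, 1])
# -> "the claim text row 1 is : Name is A ; Year is 1999 ."
```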
We construct a final linearisation $L_{q,t}$ for each query-table pair $q,t$ by prepending the query to the filtered table linearisation. We then encode each $L_{q,t}$ with RoBERTa, and obtain a contextualised RoBERTa-embedding $f(d_q^k) \in \mathbb{R}^{n}$ for every table as the final-layer embedding of the CLS-token. We construct the sequence of embeddings $f(d_q^1), ... f(d_q^k)$ for all $k$ tables.
When the model attempts to judge whether to rely on a given table for verification, other highly-scored tables represent useful contextual information (e.g. in the example in Figure [1](#fig:example_query){reference-type="ref" reference="fig:example_query"}, newspapers with similar political leanings may be more likely to belong to the same owner). Nevertheless, each table embedding $f(d_q^k)$ is functionally independent of the embeddings of the other tables. As such, contextual clues from other tables cannot be taken into account. To remedy this, we introduce a cross-attention layer between all tables corresponding to the same query. We collect the embeddings $f(d_q^k)$ for each table into a tensor $F(D_q)$. We then apply a single multi-head self-attention transformation as defined in @vaswani2017attention to this tensor, and concatenate the result. That is, we compute an attention score for head $h$ from table $i$ to table $j$ with query $q$ as: $$\begin{equation}
\alpha^h_{ij} = \sigma\left(\frac{W^h_Q f(d_q^i) (W^h_K f(d_q^j))^T}{\sqrt{dim(K)}}\right)
\end{equation}$$ where $\sigma$ is the softmax function, and $W_Q$ and $W_K$ represent linear transformations to queries and keys respectively. We then compute an attention vector for that head as: $$\begin{equation}
a_i^h = \sum\limits_{j \in D_q} \alpha^h_{ij} W^h_V f(d_q^j)
\end{equation}$$ and finally construct contextualized table representations through concatenation as: $$\begin{equation}
f^*(d_q^k) = [f(d_q^k), a_k^1, ..., a_k^h]
\end{equation}$$ We subsequently use $F^*(D_q)$, i.e. the tensor containing $f^*(d_q^1), ..., f^*(d_q^k)$, for downstream predictions. We note that our approach can be viewed as an extension of the Table-BERT algorithm introduced in @chen2019tabfact to the multi-table setting, using an attention function to fuse together the information from different tables.
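The fusion layer can be sketched as follows. Note that we use PyTorch's fused `nn.MultiheadAttention` (which adds an output projection) in place of the raw per-head concatenation in the equations, so this is an approximation of the layer rather than a line-by-line transcription.

```python
import torch
import torch.nn as nn

class TableFusion(nn.Module):
    """Contextualise the k table embeddings F(D_q) via self-attention,
    then concatenate each input with its attention summary (~ f*(d_q^k))."""
    def __init__(self, dim, heads=4):
        super().__init__()
        self.attn = nn.MultiheadAttention(dim, heads, batch_first=True)

    def forward(self, F_dq):                    # F_dq: (batch, k, dim)
        ctx, _ = self.attn(F_dq, F_dq, F_dq)    # every table attends to every table
        return torch.cat([F_dq, ctx], dim=-1)   # (batch, k, 2*dim)

# Verdict + table-selection head for the joint loss (W: R^{2n} -> R^2):
# logits = nn.Linear(2 * dim, 2)(TableFusion(dim)(F_dq))   # (batch, k, 2)
```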
At training time, relying on a closed-setting dataset allows us to identify which tables contain appropriate information for answering each query (e.g., the table against which the claim is to be checked in the closed setting). Although this information is not available at test time, we can construct a training regime that allows us to exploit it to improve model performance, as well as obtain a test-time indicator that an appropriate table has not been retrieved. We identify and experiment with two different approaches to modeling this problem. The first option is to jointly model the choice of table and the truth value of the claim. We refer to this as *joint reranking and verification*. The second option is to model for each table a choice between indicating that the claim is true, that the claim is false, or that no information about the claim is given. We refer to this as *ternary verification*. In Sections [5.2](#section:verification_results){reference-type="ref" reference="section:verification_results"} and [5.4](#section:insufficient){reference-type="ref" reference="section:insufficient"}, we demonstrate how the former leads to increased performance on verification, while the latter gives access to a strong predictor for the cases where no appropriate table has been retrieved to verify the query.
For the joint reranking and verification approach, we assume that there is a *best* table for answering each query, which can be used to learn a ranking function. We model this as selecting the right table from $D_q$, e.g. through a categorical variable $s$. We then learn a joint probability of $s$ and the truth value of the claim $v$ over the tables for a given query. Since there is one correct table and one correct truth value, $p(s, v | q, D_q)$ is also a categorical distribution. As such we let: $$\begin{equation}
p(s, v | q, D_q) = \sigma(W( F^*(D_q)_s)_v)
\end{equation}$$ where $W: \mathbb{R}^{2n} \to \mathbb{R}^2$ is an MLP and $\sigma$ is the softmax function. At train time, we correspondingly obtain one cross-entropy term per query. At test time, we marginalize over $s$ to obtain a final truth value: $$\begin{equation}
\label{equation:ternary_table}
p_v(v | q, D_q) = \sum\limits_{t \in D_q} p(v, s=t | q, D_q)
\end{equation}$$[]{#equation:ternary_expectation label="equation:ternary_expectation"} This formulation has the additional benefit of also allowing us to make a prediction on which table matches the query. We can do so by marginalizing over $v$: $$\begin{equation}
\label{equation:reranking}
p_s(s | q, D_q) = \sum\limits_{v_q \in \{true,false\}} p(s, v=v_q | q, D_q)
\end{equation}$$ With this loss, we train the model by substituting for $D_q$ a set $D^*_q$ in which the gold table is guaranteed to appear. We ensure this by replacing the lowest-scored retrieved table in $D_q$ with the gold table whenever it has not been retrieved.
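A sketch of the joint loss and the two marginalisations for a single query; the logits are assumed to come from the MLP $W$ applied to $F^*(D_q)$, and the function names are ours.

```python
import torch
import torch.nn.functional as F

def joint_reranking_loss(logits, gold_table, gold_verdict):
    """logits: (k, 2) scores W(F*(D_q))_sv; the softmax runs over all k*2
    cells, so p(s, v | q, D_q) is a single categorical distribution."""
    log_p = F.log_softmax(logits.view(-1), dim=0).view(-1, 2)
    return -log_p[gold_table, gold_verdict]      # cross-entropy on (s*, v*)

def predict(logits):
    p = torch.softmax(logits.view(-1), dim=0).view(-1, 2)
    p_v = p.sum(dim=0)     # marginalise over tables   -> p_v(v | q, D_q)
    p_s = p.sum(dim=1)     # marginalise over verdicts -> p_s(s | q, D_q)
    return p_v.argmax().item(), p_s.argmax().item()
```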
At test time, there may be cases where a table refuting or verifying the fact is not contained in $D_q$. For some applications, it could be useful to identify these cases. We therefore design an alternative variant of our system better suited for this scenario. Intuitively, each table can represent three outcomes -- the query is true, the query is false, or the table is irrelevant. We can model this through a ternary variable $i$ such that for table $t$: $$\begin{equation}
p(i | q, t, D_q) = \sigma(W^\prime(F^*(D_q)_t)_{i})
\end{equation}$$ where $W^\prime: \mathbb{R}^{2n} \to \mathbb{R}^3$ is an MLP and $\sigma$ is the softmax function. During training, we assign *true* or *false* to the gold table depending on the truth of the query, and *irrelevant* to every other table. We then use the mean cross-entropy over the tables associated with each query as the loss for each example. At test time, we predict the truth value $v$ of each query as true when: $$\begin{equation}
\sum\limits_{t \in D_q} p(i = true | q, t) > \sum\limits_{t \in D_q} p(i = false | q, t)
\end{equation}$$
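The corresponding decision rule, sketched for a single query; the `retrieved_nothing` indicator is our illustrative assumption for flagging missing evidence, not necessarily the exact criterion used later in the paper.

```python
import torch

def ternary_verdict(tern_logits):
    """tern_logits: (k, 3) per-table scores over (true, false, irrelevant)."""
    p = torch.softmax(tern_logits, dim=-1)
    verdict = bool(p[:, 0].sum() > p[:, 1].sum())   # the inequality above
    # Illustrative assumption: flag queries where every table looks irrelevant.
    retrieved_nothing = bool(p[:, 2].min() > 0.5)
    return verdict, retrieved_nothing
```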
::: table*
  Model                                               Dev        Test       Simple Test   Complex Test   Small Test
  --------------------------------------------------- ---------- ---------- ------------- -------------- ------------
  Table-BERT [@chen2019tabfact]                       66.1       65.1       79.1          58.2           68.1
  LogicalFactChecker [@zhong2020logicalfactchecker]   71.8       71.7       85.4          65.1           74.3
  ProgVGAT [@yang-etal-2020-program]                  74.9       74.4       88.3          67.6           76.2
  TAPAS [@eisenschlos-etal-2020-understanding]\*      81.0       81.0       92.3          75.6           83.9
  Ours (Oracle retrieval)                             78.2       77.6       88.9          72.1           79.4
  Ours (1 retrieved table)                            74.1       73.2       86.7          67.8           76.6
  Ours (Ternary loss, 3 tables)                       73.8       73.5       86.9          68.1           76.9
  Ours (Ternary loss, 5 tables)                       74.1       73.7       87.1          67.9           76.5
  Ours (Ternary loss, 10 tables)                      73.9       73.1       86.5          67.9           77.3
  Ours (Joint loss, 3 tables)                         74.6       73.8       87.0          68.3           78.1
  Ours (Joint loss, 5 tables)                         **75.9**   **75.1**   **87.8**      **69.5**       **77.8**
  Ours (Joint loss, 10 tables)                        73.9       73.8       86.9          68.1           76.9
:::
2103.03027/main_diagram/main_diagram.drawio
ADDED
The diff for this file is too large to render.
2103.03027/paper_text/intro_method.md
ADDED
@@ -0,0 +1,52 @@
# Introduction
In this supplement, we present additional ablation experiments (Section [9](#ablation){reference-type="ref" reference="ablation"}) and a more detailed comparison with other methods (Section [10](#comparison){reference-type="ref" reference="comparison"}). We also present additional information about the parameters in our MLAD layer (Section [11](#parameters){reference-type="ref" reference="parameters"}) and experiments on an additional baseline (Section [12](#baseline){reference-type="ref" reference="baseline"}). Lastly, we present additional results using our proposed metric (Section [13](#metric){reference-type="ref" reference="metric"}) and additional visualizations of learned attention maps (Section [14](#analysis){reference-type="ref" reference="analysis"}).
# Method
We present additional results for our proposed metric on MultiTHUMOS with various values of $\tau>0$ in Table [\[tab:metric1\]](#tab:metric1){reference-type="ref" reference="tab:metric1"}. By varying the $\tau$ parameter, we can measure short-term or long-term temporal dependencies. In general, we find that our proposed method outperforms the previous approaches on all metrics. We also evaluate our method, and our baselines, on the Charades dataset with the action-conditional metric. These results are shown in Table [\[tab:metric2\]](#tab:metric2){reference-type="ref" reference="tab:metric2"}.
As stated in the main text, one benefit of our proposed action-conditional metric is that we can obtain the pairwise performance between two dependent action classes. We present the pairwise scores for various action classes in the MultiTHUMOS dataset with co-occurrence dependencies ($\tau=0$) and temporal dependencies ($\tau=20$) in Tables [\[tab:pairwise1\]](#tab:pairwise1){reference-type="ref" reference="tab:pairwise1"} and [\[tab:pairwise2\]](#tab:pairwise2){reference-type="ref" reference="tab:pairwise2"} respectively. These results show that our method successfully models the various dependencies between different actions. Furthermore, they suggest that improvements in previous methods for temporal action localization do not necessarily come from improved modeling of action dependencies. Although TGM greatly outperforms the I3D baseline on this dataset, there are several action relationships on which it performs worse than the baseline (e.g. "CricketBowling" and "Throw" in Table [\[tab:pairwise1\]](#tab:pairwise1){reference-type="ref" reference="tab:pairwise1"}, and "VolleyballSpiking" and "VolleyballSet" in Table [\[tab:pairwise2\]](#tab:pairwise2){reference-type="ref" reference="tab:pairwise2"}).
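For concreteness, one plausible reading of the pairwise, action-conditional score is sketched below: evaluate Action1 only on frames where Action2 is active within a window of $\tau$ frames. This is our interpretation for illustration; the precise definition is given in the main text.

```python
import numpy as np
from sklearn.metrics import average_precision_score

def pairwise_conditional_ap(scores_a1, labels_a1, labels_a2, tau):
    """Per-frame arrays of equal length; tau = 0 reduces to co-occurrence."""
    window = np.ones(2 * tau + 1)
    active = np.convolve(labels_a2.astype(float), window, mode="same") > 0
    return average_precision_score(labels_a1[active], scores_a1[active])
```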
::: table*
:::
::: table*
+------+----------------------------------------------------------------------------------+----------------------------------------------------------------------------------+----------------------------------------------------------------------------------+
| | **$\tau=0$** | **$\tau=20$** | **$\tau=40$** |
+:=====+:=================:+:=================:+:==================:+:===================:+:=================:+:=================:+:==================:+:===================:+:=================:+:=================:+:==================:+:===================:+
| | $\textbf{P}_{AC}$ | $\textbf{R}_{AC}$ | $\textbf{F1}_{AC}$ | $\textbf{mAP}_{AC}$ | $\textbf{P}_{AC}$ | $\textbf{R}_{AC}$ | $\textbf{F1}_{AC}$ | $\textbf{mAP}_{AC}$ | $\textbf{P}_{AC}$ | $\textbf{R}_{AC}$ | $\textbf{F1}_{AC}$ | $\textbf{mAP}_{AC}$ |
+------+-------------------+-------------------+--------------------+---------------------+-------------------+-------------------+--------------------+---------------------+-------------------+-------------------+--------------------+---------------------+
| I3D | 14.34 | 1.33 | 2.10 | 15.17 | 12.68 | 1.94 | 2.93 | 21.43 | 14.93 | 2.02 | 3.07 | 20.26 |
+------+-------------------+-------------------+--------------------+---------------------+-------------------+-------------------+--------------------+---------------------+-------------------+-------------------+--------------------+---------------------+
| CF | 10.27 | 1.04 | 1.63 | 15.77 | 9.01 | 1.50 | 2.23 | 22.23 | 10.69 | 1.58 | 2.36 | 21.04 |
+------+-------------------+-------------------+--------------------+---------------------+-------------------+-------------------+--------------------+---------------------+-------------------+-------------------+--------------------+---------------------+
| Ours | **19.33** | **7.23** | **8.86** | **28.94** | **18.85** | **8.88** | **10.52** | **35.74** | **19.64** | **9.04** | **10.77** | **34.78** |
+------+-------------------+-------------------+--------------------+---------------------+-------------------+-------------------+--------------------+---------------------+-------------------+-------------------+--------------------+---------------------+
:::
::: table*
Action1 Action2 I3D Baseline CF Baseline TGM [@piergiovanni2019temporal] Ours
------------------- ------------- -------------- ------------- --------------------------------- -----------
BasketballDunk Jump 88.12 91.32 90.46 **93.18**
CliffDiving Jump 86.18 88.20 90.86 **95.01**
VolleyballSpiking Jump 45.97 52.32 50.77 **64.63**
PickUp Squat 62.34 63.17 60.79 **65.29**
Throw HammerThrow 29.89 40.57 36.74 **47.49**
BodyBend Diving 36.89 39.73 42.28 **45.45**
BasketballDribble Run 45.60 55.33 47.26 **58.85**
CricketBowling Throw 78.23 78.18 77.83 **87.16**
:::
::: table*
Action1 Action2 I3D Baseline CF Baseline TGM [@piergiovanni2019temporal] Ours
-------------------- ------------------- -------------- ------------- --------------------------------- -----------
BasketballDunk BasketballShot 67.14 73.00 67.96 **74.60**
VolleyballSpiking VolleyballSet 60.99 66.22 51.14 **70.21**
Fall Jump 63.38 72.96 72.27 **78.12**
FrisbeeCatch Throw 37.33 39.51 43.30 **59.22**
BasketballGuard BasketballShot 67.11 69.35 66.21 **75.95**
HammerThrowSpin HammerThrowWindUp 96.52 97.32 96.28 **97.47**
HammerThrowRelease HammerThrowSpin 61.52 69.46 72.89 **75.35**
DiscusRelease DiscusWindUp 65.88 65.55 63.41 **69.51**
:::
2106.07175/main_diagram/main_diagram.drawio
ADDED
@@ -0,0 +1 @@
<mxfile host="app.diagrams.net" modified="2021-05-28T12:35:51.281Z" agent="5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.130 Safari/537.36" etag="kOKttm7i0QZNB7lQam21" version="14.6.13" type="google"><diagram id="snqD6rr1u1Y9LLqXWqiE" name="Page-1">7R1rc5u49td4Jr0zZhDi+TGxk+Zu02e6t+1+yWCDHW+xcTFpk/31F4HAIB1eRhCn63Sa2AJJoPPUeWmEJ+vH14G9vX/rO643UmTncYSnI0WxLDP6TRqekgZNN5KGZbBykia0b7hd/ePSRpm2Pqwcd1e4MfR9L1xti41zf7Nx52GhzQ4C/1fxtoXvFWfd2kuXa7id2x7f+mXlhPdJq6nJ+/Zrd7W8T2dGMr2yttOb6RC7e9vxfyVN8T34coQnge+Hyaf148T1yNql65IMdFVyNXuwwN2ETTrcbL/dTqeB+3l783e4fePJ48e7MabD/LS9B/rG9GnDp3QJft2vQvd2a8/J918RlEf44j5ce9E3FH3M3kuOviz8TUiBiDD5vvK8ie/5QTwUvlLIP9IpDPzvbnpl42+iHhf8G6VP5wah+5hrom/42vXXbhg8RbfQq7qiSFrSKUU4Co9fe+hhy0hvus/BTrXou9sUZ5bZ8PtljT7QlW2zynLPqxx9z62yHP8Aq29dGZfTHlc/XWu69Dq/9FraVlh4Q+6+8B/9N9M/bt8stJ//qMsffz04S8UaIx1YeN0L6ZoVIKD/ePDTC+NdvLrn0Q3W9nF/Lfq0JH8VKbryxn3akYfeONHv/5EZdjEjWm8fQjtc+ZtkgHTCWdo5bVilDdf2dutuSGd/4z3FfyIsoHdF771ie0ZtudGipZE/PrgxINjZdXtNEGkz222hmbln++RuXTt0yRvZZHlce34fr5K7hSevfMhkjdPmLgifQ2Rx6KpiXVKK3EJJxVMOZ5GOJQPiF1pfWKuKZRdFco8YgT433dmCZxCO7ZqLuZjFxajIDBSs8SurANxAF8AMvl7Lzib84/HL/27C+ead8/bu45dxNpuodfXsmetd2PPvy8B/2Dg9SDNsMNJMMfhVtIBFRFZvqygb9asYr4fr0EWrW1MWPRWsqprDo+elTv4JWlpVlbBRXFuLp31jzyHyyytCYL1R1HN8N3v38OCdqx+vH+1teD3GZmcU3a+9zCKsY+/uM6hUYy+z9FPt0pyqELBM5QLrgoBiaEhS9AJQkClnEMjBRVFlEDBItTJeLRw4AvhHBXDKyaAAth5pglHfsALobxrAbBRZADn8HVzffAq/nd/cfBsvvj29/vnu7mKsdFbfkALpb7xiJANNdr45nnUctdz89/ZzpjcF7UacdRgRJS80B4eYvP/zXTSGfJZdiseQXyWT1o2tJGM74NjTT+8/0IntMo2yfGicDO2CQ789JyOf/WeMXiXT142mJqMtwNGu/nvz+fJTYRW8bBVccPBGmmpEPiFDxm6EYfYsvoHQrv0Q+ju6ISNfvdUy0r+nnrsgHQkFrua2d06b1yvHIV0vtv5qE8Y0o12MtCnHH/L7PDP9Tp8rvX5lr1ceodiJ/xCs3Ojl5Hfur1r+nlz1A8cNmCscH5qZmqrtOVRqBlF4ZrQw5+5ckP6o4iI3MnhmhAxI89H6Yka4OzPSS5gRvEGkw86zBd73xIsFjv7nm9pPLy8SMbVvu3a9ny7B1SYsszE5Ve4boVu3gdvofXDVctLVa/aOgibrOEr/GFA2PQNajKRW0JQjBoY0NbFFjBVNST8pJv1kavQDwjphdfEX0ku2ks96egNO/hoo+YtMK75fHhdFW9I9/j3tjJtiwFcFLfIjDFocsrDgS+R5C7ojazlGiK59pF1TYGgpdApAGyOyVYqho8kV0BnvB6IDHAWcWpHZTDOw5fREZrgIp2aENk7XXtYzmjLz4FEUSksZlIzfgYQWC8uKVqwBJH5RfYDMNyMeH0WmAu9AOKn1cKp6clleLJoRv+gn7xNkhA/sGUXKrjWKe0YmDVDZY+RQFtNeZio5kIyj62UIO86mGUfzxNh7fNhKWH4/fKPy1VorEaKeMoJhBZXII4ihaRemkUIcUZBG2kNeQTBSJmbh9M4qdjam3euYaklzrHoeiCtqnXr92b7317Bu/ZK2pg2sh8WN6XpFjAe3duzjens7Ku5s98OVb0fzm047mNOugI/T0VzTUUegoXImzFCp1+5NMeToRLivvSmCTJODo+8h0/S84SmbrRc5CU72xQ++72IJHnRQIZ7t5Vts22H409chDxQsZ2dEUaBKQ+6jommvirrOgrIP6kJPHpmozIS473N7+OKVtgoTVqpetMyGEJZSwqQJMPnN4KGgaQ6GIRf1kF1VBhC1FUI+nzxkbZ/WwiDSsT85qbaUk5wEtIypHGlTeQmqyWIEYrzRz0lEVWvo7VcESETQWQcFtrVk5+jf5Tp6P50+i7voeBXd4/HBOIY1k5v5YBauLsoHw6i5GIilRKCeK8IhDLvgT1TdjKoBX+jeI3yAG7eS5Itu3PmJyptReRnltqF+EbIbNyBz0NXaG5kLcLX+O8j8N477ODGMY2UYKhPai1VA2YcCxYTYv0CGAUVM9xKb8eGS7NDcaAd1T8L9W9hI2gUKloa/R9B1kaO5BgR3SzewLSwYswDkTEoMEb8NwljrDmPQXjEJ/B3ZcJ8vl4G7tMMKO504GE7PL82rCQRDfWJeXlwJgqGOJYSLcDR0Do6RFi9piAel2pt8V7m1LLJXPwjv/aW/sb0b39/S9fzbDcMnynQJWy+utvu4Cr/mPn8jQ0ka/TZ9pCPHX57SL5vobb7GN2JdSRtIz7EsIS1t2HeOv+17O+ckxTD6OvP8+fcE5FvyeAvPfaTXLtKFVpMuKTPWk6/0fRTJUHksmdlz08EQlmTBwfEjfXCDVQQZN6BPVoo6u0gSUVsmDBmKHKEdLN2wCoIUNVynkD/JY2Lgena4+ukWHgNCKNr1AxG8eww2UbQy2Nr/FJGZzcFJnpuOsUfSCBT2U+42Kt1ZNM4e+3DM7p5wVhaxHJGM+uVuZETqyGR7vxoZ06RtAGbVAA0FMCtTtiRdLcAXmZbE6xZgDHpvQf/9mRxi6GkT29veky3Hf0ZJeIM2+fFgxyo5/RQ7u0cK2UicEZfJOLkW93uV67jvAX1KRsHJWG0Q52Adex7hAmFJh1rVQWMYbWya5SIAL3VTl0yTUYZ4rMQYQsu+0rCw3oME3UvDTPR9y18rkYOVQo8OtZdzGtaKglCV9MKPkRPmRAqn36lYjhhktUyPvrDyUKDyVStBtaEEo8ZyS8VSi6Mkz8pJQ24sw+IxXLMaSVZRchNMkacM8t4OI7G3iyQeEX9onPHMSARql+l9L0/eWbxyDvGVIb1mGNJfQDD0DoSBdkimxuWAYw1QOgZ1Xqomt5KEn6QGplQwAnxXgBDgNjXp
KhgSLv4UODuWDLPI2CMOZRR+LAiatqsiDDmybMdytCoo981iNaVo5+Lg3Xq30XAezWAwKBkR7s0mSqW966TAAZwbrFaQCUYYV/coeLlvFYGmma5iFJSVvXpSoqwULKKoRn3RGRRHOqO8QFVusjIK5YUX5g/Bz3odtXYXrg5ECRyOIbkRjrWlBGxp4DwgJYhSPdJF7N94TC2NrLODNzyCjpBeTczPUyNEPjYbswoZmVtzt6EE854zqTEI7SDMfc9zKcSIbsDW2IBrPZskxroqsf4IGUmq0ogLNRoO88MJEu/QbCibrc0zcr16YogNKrwcIxEo0SaWsyRUUYXOWB7MF08VmFVRu5AEW0OoP3rARTl/QJfOlABa2ZRy1aAiyjqnF8hVesGqyxB0U07KTM5tL9qQX0U78bMt2Zr/TGrRGSSckzH01pSSe3mW33wchqBYZpOJZc62Z3XhDf0ZeyGGzGX7cWaavF2mAbQ72Gqywh/lxUMEAMbSsGQUy0NBBjPFJL7UAaFjNRCXe7lF0bjgpeCqoTWK2KmVLzU+s7StoxjSEVNxFePDZBA3UFb9TrwRGoSkCsUdPpMdLoUdZhwoSDJYB0qkFiraQUSZs6Loup43oyBJrg9/ACIP9o6bNPLiW+4K7LEpRfC8r6WSMeYNJTBkBwpXsHSFs+k33KWIQuKUqqtxVli4TRtA53DWzGMfymNezvnYwt+YN9LhmEDyYTccBWAdWxgoMXl1ZVlXjf1/lahWi5NaP9uEtuq4pTIxF3q1Lm4y0Z/M/f0o4irkHmxnowMLORNiSMsbyz4pKzdz7TVpikM8+7C0taq7OZBiBUXe6BJQaxZKA8BKOUZ2UqxSxOSckds7VOJvfAE7lmFLCGc5Gum2GTCxQilcvWnLoIU1AapyAmrDjB3j2IBaFjawvcMnoDbMquDK7/Lsd1CgapDxaxDBR5uWge2s3P3RFmXJsVdXGMdKWz/ZLvwpAQjKkEsz3oeBTX2GXHqUTqryoAYOSsCs+CcUaNzaFfkCqF2EHoXZ02dUy4LKl0Ml+/vDld4L9mdpR2WJSgLWVscm6wKAyi5hgAoNRTLVvha3QcH+boubnebTR1wzw9gMPXOC5lmbHN0IWFR7O6lDgyyq7XZ7lQEZTQcxS+oetix1y8V0tCh7y/ZVweFGaQ3PyrpAQXeuDfJRgN82Z98QOQhnzLrJnfZhwaiOhuTMulKK57utvQFRlCmnJY/SElq5T72U0kqehIyx8YO17RUv5wuXqsmzxBc9N4wQYBy9zny1WYL9CZaNKcLEme4JzhQuryJ82NDhY4do/mIY2JvdIho0HT426iQ3/PIDpzh7vvss00bHzLoqJAQ3LU6mWvvPWm51ndVu69l0ZVcbb5WbeOH5dph/oK7MCLRfJZqZS7y+P+IjwGKvX1xXjQwln23vlklRMq5KgTYhwfT0rZI/lAdR5qbFt0wpP+J9ySXFuRLMreYoNTJQAMUbGskUKW6oVEnO//B5sJCTTETKOkj8VoPt1ZFqDqbMLS6sORiwK7Y3xcEqZ6hHoTg0KLEOSHe+3Dp7E4L78eXXT/pBSRYIZjcXx6AfWA380UfKIiyZW1GYRegKdBZrfxyie6WQXjlEaTlsaBNRWhqbvVmB+UOTUtn/Yq5gmkWL7FGwBINb78Eyww7w+Ld2r/ceoMFbcwlYUa7ChF4cs2n0UvuRe45mylx/VWGDkpQ/6edFh4IO664xjYYqw6BlAZAM7SpaJjNVBi1nKHLr+WQ4lDv0YS8r6EWl6iIGL75/CLcPYRtfAxlFFLZ2KwN9rLiqq7wyhoqsyeLwFtoMizj3tgRvGyReHamqq2uKpBbrEmGZd00MrOgiuXspot9G04332SdltxKLFfascej832GVXSRD3rVjNZAXMf5kO+/Vdh6bvUepjRunx0URm3ad3fo4DNQ6E+6j6MZRmaczSu87jryk6FRdHYeynWUxJ7oUULXR2bGHdogdqW4ywUVp2OtA+QII8ZrWcAkD6ZV25oMikHMopFq4gESSbOIaRDq8gOZz440qYb20MiZJbtZyV5miML3bHspDOEoldyPFMebm17QCVXjvhjYUoFbJ48UFLfKbDNeUTSCNfCa72BV1+JxmVpRERSavtQ2arIoQtPEYRJLkmUIp6ziADXXhBk0z1FCa/CiOb3QEY/d6210oeKB8m2eiYG6vZZpQnChIuLg3iLes+/FbE26K/g0IdyiBb1hsEJvK5l41TrU2zNqx+lYREKQiQLzhzH1VkkTSIx9w9JmuATHNi8VCEXa+Fm85VDGCGAGY+GH2xQhSuhfuRHg/C+3VhpgTmIA0Uv86PlqDt8TtQjskpgF3PXMdJ+k79GkxLyoLQceylEaWZscxNk5DEHHmQwlWleescD6ebmg2SQx5xJT0+sNtfcjii3Z7CsAYM8IYNsVJlxvzod4wBiwKXaGQHEshMnXUuRrfpU7+QYLpIv5XBfhnLiiAFIsr1iVX1xSo7zKqLoILDMCVL2uqG0EP07Dy7wG60c322+10Griftzd/h9s3njx+vGtY//mZj8wpGtpqi3YQemBqIMVtVyuyNuyBOnPP3u1W86SR3lLt1s1XAsP0O3Rfhz0BblqXVxdNiE35KIxN1vFjEznYQS46ACImab4UpIJrIWUCodJuIBDvjsyElD53D8FI8D6RzYOpS1oZbjuZHRxRdtSEkO0ka1ZSkAqdnwRlqgs4P6kECco3k6WegArfMnn4zJ9c+Mw49fc4FK2KiECW0mFozvzMXa42EQLO1nYYrB5joya7U+VQ9iyuZbN3XqO881p+9Yp81SYHDKQ0HChqdHyizskHT6WxU7UbS6l8//KxDnvy8lCBTosk4GEKr64M+urxbMAkkQRjEFpADuALN9oBJ8ZgDJpXDE1KfeuFU3B1xNa1FKbqKc/ne6tTsvKqlFWnSiWGftNqdQ5ZTUFKnB0cRkMDVLVaNwUOEuuixOHnUuJK8hUUXbIsZqOr6orEqgLNEyCYfFudeaa+HQq4vihOHzJefoiPJUs+l/HjhztUx8zb93y4Uw4eFB/Ys7eBSU8BMmg4YaPpGjJcXtg48tx1BRWfsnQuqahE2CiWhIBUa2LHNfrT7btnUx5EcVdUu1na63Ws3ewyEkx2faV0eHjPep1L9FMpWc+yR2LLOhz4iLh2ok48oXJSfOikdaPGi3C0zMMxrJkMhDguFq4uSlMlB0yyhmysAwYB1ZLSOKUC7zCxCEW1hHVAof1JycgIYsscrBigvACX3LDZXRpTkDmCW9PSvSJcd+ChgOW5R0ect5GcvH5K3Og9cSNW9eQY+rIsqUKjd8uUsXL1TQQJ5qJ20p2UaQKcFvSfCwjoA4mw/NSYExGeiJBsHn8rIlRYW4apNBOCfdHfi0xePNHfYPSHfiPiY88ZUPd7h+civ2fNa8usyWUH4eQiA/gjqUsBUmtmTkvn1p8+IzxqvRuwypWVTgUykRRduYqYyIgGGO987yFc+ZtdxjHjSbiDEbOY0Gt7u3U3xKjib7yn+M/czZEqF7s6go9ZFGZpAPakIqQnQ8C
KYXDkC8ULi0hAhs+Vb3kY228U/JnVTS+vtF4K8b7dRkB0JLIsKV9Khs017RB4SczaVUMLOpIWmrnmSNr6LqOuJ2GBHt7ykKojViqtk045iE5ZOLnjKxgSxxvSj7VChqXJkmkWOQ2Wn6tIBkiM3R3OSC1qLfLCnhc7fLbv/bUNQTxTVNpPA0bAzTM5s78RLxY4+l94xMNnY4qQV4YRHbqCyShf/OD7LmYnQbN5juvlm9S9q4d/JRcPU7xiWbgYLEn7YnTAq1aXiBD1WpMKoBRZ4FGttGWRQBdgpXGrlRblVutWMpG8kbUwiDZ+UazvV3NcWPR+V3TRpxN/vZpHr3Nrx/u0t7ejYriZmg0XyW83AB16+VPDeLXbmMoGUcjtYE4HFXX2JnOUn6rxVhOkQGaTvmRa95COk0w7ybTfQqZVcdoq4dH07Q99zQOs+rgLrZSsgnaSNy9P3qicoR7yUw8rctKBmxX/Pdxol7O556LMsZylGR4cZc6b8A9xC5SCt0F0OV2xY4kuN4zizh3rTLWSpkZBExkkDyDb8bMx5sMmZivl5jfB5dXThpwrgbPwXGcWnu/u0y76wpty8p6IMnFVL5VfXgieCM1cI+GTcgnmZVF3ea6pDso1uxdo6wUXqxNwTwh5eH6vzPgbVN5Fh/qq0AKjYKPTaHqqLGBUlxYo1AbYJ6XlywM0zFxrVh6gKqftcMGeFnKq9eenNx5JzQlt2OCLNjUnBBWN2OuTuDGOdK5LAq+1PIxOp7G2KsyE7vSsfGkNtgmiUzby27Raxh+Xq5xauqAyoSp/opSqKNBuLXfY3iB8X4PUYCLodyT971I5pW40jBtXm4C3r8wNb/Zl/acxuUerN+P3a+Ov8dPmclyuVFapg+3UzoqMv7ObUZy4FUbba2Jb+avxucNQzFUP5qcXpTGa/MFPqtWwzLAABPv2+dcPb2utAmU5vbi9e2tOnKux0n3XgiD8ee35MxIHwoY33Cb1Q081QytjLRjRbpKj7HJxFkojnBERZ7F6/cfmNbr+8TV0bv9xnNvr87v3p6CnU9BTq6CnYw1pYg/9wYC3ty/COl/d3l2r1x+Xwddvzt8flS/Lt58qCOsk7Z+DiXOoBCBcORNXLEkvPRgEMlj2JfhBXIME/2BOHl0rHAz07E4eZq9+qNeH7rGaeX2EGwRMXZbM0iOION9M40JCZDurarIBD8syQ3FWBxBtW1QtH4JF1ta0PLHIKhZpKhK7H3pmttg93kpvWEIngRhUlvImr/FS7ZZMKL8tIt+74tezkTEhtSCNuKxgXG8/blG4Fhz9bl7GBnoSXJxaK341Rmkqc3yScGkZqf0SCKwD08QuOIl/xCCxjk12V6/JUF2XSD5kp7x0VCejr4FP0GvPWqN1uH/rOy654/8=</diagram></mxfile>
2106.07175/paper_text/intro_method.md
ADDED
@@ -0,0 +1,106 @@
# Introduction
Program synthesis from examples tackles the problem of coming up with a computer program that satisfies a given set of Input-Output (IO) examples. Since the space of possible programs is large, an exhaustive search can be extremely time-consuming. Therefore, development of systems for program synthesis that can come up with a solution (program satisfying the given IO examples) within a *limited time*, such that it is practical for real-world applications, is a challenging task.
Neural-guided program synthesis systems [4, 32] try to expedite the search by using a neural network conditioned on the IO examples as a learned heuristic for the search procedure. In these systems, a neural network outputs probabilities over programs or properties of programs (e.g. functions). These probabilities are then used to guide a search such as depth-first search or beam search. These systems try to find a program that satisfies all IO examples *simultaneously*, which under most settings can be hard. What if, instead, we try to find this program in parts? To understand this motivation, imagine a programmer who is asked to write a program that satisfies a set of unit test cases. They may begin by figuring out a program that satisfies a subset of the unit test cases first, and later modify the program to incorporate other corner cases. Shi et al. [28] use this intuition to iteratively refine a program by mining fragments of Java code from partial solutions, based on a set of rules and predefined heuristics. Gupta et al. [12] also use the same intuition, but for a different application: program repair.
In this work, we consider breaking the complex problem of finding a program that satisfies all N given IO examples (called the *global solution*) into N smaller, easy-to-solve sub-problems, where each sub-problem involves finding a program satisfying only one IO example (called a *per-example*

Figure 1: **Idea of N-PEPS:** (*Left*) Illustrating the two stages of N-PEPS with an example; (*Right*) Synthesizing line 2 of $p_g$ using contributions from CA and GPS, with details of how query, keys, values and relation scores are formed. White box shows an example of obtaining a PE state embedding.
solution). The cues present in these per-example (PE) solutions are then combined to provide useful signals that can help guide the search for the global solution effectively. As a motivating example, consider the left part of Figure 1, where five IO examples are given as a specification (green box) and we need to find a global solution $p_g$ (red box) that satisfies these five examples. The first stage of our approach consists of performing per-example searches to find a program $p_i$ conditioned on the i-th IO example. In our example, we start from IO example #1 and find program $p_1$. In addition, we also check if $p_1$ satisfies any other examples (#3 in the figure). Iterating through the examples in this way results in a set of programs $(p_1, p_2, p_3)$ that, taken together, in the ideal scenario, would satisfy all five IO examples. Looking closely at the discovered PE solutions, we see that they contain fragments of the global solution. This brings us to the second stage of our approach, which addresses the challenge of how best to aggregate these PE solutions to produce a global solution. Towards that goal, we propose a neural network based architecture, which we refer to as the Cross Aggregator (CA). It is designed to learn to combine the cues present in these PE solutions in a way that helps guide the search for $p_g$. We model this aggregation using a multi-head cross-attention mechanism, which leverages the state of step-wise execution of the PE solutions and of the global solution synthesized so far (see Section 3.2 for details). Our key contributions can be listed as follows:
- We consider breaking the standard program synthesis pipeline into two stages: (a) discovering PE solutions, and (b) aggregating the PE solutions such that it leads to a global solution. We refer to our approach that uses neural networks at both these stages as *Neural Per-Example Program Synthesis* (N-PEPS).
- We propose a neural network based multi-head attention architecture called Cross Aggregator (CA) that makes use of step-wise execution information to *learn* to combine the PE cues such that it helps guide the search for the global solution.
- We demonstrate via experiments with programs of different lengths and under two different evaluation settings that, when given the same time budget, our formulation shows significant improvements in success rate when compared to PCCoder [32] (one of the leading techniques for neural-guided program synthesis) and other ablation baselines.
Suppose we are given a set $X = \{(x_i, y_i)\}_{i=1}^N = \{r_i\}_{i=1}^N$ of N IO examples and our task is to come up with a program $p_g$ that satisfies these examples. The i-th IO example $r_i$ consists of a pair of input $x_i$ and output $y_i$. The program consists of T lines (excluding lines with input variable declarations), i.e. $p_g = [p_g^t]_{t=1}^T$. To be practically meaningful, we impose the constraint that $p_g$ has to be found

Figure 2: (*Left*): Sample program along with two IO examples that forms the program state at t = 0; (*Right*): Block Diagram explaining the training of PCCoder at line 2 of the program.
within a given time budget, specified by a *timeout* value. The syntax and semantics of $p_g$ are governed by a domain-specific language (DSL). We use the DSL provided by Balog et al. [4], which contains first-order functions (e.g. SORT, REVERSE) and higher-order functions (e.g. MAP, FILTER) that can take *lambda* functions (e.g. (\*4), (<0)) as input. The inputs and outputs can be either an integer or a list of integers (see Appendix F of Balog et al. [4] for more details about the DSL). The Predict and Collect Coder (PCCoder) [32] provides state-of-the-art results for this DSL and is illustrative of methods that directly solve for all available IO examples at once. We refer to these methods as Global Program Search (GPS). We will be building on PCCoder to propose our per-example approach.
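For concreteness, a program in this DSL might look like the following sketch (DeepCoder-style syntax built from the functions named above; formatting is illustrative):

```
a <- [int]           (input: a list of integers)
b <- FILTER (<0) a   (keep the negative entries)
c <- MAP (*4) b      (multiply each entry by 4)
d <- SORT c
e <- REVERSE d       (output)
```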
PCCoder synthesizes programs one line at a time, through a model based on the notion of a *program state*. The program state is a two-dimensional memory of size $N \times (\nu + 1)$ obtained during the execution of t lines (steps) of a program on a set of N inputs. This means that for each IO example $r_i$ , there are up to $\nu$ slots for storing the input and intermediate program variables, with an additional slot for storing the output (see Appendix A.2 for more details). Note that the initial state at t=0 consists of only the IO examples (see left part of Figure 2).
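A minimal sketch of this state as a data structure (Python; the names are ours):

```python
from dataclasses import dataclass

@dataclass
class ProgramState:
    """N x (nu + 1) memory: per example, up to nu variable slots plus the output."""
    nu: int
    slots: list     # slots[i] = list of variables held for example r_i
    outputs: list   # outputs[i] = y_i

    @classmethod
    def initial(cls, examples, nu):
        # at t = 0 the state holds only the IO examples
        return cls(nu=nu, slots=[[x] for x, y in examples],
                   outputs=[y for x, y in examples])

    def store(self, i, value, drop_index=None):
        """DropExec-style update for example i: evict via drop_index when full."""
        if len(self.slots[i]) == self.nu:
            self.slots[i].pop(drop_index)
        self.slots[i].append(value)
```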
PCCoder consists of two learnable components (i.e. neural networks), $H_{\theta}$ and $W_{\phi}$ , with parameters $\theta$ and $\phi$ . $H_{\theta}$ obtains the embedding of the current program state by average-pooling the representation of the $\nu+1$ slots corresponding to individual examples (white boxes inside the state in Figure 2) into a vector of fixed size in $\mathbb{R}^Z$ , where Z denotes the embedding size (see Appendix A.2 for details of how these representations of slots are obtained). $W_{\phi}$ maps this state embedding to predictions of three quantities of interest for the next line in the program: (a) the next operator $\hat{o}^t$ (or function e.g. MAP); (b) the next statement $\hat{s}^t$ (operator along with its arguments e.g. MAP(/2) b); and (c) next drop vector $\hat{d}^t$ which represents positions of variables that can be dropped from the state. The dropping is desirable as it creates slots for storing new variables, which in turn allows for synthesizing longer programs. There is a module called DropExec which executes a given line of the program against an example $r_i$ and stores the resulting variable $c_i$ in the next available slot in the state. If all $\nu$ slots in the state are filled, a variable is dropped from one of the slots using the drop vector and $c_i$ is stored there. The updated state can then be used for predicting the next line (see right part of Figure 2). Next, we provide details of how training and inference is done in PCCoder.
**Training:** For training $H_{\theta}$ and $W_{\phi}$, several instances of a specification X and the ground-truth program $p_g$ are provided. Given an instance and line t of the program, training operates by obtaining the ground-truth values of the statement $(s^t)$, operator $(o^t)$ and drop vector $(d^t)$. The statement and operator values are represented as one-hot vectors of size equal to the number of statements $(n_s)$ and number of operators $(n_o)$, respectively, in the DSL. The drop vector is a multi-hot vector of size $\nu$ with ones at positions corresponding to the variables in the program that can be dropped, i.e. variables that don't appear in subsequent lines of the program. The step-wise loss $\mathcal L$ is the sum of the cross-entropy losses between the actual and predicted statement and operator, and the binary cross-entropy loss between each position in the actual and predicted drop vector. The task of predicting the operator is an auxiliary task, i.e. it is used only during training and not at inference time, and is found to improve the training performance. During training, to obtain the updated state, the DropExec module chooses the drop-index to be a random entry from those positions in the drop vector $d^t$ that are ones. The right part of Figure 2 illustrates the process of training at step 2.
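The step-wise loss can be sketched as follows (PyTorch; shapes assume a batch of training instances):

```python
import torch.nn.functional as F

def pccoder_step_loss(stmt_logits, op_logits, drop_logits,
                      stmt_target, op_target, drop_target):
    """stmt_logits: (B, n_s), op_logits: (B, n_o), drop_logits: (B, nu);
    stmt/op targets are class ids, drop_target is a multi-hot float vector."""
    return (F.cross_entropy(stmt_logits, stmt_target)
            + F.cross_entropy(op_logits, op_target)   # auxiliary, training only
            + F.binary_cross_entropy_with_logits(drop_logits, drop_target))
```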
**Inference:** Inference is done using complete anytime beam search (CAB) [31], where the time for search is upper bounded by the timeout value. The CAB algorithm operates by performing different beam searches repeatedly in an outer loop. The pruning conditions of the beam search (i.e., beam size, expansion size) are weakened with each iteration of the outer loop, until a solution is found. The inner loop consists of the different steps of a single beam search. At each step, the beam consists of the most promising program prefixes, with each prefix represented as a tuple of the current program state, the program synthesized so far, and the product of the probabilities of the statements in the synthesized program. To synthesize the next line of the program, prefixes are expanded by executing the statements in decreasing order of statement probabilities and taking the argmax of the drop vector probabilities. The statement and drop vector probabilities are obtained using the trained neural networks $H_{\theta}$ and $W_{\phi}$. The search terminates if we find a candidate program prefix that satisfies all N IO examples. The corresponding program is the synthesized global solution $p_g$. Note that the search may fail and not discover a global solution within the specified timeout. Appendix A gives details of the training and inference procedures, and the modules of PCCoder.
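A simplified sketch of the CAB loop; for brevity we only weaken the beam size between restarts, whereas the full algorithm also relaxes the expansion size.

```python
import time

def cab_search(initial_state, expand, satisfies_all, timeout,
               beam_size=100, grow=2):
    """Complete anytime beam search (sketch). `expand(prefix)` returns
    successor tuples (state, program, prob) for a prefix; `satisfies_all`
    checks a candidate program against all N IO examples."""
    deadline = time.time() + timeout
    while time.time() < deadline:
        beam = [(initial_state, [], 1.0)]
        while beam and time.time() < deadline:
            candidates = [succ for prefix in beam for succ in expand(prefix)]
            candidates.sort(key=lambda c: -c[2])    # product of statement probs
            for state, program, _ in candidates[:beam_size]:
                if satisfies_all(program):
                    return program                  # global solution p_g
            beam = candidates[:beam_size]
        beam_size *= grow                           # weaken pruning, retry
    return None                                     # failed within the timeout
```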
As stated in Section 1, in this work, we break the complex problem of finding a global solution $p_g$ that satisfies all N IO examples into N smaller sub-problems. Each sub-problem aims to find a program $p_i$ that satisfies only the IO example $r_i$. The cues present in these PE solutions are then aggregated to help guide the search for $p_g$. We constrain the process of breaking and combining to fit within the specified timeout value. The distribution of the total timeout between these stages is treated as a hyperparameter. In this section, we discuss our process of finding PE solutions, and follow it with a description of our neural network module that learns to combine the PE solutions.
|
| 45 |
+
|
| 46 |
+
We refer to the general framework of finding PE solutions first and later aggregating the PE cues to find a global solution as *Per-Example Program Synthesis* (PEPS). We call the module that finds PE solutions the *PE Searches* module. To train it, we use the PCCoder model as is, except that it is trained to take a single IO example as input, as opposed to all the examples in X. We call this trained model the *PE model*. We allocate a fixed *PEPS timeout*, the maximum time given to find each PE solution. The sum of PEPS timeouts across all PE solutions should be less than the total timeout, so that some time is left for the CA module to aggregate the PE cues (i.e., N × PEPS timeout < total timeout). We start from the first example and, using the PE model, try to find a solution that satisfies it. Once found, we also check whether this solution satisfies the other examples in X. We record the fraction of IO examples satisfied by $p_i$ and call it the *PE solution score* $u_i$. If $p_i$ satisfies all examples in X (i.e., $u_i = 1.0$), we stop and return $p_g = p_i$ as the global solution. Otherwise, we proceed to find the next PE solution (based on the order of examples given in X). Note that for certain examples in X, we may fail to find a PE solution within the PEPS timeout. Once we have our list of M PE solutions ($0 \le M \le N$), which ideally (but not necessarily) covers all N examples, we proceed to aggregating them. Note that when comparison with baselines is not a requirement, we can obtain a further speedup by finding PE solutions in parallel (see Appendix D.1 for more details).

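A sketch of this loop under assumed interfaces: `pe_search` wraps the PE model's CAB search, and `program.satisfies(example)` checks one IO example; both are illustrative, not the released code.

```python
def find_pe_solutions(examples, pe_search, peps_timeout):
    """PE Searches module: search per example, score each discovered
    program by the fraction of all examples it satisfies, and stop early
    if a program already solves the whole specification."""
    solutions = []                                  # list of (program, score u_i)
    for r in examples:
        p = pe_search(r, timeout=peps_timeout)      # may return None on failure
        if p is None:
            continue
        u = sum(p.satisfies(ri) for ri in examples) / len(examples)
        if u == 1.0:
            return p, solutions                     # p is already a global solution
        solutions.append((p, u))
    return None, solutions                          # aggregate these PE cues next
```
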
Notation: To formulate the program state, we define a basic unit called an *execution tuple* (ET). An ET e = (p, S, t) is a tuple consisting of a program p, a subset S of example indices in X, and a step number t. Executing the first t steps (lines) of p on every example $r_i$ for $i \in S$ yields a program state, which we denote as $\mathcal{X}(e)$. Like PCCoder, we pool the representations of the slots of the state corresponding to each example $r_i$ for $i \in S$ to obtain a state embedding (see Section 2.1), making its size independent of the size of S. To represent different combinations of programs executed against different sets of examples at different time steps, we define a list $\mathbf{e}$ of such execution tuples, with its size denoted by L. $(p_1, \{1\}, 0)$ and $(p_3, \{2\}, 2)$ in the bottom right of Figure 1 are examples of such combinations. We then execute each entry in $\mathbf{e}$ to get a list of states $\mathcal{X}(\mathbf{e})$. This is followed by embedding each entry in $\mathcal{X}(\mathbf{e})$ using $H_{\theta}$ to yield a tensor of state embeddings $\mathcal{H}(\mathcal{X}(\mathbf{e})) \in \mathbb{R}^{L \times Z}$ (henceforth referred to as $\mathcal{H}(\mathbf{e})$ for simplicity). The white box towards the bottom of Figure 1 shows an example of obtaining a single entry of a PE state embedding.

**Motivation:** To explain the motivation behind CA, consider Figure 1, which illustrates the process of synthesizing line 2 of $p_g$. Intuitively, at this step we want our aggregation mechanism to draw more contribution from line 2 of $p_1$ and $p_3$ (i.e., DROP (c, a)). A simple way of aggregating the PE solutions would be to take the sum or mean of the PE one-hot statement vectors (these form our ablation baselines, as detailed in Section 4.2). However, this strategy fails in scenarios that require a non-trivial combination of the PE solution statements, or when the global solution requires generating a new statement that is not found in any PE solution.

In this work, we propose another way of anticipating which line of $p_g$ comes next, one that makes use of the execution information of the programs. The idea is to compare the state embedding obtained before executing line 2 of $p_g$ with the PE state embeddings corresponding to each step of execution of the PE solutions. Then, based on the learned relevance of these state embeddings, the corresponding next PE program statements can form valuable cues for synthesizing the next line. In other words, if a particular PE program state has high relevance to the global program state at a given step, then the following PE program line is likely to be useful for synthesizing the next line of $p_g$. We measure this relevance with a cross-attention mechanism: the query is formed by the global program state embedding at step t, a key is formed by a PE program state embedding at step t, and the corresponding value is formed by the PE program statement at step t+1. We take a set of such keys and values to form the key matrix $\mathbf K$ and the value matrix $\mathbf V$, respectively.

**Model:** For synthesizing line t+1 of $p_g$, the query $\mathbf{Q}$ is formed from the global state embedding at step t, denoted by $\mathcal{H}(\mathbf{e}_{\mathbf{query}}^{\mathbf{t}}) \in \mathbb{R}^{1 \times Z}$, where $\mathbf{e}_{\mathbf{query}}^{\mathbf{t}} = [(p_g, \{1, 2, \dots N\}, t)]$. The keys $\mathbf{K} \in \mathbb{R}^{L \times Z}$ are formed from the state embeddings $\mathcal{H}(\mathbf{e}_{\mathbf{keys}})$ of the PE solutions. Let P denote the list of M discovered PE solutions; then the list of execution tuples $\mathbf{e}_{\mathbf{keys}} = [(p_m, \{j\}, t)]$, where $p_m \in P, j \in \{1, 2, \dots N\}, t \in \{0, 1, \dots |p_m| - 1\}$, making $L = N \times \sum_{m=1}^{M} |p_m|$. The corresponding PE solution statements form the values $\mathbf{V} \in \mathbb{R}^{L \times Z}$ (more details on how the values are obtained are given later). In addition, we have the relation scores $\mathcal{U} \in \mathbb{R}^{L \times 1}$, obtained by taking the PE solution score $u_m$ corresponding to the $p_m$ that is part of each ET in $\mathbf{e}_{\mathbf{keys}}$. Note that entries in $\mathcal{U}$ depend only on the program part of the ET, and are independent of the subset of example indices and the time index.

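The enumeration of $\mathbf{e}_{\mathbf{keys}}$ and $\mathcal{U}$ is mechanical; a small sketch (our notation, assuming `len(program)` gives its number of lines):

```python
def build_keys_and_scores(pe_solutions, N):
    """Enumerate e_keys and the relation scores U: one execution tuple per
    (PE program, single example index, step). `pe_solutions` is a list of
    (program, u_m) pairs."""
    e_keys, scores = [], []
    for p_m, u_m in pe_solutions:
        for j in range(1, N + 1):
            for t in range(len(p_m)):        # steps 0 .. |p_m| - 1
                e_keys.append((p_m, {j}, t))
                scores.append(u_m)           # depends only on the program
    return e_keys, scores                    # L == N * sum of |p_m|
```
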
We add position encodings (depending on the time-step value of each ET) to $\mathbf{Q}$, $\mathbf{K}$ and $\mathbf{V}$. This is followed by multi-headed relative attention between our keys, values and query, as described in Equation 3. For each head, we perform scaled dot-product attention [30] (Equation 1) and a form of relative attention<sup>2</sup>, i.e., we take the mean of the relation scores and the attention scores before normalizing with softmax and multiplying by the values (Equation 2).

$$Att(\mathbf{Q}, \mathbf{K}) = \frac{\mathbf{Q}\mathbf{K}^T}{\sqrt{d_k}} \tag{1}$$

$$RelAtt(\mathbf{Q}, \mathbf{K}, \mathbf{V}) = \operatorname{softmax}\left(\frac{\mathcal{U}^T + Att(\mathbf{Q}, \mathbf{K})}{2}\right)\mathbf{V} \tag{2}$$

$$MultiHead(\mathbf{Q}, \mathbf{K}, \mathbf{V}) = \operatorname{concat}(head_1, head_2, \dots, head_{\tau})W^{O}, \quad \text{where} \; head_i = RelAtt(\mathbf{Q}W_i^Q, \mathbf{K}W_i^K, \mathbf{V}W_i^V) \tag{3}$$

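For concreteness, a single head of Equations 1–2 can be written in a few lines; this is an illustrative sketch, with shapes as defined above.

```python
import torch

def rel_att(Q, K, V, U):
    """Single-head relative attention (Equations 1-2): average the relation
    scores with the scaled dot-product scores before the softmax.
    Shapes: Q (1, d_k), K (L, d_k), V (L, d_v), U (L, 1)."""
    d_k = K.shape[-1]
    att = Q @ K.T / d_k ** 0.5                       # Equation 1, shape (1, L)
    weights = torch.softmax((U.T + att) / 2, dim=-1)
    return weights @ V                               # Equation 2, shape (1, d_v)
```
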
In the equations above, $d_k$ is the dimension of the key, $W_i^Q$, $W_i^K$, $W_i^V$ are the query, key and value projection matrices, $\tau$ is the number of heads, and $W^O$ is the linear projection that combines the heads. The output from Equation 3 is fed to a position-wise fully-connected feedforward network. We employ a residual connection [13] followed by layer normalization [3] before and after the feedforward network. The resulting encoding is then linearly projected, and softmax is applied to get the prediction of the statement for line t+1 of $p_g$. Our model thus resembles one layer of the transformer encoder block [30]. Since the keys and query come from different sources, we refer to our model as a *cross* aggregator. Like standard transformers, we can stack multiple blocks of CA; however, since we operate under a low timeout (5s), we opted for a simple network consisting of only one layer. Details of model parameters can be found in Appendix D.3.

<sup>2</sup>Note that our formulation of relative attention differs from the formulations used in Shaw et al. [27] and Hellendoorn et al. [14], where the relation scores are added either to the query or to the values.

**Obtaining V:** For a key corresponding to an ET consisting of the PE solution $p_m$ and having step index t, the value is associated with the statement vector (a one-hot vector of size $n_s$) for step t+1 of $p_m$. Putting together the statement vectors for all execution tuples that are part of $\mathbf{e}_{\mathbf{keys}}$, we get a tensor $\mathbf{p}_{\mathbf{values}}$ of size $L \times n_s$. Embedding each entry of this tensor using an embedding layer $F_{\gamma}$ gives us $\mathbf{V} = F_{\gamma}(\mathbf{p}_{\mathbf{values}})$ of size $L \times Z$, which is fed as input to the model described above. The output from the model is then linearly projected to give the logits of the statement predictions $\in \mathbb{R}^{n_s}$ for step t+1 of the global program $p_g$. In addition to the statement predictions, we can also obtain operator predictions $\in \mathbb{R}^{n_o}$, starting from the operator vector (a one-hot vector of size $n_o$) and following a process similar to that for statements, except that we use a different embedding and final projection layer. The right of Figure 1 shows an example of how a query (top) is combined with keys, values and relation scores (bottom) in our model.

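A shapes-only sketch of this values pipeline; the sizes and layer names are arbitrary stand-ins, not the released configuration.

```python
import torch
import torch.nn as nn

# One-hot statement vectors for step t+1 of each PE solution, embedded to
# size Z; after attention, a final projection maps back to statement logits.
L, n_s, Z = 40, 1300, 256
p_values = torch.eye(n_s)[torch.randint(n_s, (L,))]   # (L, n_s) one-hot rows
F_gamma = nn.Linear(n_s, Z, bias=False)               # embedding layer F_gamma
V = F_gamma(p_values)                                 # (L, Z) value matrix
statement_proj = nn.Linear(Z, n_s)                    # final projection to R^{n_s}
```
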
The two main components of N-PEPS, the PE Searches module and the Cross-Aggregator, are trained separately. To create samples for training the PE model, we take one data point ($X = \{r_i\}_{i=1}^{N}$ and $p_g$) from the GPS approach and create N data points out of it. Since we do not have supervision for the PE solutions, for every example $r_i$ in X we use $p_g$ as a proxy for the ground-truth PE solution. We believe that using $p_g$ as proxy supervision, even though it is not entirely correct, forces the PE search component to avoid overfitting to a single example, and hence makes it more likely to produce PE solutions that generalize to examples beyond those given in the specification (see Appendix D.2 for more details).

For training the CA module, we generate data points that we call aggregator instances. Each aggregator instance consists of X, a list Y of tuples of PE solutions $p_i$ and corresponding PE solution scores $u_i$, and the global program $p_g$. The $p_i$'s and $u_i$'s are generated via CAB from a trained PE model (more details on how they are generated are in Appendix C.2). Given X and Y as input, the objective is to learn the parameters of the CA module such that the output is the line-wise statement and operator predictions corresponding to $p_g$. The net loss at step t is the sum of two terms: (a) a cross-entropy loss between the predicted statement $\hat{s}^t$ (obtained from CA) and the actual statement vector $s^t$ (the statement at step t of $p_g$); and (b) a cross-entropy loss between the predicted operator $\hat{o}^t$ and the actual operator vector $o^t$. As in PCCoder, the operator loss is used as an auxiliary loss to improve training. Note that for each aggregator instance, since we have X and Y to begin with, we need to compute the keys and values only once; the query, however, has to be computed at each step of the global program's execution. While training, since $p_g$ is known, we can use teacher forcing and increase efficiency by batching, where an element in the batch corresponds to one step of execution of $p_g$.

The process of inference in PEPS is the same as in PCCoder (see Section 2.1), except that in addition to the contribution from GPS, we add another term that accounts for the contribution from CA. The contribution from GPS is obtained using a GPS model trained as in standard PCCoder. The net value of the predicted statement at step t is then obtained by taking a weighted combination of the statement prediction from the trained GPS model, $\hat{s}_{1-\alpha}^t$, and the statement prediction from the trained CA module, $\hat{s}_{\alpha}^t$. For predicting the drop vector $\hat{d}^t$, we take contributions only from GPS. When $\alpha=0$, our approach becomes equivalent to GPS.

$$\hat{s}^t = \alpha * \hat{s}^t_{\alpha} + (1 - \alpha) * \hat{s}^t_{1-\alpha}, \qquad \hat{d}^t = \hat{d}^t_{1-\alpha} \tag{4}$$

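Equation 4 amounts to a two-line blending step; a minimal sketch, with `s_ca` and `s_gps` standing for the two statement distributions:

```python
def combine_predictions(s_ca, s_gps, d_gps, alpha):
    """Equation 4: blend the CA and GPS statement predictions; the drop
    vector comes from GPS alone. alpha = 0 recovers plain GPS."""
    s_hat = alpha * s_ca + (1 - alpha) * s_gps
    return s_hat, d_gps
```
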
We perform CAB until we find a global solution or exceed the specified timeout. The right part of Figure 1 illustrates the steps involved in synthesizing step 2 of $p_g$.

# Method
In addition to the standard GPS baseline (PCCoder [32]<sup>3</sup>), we experimented with three ablation baselines that represent simple ways of aggregating the PE solutions without making use of the program state; hence, they help us understand the role of the PE cues alone. These baselines, along with two variants of our model, are described after Figure 3.

<sup>3</sup>We used the implementation from PCCoder [32], at https://github.com/amitz25/PCCoder (MIT License), for data generation and for obtaining results for PCCoder.

| Model | Success Ratio |
|--------------------------|------------------|
| PCCoder [32] | $77.75 \pm 0.38$ |
| Sum-PEPS | $82.71 \pm 0.32$ |
| Mean-PEPS | $82.68 \pm 0.33$ |
| Mean-PEPS+$\mathcal{U}$ | $82.70 \pm 0.32$ |
| N-PEPS | $86.22 \pm 0.25$ |
| N-PEPS+$\mathcal{U}$ | $87.07 \pm 0.28$ |

*(The center panel — Success Ratio (%) vs. Time Taken (s) for GPS, Sum-PEPS, Mean-PEPS, Mean-PEPS+U, N-PEPS and N-PEPS+U — and the right panel — attention scores at t = 2 and t = 3 over the PE solutions $p_1$, $p_2$, $p_3$ — are plots that do not survive text extraction.)*

Figure 3: **Results for E1**: (*Left*) Success Ratio with standard error for all models (top row = GPS); (*Center*) Success Ratio vs. time taken; (*Right*) Visualization of attention scores for N-PEPS+$\mathcal{U}$

These baselines are: (i) **Sum-PEPS:** replacing the contribution from the CA module in Equation 4 with a module that combines the PE solutions by taking the sum of all PE one-hot statement vectors; (ii) **Mean-PEPS:** same as (i), except that the sum is replaced by a mean; (iii) **Mean-PEPS+U:** same as (ii), except that the one-hot PE statement vectors are multiplied by their corresponding solution scores before taking the mean. To understand the benefit of aggregating with our proposed CA architecture on top of the value brought by the PE cues, we experimented with the following variations: (i) **N-PEPS:** our neural model of PEPS described in Section 3.2, with $\mathcal{U}$ being a zero tensor; (ii) **N-PEPS+U:** same as (i), but with $\mathcal{U}$ included. Complete details of the hyperparameters for all methods can be found in Appendix D.
2110.03888/main_diagram/main_diagram.drawio
ADDED
@@ -0,0 +1 @@
<mxfile host="app.diagrams.net" modified="2021-10-01T14:44:04.747Z" agent="5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/93.0.4577.63 Safari/537.36" version="15.4.0" etag="E66bxpzOB_jFFEa842l7" type="github"><diagram id="Gw1GahR13AaIXd8cvcJe">7V1bk6I4GP01Po5FboCP3T2XfditmqqprZl5ZCUqu7S4iNP2/voNkohJsDvSIWBjv7QJEEPOSb7jx4lO0MPj/ksebVZ/ZDFNJ9CL9xP0cQIhCCBh/8qa56qGhLOqYpknMT+prviW/Ed5pcdrd0lMt9KJRZalRbKRK+fZek3nhVQX5Xn2JJ+2yFL5XTfRkr+jV1d8m0cp1U77nsTFqqoNYVDX/0aT5Uq8M/D5/T1G4mTe8HYVxdnTSRX6NEEPeZYV1avH/QNNy8ET41Jd9/nM0WPHcrouTC6A1QW/onTH7433q3gWN5tnu3VMy/O9Cbp/WiUF/baJ5uXRJwYvq1sVjykrAfZyka0LjhcIWDmOtqvDtYeDSZo+ZGmWs3JMF9EuZX285z2geUH3Z+8CHMeGkYpmj7TIn9kp/IIPghiCTz4vP9Xo+CGvW50iI3CIOCOWx7brQWMv+Lg1jyF6l2OIgOEYQmxhDPHrY0hjNgd5McuLVbbM1lH6qa69l0e5Puf3LNvw4fubFsUzH9xoV2Qvj3zVh/KNpUHdZrt8zqv4SlZE+ZKKcfaMxz6naVQkv+Tm3zKORBvHzxPoHziy3URraUT9f3flMnO46w/bw23fsRMA2ezrg+zVsvzviWZYD6qWqnodpjRlyzB9nebRdlOtzYtkX4ImUXsCUUxoGOOy40We/UNPjoTwL+T7Cl6hnWmAwZTI8wDq8wA3TAMbs8DX0Lu/oXcJejNlDQvdYRdo2N1dK3aLxQLO503Yxf5fPnGDHQlmzrALNey+O8auDl3gMoEQtoLPAlxopiyUPmgADDYABt8O2GyQcoGNXP78g7d3KPwsC2ycePHj/vTgx2deMpAZQlNIOsPvT2eI/tgXGmAkoQoQJVY51BkADHL+mMwDqM+DPqeB/vnZkmIbyzRg968KboeiDeif3S2pNvf49aPadPxcCjeg5w0sKTdj/K5PuQEc9ibcAOkj0NB9Uvw4eX0iy1ipVmVlQYiytmKuTVDz9aCGiP2oxi/9miWsxSMdiK+twJjIjVQd49cpYB87Yoa/nuKwpBvhWAKm36Nu1JMc16IbQ32KCfHWi3DUUw6WhONY5gFSn1e4lI16/sGSbHSPXj+yUUXPpWiEeu7Ckmg0Ru/6RCMk/YlGOMx0xVEhgssUokG4Qg2KUISMPhQh9hRFWHXWhiIUWL4I7jq+K20brDRPo+02mb+CDNf7tcb/eSrxX9H7NYA/T8E9g6b0dN4A2gYhYi71TyYbaVgdRd0b8QdYXpx94hmhrzWElYawarew98HCxIXhlkaOSWSeBXVDIqQ8z8PBFZDIxIZihUTgRRJdSoeGaHG0ot3o0J4OupvmXYcmkU1taWZyQyQ8k2MTCtvxSOVjhzTSc14X00j2APYWp9qSyDzT4oZEUHn4SsKWqxHylIZQdzQySb7Zp9HLkerc0uRZIBJsIBIaGJHUsOZ7lsIaUTW3RSLpucchrUeXxTgDIomVvaV35kaks0QysWG9m8DWSCPznMyNRmf3T+j52Pcd2JqeRc1uRHo7kUzyxFaIdDmN7OeIA51Ft9XIAongmEgkBLW0FuGB0QgAMg1JEPoYBT70wxmWWQXRFGB2K4S9ChBQ2jflWKBwNUCdUcxCrvqKKDbTKVbvqx0KxeDLFENWKBYqFAu7o9jVbqhEDTsqsTldrDt1UGdbKtcfxmLyVsO3S88a6mxT5Yjw63FfJepsY+V48cPEIX6dba68AD+bfisDCDtYMo8QOvBboVkfWqGtBFU1hgaLd/gzUx9i1CX1AftTH6I/HaiPkax9hPSnPfAwjYsm8wDq8wD1uNFS9KcDFTeSeeB7/Wk43Nk2y7Gi51LB4c42WRqjd336jSibLF3qNzwy3xpqeKB2wf5JNxlGRgj1C1Pa2qpDrakO3WvYgnvteozVjVQamlckmGn4t3XTuqWSBQebdXu1cXS45GNk0/4dNDRL9tXSyIJ/7ZrCm9BfLZ+IOLJlk0ANb22d2VjnZYdkGpWHrZFK5uktN1TyiYp/a3t24GlNdWfQFrf/LnxsnYTFpqwSHpqlWw+LrW1Leljs0LhE3o/7rRtN1mCYQ0NzzDVoMr+t2UTXZKpvxSL54I18L658DT47EYwHQz4C8RSdmqBk9oRY8kB5fjte4kCPybgzXuqZ5+l0qlGzoPtCJqOM9jpbU4UavCpKk+W6ZDSDkLL6+zJrmMyj9I4feEziOD2Xd5Yf7ZybAJDYyUki1YbT8BX/TUlkGz+SQPQk8niBwOp3FwUOgdCTw1++/nllQLz5sYpY07DDgff1iOjuqw8v+cygDvSbIh9p8HOSDr7c0BgGPQn5MFr+H394yT7/WbH+GaMqctc/BoU+/Q8=</diagram></mxfile>
2110.03888/main_diagram/main_diagram.pdf
ADDED
Binary file (39.4 kB).

2110.03888/paper_text/intro_method.md
ADDED
@@ -0,0 +1,46 @@
# Introduction
The rapid growth of foundation models trained on broad data has contributed greatly to the development of the whole deep learning and artificial intelligence community. Foundation models with self-supervised learning on big data have become an emerging paradigm of artificial intelligence systems [\[3\]](#page-9-0), as they mostly possess high transferability to a wide range of downstream tasks and even multiple modalities. The scale of foundation models across domains, including natural language processing, computer vision, and cross-modality representation learning, has grown tremendously from millions to trillions of parameters [\[9;](#page-10-0) [26;](#page-11-0) [27;](#page-11-1) [29;](#page-11-2) [36;](#page-12-0) [40;](#page-12-1) [4;](#page-9-1) [32;](#page-11-3) [17;](#page-10-1) [13;](#page-10-2) [19;](#page-11-4) [50;](#page-13-0) [49;](#page-13-1) [45;](#page-12-2) [47;](#page-13-2) [41\]](#page-12-3), thanks to concurrent advances in distributed training frameworks [\[23;](#page-11-5) [30;](#page-11-6) [34;](#page-12-4) [31;](#page-11-7) [2;](#page-9-2) [14;](#page-10-3) [33\]](#page-12-5) and hardware design, and these studies have demonstrated the neural scaling law [\[15\]](#page-10-4). However, training these transformer-based models incurs high financial costs and even environmental damage due to the massive carbon footprint. Training extreme-scale models with a decent amount of resources but with high efficiency should therefore be a fundamental goal for both the research and industrial communities, promoting the progress of greener AI [\[25;](#page-11-8) [37\]](#page-12-6).

Generally, there are two tracks of research in large-scale pretraining: dense models and sparse expert models. A typical case of a large-scale dense model is GPT-3 [\[4\]](#page-9-1), a 175-billion-parameter transformer trained with 10,000 GPUs for months, incurring striking financial and environmental costs. Researchers have been looking for methods for training large-scale models at a decent cost. Solutions include effective management of memory with gradient and optimizer state partitioning [\[30\]](#page-11-6), or more efficient model parallelism and pipeline parallelism [\[40;](#page-12-1) [23;](#page-11-5) [14\]](#page-10-3). A series of follow-up studies applies those techniques to realize fast training of 10-billion-parameter transformers with hundreds of GPUs in 1–2 months [\[19;](#page-11-4) [49;](#page-13-1) [47;](#page-13-2) [41\]](#page-12-3). Sparse expert models with large model capacity are capable of fast training owing to the combination of data parallelism and expert parallelism [\[38;](#page-12-7) [17;](#page-10-1) [13;](#page-10-2) [35\]](#page-12-8), and it is even feasible to train a 1-trillion-parameter transformer with no more than 500 GPUs [\[45\]](#page-12-2).

<sup>\*</sup>Equal contribution.
<sup>†</sup>Corresponding author.

Be that as it may, a question arises: is it possible to train an extreme-scale model with only a decent amount of resources, e.g., training a 10-trillion-parameter model with 500 GPUs? Such training requires large memory for parameters, including weights, gradients, and even optimizer states. Tackling the problem requires utilizing external memory beyond GPU memory, for instance CPU memory or even NVMe storage [\[34;](#page-12-4) [31\]](#page-11-7). These methods resolve the problem of the high memory footprint, but their extra cost is low training efficiency, caused by frequent swapping in and out between memories.

In this paper, we provide a solution to training large models that require a high memory footprint, and we demonstrate a successful practice of pretraining unprecedented extreme-scale models with over 10 trillion parameters, an order of magnitude larger than the previous state of the art [\[13;](#page-10-2) [45\]](#page-12-2). The whole pretraining was conducted on only 512 NVIDIA V100 GPUs and lasted around 10 days. A simple and effective training strategy called "Pseudo-to-Real" enables sharing and then delinking parameters. This training strategy is compatible with architectures built by stacking layers with an identical structure, including dense models like GPT [\[26;](#page-11-0) [27;](#page-11-1) [4\]](#page-9-1) and BERT [\[9\]](#page-10-0), or sparse expert models like M6 [\[19;](#page-11-4) [45\]](#page-12-2). It is essentially a two-stage training procedure: in the first stage, we apply cross-layer parameter sharing, which requires a much smaller memory footprint, for efficient convergence; in the second, we delink the parameters for better performance. We first train a relatively small model that nevertheless has the computation graph of a large one, using cross-layer parameter sharing; we name it the "Pseudo Giant". We then build a correspondingly large model and delink the parameters of the shared layer for second-stage model initialization. In this way, we achieve fast convergence in the first stage, as training costs much less memory and speeds up with large batches; parameter sharing, which reduces communication overhead, improves training speed as well. The second-stage training is responsible for the final convergence to better performance.

We unlock the secret of pretraining an unprecedented extreme-scale model with over 10 trillion parameters on the limited resource of 512 GPUs. Compared with the previous M6-T on around 500 GPUs, we do not significantly increase computational resources, yet we level up the model scale by an order of magnitude. Besides applying the "Pseudo-to-Real" training strategy, we provide a faster offloading mechanism for both managing CPU memory for parameter storage and keeping GPUs utilized. We successfully train M6-10T within 10 days, reaching strong performance in log-perplexity evaluation and outperforming the baseline M6-T.

Contributions at a glance:

- We illustrate the difficulty of training extreme-scale models on limited resources and provide a simple but effective solution called "Pseudo-to-Real". Upstream and downstream evaluations demonstrate the effectiveness of the strategy.
- We further demonstrate a successful practice of pretraining a 10-trillion-parameter model on 512 GPUs, reaching strong performance within 10 days.

# Method
The choice of model architecture depends on several factors. First, the architecture should consist of a sequence of stacked layers, as this sequential structure enables cross-layer parameter sharing. We prefer a simple encoder or decoder architecture over an encoder-decoder framework, where cross-attention brings extra parameters and complicates activation checkpointing. Second, a model of this architecture should be compatible with different types of downstream tasks, including understanding and generation, and ideally with multiple modalities. Third, as dense models and sparse expert models are the two main tracks of large-scale pretraining, we prefer a model that can flexibly be instantiated as either a dense or a sparse expert model. We therefore select M6 [\[19\]](#page-11-4), and we evaluate the effects of our method on M6 models of different scales and types.

M6 is built by stacking transformer layers, each consisting of self-attention and a feed-forward network (FFN). To transform the dense model into a sparse expert model, one only needs to replace the FFN layers with Mixture-of-Experts (MoE) layers. An MoE layer consists of multiple experts, which are usually FFNs distributed on different devices. A gating network decides the dispatching and combining behavior for each token, so tokens can be processed on diverse devices. This mechanism combines data parallelism and expert parallelism, and is thus highly efficient despite the large model capacity. For training, to realize learning of both understanding and generation, the model is trained with text denoising and language modeling on plain-text data, and with image-based text denoising and image captioning on multimodal data. The model is compatible with different types of downstream tasks and can process information from multiple modalities.

This section details the "Pseudo-to-Real" two-stage training strategy, which enables fast training of transformer models that would otherwise require a high memory footprint. In the first stage, we train a model with many fewer parameters but the computation graph of a large one (the "Pseudo Giant"); in the second stage, we train the corresponding large model (the "Real Giant"), initialized with the delinked weights of the shared layer. Hence the name "Pseudo-to-Real"; the general idea is illustrated in Figure [1](#page-2-0).

The core of the "Pseudo" stage is to train a Pseudo Giant that shares parameters across layers. Cross-layer parameter sharing has proved successful in maintaining satisfactory performance while keeping a much smaller number of parameters. The method was first mentioned in the original Transformer paper [\[42\]](#page-12-12), and Dehghani et al. [\[8\]](#page-10-9) and Bai et al. [\[1\]](#page-9-3) both showed that it is effective for the vanilla encoder-decoder transformer. Lan et al. [\[16\]](#page-10-10) introduced the method to pretraining and proposed a lite BERT with different sharing techniques. Owing to its effectiveness, we introduce it to training an extreme-scale model, and we hypothesize that the first-stage training benefits from cross-layer parameter sharing, as it reduces communication overhead and consumes a much smaller memory footprint. Also, as the Pseudo Giant has far fewer parameters and is not memory-bound, it can be trained with large batches for acceleration.

Suppose we build an M6 model with L layers that share parameters across all layers. Although the Pseudo Giant has the computation graph of an L-layer transformer, its weight parameters and optimizer states amount to 1/L of those of the original model. As for the gradients, we can accumulate the gradients of each layer during the backward pass, so the amount of gradient memory becomes 2/L of the original. Such memory savings enable much faster training with larger batches; it is even possible to use fewer devices, thanks to the lower memory consumption.

This can also be applied to MoE models, as their architecture likewise stacks transformer layers. However, unlike dense models, MoE models partition their weights across all devices and redistribute token representations by sparse activation, which limits the flexible use of GPU resources. For dense models, one can choose different numbers of GPU devices at different stages of training; in contrast, restoring MoE model parameters requires the same number of GPU devices as used in the previous stage. To tackle this problem, we take advantage of the memory efficiency of the Pseudo stage by placing more experts on each GPU, and later partition the experts across more GPUs. Suppose we train a model with 512 experts at each MoE layer: we can train a Pseudo Giant with only 256 GPUs, with 2 experts on each GPU, and then train a Real Giant on 512 GPUs, with only 1 expert on each GPU.

We name a large model without cross-layer parameter sharing a "Real Giant", in contrast to the Pseudo Giant. The two models share a computation graph but have different numbers of parameters. In this work, we show how to build a connection between them: given a Pseudo Giant fully trained until convergence, we delink the cross-layer shared parameters to accelerate Real Giant training. There is no need to train the large model from scratch; it can start its convergence from a low perplexity.

The embedding initialization can be restored directly, but the layer weights require special treatment. In practice, there is only one layer of weights $\theta_{shared}$ in the Pseudo Giant, while there are L layers of weights $\{\theta_1, \theta_2, \cdots, \theta_L\}$ in the Real Giant. Thanks to their identical structure, each layer of the Real Giant can be initialized with $\theta_{shared}$. Without further training, this model is equivalent to a fully-trained Pseudo Giant.

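A minimal sketch of this two-stage setup, assuming a plain PyTorch encoder stack; this is our illustrative re-implementation of the idea, not the released training code, and all sizes are arbitrary.

```python
import copy
import torch.nn as nn

L = 24                                      # arbitrary depth
shared_layer = nn.TransformerEncoderLayer(d_model=512, nhead=8)

class PseudoGiant(nn.Module):
    """Stage 1: one shared layer reused at every depth, so the computation
    graph is L layers deep while the weights are 1/L of a Real Giant's."""
    def __init__(self, layer, num_layers):
        super().__init__()
        self.layer, self.num_layers = layer, num_layers
    def forward(self, x):
        for _ in range(self.num_layers):    # same weights at every depth
            x = self.layer(x)
        return x

def delink(pseudo):
    """Stage 2 initialization: copy theta_shared into every Real layer."""
    return nn.Sequential(*[copy.deepcopy(pseudo.layer)
                           for _ in range(pseudo.num_layers)])

pseudo = PseudoGiant(shared_layer, L)       # stage 1: train this
real = delink(pseudo)                       # stage 2: continue training this
```
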
This extremely simple training strategy is highly beneficial for large models that require a high memory footprint, especially extreme-scale models like the 10-trillion-parameter M6. Because the first stage converges quickly and saves much time, lower efficiency becomes acceptable in the remaining training, so a decent amount of computational resources suffices. In the practice of training an extreme-scale M6, we therefore apply CPU offloading to utilize CPU memory. This allows us to use a limited amount of resources, e.g., 512 GPUs, to efficiently train an unprecedented 10-trillion-parameter model, an order of magnitude larger than the state of the art.

A question naturally emerges: when should we switch from the Pseudo stage to the Real stage? As mentioned above, the greatest advantage of the Pseudo stage is its significantly faster convergence. Yet the performance of the Pseudo Giant is bounded by its limited number of parameters, and training it until final convergence would waste much time.

In practice, we present a simple strategy to determine the training step at which to switch from Pseudo to Real, based on convergence speed. During the Pseudo stage, we evaluate a training step at a fixed interval by tentatively transferring to the Real stage and training for a short while (e.g., 30 minutes). After that, we revert the model parameters to the evaluated step and continue the Pseudo stage for the same training time as the Real-stage probe. If the rate of loss decrease in the Real stage surpasses that of the Pseudo stage, we take the evaluated training step as the best switching point for the next-stage training.
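
A sketch of this switching test; `pseudo` and `probe_real` are assumed training handles (responsible for checkpointing and reverting), not a real API.

```python
def find_switch_step(pseudo, probe_real, eval_interval, probe_time):
    """Every `eval_interval` Pseudo-stage steps, probe the Real stage for
    `probe_time`, revert, and train the Pseudo stage for the same time;
    switch once the Real stage's loss decreases faster."""
    step = 0
    while True:
        pseudo.train(steps=eval_interval)
        step += eval_interval
        real_drop = probe_real(pseudo.snapshot(), time=probe_time)
        pseudo_drop = pseudo.train(time=probe_time, revert_after=True)
        if real_drop > pseudo_drop:
            return step                     # best switching point
```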
2110.08387/main_diagram/main_diagram.drawio
ADDED
The diff for this file is too large to render.

2110.08387/main_diagram/main_diagram.pdf
ADDED
Binary file (55.7 kB).

2110.08387/paper_text/intro_method.md
ADDED
@@ -0,0 +1,122 @@
# Introduction
It remains an open research question whether external knowledge is needed for commonsense reasoning. On one hand, a substantial body of prior work has reported that integrating external knowledge can help improve task performance [\(Mitra et al.,](#page-9-0) [2019;](#page-9-0) [Bian et al.,](#page-8-0) [2021,](#page-8-0) *inter alia*), especially if the knowledge is high quality (e.g. hand-crafted by experts). On the other hand, recent leaderboards are often dominated by large-scale pretrained models that are fine-tuned on a target benchmark [\(Khashabi](#page-9-1) [et al.,](#page-9-1) [2020;](#page-9-1) [Lourie et al.,](#page-9-2) [2021\)](#page-9-2), suggesting that the benefits of external knowledge may wash away as the underlying models increase in size and are pretrained on ever larger amounts of raw text.
Even if external knowledge is found to be effective on a particular task, *flexibility* remains a fundamental hurdle to integrating external knowledge, as many benchmarks currently lack appropriate knowledge bases with sufficient coverage. Furthermore, prior methods often require task-specific, custom supervision for knowledge integration [\(Mitra et al.,](#page-9-0) [2019;](#page-9-0) [Chang et al.,](#page-8-1) [2020\)](#page-8-1), introducing a burden for rapidly adapting new pretrained models to a wide variety of tasks.

<span id="page-0-0"></span>

Figure 1: Generated knowledge prompting involves (i) using few-shot demonstrations to generate question-related knowledge statements from a language model; (ii) using a second language model to make predictions with each knowledge statement, then selecting the highest-confidence prediction.

In this paper, we investigate whether external knowledge can be helpful for commonsense reasoning, even on top of the largest state-of-the-art pretrained models (e.g. T5-11b [\(Raffel et al.,](#page-9-3) [2019\)](#page-9-3) and its variants), with a focus on four recent commonsense benchmarks. To facilitate easier adaptation with any zero-shot or finetuned models, we propose an approach that does not require access to a structured knowledge base or joint finetuning for knowledge integration.
The key insight behind our method, Generated Knowledge Prompting (sketched in [Figure 1\)](#page-0-0), is that we can generate useful knowledge from a language model, then provide the knowledge as an input prompt that is concatenated with a question. To support a variety of settings without finetuning, the quality and flexibility of the knowledge is crucial. We propose a simple, yet effective, method that elicits *knowledge statements* (i.e., knowledge expressed as natural language statements) from generic language models in a few-shot setting. Compared to prior work that elicits knowledge via clarification questions [\(Shwartz et al.,](#page-10-0) [2020\)](#page-10-0) or contrastive explanations [\(Paranjape et al.,](#page-9-4) [2021\)](#page-9-4), our approach can generate knowledge flexibly, beyond the scope of pre-defined templates [\(Table 1\)](#page-1-0).

<span id="page-1-0"></span>
| Dataset | Question / Knowledge | Prediction | Score |
|------------|--------------------------------------------------------------------|--------------------|-------------|
| NumerSense | the word children means [M] or more kids. | one | 0.37 / 0.35 |
| | The word child means one kid. | two | 0.91 |
| CSQA | She was always helping at the senior center, it brought her what? | feel better | 0.97 / 0.02 |
| | People who help others are usually happier. | happiness | 0.98 |
| CSQA2 | Part of golf is trying to get a higher point total than others. | yes | 1.00 / 0.00 |
| | The player with the lowest score wins. | no | 1.00 |
| QASC | Sponges eat primarily | cartilage | 0.95 / 0.00 |
| | Sponges eat bacteria and other tiny organisms. | krill and plankton | 0.99 |

Table 1: Examples where prompting with generated knowledge rectifies model prediction. Each section shows the correct answer in green, the incorrect answer in red, and the prediction scores from the inference model that only sees the question (top) and the same model that sees the question prompted with the given knowledge (bottom).
Experiments show that our method improves both zero-shot and finetuned models on numerical commonsense (NumerSense [\(Lin et al.,](#page-9-5) [2020\)](#page-9-5)), general commonsense (CommonsenseQA [\(Talmor](#page-10-1) [et al.,](#page-10-1) [2019\)](#page-10-1), CommonsenseQA 2.0 [\(Talmor et al.,](#page-10-2) [2021\)](#page-10-2)), and scientific commonsense (QASC [\(Khot](#page-9-6) [et al.,](#page-9-6) [2020\)](#page-9-6)) benchmarks, setting a new state-of-the-art on three of these datasets. It outperforms the template-based knowledge generation method *self-talk* [\(Shwartz et al.,](#page-10-0) [2020\)](#page-10-0), while performing comparably to retrieval-based systems.

We find that three factors contribute to the performance of generated knowledge prompting: (i) the *quality* of knowledge, (ii) the *quantity* of knowledge, where performance improves with more knowledge statements, and (iii) the strategy for integrating knowledge during inference. Our qualitative analysis suggests that the generated knowledge statements cover a variety of types and can transform commonsense question answering into explicit reasoning procedures, e.g., deduction, that are supported by off-the-shelf and finetuned language models.

A multiple-choice commonsense reasoning task involves predicting an answer $a \in A_q$ given a question $q \in Q$, where the set of choices $A_q$ is finite and can vary by question, and both questions and answers are variable-length text sequences. Our method answers commonsense questions in two steps.

The first step is *knowledge generation*, where we use a language model pG(k|q) to generate knowledge statements conditioned on the question:
$$K_q = \{k_m : k_m \sim p_G(k|q), m = 1 \dots M\},$$

where each knowledge statement $k_m$ is a variable-length text sequence. Intuitively, each statement contains information that is helpful for answering the question (e.g. [Table 1\)](#page-1-0).

The second step is *knowledge integration*, where generated knowledge is integrated into the decision process of a language model used for inference,
$$\hat{a} = \arg\max_{a \in A_q} p_I(a|q, K_q).$$
In contrast, the *vanilla* setting of using the inference model without knowledge is represented by $\hat{a} = \arg\max_{a \in A_q} p_I(a|q)$.

Next, we describe the knowledge generation and integration steps in detail.
We generate question-related knowledge statements by prompting a language model. The prompt consists of an instruction, a few demonstrations that are fixed for each task, and a new-question placeholder. The demonstrations are human-written, and each consists of a question in the style of the task and a knowledge statement that is helpful for answering this question. For a given task, we write five demonstrations using the format in [Table 2.](#page-2-0)
We write questions (or select them from the training set, when available) that are representative of challenges posed by the task (e.g., numerical commonsense, scientific commonsense). We pair each question with a knowledge statement that turns the commonsense problem posed by the question into an explicit reasoning procedure, without directly answering the question. For example, the knowledge statement *Birds have two wings. Penguin is a kind of bird.* is helpful for the question Penguins have \<mask\> wings, because it turns the problem into deductive reasoning. Meanwhile, *Penguins have two wings.* would be a poor knowledge statement to demonstrate, according to our guideline.

<span id="page-2-0"></span>
| Task | NumerSense | QASC |
|--------|------------------------------------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------|
| Prompt | Generate some numerical facts about objects. Examples: | Generate some knowledge about the input. Examples: |
| | Input: penguins have \<mask\> wings.<br>Knowledge: Birds have two wings. Penguin is a kind of bird. | Input: What type of water formation is formed by clouds?<br>Knowledge: Clouds are made of water vapor. |
| | Input: a typical human being has \<mask\> limbs.<br>Knowledge: Human has two arms and two legs. | Input: The process by which genes are passed is<br>Knowledge: Genes are passed from parent to offspring. |
| | Input: {question}<br>Knowledge: | Input: {question}<br>Knowledge: |

Table 2: Prompts for knowledge generation for two of our tasks, NumerSense and QASC. The prompt consists of an instruction, five demonstrations of question-knowledge pairs, and a new question placeholder. For full prompts on all the tasks we evaluate on, see Appendix [A.2.](#page-11-0)
When generating knowledge for a new question q, we plug the question into the placeholder, and repeatedly sample generated continuations of this prompt to obtain a set of knowledge statements K<sup>q</sup> = {k1, k2, . . . , kM}. For full prompts on all the tasks we evaluate on, see Appendix [A.2.](#page-11-0)
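A minimal sketch of this generation step, using the NumerSense demonstrations from Table 2; `sample_continuation` is a hypothetical stand-in for sampling a continuation from a generative LM, not a specific API, and `M=20` is an arbitrary default.

```python
FEW_SHOT = [
    ("penguins have <mask> wings.",
     "Birds have two wings. Penguin is a kind of bird."),
    ("a typical human being has <mask> limbs.",
     "Human has two arms and two legs."),
]
INSTRUCTION = "Generate some numerical facts about objects. Examples:"

def build_prompt(question):
    """Assemble the prompt from Table 2: instruction, fixed demonstrations,
    and the new-question placeholder."""
    demos = "\n\n".join(f"Input: {q}\nKnowledge: {k}" for q, k in FEW_SHOT)
    return f"{INSTRUCTION}\n\n{demos}\n\nInput: {question}\nKnowledge:"

def generate_knowledge(question, sample_continuation, M=20):
    """K_q: a set of M sampled knowledge statements for one question."""
    prompt = build_prompt(question)
    return {sample_continuation(prompt) for _ in range(M)}
```
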
In the knowledge integration step, we use a language model – called the inference model – to make predictions with each generated knowledge statement, then select the highest-confidence prediction. Specifically, we use each knowledge statement to prompt the model, forming M knowledge-augmented questions:

$$q_0 = q, q_1 = [k_1||q], \dots, q_M = [k_M||q],$$
where [·||·] denotes text concatenation.
We compute an aggregated score for each answer choice a using the augmented question that best supports it under the inference model:
$$p_I(a|q, K_q) \propto \max_{0 \le m \le M} p_I(a|q_m). \tag{1}$$

Intuitively, this favors knowledge statements that strongly support one of the choices.
The predicted answer is then,
$$\hat{a} = \underset{a \in A_q}{\operatorname{arg\,max}} \max_{0 \le m \le M} p_I(a|q_m),$$
which is the choice that gets most support from one of the knowledge statements. This prediction uses a single knowledge statement, which we refer to as the *selected knowledge*:
$$\hat{k} = k_{\hat{m}}, \quad \text{where } \hat{m} = \arg\max_{0 \le m \le M} \max_{a \in A_q} p_I(a|q_m).$$

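The whole integration step fits in a few lines; a sketch under the assumption that `p_I(question, answer)` wraps the inference model and returns its probability for `answer`:

```python
def integrate_knowledge(q, knowledge, choices, p_I):
    """Score every answer under every knowledge-augmented question q_m and
    keep the best-supported choice, plus the selected knowledge."""
    prompts = [q] + [f"{k} {q}" for k in knowledge]       # q_0 ... q_M
    support = {a: max(p_I(qm, a) for qm in prompts) for a in choices}
    a_hat = max(support, key=support.get)                 # predicted answer
    k_hat = max(prompts, key=lambda qm: p_I(qm, a_hat))   # selected knowledge
    return a_hat, k_hat
```
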
The inference model may be any existing language model taken off-the-shelf (i.e. zero-shot) or finetuned on the task. We do not do any further finetuning with knowledge prompting.
# Method
[Table 3](#page-4-0) reports the performance with different knowledge generation baselines. Generally, random sentences barely help and even hurt the inference model, whereas context sentences of the question provide some gain. In contrast, knowledge generated by our method consistently leads to substantial performance improvements, which implies that our knowledge is of high quality.
Knowledge is an essential factor. The few-shot GPT-3 model is poorly calibrated to directly answer commonsense questions, underperforming our best models by 14% to 20% across all tasks. Even when we use answers generated by few-shot GPT-3 to prompt the SOTA inference models, this still significantly falls behind our method on almost all the tasks and models we consider (with one exception – CSQA with T5 inference). Through the medium of *knowledge*, our method can effectively leverage useful information possessed by GPT-3 to help improve even the SOTA models on various commonsense reasoning tasks.
Our knowledge outperforms template-generated knowledge. We compare our knowledge generation method with the template-based *self-talk* on the CSQA dev set (CSQA is the only task we experiment with that has self-talk templates available). Our method leads to a larger improvement over the T5-11b baseline than self-talk (by 1.89%), showing that it is better at eliciting helpful knowledge from models.

<span id="page-5-0"></span>
Figure 2: Performance with different numbers of generated knowledge statements per question (QASC dev set, T5-11b inference model).

<span id="page-5-1"></span>
| Integration method | QASC-dev |
|--------------------|----------|
| ours | 58.32 |
| Mixture-of-Experts | 56.26 |
| Product-of-Experts | 55.94 |

Table 4: Performance with different knowledge integration methods (QASC dev set, T5-11b inference model).
Our knowledge is comparable with retrieval-based knowledge. On NumerSense, the retrieved knowledge only improves inference performance by 0.18% on test-core and 1.02% on test-all, while our method further outperforms it by 8.83% and 7.37%, respectively. This shows that knowledge retrieved from a loosely related knowledge base can be far less useful than our generated knowledge. On CSQA2, although we are not able to beat the web-retrieved knowledge, our method still bridges the performance gap without resorting to Google search. For QASC, the "retrieved" knowledge is actually gold knowledge from the knowledge base used to construct the dataset; as a result, our generated knowledge falls significantly short of the retrieved knowledge. In summary, our generated knowledge is roughly comparable with retrieved knowledge in terms of downstream performance, and is most valuable when there is no appropriate in-domain knowledge base to retrieve from.
2110.10668/main_diagram/main_diagram.drawio
ADDED
The diff for this file is too large to render.

2110.10668/paper_text/intro_method.md
ADDED
@@ -0,0 +1,17 @@
# Introduction
Textual style transfer (ST) is defined as a generation task where a text sequence is paraphrased while controlling one aspect of its style [\(Jin et al.,](#page-10-0) [2020\)](#page-10-0). For instance, the informal sentence in Italian *"in bocca al lupo!"* (i.e., "good luck") is rewritten to the formal version *"Ti rivolgo un sincero augurio!"* (i.e., "I send you a sincere wish!"). Despite the growing attention on ST in the NLP literature [\(Jin](#page-10-0) [et al.,](#page-10-0) [2020\)](#page-10-0), progress is hampered by a lack of standardized and reliable automatic evaluation metrics. Standardizing the latter would allow for quicker development of new methods and comparison to prior art without relying on time and cost-intensive human evaluation that is currently employed by more than 70% of ST papers [\(Briakou et al.,](#page-9-0) [2021a\)](#page-9-0).
ST is usually evaluated across three dimensions: style transfer (i.e., has the style of the generated output changed as intended?), meaning preservation (i.e., are the semantics of the input preserved?), and fluency (i.e., is the output well-formed?). As we will see, a wide range of automatic evaluation metrics and models has been used to quantify each of these dimensions. For example, prior work has employed as many as nine different automatic systems to rate formality alone (see Table [1\)](#page-2-0). However, it is not clear how different automatic metrics compare to each other and how well they agree with human judgments. Furthermore, previous studies of automatic evaluation have exclusively focused on the English language [\(Yamshchikov et al.,](#page-11-0) [2021;](#page-11-0) [Pang,](#page-10-1) [2019;](#page-10-1) [Pang and Gimpel,](#page-10-2) [2019;](#page-10-2) [Tikhonov et al.,](#page-11-1) [2019;](#page-11-1) [Mir et al.,](#page-10-3) [2019\)](#page-10-3); yet, ST requires evaluation methods that generalize reliably beyond English.

We address these limitations by conducting a controlled empirical comparison of commonly used automatic evaluation metrics. Concretely, for all three evaluation dimensions, we compile a list of different automatic evaluation approaches used in prior ST work and study how well they correlate with human judgments. We choose to build on available resources as collecting human judgments across the evaluation dimensions is a costly process that requires recruiting fluent speakers in each language addressed in evaluation. While there are many stylistic transformations in ST, we conduct our study through the lens of formality style transfer (FoST), which is one of the most popular style dimensions considered by past ST work [\(Jin et al.,](#page-10-0) [2020;](#page-10-0) [Briakou et al.,](#page-9-0) [2021a\)](#page-9-0) and for which reference outputs and human judgments are available for four languages: English, Brazilian-Portuguese, French, and Italian.
- We contribute a meta-evaluation study that is not only the first *large-scale* comparison of automatic metrics for ST but is also the first work to investigate the robustness of these metrics in *multilingual* settings.
- We show that automatic evaluation approaches based on a formality regression model fine-tuned on XLM-R and the chrF metric correlate well with human judgments for style transfer and meaning preservation, respectively, and propose that the field adopt their usage. These metrics are shown to work well *across* languages, and not just in English.
- We show that framing style transfer evaluation as a binary classification task is problematic and propose that the field treat it as a regression task to better mirror human evaluation.
- Our analysis code and meta-evaluation files with system outputs are made public to facilitate further work in developing better automatic metrics for ST: https://github.com/Elbria/xformal-FoST-meta.
2112.08078/main_diagram/main_diagram.drawio
ADDED
@@ -0,0 +1 @@
<mxfile host="app.diagrams.net" modified="2021-12-02T12:53:24.746Z" agent="5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.45 Safari/537.36" etag="NFv-iXdwE9kqjpwP_ajK" version="15.8.8" type="device" pages="6"><diagram id="AycadanMtXLR8t8vNYp7" name="overall architecture">7V1bc9q6Gv01mdn7zMSjiyXZjwmhac+0SaZJZ+9zXjoONuAWMNuYXPrrtww22EgFQWwhE5OHYGELo7W+i5YuPsOd8ct17E2HXyI/GJ0h4L+c4aszxF8M8X9pyeuyhOUFgzj0l0VwXXAf/gqyQpCVzkM/mJVOTKJolITTcmEvmkyCXlIq8+I4ei6f1o9G5W+deoNAKLjveSOx9K/QT4bLUoeAdfnHIBwM82+GIPtk7OUnZwWzoedHz4Ui3D3DnTiKkuW78UsnGKWNl7fL8OGG9r91euPw8hfx/Jv44yQ+X1b2YZ9LVj8hDibJwVV/+O/PH2z81P9+fn/5z/9/dCmzf2aXgCdvNM/aK/utyWvegHE0n/hBWgk4w5fPwzAJ7qdeL/30mVOGlw2T8YgfQf52MPJms+zUfjgadaJRFC/qwX2S/vFy35sNF/WlF8ySOPoZFE6ji1d6eTRJCuXLV1ae0Qza/FixcbJGfAriJHgpUCNrrOsgGgdJ/MpPyT49d3ImZMy3s8PnNY2gzSyyLB0WSITzK72MvINV7WuA+JsMoz3wgvrw8kng+PZOvBz0iFd46cAFAmohUkYG5iiUsAEiMsh18lMrxwZptKV+H/V6O7Hx6SMlOrE5pza2mAI4TDc4WCM4Ti9QAOfRITYBOsGBVACHiD5tVVaChtbl0WydVhNQFath7iPQCgzFKsDYVCswZDcwvBaexgW7QdHWkswtN2OB8cXQgCVBm9blemiFDenNpsskuR++pAzepHhAeNgDMlZfIgBWrC6HEv7SihEu51WuHCMJ2WFdXGctRGWInDJEEBwfI6fFqIwRLWPEjg+R20JUhggYBxFU6PG/K4yoY1w0ggq9/GDiX6TqGD+aRJOgjAtvo/j17ywJXhz8Lz2wSH549VL88Oo1P3oJk7/zOvj7wlX8aH1RepBfcxjkh0M7i+ZxL9gdyxMvHgTJ7rQs8Ev6oUiUAgtkPaO8LA5GXhI+lVVHGTWyb7iLQv5zCzwkGyG34Czyapa/PbuyKPUJlW04HojEypYNJFS2YOzq57+BxApyyCEkPoDCsFkEdppJ4M18BELncAIztJmAipXVTWAFyUiNwPAwLwya7IXz4Ng4FtvQsqltUwpcG9B8sCtPDpCFHEIwdgFxsSAZqNOb2wbB64rYBtmJheD6HrBm4itIcocQ/wDaN8xzuy3nm8r5k1A7EQKrobGjyZ1Qp97Z96gjt/kOp5CshwlAJb5gH5QYsjZwstnxO5k6Nc9GwGQLMGEDYNIpezYBJuoaCZNO6bMJMDEowETlwUknTEhF/axTWQOW6+JSvw6jrTluenAXxCH//UEsz3sV6VBnZy8L+TsT3zzoGJP50k2WQoesWHqAasGrA6sXLifSkIlV15zWopqU5LpFOAMorSoj557fGEoTzkHqrl+CE3YLFAUHUx1vozriN6FZrsuZ3TTVwgCqqwrOxlEdYctd6xZog+qEFoWLXNXYn+kAWgAL9eTfwqDFoHAPukhfj0YNFUl/DiwAWCmdoa5zAvmMqpAHTVPyyhaxydWyRSDGqjEJ5GwzCZaTVJdNVCZfF9L6vULBaqxmfwkb935D/UvgLqlfmrtasx00Nq0HPDkp68vIJhbDhcTn4HBAiibG4MbXYNtiKxNDDtbMfRUZu+W+CveVQ4BjGPch2+zS2m8aiN+sDkGqvRurMqTQ0lqF1srdWtMym1OktTgEc/09B/LA5S9NnIEpSMVMXPwiW/tS2xAmEgddrr+LLuctwKw6OcX1Sgbr+YQISikWxXzZqj67NjHf3YFIFCfDaBBNvNHnKJpmOPwIkuQ1azJvnkQbgUI6A5Ztn3x16KBAza4e58vtd/l6Gyn6emUn/ralmqDFtRpcVVNTTbiKgxE83FXrVhWyLQNdqSTeyTxpbfEuV+/fOzJiJiILcrJUpLYgh0WFl9/4+TSOXsJxmLwKGPHfmpSBKDd41jkqopMVeaNwMOGHPd6sqTZ7mbZc2PNGF9kH49D306+RIl/mRmkVtj74VJZRrzZ4KS2jrg0/UY28a3P89ALXkc3a0ZrnY1Euu2vzfGHejnRFnNZUH9M2JdyaEqoO4ZqWEjIB1/TO7rPDLDC1UB8EtWG9OlFTuXuPOaZESJFHQr09AHH+6rtER8xTfhP+tHYC8oqL6Nx+4lVd+N40sSxLwOld9wIEA2NSA9PbEbB3Jf2VxbVGhrU8Wu0MawQaFdby+y6Y5ifebt75OPIDXt6J4uWNRJMZP1zsrxpOBm9zrEexK9sVetcQSbrXaMsoYPXNj02zKgssYChYloPZDttaHAmz4uo1ONX12Uh1bo8me7NNA7wMNjuyH1WFNdelTIFVVGJSVynGTPMSmwpnb7xhaylbWaSpL8MRZ+l8WQZBsR/RwpjBuLHPCXIlUx8kq7frA1FUZFr5RSKgNS5PFeWXT6kBSfLUk0tTETh6mqptqkojjSq3ld0bp5ilaRJtM1VOHFZiFqziRJWvmW/00osXT0pZ+MzJUzSaL31m4xwlJmJ/nqgLZQQK02WrA0AUVc4QHaUZ42Opmek/8/TpK4tGO58tWu2CnwDt6cui6fLP+btB+v9j96H79fa6e9O9/XbPT7y/u3j4dPE5jXi3V98+d/Nv4Xf9mF9jaO6qL6Cycooqe2SAu547XX4OSl0EEWUffeOGzfTFlff3fzOzXnzGxMp9aJpWTyRTbuLwyUvSPPfz/cOXBjprKDprR7RDpnNQihBjbbBuW1JdG1O9LSEHWszFEGDbdhAjjl7LEhWelWVRb5wayuRxNl20+AlZGqLSRxhpNbZjTpRpZsBT3aziBAKe05KjMeRIPbhd2J6HEL1cESf63A95s/on5bEhPXJyREVlqGiSa+vrrktbC5Uofbt1drMGMmk7zacaWCv30G+DVVSksrHNN67l2PK4wias60Dikwsl/VLZxgD1ud7t2lDrenfaKFG10cr7wW8D3vCpQY2B1TDXK58adCabVlLZk2IbsMOyxPXKhQq93ldcqdX2RbfbpeqWPMysGSVU3L3mIRhPo3gxRvphPmvmsKjYYz+20E5beWdfk1LdvI1hs0xKFGdO06QMUNTZdoWmtarfxp/dVmXWkoH8vtu+e8ks0XrOhiE993y74tYklU0Sq5qkWX06Ju5q0/bpZCZpQI+OyfbTrmMy3kP3y93t13Y23tZcCorJlGxCXmFITMuEPCaKNDXO2LyVUAX88Z+bP1vGSOZvFvbJzRiDoUgZLNvR
oD6+tDLRvqFeVSZyzBoQZaJMdBX0eLCPU5v90LmZ/VlhyNdkVBCqJM/SNUb1xelWJtrXpFRlIses9SlMlIlO0qTkya9eq3L0ykTlBw2ArVZVs3U4SNE6DJN7HFHV0YfY9kdDFHYJ2esxFMYgbZaKkN93wQ92eEul6T0C/cBL5nGQbrrzxzN/P0y9Yye9+aR3Gu4RUiBdrafXP8qeQFdFx++q27m96n5txYC9OCHb71jGh9o6do6oFt3Ok+k8Ncn3LbcT4/R2RxRtVli9bx1WxKpWIZYfxlHq/VafLZaYL3bh4IX/Ag==</diagram><diagram id="QliaBsbfwv5VZwm9EBae" name="network framework">7V1dk5o6GP41Xi6TDwhwuWvbbWdOezpnO3PaSxaicorGQdx1++tPkIBAUKKViBb2YslrCJLneT/yJsERHs83j7G3nH1mAY1GCASbEX43QvywEf+XSt4yiZ0LpnEYZCK4EzyFv6gQAiFdhwFdVSomjEVJuKwKfbZYUD+pyLw4Zq/VahMWVe+69KZUEjz5XiRL/w2DZJZJHQvs5B9pOJ3ld4ZAfDL38spCsJp5AXstifD7ER7HjCXZ2XwzplHaeXm/vDybgL6GXkD++Qa+gl8f7z/5d1ljH465pHiEmC6S8zYtsHzxorXoL/GsyVvegTFbLwKaNgJG+OF1Fib0aen56aevnDJcNkvmES9BfjqNvNVKVJ2EUTRmEYu37eCJlf5xeeCtZtv20gtWScx+0lI1sj3Sy9kiKcmzQ8gFzaDJy4qdIzrxhcYJ3ZSoITrrkbI5TeI3XkV8eme7jmFb2WWC/ERQ4XXHJGjahqg0K/GoqOoJ/k6LG+ww4icCpiMgczViFljUCcxWzBz0jAvMtGCTq+9+XDAGOlHBGhVpMkG+3wpKQJ6JpRUUYraiYjlaUbEkEGjAvYMoLtiC/3uo4sLiZMambOFFfzG2FJ37H02SN9GH3jphVazoJky+l85/pE0Zlii924iWt4W3vLDgz/e9XChdlRZ3l21L+XW/B+aKrWOfHqhHhIf24ik91J5pZxXT7jzIjZhGXhK+VJ3x2XEm7drHW+EhB23XPI3+pepcXCg7F9zgWojVUS/aZ+xFb7XMorlJuEl1q27EqAVNAJrs1gMCAORUr5o9fmgFCFftGUQNCDXYM9iVOXMGgKoAOTWA8IUBcgeAqgCRGkDgwgDlDQ8I5QiBviEEB4SqUXXdyF3aC0GFAShdBPdp8mYXZZfj5RNj31Pi7NMA7zDCFj68NcJWDbDLg6sGFuQy5Thc3OErC/njllhYG3EjUGsje3BxWTkNJbVUtzhWraWsa6SWtlwtHvw36KswVD+FvieQF14Xdd3rpK4UhJjkROraqB5v1lrqmrrmuagLT7O84Jotbx5ZXB1/TWiYxDQJAa4JSD7/knMQuoaFLQtjF1gulrID6uSGTqUhu0Z1VPkSuHabrplvdcP8E3h/ZUY7DwMH1l8h64lM8iF/fZDtZ89L74tia7lBZKFislAXO86Zr71Y1puPJAwkRBfLfEOdmdWJx4f0jT5hzA1MU9IBgLP4imNQspFRw8m0C4JfLPOgM796FTCZEkz48jAhnUnWa4CJuL2ESWem9RpgsqEEE2l2TlphunS6FRiuiyuhIUaHY0Ne+ErjkD8/jUeN4yJFOnQZKaomYnOn05uBEamzFDpWwdITclq8OVAcuDbOsuWmOw5r0ZXmZ3tAadUEbc663lCaD54s4u4OyQi7JYqCk6mOD1H9AiM41FE+t+us1uWpjlSzWr2jOsKGu8sooRrVLWIgp8hG5Umv45kOoAGw1E5+FxsaNpS+gy7SqyzWU7PvuxDmKNoXqa3j07nY30P8B+BmxK8syu1YC+xrDWEAN8TVTCsyLcPGJSN/MvWtsoLZsHYbbBp2oWDIwVgv91VSdgP3lYIdZQ8AekZ+aNfjd9ORIo8j4vd6cwgS7TG7Sv504LUKr5WHpc5A665pTW5v/u3MbMWqIYipGofr2TCCZUf8ifeTdzdnAeXyz4/jLxL2p2/g0rctrrYDCzdmmZs2YZld5S+x7BvSXcZQ6l7+kEm1D6sWWahb2XwLkReF0wUv+rw/04zjQ9ploe9F9+KDeRgE0b4sdhXWMm5ntPtHpaFVQDQbMESdYShPvH3OFEX28wOMuQerrRBw5e2QpGGSuzMQbTx4s8PeLHdS7WulVMcUerxZ/r1L6nmfJLxPQrbg4vvpNKZTLytdnU9D9f0PDTtUmhwadrvq7aa1iCRKjd4zP5mmJ0/f7h4i5v/MP+A3Kj7rqcXUB6i5C+QP2Ebo7KpVcO3KPhI5JPmjjaFqaI97the8IfUwuLmTkLVU88aa3Jw8aPtG50sWe+mlj17C8URgzBYvV+jm6gtKIWgM+12dQzeraaqm5un+XifLdZq5GfxdM7IQ4fprhBrdnaXb3Q3DgRY7aSnbSdVcrCYPaA7InglZ0q+BnvUneUBu+poWX2r1gMQeVKlFlVQnrHo2A2DJaelPaVBxYzMAEDmXnwIgzqBFLVqkunQzzxj3RYvkiYFb1CIEmrdr6NUid9CilrcXqq4J6pkW5d/7xmejkY0ur0X5WthBi9qUo30WrF8RHZHnZW53FmzP4EirKjnyhMmgSo3zxO2qZPZKlWzZId1OngEqvJRSa5LBGVxSmx4pL8zoV2CXf+8/Qo/6kK9zhkmNNlUyVVWJ9EuV5L2St6NKZt880jCD1KZGyoOkfq2OseVB0s2qUR8cknt7Y6QV53xS28q1lX0I074RVxSbvfyUCaGfCUUV2IFCqv50idMzhZTX2Hz48mV1fdp34jLd7nzY7aXLe6p5qospnH4tk7HlxRQ3oXm9SA3Kfdu86etsnbvvxbv7fr9h/wt5u4IJun2zkM078+RsVFcgFe+R2ffqt/3vl9EFUrfKxIu73w/Ndi3vfoUVv/8f</diagram><diagram id="PMmH-ys2Tt7qpbaDPQ0v" name="relation 
illustration">7VzbcqM4EP2aPIbSBRA8TpzJ7NTObk3tbM3laUsxss0ORi6sjJ39+hW2MBcpCWDAxLEfHCNBO/Q5LXUfCV/hyXL7IaGrxR88YNEVAsH2Ct9eIfkiSP5JWx73LdDLWuZJGKi2vOFL+B9TjUC1PoQBW5dOFJxHIlyVG6c8jtlUlNpokvBN+bQZj8rfuqJzpjV8mdJIb/0WBmKxb/UckLf/xsL5IvtmCFTPkmYnq4b1ggZ8U2jC76/wJOFc7D8ttxMWpd7L/PL9M/Yn/0znP/mPKHE3X79++v3xem/srsklh1tIWCy6NW27e9u/aPSgHKZuVjxmHkz4Qxyw1Aq8wjebRSjYlxWdpr0bSRrZthDLSHXPeCzu6DKMUr78HS4l9Aj8yTby/S++pHF6ShhFEx7xRJ4R81jauaFROJddtxGbyRu8WYuE/2SVc2o6QTnrF0sE2xYooJzygfElE8mjPCXrxcgizv4qRfJr6CvMNzllEFFtiyJdsLqSKprOD/ZzKOQHhUYDZJD/MjKSkav0o3QKjSIW8XlCl9JVK5ZIzwuWVPs+5x0vAhluWRbOsILaFcIzb8qmUw0r2XPvObaMrv4Au4bEQjUAg44BMBf0hBdG48ZrxlwzXgHx70GveCGo4yUxdE6MmP0yYNKKnKfYy96n69V+8trBoLufOdDeOVlz/w0CYNeTjpwlwNJXj7A4BlRcQxSZRr2+IHHeNiQGRJwTI1IjPzhnREyQeCeGhLxtSLwRQoI8HYJAliHqUGWweSKd+o0nYsHnPKbRJ85XCo9/mRCPahanD4KX0WLbUHxPL5dz5/7oR6Hndqss7w4es4NY3mDhovTwR7Evv2x3lF8XvEvLr/zfly13YeqXWyMhPHSPXVf2BHS9OJQLFT4FDvMCuzE71vwhmbJnzlP+FzSZs2ftqfEsRedZsiUsoiL8VS4iO+eN97ZDGYIR5iA16i49NopBegi4Urjl0fdUwKnghoXQzgPdHNzNIe447pyacWfXDLsC6KZaIGurHZ3qGz7zUN5uoeTXeedV+LS/dXVhUVap2jLNR7hibO8fzdiOn4e7b0/ZLBa65mwLxj4/GTXna2ky6Zi87qskryklrapQdcnrGmzZFVt9czebNY7mLmw33oJex9s++Vs36RkXf7FvEc9xMPaB42NEXLeaAEDLdm3bdYFvA5fgltRGFoEFKxrNPcvBhX9jWNLXEAvbkL4F5bsesDvmOLlw/LVyfEh9dUZdz0zMiXSLqbgBoBVhG+gU2qqSj0ya95C1DRxSYB0hJlgHBYLTozKkyDpCVLxxojKkzjpCVFwdlVPLrLCOXtanNgMs38elegGj59VXeVBYxDXlVDWg7zinymbml9VSZ1RZFTSME07LyteRtmw/e7kesasZ2sBlcE+yY98SzgnYW1dzzAbwkbBXJoSpq/IXKTOO+BbyC6+WxLZfILZ32HgxELNRZ+LksLXuCZhdV5AcGbMdYqG82iXAq1S7Hrb8vNjN1kqbEtsDFoKalew73FKpS4A/LMf7ETFhTY5fAysdTop5iet7rzAxqav2wHHJPeUA0OWecgAQ1FLuKUfAwYw5BBAZVu7JGN9BAtNwo0KLTRFP7UqouamhY9q/0nScyISG6AnLgfQWgIVeD7UjPSrGFvJwts823/tTiC0CXbcyvfRNe3zWtO+Y6bXH93ElOC42CFRtC0/dFh48Ja8jzV9I23CPy8iSkrMjrb52sdRYK9hWlKlqfJbomUeQppI0u+ctUqU0nNLonepYhkEQPSX1lrd2ziO6XqvPRyQO9bVa37CPw/BEjIltqC+xFumrGvEFL5U6AQtWsndieiDGyZ90GgQzfGw203y/wL4nezSzudNHMtpCz/AUIWk53ELbaMzClcKx5xEXH5smHLld6pyp4bgdUsNxLVzUjgemSZ1NBQ1oMgMUA2rUne5KitR5MqPtXk4jM7BzUmbU2djQLTPezAACiQVJh0yR9mz/dEw5dmW/eZV4xuTwHckNfaWxC55I08OWffjYhfEjtd4zpgnpjyZkaJrY+irzx1gk9HrJAybbpYeuVwnfhstQpF7nM/nWh36gfsLkiGq0xk+oNKNggwfyPKLPDJ5hI543pJJg66urJWxXVAiWxCmSEt6IJheIG0Lsnhxi1CJ8+5CTzg9b++TY6qJS7fC9QFwHYnRyiHWl6GMqyz4VvvdMbBiL9+Pz5IJzTZyBjjOCQ+rDti71lHB+IpQvcLeB25B3IbsbtOVh/ruF+xQ8//lH/P5/</diagram><diagram id="qFIP0BvcqyvCwJYqx_Jc" 
name="ST-MRGNN">7V1bc9u4Ff41fjSHuJHgY2Jn00432Z0kbbN96cgSbauRRa1Ex/b++oIWQQEkKIIyCQIUtTMbiTdLOB/O5cM5Bxfo6uH543a2uf+ULOLVBfQXzxfo+gKyVwjZP9mRl/2RkB+42y4X+0PgcODr8q84P+jnRx+Xi3gnXZgmySpdbuSD82S9juepdGy23SZP8mW3yUr+q5vZXVw58HU+W1WP/nu5SO/3RynxD8f/Fi/v7vlfBn5+5mHGL84P7O5ni+RJOIQ+XKCrbZKk+3cPz1fxKhs8Pi6Lf5DnbUz+fvcnXd1930XBD395uX/YL21uKX7CNl6nJz/66ol+2q3v1hh/+fDbNQg/fHyElzDIf9zP2eoxH7H816YvfAi3yeN6EWePARfo/dP9Mo2/bmbz7OwTAw07dp8+rPLTt8k6/WX2sFxlePm2fGCyh/7n+In9/0vyMFvnl+RAAZh9nq2Wd+z49Zz9vHibHdjO8/MB+7SY7e6Lv367XK2uklXCLrteJ2t2zXvN0clH8We8TeNnARv5aH2Mk4c43b6wS/hZlI8NR37+8ekAI8zBci9AiB3N4ZtD96549EE87E0uoRbSArBZWHdMWps3DkkxG2c3/LH+0aHCxPN9HB1eRBo5AjxIFKeFkQxDj8LqWAI/u/Xtw5l+3zys/vXl63/Rjy/rf/7nD7r49vlSA/pszm+yt2woZ6tVvErutrMHhrlNvGXQfkWrfO73w4nGmbJ8jrnC1J05AvovILql83g+F2bQKr5lA/d+m6SzdJlkRyKffd6l2+RHLNx4QwkmvnLuHAee9uS5RF4kYaD4XBI6qAo97Gv+aAicPYVZprhZerPdZm+uXsVYlU1MQKYdFIP/Hvr+65lM4KI0X199CoUQj809//CSp2ngewGsTk2g0HGgNxV35iIKPKZMBQlhWURM0Q4uIg0jdM4iAsgLsWKSDSUuWpXOgnnI+cfchzq4eNmQJtv0PrlL1rPVr0myyUX1vzhNX3JzNXtME1mQ8fMy/Z7fnr3/Q3h//Sx+eOEf1uzXfee3Zx/+ED8cbnr9xO+SncFmixmvF++yKOLwU9mRX5bZEF4fx9X+DI8Zyr6niM2WSNslj9t5fORCtL8unW3v4mMPzJ+XifMobrfxirkDP+WAqHvN7RsFGhCABvSA5otA8yegaQPNtwtoYDCNpgk00AZoZ44tYBW20Jn7NgizQF5wXoAcw7Egbmh3Bp+5hHB0NIaDvl3eZzSYU6Dpffqej6ikr72QYjd09uuXEhin6yEVObZKkWMYNSuKGq603UxtTZZeYlRwneYYUPUooYkD1Z0wB0S5TYJiNC6O7QS5OMCDYjQumu0EKTlAhWI0roCheynZxoZihI16pBMf2gy2RhcUo1DXCeUT0hY3FBGjcJtYUcNws4u+wigYTLtN3Gg/CLNrlaf45ufr8TjAkSJ67kJyjCbFaCJKLSVK+9Hq1C6tjjV4wDZsaf2MbU+XQuBR5B+dq4Nwp1iDR5u40xK+HOdO8chYufZycYE7xSNj5dpLyQXuFI9rKb97KVnHnWKzZNbEnTaDrdkpxVTXKcWWLeFjs2TWxJ0ahptlzBYOB9NuE3faD8IsWwzCI6Pl2ns8DnCnWCNxbNxCco075bX8E3dqHXfai1aPrNLqAOqwDQdBzVez3W45l7Elg1ND8hXJBq8vHcnekuw/pRCPdQloHG5h/hPF/OfHtKWS/4XfkyX7egJ1Id+xR1Z+0UGUh/typRbJzRzKj9nDrvKYV0QUv/F0kECoWjYNVmkubQktwZ+PCT9xuXtVR+/YBQBvng8n2bu77N/08ht/EPtm+2ftz1RAyNR/KsNORlGuQhQtNiodOjJjspzPVu/yEw/LxWJVZw9bg1toEAJxa6C24ZhlUPDcMAHJyKQlg3xJYQhL5pFTYt79XW+zYuV+MDs2GdOSXXs9Jlg2pUI9GL8quMVVi2YVyRdG2iGv0cpBqGvlILTLykHD0fEETQ7N2vZHduCUJzhag1Oz/Q7a4LQtAWgB+OrwduqUGxCnnWdcqP1MSMs+Rclb6NvTJMjoBPBbTIC3Yv6AaBH24JRA2CJfoG9EAiwjMopOi6AAIBJ5B0DpsV5IEXNhIxRQSIHpAKvKrX6LHzbJdpbde/X5864yLeQ50H1fQ81smzbZNR11NiwhgqssicFTBD69NTaEfC3IQqs9bu/SmLKLtA21Xd1mim8uKJZPj6t0ebn/+xn+OAFzs+Xcy0cb9U3B3pjXOMi3TeMEZusAJ41jWuNwPdKscQK7irigoinD5MrUrVVC2xQLNht/nZliWYTRjd4C5u1tHPSjWLC2K2MbNzZgkVFP0LSVU3MKp7atNQyZ0DHh1F6c2lY1BwbsJaKN0wM0O8oaNUXRWgTN3ilagL1AplMB9T2ASCXLzhSfiqrVObYFIVzqTUFIAaS+ghCAoor4At/zw8FjkYnk6NN2BjcB0Uu5u72F/Sgoom07A6tsZ/HN3aNVy2qnnlbtXfFki25WKp5guLZX56B4LPCMAl3FE0DLFE8wOTba7CqwVL8YrkQ+M/1C4Q3SqyVYkJgu2qdoa+gX3VY6tpECkIwvh9tW8sopnFpHso4v5WbCaRf61LJFKzJcWX4LkhVDmWb1fD9qQKuysNIi/tUi1PbPv2Ylz4cdikMq+byQZgW8QbmVXO8UbLVm0LJIpRB8U6RSYKmvSAVg3wuQLLVg+FyQ4TrYTGGKEQWlX39imfunaBHsBv9a0Tn1/GvvWgf5NmqdYNI6I9c62sn0HJzWaJ1qMv3k0tSnttqnXPCA2zz3XV44rvz37rPa1bFTEMgZ2AUeTYVJ/IcKOuW3x3TzmH3HX2cvzDUYej3HokIcYl3pH18ftFd8Fi34E0szjXCxnexkFuz2Hbv3CGvNQsV7oaYtQ7Uzm12qxaZYltjIoGFf1TK1k75pU9e0E4FCQwkkqIoR3otBappGezM99a31brhUq9SWz4abvU8P0ef7VTL/IaDixnJIvHW1sy98QD/0SAki1a1bihQeESSwNz3CDViNf3JwRT4cjna4qOgRploFl6VXh6XDNcB96YHSYZk/bn8W8BPLTcLgQuT2Lo4ze8pV0Gw4mlvOvqlb6RvdGwRDD0WHFw0kxHMKtsHTUTwWelCx11XNY2ta9zAZz16EyzbZBbsjP8YPVX+m9kuWLscNl6M3XS5/GfZm/+s69RQxrAahFRtycYUuojAzFJORePvKCSIqUDS1Xu3RQNjb39KiALb3zPPew1ISeZxx4gRKGHk0EnX5abo7QK0fXaO/O1NrgcZmvTXbOraa5q03dazsdBV6Ea2Ok7jVX5ahIxhGUFUO2W6AQfWSHjq+V2uXKsNq+c6PNSUGjTs/HmE4GyDYaudHKE6aqBRwepG0HWiV+3wFggIffRkPoGg+WLUmXe0ZcuvPkD9TCef9dajcM6Tw4vsTWraVnSiUkh5EXijuGAKr89voliGg2NN32slOb98Qjqxmu8/R2RZrjdQ1QDmoGtMeivloSdoDIAPC7e3OpQm0qHF/SkPprlDWe84xU5m
CHSsioFxlkpLKRFpOaVeeYzEK52vRMFY6nFw8VCmewQwaMrnlsZXyIkflBdXTaTB5EbPbFBysQAsXpNQ6qMkJscSSDOl3ILvSLXFQrXU/M61wPC7BoeeD6tnBdjIMNOLIGr6m5YC1ZmwucXWbx1AxOKZIGBxo7NM5kTAlXI2YhMH8yaNRdq2F5hYJg0OznRImEqYZa837tIZI0xkq5qMtzlCoUAcTCWOUhGmNsrMmYYpROF+L5hQJg8Nqz9Qzk5dTJAwOzXYRm0gYM35H502b34iysZEwHccltpEw4elJMy0HrD0Jk2lQkV3xLSNlQg0CayJlSjgbNSmjwdK5pPzaC80xUobX6E2kzODOUai7QoUp1HaO7GpDhOmAHOBEypyGsvMmZfgonK9Fc4uUoSMj0drLyy1ShhreVGsiZUz4HdSyzBiqUcIxbq3gFClDIpW89lWYuw2bQSf3gfiULOLspFDTuX+g1WWdnbSD6CrHCqhLs3IkXQLs+YHIWVSbAQQqLPXVMIK8Icuq76o4z/exMFRylRxExAsjcmxSKsk/6mG/yhl1n9Oq4xnbTf7VNX9sJP+OdOdpwKA++Yc9KIFD7lFeOsu9KBEbDAeqqsnecpyNet7BgoYBVskGvSMYcTzIm/KxV68yY5qRgiMyizxExdPDGllATa5fWymwTAGTOktGG6eYUXFBw61YfY8pC4k/8+lpmxB3EC+VvZvW8VMBssb4qQCqxCXrtag5jsvm0gOqu+pdzN3GAIvPA9lnK7n/JGtDU9+3oEOVY7Jy2k6VA72IijoHS5JAdtkIyFt5WdgnRd1c3lFlM6gu0S2ftkqXQN9QTZ16D4MJZrIhshhmtctOCAqaFsnPByz4Ruw/fhro9WLrat2J6NQ7tUlLqjdVp9SGeWXWh3mLIn/oh1G1w9eQbAXRKRea2IoS9sbLVhBejTQaT7S1zNxiKwiv5zlfgbnEVhCewD2xFa6xFTzia3L9ChXa6PoVc9etCIMYLfCxU+U4xVYQwxU+E1sxhC5xkvkkoaFdGs+SrdCHGbQeZk6yFV0XUdWbqhOKqDK/WeImpKFzgLpwvsrKHHVxQpWVe9SFySIrA25pe5k5Rl1QDe5x3AJzirowXaQ0URcdURfameyFCm32A/ncdSzcMFq2ZKfKcYu6oGb50om6GEKXuEmDmirROkfqQh9m3BBZDDMnqQud0rBBykOyLZXLOwKyiNjT7gJjip+gGh2PJn6ihLYR8xNUgwx0yfdsLzPX+ImRZWW3F5hb/ITZnOyJn+iKn+C41HD2Qm1nz8nkbRKNjBI9QeW4xU9EZknRiZ8YQJdEbnKdkUJ1TPyEcZhR62HmIj+Boaqsft+8JAOOhPuaVihQ2QrF8zyhC8r+Wa53QSkH7P2Zb5R12RIbaclEzeUrUaNo7iEa8KyRStWGB73ZcKzhdA1HdclBF0DZ6FTpK0taHxNOqbhLehlrfXzA3YhbHxMMDUY0/bcYO0FobrU+Zi6T0YiGhy1T6+N6rDV7mXxbwmYvk89HS1oQEmy40WWLANoStPTc+vgElJ116+NiFM7XojnV+pjgce08c4K8nGp9THBYFc/U+th5vyOwzO8wubhrpVZwrPWxRhtNW0gaom4FLIbhsDp2WWjuV5mx7rt+E42Q3HKOhs7jkziaG8pUVptpVcCuFUcjMpwhrHA0UtpEFRsMByYTkzAx2aE2JgD7aoMGfaUqbL3o3F5mhBxzkNgED0TWbejNqYjJ/gtWyqtsuiLZdBHL5GUyYHRQXhkFKmUFVguNDQvMUFOC0VCgHFmNoUiBzrZYa96FhejWmxTzUXeh/dIvr1kyF0pe1yxdQQHkPGsP6DQbH4spR0APna0yQSZ0qr0xV9FpqP2sQndqohO0QecEyMLddBWQJttCWOl9oVIKbRaBWuQe88Se8xUQPrp1FJstdrnHwYA5z5oOiOcjKql5L6TYDVX/+qVOLaXoQ//z+emo/gf1e+bpJoqq98xjt74Ho0sVPXHDvFO0HiryPHl2KPWguBIJqiVlPD3KzB55kYppm5KMT00ybrUQU0/W+hJogCIF1ngKcaTKMehwY054hhtzdoOW4HjuA4AV8JjdhjNSVYV3iJzPE3JORQ70KKxFDgLVVd2OkMM+bpNMlsW5j2yo7jOBZlf8Hw==</diagram><diagram id="WNd-Dn3DtBQc3bzuKFJP" 
name="MRGNN">7V1rl6I4E/41ftRDuPPR7p7pnUv3md3eme19v+xBjcoMgoPY2v3r3yBBgQSMCgnS9J7dlXARU089VamqJD3ldrG9D+zl/MGfQLcnS5NtT7nryejPkNH/opbXuMVIGmaBM4mbwKHhyXmDuFHCrWtnAleZC0Pfd0NnmW0c+54Hx2GmzQ4Cf5O9bOq72W9d2jNINDyNbZds/ceZhPO41dSkQ/sf0JnNk28GEj6zsJOLccNqbk/8TapJ+dBTbgPfD+NPi+0tdKPOS/pF+ayMf5rm8/fZ7Mf4+fbPz9t/t/34YR9PuWX/EwLohWc/+of70/7n5tG5VYb91+/bvjf/b9NX1PjZL7a7xh2Gf2z4mvQg+t3L6CP6ftt1oevPAnvRU26WMHAWMIRB/ty3w4mbzdwJ4dPSHkdP2CCIobZ5uHDREUAfp84WJqDZHfte+NFeOG6Etr/RU1boXR7hBv33L39he7tbXPfWd/1g93bKRIPmREXttuvM0Pk7F05RJ90EfmiHjh+1AFNCDasw8H/B1J2mPFJ0HZ1h7F4shhcYhHCbAhfu7nvoox8dvKJL8Nk+MLWBYmnxfVh/ZFUbaMA6/Jnx6c0BnACoAxN/2zyFTU3BaoFVYrb/xoPY0QcseToKXh5C+UFd6Tef5sPN0/arOlxtkl+WkjmcICXCh34Qzv2Z79nuh0Mr6t+1N4HRU6POPVzz1feXWJg/YRi+YuHa69DPih5unfAZ3x59/jf6PNDw0d02deruNTnw0M99Th+k7ooOD7ftjpL7WGCFLkmAqO7gYgfhMCIg1OL5HkzaPjpR5+IvmSRXjF17tXLGcSO+BNBQd3NnSNIBjwknARLZU8lWJDt+In4z9WS0rvx1MIZlDIBBhX7aDOIHTr++TIKn0QQ+eB9/j5bB4+KH1Qc6poYIG6XwD6CLVO8lS8A00O5uRT1ov6YuWPqOF65ST/4WNaS0SpcyCgVMI8t7x643pJzCxG9wUJ/9Tzlfo0gW7TRKgEZlNadIvyb2ar7rdyBUvZRWqJcsVatepb16vW7LdAr18fgct2ViWCMK3GXejgzQpYFkcHVeqFgA1tVjwRzD87AwMjVVk2qVvGENrJzkDUW8y2qKNLDJ53Ya2KzRJM0tYng2BxZqQJVOR2faplJNpSqRNvXpZhtsPnxcvw7fnl7GP58/+9/dvgWqNqmsmC197xRTDXuy7ka6vlqP0MdZ9PG+J6PnS2CRnENfljpNAD8L62NsdTqsjrHVGEl1R5QC6ElFNij9BzJEZZkDjaSqJMCT5qkkxlM5T1lt56mM89yRVrF/r5mMrAWkZtHW/s2P8Na39vHWBSPDykgMyAbywATTWBLNby+PNZe6Tg8YVk1dSeSPgboqjxFeRl3Jm7O5XE1lrnysoJi5RAQH1FzAqCAuwJevhKY0Or+rSeRFicAWkFflEdgLyYuMcpb4XS0irwy2BTKZ1QQmU9rOZI0lL2WoqQoJQQp56RPT0C9LzhZQgMpMXmrDyIusLCnxvB4byl5EdUkxe9VfTpKjJ/TcQTJWFEdPatvp6TocrSZwlcbMVVrDuEo7xdFqE1fxCnBliUvjS1xUEFIrH7HIl6i705LUf6+jIs+dAPqrnQSGUaRTXW4PJxMYPFOQA4oQc2jdfWcBkFA/h1m0ZCWKWSWNB9xEQCGSmjO23SE+sXAmkx0n0+CZBfBpCJVPpxh2RCXOBQaUSWZ5FAqWkkqByo2grLfdCDbW7nFN7JQmmdNmj44SYVav9L1TBHh/f/vYJaQvIaa9DWtOBlo2OnK6HnKqqBa1PLF8bVS1f/HjXNUloashLiBT5qXw5a2uxO9dZG3KE8nXR1VkxrncrWoqUzU66UzQVROSznLri/3axFj1elqUpPN10BeZcy73tFpEX4LSzgSXNSHtrHQFf+8ilVOeTL4++iKj+uXeV5uSObwIqhGJZ6X1FX5tIqh6nS1K4vk62IrMO5c7W21iKyFhLd6pZ/rs9fJ0IdbbjqsyXIWvOBCVREMX13QgVbgJatJkRJ+3Lqxgr/S9y6bRo8c4yxU8zij2ahmv5bWbHF/S+cUCjOCSGc9Ff73MpHirzkxeH+gGMfMdJPOgMtxBUgeoizrk8nK7OqkDnEMcR2jj1Nq6k/kgXmKLhQ+mWvRPDXygM/JBooBH+WAPUAkULs1QPfDIDNw7pweNpAeDNk2TIzso5an+zrGoxbHIkhgnL4OSHqN7GcKGPKXv3dEIppEo1itpRXO/DVrxEE93o3zyUdWEIqUJhY1OQJpOwBEy6dwNUhMb6m5owoDH6OdmgHfMz33vWGNd0EQI1pTyQqMGek0V05xoB4qXz8QKV2A0y2fSO58pyw6KTLCDpVILrXkOvviarG7wxZU7DFbuELZ2V+l7d9yRcAfYDbgy3CE+qivQA2EkjgNXsAy03ru/y+pAiPF3KRRQ59hqb5yO461Bq8rH4Rgq3CqGVTIt9bhtsRplWwDPlMC+bLe49DdnW/YC5GdbJHKtbKArgyTTK8q8AM4Kf7p5aVYUj+v0l9LagOrczT1GDW1gmTmMRkUxRl3gExhCPjIsagh2CkKKR0dnNWGuVijhp+X3EAEa5VEG+ahYH/DdOWBWsK0IYNgN7X1ZtKgGJi8XS7xFU8pnWnWxlvNjLVxnH5SmrI8aP2HLdZa+dkcee3dYJd3horlNPN1hvuW3p4x/mxRvuSYvmLXIRZwXzDMOexXkoJmkBHRFODmo5dOKRA5Xzh0ndJ7FPi7LyicNK83nyB37ri8Wn/gcjg4GWo47ZDSClAUPSjox5UsbNUNN7XSdFZhqDDQ5dVY08XN2CmXZyHC/ZLKOKyt3DfOrdvJIzWWCuNEPTe24WUd0i5X6WceUBz9GJblIkalbvFeP2fJFGjpnhauzUuc20qwhkYaVn/AMiVyDSQSKhWxiqtxfzhKHiciEsrkuTzsocG4yo/1rgfUTZNxODpOING6c5rKdVWH9PmBFWcOsEbAqSiICk3xsNMooeWzcB1UkFOm7o9BKt7tNBq5ykwEAFNo+rFw3GqCGkevC068OT7XiSVVpeXS+eBKzxnJjU+TM1aFnzEjILqp1GsiqG84V2E4zO+8YKGx2knxQfgtFSR+oRsr+5oqaCwp6kDzs19Rly+iC1envX/iaunTZ9ThNddC9+I2rdR7ELCfcauXMKmENmzBcr6Yq5hVqaj2aJ5fHVTvNK9K8M6KmzTaLudUDlZxSMCubYg3UXKxvN4k1FQlUa9I3+k+oV3/ErM3aav0ptVxV5CCuWpl0jYsyaVaZMh2/Xtc5KB/umi5G0IIYgWHQKuL4bm5Jq2upC0894/YB/duhqk5UyVIDUMVz+90OVTxQpRi0pbXrQtV38Pb49unT4/OX2ZdgePfF/tZ/SxKhYvYEyBb6sy6Xm63mqjbxyHFPgFqmy5cu4n90m3lLr9qxvYzxKJvMhyHqE8f3+iN7hSCImWgUJCT01+7N/IgOh7NZAGf4KAfyK9hbid92SnI+sHXC1nCqNcjHvarbsMSkWLycIGdIkssLO0aWxr7nwXFoj5LHSi
d1GDoe6LSVjLlu78JQTX0S7C9fFC8P4DO3dsUUTEq5HDXH8Z+SlUw1ujVim2HidMOkNZ3KQqW1zUpKpPAYqjSbJTzW7WbqFp7aAOExLA7ZLOEJ58lk+NIA4THMCm2Y8BpCmyZX4ZUOL1LC+9917fJcFJ4o9uKrlyf2Oft8tzylBg9oGyDFMos6kSUiJVMjUoPBICX8+FnXHmNKzZjoA7OWORP7afnawJLJCt40TKx9YWZG+Q/r1FSOleKYeIcVAVhJzuaH/o3EDs3d7rAjGjvIAlmmlcZKLryNkELbOZI/fornFHT4Ecg9SoQPKVWDIB4+v1fWfd//9fD5P/hVXnsPhn6zLXFzLku8PfgTGJ3kl08L4g5rSTqtLysHMGDUmDplVwOdNtCpYBYeFS20NX4rRMuiQ8u5aCHSHIhgDJJ/RAFHrhc4jx1wqqMZhAxaxocrXsRMdmjVAp8j1x//6p2YuNd3fz1a4r76acZUyVN2VaBeJ2xXprK3TvEbYoDQsaMb7+AS9Tr0xhFTRYTlOt6MwPcBvaCARC4L9ZECZQvaUqiwRjLK5Z4t+goYhrYv+c3k6yuIBdLdHjEzQDo6Ek9HlJ2eqNeZjaIjMpN0Dz0YII1/iwqLpHvEKHP0/1vfe/HddTQmXnWMRGMkJkKSqdVDdbERWTl0cmHYuxXofqWkIxaGVqd6hkDRYeBHA5b9uZ3m7ZwA1Ph/</diagram><diagram id="8-gO3fiIU8AhQXWREMRV" name="MRGNN(2mode)">7V1bk6O2Ev41rsp5GAqJ++NcspNUnaSmdlN1snljbI3NCQYHM5c9v/4IjLhJgLBBCA9OVdaIizX011+3Wt3SSrvffzxG7mH3W7hB/gqqm4+V9rCCEJimhv9JWn6cWnQdnBq2kbfJLioavnn/Q1mjmrW+eht0rFwYh6Efe4dq4zoMArSOK21uFIXv1cteQr/6qwd3i6iGb2vXp1v/423i3anVNtSi/RfkbXfkl4Gandm75OKs4bhzN+F7qUn7eaXdR2EYn77tP+6Rn7w88l7UX/55+0N7eobayzr6cL4+/PVrdHN62Jc+t+R/QoSCeOBHQ/308DfXf83eWPbXxj/IK8R/+CH5ijvg+j7yw23k7lfa3QFF3h7FKKqfeypO3L3vvBh9O7jr5AnvGGO4bRfvfXwE8NcX7wMR1KTHYRB/cfeen8DtD/yUI+7L7+gd//9ruHeD9Bbfvw/9MEp7p728IHO9xu2u723x+QcfveC3dBeFsRt7YdICbBU3HOMo/BuV7txYzrNanCHwSLrB+cYzybyhKEYfJbxlEnhEIX4N0Q98SXb2BjgZmjJ1gnZ2/F6AE0BDAfapeVfCppGpopupxDZ/fCF2/CWTfA8UaJTI0QYrUXYYhAFK3+drsEHJY5JXFkbxLtyGgev/OwwP2Vv7L4rjH5kw3dc4rIoafXjxn9ntyffvyXfFyI4ePkqnHn6QgwD/fX+WD0p3JYfFbekRua8JI80Y6Cfx2I22qO06wzxdmLzIVmBEyMcwfauyFUvC2a1PoYc7WADKSrCiO+SjOxV4aaqG31XlmcfwNVqj7DE14OT9Oh9L+tVhaeMed2lfgRTAsj8psMyrA1YdS/YasbH0bBu6MQKWHCmghM2dUCBZVwekVoYSjioAPiWs7FZYFQj6uWgdDmVABF1xuOb4EuLM6ynm3Ci+TUZxhWalbV+85OVmP7IhV6x993j01qfG7BJwOsqeqrOAfAdVtcGPrykDMoB+hlE+IaebSLtVQxtaNS7y+x1q7He7gqafjJ6Or8/46zb5+riC98l4mJzCP1U6S6G+iumu4V9/TDXxGxn+rbFI05HnUITXY3SnVwd3eSyhPLgjbeWRHQkxDC5hEoW5Xlaq2r6ForqscjdHGVJxFOl3B0k9XR1JVYAtjLG0yRmLjj/Oio/Gdr+7FV3jVHQdyqXoGqXov+I3697sw42b3P14//vinVyg67ket3knplBdp4OFs9J1oePubsU3Zqr4xlmKv1j8YViAZfHFsgAd2Z0VC9QkvzGQvWEOAmz4rJnmCIpvzVTxLZbio4it+Aei2j/t/1XR+UOh843eQSAnS+RY6WaJHDziJqspqjAnpwo6yLpQRS+q4I1UmoNPM14meDpUyUUVwUIV01CFMzVVkDQ1QdN8nyK0QPyHTvYgSU2SsAfpdzmGGMf4nXhhcPPsHjEAMp1/joi+f017FmI2UG+32whts6P5DSvEsYBGjy0U1aKJgBVQ1B3FGSu/Ddjtk7OzJAMMlJK07++zCQaOuQoKKqqa3VxHm5H8tyrPbmgjsAp33MKSi1XouAVW/dhL/ZEHdMAvDQXrpD9pVrUXbFvYA4zCHrTqd/BGpgllHGRNY3HGDdBtBWoV3tB0hvdg48sggzfGG2s4QkjjAkWuqmWzWveT3dh5O7CWAq2ZNRE2JOpQD9Jgx4NOjDJeqiszflEKXLoBdizU2kAlKbPA/96HwVvovyZOxXHhBZoXrJpsGZSQDx6E8IFuU9Lu7SN+WmlqJBmvjeGJQRUjTprd5+X/TR1KMrgTSuRy20jl11wF3zbpKDMKBk82vwwFfGlFVxgUHM2R1/Fgv/wBFcYHlp4P8CdLLjLgvHV/enXXOdXdlCsCaNAljrMSvGSkz40CuSacDbrYuS2X9IpIf5JkUuzlT833BiWsK0t/lzbjvaW4vqmGdmiWMnlZSpeLpcw+rqmkJEWtytBMUlMsw1BnKtYqDGKZqr0q9QqYah6FOhLQls1LW4MvMXEZgun4aItzdUW0VQH2dBymgck5bOYhVbkGWWQEPbesXtLvEbN6lwHaMFl6ORV0ZekBp77Yy3BwofP0FtI4nzQgr/MgV34v6feI+b0LaQxEGgYnaYzmZ5gzD+aOvYZYN0twx28lYwk6ftujRFhSxZc7UkJpPytSIlb76ZjurLRf6HKC3VTAHSSVjAroIGmPouErooKJog91XmBGH8Tygth1/T6Ho8Abi7QHzx2+DAuMWORMC36k9g7qBT94bCBFwY8pJnX/XL0+Z23QyUMLNm9owRicC9jp/zeWoxhOKd+smmIMAd/6n/RzdbPyIFCH6chlBSTzXVLwVhyOK0fy4IkB0yIZQsFIppNsc5/84AYVjJv/vCYbaaT28OaYgvYWXwD0w0dxsjSip1z7Rp/edPeJWQ6ej4dVERlMrkr70OD6YwsYVxWGt4aCWOroZOvuEnPqrV3/Nmvfe5tNOn3Pch6qat3Pf4D6iKb+xsH22mpM74UGbfZ19jYWpG1wq2/RQeTL3LfLUxbKjhhkeHec5dwjV+fotUI6kEiOzt1jlVuBEb04i14RUHJ58gbxx5ZnfVVPSeTJsdeQXPKURT9NOeVJF8BLLk9Z9NOWU550TFVyefJGRwTbT2jKIU+6Pl1yecqin/X0YEnkSUc1JZenLPppyilPelE7yeUpi37aUsqT9Kokz78YgQoZVyyuhS4mXKWcNRZlTCnYIqcWbTqOxRKsjLPK3II9cwrxMidpcsFyRIzk3v2WYmTu3W8nWCWU2v0W2CZJLJ5y
/1tH7PKAxQTJ91VpfoQ9WTJZoVTzcmKXAmb06RQAoUL6m4aqq8vLQc1RVEhFsnvPrlz2MyPPvQBN0Bw4awawFdL5xGM+C/m9fNA0Z14gNut6Addes4qq6jjjbLDFuww/IPZMkmwZwFiIn8aO73uHY9O0VQkQ7vGA1smfnxo3WjL5BmfNm6TVlkR8ST9Vo+aM6arcAMtRTM0odLc+IQsVmzZbrOHGaKtTAo5gwOeSGbCUJAEp/0DpZGbRhQvi/AxypitFsUgA+V4+dyYtC6Rf7k1YpaNfjjjQ51Jl2KrKeBShWKWzKmtZMZF6rQkeP/R3tWpaLd7ZuqBUrVv1iWpybE0uVxUDINGOK0pa720RzPRTx0hy8OTGMYrSGAZ+R6x6GbJA/eCI4i2+B5pcme95hGUKRJ2Bpy4m4lgdnYmyzqXRM15qYrLpACUbRUF6VmM87+RFdTXVZXonDxbTO8mjUuK8E8Nu907gxO6I4K2PzhpmVGI/lw4y5uuOyLU0NrDFImdxRwZHlAblQhR58mI9iPXQW60HTOa+ph7P6gwZCRrPzsN8DK7fcK7eoTUdVPgthuLYVcAohuN0ggYflabyGbVv0oQ++eFDCsBkgY8tFj6LwzE4ojTJEEV6vjgcxOFQoeJYHQ6HSp2ezPmAHFVfn2o2xGmV32kyZGqZmWK9gCWBaiCuPS+zSdOgYpQym1S9+gtDZVB1/c7YKVQkQV9CWJ8XTe/tmxRmrMHwDe14aPyRDskcD22JdPSKk0PbYOX+ip24b98fRg4F7zd132uI2h9Vg+v7bAca1nRJH2cFyYYfuIo3DvxgkWxWnvR8MQ7EOLSHwTVgK+bUYXBN5Eh0FkJrDyWchFZy16c27zrHgiNyl3dRC5Bwl3flhZoTlnclS0ZVPjQeRJd65ZuMS+jyLaGK4UMVwBATquj6nbFDFYy9l6RLQTadasWXogKz0ydlzML1d1TzWG5D9HdoR1XnLgUzJKtFMMWmj0o3Ip4TdnTJcgHIJj7LZC7DkSMztR2TvHBSRMmWj6gvham9ClMBNJWpR82MDd2kywjqlT42N/vBu2eDfL6H2PLYxX4MjijpshPF7glzxqho0DUw5kQ+1uABggtdjWU9hZqr0b6eghSuhthY3mIwOFnA4C92dORiAUveRLaxavmnNxr8cJHNaBgia2NnYTQ6Vu5QdaV8lqyJNpkFsa9vyMFRRTC8kRmaEoiiz6+ihbnn0WemhI709ZQRpmYBVp7AWPtUNS7/vOxTNQzeWvepsqbfpgrYrH1TRoPbsi/alHgDJswXnB8Fc/gwChMU5Oce8Rvb/RZuUHLF/wE=</diagram></mxfile>
|
2112.08078/main_diagram/main_diagram.pdf
ADDED
|
Binary file (55 kB).
|
|
|
2112.08078/paper_text/intro_method.md
ADDED
|
@@ -0,0 +1,121 @@
| 1 |
+
# Introduction
|
| 2 |
+
|
| 3 |
+
Urban transportation systems are generally multimodal in nature, consisting of several interconnected subsystems representing different modes of transportation, such as trains, buses, and cars. They are designed to meet diverse travel demand and provide urbanites with multiple travel options in cases of service disruptions. With growing urban population, limited capacities of transport infrastructure, and increasing concerns about urban resilience, it is more important than ever to plan, manage, and operate multimodal transportation systems in an integrated fashion. For example, ride-hailing services can be intelligently deployed to help people better access mass transit services, or replace certain transit trips when the transit system is overcrowded or delayed. Such multimodal operating strategies depend on the accurate and timely joint prediction of travel demand for different transportation modes, which is the focus of this study.
|
| 4 |
+
|
| 5 |
+
With the wide availability of mobility data and rapid advancement in computing technologies, short-term travel demand prediction has received much attention, though most studies have focused on demand prediction for a specific target mode. While earlier methods are based on various regression models (e.g., ARIMA), more recent efforts focus on deep learning-based approaches, particularly Graph Neural Networks (GNNs), due to their ability to extract complex spatiotemporal knowledge among large-scale mobility data [@li2017diffusion; @geng2019spatiotemporal]. Despite the improved prediction performance, these methods still regard the target transportation mode as a closed system and ignore its potential interaction with other modes. In practice, there usually exist certain spatiotemporal correlations between different transportation modes through individual mode choices, passenger transfers (between modes), or trip chaining activities. For instance, the passenger flows of a subway station may affect the usage of ride-hailing service in the area since travelers may use ride-hailing as feeders to subway stations [@irawan2020compete]. Therefore, it is likely that the demand patterns of one mode can help us predict the future demand of a different mode. In addition, a deeper understanding of the intricate dependencies for multimodal travel demand can allow us to better formulate multimodal operating strategies to mitigate traffic congestion, improve user experience, and enhance system resilience.
|
| 6 |
+
|
| 7 |
+
One major challenge for multimodal demand prediction is that different transportation modes have diverse spatial units: some are *station-based* (e.g., subway), while others are *stationless* (e.g., ride-hailing). For station-based modes, their operations are at the station level, and thus the demand prediction should match the same spatial granularity. For stationless modes, the operators usually define a number of service zones as the basic units of operations. To jointly model multimodal travel demand, recent works typically aggregate multimodal demand to a spatial grid [@ye2019co; @wang2021learning] or other well-defined zone partitions [@ke2021joint]. Based on the same spatial structure, a similar model architecture can then be performed for different modes to learn shared spatiotemporal features [@wang2020multi]. These methods are generally less suitable for station-based modes, as a zone may contain 0 or multiple stations. Focusing on multimodal demand prediction for station-based public transit services, Li et al. [@li2021multi] developed a memory-augmented recurrent model for knowledge adaptation from a station-intensive mode to a station-sparse mode, though it only stores and shares mode-level temporal knowledge and is unable to leverage cross-mode spatial dependencies. To summarize, despite extensive research, there are still two important research gaps to be addressed:
|
| 8 |
+
|
| 9 |
+
- Most prior works focus on single-mode demand prediction and only consider intra-modal spatiotemporal correlations, ignoring its potential interaction with other modes. Inter-modal relationships exist because of complex travel behavior and can vary over space and time, making them difficult to model.
|
| 10 |
+
|
| 11 |
+
- Existing approaches for multimodal demand prediction usually require aggregating multimodal demand data based on the same zone partition to enable shareable feature learning. These methods are unable to capture the cross-mode heterogeneous spatiotemporal correlations in general multimodal systems with multiplex networks and diverse spatial units.
|
| 12 |
+
|
| 13 |
+
The overall objective of this study is to address the aforementioned issues by developing a multi-task multi-relational spatiotemporal graph neural network (ST-MRGNN) approach to multimodal demand prediction. Based on the proposed approach, the spatial dependencies across different spatial units (e.g., stations or zones) of different modes are encoded with multiple intra- and inter-modal relation graphs, with node-specific representations learned through a generalized graph convolution network and summarized via an attention-based aggregation module. Spatiotemporal blocks are also used to extract heterogeneous spatiotemporal relationships from the data. Empirical validation is done based on real-world multimodal datasets from New York City (NYC). The specific contributions of this study are summarized as follows:
|
| 14 |
+
|
| 15 |
+
- We propose ST-MRGNN, a graph learning-based approach to demand prediction for multimodal systems. To the best of our knowledge, this is the first multimodal demand prediction model to account for heterogeneous spatiotemporal dependencies in multimodal systems with diverse spatial units.
|
| 16 |
+
|
| 17 |
+
- A multi-relational graph neural network (MRGNN) is developed to model spatial dependencies among diverse spatial units. Specifically, the cross-mode dependencies are encoded with multiple intra- and inter-modal relation graphs, the message passing mechanism within each relation graph is learned with a generalized graph neural network, and the information from different relations is summarized with an attention-based aggregation module.
|
| 18 |
+
|
| 19 |
+
- We introduce the design of multi-relational spatiotemporal (ST-MR) blocks to jointly model spatiotemporal correlations between heterogeneous spatial units. Specifically, each ST-MR block captures mode-specific temporal patterns with gated convolution layers and fuses the heterogeneous temporal information in MRGNN layers.
|
| 20 |
+
|
| 21 |
+
- Extensive experiments are conducted based on real-world subway and ride-hailing datasets from NYC. The results show that the proposed approach outperforms existing methods across different modes, and the improvement is larger for demand-sparse locations.
|
| 22 |
+
|
| 23 |
+
# Method
|
| 24 |
+
|
| 25 |
+
In this section, we first define a few important concepts and formulate our problem. Next, we introduce a new spatiotemporal modeling framework for multimodal demand prediction, which incorporates multi-relational graph neural networks (MRGNNs) with temporal convolution networks (TCNs) to jointly model heterogeneous spatiotemporal correlations across multiple modes.
|
| 26 |
+
|
| 27 |
+
*Definition 1 (Multimodal Transportation System):* A multimodal transportation system $M$ is composed of $k$ $(k>1)$ transportation modes (e.g., subway, ride-hailing, etc.). Each transportation mode $m = 1, 2, ..., k$ has $N_m$ nodes (e.g., stations/zones) as the basic units for passengers to travel from or to. For each node $i = 1, 2, ..., N_m$ at time step $t$, its outflow (or departure) and inflow (or arrival) demand can be represented as a 2-dimensional vector $x_{m, i}^t\in \mathbb{R}^2$. The demand of all the nodes from mode $m$ at time step $t$ is represented as $X_m^t=\{x_{m,1}^t, x_{m,2}^t, ..., x_{m,N_m}^t\}, X_m^t \in \mathbb{R}^{N_m \times 2}$. Further, we use $X^t=\{X_m^t,\forall m\}$ to denote the demand of all the transportation modes at time step $t$.
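
To make the notation concrete, here is a minimal NumPy sketch of the demand tensors defined above; the mode labels and sizes are illustrative assumptions, not values from the paper.

```python
import numpy as np

# Hypothetical bimodal system: mode 1 = subway (stations), mode 2 = ride-hailing
# (zones). All sizes below are made up for illustration.
N = {1: 120, 2: 60}   # N_m: number of nodes per mode
T = 12                # number of historical time steps

# x_{m,i}^t is a 2-vector (outflow, inflow); stacking all nodes of mode m
# over the historical window gives X_m^{t-T:t} with shape (T, N_m, 2).
X = {m: np.random.rand(T, N[m], 2) for m in N}

# X^t: demand of all modes at the latest time step, each X_m^t in R^{N_m x 2}
X_t = {m: X[m][-1] for m in N}
print(X[1].shape, X[2].shape)   # (12, 120, 2) (12, 60, 2)
```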
|
| 28 |
+
|
| 29 |
+
*Definition 2 (Intra-modal Relation Graph):* To capture spatial correlations among stations/zones of the same mode, we define an intra-modal relation graph for each mode $m$, denoted as $G_m=(V_m, A_m)$, where $V_m$ is a set of nodes for mode $m$, $|V_m|=N_m$, and $A_m \in \mathbb{R}^{N_m \times N_m}$ is a weighted adjacency matrix representing the intra-modal dependencies between each pair of nodes in $V_m$. In the multimodal system $M$, a total of $k$ intra-modal relation graphs are defined, denoted as $G_{intra}=\{G_m, \forall m\}$.
|
| 30 |
+
|
| 31 |
+
*Definition 3 (Inter-modal Relation Graph):* To capture the pairwise correlations among nodes from different modes, we further define an inter-modal relation graph between each mode pair $m, n = 1, 2, ..., k$ ($m \neq n$), which is represented as $G_{mn}=(V_m, V_n, A_{mn})$. $V_m$ and $V_n$ are nodes from mode $m$ and $n$ respectively and $A_{mn} \in \mathbb{R}^{N_m \times N_n}$ is a weighted matrix indicating the cross-mode dependencies between each node in $V_m$ and each node in $V_n$. Note that, unlike $A_m$, $A_{mn}$ is usually not a square matrix. A total of $\binom{k}{2}=k(k-1)/2$ inter-modal relation graphs are defined in the multimodal system $M$, denoted as $G_{inter}=\{G_{mn},\forall m, n\}$.
|
| 32 |
+
|
| 33 |
+
*Problem (Multimodal Demand Prediction):* Given the historical observations of multimodal demand, and the intra- and inter-modal relation graphs, the multimodal demand prediction problem aims to jointly predict the demand for all transportation modes at the next time interval. Specifically, given the historical observations of two modes $m$ and $n$ denoted as $X_m^{t-T:t}, X_n^{t-T:t}$, the intra-modal relation graphs $G_m, G_n$, and the inter-modal relation graph $G_{mn}$, the goal is to learn a mapping function $F(\ast)$ that jointly predicts the demand for all transportation modes at time interval $t+1$, denoted as $X^{t+1}$: $$\begin{equation}
|
| 34 |
+
X^{t+1}=F(X^{t-T:t}, G_{intra}, G_{inter}).
|
| 35 |
+
\end{equation}$$
|
| 36 |
+
|
| 37 |
+
While the problem is general, we will only focus on a bimodal system with subway and ride-hailing as a case study to demonstrate the proposed model in our experiments (see Section [4](#sec:results){reference-type="ref" reference="sec:results"}). To highlight the issue of heterogeneous spatial units, subway is chosen as an example of station-based modes, and ride-hailing an example of stationless modes. In this case, $k=2$ and there are 3 relation graphs defined to encode cross-mode correlations, including 2 intra-modal graphs and 1 inter-modal graph.
|
| 38 |
+
|
| 39 |
+
Let us first introduce the overall framework of our proposed model. As shown in Figure [1](#fig:ST-MRGNN){reference-type="ref" reference="fig:ST-MRGNN"}, ST-MRGNN is composed of $L$ multi-relational spatiotemporal blocks (ST-MR blocks) to uncover the heterogeneous spatiotemporal patterns across multiple modes, and an output layer for each mode to generate the final predictions. Each ST-MR block comprises several gated convolution layers (TCNs) to capture temporal features and a multi-relational graph neural network (MRGNN) to capture spatial features. Specifically, in each ST-MR block, a separate TCN layer is first applied to each mode to capture mode-specific temporal patterns. The extracted mode-specific features from the TCN layers are then fused in an MRGNN layer to jointly model heterogeneous spatiotemporal dependencies for each mode. The MRGNN layer is followed by another mode-specific TCN layer, because our experiments show that this improves prediction performance. A possible explanation is that such a \"sandwich\" structure facilitates fast spatial-state propagation between graph convolutions through the TCN layers [@yu2017spatio]. The details of each module are introduced below.
|
| 40 |
+
|
| 41 |
+
<figure id="fig:ST-MRGNN" data-latex-placement="ht!">
|
| 42 |
+
<img src="fig/ST-MRGNN.png" />
|
| 43 |
+
<figcaption>The architecture of ST-MRGNN (<span class="math inline">⊕</span> denotes residual connections)</figcaption>
|
| 44 |
+
</figure>
|
| 45 |
+
|
| 46 |
+
In this subsection, we introduce a novel graph neural network named MRGNN, which is capable of capturing heterogeneous spatial dependencies between nodes across multiple modes. To illustrate the key idea of MRGNN, we present the framework of MRGNN for a bimodal transportation system in Figure [2](#fig:MRGNN){reference-type="ref" reference="fig:MRGNN"}. MRGNN consists of three major parts: (1) *spatial dependency modeling*: two types of spatial dependencies are considered for each relation graph, namely geographical proximity and functional similarity; (2) *intra- and inter-modal graph convolutions*: a generalized GCN is introduced to aggregate the node-level neighborhood information from intra- and inter-modal relation graphs; and (3) *relation aggregation*: an attention-based aggregation module is designed to summarize the aggregated features from different relations. The three parts are described in detail below.
|
| 47 |
+
|
| 48 |
+
<figure id="fig:MRGNN" data-latex-placement="ht!">
|
| 49 |
+
<img src="fig/MRGNN_2mode.png" style="width:90.0%" />
|
| 50 |
+
<figcaption>A bimodal framework for MRGNN</figcaption>
|
| 51 |
+
</figure>
|
| 52 |
+
|
| 53 |
+
Previous research has shown that spatial dependencies exist between not only spatially adjacent locations, but also distant locations with similar functionalities and contextual environments [@geng2019spatiotemporal]. To capture both geographic and functional correlations among locations, we encode two types of spatial dependencies for each graph:
|
| 54 |
+
|
| 55 |
+
*Geographical Proximity:* Locations that are geographically close to each other are likely to display strong correlations. We encode such geographical relationships among nodes using a distance-based adjacency matrix $A_G$. Formally, with the geographic center of each zone and station, we can compute $A_G$ as: $$\begin{equation}
|
| 56 |
+
A_{G, ij} =
|
| 57 |
+
\begin{cases}
|
| 58 |
+
\exp(-(\frac{d_{ij}}{\sigma_d})^2) & d_{ij} \leq \kappa_d,\\
|
| 59 |
+
0 & d_{ij} > \kappa_d,
|
| 60 |
+
\end{cases}
|
| 61 |
+
\end{equation}$$ where $A_{G, ij}$ is the weight of geographical proximity between nodes $i$ and $j$, $d_{ij}$ is the distance between $i$ and $j$, $\kappa_d$ is the distance threshold and $\sigma_d$ is the standard deviation of distances.
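
As an illustration, this thresholded Gaussian kernel can be computed as in the sketch below (our own minimal NumPy version; the coordinate arrays and threshold are placeholders, not values from the paper):

```python
import numpy as np

def geo_adjacency(coords_a, coords_b, kappa_d):
    """Thresholded Gaussian kernel A_G between two node sets.

    coords_a: (N_m, 2) geographic centers of mode m's stations/zones
    coords_b: (N_n, 2) geographic centers of mode n's stations/zones
    (pass coords_b = coords_a to build an intra-modal graph)
    """
    # Pairwise Euclidean distances d_ij
    d = np.linalg.norm(coords_a[:, None, :] - coords_b[None, :, :], axis=-1)
    sigma_d = d.std()                    # standard deviation of distances
    A = np.exp(-((d / sigma_d) ** 2))    # Gaussian kernel weights
    A[d > kappa_d] = 0.0                 # cut off pairs beyond the threshold
    return A
```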
|
| 62 |
+
|
| 63 |
+
*Functional Similarity:* Locations that display similar demand patterns are also likely to share some common functionalities or other contextual features. To capture such semantic correlations, we construct an adjacency matrix $A_P$ for each graph. To deal with the varying demand of different modes, we first normalize the demand series of each mode and $A_P$ is given as: $$\begin{equation}
|
| 64 |
+
A_{P, ij} = \frac{Cov(p_i, p_j)}{\sigma_{p,i} \sigma_{p,j}},
|
| 65 |
+
\end{equation}$$ where $A_{P, ij}$ indicates the weight of functional similarity between nodes $i$ and $j$, $p_i$ and $p_j$ are the normalized historical demand series of nodes $i$ and $j$, $Cov(\ast)$ computes the covariance of two time series vectors, and $\sigma_{p,i}$ and $\sigma_{p,j}$ are the standard deviations of $p_i$ and $p_j$ respectively, so that $A_{P, ij}$ is their Pearson correlation coefficient.
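
A matching sketch for $A_P$ (again our own illustration, not the authors' code): NumPy's `corrcoef` returns the Pearson coefficient directly, and the cross block of the stacked correlation matrix yields the non-square inter-modal case.

```python
import numpy as np

def functional_adjacency(series_a, series_b):
    """Pearson-correlation-based A_P between two node sets.

    series_a: (N_m, T) normalized historical demand series of mode m
    series_b: (N_n, T) normalized historical demand series of mode n
    """
    n_a = len(series_a)
    full = np.corrcoef(series_a, series_b)   # ((N_m+N_n), (N_m+N_n))
    return full[:n_a, n_a:]                  # cross-mode block, (N_m, N_n)
```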
|
| 66 |
+
|
| 67 |
+
Generally, for each relation graph, we can model $u$ types of spatial dependencies. Therefore, in a multimodal system $M$, a total of $u \times (k + \binom{k}{2})$ relations can be encoded, including $u \times k$ intra-modal relations and $u \times \binom{k}{2}$ inter-modal relations. Note that this is the maximum number of relations to consider in the model, and not all of them are necessary depending on the system configurations and demand patterns. In our case where $k=2$ and $u=2$, there are 4 intra-modal relations and 2 inter-modal relations. Figure [3](#fig:graph construct){reference-type="ref" reference="fig:graph construct"} presents an example of a multimodal system with subway and ride-hailing. The solid and dashed lines are used to distinguish spatial dependencies based on geographic proximity or functional similarity. The line colors are used to distinguish cross-mode relations.
|
| 68 |
+
|
| 69 |
+
<figure id="fig:graph construct" data-latex-placement="ht!">
|
| 70 |
+
<img src="fig/relation_illustration2.jpg" style="width:95.0%" />
|
| 71 |
+
<figcaption>Modeling spatial dependencies for a multimodal system</figcaption>
|
| 72 |
+
</figure>
|
| 73 |
+
|
| 74 |
+
Based on the multi-relational graphs introduced in Section [3.3.1](#method:construction){reference-type="ref" reference="method:construction"}, each node is connected to heterogeneous nodes from multiple ($k$) modes via multiple ($u\times k$) relations. It is natural to aggregate the features of connected nodes from each relation using graph convolutions. However, most GCNs are developed for graph structures with a square adjacency matrix and cannot be applied to inter-modal relation graphs with heterogeneous nodes and a non-square adjacency matrix. To solve this issue, we introduce a generalized graph convolution network (GGCN), which is an extension of the standard GCN proposed in [@kipf2016semi]. Recall that the input of MRGNN consists of the mode-specific temporal features learned from the TCN layers. Given an inter-modal relation graph $G_{mn} = (V_m, V_n, A_{mn})$ and the features extracted from the TCN layers, denoted as $H_m \in \mathbb{R}^{N_m \times c_{in}^s}$, $H_n \in \mathbb{R}^{N_n \times c_{in}^s}$, the graph convolution layer is then defined as: $$\begin{gather}
|
| 75 |
+
Z_{mn}^{(m)} = g({\widetilde{A}_{mn}^{(m)}} H_n W_{mn}^{(m)}+b_{mn}^{(m)}),\\
|
| 76 |
+
Z_{mn}^{(n)} = g({\widetilde{A}_{mn}^{(n)}} H_m W_{mn}^{(n)}+b_{mn}^{(n)}),
|
| 77 |
+
\end{gather}$$ where $g(\ast)$ denotes a non-linear activation function (ReLU in our case), and $Z_{mn}^{(m)} \in \mathbb{R}^{N_m \times c_{out}^s}$, $Z_{mn}^{(n)} \in \mathbb{R}^{N_n \times c_{out}^s}$ are the output features aggregated from mode $n$ onto mode $m$ and from mode $m$ onto mode $n$ respectively, where $c_{in}^s$, $c_{out}^s$ are the input and output vector dimensions of each node. $W_{mn}^{(m)}, W_{mn}^{(n)} \in \mathbb{R}^{c_{in}^s \times c_{out}^s}$ and $b_{mn}^{(m)}, b_{mn}^{(n)} \in \mathbb{R}^{c_{out}^s}$ are the learned model parameters. ${\widetilde{A}_{mn}^{(m)}} \in \mathbb{R}^{N_m \times N_n}$, ${\widetilde{A}_{mn}^{(n)}} \in \mathbb{R}^{N_n \times N_m}$ are the row-normalized adjacency matrices constructed from $A_{mn}$ as: $$\begin{gather}
|
| 78 |
+
{\widetilde{A}_{mn}^{(m)}} = \frac{A_{mn}}{rowsum(A_{mn})},\\
|
| 79 |
+
{\widetilde{A}_{mn}^{(n)}} = \frac{A_{mn}^T}{rowsum(A_{mn}^T)}.
|
| 80 |
+
\end{gather}$$
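
A minimal PyTorch sketch of this generalized graph convolution, written by us for illustration (class and variable names are not from an official implementation):

```python
import torch
import torch.nn as nn

class GGCNLayer(nn.Module):
    """One direction of the generalized graph convolution:
    Z = ReLU(A_tilde @ H @ W + b), where A_tilde may be non-square."""

    def __init__(self, c_in, c_out):
        super().__init__()
        self.W = nn.Parameter(torch.empty(c_in, c_out))
        self.b = nn.Parameter(torch.zeros(c_out))
        nn.init.xavier_uniform_(self.W)

    def forward(self, A, H):
        # Row-normalize A (N_m x N_n), then aggregate mode-n features
        # into mode-m nodes.
        A_tilde = A / A.sum(dim=1, keepdim=True).clamp(min=1e-8)
        return torch.relu(A_tilde @ H @ self.W + self.b)

# Usage: aggregate 60 ride-hailing zones' features into 120 subway stations
# A_mn, H_n = torch.rand(120, 60), torch.rand(60, 32)
# Z_m = GGCNLayer(32, 32)(A_mn, H_n)   # -> (120, 32)
```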
|
| 81 |
+
|
| 82 |
+
As introduced in Section [3.3.1](#method:construction){reference-type="ref" reference="method:construction"}, each graph can be encoded with $u$ types of spatial dependencies. To process multiple dependencies on a graph at the same time, we extend GGCN to multi-dimensional tensors, given as: $$\begin{gather}
|
| 83 |
+
\ddot{Z}_{mn}^{(m)} = g({\ddot{A}_{mn}^{(m)}} H_n \otimes \ddot{W}_{mn}^{(m)}+b_{mn}^{(m)}),\\
|
| 84 |
+
\ddot{Z}_{mn}^{(n)} = g({\ddot{A}_{mn}^{(n)}} H_m \otimes \ddot{W}_{mn}^{(n)}+b_{mn}^{(n)}),
|
| 85 |
+
\end{gather}$$ where $\otimes$ denotes the operation of batch matrix multiplication, $\ddot{A}_{mn}^{(m)} \in \mathbb{R}^{u \times N_m \times N_n}$, $\ddot{A}_{mn}^{(n)} \in \mathbb{R}^{u \times N_n \times N_m}$ are stacked normalized adjacency matrices and $\ddot{W}_{mn}^{(m)}, \ddot{W}_{mn}^{(n)} \in \mathbb{R}^{u \times c_{in}^s \times c_{out}^s}$ are the parameter matrices. The outputs $\ddot{Z}_{mn}^{(m)} \in \mathbb{R}^{u \times N_m \times c_{out}^s}, \ddot{Z}_{mn}^{(n)} \in \mathbb{R}^{u \times N_n \times c_{out}^s}$ are a set of cross-mode features between $m$ and $n$ from different spatial dependencies.
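
The tensorized version can be written as a single `einsum`; the sketch below is our reading of the batched operation $\ddot{A} H \otimes \ddot{W}$, not the authors' code:

```python
import torch

def multi_ggcn(A_stack, H, W_stack, b):
    """Batched GGCN over u spatial dependencies at once.

    A_stack: (u, N_m, N_n) stacked normalized adjacency matrices
    H:       (N_n, c_in)   input node features of the neighbor mode
    W_stack: (u, c_in, c_out) per-dependency weight matrices; b: (c_out,)
    Returns: (u, N_m, c_out), one aggregated feature set per dependency.
    """
    # For each dependency u: A_u @ H @ W_u, computed in one einsum
    Z = torch.einsum('unm,mc,ucd->und', A_stack, H, W_stack) + b
    return torch.relu(Z)
```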
|
| 86 |
+
|
| 87 |
+
An intra-modal relation graph can be regarded as a special case of inter-modal relation graphs when $m=n$. Therefore, given a mode $m$ and its intra-modal relation graph $G_m=(V_m, A_m)$, the correlations among nodes of mode $m$ are modeled as: $$\begin{equation}
|
| 88 |
+
\ddot{Z}_{m} = g({\ddot{A}_{m}} H_m \otimes \ddot{W}_{m}+b_{m}),
|
| 89 |
+
\end{equation}$$ where $\ddot{A}_{m}$ are stacked normalized adjacency matrices for mode $m$, and $\ddot{W}_{m}, b_m$ denote the model parameters.
|
| 90 |
+
|
| 91 |
+
Through the intra- and inter-modal graph convolutions, each node receives $u \times k$ aggregated feature vectors from its heterogeneous neighborhood nodes. Given a node $i$ in mode $m$, we denote the set of its aggregated features as: $$\begin{equation}
|
| 92 |
+
{z}_{i}^{(m)} = \{{z}_{i}^{(m, n, r)}\}, \forall n \in \{1, 2, ..., k\}, r \in \{1, 2, ..., u\},\\
|
| 93 |
+
\end{equation}$$ where ${z}_{i}^{(m, n, r)} \in \mathbb{R}^{c_{out}^s}$ denotes the features aggregated from the nodes of mode $n$ to node $i$ in mode $m$ based on spatial dependency $r$. Note that ${z}_{i}^{(m, n, r)}$ corresponds to intra-modal relations when $m=n$ and inter-modal relations when $m \neq n$.
|
| 94 |
+
|
| 95 |
+
To summarize the learned features from different relations for each node, an intuitive operation is to simply add them. However, the contribution of different relations may vary for different nodes. For example, a ride-hailing zone that is close to a subway station may be easily influenced by inter-modal relations, while one that is distant from any subway station may rely more on intra-modal relations. To capture such variation, we design a relation-level attention module to learn the contribution of each relation to the target node. Given a node $i$ in mode $m$, the attention module is formulated as: $$\begin{equation}
|
| 96 |
+
{a_i}^{(m)} = softmax(concat({z}_{i}^{(m)})W_{a}^{(m)} + b_{a}^{(m)}),
|
| 97 |
+
\end{equation}$$ where $concat(\ast)$ concatenates the learned features from all relations into a high-dimensional vector, and $W_{a}^{(m)} \in \mathbb{R}^{(u \cdot k \cdot c_{out}^s) \times (u \cdot k)}, b_{a}^{(m)} \in \mathbb{R}^{u \cdot k}$ are model parameters shared by all nodes in mode $m$. The resulting weight vector ${a_i}^{(m)} \in \mathbb{R}^{u \times k}$ contains the learned attention weights, with each element ${a_i}^{(m, n, r)}$ representing the contribution weight of nodes from mode $n$ to node $i$ in mode $m$ regarding spatial dependency $r$. Finally, node $i$ is represented as a weighted sum of the relation-specific features, given as: $$\begin{equation}
|
| 98 |
+
{h}_{s, i}^{(m)} = \sum_{n=1}^{k}{\sum_{r=1}^{u}{{a}_{i}^{(m, n, r)}{z}_{i}^{(m, n, r)}}},
|
| 99 |
+
\end{equation}$$ where ${h}_{s, i}^{(m)} \in \mathbb{R}^{c_{out}^s}$ is the final output representation for node $i$ from the MRGNN layer.
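
The following PyTorch sketch shows one way to realize this relation-level attention (our illustration; `n_rel` stands for the $u \times k$ relations):

```python
import torch
import torch.nn as nn

class RelationAttention(nn.Module):
    """Softmax attention over the n_rel = u * k relation-specific features."""

    def __init__(self, n_rel, c_out):
        super().__init__()
        # One score per relation, computed from the concatenated features
        self.score = nn.Linear(n_rel * c_out, n_rel)

    def forward(self, Z):
        # Z: (N, n_rel, c_out) aggregated features per node and relation
        a = torch.softmax(self.score(Z.flatten(1)), dim=-1)  # (N, n_rel)
        return (a.unsqueeze(-1) * Z).sum(dim=1)              # (N, c_out)
```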
|
| 100 |
+
|
| 101 |
+
We employ the temporal convolution network (TCN) proposed in [@yu2017spatio] to capture the temporal patterns of nodes in the multimodal system. Compared with the RNN-based models widely used in time-series analysis, CNNs offer faster training and simpler structures. Given the input sequence of a node, the temporal convolution layer models the correlation between each time step and its $K_t$ neighboring steps using a 1-D causal convolution with a kernel of size $K_t$. Following [@yu2017spatio], the convolution is conducted without padding, so the output sequence length is shortened by $K_t-1$ each time.
|
| 102 |
+
|
| 103 |
+
Previous research has shown that gating mechanisms are critical for temporal modeling in both RNNs and temporal CNNs [@wu2019graph]. To control the ratio of information that passes through layers, an output gate is incorporated in the convolution layer. Mathematically, given a node $i$ of mode $m$ and its input sequence $h_{in,i}^{(m)}$, the temporal gated convolution takes the form: $$\begin{equation}
|
| 104 |
+
h_{c,i}^{(m)} = (W_{c,1}^{(m)} \star h_{in,i}^{(m)} + b_{c,1}^{(m)}) \odot \sigma(W_{c,2}^{(m)} \star h_{in,i}^{(m)} + b_{c,2}^{(m)}),
|
| 105 |
+
\end{equation}$$ where $h_{c,i}^{(m)}$ is the learned representation for node $i$ in mode $m$ from the TCN layer, $W_{c,1}^{(m)}$, $b_{c,1}^{(m)}$ are the model parameters for information learning, $W_{c,2}^{(m)}$, $b_{c,2}^{(m)}$ are the model parameters for computing the output gate, $\star$ is the convolution operation, $\odot$ is the element-wise product, and $\sigma(\ast)$ represents the sigmoid function.
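
A compact PyTorch sketch of this gated causal convolution (our own; the two branches are computed with a single `Conv1d` for efficiency):

```python
import torch
import torch.nn as nn

class GatedTCN(nn.Module):
    """Gated 1-D causal convolution: (W1 * h) elementwise-gated by
    sigmoid(W2 * h). No padding, so the sequence shortens by K_t - 1."""

    def __init__(self, c_in, c_out, K_t):
        super().__init__()
        self.conv = nn.Conv1d(c_in, 2 * c_out, kernel_size=K_t)
        self.c_out = c_out

    def forward(self, h):
        # h: (batch * nodes, c_in, T) -> (batch * nodes, c_out, T - K_t + 1)
        p, q = self.conv(h).split(self.c_out, dim=1)
        return p * torch.sigmoid(q)   # information branch gated by sigmoid branch
```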
|
| 106 |
+
|
| 107 |
+
To integrate correlations from the spatial and temporal domains, we combine MRGNN with TCN in an ST-MR block. Each ST-MR block comprises two TCN layers with an MRGNN layer in between. The input of the first ST-MR block is the historical multimodal demand series. In implementation, the same MRGNN layer is applied to each time step in parallel. Similarly, the TCN layers are generalized to 3D tensors by applying the same convolution kernel to every node. To speed up training, we employ a residual connection [@he2016deep] between the TCN layers, given as: $$\begin{equation}
|
| 108 |
+
\label{eq:st-block}
|
| 109 |
+
H_{\rho} = H_{c1} + H_{s},
|
| 110 |
+
\end{equation}$$ where $H_{\rho}$ is the input of the second TCN layer in an ST-MR block, and $H_{c1}$ and $H_{s}$ are the outputs of the first TCN layer and the MRGNN layer, respectively. To stabilize the model performance, layer normalization [@ba2016layer] is applied at the end of each ST-MR block.
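
Putting the pieces together, one ST-MR block could be sketched as follows (a sketch under our own assumptions: `tcn1`, `mrgnn`, and `tcn2` stand for modules like those above, and tensors are assumed feature-last, i.e., (batch, time, nodes, channels)):

```python
import torch.nn as nn

class STMRBlock(nn.Module):
    """One ST-MR block: TCN -> MRGNN (+ residual) -> TCN -> LayerNorm."""

    def __init__(self, tcn1, mrgnn, tcn2, c_out):
        super().__init__()
        self.tcn1, self.mrgnn, self.tcn2 = tcn1, mrgnn, tcn2
        self.norm = nn.LayerNorm(c_out)

    def forward(self, x):
        h_c1 = self.tcn1(x)               # mode-specific temporal features
        h_s = self.mrgnn(h_c1)            # cross-mode spatial fusion per time step
        h_rho = h_c1 + h_s                # residual connection: H_rho = H_c1 + H_s
        return self.norm(self.tcn2(h_rho))
```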
|
| 111 |
+
|
| 112 |
+
Recall that the input length of the historical demand sequence is $T$, and the sequence length is shortened by $K_t-1$ after each TCN layer. After stacking $L$ ST-MR blocks, the length of the output sequence from the ST-MR blocks is shortened to $T-2L(K_t-1)$; for example, with $T=12$, $K_t=3$, and $L=2$, the output length is $12 - 2 \times 2 \times 2 = 4$. If the output sequence is still longer than one, an extra TCN layer is attached for each mode to downscale the outputs to a single time step. The output layer is a feed-forward network which maps the output signals of the ST-MR blocks to the prediction result of each mode.
|
| 113 |
+
|
| 114 |
+
The training objective of our proposed model is to minimize the difference between the real demand and the predicted one across all nodes from all modes. The loss function is defined as: $$\begin{equation}
|
| 115 |
+
\label{eq:loss}
|
| 116 |
+
L(\theta) = \sum_{m=1}^{k}{\sum_{i=1}^{N_m}{\epsilon_m||\hat{x}_{m, i}^{t+1}-{x}_{m, i}^{t+1}||}},
|
| 117 |
+
\end{equation}$$ where $\hat{x}_{m, i}^{t+1}$, ${x}_{m, i}^{t+1}$ are the predicted and true demand values for node $i$ of mode $m$ at time step $t+1$ respectively, and $\epsilon_m$ are the pre-determined weights used to balance the losses of different modes, with $\sum_{m \in M}{\epsilon_m}=1$. The training process of our proposed model is summarized in Alg. [\[alg:train\]](#alg:train){reference-type="ref" reference="alg:train"}.
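
As a sketch, the weighted loss can be written as below (our illustration; we read the norm in the loss as an elementwise absolute error, which is an assumption on our part):

```python
import torch

def multimodal_loss(preds, targets, eps):
    """Weighted sum of per-mode prediction errors.

    preds, targets: dict mode -> (N_m, 2) demand tensors at t+1
    eps: dict mode -> fixed weight epsilon_m, with sum(eps.values()) == 1
    """
    return sum(eps[m] * torch.abs(preds[m] - targets[m]).sum() for m in preds)
```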
|
| 118 |
+
|
| 119 |
+
::: algorithm
|
| 120 |
+
Initialize the parameters of ST-MRGNN
|
| 121 |
+
:::
|
2203.14250/main_diagram/main_diagram.drawio
ADDED
|
@@ -0,0 +1 @@
|
|
| 1 |
+
<mxfile host="Electron" modified="2022-02-19T15:13:04.402Z" agent="5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) draw.io/16.5.1 Chrome/96.0.4664.110 Electron/16.0.7 Safari/537.36" etag="AI6gcpwpN2V2UGIrf_7N" version="16.5.1" type="device"><diagram id="0c6-S5VIK_WB8jHA2Mcn" name="Page-1">7V1be6K6Gv41vdgXzUMSwuHSnmb2nNe4p2vam/0gRGVqxYXY1v76lShgSILSCpS2di5GAkbI9+b73u+QcIRPbx8+xN5s/DUK6OQIGcHDET47QghCh7D/eMty3WIjd90wisMgvWjT0A8fadpopK2LMKDzwoVJFE2ScFZs9KPplPpJoc2L4+i+eNkwmhR/deaN0l80Ng1935tQ5bK/wyAZr1sdIlz9kYajcfbL0EjP3HrZxWnDfOwF0b3QhM+P8GkcRcn60+3DKZ3wwcvGZf29i5Kz+Y3FdJpU+cLHz7+Tj5+864er2dfht4vp8tfy83Hay503WaQPnN5sssxGII4W04DyTowjfHI/DhPan3k+P3vPZM7axsnthB1B9jF/SH7tMJomp9Ekilcd4fUfbw8nE6H9wuP/+JeTOLqh2ZlpNGW/caI+ZnbPNE7og9CUPvYHGt3SJF6yS9KzOZhSDEInPb4XJGqnbWNBmii70EtRNMr73gw0+5COtX7c7Z/o6vZbvHyMzi7Q8vqv44vZ4zHSjLs1SdJBKwjA+mcRZSeO56sJ0mMXQDh72Jxkn0bp/6teBkpDnLVchvOFx29QPXU+9dkEjrMT7MmUfljb+g6zZgktDAAz/jFh0KCPER+EkxmNQzZsNBbbf2wad4FqGD7QTDXw4zhKvCSMpuzQNVQ8BYQ6gangiZ1x0ABb1qqHDNMF0MKa8IYIgFYBcgi7ANrG5g8pCNQB0LKAYzUEQfxyEOwtgjB6q/AbDofI93XwC6yBRVqAHzYARgX4YcMFpgo54gLL0aCOAMduCnWmIjMaMIubHqZKv2hzojgZR6No6k2+RNEsHag/NEmWqVS8RRIVZcZGL17+5t8HJDu8Es+dPaSdr4+WgsnKRG1tE8c8WsQ+3fKg6XMmXjyiybYBScXCR2GrdGM6YbC7K/KS2uVjKlrhw+k31vDFW/KZWcoNYIV5JI3trnnkBW5AdPPIoyZkw1b/zDGtIlFADtPaxN382coUgpZGbWMbkKbmj8oc3pGACMIAO4qMiKPKhdjAxK2KxtSR6SdaVKSzqDljO51483k4DAs2cqs93Ev+T+dVBXmP+N2merUO2WOJxbs2cKEqeENH5Jvi8aaqL0s5ib+IJ8uT2PNvuEHYJYyi5ETRrESw/ry2boo87F7PsYz8TOa4onoEgQ1JS5oEIFUQSCMHYgLbbUoU9m5R0GnQ40EBLo3VXPLLBl0mA9v8jFHsBSEt+LquHRi2vX2uVJfNTm4gcjrdsKdtlSlE+gs/onCltzLBuxZApCh7GwFT6mhNjdLvigEJqTvTNOS+ih2tuZPS0Qod+cPvARjntQOGPoSJQHLZ0VXGatnnDcXlBxnD3TBjkRcLNLmEGYssfUPIzzete3BlYlUky2bHJoTMF03JzlSdCkpHssFqeCqQpqhLf0apP35x6lLBJ9fMyeEwcFVaU4cFRUVxY5tZULcilcGA4IZMqINVBdiAcy4KCyuKDerEdHLWg+bJfkqmdke7xKxJJhLLsZSm57LqIUpz2c/HdTNd8SDwoDkQm4x5lo3J2uDWaF8FHWHqdMSXbQG/vMHbJyzI5mVSxKA21yBqjbTJm4Qj7gD7DG+rYCGf5aHvTXrpidswCFbmT6ehyumBWVPEgnFvW8KbbQODqMqEaHRJU24RIQoGL59qUGwdVuDTrUeFrFVV3lbR8a3DQmCZWTDPSY1y6Hws6ICmXKyMp9WWLtwx8H5AB85AN/CufZbS5boHnnRy4Cv4tq8d8XYXB95S4zteoyNfgak2rWswRsA1tka9m5NC7//2PA6vrpefFzfWtQUHH63weHvOSO+N1spSt3PSzA2HghOefdY74c9nsVZqWXe6yqSqq9xOXslSGUGj84hi3zEMrdgMlxia8pR1Xr4Fk/Ki80svnJbtetfccbvrAspusLVQZS6hrXIoN1GvLraNXMmLMlEx9VvssHJgT470sG6JUAZkSNU9JbEBJlhvKVw24xfMy59G97NpRdIGhutOa4092FXoEusnnM3LXOZKKTBNFeG+OoXdRTgdnURJEt2mmE3b0hlTt9axpPQHtpgr7wiIsyppncZ8+CxR11alTn5QLR/xjCBinhZ9Pv3KYi1NBxEtBgZRTcACVJBpAMcSoOJU0iK1zXKVy+UxPDZnppVCgYYuvHMnhHfWPb2qgJ5QMKMhmDn8Vl9Ln0VTxv10TeJC4LhFhGA9Ql5KmWSF1AdlIs4jqxPKxNbbnbaUiep7HJRJx5QJZyZqerJdBQIVsR0UiG13QYFgiIHsHzWtNNR4+EFpdEtpYAMDbCqYeTH9oQmYvJnShixP0oIeKMqYUQdX6qXpma8r2ntOdUNJmUFNasQrVSNvrYhB0jgNKZMuMBBdkPyJNXJarJyFt4Cd/EkDoK6J62ihv7w0pPaVOGpdCyIWED1aFyt4IBjoqlxMABurmNMl5t8kKCotayyCos0cjq4QiqdxVNrRPkiym6gdJJe4KhJqiLlvUxTPLGgvxtelCDxuACVKzJ2RVFeMfah1c63aGBc1hRR0QMpeSEEdy864NSzf1yOlconlASl6pMihd9QaUrTFU6rDVE8pbvheS3FtF6jUs+WKOFcz+xsc9i4U4nZh2GGFOpzXDne5DrcT467jRbI5OtQ/7bW211H8JxMBQzRkxT6fu8wXZxvR1FzzZCKsfQJx1yUiIXG/Eqj5j+Ty/nef/Lns9aNr/Jd117s5Rk9bVTzw5vTMm48rYlVLfrYs92e/dBHyB1jTITbUieaXV+3CdfulDTqCaBu6QAwXyQUSJnDt8qx3VXTbcqUdQsCtBvDaEKcjA08MYmsX7/b81fAjoz+j3g1fuavZM+6MJtRfRX0qJs44PL54AzopQr56jDqm7Ma9wao/jspUMbDOyckROVMnjLKPjQRv6J4SV4I3v27C7/LE829Gq/m4c2PC7fpAtsD5npXpoxyJ20LqLPOxASAkZi2TQ0ItlJyPaDic00bAiiswqJrVY89yzkzUnHrME3ivSz3KiouYwCENKETXArYUWW1YIeLyeOt+CjHbzeDt6UAthmvTgfmk75QOlJe9NaACf5HB7ALeR9//XNiL72d0+uvHXZU9dwUNmI50qfYLmI7abCX56vwO03Xz1eqpIMxsB4MnexoSx4NOY3XRWsE+zUl944K1bQOYriRY8jzB2lJMArqNbY+ThHD+30+fLr98/u6Ep/8b/ulfjY41cmx+/amwhalpOUeFckTX3bWR
6epI2ORWKM6QALKz5EiL9VRbistQh7/vP87/gfDm06/gw2R49fizf33c1qLTbTf5PvfKtOW4NS+rEYMhagYLWhhkvK/+TTO1IjosGKssUFdepmpi4JiKEJtKLum5TDtl2Y2UVTZdLZm7ULmHhat6Qkpf0FDdtUq2tDbSqiM3h7rLt1R3CQ2ewSmNmHJ1Iy4C0NiPdlWPbuvdQ63MC1RAOEqtjAXYrb5EVZUeKbq9Rw61Ml1ACoYAwxepqtIjpf3kXc5TDtHpAt/BkvWR9mRxnhlKUDu2N/uMtRQngk0l7A7x6Z3x6e3TvlPx6ewbma4sfr+5+DTSpegkuj74D7vgvNc/P2f//029mwkfhP5iRuO7cM6Un5Zvd5JY12HWpDcimNkQirEVR6MtcVOGLENLt95LYfkOHQy1tk6Y/8d8qFSu41FnWFKRZVPPonXJ0iL5+2RyloK1bxpx8nfRFC1gHWVweql2dn/uXLLPXkBfp7Sbjuw4DgOEkCuX4ELc52yoocaPDF69pnRdkfnUZg3K3WwlkrItEkQCe+BZ5ZGgyp4Y3rWHeNERqyOq5O+OKul+9BUYv+ajSq7ytiAHmJqAgAUMU1WmCIJs5f4+2rQ8NfVes0OO/MaR1bt9XjA7pBXRIcTXkcCNnHtC3Gtub0sPLTgOUb2uggPrCdBLAaXC7sYHX0grWFf1hUyUez0VfCEX4BreJKyVqn3whbrgC0EDEuCYpc4Qf0HgMx0gaHDH29pwEqx0TZBAWRrLiGvxV74R0Svyi/azlPPdDtDBLyrNtuMNnc4TpxDAbvhG7USaOrvdon5QzHZUqsWRgYrIMPjLjg1FFdafsdr65If9Fjuy36JlgGxHVPF12E9/aUVjpHtL0UWu/D0xi3SxmLzrJBIhyrZ3JtS/4txqKJWkF2T5vne5VPpj9uQBFyL1kkXM14yeT/0oKLzt9R3IEDnKLmQNy5AdxhHXqBt1z55t/JWNPb/iXw==</diagram></mxfile>
|
2203.14250/paper_text/intro_method.md
ADDED
|
@@ -0,0 +1,72 @@
| 1 |
+
# Introduction
|
| 2 |
+
|
| 3 |
+
In active speaker detection (ASD), the current speaker must be identified from a set of available candidates, which are usually defined by face tracklets assembled from temporally linked face detections [\[40,](#page-11-0) [5,](#page-10-0) [33\]](#page-11-1). Initial approaches to the ASD problem focused on the analysis of individual visual tracklets and the associated audio track, aiming to maximize the agreement between the audio signal and the visual patterns [\[40,](#page-11-0) [9,](#page-10-1) [57\]](#page-11-2). Such an approach is suitable for scenarios where a single visual track is available. However, in the general (multi-speaker) scenario, this naive correspondence will suffer from false positive detections, leading to incorrect speech-to-speaker assignments.
|
| 4 |
+
|
| 5 |
+
Current approaches for ASD rely on two-stage models [\[33,](#page-11-1) [30,](#page-10-2) [46\]](#page-11-3). First, they associate the facial motion patterns with the concurrent audio stream by optimizing a multimodal encoder [\[40\]](#page-11-0). This multimodal encoder serves as a feature extractor for a second stage, in which multimodal embeddings from multiple speakers are fused [\[1\]](#page-9-0).
|
| 6 |
+
|
| 7 |
+
These two-stage approaches are currently preferred given the technical challenges of end-to-end training with video data. Despite the computational efficiency of these approaches, their two-stage nature precludes them from fully leveraging the learning capabilities of modern neural architectures, namely directly optimizing the features for the multi-speaker ASD task.
|
| 8 |
+
|
| 9 |
+
In this paper, we present a novel alternative to the traditional two-stage ASD methods, called End-to-end Active Speaker dEtEction (EASEE), which is the first end-to-end pipeline for active speaker detection. Unlike conventional methods, EASEE is able to learn multi-modal features from multiple visual tracklets, while simultaneously modeling their spatio-temporal relations in an end-to-end manner. As a consequence, EASEE feature embeddings are optimized to capture information from multiple speakers and enable effective speech-to-speaker assignments in a fully supervised manner. To generate its final predictions, our end-to-end architecture relies on a spatio-temporal module for context aggregation. We propose an interleaved Graph Neural Network (iGNN) block to model the relationships between speakers in adjacent timestamps. Instead of greedily fusing all available feature representations from multiple timestamps, the iGNN block provides a more principled way of modeling spatial and temporal interactions. iGNN performs two message passing steps: first a spatial message passing step that models local interactions between speakers visible at the same timestamp, and then a temporal message passing step that effectively aggregates long-term temporal information.
|
| 10 |
+
|
| 11 |
+
Finally, EASEE's end-to-end nature allows the use of alternative supervision targets. In this paper, we propose a weakly-supervised strategy for ASD, named EASEE-W (shown in Figure [1\)](#page-1-0). EASEE-W relies exclusively on audio labels, which are easier to obtain, to train the whole architecture. To optimize our network without the visual labels, we model the inherent structure in the ASD task, namely the direct relationship between the audio signal and its possible
|
| 12 |
+
|
| 13 |
+
<span id="page-1-1"></span>
|
| 14 |
+
|
| 15 |
+
<span id="page-1-0"></span>Figure 1. **Fully and weakly-supervised audiovisual embeddings.** In the fully supervised scenario (left), we use the face crops as visual data and the Mel-frequency cepstral coefficients as audio data, we rely on visual and audio labels to directly optimize a shared feature embedding. In contrast, in the weakly supervised scenario, we omit the visual labels and optimize using only audio supervision. By modeling the visual-temporal consistency and speech-to-speaker assignments, we are able to optimize a shared embedding that can detect the active speakers without any visual supervision.
|
| 16 |
+
|
| 17 |
+
sound sources, i.e., the speakers.
|
| 18 |
+
|
| 19 |
+
**Contributions.** This paper proposes EASEE, a novel strategy for active speaker detection. Its end-to-end nature enables direct optimization of audio-visual embeddings and leverages novel training strategies, namely weak supervision. Our work brings the following contributions: (1) We devise the first end-to-end trainable neural architecture, EASEE, for the active speaker problem (Section 3.1), which learns effective feature representations. (2) In EASEE, we propose a novel iGNN block to aggregate spatial and temporal context based on a composition of spatial and temporal message passing. We show this reformulation of the graph structure is key to achieving state-of-the-art results (Section 4.1). (3) Based on EASEE, we propose the first weakly-supervised ASD approach that enables the use of only audio labels to generate predictions on visual data (Section 5).
# Method
The main goal of EASEE is to aggregate related temporal and spatial information from different modalities over a video segment. To enable efficient end-to-end computation, we do not densely sample all the available tracklets in a temporal window, but rather define a strategy to sub-sample audiovisual segments inside a video. We define a set of temporal endpoints where the original video data (visual and audio) is densely sampled. At every temporal endpoint, we collect visual information from the available face tracklets and sample the associated audio signal (see Figure 3). To further limit the memory usage, we define a fixed number of tracklets ($i$) to sample at every endpoint. Since the visual stream might contain an arbitrary number of tracklets, we follow [\[1\]](#page-9-0) at training time and sample $i$ tracklets with replacement. Hence, from every temporal endpoint, we create $i+1$ feature embeddings associated with it ($i$ visual embeddings from $f_v$ and the audio embedding from $f_a$).
<span id="page-3-1"></span>
<span id="page-3-0"></span>Figure 3. EASEE Sub-Sampling. For every temporal endpoint, we sample i face tracklets and the corresponding audio signal. This sampling is repeated over l consecutive temporal endpoints separated by stride k. The i+ 1 feature embeddings obtained at each timestamp are forwarded through the audio (yellow) and visual (light green) encoders fused into the spatio-temporal embedding Φi,k,l,t.
|
| 28 |
+
|
| 29 |
+
sual stream might contain an arbitrary number of tracklets, we follow [\[1\]](#page-9-0) at training time and sample i tracklets with replacement. Hence, from every temporal endpoint, we create i+ 1 feature embeddings associated with it (i visual embeddings from f<sup>v</sup> and the audio embedding from fa).
|
| 30 |
+
|
| 31 |
+
We create temporal endpoints over a video segment following a simple strategy: we select a timestamp $t$ and create $l$ temporal endpoints over the video at a fixed stride of $k$ frames. The location of every endpoint is then given by $L = \{t, t+k, \ldots, t+lk\}$. This reduces the total number of samples from the video data by a factor of $k$ and allows us to sample longer sections of video for training and inference.
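
A minimal Python sketch of this endpoint strategy follows; the helper name `endpoint_locations` and the example values are ours, and we enumerate the set $L$ exactly as written above (the start point $t$ plus $l$ strided points):

```python
def endpoint_locations(t, k, l):
    """Enumerate L = {t, t + k, ..., t + lk} (illustrative helper)."""
    return [t + j * k for j in range(l + 1)]

# Example: start at frame 40 with stride k = 8 and l = 5.
print(endpoint_locations(40, 8, 5))  # [40, 48, 56, 64, 72, 80]
```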
**Spatio-Temporal Embedding.** We build the embedding $\Phi$ over the endpoint set $L$. We define the spatio-temporal embedding $e$ at time $t$ for speaker $s$ as $e_{t,s} = \{f_a(t), f_v(s, t)\}$. Since there may be multiple visible persons at this endpoint (*i.e*. $|s| \ge 1$), we define the embedding for an endpoint at time $t$ with up to $i$ speakers as $E_{t,i} = \{e_{t,0}, e_{t,1}, e_{t,2}, \ldots, e_{t,i}\}$. The full spatio-temporal embedding $\Phi_{i,k,l,t}$ is created by sampling audio and visual features over the endpoint set $L$, thus $\Phi_{i,k,l,t} = \{E_{t,i}, \ldots, E_{t+k,i}, \ldots, E_{t+lk,i}\}$. As $\Phi_{i,k,l,t}$ is assembled from independent forward passes of the $f_a$ and $f_v$ encoders, we share weights for forward passes in the same modality; thus each forward/backward pass accumulates gradients over the same weights. This shared-weight scheme largely reduces the complexity of the proposed network, and keeps the total number of parameters stable regardless of the values of $l$ and $i$.
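
A rough NumPy sketch of this assembly is given below. The single weight matrices standing in for $f_a$ and $f_v$, and all dimensions (13-d audio features, 512-d face features, 64-d embeddings), are illustrative assumptions rather than the paper's actual encoders; the point is that `W_a` and `W_v` are created once and reused at every endpoint, mirroring the shared-weight scheme:

```python
import numpy as np

rng = np.random.default_rng(0)

# Stand-ins for the shared audio/visual encoders f_a and f_v: one weight
# matrix per modality, reused at every endpoint (weight sharing).
W_a = rng.standard_normal((13, 64))   # e.g. 13 MFCC coefficients -> 64-d
W_v = rng.standard_normal((512, 64))  # e.g. 512-d face-crop feature -> 64-d

def f_a(audio_feat):
    return audio_feat @ W_a

def f_v(face_feat):
    return face_feat @ W_v

def build_phi(endpoints, i):
    """Assemble Phi_{i,k,l,t}: (i + 1) embeddings per endpoint."""
    phi = []
    for t in endpoints:
        audio = f_a(rng.standard_normal(13))                       # 1 audio node
        faces = [f_v(rng.standard_normal(512)) for _ in range(i)]  # i visual nodes
        phi.append([audio] + faces)
    return np.asarray(phi)  # shape: (len(endpoints), i + 1, 64)

print(build_phi([40, 48, 56], i=2).shape)  # (3, 3, 64)
```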
Upon computing the initial modality embeddings, we map $\Phi_{i,k,l,t}$ into a spatio-temporal graph representation. Following [\[33\]](#page-11-1), we map each feature in $\Phi_{i,k,l,t}$ into an individual node, resulting in a total of $(i+1) \cdot l$ nodes. Every feature embedding goes through a linear layer for dimensionality reduction before being assigned to a node. Unlike [\[33\]](#page-11-1), we are not interested in building a unique graph structure that performs message passing over all the possible relationships in the node set. Instead, we choose to independently model the two types of information flow in the graph, namely spatial information and temporal information.
In EASEE, the GCN component fuses spatio-temporal information from video segments. This module implements a novel composition pattern where spatial and temporal message passing are performed in subsequent layers. We devise a building block (iGNN) where the spatial message passing is performed first, followed by the temporal message passing. After these two forward passes, we fuse the feature representation with the previously estimated feature embedding (residual connection). We define the iGNN block at layer $J$ as:
$$\Phi^s = M^s(A^s \Phi^J; \theta^s), \Phi^t = M^t(A^t \Phi^J; \theta^t)$$
$$iGNN(\Phi^J) = (M^t \circ M^s)(\Phi^J) + \Phi^J$$
Here, $M^s$ is a GCN layer that performs spatial message passing using the spatial adjacency matrix $A^s$ over an initial feature embedding ($\Phi^J$), thus producing an intermediate representation with aggregated local features ($\Phi^{J+1}$). Afterwards, the GCN layer $M^t$ performs a temporal message passing using the temporal adjacency matrix $A^t$. $\theta^s$ and $\theta^t$ are the parameter sets of their respective layers. The final output is complemented with a residual connection, thus favoring gradient propagation.
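
The following NumPy sketch mirrors the structure of the iGNN block under simplifying assumptions (identity matrices as placeholder adjacencies, a plain ReLU-activated GCN layer); it illustrates the spatial-then-temporal composition and the residual connection rather than the paper's exact layers:

```python
import numpy as np

def gcn_layer(A, X, W):
    """One message-passing step: aggregate neighbors, then transform (ReLU)."""
    return np.maximum(A @ X @ W, 0.0)

def ignn_block(X, A_s, A_t, W_s, W_t):
    """Spatial message passing, then temporal, plus a residual connection."""
    X_s = gcn_layer(A_s, X, W_s)    # local interactions at the same timestamp
    X_t = gcn_layer(A_t, X_s, W_t)  # long-term temporal aggregation
    return X_t + X                  # residual connection

rng = np.random.default_rng(0)
n, d = 9, 64                        # (i + 1) * l = 9 nodes, 64-d features
X = rng.standard_normal((n, d))
A_s = np.eye(n)                     # placeholders; in practice A_s links speakers
A_t = np.eye(n)                     # at one timestamp, A_t links timestamps
W_s = rng.standard_normal((d, d)) * 0.1
W_t = rng.standard_normal((d, d)) * 0.1
print(ignn_block(X, A_s, A_t, W_s, W_t).shape)  # (9, 64)
```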
In EASEE, the assignment of elements from the embedding $\Phi_{i,k,l,t}$ to graph nodes remains stable throughout the entire GCN structure (*i.e*. we do not perform any pooling). This allows us to create a final prediction for every tracklet and audio clip contained in $\Phi_{i,k,l,t}$ by applying a single linear layer. This arrangement creates two types of nodes: *Audio Nodes*, which generate predictions for the audio embeddings (*i.e*. speech detected or silent scene), and *Video Nodes*, which generate predictions for the visual tracklets (*i.e*. active speaker or silent). EASEE's final predictions are made only from the output of visual nodes.
<span id="page-4-2"></span>
<span id="page-4-0"></span>Figure 4. EASEE Weakly Supervised. We drop all the visual supervision (Lv) in EASEE (intermediate supervision included) and enforce positive predictions amongst the video nodes (light green) in the presence of a speech event (Ls), along with consistent visual feature representations if the nodes contain the same identity
|
| 49 |
+
|
| 50 |
+
*Nodes* which generate predictions for the visual tracklets (*i.e*. active speaker or silent). EASEE's final predictions are made only from the output of visual nodes.
|
| 51 |
+
|
| 52 |
+
**Losses & Intermediate Supervision.** Audio nodes are supervised during training, but their forward-pass output is not suitable for the ASD task. The training loss is defined as $L = L_a + L_v$, where $L_a$ is the loss over all audio nodes and $L_v$ is the loss over all the video nodes. $L_a$ and $L_v$ are implemented as cross-entropy (CE) losses. iGNN is also supervised with a CE loss, which is calculated individually at every node in the last layer.
State-of-the-art methods rely on fully supervised approaches to generate consistent predictions in the ASD problem. Typically, they work in a fully supervised manner in both learning stages, using audiovisual labels to train the initial feature encoder and also to supervise the second-stage learning [\[30,](#page-10-2) [46,](#page-11-3) [1,](#page-9-0) [33\]](#page-11-1). The end-to-end nature of EASEE enables us to approach the active speaker problem from a novel perspective, where the multi-speaker scenario can be analyzed relying on a weak supervision signal, namely only audio labels. In comparison to visual labels, audio ground truth is less expensive to acquire, as it only establishes the start and end points of a speech event. Meanwhile, labels for visual data must establish the fine-grained association between every temporal interval in the speech event and its visual source.
Directly training EASEE with audio labels only would optimize the predictions for the audio nodes (speech events). As outlined before, such predictions are suitable for the voice activity detection task, but performance on the more fine-grained ASD task would be poor, as the visual nodes lack any supervision and yield random outputs. To generate meaningful predictions for the visual nodes while relying only on audio supervision, we reformulate our end-to-end training to enforce information flow between modalities by adding two extra loss functions on the graph structure. This reformulation enables meaningful predictions over the visual data despite the lack of visual ground truth. We name this version of our approach EASEE-W, a novel architecture that is capable of making active speaker predictions that rely only on weak binary supervision labels from the audio stream. An overview of the key differences between EASEE and EASEE-W is shown in Figure [4.](#page-4-0)

<span id="page-4-1"></span>Figure 5. Weakly Supervised Losses. We enforce an individual speaker assignment if there is a detected speech event (left). Temporal consistency pulls together features for faces of the same person and creates differences for faces of different persons (right).
|
| 61 |
+
|
| 62 |
+
optimize the predictions for the audio nodes (speech events). As outlined before, such predictions are suitable for the voice activity detection task, but the more fine grained ASD task will have poor performance as the visual nodes lack any supervision and yield random outputs. To generate meaningful predictions for the visual nodes while relying only on audio supervision, we reformulate our end-to-end training to enforce information flow between modalities by adding two extra loss functions on the graph structure. This reformulation enables meaningful predictions over the visual data despite the lack of visual ground-truth. We name this version of our approach EASEE-W, a novel architecture that is capable of making active speaker predictions that rely only on weak binary supervision labels from the audio stream. An overview of the key differences between EASEE and EASEE-W is shown in Figure [4.](#page-4-0)
|
| 63 |
+
|
| 64 |
+
**Local assignment loss.** We design a loss function that models local dependencies in the ASD problem: if there is a speech event, we must attribute the speech to one of the locally associated video nodes. Let $V_t$ be the output of the video nodes at time $t$ ($|V_t| \ge 2$), and $y_{at}$ the ground truth for the audio signal at time $t$:
$$L_s = y_{at}(y_{at} - \max(V_t)) + (1 - y_{at}) \max(V_t)$$
The first term $y_{at}(y_{at} - \max(V_t))$ will force EASEE-W to generate at least one positive prediction in $V_t$ if $y_{at} = 1$ (*i.e*. select a speaker if speech is detected). Likewise, the second term $(1 - y_{at}) \max(V_t)$ will force EASEE-W to generate only negative predictions in $V_t$ in the absence of speech. While this loss forces the network to generate video labels that are locally consistent with the audio supervision, we show that these predictions only improve the performance over a fully random baseline and do not represent an improvement over trivial audiovisual assignments.
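
Since $L_s$ is stated in closed form, it can be transcribed directly; in the sketch below the per-speaker scores in `V_t` are assumed to lie in $[0, 1]$:

```python
import numpy as np

def local_assignment_loss(V_t, y_at):
    """L_s = y_at * (y_at - max(V_t)) + (1 - y_at) * max(V_t)."""
    m = np.max(V_t)
    return y_at * (y_at - m) + (1 - y_at) * m

V_t = np.array([0.1, 0.9, 0.2])       # per-speaker scores at time t
print(local_assignment_loss(V_t, 1))  # ~0.1: low loss, a speaker is selected
print(local_assignment_loss(V_t, 0))  # 0.9: high loss, all should be negative
```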
**Visual contrastive loss.** We complement $L_s$ with a contrastive loss ($L_c$) applied over the video data. As shown in Figure [5,](#page-4-1) the goal of this loss is to enforce feature similarity between video nodes that belong to the same person, and promote feature differences for non-matching identities. Considering that the AVA-ActiveSpeaker dataset [\[40\]](#page-11-0) does not include identity meta-data, we approximate the sampling of different identities by selecting visual data from concurrent tracklets[1](#page-5-0). To simplify the contrastive learning, we modify the sampling scheme for EASEE-W, and force $i = 2$ regardless of the real number of simultaneous tracklets. If there are more than 2 visible persons in the scene, we just sample without replacement.
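
The text specifies only the goal of $L_c$ (same-identity features pulled together, concurrent identities pushed apart), not its exact form, so the sketch below uses a generic cosine-similarity margin formulation purely as one plausible instantiation:

```python
import numpy as np

def cosine(a, b):
    return a @ b / (np.linalg.norm(a) * np.linalg.norm(b))

def visual_contrastive_loss(anchor, same_identity, other_identity, margin=0.5):
    """Pull same-tracklet features together; push concurrent tracklets apart."""
    pos = 1.0 - cosine(anchor, same_identity)                 # drive similarity to 1
    neg = max(0.0, cosine(anchor, other_identity) - margin)   # penalize similarity > margin
    return pos + neg

rng = np.random.default_rng(0)
a, b, c = rng.standard_normal((3, 16))
print(visual_contrastive_loss(a, b, c))
```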
In practice, we follow [\[7\]](#page-10-22) and apply this loss on the second-to-last layer of the iGNN block. Let $L_a$ be the loss for the audio nodes in the last iGNN block (see Figures [4](#page-4-0) and [5\)](#page-4-1); then the loss used for EASEE-W is $L_w = L_a + L_s + L_c$. No video labels are required, *i.e*. the speaker-to-speech assignments are unknown.
2204.11752/main_diagram/main_diagram.drawio
ADDED
@@ -0,0 +1 @@
<mxfile host="Electron" modified="2021-08-14T08:28:33.112Z" agent="5.0 (Macintosh; Intel Mac OS X 11_0_1) AppleWebKit/537.36 (KHTML, like Gecko) draw.io/14.6.13 Chrome/89.0.4389.128 Electron/12.0.7 Safari/537.36" version="14.6.13" etag="StQjrLB3TudEjvqiNVpf" type="device"><diagram id="rP0cr3mq70OuXlc6z4M3">7V1rc9s4lv01qpr9EBbej49tJ9nZqu6u1HZvze6nlGKrbU5kyy3JnaR//QISIZEA+JAEEJRJVU/GoihIwj24uPfcA2CGb5++/+d6/vL4y+p+sZwhcP99ht/PEJKEqX/1hR/7C1Si/YWHdX6/vwSPF37L/14UF0Fx9TW/X2wqN25Xq+U2f6levFs9Py/utpVr8/V69a162x+rZfVTX+YPxSeC44Xf7ubLhXPbv/L77WNxlYHS7f9c5A+PxUdL88LT/HDz/sLmcX6/+lb6LPxhhm/Xq9V2/9fT99vFUved6Zd9Qx9rXj18sfXiedvlDUW//zVfvha/rfhe2x/mx65Xr8/3C30/nOGbb4/5dvHby/xOv/pNWVdde9w+LYuX/1g9bwt7QfP8drVcrXdtYbB7qOsr1UK+1eYn+ul8fVe8i5pGii9Q/KK/Fuvt4nvtj4SHrlOQW6yeFtv1D3VL8QZG5P4tBdy42D/9VrIdLSz0WDIbRLzATIGXh0PTxy5VfxS96u9h7Olhov57+jzjN6/5jL/fP++734V+utmuV18X5sbn1fNCvzdfLkvvvd09IpiFg6pZIICOXbDHLGZQXmIV4ljl18Xreq5v+XWx/bZafw1nD9DJHgy4Xf9x93C6fv/84/wpX+p3/p4/KWeov/g39e9/r57mz+qWEONGkKqBMHYMJHwGgpcbiMZzTHwwfglmGEMJAcMYE8Qqnc1ARp3eRoRkknk8FRDm7ks6nTl9vLhXE1/xdLXePq4eVs/z5Yfj1ZujFXb9d7jn59Xqpej7fy+22x9FP85ft6uqZRbf8+3/6rdnnBZP/69oTf/9/nv5yQ/z5Fn9tP27pBTmwu59meDm+fG9u2eVN39arNW42S7WxcXNdr7e/qSDg6Mn3F37mOv+Kt54b92hrpReL4961ICyf78+vRT3MT+udL+fiqr1Yjnf5n9V3+eDQ/HWT6tctXhAIzVT1WG0o2oTm9Xr+m5RvKscUzgNoUwQeXxUmpUsQxZUVS8/LLZOwzu0Hn5fJwDztAA+C7+MoTJ+4QTec8ALRSDwQp4xCg4P6A9RwkNXpIUuOgu7VLIKdjMAxYTfs/ALD27xcgijjJXcrzUyAM9wCeAAxUK0bER0YUPLGsHcL+wKYaSykEr4ACBNCeGWsL5rojcMWBMJquCD8DxMq4YywOtiCghVUFF6UUYLMAwDNYF6xKBmoUDNBgJqk8zaHNWM3q7uV9sagkolvtsqaufL/EHZ8/2dsoDG0I1Oj/O7+fKn4oWn/P5+H7ksNvnf8y+7prRlX/TP2P0wejOj73VbakxsCkxYEKGnQeSyHJ1b3GGRI5eSchmJo4Iu5eHxNdUYMKTnARiVY8JMB8Od/A+s+J4WvxPCxVRpzDIc9q8Ykr4jdVbCmmyAmsUV6b6w/WcXh7X3FrMyHb8f2vtLQob3a50RiBMjEJAKAiUnZyCwLaOeEDhgBLrTzvAZHXoK+k5Jay+NjcqGhsy1tCFRgsdQ6hfOf5RuKKb72hDLnnVRwbvURVKMkab71R/7b3B2hEQnGPYHw2PV/dpgKCPDkHkCdbbUUfjmRU0cZYCyP1+1iGBnkHf7QPondQOEL9+PL6q/Hvb/ryP8VxPo75tU32bfqrnHHgHLZf6y0VBpqb3NNy97FcYf+Xc9HjrMZ7R7dboMPAugQRMBZmNBOpkAipUJNNc1IrMOnR0OhqQS+at4LWzo9WW5uvv6+2P+XHFPp8dUjfywlwg52a0J163hCAxFZwQJj+fQg/3l8+tY9SeMWvIG5ub20UZ0UnK884iGqBpEZACyliEdgkfsZZyfnBkBd0iTdCPafB1nRC9GPKJtwVKPI/oAmSlFCJAiVNKBCKPMH79zO8bryOyfmlc4n9OSV3AaN68wrM+E3eDYZaPHroiM3U4cdfJISnA7ksKoFbNvNZIiw4qkXDG4iaTysUZSnCSMpJrJ1qGMaCasAQ3GO6A9JK5ZK5BkRPt4Uj2Q/xzxiOYJR/Q16rIHG1+WR55nJk057ppFzOE9d9muByufpE7J2nRxb8E/Yw9KEpLRfSuD4TkwARNMEs/iuFlrG2vW6GqkLmrE4xSEVe+WUQgAa8ZhAC1PjDFep7+oWcp7qiSWw2qUIli1nRrR68mFfeb9mFCchOl4T7VNL/rWa77ph6exRqJO3a3Hdd9mTU2qSJSIshvQ9TTeMh11jyubHFGn6SmEy6F9uRyOSFZdMMXO9TnIcgbR1vjhus0g/tR+IR+5X+AsoV9IozMNGWsMY3GMTfefHwngXiKBuu/btZwROnLw7XsRTuH3xz9eZ+g2/4/hCP2Ylx0peZzDZOXxTPbyoq0edxH2qeEVkxPguiXquiUcYEMOHFfwmQ8HBw2CzwZw7NpiLM50RKUVYPQp+cTcsfz/bBbrd/ofZaD5k+7y5y+bl52hwLmXnOc7IHxZHzFQveNWdfJ6vtlNEAj8vNpsIn6ZfgOggydqY19c5xQaehhKC3jcAR40SWbF54RAnk8qOonMexGZE2ovLe6vSIOvQ5JKobPlSB+S1BNX/fW86L1SEvLpVFH4+L/zVnt1QlU7GO3D3VsbutVst/ewnG82xsZfF9u7R/PEbNdZY5gLB3813BCu0ye+sR8gziQ+wnKKM3uJM4lI5/ONX7CjzP/aLp7UZV+0N+KgrDo8j+lBORGEHkuZxTcXWcqlDrWRCksFC8F9UXnKlMBzabQApKDqKLxZgamth84KSDNBOpToEDiiPDkFh6Xg0NRpBxMcXofWE+zLdhOu6nAleqv7ESAzUd7dsLptMud6Q1VweOBq++FqeaR5s+QJuVeCXAgjSJz80KWoCl2ZCLnXsZvAhNw25ILuIeSlyJXDQG7f+uJp983BCQwoqVZjIbT4r87bJBPetPsmApXdN03ZPwKopZPZT9xbT9wbxdy2upNSx+LeqI8XH1udrdnu1bpbTLsTl0qJZvc0G0Ucpyd+0sqmhFvzH6dhCSohZKZjjBjq+T5yALRbC15BH4fZjt4zD3TmjGaf4EKthsJNWqblkvP65MB68zh/0X/erVebzZf5uoJjjbtKOa2+6FYffFfdBVUh6vubsEGPJZBdr7bK+istNpP6+eNqnf+t3jo3UF/OvyyWN/O7rw+7n1pq8o/do93BBq8v4SpDAoWvbOH6OlwP7c6uzi1adEJJSw9NIIpRhUyHkqSlhSmrCzIt1hRJ7bUU7RMkRoHyO9XQCfldvNMVqE/S3W8Rc+d1GqUvgt+QDzEibWy
fasTdRR2+s1dDqFqoTz09op5HVs+T/nreVS8Pr+drTlcN0fOw6gTdqTRax/vEu+PpeOsUQOEe9Byt45vVs8Pc4mQ6ejJs6EKt/PvcE/wwRWkOoTQnW6VC8XQC8FCQHOg4VUx5qrOAzXLwVFieTlQdCpgJC3a2KiZ8EGerMpd/HV/xKNEiLQzTFQ2ZbweJqVjcS7HYsXuPRUPmcqRpCNE+55XBTSQ2Ndlxp5j2hhjvNB2dM1GkOblsqjYPDbs6EI9Ud3ayhXh1Z9asuo6foHJkQaNlG79S/eiaBsLg4azicFkKtC2GRMCMlOPwc0tIpD9g+xjz/SrpXK+SntEP7xKulO64Irqv0oa1PwbkbiQWi+hlkwb5DVWrbSH76X7IPt3kbGKBNFSrEeA9VavNAsG63RoS+6Fh7dhgz2Oyv9o2b2bqJzd0TW5IXBwOyVCiGTkM0YxRiQy6jh0vvpHpRDPcxyyPqOeR1fP9iWa4j9sdWs/H027Iag7Xo2jG2HikHS+SiWZ4GnJyEs0MKHQh9qKVc0uzBCUSzfDUnOQkmhkIkgOJZghKJprhzXtDTKKZ0YAZhhPNEDgM0QyfdjZOJ5oR6UQzfNphIZ1oxrZ7j6IZ4XKkk2gmOTV5tmjGbiieaMZ8xX6joEFoXwSBJVgDc6r1SdqXcmR2SqocF7pmFjgpNc0AL4UmNitLMkjcuOb0cN/etgF0GiHnANsltiY12PWpwQKkqvHUYE4aHE80I3ASOE9qsKHBGfahBiOwP2BPW2K8oer+pSIjYh2KfLbISDU0CJGRGfANIqMZuplkRjUOskeZkUi6y/XkiEI6IsovnWeZvQLqXJmRamgQMiNxDfuUvP8gqKAR/AoV6WRG4ho2KonY88jq+f5kRsLHhg+t56OpXaioJgc9yoykb8Pf8XQ8TyYzMvHSJDPqjzYYWuhCRSBxBhU8jcxIut5ikhmNEckykGCOSpRKZiQTE7eTzGgoYBYwmMxIBdWDkBlJn5x8kpv0IjdhIJ3MSPpYzbHJy5rtHu1AD8fuPcqMpMuRTjKj5NTkuTIjp6F4MiOZRGw9CFHF25EZ2dA9XWakUlMtM6qiLpC2yEl742mLZPPGGJO26Cq0RSHy04gnHNm5bzwJhkmHJ21R3IEweDirrDK+toiK3oANwbR1yBuq6V8qLmIgkLhINTQIcdGBgveqi+iHd1phNGmLahxkj9oiaJA25T9XnP/Ye2Cdnv9g2ssyC+d0jnipEATN9Y0CU1Uoh5twq9CizcA6gKg62bZNtSFGUf2ZjPtX/lX4IONbu8/AsmFMHmlMop/ezzePB4ffZWjs8WYMXUwhe+yYi7zzIOp8mmx3p1pffdC/7lIW+tfPxRxqcdH7tmu46LNnVWQbSD/5NN8qJ/i8NxGIsliZgKqrMCWpMptMPbNiADYZAl8ZoboTbFpx9MD2grXk9X3uBQtBJ3n0EPz85K8rrrnir2FSf+2TIIf218phvG2PTWBKjz1uLTNMpyKH4BrEzBG7Hlld35+MHMJxy5lhMh05NB890p4HyYTkELodPVHn10qdg0upc4oDUeeqoWFQ59C3BfGROt+FkRN5XoJQOvIcuv0/kedXQ553ziNrHAaMKBlytrmIx5Ob1GESWcQdN5FEFhejuFFawXlFWgHZuXi26j5ExMPz1fCBPdV9vixXd19/f8yfHTS/ATIR+sjEGMOne1AQm0zMx0Am2mFdr2QidGW002qCnuYjEmgNgdNQvDUEEPoYUDtle722cmE8ytTK+nstFyIfZVpVpk2WqpM29mspH8UaeiJ94yoKW1rY6zTqocTHHYm/0Yo+9YTgRgIVPGJQfTn/UbrhRU/gm/qAwtZGQoIsEO9bPHvyR3V87YzfzOjt03z7eDdfqic/z/h79d9n9efru1w/8c8yarRvq5CeL/MH7SXuVFdq/uBG+4RctfpT8cJTfn+/52UWyvnNv+ya0nYteke1S9WXea/bUgNmUzilmlmqA8I6BpCX6Qms0+ioyNwCH8SRVhRDY5nT7Po62bVVLcas2gqE/Rq2TtzXYNh8GrAdgg0KbMP2adaJ90sabVxcNq7wesjL64nwQUV3fHXYp3TY50n5LRzSAwiaEYvp5yIjyC2cl10CySh3nULp8mV266AMHORGLYcNeuLbDQKa2bsUpDdc/ZFhRf5/d+isY4pvTFC61M4T8Jfve37AYQpO2zuovp1mHXB1F6FuVESqqIO7qOcffmIf3DkERUCqFWJwmAkKtRCSI8aIEQO10FMwRLyBfUTi9cPTJcQmcJ4JTjX1ZABzyjEREHLCXe4tHjpbydOrQ2dd3XPCZ1d8IiYyTqUQggp8WPptXClJ50rRmwTr5EojQVXlYhlX4THnQkLJiSGUesHqGcvKL1ELzM7QnhNJZiVmAapZ6HDhFO35CdICD1pamIWT6AByMoAYzAgUDBKJGFdujbEKhAhEegtxCiXAEB6OFesuVBAkQ5QKQrgAEDNSlaEfVgGdqpRTXpiRWjkeQRmHx4DCljQEVNFhl+aMCfMKfdYZ5pWtoN9p6WhKmB9TfhYK9SA06lWeD7AATJ/byDGT7FTYIwkzXO+XYcbQ0S9zFg+fzarlmG64Oz65tAGK2kTKo/PDFyOS44yXPKbtMjUkSy7TbEATA5JnFCB6h6TkNiT3CzeuGJJoeJBUqRRyVfNF+yqyVVA8JFooopNs3tx4oLHqtbvIQeKxPqgUMgOlvB9FjCmbdyceRkwp0Mx2kG8sqMSDwyeSKhnjx6CyGlUymXFeIqbi4bN5u+GY+OzuLkUZnvpsJzyhMz46aSnlsY65UP6Tl6ZzHG86J76KkwXYcwXKHiuz3WNmi0NLReoCOi8GXPqT8ueHAkNHYTvvio3LKEOo7ISwUCYmGHBLui5lBhlHgiGVA1BsFiSWKEPlZxilQCtGmbrPJK9lBpHRTE2QFEgkMRPAVKgvIhRNSjVZ1S8p0TwbUcMPK1cHmLWqp4NVuUoPJVGugSmjAdqXVX0ljQFYtSeDHYnRqvqauVUlbSAKCBGUUUiFkaFU9IFqZGrvCzEkgosg9vGptkdhn72bJFL9jzLlLAVvtZB2jJhSCCiShBgFbGwL9ctAV+iUrgLOQ6raTcA5oCTVt2YjaebaHReXH/HWXLx9/Qyv4pi3dtVnPzM0pFr1CQXnAmEMpHWYgfLvBFEGKMUMY4TcKRqhTMEDAMkxZ4ybYzgq/sV/y2VAYpGBlF8JkCyvk06FKrQKFVLIGeJCRW34SoDUL83ax1KDoc9U7CpmKp/yPKiD2Svi9AIl9etvYWmJUiqH0//sg2iGVb5OBYVMfQ+jJRu803DV7VmWORZKqf7qY92BzFSXcskZFBJgo1o1kkTUarxYMi9ar+4+Zex2G7ifxjhwBc/UpwsguDKwtCw/2GFL63XVAXDxOuFC79yTYYlVP0EVDBJ+LVGgOS1w1A4dYZ4xQqVmiijDjFUXlaV06D7CLyhtMAVj6mWWqUkcUk4wIgRYwpfBDt2JaoyYwBUWusIEjiYUmb55XNCUEPBX+tUcrrcfRhJjoicwa+cgiDOoEk
zCtAaAHfYni1Dpp7GpydfPKPXkZB1kcQY1ebt79JOoNnPcEMCMCQVIIKRkFMvhTG31u9GGIrknJAUkuQeMpITqzTfPchuHXwmSaPcz8VIGSfXbdARnudFYE6sWlnu4XoO5POr4WJEWmhsC1mq9WLQIi8pnWkN3lHxmI8894IHr0plxiO7RAqOF6B4wNFwqdXw+vYXpTurTfTvXRqK6RxuRNVPdAx68E6cZNI0bTsZWs3qpWe7dRmoGPCWFuaRmtnsMauao2lyPhO18m6+ei+ZckITID5iKESGHGFFKqODAigQQyYgyHIeUQSkOesOSM/HtqhxmLkm3hPxcd8G5aHUYg15Bzrpvc9zViygAcaSSEOVBiEJaw/EsZ1VGKMgYhIIjTpnkhyPRI1RGWGxN5evnX1LHMtfFZ7dURrDMIOeSIa5DZiRd35UqEArDW9aWRSYYhSyLDBZGHPQ6QY6rLGK8fSXINgM3yfTYHRdhiNVOZZFfxpqEt5VFhus1XHZ1aIlQ+rIIIa3Wi5X28DBq0W5Dd5Tsd3NZZLgDNwy32k6sjhYYbWWR4ULDXZg+Pp/eVhZJ6dOjiyqniKytLDLcwXt9POeg07jhZGznlUWo6l/JGOGCYCKA2fMmQlmEp9NVdt91kFWhpwngK991cEhYFEh5RUEBpkCXd6z9giGjmdASUYixQELKM48WaN66nYBMlI/UqH5GQAafe3jX3eRauAwEXjeL9Ts1/z3tzJSv9bv19Lx9XKh/v8y3d4+DiapCnsFqpJKXMiLOSX6eo/uYJ8oKcgyrOIMPnY5hHeoxrMI3nQvUGZXhj2E13yjVcZ6daim+4zx7LKaIxpIcAqBNmxT5ZE8DoMmGTeRoY0FsAEb0UKS7ebx5+t7Mn6ZZvH0WJ84s7smVI87iPpqz7xFqG8gz6KrD8+PukWAwIpR6MHo2xdwNRjXcXpaq09WvVdjfTcX7Eaqa3I/RaQjWDEFkD0E1Bj12DTME1dP1SnOKx7RKjaPHX1b3C33H/wM=</diagram></mxfile>
2204.11752/main_diagram/main_diagram.pdf
ADDED
Binary file (78.9 kB).
2204.11752/paper_text/intro_method.md
ADDED
@@ -0,0 +1,161 @@
# Introduction
Collaborative Filtering (CF) [@pan2008one] is a standard approach to deal with implicit feedback (e.g., click, watch, purchase, etc.) in recommender systems, wherein observed user-item interactions are assigned positive labels, and the rest are unlabeled. A common practice in CF methods is to uniformly draw negative instances from the unlabeled portion, a.k.a. *negative sampling* [@chen2017sampling], and then use both positive and negative instances for training, as has been adopted by existing *point-wise* [@mnih2008probabilistic] or *pair-wise* [@rendle2009bpr] approaches. However, this CF paradigm is considered insufficient to provide informative and reliable training signals. Hence, enormous efforts have been made to improve the quality of negative instances for CF-based recommendation.
Particularly, *hard negative mining* has shown to be an effective approach, which aims to exploit negative user-item pairs whose embeddings are close yet expected to be far apart [@wu2017sampling; @park2019adversarial], as a means to provide informative training signals. One line of works falling into this category [@rendle2014improving; @chen2017sampling] attempts to replace the uniform negative sampling distribution with some predefined surrogate, based on certain prior knowledge such as the observation that more frequent items constitute better negatives [@wu2019noise; @chen2017sampling]. In contrast, another line of works [@rendle2014improving; @park2019adversarial] seeks to adaptively mine negatives by carefully examining the relevance scores of user-item pairs, which is generally more effective but often requires sophisticated training techniques such as generative adversarial networks [@park2019adversarial], reinforcement learning [@ding2019reinforced] and bi-level optimization [@shu2019meta]. This leads to the **first** trade-off, between efficiency and effectiveness.
On the other hand, the soundness of these works rests on a problematic assumption that "all unlabeled interactions are true negative instances", which is against the actual setting where unlabeled user-item pairs may potentially become positive instances, once the item is exposed to the user. These instances are termed *false negatives*. The incorporation of false negatives would provide erroneous supervised signals for training and seriously degrade the performance [@hernandez2014probabilistic]. While it may sound attractive to identify and remove these instances, it is challenging to distinguish hard negatives from false negatives, given that both appear with large relevance scores and auxiliary information is often not available. Few works attempt to address this issue, especially in the context of negative mining for CF. This presents the **second** trade-off, between informative negatives and reliable negatives.
Towards navigating these trade-offs, we propose a new framework named Hardness-Aware Debiased Contrastive Collaborative Filtering (HDCCF). Specifically, a contrastive loss function is devised in place of conventional point-wise and pair-wise objectives which, as our gradient analysis will show, can automatically and adaptively concentrate on optimizing hard negatives by contrasting them with their peers, notably without relying on complex training tricks. We also devise an efficient sampling strategy that implicitly explores negative instances by incorporating item frequency information, without actually conducting negative sampling. On top of the new hardness-aware objective, we further propose a principled method to eliminate the risk of false negatives. Without needing to explicitly distinguish hard and false negatives, this is achieved by directly debiasing the objective, such that its expectation is strictly equivalent to the ideal loss function that resembles sampling under the true negative distribution.
There are also several additional novel designs in our framework: 1) It considers both negative users and negative items, to avoid the case where all negative items are easily discriminated for a specific user, and vice versa; 2) Two auxiliary contrastive losses are introduced to model user-user and item-item relationships, which help to obtain more meaningful user and item representations; 3) A neural modulated mechanism is designed that takes a user's diverse preferences over different items into account in the loss function. We validate the effectiveness of HDCCF by comparison experiments and ablation studies. The results demonstrate the superiority of HDCCF as well as the effectiveness of its components.
<figure id="fig:frame" data-latex-placement="t">
<embed src="images/framework.pdf" style="width:88.0%" />
<figcaption>Overview of Hardness-Aware Debiased Contrastive Collaborative Filtering (HDCCF) framework.</figcaption>
</figure>
# Method
Let $\mathcal{U}$ and $\mathcal{I}$ denote a set of users and items, $\mathcal{I}_u$ (resp. $\mathcal{I}'_u$) denote the set of items that user $u$ has (resp. has not) interacted with, and $\mathcal{U}_i$ (resp. $\mathcal{U}'_i$) denote the set of users that item $i$ has (resp. has not) interacted with. Observed interaction data are represented by a set of user-item pairs $\mathcal{D} = \{(u,i)\}$. Unobserved user-item pairs are denoted as $\mathcal{D}' = \mathcal{U} \times \mathcal{I} - \mathcal{D}$. For user $u\in \mathcal{U}$, our goal is to recommend a fixed-size set of ordered items $\mathcal{X}_u \subset \mathcal{I}'_u$.
For an observed instance $(u,i) \in \mathcal{D}$, we uniformly sample $S$ negative items (resp. users) that have no observed interaction with user $u$ (resp. item $i$), denoted as $\mathcal{N}_u^- \subset \mathcal{I}'_u$ (resp. $\mathcal{N}_i^- \subset \mathcal{U}'_i$), where $S$ is the *negative sampling number*. Then, the user-item contrastive loss $\mathcal{L}_{u-i}$ is defined as $$\begin{equation}
\label{eqn-uiloss}
\mathcal{L}_{u-i} = - \sum_{(u,i)\in \mathcal{D}} \log \frac{e^{f(u,i)/\tau}}{F^i(\mathcal{N}_i^-) + F^u(\mathcal{N}_u^-) + e^{f(u,i)/\tau}},
\end{equation}$$ where $f: (u,i) \rightarrow \mathbb{R}$ is a similarity measure which outputs the relevance score of user $u$ and item $i$, and $\tau \in \mathbb{R}^+$ is a scalar temperature parameter (omitted in the following for brevity). The $F^u$ (resp. $F^i$) in Eqn. [\[eqn-uiloss\]](#eqn-uiloss){reference-type="eqref" reference="eqn-uiloss"} is called the *negative score* for user $u$ (resp. item $i$), and is formulated as $$\begin{equation}
F^u(\mathcal{N}_u^-) = \sum\limits_{i^- \in \mathcal{N}_u^-} e^{f(u,i^-)}, \quad F^i(\mathcal{N}_i^-) = \sum\limits_{u^- \in \mathcal{N}_i^-} e^{f(u^-,i)},
\end{equation}$$ where both $(u,i^-)$ and $(u^-,i)$ are unobserved (i.e., unlabeled) instances.
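
For concreteness, here is a minimal NumPy transcription of a single $\mathcal{L}_{u-i}$ term; it applies $\tau$ uniformly to all scores, whereas the text omits $\tau$ in the negative scores only for notational brevity:

```python
import numpy as np

def user_item_loss(f_pos, f_neg_items, f_neg_users, tau=0.1):
    """One L_{u-i} term for an observed pair (u, i):
    -log( e^{f(u,i)/tau} / (F^i + F^u + e^{f(u,i)/tau}) )."""
    pos = np.exp(f_pos / tau)
    F_u = np.exp(np.asarray(f_neg_items) / tau).sum()  # negatives on the item side
    F_i = np.exp(np.asarray(f_neg_users) / tau).sum()  # negatives on the user side
    return -np.log(pos / (F_i + F_u + pos))

print(user_item_loss(0.9, [0.2, -0.1, 0.4], [0.1, 0.3]))
```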
To explain the efficacy of the above contrastive loss formulation for hard negative mining, we peer into its gradients with respect to observed and unobserved instances. Denote the probability of an unobserved instance $(u',i')$ being recognized as positive by $$\begin{equation}
\begin{split}
P(u',i') &= \frac{e^{f(u',i')/\tau}}{F^i(\mathcal{N}_i^-) + F^u(\mathcal{N}_u^-) + e^{f(u,i)/\tau}},\\
\text{where}\quad (u',i') &\in \{(u,i^-)\}_{i^- \in \mathcal{N}_u^-} \cup \{(u^-,i)\}_{u^- \in \mathcal{N}_i^-}.
\end{split}
\end{equation}$$ Then, the gradients with respect to the relevance scores of observed and unobserved instances are computed as $$\begin{equation}
\frac{\partial \mathcal L_{u-i}}{\partial f(u,i)} = -\frac{1}{\tau} \sum_{(u',i')} P(u',i'),\quad
\frac{\partial \mathcal L_{u-i}}{\partial f(u',i')} = \frac{1}{\tau} P(u',i').
\end{equation}$$ These equations reveal two properties [@wang2021understanding]:
1. For each individual unobserved instance $(u',i')$, the gradient is proportional to $P(u',i')$ and thus is also proportional to $e^{f(u',i')/\tau}$;
2. The gradient for the observed instance is equal to the sum of the gradients for all unobserved instances.
These properties have several implications in the context of hard negative mining in CF. **First**, according to the first property, a *harder* negative instance with a larger relevance score has a larger gradient magnitude, which indicates that the loss function automatically concentrates on optimizing harder negative instances. The hardness level of each negative instance is adaptively updated at each iteration, and can be controlled by tuning the temperature $\tau$. **Second**, the gradient is re-scaled by the sum of relevance scores of peer negative instances, which indicates that the hardness of each negative instance is relative to the hardness of its peer negatives in the loss function, distinguishing our objective from pair-wise loss functions. **Third**, according to the second property, the gradients of negative instances, whose sum is determined by ${\partial \mathcal L_{u-i}}/{\partial f(u,i)}$, are distributed over each negative instance, and thus are not sensitive to label noise, which is a known limitation of the BPR loss. **Fourth**, by considering two types of negative instances for every $(u,i)$ in Eqn. [\[eqn-uiloss\]](#eqn-uiloss){reference-type="eqref" reference="eqn-uiloss"} (i.e., negative items and users), we can jointly mine negative instances from two facets and avoid the case where all negative items (resp. users) are easily discriminated for an individual user (resp. item).
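
Both properties can be checked numerically on a toy example. The sketch below computes the analytic gradients from the equations above and verifies the sum rule against a finite-difference estimate (all scores are made-up values):

```python
import numpy as np

tau = 0.1
f_pos, f_neg = 0.8, np.array([0.7, 0.2, -0.3])  # one positive, three negatives

def loss(fp, fn):
    z = np.exp(fp / tau) + np.exp(fn / tau).sum()
    return -np.log(np.exp(fp / tau) / z)

# Analytic gradients from the text: proportional to P(u', i').
Z = np.exp(f_pos / tau) + np.exp(f_neg / tau).sum()
P = np.exp(f_neg / tau) / Z
g_neg, g_pos = P / tau, -P.sum() / tau

# Finite-difference check of the sum rule (property 2).
eps = 1e-6
num_pos = (loss(f_pos + eps, f_neg) - loss(f_pos - eps, f_neg)) / (2 * eps)
assert np.isclose(num_pos, g_pos)
print(g_neg)  # the hardest negative (score 0.7) dominates -> property 1
```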
Besides modeling user-item interactions, we further extend the advantage of negative mining to neighbored users (i.e. a pair of users $(u,u')$ that have interactions with the same item) and neighbored items [@sarwar2001item; @kabbur2013fism] by proposing two auxiliary contrastive losses. We uniformly sample $P$ positive items (resp. users) for user $u$ (resp. item $i$), denoted as $\mathcal{N}_u^+$ (resp. $\mathcal{N}_i^+$), where $P$ is the *positive neighbor sampling number*. The auxiliary loss function $\mathcal L_{u-u}$ is formulated as: $$\begin{equation}
\label{eqn:u-u}
\mathcal{L}_{u-u} = - \sum_{(u,i)\in \mathcal{D}} \sum_{u^+ \in \mathcal{N}_i^+} \log \frac{e^{f(u,u^+)}}{e^{f(u,u^+)} + \sum\limits_{u^- \in \mathcal{N}_i^-}e^{f(u,u^-)}},
\end{equation}$$ where $\mathcal{N}_u^-$ and $\mathcal{N}_i^-$ are the same as those used in Eqn. [\[eqn-uiloss\]](#eqn-uiloss){reference-type="eqref" reference="eqn-uiloss"}, and $\mathcal L_{i-i}$ can be defined in the same way. Then, the final loss function $\mathcal{L}$ is the weighted sum of three terms: $$\begin{equation}
\label{eqn:loss}
\mathcal{L} = \mathcal L_{u-i} + \lambda_u \mathcal L_{u-u} + \lambda_i \mathcal L_{i-i},
\end{equation}$$ where $\lambda_u$ and $\lambda_i$ are weights that balance the importance of each type of relation.
For the similarity function $f(u,i)$, one could use the dot product [@koren2009matrix], the Euclidean distance [@hsieh2017collaborative], or parameterize it with a neural network [@he2017neural]. Particularly in this paper, we propose to use the following neural modulated similarity model: $$\begin{equation}
f(u,i) = (\mathbf{m}_{ui} \odot \mathbf{p}_u)^\top \cdot (\mathbf{m}_{ui} \odot \mathbf{q}_{i}),
\end{equation}$$ where $\mathbf{p}_{u}, \mathbf{q}_{i} \in \mathbb{R}^{d}$ are user and item embeddings, and $\mathbf{m}_{ui} \in \mathbb{R}^{d}_{+}$ is a modulating vector for element-wise scaling, which is computed by $$\begin{equation}
\mathbf{m}_{ui} = \sigma \left(g\big(\mathbf{e}_i \,\|\, \mathbf{e}_u \,\|\, (\mathbf{e}_i \odot \mathbf{e}_u)\big)\right),
\end{equation}$$ where $\mathbf{e}_u, \mathbf{e}_i\in \mathbb R^d$ are user and item latent factors, $\odot$ denotes the Hadamard product, $\|$ denotes vector concatenation, and $g: \mathbb R^{3d} \rightarrow \mathbb R^d$ is a neural network. The key insight is that a fixed user embedding may fail to represent a user's diverse preferences over distinct items in the loss function (items may also have multiple attributes that attract a user), especially in our case where the objective incorporates more candidate items. To mitigate this issue, our design captures a user's varying preferences by allowing more flexible representations, which improves the discrimination ability of the model, as empirically verified by ablation studies.
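
The sketch below instantiates the modulated similarity with $g$ reduced to a single linear layer, an assumption made for brevity (the text only states that $g$ is a neural network $\mathbb{R}^{3d} \rightarrow \mathbb{R}^d$):

```python
import numpy as np

rng = np.random.default_rng(0)
d = 8
p_u, q_i = rng.standard_normal(d), rng.standard_normal(d)  # embeddings
e_u, e_i = rng.standard_normal(d), rng.standard_normal(d)  # latent factors
W = rng.standard_normal((3 * d, d)) * 0.1                  # g as one linear layer

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

# m_ui = sigma(g(e_i || e_u || (e_i * e_u))); element-wise gates in (0, 1).
m_ui = sigmoid(np.concatenate([e_i, e_u, e_i * e_u]) @ W)

# f(u, i) = <m_ui * p_u, m_ui * q_i>
f_ui = (m_ui * p_u) @ (m_ui * q_i)
print(f_ui)
```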
The sampling approach mentioned in the last subsection draws $S$ negative users/items and $P$ positive users/items for each target observed user-item pair. Suppose the batch size is $M$; then an additional $2M \times(S+P)$ (where $M\approx S$, $S\gg P$) users/items must be sampled at each iteration besides the target user-item pairs, which is impractical when scaling up training. Alternatively, we adopt a sampling strategy that uses the positive users (resp. items) from the other observed instances in the same mini-batch as the negative users (resp. items) for the target instance. Formally, $$\begin{equation}
\mathcal{N}_{u_k}^- = \mathop{\cup}\limits_{m\in\{1,\cdots, M\}\backslash k} \mathcal{N}_{u_m}^+, \quad\mathcal{N}_{i_k}^- = \mathop{\cup}\limits_{m\in\{1,\cdots, M\}\backslash k} \mathcal{N}_{i_m}^+,
\end{equation}$$ where $\mathcal{N}_{u_k}^-$ and $\mathcal{N}_{i_k}^-$ are multi-sets, which allow multiple appearances of the same item or user. In this way, the number of negative instances is enlarged from $S$ to $P\times (M-1)$ with a lower sampling overhead (from $2M \times(S+P)$ to $2M \times P$). We also highlight that such a sampling strategy is free of explicit negative sampling.
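
In code, this in-batch sharing amounts to flattening the positive-neighbor lists of the other $M-1$ instances in the mini-batch; the helper below is a hypothetical illustration:

```python
# Minimal sketch: the positive neighbors sampled for the other M - 1
# instances double as this instance's negatives (multi-sets, so repeats stay).
def shared_negatives(batch_pos, k):
    """batch_pos[m] holds the P positive items (or users) of instance m."""
    return [x for m, items in enumerate(batch_pos) if m != k for x in items]

batch = [[3, 7], [7, 9], [2, 5]]   # M = 3 instances, P = 2 positives each
print(shared_negatives(batch, 0))  # [7, 9, 2, 5]; item 7 may repeat across a batch
```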
To further shed light on HDCCF's hard negative mining capability and its relation to frequency-based sampling methods, we investigate, for a specific positive user-item interaction $(u,i)$ in a mini-batch: 1) the number of times a negative item $i'$ (resp. user $u'$) appears in the user-item contrastive loss, denoted as $n_{i'}^{u-i}$ (resp. $n_{u'}^{u-i}$); and 2) the number of times a negative item $i'$ (resp. user $u'$) appears in the auxiliary contrastive losses, denoted as $n_{i'}^{i-i}$ (resp. $n_{u'}^{u-u}$).
::: proposition
**Proposition 1**. *The expectation of $n_{i'}^{u-i}$ is proportional to the number of times this item appears in the interaction dataset $\mathcal{D}$, i.e., $n_{i'}^{u-i} \propto |\mathcal{U}_{i'}|$. This property holds true for user $u'$ in $\mathcal{L}_{u-i}$. Formally we have $$\begin{equation}
\begin{split}
\mathbb{E}_{(u,i)\sim p^o\atop i^+ \sim p^o_u}\left[n_{i'}^{u-i}\right] &= \frac{M-1}{|\mathcal D|-1} \cdot P\cdot |\mathcal{U}_{i'}|,\\
\mathbb{E}_{(u,i)\sim p^o\atop u^+ \sim p^o_i}\left[n_{u'}^{u-i}\right] &= \frac{M-1}{|\mathcal D|-1} \cdot P\cdot |\mathcal{I}_{u'}|,
\end{split}
\end{equation}$$ where the observed interaction $(u,i)$ is sampled from $\mathcal{D}$ with distribution $p^o(u,i)$, and the neighbored item $i^+$ is sampled from $\mathcal{I}_u$ with distribution $p^o_u(i^+)$.*
:::
::: proposition
**Proposition 2**. *The expectation of $n_{i'}^{i-i}$ is also proportional to $|\mathcal{U}_{i'}|$. This property holds true for user $u'$ in the user-user contrastive loss $\mathcal{L}_{u-u}$. Formally we have $$\begin{equation}
\begin{split}
\mathbb{E}_{(u,i)\sim p^o\atop i^+ \sim p^o_u}\left[n_{i'}^{i-i}\right] &= \frac{M-1}{|\mathcal D|-1} \cdot P\cdot |\mathcal{U}_{i'}|,\\
\mathbb{E}_{(u,i)\sim p^o\atop u^+ \sim p^o_i}\left[n_{u'}^{u-u}\right] &= \frac{M-1}{|\mathcal D|-1} \cdot P\cdot |\mathcal{I}_{u'}|,
\end{split}
\end{equation}$$*
:::
The proof of these propositions is given in the appendix. We observe that both $\mathbb{E}[n_{i'}^{u-i}]$ and $\mathbb{E}[n_{i'}^{i-i}]$ are proportional to $|\mathcal{U}_{i'}|$, which indicates that such a sampling strategy is essentially frequency-aware: it encourages the loss function to concentrate on more frequent (popular) items. Since popular items are treated as harder negative instances [@chen2017sampling; @wu2019noise], such a sampling strategy implicitly agrees with the negative-mining efficacy of HDCCF.
As mentioned before, a user-item pair $(u,i^-)$ that we regard as a negative instance is potentially a positive interaction (i.e., a *false negative instance*). The existence of false negative instances could introduce bias in the training signals, and hence may cause sub-optimal results. Particularly, in our case, there are two types of false negative instances: 1) The user-item pair is an observed instance, i.e., $(u, i^-) \in \mathcal{D}$; 2) Though $(u, i^-)$ is unobserved, the interaction will occur once $i^-$ is exposed to $u$. In a similar spirit to [@robinson2021contrastive], which considers a simpler case (without the first type of false negatives and without user-user/item-item losses) in the general contrastive learning setting, we propose to eliminate the effects of false negatives by first formulating the expected loss function and then devising unbiased versions of Eqn. [\[eqn-uiloss\]](#eqn-uiloss){reference-type="eqref" reference="eqn-uiloss"} and Eqn. [\[eqn:u-u\]](#eqn:u-u){reference-type="eqref" reference="eqn:u-u"} without violating their hardness-aware properties.
Our analysis mainly focuses on the user side for brevity, while the same also applies to the item side. Given an item $i$, suppose a negative user $u^-$ is drawn from $\mathcal{U}'_i$ with a *negative sampling distribution* $p_i(u^-)$, i.e., a uniform distribution. Drawing from this distribution may yield either a false negative instance or a real negative instance. Suppose their probabilities are $\omega_u^+$ and $\omega_u^-$ (i.e., $1-\omega_u^+$) respectively. To investigate the formulation of the expected loss function, we denote by $p^+_i(u^-)$ (resp. $p^-_i(u^-)$) the sampling distribution for false (resp. real) negative instances, both of which are unknown to us. The marginalization of the negative sampling distribution induces the decomposition $p_i(u') = \omega_u^+ \cdot p_i^+(u') + \omega_u^- \cdot p_i^-(u')$. Reorganizing it yields the following expression for the real negative sampling distribution $$\begin{equation}
\label{eqn:decompose}
p_i^-(u')=
\begin{cases}
0, & (u',i)\in \mathcal{D}, \\
\dfrac{p_i(u')}{\omega_u^-} - \dfrac{\omega_u^+ \cdot p_i^+(u')}{\omega_u^-}, & (u',i)\notin \mathcal{D}.
\end{cases}
\end{equation}$$ Equipped with these notations, we can formulate the ideal optimization objective for $\mathcal{L}_{u-i}$ as $$\begin{equation}
\label{eqn:ideal}
\mathcal{L}_{u-i}^{ideal} = -\mathop{\mathbb{E}}\limits_{(u,i)\sim p^o}
\left[ \log \frac{e^{f(u,i)}}{e^{f(u,i)} + Q\mathop{\mathbb{E}}\limits_{u^- \sim p_i^-}[e^{f(u^-, i)}] + Q\mathop{\mathbb{E}}\limits_{i^- \sim p_u^-}[e^{f(u, i^-)}]} \right],
\end{equation}$$ where $Q$ is a constant introduced to facilitate the analysis. Comparing the original optimization objective in Eqn. [\[eqn-uiloss\]](#eqn-uiloss){reference-type="eqref" reference="eqn-uiloss"} with the expected objective in Eqn. [\[eqn:ideal\]](#eqn:ideal){reference-type="eqref" reference="eqn:ideal"}, we immediately notice that the bias essentially stems from the underlying negative sampling distribution. Eliminating the effects of false negatives boils down to approximating the ideal optimization objective using the biased observations in the dataset. This is challenging due to the existence of the two types of false negatives stated before, and the intractability of the real negative sampling distribution.
Toward eliminating the effects of false negatives, we proceed to modify the formulations of both Eqn. [\[eqn-uiloss\]](#eqn-uiloss){reference-type="eqref" reference="eqn-uiloss"} and Eqn. [\[eqn:u-u\]](#eqn:u-u){reference-type="eqref" reference="eqn:u-u"} such that they agree with the ideal optimization objective. Specifically, the debiased user-item contrastive loss can be formulated as: $$\begin{equation}
\label{unbiased_ui}
\tilde{\mathcal{L}}_{u-i} = -\sum_{(u,i)\in \mathcal{D}} \log \frac{e^{f(u,i)}}{\tilde{F}^u(\mathcal{N}_i^-, \mathcal{N}_i^+) + \tilde{F}^i(\mathcal{N}_u^-, \mathcal{N}_u^+) + e^{f(u,i)}},
\end{equation}$$ where $\tilde{F}^u(\mathcal{N}_i^-, \mathcal{N}_i^+)$ and $\tilde{F}^i(\mathcal{N}_u^-, \mathcal{N}_u^+)$ are debiased negative scores for negative users and negative items respectively, and the former is defined as: $$\begin{equation}
\sum\limits_{u^-\in \mathcal{N}_i^-} \pi^u_0(u^-,i)\cdot e^{f(u^-, i)} - \sum\limits_{u^+\in \mathcal{N}_i^+ \cup \{u\}} \pi^u_1(u^+,i)\cdot e^{f(u^+, i)},
\end{equation}$$ where $\pi^u_0(u^-,i), \pi^u_1(u^+,i) \in \mathbb{R}$ are constants depending on $|\mathcal{N}_i^-|$, $|\mathcal{N}_i^+|$ and $\omega_u^+$; their exact formulations are given in the appendix. By swapping the roles of $u$ and $i$, we can obtain $\tilde{F}^i(\mathcal{N}_u^-, \mathcal{N}_u^+)$ in the same way.
::: theorem
**Theorem 1**. *Equation [\[unbiased_ui\]](#unbiased_ui){reference-type="eqref" reference="unbiased_ui"} is an unbiased estimate of the ideal user-item contrastive loss in which negative instances are drawn from the real negative distributions $p_i^-$ and $p_u^-$.*
:::
The proof is given in the appendix. To prevent negative values inside the logarithm, in practice we constrain the negative scores to be no smaller than their theoretical lower bounds $$\begin{equation}
\begin{split}
\tilde{F}^u(\mathcal{N}_i^-, \mathcal{N}_i^+) &\gets \max \big\{\tilde{F}^u(\mathcal{N}_i^-, \mathcal{N}_i^+),\,|\mathcal{N}_i^-|e^{1/\tau}\big\},\\
\tilde{F}^i(\mathcal{N}_u^-, \mathcal{N}_u^+) &\gets \max\big\{\tilde{F}^i(\mathcal{N}_u^-, \mathcal{N}_u^+),\,|\mathcal{N}_u^-|e^{1/\tau}\big\}.
\end{split}
\end{equation}$$
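
Operationally, the clamp is a single `max` over the raw debiased score. The sketch below assumes the temperature is applied uniformly inside the exponentials and treats the $\pi$ constants as externally supplied scalars (their exact forms are deferred to the appendix):

```python
import numpy as np

def debiased_negative_score(pi0, s_neg, pi1, s_pos, tau=0.1):
    """Raw debiased score, clamped at the lower bound |N^-| * e^{1/tau}."""
    raw = ((pi0 * np.exp(np.asarray(s_neg) / tau)).sum()
           - (pi1 * np.exp(np.asarray(s_pos) / tau)).sum())
    return max(raw, len(s_neg) * np.exp(1.0 / tau))  # keep the log's denominator valid

# Toy call: the subtracted positive term drives `raw` negative, so the clamp engages.
print(debiased_negative_score(1.2, [0.1, 0.3], 0.4, [0.8]))
```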
In a similar spirit, we develop unbiased formulations of the user-user and item-item contrastive losses $\mathcal{L}_{u-u}$ and $\mathcal{L}_{i-i}$ in the following form $$\begin{equation}
\label{unbiased_uu}
\tilde{\mathcal{L}}_{u-u} = -\sum_{(u,i)\in \mathcal{D}}\sum_{u^+ \in \mathcal{N}_i^+}
\log \frac{e^{f(u,u^+)}}{e^{f(u,u^+)} + \sum\limits_{u^- \in \mathcal{N}_i^-}\pi^{u-u}(u,u^-)\cdot e^{f(u,u^-)}},
\end{equation}$$ where $\pi^{u-u}(u,u^-)$ plays the same debiasing role as the constants above, and $\mathcal{D}^{u-u}$ denotes the set of observed neighbored user pairs. By replacing $u$ by $i$, we can get $\tilde{\mathcal{L}}_{i-i}$ in the same way.
2204.14017/main_diagram/main_diagram.drawio
ADDED
@@ -0,0 +1 @@
<mxfile host="app.diagrams.net" modified="2022-04-07T12:10:52.676Z" agent="5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/99.0.4844.74 Safari/537.36" version="16.6.1" etag="uuu_yRRBiuVWwPYSdWYI" type="device"><diagram id="mxbtxpobgWqMKQT8Yk87">pLzXkq1MkyX4NP9tGRr2JVprzR1aa83TD5zz/V1V09XWYzZpljt3xoYIcNx9reUemf+C6f7il3iq1DHLu39BQHb9C2b+BUE4Abyv38D9dwCB4b8D5VJnf4fA/xyw6yf/Z/Cf88q9zvL1vx24jWO31dN/H0zHYcjT7b+Nxcsynv/9sGLs/vuqU1zm/9uAncbd/z7q19lW/R0lIPw/x4W8Lqt/rwxiv7+f9PG/D/7nTtYqzsbzvwzB7L9gehnH7e+7/qLz7rPdv+3y9zzu//Dp/7qwJR+2/y8nQH9POOJu/+fe/rmu7f73zZbLuE//HJYvW379TyaOk38fDvzvlwD+rxt7HSIf+3xb7veQf08E/f7j98P+8+sfK/7jGxj8HzgAEH/Hzv+0NgL/s1L1XywN4f8Mxv884fJ/LfefRnjf/GOH/9km8P/dJq9Jhiz/jgf+BVNnVW+5PcXp9+n5evw7Vm39Oz8Dvm/XbRnbnB67cXlHhnF4D6P+jwb9r4aD/mfD/VcrAMh/wNDv9z+YAib+AyX+fe3/f+yB/N/t8brx9L2t+z+RQ333Vb/xosRJ3hnjWm/1OLyfJ+O2jf17QPd9QMVpW/4x5b+N8y8ILv58/Zc5yK4uv3O38bNrvE5/I7qor+8BUH+WJP89Cvx75H2fxVv8L5j8+yvETUP5L4iuPUq3TkDmy5F8vzTbrVi3fN8J3wuF0GT4jaMRk9jfG4rPKMdlSVLhjc9SV0WZ7zBNNTYnae/nmPSeWIrvoPHNSJPY+5o574s+fVNi/fvCwXS6dyzxZ0bYkhzXFVCzdMnahHyRVO5vTjKqbSBjSYMg34XO1xjcqEBDe75TW5VksZyba8tWmHkqeBmKP+mzQeAB1fGWbe56g/HzWu0U1aREb9Eu2rZEqdRG0damSU6lCZ+kC1KUbJsyafoS3vkZkkHUS9lp+TR9RqGOZO0DaV2c2e87fE92+Ekv9A0ILs/URxugbuHTkid+G//L/gVRuKq/Pzj4n+9toIegkor3k2L+/AviHiktc3EoX4fi0Fw86m/wUmHFlC4TI6p3CaMG92ATj6Q7rEYHhfQ4LCAtdruDBFz8DieOfgGGJc6e40i1lsGRhkNwEVCYfFmkUCpsRnqP20uHdc5ITfONlPHEszWLv/mioOAAXRBuCsz3wvzU2cNrfd9dcBhsWsO8tyXjvP081haMhg81OEwW7Ttf/BPMk9gmBucPjytsZZpxHqyn7VKo55FCuZCBb121ZOoAQGhJUSkInE+EJ8r4piN0hkN8tSAlSYwS8n+co/hUVumle/W8bz9w0cT01qkijhyhui/eTm3Esu2JepzvtHZHKIZX+dF7rUFhTXDZCynZtKl8au/HJWQfkTW8H87Ir83DoYyAYH4KoMS7gZ9YhddJUB+M0ywPtPkpJ74KgGMHpcIOIt9uOqOEeWsmV+duuAHAyjvTO20z7DqkZ7evR7ocTbDK3oqKErT4WaRJv/Ugd1ZlATjY4HEvDaMvAwXWsM338pLxbbVAfT/hUG6LMrzRYRjj3Lq8sVYbNjKlWn3nwbCf7zVJPk1zdxQFdAOxAZqD4TcXdDWzEdl85BgqtsrbYZ1aWA1yogDxoXUEcfFi+E7Rfh7VNDdjYvZ73VTG3p+rwMVxxhTu0mQTVXA0SeKNRIUSAuf923s2r37MDi1+Fw7GrAnkqHXr+gSreNng5xOP9/ssP9xaFtRK+Hlf4heHEivsdGvDMoKgef04HOgoVxynYOwdpPKmcmwAW10ObHGwJ0gnwYAbAUOBJ+RlJV/GDG8nrtU8rj9vJnLNokk0WcqMnZG8Jl2n6uYQ9EhY6jxGKS+Nwdsrh8LKkZAsA3bp3XEkShh0DI19sxOqL53JOFtEnuVf6s7Mmsx5xd3Zhm24dgEkvMpM1ibrMHFSnEaJOxo2yx3XBKMGouA7m+rLU4kV6TuTiMmcT4KuMT7LOR25/NoB7SNbXfuE3SbvMMHbre8wFtF6r1kX0DLF0LeZNuaogyiLJuwMEqhcMAr8KahAtMOoRNL8VufCgmiCYdnJ/qkJOU97VaCVsGj0WgKapExHxJH7QmzvitNEZ9M8RI8iScJWGbwBjglSIIB8oT3OcpKFH6sOfanlQpQwaXy4r8bvyX9nj/v4UwqPU1O1gjil3F1qGfWFA/op6KsJBo8nl6eBNjRkuzj0F77Zc8EC9L394Apb+7HQ/cU5zjzbz5f2Vb+aH/JCXs2+Hs/hZ0EhczFuqUKmqYNiivsdzAZF6ZrQ9b6XZ6cfdwakAgZkCorHdtKfesGzK6afeDK6VhKgq6AUU3HjZIborAVAsW9uJ8LQgpcbp4QMzyxPybVCrhOzvlzaQbYb+RH6qIwl3Iq2QenfM3g8w150oIyTCQyLYHvx0fB7oZBj2rHE5UFR6w5q9d5DToxHNaPL2NQ40YiPSyPszXB0qvU8BMD+bSdIs7/sOD0oUNRoOJ6PFxRMm8EI54AwLupfWCUX00fdyS4zJdahM14KvbJ+2fJibNVhtkEvtHLCqAgQjOfKdYfRLHGqpO358GYy6l5o87lNFGzCaZXRq494BEq1Y7IZqyJ85LQYqFMCbnstSRH+6YtfcHe6YJczTJ9fziHrBeA9S/XWNJKvRyeGoIz0qcq3myLRVT9TpEhCLgMhjCfxbC8TocDFJIJTzeSjnoZ605p39VGVDObF5IDNmPkgi/AcHcuQN5RJI+Ta6FkOJ7cN5l7aCJEa1cSp42iMhTv43jj/psji6RkLyoI7/vKUzcR2+0Zl73YvwecwmPOtqUZT57QnWt425NHF+8rOlUXHpHSuYSOscg20+nwRmGpS8V5UOiVO0C23WCBwrKb416cgHkJ258v9Q/FhLOmsx4FYvNYDNDALEsC48YDL6qXGjy/N7qOemZb6M2UtjjiKuY9Agjxqibe/CZcSSgRFbnr5mXDj8Yl8yGGdyGPoVn07gQAkwL8TZ5apzspcW1vsrhy7C5+ER1GJapck1xNaW+VcBeycxnqXyN1Q4BwBcauCVqFRZ4pro4k23NWGi6ZxUTq/jHmhwvjTWvMS4/EBcewzYEnwRGeuPmxHBtEB5j80OMr20c/cdA7Z5vx6mEp/rAzmDWwuzI+sID9XnpOrIZ824UA/cXowtNuXj1Eduy+8PtPHJNBSnC+yJPG/dhEtBTu5tFWQkY5jjiaQZxlWjB5OD53qh/zYCRcWkjvoTnXrerPqRX9BQnu6/DlBqWXIJ5RO/Giv7w+4JfhSkk3IKPUxVGcUHaQxmAAlGbOTR83RG8HHR5JrYQx2jswh4fRy
8NvA7Ibwtwquq6bjxc0U+TSJGqqEoqvjb6oCUKWpEB5b0ufSScWb4ksl7uv8lBW28m60fM9cbjlAcILk1hMa5SPFNEEdYL1Y/c1Gbt6C5eD5mRxqpygKoi0g4JI7aIKJV6+dw+/Da/6b1ml1eo4Wm1ZbAmmY87yp0eZ65Ieob0qBlKHO2OU9DpmT9xzuzXgJD46zT+OcFH9YO81bC/jcT5vlFtGrxyxKggHwkQkv9k58uxwEq6XLENLf8xebCSF66khs8lhsBGlshPgWg5h9wnaw1JAwGFm5jyhDvNMPhbwTnyu6vogpwbOAgozLN2qIvhgrndh1aHlE0l++kNZ9Q5YKWTv1qCb00ZYauL+IJownmYi8QK5ON4cP9mLn7MLmR0UPSz6+DN3PyBmtOa2esAqpoC44MDM1kR5a93fFnGd0xECbN4XKSgDKkxipPmk3LqOtCNz4p4NKSzwGKKU95uGX1mafWXnGowcIxl1nH1+vJ71KfO97iDzZXekWwi+FoPiYXi8NZahNBlPGAA4DHYSx4aZEkqmI/n/lkJGVNMzbRQ/6aVbClg1VJWy01JyXkeRVxdRUXM7UT6mFAOYxnmFEoULcmB+zLg1eQsWkzaUhM86C+TkIr3F+xSsoQpglOL3xRpkc6MhWPbQGVjMJQMZa4MCmsZHCkK5/SHJ2FdnLj+nXy3+ssqQbNvLepOFhHquRf8Ak5RseirIeuFAqF4edzaTIFlcUVc7p9/SLERNmDY3t9xcA0Bpl73r+V7mKb8d245ut73Z/Mo/IgaL0k7WMsdVUayBnOC1tdCxUR6VLPnefLi+d/pMAU43R4M+fRbP0DYybnZ/dvCeTIY3fjuZIXE8R0EOphjIKTzL0zq0SadtFv82Wxc1bS1ZaJNyBlFDWI7fsyy3HNnKPPw6xV2HAJqDhg+YPqrW6Ae4pOZJRS6Nza330ION5FNifTr4J8pYsoOPU271b4CF7JEcCscDWjTxTkGjWQtyaiE5aVkGVldbfldoZrNI3u0Ere38E48zuK36U2XQwN8TaGV+hYAx/97HzvQx2D9vgmj1zREDUV5VwHz1CsaMj2PVkvLl7GRjGm8ep5TNSuXQ5tEhxLWH6hTl10aV2m+BPLpJ0JeJl5V7KCZnqWouDa5WboYlaAIt+/VPMstx6hOXEtg0FY3uyWwT5SxEhM+Z/Nb4wly4FnJ7ENdXI1s2Bihw4ZBN6K3SeJwGpV5+GJ/2SR9DgY2gKu+5WvAZ7F+y7jm+XwAQI+QY//Qiam0b+oWvsPvE6FIc6OHXgdgVn36SO12iG4CurIK+k2YZtibMg4gMhXT6TqxLMmct8ZZdeOu1PMAJ0QTHSY266zvUhzLBqCu/Gi0L0cAHXGlnBjTbBw8Yj/q5Y81v55rBaQzFWpClEmF9jkaR/HR1tQhrR+D5J+FsYYdLmrycSc3UotHzTtt5LECk5Dy1K0i+608hxz7CF3vaVgcfr6tjGumEjRCcwfyWFRO5OMBL3PUVb4jORHWaE9xiLpYzgOOawUvMZhyt7RXKVGVWIpRiIL5d29qX+hvbNNQEMXk/v5I7nprkQJDCUVKj3C7rThcmK8Zo/JFRwZRbImb2C0McLWdKUbh1YsLKv3k5UN6oYCp676UZZUplfF6a4YWSq5QSlxFZIgDtmGr2CPr1o0tp527w9aWhf3qOUehHuv+5S9lG0ROJxifm0WJXoWNF61u/h9d6X3OLebm+RwpRrtvtIF0YzNgN6koB22auSmUoh5CojzFn0mdtmtj5YSSWfT0eLJJagIlsn8DQ21UmzobnqDv1Z0869Mzw5pZhH4XE8LLIHUwWN8hbRVHLqi3u9fOkQV3ViqTRgdbrezgTzwNDzJN0uqc7As5gkAoT7izGu1um2P79aDijKtC4lsALfu9PJ9ZHv3yUMvdZzSRVh8bC6hRUNoV+r6oKqjeMB7uD0HfqqTMo1EzLhLZbHQmnMFhMAP+1lsE872PZJXDLvdb40weIXevnIXN1UlZVwEsANhdCrVwSzVvpEpbmVpg26bsgGpxqmnzWOXXEhwpgVP9MERR8UFzRGZd81JYktAwShjl9LxIAhxsmnTEtz6XVvbtx1b8rMY0ZMepNp6dkTaJ4QElYb4tHLYkG2v/g/4HJeoUL6qzBZdGLHwD1KL9U4XynIvEJyCc8w9CSmC5yWV+CSOOQLvK+oEPo7pzerHRzlByd+FZg1bNer9jGSCAVOpd0gHt5g65RKU5tBJa/a5RzvUzxPyUbxAMPWvjq4AOPIAICiK/z9HkTlaXTZatbEAdK6z9oyA69hBjVQo5g7cuqNY0zMdeRPDYc3d82scv5j5+EreAFu65vbWySgBhhRfuGJ02SVvL3RUUe9lFCv+u2z6lCTMDFXdSUn7E+06GW6HV6kuB00s7cEdsiL1HasgEV8Zno/O3BbKEN/LneN9inGckJd4RmNZhb6Ta98xbGuYrh+EMH+JXMMQokCspX2IwZeJrnAcZ6CXsLuqF+cPbC1nOluBY/yCVjrm0KTgKNQ7eM1clrUtSAtGiVb9hqXFn9I8uCUv0cdW35iGAH26UZoXgs70fOx4ZpNNv0lCSNUBqdaT9RRhaDVtsU4NlK11l/1Y4CiN0msYg+Bj+BTXF9RYHWLcW1o6blFrg08p3bhrXovfnN8MnOOX9D9MrElI9b+bFyEySb6gRHnLxJiv3hGmxETtK5ATyU3HGgWpj4uW5tnjiqiBfqxu6rvdUnVUVgMMcIJdW4gq5VBL/zJhrRv0WXDsfVCmnuGhqyIqG6uXx2NrSM2THO3+O3ELgtOUY3wqHzvhZ/+BoivypIHl9K8XpXVQoNJe/HpXCMtyFdYUvvp/DYqGxg9vmal8OXtlt8Zi+HVI/IiSFYQeuV0YV8d4NChUGW67TRQhhySlT0Y9tU0EB5QZSoSjAVEaU9v1WmzKm7ezNCrlcArn2H4iTUXypBbk9CoCWNCOLT1BnrF7Kf7XiVif2Z0FkHtzkR2mG2Ddvl0sOTlyxwr07tmsKcfpA+GBIdD2ksjT8wiUp/H3rcy+7+k4WWCcQ7VlAwUJ5s+jwrJa9ugKcAr5smG0NDzVTD5HcUzXf6wAOc0SGkDRIvwr25Gbx9/O6q5OmJE2OFfKhvFxgra7wmY5486paKHS3MghQtj56WlmakrSnNQ1bQGDMuMvfiTZ0u+YRoxs+HcAJB6dK9Nu9XUgVVLoVXEVNQrh1v/ToT6fv5o9wJjdHwpq2FvndlJfhC+mN4rSrwVBhVh178EH9eiX63qPJ6bZL8X4LpTQYOb9RyEUrwcTdelGSUyPSl1sNUdO7H6bFWd3pXwhyOjT/L3Aunuoz29t4EIgmI0+uPC5wnv/kkZjIRwo1FtWLcQqjefPw7Aw5TEVt0MktsvmPkIhnMlJRSQHBxsT68DjOyzCvEyMXAwXRhuRoE6cVfSOIuY5TwGTHlmLLpF5t1VvuBYSeI9YTVT3dsF3v/saWrVl/9KR7b/2CJACGxhsEhu/7C333whfL6p+ycyTrwU+LIhJy9to0O6Mzfy0X6KI31peYuWBgnJ5ov4wk4
Ijae4wVfkPK8m3hjEFYuAHErHgxHO7XsqYrb9MU7Y22Ikz43FVKy0iddASPVFA095uk5k7E9kKb1VcCS3KkNeZP6WKAZ0JR/6jEjaV7yVlxHiK9B08eEjGTmY7c/+bpOGY0M4HRfuAyZ5kPE0cnCM7t1+eSC2TvxNyMKAulVYYQ181+7AHzukI9v5mEB/LyYdOavDFJraTV4rbZ1EYZ1hfBELZXpn+wpXU/lOMIM7MK9oO3cb33CH/K2sGtwKC687NxPshiodf4lbhy2juUGA2BhhxNb3/lWDpDmqIqH8EqPwLBMz2Ck9Oafykh3PUZoGhL3zww1QOX0AY28YdbjovFPS9rUmoX9gBmc0OlIqxg0WaQGpsClZg2EMTEMMVZaBCWYuZzjQb1XqoznBCnf7wdR+V0SIztc3Osp+PxSVeZPwKo3ap+f6x/85B6V/lRXpCUutdBs1JtBjtO+FFiOve4C1HSGYP3OOLe5tYdTJWJcv0dmza0ygNkT0e2Vw3OBmV98R2dVrNF6if+0fvnn2zmuOEexPgleStVec81VlTRUlna81UD093SbImv8OzH2j3zy6lzz2R9qz+QZ+xbhN5QAIeZWJn+tP6ZdXFBHs7PHRBUPKS8MoEYsSX+urlyDG/FRH7+v9FVTGiI91xj+/9bRdhq7Z6mMfnMstBjIz31l4Q/KfiHepLknTKbgLwhbwvZRAWvSmyPCC82RnG9g/uuqCs5xza2auWbzGANkG5cM5j6uhghlkUD3aLYnBMb43hTEm4MeYqH7Qe+PJQ7R8WRd91fQak434ZtbEwxj7RAn/5fUlnNZSaWXjm58QIh9H4LwD+VPgGFtt8KdhFQBpfe4JEDg4+eaC8og6RMQYd3P9+lLEFaea9sMDGtnLFwIbwOt6ChF7UaI4+TwWlsL2ZR49WiBwlCYLBX3JeOrM2k0FL6lPmQLMP7r2KwKQ4ajnxV2cCj6oJfzypK+4YIQi7xBP2Iizq8A4lKBnCanSShAg+3DN2vvcv2L15d/YzcPjq46j0O/k/qrNGyh9d5OVTqrQx74z3chS6WcLRXycvSECNwYoqgDIuhHA6SzI6BrqV1/PYc8C9vwjGtAcI0qD+1817huGz0VpZk6BJ0kpv+wwIiJmhSUb4b2+5GHjR/IjgUqp8cy/hBaXBSWZ2+pquaJHF7P7OE+IuOtKNLaUyLi2Wqh262SunDSBvNSUvPE4HfMXp9TzKV4r9G/37x7tIZtil2ujNZ/WdOr5HGkPnDm8X9ylUjFgH6o5AIPBgso5J3S79M1J9FooGJnuYdWjBhYpcsxM3eqzhS3HP/ErimpbSGqThZy+0p2sDOYFxesinAPGRus+2lyTa2hji4pw2ps447cBNTUPx9XLmmoOCStaruAeqc3AZgnQg90U9ybHcMEn1yVLmq1XimXngeDm1semraA8rd6QKXn58eIhL/tjLN92ooseOR2Pod9WhXp3x/ovI+Eiiwgj9OmX0VFGPdq75oqVuocMfuhbzv9Gf3k5YjdwY+JPc0T/2o8ieKJldZJ0rw5GMbad99Ed1MdMEvnL6R0g0oTtlnzYgbPVuNkt+brROxlfCTuFb3yILy4hxuXDi4PPLP0SGgqUe1tXTvnDhOf00Lx+teWhlHCYP+WfSLwyMtwNE5ZfCGt4gd2qGnk1PBqWrND3kr1twOOgg5CMXahz8WKdYTufGFcbMTkznW7EqmpqIYf4b9j8pR/xFqX8CV9cPk/XZDi41Hxp5XRS+Npp+6FUv1z5SVe75PYMzW08dZ40jxBKKPXG/APf8qWWcHf7I2hXR4nFauhbH+k1m2r8bto/cZzZ0xqnXraPRrljvHlQHI8Nh3bsLglI9TQvWakcrrx9T3mwsxj01wOHucf7lpUzN4hi1ImEyJasZrp344xi6TGH+WUQDbZsx9W2r8OzTi15GoIh+rik0eGp80mhfI3Q3V3C/GfS57Mr/crRa62Mxd0nP61oXNuZcBxiGK+CiYzhEZQK2iiXH8BHPeYmJocnOITpA88mH8iDG/wCP2TgLEltXBHdrf0cryeDlpArvlrsj08oTdTCm+3N036kO7IA/sRJ29GwU+NsjlerCq/2qdwVk+Z21q2ZJ5DQzI+Ds2NyN9X0F3qRDnnRsip4kWaqE6FYjbEgevd40aRZism9Pkw5DdPYkF2HR8bNFVvDZvHL0DDAmPE3T2PW/QtYsR3CUda0I7kIKYw3wd0uHMGhiLltkfRUL2FAwjMByHoBQnukLOYyaQwHiMwU99iVW9+myphGTHT5L1U7DVXa3Wp0XFhuL6APSNf22uI1yxQWgYn5u4TRFGxyXSlbgkz72HW+5AhNSBrO5F/zZoXMVG2eIlQEym3yvSSARYmho48H6JqjHIK/+YjMaoT4aHy5WQUhexcCfCUFhv+6vufXb6A7BQa8rIT3Nkp2CHzZv/In39DMPbfx+gjbeTcsBpqyUM1LFVsaa3CzYbJDxXeEHlZfUwe44TS6Vd9FlbRhTEz8kYPK1RPwka7dKCiuMyDt0w1XAo4eAC3N5cRng3nj3j3dx81QBZXInLdvy4lnQwQ1GSQXYeuEbJNU3Nns4zrz2t6LaqKpJEAGrLGihRhCFu9xFoS7L+rIqi6elTkD6gf86pul6g9UZTk7x9ghVZUUG3dIvtrKxw9pKSs+csEjP02Cz+BjP9kDEMfgBmyj6vmffky1HAdsXkGxzwohhnBALp91C5LfDsys7raTzQtDYoiUNN0dcbRj8zNdWPbUnYRG33zg2gu1DiyMjQb28llUrzzIWPovYyERJ1adBDY5QXiy/wlDFNdWPTfCwqqJovp6zWeRmdZPAYn1Q3L6WA1Bt1PFVqzL3Rxhbd0+mTDukSHkMaJKIZ3hKyA/kUkP569exQA3f46Zw5xSlMdHV8GT1VAP1HZXa34+43/uwLJDOZZyhRDp4CUsK8W4zFcf+6LzH4yEyzL0eYM1oLbEtAOOggQo+8v+4tzzZ37nB2c1Tj049j9U6mtEQEa76l93hWt9D/NAnhliHT5KQiD9u3LhdSA93W6t42i7impfeQaawccTJHWFxpY6r1T/id9OAjjpHuOSGAAvc0WKsDHpu/IhYE2Jr+Xrh+NoW5zwkRE8iWT6evL8StTAWIomIdvcMIoQYDRMzj9+xgN8XsNCt8z+HhqAJx17PzW2/+TYSdQcE9JMS0AmoL1AG/NUa7IHOm0n/jei3rW+Qg39wCikwBPPAgQOzmhO2/kWmMECYhCNugeu5lbNyUzi5i/h5Or3itK/Z0NBSOxNswiVyQ1DFUy86n31LnKUeC8ugVPzXSFQURxxVUTu6KQf5NLj4bKLBzKktcq1QGlvMPnVK6rEbz/zXFchr4X2FzvGWjB7EwimmfNQe/PcYMaAQCeIihVwmoYgP0N8B7B63061CPjiLb3CohzAK7C+vISb6iP6j/sBmMJMAfJpxC1Mf9qLX23/vOKw3LZ454kb1MuKl9VDysi8fxRvafZ9Ws9GS682gJR8JiXi/tyw+SXJqzl0+lnilSJk4lVEgEkKTIVRyWfLgW9CXY5WEr
FUhrQpkSK5OpKmMt1kjePCOlzZeg8XvrDTQXDoF76urQ18lnWRp5BSPcAXVpK4VeJV/feSe1CV8nypdGohIEhvvpL4MqPIkq+LcBLRjJM8f6EJvTVS8eUH8v1WQYfrWqC7o5X21cNeDzRC+UPFzG+fEEgMMBufL4VSzNFdLQl+VpeiqV+4RJ2ZVTtK1OrJGKBMj/6Labbpf5ZDrgQ49SaR6kOyhSo8wGhWULJfCdJFllfEssHkx3TMwKtuDMTDGEVB8GN4LgaXqTMXOSDVsURP9K+CeS67ATCxlZ+gHsY7A3A+CkQV8DVVrSC9CPgufL2WSb/m3u8T50k3JZekd9Y2e6qWxZ+qjwWBJw53Usn4rz8a/Rf8U52foUP0rgiCjQQkqjeF8tNwwa+9LT9cUZ8oji8E/EolfJ68v/lHEU8A4qWKjNqU4KIjaZtnp7jnR5sfj8GPHOxRfuXDSXbQ4I3ZY+zHVbmp1tcF38c9sAcSR7BTIp5YdyPpI25OZ9pGI6JX6svnZW6TJo+cP1cyxwAFDTtcNa1m23o3zsW+eyOKIk/3sbKxvyNf/hx8Ya3mIRxeDi5e3cRr614BQm272cShAHc75xU07R71Kyi2J6QkPzlu6g6QEJ91FdPrXox8iqpU4C6vujGTafi+hd+tC+KksaqOn1OJpbnGN814odM451s03Fp7OIwItGZPiSb/CObH7EIvXD3fi7B8CXkuGX1fowhIlGzK5HhTkVDKjBLDnsVbhbVn/qok4hUeIvNso7oNgQp3YbnmtAspLYKcKBkAftUA7iiZdDlwN/3IliJHm3HLRBeZv16RrgZVwbETh2lg2s/MXSHB4c7wGUexCvUIVyZxmBWlNSh01b5wmSXO8k+ba+HBs/YjgdLpfqKfdXNxmXXk6cRtlqny1UamUJNANHjGeF0KhpfE5bFtS2Q6N6/q2ZiQcF4dZieFQINMcsG9pfNyq7qK4uE4yuvPT9M2sfTVnjaUr91QhWPEjuIA4tQe1ZlcLwtURJHMDsvX/8iHRavmvmEcsgP7xb0Cak27Gug1YsfYMwS+PZm5yKnslJLjmxYnFUTpV0S9FHrD5TbH6Jgte5/CBIWlEn9DNED2hNSiQy+x6r2PZIrnoorkK/yuC0H5zVMQczPRl7TiQHMJEE1Mn3jQb/LHm4xSw0eTLwKyq5GVKp7+Mmb3ZVhKqVQ6jSC+CLj0MFriJGe+/Fs1Dp58LZ8Ir96rJRYxNmveVOnWoF4zGx64YrSEI8eN0SxmNtucMMgvI4Wqp0CXCl1W1lgDIhA2aJHadEFroxpG923th/iV6vytYUYYkJfJaqPT7FZt2fFW9RcWpC5PzSt7FgzpjRUrnPzzKH5dpDPefDmXZ8ZuyoUzzZ5z72HlxJnD6M8AjaLXT1+RZb8a11eIkozla85kKL59ldNf6LIyG5zHDklBLeO0sLa3lv9oiTeeYdI1Zu9vZ9mnCIqeZ/pO1Hyl5js+uY91TZQ6FxUigNMAYH51hPZFmAB8vjk+MJ96Kyr3llLrJZmSXk+sDKRkWi+mybjk6z9iLnAQbN3t3ZX79nMDtXlUVdld10OC11WplqTzNGWu9dBt0JUH1Uw8++wBm9fyC/K0zKhQSezlJbJrL5ppODp5gjChIzp1kcw5WcaKRiBdXlcME+oVuYcf0rHrFZgW4lhcTkRS/IbhqKml6CIh5ZoD8HVXLaJvr26rnC/yQ3aAljssGYLOthvs3euTil1tcqWZoxGzPo4vVum52OyvBIPCML4BeMrWlWSgjzUayI81g5Q8vvo3opOz9LA3IOwa4+il3vUOvwbyOraGVVu3EYFp2bcbekNfn5K6CQLl/Gi4en7XSa6TFM6WfdHtdY8fiO1xH/IxzJg7yOHjgWEOn/vizZAZp6rxs2Oy47kA2KU+4uPG6DC6g9yRNGU7DQqRbE8a0qTNHO6NYou9NnoAErq8oyhednkbkLQdd9MQ6xRfVKeinzYWceXsbr094vcR0J9D0r/e6ApralgCNuOFEXO1C9bVq9ez2ge9J0yWhrj1CuU7KGvqkrDeVxnhF3l1P6yiY5qVR3pk8q3rybsjyforxkt3KMFZENAThhMW3GQvA6DiPyT/0soqYdVTd4WNFZ8GxXpJxCnEdTYCGRf4TGPtY3aVdRZajKPuBgdJuKoNOa0RPyqFUw1n3hk5Zgz8xHmRaKG5gadfLew9Qug0DpPYDEwHgMkv+CO/Yrsof6R9ZQGnEXQh1phVFo2hhAL6OiOJ8iCGcQQS87A8/C0u0rHVqEq3rlwFPJVJ3a1qh2DOToNWOSf6ogJHaC9Vp5xPvz1nmRwadRjl/kugfTS54seKt8a8xMHzoRd98hSX6xQnY4714QPPIb75s/3vUy4094AE9S0LGx2hkcVZDEcZbJTzp45lTgj4HAXzHYBjzbeeZX130w86i11pnfTkuizWTc96/gOjnln1m/SmJ7vsq+nv8rDy/dfBtPzUZyG2Ehq1jS4FV6MljoV4mejA8U0EQPbjOZQw/mpVSntkYyvOM3HLwJqSRSrvXNTBg1X+Mu/zK4+rjgATOcYibBWeILqsBjrgZCo8Aw1RCk88U1Lw0Tp5jiUOUtlplHizSedoOu+FGx8GEcAgmtrCKR90nwJ7ojB+eh1osgj9LiSQv/08fLEcp/mnJffqlabYj6lA8nk5yusTNuk0IDL4RZB9jPhXemoX7fau96GvmSLHcEhSE4gJDHBTFKy5PyaJCvFRACb09wrc+FiwTRDd1xf2O7WgcfpobV3g/QYK1995OBOjgrMxGB91NYZSuQ/cez314zfr74t0RMSKP3vhMXQrIgqUPHSy1aSL54Ofi5uwAa9qPU0XI4u9DF+waCqaNa9zKuYEg4/wcrsgML7dzKeLnOvXUugRuaQACc61vDkUVhpCJ1WZC2J1+9Lnumod02AJKPkaHqN/F9qkU1thA+PN2VUGIJHNOAeMmsaUI5WbkxYNkbNOU40Lamb7jPBWwfaMpT9VPxiLDSqz1AiWRI1Nvfavz0frjHTbbT4SIQalAvJe2+ezykR6CaAXbYplQriHEihq6BgAAZ+HCUPNX+3b1K6n43j7TDH3ZigCozlIxHyF7SpMkQ1PnGmtDqNFSj2J1vSfe9nRQY1T5PBV5ni9QoL9w8jpK2GlZ/ytF0zU9LcTOKsfR+zSGKGlmk5PDG6o7YdaLcUjpsWzhzn5uOeX6BDpVhM7AKWeKlUTGzqbR62xOuLgUIn6tZqIBEOG5YEPosNALwe5MPryqfRo6AkVJL8gcQUM9JEPPeDzqAhIsFnvBfFFq9XaE/501+zkpVvvfU/9DXVuVUDddiGQbkxcUGhmZc/h2/lIbiKVlgkOifMzCaFdGdkRYuvhzX0C9nsQJCK4uh356iCjk7/AHtFqF5Y41E2rTiz3g/NCL8kXNSjdANpBWGMwZWLRnede+p3dFyjfps1rgbxQcD5dQaaHI4rVSeSIwocoN5WhhKyjRemAPb8cccWgFjEzf8ypEcmgDptAnrTHn2AOO6RKPidDxsiawFSh+XyYTHZ2xV0rHMoht2XmL
9mdzFCaeD2k+lTX6rsoWZ9bkq/593v5khbCzuDOWpcJjKsCggQyboGAO4DcFVpaZb6/ktiaaSzVEiS6nfjH+GYju5GIgnho03fbSrePtl94DxTIZUmKwPjU8Dxz/r4a19yet+NrDgNmkhKLTS8YTIgtI6upzlchZcqUHXjXoNuG6nyb5LKbtaejrssXJXXXUl7d+qoMb4LIPrOSHKUrG6eYvqHpX6KeyoZUQrhBK+7vqltBawSX8rFkXfUAZw2KKFJ5mel+26Ge9A4AWFTPbSCUQfF3ml0DGvB2x95rT9DAc4dYHDqoT31jfzb7nX9E7I3xOr3+XDePz0aI2J+vO+0aEvbypbolPFYUul7C6NPt6kCMK76yjFX2YLF6fmCH3D8xIsMxFwtjIo8S1D4fS3jpRcCwlr5VnqWG80u8YNYjERA6VC7nDacZ1aEgS1w7Ll68aqdOlOVnzatH9fHPP4ec9ILKyz/h0UONSukP8dBlAoZ/9vYRvIpG2vnqAYqRgXqrHNjOajnj7/ALkmYooRaEq85h45KIY/0gz+YBM+UNG2JPftl86bV+sDMkWiEW1EmMsSrCWG4ayyptrDSrj088YWI3NjvP1YaLpgZgGeKf+nT05Uk/aJ8BDUMmbF16NTt2f8TAhEC+HZ5bla+1MGt+2Eb2z07544qo7NI7FYx/omGTaZOWpktx9e1vggVE5YPgAhl/YuKbPM9U82XrQsnQNmlLVGMB/LWETjYx+6Or4M+Hd8jPyTPG8POZ4GE1nB0a6xPR7NvzmVCHcL8kxWMaL6/3YtfJc8B1RlfHt1fzMTD57ZVxeBSkhldDCxwsoviiRy+8G2/Kp24JwDtQ4hAECzhcENljP2F2jUIFP0NBH6uXtXHqFenCAVLKdYNeeF47xhnZWpSjKvxgPR/9liQ9vWrl7Z4ntDA8aSrazVMrHv+jz5yUt072vo7iFPdCaCXuyG3TYk0rBNMn0H9x+BWcfrCfqfqHaUWPydKZ5YwQWhfuVGeXu24YbYq65p3Yta+sV/SPMuyq1n1tmK1eZbj9YQ5yaSYgfGWEWzCyrzBIEglbhKcVvkl6FZ8wTZZr6PQU7RhLrLfxbxOH3i6es2smuir4t00TCFkmJueKfvdSSaYdqo7WOLcQWZPphtUwY2FmUW6sa5RVg1L5U/z6lY2aY1gfVoUBEwnM8nTRh3k5oeJ1cFswJmGQD/6pIIyHszUvMyH7AdpXSKl5BBHPJtWRiWkc33M6m9XLKj28E9cLm5b53VnqLQ9iu3u2eJYwC4j7ndzzPF5VLrowqkp6wf/3rmo77aWH1ziOD3Q7zjNpYs12CMZQFfygoE3xSZuoWqP+9pXo5N7MBuqjiz32EGnu8wlR33CSWNCd/UAFpq/WAVKqFTyMWEiqhwtyPUNjl5C19kt9vjUqniQJo0xPLeor20rVCIqcDAGYvXGyzC5UDJMRgvgsm+Wbzli7wFd4U62+0IBvVwHIoOZz7F+X+EtVO8aj+Zd1U2AhOKXpBppD0bz0MK4zr4CKus9sMUAxqByKVfs5AYsPXjnoTo47wwPW5y1dKEKTJ7REAKsFQ8dXvtvw0b4ckB3ZRfybVWOXLuyNLighCqCHPqb5K7bIazTki0+uw3lRU57tMXrKXvUg5GcWGdP+iA8t0xxkk5o9x7w80a/NAZ+tdwX9Prn8YUsi6dRzmNZrhwbaqrOvmQDVN6JqjtuOjUKNdjZ08Z+u5ukMy7SzNUFg1Evqc3LK929L0MVwrHES+8e4TogId7dJNSz+iDBfA3BCX4MuIIw3PCsTd543JvEiB9za0OjP1ZPUf5b2crcLNV62sNS/wjomoPNYJLL6LJHsRl/bu/XPViQ6We1+AFS/6SQwq6HABeLrVD3N7CyXn+blYAROLfIpWHY3aPJgIHR7vlRAj7PpDwLKddQ0ok3SMUn1HCE6ApbLr3lQP8PeUncW2d3kvWifUeTp0Zcl3664IKiy+hV7q2ShZX4wYnbDtB2uxjDY+Uo8MgKccXk1zq6UbaXIg+VoU8zUmxWX0qt8x0Dem27P7ruAKhasLLgnjrnHE0vpX61+eF4PuqdZ9fFi5yQId73bQ24vRAFoSz95mzCD3idFd2nw9nh1nStR8T0mCr0uXfxRB13ZYnVAfMw+WhGjfIYt03SEcKeFHixCy2PCWa0SxlZvJ9d+jw7oPl+Eq4g5TPSXZDo1J5PoRxCo8Uk+qjp71R6EhuTO8tLv3RxmiXti0Fj5dG2MD//px5Pvh1IOC0oqw2lb299+GfZH/kcY/LbBgf65Jo2L3T8camoZRa0PrxRvHEZLBohlMLMPTzftGnvNSwMr1vtel5baZpZmfGgKZgM9Q4rDNEDcU+xlqCJPI3rOPdkXYUPbHsxBtX6jNZNO4AXoHtccDvObY5fk1LuIRNFpXNLYjHjqq1agLXXlP3/cB7EsOmWlojKi+BQWLY+3ehZ8mU7Q7aR6Q0oKVpS6EtLCT/i7devr262Aept7VUEhEDY/SvzVIs2H6UY8OoyNjn91AfDCG4d4F6l891pVr+a02ZpJJqv1bsN3MB2oDYLMNFNBUPBx4I/9qEKBsRSCCIJd5wnrrLZsSbKZPGtfEbrOG6HQPyDaoJZfNceSN0E9fF3R0ZWi6eapQcUZLOSvna5g5CG/buE1mB2C3pmAkPChm9/fQxMjzqwGt4FiUv6KOt82Dt6+9LHpz0s6Gx7/JFtSoZB3DGJNYYmI6FvndVEiA121wi6d2cpvN+gbTxqZF8PCFrFXgfr1hqzctwdggoxQGmfDjCFrLgu6E3so7KrvT0rvWmaG9pdJF1KbtzngLFF6bR5N1Ha8gvdLaR+XYggqExHp+10r/+w8HlxfGGaAHnU8ie5cD5hxJHWcYMA/sPgbhd8cNDPX9Hkj12w8Cw4/xuJ0nijbSTORPFi2a3KOYY7uy0qLcQCHwKucSt5l83ce2uorZ+GmDNyXGS+dRQPzgrd2TLzaaWgVR+kK/Bk7pL5UAfrwINJMTHAg5Ed+u8JQkGGIE/UawjDwG2TomesGf6Pd26UaezaO+HOfuENwtLo3Acng8BsIOOTl6QGbJ9UY0Iul73ZhiigWoVNUChIM9CMsVEeSeSnKL4pgQD/STHBs5ak3VfXGm6tRlJBRWVlQErRa/aVxTUO7Jq4v7RSMCoqjXzJ/bQEX/hkmy+wQy05d3GCGvI+A0YHM3fzjP29Z8SwTnY8FLX1APr+W+7nR71ZTT6c1bjcVp7GUGMdi7qX9PXtBs1+pzchiG7rsVt8o/bz7NLHBsuv8eitCdSjtruHbt8HiNtFIdijxKNQJc1zmwPIYzmSFcKouhE4xn+Mw1Ms4yWcCoPZnlb5HGZPMDeCmHx3ELj0f1f88688lRqu5v7ry/MXkzL4jMw+YXTvkzUtgX/4sVS9mGKFFeFEt6zumoaqe5ag6/aQvxWFf34DTwivEj0uQsgJMcdaWFTVLjmgx5p+qinHAkZqBu6rvyjOiC1HNfyWtF8z0p8ABygEP+fcBZEgcVhLVPekNTIx9XWSjLTRNqAtywQ4CKSH1Ov+5yu8W
PtbHO8EGtNKzKaFpYbzRg99/yvAKIV1NwFoiLU2yKETWV5n+8lHWfr+caAVyOcr3+TvobPkYQEjj0N2mF6Vy9jKr0JXAZdgFPx4Y8EcU5Zrmi8OWv7w/s3bygkmMJc1mXoJjL4mBw9hUEnzJdKgwKWiPf9f2xvJXRpPC0r30e33IJqKLSdplW0TFa9UkXxZsAifzvnaL/U+BbyleKv/tygd5BxEbM87mHfOD/ewyiU3l7tqO/VHdx2G7BnVOtuSckFt2g0F+XY8QSVz/mEcJezkM6FClCztmg74gpAF67rm5JRVWifb+s9GXDX9USOzSo10fjxoCK2uXJ35dVIVHKNfEl+I2XhIjGGIHSmscutBaY2HJ2/Zscv3nL0Eu5DaqZgMRy8qfQ7JAF09qm0IC2ukGo9ZPIXkn6ftQoAzzVfdy/4hp9EB3wRPaK+4OPnujKAmMH7/SG/fzX6po6NlSH3UhBEozNX2bAwDhWJcvt0CdRdDSStQdoEtvw7ShOoGm8x+VBwpiH550XgwK55sBsbHt9NN0l30PjZ9KeBztZbhMMEukhEayKR+/E7huHj15IaKZOxgWA/TxWI1qCgWd/Cs9Ht9/wBifLrWBy6yEr4hat4O+t8tH3z4cG3tTRiFVF4b/p73vWnZU2bL9mvt4K0h8PoIQIAknhH/DCiGE8O7rb6Zq13F7nza3T3ef7pCiItYqLQSpzGnGNDmSjn26Kezu0AW6E1eV4BUYyJ5NTjxNnOw019XS2lDH+ZO8atrlWpXHDqKobtnUct3r2b75RDAahKUNA/lUIOEqy+JUinT1K7tO39PtKTOKeYSZvPLxuT20ammk4SxnyaoRdKoJNmcN3aj3cWGqTHx9kgjn0mcFxdUxbTlRM9Qo3hxfccb0ftmY3PGKQiBW7R/b9X3KLo9DG7dP3AYkX/jLxBPzMs7GXbfODdtW8hHkwopdyolGtvxcTtvTdgTbIYeVfxqlHwN7MooKPkkCo3S5ZACO6qJBJ35mek9rcMUNzlm+EsV2tdbL2BlkgGXxVUspuJxS8nQ3PTcQ58InlXnPe65ePgwYns201mC9YBxsQ4mzNdjmZ+4SP+nD7cpdMjZxL9NTu9LMaWykJ2XNxWstVvI1SJ5l4twWjNQcM4p00DzJeYpmq+zUo3OUK3igXft4eOdPFyyP6HoqdPvaJPeqNq+ZYq/7CX8+j4uX4h5cUr6iQR7UF2k93Yv1NHAfx/ABe60PNt80qqFbj2v6QPoUvG58bScEEJJiCd8R3xfqeASZCALy2HDVOvkWsZGFYnPSPh89am4WScwoHd9uD8Hx+TyeL7eSNJvZvW3MLeO11kOB3giMnDduhirhsW2jlprzS7Lin+hKbE7cragdMWh7+e2Ib05ukV07Gvqqy4YvmyZ48CUuyvhn6Gp7NO6fTEwJ7qPV6NiHjdf3QTJ1DGjeq7BbCnkqX23Ck6fGf2z10xzhWOo+sZq+bdbtgELHibvSqXcrjfLGVrF+wgsvw/iAVomPTXtNG26oSXJYHqf4zeA2M05zLn66LPAebamF81FBUBGiOBQw56olfYELR3tDtx8bc3kFXngroBck5nZMcfeYzR/IhXCNhn4dQZc3nBUZFT0OutZJTvvO7TJ8PI+L13bgPBFVt9sv7j5n8EYe9GDvj40vMc3jzDsugfdFiNUagGfzJGczzcLOflGn86PhY03q6lR9Xlz+nlxDGnhBBS6vcPd14VQCApZH86nEKnxXr3ju0nVYwieuZN9q2TPNm1axZz/wbk1+ZM9bMBwS/RDMUuHGrnUzD1XAfyLZsPlQs0hCDIUwzJzZubMBlTgA2wynvU8TM8rmRXE85irGyhNnwqSTUiMY3FHvrZVBED+7a9sS+5qKVLE2ZLdDbr5m53B37q/KJl7gXVtKpE9FI6XyUctS83Y51vOnWaY2n9vgMtMD8sZi7wcb++fzsQ9kbxUXjjft27C8NRciC3PY9Qd9646xqXiuSqrtnSk1ttEOZjVu88EAm82TlmkGhcUaah0Fg3TQ7rNa3h0p88ypsU8tD+eTZeQmI3NsYBnnfpP8hreojBLf83CGbXOFKX2qt8DxRAgt3OXhuezr7dqn62D6L7ARzXlmXwHziB3nCKHPuQ+Rv1wdOO3hGCyDX+t9020mTYK4eeaL22m3h8a06hq/xoth7W/v0Lb3RAjM/AYkReSDeQHevFLi83XNnwqDcdFyvfCRdTwqeiN7jd0crNtO55dSDaLX3eorHoqBY/hjX2bn/cUu5mdb3QXnfIgn/t1qOyS2cGXwlNqCyEprjakZLo/MMvXb60ri8M8nmsczHQ3/EhEnhNlWs32tZYgd7Ys6F154jGkI35oe16TFecVKeNWVOlPlTjx8+hZh29Zvqnjj0D003A6kTsn7HhWEb9zeIjOL21X/WVQje+TQOYoo9kEGj1sgtLYhxIbtW5uwJ43lKMU4HpxUV47zdE2Cgq61k0erpN6vmvlg5aedqPuh4AZJDJ7qYY4W3zoQh8W9zUZ5Ns/z44ylNmhsz0PohPeqQyIcO2TnWB2Sww56o3vWpWS5LO5Aq+JEwvts22ujeCfecFvSkC9ydH+2INFSjjzi8lxJvNcr3qqMgr6GPrkVeSii6Oi8gRHgBmK8e5kGSXcwbx2/Hvs1ad/YYZEX3OkaCmLPsfRhS9qsNthzW2yRZG7v1if91iUc5wT2DVSlu366qPqHU7KaSlxhsJekSyOxQP7heVgBxZ9CoT3rO2lBE0E2azXNJYJr6eYbf8V4NF2feEGfjzcC28aeM3Z+dA7r1TQlP1rFGpdbQ2dK+e5tKgfghSdmuogZRhXZRT+iKPAkscxbusD7u8tPtAd340nCuqnn11NXBowVzXQ9X4SLE6nhjenHsyk4T2tAEXbwEKwiq+i6vKbkcqqfbpDj2DnNl4M8i9eRz3B+tm+Ot2sfQAnFSop6Dy/DuBPyLS/1Occ8NwgQQuF8gu6lOiM/rJNmPm0ipAav6fIYrT+vndxn/WZr6xl3nlPB0aNSgwZiuHhNlYfr2jmd+Xqu64kd8lL24WnObZNe+XGtnOqA4f+h4Vt+g27JMPqCt8MN1N75md3ZwX7mXo+XOnfb6UCYDsEIyqotoEmnzy4pbiJdIbmFaXOgxIPaEs+c7Rj2WOc++yScuHvqU+jRY2gZx1Wkh5rTrfs8chskJiZZO2klEQg/+HP79rrEbZ/2MvQyvEkm7p5SApxE1us5XIB8JUoxbZeCa20cjix37DZrHUhEfoZvxn4kEgrkIrfKd7zZ4HY3QkK/r+CNzOiWhjilZwgB4FUGC2mHCTT0k64eFcuqhF2k8+uIgqDoyjIhGUSsRdrkFc+Fx0eQt8ZOKS5QcXAkgROV8/0KYLhT1M9tpa5Bw3Lfs9nZ1Ysvkg8lqQjy3R/LSo0Fv0zWV3F/4UxixIs7n4YP26OOj2sjMJdZsD3l/TTOvOHE/gM9+oUHm2FnxGA9S7p0e7B6Kr6soJjKOctFIwHPC7Zp1CbOD+4CCilHisj3RvxWa0YIl6mRckqQMpbiFn7HXYa21TszNn0QCNi
a4fZPMbrjnMQgVOXmF9lImr2MPDyZDoH1vMWpc7wOokpGw+GgU4x4n4v3JWJ0+MBJ/ltjgDfOH9sZD5k8xT1l14rEIa0ZmenbOnnntVyYqkfAS6GfzHIgIr7FuYaqHvlLTGl0oVkcYVpv7RkUivfZqVgF+yQrHNkkEc4i5Q9qxohkvunBdYBXtTpxZ4sdpyjZnoD34lt5HHHaCAE0XQhvj413jPfVbveEVm9WiHNE7eWnwe11klPhJYnn9+u0D+l7y7r3ECGMYTcnLwSLjQ1ceH32NpR5qSLPwhJTV115dTtxjqnQVZyNpnJiT9hqTsoif4Zmm7Dd5d1QN1s+PrLy4PgIgcJFeJ+J80icBdpEOMAl8dbQV3HAjciSuiF9ea6NLUifRDs5u7mKHcdlvXusnRTCnTZIhcHb70SrPps9MRuvZ2zbkqZy8HWqB9G6P+L8uh5Scayl5JTBefUJ7mnY8GdVBZOhZMetPlvT0hTNOct2upDw7gIuQmFZmOc9is5ve+dw8/jmcok5WRlzgvfGfyfDoX7j/HHeP1Xk5c7kojKSPh/pznHNUjNvumfushhzbViNYfec9lt1sGexm0s2kIdPfAKT2JovOryd3EAtL5x5k+vxAZjbkaFY0+ocQT5SgtALBX0+PXnai/LKIhn+jA2LXuSckRbVdKqyGxSNgRpOFLvzBkGF9rWmtPrOGOQWP1XL4p+v2znDHbUbfd6K8lizNteBFq98Az47oplBK+020+tY35/dkre8uqGZmK1QPnsVFb2WCT+V1/NDhoLBarx0NIYGbkHaaW3y5WZftfY0l5LpJK+Xmcnake6FVZ+w/RT3fre7Kawxu8zVqLIazj1F2ih0nNvY6MPoTp34gqK5MUTR0NLHjorbRK2nMo4lmzw7ZK/TOHwP5sH1bHIkkiGcg8I7Zed8Cmpznc8AnFoy496TRN0n4frZSfIkHXNcE7oIPiCU86gwGXRSHDnrcOoQVh9e2qmI+W2x2xFcGxvnwqjwEYgVpPVL9MyZ/Fb72PXfVZvWbYl7P3Ra1t5rb34IfpgDNjK8aPR8RgpOFCva3SFXtTau6p1nyKpIOo05r0wyCLgTjN9ubx4HPy5fb0I6+tWuxfqxapm3q68Jv4vm3JNDrL4KMTqrA1YK3E2YLB+DT+X7y+8exuvIfXacJV3sNGN1xBEMHXZFFHgBmN2XkbUDlmT1upXa4e5sfsQflYA3tBWn3YidVouyGnr+saUJqZySpeNke27ocZxNLNf2i5RIiXPIjQrV4USy0maeF2uJF1byznYfb6QTt1Ac3XVksRu2Oom1sP9Wn4qROb65SwakkzrHMdoknJxHo18OthmO04W7JljL1YZYtAz7ItIYmE8vkUeeOTlJds95wNuROmu1OmtAKuawp1rfeg6ZozLIlYOieB7Ah+6TP9rNueP65Sb24XnE5VX5Q9gZHo17V0NS45QkSbxiisg+irjEtCpGVR9l9pjn923B+4/g7DrLDXfzrBhT8dgPpeKG3+DivlwwHWydHb0QRdllprhsSJ3rMLBrQTotenUcdek+6pXo6tKJ1feQ1aWw1yUB/RRt365VfINzYMu+aj/x58wXmCOlfuVXYUOfWTVHoDXntHiSsJkVAjGVQF52YUE/l5NwwybwMwjxbLvMsX+e7/c7JujF//5D5MW/PsD+gARBMCTBsCRJcORfkT5D+gfLcTRFEBRPU5Clf0f/zFI/CB5AiqUplmRZmv49A/LfueQ/QoXM/AEVMluPmHQY/XIfP3Pz843ijSsnf0GSzHbT+9cf/u/w4TQX0AWAbdc///HXXW7tux9R1EsY+YJ/2Hmc5f2ve6Nh/rz9Xz8Sp+H+/N7fUDSjpRr/DbzUxaOu/+at+DcC5hQtLRrD75mZX48sw4/5QyLsv6bK/gdIzv8FxA8kORxPMhzPAB78WpXfZIdCkkXzDGQ4mkNhIf176nBA0j94Gn2QBizP0OyvS/5SeDjyBwshz9GA5tAzGPY/Ljzs3xWe32Ql/RML9p/FAakAnWJ+7z+/9W+QKeaPZOrX05Jfb/xJttL365U3Wfxh6SbZ+IUXrkmG9i8/1f9JNvN+fqAl/rPUJX8giX8toP9bhREgYeSRAeMhS7CA4CD/V7IIWOIHZCCgCMhQgIXMH8gi+QMAiMwTSXEUh2KD/xJZ5P4rOO779xj/Rvz+D5pukuB/UBSgkO6SDPIc9F+7DR6iueQ4gmEImiZonvvddFPkD47jeZIFFLob+QeKTxI/0FKR6CEcQXNo3Zj/+Gzz//psI/UT8MkUWLTreBge6R9N8K/TJsCfpjPPfndUxd9MJnrMe+rTX1fx/+qZAswfHKzw670+r9GSzn/9yD+amt+eYL0/DQR/Wj/A/IBYlgkISA5A4q/Vhed+UBwJaIKnANIH9m+m/uf3+O2Wf3moxd88haeQB0CLCEkOaQtSq7/WSYr7gdaV5ni0wBTH/9K4X08Z4/6ej797ymeN/zRh/6Zlh/+awR9+nWzyy1SDv7Lzf+gPigI5P+JftO7/v6Djb02/0z/u97xHNoAkrHf9+NjZP2GSr/XHaoGgCMshWwNIyEHqb5AIieQQ2X6ahwyBLBbL/n+Zf55A8sr/A8zQr+Nz/itO8sjyIp4+0vBffI5Hj8OWC//bOR7G9FqlTxzzOms2cf3TyRoCjrjuc5zjkzruwrE+Xj2bbkwqy5j1GbvqJyyDQ3R8alp73o9H+XgUj7YrHp+P02t4C/ebZ4uibJowaujuZl+PR/cZHI+2/QyEmr0HciMKR1s/1Qz3WHBug8PxeG7hUkP5UJr2yVKeB7onptSgWBjMQcxCtt7n0cXxkByMBJKc5n4S/rtetHE3HZEQ7OrBTC8bsKMtCkZFWTUDc/N91Y5JGdlj2s5dzT44z74lpnCXmhxYgDX7M0iV80AJ+gFnJxNumDoP+EHjvE+CuMs6O884/9Fk/iEz1evBpEOnYzIXtHf6IOjF4mWu2uGCYWHe71xK+4ETxAn78JxyOQhissFp6N1TVkUHT1fviklv2VUUFFH0WHqyKPSnLlmGAe9sikwkKZJ0UNCVoZJetEUS0vbeTd3RDZDkRE+qFK/iHZR2juWFi2+twB9u1qVgGeNaCudHpoAkaPb3MWetRS/TT5+OoJzuwkDgjQVPXqGhu6LnnPFtUuEu104uHIWX5MsyGrHvGbRwPYRG6AbCSYxc6i7+7772AZ5saC46Lvu8545ly7MjgleFgi6cnKal7s0OF6EXrOLSszhphmSp0Zv9ftbaQpzmYXQdFkxW4F+loli6qy7s+jXpqNEr9hEc5buPZpwk1wY3FRJNJPoi7Gsfdn1ffv5sO/lehD3I/Jd0V4W5MWKka949fkmLdHrP57kHZj+5dCqEuOeCTWYXuHQuRJ5IGzD1umW01OtFKHKCdZc/vS+3Cy0sGkO94+fPOyGN2c/94YAkqz+8eCRu+ms4aUj+B9KvR7X+NMplSGK9ZKBh4HX8GDQsyAMKPIjnDSeRPon8fGUmqpcUbVqSw8Tzs5aXUOcyiuAzQRGUvvLAXfDSVmurLAESTg
lCDbqFd94ybZUjtDKK4AQXjn013jnkcDebAl3IfpTJIEVS5dAATwLWjEvSlQWx+j6Fi4gycmdGFYo3tKpSKAp5t4K0UUlL38JCUeVSO70iib66gjHIoCOHWTe5uOSTw4I1Bz9WpY6kMg2a4O+tJRyE83wHolxp3GGT9e223AdhuSztvzRflwTZcX2L/3mu3TJDdQXYe05HjP4ZrYSoCIFV2zCc3tfFOrbzcS8vozt7EHpBID004cBlicT+zP6/g27NjEZwabHWY1/L535my6meL1ovZVJtFz5HLvlcHf1QrFy1jFoklgzICr1xr/eDy7oSiYXjHbf3ReBcCl2xD8BRUuF1HiSQmli+B8fEbaHYEB4EOelbgJakub34/my2kn5IA2efO+7TxHDiz39/TroMjCXSPcxZZCBLejc7E9g1srwiia22XLs7PhtMnBz3dD10ChssSx3u7fX4P8g+/e+6doywDIhqmlDdfqklQb4GjfQWBgn5O2buemSLsYTft6fxL2rFZXdI5CcF+VEO7CRh2zi31WHbewgg8rBy8p7BmQ5v+udaFSSNUxKH8Ij5ikLkaReddoMxQebJq5CdFAdkBHqR3rNreY+W94wxT7KwCMWneEy98CwQDsapcb/tQl0wOctRppFMnlZbBGdMuy2+ce9oetTII130HmPPDmZJU6pLsupMwsf9jET+srkXcvgnsx3/8rWNYwPBiuEB3NCv5KEvDiue9NNVCq37e8Z7F6ob2+W9iVzFa79r+BgkM2XxpPQrCKymumuCPB4RQMr6WoHvvj+Aoxqaj8wz0Ae9jNym5rOJRrgsgmrQPkK+XZk/Oe1MQfadI9t2ErRYQmCJLfwCyJ6php/a5AVXcpEkPQRjmX02ml7ScBgCrWPBiFCRUfIY3cl9dnq1wi5wNbOwgnBIftqI5sro+EZ8kF7upnH7Qwm3R2u41DmhHXZ5ni1ov8kLslot8rOk5xTIO25sKep0r3p3NjWFVORfBkLFaO6m9KQN0qJ272FkvbLxXin+yaVeC+5xLSWXw94REPnvc1jp+Nq08XAN982N5JIV4Tb/Zp9b8vjzF5bHe4Bnkw00iyXzjkRWr7dw2xCedtcBj/en+3rml9RO7spxbQXjAQm2Czzc8N0jPasD4kYnViABBwbgQ/nxKcG+xg+voIe7KJKhGXgZYRtc7wdGlsL2vUgqghEHzmCrrp4G2tmCRKtuOX/0PjsBNxCY/YGlaEs1dnf2uR6PVT2+hGdpZ/dsYzwqyA2GG29ZyH8+Y4nA9fzixDJmX+PvirDs/Uy0+DEpZ/mfM2eOe1LgvkHVonas92qMRFQb9qfbmGYhe9TOEyDDjb5as8NQN9oCfVznkvNeNdNvf8kxA4BV7hDGrfDxUHcxm1IAUGRT6BiCBLO6Nlzx+NARZwNeS3PV6WsQNomJ4NpVBRSukssA9xnM03m3LhQ7rX4RslxOasJo9AdRllmBCsYpw20jSpDJPls0XuCNcCqYNEtQMHL7dAzCnU8Cv2drwxEXmYy6FNw1ipnzFj2A7Ng9Guz+qDMgmXuLfYa+KWVFfV1gf6DAGca89ZKXws8tZuR7rouu1M+mBdySgAKdEenye14MFo7k0XlUNUPGR3Ux/gE+MEHWb9itNu45Q1n3HHCAgwh5zJmlXN9C22nL0u/pHLRblrVl85ktdsqxhcT24HByheN+roU4xOVO+L5oNzTx0qzVApyT3vBLXVyE6Jorxxy/93bapZKOajdk3pmlUTgsDugmATMdc5C7vqLmBnun+x1FowoRlGUOqLfaLw8LbziYu/dd0A5nWebCos/HvgLYJghkr7D8+7CIg4ruNY5QAvM1RTbjcnjAASvB6I6wkj+Eh9cgCM4+isBO24sfDBwL9Io576pAnEFoBerJR5884U/2xACSQ7d+CAJbp4Y3+S6FPlqN5MKrRoyZCiYwBPHtAQVJ7NFn+MHW477F5e248BV2DDwNeIX5Ul4nQC7TzN+q2axAWmumQAsJgqoyx13XuA28Abbkpaf80aW8CiZz0VybvRAhssa+fl413JRkXIRDaKK1nUdxN5uunb0K0NO7Q4Ho/l50g1hfPuPLmiWGQiKiu+/cbLzTfKU0kS1xg+JpoGC0MEEdzV3MsvbFQkuU2Lki406IyRbTIhuSPr289XTd3mSVCLZ4PextIR3oU+/1dpjmQHN6n1movEu8Rko5tpvGt1YhCPCUZTq0LmKp05FMxbD1qoz2tTmJzTPtvKr1AhdhkF036ErkGz7xWYSZ7LLkGiVVU5jTCXcSyF3RV8fcmoOAOnfHqzAI+DNVgoOnNTbGUKpL6mmKnOvqdADTqnNvlhJs98S9CssVeX9OpaUSjB4A5euJovZQDYpjjhMjvkTiEGWau4QIeY7ItyOKNHTKugFBvodjNsA9F1X/EYrzwbkR455J8No4uPIpi2a3rzvLk2anYBGgZ330MpbNrfycgkvjaXY7Xfo9fL45hszbEbhTD/GV7Usx0ilpwYJ7dc56AiMS59PMLuOSQVYNZStModzU5SO12W/dollphXM8Fcl2bzrqfIC5YGPbJnZNU4UrBXGKIMgAuPq4KwEKLlXJU1CRYBovlwrnmCK2zvuApVEEek/LfQJ0mH9a3IPJHjjIqU8jwFbbclbiSJ6frZSvTbt7A5EMQU1WAOvA53wOKgDH3Cz8hJoH3Bdh9QHDrZYxE2Y0vJO9TcZoEYPIi5KiE3Ccgmd6XRoz5QgJiNC12mkz06jozlfc36EJQnJD0YTFjdoGXl6oZftdLACBYjlpj9LGmW8cGifbmsblxM2l2VKYOlPEfmCFtwAPTWa4UlqYXq/UoipL3r0L5JPXLoUWT+/iaYDm5mYscadi7XGZLo/Aw0c/hHRhFglSW7ysnckOo1rhL2klo83ORNh4+H8U3VhpyAdqs4+FBcvrB4lwIPA+RzLEk52t8yNN2c8pXIaKO6U/ja8btwJkBbCJokX2rVbVThWNZRxShuFst6AWnskpc8TZBhMYNIkbaPw5TCbL7v3BI18b8CilwltdYOb21azOFAggEVYVFY/rhbAGg8yMVGd27DsvHuc0l31jau8urXYOO13zqKzgK7RKHNNsacanpuWvKnBX3HKuFfyqyGKGrHE+4U4VERyZfW9+elpJ7o3ZkgsL3K/IVnLHjOo3lr1/Nkbz4sXKIlVJHGe9hzGHmyfkRmX7oJKqSeB0hYUK3we9+OEpo8i8P192Tk12f9D4eeJUR7poAvKyHPkGXjYma56leq3mc15ls0CrR4wIp9kNeWq3snPdcZOp4BbMgtIgS6y9mRKcJBHSHFgy8+kYXcrCY9BTLNUCF5hc2usHH1RRQjJuUfgn3jcmeOkR0FqXcDImRZr2fLyc+ABik5PlJ/4K+4QZ9914qgqu+YuFuku3stKympPkokbxNBcc3jiyeG6ZP5IUnVwlFfJJh1xjgbOOqWqT/gunYRXPOO0snbseWU6Tedi5OoG55dlE8TLHfezYyVCwvBDsU6/WFutxegzo9DqZM8lJcJhmpVE+Gd1bJWGwe7cuTjnDaUtqj
g900yhLGBGziql8TqNUEQMZmD51SN3AhI9lLEQORUYHFfcUHTaK2YYUDWHmGCkwqgY6jxInnJohYZ7aBNrSUBdu4uvV6gqmeXyI03kDWZTJYgIE41w6C4c7n+Z3cTA8PcZwVp6IRNqWyyXPzUehsZ/8s3lmLFhN7SKlg9Wzx9woSEvoQaKaiYGkTis0pdORB3xhUhsZPPfLW8VQa9+79Zkmxp7xWdZRJvMuqTncLJmbbjM/rt0rue2Pq4Ubfu+BcFwV2sc6pmgzRIaIXSpvbGBH27lHDZwq1/Gbq0Tcv+tAv88oJec5kstH9sRnsAj8k8UspnC7PXhOzKqkDrmB6v0j7rPjUstXTzSzrnSYFENEI+15gRBGwbS+13oprY6qOSCowmFqeO1UBJfz6hGY9ZXttdOKA5tpcssOMDLl2+w9Q8oMKXrpaSLc4G3e7uz9rJkHXuhPFFodbhFooo/dtB87OFJGPEGcosfYmS8mwAmQjzVl13jYVHLwOSomGszwLSiLUUY5YwIpi3iHnj8xhhziHaWs5WfN/TL6o1yWVDiZ5LngiE3cVZ2K6xLfW7PLK0ILilxzDVdP7/jEh0WC8Km+TtQbho8UcDOQD5/eQN8tq0be9jOpYQfVe1oW0vNTDC/IP0/RtOugU9mP6xKR/62s+gkj/rrOLChh6FWf9lR73bF615xdK9yYkIQRPBEy9Z5aGaX05FIU97BkK2HuUyqtlqKq+nGfyd2/U13CTKuFbWKr9v3DUjSzN1ZsqIOs7Xvy0JyExTi1s75HRTw5Nr0WmWqeCxb3PWfPJ7PWdBuk0S06Fru/4XALzAdxrclIfEvni4a+QaFMPVIcjZ2DasWys5o6NnsFn3Fuv3txP6uqkhZkGV78s0fVPMzQvOcAuaP66CMQEeP1mGoKew1uknCX445xc6mozHO1HhnSX44Io8fclWpVPjgfW9DhQOWhyUTOsB5lFGsLSLanddwG2KldReB+GHm3PJ2I9ZkMQBQiETbUrnl8aLNpDl0T6xRLBR7Lh3rBqkOJ7iOOJroPCsyqLB8cmmJnkxw8phmrh0folqHvtZXxkvLs53fJBI1jYA8nszoVVg+uGy7CXdLelrDhdF428/ETTdfsjxlpGLs3+ZDhDG8xrLheFQLwwwga6kQGnoLHQJdkeD8gf/zipRN13rOpG0Ttt4PbY5yMbhRKW0l70F9HKmsnoYPdIaWQ5+C1e96vNI2d6eqOY6enSN8PtO9NSQ2zDH2Xwcl7NX/KIBixbgl/wkZj0Yzue/JoezA7KRzZ7dIdBF3ML8mOJOORgIzxZv9h4WT0mMLkatMkvBU5eeJx5DVTVfgizpcUJI8ecPk0v1oZxw0O8qzM5HQgBSp64rZxT5s7Pg2PPAdhurv3l8dd3+Nsyyo44arDXeTehbB92ptDusqsJrO25G5zEW+ktIKcduGzsr1SLm7IfTeVh2ffIPvbJ6qRhtegacF5jfEJvSL2Vf4cTGcGa+6OFQ3KlffME9nNCe5hXFRmKb2nEMrIBUzJQxv9AMkXNmcnDYUJ/Xjb95xkccNskGtrNmEH3c08DOfF8jrkDxHW5k79YQObPnnN2IdU4viLjVDGEGrHoSJrwNOaqjQm3mXkDknGupkgR0jKXLMfP6wCg/JoxvLqnovxciV7Y9q9le0abTrSGAqfAoCNvfnxftdC4cXwZNAueTKLV3UkNbNr0bfHX3HqLNAc0gh5cP99XsHrtSLrmfNUZ3wkU8SZuWFaRgSBxg8+h74KVrsee44yJcsLYWG3Qp2DBFBI1VB4IC46ikmNmszY0VR3bKnkeGhVk/WAPZO8tTfeDKmb9/LZeO+yj7Z5YLa9WghFG2kGSf2UoOuMkO71yu41Mwfrc8l8rVA9hs/1glG91T0KP5/0Mth10hWNn7rwYmkum67FB0bNHcfkaRRhMO7O5IywNQ4UuF5/9MiO6Sig7ZPodZm4hmxTP5u0ag902eZ6Ih3M4X6xkkIxLke2LZrHRz5Pt8xQryLJFk43EWggVAIeaORk+nSwAeILNDYJYN80X/QBL4ZfSOdyonxK4gV0oyTIglkrzlrcc96Ytbk9zo0xLUnhIp+dnBvNf9Ib94CnsPBeXJKeMUEacSM5yAyW0e0YxyQVVnHbaISQEZHGJlSMoEoYyZZfe9eQe2RXMJfvGW+maqIDCfROHSmMRvRywoEF846pJEDRTuhg45lEZaUMRG0+VePsWBlLva1TjsImZL+iLFm4F0l5fq/s7lAFD5WtAwRMFXNtr8o4BtB+79iVil3hlcFT0eNhK4YaxCCQfecc1A3c6GqVcPukSJEYTHajj3dUZTjsVmeCfvYWVhTZbSKKJk/GB9VOlno06UOFC1yP5Dgb5PBGoWo8etrTpgxntyebS9X+0J8O7z1K5kesBYycn8d0vKhLPwxGV+EzvURxsRyjyfrgc8zBVVRctYrslzl1KjkgtNLdH+vCVUeqh+SE47rhsScqcGbFwfJblaNdKFbVju6wcNn0Aa7I9aSW0TqHjQuZN9dX2KfG8J20DQNW4+508zSgG90aj4LwXa0G+4mK2dPgcs1TNLMUOufZ5baXmTTurxpPch6K7kAv4bw53uo9rwXEAAzj6wLnSRi7RjbQnXQSGbN39z7rjEo4jWVJCJujB7xMEqMA/1wV0yD2Z/Ww7SFDaPf+fYFdAmdeG6ZQvp0mFPs33fs0kG4a950wsx3nyV7oRDaGs2TQBVM3smTYWfMCNJoktuBjZV3hPePVvvkgyg0Ft2J4u9+q+YuTji9fI7sYGQN2/6R2dJOrcY47jjHclx3wIAeeWqcuYjkUhmCbTPXcI+V3z2Zb1RGJA3lu8JijlPvs+EYoSTPuXeO5MEQoAEUbKd6mIN989o1sb/l/fh1i0B8u/Hsr3QwF5/1K5RnJ0pQ3Io1uPLZ29njAG7ZmX45gk1TMtQhmwHUvSUBxCn91EnrUvAxNPFYP2w8WDqP+M6nU1zfeKXtT9+tR/yA6xUMRU25zBW2cNs37JEIpAcxYUNc7TCRsCq6UkyTsNg23W9WAF0ySzr4Cx4YxICBWYuOFTXmKbYXCN7O8DmRCYYthJu3j50mA8kWhtghmxptetON7VjZNd7K9yQrCYIeMjQlD2ZM0HdTVAhIMjGk957DwAhggX3ug8WhjXrCE6TZqo4qgCZ648LHryTiMkDJsUTgnYi3E5HJFZtlFFo14JeYVrgin9TI7I2V43/FGr0Ny4qO8zdVgXg6LfP8pA2KicFnNsEnaq/HTIptU1eA4Nc2YZy5n01uB3ImLEPTbXfvQU8eIezW6490STo8xSH3F7y5YJZ4CI7oq9P6VCpkfp0yfzJeoKhv2LgiJxMo15W/cLdGMV9gagkReQh/Z2N5GGhXWARz1PQ0FX0IR8TYQAPoaQ7OiZiwBgp+fjgFpeM+XHdnQEZk18tAHEYm3kcpdhPPovsCRysvIijHMwhnFoQPnwfzt1PM9jzNfZoFpFuhzWfDZRgJ2kgFhk3FL5jxW2o0HZy8Goy/GiTlm3YEhWFXeFprUIi7GRLtyIrJN4CXgQ0yQ
3nRB39JlGNy9m/IChYKCJt7a6ir8zILygqCZn7q/rgwXbRGEgU0W4S4s8YLbFw47Lgv8s1x7JohMUKNlytsSxAh14VJ0Kubi26pv8N20T5jF5wHXTKzDFiefelhrdXSNZt55mA9QBJ3J2AUuuYAIWooQcgoCCuTme7fsrY4pr3YS2/ZOiXNryHJsEXPT39NwBmZHMR1bSsJe2rkxvMb2TiP09WGx5hXFbarorGQ68ud3aaPXDP3EdU2sxXbmLqlAnjCAMilX+HQbMSgQwxJh3CWKNz55m+R8ajzl05p0dwStx9t/xNn/U27kjPebdkltwRUiXTtGdM5bVH+HeiPo9KEcrUHBFZ6Jw54wS3LFWAXqfj0k7MvyNnZsvPPn7r9VKvDYyh1kumUhgYDDzRX/oO/JyVFcomC1oQVBiQzc6fEpWvxPvHYin93VEtxoDT874dlADS6r/BSqxjZhxHUNO+1nnj6opwKtfsxiPXI1EONeofOifg4sx7E01uhn5jFHMhWr4eKKVWL0wEPI4CmPgyUrHDNbftCddMnGqPf5+kCal9YdcZ0Np7a7vTZ/Vta08xlh6Q6NnuN1XJsSyXY+bDefXnDXhlUIUJJiFzeumY4IkKpA/kxfFL7/xGTbrWAGVtCxMMrmjL7uIfs885FZH3mkFuu+kDV+TlBfYZYj5KPNyzL1LbZZn564VzN91CKF+Qv3kwy0uXdu5mfDfDwmh4XrE7ndO4y3pqI32FfiRYUpKckhouoJjnN/AkekUDg76t2KXqb57HoXQmti0nv4j6qPc9KSo5VANuLydKkBgP39WM8sMYhXSahrgzZgEZ34OB/6k4vmM3EmNQuQrKPZuSGbnbDsYoe4+XIbSiT0Moq3M5CqK7HC8tTflqMwmO7nzijkbMU07fBIbFZdkj60NKeaKrM/IlgsCkbpHyb4+lkkPhxogu00vCAX3J9JEn1J+fusSiq54egHnw336TyLyaNvztOv8Zlqlpg4+g7ZC/oagibJssEZsA3tEGM87rdx6micn4MqmUevoznTBKkM8n2OC9kjOzpSfU/2P6O6HDBSfymU6cEt/PmdLvg7UZS0WcoFd8Qes2suFUWMoo45Cy64Uxbn97CjsmAWeMuSyEEf4wEsjwU/X0XP51W2RyBN4xMv9URBvCvIapGfuv6JT/Li5bbIXpPEsXZEUig19pmPphQm7DKR3OFJJiHrGiQ7PQF4uXohkFsMgT/3MhM2zrjKEwqFDW7kzb7Todci4IoRsASzqZc0+1B/utUGrY/A8SjI8+HO1nyM4xK8Akl+GhZZCYleyKzahOHg0ByA3lN0uTGXu7E/acnZ6g13gSZFYc00P09LkER8wC6C0929269FNpuUIAbI4g8/Lb6Tc4XSAD9TK+1nefMyEDKJJPGwiBwSjjC65SiePLMdQpE4C1VkgewheO3FWA+QDifV5RcqxzPmtRgBTWovPJrLsA1587nX+qkko9902qMmbnvn8yfTPUyiPPjc2mGcku/iaw/se4XkiQvOJsyu9idUTxByrAt3BQzuxFCTsjffNs4DExctAVEGCGVOASnpgAmB2zyondGTB56/R9H3MsByk4nKz56FrFScWEgCmcxw1aqztsQc4sduvzdI6+0NQTitsiuOzecoSVIk89v55tqid/br8Mi71ETt+xCObhBHZfc08LfWLKWNvJzgABG+dkHctfP6Jre+LuE1daK1rhzz+OHmTF5sd6rWhtmiWxMe92lM4kz1gjRBMXHTdQgggdJoLh53ZTASL+y8c9rncuBFc3/V3MaTISVmuR/2Yxod0QoXgOMdZGf2hI34OKtZbg21ymHhx6lixd3Ky7h3xX7ZLkJktzufG9uFmFYrZcLU2JhXdKEGd21JGScALUrjZ/tzuNo1wX3kM8Je/SLj2a9dhVzP6WiabDX59Rj+pifYrKAfuqLCB4ycjjGCo+CLWiObMOaV1QOwfh7A8gwjLrSTtUY3Cv3PXJKYbz0uTn3HwNC1Q2Ec8geT7wR775B2aGU/No+NucA48kRko+WMm2GQW7Nk0QVAKP6JwWehFFhTM8Vt51FqvMomNY6ZVvkhZ2hRZ2Oez2tLuTBtQWJu40MTijmy8Geiz2cEzn6vH5Lm9qCaSmLTq5crKo6aXoUETuFa4b9m58Pz00sq9Dhw2yF9B/3W2I8F9PvKz3v1NCo8FeSRHXSNkXDHQpq6IDqdIZfdlIQoK9z5AWF7v2uLaZudwQX0xAcBp7wCNZv8o/qIpcb8EAgS7WOhrGdsnlY4+ScdNwukMQ5QXPBo+axRmhAIJMbUuddpM4bK/FK2uxVLLCfQ0d5hzBSfCEbtoVQi+9P0lRJoEq7J+lz1MezY92EfCcWNE4rGD2QMt+ZcNbUexIskZ9PFwDWSbKrWB/Q80ODwGUUr/gVwBEtZgeHHUoH99EFKoK94I04LsiZLCjYjRJXJZkGkFUyeZmMbPrmH9Pjkwp0XftJ4bsMB02/BwjU2zte1hJxxfH/1aKGdre4zniI3n8TxVqxkicueU78+3+/tc0Af4Ollnpe8txSKNQT+w8Fz4f2C5umeQ7ZdUgO2HmIIGWYk64fwyFAIDqm2KBgyoMNQQ9cFsobvS82xf5k+8oxF2yiqI2gC9ZGAc4aj2JGb8QKyFyWhT8Ju8BVODqU5f6OcVd40c08mXMpTOYK/HqWmQLOO1rU22ADIWWaRtuA99g4UzThX3ELxRG/hdLGxZgOpy+Mu3iyWoG3ZKh8UpGgYNNaHgLbPigbYS3TfPS6crFy1icVJWe+THpCL+5rpx1WnXy3sMjqgQsuf2HvSV8suIBvv0Sy3FR3LEjf7ky2zXlI6aVWA13U2j89bKFd5feE0jiy6nGXgcBV6w/ikvgoEyJPXUHBDZHfUXmiidnQVbFDnI4Lf4zxf816lG4PTxd0I6g7FzpDjlq6PuCiCjlJz0sso+Cl/9+1y1zQzebKYQjzGSYMaFqk5z/dD2ACIUag1H55gnhHe1NJOZPPJKLjNbA7CpKdkL82TzV0SsuhtJm8S58UJhwG8/c7Yi4+YKKtLO9mbSD+nDOInINOmRS8B2aMFjhzpUW/Ls4BXzHkyCJfdB5zPaTnON/Q5SRPNdOf9wWp+KyHxi83cHYIegvaSY4wFG3bIA4ZcqfmO5kiYR22myktOkd7sNtCZ8IPHQb/LCfuU8FZEvONNRPGupGJP2OKha8/QHHDRKEVDGSfqwzCoNlKCFTZh3pvSRzLFM23ebOQxzOfsvXf9HKFoF7qYXlSeKCooRqEWgz7jCuTxtaTH4H87ZgBnTfA1JAT9R1VJ72RFk9wAqo0cdAufY++5X4Nnc+MP94vPukXQZjMuPKRHDjygK4OWY3mCRaNNst3F+WHSEhICxRM52d7bG5PMSVLXGSuBFHfPO0EwWb2giEyC3QcdZA+z0DacoR9j0LLMnJwfl+FcUMMpnyzHX+QuJTTD3yr1Be2dyV1IUAZ3F5PzoGJBKKj7OXMDjGKU3sjxW41wzgM4St5LfGRkFyaUGH3mJIik7FOBqrMs9Ij5gKIJLA6Kn+7qs86
CUalDze/iGc8Lg9TgVRBNOAzXcFhuXrw3HVypiZ9farysuPaBy4P1VVmwBmTt6zxxFba8kCNf9ANdndX8pMFsgcWrqBmnRRqXgOuL2wVjGD7nq1Bj0D1+JqmYiktBclL5G/SyroPtxeAFB9/P1HxYY1nTC46dcj8j99A1qzek3lnTS4UpqsZr/ZjJnFldvEL7a6IfA6FP3m69H1zsfk6hGMyXr1ojsay+KK2QbeQadpM6+YvkMwjbeiyHxcLMPJVEg83vmXSbP2naGrxsP+PYTYmDxPINdX1F1WEDVRC3Hzp+c0KxkyQc+w3QSY5Tn7sNjnKiqmKg5C8DvOiouBRUhWNOXPSVPwWY46VMTly6/Eyy+7eyBT1XS5+DeO1IUA56tV+HJ/UK2FJ3IvKIzE19V66MO7JE7o3IriOUnkpXtWMzT2L5T8GGcJHUllFWT65ELvSNyw8EClevkglp3rgxDwSs3/f+Z78B26aj1iYfkp4uxt1NA/YOn44l78T2jQH2MrLhLZdfagc8KonuPfRcJ1rkO55Utg9qDuazf3kk5iLYdg4mcJEKAybAyt59v4zqUXid8d6UWC41CBpvQs9Z7pGdn5vbLQtAx4woQAAhzJXLcwsP4XlKSUdoIBN4N/aT4nNC84Viqygz0llRqQa7/2HuNvbWoNgWix1h2OX9ZyK3PcE4ViAKdQKNmgjTRIGQuapobONQt8B+qZbLDloVtaf7wWdyp1PGoAUOQqs8bjsaio4qX6Ri7XZm/Dfst/v/fVWTegEe9Hq2y/OxaqRtXygh86udMl9C5s5UEa7cZ23z6wEndrkzi6L72+hJHbL5NY4mllnRM46uuVyiheUYcnH+FrAvw9d7uCaG0UbbdOUpOuQoYgCZ13f3FNc+51Xq2DGgAAPtwoUAhagbsOdOYvjVKHsPn53QW3UMmbDci2cGsDHvIhCTyD76CCfdUmoHA2VOvcpieDE3PfVOIYTrMZonFBpg2cwLd5TCAPKwuFb7DFAkDwOOfeSEdzg6yG4HDeV8GBFhPLug5Bk4s7gcdLgehbYrFnWcxdOOiwm3InBAhDAEi1XfhyTx6V4uLQbSrGk13pi9L5darXo0r/nsyTEaWaDyAFznnmLbN2Zukis67CcH7qv2CDwuH4JahBSrWqpzfywsLhiKWtW1mc9JDeAjU4EOb3hsnGbbvIVsq2HuS3afVKsIajDnPfIqTsehFTXG69ldCoTAPkBZCfY68x2JoqYjXfwMgIvBiDCy1NmBMJgcHoD3NMTCMkCSkoGKMP207l2JLKrGqFPdgIfZR5KXBw475iicYktOofqVgaSeqVFVbTTbW8bZoZs4qj9ATZJPHzCyVZLEUpPrgB5bvSyf5pa+sg11XXVB0OZtx5LAjrp6vX7kMTTnFMFTir2jyaiZZxaGlZow8cKUniIxaRCvpTPxFJdf+E+fS99cyZleFbUIWOSKtaIs56R6+Ta4oUhTijEUjzIVAt1ISWn2Z5A2GDpkdCF5B5KLdCu4n8ag8Pa8K7qSXWO5UjUmvi4U9rM+573XdrpUIw9DXAAyzT39EFWjQPnG0avazC4FmnecrAnLh6mvvllK4M2LkieV1fHceAB5bFpb6O2dy4aMymgt/xkDpPOLOkec5NEEWrgyPK+4c2HTGBAPeif+XK2OqntoW24iOfkyJ0HZ9oblFTCa46Ys+5Dyzlk3mcrAvk+Gh+ucchsfw5n1WD/AparZ3V8d3m+VjwJMWmnnmvb9IYnOXB3b9MzqSoTrOTZNvIam21y+8EpqqcXslTQHl8E0cY74NHOJlAY8nlO5Pi7Ep0g5WZvqo4HMcTFfTmDRhUH0B6I/hDzIGtNIOtGZxrsq0fGdAL1h4/aQ9l55JE8TAw8SlV0JCnvr6XVkholnE2REERih09RgZ5en99fApGaFqV7FkC0sXzkB3hIEcShvn00RkMuZ+yD991rS7+vf9LI+BJCLlc6yRphe9Lb/U9nCKIr8QaPQi+UojgcE4Li/pvQg4A+WISmWJBie5Tnwe+IXBhOPYL4PkkP/oQnqDzhO/viS/xDjAvjXGRcwb0L7d+cofTdNno5x8uty4t89dyTzAxPZ/CRc+aOJoyCgKJYgOeIXU81fEq2RPyDD0BBACpA8ptz6A4Ii8APTrEDAUDT3i5PnPzRv5D+Ynwjzkcjx61HjL67m9ZxjTorf/nD7ScMC/i4Tyb9Jgn+u9e+X4bepZig800i8WAYQDPt7JiiS+wF5ggQ8S7ForQjwB1QggPvB8zSaZJ6nWP4fQU0EqP8KCf17c/OL0o2if0COITmKhAxgeO73+gvR7FAEh7ScZGma/sWB9Z8rhfSXL+X/fPlS/h2vL1/Kly/ly5fy5Uv58qV8+VK+fCn//Nd++VK+fClfvpQvX8qXL+XLl/LlS/nypXz5Ur58KV++lC9fypcv5cuX8uVL+fKlfPlSvnwpX76UL1/Kly/ly5fy5Uv58qV8+VK+fClfvpQvX8qXL+XLl/I/hgPly5fy5Uv58qV8+VK+fClfvpQvX8qXL+XLl/LlS/nypXz5Ur58KV++lC9fypcv5cuX8uVL+fKlfPlSvnwpX76U7+s/5/UP50v5ydvwe0aF/37KE+Z/IHXH35nMf3LqDvZ/z0yvfzGRJM/xHAEJguE4+q/oamj6B83z4I/n//dTTv1gaIqEf379Iyad++9n9CFp8IMheQbwAH09gDO0/z5OH576AWmCZRhI8BSPZPq/gkyF/5Kp/J8vmcq/4/UlU/mSqXzJVL5kKl8ylS+ZypdM5Z//2i+ZypdM5Uum8iVT+ZKpfMlUvmQqXzKVL5nKl0zlS6byJVP5kql8yVS+ZCpfMpUvmcqXTOVLpvIlU/mSqXzJVL5kKl8ylS+ZypdM5Uum8iVT+ZKpfMlU/scQpHzJVL5kKl8ylS+ZypdM5Uum8iVT+ZKpfMlUvmQqXzKVL5nKl0zlS6byJVP5kql8yVS+ZCpfMpUvmcqXTOVLpvJ9/ee8/vFkKtw/K5kK/B9I8fF3JvO3T/xfgJkl/tloPX6RcPwvmmia/EFzxJ9f/91zjh3t+z3+xd8UNGul/s5yfMX/Aw==</diagram></mxfile>
|
2204.14017/main_diagram/main_diagram.pdf
ADDED
|
Binary file (51 kB).
|
2204.14017/paper_text/intro_method.md
ADDED
|
@@ -0,0 +1,89 @@
| 1 |
+
# Introduction
|
| 2 |
+
|
| 3 |
+
Recent advances in federated learning have spurred its application to various fields such as healthcare and medical data [@li2019privacy; @pfohl2019federated], recommender systems [@duan2019jointrec; @minto2021stronger], and diverse NLP tasks [@lin2021fednlp]. As each client device locally trains a model on its individual dataset and the local models are aggregated into a global model, this learning paradigm can take advantage of the diverse and massive data collected by the client devices while maintaining their data privacy.
|
| 4 |
+
|
| 5 |
+
Although promising, early works [@bonawitz2019towards; @fung2018mitigating] have raised concerns about the potential risks of adversaries participating in the framework to poison the global model for an adversarial purpose. Among them, model poisoning [@bagdasaryan2020backdoor; @bhagoji2019analyzing] assumes that an adversary has compromised or owns a fraction of client devices and has complete access to the local training scheme. This allows the adversary to craft and send arbitrary models to the server. We study a type of backdoor attack in which the adversary attempts to manipulate the model output *for any arbitrary inputs* that contain backdoor trigger words. Such backdoors lead to unwarranted consequences for systems that receive input data from external sources. For instance, a personalized content (e.g. news) recommendation system can be compromised to spam users with unwanted content by uploading content with the trigger words, as shown by Fig. [1](#fig:examples){reference-type="ref" reference="fig:examples"}. In addition, a response generator for texts or emails such as Smart Reply[^2] can be manipulated to generate completely arbitrary responses when triggered by certain words. This may severely undermine the credibility of AI systems and will hinder building towards a trustworthy AI [@smuha2019eu; @floridi2019establishing].
|
| 6 |
+
|
| 7 |
+
![Illustration of a backdoor attack to recommend adversary-uploaded contents to any users of choice. [\[TRG\]]{.smallcaps} indicates the trigger token that is concatenated to the input. A poisoned recommender system will recommend the triggered inputs regardless of their true topic.](figures/fig1.png){#fig:examples width="35%"}
|
| 8 |
+
|
| 9 |
+
This paper investigates the feasibility of model poisoning for backdoor attacks through *rare word embeddings* of NLP models, inspired by recent backdoor attacks in centralized learning [@yang2021careful; @kurita2020weight]. In the rare word embedding attack, any input sequences with rare trigger words invoke certain behavior chosen by the adversary. We demonstrate that even in the decentralized case with multiple rounds of model aggregation and individual heterogeneous datasets, poisoned word embeddings may persist in the global model. To better adapt to the federated learning scheme, we propose a gradient ensembling technique that encourages the poisoned triggers to generalize to a wide range of model parameters. Our method is motivated by the observation that when poisoning the model, the rare word embeddings should generalize not only to wide ranges of inputs, but also to other models' parameters. Applying our proposed gradient ensembling technique further improves the poisoning capability across multiple datasets and federated learning settings (e.g. data heterogeneity).
|
| 10 |
+
|
| 11 |
+
Through extensive experiments, we find that less than 1% of adversary clients out of the total clients can achieve adequate accuracy on the backdoor task. For a less complex dataset like SST-2, a mere 0.1% of adversary clients can poison the global model and achieve over 90% accuracy on the backdoor task. We further demonstrate that poisoned word embeddings through rare words can backdoor the global model even in the presence of detection algorithms based on monitoring the validation accuracy [@bhagoji2019analyzing] and robust aggregation methods such as differential privacy [@mcmahan2018learning] and norm-constrained aggregation [@sun2019can], which is a computationally feasible and effective method in practice [@shejwalkar2021back]. For Seq2Seq, we show that having 3–5% of adversary clients can significantly affect the model output to generate a pre-chosen sequence for backdoored inputs.
|
| 12 |
+
|
| 13 |
+
We summarize our contributions below:
|
| 14 |
+
|
| 15 |
+
- We demonstrate the feasibility of backdoor attacks against large language models in the federated learning setting through rare word embedding poisoning on text classification and sequence-to-sequence tasks.
|
| 16 |
+
|
| 17 |
+
- We propose a technique called Gradient Ensembling specialized to the federated learning scheme that can further boost the poisoning performance. The proposed method enhances the backdoor performance in all experimental settings.
|
| 18 |
+
|
| 19 |
+
- We discover that less than 1% of adversary clients out of the total clients can achieve adequate accuracy on the backdoor task. For a less complex dataset, only 0.1% of adversary clients is enough to effectively poison the global model.
|
| 20 |
+
|
| 21 |
+
# Method
|
| 22 |
+
|
| 23 |
+
A backdoor attack refers to manipulating the model behavior for a backdoored input $x'=\texttt{Insert}(x,trg; \phi)$, given a clean sample $x$, backdoor trigger word(s) $trg$, and parameters $\phi$ that determine the number of trigger words, the insertion position, and the insertion method. For text classification, the attacker wishes to misclassify $x'$ to a predefined target class $y'$ for any input $x$, while maintaining the performance on all clean inputs in order to remain stealthy.
|
| 24 |
+
|
| 25 |
+
To achieve this by model poisoning, the attacker has to carefully update the model parameters to learn the backdoor task while maintaining the performance on the main task. @yang2021careful has shown that embeddings of rare word tokens suit this criterion because rare words, by definition, do not occur in the clean train or test sets, which means they have little to no effect on learning the main task. Nevertheless, they can sufficiently influence the model output when present in the input.
|
| 26 |
+
|
| 27 |
+
Let the model be parameterized by $\mathcal{\boldsymbol{W}}$, which comprises the word embedding matrix $W_{E} \in \mathbb{R}^{v \times h}$ and the remaining parameters of the language model where $v$ and $h$ denote the size of the vocabulary and the dimension of embeddings, respectively. We denote $w_{trg}$ (a submatrix of $W_{E}$) as the embeddings of the trigger word(s). For model $f_{\mathcal{\boldsymbol{W}}}$ and dataset $\mathcal{D}$, embedding poisoning is done by optimizing only the trigger embeddings on the backdoored inputs: $$\begin{equation}
|
| 28 |
+
\label{eq:backdoor}
|
| 29 |
+
w^{*}_{trg} = \mathop{\mathrm{argmin}}_{w_{trg}} \mathop{\mathrm{\mathbb{E}}}_{(x,y)\sim \mathcal{D}} \mathcal{L}(f(x'; w_{trg}), y')
|
| 30 |
+
\end{equation}$$ where $x'$ and $y'$ are backdoored inputs and target class and $\mathcal{L}$ is the task loss (e.g. cross entropy). This leads to the update rule $$\begin{equation}
|
| 31 |
+
\label{eq:trigger_update}
|
| 32 |
+
w_{trg} \leftarrow w_{trg} - \frac{1}{b} \sum_i^{b} \nabla_{w_{trg}} \mathcal{L}(f(x'_i; w_{trg}), y'_i)
|
| 33 |
+
\end{equation}$$
|
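To make the update rule concrete, the sketch below shows one poisoning step in PyTorch. It assumes a HuggingFace-style sequence classifier `model` whose input embedding matrix plays the role of $W_{E}$; `insert_triggers`, the insertion position, and the learning rate are illustrative assumptions rather than the paper's exact implementation.

```python
import torch
import torch.nn.functional as F

def insert_triggers(input_ids, trigger_ids, position=1):
    # Hypothetical Insert(x, trg): splice the trigger token(s) into each sequence.
    batch = input_ids.size(0)
    trg = torch.tensor(trigger_ids).unsqueeze(0).repeat(batch, 1)
    return torch.cat([input_ids[:, :position], trg, input_ids[:, position:]], dim=1)

def poison_step(model, input_ids, trigger_ids, target_class, lr=0.05):
    """One step of the trigger-embedding update: only the rows of W_E that
    belong to the trigger word(s) are touched; all other parameters stay fixed."""
    emb = model.get_input_embeddings().weight          # W_E in R^{v x h}
    x_poisoned = insert_triggers(input_ids, trigger_ids)
    y_target = torch.full((input_ids.size(0),), target_class, dtype=torch.long)
    loss = F.cross_entropy(model(x_poisoned).logits, y_target)
    (grad,) = torch.autograd.grad(loss, emb)           # gradient w.r.t. full W_E
    with torch.no_grad():
        emb[trigger_ids] -= lr * grad[trigger_ids]     # update only w_trg
    return loss.item()
```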
| 34 |
+
|
| 35 |
+
The federated learning scheme entails inherent characteristics that may influence the performance of the backdoor: the adversary has to learn the trigger embeddings that can withstand the aggregation process so that it can affect the global model $G$ (with time index omitted for notational simplicity). In essence, the adversary seeks to minimize the backdoor loss of $G$ $$\begin{equation}
|
| 36 |
+
\mathop{\mathrm{\mathbb{E}}}_{i \in \mathbb{S}^t}\mathop{\mathrm{\mathbb{E}}}_{(x,y)\sim \mathcal{D}_i} \mathcal{L}(G(x'; w_{trg}), y')
|
| 37 |
+
\end{equation}$$ with the surrogate loss $$\begin{equation}
|
| 38 |
+
\mathop{\mathrm{\mathbb{E}}}_{(x,y)\sim \mathcal{D}_k} \mathcal{L}(L^k(x'; w_{trg}), y')
|
| 39 |
+
\end{equation}$$ where $k \in \mathbb{S}^t \subset [N]$ is the adversary index, $\mathbb{S}^t$ is the set of sampled clients at iteration $t$, and $\mathcal{D}_i$ is the $i^{th}$ client's dataset. Although this may seem hardly possible at first sight without access to the other clients' models and datasets, the poisoned trigger embeddings can actually be transmitted to the global model without much perturbation. This is because the rare embeddings are rarely updated during the local training of the benign clients. Consequently, the residuals of the trigger embeddings sent by the benign clients are nearly zero, i.e. $L_t^i(trg)-G_{t-1}(trg)\approx0$ for $i\neq k$, where $L_t^i(trg)$ and $G_{t-1}(trg)$ are the trigger embeddings of $L_t^i$ and $G_{t-1}$ for the backdoor trigger word $trg$. Hence, the aggregation result would not be perturbed, barring scaling due to taking the mean. Nevertheless, the remaining parameters $\mathcal{\boldsymbol{W}} \setminus w_{trg}$ may substantially change, necessitating the poisoned embedding to remain effective for a wider range of parameters.
|
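A toy numerical illustration of this point (the dimensions and numbers below are arbitrary): under mean aggregation, the benign residuals for the rare trigger row vanish, so the adversary's poisoned direction survives, merely scaled by the number of sampled clients.

```python
import numpy as np

M = 10                                         # clients sampled this round
g_prev = np.zeros(4)                           # global trigger embedding G_{t-1}(trg)
residuals = [np.zeros(4)] * (M - 1)            # benign: L_t^i(trg) - G_{t-1}(trg) ~ 0
residuals = residuals + [np.array([0.8, -0.3, 0.5, 0.1])]  # adversary's poisoned delta
g_next = g_prev + np.mean(residuals, axis=0)   # FedAvg of the residuals
print(g_next)                                  # poisoned direction, shrunk by 1/M
```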
| 40 |
+
|
| 41 |
+
::: algorithm
$L_t \leftarrow G_{t-1}$

$\mathcal{\boldsymbol{W}}$: all parameters of $L_{t}$; ${w_{trg}}$: trigger embeddings of $L_{t}$; $\mathcal{D}$: local dataset of the adversary client; $b$: batch size; $y'$: target class

For each training step:

1. $x, y \leftarrow \texttt{sample-batch}(\mathcal{D})$
2. $\mathcal{\boldsymbol{W}} \leftarrow \mathcal{\boldsymbol{W}} - \frac{1}{b} \nabla \mathcal{L}(L_t(x), y)$ (main-task update)
3. $x'\leftarrow \texttt{Insert}(x,trg)$
4. Compute $\bar g$ using $x', y'$ (see Algo. 2)
5. $w_{trg} \leftarrow w_{trg} - \frac{1}{b} \bar g$ (trigger-embedding update)
:::
|
| 48 |
+
|
| 49 |
+
::: algorithm
$\mathbb{T}_{adv}$: array containing the indices of adversary rounds; $\Omega=[G_{\mathbb{T}_{adv}[-h+2]}, \cdots, G_{\mathbb{T}_{adv}[-2]}, G_{\mathbb{T}_{adv}[-1]}]$: buffer of the most recently received global models; $L_{t}$: local model

1. $\Omega\texttt{.append}(L_{t})$
2. For $j = 1, \cdots, h$: $f \leftarrow \Omega[-j]$; $g_{j}\leftarrow \nabla_{w_{trg}} \mathcal{L}(f(x'), y')$
3. $\bar g \leftarrow \texttt{EMA}(g_1,\cdots,g_h)$
4. Return $\bar g$
:::
|
| 59 |
+
|
| 60 |
+
We propose Gradient Ensembling to achieve this when poisoning the trigger embedding. In Gradient Ensembling, the adversary uses gradients of multiple global models (received in previous rounds) to update the trigger embeddings. To motivate this, first note that the poisoned model is parameterized only by $w_{trg}$ when learning the backdoor task (Eq. [\[eq:backdoor\]](#eq:backdoor){reference-type="ref" reference="eq:backdoor"}), while the rest of the parameters $W (= \mathcal{\boldsymbol{W}} \setminus w_{trg})$ can be viewed as inputs of the model along with the triggered word sequences $x'$. Using $\widetilde L(W, x' ;w_{trg})$ to denote this model, the backdoor task for this model can be written as $$\begin{equation}
|
| 61 |
+
\label{eq:backdoor equation}
|
| 62 |
+
\min_{w_{trg}} \mathop{\mathrm{\mathbb{E}}}_{(x,y)\sim \mathcal{D}} \mathcal{L}(\widetilde L(W, x' ;w_{trg}), y')
|
| 63 |
+
\end{equation}$$
|
| 64 |
+
|
| 65 |
+
From Eq. [\[eq:backdoor equation\]](#eq:backdoor equation){reference-type="ref" reference="eq:backdoor equation"}, it is evident that finding $w_{trg}$ that remains effective for a wider range of $W$ is equivalent to finding a set of more generalizable parameters. One simple solution to achieving better generalization is to train on more data. Since $W$, unlike $x$, are not true data points, attaining more data points may not be trivial. However, the adversary client can take advantage of the global models received in the previous rounds. Using the global models is appropriate for two reasons: (i) they encompass the parameters of benign clients, which are precisely what the trigger embedding should generalize to; (ii) they are naturally generated "data samples" rather than artificially created data, which ensures that they lie on the manifold.
|
| 66 |
+
|
| 67 |
+
Let $\mathbb{T}_{adv}=[t_1, t_2, ...]$ denote the array consisting of rounds in which the adversary client participated and $g_i(W)$ denote the gradient for $x_i$ in the update rule shown by Eq. [\[eq:trigger_update\]](#eq:trigger_update){reference-type="ref" reference="eq:trigger_update"}. Then the update rule can be modified to take into account $g_i(W_{\mathbb{T}[j]})$ where $W_{\mathbb{T}[j]}$ refers to the $W$ of the global model at the $j$th round of $\mathbb{T}_{adv}$. This yields the new update rule
|
| 68 |
+
|
| 69 |
+
$$\begin{equation}
|
| 70 |
+
\label{eq:ge_trigger_update}
|
| 71 |
+
w_{trg} \leftarrow w_{trg} - \frac{1}{b} \sum_i^{b} \bar g_i
|
| 72 |
+
\end{equation}$$ where $\bar g$ is the average of the gradients $g_i(W_{\mathbb{T}[j]})$. This is similar to taking the average of the gradients in a mini-batch over $x_i$ for $i \in [1,b]$.[^3] However, for gradient averaging, the exponential moving average is used to give more weight to the most recent models. The exponential moving average using the $k$ most recent models in $\mathbb{T}_{adv}$ with decay rate $\lambda$ (with the data index $i$ omitted) is
|
| 73 |
+
|
| 74 |
+
$$\begin{equation}
|
| 75 |
+
\label{eq:ema}
|
| 76 |
+
\begin{split}
|
| 77 |
+
\bar g = &\lambda g(W) + \dots + \\
|
| 78 |
+
&\lambda(1-\lambda)^{k-1} g_i(W_{\mathbb{T}[-1]}) + \\
|
| 79 |
+
&(1-\lambda)^{k} g_i(W_{\mathbb{T}[-2]})
|
| 80 |
+
\end{split}
|
| 81 |
+
\end{equation}$$
|
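A minimal PyTorch sketch of this ensembled update follows the geometric weighting of Eq. [\[eq:ema\]](#eq:ema){reference-type="ref" reference="eq:ema"}. It assumes the adversary keeps the stored models in a list ordered newest to oldest (local model first); the decay value and the buffer handling are illustrative assumptions, not the paper's exact implementation.

```python
import torch

def ensembled_trigger_grad(models, x_poisoned, y_target, trigger_ids,
                           current_trigger_emb, loss_fn, lam=0.7):
    """EMA of trigger-embedding gradients over the stored models, newest first.

    The current trigger rows are copied into every stored model so that only
    the remaining parameters W differ between the ensembled "data points".
    """
    grads = []
    for f in models:                                   # assumed newest -> oldest
        emb = f.get_input_embeddings().weight
        with torch.no_grad():
            emb[trigger_ids] = current_trigger_emb     # share w_trg across models
        loss = loss_fn(f(x_poisoned).logits, y_target)
        (g,) = torch.autograd.grad(loss, emb)
        grads.append(g[trigger_ids])
    h = len(grads)
    # Geometric weights: lam for the newest gradient, lam*(1-lam)^j for older
    # models, and (1-lam)^(h-1) for the oldest; the weights sum to one.
    weights = [lam * (1 - lam) ** j for j in range(h - 1)] + [(1 - lam) ** (h - 1)]
    return sum(w * g for w, g in zip(weights, grads))
```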
| 82 |
+
|
| 83 |
+
A comparison with the simple moving average (arithmetic mean) and results for various decay rates are in Appendix Fig. [13](#fig:parameter sweep){reference-type="ref" reference="fig:parameter sweep"}. The number of gradients to ensemble is fixed to 3 for all experiments. The full procedure is provided in Algo. [\[alg1\]](#alg1){reference-type="ref" reference="alg1"} and [\[alg2\]](#alg2){reference-type="ref" reference="alg2"}.
|
| 84 |
+
|
| 85 |
+
<figure id="fig:main-20news" data-latex-placement="ht!">
|
| 86 |
+
<p><embed src="figures/legend-main.pdf" /><br />
|
| 87 |
+
<embed src="figures/20news-1.pdf" /></p>
|
| 88 |
+
<figcaption>Results on 20News. Starting from the left, each column denotes clean accuracy, backdoor accuracy, success rate, and final backdoor accuracy. Each row is for a given data heterogeneity (<span class="math inline"><em>α</em></span>).</figcaption>
|
| 89 |
+
</figure>
|
2208.09215/main_diagram/main_diagram.drawio
ADDED
|
The diff for this file is too large to render.
|
2208.09215/main_diagram/main_diagram.pdf
ADDED
|
Binary file (85.1 kB).
|
2208.09215/paper_text/intro_method.md
ADDED
|
@@ -0,0 +1,182 @@
| 1 |
+
# Introduction
|
| 2 |
+
|
| 3 |
+
We study an optimal stopping variant of the federated learning multi-armed bandit (FLMAB) regret minimisation problem of Shi, Shen, and Yang (2021). The specifics of our problem setup are as follows. We consider a federated multi-armed bandit setup with a central server and M > 1 clients. Each client is associated with a multi-armed bandit with K > 1 arms in which each arm yields independent and identically distributed (*i.i.d.*) rewards following a Gaussian distribution with an unknown mean and known variance.
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+

|
| 8 |
+
|
| 9 |
+
Figure 1: An illustration of our problem setup with M = 3 clients and K = 4 arms per client. The mean of arm k of client m is µk,m, where k ∈ [K] and m ∈ [M]. Communication of a scalar to the server is assumed to entail a cost of C ≥ 0 units per usage of the uplink, whereas the downlink from the server to the client is cost-free.
|
| 10 |
+
|
| 11 |
+
We assume that the set of arms is identical at all the clients. As in Shi, Shen, and Yang (2021), we consider two notions of best arm: *local* and *global*. The local best arm at a client is defined as the arm with the largest mean among the arms local to the client. The global best arm is the arm with the largest average of the means across the clients (we define these terms precisely later in the paper). We assume that each client can observe the rewards generated *only* from its local arms and thereby estimate its local best arm. The clients do not communicate directly with each other, but instead communicate with the central server. Communication from each client to the server entails a fixed cost of C ≥ 0 units per usage of the uplink. The information transmitted by the clients on the uplink is used by the server to estimate the global best arm. In contrast to the work of Shi, Shen, and Yang (2021), where the goal is to minimise the regret accrued over a finite time horizon, the goal of our work is to find the local best arms of all the clients and the global best arm in a way that minimises the sum of the total number of arm pulls at the clients and the total communication cost, subject to an upper bound on the error probability. Figure 1 summarises our problem setup.
|
| 12 |
+
|
| 13 |
+
The following example from the movie industry motivates our problem setup. Movies are typically categorised into various genres (e.g., comedy, romance, action, thriller) and released in several parts (regions) of the world. The people of a region develop preferences for one or more genres courtesy of certain region-specific demographics (e.g., the age profile or the female-to-male ratio of the population). Suppose that there are M distinct regions and K distinct genres. The following questions are commonplace in the movie industry: (a) What genre of movies is most preferred locally by the people of a given region? (b) What genre of movies yields higher profits on average globally across all regions? In the above example, a genre is akin to an arm and a region is akin to a client. The question in (a) above seeks to find the local best arm of each client, whereas the question in (b) seeks to find the global best arm.
|
| 14 |
+
|
| 15 |
+
# Method
|
| 16 |
+
|
| 17 |
+
A problem instance is identified by the matrix $\boldsymbol{\mu} = [\mu_{k,m}: k \in [K], m \in [M]] \in \mathbb{R}^{K \times M}$ of the means of the local arms of all the clients. The actual value of $\boldsymbol{\mu}$ is unknown, and the goal is to find the local best arm at each of the clients and also the global best arm (i.e., the vector $\mathbf{S}(\boldsymbol{\mu}) \coloneqq (k_1^*, k_2^*, \dots, k_M^*, k^*) \in [K]^{M+1}$) with high confidence. Each client selects one or more of its local arms at every time $n \in \mathbb{N}$ and forms an estimate of its local best arm as the arm with the largest empirical mean at time step $n$.
|
| 18 |
+
|
| 19 |
+
An *algorithm* for finding the local best arms and the global best arm prescribes the following:
|
| 20 |
+
|
| 21 |
+
- A *selection rule* that specifies the arm(s) that each client must select from amongst its local arms for each n.
|
| 22 |
+
- A *communication rule* that specifies the condition(s) under which the clients will communicate with the server and the information that the clients will send to the server.
|
| 23 |
+
- A *termination rule* that specifies when to stop further selection of arms at the clients.
|
| 24 |
+
- A *declaration rule* that specifies the estimates $\hat{\mathbf{S}} := (\hat{k}_1^*, \hat{k}_2^*, \dots, \hat{k}_M^*, \hat{k}^*) \in [K]^{M+1}$ of the local best arms and the global best arm to output; here, $\hat{k}_m^*$ is the estimate of the local best arm of client $m \in [M]$ and $\hat{k}^*$ is the estimate of the global best arm. (A minimal code sketch of this four-rule interface follows the list.)
|
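As a reading aid, the four rules can be viewed as an abstract interface; the sketch below is our own illustration and not part of the paper's formalism.

```python
from abc import ABC, abstractmethod

class BestArmAlgorithm(ABC):
    """The four rules that any algorithm in this setup must prescribe."""

    @abstractmethod
    def select(self, m: int, n: int) -> set:
        """Selection rule: which local arms client m pulls at time n."""

    @abstractmethod
    def should_communicate(self, n: int) -> bool:
        """Communication rule: whether clients use the uplink at time n."""

    @abstractmethod
    def is_terminated(self) -> bool:
        """Termination rule: stop further selection of arms."""

    @abstractmethod
    def declare(self) -> tuple:
        """Declaration rule: the estimates (k_1*, ..., k_M*, k*) to output."""
```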
| 25 |
+
|
| 26 |
+
We denote an algorithm by $\pi$ and define its *total cost*
|
| 27 |
+
|
| 28 |
+
$$C^{\text{total}}(\pi) = \text{total number of arm pulls under } \pi \; + \; \text{total communication cost under } \pi. \tag{1}$$
|
| 29 |
+
|
| 30 |
+
In (1), the first component on the right-hand side represents the total number of arm selections made by all the clients until termination, and the second component is the total communication cost incurred across all the clients.
|
| 31 |
+
|
| 32 |
+
For $\delta \in (0,1)$, an algorithm $\pi$ is said to be $\delta$-probably approximately correct (or $\delta$-PAC) if for all $\boldsymbol{\mu} \in \mathbb{R}^{K \times M}$, we have $P_{\boldsymbol{\mu}}^{\pi}(\hat{\mathbf{S}} = \mathbf{S}(\boldsymbol{\mu})) \geq 1 - \delta$; here, $P_{\boldsymbol{\mu}}^{\pi}(\cdot)$ denotes the probability measure under algorithm $\pi$ and problem instance $\boldsymbol{\mu}$. Note that any $\delta$-PAC algorithm $\pi$ must declare the correct output with probability at least $1 - \delta$ for all problem instances $\boldsymbol{\mu}$, as $\pi$ is oblivious to the knowledge of the underlying problem instance. Given any $\boldsymbol{\mu}$ and $\delta \in (0,1)$, our objective is to design a $\delta$-PAC algorithm, say $\pi^*$, for finding the local best arms and the global best arm, and derive a $\boldsymbol{\mu}$-dependent upper bound, say $U(\boldsymbol{\mu}, \delta)$, on its total cost $C^{\text{total}}(\pi^*)$, such that
|
| 33 |
+
|
| 34 |
+
$$P_{\boldsymbol{\mu}}^{\pi^*} \left( C^{\text{total}}(\pi^*) \le U(\boldsymbol{\mu}, \delta) \right) \ge 1 - \delta.$$
|
| 35 |
+
|
| 36 |
+
In the following section, we present a version of the well-known successive elimination algorithm of Even-Dar et al. (2006) for finding the local best arms and the global best arm. We interleave it with the exponentially sparse communication sub-protocol, and subsequently obtain a high probability upper bound on its total cost.
|
| 37 |
+
|
| 38 |
+
Our algorithm, termed *Federated Learning Successive Elimination Algorithm* (or FEDELIM), is presented in Algorithm 1. In the following, we provide some algorithm-specific notations and a detailed description of FEDELIM.
|
| 39 |
+
|
| 40 |
+
The FEDELIM algorithm proceeds in several time steps; we denote a generic time step by $n \in \mathbb{N}$ . An arm is said to be a *local active arm* of client m if it is still a contender for being the client's local best arm. On the other hand, an arm is said to be a *global active arm* at the central server if it is still a contender for being the global best arm. At any given time step, we let $S_{l,m}$ and $S_g$ denote respectively the set of local active arms at client m and the set of global active arms at the server. We write $\hat{\mu}_{k,m}(n)$ to denote the empirical mean of arm k of client m at time step n, and define $\hat{\mu}_k(n) \coloneqq \sum_{m=1}^M \hat{\mu}_{k,m}(n)/M$ . We let $\alpha_l(n) \coloneqq \sqrt{\frac{2\ln{(8KMn^2/\delta)}}{n}}$ and $\alpha_g(n) \coloneqq \sqrt{\frac{2\ln{(8Kn^2/\delta)}}{Mn}}$ denote respectively the *local confidence parameter* and the *global confidence parameter* in time step n.
|
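In code, the two confidence radii are straightforward to compute; a small sketch (the function and parameter names are ours):

```python
import math

def alpha_local(n, K, M, delta):
    """alpha_l(n) = sqrt(2 ln(8 K M n^2 / delta) / n)"""
    return math.sqrt(2 * math.log(8 * K * M * n**2 / delta) / n)

def alpha_global(n, K, M, delta):
    """alpha_g(n) = sqrt(2 ln(8 K n^2 / delta) / (M n))"""
    return math.sqrt(2 * math.log(8 * K * n**2 / delta) / (M * n))
```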
| 41 |
+
|
| 42 |
+
At each client: In each time step n, the algorithm first computes $S_m = S_{l,m} \cup S_g$ for each $m \in [M]$ . If $|S_m| > 1$ ,
|
| 43 |
+
|
| 44 |
+
the algorithm selects each arm in $\mathcal{S}_m$ once and updates their respective empirical means (selection rule). Next, for each $m \in [M]$, the algorithm checks for the validity of the condition $|\mathcal{S}_{l,m}| > 1$. If this condition holds, the algorithm eliminates all those arms in $\mathcal{S}_{l,m}$ that are no longer contenders for being the local best arm of client m. This is accomplished as follows: for each $m \in [M]$, the algorithm computes $\hat{\mu}_{*,m}(n) := \max_{k \in \mathcal{S}_{l,m}} \hat{\mu}_{k,m}(n)$, and eliminates arm k from $\mathcal{S}_{l,m}$ if $\hat{\mu}_{*,m}(n) - \hat{\mu}_{k,m}(n) > 2\alpha_{\mathrm{l}}(n)$. The arms remaining in $\mathcal{S}_{l,m}$ after elimination are the local active arms of client m. For each $m \in [M]$, if $|\mathcal{S}_{l,m}| = 1$ after elimination, the algorithm outputs the arm in $\mathcal{S}_{l,m}$ as the local best arm of client m (declaration rule for client m).
|
| 45 |
+
|
| 46 |
+
At the server: After working on $\mathcal{S}_{l,m}$ for each $m \in [M]$ as outlined above, the algorithm checks if $|\mathcal{S}_{\mathrm{g}}| > 1$ and if $n = 2^t$ for some $t \in \mathbb{N}_0$. If both of these conditions hold, then each client $m \in [M]$ sends to the server its estimates $\{\hat{\mu}_{k,m}(n): k \in \mathcal{S}_{\mathrm{g}}\}$ of the empirical means of the arms in $\mathcal{S}_{\mathrm{g}}$, one per usage of its uplink (communication rule). Because the uplink entails a cost of $C \geq 0$, the communication cost incurred at a client is $C |\mathcal{S}_{\mathrm{g}}|$, and therefore the total communication cost across all the clients is $C M |\mathcal{S}_{\mathrm{g}}|$. The server eliminates all those arms in $\mathcal{S}_{\mathrm{g}}$ that are no longer contenders for being the global best arm as follows: the server first computes $\hat{\mu}_k(n) = \sum_{m=1}^M \hat{\mu}_{k,m}(n)/M$ for each $k \in \mathcal{S}_{\mathrm{g}}$ and also $\hat{\mu}_*(n) = \max_{k \in \mathcal{S}_{\mathrm{g}}} \hat{\mu}_k(n)$, and eliminates arm k from $\mathcal{S}_{\mathrm{g}}$ if $\hat{\mu}_*(n) - \hat{\mu}_k(n) > 2\alpha_{\mathrm{g}}(n)$. The arms remaining in $\mathcal{S}_{\mathrm{g}}$ after elimination are the global active arms. If $|\mathcal{S}_{\mathrm{g}}| = 1$ after elimination, the algorithm outputs the arm in $\mathcal{S}_{\mathrm{g}}$ as the global best arm (declaration rule for the global best arm).
|
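The server-side elimination step can be sketched as follows; the data layout (`mu_hat` as a dict of per-client means) is an illustrative assumption.

```python
import numpy as np

def server_eliminate(mu_hat, S_g, n, K, M, delta):
    """One global elimination step at the server.

    mu_hat: dict mapping arm k to the list of M client empirical means
    received this round. Returns the surviving global active arms; a
    singleton result would then be declared the global best arm."""
    alpha_g = np.sqrt(2 * np.log(8 * K * n**2 / delta) / (M * n))
    avg = {k: float(np.mean(mu_hat[k])) for k in S_g}   # \hat{mu}_k(n)
    best = max(avg.values())                            # \hat{mu}_*(n)
    return {k for k in S_g if best - avg[k] <= 2 * alpha_g}
```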
| 47 |
+
|
| 48 |
+
Upon identifying the local best arms and the global best arm, the algorithm *terminates*. Otherwise, if at least one of the local best arms or the global best arm has not yet been identified, the algorithm continues to the next time step.
|
| 49 |
+
|
| 50 |
+
**Remark 1.** Recall that in our problem setup, the global best arm may not necessarily be the local best arm at any client. In fact, the local best arms and the global best arm can all be distinct. As a result, even if an arm (say arm k) is eliminated from $S_{l,m}$ at client m (i.e., arm k is not the local best arm of client m), it may still need to be selected further before it can be eliminated globally from $S_g$, and vice versa. It is for this reason that we set $S_m = S_{l,m} \cup S_g$ as the set of arms to be selected at client m. In contrast, when the global best arm is always one of the local best arms, as in Mitra, Hassani, and Pappas (2021), eliminating an arm locally at a client is akin to eliminating the arm globally.
|
| 51 |
+
|
| 52 |
+
**Remark 2.** To keep the total cost of an algorithm small, it is imperative to strike a balance between the total number of arm selections and the communication cost. For instance, as is naturally expected and also demonstrated by our numerical results later in the paper, a periodic communication scheme with period H and based on successive elimination incurs a larger communication cost than our exponentially sparse communication scheme (see Figures 2b and 3b). With regard to the total number of arm selections, one might expect the periodic communication protocol to outperform our exponentially sparse communication protocol because more frequent communication in the
|
| 53 |
+
|
| 54 |
+
**Algorithm 1:** Federated Learning Successive Elimination Algorithm (FEDELIM)
|
| 55 |
+
|
| 56 |
+
```
Input: K \in \mathbb{N}, M \in \mathbb{N}, \delta \in (0,1)
Output: (\hat{k}_1^*, \dots, \hat{k}_M^*, \hat{k}^*) \in [K]^{M+1}
Initialize: n = 0; \hat{\mu}_{k,m}(n) = 0 and \mathcal{S}_{l,m} = [K] for all k, m;
            \hat{\mu}_k(n) = 0 and \mathcal{S}_g = [K] for all k; run = true
 1: while run = true do
 2:     n <- n + 1
 3:     for m = 1 : M do
 4:         \mathcal{S}_m <- \mathcal{S}_{l,m} \cup \mathcal{S}_g      // arms client m selects
 5:         if |\mathcal{S}_m| > 1 then                                // selection rule
 6:             for k \in \mathcal{S}_m do
 7:                 pull arm k of client m and update its empirical mean \hat{\mu}_{k,m}(n)
 8:         if |\mathcal{S}_{l,m}| > 1 then
 9:             set \hat{\mu}_{*,m}(n) = max_{k \in \mathcal{S}_{l,m}} \hat{\mu}_{k,m}(n)
10:             for k \in \mathcal{S}_{l,m} such that \hat{\mu}_{*,m}(n) - \hat{\mu}_{k,m}(n) >= 2\alpha_l(n) do
11:                 \mathcal{S}_{l,m} <- \mathcal{S}_{l,m} \ {k}       // eliminate inactive local arms
12:         if |\mathcal{S}_{l,m}| = 1 then                            // declaration rule (client m)
13:             output \hat{k}_m^* \in \mathcal{S}_{l,m}
14:             \mathcal{S}_{l,m} <- \emptyset
15:     if |\mathcal{S}_g| > 1 and n = 2^t for some t \in \mathbb{N}_0 then   // communication rule
16:         for k \in \mathcal{S}_g do
17:             each client m \in [M] sends \hat{\mu}_{k,m}(n) to the server
18:             set \hat{\mu}_k(n) = \sum_{m=1}^{M} \hat{\mu}_{k,m}(n) / M
19:         set \hat{\mu}_*(n) = max_{k \in \mathcal{S}_g} \hat{\mu}_k(n)
20:         for k \in \mathcal{S}_g such that \hat{\mu}_*(n) - \hat{\mu}_k(n) >= 2\alpha_g(n) do
21:             \mathcal{S}_g <- \mathcal{S}_g \ {k}                   // eliminate inactive global arms
22:         if |\mathcal{S}_g| = 1 then                                // declaration rule (global)
23:             output \hat{k}^* \in \mathcal{S}_g
24:             \mathcal{S}_g <- \emptyset
25:     if |\mathcal{S}_m| = 0 for all m \in [M] then                  // termination
26:         run = false
```
|
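Putting the pieces together, here is a compact single-process simulation of FEDELIM. It is an illustrative sketch under our own naming, not the authors' code; the instance below is randomly generated.

```python
import numpy as np

rng = np.random.default_rng(0)

def fedelim(mu, sigma, delta):
    """Toy simulation of FEDELIM. mu: (K, M) true means; sigma: known reward std.
    Returns (local best arms, global best arm, rounds, arm pulls, uplink messages)."""
    K, M = mu.shape
    S_l = [set(range(K)) for _ in range(M)]        # local active arms per client
    S_g = set(range(K))                            # global active arms at the server
    sums = np.zeros((K, M))                        # running reward sums
    k_local, k_global = [None] * M, None
    n = pulls = messages = 0
    while any(S_l[m] | S_g for m in range(M)):
        n += 1
        a_l = np.sqrt(2 * np.log(8 * K * M * n**2 / delta) / n)
        a_g = np.sqrt(2 * np.log(8 * K * n**2 / delta) / (M * n))
        for m in range(M):
            for k in S_l[m] | S_g:                 # selection rule: pull S_m once
                sums[k, m] += rng.normal(mu[k, m], sigma)
                pulls += 1
            if len(S_l[m]) > 1:                    # local elimination
                est = {k: sums[k, m] / n for k in S_l[m]}
                best = max(est.values())
                S_l[m] = {k for k in S_l[m] if best - est[k] <= 2 * a_l}
            if len(S_l[m]) == 1:                   # local declaration
                k_local[m] = S_l[m].pop()
        if len(S_g) > 1 and (n & (n - 1)) == 0:    # communicate only when n = 2^t
            messages += M * len(S_g)               # each client uploads |S_g| means
            est = {k: sums[k].sum() / (M * n) for k in S_g}
            best = max(est.values())
            S_g = {k for k in S_g if best - est[k] <= 2 * a_g}
            if len(S_g) == 1:                      # global declaration
                k_global = S_g.pop()
    return k_local, k_global, n, pulls, messages

mu = rng.normal(0.0, 1.0, size=(4, 3))             # K = 4 arms, M = 3 clients
print(fedelim(mu, sigma=1.0, delta=0.05))
```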
| 118 |
+
|
| 119 |
+
former leads to faster identification of the global best arm. From our numerical results, we find that this is true only partially. Rather interestingly, Figures 2c and 3c indicate that the total cost of a periodic communication algorithm (based on successive elimination) with period H decreases, attains a minimum, and thereafter increases as H increases, thereby suggesting that there is a "sweet spot" for H, say $H_{\rm opt}$, where the total cost is minimal. However, $H_{\rm opt}$ is, in general, a function of C and problem instance-specific
|
| 120 |
+
|
| 121 |
+
constants which are not known beforehand in most practical settings, thereby making the computation of $H_{\rm opt}$ infeasible. Figures 2c and 3c show that FEDELIM finds this sweet spot while being agnostic to C and other problem instance-specific constants, and thereby achieves a balanced trade-off between the total number of arm selections and the communication cost.

In this section, we present our theoretical results on the performance (total number of arm pulls, total communication cost, and total cost) of FEDELIM. We only state the results below and provide the proofs in the supplementary material. The first result below asserts that given any $\delta \in (0,1)$, FEDELIM is $\delta$-PAC, i.e., it identifies the local best arms and the global best arm correctly with probability at least $1-\delta$.

**Theorem 1.** Given any $\delta \in (0,1)$, FEDELIM identifies the local best arms and global best arm correctly with probability at least $1 - \delta$ and is thus $\delta$-PAC.

In the proof (presented in the supplementary material), we first show that for any $\delta \in (0,1)$, the event

$$\mathcal{E} := \bigcap_{n \in \mathbb{N}, k \in [K], m \in [M]} \left\{ \begin{array}{c} |\hat{\mu}_k(n) - \mu_k| \le \alpha_{\mathrm{g}}(n), \\ |\hat{\mu}_{k,m}(n) - \mu_{k,m}| \le \alpha_{\mathrm{l}}(n) \end{array} \right\} \tag{2}$$

has probability at least $1-\delta$; this is established using a standard inequality on the concentration of the empirical mean around the true mean for Gaussian rewards. We then show that FEDELIM always outputs the correct answer under $\mathcal{E}$.

We now analyse a variant of FEDELIM called FEDELIM0 which communicates with the server in every time step. Specifically, FEDELIM0 differs from FEDELIM in line 15 of Algorithm 1, which is executed for all $n$ in FEDELIM0 but only for $n=2^t$, $t\in\mathbb{N}_0$, in FEDELIM. Our interest is only in the total number of arm selections of FEDELIM0, say $T_{\text{FedElim0}}$, required to find the local best arms and the global best arm on the event $\mathcal{E}$, and how this compares with the total number of arm selections of other algorithms which also communicate in every time step. As we shall see, $T_{\text{FedElim0}}$ is an important term that governs the problem instance-dependent upper bounds on the total number of arm selections and the total cost of FEDELIM. Note that $T_{\text{FedElim0}}$ is also the total cost of FEDELIM0 on $\mathcal{E}$ when $C=0$.

For $k \neq k_m^*$, let $\Delta_{k,m} \coloneqq \mu_{k_m^*,m} - \mu_{k,m}$ denote the suboptimality gap between the means of arm $k$ of client $m$ and the local best arm of client $m$, and let $\Delta_{k_m^*,m} \coloneqq \min_{k \neq k_m^*} \Delta_{k,m}$. Similarly, for $k \neq k^*$, we let $\Delta_k \coloneqq \mu_{k^*} - \mu_k$ and $\Delta_{k^*} \coloneqq \min_{k \neq k^*} \Delta_k$. The following result provides a problem instance-dependent upper bound on $T_{\text{FedElim0}}$.

**Theorem 2.** Fix $\delta \in (0,1)$. On the event $\mathcal{E}$ defined in (2),

$$T_{\text{FedElim0}} \le T \coloneqq \sum_{k=1}^{K} \sum_{m=1}^{M} \max \left\{ T_{k,m}, T_k \right\}, \tag{3}$$

where for each $k \in [K]$ and $m \in [M]$,

$$T_{k,m} := 102 \cdot \frac{\ln\left(\frac{64\sqrt{\frac{8KM}{\delta}}}{\Delta_{k,m}^2}\right)}{\Delta_{k,m}^2} + 1, \tag{4}$$

$$T_k := 102 \cdot \frac{\ln\left(\frac{64\sqrt{\frac{8K}{\delta}}}{M\Delta_k^2}\right)}{M\Delta_k^2} + 1. \tag{5}$$
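
For concreteness, the bound in (3)-(5) can be evaluated numerically for a given instance. Below is a small sketch (our own helper, not from the paper); the gap arrays are inputs, with the convention that the entry for a best arm is the corresponding minimum gap.

```python
import numpy as np

# Evaluate T_{k,m}, T_k, and the bound T of (3)-(5). Delta_local[m, k] and
# Delta_global[k] hold the gaps, with best-arm entries set to the minimum gap.
def fedelim0_upper_bound(Delta_local, Delta_global, delta):
    M, K = Delta_local.shape
    T_km = 102 * np.log(64 * np.sqrt(8 * K * M / delta) / Delta_local**2) / Delta_local**2 + 1
    T_k = 102 * np.log(64 * np.sqrt(8 * K / delta) / (M * Delta_global**2)) / (M * Delta_global**2) + 1
    return np.maximum(T_km, T_k[None, :]).sum()   # T = sum_k sum_m max{T_{k,m}, T_k}
```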

We show in the proof that on the event $\mathcal{E}$, the total number of arm selections under FEDELIM0 required to identify arm $k$ of client $m$ as the client's local best arm or otherwise, say $T_{k,m}^{(1)}$, is upper bounded by $T_{k,m}$ for all $k \in [K]$ and $m \in [M]$. To establish this, we use the fact that $\alpha_{\mathrm{l}}(n) \to 0$ as $n \to \infty$, and look for the smallest integer $n$ such that $\alpha_{\mathrm{l}}(n) \le \Delta_{k,m}/4$; call this $n_{k,m}$. We argue that $T_{k,m}^{(1)} \le n_{k,m}$ on the event $\mathcal{E}$, and subsequently show that $T_{k,m}$ is an upper bound for $n_{k,m}$. A similar procedure is used to upper bound the total number of arm pulls required to identify arm $k$ as being the global best arm or otherwise at the server. Combining the two upper bounds and noting that the event $\mathcal{E}$ occurs with probability at least $1-\delta$, we arrive at (3).

The next result shows that the upper bound in (3) is tight up to a constant factor.

**Theorem 3.** Given $\delta \in (0,1)$ and a $\delta$-PAC algorithm $\pi$, let $T^{\pi}_{\delta}$ denote the total number of arm selections under $\pi$ required to find the local best arms and the global best arm when the clients and the server communicate in every time step. Under the problem instance $\mu$,

$$\inf_{\pi \text{ is } \delta\text{-PAC}} \mathbb{E}_{\mu}^{\pi}[T_{\delta}^{\pi}] \ge \sum_{k=1}^{K} \sum_{m=1}^{M} \max \left\{ \frac{\ln(\frac{1}{2.4\delta})}{\Delta_{k,m}^{2}}, \frac{\ln(\frac{1}{2.4\delta})}{M^{2} \Delta_{k}^{2}} \right\}, \tag{6}$$

where in (6), $\mathbb{E}^{\pi}_{\mu}[\cdot]$ denotes the expectation under the algorithm $\pi$ and the problem instance $\mu$.

The proof of Theorem 3 is based on the transportation lemma (Kaufmann, Cappé, and Garivier 2016, Lemma 1), which combines a certain change of measure technique and Wald's identity for *i.i.d.* processes.

**Remark 3.** Theorems 2 and 3 together provide a fairly tight characterisation of the total number of arm selections under the optimal algorithm in the class of all algorithms that communicate in every time step. They show that FEDELIM0 is almost optimal in this class. Neglecting the logarithmic terms and the constants, the key difference between the upper and lower bounds manifests in the second term in the maximum in (6), in which there is an additional factor of $M$ in the denominator. When $M$ is a constant, or if $M$ is so large that $\Delta_{k,m} \leq \sqrt{M} \Delta_k$ for all $(k,m) \in [K] \times [M]$ (a typical federated learning scenario in which the number of clients $M$ is large), Theorems 2 and 3 are tight up to log factors.

We now present a high-probability upper bound on the total cost (i.e., the sum of the total number of arm pulls and the total communication cost) of FEDELIM for any $C \geq 0$. Given a problem instance $\mu$, for each $k \in [K]$ and $m \in [M]$, let $T_{k,m}$ and $T_k$ be as defined in (4) and (5) respectively.

**Theorem 4.** Fix a problem instance $\mu$, uplink cost $C \geq 0$, and $\delta \in (0,1)$ such that $C \ln T_k \leq T_k$ for all $k \in [K]$. Let $T_{\text{FedElim}}^C$, $C_{\text{FedElim}}^{\text{comm}}$, and $C_{\text{FedElim}}^{\text{total}}$ denote respectively the total number of arm selections, the communication cost, and the total cost of FEDELIM towards identifying the local best arms and the global best arm. On the event $\mathcal{E}$ defined in (2), the following inequalities hold (with $T$ as defined in (3)):

$$T_{\text{FedElim}}^C \le \sum_{k=1}^K \sum_{m=1}^M \max\{T_{k,m}, \ 2T_k\} \le 2T, \tag{7}$$

$$C_{\text{FedElim}}^{\text{comm}} \le C \cdot M \cdot \sum_{k=1}^{K} \left\lceil \frac{\ln T_k}{\ln 2} \right\rceil, \tag{8}$$

$$C_{\rm FedElim}^{\rm total} = T_{\rm FedElim}^C + C_{\rm FedElim}^{\rm comm} \le 3\,T. \tag{9}$$

Notice that the maxima in (3) and (7) are identical up to the constant 2. Intuitively, the extra factor of 2 arises because if a candidate arm $k$ is not eliminated in time step $n=2^t$ but is eliminated in time step $n=2^{t+1}$ for some $t \in \mathbb{N}_0$, then it must be the case that $2^{t+1} \leq 2T_k$, and therefore the total number of arm selections is at most $2T_k$.

It is no coincidence that the constant 2 appears inside the maximum in (7) and also in the denominator in (8). In fact, exponentially sparse communication in time steps $n = \lceil \lambda^t \rceil$ for $t \in \mathbb{N}_0$ and $\lambda > 1$ results in $\lambda$ replacing 2 in both (7) and (8). Then, optimising the sum of the $\lambda$-analogues of the right-hand sides of (7) and (8), we may arrive at a fairly tight upper bound on the total cost, i.e., the $\lambda$-analogue of (9). However, the optimal $\lambda$ is, in general, a function of $C$ and of the problem instance-specific sub-optimality gaps, which are unknown in most practical settings. Therefore, we forgo finding the optimal $\lambda$ and instead use $\lambda = 2$. For a more detailed discussion, see the supplementary material.
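
This trade-off can be visualised numerically with the $\lambda$-analogues of the right-hand sides of (7) and (8); a sketch, where `T` and `T_k` are the quantities from (3)-(5) and the helper name is ours:

```python
import numpy as np

# Lambda-analogue of the total-cost upper bound: arm pulls scale like lambda*T,
# communication like C*M*sum(ceil(ln T_k / ln lambda)); lambda = 2 is FedElim.
def total_cost_upper(lam, T, T_k, C, M):
    return lam * T + C * M * np.ceil(np.log(T_k) / np.log(lam)).sum()

# e.g. scan lam over (1, 4] to see how the sweet spot moves with C:
# costs = [total_cost_upper(l, T, T_k, C, M) for l in np.linspace(1.1, 4.0, 30)]
```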

**Remark 4.** The key takeaway of our paper, presented in inequality (9), shows that the total number of arm selections (resp. total cost) of FEDELIM is at most 2 (resp. 3) times $T$. These multiplicative gaps of 2 and 3 do not depend on $C$. In contrast, for periodic communication (Mitra, Hassani, and Pappas 2021) with period $H$, it can be shown that the multiplicative gap for the total cost is $1 + C/H$, which does depend on the per-use communication cost $C$.

In this section, we present numerical results on the performance of FEDELIM (and FEDELIM0). We consider two synthetic datasets and one real-world dataset.

2210.01953/main_diagram/main_diagram.drawio
ADDED
The diff for this file is too large to render. See raw diff

2210.01953/paper_text/intro_method.md
ADDED
@@ -0,0 +1,86 @@

# Introduction

Machine learning models are ubiquitously utilized in many applications, including high-stakes domains such as loan disbursement [@bank1], recidivism prediction [@rec1; @rec2], and hiring and recruitment [@job1; @job2], among others. For this reason, it is of paramount importance to ensure that decisions derived from such predictive models are unbiased and fair for all individuals treated [@mehrabi2021survey]. In particular, this is the main motivation behind *group-level fair* learning approaches, where the goal is to generate predictions that do not *disparately impact* individuals from minority protected groups (such as ethnicity, sex, etc.). It is also worthwhile to note that this problem is technically challenging because there exists an inherent fairness-performance tradeoff [@dutta2020there], and thus fairness needs to be improved while ensuring approximate preservation of model predictive performance. This line of research is even more pertinent for data clustering, where error rates cannot be directly assessed using class labels to measure disparate impact. Thus, many approaches have been recently proposed to make clustering models group-level fair [@chierichetti2017fair; @backurs2019scalable; @kleindessner2019fair; @chhabra2022fair]. In a nutshell, these approaches seek to improve the fairness of clustering outputs with respect to some fairness metric, ensuring that each cluster contains approximately the same proportion of samples from each protected group as in the dataset overall.

While many fair clustering approaches have been proposed, it is of the utmost importance to ensure that these models provide fair outputs even in the presence of an adversary seeking to degrade fairness utility. Although there are a few pioneering attempts at fairness attacks against supervised learning models [@solans2020poisoning; @mehrabi2021exacerbating], unfortunately, none of these works propose defense approaches. Moreover, in the unsupervised scenario, fair clustering algorithms have not yet been explored from an adversarial attack perspective, which leaves the whole area of unsupervised fair clustering in potential danger. This leads us to our fundamental research questions in this paper:

*Are fair clustering algorithms vulnerable to adversarial attacks that seek to decrease fairness utility, and if such attacks exist, how do we develop an adversarially robust fair clustering model?*

**Contributions**. In this paper, we answer both these questions in the affirmative by making the following contributions:

- We propose a novel *black-box* adversarial attack against clustering models where the attacker can perturb a small percentage of protected group memberships and yet is able to degrade the fairness performance of state-of-the-art fair clustering models significantly ([2](#sec:attack){reference-type="ref+label" reference="sec:attack"}). We also discuss how our attack is critically different from existing adversarial attacks against clustering performance and why they cannot be used for the proposed threat model.

- Through extensive experiments using our attack approach, we find that existing fair clustering algorithms are not robust to adversarial influence and are extremely volatile with regard to fairness utility (Section [2.2](#sec:attack_results){reference-type="ref" reference="sec:attack_results"}). We conduct this analysis on a number of real-world datasets, and for a variety of clustering performance and fairness utility metrics.

- To achieve truly robust fair clustering, we propose the Consensus Fair Clustering (CFC) model (Section [3](#sec:defense){reference-type="ref" reference="sec:defense"}), which is highly resilient to the proposed fairness attack. To the best of our knowledge, CFC is the first defense approach for fairness attacks, which makes it an important contribution to the unsupervised ML community.

**Preliminaries and Notation**. Given a tabular dataset $X = \{x_i\} \in \mathbb{R}^{n \times d}$ with $n$ samples and $d$ features, each sample $x_i$ is associated with a protected group membership $g(x_i) \in [L]$, where $L$ is the total number of protected groups, and we denote the group memberships for the entire dataset as $G = \{g(x_i)\}_{i=1}^n \in \mathbb{N}^{n}$. We also write $H = \{H_1, H_2, ..., H_L\}$, where $H_l$ is the set of samples that belong to the $l$-th protected group. A clustering algorithm $\mathcal{C}(X, K)$ takes as input the dataset $X$ and a parameter $K$, and outputs a labeling in which each sample belongs to one of $K$ clusters [@xu2005survey]. That is, each point is clustered into one of the sets $\{C_1, C_2, ..., C_K\}$ with $\cup_{k=1}^K C_k = X$. Based on the above, a *group-level fair* clustering algorithm $\mathcal{F}(X, K, G)$ [@chierichetti2017fair] can be defined similarly to $\mathcal{C}$, where $\mathcal{F}$ takes as input the protected group membership $G$ along with $X$ and $K$, and outputs a labeling that is expected to be more *fair* than the clustering obtained via the original unfair/vanilla clustering algorithm with respect to a given fairness utility function $\phi$. That is, $\phi(\mathcal{F}(X, K, G), G) \leq \phi(\mathcal{C}(X, K), G)$. Note that $\phi$ can be any fairness utility metric, such as Balance or Entropy [@chhabra2021overview; @mehrabi2021survey].

In this section, we study the attack problem on fair clustering. Specifically, we propose a novel attack that aims to reduce the fairness utility of fair clustering algorithms, as opposed to traditional adversarial attacks that seek to decrease clustering performance [@cina2022black]. To the best of our knowledge, although there are a few pioneering attempts toward fairness attacks [@mehrabi2021exacerbating; @solans2020poisoning], all of them consider the supervised setting. Our proposed attack exposes a problem prevalent in fair clustering approaches that has not yet been given considerable attention: since the protected group memberships are input to the fair clustering optimization problem, they can be used to disrupt the fairness utility. We study attacks under the *black-box* setting, where the attacker has no knowledge of the fair clustering algorithm being used. Before formulating the problem in detail, we first define the threat model and the adversary, and then elaborate on our proposed attack.

**Threat Model**. Take customer segmentation [@liu2017customer; @nazari2021impact] as an example, and assume that the sensitive attribute considered is *age* with 3 protected groups: {*youth, adult, senior*}. We can motivate our threat model as follows: the adversary controls a small portion of individuals' protected group memberships (through social engineering, exploiting a security flaw in the system, etc.); by changing these memberships, the adversary aims to disrupt the fairness utility of the fair algorithm on the other, uncontrolled groups, so that some clusters contain an overwhelming majority of samples from one protected group over the others. This would adversely affect the youth and senior groups, as they are more vulnerable and less capable of protecting themselves. The attacker could carry out this attack for profit or anarchistic reasons.

Our adversary has partial knowledge of the dataset $X$ but not of the fair clustering algorithm $\mathcal{F}$. However, they can query $\mathcal{F}$ and observe cluster outputs; this assumption has been used in previous adversarial attack research against clustering [@cina2022black; @chhabra2020suspicion; @biggio2013data]. They can access and switch/change the protected group memberships for a small subset of samples in $G$, denoted $G_A \subseteq G$. The goal of the fairness attack is to change the protected group memberships of samples in $G_A$ such that the fairness utility value decreases for the remaining samples in $G_D = G \setminus G_A$. As clustering algorithms [@von2007tutorial] and their fair variants [@kleindessner2019guarantees] are trained on the input data to generate the labeling, this is a *training-time* attack. Our attack can also be motivated by observing that fair clustering outputs change with any changes made to the protected group memberships $G$ or the input dataset $X$. We can formally define the fairness attack as follows:

::: {#def:attack .definition}
**Definition 1** (Fairness Attack). *Given a fair clustering algorithm $\mathcal{F}$ that can be queried for cluster outputs, a dataset $X$, samples' protected groups $G$, and a small portion $G_A \subseteq G$ of protected groups that an adversary can control, the fairness attack is one in which the adversary aims to reduce the fairness of the clusters output by $\mathcal{F}$ for samples in $G_D = G \setminus G_A$ by perturbing $G_A$.*
:::

**Attack Optimization Problem**. Based on the above threat model, the attack optimization problem can be defined analytically. For ease of notation, we define two mapping functions:

- $\eta$ : Takes $G_A$ and $G_D$ as inputs and outputs $G = \eta(G_A, G_D)$, the combined group memberships for the entire dataset. Note that $G_A$ and $G_D$ are interspersed in the entire dataset in an unordered fashion, which motivates the need for this mapping.

- $\theta$ : Takes $G_D$ and an output cluster labeling from a clustering algorithm for the entire dataset as inputs, and returns the cluster labels for only the subset of samples that have group memberships in $G_D$. That is, if the clustering output is $\mathcal{C}(X, K)$, we can obtain cluster labels for samples in $G_D$ as $\theta(\mathcal{C}(X, K), G_D)$.

Based on the above notation, we have the following optimization problem for the attacker: $$\begin{equation}
\label{attack_opt}
\begin{aligned}
\min_{G_A} \quad & \phi(\theta(O, G_D), G_D) \\
\textrm{s.t.} \quad & O = \mathcal{F}(X, K, \eta(G_A, G_D)).
\end{aligned}
\end{equation}$$ The above problem is a two-level hierarchical optimization problem [@anandalingam1992hierarchical] with optimization variable $G_A$, where the lower-level problem is the fair clustering problem $\mathcal{F}(X, K, \eta(G_A, G_D))$ and the upper-level problem aims to reduce the fairness utility $\phi$ of the clustering obtained on the set of samples in $G_D$. Due to the black-box nature of our attack, both the upper- and lower-level problems are highly non-convex, and closed-form solutions to the hierarchical optimization cannot be obtained. In particular, hierarchical optimization even with linear upper- and lower-level problems has been shown to be NP-hard [@ben1990computational], indicating that such problems cannot be solved by exact algorithms. We will thus resort to generally well-performing heuristic algorithms for obtaining solutions to the problem in Eq. ([\[attack_opt\]](#attack_opt){reference-type="ref" reference="attack_opt"}).

**Solving the Attack Problem**.[]{#sec: solving_attack label="sec: solving_attack"} The aforementioned attack problem in Eq. ([\[attack_opt\]](#attack_opt){reference-type="ref" reference="attack_opt"}) is a non-trivial optimization problem, in which the adversary has to optimize $G_A$ such that the overall clustering fairness for the remaining samples in $G_D$ decreases. Since $\mathcal{F}$ is a black box unknown to the attacker, first- or second-order approaches (such as gradient descent) cannot be used to solve the problem. Instead, we utilize zeroth-order optimization algorithms. In particular, we use RACOS [@yu2016derivative] due to its known theoretical guarantees on discrete optimization problems; our problem belongs to this class, since protected group memberships are discrete labels. A simplified sketch of this query-based loop is shown below.
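
As a minimal illustration of the query-based structure, the sketch below performs a simple random-flip local search in place of RACOS; `fair_clustering`, `phi`, `eta`, and `theta` stand for $\mathcal{F}$, $\phi$, $\eta$, and $\theta$ above, and the single-flip proposal is our simplification, not the paper's optimizer.

```python
import numpy as np

# Zeroth-order stand-in for solving Eq. (attack_opt): the paper uses RACOS,
# whereas this sketch just proposes single membership flips and keeps the best.
def fairness_attack(fair_clustering, phi, eta, theta, X, K, G_A, G_D,
                    n_groups, n_queries=200, seed=0):
    rng = np.random.default_rng(seed)

    def value(g_a):  # fairness utility of the induced clustering on G_D (to minimize)
        return phi(theta(fair_clustering(X, K, eta(g_a, G_D)), G_D), G_D)

    best, best_val = G_A.copy(), value(G_A)
    for _ in range(n_queries):
        cand = best.copy()
        i = rng.integers(len(cand))          # flip one controlled membership
        cand[i] = rng.choice([g for g in range(n_groups) if g != cand[i]])
        val = value(cand)
        if val < best_val:                   # lower fairness utility = stronger attack
            best, best_val = cand, val
    return best, best_val
```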

**Discussion**. Note that [@chhabra2021fairness] propose a theoretically motivated fairness-disrupting attack for k-median clustering; however, it cannot be utilized to tackle our research problem for the following reasons: (1) their attack only works for k-median vanilla clustering, and thus does not constitute a black-box attack on fair algorithms; (2) their attack poisons a subset of the input data rather than the protected group memberships, leading to a more common threat model different from ours. We also cannot use existing adversarial attacks against clustering algorithms [@cina2022black; @chhabra2020suspicion], as they aim to reduce clustering performance and do not optimize for a reduction in fairness utility; such attacks might not always lead to a reduction in fairness utility. Hence, these threat models are considerably different from our case.

<figure id="fig:toy" data-latex-placement="t">
<figure id="fig:sfd_pre">
<embed src="figs_imgs/SFD_PRE_MOD.pdf" />
<figcaption>Pre-attack</figcaption>
</figure>
<figure id="fig:sfd_post">
<embed src="figs_imgs/SFD_POST_MOD.pdf" />
<figcaption>Post-attack</figcaption>
</figure>
<figcaption>Pre-attack and post-attack clusters of the SFD fair clustering algorithm on the synthetic toy data. The labels of Cluster A and Cluster B are shown in green and blue, and these samples in two clusters belong to <span class="math inline"><em>G</em><sub><em>D</em></sub></span>. The <span class="math inline">∘</span> and <span class="math inline">△</span> markers represent the two protected groups, and points in red are the attack points that belong to <span class="math inline"><em>G</em><sub><em>A</em></sub></span>. Observe that before the attack, the SFD algorithm obtains a perfect Balance of 1.0. However, after the attack, once the attacker has optimized the protected group memberships for the attack points, the SFD clustering has become less fair with Balance = 0.5.</figcaption>
</figure>

# Method

To achieve robust fair clustering, our defense utilizes consensus clustering [@liu2019consensus; @fred2005combining; @lourencco2015probabilistic; @fern2004solving] combined with fairness constraints to ensure that cluster outputs are robust to the above attack. Consensus clustering is widely renowned for its robustness and consistency properties, but to the best of our knowledge, no other work has utilized consensus clustering concepts in the fair clustering scenario. Specifically, we propose Consensus Fair Clustering (CFC), shown in Figure [\[fig:cfc_framework\]](#fig:cfc_framework){reference-type="ref" reference="fig:cfc_framework"}, which first transforms the consensus clustering problem into a graph partitioning problem, and then utilizes a novel graph-based neural network architecture to learn representations for fair clustering. CFC has two stages that tackle the attack at the data and algorithm levels. The first stage samples subsets of the training data and runs cluster analysis to obtain the basic partitions and the co-association matrix. Since the attacked samples are a tiny portion of the whole training data, the probability of their being selected into a subset is also small, which decreases their negative impact. In the second stage, CFC fuses the basic partitions with a fairness constraint, further enhancing algorithmic robustness.

**First Stage: Generating Co-Association Matrix**. In this first stage of CFC, we generate $r$ basic partitions $\Pi = \{ \pi_1, \pi_2, ..., \pi_r\}$. For each basic partition $\pi_i$, we first obtain a sub-dataset $X_i$ by random sample/feature selection and run K-means [@lloyd1982least] on it to obtain $\pi_i$. This process is repeated $r$ times such that $\cup_{i=1}^{r}X_i = X$. Letting $u$ and $v$ be two samples and $\pi_i(u)$ the label category of $u$ in basic partition $\pi_i$, and following the procedure of consensus clustering, we summarize the basic partitions into a co-association matrix $S \in \mathbb{R}^{n \times n}$ as $S_{uv} = \sum_{i=1}^r \delta(\pi_i(u), \pi_i(v))$, where $\delta(a,b) = 1$ if $a=b$ and $\delta(a,b) = 0$ otherwise. The co-association matrix not only summarizes the categorical information of the basic partitions into a pairwise relationship, but also provides an opportunity to transform consensus clustering into a graph partitioning problem, where we can learn a fair graph embedding that is resilient to the protected group membership poisoning attack.
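
A sketch of this stage using scikit-learn's KMeans follows; the subsampling ratio is illustrative, and pairs of samples not drawn together in a round simply contribute nothing to $S$ in that round.

```python
import numpy as np
from sklearn.cluster import KMeans

# First CFC stage (sketch): r basic partitions on random sample/feature subsets,
# summarized into the co-association matrix S_{uv} = sum_i delta(pi_i(u), pi_i(v)).
def co_association(X, r=20, n_clusters=10, frac=0.7, seed=0):
    rng = np.random.default_rng(seed)
    n, d = X.shape
    S = np.zeros((n, n))
    for i in range(r):
        rows = rng.choice(n, size=int(frac * n), replace=False)  # sample selection
        cols = rng.choice(d, size=int(frac * d), replace=False)  # feature selection
        km = KMeans(n_clusters=n_clusters, n_init=10, random_state=i)
        labels = km.fit_predict(X[np.ix_(rows, cols)])           # basic partition pi_i
        S[np.ix_(rows, rows)] += labels[:, None] == labels[None, :]
    return S
```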

**Second Stage: Learning Graph Embeddings for Fair Clustering**. In the second stage of CFC, we aim to find an optimal consensus and fair partition based on the feature matrix $X$, the basic partitions $\Pi$, and the sample sensitive attributes $G$. The objective function of CFC consists of a self-supervised contrastive loss, a fair clustering loss, and a structural preservation loss.

*Self-supervised Contrastive Loss*. To learn a fair graph embedding using $X, S,$ and $G$, we use a few components inspired by a recently proposed simple graph classification framework called Graph-MLP [@hu2021graph], which does not require message passing between nodes and outperforms classical message-passing GNN methods on various tasks [@wang2021decoupled; @yin2022dynamic]. Specifically, it employs neighboring contrastiveness and considers the $R$-hop neighbors of each node as positive samples and the remaining nodes as negative samples. The loss ensures that positive samples remain close to the node and negative samples remain far away, based on feature distance. Let $\gamma_{uv} = S_{uv}^{R}$ where $S$ is the co-association matrix, let $sim$ denote cosine similarity, and let $\tau$ be the temperature parameter; then we can write the loss as follows: $$\begin{equation}
\mathcal{L}_{c} (Z,S) \coloneqq -\frac{1}{n} \sum_{i=1}^{n} \log \frac{\sum_{a=1}^n \mathds{1}_{[a\neq i]} \gamma_{ia} \exp(sim(Z_i, Z_a)/\tau)}{\sum_{b=1}^n \mathds{1}_{[b\neq i]} \exp(sim(Z_i, Z_b)/\tau)}.
\end{equation}$$
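
A PyTorch sketch of this loss is given below; here `gamma` is the precomputed weight matrix, and rows with no positives would need masking in practice.

```python
import torch
import torch.nn.functional as F

# Neighboring contrastive loss L_c (sketch): R-hop neighbours (gamma > 0) act as
# positives, all other nodes as negatives, with cosine similarity scaled by tau.
def contrastive_loss(Z, gamma, tau=1.0):
    Z = F.normalize(Z, dim=1)                 # rows now have unit norm
    sim = torch.exp(Z @ Z.T / tau)            # exp(sim(Z_i, Z_a) / tau)
    sim = sim * (1 - torch.eye(len(Z), device=Z.device))  # drop a = i terms
    pos = (gamma * sim).sum(dim=1)            # numerator: gamma-weighted positives
    return -torch.log(pos / sim.sum(dim=1)).mean()
```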

*Fair Clustering Loss*. Similar to other deep clustering approaches [@xie2016unsupervised; @li2020deep], we employ a clustering assignment layer based on the Student t-distribution and obtain soft cluster assignments $P$. We also include a fairness regularization term using an auxiliary target distribution $Q$ to ensure that the cluster assignments obtained from the learned embeddings $z \in Z$ are fair. Slightly abusing notation, we denote the learned representation of sample $x \in X$ as $z_x \in Z$. Let $p_k^x$ denote the probability of sample $x\in X$ being assigned to the $k$-th cluster, $\forall k \in [K]$; more precisely, $p_k^x$ represents the assignment confidence between representation $z_x$ and cluster $c_k$ in the embedding space. The fair clustering loss term can then be written as: $$\begin{equation}
\mathcal{L}_f(Z,G) \coloneqq KL(P||Q) = \sum_{g\in [L]}\sum_{x\in H_g} \sum_{k \in [K]} p_k^x \log{\frac{p_k^x}{q_k^x}},
\end{equation}$$ where $p_{k}^x = \frac{(1 + ||z_x - c_k||^2)^{-1}}{\sum_{k'\in [K]} (1 + ||z_x - c_{k'}||^2)^{-1}}$ and $q_k^x = \frac{(p_k^x)^2/ \sum_{x' \in H_{g(x)}} p_k^{x'}}{\sum_{k'\in [K]} (p_{k'}^x)^2/ \sum_{x' \in H_{g(x)}} p_{k'}^{x'}}$.
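
A sketch of the corresponding computation, with `centers` standing for the cluster centers $c_k$ and `groups` the protected group index of each sample (both names are ours):

```python
import torch

# Fair clustering loss (sketch): Student-t soft assignments p, group-wise
# sharpened targets q, and KL(P || Q) summed over groups, samples and clusters.
def fair_clustering_loss(Z, centers, groups):
    p = (1 + torch.cdist(Z, centers) ** 2).reciprocal()
    p = p / p.sum(dim=1, keepdim=True)                   # p_k^x
    q = torch.zeros_like(p)
    for g in groups.unique():                            # normalize within each H_g
        f = p[groups == g] ** 2 / p[groups == g].sum(dim=0, keepdim=True)
        q[groups == g] = f / f.sum(dim=1, keepdim=True)  # q_k^x
    return (p * (p / q).log()).sum()
```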

*Structural Preservation Loss.* Since optimizing the fair clustering loss $\mathcal{L}_f$ can lead to a degenerate solution in which the learned representation reduces to a constant function [@li2020deep], we employ a well-known structural preservation loss term for each protected group. Since this loss is applied to the final partitions obtained, we omit it for clarity from Figure [\[fig:cfc_framework\]](#fig:cfc_framework){reference-type="ref" reference="fig:cfc_framework"}, which shows the internal CFC architecture. Let $P(g)$ be the soft cluster assignments obtained for protected group $g$ using CFC, and let $J(g)$ be the cluster assignments for group $g$ obtained using any other well-performing fair clustering algorithm. We can then define this loss as originally proposed in [@li2020deep]: $$\begin{equation}
\mathcal{L}_{p} \coloneqq \sum_{g\in[L]}||P(g)P(g)^\top - J(g)J(g)^\top||^2.
\end{equation}$$ The overall objective of the CFC algorithm can be written as $\mathcal{L}_c + \alpha\mathcal{L}_f + \beta\mathcal{L}_p$, where $\alpha, \beta$ are parameters that control the trade-off between the individual losses. CFC can then be used to generate hard cluster label predictions from the soft cluster assignments $P \in \mathbb{R}^{n \times K}$.

2212.04755/main_diagram/main_diagram.drawio
ADDED
@@ -0,0 +1,530 @@

(drawio XML, rendered diff truncated at line 276 of 530: the "PMR" main diagram, with an Extractor over [CLS]/Tok/[SEP] token inputs for the Query and Context spans, hidden states $H_1, \dots, H_M$, output Answers, and an $L_{wae}$ loss. See raw diff for the full file.)
|
| 277 |
+
</mxCell>
|
| 278 |
+
<mxCell id="t72f99bNQpci66oiNbC7-240" value="" style="group" parent="t72f99bNQpci66oiNbC7-302" vertex="1" connectable="0">
|
| 279 |
+
<mxGeometry x="9" width="32" height="20" as="geometry" />
|
| 280 |
+
</mxCell>
|
| 281 |
+
<mxCell id="t72f99bNQpci66oiNbC7-241" value="<span style="color: rgb(0, 0, 0); font-variant-ligatures: normal; font-variant-caps: normal; font-weight: 400; letter-spacing: normal; orphans: 2; text-align: center; text-indent: 0px; text-transform: none; widows: 2; word-spacing: 0px; -webkit-text-stroke-width: 0px; text-decoration-thickness: initial; text-decoration-style: initial; text-decoration-color: initial; float: none; display: inline !important;"><i style=""><font style="font-size: 22px;">S</font></i></span>" style="text;whiteSpace=wrap;html=1;fontSize=12;fontFamily=Times New Roman;" parent="t72f99bNQpci66oiNbC7-240" vertex="1">
|
| 282 |
+
<mxGeometry y="-0.6666666666666666" width="20" height="20" as="geometry" />
|
| 283 |
+
</mxCell>
|
| 284 |
+
<mxCell id="t72f99bNQpci66oiNbC7-242" value="<span style="color: rgb(0, 0, 0); font-size: 12px; font-variant-ligatures: normal; font-variant-caps: normal; font-weight: 400; letter-spacing: normal; orphans: 2; text-align: center; text-indent: 0px; text-transform: none; widows: 2; word-spacing: 0px; -webkit-text-stroke-width: 0px; text-decoration-thickness: initial; text-decoration-style: initial; text-decoration-color: initial; float: none; display: inline !important;"><span style="font-size: 12px;">1,1</span></span>" style="text;whiteSpace=wrap;html=1;fontSize=12;fontFamily=Times New Roman;fontStyle=0;labelBackgroundColor=none;" parent="t72f99bNQpci66oiNbC7-240" vertex="1">
|
| 285 |
+
<mxGeometry x="11" y="9" width="22" height="14.666666666666668" as="geometry" />
|
| 286 |
+
</mxCell>
|
| 287 |
+
<mxCell id="t72f99bNQpci66oiNbC7-303" value="" style="group" parent="t72f99bNQpci66oiNbC7-343" vertex="1" connectable="0">
|
| 288 |
+
<mxGeometry x="80" width="60" height="30.5" as="geometry" />
|
| 289 |
+
</mxCell>
|
| 290 |
+
<mxCell id="t72f99bNQpci66oiNbC7-298" value="" style="rounded=0;whiteSpace=wrap;html=1;strokeWidth=2;fontSize=18;align=center;fillColor=#fff2cc;strokeColor=#d6b656;" parent="t72f99bNQpci66oiNbC7-303" vertex="1">
|
| 291 |
+
<mxGeometry y="5.5" width="60" height="25" as="geometry" />
|
| 292 |
+
</mxCell>
|
| 293 |
+
<mxCell id="t72f99bNQpci66oiNbC7-299" value="" style="group" parent="t72f99bNQpci66oiNbC7-303" vertex="1" connectable="0">
|
| 294 |
+
<mxGeometry x="9" width="32" height="20" as="geometry" />
|
| 295 |
+
</mxCell>
|
| 296 |
+
<mxCell id="t72f99bNQpci66oiNbC7-300" value="<span style="color: rgb(0, 0, 0); font-variant-ligatures: normal; font-variant-caps: normal; font-weight: 400; letter-spacing: normal; orphans: 2; text-align: center; text-indent: 0px; text-transform: none; widows: 2; word-spacing: 0px; -webkit-text-stroke-width: 0px; text-decoration-thickness: initial; text-decoration-style: initial; text-decoration-color: initial; float: none; display: inline !important;"><i style=""><font style="font-size: 22px;">S</font></i></span>" style="text;whiteSpace=wrap;html=1;fontSize=12;fontFamily=Times New Roman;" parent="t72f99bNQpci66oiNbC7-299" vertex="1">
|
| 297 |
+
<mxGeometry y="-0.6666666666666666" width="20" height="20" as="geometry" />
|
| 298 |
+
</mxCell>
|
| 299 |
+
<mxCell id="t72f99bNQpci66oiNbC7-301" value="<span style="color: rgb(0, 0, 0); font-size: 12px; font-variant-ligatures: normal; font-variant-caps: normal; font-weight: 400; letter-spacing: normal; orphans: 2; text-align: center; text-indent: 0px; text-transform: none; widows: 2; word-spacing: 0px; -webkit-text-stroke-width: 0px; text-decoration-thickness: initial; text-decoration-style: initial; text-decoration-color: initial; float: none; display: inline !important;"><span style="font-size: 12px;">1,<i>N</i>+1</span></span>" style="text;whiteSpace=wrap;html=1;fontSize=12;fontFamily=Times New Roman;fontStyle=0;labelBackgroundColor=none;" parent="t72f99bNQpci66oiNbC7-299" vertex="1">
|
| 300 |
+
<mxGeometry x="11" y="9" width="22" height="14.666666666666668" as="geometry" />
|
| 301 |
+
</mxCell>
|
| 302 |
+
<mxCell id="t72f99bNQpci66oiNbC7-305" value="" style="group" parent="t72f99bNQpci66oiNbC7-343" vertex="1" connectable="0">
|
| 303 |
+
<mxGeometry x="150" width="60" height="30.5" as="geometry" />
|
| 304 |
+
</mxCell>
|
| 305 |
+
<mxCell id="t72f99bNQpci66oiNbC7-306" value="" style="rounded=0;whiteSpace=wrap;html=1;strokeWidth=2;fontSize=18;align=center;fillColor=#fff2cc;strokeColor=#d6b656;" parent="t72f99bNQpci66oiNbC7-305" vertex="1">
|
| 306 |
+
<mxGeometry y="5.5" width="60" height="25" as="geometry" />
|
| 307 |
+
</mxCell>
|
| 308 |
+
<mxCell id="t72f99bNQpci66oiNbC7-307" value="" style="group" parent="t72f99bNQpci66oiNbC7-305" vertex="1" connectable="0">
|
| 309 |
+
<mxGeometry x="9" width="32" height="20" as="geometry" />
|
| 310 |
+
</mxCell>
|
| 311 |
+
<mxCell id="t72f99bNQpci66oiNbC7-308" value="<span style="color: rgb(0, 0, 0); font-variant-ligatures: normal; font-variant-caps: normal; font-weight: 400; letter-spacing: normal; orphans: 2; text-align: center; text-indent: 0px; text-transform: none; widows: 2; word-spacing: 0px; -webkit-text-stroke-width: 0px; text-decoration-thickness: initial; text-decoration-style: initial; text-decoration-color: initial; float: none; display: inline !important;"><i style=""><font style="font-size: 22px;">S</font></i></span>" style="text;whiteSpace=wrap;html=1;fontSize=12;fontFamily=Times New Roman;" parent="t72f99bNQpci66oiNbC7-307" vertex="1">
|
| 312 |
+
<mxGeometry y="-0.6666666666666666" width="20" height="20" as="geometry" />
|
| 313 |
+
</mxCell>
|
| 314 |
+
<mxCell id="t72f99bNQpci66oiNbC7-309" value="<span style="color: rgb(0, 0, 0); font-size: 12px; font-variant-ligatures: normal; font-variant-caps: normal; font-weight: 400; letter-spacing: normal; orphans: 2; text-align: center; text-indent: 0px; text-transform: none; widows: 2; word-spacing: 0px; -webkit-text-stroke-width: 0px; text-decoration-thickness: initial; text-decoration-style: initial; text-decoration-color: initial; float: none; display: inline !important;"><span style="font-size: 12px;">1,<i>N</i>+2</span></span>" style="text;whiteSpace=wrap;html=1;fontSize=12;fontFamily=Times New Roman;fontStyle=0;labelBackgroundColor=none;" parent="t72f99bNQpci66oiNbC7-307" vertex="1">
|
| 315 |
+
<mxGeometry x="11" y="9" width="22" height="14.666666666666668" as="geometry" />
|
| 316 |
+
</mxCell>
|
| 317 |
+
<mxCell id="t72f99bNQpci66oiNbC7-310" value="$$\cdots$$" style="text;html=1;strokeColor=none;fillColor=none;align=center;verticalAlign=middle;whiteSpace=wrap;rounded=0;fontSize=12;" parent="t72f99bNQpci66oiNbC7-343" vertex="1">
|
| 318 |
+
<mxGeometry x="215" y="10" width="20" height="18.94736842105263" as="geometry" />
|
| 319 |
+
</mxCell>
|
| 320 |
+
<mxCell id="t72f99bNQpci66oiNbC7-311" value="" style="group" parent="t72f99bNQpci66oiNbC7-343" vertex="1" connectable="0">
|
| 321 |
+
<mxGeometry x="240" width="60" height="30.5" as="geometry" />
|
| 322 |
+
</mxCell>
|
| 323 |
+
<mxCell id="t72f99bNQpci66oiNbC7-312" value="" style="rounded=0;whiteSpace=wrap;html=1;strokeWidth=2;fontSize=18;align=center;fillColor=#fff2cc;strokeColor=#d6b656;" parent="t72f99bNQpci66oiNbC7-311" vertex="1">
|
| 324 |
+
<mxGeometry y="5.5" width="60" height="25" as="geometry" />
|
| 325 |
+
</mxCell>
|
| 326 |
+
<mxCell id="t72f99bNQpci66oiNbC7-313" value="" style="group" parent="t72f99bNQpci66oiNbC7-311" vertex="1" connectable="0">
|
| 327 |
+
<mxGeometry x="11" width="32" height="20" as="geometry" />
|
| 328 |
+
</mxCell>
|
| 329 |
+
<mxCell id="t72f99bNQpci66oiNbC7-314" value="<span style="color: rgb(0, 0, 0); font-variant-ligatures: normal; font-variant-caps: normal; font-weight: 400; letter-spacing: normal; orphans: 2; text-align: center; text-indent: 0px; text-transform: none; widows: 2; word-spacing: 0px; -webkit-text-stroke-width: 0px; text-decoration-thickness: initial; text-decoration-style: initial; text-decoration-color: initial; float: none; display: inline !important;"><i style=""><font style="font-size: 22px;">S</font></i></span>" style="text;whiteSpace=wrap;html=1;fontSize=12;fontFamily=Times New Roman;" parent="t72f99bNQpci66oiNbC7-313" vertex="1">
|
| 330 |
+
<mxGeometry y="-0.6666666666666666" width="20" height="20" as="geometry" />
|
| 331 |
+
</mxCell>
|
| 332 |
+
<mxCell id="t72f99bNQpci66oiNbC7-315" value="<span style="color: rgb(0, 0, 0); font-size: 12px; font-variant-ligatures: normal; font-variant-caps: normal; font-weight: 400; letter-spacing: normal; orphans: 2; text-align: center; text-indent: 0px; text-transform: none; widows: 2; word-spacing: 0px; -webkit-text-stroke-width: 0px; text-decoration-thickness: initial; text-decoration-style: initial; text-decoration-color: initial; float: none; display: inline !important;"><span style="font-size: 12px;">1,<i>M</i></span></span>" style="text;whiteSpace=wrap;html=1;fontSize=12;fontFamily=Times New Roman;fontStyle=0;labelBackgroundColor=none;" parent="t72f99bNQpci66oiNbC7-313" vertex="1">
|
| 333 |
+
<mxGeometry x="11" y="9" width="22" height="14.666666666666668" as="geometry" />
|
| 334 |
+
</mxCell>
|
| 335 |
+
<mxCell id="t72f99bNQpci66oiNbC7-357" value="$$\cdots$$" style="text;html=1;strokeColor=none;fillColor=none;align=center;verticalAlign=middle;whiteSpace=wrap;rounded=0;fontSize=12;rotation=22;" parent="t72f99bNQpci66oiNbC7-343" vertex="1">
|
| 336 |
+
<mxGeometry x="55" y="26" width="20" height="18.94736842105263" as="geometry" />
|
| 337 |
+
</mxCell>
|
| 338 |
+
<mxCell id="t72f99bNQpci66oiNbC7-347" value="$$\cdots$$" style="text;html=1;strokeColor=none;fillColor=none;align=center;verticalAlign=middle;whiteSpace=wrap;rounded=0;fontSize=12;" parent="1" vertex="1">
|
| 339 |
+
<mxGeometry x="208.5" y="138.00000000000003" width="20" height="18.94736842105263" as="geometry" />
|
| 340 |
+
</mxCell>
|
| 341 |
+
<mxCell id="t72f99bNQpci66oiNbC7-348" value="$$\cdots$$" style="text;html=1;strokeColor=none;fillColor=none;align=center;verticalAlign=middle;whiteSpace=wrap;rounded=0;fontSize=12;" parent="1" vertex="1">
|
| 342 |
+
<mxGeometry x="278.5" y="138.00000000000003" width="20" height="18.94736842105263" as="geometry" />
|
| 343 |
+
</mxCell>
|
| 344 |
+
<mxCell id="t72f99bNQpci66oiNbC7-349" value="$$\cdots$$" style="text;html=1;strokeColor=none;fillColor=none;align=center;verticalAlign=middle;whiteSpace=wrap;rounded=0;fontSize=12;" parent="1" vertex="1">
|
| 345 |
+
<mxGeometry x="368.5" y="138.00000000000003" width="20" height="18.94736842105263" as="geometry" />
|
| 346 |
+
</mxCell>
|
| 347 |
+
<mxCell id="t72f99bNQpci66oiNbC7-316" value="" style="group" parent="1" vertex="1" connectable="0">
|
| 348 |
+
<mxGeometry x="188.5" y="147.00000000000003" width="60" height="30.5" as="geometry" />
|
| 349 |
+
</mxCell>
|
| 350 |
+
<mxCell id="t72f99bNQpci66oiNbC7-317" value="" style="rounded=0;whiteSpace=wrap;html=1;strokeWidth=2;fontSize=18;align=center;fillColor=#fff2cc;strokeColor=#d6b656;" parent="t72f99bNQpci66oiNbC7-316" vertex="1">
|
| 351 |
+
<mxGeometry y="5.5" width="60" height="25" as="geometry" />
|
| 352 |
+
</mxCell>
|
| 353 |
+
<mxCell id="t72f99bNQpci66oiNbC7-318" value="" style="group" parent="t72f99bNQpci66oiNbC7-316" vertex="1" connectable="0">
|
| 354 |
+
<mxGeometry width="32" height="20" as="geometry" />
|
| 355 |
+
</mxCell>
|
| 356 |
+
<mxCell id="t72f99bNQpci66oiNbC7-319" value="<span style="color: rgb(0, 0, 0); font-variant-ligatures: normal; font-variant-caps: normal; font-weight: 400; letter-spacing: normal; orphans: 2; text-align: center; text-indent: 0px; text-transform: none; widows: 2; word-spacing: 0px; -webkit-text-stroke-width: 0px; text-decoration-thickness: initial; text-decoration-style: initial; text-decoration-color: initial; float: none; display: inline !important;"><i style=""><font style="font-size: 22px;">S</font></i></span>" style="text;whiteSpace=wrap;html=1;fontSize=12;fontFamily=Times New Roman;" parent="t72f99bNQpci66oiNbC7-318" vertex="1">
|
| 357 |
+
<mxGeometry y="-0.6666666666666666" width="20" height="20" as="geometry" />
|
| 358 |
+
</mxCell>
|
| 359 |
+
<mxCell id="t72f99bNQpci66oiNbC7-320" value="<span style="color: rgb(0, 0, 0); font-size: 12px; font-variant-ligatures: normal; font-variant-caps: normal; font-weight: 400; letter-spacing: normal; orphans: 2; text-align: center; text-indent: 0px; text-transform: none; widows: 2; word-spacing: 0px; -webkit-text-stroke-width: 0px; text-decoration-thickness: initial; text-decoration-style: initial; text-decoration-color: initial; float: none; display: inline !important;"><span style="font-size: 12px;"><i>N</i>+1,<i>N</i>+1</span></span>" style="text;whiteSpace=wrap;html=1;fontSize=12;fontFamily=Times New Roman;fontStyle=0;labelBackgroundColor=none;" parent="t72f99bNQpci66oiNbC7-318" vertex="1">
|
| 360 |
+
<mxGeometry x="11" y="9" width="22" height="14.666666666666668" as="geometry" />
|
| 361 |
+
</mxCell>
|
| 362 |
+
<mxCell id="t72f99bNQpci66oiNbC7-321" value="" style="group" parent="1" vertex="1" connectable="0">
|
| 363 |
+
<mxGeometry x="258.5" y="147.00000000000003" width="60" height="30.5" as="geometry" />
|
| 364 |
+
</mxCell>
|
| 365 |
+
<mxCell id="t72f99bNQpci66oiNbC7-323" value="" style="group" parent="t72f99bNQpci66oiNbC7-321" vertex="1" connectable="0">
|
| 366 |
+
<mxGeometry width="32" height="20" as="geometry" />
|
| 367 |
+
</mxCell>
|
| 368 |
+
<mxCell id="t72f99bNQpci66oiNbC7-324" value="<span style="color: rgb(0, 0, 0); font-variant-ligatures: normal; font-variant-caps: normal; font-weight: 400; letter-spacing: normal; orphans: 2; text-align: center; text-indent: 0px; text-transform: none; widows: 2; word-spacing: 0px; -webkit-text-stroke-width: 0px; text-decoration-thickness: initial; text-decoration-style: initial; text-decoration-color: initial; float: none; display: inline !important;"><i style=""><font style="font-size: 22px;">S</font></i></span>" style="text;whiteSpace=wrap;html=1;fontSize=12;fontFamily=Times New Roman;" parent="t72f99bNQpci66oiNbC7-323" vertex="1">
|
| 369 |
+
<mxGeometry y="-0.6666666666666666" width="20" height="20" as="geometry" />
|
| 370 |
+
</mxCell>
|
| 371 |
+
<mxCell id="t72f99bNQpci66oiNbC7-325" value="<span style="color: rgb(0, 0, 0); font-size: 12px; font-variant-ligatures: normal; font-variant-caps: normal; font-weight: 400; letter-spacing: normal; orphans: 2; text-align: center; text-indent: 0px; text-transform: none; widows: 2; word-spacing: 0px; -webkit-text-stroke-width: 0px; text-decoration-thickness: initial; text-decoration-style: initial; text-decoration-color: initial; float: none; display: inline !important;"><span style="font-size: 12px;"><i>N</i>+1,<i>N</i>+2</span></span>" style="text;whiteSpace=wrap;html=1;fontSize=12;fontFamily=Times New Roman;fontStyle=0;labelBackgroundColor=none;" parent="t72f99bNQpci66oiNbC7-323" vertex="1">
|
| 372 |
+
<mxGeometry x="11" y="9" width="22" height="14.666666666666668" as="geometry" />
|
| 373 |
+
</mxCell>
|
| 374 |
+
<mxCell id="t72f99bNQpci66oiNbC7-326" value="$$\cdots$$" style="text;html=1;strokeColor=none;fillColor=none;align=center;verticalAlign=middle;whiteSpace=wrap;rounded=0;fontSize=12;" parent="1" vertex="1">
|
| 375 |
+
<mxGeometry x="323.5" y="157.00000000000003" width="20" height="18.94736842105263" as="geometry" />
|
| 376 |
+
</mxCell>
|
| 377 |
+
<mxCell id="t72f99bNQpci66oiNbC7-327" value="" style="group" parent="1" vertex="1" connectable="0">
|
| 378 |
+
<mxGeometry x="348.5" y="147.00000000000003" width="60" height="30.5" as="geometry" />
|
| 379 |
+
</mxCell>
|
| 380 |
+
<mxCell id="t72f99bNQpci66oiNbC7-328" value="" style="rounded=0;whiteSpace=wrap;html=1;strokeWidth=2;fontSize=18;align=center;fillColor=#fff2cc;strokeColor=#d6b656;" parent="t72f99bNQpci66oiNbC7-327" vertex="1">
|
| 381 |
+
<mxGeometry y="5.5" width="60" height="25" as="geometry" />
|
| 382 |
+
</mxCell>
|
| 383 |
+
<mxCell id="t72f99bNQpci66oiNbC7-329" value="" style="group" parent="t72f99bNQpci66oiNbC7-327" vertex="1" connectable="0">
|
| 384 |
+
<mxGeometry x="5" width="32" height="20" as="geometry" />
|
| 385 |
+
</mxCell>
|
| 386 |
+
<mxCell id="t72f99bNQpci66oiNbC7-330" value="<span style="color: rgb(0, 0, 0); font-variant-ligatures: normal; font-variant-caps: normal; font-weight: 400; letter-spacing: normal; orphans: 2; text-align: center; text-indent: 0px; text-transform: none; widows: 2; word-spacing: 0px; -webkit-text-stroke-width: 0px; text-decoration-thickness: initial; text-decoration-style: initial; text-decoration-color: initial; float: none; display: inline !important;"><i style=""><font style="font-size: 22px;">S</font></i></span>" style="text;whiteSpace=wrap;html=1;fontSize=12;fontFamily=Times New Roman;" parent="t72f99bNQpci66oiNbC7-329" vertex="1">
|
| 387 |
+
<mxGeometry y="-0.6666666666666666" width="20" height="20" as="geometry" />
|
| 388 |
+
</mxCell>
|
| 389 |
+
<mxCell id="t72f99bNQpci66oiNbC7-331" value="<span style="color: rgb(0, 0, 0); font-size: 12px; font-variant-ligatures: normal; font-variant-caps: normal; font-weight: 400; letter-spacing: normal; orphans: 2; text-align: center; text-indent: 0px; text-transform: none; widows: 2; word-spacing: 0px; -webkit-text-stroke-width: 0px; text-decoration-thickness: initial; text-decoration-style: initial; text-decoration-color: initial; float: none; display: inline !important;"><span style="font-size: 12px;"><i>N</i>+1,<i>M</i></span></span>" style="text;whiteSpace=wrap;html=1;fontSize=12;fontFamily=Times New Roman;fontStyle=0;labelBackgroundColor=none;" parent="t72f99bNQpci66oiNbC7-329" vertex="1">
|
| 390 |
+
<mxGeometry x="11" y="9" width="22" height="14.666666666666668" as="geometry" />
|
| 391 |
+
</mxCell>
|
| 392 |
+
<mxCell id="t72f99bNQpci66oiNbC7-332" value="" style="group" parent="1" vertex="1" connectable="0">
|
| 393 |
+
<mxGeometry x="258.5" y="181.00000000000003" width="60" height="30.5" as="geometry" />
|
| 394 |
+
</mxCell>
|
| 395 |
+
<mxCell id="t72f99bNQpci66oiNbC7-333" value="" style="rounded=0;whiteSpace=wrap;html=1;strokeWidth=2;fontSize=18;align=center;fillColor=#fff2cc;strokeColor=#d6b656;" parent="t72f99bNQpci66oiNbC7-332" vertex="1">
|
| 396 |
+
<mxGeometry y="5.5" width="60" height="25" as="geometry" />
|
| 397 |
+
</mxCell>
|
| 398 |
+
<mxCell id="t72f99bNQpci66oiNbC7-334" value="" style="group" parent="t72f99bNQpci66oiNbC7-332" vertex="1" connectable="0">
|
| 399 |
+
<mxGeometry width="32" height="20" as="geometry" />
|
| 400 |
+
</mxCell>
|
| 401 |
+
<mxCell id="t72f99bNQpci66oiNbC7-335" value="<span style="color: rgb(0, 0, 0); font-variant-ligatures: normal; font-variant-caps: normal; font-weight: 400; letter-spacing: normal; orphans: 2; text-align: center; text-indent: 0px; text-transform: none; widows: 2; word-spacing: 0px; -webkit-text-stroke-width: 0px; text-decoration-thickness: initial; text-decoration-style: initial; text-decoration-color: initial; float: none; display: inline !important;"><i style=""><font style="font-size: 22px;">S</font></i></span>" style="text;whiteSpace=wrap;html=1;fontSize=12;fontFamily=Times New Roman;" parent="t72f99bNQpci66oiNbC7-334" vertex="1">
|
| 402 |
+
<mxGeometry y="-0.6666666666666666" width="20" height="20" as="geometry" />
|
| 403 |
+
</mxCell>
|
| 404 |
+
<mxCell id="t72f99bNQpci66oiNbC7-336" value="<span style="color: rgb(0, 0, 0); font-size: 12px; font-variant-ligatures: normal; font-variant-caps: normal; font-weight: 400; letter-spacing: normal; orphans: 2; text-align: center; text-indent: 0px; text-transform: none; widows: 2; word-spacing: 0px; -webkit-text-stroke-width: 0px; text-decoration-thickness: initial; text-decoration-style: initial; text-decoration-color: initial; float: none; display: inline !important;"><span style="font-size: 12px;"><i>N</i>+1,<i>N</i>+2</span></span>" style="text;whiteSpace=wrap;html=1;fontSize=12;fontFamily=Times New Roman;fontStyle=0;labelBackgroundColor=none;" parent="t72f99bNQpci66oiNbC7-334" vertex="1">
|
| 405 |
+
<mxGeometry x="11" y="9" width="22" height="14.666666666666668" as="geometry" />
|
| 406 |
+
</mxCell>
|
| 407 |
+
<mxCell id="t72f99bNQpci66oiNbC7-337" value="$$\cdots$$" style="text;html=1;strokeColor=none;fillColor=none;align=center;verticalAlign=middle;whiteSpace=wrap;rounded=0;fontSize=12;" parent="1" vertex="1">
|
| 408 |
+
<mxGeometry x="323.5" y="191.00000000000003" width="20" height="18.94736842105263" as="geometry" />
|
| 409 |
+
</mxCell>
|
| 410 |
+
<mxCell id="t72f99bNQpci66oiNbC7-338" value="" style="group" parent="1" vertex="1" connectable="0">
|
| 411 |
+
<mxGeometry x="348.5" y="181.00000000000003" width="60" height="49.44736842105263" as="geometry" />
|
| 412 |
+
</mxCell>
|
| 413 |
+
<mxCell id="t72f99bNQpci66oiNbC7-339" value="" style="rounded=0;whiteSpace=wrap;html=1;strokeWidth=2;fontSize=18;align=center;fillColor=#fff2cc;strokeColor=#d6b656;" parent="t72f99bNQpci66oiNbC7-338" vertex="1">
|
| 414 |
+
<mxGeometry y="5.5" width="60" height="25" as="geometry" />
|
| 415 |
+
</mxCell>
|
| 416 |
+
<mxCell id="t72f99bNQpci66oiNbC7-340" value="" style="group" parent="t72f99bNQpci66oiNbC7-338" vertex="1" connectable="0">
|
| 417 |
+
<mxGeometry x="5" width="32" height="20" as="geometry" />
|
| 418 |
+
</mxCell>
|
| 419 |
+
<mxCell id="t72f99bNQpci66oiNbC7-341" value="<span style="color: rgb(0, 0, 0); font-variant-ligatures: normal; font-variant-caps: normal; font-weight: 400; letter-spacing: normal; orphans: 2; text-align: center; text-indent: 0px; text-transform: none; widows: 2; word-spacing: 0px; -webkit-text-stroke-width: 0px; text-decoration-thickness: initial; text-decoration-style: initial; text-decoration-color: initial; float: none; display: inline !important;"><i style=""><font style="font-size: 22px;">S</font></i></span>" style="text;whiteSpace=wrap;html=1;fontSize=12;fontFamily=Times New Roman;" parent="t72f99bNQpci66oiNbC7-340" vertex="1">
|
| 420 |
+
<mxGeometry y="-0.6666666666666666" width="20" height="20" as="geometry" />
|
| 421 |
+
</mxCell>
|
| 422 |
+
<mxCell id="t72f99bNQpci66oiNbC7-342" value="<span style="color: rgb(0, 0, 0); font-size: 12px; font-variant-ligatures: normal; font-variant-caps: normal; font-weight: 400; letter-spacing: normal; orphans: 2; text-align: center; text-indent: 0px; text-transform: none; widows: 2; word-spacing: 0px; -webkit-text-stroke-width: 0px; text-decoration-thickness: initial; text-decoration-style: initial; text-decoration-color: initial; float: none; display: inline !important;"><span style="font-size: 12px;"><i>N</i>+2,<i>M</i></span></span>" style="text;whiteSpace=wrap;html=1;fontSize=12;fontFamily=Times New Roman;fontStyle=0;labelBackgroundColor=none;" parent="t72f99bNQpci66oiNbC7-340" vertex="1">
|
| 423 |
+
<mxGeometry x="11" y="9" width="22" height="14.666666666666668" as="geometry" />
|
| 424 |
+
</mxCell>
|
| 425 |
+
<mxCell id="t72f99bNQpci66oiNbC7-350" value="" style="group" parent="1" vertex="1" connectable="0">
|
| 426 |
+
<mxGeometry x="348.5" y="221.00000000000003" width="60" height="30.5" as="geometry" />
|
| 427 |
+
</mxCell>
|
| 428 |
+
<mxCell id="t72f99bNQpci66oiNbC7-351" value="" style="rounded=0;whiteSpace=wrap;html=1;strokeWidth=2;fontSize=18;align=center;fillColor=#fff2cc;strokeColor=#d6b656;" parent="t72f99bNQpci66oiNbC7-350" vertex="1">
|
| 429 |
+
<mxGeometry y="5.5" width="60" height="25" as="geometry" />
|
| 430 |
+
</mxCell>
|
| 431 |
+
<mxCell id="t72f99bNQpci66oiNbC7-352" value="" style="group" parent="t72f99bNQpci66oiNbC7-350" vertex="1" connectable="0">
|
| 432 |
+
<mxGeometry x="9" width="32" height="20" as="geometry" />
|
| 433 |
+
</mxCell>
|
| 434 |
+
<mxCell id="t72f99bNQpci66oiNbC7-353" value="<span style="color: rgb(0, 0, 0); font-variant-ligatures: normal; font-variant-caps: normal; font-weight: 400; letter-spacing: normal; orphans: 2; text-align: center; text-indent: 0px; text-transform: none; widows: 2; word-spacing: 0px; -webkit-text-stroke-width: 0px; text-decoration-thickness: initial; text-decoration-style: initial; text-decoration-color: initial; float: none; display: inline !important;"><i style=""><font style="font-size: 22px;">S</font></i></span>" style="text;whiteSpace=wrap;html=1;fontSize=12;fontFamily=Times New Roman;" parent="t72f99bNQpci66oiNbC7-352" vertex="1">
|
| 435 |
+
<mxGeometry y="-0.6666666666666666" width="20" height="20" as="geometry" />
|
| 436 |
+
</mxCell>
|
| 437 |
+
<mxCell id="t72f99bNQpci66oiNbC7-354" value="<span style="color: rgb(0, 0, 0); font-size: 12px; font-variant-ligatures: normal; font-variant-caps: normal; font-weight: 400; letter-spacing: normal; orphans: 2; text-align: center; text-indent: 0px; text-transform: none; widows: 2; word-spacing: 0px; -webkit-text-stroke-width: 0px; text-decoration-thickness: initial; text-decoration-style: initial; text-decoration-color: initial; float: none; display: inline !important;"><span style="font-size: 12px;"><i>M</i>,<i>M</i></span></span>" style="text;whiteSpace=wrap;html=1;fontSize=12;fontFamily=Times New Roman;fontStyle=0;labelBackgroundColor=none;" parent="t72f99bNQpci66oiNbC7-352" vertex="1">
|
| 438 |
+
<mxGeometry x="11" y="9" width="22" height="14.666666666666668" as="geometry" />
|
| 439 |
+
</mxCell>
|
| 440 |
+
<mxCell id="t72f99bNQpci66oiNbC7-356" value="$$\cdots$$" style="text;html=1;strokeColor=none;fillColor=none;align=center;verticalAlign=middle;whiteSpace=wrap;rounded=0;fontSize=12;rotation=22;" parent="1" vertex="1">
|
| 441 |
+
<mxGeometry x="323.5" y="209.95000000000002" width="20" height="18.94736842105263" as="geometry" />
|
| 442 |
+
</mxCell>
|
| 443 |
+
<mxCell id="t72f99bNQpci66oiNbC7-355" value="$$\cdots$$" style="text;html=1;strokeColor=none;fillColor=none;align=center;verticalAlign=middle;whiteSpace=wrap;rounded=0;fontSize=12;" parent="1" vertex="1">
|
| 444 |
+
<mxGeometry x="368.5" y="209.00000000000003" width="20" height="18.94736842105263" as="geometry" />
|
| 445 |
+
</mxCell>
|
| 446 |
+
<mxCell id="t72f99bNQpci66oiNbC7-383" value="" style="group" parent="1" vertex="1" connectable="0">
|
| 447 |
+
<mxGeometry x="486" y="100.00000000000001" width="180" height="170" as="geometry" />
|
| 448 |
+
</mxCell>
|
| 449 |
+
<mxCell id="t72f99bNQpci66oiNbC7-378" value="" style="rounded=1;whiteSpace=wrap;html=1;strokeWidth=2;fontSize=18;align=center;arcSize=11;dashed=1;fillColor=none;fontFamily=Times New Roman;" parent="t72f99bNQpci66oiNbC7-383" vertex="1">
|
| 450 |
+
<mxGeometry width="180" height="170" as="geometry" />
|
| 451 |
+
</mxCell>
|
| 452 |
+
<mxCell id="t72f99bNQpci66oiNbC7-275" value="$$\cdots$$" style="text;html=1;strokeColor=none;fillColor=none;align=center;verticalAlign=middle;whiteSpace=wrap;rounded=0;fontSize=12;fontFamily=Times New Roman;" parent="t72f99bNQpci66oiNbC7-383" vertex="1">
|
| 453 |
+
<mxGeometry x="39.78378378378375" y="17.5" width="13.997997997997997" height="18.94736842105263" as="geometry" />
|
| 454 |
+
</mxCell>
|
| 455 |
+
<mxCell id="t72f99bNQpci66oiNbC7-276" value="<font style="font-size: 18px;">1</font>" style="rounded=0;whiteSpace=wrap;html=1;labelBackgroundColor=none;fontFamily=Times New Roman;fontSize=12;strokeWidth=2;direction=south;" parent="t72f99bNQpci66oiNbC7-383" vertex="1">
|
| 456 |
+
<mxGeometry x="9.945945945945937" y="15" width="24.864864864864867" height="25" as="geometry" />
|
| 457 |
+
</mxCell>
|
| 458 |
+
<mxCell id="t72f99bNQpci66oiNbC7-277" value="<font style="font-size: 18px;">-</font>" style="rounded=0;whiteSpace=wrap;html=1;labelBackgroundColor=none;fontFamily=Times New Roman;fontSize=12;strokeWidth=2;" parent="t72f99bNQpci66oiNbC7-383" vertex="1">
|
| 459 |
+
<mxGeometry x="59.67567567567562" y="15.000526315789443" width="24.864864864864867" height="25" as="geometry" />
|
| 460 |
+
</mxCell>
|
| 461 |
+
<mxCell id="t72f99bNQpci66oiNbC7-278" value="$$\cdots$$" style="text;html=1;strokeColor=none;fillColor=none;align=center;verticalAlign=middle;whiteSpace=wrap;rounded=0;fontSize=12;fontFamily=Times New Roman;" parent="t72f99bNQpci66oiNbC7-383" vertex="1">
|
| 462 |
+
<mxGeometry x="124.32432432432438" y="17.5" width="13.997997997997997" height="18.94736842105263" as="geometry" />
|
| 463 |
+
</mxCell>
|
| 464 |
+
<mxCell id="t72f99bNQpci66oiNbC7-279" value="<span style="font-size: 18px;">-</span>" style="rounded=0;whiteSpace=wrap;html=1;labelBackgroundColor=none;fontFamily=Times New Roman;fontSize=12;strokeWidth=2;" parent="t72f99bNQpci66oiNbC7-383" vertex="1">
|
| 465 |
+
<mxGeometry x="144.21621621621625" y="15" width="24.864864864864867" height="25" as="geometry" />
|
| 466 |
+
</mxCell>
|
| 467 |
+
<mxCell id="t72f99bNQpci66oiNbC7-359" value="<span style="font-size: 18px;">-</span>" style="rounded=0;whiteSpace=wrap;html=1;labelBackgroundColor=none;fontFamily=Times New Roman;fontSize=12;strokeWidth=2;" parent="t72f99bNQpci66oiNbC7-383" vertex="1">
|
| 468 |
+
<mxGeometry x="94.48648648648646" y="15.000526315789443" width="24.864864864864867" height="25" as="geometry" />
|
| 469 |
+
</mxCell>
|
| 470 |
+
<mxCell id="t72f99bNQpci66oiNbC7-366" value="" style="group" parent="t72f99bNQpci66oiNbC7-383" vertex="1" connectable="0">
|
| 471 |
+
<mxGeometry x="59.67567567567562" y="52.5" width="109.40540540540542" height="25.00052631578947" as="geometry" />
|
| 472 |
+
</mxCell>
|
| 473 |
+
<mxCell id="t72f99bNQpci66oiNbC7-362" value="<font style="font-size: 18px;">1</font>" style="rounded=0;whiteSpace=wrap;html=1;labelBackgroundColor=none;fontFamily=Times New Roman;fontSize=12;strokeWidth=2;" parent="t72f99bNQpci66oiNbC7-366" vertex="1">
|
| 474 |
+
<mxGeometry y="0.0005263157894717096" width="24.86486486486487" height="25" as="geometry" />
|
| 475 |
+
</mxCell>
|
| 476 |
+
<mxCell id="t72f99bNQpci66oiNbC7-363" value="$$\cdots$$" style="text;html=1;strokeColor=none;fillColor=none;align=center;verticalAlign=middle;whiteSpace=wrap;rounded=0;fontSize=12;fontFamily=Times New Roman;" parent="t72f99bNQpci66oiNbC7-366" vertex="1">
|
| 477 |
+
<mxGeometry x="64.64864864864866" y="2.5" width="13.997997997997999" height="18.94736842105263" as="geometry" />
|
| 478 |
+
</mxCell>
|
| 479 |
+
<mxCell id="t72f99bNQpci66oiNbC7-364" value="<span style="font-size: 18px;">-</span>" style="rounded=0;whiteSpace=wrap;html=1;labelBackgroundColor=none;fontFamily=Times New Roman;fontSize=12;strokeWidth=2;" parent="t72f99bNQpci66oiNbC7-366" vertex="1">
|
| 480 |
+
<mxGeometry x="84.54054054054056" width="24.86486486486487" height="25" as="geometry" />
|
| 481 |
+
</mxCell>
|
| 482 |
+
<mxCell id="t72f99bNQpci66oiNbC7-365" value="<span style="font-size: 18px;">0</span>" style="rounded=0;whiteSpace=wrap;html=1;labelBackgroundColor=none;fontFamily=Times New Roman;fontSize=12;strokeWidth=2;" parent="t72f99bNQpci66oiNbC7-366" vertex="1">
|
| 483 |
+
<mxGeometry x="34.810810810810814" y="0.0005263157894717096" width="24.86486486486487" height="25" as="geometry" />
|
| 484 |
+
</mxCell>
|
| 485 |
+
<mxCell id="t72f99bNQpci66oiNbC7-367" value="" style="group" parent="t72f99bNQpci66oiNbC7-383" vertex="1" connectable="0">
|
| 486 |
+
<mxGeometry x="59.67567567567562" y="86.5" width="109.40540540540542" height="25.00052631578947" as="geometry" />
|
| 487 |
+
</mxCell>
|
| 488 |
+
<mxCell id="t72f99bNQpci66oiNbC7-369" value="$$\cdots$$" style="text;html=1;strokeColor=none;fillColor=none;align=center;verticalAlign=middle;whiteSpace=wrap;rounded=0;fontSize=12;fontFamily=Times New Roman;" parent="t72f99bNQpci66oiNbC7-367" vertex="1">
|
| 489 |
+
<mxGeometry x="64.64864864864866" y="2.5" width="13.997997997997999" height="18.94736842105263" as="geometry" />
|
| 490 |
+
</mxCell>
|
| 491 |
+
<mxCell id="t72f99bNQpci66oiNbC7-370" value="<span style="font-size: 18px;">-</span>" style="rounded=0;whiteSpace=wrap;html=1;labelBackgroundColor=none;fontFamily=Times New Roman;fontSize=12;strokeWidth=2;" parent="t72f99bNQpci66oiNbC7-367" vertex="1">
|
| 492 |
+
<mxGeometry x="84.54054054054056" width="24.86486486486487" height="25" as="geometry" />
|
| 493 |
+
</mxCell>
|
| 494 |
+
<mxCell id="t72f99bNQpci66oiNbC7-371" value="<span style="font-size: 18px;">0</span>" style="rounded=0;whiteSpace=wrap;html=1;labelBackgroundColor=none;fontFamily=Times New Roman;fontSize=12;strokeWidth=2;" parent="t72f99bNQpci66oiNbC7-367" vertex="1">
|
| 495 |
+
<mxGeometry x="34.810810810810814" y="0.0005263157894717096" width="24.86486486486487" height="25" as="geometry" />
|
| 496 |
+
</mxCell>
|
| 497 |
+
<mxCell id="t72f99bNQpci66oiNbC7-372" value="<span style="font-size: 18px;">-</span>" style="rounded=0;whiteSpace=wrap;html=1;labelBackgroundColor=none;fontFamily=Times New Roman;fontSize=12;strokeWidth=2;" parent="t72f99bNQpci66oiNbC7-383" vertex="1">
|
| 498 |
+
<mxGeometry x="144.21621621621625" y="126.5" width="24.864864864864867" height="25" as="geometry" />
|
| 499 |
+
</mxCell>
|
| 500 |
+
<mxCell id="t72f99bNQpci66oiNbC7-373" value="$$\cdots$$" style="text;html=1;strokeColor=none;fillColor=none;align=center;verticalAlign=middle;whiteSpace=wrap;rounded=0;fontSize=12;fontFamily=Times New Roman;" parent="t72f99bNQpci66oiNbC7-383" vertex="1">
|
| 501 |
+
<mxGeometry x="65.10616216216226" y="38" width="13.997997997997997" height="18.94736842105263" as="geometry" />
|
| 502 |
+
</mxCell>
|
| 503 |
+
<mxCell id="t72f99bNQpci66oiNbC7-374" value="$$\cdots$$" style="text;html=1;strokeColor=none;fillColor=none;align=center;verticalAlign=middle;whiteSpace=wrap;rounded=0;fontSize=12;fontFamily=Times New Roman;" parent="t72f99bNQpci66oiNbC7-383" vertex="1">
|
| 504 |
+
<mxGeometry x="99.45945945945948" y="38" width="13.997997997997997" height="18.94736842105263" as="geometry" />
|
| 505 |
+
</mxCell>
|
| 506 |
+
<mxCell id="t72f99bNQpci66oiNbC7-375" value="$$\cdots$$" style="text;html=1;strokeColor=none;fillColor=none;align=center;verticalAlign=middle;whiteSpace=wrap;rounded=0;fontSize=12;fontFamily=Times New Roman;" parent="t72f99bNQpci66oiNbC7-383" vertex="1">
|
| 507 |
+
<mxGeometry x="149.18918918918916" y="38" width="13.997997997997997" height="18.94736842105263" as="geometry" />
|
| 508 |
+
</mxCell>
|
| 509 |
+
<mxCell id="t72f99bNQpci66oiNbC7-376" value="$$\cdots$$" style="text;html=1;strokeColor=none;fillColor=none;align=center;verticalAlign=middle;whiteSpace=wrap;rounded=0;fontSize=12;fontFamily=Times New Roman;" parent="t72f99bNQpci66oiNbC7-383" vertex="1">
|
| 510 |
+
<mxGeometry x="149.18918918918916" y="109" width="13.997997997997997" height="18.94736842105263" as="geometry" />
|
| 511 |
+
</mxCell>
|
| 512 |
+
<mxCell id="t72f99bNQpci66oiNbC7-379" value="$$\cdots$$" style="text;html=1;strokeColor=none;fillColor=none;align=center;verticalAlign=middle;whiteSpace=wrap;rounded=0;fontSize=12;rotation=22;" parent="t72f99bNQpci66oiNbC7-383" vertex="1">
|
| 513 |
+
<mxGeometry x="36.83978378378379" y="36.44999999999999" width="19.891891891891895" height="18.94736842105263" as="geometry" />
|
| 514 |
+
</mxCell>
|
| 515 |
+
<mxCell id="t72f99bNQpci66oiNbC7-380" value="$$\cdots$$" style="text;html=1;strokeColor=none;fillColor=none;align=center;verticalAlign=middle;whiteSpace=wrap;rounded=0;fontSize=12;rotation=22;" parent="t72f99bNQpci66oiNbC7-383" vertex="1">
|
| 516 |
+
<mxGeometry x="121.3803243243243" y="109" width="19.891891891891895" height="18.94736842105263" as="geometry" />
|
| 517 |
+
</mxCell>
|
| 518 |
+
<mxCell id="t72f99bNQpci66oiNbC7-389" value="<font style="font-size: 30px;"><i style="font-size: 30px;"><span style="font-size: 30px;">Y</span></i></font>" style="text;html=1;strokeColor=none;fillColor=none;align=center;verticalAlign=middle;whiteSpace=wrap;rounded=0;labelBackgroundColor=none;fontFamily=Times New Roman;fontSize=30;" parent="t72f99bNQpci66oiNbC7-383" vertex="1">
|
| 519 |
+
<mxGeometry x="6" y="122" width="32" height="30" as="geometry" />
|
| 520 |
+
</mxCell>
|
| 521 |
+
<mxCell id="t72f99bNQpci66oiNbC7-284" value="<font size="1" style=""><i style="font-size: 30px;">S</i></font>" style="text;html=1;strokeColor=none;fillColor=none;align=center;verticalAlign=middle;whiteSpace=wrap;rounded=0;labelBackgroundColor=none;fontFamily=Times New Roman;fontSize=24;" parent="1" vertex="1">
|
| 522 |
+
<mxGeometry x="102.49999999999997" y="222.00000000000003" width="60" height="30" as="geometry" />
|
| 523 |
+
</mxCell>
|
| 524 |
+
<mxCell id="Fr_ZPRIASCwx132rSDbO-56" value="<font style="font-size: 24px">MLM</font><div></div>" style="rounded=1;whiteSpace=wrap;html=1;strokeWidth=2;fontSize=14;align=center;fillColor=#dae8fc;strokeColor=#6c8ebf;" parent="1" vertex="1">
|
| 525 |
+
<mxGeometry x="65" y="440" width="710" height="50" as="geometry" />
|
| 526 |
+
</mxCell>
|
| 527 |
+
</root>
|
| 528 |
+
</mxGraphModel>
|
| 529 |
+
</diagram>
|
| 530 |
+
</mxfile>
|
2212.04755/main_diagram/main_diagram.pdf
ADDED
Binary file (34.9 kB). View file
2212.04755/paper_text/intro_method.md
ADDED
@@ -0,0 +1,84 @@
# Introduction

Span extraction, such as Extractive Question Answering (EQA) and Named Entity Recognition (NER), is a sub-topic of natural language understanding (NLU) whose goal is to detect token spans in the input text according to specific requirements such as task labels or questions [44, 53]. Discriminative methods have been used to perform such tasks and have achieved state-of-the-art results. As shown in the left part of Figure 1, these works tailor a task-specific fine-tuning head on top of pre-trained language models (PLMs) to perform sequence tagging or machine reading comprehension (MRC) [12, 35, 26]. The base PLMs are usually selected from pre-trained masked language models (MLMs), such as RoBERTa [35] or BART [28], owing to their comprehensive bi-directional modeling of the input text in the encoder. However, given the disparate learning objectives and model architectures of MLM pre-training and task-specific fine-tuning, the discriminative

<sup>\*</sup>This work was supported by Alibaba Group through the Alibaba Research Intern Program. The work described in this paper was also partially supported by a grant from the Research Grants Council of the Hong Kong Special Administrative Region, China (Project Code: 14200620). † This work was done when Weiwen Xu and Meng Zhou interned at Alibaba DAMO Academy. ‡ Xin Li is the corresponding author.

<span id="page-0-0"></span><sup>2</sup>The code, data, and checkpoints are released at https://github.com/DAMO-NLP-SG/PMR

<span id="page-1-0"></span>

Figure 1: Comparison among three fine-tuning strategies for NER and EQA, namely, vanilla discriminative fine-tuning, generative fine-tuning, and fine-tuning with the proposed PMR.

methods are less effective at adapting MLMs to downstream tasks when limited fine-tuning data is available, leading to poor low-resource performance [6].

As shown in the middle part of Figure 1, generative fine-tuning is a popular solution for mitigating the gap between pre-training and fine-tuning [48, 49, 33], and it achieves remarkable few-shot performance on various span extraction tasks [10, 6, 37]. Specifically, generative methods formulate the downstream tasks as a language modeling problem in which PLMs generate response words for a given *prompt* (i.e., a task-specific template) as the task prediction. Despite this success, tackling extraction tasks in a generative manner has several disadvantages. First, when generating the label token (e.g., "person" for PER entities) for a candidate span, a generative method needs to enumerate all possible span candidates to query the PLM [10], which can be computationally expensive for tasks with long input texts, such as EQA. Second, if the desired predictions are target spans (e.g., the "answer" in the EQA task), generative methods usually need to explore a large search space to generate the span tokens. Moreover, it is also challenging to accurately generate structured outputs, e.g., the span-label pairs in the NER task, with PLMs originally trained on unstructured natural language text. These limitations impede PLMs from effectively learning extraction patterns from increased volumes of training data. As a result, even instruction-tuned large language models such as ChatGPT<sup>3</sup> are less effective than discriminative methods built on smaller MLMs for extraction tasks [40, 42, 59, 30].

To bridge the gap between pre-training and fine-tuning without suffering from the aforementioned disadvantages, we propose a novel Pre-trained Machine Reader (PMR), a retrofit of pre-trained MLMs for more effective span extraction. As shown in the right part of Figure 1, PMR resembles common MRC models in that it introduces an MRC head on top of an MLM, but it is further enhanced by a comprehensive continual pre-training stage with large-scale MRC-style data. By keeping the MRC-style learning objective and model architecture identical between continual pre-training and fine-tuning, PMR facilitates effective knowledge transfer in a discriminative manner and thus shows great potential in both low-resource and rich-resource scenarios. Given that MRC has been proven to be a universal paradigm [31, 32, 62, 22], PMR can be directly applied to a broad range of span extraction tasks without additional task design.

To establish PMR, we constructed a large volume of general-purpose and high-quality MRC-style training data based on Wikipedia anchors (i.e., hyperlinked texts). As shown in Figure 2, for each Wikipedia anchor, we composed a pair of correlated articles. One side of the pair is the Wikipedia article that contains detailed descriptions of the hyperlinked entity, which we defined as the *definition*

<span id="page-1-1"></span><sup>3</sup>https://chat.openai.com

<span id="page-2-0"></span>

Figure 2: Construction of MRC-style data by using Wikipedia anchors.

article. The other side of the pair is the article that mentions the specific anchor text, which we defined as the *mention article*. We composed an MRC-style training instance in which the anchor is the answer, the passage surrounding the anchor in the *mention article* is the context, and the definition of the anchor entity in the *definition article* is the query. On top of these data, we introduced a novel Wiki Anchor Extraction (WAE) problem as the pre-training task of PMR: PMR first determines whether the context and the query are relevant, and if so, it extracts the answer from the context that satisfies the query description.

We evaluated PMR on two representative span extraction tasks: NER and EQA. The results show that PMR consistently obtains better extraction performance than the vanilla MLM and surpasses the best baselines by large margins under almost all few-shot settings (up to 6.3 F1 on EQA and 16.3 F1 on NER). Additionally, we observe that sequence classification can be viewed as a special case of extraction in our MRC formulation. In this scenario, it is surprising that PMR can identify high-quality rationale phrases in the input text as justifications for its classification decisions. Furthermore, PMR has the potential to serve as a unified model that addresses various extraction and classification tasks in the MRC formulation.

In summary, our contributions are as follows. Firstly, we constructed a large volume of general-purpose and high-quality MRC-style training data to retrofit MLMs into PMRs. Secondly, by unifying pre-training and fine-tuning as the same discriminative MRC process, the proposed PMR obtains state-of-the-art results under all few-shot NER settings and three out of four few-shot EQA settings. Thirdly, with a unified MRC architecture for solving extraction and classification tasks, PMR also shows promising potential for explaining sequence classification predictions and for unifying NLU tasks.

# Method

This section describes PMR from the perspectives of model pre-training and downstream fine-tuning. For pre-training, we first introduce the proposed model with the WAE training objective and then describe the procedure for curating the WAE pre-training data from Wikipedia. For fine-tuning, we present how PMR can be seamlessly applied to various extraction tasks and solve them in a unified MRC paradigm.

PMR receives MRC-style data in the format $(Q, C, \{A^k\}_{k=1}^K)$, where $Q$ is a natural language query and $C$ is the input context that contains the answers $\{A^k\}_{k=1}^K$ to the query. Each answer is a consecutive token span in the context, and zero ($K=0$) or multiple ($K>1$) answers may exist.
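
To make the format concrete, the following is a toy instance written as a Python dictionary; the field names and values are illustrative and are not taken from the released PMR code or data.

```python
# A toy MRC-style training instance in the (Q, C, {A^k}) format
# (field names are ours, chosen for illustration only).
wae_instance = {
    "query":   "It is a chemical element with the symbol Si.",  # definition of the anchor entity
    "context": "Most electronics are built on silicon chips.",  # passage around the anchor mention
    "answers": [{"text": "silicon", "start": 30, "end": 37}],   # K = 1 here; K may also be 0 or > 1
}
```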

**Model Architecture.** PMR has two components: an MLM encoder and an extractor (Figure 3). The encoder receives the concatenation of the query $Q$ and the context $C$ as input $X$ and represents each input token with hidden states $H$:

$$X = [[\mathtt{CLS}], Q, [\mathtt{SEP}], C, [\mathtt{SEP}]]$$

$$H = \mathbf{MLM}(X) \in \mathbb{R}^{M \times d} \tag{1}$$

<span id="page-3-0"></span>

Figure 3: Model architecture of PMR. "-" indicates illegal candidate spans.

where [CLS] and [SEP] are special tokens inserted into the sequence, $M$ is the sequence length, and $d$ is the dimension of the hidden states. The encoder $\mathbf{MLM}(\cdot)$ denotes any pre-trained text encoder to be retrofitted, e.g., RoBERTa.
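
For illustration, Eq. (1) can be reproduced with an off-the-shelf encoder. The following is a minimal sketch assuming the HuggingFace `transformers` library and a RoBERTa backbone; it is not taken from the released PMR implementation.

```python
# Minimal sketch of Eq. (1): encode X = [CLS] Q [SEP] C [SEP] into H.
import torch
from transformers import AutoModel, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("roberta-base")
encoder = AutoModel.from_pretrained("roberta-base")

query = "It is a chemical element with the symbol Si."
context = "Most electronics are built on silicon chips."

# Passing a text pair builds the concatenated input; RoBERTa's <s>/</s>
# tokens play the roles of [CLS]/[SEP].
inputs = tokenizer(query, context, return_tensors="pt")
with torch.no_grad():
    H = encoder(**inputs).last_hidden_state  # shape (1, M, d), d = 768
```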

The extractor receives the hidden states of any two tokens and predicts a probability score indicating whether the span between the two tokens should be output as an answer. We apply the *general* way to compute the score matrix $S$ [38]:

$$S = \operatorname{sigmoid}(\mathbf{FFN}(H)^T H) \in \mathbb{R}^{M \times M} \tag{2}$$

where $\mathbf{FFN}$ is a feed-forward network [56], and $S_{i,j}$ is the probability of extracting the span $X_{i:j}$ as output. The *general* way avoids creating the large tensor of shape $\mathbb{R}^{M \times M \times 2d}$ required by the *concatenation* way [31], and thus achieves higher training efficiency with fewer resources.
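
A PyTorch sketch of this extractor is given below; the depth and activation of the $\mathbf{FFN}$ are our assumptions, not the paper's exact configuration.

```python
# Sketch of the "general" extractor, Eq. (2): S_ij = sigmoid(FFN(h_i) . h_j).
import torch
import torch.nn as nn

class SpanExtractor(nn.Module):
    def __init__(self, d: int):
        super().__init__()
        # Token-wise feed-forward network applied to H (shape assumed).
        self.ffn = nn.Sequential(nn.Linear(d, d), nn.GELU(), nn.Linear(d, d))

    def forward(self, H: torch.Tensor) -> torch.Tensor:
        # H: (batch, M, d) -> S: (batch, M, M); each transformed state
        # FFN(h_i) is dotted against every raw hidden state h_j.
        return torch.sigmoid(torch.matmul(self.ffn(H), H.transpose(1, 2)))

S = SpanExtractor(d=768)(torch.randn(1, 12, 768))  # S.shape == (1, 12, 12)
```

Compared with materializing an $M \times M \times 2d$ tensor of concatenated state pairs, this formulation needs only one token-wise projection and a single matrix product.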

**Training Objective.** PMR is pre-trained with the WAE task, which checks whether answers exist in the context and extracts them if they do. For the first goal, PMR determines whether the context contains spans that can answer the query:

<span id="page-3-2"></span>

$$L_{cls} = \mathbf{CE}(S_{1,1}, Y^{cls}) \tag{3}$$

where $\mathbf{CE}$ is the cross-entropy loss and $S_{1,1}$, the score at the [CLS] token, denotes the query-context relevance. If $Y^{cls}=1$, the query and the context are relevant (i.e., answers exist). This task mimics the downstream situation in which there may be no span to extract from the context (e.g., in NER) and encourages the model to recognize unextractable examples through the semantic relevance of the two pieces of text.

Secondly, the model is expected to extract all correct spans from the context as answers, which is implemented by predicting the answer positions:

<span id="page-3-1"></span>

$$L_{ext} = \sum_{N < i \le j < M} \mathbf{CE}(S_{i,j}, Y_{i,j}^{ext}) \tag{4}$$

where $Y_{i,j}^{ext} = 1$ indicates that $X_{i:j}$ is an answer to $Q$, and $N$ is the positional offset of the context in $X$. Note that only spans $X_{i:j}$ with $N < i \le j < M$ are legal answer candidates (i.e., spans from the context). MRC-NER [31] predicted the start and end probabilities as two additional objectives; however, we find that these objectives are redundant given our matrix-based objective and incompatible with multi-span extraction.

The overall pre-training objective $L_{wae}$ is:

$$L_{wae} = L_{cls} + L_{ext} \tag{5}$$
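
A sketch of Eqs. (3)-(5) follows, with binary cross-entropy on the sigmoid scores standing in for $\mathbf{CE}$; the 0-based translation of the legality constraint $N < i \le j < M$ is our reading and may differ by one position from the authors' tokenization.

```python
# Sketch of the WAE objective L_wae = L_cls + L_ext (Eqs. 3-5).
import torch
import torch.nn.functional as F

def wae_loss(S: torch.Tensor, Y_ext: torch.Tensor,
             y_cls: torch.Tensor, N: int) -> torch.Tensor:
    """S, Y_ext: (M, M) float score/label matrices; y_cls: scalar 0./1.
    relevance label; N: positional offset of the context within X."""
    M = S.size(0)
    # Eq. (3): relevance is read off the [CLS]-[CLS] entry S_{1,1}.
    loss_cls = F.binary_cross_entropy(S[0, 0], y_cls)
    # Eq. (4): only spans inside the context with i <= j are legal.
    i = torch.arange(M).view(-1, 1)
    j = torch.arange(M).view(1, -1)
    legal = (i > N) & (i <= j) & (j < M - 1)  # 0-based reading of N < i <= j < M
    loss_ext = F.binary_cross_entropy(S[legal], Y_ext[legal])
    return loss_cls + loss_ext  # Eq. (5)
```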

**Data Preparation.** MLM training can be easily scaled to millions of raw texts with a self-supervised learning objective [\[12\]](#page-10-0). In contrast, training PMR in the MRC paradigm requires labeled triplets (query, context, and answers) as supervision signals, which are expensive to prepare at pre-training scale. To address this limitation, we automated the construction of general-purpose and high-quality MRC-style training data by using Wikipedia anchors.

As illustrated in Figure [2](#page-2-0), a Wikipedia anchor hyperlinks two Wikipedia articles: the definition article, which provides detailed descriptions of the anchor entity "Silicon", and the mention article, where the anchor is mentioned. We leveraged the large number of such hyperlink relations in Wikipedia as distant supervision to automatically construct MRC triplets. Specifically, we regarded an anchor as the MRC answer for the following context and query pair. The sentences surrounding the anchor in the mention article serve as the MRC context. The sentences from the first section of the definition article, which usually compose the most representative summary of the anchor entity [\[7\]](#page-10-3), form the query. The query provides a precise definition of the anchor entity and thus serves as a good guide for PMR to extract answers (i.e., anchor texts) from the context.

Concretely, we considered the sentences within a window of size $W$ around the anchor as the MRC context and used the first $T$ sentences of the definition article as the query. Note that the context may cover multiple mentions of the same anchor entity. In this case, we treated all mentions as valid answers (i.e., $K > 1$) to avoid confusing the model during training. More importantly, this scenario naturally resembles multi-span extraction tasks like NER. To prevent information leakage, we anonymized the anchor entity in the query by using "it" to substitute text spans that overlap more than 50% with the anchor entity name. We did not use the "[MASK]" token because it does not exist in the data of downstream tasks.

In addition to the answerable query-context pairs prepared through hyperlink relations, we introduced unanswerable examples by pairing a context with an irrelevant query (i.e., a query-context pair without the hyperlink association). The unanswerable examples are designed to help the model learn to judge passage-level relevance and to avoid extracting any answer (i.e., $K = 0$) for such examples.
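
The construction procedure might be sketched as follows; the record layout for anchors and pages, the sentence segmentation, the negative-sampling ratio, and the simplified anonymization (replacing only exact surface matches with "it", rather than spans with more than 50% overlap) are all our assumptions.

```python
# Sketch of building WAE triplets from Wikipedia anchors (data model assumed).
import random

def build_wae_examples(anchors, pages, W=2, T=4, neg_ratio=0.3):
    """anchors: dicts with mention_page, sent_idx, target_page, surface;
    pages: page title -> list of sentences. W and T follow the paper."""
    examples = []
    for a in anchors:
        sents = pages[a["mention_page"]]
        lo, hi = max(0, a["sent_idx"] - W), a["sent_idx"] + W + 1
        context = " ".join(sents[lo:hi])
        # Query: first T sentences of the definition article, with a crude
        # "it"-anonymization of the anchor entity name.
        query = " ".join(pages[a["target_page"]][:T]).replace(a["surface"], "it")
        # Every mention of the anchor inside the window is a valid answer.
        examples.append({"query": query, "context": context,
                         "answers": [a["surface"]]})
        # Unanswerable negatives: the same context with an irrelevant query.
        if random.random() < neg_ratio:
            other = random.choice(anchors)
            if other["target_page"] != a["target_page"]:
                neg_query = " ".join(pages[other["target_page"]][:T])
                examples.append({"query": neg_query, "context": context,
                                 "answers": []})
    return examples
```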

We unified downstream span extraction tasks under our MRC formulation. Such tasks typically fall into two categories: (1) span extraction with pre-defined labels (e.g., NER), in which each task label is treated as a query used to search for the corresponding answers in the input text (the context); and (2) span extraction with natural questions (e.g., EQA), in which the question is treated as the query for answer extraction from the given passage (the context). In the output space, we tackled span extraction by predicting the probability $S_{i,j}$ of the context span $X_{i:j}$ being the answer. The detailed formulation and examples are provided in Appendix [A.2](#page-14-0).
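
To illustrate this fine-tuning interface, the sketch below decodes NER predictions from the score matrix of a single (label query, context) pair; the label-to-query template and the 0.5 decision threshold are our assumptions rather than the paper's released configuration.

```python
# Sketch of NER as MRC at inference: run PMR once per label query, then
# keep context spans whose score S_{i,j} clears a threshold.
import torch

def decode_spans(S: torch.Tensor, context_idx: range, threshold: float = 0.5):
    """S: (M, M) span scores; context_idx: token indices of the context."""
    spans = []
    for i in context_idx:
        for j in context_idx:
            if i <= j and S[i, j] > threshold:
                spans.append((i, j, S[i, j].item()))
    return spans

# e.g., query = "person. Person entities include named or fictional people."
# (an assumed template), context = the sentence to tag; repeat per label.
```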
2302.01520/main_diagram/main_diagram.drawio
ADDED
@@ -0,0 +1 @@
<mxfile host="app.diagrams.net" modified="2023-01-20T08:20:41.242Z" agent="5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/102.0.0.0 Safari/537.36" etag="FLAZWjRn5DOYpJaWnLjS" version="20.8.5" type="device"><diagram id="PX8xwPn50SSuaM-9N3OH" name="Page-1">7Vxtc6I6FP41ztz7oQ4kJMDHVdt9uXu3nWtndrdfOhSiZoriAm51f/0NkijEaKEXEPbKdCycxBDzPDnn5HBIDw7n6/ehs5z9HXjE7wHNW/fgqAcA1m32mQg2qQDYXDANqZeK9L1gTH8RLtS4dEU9EuUqxkHgx3SZF7rBYkHcOCdzwjB4yVebBH7+rktnyu+o7QVj1/HJQbWv1ItnqdQC5l7+gdDpTNxZx/z3zR1RmTcRzRwveMncC1734DAMgjg9m6+HxE/GToxL2qGbI6W7joVkERf5wpenXw9Xn+5u3b/fG9/iL2gFHn9cIZw289PxV/wX897GGzEErOPL5NRdPbF/g5cZjcl46biJ7IWBzmSzeO6zK52dPgWrhUe8z087geM+T8NEeruKfbogXO454fMta4bGCTW0vobyQrCVJjUn1PeHgR+E2/5ACG/YweRRHAbPRJQsgqTpQZRSiPEHDg6HiI/aTxLGZJ0R8SF7T4I5icMNq8JLEea84PzVbZRev+zZAHmVWYYIQuZw/k13Le8hYiccpTKIoS4ilmDWFGJG2xAzuoiYbTeHGGgZYrB7gBHdQ8Q8AIyV2NiEDq4eNV2aZ1CgmEFN15qErcQ8m/hk/S5xD9hYkIXHT0eu70QRdfPghSl4ycintYUjoGt9YCFTN8QnTsu5F2P0tYNSMTZY8c3yiB5FkXjCqzmCYQYjpIBIyELiOzH9mfeFVLjxO9wFlPVkTxFLooguQR8Fq9Al/FtZ30VqaMeZnRcpNRQ74ZTEBw1tabT72W9nVge9JM8h1sRV0Qe7Fnma1K8QdPvcCsH8nWE7qQCKowYk1IBxaHxVKgLUBZrVPdCKG9+6QLPODJpQ7L+J7S0wC7tmew+07JttL2rW9uqgjEII/c0gZPObxK8rhj25tnPap8sP/Nx3noh/F0Q0psGCycIUnkEyQ6nr+J+l8jn1vKQvA8en00Tgk0m2/jsu3tWbBIv4xplTPxnUezonEftBX8gL+/wnmDuLHfUE30E1igNqMpiHikNXkNKoTXF0cK11jYZDxeL4iI2eho5HGWKZaiZxMKnI64LS4hngM5sC0EGvq1BIsSLbbWC7j151ufRdpWZA66DXVSiqWBVoIlDeIsjsLkJWIKxYFWQGbBtksISX3BbIjrjC9UAmu5otgEzvHmQ3Nw3OskNrpliLNg1aqSVDO0Azhppm22eyZi2ArIPrgOHwjNasBZB18LnmAGDcGGSyNWsBZCWSBy5huibCdFAO05lvDNMZchxARHYaCtPBEs/IOkCt3+Dp6wG1rKqoZTdMrQLBiXI0KhB+TaqMMw9IyZrG37ZmxcSIX39PjU+CwPZ6tBY0TS42mYs7ErK7xCTkshLB3RSlE8MjAgEpCicqiiBuW/kpB5kL81MytHrD2QGwQCSmSX4ikOOnbaPS/GQkDDdpc0hcfs+W7ZvaXm1q4LVYErzOa6NVvDaw2bd107DFIdllrS86XJroiLWsWdhGGAHdhHZ+5Q1lv7Fm3hsFwlmsGbqMCvj+TrRM87cndJ3Mg5JzAIhpk7HU2vZQ2XCpJMvXrB/AlwVHKVxm+caQyx5GnhQaMyPZg1M/Q1igIGxt6wSjQNSr08hma1YBL8J94xS8Vh+dhhceogvrArdAdKw5c5U1VVkzpes5QwVet1Q5O7U3Ww1YKtxNBwxUlSICZQdMXmnUbYhUwUMj+UPDiE7nzh/sxPWC+E8ulunOpnmcJ7gyPqSwDSLhw2WkSTh5POVDpSXz0yhPSf1tE6sKbQYM5kTmqaFI7lal5AO9j47T8L+prQLhxk7bpMa8jd10PZt7USAM2Wkom3UvILT6ijdmGvQoCsT+Lh5FqbVvUY+iXUtfOS8ZGpI1KL7StU43VLdHoUpiu3gUFXoUUMNn9yhUcWXsx3wMcoDiH6tAFFylzwnfsQo6Wq63QyTK2dk0/Z/Q4uP9YyAIkjbMOpq2LapdeHPqlSKct3SqN0OxgjT1vWKoCvVWSpnxhTKVUgaCM1MGqaKklVLm4cKXKvPnDxdHKo+pPr50MBOqUJJoZdsFSO+wnH27ANTBNx4K5YjWtYmKSic3i1gHX3colCBa1yYq50esg287FMoPrWsTlbMjhlV+T8sRK5QeWpcdOz9iRZ761hnG20XY0swTDVj5CJxhgldicNurOpOthF58NTIn3LiWROagLTlNb84zlXf5aDjpBIP/PUlbQqltnpNtaabFFt0IWXI2kqH1IeMdstkNWbn5Nr4xLdm3jMPcid2zD7uPrWYZqFqgVR02dC9r+up8IXDuGJCISdUaNrxQpkrKNBhpvvuqgeGnh3uwWD/ejm6BMbLurlS+WI4xE8fNE0Y1Vhm2FKCYcYRiHwKfRgzhq/sZXTzTxVR05SkUVW6IE69C0i3yVcAbIPlVQJFboat2VKmCONHPMf0RwetxaK3x6K+N9bzZKDevLKlqwElV8/XxoYeue+bg433PHPXQgD8K7aFRFzVQVpkY29LY4dv67DlyQAgFbY4/kkAyR5rTLUqKVGCNClJkfKHI2yhiGnVRhF3uNx1PfeL9zu3w+l8=</diagram></mxfile>

2302.01520/main_diagram/main_diagram.pdf
ADDED

Binary file (16.6 kB)

2302.01520/paper_text/intro_method.md
ADDED

@@ -0,0 +1,133 @@
# Introduction

Object navigation [@COS-POMDP; @ForeSI; @VTNet; @RES-StS] is a challenging task that requires an agent to find a target object in an unknown environment with first-person visual observations. Due to the task's complexity, numerous techniques have been developed to advance this field by incorporating different inductive biases (Figure [1](#compare_method_model){reference-type="ref" reference="compare_method_model"} (a)). However, the object navigation field has not yet formed a unified inductive-bias paradigm comparable to those in the CV [@Tobias; @convit] or NLP [@NLM_IB; @sq2sq_IB] fields. Motivated by this gap, we generalize and distill the current mainstream methods into a meta-ability decoupling (MAD) paradigm, hoping to unify and connect various object navigation methods.

This paper involves two important new concepts: meta-ability and thinking. **Meta-ability** refers to each essential ability needed to complete a complex task. For instance, solving a mathematical problem requires the integration of various meta-abilities such as text comprehension, logical reasoning, and conceptual abstraction. Without these meta-abilities, relying solely on intuition is insufficient to complete complex tasks. **Thinking** refers to the information abstraction for a certain ability. Typically, this abstraction is modeled end-to-end using neural networks.

<figure id="compare_method_model" data-latex-placement="t">
<div class="center">
<embed src="image/compare_method_model.pdf" />
</div>
<figcaption>(a) Existing methods directly improve the overall object navigation ability by introducing various inductive biases into the black box model. (b) Our proposed meta-ability decoupling (MAD) paradigm decomposes the overall object navigation ability into multiple meta-abilities, and designs specific inputs, thinking encoders, and rewards for each meta-ability.</figcaption>
</figure>

According to the definition of meta-ability and thinking, we summarize the current mainstream object navigation methods and identify their limitations. As shown in Figure [2](#related_works){reference-type="ref" reference="related_works"}, object navigation methods are divided into four categories: association methods [@DOA; @HOZ], memory methods [@DUET; @OMT], deadlock-specialized methods [@TPN; @MAAD] and SLAM methods [@SemExp; @SSCNav]. The different inductive biases introduced by these four types of methods determine which meta-abilities are emphasized and which are overlooked. Therefore, the existing methods all attempt to use biased thinking to abstract the ultimate ability for object navigation (Figure [1](#compare_method_model){reference-type="ref" reference="compare_method_model"} (a)). Nevertheless, due to the sparsity and ambiguity of the reward signal, it is challenging for biased thinking to implicitly decouple the complete set of meta-abilities that are crucial in object navigation.

To address the above issues, we propose a meta-ability decoupling (MAD) paradigm (Figure [1](#compare_method_model){reference-type="ref" reference="compare_method_model"} (b)), which solves embodied AI tasks in five stages: (1) selecting meta-abilities based on prior knowledge; (2) determining the input features of each thinking according to the characteristics of its corresponding meta-ability; (3) designing suitable encoding networks for each thinking; (4) designing the collaboration modules between different thinkings according to the characteristics of the task; (5) designing rewards and punishments for each meta-ability. During this process, meta-abilities are decoupled in three aspects: input, encoding, and reward signals. In this paper, we primarily focus on the investigation of object navigation tasks; however, we believe that the MAD paradigm can be extended to other similar embodied AI tasks.

Guided by the MAD paradigm, we design a multiple thinking (MT) model for the object navigation task. First, we select five meta-abilities (explained in Sec. [3](#sec:MAD){reference-type="ref" reference="sec:MAD"}): intuition, search, navigation, exploration and obstacle. Subsequently, for these five meta-abilities, we use overall image features, object detection features, target-oriented memory, historical state memory, and obstacle location memory as input for the corresponding thinking. Each thinking uses a simple encoding network with the necessary inductive bias. Furthermore, we devise a multiple thinking collaboration (MTC) module to facilitate cooperation between the different meta-abilities. Finally, a meta-ability reward is designed to guide each thinking's abstract understanding of the corresponding meta-ability.

Extensive experiments on the AI2-Thor [@AI2-THOR] and RoboTHOR [@RoboTHOR] datasets show that our MAD paradigm outperforms SOTA methods not only on the typical object navigation task, but also on the zero-shot object navigation task. Moreover, an interpretability analysis of the MT model based on MAD demonstrates that our method contributes significantly to both the interpretability and flexibility of object navigation tasks. Our contributions can be summarized as follows:

- We propose a general meta-ability decoupling (MAD) paradigm to generalize and unify various current object navigation approaches.

- Following the MAD paradigm, we design a multiple thinking (MT) model for the object navigation task, which outperforms existing models in both typical and zero-shot object navigation tasks.

- Our meta-ability interpretability framework provides a novel analytical mode for future researchers.

<figure id="related_works" data-latex-placement="t">
<div class="center">
<embed src="image/related_works.pdf" />
</div>
<figcaption><strong>Summary of various object navigation methods</strong>. We categorize the mainstream methods for object navigation into four classes, which achieve the enhancement of certain meta-abilities by improving the neural network.</figcaption>
</figure>

# Method

Association methods can be divided into three categories, from fine-grained to coarse: object association, zone association, and room association. Representative object association methods include SP [@SP], DOA [@DOA], and CKR [@CKR]. SP and DOA rely only on data from the environmental scene to model the spatial correlation between known objects. CKR incorporates semantic correlation between objects from a large-scale external knowledge graph into the model. HOZ [@HOZ] proposes zone association to guide an agent in a coarse-to-fine manner. BRM [@BRM] takes the form of a probabilistic room relation graph to capture the layout prior.

Memory methods explicitly store a large amount of historical information, such as visual features, coordinate features, and object features. VGM [@VGM] is constructed incrementally based on the similarities among the unsupervised representations of observed images, and these representations are learned from an unlabeled image dataset. OMT [@OMT] uses a transformer to encode salient objects stored in memory. DUET [@DUET] proposes joint long-term action planning to enable efficient exploration in the global action space.

Traditional navigation methods in known environments all depend on SLAM maps, so exploring unknown environments through real-time mapping is also a viable approach. Due to the high cost of mapping, most methods now choose rougher semantic maps. GOSE [@GOSE] builds an episodic semantic map and uses it to explore the environment efficiently based on the goal object category. SSCNav [@SSCNav] explicitly models scene priors using a confidence-aware semantic scene completion module to complete the scene and guide the agent's navigation planning. PONI [@PONI] proposes a network that predicts two complementary potential functions conditioned on a semantic map and uses them to decide where to look for an unseen object.

Deadlock-specialized modules are frequently a part of the overall method, helping the agent break out of cyclic states. TPN [@TPN] employs a pre-trained primary model to explore the environment and provides expert actions for deadlock states. SAVN [@SAVN] uses the similarity of observation data as the basis for determining the success of actions and incorporates it into the loss function.

In the main text, we introduce new metrics to evaluate four meta-abilities. In this chapter, we provide a more detailed explanation of these four metrics. In order to differentiate search ability from navigation ability, we divide each episode into a "search for" phase and a "navigate to" phase, using the first target-visible frame as the boundary. The agent primarily relies on its search ability to locate the target object during the "search for" phase. Once the target object is observed, the agent enters the "navigate to" phase and primarily relies on its navigation ability to reach the location of the target object.

SSR is the success rate of the "search for" phase and is formulated as $$\begin{equation}
SSR = \frac{1}{F}\sum_{i=1}^{F}Nav_i
\end{equation}$$ where $Nav_i$ indicates whether the $i$-th episode enters the "navigate to" phase.

NSNPL considers the navigation efficiency during the "navigate to" phase and is defined as: $$\begin{equation}
NSNPL=\frac{1}{F_{Nav}}\sum_{i=1}^{F}Suc_i Nav_i\frac{L_i^{*Nav}}{\max(L_i^{Nav}, L_i^{*Nav})}
\end{equation}$$ where $Suc_i$ indicates whether the $i$-th episode succeeds and $F_{Nav}$ is the number of episodes that enter the "navigate to" phase. $L_i^{Nav}$ is the path length in the "navigate to" phase and $L_i^{*Nav}$ is the theoretical shortest path length in the "navigate to" phase. During testing, we calculate $L_i^{*Nav}$ in real time according to the starting position of the "navigate to" phase (the position where the agent first recognizes the target) in each task path. Intuitively, NSNPL can be viewed as the SPL of the "navigate to" phase.

REP, which measures the probability of the agent returning to previously visited states, reflects the efficiency of exploring the environment: $$\begin{equation}
REP = \frac{1}{F}\sum_{i=1}^{F}\frac{L_i - RS_i}{L_i}
\end{equation}$$ where $RS_i$ is the number of distinct agent states encountered in the $i$-th episode.

In the real world, collisions with obstacles should be avoided as much as possible. CP reflects the proportion of actions throughout the episode that resulted in collisions with obstacles: $$\begin{equation}
CP = \frac{1}{F}\sum_{i=1}^{F}\frac{OA_i}{L_i}
\end{equation}$$ where $OA_i$ is the number of obstacle collisions in the $i$-th episode.

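For concreteness, here is a minimal sketch of how these four metrics could be computed from logged episodes; the per-episode record field names are illustrative assumptions, not the paper's code.

```python
def meta_ability_metrics(episodes):
    """Compute SSR, NSNPL, REP, CP from per-episode records (fields assumed)."""
    F = len(episodes)
    ssr = sum(e["nav"] for e in episodes) / F                        # SSR
    F_nav = sum(e["nav"] for e in episodes)
    nsnpl = sum(                                                     # NSNPL
        e["suc"] * e["nav"] * e["L_nav_star"] / max(e["L_nav"], e["L_nav_star"])
        for e in episodes
    ) / max(F_nav, 1)
    rep = sum((e["L"] - e["RS"]) / e["L"] for e in episodes) / F     # REP
    cp = sum(e["OA"] / e["L"] for e in episodes) / F                 # CP
    return ssr, nsnpl, rep, cp
```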
In the main text, we compare only the SR and SPL metrics, so the analysis of meta-ability indicators for the various methods is insufficient there. Table [\[tab:SOTA_AI2\]](#tab:SOTA_AI2){reference-type="ref" reference="tab:SOTA_AI2"} and Table [\[tab:SOTA_ROBO\]](#tab:SOTA_ROBO){reference-type="ref" reference="tab:SOTA_ROBO"} comprehensively present the performance metrics of various methods on the AI2-Thor and RoboTHOR datasets, respectively. SLAM methods and deadlock-specialized methods belong to modular methods, while memory methods and association methods belong to end-to-end methods. Our MAD paradigm, while retaining end-to-end training, incorporates the advantages of the above methods and provides a clear theoretical framework for future research.

SLAM methods based on the AI2-Thor and RoboTHOR platforms are relatively rare; therefore, we adapt the SOTA methods (SSCNav [@SSCNav] and PONI [@PONI]) from the Habitat [@Habitat] platform to the AI2-Thor and RoboTHOR datasets. SLAM methods commonly use waypoint prediction to guide the agent's navigation. This discrete navigation mode greatly prolongs the path used to search for the target, thus reducing the overall SPL. More seriously, building an accurate map requires a significant amount of computational resources and exploration time, resulting in episode times several times longer than those of other methods. However, it is clear that SLAM methods obtain a strong navigation ability (NSNPL): once a SLAM method correctly establishes a semantic map of the target and its surroundings, navigating to the target location becomes much easier. Another important reason why SLAM methods are favored by some researchers is their strong interpretability. We hope that, building on the MAD paradigm, the interpretability of end-to-end methods in navigation tasks will gradually improve.

Currently, most memory methods model historical memory in a crude form. Although mining meta-abilities from all available historical information may enhance the overall ability of the model, particularly its exploration ability (REP), the redundant information structure decreases generalizability and can even harm other meta-abilities. The exploration thinking in our MT model draws on the historical memory structure of memory methods, but our memory features are more streamlined, thereby reducing the learning burden of the model.

In [@SAVN], the deadlock problem in the navigation process began to receive attention. TPN [@TPN] utilizes a supervised-trained deadlock escape module to bring REP and CP to 5.83/9.76 and 5.22/10.47 (AI2-Thor/RoboTHOR, %), respectively. However, this extrinsic deadlock-specialized module requires a significant amount of human-annotated escape actions; if the dataset changes, a significant annotation cost has to be incurred again. Our MT method, while ensuring model scalability, yields REP and CP metrics that are 1.80/2.14 and 0.29/0.56 (AI2-Thor/RoboTHOR, %) lower than those of the TPN method.

Association methods primarily learn the intrinsic correlation between objects to accelerate the visual capture of the target object, thereby yielding a strong search ability (SSR). However, excessive focus on semantic information at the object level may overlook navigation details, as evidenced by excessively high REP and CP. Our MT method, by decoupling more meta-abilities, helps association methods optimize environment exploration and obstacle avoidance, thus improving SR and SPL by 8.82/5.95 and 9.96/7.02 (AI2-Thor/RoboTHOR, %), respectively, with almost no additional parameters introduced.

::: table*
Results on the AI2-Thor dataset. The first six metric columns are computed over ALL episodes (%); the next six over episodes with $L\ge 5$ (%).

| Method | SR$\uparrow$ | SPL$\uparrow$ | SSR$\uparrow$ | NSNPL$\uparrow$ | REP$\downarrow$ | CP$\downarrow$ | SR$\uparrow$ | SPL$\uparrow$ | SSR$\uparrow$ | NSNPL$\uparrow$ | REP$\downarrow$ | CP$\downarrow$ | Episode Time (s)$\downarrow$ |
|:--:|:--:|:--:|:--:|:--:|:--:|:--:|:--:|:--:|:--:|:--:|:--:|:--:|:--:|
| SSCNav | 77.14 | 31.09 | 89.14 | 51.72 | 5.14 | **4.58** | 71.73 | 34.33 | 89.02 | 50.73 | 7.30 | 5.41 | 1.342 |
| PONI | 78.58 | 33.78 | 89.48 | **52.39** | 5.29 | 4.90 | 72.92 | 36.40 | 89.13 | **51.82** | 7.64 | 5.75 | 1.591 |
| OMT | 71.13 | 37.27 | 93.17 | 41.36 | 4.62 | 9.88 | 61.94 | 38.19 | 92.23 | 42.63 | 6.81 | 10.74 | 0.645 |
| VGM | 73.95 | 40.69 | 93.20 | 44.21 | 4.51 | 9.30 | 64.07 | 40.73 | 92.14 | 45.97 | 6.62 | 10.14 | 0.714 |
| TPN | 67.32 | 37.01 | 91.07 | 40.24 | 5.83 | 5.22 | 58.13 | 35.90 | 90.27 | 38.69 | 8.06 | 6.89 | 0.241 |
| HOZ | 68.53 | 37.50 | 91.44 | 40.83 | 8.32 | 10.77 | 60.27 | 36.61 | 90.31 | 39.82 | 11.54 | 11.36 | 0.283 |
| VTNet | 72.24 | 44.57 | 94.18 | 46.74 | 7.91 | 10.71 | 63.19 | 43.84 | 92.85 | 46.15 | 10.88 | 11.52 | 0.321 |
| DOA | 74.32 | 40.27 | 95.82 | 44.11 | 7.14 | 10.26 | 67.88 | 40.36 | 93.92 | 44.03 | 10.39 | 10.95 | 0.334 |
| **MT** | **83.14** | **50.23** | **97.76** | 51.39 | **4.03** | 4.93 | **77.03** | **50.88** | **96.47** | 51.49 | **6.25** | **5.26** | 0.352 |
:::

::: table*
Results on the RoboTHOR dataset. The first six metric columns are computed over ALL episodes (%); the next six over episodes with $L\ge 5$ (%).

| Method | SR$\uparrow$ | SPL$\uparrow$ | SSR$\uparrow$ | NSNPL$\uparrow$ | REP$\downarrow$ | CP$\downarrow$ | SR$\uparrow$ | SPL$\uparrow$ | SSR$\uparrow$ | NSNPL$\uparrow$ | REP$\downarrow$ | CP$\downarrow$ | Episode Time (s)$\downarrow$ |
|:--:|:--:|:--:|:--:|:--:|:--:|:--:|:--:|:--:|:--:|:--:|:--:|:--:|:--:|
| SSCNav | 38.12 | 14.10 | 61.37 | 35.14 | 8.93 | 10.83 | 33.46 | 11.04 | 60.91 | 33.92 | 10.17 | 13.44 | 4.145 |
| PONI | 38.42 | 16.30 | 58.46 | **39.83** | 8.32 | 11.22 | 34.72 | 13.22 | 58.11 | **38.44** | 11.64 | 14.27 | 4.582 |
| OMT | 32.17 | 20.09 | 61.77 | 24.51 | 7.72 | 16.45 | 25.33 | 18.16 | 57.35 | 23.82 | 9.70 | 18.83 | 2.011 |
| VGM | 33.95 | 22.74 | 62.10 | 25.96 | 8.21 | 15.81 | 26.82 | 19.44 | 57.51 | 24.77 | 10.66 | 18.25 | 1.984 |
| TPN | 30.51 | 18.62 | 59.64 | 20.64 | 9.76 | 10.47 | 23.89 | 14.91 | 54.64 | 19.51 | 12.28 | 13.51 | 0.769 |
| HOZ | 31.67 | 19.02 | 60.11 | 21.02 | 12.49 | 18.55 | 24.32 | 14.81 | 54.23 | 20.38 | 15.79 | 22.02 | 0.808 |
| VTNet | 33.92 | 23.88 | 63.29 | 28.26 | 11.26 | 17.04 | 26.77 | 19.80 | 57.72 | 27.50 | 14.63 | 21.10 | 1.325 |
| DOA | 36.22 | 22.12 | 64.18 | 25.88 | 11.33 | 17.14 | 30.16 | 18.32 | 61.39 | 25.11 | 14.82 | 21.52 | 1.247 |
| **MT** | **42.17** | **29.14** | **68.05** | 36.68 | **7.62** | **9.91** | **37.98** | **23.80** | **66.93** | 36.50 | **9.25** | **12.48** | 1.225 |
:::

We categorize the 22 objects into two classes, seen and unseen. Based on the split proportion, there are two experimental setups: (1) 18/4: one object is extracted from each scene type (bedroom, living room, kitchen, and bathroom) and placed into the unseen objects category; (2) 14/8: two objects are extracted from each scene type and placed into the unseen objects category. Once the target set has been divided, it is not changed. During training, seen objects are used as targets to be found, and unseen objects cannot be recognized by detectors such as object detection or instance segmentation. During testing, the agent is instructed to navigate to any given target set.

The MT model is not directly suitable for the zero-shot object navigation task, because it encodes object semantics using one-hot encoding and employs a fixed-size object attention matrix, thereby limiting the number of object categories from the model's perspective. The zero-shot object navigation task requires the agent to locate an arbitrary set of target objects. Therefore, we represent object semantics using Glove encoding [@Glove] and base object association on the semantic cosine similarity relative to the target. The continuous semantic space centered around the target allows the agent to accept requests for finding any target.
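As an illustration of this zero-shot adaptation, the sketch below scores detected object categories by GloVe cosine similarity to an arbitrary target word; the `glove` lookup table and class names are assumptions for illustration, not the paper's code.

```python
import numpy as np

def target_similarity(glove, detected_classes, target):
    """Cosine similarity of each detected class to the target in GloVe space."""
    t = glove[target]                                   # e.g. a 300-d word vector
    sims = []
    for c in detected_classes:
        v = glove[c]
        sims.append(v @ t / (np.linalg.norm(v) * np.linalg.norm(t) + 1e-8))
    return np.asarray(sims)  # continuous semantic space centred on the target
```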
2303.10971/main_diagram/main_diagram.drawio
ADDED

The diff for this file is too large to render. See raw diff

2303.10971/paper_text/intro_method.md
ADDED

@@ -0,0 +1,102 @@
# Introduction

Matching 3D shapes, i.e. finding correspondences between their parts, is a fundamental problem in computer vision and computer graphics that has a wide range of applications [@dinh2005texture; @loper2015smpl; @eisenberger2021neuromorph]. Even though it has been studied for decades [@tam2012registration; @van2011survey], the non-rigid shape matching problem remains highly challenging. One often faces a large variability in terms of shape deformations, or input data with severe noise and topological changes.

<figure id="fig:teaser" data-latex-placement="t!">
<div class="minipage">
<embed src="main/figures/teaser.pdf" style="height:4cm" />
</div>
<p> </p>
<div class="minipage">
<embed src="main/figures/dpc_deepshells.pdf" style="height:4.5cm" />
</div>
<figcaption> <strong>Left:</strong> Our method obtains accurate correspondences for triangle meshes, point clouds and even partially observed point clouds. <strong>Right:</strong> Proportion of correct keypoints (PCK) curves and mean geodesic errors (scores in the legend) on the SHREC’19 dataset <span class="citation" data-cites="melzi2019shrec"></span> for meshes (solid lines) and point clouds (dashed lines). Existing point cloud matching methods (DPC <span class="citation" data-cites="lang2021dpc"></span>, green line), or mesh-based methods applied to point clouds (Deep Shells <span class="citation" data-cites="eisenberger2020deep"></span>, red dashed line) are unable to meet the matching performance of mesh-based methods (solid lines). In contrast, our method is multimodal and can process both meshes and point clouds, while enabling accurate shape matching with comparable performance for both modalities (blue lines). </figcaption>
</figure>

With the recent success of deep learning, many learning-based approaches were proposed for 3D shape matching [@litany2017deep; @masci2015geodesic; @groueix20183d; @eisenberger2020deep]. While recent approaches demonstrate near-perfect matching accuracy without requiring ground truth annotations [@cao2022unsupervised; @eisenberger2020deep], they are limited to 3D shapes represented as triangle meshes and strongly rely on clean data.

::: table*
| Methods | Unsup. | Mesh | Point Cloud | FM-based | Partiality | Robustness w.o. Refinement | Required train data$^{**}$ |
|---|:--:|:--:|:--:|:--:|:--:|:--:|:--:|
| FMNet [@litany2017deep] $^*$ | | | | | | | Small |
| GeomFMaps [@donati2020deep] $^*$ | | | | | | | Small |
| DiffFMaps [@marin2020correspondence] | | | | | | | Moderate |
| DPFM [@attaiki2021dpfm] $^*$ | | | | | | | Small |
| 3D-CODED [@groueix20183d] | | | | | | | Large |
| IFMatch [@sundararaman2022implicit] | | | | | | | Moderate |
| UnsupFMNet [@halimi2019unsupervised] $^*$ | | | | | | | Small |
| SURFMNet [@roufosse2019unsupervised; @sharma2020weakly] $^*$ | | | | | | | Small |
| Deep Shells [@eisenberger2020deep] $^*$ | | | | | | | Small |
| ConsistFMaps [@cao2022unsupervised] $^*$ | | | | | | | Small |
| CorrNet3D [@zeng2021corrnet3d] | | | | | | | Large |
| DPC [@lang2021dpc] | | | | | | | Moderate |
| Ours | | | | | | | Small |

[]{#tab:comparison label="tab:comparison"}
:::

Since point clouds are a common representation for real-world 3D data, many unsupervised learning approaches were specifically designed for point cloud matching [@groueix2019unsupervised; @zeng2021corrnet3d; @lang2021dpc]. These methods are often based on learning per-point features, so that point-wise correspondences are obtained by comparing feature similarities. The learned features were shown to be robust under large shape deformations and severe noise. However, although point clouds commonly represent samples of a surface, respective topological relations are not explicitly available and thus cannot effectively be used during training. In turn, existing point cloud correspondence methods are unable to meet the matching performance of mesh-based methods, as can be seen in [1](#fig:teaser){reference-type="ref+label" reference="fig:teaser"}. Moreover, when applying state-of-the-art unsupervised methods designed for meshes (e.g. Deep Shells [@eisenberger2020deep]) to point clouds, one can observe a significant drop in matching performance.

In this work, we propose a self-supervised learning framework to address these shortcomings. Our method uses a combination of triangle meshes and point clouds (extracted from the meshes) for training. We first utilise the structural properties of functional maps for triangle meshes as strong unsupervised regularisation. At the same time, we introduce a self-supervised contrastive loss between triangle meshes and corresponding point clouds, enabling the learning of consistent feature representations for both modalities. With that, our method does not need to compute functional maps for point clouds at inference time, but directly predicts correspondences based on feature similarity comparison. Overall, our method is the first learning-based approach that combines a unique set of desirable properties, i.e. it can be trained without ground-truth correspondence annotations, is designed for both triangle meshes and point clouds (throughout this paper we refer to this as *multimodal*), is robust against noise, allows for partial shape matching, and requires only a small amount of training data, see [\[tab:comparison\]](#tab:comparison){reference-type="ref+label" reference="tab:comparison"}. In summary, our main contributions are:

- For the first time we enable *multimodal* non-rigid 3D shape matching under a simple yet efficient *self-supervised learning* framework.

- Our method achieves accurate matchings for triangle meshes based on *functional map regularisation*, while ensuring matching robustness for less structured point cloud data through *deep feature similarity*.

- Our method outperforms *state-of-the-art* unsupervised and even supervised methods on several challenging 3D shape matching benchmark datasets and shows previously unseen *cross-dataset generalisation ability*.

- We extend the SURREAL dataset [@varol2017learning] by SURREAL-PV, which exhibits disconnected components in partial views as they occur in 3D scanning scenarios.

# Method

[]{#subsec:fmaps label="subsec:fmaps"} Our approach is based on the functional map framework, which we recap in the following. Unlike finding point-to-point correspondences, which often leads to NP-hard combinatorial optimisation problems [@lawler1963quadratic], the functional map framework encodes the correspondence relationship into a small matrix that can be solved efficiently [@ovsjanikov2012functional].

**Basic pipeline.** We are given a pair of 3D shapes $\mathcal{X}$ and $\mathcal{Y}$ with $n_x$ and $n_y$ vertices, respectively. The functional map framework uses truncated basis functions, i.e. the first $k$ LBO eigenfunctions [@pinkall1993computing] $\Phi_{x} \in \mathbb{R}^{n_{x} \times k}, \Phi_{y} \in \mathbb{R}^{n_{y} \times k}$, to approximate given features defined on each shape $\mathcal{F}_{x} \in \mathbb{R}^{n_{x} \times c}, \mathcal{F}_{y} \in \mathbb{R}^{n_{y} \times c}$. To this end, the corresponding coefficients $A=\Phi_{x}^{\dagger}\mathcal{F}_{x} \in \mathbb{R}^{k \times c}, B=\Phi_{y}^{\dagger}\mathcal{F}_{y} \in \mathbb{R}^{k \times c}$ are computed for each shape, respectively. The functional map $C_{xy} \in \mathbb{R}^{k \times k}$ can be computed by solving the continuous optimisation problem $$\begin{equation}
\label{eq:fmap}
C_{xy}=\mathrm{argmin}_{C}~ E_{\mathrm{data}}\left(C\right)+\lambda E_{\mathrm{reg}}\left(C\right),
\end{equation}$$ where minimising $E_{\mathrm{data}}=\left\|CA-B\right\|^{2}$ imposes descriptor preservation, whereas minimising the regularisation term $E_{\mathrm{reg}}$ imposes certain structural properties [@ovsjanikov2012functional], see [4.4](#sec:selfsuploss){reference-type="ref+label" reference="sec:selfsuploss"}. From the optimal $C_{xy}$, the point map $\Pi_{yx} \in \{0,1\}^{n_{y} \times n_{x}}$ can be recovered based on the relationship $\Phi_{y}C_{xy} \approx \Pi_{yx}\Phi_{x}$, e.g. either by nearest neighbour search or by other post-processing techniques [@melzi2019zoomout; @pai2021fast; @vestner2017product].
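To make the pipeline concrete, here is a minimal numpy sketch of the data term and point-map recovery. The simple ridge penalty stands in for the resolvent-mask regulariser used later, so this is an illustrative assumption rather than the exact solver.

```python
import numpy as np
from scipy.spatial import cKDTree

def functional_map(Phi_x, Phi_y, F_x, F_y, lam=1e-2):
    """Solve min_C ||C A - B||^2 + lam ||C||^2 (illustrative regulariser)."""
    A = np.linalg.pinv(Phi_x) @ F_x               # (k, c) spectral coefficients
    B = np.linalg.pinv(Phi_y) @ F_y               # (k, c)
    k = A.shape[0]
    return B @ A.T @ np.linalg.inv(A @ A.T + lam * np.eye(k))   # C_xy, (k, k)

def point_map(Phi_x, Phi_y, C_xy):
    """Recover Pi_yx from Phi_y C_xy ≈ Pi_yx Phi_x via nearest-neighbour rows."""
    _, idx = cKDTree(Phi_x).query(Phi_y @ C_xy)   # idx[j]: matched vertex on X
    return idx
```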

{#fig:framework width="\\linewidth"}

The whole framework of our approach is depicted in [2](#fig:framework){reference-type="ref+label" reference="fig:framework"}. Our approach aims to train a feature extraction network that can be used to extract expressive features for multimodal shape matching. To this end, we pursue a self-supervised training strategy using multimodal data that comprises meshes and point clouds extracted from these meshes.

To be precise, our multimodal training strategy utilises the shapes $\mathcal{X}$ and $\mathcal{Y}$ represented as triangle meshes, together with corresponding point clouds $\mathcal{\hat{X}}$ and $\mathcal{\hat{Y}}$ that we obtain by discarding the mesh connectivity information and perturbing the vertex coordinates. The same feature extraction network is used to process both triangle meshes and point clouds, resulting in pointwise features in both cases. Analogous to previous deep functional map methods [@roufosse2019unsupervised; @sharma2020weakly; @cao2022unsupervised], a non-trainable FM solver is used to compute bidirectional functional maps $C_{xy}, C_{yx}$ based on the features extracted from the triangle meshes. At the same time, the features extracted from the point clouds are used to construct a soft correspondence matrix $\hat{\Pi}_{xy}$ via feature similarity measurement. To enable the self-supervised training of our feature extractor, we use functional map regularisation. In addition, by using a contrastive loss we enforce that the features from the triangle meshes and the point clouds are similar. At inference time, the functional map framework (see [\[subsec:fmaps\]](#subsec:fmaps){reference-type="ref+label" reference="subsec:fmaps"}) is used for finding correspondences for 3D shapes represented as triangle meshes, while the correspondences for point clouds (or between triangle meshes and point clouds) are computed based on deep feature similarity, thereby avoiding the problem of point clouds only admitting an inaccurate estimation of the LBO eigenfunctions [@clarenz2004finite; @boscaini2016anisotropic; @sharp2020laplacian]. In the following, we explain the individual components of our method in detail.

The feature extractor aims to extract features of both triangle meshes and point clouds that are robust to shape deformations and to the sampling. To this end, we use the DiffusionNet architecture [@sharp2020diffusionnet] throughout our work, similar to other recent methods [@attaiki2021dpfm; @cao2022unsupervised]. DiffusionNet is based on an intrinsic surface diffusion process [@sharp2020diffusionnet] and leads to state-of-the-art performance in the context of shape matching [@attaiki2021dpfm; @cao2022unsupervised; @donati2022deep; @liu2022wtfm]. Moreover, DiffusionNet allows to extract features from both meshes and point clouds.

Following [@cao2022unsupervised], our feature extractor is used in a Siamese way, i.e. the same network with shared weights $\Theta$ is applied for both source shapes $\mathcal{X}, \hat{\mathcal{X}}$ and target shapes $\mathcal{Y}, \hat{\mathcal{Y}}$.

The goal of the functional map solver (FM solver) is to compute the bidirectional functional maps $C_{xy}, C_{yx}$ based on the extracted features $\mathcal{F}_{x}, \mathcal{F}_{y}$. The basic pipeline is explained in [\[subsec:fmaps\]](#subsec:fmaps){reference-type="ref+label" reference="subsec:fmaps"}. Analogous to previous methods [@attaiki2021dpfm; @cao2022unsupervised], we use a regularised functional map solver [@ren2019structured] to improve the robustness when computing the functional map. To this end, the regularisation term $E_{\mathrm{reg}}$ in [\[eq:fmap\]](#eq:fmap){reference-type="ref+label" reference="eq:fmap"} can be expressed in the form $$\begin{equation}
\label{eq:fmap_reg}
E_{\mathrm{reg}}=\sum_{i j} C_{i j}^{2} M_{i j},
\end{equation}$$ where $M_{i j}$ is the resolvent mask that can be viewed as an extension of Laplacian commutativity, see [@ren2019structured] for details.

The goal of the deep feature similarity module is to predict a correspondence matrix $\hat{\Pi}_{xy}$ to explicitly indicate the correspondences between given input point clouds $\hat{\mathcal{X}}$ and $\hat{\mathcal{Y}}$ with $n_{x}$ and $n_{y}$ points, respectively. Theoretically, $\hat{\Pi}_{xy}$ should be a (partial) permutation matrix, i.e. $$\begin{equation}
\label{eq:permutation_mat}
\left\{\Pi \in\{0,1\}^{n_{x} \times n_{y}}: \Pi \mathbf{1}_{n_{y}} \leq \mathbf{1}_{n_{x}}, \mathbf{1}_{n_{x}}^{\top} \Pi \leq \mathbf{1}_{n_{y}}^{\top}\right\},
\end{equation}$$ where the element at position $(i,j)$ of $\hat{\Pi}_{xy}$ indicates whether the $i$-th point in $\hat{\mathcal{X}}$ corresponds to the $j$-th point in $\hat{\mathcal{Y}}$. However, the construction of such a binary matrix is non-differentiable. To this end, a soft correspondence matrix is used to approximate the binary constraints [\[eq:permutation_mat\]](#eq:permutation_mat){reference-type="eqref" reference="eq:permutation_mat"} in practice [@eisenberger2021neuromorph; @cao2022unsupervised; @saleh2022bending; @zeng2021corrnet3d]. The key idea to construct the soft correspondence matrix $\hat{\Pi}_{xy}$ is based on the similarity measurement between features $\mathcal{F}_{x}$ and $\mathcal{F}_{y}$ defined on each shape. The construction process can be expressed in the form $$\begin{equation}
\label{eq:corr}
\hat{\Pi}_{xy} = \mathrm{Corr}\left(\langle\mathcal{F}_{x}, \mathcal{F}_{y}\rangle\right),
\end{equation}$$ where $\langle\cdot, \cdot\rangle$ is the $(n_x {\times} n_y)$-dimensional matrix of the dot products between pairs of feature vectors and $\mathrm{Corr(\cdot)}$ is an operator to construct a soft correspondence matrix based on the similarity matrix [@mena2018learning; @jang2017categorical].

In this work, we use Sinkhorn normalisation [@sinkhorn1967concerning; @mena2018learning] to construct the soft correspondence matrix. Sinkhorn normalisation iteratively normalises rows and columns of a matrix using the softmax operator. During inference, we quantise $\hat{\Pi}_{xy}$ to a binary matrix.

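A minimal log-space sketch of this normalisation follows; the temperature and iteration count are illustrative defaults, not the paper's values.

```python
import torch

def sinkhorn(sim, tau=0.05, n_iters=10):
    """Sinkhorn normalisation of a feature-similarity matrix (sketch)."""
    log_alpha = sim / tau                        # (n_x, n_y) similarity logits
    for _ in range(n_iters):
        # Alternately normalise rows and columns in log space (log-softmax).
        log_alpha = log_alpha - torch.logsumexp(log_alpha, dim=1, keepdim=True)
        log_alpha = log_alpha - torch.logsumexp(log_alpha, dim=0, keepdim=True)
    return log_alpha.exp()                       # soft correspondence matrix
```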
To train our feature extractor in a self-supervised manner, we combine unsupervised functional map regularisation [@roufosse2019unsupervised; @sharma2020weakly; @cao2022unsupervised] with self-supervised contrastive learning [@xie2020pointcontrast]. Our unsupervised functional map regularisation can be divided into two parts.

The first part regularises the structural properties of the predicted functional maps $C_{xy}, C_{yx}$. Following [@roufosse2019unsupervised], the functional map regularisation can be expressed in the form $$\begin{equation}
\label{eq:fmaps}
E_{\mathrm{fmap}} = \lambda_{\mathrm{bij}}E_{\mathrm{bij}} + \lambda_{\mathrm{orth}}E_{\mathrm{orth}}.
\end{equation}$$ In [\[eq:fmaps\]](#eq:fmaps){reference-type="ref+label" reference="eq:fmaps"}, $E_{\mathrm{bij}}$ is the bijectivity constraint, which ensures that the map from $\mathcal{X}$ through $\mathcal{Y}$ back to $\mathcal{X}$ is the identity map, and $E_{\mathrm{orth}}$ is the orthogonality constraint, which promotes a locally area-preserving matching; see [@cao2022unsupervised] for more details.
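These two penalties have a standard form (identity deviation of the composed maps, and of $C^{\top}C$); a short sketch, with placeholder loss weights:

```python
import torch

def fmap_regularisation(C_xy, C_yx, w_bij=1.0, w_orth=1.0):
    """E_fmap sketch: bijectivity and orthogonality penalties (weights assumed)."""
    k = C_xy.size(0)
    I = torch.eye(k, device=C_xy.device)
    E_bij = ((C_xy @ C_yx - I) ** 2).sum() + ((C_yx @ C_xy - I) ** 2).sum()
    E_orth = ((C_xy.t() @ C_xy - I) ** 2).sum() + ((C_yx.t() @ C_yx - I) ** 2).sum()
    return w_bij * E_bij + w_orth * E_orth
```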
The second part regularises the predicted soft correspondence matrix $\hat{\Pi}_{xy}$ based on the relationship $\Phi_{x}C_{yx} \approx \hat{\Pi}_{xy}\Phi_{y}$. Following [@cao2022unsupervised], our unsupervised loss can be expressed in the form $$\begin{equation}
\label{eq:align_unsup}
E_{\mathrm{align}} = \|\Phi_{x}C_{yx}-\hat{\Pi}_{xy}\Phi_{y}\|^{2}_{F}.
\end{equation}$$ It is important to note that our correspondence matrix $\hat{\Pi}_{xy}$ is directly predicted based on the deep feature similarity between point clouds. This is in contrast to [@cao2022unsupervised], where a universe classifier is required to predict shape-to-universe point maps. In turn, our framework is more efficient and flexible, since we do not rely on the universe classifier and the knowledge of the number of universe vertices.

In addition to functional map regularisation, we further utilise a self-supervised contrastive loss to encourage similar features for corresponding points from the input mesh and the corresponding point cloud. To this end, we use the PointInfoNCE loss [@xie2020pointcontrast], which maximises the feature similarity between corresponding points in a given triangle mesh $\mathcal{X}$ and a given point cloud $\hat{\mathcal{X}}$, while at the same time minimising the feature similarity between other points. The loss term can be expressed in the form $$\begin{equation}
\label{eq:nce}
E_{\mathrm{nce}} = -\sum_{i=1}^{n_{x}} \log \frac{\exp \left( \langle \mathcal{F}_{x}^{i}, \hat{\mathcal{F}}_{x}^{i} \rangle / \tau \right)}{\sum_{j=1}^{n_{x}} \exp \left( \langle \mathcal{F}_{x}^{i}, \hat{\mathcal{F}}_{x}^{j} \rangle / \tau \right)},
\end{equation}$$ where $\tau$ is a scaling factor. Similarly, $E_{\mathrm{nce}}$ is also applied to the shapes $\mathcal{Y}$ and $\hat{\mathcal{Y}}$. Finally, the overall loss for training is a weighted sum of the individual losses above, i.e. $$\begin{equation}
\label{eq:total_loss}
E_{\mathrm{total}} = E_{\mathrm{fmap}} + \lambda_{\mathrm{align}}E_{\mathrm{align}} + \lambda_{\mathrm{nce}}E_{\mathrm{nce}}.
\end{equation}$$
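For reference, $E_{\mathrm{nce}}$ reduces to a cross-entropy with positives on the diagonal of the pairwise similarity matrix; a short sketch (the temperature value is an illustrative assumption):

```python
import torch
import torch.nn.functional as F

def point_info_nce(feat_mesh, feat_pc, tau=0.07):
    """PointInfoNCE sketch: row i of feat_mesh and row i of feat_pc are
    features of corresponding points on X and X-hat."""
    logits = feat_mesh @ feat_pc.t() / tau       # (n_x, n_x) pairwise similarities
    targets = torch.arange(logits.size(0))       # positives on the diagonal
    return F.cross_entropy(logits, targets)      # equals -log softmax at (i, i)
```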
2303.15027/main_diagram/main_diagram.drawio
ADDED

@@ -0,0 +1 @@
<mxfile host="app.diagrams.net" modified="2022-09-18T16:51:37.881Z" agent="5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/105.0.0.0 Safari/537.36" etag="ec8Ki3DCUQr4-OGf_-6I" version="20.3.2" type="device"><diagram id="AVpwD6WEP6i5-fl5W4gB" name="Page-1">1Zddk9ogFIZ/jZc6CSRqL/1Y2+luZ5zadreXmGBCl4SUoMb++h4S8qmOux27H94E3sAB3vMEoYdnUfZRkiT8InzKe8jysx6e9xCyLTyGh1YOheI6o0IIJPNNo1pYsT+07GnULfNp2mqohOCKJW3RE3FMPdXSiJRi3262Ebw9akICeiSsPMKP1Xvmq7BQx65V658oC8JyZNsybyJSNjZCGhJf7BsSvunhmRRCFaUom1GuzSt9KfotzrytJiZprJ7SYXL749t0+3W4dKeOEN8//7qN7vsmyo7wrVlwDw05xJuuoRDowl2pQOhKNEtSh9InGAtSApXpPmSKrhLi6Td7oAK0UEUcajYUSZoUedqwjPpVqB2VimZnl2ZXhgFpVERUyQM0MR3wyHhsIKvo2dcpc4wUNrJVasRAElSRax+hYKx8hq3osq2rd2Cr1bH1wyvbii/bOnv7tjrOG7PVOWFr17DYn+jdFGoeJ2nKvLZPUmxjXxs0t6AGRsjDg64MEHZL4Wcu6H8EI8yzZvv5oVlbUslgdVQasZgR9Y92647rMGuxlR69vOcpIgOqLtF2nMVGltwTWSo1STlRbNee7qnUmRGWgsFCakjsDiSok/1imaZXc9vvBnI7gdxOoMKHo0A5SdWy/x0u9z/CNXI6bDnvhS30mmzh8Zm/y+eyhbtsDV+WrdG12dqIWC1IxLheEExQBoxoajKmHkwHXS5oc02tRk1XDkeYthl1LxB6RRjRu9jouoeM8bU2OvyyMI4vwwhXgEQXWZTfOpoc6hMFg2vHhLMgBk2JpKHekTXlS5EyxYR+uxZKiQgacP1iSrzHIAd5JriQ+Vh4k/+gST7YpDzHWKcONWY+81Apfa2aaG/QwvNje8DgYrVh8InIgQcjooVPFIGH1lPzFEz2d4JrjhYIDhOL31ua6pn2IyIf+zYaD5I4OP95XeFMhUfuaYoaEOMTEHcZecKZCqr1pa2Ap7764pu/</diagram></mxfile>

2303.15027/main_diagram/main_diagram.pdf
ADDED

Binary file (9.24 kB)

2303.15027/paper_text/intro_method.md
ADDED

The diff for this file is too large to render. See raw diff

2304.06976/main_diagram/main_diagram.drawio
ADDED

The diff for this file is too large to render. See raw diff

2304.06976/paper_text/intro_method.md
ADDED

@@ -0,0 +1,120 @@
# Introduction



Figure 1. (a) Our work considers a real-world JPEG image restoration problem with bit errors on the encrypted bitstream, where En/De represent JPEG encoding/decoding and $E_K/D_K$ represent encryption/decryption employed in disks with the secret key $K$. We propose a robust JPEG decoder, followed by a two-stage compensation and alignment framework to address this problem. (b) Comparison of the standard decoder results with our robust decoder results and our proposed two-stage framework results. The proposed robust decoder can decode the corrupted JPEG bitstream, and the proposed two-stage framework can ultimately restore high-quality images gradually from the decoded color-casted and misaligned images.

Image restoration is a long-standing problem in computer vision that has been extensively studied. Given a degraded image, e.g., a noisy, downscaled, hazy, or masked image, existing image restoration works in image deblurring [2,23], dehazing [26], inpainting [16,37], and super-resolution (SR) [6,36] are capable of restoring the high-quality counterpart. These methods are mainly based on pre-defined image degradation models in the pixel domain, but few attempts have been made at JPEG image restoration with a corrupted bitstream. The big challenge of bitstream-corrupted image restoration is that bit errors trigger JPEG decoding failures: the decoding process stops at the bit errors and the following bits cannot be decoded, as shown in Fig. 1 (b1).

In the real world, bit errors occur naturally in JPEG bitstreams stored in digital devices, and as memory cells wear out [27], uncorrectable bit errors are exposed externally. NAND flash memory, a type of non-volatile storage technology, is widely used in portable devices to store users' data. Due to technology trends, it exhibits progressively shorter lifetimes and increasingly relies on error correction codes (ECC) to ensure data storage integrity [20,24]. It is well known [20,33] that the raw bit error rate (RBER) of NAND flash memory grows rapidly as the program/erase cycles, temperature, and retention years increase. As a result, bit errors may exceed the ECC's error correction capability and cause unrecoverable bit errors. In addition, if the storage device is severely damaged, or the ECC controller is not functioning correctly, standard data reading [32] may not be possible. Chip-off analysis [31] is often required to expose data in this case, but it is even more likely to result in unpredictable bit errors in the resolved data.

File carving [22] is an essential memory forensic technique that allows files to be recovered from unreliable NAND flash memory. While existing JPEG file carving methods [7, 21, 29, 30] mainly focus on JPEG file carving in the absence of filesystem metadata, few consider the situation where the JPEG file itself is corrupted. Bit errors in the JPEG bitstream can severely deteriorate the decoded image quality through two kinds of error propagation [12]. In addition, since Android 5.0, full-disk encryption (FDE) [10,11] has been introduced to protect users' privacy. Once an Android device is encrypted, all user-created data is automatically encrypted before being committed to disk and automatically decrypted before being accessed from disk. For encrypted files stored on an Android device, bit errors caused by the unreliable NAND flash memory are directly reflected in the encrypted data, making the bit errors of the decrypted file much more serious. This issue brings a significant challenge to existing works.

Recently, deep learning methods [16, 36, 37, 41] have shown great power in image restoration problems due to their powerful feature representation ability. However, existing image restoration methods may not be apt for the abovementioned problem because of the unpredictable color casts and block shifts of decoded image contents caused by bit errors. As Fig. 1 (b1, b2) shows, decoders fail to generate visually consistent images, which therefore cannot be directly used for the end-to-end training of existing image restoration methods.

Given the facts above, it is natural to raise a question: given a corrupted JPEG bitstream, is it possible to restore the image contents? With consideration of the FDE employed in smartphones for privacy, the damaged JPEG image $y$ in the pixel domain can be formulated as:

$$y = De(D_K(\mathrm{Bitflip}(E_K(En(x))))) \tag{1}$$

where $x$ represents the initial JPEG image, $D_K$ and $E_K$ represent decryption and encryption of FDE, $De$ and $En$ represent JPEG decoding and encoding, $E_K(En(x))$ represents the corresponding encrypted JPEG bitstream under the secret key $K$, and $\mathrm{Bitflip}$ represents random bit errors on the encrypted data. To simplify the problem, we assume the secret key is already known.

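Read operationally, Eq. (1) is simply a composition of stages; the sketch below mirrors it with hypothetical, injected helper functions (none of these names come from the paper):

```python
import random

def degrade(x, key, rber, jpeg_encode, encrypt, decrypt, jpeg_decode):
    """Eq. (1) as a pipeline; all stage functions are assumptions, injected."""
    bits = bytearray(encrypt(jpeg_encode(x), key))      # E_K(En(x))
    for i in range(len(bits) * 8):                      # Bitflip at rate rber
        if random.random() < rber:
            bits[i // 8] ^= 1 << (i % 8)
    return jpeg_decode(decrypt(bytes(bits), key))       # y = De(D_K(...))
```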
In this paper, we propose a robust JPEG decoder, followed by a two-stage compensation and alignment framework, to restore bitstream-corrupted JPEG images. Specifically, the robust decoder adopts an error-resilient mechanism, which can decode the corrupted JPEG bitstream completely (see Fig. 1 (b2)), in contrast to the aborted JPEG decoding of the standard decoder (see Fig. 1 (b1)). To further resolve the color cast and block shift problems in our decoded images, we propose a two-stage compensation and alignment framework, i.e., a self-compensation and alignment (SCA) stage and a guided-compensation and alignment (GCA) stage. In the first stage, SCA casts the problem as a segment detection problem and adaptively estimates suitable color and block offsets for each segment to perform block-wise image color compensation and alignment via image content similarity. In the second stage, GCA leverages the low-resolution thumbnail (normally 160×120 [9, 25]) extracted from the JPEG header to guide full-resolution pixel-wise image restoration. The GCA is achieved by coarse-to-fine neural networks, including a coarse-guided pix2pix network and a refine-guided bi-directional Laplacian pyramid fusion network. As Figs. 1 (b3, b4) show, the proposed two-stage framework deals with the color cast and block shift problems and ultimately restores high-quality images. In summary, our contributions are as follows:

- To the best of our knowledge, this is the first work to restore JPEG images with bit errors on the encrypted bitstream. Unlike existing works based on pre-defined degradation models in the pixel domain, the discussed problem in the bitstream domain causes unpredictable color casts and block shifts on decoded images, which is challenging and of great practical value.
- We propose a two-stage compensation and alignment scheme for this problem, where the SCA stage and GCA stage are proposed and combined into an end-to-end architecture. The SCA is based on image content similarity without training data, and the GCA employs the coarse-guided pix2pix network and the refine-guided bi-directional Laplacian pyramid fusion network to gradually restore full-resolution images.
- Extensive experiments and ablation studies have been conducted to demonstrate the superiority of our proposed method. Even for 2k-resolution images, our proposed method can restore high-fidelity images with faithful details, achieving PSNR up to 38.92 dB, a significant improvement of 5.52 dB over the baseline EPDN method [26].

# Method
|
| 35 |
+
|
| 36 |
+
The overall structure of our model is shown in Fig. 2, consisting of a robust JPEG decoder and a two-stage alignment and compensation framework, i.e., a self-compensation and alignment (SCA) stage and a guided-compensation and alignment (GCA) stage. Given a corrupted JPEG bitstream after decryption, it is first processed by the JPEG robust decoder to make the compressed image data fully decoded, and extract the thumbnail from the JPEG header. For the corrupted image, it is then sent to the SCA stage to adaptively perform block-wise image color compensation and alignment based on the estimated color and block offsets $\{\Delta_{Color}, \Delta_{Block}\}$ . After that, GCA first leverages the bicubic upsampled thumbnail to coarsely guide the self-compensated image to achieve pixel-wise restoration through a pix2pix network, and then gradually fuses the low-resolution thumbnail with multiscale bicubic downsampled images from the pix2pix network by the proposed Laplacian pyramid fusion network, restoring the final full-resolution refined image.
**Self-synchronization.** Huffman coding is a variable-length encoding method that is widely used in JPEG to compress data. A recent study [29] observed that JPEG files tend to possess a self-synchronization property thanks to the extensive use of **EOB**, a special codeword indicating that the remaining AC coefficients of a block are all zeros. Self-synchronization means that bit errors in a bitstream cause incorrect decoding at first, but the decoder eventually recovers the same decoding sequence as the original. As Fig. 5(a) shows, although three bits are changed in the bitstream right before block<sub>21</sub> decoding starts, the decoder still re-synchronizes at block<sub>23</sub>. However, this property is not utilized by standard JPEG decoders. The core reason is that standard JPEG decoders lack error-resilient techniques: once a decoding failure occurs, an exception is reported and the decoding of the remaining blocks is aborted.
**Robust JPEG decoding.** Here, we propose an error-resilient mechanism in our robust decoder, shown in Fig. 4. Two potential causes of a decoding failure can be detected in our decoder when performing Huffman decoding in a Minimum Coded Unit (MCU) block. The first is when the decoder encounters an invalid codeword that cannot be found in the Huffman tables of the JPEG header. The second is when the number of decoded coefficients of an 8×8 block exceeds 64, called coefficients overflow. Once a decoding failure is detected, the error-resilient mechanism discards a few bits so that the rest of the JPEG bitstream can still be decoded, even though some blocks may be wrongly decoded. As Fig. 4 shows, assuming $MCU_1 \sim MCU_{i-1}$ are already correctly decoded and MCU<sub>i</sub> is being decoded, the proposed decoder starts decoding at bit address k to get the Y, Cb, Cr blocks of $MCU_i$, respectively. Once a decoding failure is detected anywhere in the decoding of MCU<sub>i</sub>, our decoder discards the already decoded blocks of MCU<sub>i</sub> and restarts decoding the JPEG bitstream at bit address k + 1, repeating until $MCU_i$ can be fully decoded without any errors. After that, the decoder will save the decoded $MCU_i$,

Figure 4. Error-resilient mechanism in our robust JPEG decoder.


Figure 5. (a) Decoding comparison of the correct bitstream and a bit-corrupted bitstream whose first three bits are bit-flipped. The bit-corrupted bitstream achieves self-synchronization after block<sub>22</sub>. (b) Two major problems are introduced after self-synchronization, i.e., DC error propagation and block shift.
record the bit address g, and start the next $MCU_{i+1}$ decoding at bit address g.
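
To make the mechanism concrete, here is a minimal Python sketch of the bit-discarding resync loop described above. `decode_mcu_at` is a hypothetical stand-in for a real Huffman MCU decoder, and the exception models the two failure causes; this is an illustration of the idea, not the paper's implementation.

```python
class DecodingFailure(Exception):
    """Raised on an invalid Huffman codeword or a coefficients overflow (>64)."""

def robust_decode(bitstream, start_bit, num_mcus):
    # `decode_mcu_at(bitstream, k)` (hypothetical) tries to Huffman-decode one
    # full MCU (Y, Cb, Cr blocks) starting at bit address k and returns the
    # decoded MCU plus the bit address right after it.
    decoded, k = [], start_bit
    for _ in range(num_mcus):
        while True:
            try:
                mcu, g = decode_mcu_at(bitstream, k)
                break                      # MCU_i fully decoded without errors
            except DecodingFailure:
                k += 1                     # discard one bit and restart MCU_i
        decoded.append(mcu)                # save MCU_i, record bit address g
        k = g                              # next MCU starts at bit address g
    return decoded
```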
Although the proposed error-resilient mechanism enables JPEG files to be decoded completely, the decoding results still suffer from two problems, as Fig. 5(b) shows. The first problem is DC error propagation. Since DPCM is employed to encode DC coefficients, the encoded DC value is the difference between the current block's DC and the previous block's DC. Therefore, although the decoding of the remaining blocks is the same after block<sub>22</sub>, the changed DC of the sync block<sub>22</sub> shifts the DC of all the following blocks, as Fig. 5(b) shows. The second problem is block shift. Since JPEG constructs the 2D image by stacking blocks one by one from top to bottom and left to right, self-synchronization eats up the bitstream belonging to block<sub>21</sub> and hence causes the remaining blocks to shift left by one block.
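
A toy numpy example makes the DC error propagation visible: decoded DC values are the cumulative sum of the DPCM differences, so a single wrongly decoded difference offsets every later block's DC by the same constant (the values here are made up for illustration).

```python
import numpy as np

dc_diff = np.array([-9, 3, 0, -2, 5])   # encoded DC differences (toy values)
dc_diff_bad = dc_diff.copy()
dc_diff_bad[1] += 7                      # one wrongly decoded difference

print(np.cumsum(dc_diff))      # [-9 -6 -6 -8 -3]  correct block DCs
print(np.cumsum(dc_diff_bad))  # [-9  1  1 -1  4]  every later DC shifted by +7
```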
**Segment detection and normalization.** In JPEG images, the DC coefficient of a block represents the average intensity of its 8×8 pixels. A small DC coefficient variation $\Delta_{DC}$ shifts all 64 pixels of a block by $L \cdot \Delta_{DC}$, where L is a constant. A large DC variation therefore easily pushes pixels outside the specified range [0, 255]. Observing that DC error propagation results in the same pixel shift for the following consecutively and correctly decoded blocks (namely, a blocks segment) before the next self-synchronization, we intuitively cast this problem as an image segment detection problem. Blocks inside a segment share the same DC shift and hence have smooth image contents after decoding, whereas blocks across two consecutive segments differ greatly in image contents after decoding. Taking advantage of this feature, we propose a segment detection method based on 2D image content similarity: a segmented point is deemed detected when the image contents change abruptly. Each segmented point is determined by a horizontal and a vertical coordinate as s = (h, v) in the 2D image, where $h, v \mod 8 = 0$. We first detect h and then detect v by fixing h.

Figure 6. Vertical coordinate detection of the segmented point by calculating the coherence of ED (CED). The vertical coordinate v is determined when CED reaches the maximum at $8j_k$ .
The horizontal coordinate h can be easily determined by checking the pixel similarity across the horizontal boundary of consecutive row blocks. Sum of Differences (SoD) and Euclidean Distance (ED) are two commonly used similarity metrics [28]; lower SoD/ED values mean higher pixel similarity. Here, we select ED as the similarity metric to measure the pixel similarity across the horizontal boundary of consecutive row blocks, expressed as:
$$ED_h = \frac{1}{W} \left( \sum_{i=1}^{W} \left( p_{h+1,i} - p_{h,i} \right)^2 \right)^{1/2}, \tag{2}$$
where $ED_h$ represents the pixel similarity between rows h and h+1, $p_{h,i}$ is the corresponding RGB values of the pixel of the image at position (h, i), and W is the width of the image. Once we calculate the EDs for all h, those with large ED values are regarded as the horizontal coordinates of the candidate segmented points.
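
A direct numpy transcription of Eq. (2) might look as follows; it assumes the image is an H×W×3 array and that the squared differences are summed over the RGB channels as well.

```python
import numpy as np

def ed_h(img, h):
    """Eq. (2): ED across the boundary between pixel rows h and h+1."""
    diff = img[h + 1].astype(np.float64) - img[h].astype(np.float64)
    return np.sqrt((diff ** 2).sum()) / img.shape[1]

# Candidate horizontal coordinates: block boundaries (h a multiple of 8)
# whose ED value stands out from its neighbours.
```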
After the horizontal coordinate $h=8i_k$ of a segmented point is detected, the corresponding vertical coordinate $v=8j_k$ needs to be detected, as Fig. 6(a) shows. A naive idea is to check the pixel similarity across the vertical boundary of consecutive blocks at $h=8i_k$. However, this is easily misled since it only considers the difference of 8 pixels across the vertical boundary. To address this problem, we propose a new vertical detection method based on the coherence of ED (CED) [28]. CED is defined as the difference between adjacent EDs of row pixel blocks, expressed as:
$$CED_{h,v} = |ED_{h+8,v} - ED_{h,v}|, \tag{3}$$

$$ED_{h,v} = \frac{1}{W} \left( \sum_{i=v}^{W} (p_{h+1,i} - p_{h,i})^2 + \sum_{i=1}^{v} (p_{h+9,i} - p_{h+8,i})^2 \right)^{1/2}, \tag{4}$$
where $CED_{h,v}$ represents the pixel similarity at the given segmented point (h, v); unlike the $ED_h$ calculation, $ED_{h,v}$ is calculated as the sum of two parts split according to the vertical coordinate v. Assuming two segments are divided by the segmented point $s_k = (8i_k, 8j_k)$ in Fig. 6(a), where
the horizontal coordinate h is already determined, to detect the vertical coordinate v we calculate CEDs for all points $(h=8i_k, v=8j)$ for j = 0, 1, 2, ... Since lower ED values mean higher pixel similarity, when $v<8j_k$, as shown in Fig. 6(b), compared to $v=8j_k$, $ED_{h+8,v}$ decreases because the adjacent row blocks are more similar, while $ED_{h,v}$ is almost unchanged because the adjacent row blocks still belong to the same segment, so the overall CED decreases. When $v>8j_k$, as shown in Fig. 6(c), the overall CED decreases for the same reason. Hence, the vertical coordinate of the segmented point is determined where CED reaches its maximum, at $v=8j_k$.
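
The CED check in Eqs. (3)-(4) can be sketched the same way; indices are 0-based here, and `img` is again assumed to be an H×W×3 float array, so this is an illustration of the computation rather than the exact implementation.

```python
import numpy as np

def ed_hv(img, h, v):
    """Eq. (4): two-part ED split at vertical coordinate v."""
    a = img[h + 1, v:] - img[h, v:]      # columns v..W of rows h and h+1
    b = img[h + 9, :v] - img[h + 8, :v]  # columns 1..v of rows h+8 and h+9
    return np.sqrt((a ** 2).sum() + (b ** 2).sum()) / img.shape[1]

def ced(img, h, v):
    """Eq. (3): coherence of ED at candidate segmented point (h, v)."""
    return abs(ed_hv(img, h + 8, v) - ed_hv(img, h, v))

# For a detected h = 8*i_k, pick v = argmax of ced(img, h, v) over v in {0, 8, 16, ...}.
```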
After all segmented points are detected, pixel normalization is performed on each segment. At each color channel, it first centers all pixels by subtracting the mean pixel value of the segment, compensating for the abnormal DC shift of the segment. After this, most pixels are centered around zero, with only a few isolated pixel values in sync blocks that are extremely high or low due to self-synchronization. These isolated values stretch the real pixel range and need to be eliminated, so we use a *Clip* function to restrict the overall pixel values to [-150, 150]. Finally, min-max normalization is applied to the whole image to re-scale the pixel range back to [0, 255] for display.
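
A numpy sketch of this per-segment normalization, assuming `segments` is a list of boolean masks (one per detected segment) produced by the segment detection step:

```python
import numpy as np

def normalize_segments(img, segments):
    out = img.astype(np.float64)
    for mask in segments:
        for c in range(3):                      # each color channel separately
            out[..., c][mask] -= out[..., c][mask].mean()  # compensate DC shift
    out = np.clip(out, -150, 150)               # drop isolated sync-block outliers
    # min-max normalization over the whole image, back to [0, 255] for display
    out = (out - out.min()) / (out.max() - out.min()) * 255
    return out.astype(np.uint8)
```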
**Block alignment.** To ease the block shift problem caused by self-synchronization, we add an additional block alignment step. Given a misaligned image, alignment determines how many blocks each row should shift to align with the upper row. For each row h, each time the row is shifted one block left or right, the corresponding $ED_h$ is calculated; the number of blocks a row must shift is the one at which $ED_h$ reaches its minimum. This alignment operation continues until the last row of the image is aligned with its upper row.
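
The alignment search can be sketched as follows; the shift search range `max_shift` is an assumption for illustration, not a value from the paper.

```python
import numpy as np

def align_block_rows(img, block=8, max_shift=4):
    out = img.astype(np.float64)
    for h in range(block, out.shape[0], block):          # each block-row in turn
        row = out[h:h + block]
        shifts = list(range(-max_shift, max_shift + 1))  # shifts in whole blocks
        # ED between the top line of the shifted row and the line just above it
        costs = [np.sqrt(((np.roll(row, s * block, axis=1)[0] - out[h - 1]) ** 2).sum())
                 for s in shifts]
        out[h:h + block] = np.roll(row, shifts[int(np.argmin(costs))] * block, axis=1)
    return out
```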
**Thumbnail integration.** For most JPEG photos created by phones or digital cameras, a thumbnail is auto-created and embedded into the APP0 marker segment of the JPEG header [9, 34], which is stored separately from the actual compressed image data. Since the thumbnail is very small (typically $160\times120$ [9, 25]) compared to the actual compressed image data, it is more likely to escape random bit errors. Therefore, we introduce the thumbnail as an additional network input to guide the image reconstruction. The thumbnail is first bicubic-upsampled and then concatenated with the self-compensated image as the input of the pix2pix network [26]. Although the blurred thumbnail lacks details, it brings the required color and block alignment information.
**Pix2pix network.** The pix2pix network is based on previous works [8, 26], consisting of multi-resolution generators and multi-resolution discriminators. In our paper, the pix2pix network concatenates the thumbnails and the self-compensated images from the SCA to coarsely guide the

Figure 7. The structure of the bi-directional Laplacian fusion network. Black dotted arrows indicate de-convolutional layers for upsampling and red dotted arrows indicate downsampling by bicubic interpolation. The convolution block is composed of 11 convolutional layers, each a $3\times3$ convolution followed by a Leaky ReLU activation, plus one more $3\times3$ convolutional layer.
resulting images with more consistent color and aligned textures. The details of the network can be seen in the Supplementary Materials.
**Laplacian pyramid fusion network.** To refine the coarse image from the pix2pix network, a pooling-enhanced module is used in EPDN [26] to integrate the pix2pix network input, i.e., the thumbnail and the self-compensated image. However, since the thumbnail is blurred and the self-compensated image is not fully aligned, neither is a good choice for the pooling-enhanced module: they can make the network focus on unrelated features for fusion, e.g., the upsampled thumbnail blurs the network results. Conversely, instead of refining the coarse image, the proposed bi-directional Laplacian pyramid fusion network refines the thumbnail gradually under the guidance of the coarse image at different scales, as Fig. 7(b) shows. Unlike the Laplacian pyramid structures in existing works [13, 14], our structure is bi-directional and relies on both upsampling and downsampling. First, from right to left, the coarse image is bicubic-downsampled to generate images at different scales, which are then convolved with a convolution block to generate pyramid high-frequency features (gradually finer details). Then, from left to right, the thumbnail, as the low-frequency residual, is gradually element-wise added with the pyramid high-frequency details to obtain the final output. At each step, the element-wise added feature is 2x upsampled by a trainable de-convolution layer.
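
A minimal PyTorch sketch of the bi-directional fusion idea (the level count, channel widths, and single-conv "convolution block" are simplifications of this sketch; the thumbnail is assumed to match the coarse image downsampled by $2^{levels-1}$):

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class BiLaplacianFusion(nn.Module):
    def __init__(self, levels=3, ch=3):
        super().__init__()
        self.levels = levels
        # high-frequency extractor per level (stand-in for the full conv block)
        self.hf = nn.ModuleList(
            nn.Sequential(nn.Conv2d(ch, ch, 3, padding=1), nn.LeakyReLU(0.2))
            for _ in range(levels))
        # trainable 2x upsamplers between levels
        self.up = nn.ModuleList(
            nn.ConvTranspose2d(ch, ch, 4, stride=2, padding=1)
            for _ in range(levels - 1))

    def forward(self, thumb, coarse):
        # right to left: bicubic-downsample the coarse image into a pyramid
        guides = [coarse]
        for _ in range(self.levels - 1):
            guides.insert(0, F.interpolate(guides[0], scale_factor=0.5,
                                           mode="bicubic", align_corners=False))
        # left to right: add high-frequency details onto the low-frequency thumbnail
        x = thumb
        for k in range(self.levels):
            x = x + self.hf[k](guides[k])
            if k < self.levels - 1:
                x = self.up[k](x)          # 2x upsample between levels
        return x

# e.g. BiLaplacianFusion()(torch.rand(1, 3, 32, 32), torch.rand(1, 3, 128, 128))
# returns a (1, 3, 128, 128) refined image.
```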
**Loss Function.** In this paper, we follow the same loss functions as EPDN [26], including the adversarial loss $L_A$, the feature matching loss $L_{FM}$, and the perceptual loss $L_{VGG}$. Considering the employed Laplacian structure, we adopt a robust Charbonnier loss [13] $L_C$ to replace the $\ell_2$ fidelity loss, i.e.,
$$L_C = \frac{1}{L} \sum_{i=1}^{L} \left( (\hat{X}_i - X_i)^2 + \epsilon^2 \right)^{1/2}, \tag{5}$$
where $\hat{X}_i$ and $X_i$ denote the *i*-th level of the pyramid output and the corresponding ground truth, L is the number of levels of the pyramid outputs, and $\epsilon$ is empirically set to 1e-3. To make the network focus on learning the block alignment information, we add an additional edge loss $L_E$ [18], expressed as $L_E = \|S(\hat{X}) - S(X)\|_2$, where $S(\hat{X})$ and $S(X)$ denote the Sobel operator
applied to the estimated network output and the ground truth, respectively. The overall loss function is:
$$L = L_A + \lambda_1 L_{FM} + \lambda_1 L_{VGG} + \lambda_2 L_E + \lambda_3 L_C, \tag{6}$$
where $\lambda_1, \lambda_2, \lambda_3$ are user-defined hyper-parameters. We follow the same alternating iteration training scheme [26]: in one training step, the GAN module (generator and discriminator) is first optimized by $L_A$, $L_{FM}$, and $L_E$, and then the Laplacian pyramid fusion network and the generator are optimized by $L_{VGG}$, $L_E$, and $L_C$. The generator's weights are thus updated twice during each training step. More details of the loss function can be found in the Supplementary Materials.
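
As a sketch, the two non-standard terms can be written as follows (the per-pixel means and the grayscale Sobel response are simplifications of this sketch, not details specified by the paper):

```python
import torch
import torch.nn.functional as F

def charbonnier(pred, gt, eps=1e-3):
    """Eq. (5) for one pyramid level; the full L_C averages this over the L levels."""
    return torch.sqrt((pred - gt) ** 2 + eps ** 2).mean()

def edge_loss(pred, gt):
    """L_E: distance between Sobel edge maps (squared L2 in this sketch)."""
    kx = torch.tensor([[-1., 0., 1.], [-2., 0., 2.], [-1., 0., 1.]])
    w = torch.stack([kx, kx.t()]).unsqueeze(1)        # (2, 1, 3, 3) Sobel pair
    def sobel(x):
        return F.conv2d(x.mean(dim=1, keepdim=True), w, padding=1)
    return F.mse_loss(sobel(pred), sobel(gt))
```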
2305.11553/main_diagram/main_diagram.drawio
ADDED
@@ -0,0 +1 @@
<mxfile host="app.diagrams.net" modified="2022-09-26T11:27:36.730Z" agent="5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/104.0.0.0 Safari/537.36" version="20.0.4" etag="Mvr2saakYj1WmKZfGUtT" type="google"><diagram id="KGEJUhQdOWef2wOyim_J">7V1bk5s4Fv41rtp9CIVuXB6TnmR3HjKVqjzszCNt0za1tvFinHTm169kEBchGzASSJ24H9rIIOA7R0fnk46OVujp8PqvLDrtPqebeL+C7uZ1hX5bQQhdN6T/WMmPogS4kBQl2yzZlGV1wdfk77hdeEk28blVlKfpPk9O7cJ1ejzG67xVFmVZ+r192ku6b9/0FG3LG7p1wdd1tI87p/0n2eS7ohQht3H6v+Nkuytvjb2gvOQQ8bPLU8+7aJN+bxShjyv0lKVpXnw7vD7Fe4Yex6W47tONX6sny+JjPuQCWFzwLdpfypdbQW9PL/3wktIa6APmP8rX9v53SfkP785XmbynJwDvRAX7of6dftuy/47j8Lro/Yvqil/KV69qhnn8ysp3+WFPCwD9es6z9L/xU7pPM1pyTI8xu3Oy3wtF0T7ZHunhmr5vTMs/fIuzPKGiel/+cEg2G3abD993SR5/PUVrds/vVDFpWZZejpuYQeFej/IoT1J2FXarx2QVxq834QWV0Ki6x+khzrMf9BR+gReWus5VHQO/KPhea45XKsOuoTOoLItKXd1WddfSpF9KgcqFiyTCFZCvEQA3MGoIpYn/CqL4+ukIi/6CPBSijRoIgxC3EURhF8FQgiAACiDEbwHC0EWCErozQkjeBIQAiu3Ymw9Crx/CLcXw1Or2omf+m6sCAH5JBQAXbB8AMJwOgG+HDnlyCOdQkWAMQu5ohCLqtVGBSxDyPrn0U/oFjfKX62cicvwC4Li0J60+yGvpYtDtUX0JzApQDvWiDJ4jEEMZyq7rfXz/aVGU+bvOADOv443i7DohrGEOgjbOqNuxaMMZvGmc7+szcefDWUazfhacPTwfzqMYj3U4C3ZDwNmfrxsEo2iRdd7GfZzDGftBGXf6NTwzYnjmvqTfAWF0xpXIVtPgDDCB1CFxdAp3AQgCiXIr0W5LWB3op3WBzAAowUgzr9tEcfCyllradRA/D7aotzAyjcABzQzODDxnpGpQM1WbC1BjOBnUzMnM0NAZyRfH720DOiPLGjevNB7Ql2Adr6WAPgcEk+FTc7bQqXGzTLYCOiNvks45jeRN8AZv+oM92zNFMlrn5+vr0Lehtz4meRKxhzjvk3Vy3J7tYlfsEctACypmRXNmbksBAPG6Vh+4mvjVkEmz+Lh5z+JBGIb76HxO1m1BDIEo3vBgkeEANd6fSF6fl2XxnjLeb+3qZZiUd/iSJlflrtojbrdAgANhLu6cXrJ1XF5XYyupKuyrKo+ybZx3qrqKqXr1YZKzhRf29Nd89GaRwIVxE4LmggiF6I9ZQxfGzfeZCyIS4z/mDF5AAxhiMdAl7eSknWFRgZYxMQD4GNi9SAfoS1wXyFnjJLhG8b8hOtfotqA+HUTufVghcAR7SEAHVoAJP6vtE6rQw1E8cLnGfAvIWdqqZmqne2KnTwlNGYZEmhmf7onKaTjzl50D6FHRh/YBbcywJRpAt2wG2pjhTDSKHb01oGcc5kSap96WBtqY4U80imXZ53QYMyyKR83VmecBl1dg0iZo7zhNn2VBiHJ6tgyGXjucv4qEmQVDy5kYx9B3BQy7XoA+DAcwtenhQ70IhAICAwFQME6CdS/NAhsS+zIlCj0fRYPnSApBLUPnsWb2o7tnvYWdaXQeW05+puHMX3YOoC0nP31AG0PnseXkZ5pGz0jnseVLyqYBPSOdJ6PIj31AG0PnySiGZJ/TYQyd5zR4WQrAgxr4rF63j9K2gIBYkuCiENRdCqBtAQHRPNOkKvr1FkamufpEM6UyA88ZXXqimTvNBagxrjvRzJHM0NAZXXRiyRqsaYDO6YpbsghrZJNfzuX2NHMbVQsIpgE6o2vtyTjMr4XXIxZe99CGd3xh6wIrr70BvGkKbXgh7E9qfq6fLkEtPqrmmxF0xKhpP3C6MayypQeeCnwtz8LRi2/goKBhpgQCHHgOdkP+150RQ74DW5+uFIiDSetPgVAs4YFDwA/d6tOexYZut89dCO4uTbzafSMN/ENrv3oFhUUrVFXTDPgOJR24CvxHsUrrbDx0Q4mNn8/Ca6aYC0dsQUDuW3jihAZa+C5N/blMDmO4HZPjShwfbUany2u/GIr/JjrvKtOnxf7DNnmTrfbR5eD7XTr89LPKAYq5n2UDZ7rkYHl6yYlEIPScANTdhGTh6jIdhT+K/xpNBTx0kwpIAloXgvttr6ljzpLXmvmwpBVYknRlGvzQ9R1aWsEPuqP8C8GveR7VEPhBYCj8Mn5cDEqfT9GxJYexGXLW6ZGCflmzYfJTFh+Sc8zqi9lxdNysrqEo6/3lzMaUyx+q8fDi7kaPh+txl31H8B+qmfEmc0HQ8SUL5JV4awNIvTRhzjmPsvzBPDrMe4tfk/zP8kz2/S92nkPKo99ey8uuBz/4wZG+3Z/Ng8ZV7LC+7HrEryve6G7OHo5lkRun9JBLKIokN2VZf1pQjcl9kODXIzFFwtDUPoi0K6oeUH1iH182ka4oLdfn3+2aVGs2AlWdEfBAZR2qsSrYMSKeLgMyYFqftbyv5WG8f06/f6wLPlwL6A8c1mvrfTRDF2xaCdC2En6fmeAmqTZDfzUMlNwkPWhaePNrmha0aN4wgpFDHyvwIfHpdzcUNMp/zNBUHUztGw0yNFT+0Y/GaSd2wvmOZQwFH6wciaq1uKjxUSsWDM5ndC3VlKSIj9kskrk7GDWws9zau6A/lY62wFtueE0fZbmFkWmBt4HmYRQz8ITdVqwNUEsiAvoANSbwNrAkNHyahs4YeBtYEho+DdAZA28DzfP2qgbdRjb55QJvA82h4WYAOmPgbSDjkIqGCLLouEkPe/beL3G+Zlm7ed3PGT+JMX1YJfi2d0xBUQ5vAJAQZkR49P4cSbzDAVzr4WSoDLIoW5eHgaT18SCvW6Fct4O/VECPoMDxZAmkAU/M3Q5mUYG9JSSvD0VRgWUZpHkKstZoClSAoSUksBfDwPHFhb6Y9hp+AKq/bicBQ8dFzY9k4BE7Pmz9EQWoW0IVH0Pdd7oO42JIW8IhhyCNvBo+AXOKnQsaqm4O/pZQzoc0HbmOh+o/SRL2pVDXzEuV7dfzEOq+AxuxBgahbgl5nWhrMHKCBnAG2XpLuO5E/Inr4EZ0vznwa2TG6/T4kmydVbWb1SF6ZUeff//HlxWLnXaf/mkvFValOZiHsFdUmCeDbaiHL2ESSrYLdk2YdkRcC5cgU8C1hZG6/fOO+kDSTDmVmelbIPHEUtQ3aYya+0K4U4t60r+uGi5lqIFrC/2cJAHK/s3
xTYBrCxHtx7zhm1RTitUazzYPlSRbW04CtlDRSVpPDU+Tk3rIJLtjyXTpRMvvtvhpFQ1vhARsoajjrJAH22YIeS2KWmFnhAhsYanTRIBRi6UaxFOBKyOqcxMVzCP2FyEqQPm0pS6iEsphnAUkzauD1bXUGyBZT1SANVRxigTMIirAGnLYi7mlRAVYQxWnaL3JRAVYQxUnWX6DiQqwhiqOskI2ERVgDVecJAKDiQoYQBX1z6h0wtO4GZnFBx/A1YwgKoWsliEqcBSbW9BY3gLJeqICNVNFMyRgFlHhEL8BzC0lKtAWqjhJ600mKtAWqjjN8htMVKBmqqjMSx5nhWwiKtCWaNdpIjCYqMBHMzMNSpRSO+F3c5cMccE1JiUBYVtc78TVhoPTkGAh4llbuiMAjSCYnZC9OQkmtIVgwgUJJtJMMJVZ2FsgWU8wkS0Ec5IEzCKYyBaC2Y+5pQQT2UIwJ2m9yQQT2UIwp1l+gwkm0kwwF7JCNhFMZAvBnCYCgwkmGjUXOSnNBsCSNkLiYINlAgrgM1KWTwMTca2r113JjSVOPo8lnIaxCWSwGxY5JxlEtpBBtCAZxLbMNt4CyXoyiG0hg5MkYBYZ5Nlr3wDmlpJBbAsZnKT1JpNBbAsZnGb5DSaD2BYyOM4K2UQGsS1kcJoIDCaD2IjZRjm4M801QkLB9+ut5UTSFNBehNQWLmzfYOhMJOhsrYBFpqlwMhLr3HyFXb1P1slxW1DXl2R7yagI0uPZrrwxw9r+3ZYPkJgnhhqCTvOu/C3liWKwjGX3bO0lSwl0OF1ylgtXlgSozpdL/0cHhuPx+Xy6qkzG2nbE0uu2f6mSDEmqsGj3LyUa4gaOwAxkuYSqpbTKdYQMGGQYtFXPLs2Sv2mznrhZDzt+ZM+dehuweuev6qrx24D17cvD0xGb01N5nsDqCU9JPro3wsLOCcDDwvMo2qAHeIHw0F6p6DefjZDg7hWrqZv6VNtTz5N4XDSB+Y5Z2iMzcPBX8vHbikL4tkdzJB8HfDxXxyaZFE7qIDGpX455wm4bXW/+R0MBzux7FnO1oVBb1VPq0Ihqm6va3nU1gp7m8NBt9UoxYJxsGj1qdGouRKvWznUh7t26jh19ibOEvhqToLCfXUhwo3dlx/c7WHogVjag86ymepq9J0fOnN4Th0KXR3io7ejek4QdUyVUpZDLEdlIoaLO6nyh1ifJr76/rf2PKj+dIFE/JFlkdG2nCciA0chp+2PeaMnmNFCIBb+PEGGIa3ADheLUN+G5lXU0UI3ba19Om6jVOH9WVwAJFNqDkqkEra5Ad6T0OU1z5rydTmws7CpI+j1Lo/WuGBwrPP3tPn2OWF2H6DU5XA6Gim0TnXdVLIbceEw0sK4gQyTZXq7aOHaiAOlhlrKmVLdq+tK7z+kmZmf8Hw==</diagram></mxfile>
|
2305.11553/main_diagram/main_diagram.pdf
ADDED
Binary file (29.6 kB).
2305.11553/paper_text/intro_method.md
ADDED
@@ -0,0 +1,79 @@
# Introduction
In scientific papers, abstracts are short texts that summarize the findings written in the body text [@bahadoran2020principles]. A well-formulated abstract forms a clear pathway of scientific inference from the premises (e.g., shared knowledge, experimental evidence, or observation) to the conclusions (e.g., suggestions, claims) [@ripple2012structured]. Being able to split an abstract into a conclusion segment and a premise segment not only helps readers better comprehend how conclusions are drawn [@bahadoran2020principles] but also contributes to many downstream research tasks such as argument generation [@schiller2021aspect], knowledge retrieval [@hua2019argument], opinion analysis [@hulpus2019towards], and text summarization [@cho2022toward].
However, segmenting abstracts is not always easy. Many abstracts, especially those from the biomedical domain, are structured such that the reader can easily extract the conclusions (e.g., abstracts following the IMRaD format [@nair2014organization; @dernoncourt-lee-2017-pubmed] or the CONSORT format [@hopewell2008consort]). In contrast, abstracts from many other research domains typically do not explicitly indicate the position of the conclusions, which means readers must perform the cognitively demanding task of identifying the conclusions themselves. The difficulty of segmenting abstracts motivates research on automatic approaches for splitting scientific abstracts into conclusion and premise sentences.
Existing text segmentation approaches [@somasundaran2020two; @lo2021transformer; @barrow2020joint; @koshorek2018text] can be applied to automatic scientific abstract segmentation. However, fine-tuning such models typically requires large amounts of labelled data that are expensive to collect. In comparison, unsupervised approaches require no annotated data and can provide massive amounts of segmented scientific abstracts with minimal human involvement. Thus, we focus on developing an unsupervised framework for scientific abstract segmentation.
To this end, we propose an explorative unsupervised framework for the task. Given a set of abstracts, we want to determine how each abstract should be split into a premise segment and a conclusion segment. Combining the premises from all abstracts gives us a premise space, and similarly, we obtain a conclusion space. We hypothesize that the best segmentation for each abstract can be found when the **N**ormalized **M**utual **I**nformation (NMI) between the conclusion space and premise space is maximized. To approach the maximal NMI, we use an exhaustive greedy approach that iterates over all abstracts and determines the best segmentation for each abstract. To reduce the search space at each step, we first stitch the start and end of each abstract together to form a cycle, then select two segmentation boundaries with constraints based on our prior knowledge (see Figure [\[fig:cycle\]](#fig:cycle){reference-type="ref" reference="fig:cycle"}). We name our approach the **Greedy** **C**ycled **A**bstract **S**egmentation (GreedyCAS) framework.
To test our proposed approach, we create two datasets. One dataset comprises non-structured abstracts with human-annotated conclusion sentences. The other dataset contains structured abstracts in which conclusion sentences are explicitly indicated.
Our main **contributions** are as follows:
- We propose GreedyCAS, an unsupervised approach for scientific abstract segmentation that optimizes NMI.
- On the dataset of non-structured abstracts, we show that GreedyCAS achieves promising segmentation results.
- We find a strong correlation between NMI and our evaluation metrics, which shows that NMI is indicative of good segmentations.
# Method
We formulate the task of segmenting scientific abstracts as follows. Given an abstract $A = (s_i)_{i=1}^n$ containing $n$ sentences, we define $G^A = \{g_j^A\}_{j=1}^m$ as the space of all $m$ possible segmentations of $A$. Each segmentation $g_j^A = (P_j^A, C_j^A)$ contains a premise segment $P_j^A$ and a conclusion segment $C_j^A$ whose segmentation boundaries are uniquely determined by two indices $\alpha^{A}_j, \xi^{A}_j \in \mathbb{Z}_{1:n}$ (range of integers from $1$ to $n$): $$\begin{align*}
C_j^A &= \{ s_i \in A \mid \alpha^{A}_j \leq i \leq \xi^{A}_j \}, \\
P_j^A &= \{ s_i \in A \mid s_i \notin C_j^A \}.
\end{align*}$$
For a corpus $\mathbb{A} = \{ A_i \}_{i \in \mathbb{Z}_{1:k}}$ of $k$ abstracts, we would have a set $\mathbb{G} = \{ G^{A_i} \}_{i \in \mathbb{Z}_{1:k}}$ of $m^k$ possible segmentations. Searching for the best segmentation of $\mathbb{A}$ within $\mathbb{G}$, as measured by an optimization objective, involves exhaustively enumerating $m^k$ segmentations, which is infeasible under a limited computational budget. Therefore, in this work, we concentrate on greedily reaching a reasonably good segmentation that is a tight lower bound of the actual global optimum.
To reduce the search space for each abstract, we make use of our prior belief that conclusion sentences tend to be located at the start or the end of abstracts. This allows us to reshape each abstract as a cycle of sentences in which the end of the abstract meets its beginning and consider six (instead of $m$) possible segmentations per cycled abstract based on the following criteria: (1) each abstract contains at most three conclusion sentences; (2) conclusion sentences are located at the stitching point (start or end position of the abstract) in the cycled abstract; (3) conclusion sentences are fenced by the segmentation boundaries $\alpha$ and $\xi$.
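
A small Python sketch of this candidate enumeration, under one plausible reading of the criteria (the conclusion segment is the first one to three or the last one to three sentences, both of which touch the stitching point):

```python
def candidate_segmentations(abstract):
    """Return (conclusion, premise) pairs for a cycled abstract (list of sentences)."""
    n = len(abstract)
    candidates = []
    for length in (1, 2, 3):                 # at most three conclusion sentences
        candidates.append((abstract[n - length:], abstract[:n - length]))  # tail
        candidates.append((abstract[:length], abstract[length:]))          # head
    return candidates                        # six candidates per abstract
```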
Table [\[tab:config-bound\]](#tab:config-bound){reference-type="ref" reference="tab:config-bound"} demonstrates one example cycled abstract with seven sentences. For each sentence in the cycled abstract, if the sentence is the final sentence within the current segment, then we mark the sentence as a segmentation boundary.
To fully utilize the power of parallel computing, we use multi-threading[^2] for our greedy algorithm in our work to compute the optimization objective for different possible segmentations.
Our next step is to choose an optimization objective for the greedy search. Inspired by the previous work on text summarization [@padmakumar2021unsupervised] and birdsong analysis [@sainburg2019parallels], we explore using mutual information as the optimization objective. As a probabilistic measure, mutual information $I(X; Y)$ indicates the absolute reduction of information uncertainty in bits for a random variable $X$ after observing the other correlated random variable $Y$; therefore, $I(X; Y)=0$ if and only if $X$ and $Y$ are independent. In our case, we aim to know how much uncertainty of observing the conclusions can be reduced with the presence of the premises.
We denote $\mathbb{C} = \{ C_{j_i}^{A_i} \}_{i \in \mathbb{Z}_{1:k}}$ as one possible *conclusion space* spanned by all conclusion segments from the $k$ abstracts, characterized by the segmentation boundaries $( j_i )_{i=1}^k$, and $\mathbb{P} = \{ P_{j_i}^{A_i} \}_{i \in \mathbb{Z}_{1:k}}$ being one possible *premise space* obtained in the same way. Note that the segmentation number $j_i$ can be different for different abstracts $A_i$. Now the task can be formulated as follows: given the $k$ abstracts in $\mathbb{A}$, search for the particular premise space $\mathbb{P}$ and conclusion space $\mathbb{C}$ in which the mutual information $I(\mathbb{P}; \mathbb{C})$ is maximized.
More formally, we compute $I(\mathbb{P}; \mathbb{C})$ as follows: $$I(\mathbb{P}; \mathbb{C}) = \sum_{A_i \in \mathbb{A}} \sum_{w_p \in P_{j_i}^{A_i}} \sum_{w_c \in C_{j_i}^{A_i}} p(w_p; w_c) \log \frac{p(w_p; w_c)}{p(w_p)p(w_c)}$$ where $w_p$ and $w_c$ are unigram tokens in the $i$-th premise segment $P_{j_i}^{A_i}$ and the $i$-th conclusion segment $C_{j_i}^{A_i}$, respectively. $p(w_p; w_c)$ indicates the joint probability of the premise word $w_p$ appearing in the premise segment $P_{j_i}^{A_i}$ and the conclusion word $w_c$ appearing in the conclusion segment $C_{j_i}^{A_i}$, and $p(w_p)$ and $p(w_c)$ the marginal probabilities. Applying language modelling statistics, we compute the marginal probabilities as follows: $$\begin{align*}
p(w_p) & = \frac{c(w_p, \mathbb{P})}{\sum_{w_p'} c(w_p', \mathbb{P})}, \\
p(w_c) & = \frac{c(w_c, \mathbb{C})}{\sum_{w_c'} c(w_c', \mathbb{C})}
\end{align*}$$ where $c(w, \mathbb{P})$ denotes the number of occurrences of $w$ within the tokenized premise segments in $\mathbb{P}$ and $w_p'$ is a token from the premise segment of any abstract. The terms $c(w, \mathbb{C})$ and $w_c'$ are defined analogously.
The joint probability is then computed as $$p(w_p; w_c) = \frac{\sum_{i=1}^k c\bigl( w_p, P_{j_i}^{A_i} \bigr) c\bigl( w_c, C_{j_i}^{A_i} \bigr)}{\sum_{(w_p', w_c')} c(w_p', \mathbb{P}) c(w_c', \mathbb{C})}.$$
Mutual information is an unbounded measure that increases with the size of the abstract corpus $\mathbb{A}$ and is thus not comparable across different $\mathbb{P}$ and $\mathbb{C}$ [@poole2019variational]; therefore, we use a normalizing scalar to map $I(\mathbb{P}; \mathbb{C})$ onto the interval $[0,1]$ and use Normalized Mutual Information (NMI) as the final optimization objective.
Taken from @kvaalseth2017normalized, we compute NMI as follows: $$\mathrm{NMI}(\mathbb{P}; \mathbb{C}) = \frac{I(\mathbb{P}; \mathbb{C})}{\mathcal{U}_a}$$ where $\mathcal{U}_a$ denotes the non-decreasing theoretical upper bound of $I(\mathbb{P}; \mathbb{C})$ and is parametrized by the $a$-order arithmetic (power) mean $$\mathcal{U}_a = \left( \frac{\mathcal{U}_{\mathbb{P}}^a + \mathcal{U}_{\mathbb{C}}^a}{2} \right)^{1/a}.$$ Here, we have $$\mathcal{U}_{\mathbb{P}} = - \sum_{w_p} p(w_p) \log p(w_p) = H(\mathbb{P})$$ and $$\mathcal{U}_{\mathbb{C}} = -\sum_{w_c} p(w_c) \log p(w_c) = H(\mathbb{C}),$$ essentially being the entropy of the premise space and the conclusion space, respectively. For the least upper bound ($a=-\infty$), we have $$\mathcal{U}_{-\infty} = \lim_{a \rightarrow -\infty } \mathcal{U}_a = \min \{ \mathcal{U}_{\mathbb{P}}, \mathcal{U}_{\mathbb{C}} \}.$$ In this work, we use $\mathcal{U}_{-\infty}$ to normalize $I(\mathbb{P}; \mathbb{C})$ to ensure that the maximal attainable NMI value is 1. This brings us the benefit of having the NMI scores comparable for different corpus sizes $k$.
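
A self-contained sketch of the NMI estimation from token counts; the joint counts are normalized so they sum to one (a simplification of the denominator above), and segments are assumed to be pre-tokenized word lists.

```python
from collections import Counter
from itertools import product
from math import log

def nmi(premises, conclusions):
    """premises/conclusions: parallel lists of token lists, one pair per abstract."""
    cp, cc, joint = Counter(), Counter(), Counter()
    for p_seg, c_seg in zip(premises, conclusions):
        cp.update(p_seg)
        cc.update(c_seg)
        sp, sc = Counter(p_seg), Counter(c_seg)
        for wp, wc in product(sp, sc):       # per-abstract co-occurrence counts
            joint[wp, wc] += sp[wp] * sc[wc]
    n_p, n_c, n_j = sum(cp.values()), sum(cc.values()), sum(joint.values())
    mi = sum((v / n_j) * log((v / n_j) / ((cp[wp] / n_p) * (cc[wc] / n_c)))
             for (wp, wc), v in joint.items())
    entropy = lambda cnt, n: -sum((v / n) * log(v / n) for v in cnt.values())
    return mi / min(entropy(cp, n_p), entropy(cc, n_c))   # U_{-inf} normalizer
```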
We now introduce our GreedyCAS approach to segment abstracts of scientific papers. GreedyCAS performs an exhaustive search, where we first explore the best segmentation of one particular abstract that maximizes $\text{NMI}(\mathbb{P};\mathbb{C})$, then iterate over all abstracts for the same purpose.
Algorithm [\[alg:greedy-base\]](#alg:greedy-base){reference-type="ref" reference="alg:greedy-base"} describes the basic approach *GreedyCAS-base*. Given the input abstracts $\mathbb{A}$, the algorithm greedily searches for the segmentation that leads to the maximal $\text{NMI}(\mathbb{P};\mathbb{C})$. The output is the optimized segmentation $\mathbb{G}^*$ that contains the best segmentation for individual abstracts.
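
A compact sketch of that greedy sweep, reusing the `candidate_segmentations` and `nmi` helpers sketched above (the initialization and sweep order are illustrative assumptions):

```python
def greedy_cas_base(abstracts):
    """abstracts: list of abstracts, each a list of sentences (token lists).
    Returns one (conclusion, premise) pair per abstract, greedily maximizing NMI."""
    best = [candidate_segmentations(a)[0] for a in abstracts]
    for i, a in enumerate(abstracts):
        def score(cand):
            trial = best[:i] + [cand] + best[i + 1:]
            prems = [sum(p, []) for _, p in trial]   # flatten sentences to tokens
            cons = [sum(c, []) for c, _ in trial]
            return nmi(prems, cons)
        best[i] = max(candidate_segmentations(a), key=score)
    return best
```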
Algorithm [\[alg:greedy-plus\]](#alg:greedy-plus){reference-type="ref" reference="alg:greedy-plus"} illustrates the advanced approach *GreedyCAS-NN*, where we first split the abstract corpus $\mathbb{A}$ into a series of chunks; then, for each seed abstract $A^s_{j_i}$ sampled from the current chunk, we perform embedding-based nearest neighbour (NN) search within the chunk to construct the batch comprising the most semantically relevant abstracts to $A^s_{j_i}$. The most semantically relevant abstracts of $A^s_{j_i}$ are fetched according to the cosine similarities calculated using their abstract embeddings. Finally, the same greedy process is performed to find the best segmentation for each abstract.
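
The nearest-neighbour batching can be sketched with plain cosine similarity over precomputed abstract embeddings (the chunk and batch sizes here are illustrative, not the paper's settings):

```python
import numpy as np

def nn_batches(embeddings, chunk_size=32, batch_size=8):
    """embeddings: (k, d) array of abstract embeddings; returns index batches."""
    unit = embeddings / np.linalg.norm(embeddings, axis=1, keepdims=True)
    batches = []
    for start in range(0, len(unit), chunk_size):
        chunk = np.arange(start, min(start + chunk_size, len(unit)))
        sims = unit[chunk] @ unit[chunk].T          # pairwise cosine similarities
        for row in range(len(chunk)):               # each abstract acts as a seed
            nearest = chunk[np.argsort(-sims[row])[:batch_size]]
            batches.append(nearest.tolist())        # seed itself is most similar
    return batches
```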
Since we calculate NMI scores using the lexical co-occurrences of the words, to have reliable estimations of the probabilities, and to obtain well-parsed scientific abstracts, we use the COVID-19 Open Research Dataset (CORD-19) released by @wang-etal-2020-cord. This dataset is a massive collection of scientific papers on SARS-CoV-2 coronavirus research published since March 2020. These papers share higher lexical commonality than biomedical papers in general due to the focused research interests in COVID-19.
To collect structured abstracts, we work with abstracts that are structured into *Background*, *Methods*, *Results*, and *Conclusion* categories. We trust the categorization of these structured abstracts from the CORD-19 corpus since scientific papers are peer-reviewed and revised over multiple rounds. We automatically aggregate the dataset **CAS-auto** from 697 structured scientific abstracts whose paper title contains the keyword "vaccine". Inspired by @shieh2019towards, we take sentences in the *Background*, *Methods*, and *Results* categories as premises, and sentences in the *Conclusion* category as conclusions.
In addition, we manually construct a dataset **CAS-human** of 196 non-structured abstracts from CORD-19, using the keyword "antigen" to filter out target abstracts. We then ask four human annotators to label the conclusion sentences within those abstracts. All human annotators were not instructed about the potential positions of conclusion sentences in scientific abstracts. By doing this, we minimize the bias introduced to the human annotators. To facilitate the annotation process and reduce the annotators' workload, we use the interactive data labelling platform Doccano[^3] [@doccano] for constructing the CAS-human dataset.
Table [\[tab:stats\]](#tab:stats){reference-type="ref" reference="tab:stats"} shows the overall statistics of our proposed datasets for scientific abstract segmentation. During data preprocessing, we intentionally remove stop words, numbers, and punctuation marks (except ".", which is essential for the sentence tokenizer[^4] we use) from the abstracts to obtain meaningful and reliable word counts. We also lowercase all tokens in both datasets for better computational efficiency.
Figure [\[fig:con-pos-stat\]](#fig:con-pos-stat){reference-type="ref" reference="fig:con-pos-stat"} shows the positions of the conclusion sentences in the CAS-human dataset as labelled by the human annotators. Similarly as shown in previous works [@fergadis2021argumentation; @achakulvisut2019claim], 95% of our annotated non-structured abstracts, the positions of conclusion sentences are consistent with our prior knowledge.
2305.15925/main_diagram/main_diagram.drawio
ADDED
@@ -0,0 +1,52 @@
<mxfile host="app.diagrams.net" modified="2023-05-16T00:28:10.091Z" agent="Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36" version="21.2.9" etag="8Pv-EnalbFM7NRTFoP59" type="google">
  <diagram name="Page-1" id="qLuds1qjT3MeigbLdOwr">
    <mxGraphModel dx="825" dy="455" grid="1" gridSize="10" guides="1" tooltips="1" connect="1" arrows="1" fold="1" page="0" pageScale="1" pageWidth="827" pageHeight="1169" math="0" shadow="0">
      <root>
        <mxCell id="0" />
        <mxCell id="1" parent="0" />
        <mxCell id="2" style="orthogonalLoop=1;jettySize=auto;html=1;exitX=0;exitY=0.5;exitDx=0;exitDy=0;entryX=1;entryY=0.5;entryDx=0;entryDy=0;strokeWidth=2;curved=1;strokeColor=#739E5A;fontSize=18;opacity=50;" edge="1" source="7" target="6" parent="1">
          <mxGeometry relative="1" as="geometry">
            <Array as="points">
              <mxPoint x="470" y="450" />
            </Array>
          </mxGeometry>
        </mxCell>
        <mxCell id="3" value="0.07" style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];fontSize=16;" vertex="1" connectable="0" parent="2">
          <mxGeometry x="-0.1244" y="4" relative="1" as="geometry">
            <mxPoint y="15" as="offset" />
          </mxGeometry>
        </mxCell>
        <mxCell id="4" style="orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;entryX=0;entryY=0.5;entryDx=0;entryDy=0;curved=1;sourcePerimeterSpacing=0;strokeWidth=2;strokeColor=#739E5A;opacity=70;" edge="1" source="6" target="7" parent="1">
          <mxGeometry relative="1" as="geometry">
            <Array as="points">
              <mxPoint x="470" y="420" />
            </Array>
          </mxGeometry>
        </mxCell>
        <mxCell id="5" value="0.16" style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];fontSize=16;" vertex="1" connectable="0" parent="4">
          <mxGeometry x="-0.17" y="-16" relative="1" as="geometry">
            <mxPoint y="-10" as="offset" />
          </mxGeometry>
        </mxCell>
        <mxCell id="6" value="ENSO" style="ellipse;whiteSpace=wrap;html=1;aspect=fixed;fontSize=20;" vertex="1" parent="1">
          <mxGeometry x="320" y="430" width="80" height="80" as="geometry" />
        </mxCell>
        <mxCell id="7" value="AIR" style="ellipse;whiteSpace=wrap;html=1;aspect=fixed;fontSize=20;" vertex="1" parent="1">
          <mxGeometry x="540" y="430" width="80" height="80" as="geometry" />
        </mxCell>
        <mxCell id="8" style="orthogonalLoop=1;jettySize=auto;html=1;exitX=0;exitY=0.5;exitDx=0;exitDy=0;entryX=1;entryY=0.5;entryDx=0;entryDy=0;curved=1;strokeWidth=2;strokeColor=#2570BA;" edge="1" source="7" target="6" parent="1">
          <mxGeometry relative="1" as="geometry">
            <Array as="points">
              <mxPoint x="470" y="510" />
            </Array>
          </mxGeometry>
        </mxCell>
        <mxCell id="9" value="0.24" style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];fontSize=16;" vertex="1" connectable="0" parent="8">
          <mxGeometry x="-0.18" y="-14" relative="1" as="geometry">
            <mxPoint y="14" as="offset" />
          </mxGeometry>
        </mxCell>
      </root>
    </mxGraphModel>
  </diagram>
</mxfile>