Eric03 committed
Commit 8a9c3a5 · verified · 1 Parent(s): a8d43fb

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. 2004.05773/main_diagram/main_diagram.drawio +1 -0
  2. 2004.05773/main_diagram/main_diagram.pdf +0 -0
  3. 2004.05773/paper_text/intro_method.md +99 -0
  4. 2005.01348/main_diagram/main_diagram.drawio +1 -0
  5. 2005.01348/main_diagram/main_diagram.pdf +0 -0
  6. 2005.01348/paper_text/intro_method.md +19 -0
  7. 2006.16309/main_diagram/main_diagram.drawio +1 -0
  8. 2006.16309/main_diagram/main_diagram.pdf +0 -0
  9. 2006.16309/paper_text/intro_method.md +64 -0
  10. 2012.10974/main_diagram/main_diagram.drawio +0 -0
  11. 2012.10974/paper_text/intro_method.md +95 -0
  12. 2105.02961/main_diagram/main_diagram.drawio +0 -0
  13. 2105.02961/paper_text/intro_method.md +27 -0
  14. 2105.09356/main_diagram/main_diagram.drawio +1 -0
  15. 2105.09356/main_diagram/main_diagram.pdf +0 -0
  16. 2105.09356/paper_text/intro_method.md +56 -0
  17. 2106.01354/main_diagram/main_diagram.drawio +1 -0
  18. 2106.01354/main_diagram/main_diagram.pdf +0 -0
  19. 2106.01354/paper_text/intro_method.md +104 -0
  20. 2106.09614/main_diagram/main_diagram.drawio +0 -0
  21. 2106.09614/paper_text/intro_method.md +121 -0
  22. 2106.10512/main_diagram/main_diagram.drawio +0 -0
  23. 2106.10512/paper_text/intro_method.md +144 -0
  24. 2108.11636/main_diagram/main_diagram.drawio +1 -0
  25. 2108.11636/paper_text/intro_method.md +56 -0
  26. 2108.13493/main_diagram/main_diagram.drawio +1 -0
  27. 2108.13493/paper_text/intro_method.md +182 -0
  28. 2110.04176/main_diagram/main_diagram.drawio +1 -0
  29. 2110.04176/main_diagram/main_diagram.pdf +0 -0
  30. 2110.04176/paper_text/intro_method.md +372 -0
  31. 2111.04670/main_diagram/main_diagram.drawio +1 -0
  32. 2111.04670/main_diagram/main_diagram.pdf +0 -0
  33. 2111.04670/paper_text/intro_method.md +27 -0
  34. 2111.15000/main_diagram/main_diagram.drawio +0 -0
  35. 2111.15000/paper_text/intro_method.md +191 -0
  36. 2112.05125/main_diagram/main_diagram.drawio +0 -0
  37. 2112.05125/paper_text/intro_method.md +82 -0
  38. 2112.11909/main_diagram/main_diagram.drawio +1 -0
  39. 2112.11909/paper_text/intro_method.md +140 -0
  40. 2201.07788/main_diagram/main_diagram.drawio +0 -0
  41. 2201.07788/paper_text/intro_method.md +84 -0
  42. 2203.05843/main_diagram/main_diagram.drawio +1 -0
  43. 2203.05843/main_diagram/main_diagram.pdf +0 -0
  44. 2203.05843/paper_text/intro_method.md +161 -0
  45. 2203.11284/main_diagram/main_diagram.drawio +0 -0
  46. 2203.11284/paper_text/intro_method.md +90 -0
  47. 2204.01613/main_diagram/main_diagram.drawio +1 -0
  48. 2204.01613/main_diagram/main_diagram.pdf +0 -0
  49. 2204.01613/paper_text/intro_method.md +78 -0
  50. 2204.10211/main_diagram/main_diagram.drawio +1 -0
2004.05773/main_diagram/main_diagram.drawio ADDED
@@ -0,0 +1 @@
1
+ <mxfile host="www.draw.io" modified="2019-12-05T08:08:05.990Z" agent="Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.97 Safari/537.36" etag="Xht7qL39BzMyQLEOSI1V" version="12.3.6" type="google" pages="1"><diagram id="ohCJ068hNRihqZjMbE14" name="Page-1">7V3fc5s4EP5r/BgPQogfj42ba28m6XTq3ty9dYjBNlcMPowb5/76EwY5RitPnStYK5y+1BZEhu9bab9drWBEJ6vdhyJcLx/yKE5HthXtRvT9yOb/GOX/VS3PdYvnWXXDokiiuom8NEyTf+OmUZy2TaJ40zqxzPO0TNbtxlmeZfGsbLWFRZE/tU+b52n7V9fhIgYN01mYwtY/k6hc1q2+7b20f4yTxVL8MnGD+sgqFCc3d7JZhlH+dNRE70Z0UuR5WX9a7SZxWoEncKn/7rcTRw8XVsRZec4fTL/8EZQfd39t1xv/+7fd33/Sb8831Km7+RGm2+aOm6stnwUERb7NorjqxRrR26dlUsbTdTirjj5x0nnbslyl/BvhH+d5VjYskoB/D9NkkfEvM36VccEbmt+LizLenbwTcsCHG1acr+KyeOanNH/gNog2JkVoA/nTC0Ge1ZyzPCLHdprGsDGKxaHrF9z4hwa6V8BILPIaHMnrcPRUOM6TNJ3kaV7sO6fz+dyezXj7pizy7/HRkch9dJnbDfKehHxgAeRdX4E8sXtDHuD+e7be8p99N7LdcFWhmj1u1vvbt2ZpmKxG1bVU34ptmmSL/dyxWnEANoAxDlTZpqWNbpZnsURF0yToSuN51UMFesInlXdN8yqJoupHlFbQHm+yIXTA4g2TaGQM0HhgrDWAWE8s2pDGr/l33vAJcMKn0XX1Mc3z9X2ySsoOxtKmOWh1gy9xiIQvHCZMAS/ta5CIyeh4dmK307vPI/a+a4CVk37XAAcUGcBUBfDkfmoowLbDcAFMXQDwHZ/g0wq7zTrMWgi7/2wrUbVH7aZGhnsDi3jr3R4ecZx/WjT/1x1tH3+pHyI64re476vdfdW6v1TR3JXOUs5pHViB33YTthA52ozAM8AIjibWQRmDPOfqtwbfDGsQXmBQ1iA7CP3WECisYWKPbh10NjE0N2EHHi5bEGF+Kxj/iRHscbzZ1EBW9GV5sQrTOm48l1nJ4gCTl7iGrqz0YWBWSlmAzEphzIvSfw1SzTiEILMGGKKjtIZBqhnHRaZtHZhPEL7lTdNcwiIYYWNkCteBSzW4beJ+aDbhYdO5zAyfMUgF4VJsCqKLFKnbkTWc7EezgnD7sgbfwWUNHsyO1StqBIBpwIqalInWvhzhwWzTeDwGyP76+vEB0I5WkJ1u+CDEbRPiQkIcBSG9lQH4Z9Rf1NiLKh5bIqYZBKvdoipkGs/T/Gm2DItyHGZZXoZlkmff7BFY2U/Dxzj9nG+S6gTeXNQ3e7vOk4q5ux/7wgL1qCmafvn3oKuVUdJWrT5c2VfxEvTGCwxrJ3URBvqh0lGxxaGQ4rkNtbaRAkPL2jE8mOgY5Kyeds/gKwrqTC61kBNl+gGGQY/RpRZy7kk/wDCOMFg5yotT+uGFRQxXJR0plexdu3SEkRI09CuQjpQik44wxvoiKnan/J7GyikJ37jpSEfaUoZBoSOJaiLrbdyICwB+4t5EP8E8T0r4E8cfwyFwUV8RKKJak8WkSy2EIMMQ1WhB6foUIcinok4jRSVcHSROoB1iGHlelbBkjMikMBUpFxWXgSJavUZxyZjdokaImZ+JS9oXLzDIBeISMoVv7HQkLh0pStYvLk0o9f80sCVt4sjyTPsyZgCD86tyah6R89u+XodGxLR4xMj7ZFMm6S2/TwOI6WjGpPJ2mJexo48ahchGN2kOrVLQ8+U509OcYSYWjATw2cFQ68LAPjnxQBB95gBjEJTmgK+0XH7Ghj+L1c/YePSZw3rae43AgBQrgkhrjofmXcDeOv3GACOzn1pBJxvbvr5trkNrpmBznX4NpKh/RTdbDVUDgd112s1BPE5O4cLeHJmmDVXaXZmw0s6Mou/dE7p2VPW1awIuuiMwCpjfenMcl9tUBTLQ+g3ChKzaW/h8uvwBgQmZkJAb3moWkc1Ad4nGYXfOtS5nHUbCITDQvZxFYG7rqhiRq34PWy31MWJqgXxHK4uwjgkBJ6Yuw3fEiWdLvkSEjvoYufIdyHJhhP4xYsMU01UxIu8Jt0UkrY8RmN8xg5GualQo8CTa5y0bplfM4KSjUaLw7vo5URbnx1VYerd6jKMoyRZX+3IE2zqvVNl2eqNHVUNeB+XV/Z8d9IOIf8JP4iRuw1Ri+hDx1/2fiPixGQDryQC8Mw2gv/EJtR4gQ+wT2HAG0/hd9eYjDkeUFPGsKeXf5Nvq8l+TmmGQKz0lMS8bOQQpAczpiX5bqRyvL1IoFBfXRYorarMbSqgFFfiFKYHaAlDyv1/CpIC9AxA9CUMBzvELlTxbMdn0tTGGKN60oc2uuwDYas/mVPG4gYtaqa14Tp/JADuyCesHWFWhZi7A0PkhgPgMRZIme60HdzeqUL4ooocC+cOsC1chiErh2X3hyWDuyCQ8pUe6IMDzDHGGGE8XHZ6K95sZhGdgYcPzjNeu4sXTlnLvCPA8Y5s9YjwdbHiKkMNQPD2GDU+j50+KTi95Rs+flKLD0+j5k7oEG55nxPiI8USnl7wzQnq8eDo2OjyNjt8dx0OGp290/O54LjY8jY7fmYUOT6P1PEOXX1I8RN8kPF0bG55G63kWoMPTaD3vEnlVVDueRut5F59eMlrPux46PM94hj1ePD10+SXFs+dNwhOdXlI8m/hjEkX7EtP78DkuALrYigt7qi6lDlzKv2xxIT0jdB10HZtcwGIHcLRcto7NgrMPvu3CbzvOT749vL/95vxrkVc0HI59qJ4K/pBHcXXGfw==</diagram></mxfile>
2004.05773/main_diagram/main_diagram.pdf ADDED
Binary file (14.9 kB).
 
2004.05773/paper_text/intro_method.md ADDED
@@ -0,0 +1,99 @@
1
+ # Introduction
2
+
3
+ When a potentially viral news item is rapidly or indiscriminately published by a news outlet, the responsibility of verifying the truthfulness of the item is often passed on to the audience. To alleviate this problem, independent teams of professional fact checkers manually verify the veracity and credibility of common or particularly check-worthy statements circulating the web. However, these teams have limited resources to perform manual fact checks, thus creating a need for automating the fact checking process.
4
+
5
+ :::: center
6
+ ::: tabular
7
+ **Claim**: The last major oil spill from a drilling accident in America happened over 40 years ago in 1969.\
8
+ **Ruling Comments**: (\...) [The last major oil spill from a drilling accident in America happened over 40 years ago in 1969.]{.mark}\
9
+ (\...) The largest in volume was the Santa Barbara spill of 1969 referenced by Murdock and Johnson, in which an estimated 100,000 barrels of oil spilled into the Pacific Ocean, according to the API. [The Santa Barbara spill was so big it ranked seventh among the 10 largest oil spills caused by marine well blowouts in the world, the report states.]{.mark} Two other U.S. spills, both in 1970, rank eighth and 10th. [Fourteen marine blowouts have taken place in the U.S. between 1969 and 2007.]{.mark} Six of them took place after 1990 and spilled a total of nearly 13,700 barrels.\
10
+ (\...) We interviewed three scientists who said that the impact of a spill has little to do with its volume. [Scientists have proven that spills far smaller than Santa Barbara's have been devastating.]{.mark}\
11
+ **Justification**: While the nation's largest oil well blowout did take place in 1969, it's not factually correct to call it the "last major oil spill\". First of all, two of the largest blowouts in the world took place in the U.S. the following year. More importantly, experts agree that spills far smaller in volume than the 1969 disaster have been devastating. From a scientific perspective, Johnson's decision to single out the 1969 blowout as the last "major\" one makes no sense.\
12
+
13
+ **Ruling**: Half-True\
14
+ :::
15
+ ::::
16
+
17
+ The current research landscape in automated fact checking is comprised of systems that estimate the veracity of claims based on available metadata and evidence pages. Datasets like LIAR [@wang2017liar] and the multi-domain dataset MultiFC [@augenstein-etal-2019-multifc] provide real-world benchmarks for evaluation. There are also artificial datasets of a larger scale, e.g., the FEVER [@Thorne18Fever] dataset based on Wikipedia articles. As evident from the effectiveness of state-of-the-art methods for both real-world -- 0.492 macro F1 score [@augenstein-etal-2019-multifc], and artificial data -- 68.46 FEVER score (label accuracy conditioned on evidence provided for 'supported' and 'refuted' claims) [@stammbach-neumann-2019-team], the task of automating fact checking remains a significant and poignant research challenge.
18
+
19
+ A prevalent component of existing fact checking systems is a stance detection or textual entailment model that predicts whether a piece of evidence contradicts or supports a claim [@Ma:2018:DRS:3184558.3188729; @mohtarami-etal-2018-automatic; @Xu2019AdversarialDA]. Existing research, however, rarely attempts to directly optimise the selection of relevant evidence, i.e., the self-sufficient explanation for predicting the veracity label [@Thorne18Fever; @stammbach-neumann-2019-team]. On the other hand, @alhindi-etal-2018-evidence have reported a significant performance improvement of over 10% macro F1 score when the system is provided with a short human explanation of the veracity label. Still, there are no attempts at automatically producing explanations, and automating the most elaborate part of the process - producing the *justification* for the veracity prediction - is an understudied problem.
20
+
21
+ In the field of NLP as a whole, both explainability and interpretability methods have gained importance recently, because most state-of-the-art models are large, neural black-box models. Interpretability, on one hand, provides an overview of the inner workings of a trained model such that a user could, in principle, follow the same reasoning to come up with predictions for new instances. However, with the increasing number of neural units in published state-of-the-art models, it becomes infeasible for users to track all decisions being made by the models. Explainability, on the other hand, deals with providing local explanations about single data points that suggest the most salient areas from the input or are generated textual explanations for a particular prediction.
22
+
23
+ Saliency explanations have been studied extensively [@Adebayo:2018:SCS:3327546.3327621; @arras-etal-2019-evaluating; @poerner-etal-2018-evaluating], however, they only uncover regions with high contributions for the final prediction, while the reasoning process still remains behind the scenes. An alternative method explored in this paper is to generate textual explanations. In one of the few prior studies on this, the authors find that feeding generated explanations about multiple choice question answers to the answer predicting system improved QA performance [@rajani-etal-2019-explain].
24
+
25
+ Inspired by this, we research how to generate explanations for veracity prediction. We frame this as a summarisation task, where, provided with elaborate fact checking reports, later referred to as *ruling comments*, the model has to generate *veracity explanations* close to the human justifications as in the example in Table [\[tab:Example\]](#tab:Example){reference-type="ref" reference="tab:Example"}. We then explore the benefits of training a joint model that learns to generate veracity explanations while also predicting the veracity of a claim.\
26
+ In summary, our **contributions** are as follows:
27
+
28
+ 1. We present the first study on generating veracity explanations, showing that they can successfully describe the reasons behind a veracity prediction.
29
+
30
+ 2. We find that the performance of a veracity classification system can leverage information from the elaborate ruling comments, and can be further improved by training veracity prediction and veracity explanation jointly.
31
+
32
+ 3. We show that optimising the joint objective of veracity prediction and veracity explanation produces explanations that achieve better coverage and overall quality and serve better at explaining the correct veracity label than explanations learned solely to mimic human justifications.
33
+
34
+ Existing fact checking websites publish claim veracity verdicts along with ruling comments to support the verdicts. Most ruling comments span over long pages and contain redundancies, making them hard to follow. Textual explanations, by contrast, are succinct and provide the main arguments behind the decision. PolitiFact [^1] provides a summary of a claim's ruling comments that summarises the whole explanation in just a few sentences.
35
+
36
+ We use the PolitiFact-based dataset LIAR-PLUS [@alhindi-etal-2018-evidence], which contains 12,836 statements with their veracity justifications. The justifications are automatically extracted from the long ruling comments, as their location is clearly indicated at the end of the ruling comments. Any sentences with words indicating the label, which @alhindi-etal-2018-evidence select to be identical or similar to the label, are removed. We follow the same procedure to also extract the ruling comments without the summary at hand.
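To make this filtering step concrete, here is a minimal sketch of dropping label-indicative sentences before training; the word list is a hypothetical stand-in, not the exact list used by @alhindi-etal-2018-evidence.

```python
# Hypothetical list of label-indicative words; the exact list is not reproduced here.
LABEL_WORDS = ("true", "false", "half-true", "mostly-true", "barely-true", "pants-on-fire")

def strip_label_sentences(sentences):
    """Drop ruling-comment sentences containing words that give away the veracity label."""
    return [s for s in sentences if not any(w in s.lower() for w in LABEL_WORDS)]
```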
37
+
38
+ We remove instances that contain fewer than three sentences in the ruling comments as they indicate short veracity reports, where no summary is present. The final dataset consists of 10,146 training, 1,278 validation, and 1,255 test data points. A claim's ruling comments in the dataset span over 39 sentences or 904 words on average, while the justification fits in four sentences or 89 words on average.
39
+
40
+ # Method
41
+
42
+ We now describe the models we employ: (1) an explanation extraction model and (2) a veracity prediction model, each trained separately, as well as (3) the joint model trained to optimise both objectives.
43
+
44
+ The models are based on DistilBERT [@sanh2019distilbert], which is a reduced version of BERT [@devlin2019bert] performing on par with it as reported by the authors. For each of the models described below, we take the version of DistilBERT that is pre-trained with a language-modelling objective and further fine-tune its embeddings for the specific task at hand.
45
+
46
+ <figure id="figure:separateModels" data-latex-placement="t">
47
+ <img src="explfc.png" />
48
+ <figcaption>Architecture of the <em>Explanation</em> (left) and <em>Fact-Checking</em> (right) models that optimise separate objectives.</figcaption>
49
+ </figure>
50
+
51
+ Our explanation model, shown in Figure [1](#figure:separateModels){reference-type="ref" reference="figure:separateModels"} (left) is inspired by the recent success of utilising the transformer model architecture for extractive summarisation [@liu-lapata-2019-text]. It learns to maximize the similarity of the extracted explanation with the human justification.
52
+
53
+ We start by greedily selecting the top $k$ sentences from each claim's ruling comments that achieve the highest ROUGE-2 F1 score when compared to the gold justification. We choose $k = 4$, as that is the average number of sentences in veracity justifications. The selected sentences, referred to as oracles, serve as positive gold labels - $\mathbf{y}^E \in \{0,1\}^N$, where $N$ is the total number of sentences present in the ruling comments. Appendix [\[appendix:a\]](#appendix:a){reference-type="ref" reference="appendix:a"} provides an overview of the coverage that the extracted oracles achieve compared to the gold justification. Appendix [\[appendix:o\]](#appendix:o){reference-type="ref" reference="appendix:o"} further presents examples of the selected oracles, compared to the gold justification.
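A minimal sketch of this greedy oracle construction, using a self-contained bigram-overlap ROUGE-2 F1 instead of an external ROUGE implementation; the $k = 4$ default mirrors the description above, everything else is illustrative.

```python
from collections import Counter

def bigrams(text):
    toks = text.lower().split()
    return Counter(zip(toks, toks[1:]))

def rouge2_f1(candidate, reference):
    """Bigram-overlap ROUGE-2 F1 between two strings (simplified, unstemmed)."""
    c, r = bigrams(candidate), bigrams(reference)
    overlap = sum((c & r).values())
    if overlap == 0:
        return 0.0
    precision, recall = overlap / sum(c.values()), overlap / sum(r.values())
    return 2 * precision * recall / (precision + recall)

def greedy_oracle(sentences, justification, k=4):
    """Greedily pick the k sentences that most improve ROUGE-2 F1 against the gold justification."""
    selected = []
    for _ in range(min(k, len(sentences))):
        best, best_score = None, -1.0
        for i, sent in enumerate(sentences):
            if i in selected:
                continue
            candidate = " ".join(sentences[j] for j in selected + [i])
            score = rouge2_f1(candidate, justification)
            if score > best_score:
                best, best_score = i, score
        selected.append(best)
    # binary oracle labels y^E over the N ruling-comment sentences
    return [1 if i in selected else 0 for i in range(len(sentences))]
```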
54
+
55
+ At training time, we learn a function $f(X) = \mathbf{p}^E$, $\mathbf{p}^E \in \mathbb{R}^{1, N}$ that, based on the input $X$, the text of the claim and the ruling comments, predicts which sentences should be selected (0 or 1 for each) to constitute the explanation. At inference time, we select the top $n = 4$ sentences with the highest confidence scores.
56
+
57
+ Our extraction model, represented by function $f(X)$, takes the contextual representations produced by the last layer of DistilBERT and feeds them into a feed-forward task-specific layer - $\mathbf{h} \in \mathbb{R}^{h}$. It is followed by the prediction layer $\mathbf{p}^{E} \in \mathbb{R}^{1,N}$ with sigmoid activation. The prediction is used to optimise the cross-entropy loss function $\mathcal{L}_{E}=\mathcal{H}(\mathbf{p}^{E}, \mathbf{y}^{E})$.
58
+
59
+ For the veracity prediction model, shown in Figure [1](#figure:separateModels){reference-type="ref" reference="figure:separateModels"} (right), we learn a function $g(X) = \mathbf{p}^F$ that, based on the input X, predicts the veracity of the claim $\mathbf{y}^{F} \in Y_{F}$, $Y_F =$ *{true, false, half-true, barely-true, mostly-true, pants-on-fire}*.
60
+
61
+ The function $g(X)$ takes the contextual token representations from the last layer of DistilBERT and feeds them to a task-specific feed-forward layer $\mathbf{h} \in \mathbb{R}^{h}$. It is followed by the prediction layer with a softmax activation $\mathbf{p}^{F} \in \mathbb{R}^{6}$. We use the prediction to optimise a cross-entropy loss function $\mathcal{L}_{F}= \mathcal{H}(\mathbf{p}^{F}, \mathbf{y}^{F})$.
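The two separate heads can be sketched as below, assuming sentence-level and claim-level vectors have already been pooled from DistilBERT's last layer; module names, pooling, and the toy tensors are illustrative, not the authors' released code.

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class ExplanationHead(nn.Module):
    """Sentence-selection head: one probability per ruling-comment sentence."""
    def __init__(self, hidden=768):
        super().__init__()
        self.ff = nn.Linear(hidden, hidden)
        self.out = nn.Linear(hidden, 1)

    def forward(self, sent_reprs):                     # (N, hidden) pooled sentence vectors
        return torch.sigmoid(self.out(torch.relu(self.ff(sent_reprs)))).squeeze(-1)

class VeracityHead(nn.Module):
    """Veracity head: distribution over the six LIAR-PLUS labels."""
    def __init__(self, hidden=768, n_labels=6):
        super().__init__()
        self.ff = nn.Linear(hidden, hidden)
        self.out = nn.Linear(hidden, n_labels)

    def forward(self, claim_repr):                     # (hidden,) pooled claim+comments vector
        return torch.softmax(self.out(torch.relu(self.ff(claim_repr))), dim=-1)

# toy usage with random stand-ins for the DistilBERT representations
sent_reprs, claim_repr = torch.randn(40, 768), torch.randn(768)
p_e = ExplanationHead()(sent_reprs)                    # p^E, shape (40,)
p_f = VeracityHead()(claim_repr)                       # p^F, shape (6,)
y_e, y_f = torch.zeros(40), torch.tensor([2])          # oracle labels / gold veracity index
y_e[:4] = 1.0
loss_e = F.binary_cross_entropy(p_e, y_e)                          # L_E
loss_f = F.nll_loss(torch.log(p_f).unsqueeze(0), y_f)              # L_F
```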
62
+
63
+ <figure id="figure:jointmodel" data-latex-placement="t">
64
+ <img src="multitask.png" style="width:180pt" />
65
+ <figcaption>Architecture of the <em>Joint</em> model learning Explanation (E) and Fact-Checking (F) at the same time.</figcaption>
66
+ </figure>
67
+
68
+ Finally, we learn a function $h(X) = (\mathbf{p}^E, \mathbf{p}^F)$ that, given the input X - the text of the claim and the ruling comments, predicts both the veracity explanation $\mathbf{p}^E$ and the veracity label $\mathbf{p}^F$ of a claim. The model is shown in Figure [2](#figure:jointmodel){reference-type="ref" reference="figure:jointmodel"}. The function $h(X)$ takes the contextual embeddings $\mathbf{c}^E$ and $\mathbf{c}^F$ produced by the last layer of DistilBERT and feeds them into a cross-stitch layer [@misra2016cross; @ruder122019latent], which consists of two layers with two shared subspaces each - $\mathbf{h}_{E}^1$ and $\mathbf{h}_{E}^2$ for the explanation task and $\mathbf{h}_F^1$ and $\mathbf{h}_F^2$ for the veracity prediction task. In each of the two layers, there is one subspace for task-specific representations and one that learns cross-task representations. The subspaces and layers interact through $\alpha$ values, creating the linear combinations $\widetilde{h}^i_E$ and $\widetilde{h}^j_F$, where $i,j \in \{1,2\}$:
+ $$\begin{bmatrix} \widetilde{h}^i_E\\ \widetilde{h}^j_F \end{bmatrix} = \begin{bmatrix} \alpha_{EE} & \alpha_{EF}\\ \alpha_{FE} & \alpha_{FF} \end{bmatrix} \begin{bmatrix} {h^i_E}^T & {h^j_F}^T \end{bmatrix}$$
83
+
84
+ We further combine the resulting two subspaces for each task - $\widetilde{h}^i_E$ and $\widetilde{h}^j_F$ - with parameters $\beta$ to produce one representation per task:
+ $$\widetilde{h}^T_P = \begin{bmatrix} \beta_P^1\\ \beta_P^2 \end{bmatrix}^T \begin{bmatrix} \widetilde{h}^1_P & \widetilde{h}^2_P \end{bmatrix}^T$$
+ where $P \in \{E, F\}$ is the corresponding task.
96
+
97
+ Finally, we use the produced representation to predict $\mathbf{p}^{E}$ and $\mathbf{p}^{F}$, with feed-forward layers followed by sigmoid and softmax activations accordingly. We use the prediction to optimise the joint loss function $\mathcal{L}_{MT}= \gamma*\mathcal{H}(\mathbf{p}^{E}, \mathbf{y}^{E}) + \eta * \mathcal{H}(\mathbf{p}^{F}, \mathbf{y}^{F})$, where $\gamma$ and $\eta$ are used for weighted combination of the individual loss functions.
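A corresponding sketch of the weighted joint loss $\mathcal{L}_{MT}$ on toy predictions; the values of $\gamma$ and $\eta$ are assumptions for illustration.

```python
import torch
import torch.nn.functional as F

gamma, eta = 0.9, 0.1                                    # loss weights, tuned on validation data in practice
p_e = torch.rand(40)                                     # explanation selection probabilities p^E
y_e = (torch.rand(40) > 0.9).float()                     # oracle sentence labels y^E
p_f = torch.softmax(torch.randn(1, 6), dim=-1)           # veracity distribution p^F
y_f = torch.tensor([2])                                  # gold veracity label index

loss_mt = gamma * F.binary_cross_entropy(p_e, y_e) + eta * F.nll_loss(torch.log(p_f), y_f)
```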
98
+
99
+ We first conduct an automatic evaluation of both the veracity prediction and veracity explanation models.
2005.01348/main_diagram/main_diagram.drawio ADDED
@@ -0,0 +1 @@
1
+ <mxfile host="www.draw.io" modified="2019-11-24T11:29:19.321Z" agent="Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36" version="12.2.9" etag="i90P-2FrEdTwGeM8nz54" type="device" pages="1"><diagram id="CiYovPnxwpekhhyCI0ww">7VlLb9swDP41wdbDhviZ5Nimr8MGDOiArUfFZmKtipXJch779ZMiKbZsp3VbJ2uHXBKLkkiKH0mRds8bz9c3DC2SrzQG0nP78brnXfZcd+SH4lcSNooQup4izBiOFckpCHf4D2hiX1NzHENmLeSUEo4XNjGiaQoRt2iIMbqyl00psaUu0AxqhLsIkTr1B455oqhDd1DQbwHPEiPZCUdqZo7MYn2SLEExXZVI3lXPGzNKuXqar8dApO2MXdS+6z2zO8UYpLzNBldtWCKS67P13JCIrRdTKjgIBflGnzr8nVMz8SnbYnIuFghp62JSPM3kPzoTU98T2J453UKRkzj9IFkSPJV/Cc4kfypnJxChPJOrK+IjSiizxPdcD1A4CcMy6aVqSgWVPGEiJVJNiB0rpPSrKzVFkS3rBjE0p2lcF4DP9gtIAC03r+P+GPsVoAfJPbg0aybs5ZbaK8e1eLoc1lvD8jkRBEc8ZpzRBxhrIFOagpSJCamQEMGzVAwJTCWHJTCORcida/Icx7EUcrFKMIe7hbLSSuQXQWM0T2OQPt3XB9JJwzXjazTHRKabWyBLkJx32ktJsN4bQM4uLEU6AzoHziRqekOgM5fOZLsUtSrywsAEe1LKCYaGdCqa7TgX0SoedMA2B69XD1752//ITeCdHQCfSFgGWGcIOeGBEfLCvg3RoA6R4xwIIn8fRLvsd4KoDpETHBGioAYAxOK210PKeEJnNEXkqqBWTFWs+ULpQmP2CzjfaAOinFMbUVhj/lNu/xzo0X1p5nKtOW8HGzNI43NZvBR4C8o1lgerpT33acTkKS28MpozffWY1MIRmwG3SC1QZUAQx0ub+2sgChuqlCpmhXEigrIMR7bBy+483JpO6PDT2FwO7gs45LDAYDvalIz8HKOWLei3tmDJ74MGtze01obWEr5RvC0xzN0VVDJjvxJOyiP0rnIVWWHk9O1b0HMqjJQVaowEYmhTWraQC7L2CjuhVd2KB8WxcKqdTVv52eD/8LP2kXocPwu9ip/5L/SzwKtUW8PD+FlN4WG3fjZs8LMuuq5JqeuC5q5LlIaYqcoj+4eNl1Bj81jrwuDUe516r+f1Xn5T7xUODlM1juohbPVecCrs5U00shOpf8zey/BtxGiXBE8w1WHyjtl/OU4NgVMDZvJLuawbtYa18wbMaXpP/A4rY+PDb6Y0DqoJsqsWLDhUC1ZNFR23YE7DS8336Gntg/VIPVjlFZvfVQ/mH6oHqyrccQ/mNLyafcTRdMJv9rI2lW7NW96Ma/iViK5e6m09Y+A9wWiPZzwNnhgWn0TV8uK7snf1Fw==</diagram></mxfile>
2005.01348/main_diagram/main_diagram.pdf ADDED
Binary file (17.5 kB).
 
2005.01348/paper_text/intro_method.md ADDED
@@ -0,0 +1,19 @@
1
+ # Introduction
2
+
3
+ Large-scale pre-trained language models have recently led to improvements across a range of natural language understanding (NLU) tasks [\(Devlin](#page-9-0) [et al.,](#page-9-0) [2019;](#page-9-0) [Radford et al.,](#page-10-0) [2019;](#page-10-0) [Yang et al.,](#page-11-0) [2019\)](#page-11-0), but there is some scepticism that benchmark leaderboards do not represent the full picture [\(Kaushik and Lipton,](#page-10-1) [2018;](#page-10-1) [Jumelet and Hup](#page-9-1)[kes,](#page-9-1) [2018;](#page-9-1) [Poliak et al.,](#page-10-2) [2018\)](#page-10-2). An open question is whether these models generalize beyond their training data samples.
4
+
5
+ In this paper, we examine how pre-trained language models generalize on the Winograd Schema Challenge (WSC).
6
+
7
+ Named after Terry Winograd, the WSC, in its current form, was proposed by [Levesque et al.](#page-10-3) [\(2012\)](#page-10-3) as an alternative to the Turing Test. The
8
+
9
+ <span id="page-0-0"></span>(a) The man couldn't lift his son because he was so heavy. / The man couldn't lift his son because he was so weak. (b) The men couldn't lift their sons because they were so heavy. / The men couldn't lift their sons because they were so weak.
10
+
11
+ Figure 1: An example pair from the Winograd Schema Challenge (a) and its perturbation (b). The pronoun resolves to one of the two referents, depending on the choice of the discriminatory segment. The perturbation in (b) pluralizes the referents and the antecedents.
12
+
13
+ task takes the form of a binary reading comprehension test where a statement with two referents and a pronoun (or a possessive adjective) is given, and the correct antecedent of the pronoun must be chosen. Examples are chosen carefully to have a preferred reading, based on semantic plausibility rather than co-occurrence statistics. WSC examples come in pairs that are distinguished only by a discriminatory segment that *flips* the correct referent, as shown in Figure [1a.](#page-0-0) [Levesque et al.](#page-10-3) define a set of qualifying criteria for instances and the pitfalls to be avoided when constructing examples (see §[3.2\)](#page-3-0). These combine to ensure an instance functions as a test of what they refer to as 'thinking' (or common sense reasoning).
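Purely to make the data layout concrete, a hypothetical representation of a schema pair (the field names are illustrative, not from the released dataset), using the example from Figure 1:

```python
from dataclasses import dataclass

@dataclass
class WinogradInstance:
    text: str                   # statement containing the pronoun
    pronoun: str
    candidates: tuple           # the two possible referents
    answer: str                 # correct antecedent
    discriminatory_segment: str # the word that flips the preferred reading

# original pair (a); its pluralized perturbation (b) would replace the referents and pronouns
pair_a = (
    WinogradInstance("The man couldn't lift his son because he was so heavy.",
                     "he", ("the man", "his son"), "his son", "heavy"),
    WinogradInstance("The man couldn't lift his son because he was so weak.",
                     "he", ("the man", "his son"), "the man", "weak"),
)
```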
14
+
15
+ Recent work has reported significant improvements on the WSC [\(Kocijan et al.,](#page-10-4) [2019;](#page-10-4) [Sakaguchi et al.,](#page-10-5) [2019\)](#page-10-5). As with many other NLU tasks, this improvement is primarily due to large-scale language model pre-training, followed by fine-tuning for the target task. We believe that further examination is warranted to determine whether these impressive results reflect a fundamental advance in reasoning ability, or whether our models have learned to simulate this ability in ways that do not generalize. In other words, do models learn accidental correlations in our datasets, or do they extract patterns that generalize in robust ways beyond the dataset samples?
16
+
17
+ In this paper, we conduct experiments to investigate this question. We define a set of lexical and syntactic variations and perturbations for the WSC examples and use altered examples (Figure [1b\)](#page-0-0) to test models that have recently reported improved results. These variations and perturbations are designed to highlight the robustness of human linguistic and reasoning abilities and to test models under these conditions.
18
+
19
+ **Contributions** We introduce a new Winograd Schema dataset for evaluating generalization across seven controlled linguistic perturbations.[1](#page-1-0) We use this dataset to compare human and language model sensitivity to those perturbations, finding marked differences in model performance. We present a detailed analysis of the behaviour of the language models and how they are affected by the perturbations. Finally, we investigate the effect of fine-tuning with large task-specific datasets, and present an error analysis for all models.
2006.16309/main_diagram/main_diagram.drawio ADDED
@@ -0,0 +1 @@
1
+ <mxfile host="www.draw.io" modified="2019-12-17T17:03:57.159Z" agent="Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36" version="12.4.2" etag="7Q3YXzqdZJU4Ci8xAeOU" type="google" pages="1"><diagram id="MimTu4lmE2ScTQbKooTq">5Vhbb5swFP41SNtLBOYS+tgk7fZSqVo0bXt04QSsGBwZpyH79TtO7BAw0aqpF7WTIsX+fHz7vnMBvHBetV8k3ZR3IgfuET9vvXDhERJEhHj65+f7IzKNDVBIlhujDliy32BA36BblkPTM1RCcMU2fTATdQ2Z6mFUSrHrm60E7++6oQU4wDKj3EV/sFyVRzSN/Q7/Cqwo7c6Bb0Yqao0N0JQ0F7szKLzxwrkUQh1bVTsHrsmzvBzn3V4YPR1MQq2eMsHw/kj51txtMbuHnFFzPLW3d5ZiW+egp/leONuVTMFyQzM9ukOVEStVxbEXYNOsClJBe/Fkwem+6CggKlByjyZ2QmooMj4SJqa/6xg/8VqesW3tqBG5OC3d8YANQ8U4LaFDyzdotuhgdeGRhONOsweJrUK3FlTRBtSbM5b4b8hY5DDm8IG+vtHNFYf2WkchXhvq3DQXGadNw7I+LSvG+VxwIQ9LhMEq8qcU8UZJsYbzkVscuT6siCf/aUg/dH7pziS23UV7PrjY217L1E+zq26fzcJeN0l37JzjKWwKCDodIR8kkEZsZQa9mFNUFqB6/vYErc+0jEektJgEThV77B9iTF+zw71guPHF4IvDgYsc72NmneeXwUIk7C8UpYOFjiw4Cx3c7XTtJ3lg7HjgXNQNFgoMVF/UfO9G7velJh3EBl30kreW0NJC1Gi9AcnwPCA79N5C5FUinFwNIjwdiXAy4hZD1v8lwhOH3yVwXV2ttyHb74LFcFhZyCuyOB1hsaK1wsTnuGfGtw2yMlp0xMqu8j5Ij6Zv6LrpSHEyhFo+49S/Q5NPUBecNeVnnaAlw8TQWFPc5GTtUI7EqD5n/QpVixoG5cxAlLOi1uUPidT6zDTNDJ81r81AxfJcbzMqUv8Z4xl0CpKBTmHs6hSN6ESeQacrR6eVFBUiwSS9OzzkowC00hevHxr9l8TrQwqXzVj2+TCqkIEqURK5qqQvpIp963JliSZk3U//B4n8IPi4UoSDREauXjFALLGuFPRBbHUpjv21VSGcpJdrxIfRI4riv4fG9KX0cN+fncpCkmj9v1aTePio9YLVBLvdd5Pj+0P39Sm8+QM=</diagram></mxfile>
2006.16309/main_diagram/main_diagram.pdf ADDED
Binary file (18 kB).
 
2006.16309/paper_text/intro_method.md ADDED
@@ -0,0 +1,64 @@
1
+ # Introduction
2
+
3
+ Multi-relational graphs, composed of entities (nodes) and edges representing semantic meaning, popularly known as Knowledge Graphs (KG) [\[25\]](#page-6-1), are gaining increasing industrial applications. For instance, the Google search engine uses the Google Knowledge Graph to facilitate linking semantic information from various websites in a unified view. Other applications of KGs include data governance, automatic fraud detection, and knowledge management. As a consequence, academic research on KGs both from the lens of machine learning and representation learning is gaining a lot of momentum. Research on machine learning on KGs identifies a diverse set of inference techniques that can be applied on KGs, including logical rules mining [\[13,](#page-5-0) [14\]](#page-5-1), semantic parsing [\[2,](#page-5-2) [12\]](#page-5-3), named entity disambiguation [\[8,](#page-5-4) [28\]](#page-6-2), and information extraction [\[3,](#page-5-5) [5\]](#page-5-6). Research on KG representation learning aims to build useful representations for entities and relations with high reliability, explainability, and reusability. Representation learning on KGs is a very active line of research, with numerous novel Knowledge Graph Embedding (KGE) algorithms being proposed recently, including TransE [\[4\]](#page-5-7), TransD [\[16\]](#page-5-8), TransH [\[25\]](#page-6-1), RESCAL [\[20\]](#page-5-9), DistMult [\[26\]](#page-6-3), HolE [\[19\]](#page-5-10), CrossE [\[27\]](#page-6-4), ComplEx [\[24\]](#page-5-11), and Analogy [\[17\]](#page-5-12). Simultaneously, in the related field of network and graph representation learning, several advances have been made in the development of accurate graph embedding methods, including Deepwalk [\[22\]](#page-5-13) and node2vec [\[10\]](#page-5-14).
4
+
5
+ Together with these advances in embedding learning methods, recent years have also witnessed anecdotal evidence suggesting that these methods amplify biases already present in the data [\[23\]](#page-5-15). Empirical investigations have also identified biases embedded in knowledge graph representations. For instance, a recent article by Janowicz et al. [\[15\]](#page-5-16) identified the existence of social biases in KGs. The existence of such biases is detrimental to the usability of the knowledge graph. This is especially true when KG applications such as search engines [\[25\]](#page-6-1) and knowledge management systems are penetrating the social spheres. Aside from a few exceptions [\[9\]](#page-5-17), research on the identification and mitigation of such social biases in KGs remains absent. The absence of coherent and useful debiasing frameworks for KGs is problematic, and could lead to detrimental societal consequences, in particular with respect to minorities. To tackle this problematic gap in the literature, we aim to characterize, investigate, and develop methods for mitigating social biases that arise from network and knowledge graph embedding algorithms.
6
+
7
+ <sup>∗</sup>The first three authors contributed equally to this research.
8
+
9
+ Our empirical exercise comprises two elements. First, we examine simple networks, with unlabelled relations, and identify the existence of what we call a popularity bias, i.e. a correlation between the popularity (degree) of the nodes and the link prediction accuracy of the embeddings. Research in recommendation systems has reported the presence of popularity biases in well-established ranking algorithms and ways to mitigate them. As network embeddings find use in downstream tasks like search and recommendations, it is important to study the presence of such biases and ways to mitigate them as well. Our findings suggest that structural information on low-degree nodes is captured more accurately than on high-degree nodes by popular network embedding algorithms such as Deepwalk [22] and node2vec [10]. Second, we intend to characterise and mitigate the inference bias that arises while training rules with classifiers operating in the KGE space [15]. As a result, we identify how some sensitive attributes, such as gender, are captured by popular KGE algorithms, such as TransE [4], TransD [16], and TransH [25]. Additionally, we find that the gender attribute is still captured in the embedding even when gender relations are explicitly removed from the graph.
10
+
11
+ To summarize, our findings suggest that sensitive attributes in KGs are not only represented by explicit relations bearing the name of such attributes, but rather they are embedded in the structure of the whole graph. An important implication of this finding is the necessity of fine-grained debiasing algorithms operating *on the embeddings*, instead of just removing the sensitive relations from the KG. As a useful solution, we develop a debiasing method based on adversarial learning that modifies the embeddings by filtering out sensitive information, while aiming to preserve all the other relevant information. With empirical examination, we show the applicability of our method in removing gender bias in KGEs for both high-degree and low-degree entities.
12
+
13
+ We present our method and findings of this **work in progress** in the following order. In Section 2.2.3 we introduce the embedding methods and the biases we aim to study. In Section 3, we present our approach for debiasing KGEs, followed by experimental results in Section 4.3. Conclusions and directions for future work are presented in Section 5.
14
+
15
+ In this section, we briefly describe two different types of graph bias, and present the embedding algorithms examined throughout the paper. First, we provide a brief overview on networks and KGEs.
16
+
17
+ A network contains a set of nodes N and edges $E \subseteq \{N \times N\}$ that encode relationships between the nodes. KGs contain labeled relationships between entities in the form of triples $\langle h, r, t \rangle$ , where h is the head entity, t is the tail entity, and r is the relation between them. An example of a KG triple is $\langle Albert\_Einstein, born\_in, Ulm \rangle$ . Embedding learning algorithms aim at learning real-valued representations of nodes, entities, and relations in some low-dimensional space. Specifically, network embeddings learn a vector $n_i \in \mathbb{R}^d$ for each node $i \in N$ in the network. The dimension of the embedding is represented by d. Similarly, KGEs learn embeddings for entities and relations. Often, the embeddings are learned by training on an objective function that maximizes the probability of true edges
18
+
19
+ and triples (those that exist in the training dataset), and they are evaluated by their performance on link prediction on the testing dataset.
20
+
21
+ We define popularity bias as the bias resulting from correlation between the degree of a node in a graph and the accuracy of link prediction of the embedding of the node. In the recommendation systems literature, it has been reported that such biases lead to promotion of blockbuster items to the detriment of long-tail items, many of which could be interesting to the users [21]. Since network embeddings are also increasingly used in search and recommendations, such biases could affect these downstream tasks and lead to lack of diversity and filter bubbles in users' online experiences.
22
+
23
+ To investigate whether network embeddings exhibit popularity bias, we examined the popular node2vec [10] method on the benchmark AstroPh dataset. The AstroPh dataset represents the network of collaborations between astrophysicists extracted from papers submitted to the e-print website arXiv. The nodes represent scientists, and an edge is present between two scientists if and only if they are listed as co-authors in at least one paper present in the repository. The network consists of 18,772 nodes and 198,110 edges.
24
+
25
+ Before describing node2vec, we briefly discuss the DeepWalk [22] algorithm, upon which node2vec is based. DeepWalk extracts latent representations from networks in the following way. First, the algorithm iteratively builds a corpus of random walks for each node. Each random walk has a fixed length, and the next node in the walk is chosen at random among neighbouring nodes of the current node. Importantly, the same fixed number of random walks are calculated for each node, regardless of their degree. Next, this corpus of random walks is fed into a *SkipGram* [18] model to learn the latent representations. The embeddings can then be used for downstream tasks, including link prediction and node classification.
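A minimal sketch of the random-walk corpus construction described above, on a toy adjacency list; the Skip-Gram training step itself is omitted.

```python
import random

def random_walks(adj, walks_per_node=10, walk_length=40, seed=0):
    """Build a DeepWalk-style corpus: a fixed number of uniform random walks per node."""
    rng = random.Random(seed)
    corpus = []
    for start in adj:
        for _ in range(walks_per_node):
            walk = [start]
            while len(walk) < walk_length and adj[walk[-1]]:
                walk.append(rng.choice(adj[walk[-1]]))   # next node chosen uniformly among neighbours
            corpus.append(walk)
    return corpus

# toy co-authorship graph as an adjacency list
adj = {0: [1, 2], 1: [0, 2], 2: [0, 1, 3], 3: [2]}
corpus = random_walks(adj, walks_per_node=2, walk_length=5)
# `corpus` would then be fed to a Skip-Gram model (e.g. gensim Word2Vec) to learn node embeddings
```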
26
+
27
+ The node2vec algorithm works in a very similar way to DeepWalk. The basic steps remain the same, i.e. the algorithm first builds a corpus of random walks for each node, and then this corpus is fed into a *SkipGram* model in order to learn the embeddings. The only difference between the two methods is the way in which the random walks are explored. Instead of being sampled uniformly from the current node's neighbourhood (as in Deepwalk), the random walk traversal in node2vec is done using a parametric set of transition probabilities. This parametric form allows for a fine-grained and balanced tuning between the extreme sampling scenarios of Breadth-First Search (BFS) and Depth-First Search (DFS).
28
+
29
+ KGEs might exhibit several societal biases, such as ethnicity, gender, religion, etc. We follow prior work in this area [7] to define the presence of such biases. We intend to explore how different attributes interact in KGEs and to remove sensitive attributes (e.g., gender, ethnicity) from the embedding while preserving all other information. In this preliminary work, we limit ourselves to the problem of gender bias and expect that our filtered embeddings do not correlate gender information with non-gender information. To this end, we treat gender as a sensitive attribute and perform
30
+
31
+ <span id="page-2-2"></span>![](_page_2_Figure_2.jpeg)
32
+
33
+ Figure 1: FAN model: the filter takes as input a vector and outputs its filtered version (ideally without the sensitive attribute). The discriminator tries to predict the sensitive attribute (in the figure it is assumed to be binary) from the filtered embedding, and ideally will reach an accuracy of 50% (random prediction).
34
+
35
+ occupation prediction (which in our case is posed as an unbalanced multiclass classification problem) by training a simple neural network operating in the embedding space. In this way, we can measure the interaction between the sensitive gender attribute and the non-sensitive occupation attribute, and use this information as evidence for the existence of bias.
36
+
37
+ Given its popular use and huge size, we adopt the *DBPedia* [1] dataset for our empirical investigation on KGEs. Based on scalability and simplicity of use, we focus our analysis on three popularly used KGE algorithms, namely *TransE* [4], *TransH* [25], and *TransD* [16]. These algorithms have increasing complexity, leading to more powerful and data-savvy embeddings, at the cost of more computationally-expensive training. For each of them we used the implementation provided by OpenKe [11]. These algorithms differ in the loss function used and in their number of parameters. We present a brief overview of the methods and their properties.
38
+
39
+ 2.2.1 TransE [4]. The basic principle behind TransE is the use of the translation operation to generate the embedding of a tail entity, given the embeddings for the head and the relation. It assigns one embedding to each node and one embedding to each relation. TransE uses minibatch stochastic gradient descent to minimize a loss function on the embeddings for real triples present in the graph, while doing negative sampling to generate false triples and maximizing their loss. The loss function $f_r(h,t) = \|\mathbf{h} + \mathbf{r} - \mathbf{t}\|$ is the euclidean distance between the embedding of the tail and the embedding of the head plus the embedding of the relation. The problem with this approach is evident in *many-to-1* relations, for example gender, because in this case to minimize the loss for all gender triples, all persons (which are different nodes in the graph) that have the same gender are forced to have representations that are close in the embedding space.
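A sketch of the TransE score $f_r(h,t) = \|\mathbf{h} + \mathbf{r} - \mathbf{t}\|$ with a margin-based loss and a single corrupted tail as the negative sample; dimensions, margin, and indices are illustrative.

```python
import torch

def transe_score(h, r, t):
    """f_r(h, t) = ||h + r - t||: small for true triples, large for corrupted ones."""
    return torch.norm(h + r - t, p=2, dim=-1)

d, margin = 50, 1.0
ent = torch.nn.Embedding(1000, d)     # entity embeddings
rel = torch.nn.Embedding(100, d)      # relation embeddings

# one positive triple <h, r, t> and one corrupted (negative) triple with a random tail
h, r, t = ent(torch.tensor([3])), rel(torch.tensor([7])), ent(torch.tensor([42]))
t_neg = ent(torch.tensor([981]))

loss = torch.clamp(margin + transe_score(h, r, t) - transe_score(h, r, t_neg), min=0).mean()
loss.backward()   # minibatch SGD would update ent/rel from gradients like this
```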
40
+
41
+ 2.2.2 TransH [25]. TransH overcomes the drawbacks of TransE by allowing an entity to have distinct representations when dealing with different relations, i.e. many-to-1 relations. In order to make this possible, the authors introduced to the TransE framework an additional relation-specific vector $\mathbf{w}_r$ to project the entities onto a hyperplane with this vector as normal vector. For the loss function, we calculate the projected head and tail as $\mathbf{h}_\perp = \mathbf{h} - \mathbf{w}_r^\top \mathbf{h} \, \mathbf{w}_r$ and $\mathbf{t}_\perp = \mathbf{t} - \mathbf{w}_r^\top \mathbf{t} \, \mathbf{w}_r$ . Then we calculate the loss as before as $f_r(h, t) = \|\mathbf{h}_\perp + \mathbf{r} - \mathbf{t}_\perp\|$ and apply SGD using negative sampling.
42
+
43
+ <span id="page-2-0"></span>2.2.3 TransD [16]. TransD works following the same principle as TransH. However, instead of using a projecting vector, it utilizes a projection matrix which can be decomposed as the identity matrix added to the outer product of two vectors, one that is relation-specific and another that is entity-specific. The projection matrices are calculated as follows: $\mathbf{M}_r^h = \mathbf{w}_r \mathbf{w}_h^T + \mathbf{I}$ and $\mathbf{M}_r^t = \mathbf{w}_r \mathbf{w}_t^T + \mathbf{I}$ . We then calculate $\mathbf{h}_\perp = \mathbf{M}_r^h \mathbf{h}$ and $\mathbf{t}_\perp = \mathbf{M}_r^t \mathbf{t}$ and the loss in the same way as we did for TransH.
44
+
45
+ Since both TransD and TransH capture *many-to-1* and *many-to-many* relations more effectively than TransE, we use TransH and TransD for our experiments.
46
+
47
+ As a first solution for debiasing knowledge graph embeddings, we developed an adversarial model which we call FAN (Filtering Adversarial Network). The model is an adversarial network composed of two players, a filter module $F_{\theta_f}: \mathbb{R}^d \to \mathbb{R}^d$ that aims at filtering out the information about the sensitive attribute from the input, and a discriminator module $D_{\theta_d}: \mathbb{R}^d \to [0,1]$ that aims at predicting the sensitive attribute from the output of the filter (see Figure 1 for an illustration). The objective of the combined module can be formulated using Equation 1.
48
+
49
+ <span id="page-2-1"></span>
+ $$\mathcal{L}(F_{\theta_f}, D_{\theta_d}) = \lambda \, \mathbb{E}_h \|F_{\theta_f}(h) - h\|_2^2 + \mathbb{E}_h \left[ y \cdot \log(D_{\theta_d}(F_{\theta_f}(h))) + (1 - y) \cdot \log(1 - D_{\theta_d}(F_{\theta_f}(h))) \right] \tag{1}$$
54
+
55
+ The parameter $\lambda$ is a weight that controls the importance of the first term with respect to the second, and y is the ground truth gender label of the example (a protected attribute). Observe that when we dissect the objective function, we have two distinct terms. The first term represents the reconstruction loss. The reconstruction loss term is differentiable with respect to the filter parameters $\theta_f$ and it is independent of the discriminator parameters $\theta_d$ . The goal is to keep this term approximately at zero in order to attain perfect preservation of the original information. The second term represents cross-entropy. Cross-entropy measures how accurately
56
+
57
+ the discriminator is able to predict the sensitive attribute from the filtered embedding.
58
+
59
+ We minimize the combined loss over $\theta_f$ and maximize the combined loss over $\theta_d$ during training. On the one hand, the filter aims at minimizing the reconstruction loss. On the other hand, the discriminator aims at minimizing the cross-entropy loss. Intuitively, the optimum saddle point is reached when the discriminator cannot predict the sensitive attribute from the filtered input. The second term of the loss forces the filter to remove the sensitive information from the input embedding, while the first term of the loss (the reconstruction loss) forces the filter to leave the input as much unchanged as possible.
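A minimal FAN-style sketch under these definitions, assuming pre-trained entity embeddings of dimension $d$ and a binary sensitive attribute; the alternating filter/discriminator updates are only indicated in comments. Note that binary cross-entropy is the negative of the log-likelihood term in Equation (1), so the min/max roles flip accordingly.

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

d, lam = 50, 1.0                                                                  # embedding dim and lambda (illustrative)
filter_net = nn.Sequential(nn.Linear(d, d), nn.ReLU(), nn.Linear(d, d))          # F: R^d -> R^d
discriminator = nn.Sequential(nn.Linear(d, d), nn.ReLU(), nn.Linear(d, 1), nn.Sigmoid())  # D: R^d -> [0,1]

def fan_terms(h, y):
    """Reconstruction term and discriminator cross-entropy for a batch of embeddings h."""
    filtered = filter_net(h)
    recon = ((filtered - h) ** 2).sum(dim=-1).mean()          # ||F(h) - h||_2^2
    pred = discriminator(filtered).squeeze(-1)
    xent = F.binary_cross_entropy(pred, y)                    # negative log-likelihood of D
    return recon, xent

h = torch.randn(32, d)                  # batch of (pre-trained) person-entity embeddings
y = torch.randint(0, 2, (32,)).float()  # sensitive attribute (e.g. gender) labels

recon, xent = fan_terms(h, y)
# discriminator step: minimise xent (predict the attribute as well as possible)
# filter step: minimise lam * recon - xent (preserve the input while fooling the discriminator)
```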
60
+
61
+ Note that our objective is markedly different from the compositional approach proposed by [6], where the non-sensitive information is preserved not through the reconstruction loss, but using the edge loss $f_r(h,t)$ coming from the embedding algorithm. The improvement from using the reconstruction loss is twofold.
62
+
63
+ - Only the embeddings of the entities to filter are required; when using the edge loss, on the other hand, all the triples are necessary to preserve the non-sensitive information.
64
+ - The reconstruction loss can be used independently of the embedding algorithm used.
2012.10974/main_diagram/main_diagram.drawio ADDED
The diff for this file is too large to render.
 
2012.10974/paper_text/intro_method.md ADDED
@@ -0,0 +1,95 @@
1
+ # Introduction
2
+
3
+ Human motion transfer methods, also known as performance cloning or reenactment methods, can generate realistic video animations of an actor following a target motion specified by a user. This has several applications in AR/VR and video editing. Building upon new advances in machine learning, current motion transfer methods tackle this challenging problem by learning a direct mapping between an actor-independent motion space and the resulting target actor's appearance space. These methods often require a training video of an actor performing a rich set of motions [\[6,](#page-8-0) [32,](#page-9-0) [22,](#page-8-1) [20,](#page-8-2) [2,](#page-8-3) [38\]](#page-9-1).
4
+
5
+ Some recent motion transfer approaches parameterize motion as skeletal pose sequences that can be computed from videos with off-the-shelf pose detectors [\[6,](#page-8-0) [32\]](#page-9-0). Others use pre-captured template meshes or parameterized body models to provide additional guidance to the synthesis step [\[22,](#page-8-1) [20\]](#page-8-2). Acquisition of such templates [\[20\]](#page-8-2), however, requires an extensive structure-from-motion reconstruction of the static target actor under constant lighting. Furthermore, existing human motion transfer approaches are likely to produce notable temporal and spatial artifacts when actors wear loose clothing, such as dresses, skirts and hoodies [\[6,](#page-8-0) [2,](#page-8-3) [38,](#page-9-1) [22\]](#page-8-1). On such garments, they struggle to realistically reproduce the appearance of fine-scale details like folds and wrinkles, as well as plausible dynamics.
6
+
7
+ In this paper, we present a new human motion transfer
8
+
9
+ <span id="page-0-0"></span><sup>1</sup>[https://graphics.tu-bs.de/publications/](https://graphics.tu-bs.de/publications/kappel2020high-fidelity) [kappel2020high-fidelity](https://graphics.tu-bs.de/publications/kappel2020high-fidelity)
10
+
11
+ <span id="page-1-0"></span>framework that generates visually plausible video animations of humans that are spatially and temporally coherent, and show natural dynamics, even for actors wearing loose garments (see Fig. [1\)](#page-0-1). Given a single monocular video of an actor performing a rich set of motions, we train a stack of deep generative networks to learn a mapping from 2D pose to a silhouette with semantic part labels, and per-pixel appearance of the actor. We model the person's shape as a dense foreground silhouette mask with per-pixel labels encoding assignment to limbs and garments. We further encode the structure of wrinkles and texture patterns of garments as the orientation and strength of local image gradients. We extract this structure from images using a bank of oriented filter kernels [\[26,](#page-9-2) [36\]](#page-9-3). Encoding the actor's appearance with these explicitly decoupled intermediate representations of silhouette and structure is key to enhance the temporal and spatial quality of synthesized videos comprising human actors in loose clothing.
12
+
13
+ Our method improves over current motion transfer approaches in terms of visual fidelity using a single RGB camera. Furthermore, our representation provides an additional level of control over the final image generation. For example, for the same overall dynamic geometric outline (*i.e.,* the same garment geometry), color and appearance, including fold and wrinkle style, can be manipulated in a purely image-based way. Overall, our contributions can be summarized as follows: (1) A new motion transfer framework with an emphasis on visually-plausible fine-scale deformations and dynamics in the actor's clothing. (2) For this, we propose to decompose the pose-to-image translation task into better conditioned cascaded processes, where the final appearance is conditioned on the predicted shape outline and internal structure of the clothing. (3) We show that our intermediate representations do not only help to provide more temporally coherent conditioning resulting in more appealing image synthesis, but also allow controlling individual aspects of the final rendering (*e.g.,* enhance wrinkles and transfer the clothing style).
14
+
15
+ # Method
16
+
17
+ Our framework $G_{\theta}$ generates photorealistic videos of a target actor mimicking the motion of another source identity. Given only a single monocular RGB image sequence $I = (i_n)_{n=1}^N \in \mathbb{R}^{N \times 3 \times h \times w}$ containing N frames of width w and height h showing a target actor performing a rich set of motions, we extract an identity-independent 2D skeletal pose $P(i_n)$ for each frame and fit the network parameters $\theta$ to approximate the inverse mapping $P^{-1}$ back to the original frames. Instead of performing the translation task within a single network, our framework can be thought of as a function composition of four generative neural networks $(G_{ref}, G_{app}, G_{str}, G_{shp})$ executed in a cascade fashion to progressively generate higher-level representations, as illustrated in Fig. 2. Next, we describe all individual framework components (Secs. 3.1-3.3), before providing details on the network architectures and implementation (Sec. 3.4).
18
+
19
+ As inputs, our method takes the target image sequence I and a static image of the scene background $b \in \mathbb{R}^{3 \times h \times w}$ that is fused with the synthesized actor later on. From the given image sequence, we extract actor-independent pose representations $P: \mathbb{R}^{3 \times h \times w} \rightarrow H \times D$ , where $H = \{0,1\}^{C_1 \times h \times w}$ symbolizes the set of rasterized binary pose skeletons with $C_1$ input channels, and $D = \mathbb{R}^{C_2 \times h \times w}$ is the set of temporal derivatives for a single pose consisting of $C_2$ channels. Similar to recent performance cloning methods [6, 32], we apply an off-the-shelf pose estimator [4, 34, 40] that predicts 2D keypoints $k_n \in \mathbb{R}^{127 \times 2}$ for the body (including hands and face), to generate the skeleton $h_n \in H$ by connecting adjacent keypoints via binary lines. We further distribute $C_1 = 9$ limbs (face, head, torso, arms, legs, hands) over multiple channels, which helps the networks to distinguish between overlapping or symmetrical body parts.
20
+
21
+ However, estimating soft body deformations from a single pose remains highly ambiguous as the states depend on the temporal order of poses. Thus, we calculate a temporal pose context $d_n \in D$ as the first and second temporal derivatives of $k_n$ with respect to the image index n in x and y direction, respectively $(C_2 = 2 \cdot 2 \cdot 9)$ , similar to velocities and accelerations in classical dynamics simulation:
22
+
23
+ $$k_n' = \left(\frac{\partial k_n}{\partial n}, \frac{\partial^2 k_n}{\partial n^2}\right). \tag{1}$$
24
+
25
+ We create rasterizations $d_n$ from $k'_n$ in the same way as for the pose skeletons, and linearly interpolate their values along the bones, similar to the depth representation of Shysheya *et al.* [32]. During the reenactment, we apply the same procedure to the source image sequence, but additionally perform pose normalization as described by Chan *et al.* [6] for inter-target appearance transfer before drawing the skeletons. Finally, our pose conditioning $P(i_n) = (h_n, d_n)$ is provided to our framework as the concatenation of the framewise pose skeleton and temporal context. We find that the described procedure results in a compact, yet expressive pose representation, while slightly outperforming a simple sliding window approach in our experiments.
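A small sketch of the temporal pose context of Eq. (1), approximating the derivatives with finite differences over the keypoint sequence; the 127 2D keypoints follow the description above, while the frame count is arbitrary.

```python
import numpy as np

def temporal_context(keypoints):
    """First and second temporal derivatives of 2D keypoints, as in Eq. (1).

    keypoints: array of shape (N, 127, 2), one set of 2D keypoints per frame.
    Returns velocities and accelerations of the same shape (finite differences).
    """
    vel = np.gradient(keypoints, axis=0)        # dk_n / dn
    acc = np.gradient(vel, axis=0)              # d^2 k_n / dn^2
    return vel, acc

k = np.random.rand(100, 127, 2)                 # toy keypoint track over 100 frames
vel, acc = temporal_context(k)
# vel and acc are rasterised along the skeleton bones to form the C2 = 2*2*9 context channels
```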
26
+
27
+ In our framework, we use two dedicated networks that pre-estimate the actor's shape and the internal gradient structure of clothing in 2D space to improve the consistent modeling of deforming garments and the fall of the folds.
28
+
29
+ As a first step, our shape estimator $G_{shp}$ transforms the provided pose representations P into the actor's current silhouette
30
+
31
+ $$\tilde{s}_n^* = G_{shp}(P(i_n), \tilde{s}_{n-1}^*), \tag{2}$$
32
+
33
+ where $\tilde{s}_n^* \in \mathbb{R}^{j \times h \times w}$ is represented as semantic body-part segmentation with j labels. We extract a final segmentation mask $\tilde{s}_n \in ([\mathbb{N}]_1^j)^{h \times w}$ from our network by taking the indices of the maximum elements per pixel over the channel dimension.
34
+
35
+ As knowledge about the preceding shape is necessary to achieve temporal coherence of deforming garments, we design our networks in a recurrent fashion where each execution is conditioned on the previous output. Thus, $G_{shp}$ is tasked to estimate a delta in the observed shape based on the current pose and the contextual temporal derivatives. The resulting segmentation mask is a significant prior for the final appearance network to produce visually-plausible renderings, and it is further used to segment the actor and parts of their clothing in later stages. For training, we use the human parsing method of Li *et al.* [18] trained on the
36
+
37
+ <span id="page-3-2"></span><span id="page-3-0"></span>![](_page_3_Figure_0.jpeg)
38
+
39
+ **Figure 2. Framework Overview**: Our framework synthesizes views of humans from a static background image and a pose sequence extracted from the target (training) or source (testing) image sequence. Therefore, we apply two consecutive networks to explicitly pre-estimate human body parsing (*yellow*) and the internal gradient structure of clothing (*green*). The resulting outputs are used to condition final image generation, which is separated into the foreground (*blue*) and global (*red*) components to handle loose clothing in a temporally coherent way. Dashed lines indicate recurrent networks that feed back the last output to generate the next prediction.
40
+
41
+ ATR dataset [19] to extract pseudo-ground-truth segmentation maps $S = (s_n)_{n=1}^N$ from I. This dataset provides j=18 labels for all body limbs and common clothing, enabling our method to handle various clothing styles including shirts, pants, skirts, dresses and scarves. We formulate our shape training loss $L_{shp}$ as the cross-entropy between the network prediction and the training label:
42
+
43
+ $$L_{shp} = \mathbb{E}_{n \sim N} \log \left( \sum_{j} \exp(\tilde{s}_{n}^{*}(j)) \right) - \tilde{s}_{n}^{*}(s), \tag{3}$$
45
+
46
+ where $\tilde{s}_n^*(j)$ is the output channel for a given label j.
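Eq. (3) is a per-pixel cross-entropy over the $j$ body-part labels; a sketch with illustrative shapes (the actual resolution and batching are not specified here):

```python
import torch
import torch.nn.functional as F

j, h, w = 18, 64, 64                        # number of labels, toy spatial resolution
pred = torch.randn(1, j, h, w)              # per-pixel scores from G_shp (one channel per label)
target = torch.randint(0, j, (1, h, w))     # pseudo-ground-truth parsing labels s_n

# Eq. (3): log-sum-exp over channels minus the score of the true label, averaged over pixels
loss_shp = F.cross_entropy(pred, target)

# the hard segmentation mask used downstream is the channel-wise argmax
seg = pred.argmax(dim=1)
```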
47
+
48
+ While our shape representation is a sufficient prior for inferring the appearance of solid body parts like arms and faces, it does not provide information about wrinkles and folds within the clothing, which are needed to generate temporally consistent shading and texture patterns. Thus, we apply a second recurrent network $G_{str}$ that estimates the internal gradient structure $\tilde{w}_n \in \mathbb{R}^{2 \times h \times w}$ for clothing regions as indicated by the segmentation map $\tilde{s}_n$:
49
+
50
+ $$\tilde{w}_n = G_{str}(P(i_n), \tilde{s}_n, \tilde{w}_{n-1}). \tag{4}$$
51
+
52
+ We model the clothing structure as the pixelwise gradient direction and strength extracted from the responses of 32
53
+
54
+ oriented Gabor filters, similar to recent work on neural hair synthesis [36]. However, as a floating garment exhibits sparser gradients than human hair, we not only use the maximum filter responses to smooth the angle field, but also append a normalized version of them to the final gradient directions to model the probability of a wrinkle or texture change at a specific location, as visualized in Fig. 1 (color and saturation encode the gradient direction and strength, respectively). We train the structure estimator using the $L_1$ distance between the estimated structure $\tilde{w}_n$ and the ground-truth structure $w_n$, masked to the garment labels extracted from $\tilde{s}_n$:
55
+
56
+ $$\boldsymbol{L_{str}} = \mathbb{E}_{n \sim N} \, \chi_C(\tilde{s}_n) \, |\tilde{w}_n - w_n|, \tag{5}$$
57
+
58
+ where C denotes the set of segmentation labels corresponding to the actor's clothing, and $\chi_C$ is the indicator function:
59
+
60
+ $$\chi_C(x) = \begin{cases} 1, & \text{if } x \in C, \\ 0, & \text{else.} \end{cases}$$
61
+ (6)
62
+
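The following sketch illustrates how such a structure target can be derived from an oriented Gabor bank (dominant direction plus normalized response strength) and how the masked $L_1$ loss of Eq. (5) restricts supervision to clothing pixels. The filter parameters, the label set C, and the omission of the angle-field smoothing are simplifying assumptions.

```python
import cv2
import numpy as np

def clothing_structure(gray, n_orients=32, ksize=9):
    """Per-pixel gradient direction and normalized strength from an oriented
    Gabor bank (illustrative parameters, not the exact ones used here)."""
    responses = []
    for k in range(n_orients):
        theta = np.pi * k / n_orients
        kern = cv2.getGaborKernel((ksize, ksize), sigma=2.0, theta=theta,
                                  lambd=6.0, gamma=0.5)
        responses.append(cv2.filter2D(gray, cv2.CV_32F, kern))
    resp = np.stack(responses, axis=0)                   # (n_orients, H, W)
    direction = resp.argmax(axis=0) * np.pi / n_orients  # dominant angle
    strength = resp.max(axis=0)
    strength = strength / (strength.max() + 1e-8)        # normalized response
    return np.stack([direction, strength], axis=0)       # (2, H, W)

def masked_l1(w_pred, w_gt, seg, clothing_labels):
    """Eq. (5): L1 distance restricted to pixels whose label lies in C."""
    mask = np.isin(seg, list(clothing_labels)).astype(np.float32)
    return (mask * np.abs(w_pred - w_gt)).sum() / (mask.sum() * w_gt.shape[0] + 1e-8)

gray = np.random.rand(64, 64).astype(np.float32)         # clothing-region crop
w_gt = clothing_structure(gray)
w_pred = w_gt + 0.05 * np.random.randn(*w_gt.shape)
seg = np.random.randint(0, 18, (64, 64))                 # predicted segmentation
loss = masked_l1(w_pred, w_gt, seg, clothing_labels={4, 5, 6, 7})  # hypothetical C
```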
63
+ The second stage of our framework synthesizes the final output image based on the provided pose and garment conditionings. Again, we use two dedicated networks to independently generate the actor in the foreground and fuse it with the provided background image.
64
+
65
+ Our first rendering network $G_{app}$ takes the provided pose as well as the pre-estimated shape $\tilde{S}$ and internal clothing structure $\tilde{W}$ to synthesize the actor's appearance $\tilde{f}_n \in \mathbb{R}^{3 \times h \times w}$:
66
+
67
+ $$\tilde{f}_n = G_{app}(P(i_n), \tilde{s}_n, \tilde{w}_n). \tag{7}$$
68
+
69
+ We train the appearance module using a combination of $L_1$ distance and perceptual reconstruction loss [14]. Again, an indicator function $\chi_B$ is used to mask the foreground pixels that are not assigned to the background label $\beta$ :
70
+
71
+ <span id="page-4-1"></span>
72
+ $$\chi_B(x) = \begin{cases} 0, & \text{if } x = \beta, \\ 1, & \text{else.} \end{cases}$$
73
+ (8)
74
+
75
+ Thus, our appearance loss $L_{app}$ reads
76
+
77
+ <span id="page-4-2"></span>
78
+ $$\boldsymbol{L_{app}} = \mathbb{E}_{n \sim N} \chi_B(\tilde{s}_n) \left( \lambda_r |\tilde{f}_n - i_n| + \lambda_p |\phi(\tilde{f}_n) - \phi(i_n)| \right),$$
79
+ (9)
80
+
81
+ where $\phi(\cdot)$ denotes feature maps extracted from different layers of a pre-trained VGG19 network [35], and $\lambda_r, \lambda_p$ are free hyperparameters for weighting.
82
+
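A sketch of Eq. (9) with a masked $L_1$ term and VGG19 feature distances is given below. The chosen VGG layers, the default weights, and applying the mask to the network inputs (rather than to the feature differences) are assumptions made to keep the example short; input normalization is also omitted.

```python
import torch
import torch.nn as nn
from torchvision import models

class MaskedAppearanceLoss(nn.Module):
    """Sketch of Eq. (9): masked L1 plus a VGG19 perceptual term."""
    def __init__(self, lambda_r=1.0, lambda_p=0.1, layers=(3, 8, 17)):
        super().__init__()
        vgg = models.vgg19(weights=models.VGG19_Weights.IMAGENET1K_V1).features.eval()
        for p in vgg.parameters():
            p.requires_grad_(False)
        self.vgg, self.layers = vgg, set(layers)
        self.lambda_r, self.lambda_p = lambda_r, lambda_p

    def features(self, x):
        feats = []
        for i, layer in enumerate(self.vgg):
            x = layer(x)
            if i in self.layers:
                feats.append(x)
        return feats

    def forward(self, f_pred, i_target, fg_mask):
        # Reconstruction term, masked to foreground pixels.
        rec = (fg_mask * (f_pred - i_target).abs()).mean()
        # Perceptual term; the mask is applied to the inputs as an approximation.
        per = sum((a - b).abs().mean()
                  for a, b in zip(self.features(fg_mask * f_pred),
                                  self.features(fg_mask * i_target)))
        return self.lambda_r * rec + self.lambda_p * per

loss_fn = MaskedAppearanceLoss()
f_pred = torch.rand(1, 3, 64, 64)                       # generated foreground
i_target = torch.rand(1, 3, 64, 64)                     # training frame i_n
fg_mask = (torch.rand(1, 1, 64, 64) > 0.5).float()      # chi_B from the segmentation
L_app = loss_fn(f_pred, i_target, fg_mask)
```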
83
+ Finally, we apply a shallow refinement network to fuse the foreground prediction with the provided scene background. For this, we first paste the generated foreground $\tilde{f}_n$ onto the static background image b using the masking function (8):
84
+
85
+ $$\tilde{i}_n^* = (\chi_B(\tilde{s}_n) \cdot \tilde{f}_n) + ((1 - \chi_B(\tilde{s}_n)) \cdot b). \tag{10}$$
86
+
87
+ Then, given the foreground-background composition $\tilde{i}_n^*$ and the actor segmentation $\tilde{s}_n$, our refinement network $G_{ref}$ performs simple transition smoothing and shadow generation to produce the final output image $\tilde{i}_n$:
88
+
89
+ $$\tilde{i}_n = G_{ref}(\tilde{i}_n^*, \tilde{s}_n). \tag{11}$$
90
+
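A minimal sketch of the compositing step of Eq. (10) and the refinement call of Eq. (11); the tiny stand-in for $G_{ref}$ and the binary foreground mask used in place of the full segmentation $\tilde{s}_n$ are illustrative assumptions.

```python
import torch
import torch.nn as nn

def composite(f_pred, background, fg_mask):
    """Eq. (10): paste the generated foreground onto the static background."""
    return fg_mask * f_pred + (1.0 - fg_mask) * background

# Stand-in for the shallow refinement network G_ref of Eq. (11).
G_ref = nn.Sequential(
    nn.Conv2d(3 + 1, 16, kernel_size=3, padding=1), nn.ReLU(),
    nn.Conv2d(16, 3, kernel_size=3, padding=1),
)

f_pred = torch.rand(1, 3, 64, 64)                    # generated foreground
background = torch.rand(1, 3, 64, 64)                # static background image b
fg_mask = (torch.rand(1, 1, 64, 64) > 0.5).float()   # chi_B(s_n)

i_star = composite(f_pred, background, fg_mask)       # Eq. (10)
i_final = G_ref(torch.cat([i_star, fg_mask], dim=1))  # Eq. (11)
```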
91
+ We use a combination of structural and perceptual losses similar to our $L_{app}$ (9), but over the entire image plane:
92
+
93
+ $$L_{ref} = \mathbb{E}_{n \sim N} \lambda_r |\tilde{i}_n - i_n| + \lambda_p |\phi(\tilde{i}_n) - \phi(i_n)|. \quad (12)$$
94
+
95
+ In contrast to recent pose-to-video translation methods [1, 6], we do not apply an adversarial loss to our generator network, as we do not want to hallucinate high-frequency details in the clothing based on statistics from the data sequence I, but rather encourage the network to stick to the predicted structure layout.
2105.02961/main_diagram/main_diagram.drawio ADDED
The diff for this file is too large to render. See raw diff
 
2105.02961/paper_text/intro_method.md ADDED
@@ -0,0 +1,27 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Introduction
2
+
3
+ B-Reps are the de facto standard for industrial design, and the representation most widely used in the consumer product and automotive industries, where style is of great importance. B-Reps offer unparalleled editability in a compact, memory-efficient representation; unlike meshes or point clouds, they are not discrete/sampled, offering precise boundaries with continuous smooth surfaces and edge curves.
4
+
5
+ <span id="page-0-0"></span>![](_page_0_Figure_9.jpeg)
6
+
7
+ Figure 1: Overview of UV-StyleNet: Grams of activations are normalized and extracted for each layer. The weights applied to each layer define the meaning of style. (a) Top-10 query results using uniform layer weights w. (b) Top-10 query results using w\* based on the user-selected examples (positive in green, negative in red). In this example, w\* ≈ [0, 0, 0, 1, 0, 0, 0]<sup>⊤</sup>. Zoom to see fillets/stylistic details.
8
+
9
+ <span id="page-1-1"></span><span id="page-1-0"></span>Figure 2: Lower case examples from font 'Viaoda Libre'. While 'j' and 'r' share some stylistic features, they are not obviously similar to 'c', 's' or 'z', *i.e*. font classes provide a ground truth for style compatibility (as perceived by their designers) yet only a *weak* label for style itself.
10
+
11
+ See [Appendix A](#page-10-0) for a brief introduction to B-Reps. A B-Rep style similarity measure has many use cases, *e.g*. finding architectural parts that are in keeping with the style of a building, or selecting parts for a car that fit with the manufacturer's existing range. Moreover, the gradient of a style similarity measure can be used to generate helpful visualizations or modify the input 3D shape à la Gatys *et al*. [\[11\]](#page-8-0).
12
+
13
+ Geometric style is inherently subjective and may have a different meaning in different object class domains, *i.e*. the boundary between style and content is unclear. For example, in the context of chair designs, number of legs could be considered either style or content depending on the particular use case. Thus, an effective geometric style measure must cater for these different interpretations of the end user.
14
+
15
+ While existing methods use hand-crafted features [\[25,](#page-8-1) [24\]](#page-8-2) or crowd-sourcing [\[22,](#page-8-3) [27,](#page-8-4) [30,](#page-9-0) [28\]](#page-8-5) to pre-define and measure geometric style, we propose a user-defined few-shot style metric learning method that leverages the range of style signals available in the activations of a pre-trained 3D object encoder through second-order statistics (Gram matrices). The relative importance of each layer's Gram matrix is then learnt through selection of just a few examples of what style means to an end user (see [Figure 1](#page-0-0)).
16
+
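As a rough sketch of this idea (not the exact UVStyle-Net formulation), per-layer Gram matrices can be normalized and combined with user-dependent layer weights into a style distance; the toy three-layer encoder activations below are hypothetical.

```python
import torch

def normalized_gram(feat):
    """Gram matrix of one layer's activations, feat: (channels, num_samples),
    normalized so layers of different sizes become comparable."""
    g = feat @ feat.t() / feat.shape[1]
    return g / (g.norm() + 1e-8)

def style_distance(feats_a, feats_b, layer_weights):
    """Weighted sum of per-layer Gram differences; the layer weights encode
    what 'style' means to a particular user (uniform weights by default)."""
    return sum(w * (normalized_gram(a) - normalized_gram(b)).norm()
               for w, a, b in zip(layer_weights, feats_a, feats_b))

# Toy usage with random activations from a hypothetical 3-layer encoder.
feats_a = [torch.randn(16, 100), torch.randn(32, 100), torch.randn(64, 100)]
feats_b = [torch.randn(16, 100), torch.randn(32, 100), torch.randn(64, 100)]
d_uniform = style_distance(feats_a, feats_b, layer_weights=[1 / 3, 1 / 3, 1 / 3])
```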
17
+ Despite the abundant use of B-Reps in industrial settings, there is a fundamental lack of publicly available B-Rep data for training machine learning models — in particular, there are no existing B-Rep datasets that include a reliable ground truth for style. To overcome this challenge, we provide an adaptation to SolidLetters [\[16\]](#page-8-6), which improves the style consistency within font classes for the evaluation test set. The font classes, however, still provide only a weak label for style (see [Figure 2\)](#page-1-0), and as such we propose an unsupervised method and use the font labels purely for quantitative evaluation to justify design choices of our method. For comparison against existing SOTA on real-world data we also provide evaluation with the unlabeled ABC dataset [\[20\]](#page-8-7) of CAD models and a manually labeled subset of it.
18
+
19
+ The main contributions of this work are as follows:
20
+
21
+ - We demonstrate that the second-order statistics (Gram matrix) approach used in 2D image style literature can be generalised to (B-Rep) 3D shapes
22
+
23
+ - We introduce a general few-shot learning method for capturing a subjective end-user's definition of 3D style and demonstrate its effectiveness on B-Reps
24
+ - We show quantitative efficiency and performance advantages of using UVStyle-Net architecture with B-Reps over similar approaches on meshes and point clouds using a new synthetic public dataset (SolidLetters) and a small subset of ABC labeled for style
25
+ - We verify our method on the ABC dataset with no style or content labels for pre-training, and demonstrate the effectiveness of our few-shot learning process to capture subjective user-defined style similarity measures
26
+
27
+ In summary, we introduce a geometric style similarity measure for 3D solids that may be used in completely unlabeled settings for arbitrary object classes, with user subjectivity handled by few-shot learning given only a few examples. While our method is adaptable for all 3D input types, we demonstrate the benefits of our approach with B-Reps both quantitatively and qualitatively.
2105.09356/main_diagram/main_diagram.drawio ADDED
@@ -0,0 +1 @@
 
 
1
+ <mxfile host="app.diagrams.net" modified="2021-06-11T22:50:11.849Z" agent="5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.77 Safari/537.36 Edg/91.0.864.41" version="14.7.7" etag="9sN8pTTQa0ret0YOqh9b" type="device"><diagram id="y6uXlLVwusbjaRmCE1AN">7Vpdd6M2EP01fowPQgjEo52P7UPbs6fZtptHGYStBiNXyLHdX19hJIMMzpIY4+zZ9YvRaCTE3Jk7g8QI3i63nwRZLX7jMU1HrhNvR/Bu5LogxFD9FZJdKXEhBqVkLlistSrBI/uPaqGjpWsW09xSlJynkq1sYcSzjEbSkhEh+MZWS3hq33VF5rQheIxI2pT+zWK5KKUYOZX8F8rmC3Nn4OieJTHKWpAvSMw3NRG8H8Fbwbksr5bbW5oW1jN2Kcc9nOg9LEzQTHYZ4JYDXki61s+m1yV35mHngq9XWo0KSbdtJiYzo+40lwAOD6ZcgvIllWKnVMxEWA/R3oBC3d5UpgWeF47D2g+XKoualRH0NcIa3fnhVpUB1IW2Qbs9YIs9/FTdYZpw9Th1w/j/rrnpuMn3PjpRCtBfbatOdTUv/r8IwjLVe8fySLAly4jkwkytFlXOXuo2EFAAZDEtVghU92bBJH1ckajo3agIU7KFXKa6O2FpestTNXsxFlIQIxooeS4Ff6a1ntAPIPH1A+gYg65p67uDk8DXAXZfBdjzgnEQegAEvhc4CGMLbhB6qrf2Qw30XYTGGLsQuXoWr4k+6AN9r2F6GqvA100u5ILPeUbS+0o6rcBxVKvS+ZXzlYbkHyrlTluYrCW3AaNbJr/Wrp+KqcZIt+62euZ9Y2camXqyr/VGbVTRrIbtW2Zc6QSGs2A38AsbvA69Mhlfi0hraSAkEXMqa2HVdBBBUyLZiz37OQCiC4XvJFddX8Rama1jzKpwkTbOJGXzTF1HynZUxeC0CCqmkspEdyxZHJcuRdV6NKMWqK04y+TeKmg6QnfF7eA0JTOaTkn0PN97oAnsjGf03THdhcy9V2P9xhmDEOjpOsOrp/tcPGc1l2/xhAlOM54nSa7c69g5Dkvq5C/+hfzldi32FisMWM43E4dUoHjBdSYiWuRDJIAkoX4UtSWAOAhnjnPJBPABKT54B8VHa/FysH5vfO+MXUPyT7WudsJvEneu+FVOimK24JSU5DmLjPiBpYe7ZbFR0tSgJLq/I/Zv5X98Jtm3s0EI7boBBo49RZlv9KizWAFfuAhscsInmlExVEkYE4qTVkbwI0xnyQAlIQ68saurwSLogw9bEoY/S8IeKAE2S8JzWaIrgGbPoPdo/oNuiIh/+How/FY9CB0E7fDupToENmeY0r/X8tAUsr37juZ72poMfqebYQvExD1RIPozHw2xQ4AAGAe1dBBej+9BcwNsQMKvSP6pzvGvEz54G+Fb1ewg7I+b7A9O7An2T//w4xT878rmFZ5Pdayvmc0BagLqD4WndyFK/nMV7wn5B0/n4MRWXZXPXc+z38RucD8J3d7vufGO3u/6yeiX2iFU/2RZJNpslhd/n6lIuFiSLGpP8ve5ZEtlLp4N8sqHKI69thyP3Rn0r5jjNdqe0zz0GS7n+x8nR4y77wm9twAYJkcELUkfDZUkgp91+xAxDYqdmupcz4WeFdQfaR8H4GvW9W/ZyLlaSR4MFZ3hhaLzUdLVPjKVxPlLFWQxk8UTnLtTYweYKcFq0ahF3UvAtki3ne3iO7BAVXInD+VhyzcYXUIVeWPvfAdxL7Vld+Bs61z3gTzX6/7Zsfb34TA9+ASE9pc4ALR8ieM7Tdj9PkBv22s7srRVc9VOtqrjrzr3ntgQd99eH53g2MYXDp0+e6oZE7XY0sjOPTwPbSx9dPQ6VWaDxnFZYyIM7IkCv9u5m8KG7Gpq+i335IIRbl/wqXUh50g//Ia+a+tjnYYq9yxX3PHNUjWrr/RK9epjR3j/Pw==</diagram></mxfile>
2105.09356/main_diagram/main_diagram.pdf ADDED
Binary file (21 kB). View file
 
2105.09356/paper_text/intro_method.md ADDED
@@ -0,0 +1,56 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Introduction
2
+
3
+ Neural architecture search (NAS) improves neural network model design by replacing the manual trial-and-error process with an automatic search procedure, and has achieved state-ofthe-art performance on many computer vision tasks [\[Elsken](#page-6-0) *et al.*[, 2018\]](#page-6-0). Since the underlying search space of architectures grows exponentially as a function of the architecture size, searching for an optimum neural architecture is like looking for a needle in a haystack. A variety of *search algorithms* have been proposed for NAS, including random search (RS) [\[Li and](#page-6-1) [Talwalkar, 2020\]](#page-6-1), differentiable architecture search (DARTS) [Liu *et al.*[, 2018\]](#page-6-2), Bayesian optimization (BO) [\[Kandasamy](#page-6-3) *et*
4
+
5
+ *al.*[, 2018\]](#page-6-3), evolutionary algorithm (EA) [Dai *et al.*[, 2020\]](#page-6-4), and reinforcement learning (RL) [Pham *et al.*[, 2018\]](#page-6-5).
6
+
7
+ Despite a proliferation of NAS methods proposed, their sensitivity to random seeds and reproducibility issues concern the community [\[Li and Talwalkar, 2020\]](#page-6-1), [Yu *et al.*[, 2019\]](#page-7-0), [Yang *et al.*[, 2019\]](#page-6-6). Comparisons between different search algorithms, such as EA, BO, and RL, etc., are particularly hard, as there is no shared search space or experimental protocol followed by all these NAS approaches. To promote fair comparisons among methods, multiple NAS benchmarks have recently emerged, including NAS-Bench-101 [\[Ying](#page-6-7) *et al.*[, 2019\]](#page-6-7), NAS-Bench-201 [\[Dong and Yang, 2020\]](#page-6-8), and NAS-Bench-301 [Siems *et al.*[, 2020\]](#page-6-9), which contain collections of architectures with their associated performance. This has provided an opportunity for researchers to fairly benchmark search algorithms (regardless of the search space in which they are performed) by evaluating how many queries to architectures an algorithm needs to make in order to discover a top-ranked architecture in the benchmark set [Luo *et al.*[, 2020;](#page-6-10) Siems *et al.*[, 2020\]](#page-6-9). The number of queries converts to an indicator of how many architectures need be evaluated in reality, which often forms the bottleneck of NAS.
8
+
9
+ In this paper, we propose Generative Adversarial NAS (GA-NAS), a provably converging and efficient search algorithm to be used in NAS based on adversarial learning. Our method is first inspired by the *Cross Entropy* (CE) method [\[Rubinstein](#page-6-11) [and Kroese, 2013\]](#page-6-11) in importance sampling, which iteratively retrains an architecture generator to fit to the distribution of winning architectures generated in previous iterations so that the generator will increasingly sample from more important regions in an extremely large search space. However, such a generator cannot be efficiently trained through back-propagation, as performance measurements can only be obtained for discretized architectures and thus the model is not differentiable. To overcome this issue, GA-NAS uses RL to train an architecture generator network based on RNN and GNN. Yet unlike other RL-based NAS schemes, GA-NAS does not obtain rewards by evaluating generated architectures, which is a costly procedure if a large number of architectures are to be explored. Rather, it learns a discriminator to distinguish the winning architectures from randomly generated ones in each iteration. This enables the generator to be efficiently trained based on the rewards provided by the discriminator, without many true evaluations. We further establish the convergence of GA-NAS
10
+
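The outer loop can be illustrated on a toy problem as follows. To keep the sketch self-contained, the RL-trained GNN/RNN generator and the learned discriminator are replaced by a cross-entropy-method refit of a per-bit sampling distribution, and the 16-bit "architectures" with a bit-matching objective are purely illustrative; only the iterate-select-refit structure mirrors the method.

```python
import numpy as np

rng = np.random.default_rng(0)
target = rng.integers(0, 2, 16)              # stand-in for the costly true evaluation

def evaluate(arch):
    return float((arch == target).mean())

probs = np.full(16, 0.5)                     # "generator": independent Bernoulli bits
best, queries = None, 0
for step in range(10):
    samples = (rng.random((64, 16)) < probs).astype(int)
    scores = np.array([evaluate(a) for a in samples])       # true evaluations (queries)
    queries += len(samples)
    winners = samples[scores >= np.quantile(scores, 0.9)]   # truth set T of winners
    if best is None or scores.max() > evaluate(best):
        best = samples[scores.argmax()]
    # "Generator update": refit the sampler towards the winners, a simple surrogate
    # for training the generator against a discriminator that separates T from
    # randomly generated architectures.
    probs = 0.5 * probs + 0.5 * winners.mean(axis=0)

print(f"best score {evaluate(best):.2f} after {queries} queries")
```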
11
+ <sup>∗</sup>[Equal Contribution. Correspondence to: Di Niu](#page-6-3) [<dniu@ualberta.ca>, Seyed Rezaei <seyeds.rezaei@huawei.com>.](#page-6-3)
12
+
13
+ in a finite number of steps, by connecting GA-NAS to an importance sampling method with a symmetric Jensen–Shannon (JS) divergence loss.
14
+
15
+ Extensive experiments have been performed to evaluate GA-NAS in terms of its convergence speed, reproducibility and stability in the presence of random seeds, scalability, flexibility of handling constrained search, and its ability to improve already optimized baselines. We show that GA-NAS outperforms a wide range of existing NAS algorithms, including EA, RL, BO, DARTS, etc., as well as state-of-the-art results reported on three representative NAS benchmark sets, namely NAS-Bench-101, NAS-Bench-201, and NAS-Bench-301: it consistently finds top-ranked architectures within a lower number of queries to architecture performance. We also demonstrate the flexibility of GA-NAS by showing its ability to incorporate ad-hoc hard constraints and its ability to further improve existing strong architectures found by other NAS methods. Through experiments on ImageNet, we show that GA-NAS can enhance EfficientNet-B0 [Tan and Le, 2019] and ProxylessNAS [Cai et al., 2018] in their respective search spaces, resulting in architectures with higher accuracy and/or smaller model sizes.
16
+
17
+ # Method
18
+
19
+ We now demonstrate that GA-NAS can improve existing neural architectures, including ResNet and Inception cells in NAS-Bench-101, EfficientNet-B0 under hard constraints, and ProxylessNAS-GPU [Cai *et al.*[, 2018\]](#page-6-13) in unconstrained search.
20
+
21
+ For ResNet and Inception cells, we use GA-NAS to find better cells from NAS-Bench-101 under a lower or equal training time and number of weights. This can be achieved by enforcing a hard constraint in choosing the truth set T in each iteration. Table [6](#page-5-2) shows that GA-NAS finds new, dominating cells in both cases, showing that it can enforce ad-hoc constraints during search, a property not enforceable by the regularizers used in prior work. We also test Random Search under the same constraints with a similar number of queries to the benchmark; it is unable to outperform GA-NAS.
22
+
23
+ <span id="page-5-2"></span>
24
+
25
+ | Cell      | Algorithm     | Best Acc | Train Seconds | #Weights (M) |
+ |-----------|---------------|----------|---------------|--------------|
+ | ResNet    | Hand-crafted  | 93.18    | 2251.6        | 20.35        |
+ | ResNet    | Random Search | 93.84    | 1836.0        | 10.62        |
+ | ResNet    | GA-NAS        | 93.96    | 1993.6        | 11.06        |
+ | Inception | Hand-crafted  | 93.09    | 1156.0        | 2.69         |
+ | Inception | Random Search | 93.14    | 1080.4        | 2.18         |
+ | Inception | GA-NAS        | 93.28    | 1085.0        | 2.69         |
36
+
37
+ Table 6: Constrained search results on NAS-Bench-101. GA-NAS can find cells that are superior to the ResNet and Inception cells in terms of test accuracy, training time, and the number of weights.
38
+
39
+ <span id="page-5-3"></span>
40
+
41
+ | Network | #Params | Top-1 Acc |
42
+ |------------------------------|---------|-----------|
43
+ | EfficientNet-B0 (no augment) | 5.3M | 76.7 |
44
+ | GA-NAS-ENet-1 | 4.6M | 76.5 |
45
+ | GA-NAS-ENet-2 | 5.2M | 76.8 |
46
+ | GA-NAS-ENet-3 | 5.3M | 76.9 |
47
+ | ProxylessNAS-GPU | 4.4M | 75.1 |
48
+ | GA-NAS-ProxylessNAS | 4.9M | 75.5 |
49
+
50
+ Table 7: Results on the EfficientNet and ProxylessNAS spaces.
51
+
52
+ We now consider well-known architectures found on ImageNet, i.e., EfficientNet-B0 and ProxylessNAS-GPU, which are already strong, optimized baselines found by other NAS methods. We show that GA-NAS can be used to improve a given architecture in practice by searching in its original search space.
53
+
54
+ For EfficientNet-B0, we set the constraint that the found networks all have an equal or lower number of parameters than EfficientNet-B0. For the ProxylessNAS-GPU model, we simply put it in the starting truth set and run an unconstrained search to further improve its top-1 validation accuracy. More details are provided in the Appendix. Table [7](#page-5-3) presents the improvements made by GA-NAS over both existing models. Compared to EfficientNet-B0, GA-NAS can find new single-path networks that achieve comparable or better top-1 accuracy on ImageNet with an equal or lower number of trainable weights. We report the accuracy of EfficientNet-B0 and the GA-NAS variants *without data augmentation*. The total search time, including supernet training, is around 680 GPU hours on Tesla V100 GPUs. It is worth noting that the original EfficientNet-B0 was found using MNasNet [Tan *et al.*[, 2019\]](#page-6-15) at a search cost of over 40,000 GPU hours.
55
+
56
+ For ProxylessNAS experiments, we train a supernet on ImageNet and conduct an unconstrained search using GA-NAS for 38 hours on 8 Tesla V100 GPUs, a major portion of which, i.e., 29 hours is spent on querying the supernet for architecture performance. Compared to ProxylessNAS-GPU, GA-NAS can find an architecture with a comparable number of parameters and a better top-1 accuracy.
2106.01354/main_diagram/main_diagram.drawio ADDED
@@ -0,0 +1 @@
 
 
1
+ <mxfile host="app.diagrams.net" modified="2021-06-02T16:11:37.829Z" agent="5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.77 Safari/537.36" version="14.7.3" etag="74ft68G-Rxjehe7t-7Tw" type="google"><diagram id="19Gn13mk0MPT6Slpry57">7V1bc+JKkv41jth9OBWVVZV1eex2j+fsw0zs9tmImXmaoA22ibHBg+nb/vrNFBKopJIQIBow6qZpkETpkl/es7Ju9O3Ljz8vRq9Pf5mPJ883So5/3OhPN0qBkZ7+4y0/8y0WzGrL42I6zrdtNvwx/b9JvlHmW79Ox5O36MDlfP68nL7GG+/ns9nkfhltGy0W8+/xYQ/z5/isr6PH/Ixys+GP+9HzpHbY36bj5VO+FWzY7Ph9Mn18yk/tlVvteBmtD15teHsajeffS+fSf7rRt4v5fLn69PLjdvLMT694LqsLumvYu76wxWS27PIDtfrBt9Hz1/ze8uta/ixudjH/OhtP+Hi40R+/P02Xkz9eR/e89zvRl7Y9LV+e893r+5H05fF59PaWfx6P3p7Wg7z9a7K8f8r3vC0X839NbufP80V2Qi2zP+s9xROmZ/PxYfr8XBw5m88mfJLFaDylu61sfpjPlnejl+kzY+zPk/nicTrKN+d4Ak/fF/PlaDmdz+j7b5rPSZBZjqazySK/uvz5TBbLyY/GZwxryhHmJ/OXyXLxkw7Jf4DKCw/SgZQGUPoc6zn4vRXe2BBAW+mcy3d+L8HKaOGM10j/rJEK80OeSgBDJ0DR6JZGCWALuI1ynD+ur2kDBfqQoyGNDL0dGUTsV/54P19kD2wbOGIkXRJUdAyVkF0mkxezT/xDC8dBD4lLK5TxPhCAAMDF6AHhEJQnYCFJUV1DjyfsSUPg88oap62rg0dZFFqCJgGmdECHh4PHXLFYqWBlJVZ6EiMOgkUlgwYSGBEQNCHBuqAwBOOCDCEpR4jYymg62DmtbR0KVgpJQPOWgCAloD4cCjhA4fgaRqOQKIO1aJxEayNoKG2EtERXsER7D94nsAFCoUeQympLksIndIync2h02kvPP+hDTtgEOOzzkqk5/cZjP08fZ9kO+++vbBN9fJ48LDff6NNj/n/2K374jK3CYiyOg/JP5MMKW5ttBfUax/1S3fD2OppFII6uT3+gfYpEs/b69Qd9Xp13OX9d7fpNCnrYxmNpJ1/6b6sLz35tyj9cjGZvD/PFy2rfG1uiP/4DBGl84/+zftl3o/vlW3bs6mqJDKsLjm+CNtdujLatHmJtc0aReOuKSPWHUL0XyfdSIsC+ZN35RLtT+viElQcQdgtB931ODWC/gxhDJbDIj/MvzMdv9PZl+ij4jmo/V91+Tryf/P16w+Iw4F41nvuGhO5E04evi8VPsYUG5d/emU7j/iRLIQZbChtXTfANqbD5kX4afZsUz3QxGafZ1zb//o4MmlExQGawbSP2FT5/1/z8fh8tMvutXX76jgM0UbAvAXpthAtdnzv7LSLx2OUdyI5j7MU8Ff+JHAfeXnaSIo+ncFjqrk1BjpVx85GdkCnZIB/yzS/T8ZhPkvTJNl5b5kk1uWWRJxb5TL9Pnr9N+Hw3da+pB6fIyNgL8nWXmFyZup8Dxhzu2LieHZsn6MQFpk2Jd+Ylszcvff76PHlrlEP929Y72889+0RMlmsXl59bvIS/sdy4UbeFsGSSPU1njyz5iJFLluKgvfoiR4vX9SGTUexwrWiSmdN1mlQNkhoBNidrcQf+64FvnUUnjx7pOz5TBob8Am6za5iwcJguK37hgIpeUGG2ECryCNasmpGkvKswVwZ27YswLV5amoNW7JszUMFONe5ptE0HKu1DpRZfOKNS1aFbE6hu7G+oNThxR6RYi/e9UoOZ5ZHbJpOEZVLEmrYrwRY//XNq5II3B2L3RewWj71ViNbF5mR8nc54cwKTvKR7fnKr/Gj+7eN8uZy/9JfBrDrrdV8dZcJXV9BDEtJv8dUPjJ33HIH/nxZ3K1IoG88KP36YvX2fLPhzdqraoP/bMOANfmoVSSfhj3vCVFZudC4c0gP+CxDm+DeI9WAVqAQDyMPxH4ZY1WXGqi46Vz0E2voMtH1OR9dm0/vJkC49OG72V3qMzQ7CYMb3RYKWaOKWh3UBFRU7nKrBZymFFWbz5U05fltxY1a7O+UWB9AeCFozgLYFtAVEV7B9exktlkm/+7jhygGqGVRbYs1XBdU8DzZN5L5qSn0Qm6eJqJeq/krabCM7agWodvTCfv7sy9trDOmBVAeSqiWU/o9NAneWNJL//XU6WQ7FgbvkDNKKdMUEhcyq2nuDn9dHruCqlOChpyqr0o1sjsMPGWY7iYAGMvRs4w25mh5i1Q4qyRpXT9ZgKllTzCk+JFhdnHuYMjZMGTsT1XLRYfies5NtU8Y+zGZxidkhs8aySMK+Js9Vw7VvircEbaNpQrmN2tkKaJsSFg1cx9Jg/TZSqyUEFFUP1DyK9Qgtjns0Qmz2pey7wQ/fjXZdJ5QVsvEKTeMjzymqmb6hbvuaVKESyD5sX7isSiUVVxuVIVu2BWIvsUu50l2SC4ZapV9Tq+Rk3HomWaxkjlSsVBRBtfWTmczGH7jL24YcJQqWH4i5qbaA4UcUNZJZDT4ZFw3huj+tqHax/jCKbYvJ82g5/RYPn3pC+Rn+ez7NuDQnhg2xRDK+8pDf5l8X95P8V5vnvHWgKrGWo8XjZFkbJ6PX+q67kTDRWszKz/+8ceR0u0/0uUbRfjoEtTcF0g5vCcnbmgLRkdZ9+Bjubrp1BmrSSNx97gitgKwygrWNRa1c0E7HRFVWOPJGwQHqoKSrqy+dHWG0R8KokZBoGKbTgxzE1iaFiTvGRGjABA0wfX1rEqElPIzeXldNIB+mPxhDMUA6E4u/p3pI1UR9DVkgnQ6TLsj6AuPxQ6pJVATdHmCCKIWXaLxX9BddLNGhkAVVXEimNlgNrjhlDRf1Qw7CRYf2YSVxf8/cPr1P6ezoqXc1GeccQV3yUZ4f/OTHdPn3fFT+/A8miMD826cfOX2yLz+LLzO65b+Xv5R+xV83P8u+Fb+roej21lq5IX9NJa0EfcxOK6Edi93uINGaLjNIhodxFuxWjBgU3nlpPAkGcJjCSHaIs8Yaw5ZysDsrPK0FCafgTEC6sqLV4PqqvFDogrMag9W1+QGdtWG7GJVeaATJF2CDldofTVcmOqTlcpFckUEw/mLBSOgTRhoyb7V03HSz3nDzRPoz1XBikJNb5aR9t3KSxJ+wXqO2RpOEUhUJBk6gskiXYdEYWRm+s5hsFcZbTtKjlExM4co9Cn9dHkUfEpCgZ73RRLkQPFT7zJLwYtFmASQEvW71G0tAQ7rTe2BYEEckJWA0SB/BsdQ0pkECpiRgJO38rvhYxzcLRMikgHPOaw9gQGtlExDIDiHtRyKERaGULTM5GyScDI
IkkLIGlfHWxBJOIwpF1xDAovLB60pXp64ijoS0CMYZQ0ORLPUVGUebhAlg0AbFN1w5S38yTiUKH64zatKDjLMQBFn13lgyAVD6ioxzShByyHmw5EeAhTMRcSoV/x9E3FYjz9fF3mZhj8sTe0DOp/Vk2KH1VlakXrs46ir0EA1xQCAv2JEhgM7tcpIeZd5uwf5jA35P8O7DKAcBXr0vwFsCPLsRKkgTnPIxGI3yQpLTJZ1WgZs3Xjjkk8mRLODjrjHe03d8pwuqT6Tdm1Mg1xnqOzrpnRZOkvNAwlGT/5DIYp8GCImcR27p28HS35XoZMt7IiIZ8iYEGYpahXMPZySXRhls/a2mj06YPnixps+24IP0QgVw6J1xQZvqkjydYxzbmATJW6bNBsk2oks5mvEzZDH2An0i23vJoCdLnMMuOkvDalVJqOgAgpdAM5bwovz6TncGvSX1ry05FZr8Byd95TS0G50E5HvwWleKGXoEfTJ5Mdh9/aV0WxMaBog70RKqtUQXrE2WRJ3CAuiQ0LiuFTizFTezdTazFTdNuImX1wOfqhDoAUEgpRah9Cfu1qetFNZk1QKr9zqElDMEELn+k8j4eiv0egR+PxxCujln0BRMeKeexDFAwesqukx9kFWkqukna4UqETMlVpwgNepD/goJqcJHlHDVg1TRiRzCqdJIOxtpvhVNTdip6cCjSAjP4UNtgTOR9IqNCSONKNHR6ETdUDc01I44CAyJ+Hphelyj5XHs4jIyDqQgD8vTxXmoph37A0nPIqPDUuCDV1bzygrmKntlumfIaJ3w0dhaddrmgqhY7Tfy0fiQEtJUM0jSvlPwiiwebYCcM3oVQFyXXjhRHt1XQNjVQ2NbmoQqanLBkF7a7XSa/lw0nQzNZ0pTDXbUoUIRMAhpSVjngI0NKUOmt05Q+eRScZiZsJdUTNSgFex16VLRBSuC1gGVk/yqBGt7koqel3N36I3BwK/dztKjUBwyFHsxQCJYq3cuzjhPBgAlka7K8Owb4NdxOACUyVggSO3Y/sCK+RF4b2MgpUcOOK90Re9o3oenunBAojzp/XCARqGkV26lAmzFZDVetMT4OjMAWJ6ApnwGf7ueslKYTBqSsacjcEDzxIv3Gk369RZwCsunMXkTGYkhXHTMcFFrUPFscGESaYbBEtyuB0NCD+48HedM9aBE8oWUg7ByUjCGLpl+vehByaaeDdITl/ALdzlLf2rQNDdm+hKxQUNDJeDOnVlXogM7MzUPZOUf/1SZhN7WEOnLmbdDqjbwOUpqz4BwK3bKXrF5pYCgXZKg+0SkoGII2pYJvp3lsNouh6+xYiC7wKxiwN38qooBrSrVVFYK7WVAlb/Xq42VUwJhUyNQVE5FRQIoLATJ08yz9x6KBExyxsFQJNAPDpA8Pg1hLQuqVQIoStQEnyw+8opcuPWrofpIoQ42f6/OgdkLFvWcx18/3J3UwDcHGfjkfBscdzLwR/d+rH+Fga9DEOiDCYCc4KrWkKQM/FPBoXlSwpW1WDiGlDBOC3KjvFFhlek8XxwkGxINEZ9eGZ9znhalC3r17hIW5qnon4h5DzGgY6oIxZMvPBipsogyXgxSUuuqD1GhbVGhgsHKUaFC6PYGm4awkCSvJeRp5dTUDj4CnLH5+849qUAFtoa999qUzlFKD5adm0rWonNQiKcrAY2PIS9P2uUsPQaFktHyoWjoV1hMhHrhtAT0xfvZiEUcguV7icVEp76Cwy5eLAIYQcII6W8Gxcr0kp7kIiAICRZ8Pl9BnUYu4tDFaC8GSGSLCrfh4hkAIQjlVKhmcPqCPiJ3okS7DunHZwlarOOy/H406HcI0A/Qr0M/UTJndp7ffKbQN14JYxx3OWKZXC3nlCDWg/P7vhywJcKkgtg4kdWsQo8MMEwl2YsB8B3LfsPTs0lE+1WZS1zLmZWy2eANrN73bO61hcuMlkKbrGlB9n607l6YmEgyFApccKEA4qq3gMpfleicgqjYStUYrIsXCl4U8M/eC5l5kBfanM4ZghOHBydsEE65QAYntz+PQQEuCKPRoKK9gf7WQYFkjoJVHLzgJEFqCYHsEAR0Icj1VIyDEDFM59infzbuY4eSU0OqzABIw1UfO8LDGMEaXGOgoTRJn4RW5kOAoWO8c1WHpotS1kawOmSnDEPAOCIBwQkIlqv8JJ1r3Tpnd7UsyS+zvJIKXSgZpZX1AyQZF84yCxDYg62epke9nMxqDTUwfdXAKMErYzhiK5DOxIAnFAhpfdAc/QrOJZaTP41AHPJX+/gqmPBVCvZ6b0LSaEGyz2nyLMhzUfFcn23SawfX3QsrvbbsianaSi6/Tkie1wobl9KN2yZaUr5ThkAjhXNAF2uyxYtioFrHi6YpunqJ9P+ls4OtJ/KGAsn+q18OQ/1JLAebyHANtZL9lc55DrnY4MlzWC8qduaIUGelOi/FliwYpaw67a8XIr/Elmz1hDl1H6RVznn01heW2O6lUe284wXSDZCGlXQLGo+2qqltnnIyFBH3FGZUKAWZSsaTuSUDGWPnIgpblrkYCoiPMsekhefPFiVDd629FGaiUsLukyg+f4XJi/RJo41zkhPPOo5Q9xd8OZMItU2lZ84vcwxD5rhzUJyUt+OiCu0kIUfvyGMdxDI44ax06HwICOCbeayzWO7Q8eq6ZpiXZpOH7DKZvph94h/aY8HHyCC0VYQMS9R1Oi6b+Q15r1beSF63JSTw44MICknIW9L8qFLr1NPQgpfd85JXTiCHoA/F3twxaig8OBwVDgQCiYSgHNGrUkvohfSBJItxGtB6mVjrCkUgfUoKjZSdkj5t6wXubaZRstRxvYDivDIL52ztRZbd7s2CMstOe+4f6ulzxRvogBCy24KlH9IBksSCTDUPyg7xBry3wI7HzradYeVmPDrNjYp0xehCtrmMBl7/x2rp9g2GGMk9WGQwQRrQ4OJnYVFIaz1dvzTe1jpt9GfZuWGRjqPWHvDCKyqA8aTHpI2DJE4LsJIMd87lelm0wjq9RHTDJJm9/N9Ea9GCv96flKQhyIADgrYPWvtYSm6RXztUThPNABQ9De3Yoz+RlFRnxRCXUnxQKLWrYAg0WhgMknS6Ax0wrscBugegy5PWSEeyHPeeSnMmHFFPogz1B0epPzgE+KeyH5pXLBlKEA5HBXfvcd5wbC1oKFTR+aNiyKrspUQTFXwFg707JdruFYMUpEgVOJTacT/7ffMqW1iIVLUPgZNXpNT1EWfkuURe5VSC8pJLEdoCj0CokSo4ZU0AiSpd4X8SgfjOV/DoINx8osaqYIr3J9ww7soLxfeLDR6mJqkMaeGby00LoyJckXr1ErS3YVc26iBIwYngEUghoKGBvOshMewSGZyrXsT+2J4pz6nTiMZ75bMJ5hWN64QudRIv1huPZ547ErTSObAkv4orqLQ/Sx1yCE78+U2aOAwaZxm0CLZFF4MMvJb9RoqcizHmEzmPU1UFv7uYhQUrPFjQUtNHWelD7yxBQvEe0j8kVeqLE+jsCCMxIILUvsjJVuRFNEofmDivsH+v4ft9z
f0uRn2iDtTvjBljvSATFqXlNFOo9MxJdgtihEjHyVQXCpug0i6I1zlHxav22FD0IOxuwqMLwivtuO0AXVllXaGAgjQi6UZDStFrqHgIXa17zNrUEpbJOqIbqhj3vKIDCX7PJajc88UdrejTp9pl7WiXe7LLU0b5XRTiaDDNT6iW9Xuz2IyT2xjoVEK2uSvVePptb4+wCXmfuyBvvTm6gvev/lMGYk/uAslDDd4gGAnuXKE4ZCj2CuIlMhR+5wzFWep7VNyZ0pA2ZtjuqIo7K/xW9iDrmPsyIxnRdJ/rKVRH0Pfn1YPrUhaULoD3/qxdqyvxarNnvLqdjawTdBfK8bqHxkBf+Kavizlr/83hpJWf/jIfT/iI/wc=</diagram></mxfile>
2106.01354/main_diagram/main_diagram.pdf ADDED
Binary file (77.5 kB). View file
 
2106.01354/paper_text/intro_method.md ADDED
@@ -0,0 +1,104 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Introduction
2
+
3
+ Formal reasoning over explicit multi-sentence knowledge [@newell1956logic] has often proved to be challenging [@musen1988brittleness], owing to the difficulty in creating logical forms from such sentences, thereby restricting the application of semantic parsers  [@zettlemoyer2012learning; @berant2013semantic; @berant2014semantic]. Thus, in a recent work, @clark2020transformers bypass the creation of intermediate logical forms and show that transformers [@vaswani2017attention] can act as "soft theorem provers\" by answering questions over natural language (English) rule-bases, consisting of facts and rules. In order to reliably interpret these predicted answers, @saha2020prover propose [PRover]{.smallcaps}, a transformer-based model that generates the corresponding proof graph, thus emulating formal reasoning closely. Consider the two example rule-bases with two questions and corresponding proofs in Figure [1](#fig:example_dataset_proof){reference-type="ref" reference="fig:example_dataset_proof"}, where a proof is a directed graph consisting of the relevant facts and rules from the corresponding rule-base.
4
+
5
+ [PRover]{.smallcaps} shows good single-proof generation accuracy but is designed and trained in a way to generate only a single proof for each question. This is not ideal because formal proofs are not always unique and there may be multiple correct ways of arriving at the answer. For example, $Q_1$ and $Q_2$ in Figure [1](#fig:example_dataset_proof){reference-type="ref" reference="fig:example_dataset_proof"} have three and four correct proofs respectively. Hence, in order to enhance the human-interpretability of linguistic formal reasoning systems, it is desirable to develop methods that can generate multiple proofs, each providing a different rationale for the predicted answer. Such interpretable methods, while possessing the flexibility of operating over natural language, can also aid in verifying claims when constructing proofs from scratch is tedious or infeasible.
6
+
7
+ We find that [PRover]{.smallcaps} [@saha2020prover], when trained on all proofs as independent training examples (Eq. [\[eqn:dataset\]](#eqn:dataset){reference-type="ref" reference="eqn:dataset"}) and extended to generate top-$p$ proofs during inference (Eq. [\[eqn:node_inferenc\]](#eqn:node_inferenc){reference-type="ref" reference="eqn:node_inferenc"}), fails drastically, achieving a low proof precision of 34%. The subsequent proofs are often incorrect because it is not trained jointly with all proofs and hence, is unable to exploit the inter-proof correlations and also does not learn the correct number of proofs for a question. Thus, we propose [multiPRover]{.smallcaps}, a transformer-based model that can generate a set of proof graphs with appropriate cardinality for a given question. Since multiple proofs can be generated in any arbitrary order, we pose this task as a set generation problem over graphs and train [multiPRover]{.smallcaps} jointly with a permutation-invariant Hungarian Loss [@Zhang2019deepsets; @zhang2019fspool] over all proofs.
8
+
9
+ A proof graph is generated through a node module which selects the relevant facts and rules as part of the proof and an edge module which determines the edges between the chosen nodes. Similar to PRover, we first enforce multiple structural constraints during training and inference to ensure that a generated proof is valid. Next, in order to generate a set of proofs jointly, we propose our first model, *Multilabel*-[multiPRover]{.smallcaps}, a multi-label classification framework which performs *implicit conditioning* among the proofs and predicts $p$ binary labels for each node and edge, denoting its presence or absence in each of the $p$ proofs that we want to generate. It is efficient in terms of the number of parameters and training time and also achieves a better proof F1 than [PRover]{.smallcaps}. However, the lack of explicit conditioning between the proofs is not ideal because a question with multiple proofs often has certain common sub-graphs across the proofs. E.g., all the 3 proofs for $Q_1$ in Figure [1](#fig:example_dataset_proof){reference-type="ref" reference="fig:example_dataset_proof"} have the sub-graph $\{F_{10} \rightarrow R_1\}$ common. Thus, in order to exploit these correlations which *Multilabel*-[multiPRover]{.smallcaps} cannot capture explicitly, we further propose an improved variant of [multiPRover]{.smallcaps}, named *Iterative*-[multiPRover]{.smallcaps}, which generates an appropriate number of proofs by stacking multiple node and edge encoders, each of which generates one proof at each time step by conditioning on the previously generated proofs. This enables the model to better learn the correlations between multiple proofs for a given question. To capture the set-based nature of the task, we train [multiPRover]{.smallcaps} using a permutation-invariant Hungarian Loss (Sec. [3.5](#sec:hungarian){reference-type="ref" reference="sec:hungarian"}), which solves an assignment problem between a set of predicted and gold proofs.
10
+
11
+ Empirical evaluation on synthetic and human paraphrased QA rule-bases [@clark2020transformers] show that both of our [multiPRover]{.smallcaps} models achieve a significantly higher proof F1 compared to [PRover]{.smallcaps} while retaining the QA accuracy. Further, on a challenging hand-authored zero-shot dataset, where all examples have single gold proofs, *Iterative*-[multiPRover]{.smallcaps} achieves state-of-the-art proof F1. It also generalizes better to questions requiring higher depths of reasoning with more multiple proofs. Overall, our contributions are:
12
+
13
+ - We address a new and challenging problem of generating a set of multiple logical proof graphs for reasoning over natural language rule-bases by proposing two set-based joint models, *Multilabel*-[multiPRover]{.smallcaps} and *Iterative*-[multiPRover]{.smallcaps}.[^1]
14
+
15
+ - *Iterative*-[multiPRover]{.smallcaps}'s joint training and explicit conditioning helps it to better learn the relative importance of rules and facts for a particular question and uncover common subgraphs across multiple proofs. Thus, compared to *Multilabel*-[multiPRover]{.smallcaps} and [PRover]{.smallcaps}, it is able to transfer well in zero-shot settings because it learns to assign a soft prior over the rule-base.
16
+
17
+ - *Iterative*-[multiPRover]{.smallcaps}'s conditional generation also enables it to generalize better to questions requiring higher depths of reasoning where the presence of multiple proofs is frequent.
18
+
19
+ # Method
20
+
21
+ The input to our task is a tuple of the form $(\mathcal{C}, \mathcal{Q})$, where $\mathcal{C}$ is a rule-base context and $\mathcal{Q}$ is the question. We want to predict a binary answer $\mathcal{A} \in \{True,False\}$ for the question and generate a set of proof graphs $P = \{\mathcal{P}_1, \hdots, \mathcal{P}_p\}$, each of which provides a diverse rationale for the answer (see Figure [1](#fig:example_dataset_proof){reference-type="ref" reference="fig:example_dataset_proof"}). The context $\mathcal{C}$ consists of a set of facts and rules, denoted by $\mathcal{F}$ and $\mathcal{R}$ respectively. Facts $\mathcal{F} = \{F_1, \hdots F_f\}$ are unambiguous statements, while rules $\mathcal{R} = \{R_1, \hdots R_r\}$ are logical statements, which can be used in conjunction with the facts to arrive at a logical conclusion. Each proof $\mathcal{P}_i = (\mathcal{V}_i, \mathcal{E}_i)$ is a directed graph, with a set of nodes $\mathcal{V}_i \subseteq \mathcal{N}$ and a set of edges $\mathcal{E}_i \subseteq \mathcal{V}_i \times \mathcal{V}_i$, where $\mathcal{N} = \mathcal{F} \cup \mathcal{R} \cup \{\textbf{NAF}\}$ and $k = |\mathcal{N}|$. If a statement (E.g. "Anne is big") cannot be deduced from the context, then Negation as Failure ($\textbf{NAF}$) contains the negation of that statement (E.g. "Anne is not big"), which is considered true in a closed-world assumption. See appendix for more details of the syntax of proof graphs.
22
+
23
+ [PRover]{.smallcaps} [@saha2020prover] builds on top of RoBERTa [@liu2019roberta] and consists of a question answering (QA) module, a node module, and an edge module where the node and edge modules are used to predict a single proof graph. The input to RoBERTa is the concatenation of the facts, rules, and the question. The QA module takes in the representation of the $[CLS]$ token and predicts a binary label for the question. The node module computes the node embeddings $\textbf{N} \in \mathbb{R}^{k \times d}$ consisting of the representations of each fact, rule and **NAF** where $d$ is the embedding dimension. The $i^{th}$ row $n_i$ of $\textbf{N}$ denotes the embedding of node $i$. A node classifier takes in these embeddings to output the node probabilities $np_i \in \mathbb{R}^k$ for each fact, rule, and **NAF** being present in the proof. The edge module computes the edge embeddings $\textbf{E} \in \mathbb{R}^{k^2 \times 3d}$ for every edge $(i, j)$ through the function $\phi(i,j) = [n_i; n_j; (n_i-n_j)]$ where $;$ is the concatenation operation and outputs probabilities $ep_{i,j} \in \mathbb{R}^{k^2}$ of each edge being present in the proof. [PRover]{.smallcaps} is trained using the joint cross-entropy loss over the QA, node, and edge modules. The authors pose inference as an Integer Linear Program (ILP). Given a set of nodes and the edge probabilities from the trained model, the following global score over the edge probabilities is maximized, subject to multiple structural constraints $\mathcal{S}$ that ensure the validity of a proof graph (like checking for graph connectivity). $$\begin{equation}
24
+ \small
25
+ \underset{e_{i,j}\in \{0,1\},\\ s \in \mathcal{S}}{\operatorname{\arg\max}} \sum_{i,j,i \neq j} ep_{i,j} * e_{i,j} + (1-ep_{i,j}) * (1-e_{i,j})
26
+ \label{eqn:edge_opt}
27
+ \end{equation}$$
28
+
29
+ ![Plot showing the percentage of samples with $p>1$ proofs for different training datasets, DU0-DU5.](figures/percent_proof.pdf){#fig:percent_proof width="\\columnwidth"}
30
+
31
+ **Extending [PRover]{.smallcaps} to Generate Proof-Sets:** Since @saha2020prover focus on generating one proof per question, they also train their model with one gold proof per question. For multiple proof generation, an obvious extension is to treat each proof for a question as a separate training example. Formally, for each sample $l$, given a context $\mathcal{C}^l$, a question $\mathcal{Q}^l$, an answer $\mathcal{A}^l$ and a set of gold proofs $\mathcal{P}^{l}_{i}$, where $i \in \{1,\hdots, p_l\}$, the extended training dataset can be defined as: $$\begin{equation}
32
+ \footnotesize
33
+ \mathcal{D} = \bigcup_{l=1}^{L} \left \{\left(\mathcal{Q}^l, \mathcal{C}^l, \mathcal{A}^l, \mathcal{P}^l_i\right)_{i=1}^{p_l}\right\}_{l}
34
+ \label{eqn:dataset}
35
+ \end{equation}$$
36
+
37
+ Once [PRover]{.smallcaps} is trained with this dataset, during inference, we generate top-$p$ proofs by first selecting the top-$p$ node sets according to Eqn. [\[eqn:node_inferenc\]](#eqn:node_inferenc){reference-type="ref" reference="eqn:node_inferenc"} and then choosing the corresponding edge sets using the optimization function in Eqn. [\[eqn:edge_opt\]](#eqn:edge_opt){reference-type="ref" reference="eqn:edge_opt"}. $$\begin{equation}
38
+ \underset{\textbf{v} \in \{0,1\}^k} {\operatorname{\arg\max}}
39
+ \sum_{i=1}^k np_{i} * \textbf{v}_{i} + (1-np_{i}) * (1-\textbf{v}_{i})
40
+ \label{eqn:node_inferenc}
41
+ \end{equation}$$
42
+
43
+ ![*Multilabel*-[multiPRover]{.smallcaps}.](figures/ML-MProver.pdf){#fig:ml-mprover width="\\columnwidth"}
44
+
45
+ The top-$p$ solutions of Eqn. [\[eqn:node_inferenc\]](#eqn:node_inferenc){reference-type="ref" reference="eqn:node_inferenc"} are $\textbf{v}^1, \hdots, \textbf{v}^p$ which indicate a node's presence or absence in the proofs. Although simple, this approach has two major issues. First, the lack of coupling between the proofs can potentially confuse the model as there are multiple possible proofs for the same (question, context) pair. Second, inference is inflexible and always generates a fixed number of proofs for every example, thus leading to the generation of many incorrect proofs (Section [5.1](#exp:main){reference-type="ref" reference="exp:main"}). As shown in Figure [1](#fig:example_dataset_proof){reference-type="ref" reference="fig:example_dataset_proof"}, certain questions can have multiple possible proofs. Figure [2](#fig:percent_proof){reference-type="ref" reference="fig:percent_proof"} demonstrates this phenomenon statistically -- the datasets we experiment with [@clark2020transformers] contain up to 13% of the samples with $>$ 1 correct proof. Thus, in the light of [PRover]{.smallcaps}'s limitations, we propose two novel architectures of a proof-set generation model, [multiPRover]{.smallcaps}.
46
+
47
+ ![*Iterative*-[multiPRover]{.smallcaps}.](figures/IT-MProver.pdf){#fig:it-mprover width="\\columnwidth"}
48
+
49
+ As described in the previous section, a desired property for generating a set of proofs is to have the proofs conditioned on each other as opposed to treating them independently. Thus, we propose *Multilabel*-[multiPRover]{.smallcaps} (see Figure [3](#fig:ml-mprover){reference-type="ref" reference="fig:ml-mprover"}), which poses the problem of generating a set of proofs as a multi-label classification task over all the nodes and edges corresponding to the set of $p$ proofs. Each training example is a tuple $\left(\mathcal{Q}^l, \mathcal{C}^l, \mathcal{A}^l, \{\mathcal{P}^l_i\}_{i=1}^{p_l}\right)$, consisting of a set of gold proofs $\{\mathcal{P}^l_i\}_{i=1}^{p_l}$ per example. It consists of a QA module, a node module, and an edge module. Following [PRover]{.smallcaps} (Section [3.2](#sec:prover){reference-type="ref" reference="sec:prover"}), we obtain the node representations $\textbf{N} \in \mathbb{R}^{k \times d}$ by mean-pooling over the constituent RoBERTa representations. These are then passed through a *multi-label node classifier*, which consists of two linear layers and produces the probabilities $np_{i} \in \mathbb{R}^{p}$ of a node being present in the $p$ proofs. The node embeddings $n_i$ and $n_j$ for a pair of nodes are transformed by the function $\phi(i,j)$, described in Section [3.2](#sec:prover){reference-type="ref" reference="sec:prover"}, to output the edge embeddings $\textbf{E} \in \mathbb{R}^{k^2 \times 3d}$. We also have a *multi-label edge classifier*, which takes in the edge embeddings to generate the probabilities $ep_{i,j} \in \mathbb{R}^{p}$ of an edge $(i,j)$ being present in the $p$ proofs. Lastly, a *question answering* module predicts a binary answer for the question. Following [PRover]{.smallcaps}, during training, we mask certain impossible edges like fact to fact, rule to fact, and non-nodes. Given the outputs from the three modules, we train our model jointly over all proofs using a set-based Hungarian Loss.
50
+
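A minimal sketch of the multi-label node and edge classifiers and the pairwise embedding $\phi(i,j)$ described above; the hidden sizes and the random stand-in for the pooled RoBERTa node embeddings are illustrative.

```python
import torch
import torch.nn as nn

k, d, p = 6, 32, 4             # number of nodes, embedding size, max number of proofs

node_emb = torch.randn(k, d)   # N: mean-pooled node representations (stand-in)

# phi(i, j) = [n_i ; n_j ; n_i - n_j] for every ordered node pair.
ni = node_emb.unsqueeze(1).expand(k, k, d)
nj = node_emb.unsqueeze(0).expand(k, k, d)
edge_emb = torch.cat([ni, nj, ni - nj], dim=-1).reshape(k * k, 3 * d)

# Multi-label classifiers: p probabilities per node / edge, one per proof.
node_clf = nn.Sequential(nn.Linear(d, d), nn.ReLU(), nn.Linear(d, p))
edge_clf = nn.Sequential(nn.Linear(3 * d, d), nn.ReLU(), nn.Linear(d, p))

np_probs = torch.sigmoid(node_clf(node_emb))   # (k, p) node probabilities
ep_probs = torch.sigmoid(edge_clf(edge_emb))   # (k*k, p) edge probabilities
```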
51
+ This model is advantageous because there is *implicit conditioning* between the proofs as all the proofs are generated in parallel from the same node embeddings and edge embeddings. Thus, it has no additional time or memory overhead while also generating proof-sets better than [PRover]{.smallcaps} (Section [5.1](#exp:main){reference-type="ref" reference="exp:main"}). However, it suffers from two major drawbacks. First, since the proofs are generated in parallel, the model is trained by padding empty proof graphs. Hence for higher values of $p$, the model has to learn more empty proofs, which makes the learning problem harder. Second, the proofs are not explicitly conditioned on each other. This motivates us to propose *Iterative*-[multiPRover]{.smallcaps}.
52
+
53
+ <figure id="fig:cnce" data-latex-placement="t">
54
+ <embed src="figures/cnce.pdf" />
55
+ <figcaption>Plot showing the percentage of samples in DU5 with at least one common node, common edge or both between the proofs for varying number of proofs. </figcaption>
56
+ </figure>
57
+
58
+ As a motivating example for why explicit conditioning among proofs is necessary, consider the proofs for $Q_1$ in Figure [1](#fig:example_dataset_proof){reference-type="ref" reference="fig:example_dataset_proof"} where the sub-graph $\{F_{10} \rightarrow R_1\}$ is common across all the proofs. $F_{10}$ and $R_1$ are essential for answering the question and hence conditioning on the previously generated proofs will help the model adjust the relevance of nodes and edges in the subsequent proofs. Quantitatively, we find that about 75% of the samples with 4 proofs have at least one node and one edge common across all the proofs (see Figure [5](#fig:cnce){reference-type="ref" reference="fig:cnce"}). Thus, we propose *Iterative*-[multiPRover]{.smallcaps} (see Figure [4](#fig:it-mprover){reference-type="ref" reference="fig:it-mprover"}), which broadly consists of a base [PRover]{.smallcaps} architecture, as in Figure [3](#fig:ml-mprover){reference-type="ref" reference="fig:ml-mprover"} and an additional $p$ node and edge encoders for generating a maximum of $p$ proofs. The proofs are generated iteratively until an empty graph is generated to denote the end.
59
+
60
+ Base [PRover]{.smallcaps} architecture computes the first level of node embeddings $\textbf{N}^1 \in \mathbb{R}^{k \times d}$ and edge embeddings $\textbf{E}^1 \in \mathbb{R}^{k^2 \times d}$. These are passed respectively through a node and edge classifier to generate the node probabilities $np^1 \in \mathbb{R}^{k}$ and edge probabilities $ep^1 \in \mathbb{R}^{k^2}$, corresponding to the first proof. In the next iteration, two transformer encoders generate the node and edge embeddings corresponding to the second proof. Specifically, we condition the generation of the next node embeddings $\textbf{N}^2$ on the previous node ($\textbf{N}^1$) and edge ($\textbf{E}^1$) embeddings simultaneously. Conditioning on both is crucial because $\textbf{N}^1$ captures the relevance of nodes for the first proof, while $\textbf{E}^1$ contains information about the strength of the connections between these nodes. We condition $\textbf{E}^2$ only on $\textbf{E}^1$, because the edge embeddings corresponding to the nodes predicted by $\textbf{N}^1$ are already updated in $\textbf{E}^1$. Formally, $$\begin{equation}
61
+ \small
62
+ \left.\begin{aligned}
63
+ \textbf{T}^{1} &= W^{(1)} \textbf{E}^1 W^{(2)}, W^{(1)} \in \mathbb{R}^{k \times k^2}, W^{(2)} \in \mathbb{R}^{3d \times d} \nonumber \\
64
+ \textbf{N}' &= [\textbf{N}^{1}; \textbf{T}^1]W^{(3)}, W^{(3)} \in \mathbb{R}^{2d \times d} \nonumber \\
65
+ \textbf{N}^{2} &= \mathit{Transformer}(\textbf{N}') \nonumber;~~
66
+ \textbf{E}^{2} = \mathit{Transformer}(\textbf{E}^{1}) \nonumber
67
+ \end{aligned}\right.
68
+ \end{equation}$$
69
+
70
+ These next set of embeddings, when passed through the respective node and edge classifiers, predict the node probabilities $np^2 \in \mathbb{R}^k$ and edge probabilities $ep^2 \in \mathbb{R}^{k^2}$, denoting the likelihood of their presence in the second proof. We repeat this process of stacking up the node and edge encoders for generating a maximum of $p$ proofs. Given the node and edge probabilities corresponding to each proof and a QA probability from the QA module, we train *Iterative*-[multiPRover]{.smallcaps} jointly with all proofs using the Hungarian Loss, described below.
71
+
72
+ Unlike words in text generation, proofs can be generated in any arbitrary order. Consequently, computing cross-entropy loss between the $i^{th}$ predicted proof and the $i^{th}$ gold proof, $i \in \{1, ..., p\}$ will be sub-optimal. Thus, we use a permutation-invariant Hungarian Loss [@Zhang2019deepsets; @zhang2019fspool] which finds the most optimal assignment between the predicted proofs and the gold proofs such that the overall loss is minimized. Formally, the Hungarian loss $\mathcal{L}_H$ and total loss $\mathcal{L}$ are denoted as follows: $$\begin{equation}
73
+ \small
74
+ \left.\begin{aligned}
75
+ \mathcal{L}_H &= \min_{\pi \in \Pi} \sum_{i=1}^{p} \mathit{CE}(np^{i}, y_n^{\pi(i)})~+~\mathit{CE}(ep^{i}, y_e^{\pi(i)}) \nonumber \\
76
+ \mathcal{L} &= \mathcal{L}_{QA} + \mathcal{L}_H \nonumber
77
+ \end{aligned}\right.
78
+ \end{equation}$$
79
+
80
+ where $\mathit{CE}(., .)$ is the cross entropy loss, $np^{i}$ and $ep^{i}$ are the respective node and edge probabilities for the $i^{th}$ predicted proof while $y_n^{\pi(i)} \in \{0,1\}^{k}$ and $y_e^{\pi(i)} \in \{0,1\}^{k^2}$ are the respective true node and edge labels for the gold proof $\pi(i)$, where $\pi$ is the most optimal permutation. The Hungarian Loss is implemented by first summing the node and edge cross-entropy loss matrices $\mathcal{L}^n \in \mathbb{R}^{p \times p}$ and $\mathcal{L}^e \in \mathbb{R}^{p \times p}$ respectively, each entry $(i,j)$ of which corresponds to the proof loss between the $i^{th}$ predicted proof and $j^{th}$ gold proof (see Figures [3](#fig:ml-mprover){reference-type="ref" reference="fig:ml-mprover"} and [4](#fig:it-mprover){reference-type="ref" reference="fig:it-mprover"}). Then we find the best assignment between the gold and predicted proofs through the Hungarian algorithm [@Kuhn55thehungarian]. Our final loss sums the Hungarian proof loss and the QA loss.
81
+
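A sketch of this loss using an off-the-shelf assignment solver is given below; flattening the edge probabilities and treating the node/edge cross-entropies as binary cross-entropies over probabilities are implementation assumptions.

```python
import torch
import torch.nn.functional as F
from scipy.optimize import linear_sum_assignment

def hungarian_proof_loss(np_pred, ep_pred, yn_gold, ye_gold):
    """Permutation-invariant proof loss: build the p x p cost matrix of node plus
    edge cross-entropies and keep the cheapest one-to-one assignment.
    np_pred: (p, k) node probabilities, ep_pred: (p, k*k) edge probabilities,
    yn_gold / ye_gold: binary gold labels with the same shapes."""
    p = np_pred.shape[0]
    cost = torch.zeros(p, p)
    for i in range(p):
        for j in range(p):
            cost[i, j] = (F.binary_cross_entropy(np_pred[i], yn_gold[j]) +
                          F.binary_cross_entropy(ep_pred[i], ye_gold[j]))
    rows, cols = linear_sum_assignment(cost.detach().numpy())  # optimal permutation
    return cost[rows, cols].sum()

# Toy usage with p = 3 proofs over k = 5 nodes.
p, k = 3, 5
np_pred, ep_pred = torch.rand(p, k), torch.rand(p, k * k)
yn_gold = torch.randint(0, 2, (p, k)).float()
ye_gold = torch.randint(0, 2, (p, k * k)).float()
loss = hungarian_proof_loss(np_pred, ep_pred, yn_gold, ye_gold)
```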
82
+ ::: table*
+ | Model | QA | Node P | Node R | Node F1 | Edge P | Edge R | Edge F1 | Proof P | Proof R | Proof F1 | FA |
+ |------------------------------------------|:--------:|:--------:|:--------:|:--------:|:--------:|:--------:|:--------:|:--------:|:--------:|:--------:|:--------:|
+ | [PRover]{.smallcaps} [@saha2020prover]   | 99.3 | 89.2 | 84.9 | 86.0 | 87.5 | 84.2 | 85.3 | 87.1 | 84.0 | 84.7 | 81.2 |
+ | [PRover]{.smallcaps}-all                 | 99.3 | 87.9 | 83.8 | 84.9 | 87.1 | 83.6 | 84.6 | 85.9 | 82.8 | 83.7 | 80.3 |
+ | [PRover]{.smallcaps}-top-$p$             | 99.3 | 34.4 | 88.4 | 48.4 | 33.8 | 87.4 | 47.7 | 33.3 | 86.7 | 47.2 | 00.0 |
+ | [PRover]{.smallcaps}-top-$p$-classifier  | 99.3 | 85.7 | 84.4 | 83.8 | 84.8 | 84.1 | 83.5 | 83.9 | 83.4 | 82.6 | 77.3 |
+ | [PRover]{.smallcaps}-top-$p$-threshold   | 99.3 | 84.4 | 88.0 | 85.0 | 83.6 | 87.1 | 84.4 | 83.0 | 86.5 | 83.8 | 77.2 |
+ | ML-[multiPRover]{.smallcaps}             | **99.5** | 89.4 | 89.2 | 89.0 | 87.7 | 87.8 | 87.4 | 87.2 | 87.3 | 87.0 | 83.8 |
+ | IT-[multiPRover]{.smallcaps}             | **99.5** | **90.6** | **90.2** | **90.0** | **89.6** | **89.4** | **89.2** | **89.1** | **89.0** | **88.7** | **85.5** |
+ :::
103
+
104
+ Following [PRover]{.smallcaps}, we generate valid proofs during inference using an ILP, subject to multiple global constraints (see @saha2020prover). For each predicted proof, given the predicted nodes and the edge probabilities from [multiPRover]{.smallcaps}, we obtain the corresponding predicted edges using Eqn. [\[eqn:edge_opt\]](#eqn:edge_opt){reference-type="ref" reference="eqn:edge_opt"}.
2106.09614/main_diagram/main_diagram.drawio ADDED
The diff for this file is too large to render. See raw diff
 
2106.09614/paper_text/intro_method.md ADDED
@@ -0,0 +1,121 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Introduction
2
+
3
+ Monocular 3D face reconstruction aims at estimating the pose, shape, and albedo of a face, as well as the illumination conditions and camera parameters of the scene. Solving for all these factors from a single image is an ill-posed problem. Model-based face autoencoders [@tewari2017mofa] overcome this problem through fitting a 3D Morphable Model (3DMM) [@blanz2003face; @egger20203d] to a target image. The 3DMM provides prior knowledge about the face albedo and geometry such that 3D face reconstruction from a single image becomes feasible, enabling face autoencoders to set the current state-of-the-art in 3D face reconstruction [@deng2019accurate]. The network architectures in the face autoencoders are devised to enable end-to-end reconstruction and to enhance the speed compared to optimization-based alternatives [@kortylewski2018informed; @zhu2015discriminative], and sophisticated losses are designed to stabilize the training and to get better performance [@deng2019accurate].
4
+
5
+ A major remaining challenge for face autoencoders is that their performance in in-the-wild environments is still limited by nuisance factors such as model outliers, extreme illumination, and poses. Among those nuisances, model outliers are ubiquitous and inherently difficult to handle because of their wide variety in shape, appearance, and location. The outliers are a combination of occlusions, which do not belong to the face, and mismatches, which are facial parts that the face model cannot depict well, such as pigmentation and makeup in the texture and wrinkles in the shape. Fitting to the outliers often distorts the prediction (see [3](#fig:OccusionAffectPerformance){reference-type="ref+label" reference="fig:OccusionAffectPerformance"}) and fitting to the mismatches cannot improve the fitting further due to the limitation of the model. Therefore, we propose to fit only the inliers, i.e. the target with the outliers excluded.
6
+
7
+ To prevent distortion caused by model outliers, existing methods often follow a bottom-up approach. For example, a multi-view shape consistency loss is used as a prior to regularize the shape variation of the same face in different images [@deng2019accurate; @DECA; @tiwari2022occlusion], or face symmetry is used to detect occluders [@8578512]. Training the face encoder with dense landmark supervision also imposes strong regularization [@wood20223d; @zielonka2022towards], while pairs of realistic images and meshes are costly to acquire. Most existing methods apply face [@saito2016real] or skin [@deng2019accurate] segmentation models and subsequently exclude the non-facial regions during reconstruction. These segmentation methods operate in a supervised manner, which suffers from the high cost and effort of acquiring a great variety of occlusion annotations from in-the-wild images.
8
+
9
+ In this work, we introduce an approach to handle outliers for model-based face reconstruction that is highly robust, without requiring any annotations for skin, occlusions, or dense landmarks. In particular, we propose to train a Face-autOenCoder and oUtlier Segmentation network, abbreviated as FOCUS, in a cooperative manner. To train the segmentation network in an unsupervised manner, we exploit the fact that the outliers cannot be well-expressed by the face model to guide the decision-making process of an outlier segmentation network. Specifically, the discrepancy between the target image and the rendered face image ([1](#fig:intro){reference-type="ref+label" reference="fig:intro"} 1st and 2nd rows) is evaluated by several losses that can serve as a supervision signal by preserving the similarities among the target image, the reconstructed image, and the reconstructed image under the estimated outlier mask.
10
+
11
+ The training process follows the core idea of the Expectation-Maximization (EM) algorithm, by alternating between training the face autoencoder given the currently estimated segmentation mask, and subsequently training the segmentation network based on the current face reconstruction. The EM-like training strategy resolves the chicken-and-egg problem that the outlier segmentation and model fitting are dependent on each other. This leads to a synergistic effect, in which the outlier segmentation first guides the face autoencoder to fit image regions that are easy to classify as face regions. The improved face fitting, in turn, enables the segmentation model to refine its prediction.
12
+
13
+ We define in-domain misfits as errors in regions that a fixed face model could explain but consistently fails to fit well; such misfits are observed in the eyebrow and lip regions. We assume that these misfits result from deficiencies of the fitting pipeline. Model-based face autoencoders use image-level losses only, which are highly non-convex and suffer from local optima; consequently, it is difficult to converge to the globally optimal solution. In this work, we propose to measure and adjust the in-domain misfits with a statistical prior. Our misfit prior learns from synthetic data at which regions these systematic errors occur on average. Subsequently, the learnt prior can be used to counteract these errors for predictions on real data, especially when our FOCUS structure excludes the outliers. Building the prior requires only data generated by a linear face model, without any model enhancement or further improvement in landmark detection.
14
+
15
+ We demonstrate the effectiveness of our proposed pipeline by conducting experiments on the NoW testset [@Ringnet], where we achieve state-of-the-art performance among model-based 3D face methods without 3D supervision. Remarkably, experiments on the CelebA-HQ dataset [@CELEBAHQ] and the AR database [@ARdataset] validate that our method is able to predict accurate occlusion masks without requiring any supervision during training.
16
+
17
+ In summary, we make the following contributions:
18
+
19
+ 1. We introduce an approach for model-based 3D face reconstruction that is highly robust, without requiring any human skin or occlusion annotation.
20
+
21
+ 2. We propose to compensate for the misfits with an in-domain statistical misfit prior, which is easy to implement and benefits the reconstruction.
22
+
23
+ 3. Our model achieves SOTA performance at self-supervised 3D face reconstruction and provides accurate estimates of the facial occlusion masks on in-the-wild images.
24
+
25
+ # Method
26
+
27
+ In this section, we introduce a neural network-based pipeline, FOCUS, that conducts 3D face reconstruction and outlier segmentation jointly. We first discuss our proposed pipeline architecture ([3.1](#approach:Network){reference-type="ref+label" reference="approach:Network"}) and then the EM-type training without any supervision regarding the outliers ([3.2](#approach:EM){reference-type="ref+label" reference="approach:EM"}). In [3.3](#approach:initialization){reference-type="ref+label" reference="approach:initialization"} we describe the unsupervised EM initialization. Finally, we show how to compensate for the systematic in-domain misfits with a statistical prior ([3.4](#approach:prior){reference-type="ref+label" reference="approach:prior"}).
28
+
29
+ Our goal is to robustly reconstruct the 3D face from a single target image $I_T$ with outliers, even under severe occlusion. To solve this challenging problem, we integrate a model-based face autoencoder, $R$, with a segmentation network, $S$, and create synergy between them, as demonstrated in [2](#fig:pipeline){reference-type="ref+label" reference="fig:pipeline"}. For face reconstruction, the segmentation mask cuts the estimated outliers out during fitting, improving reconstruction robustness. For segmentation, the reconstructed result provides a reference, enhancing the segmentation accuracy. In this section, we explain how the two networks are connected and how they benefit each other.
30
+
31
+ **The model-based face autoencoder**, $R$, is expected to reconstruct the complete face appearance from the visible face regions in the target image, $I_T$. It consists of an encoder and a computer graphics renderer as its decoder. The encoder estimates the latent parameters $\theta = [\alpha, \gamma, \phi, c] \in \mathbb{R}^{257}$, i.e. the 3D shape $\alpha \in \mathbb{R}^{144}$ and texture $\gamma \in \mathbb{R}^{80}$ of a 3DMM, as well as the illumination $\phi \in \mathbb{R}^{27}$ and camera parameters $c \in \mathbb{R}^{6}$ of the scene. Given the latent parameters, the decoder renders a predicted face image $I_R=R(\theta)$.
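+
+ To make the parameter layout concrete, a minimal sketch of the split is given below. This is our own illustrative PyTorch snippet under the dimensions stated above; `split_latent` and the commented renderer call are hypothetical names, not the authors' implementation.
+
+ ```python
+ # Minimal sketch (not the authors' code): split the 257-dim latent code theta
+ # into the 3DMM and scene parameters described above.
+ import torch
+
+ def split_latent(theta: torch.Tensor):
+     """theta: (B, 257) -> shape (144), texture (80), illumination (27), camera (6)."""
+     assert theta.shape[-1] == 144 + 80 + 27 + 6
+     alpha, gamma, phi, c = torch.split(theta, [144, 80, 27, 6], dim=-1)
+     return alpha, gamma, phi, c
+
+ # A differentiable renderer would then act as the decoder, e.g.
+ # I_R = renderer(shape_basis(alpha), texture_basis(gamma), phi, c)
+ ```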
32
+
33
+ <figure id="fig:OccusionAffectPerformance" data-latex-placement="t!">
34
+ <embed src="CVPR2023/figures/FIG3_CVPR2023.pdf" />
35
+ <figcaption>In the presence of outliers, FOCUS reconstructs faces more faithfully than previous model-based face autoencoders. The images from top to bottom are: target images, results of the MoFA network <span class="citation" data-cites="tewari2017mofa"></span>, and our results.</figcaption>
36
+ </figure>
37
+
38
+ Standard face autoencoders [@tewari2017mofa] fit the face model parameters regardless of whether the underlying pixels depict a face or an occlusion. Consequently, the face model is distorted by the outliers: as shown in the second row of [3](#fig:OccusionAffectPerformance){reference-type="ref+label" reference="fig:OccusionAffectPerformance"}, the illumination, appearance, and shape are estimated incorrectly. To resolve this fundamental problem of face autoencoders, we introduce an unsupervised segmentation network, whose output can be used to mask out the outliers during model fitting and therefore make the autoencoder robust to outliers.
39
+
40
+ **The segmentation network**, $S$, takes the stacked target image $I_T$ and the synthesized image $I_R$ as input and predicts a binary mask, $M=S(I_T,I_R)$, to describe whether a pixel depicts the face (1) or outliers (0). Since $I_R$ contains the estimated intact face, it provides the segmentation network with prior knowledge and helps the estimation.
41
+
42
+ The face autoencoder and the segmentation network are coupled together during training to induce a synergistic effect which makes the segmentation more accurate and reconstruction more robust under outliers, as shown in the last row in [3](#fig:OccusionAffectPerformance){reference-type="ref+label" reference="fig:OccusionAffectPerformance"}. [3.2](#approach:EM){reference-type="ref+label" reference="approach:EM"} describes how the pipeline can be trained end-to-end, despite the entanglement between the two networks, and the high-level losses that relieve our pipeline of any occlusion or skin annotation.
43
+
44
+ Due to the mutual dependencies between the face autoencoder and the segmentation network, we conduct an Expectation-Maximization (EM) like strategy, where we train the two networks in an alternating manner. This enables a stable convergence of the model training process. Similar to other EM-type training strategies, our training process starts from a rough initialization of the model parameters which is obtained in an unsupervised manner (as described in [3.3](#approach:initialization){reference-type="ref+label" reference="approach:initialization"}). We then optimize the two networks in an alternating manner, as described in the following.
45
+
46
+ **Training the segmentation network.** When training the segmentation network, the parameters of the face autoencoder are fixed and only the segmentation network is optimized. Instead of relying on labelled data, we propose four losses that enforce intrinsic similarities among the images. Each loss either encourages including pixels that depict the face or encourages the opposite. Since the proposed losses have overlapping or opposing effects, the network yields good segmentation results only by reaching a balance among them. The losses operate either on the perceptual level or on the pixel level, to fully exploit the visual cues. The perceptual-level losses compare the intermediate features of two images extracted by a pretrained face recognition model $F$. We use the cosine distance, $cos(X,Y)=1-\frac{X \cdot Y}{\|X\|\,\|Y\|}$, to compute the distance between the features. Perceptual losses are common for training face autoencoders, as they encourage encoding facial details that are important for face recognition [@DECA]. We found that the perceptual losses also benefit segmentation (see [4.4](#ablation study){reference-type="ref+label" reference="ablation study"}).
47
+
48
+ The proposed losses are as follows:
49
+
50
+ ::: small
51
+ $$\begin{gather}
52
+ L_{nbr}= \sum_{x\in \Omega} \Big\| \min_{\forall x' \in \mathcal{N}(x)} \big\| I_{T}(x) - I_{R}(x') \big\| \Big\|_2^2
53
+ \label{eq:image exclude}
54
+ \\
55
+ L_{dist}= cos(F(I_{T}\odot M),F(I_{R}\odot M))
56
+ \label{eq:perceptual exclude}
57
+ \\
58
+ L_{area}= - S_M / S_R
59
+ \label{eq:image include}
60
+ \\
61
+ L_{presv}= cos(F(I_{T}\odot M),F(I_{T}))
62
+ \label{eq:perceptual include}
63
+ \end{gather}$$
64
+ :::
65
+
66
+ The pixel-level neighbor loss in [\[eq:image exclude\]](#eq:image exclude){reference-type="ref+label" reference="eq:image exclude"}, $L_{nbr}$, compares a pixel $I_{T}(x)$ at location $x$ in the target image with the pixels of the rendered image in the neighborhood $\mathcal{N}(x)$ of this pixel, so that the loss remains stable even under small misalignments. Note that it is only computed within the face region, $\Omega$, predicted by the segmentation network. A higher neighbor loss at $x$ indicates that this pixel is not fitted well and is more likely to be an outlier.
67
+
68
+ Similarly, $L_{dist}$ in [\[eq:perceptual exclude\]](#eq:perceptual exclude){reference-type="ref+label" reference="eq:perceptual exclude"} is introduced to compare the target and reconstructed face at the perceptual level. [\[eq:image exclude,eq:perceptual exclude\]](#eq:image exclude,eq:perceptual exclude){reference-type="ref+label" reference="eq:image exclude,eq:perceptual exclude"} aim at shrinking the mask over the outliers, where the pixel-level and perceptual differences are large. Without any other constraints, the segmentation network would output an all-zero mask to make both losses 0. Conversely, once there is a force encouraging the network to preserve some image parts, the parts with smaller losses are more likely to be preserved; these are in fact the parts well explained by the face model and thus much more likely to depict the face.
69
+
70
+ Therefore, [\[eq:image include,eq:perceptual include\]](#eq:image include,eq:perceptual include){reference-type="ref+label" reference="eq:image include,eq:perceptual include"} are proposed to counteract [\[eq:image exclude,eq:perceptual exclude\]](#eq:image exclude,eq:perceptual exclude){reference-type="ref+label" reference="eq:image exclude,eq:perceptual exclude"}. [\[eq:image include\]](#eq:image include){reference-type="ref+label" reference="eq:image include"} is an area loss, $L_{area}$, that enlarges the ratio between the number of estimated facial pixels, $S_M$, and the number of pixels in the rendered face region, $S_R$; it prevents the segmentation network from discarding too many pixels. $L_{presv}$ ([\[eq:perceptual include\]](#eq:perceptual include){reference-type="ref+label" reference="eq:perceptual include"}) ensures that the perceptual face features remain similar after the outliers in the target image are masked out, encouraging the model to preserve as much of the visible face region as possible. Likewise, in the presence of [\[eq:image exclude,eq:perceptual exclude\]](#eq:image exclude,eq:perceptual exclude){reference-type="ref+label" reference="eq:image exclude,eq:perceptual exclude"}, the network keeps the regions that are most likely face in order to decrease [\[eq:image include,eq:perceptual include\]](#eq:image include,eq:perceptual include){reference-type="ref+label" reference="eq:image include,eq:perceptual include"}.
71
+
72
+ We use an additional regularization term, $L_{bin}=-\sum_x ( M(x) - 0.5 )^2$, to encourage the face mask $M$ to be binary. The total loss for training the segmentation network is: $L_S= \eta_1 L_{nbr}+\eta_2 L_{dist}+\eta_3 L_{area}+\eta_4 L_{presv}+\eta_5 L_{bin}$, with $\eta_1=15$, $\eta_2 =3$, $\eta_3 =0.5$, $\eta_4 =2.5$, and $\eta_5 =10$. An analysis of the influence of the hyper-parameters is provided in the supplementary material.
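+
+ To illustrate how these terms could be combined in practice, we include a compact PyTorch-style sketch below. It is an illustrative approximation under simplifying assumptions (soft masks in $[0,1]$, a `face_features` callable standing in for the pretrained recognition network $F$, and a `render_mask` giving the rendered face region); it is not the released training code.
+
+ ```python
+ import torch
+ import torch.nn.functional as F_nn
+
+ def cosine_dist(x, y):
+     # cos(X, Y) = 1 - <X, Y> / (||X|| ||Y||), averaged over the batch
+     return 1.0 - F_nn.cosine_similarity(x.flatten(1), y.flatten(1), dim=1).mean()
+
+ def segmentation_loss(I_T, I_R, M, render_mask, face_features,
+                       etas=(15.0, 3.0, 0.5, 2.5, 10.0), k=3):
+     """I_T, I_R: (B,3,H,W) target / rendered images; M: (B,1,H,W) soft face mask."""
+     # L_nbr: best match of each target pixel within a k x k neighborhood of I_R,
+     # accumulated only inside the currently estimated face region M.
+     patches = F_nn.unfold(I_R, k, padding=k // 2)                # (B, 3*k*k, H*W)
+     patches = patches.view(I_R.size(0), 3, k * k, -1)
+     diff = (I_T.flatten(2).unsqueeze(2) - patches).norm(dim=1)   # (B, k*k, H*W)
+     l_nbr = (diff.min(dim=1).values ** 2 * M.flatten(2).squeeze(1)).sum(dim=1).mean()
+
+     # L_dist / L_presv: perceptual distances with and without the mask applied.
+     l_dist = cosine_dist(face_features(I_T * M), face_features(I_R * M))
+     l_presv = cosine_dist(face_features(I_T * M), face_features(I_T))
+
+     # L_area: negative ratio of kept pixels to rendered face pixels.
+     l_area = -(M.sum() / render_mask.sum().clamp(min=1.0))
+
+     # L_bin: push the mask towards binary values (0 or 1).
+     l_bin = -((M - 0.5) ** 2).sum()
+
+     e1, e2, e3, e4, e5 = etas
+     return e1 * l_nbr + e2 * l_dist + e3 * l_area + e4 * l_presv + e5 * l_bin
+ ```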
73
+
74
+ During training, the segmentation network is thus guided to seek a balance between discarding pixels that cannot be explained well by the face model and preserving pixels that are important for retaining the perceptual representations of the target and rendered face images. Therefore, no supervision for skin or occlusions is required.
75
+
76
+ **Training the face autoencoder.** In the second step, we continue to optimize the parameters of the face autoencoder, while keeping the segmentation network fixed. The losses for training the face autoencoder include:
77
+
78
+ ::: small
79
+ $$\begin{gather}
80
+ L_{pixel}= \Big\|( I_{T} - I_{R})\odot M\Big\|_2^2
81
+ \label{eq:image reconstruction}
82
+ \\
83
+ L_{per}= cos(F(I_{T}),F(I_{R}))
84
+ \label{eq:perceptual reconstruction}
85
+ \\
86
+ L_{lm}= \Big\| lm_T - lm_R\Big\|_2^2
87
+ \label{eq:landmarks}
88
+ \end{gather}$$
89
+ :::
90
+
91
+ Above are two reconstruction losses: $L_{pixel}$ ([\[eq:image reconstruction\]](#eq:image reconstruction){reference-type="ref+label" reference="eq:image reconstruction"}) at the image level and $L_{per}$ ([\[eq:perceptual reconstruction\]](#eq:perceptual reconstruction){reference-type="ref+label" reference="eq:perceptual reconstruction"}) at the perceptual level, and a landmark loss ([\[eq:landmarks\]](#eq:landmarks){reference-type="ref+label" reference="eq:landmarks"}) used to estimate the pose, where $lm_T$ and $lm_R$ stand for the 2D landmark coordinates on $I_T$ and $I_R$, respectively [@deng2019accurate]. We set the weights for the landmarks on the nose ridge and inner lip as 20, and the rest as 1. A regularization term is also required for the 3DMM: $L_{reg}= \Big\| \theta \Big\|_2^2$. To sum up, the loss for training the face autoencoder can be represented as: $$\begin{equation}
92
+ L_R= \lambda_1 L_{pixel} + \lambda_2 L_{per}+\lambda_3 L_{lm}+\lambda_4 L_{reg}
93
+ \label{eq:reconstruction loss sum}
94
+ \end{equation}$$ where $\lambda_1 =0.5$, $\lambda_2 =0.25$, $\lambda_3 =5\times10^{-4}$, and $\lambda_4 =0.1$.
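+
+ A corresponding sketch for the masked autoencoder objective is shown below, reusing `cosine_dist` from the previous snippet and passing the same assumed `face_features` network; the landmark count and weighting scheme are placeholders rather than the exact configuration.
+
+ ```python
+ def autoencoder_loss(I_T, I_R, M, theta, lm_T, lm_R, lm_weights, face_features,
+                      lambdas=(0.5, 0.25, 5e-4, 0.1)):
+     """Masked pixel loss + perceptual loss + weighted landmark loss + 3DMM regularizer."""
+     l_pixel = (((I_T - I_R) * M) ** 2).sum(dim=(1, 2, 3)).mean()
+     l_per = cosine_dist(face_features(I_T), face_features(I_R))
+     # lm_T, lm_R: (B, L, 2) detected / projected 2D landmarks;
+     # lm_weights up-weights the nose ridge and inner lip landmarks.
+     l_lm = (lm_weights * ((lm_T - lm_R) ** 2).sum(dim=-1)).mean()
+     l_reg = (theta ** 2).sum(dim=-1).mean()
+     l1, l2, l3, l4 = lambdas
+     return l1 * l_pixel + l2 * l_per + l3 * l_lm + l4 * l_reg
+ ```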
95
+
96
+ As with every other EM-type training strategy, our training needs a rough initialization. To achieve unsupervised initialization, we generate preliminary masks using an outlier-robust loss [@egger2018occlusion]: $$\begin{gather}
97
+ log(P_{face}(x))=-\frac{1}{{2 {\sigma}^2 }} ( I_{T}(x) - I_{R}(x))^2 + N_c
98
+ \label{eq:P_face}
99
+ \\
100
+ M_{{pre}}(x) =
101
+ \begin{cases}
102
+ 1 & \text{if $( I_{T}(x) - I_{R}(x) )^2< \xi$}\\
103
+ 0 & \text{otherwise}
104
+ \end{cases}
105
+ \label{eq:pre-mask}
106
+ \end{gather}$$ We assume that the pixel-wise error at pixel $x$ in the face regions follows a zero-mean Gaussian distribution. Therefore, we can express the log-likelihood that a pixel belongs to the face regions as $log(P_{face})$ ([\[eq:P_face\]](#eq:P_face){reference-type="ref+label" reference="eq:P_face"}), where $\sigma$ and $N_c$ are constant. We also assume that the values of the non-face pixels follow a uniform distribution, i.e., $log(P_{non-face})$ is a constant. Finally, a pixel at position $x$ is classified as face or non-face by comparing the log-likelihoods. This reduces to thresholding of the reconstruction error with a constant parameter $\xi$ ([\[eq:pre-mask\]](#eq:pre-mask){reference-type="ref+label" reference="eq:pre-mask"}). When $\xi$ increases, the initialized masks allow the pixels on the target image to have a larger difference from the reconstructed pixels and encourage the reconstruction network to fit these pixels. Empirically, we found that $\xi=0.17$ leads to a good enough initialization.
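+
+ The thresholding in [\[eq:pre-mask\]](#eq:pre-mask){reference-type="ref+label" reference="eq:pre-mask"} amounts to a one-liner; the sketch below is hypothetical and assumes images normalized to $[0,1]$ with the squared error averaged over color channels.
+
+ ```python
+ def preliminary_mask(I_T, I_R, xi=0.17):
+     """Binary pre-mask M_pre: 1 where the squared pixel error is below the threshold xi."""
+     err = ((I_T - I_R) ** 2).mean(dim=1, keepdim=True)  # average over color channels
+     return (err < xi).float()
+ ```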
107
+
108
+ To initialize the face autoencoder, the preliminary mask, $M_{{pre}}(x)$, is obtained in the forward pass using [\[eq:pre-mask\]](#eq:pre-mask){reference-type="ref+label" reference="eq:pre-mask"}, after the reconstructed face is rendered. Then $M_{{pre}}(x)$ is used to mask out the roughly-estimated outliers as in [\[eq:image reconstruction\]](#eq:image reconstruction){reference-type="ref+label" reference="eq:image reconstruction"}, preventing the face autoencoder from fitting to any possible outliers. Subsequently, the segmentation network is pre-trained using these preliminary masks as ground truths.
109
+
110
+ ![Visualization of the misfit prior. We provide two views of the coordinate system in (a) and the misfits along $x$, $y$, and $z$ axis in (b), (c), and (d), respectively.](CVPR2023/figures/MisfitMap_CVPR2023.pdf){#fig:misfit prior}
111
+
112
+ Misfits in image regions that the model could explain but does not fit well indicate systematic errors in the fitting pipeline. We propose an in-domain Misfit Prior (abbreviated as MP), $E_{MP}$, to measure and adjust for such misfits.
113
+
114
+ To build the in-domain prior, we first synthesize images using the face model, for which theoretically every facial part should be fitted well; hence, any remaining error is due to systematic deficiencies of the pipeline. We draw random vectors $\theta_i$ in the face model latent space to generate ground-truth (GT) geometry $T_{vi}$ and texture. Then the renderer of the face autoencoder is employed to render target images, $T_i$, $i=1,...,N$.
115
+
116
+ A face autoencoder, $R_{syn}$, with the same structure as $R$ is then trained on the synthesized images using the losses in [\[eq:reconstruction loss sum\]](#eq:reconstruction loss sum){reference-type="ref+label" reference="eq:reconstruction loss sum"}, except that no segmentation mask is required in the pixel-wise loss, resulting in $L_{pixel}= \Big\|I_{T} - I_{R}\Big\|_2^2$, since there are no outliers in the synthesized images.
117
+
118
+ We build the statistical prior as the average vertex-wise deviation of the predicted geometry $P_{vi} \in \mathbb{R}^{p\times3}$ from the GT geometry $T_{vi}$, where $p$ is the number of vertices: $$\begin{equation}
119
+ E_{MP}= \frac{1}{N}\sum_{i\in [1,N]} (P_{vi}- T_{vi} )
120
+ \label{eq:misfit}
121
+ \end{equation}$$ This prior, visualized in [4](#fig:misfit prior){reference-type="ref+label" reference="fig:misfit prior"}, shows the per-vertex bias introduced by the fitting pipeline. After inference, it can be used to adjust the in-domain misfits, and the corrected prediction is $P_{mfi}= P_{vi} - E_{MP}$.
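+
+ A sketch of how such a prior could be accumulated over the $N$ synthetic fits and then applied at inference time is given below (illustrative NumPy code; the array shapes are assumptions based on the description above).
+
+ ```python
+ import numpy as np
+
+ def build_misfit_prior(pred_vertices, gt_vertices):
+     """pred_vertices, gt_vertices: (N, p, 3) stacks of predicted / GT geometries."""
+     return (pred_vertices - gt_vertices).mean(axis=0)   # E_MP, shape (p, 3)
+
+ def apply_misfit_prior(pred, E_MP):
+     """Corrected prediction P_mf = P - E_MP for a single predicted mesh of shape (p, 3)."""
+     return pred - E_MP
+ ```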
2106.10512/main_diagram/main_diagram.drawio ADDED
The diff for this file is too large to render. See raw diff
 
2106.10512/paper_text/intro_method.md ADDED
@@ -0,0 +1,144 @@
1
+ # Introduction
2
+
3
+ <figure id="fig:tweet-example" data-latex-placement="!ht">
4
+ <img src="figures/tweet_ex.png" />
5
+ <figcaption>A sample of diverse natural language processing tweets.</figcaption>
6
+ </figure>
7
+
8
+ Online communication channels have become popular in the Internet era, and several online communities of like-minded people have evolved around these channels. For example, communities such as Stack Overflow and AskUbuntu are question-answering forums; Twitter and Reddit are content-sharing forums. Over the years, these forums have provided a platform for novice users to learn from experts, facilitated discussions among community members, and accumulated a rich database of questions, answers, and discussions.
9
+
10
+ According to the theory of diffusion of innovation proposed by @UBHD2028615, the communication channel is one of the four main elements influencing the spread of a new idea. Notably, the communication channel serves as a collective long-term memory or a knowledge archive of the community, which any member can access to study the community's stance on diverse topics at any point in time.
11
+
12
+ Although several mailing lists, Slack channels, and subreddits exist for communication, most natural language processing (henceforth NLP) community discussions are primarily carried out on Twitter due to its open accessibility and wider reach. Announcements of calls for papers and submission deadlines, recently accepted papers, interesting talks and seminars, lecture videos, and tutorials on various topics are often posted on Twitter. Such posts are a great way to stay updated on recent developments in the NLP field. Twitter is also a medium for researchers to engage in informal research discussions that might go unreported in official publications. We present a sample of diverse NLP tweets in Figure [1](#fig:tweet-example){reference-type="ref" reference="fig:tweet-example"} to emphasise the utility of the platform.
13
+
14
+ However, unlike subreddits or communities like Stack Overflow and AskUbuntu, Twitter is not an exclusive channel for NLP discussions. Exclusive channels provide users with a one-stop destination for their interests and allow extremely topic-specific exploration. While Twitter allows search by hashtags to narrow down to specific topics, the usage of hashtags is highly irregular. Furthermore, Twitter is more suited to live discussions and less suitable for maintaining a snapshot of the discussions taking place in the online community. Relevant Twitter discussions about specific research papers are often forgotten in the long run because there is no infrastructure to link these discussions with the papers on proceedings archives or research paper search engines. In an attempt to address these issues, we extend the functionality of the NLPExplorer [@parmar2020nlpexplorer] platform by integrating [TweeNLP]{.smallcaps} with it. NLPExplorer is a portal for searching and visualizing NLP research volume based on the ACL Anthology [@ACLAnth]. In our current work, we build an automatic pipeline for curating NLP tweets and a one-stop portal, *[TweeNLP]{.smallcaps}*, for searching and browsing NLP discussions on Twitter. The system has curated 19,395 NLP tweets as of April 2021.
15
+
16
+ [TweeNLP]{.smallcaps} organizes NLP tweets into topics: (i) New paper announcements, (ii) Call for Paper announcements, (iii) Reading Materials & Tutorials, (iv) Career Opportunities, (v) Talks & Seminars, and (vi) Others. Topic-wise tweets are presented via dashboards for easy exploration. [TweeNLP]{.smallcaps} supports dashboards to browse through popular NLP tweets from the previous week and month. We construct a CFP Timeline from 'Call for Papers' announcements on Twitter and arrange it according to the upcoming submission deadlines of various workshops and conferences. We link the research paper tweets to the research paper's metadata, accessible via the NLPExplorer paper discovery feature. We also build live Conference Visualization dashboards, which curate tweets about the conference schedule, ongoing talks, poster sessions, and interesting papers at the conference, and present statistics such as popular hashtags, users, tweet languages, etc.
17
+
18
+ We integrate [TweeNLP]{.smallcaps} with NLPExplorer (Section [2](#sec:nlpexp){reference-type="ref" reference="sec:nlpexp"}) to build a joint-portal that aims to bridge the gap between published research and its informal communication on the social media platform Twitter. Our automatic data curation pipeline and the architecture of the system is described in Section [3](#sec:data){reference-type="ref" reference="sec:data"} and Section [4](#sec:architecture){reference-type="ref" reference="sec:architecture"} respectively. We describe the features of [TweeNLP]{.smallcaps} in detail in Section [5](#sec:features){reference-type="ref" reference="sec:features"}. In Section [6](#sec:relwork){reference-type="ref" reference="sec:relwork"}, we discuss previous works in organizing the NLP literature and visualization of research papers.
19
+
20
+ NLPExplorer[^1] [@parmar2020nlpexplorer] is an automatic portal for indexing, searching, and visualizing Natural Language Processing research volume. It presents multiple paper, venue, and author statistics, including paper citation distribution, paper topic distribution, authors, their fields of study, their citation distributions, etc. It also categorizes research papers into various topics broadly arranged in five categories: (i) Linguistic Target (Syntax, Discourse, etc.), (ii) Tasks (Tagging, Summarization, etc.), (iii) Approaches (unsupervised, supervised, etc.), (iv) Languages (English, Chinese, etc.), and (v) Dataset types (news, clinical notes, etc.). The current snapshot consists of 75k research papers and 50k authors. Since its inception, it has been accessed by more than 7.3k users across close to 9.7k sessions.
21
+
22
+ <figure id="fig:pipeline" data-latex-placement="!t">
23
+ <img src="figures/architecture.png" />
24
+ <figcaption>The architecture of <span class="smallcaps">TweeNLP</span>. Arrow directions denote the flow of data. AAD represents the ACL Anthology Dataset which is the other data source apart from Twitter.</figcaption>
25
+ </figure>
26
+
27
+ We curate the dataset from two primary sources:
28
+
29
+ We curate the Twitter data using the open-source library *Twint[^2]* by retrieving tweets with the hashtag NLProc. We also curate tweets with NLP conference hashtags such as #acl2020, #emnlp2020, etc. The list of NLP conferences is compiled via ACL Anthology. Our system is scheduled to download the Twitter data for each day automatically. For ongoing conferences, our system curates new tweets every hour to continually update the *Conference Visualizer* page. The current snapshot (as of April 2021) contains data since October 2017 (around 1300 days) and consists of 19,395 tweets.
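+
+ A minimal sketch of this curation step with Twint might look as follows; the configuration values (start date, output file) are illustrative rather than the exact settings of the scheduled job.
+
+ ```python
+ import twint
+
+ def fetch_nlproc_tweets(since="2017-10-01", output_csv="nlproc_tweets.csv"):
+     """Download tweets containing the #NLProc hashtag into a CSV file."""
+     c = twint.Config()
+     c.Search = "#NLProc"      # conference hashtags such as #acl2020 work the same way
+     c.Since = since           # start of the crawl window
+     c.Store_csv = True
+     c.Output = output_csv
+     c.Hide_output = True      # do not echo every tweet to stdout
+     twint.run.Search(c)
+
+ if __name__ == "__main__":
+     fetch_nlproc_tweets()
+ ```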
30
+
31
+ We curate the conference and journal names and URLs from the ACL Anthology github repository[^3]. We also curate the paper titles and their links. Tweets are collected every day, and the system checks for paper mentions in them by substring matching against the paper URLs collected from the ACL Anthology github repository.
32
+
33
+ # Method
34
+
35
+ We present the pipeline of our system in Figure [2](#fig:pipeline){reference-type="ref" reference="fig:pipeline"}. The Data Curator module curates tweets daily. The curated tweets are processed before we perform further steps. The following modules process tweets: (i) Tweet Classifier, (ii) Conference Page Builder, (iii) CFP Timeline Builder, and (iv) Paper Tweet Linker. We describe the tweet processing modules in detail below:
36
+
37
+ 1. *Tweet Classifier*: The Tweet Classifier module classifies a tweet into one of six topics: (i) New Paper Announcements, (ii) Call for Paper announcements, (iii) Reading Materials & Tutorials, (iv) Career Opportunities, (v) Talks & Seminars, and (vi) Others. The Tweet Explorer feature utilizes these tweet categories. The detailed description of each topic is presented in Section [5.1](#subsec:TweetExp){reference-type="ref" reference="subsec:TweetExp"}. We experiment with fine-tuning a BERT-base [@Devlin2019BERTPO] classifier and twitter-roberta-base [@barbieri2020tweeteval] to predict the tweet topics. The BERT-base model[^4] obtains the best test accuracy of 75% on a small manually annotated dataset[^5] (a minimal fine-tuning sketch is shown after this list).
38
+
39
+ 2. *Conference Page Builder*: The Conference Page Builder classifies a tweet either as discussing an ongoing conference or other topics. The module builds specific conference pages using such tweets.
40
+
41
+ 3. *CFP Timeline Builder*: []{#subsec:CFPTB label="subsec:CFPTB"} The module processes 'Call for Papers' tweets identified by the Tweet Classifier module. It extracts the conference (and workshop) name by regex-based keyword matching against a pre-compiled list of venues. The submission dates are extracted from the tweets by labeling date entities with the spaCy[^6] library. The tweets are arranged in a timeline sorted by the submission deadline.
42
+
43
+ 4. *Paper Tweet Linker*: The Paper Tweet Linker module maps specific tweets to research papers using regex matching of the paper title and paper URL. The Paper Tweet Visualizer uses these mappings to embed the tweets on the research paper page on NLPExplorer.
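+
+ The fine-tuning sketch referenced in the Tweet Classifier item above is shown next. It is an illustrative example built on the Hugging Face `transformers` and `datasets` libraries with hypothetical CSV files (`tweets_train.csv`, `tweets_test.csv` containing `text` and integer `label` columns); it is not the exact training script used for [TweeNLP]{.smallcaps}.
+
+ ```python
+ from datasets import load_dataset
+ from transformers import (AutoTokenizer, AutoModelForSequenceClassification,
+                           Trainer, TrainingArguments)
+
+ LABELS = ["paper", "cfp", "reading", "career", "talk", "other"]  # six tweet topics
+
+ dataset = load_dataset("csv", data_files={"train": "tweets_train.csv",
+                                           "test": "tweets_test.csv"})
+ tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
+
+ def tokenize(batch):
+     return tokenizer(batch["text"], truncation=True, padding="max_length", max_length=64)
+
+ dataset = dataset.map(tokenize, batched=True)
+
+ model = AutoModelForSequenceClassification.from_pretrained(
+     "bert-base-uncased", num_labels=len(LABELS))
+
+ args = TrainingArguments(output_dir="tweet-topic-model",
+                          num_train_epochs=3,
+                          per_device_train_batch_size=16)
+
+ trainer = Trainer(model=model, args=args,
+                   train_dataset=dataset["train"],
+                   eval_dataset=dataset["test"])
+ trainer.train()
+ ```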
44
+
45
+ The pipeline then stores the tweets in the database after processing by the above modules. We schedule our system to automatically curate the Twitter data daily and increase it to an hourly frequency during ongoing conferences.
46
+
47
+ We present a Tweet Explorer dashboard that allows a user to browse tweets from specific topics such as:
48
+
49
+ 1. *New paper announcements:* This topic organizes tweets about recent papers, which often include a summary or a short introduction of the research paper. These Twitter threads allow other researchers to communicate informally with the paper authors. They also contain interesting community discussions of the insights, merits, and critiques of the research paper, as well as questions about the work. The authors' short introductions offer a more informal account of the paper than paper alert services, which usually present only the title and the abstract of the research paper.
50
+
51
+ 2. *Call for Papers (CFPs) by various conferences and workshops:* Users can view the announcements for call for papers and submission deadlines by various workshops and conferences.
52
+
53
+ 3. *Reading Materials & Tutorials:* It lists various study material, such as lecture slides and videos, tutorials, online courses, and blog posts.
54
+
55
+ 4. *NLP Career Opportunities:* Individuals frequently advertise opportunities for various positions such as interns, full-time, Ph.D., postdoctoral fellows, and research fellows on Twitter.
56
+
57
+ 5. *NLP Talks & Seminars:* Various online NLP talks and seminars can be accessed using the NLP Talks & Seminars filter on the Tweet Explorer dashboard.
58
+
59
+ 6. *Others:* This category contains the NLP tweets which do not belong to any of the above topics.
60
+
61
+ The Tweet Explorer feature allows users to specifically browse through tweets by topics and filter them based on their immediate interests. A snapshot of the same is presented in Figure [3](#fig:TweetExp){reference-type="ref" reference="fig:TweetExp"}. We present the distribution of tweets in the six categories from tweets curated by the system in the last 1,300 days in Table [1](#tab:topic_dist){reference-type="ref" reference="tab:topic_dist"}.
62
+
63
+ <figure id="fig:TweetExp" data-latex-placement="!htb">
64
+ <img src="figures/TwExp1.png" />
65
+ <figcaption>Tweet Explorer feature of <span class="smallcaps">TweeNLP</span> which facilitates browsing tweets by six different topics.</figcaption>
66
+ </figure>
67
+
68
+ ::: {#tab:topic_dist}
+ | **Topic**                     | **Tweet Count** |
+ |-------------------------------|-----------------|
+ | New Paper Announcements       | 6,337           |
+ | Call for Papers               | 972             |
+ | Reading Materials & Tutorials | 1,400           |
+ | NLP Career Opportunities      | 681             |
+ | NLP Talks & Seminars          | 2,382           |
+ | Others                        | 7,623           |
+ | **Total Tweets**              | **19,395**      |
+
+ : Distribution of tweets (curated since October 2017) into various topics.
+ :::
81
+
82
+ :::: table*
+ | **Top Hashtags** | **Top Mentions** | **Top URLs** | **Top Papers Discussed** |
+ |---|---|---|---|
+ | #acl2020nlp | \@aclmeeting | [virtual.acl2020.org/socials.html](https://virtual.acl2020.org/socials.html) | Beyond Accuracy: Behavioral Testing of NLP models with CheckList |
+ | #acl2020en | \@emilymbender | [virtual.acl2020.org/plenary_session_keynote_kathy_mckeown.html](virtual.acl2020.org/plenary_session_keynote_kathy_mckeown.html) | Photon: A Robust Cross-Domain Text-to-SQL System |
+ | #nlproc | \@akoller | [virtual.acl2020.org/paper_main.701.html](https://virtual.acl2020.org/paper_main.701.html) | Climbing towards NLU: On Meaning, Form, and Understanding in the Age of Data |
+ | #acl2020zht | \@winlpworkshop | [virtual.acl2020.org/workshop_W1.html](http://virtual.acl2020.org/workshop_W1.html) | Language Models as an Alternative Evaluator of Word Order Hypotheses: A Case Study in Japanese |
+ | #acl2020hi | \@xandaschofield | [www.aclweb.org/anthology/2020.acl-main.442/](https://www.aclweb.org/anthology/2020.acl-main.442/) | Don’t Stop Pretraining: Adapt Language Models to Domains and Tasks |
+ | #mt | \@gneubig | [virtual.acl2020.org/workshop_W10.html](http://virtual.acl2020.org/workshop_W10.html) | The State and Fate of Linguistic Diversity and Inclusion in the NLP World |
+ ::::
93
+
94
+ ::: table*
+ | **Tweet Counter** | **Likes Counter** | **Retweet Counter** | **Unique Mentions** | **Unique Paper Mentions** |
+ |---|---|---|---|---|
+ | 5,343 | 58,160 | 11,440 | 907 | 251 |
+ :::
99
+
100
+ [TweeNLP]{.smallcaps} supports real-time statistics for multiple top conferences and the popular #NLProc hashtag. The information is updated hourly for live events and weekly for past events. Some of the statistics presented are top mentions, top hashtags, top linked URLs, and top discussed papers in tweets. We present the most popular hashtags, mentions, URLs, and highly discussed papers for ACL2020 in Table [\[tab:top_insights\]](#tab:top_insights){reference-type="ref" reference="tab:top_insights"}. A summary of Twitter activity from the Conference Visualizer page for ACL 2020 is presented in Table [\[tab:acl2020_stats\]](#tab:acl2020_stats){reference-type="ref" reference="tab:acl2020_stats"}. Apart from Twitter discussions about a conference in a specific month, we also show insights from the conferences across the year. The insights from the ACL conference over time are presented in Figure [4](#fig:acl_vis){reference-type="ref" reference="fig:acl_vis"}. We also present other conference-specific statistics such as the number of tweets per month, the daily distribution of tweets in the conference month, the most active users tweeting about the conference, and a distribution of the tweet languages other than English.
101
+
102
+ <figure id="fig:acl_vis" data-latex-placement="!htb">
103
+ <div class="center">
104
+ <p><img src="figures/plots_ACL2020/monthwise_plot.png" alt="image" /> <img src="figures/plots_ACL2020/daywise_plot.png" alt="image" /> <img src="figures/plots_ACL2020/lang_pie2.png" alt="image" /> <img src="figures/plots_ACL2020/user_tweets.png" alt="image" /></p>
105
+ </div>
106
+ <figcaption>Conference Visualizer: ACL2020 Statistics. (a) Distribution of tweets across different months. (b) Daily distribution of ACL2020 tweets in July 2020. (c) Distribution of tweet languages except English. (d) Twitter users with highest ACL2020 tweets. </figcaption>
107
+ </figure>
108
+
109
+ <figure id="fig:PopPaperVis" data-latex-placement="!htb">
110
+ <img src="figures/PopPaper.png" />
111
+ <figcaption>Popular papers identified by <span class="smallcaps">TweeNLP</span> based on Twitter activity.</figcaption>
112
+ </figure>
113
+
114
+ We showcase widely discussed papers on Twitter in the Popular Paper Visualizer dashboard. It presents the titles and provides direct links to the full text of the top discussed papers for quick reference. The system extracts tweets mentioning research papers and assigns a popularity score to each paper based on the count of tweets that mention it and the likes, retweets, and replies on those tweets. We present a snapshot of a few popular papers identified by our platform in Figure [5](#fig:PopPaperVis){reference-type="ref" reference="fig:PopPaperVis"}. The dashboard also presents the most active users tweeting about #NLProc on Twitter and supports exploration of the most liked and retweeted #NLProc tweets of all time and of the last month.
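+
+ The popularity score itself can be expressed as a simple weighted count; the weights in the sketch below are hypothetical and only illustrate the idea, since the exact weighting used by the system is not specified here.
+
+ ```python
+ def paper_popularity(tweets, w_tweet=1.0, w_like=0.5, w_retweet=1.0, w_reply=0.5):
+     """tweets: list of dicts with 'likes', 'retweets', 'replies' for one paper."""
+     score = 0.0
+     for t in tweets:
+         score += w_tweet                        # each mentioning tweet counts once
+         score += w_like * t.get("likes", 0)
+         score += w_retweet * t.get("retweets", 0)
+         score += w_reply * t.get("replies", 0)
+     return score
+ ```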
115
+
116
+ [TweeNLP]{.smallcaps} presents a timeline of the upcoming submission deadlines. The timeline is created by identifying 'Call for Papers' tweets using keyword-based filtering of tweets, and it also lists the conference/workshop website. The details are described in the CFP Timeline Builder module [\[subsec:CFPTB\]](#subsec:CFPTB){reference-type="ref" reference="subsec:CFPTB"}. We present a snapshot of the timeline in Figure [6](#fig:cfp){reference-type="ref" reference="fig:cfp"}.
117
+
118
+ <figure id="fig:cfp" data-latex-placement="!h">
119
+ <img src="figures/CFP.png" />
120
+ <figcaption>CFP Timeline built from tweets. ‘W’ on top-right denotes Workshop.</figcaption>
121
+ </figure>
122
+
123
+ NLPExplorer supports a research paper search interface and builds research paper pages that showcase standard paper-related statistics such as the publication year and venue, author information, citations, citation distribution over the years, and the link to the corresponding PDF article. Additionally, it provides interesting insights such as similar papers, topical distribution, and mentioned URLs. We map research paper discussion tweets on Twitter to the corresponding NLPExplorer paper page. This feature allows users to browse discussions about a paper along with its metadata. We present a snapshot of the feature in Figure [7](#fig:PaperTweetVis){reference-type="ref" reference="fig:PaperTweetVis"}.
124
+
125
+ <figure id="fig:PaperTweetVis" data-latex-placement="!h">
126
+ <img src="figures/PaperTweetVis.png" />
127
+ <figcaption>Paper Tweet Visualizer curates tweets and metadata of a research paper on a joint portal. The image background is a ‘Paper’ page from NLPExplorer which lists paper metadata, citing papers, field-of-study tags, and similar papers along with the associated tweets.</figcaption>
128
+ </figure>
129
+
130
+ Lastly, we present popular tweets in the NLP community on Twitter (also referred to as NLP Twitter). This feature allows researchers to catch up on recent NLP-related Twitter discussions in a single dashboard without searching for them specifically in the Twitter feed.
131
+
132
+ Currently, the system is implemented only for NLP papers present in the ACL Anthology. The system could be extended to papers from NeurIPS, ICLR, and CVPR, as the data for these conferences is publicly available. The system is versatile and can be easily extended to other domains. [TweeNLP]{.smallcaps} provides basic visualization graphs over Twitter activity. Over time, these discussions could be used to build a timeline of the evolution of research in various domains of NLP based on the Twitter activity of researchers. Tweets by popular users attain likes and retweets at a higher rate than those by new users (or users with fewer followers) in the community. [TweeNLP]{.smallcaps} currently presents popular tweets based only on retweet and like counts, which can bias the conversations, understanding, and presentation of ideas by emphasising the tweets of a small set of popular users. Future work includes identifying novel alternative ideas and perspectives by adjusting for user popularity to create an inclusive space for the community.
133
+
134
+ [^1]: http://nlpexplorer.org/
135
+
136
+ [^2]: <https://github.com/twintproject/twint>
137
+
138
+ [^3]: <https://github.com/acl-org/acl-anthology>
139
+
140
+ [^4]: We also experimented with a zero-shot classifier but it underperformed the BERT-base classifier.
141
+
142
+ [^5]: Each tweet was annotated by two ML/NLP students; inter-annotator agreement was computed using Cohen's $\kappa$=0.68.
143
+
144
+ [^6]: <https://spacy.io/>
2108.11636/main_diagram/main_diagram.drawio ADDED
@@ -0,0 +1 @@
 
 
1
+ <mxfile host="app.diagrams.net" modified="2021-03-18T03:38:45.910Z" agent="5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36" version="14.4.7" etag="5G9fuFSEeEIE8NsKzMbo" type="google"><diagram id="ef3cZuglVMkEFylRH0Yb">7f1ZW9ta0zUK/5p17f0dvL7Uujk02CTKQvIyWGaZk/cCw23c0OzExJZ+/Vdj1JTkDnCCTUgW6354IndqqmpWN6tG/eUe3s4/fb14uAnvr64nfznW1fwvt/GX49h21ZF/8E6i71Tcsr4x+Dq8Ml8q3jgdptfmTcu8+zi8uv629MXp/f1kOnxYfrN/f3d33Z8uvXfx9ev9bPlr/7ufLF/14WJgrmgVb5z2LybXa187G15Nb7LnKteKDz5fDwc35tJVp6If3F7kX9Y3vt1cXN3PFq7lNv9yD7/e30/16HZ+eD0B8TK66A0dPfFpfmNfr++m2/zAMOL7xeTRPJu5r2mSPezsZji9Pn246OP1TBj6l3twM72dyCtbDr9Nv96PczK48s7/hpPJ4f3k/qu8vru/k58dfL1/vLu6vjI/ufjaNzytyKvB5OIb+GHhZOPraf8me5HRBi8err8Ob6+n119xJ8O7gXl7eCukrn97UC7j5Hf3xxeX1xPz+dXFt5v8uuZZr79Or+dP0svOuSDie30v1/yayFfMDyqZCBrJdTMJmS3IgZW9ebMgA65v3rwwwjfIT17wRw4Mizazy/0vsUtvNbu1vxz3gP/bDSMLJhlOetYGTlY2MNLxqyW7+npeer85L3e9tLzyy0vLsfa1svw14l9fic43L++/Tm/uB/d3F5Nm8e4CaUGP4jvH9/cPhi6j6+k0MRS/eJzeLzPwej6c/rtw3MOpSnIv+rIxN6fmiyR7cSeP9u/ii8Wf4XXxO77KfrguLtd3mXG1ch7isZdN1P3j1755y5ix6cXXwfV0SYy34PPX68nFdPh9+eyv4Vn5/fCs8nM8q/zneFb50HrLDsXLSs/19qX0qi8zY5mQL7FmkYCLZJ7Axh9c9McDnm+FWctMWGKvA/be300XvIBy+cA5OtrkHzTL+N+KQCy+vwN+1ZxlfnkbjFStvIFflvt6ftVe5hcIDK5I6DO9GN5df31JVE2MdHGZncH6UZLYK56U65fXaeJXSv4GX6rilqq11xMm88qfo4wozjpiv0Lu1pTKJil7QsMukqe2mTwLj+9vWMHZe1urWHOFf+6HcuGc2iRhtZb/t+KZqiUwv1kMBFdOY5Usq1p1vWql7NQ81/Itr7x8JjUga2cia/Kn3o5b9n+PW/NMX5Rqtu/4nuNWnGrV9pZWjvLStsu+LZ9bfrnyg8x8/ip74OQW+YM/lJPlWqmKZVL1Kp4vi6a6F04+cZU9cHKL1MIfyknH80qeVa5UauWybdnl2oZFWfs59r1w6jdVulvkG/5QBtuWV9LlU7HtatnznJ0x+IVTvymD/f8ug91aKVtgNHz+XpTxU5fZAy/L/11eVrzSwnqyyuXdLdbnT/2mi3WLPMgfymDHqpW8RffU3Z25ff7Ub8rgLXIrb87gLDyoeqVFp2RZW7owaUKeWsWpVHyrXFuP5j23xPWTn2JDwkM8H1lvrgQfFdsv+96Pisn/sd2SVXOFa65brcEJXjat8rHjO67teZ7tyU06PykyRWJiuxPvUES2SOd8iMgrRMSrlvyaXbVrZbtaEXJ7uxKRF068OxFx3mNe608Skapbqrm263iVml+reKvx9c+LyAsn3qGIvMdk2p8kIk7ZKpXFl5A7rCK5XfV3JSMvnXmHQuJ8CMl+vRG7VnIqVtWp1SryrbK/M0Xy0pl3KCTvMQX4JwmJcLfkVGuu50vkIZ/7P5mH2KBJXjjzDoVkizSi7kHq135uw/F5YXhxMfpeSXjsur4t5Khk0eTidmTFEg+uOIO9LgyuX3Ldmm/5dqWCeGAlDPiZXUrnl2bolHNPr7BapWQ/nUMTSpbsqiwgxxbiOdVsO/uZJVZ5cYW5z2z9roj+D6+RzRzew4L4pam635upyVZL7ZUs3+4iOxSILVJ7324uHnDIeljhNNTksH8xYS3sP/ffhtPh/Z18fnk/nd7fPl1AI9LyP/63cI76ZDjAb6coaDu4yIpt/zeco4xnpQQ3L8qV46uL6cVfbl1fOkffvg/+cg7mIqfO4T+fI+c8OfAuz+aP/dQaXnw+sfqN++/H7pV7lfhumPjf+7f97+GoPgsPa+nVbX8YfL6ZXn7y09Zt1/nn9Mv91eeTWWtY/S6/cI/v+unxbS05T6pJ2KjPj1357qE9ubrtPl59Dovf3t18uzjzvz7z+3mrM/bl9/xeMDwY9v6NJvHnL9/PbyffzjvW8Pyu++3yMBhcfOo+nDs31j+nwTxsDB5bjYG3/Id7Pp/076KHS8erBaPmY3gaDK6cyfjq0wCvrfDQm8k1nIuzrtu+rXlyrlnQqA/CTlv+bc5ah563et4oCYSOX4aR1+t8GwSH9cE/n04m53fh93+GvdH1p2YF7wWNeXJ+FlnBp/OHy0+zWnD35a5327wPD+NvC38PcvX787PJ3cXnNu7oqStuumu92+GBr/9W1+5q/nB5Gw/67kly6UwnQvPk3AnuV66eXgltL5x4WlyhLXQbzOWsNq583QjkvXiOK4VC/7Azls/HCejeOg1Wnxp8mwSfI6t/W/sKfvVvr4Z/N+qTY/P3t/Agu6fjf68ezj+f3Mu5LDwRzh91mrg+zm9F8t3rThNP7fP6I7n+qDc8Tr3bwL25aSV1PGv18tPk8fw0fxal9nAw+3I6M3+4Z3NPp7Z7cXZiXTTkWod4Ppy/bfP6cv6ogecf4/mTaIjr4/nD/VDY0jvAFdpWTuG0nlE4lc93T2E+oaHwMKNwaOUUTgf7oLBTUHiQUThtHWYUbu+Jwq2cwnFO4aiRU9gK9yLDOYVtfUJSYJ5ROGo090Dh2MspPAozCuP5Mhn29kJhJ8y1RM8uZHhQyPBwDxQexRmFnQUZdgoZDvZA4V4hw6KFMhkOcy0R2PuhMJ9QKVxoCVKAFE7CvVB4kwwHSaGH6/uV4U6hh8NcS+zL0oWFHk4KLZFT2N2LDDeCXA+HOYX7uQy3OvugcHuWUbjF65PCfkHhwX60hBfmMjzIKdxq5FrCi/agh1sFhd0w1xIDO9fDcn+7p3A/KbRELsNOVFi6PWmJcJOly3XTPrym2Cpsej9/0lxDdMJ9yK9bXHOcXXNOG8TnbFp78iNOcx2cy2+oOoq+8F48tVEuv06hg5uFp9bZi5Vb8CMGBYVzSWrO9yO/hae2YOUYDagO3pNtLa46y1dNLk39fdB3Vti4fNW4udbfiz9aWJpWHlPFC/7oXjx+P3vOcJQ/p1V4/KG/F44mRdTa9Ap/NI9aEXPsnMJhZ5BReL7gj3oFhfdhy5u5Pxp2cl
tuFba8vR8KzwtbvsDXPM6AftzDVQu9n9O3l3ujyPbsYaVmPPUKj6WfyZG/D6+70IG5bfMLzTtO90LZYp2qttWrJnu+6jzMLWrTyenbCQuPMNnHOs15mix4hIX27cR7kKMgKTRhHpn7Ub5ixvvxCJMiMg/sgsLxXiU4LCLzpFip42Kl7sVnCZwNtmb/K2fB1gT+BhnGKtoDhccZhVNdpU3zhBmF95FdQkbFUDjtFRQuZHg/XmFa5D7CBQrnXqG/D79bvQUTmRcybO1Xhnu5Hm41cj1sF9Y83I8H7BV6eLDgL+V2Lt1HhlQj/0FSrFLKWO6RIge+ewqPcz3cKrJL6QKF9yPDfhFjjN2CwrkMp3vJfTC2UI80bBTZysLqtPeT6TFXM1fubPL8e/u68uIzF5bWKa482I9vaq62dmWvsECDfXlRi1fu1IsrJ3u/Mq+WSVh/05X3FX80FmW7eOYiBlm5cvXYjZzz1MMe+47wplZrttyqU6pW10uTan7J21CP5Lg7APzYBvHj9dVbL1DCXinMqFY20qG8oQXJsXZRhrVN38VkMnz4dv0y5MlafcUK9sjRkSX/bSrzyT/ZSrqUc08Sda1qtLpeAmlvKHm0y6+np7tNk8LvQs+8Msp+O/ptU8H/u9Avxy1YEUfv7ci5Ta37b0bO1WLzN6TmNkXhvxk1s97MDH12vT54b9TcBoThN6Omu2x5yuumfG/U3Kae+jej5kpP+lvK5jaFzL8ZNaurvTTrBc57I+c2Hf6/GTkry8Jpe28ondv00/9m5LStZTggu+K/HT3/wCDIdle8zsrb+UneWwZBllU52gyBmX+yE3p6y/gZb2iMvD8wJlpd7mhXeTuC/oFRUTZGIkNleMPV/idGRVnjkiHnGy72PzAqqlZ/FTH/wKDItlb7Ft+OnH9gVJQboreg3x8YBq0acuftUh7eW4ZBb+RmOtayrnzDXLH3B0ZBOUTDL8gW+3/SVlCO4bu8X2l7b7fc/T8wDMrdvYye5bdLymVAIX8UPVdGkr1l1sP/E+OgNTSZDYPB9kVPb518CyONNg63GT3ePpjpNphx8oqRRtnYot7C+y/MM7L/Wpxn9NdLw4wuvk4XhhctjzJ62slYHgnjZWdagVThe0fDyaQ4+8o35J2Fz5XSz6OrGE2/OE4pg1BZmqdkbZaonU9P8t8yuLOsw8NyeTNnzCdf76cXBqrE9ne1/irL9sF/Q/PwlsHeWyVx3eW0zpu6g5Vfqs5K2VC2D4X2gwpNF8IrFJr56SoAe9UvWRW7UitXyrWKszIsoSIfLvxnr5x+a5T3crlU9stV35Pwzqs55eXcUc0uuZ5re37Ft3yrsj8E8Ayo8dcIv/3jom99iP5fWYZlD6JfK9WcmudVym5NVsDyTmWlWqo4FUwksC3Xzk3ej4u+9Zzo2075rWR/i7zKx2TBlfm31ZUxehtGC9qbiqJtbweV4eUtMjcfHHueY342Du1Fjrl2XvL+KqZtkR7adzm/b60Qwd0gtu4GItRqu6HBW6Z01DPZVnrKz1POLVXWXJNauVqxnexvjZCVklV2q3Liml+Wr2eZtEWyPvGVV5H4d8zyPEX8zOJ6pXLZqVSc7O+90Pp33At/SdC9kldb/G9Z6q1SrVKrWrb+vRc+/I7b6C/wQQi98t+y5hbvsFypOZb5W++A+UWs+B234F9gxTLl3ZpQzX5/a+At9+53alrtyorGWS5cdt1SWXhQsczfu5H037HI+SXl75Se0TnvVfJ/x7qAFxjhOSs9or5fqlZcr+xnf++E+JXfsYjgpVWw0nImCqjiVst29vdeSP+W9QY71firc73KJX9BwVvvRrbfMlp9I9ku10r+UuJ+lROW6ztexfy9FxVfecugdld7fS9womKvYky8T/Naecsgd59qRiLbql+zrar5Wy/z/kUE/gOj15UyPMcuVfyy5db0b31T+xdR/g8MVsvuc/kbr+SJ+q/Vsr/3wojfsfD8pSXgPhdLvVdGvG3J+k6V/XNyXyl5drnq5n+V90LvPzB2zczuLydu9Q+MTdcHq1e93F2Xv3UkqV9E+7cNTt/EY19tJfLEg3c9x6+Yv/fisWettn+S3Nv2suC/1/2P6p+0/Zo3G1vP2la3ZFdqnuWbv/diW6tvGbi+GSvsZa9yuV64iqGaZcetmL93o5HethR+lwS3vOfc+PeaMKv+gZHtCqBRxSu9S5e++mvr6P/6qKLfIFTLpcRZKdtiKbGmmF9RSry1fLxtiL2ftqAXlqqoxWeUplMuOVaxneZuABT+RUv3LaPxNwpaytXnfLf36kbXrF+qRD+akX5WjeoSeoUaNT9daZaoORJq+xgUX/Wq5ez32caOCGDVs33zV86gRn+0I6PqllyJGpyKLc6cV16ZSe/54m/4tlzCqflW1fVWrrK7hoya/SuF/6MZ6WdF/wlF/FrRlwCk7FUkmBP9Wq3VlmH+3GqpJgJb82oSpZdrbvUnRd96TvTLTsmr2JZf9hzXL1cq+5P8LTJX324uHnA4vL0A0w5gHod9EXS0qvxz/21onJnL++n0/vavJ3tYRLb+x/8WzlGfDAf47RRrZt2C85L17F0re0eOry6mF3+5dX3pHH37PvjLOZjL2nIO//kcOefJgXd5Nn/sp9bw4vOJ1W/cfz92r9yrxHfDxP/ev+1/D0f1WXhYS69uMf7iZnr5yU9bdzcYFfP1n9Mv91efT2atYfW7/Mo9vuunx7e15DypzludsX/s6veC4UE+GCfsBI+tzsA7/3cwDD7dTC7Oru6v+P7gMTzrYhiQc3HWddu3Ne+f02AWHFpWOIpnQSN2o0b9MTqte2GjjnGWN/3byc3x2c3D+aeTyeVI2PX5i32uI2r8y09xLRgG8rz/yKfndyGOPh/cXH0aDC7O2rVgbP99dmi5rUZ92m12e8fd0DruBGkrll99wueWF6b9Qdhp2tFR04saIY6t1mE9P466xfsh7qszwPEsmrTdqIP3hXaTthON+oNWoz3v4hkO607YOImjxtgKGoHf6spvO/3BcWcgr9vzKKlPo1HdjzvZcXvO94c8drtyLNeS49iS78xaemwHjdDR78R23JHz5u8Hvp4H38fgIx4neF+P22kXx/h+Z5DyO/g+aNwIZ3K/03DUTrqN0JLzzMPOgdx7KL9vu+FR247StjxbU87R94Q38nnoxB055ndDP2j0/fAQxzhH34/02OXxEMfxDMct/jaW7wxmob6f4ljPE1tyPA/N8akct+KB8OfqQkeubMfZds5ZcO2400szDuIYnAXH5diKmnL20cGJnGMuEuhEDTxlfabH0UmUjhMch2k8iNKBzePR+Uk0Ei4eWnYo5xQu4v00GkVCsX4qx27UbaetRjwIR01HKJoKZeywcxOHaV0oOpi1uu2k1WlCcvzQbppjkbTDenZshza+08OxI5IpxwM9Pq3Ltcz7kKrGOHtfJKyZfV+OYx4LNeW4pxJutymFcjzPJDISSZBVNycHGpO4JffL5+xcCV3aepw2B8Vx94e4EWbckKuBA5OTqNEEVeWqbfB92iK1BvJZU2QktCmLn
YMToRY4MQ9JlT5/G2JU2qHlUa4hv12lnjy9oVzgdkfhLDwy74/q2RqQ9dgN5fdO0OgJB5Si0ag5M2vK0WsO5Ps9XDNtdQJ+3tX7dHRNNBPze1IVa9isW7vV+YLPXa4ZW84xujrha31WyLU86z2Gj1X7n46si8ODh3PRiZ34oXcsayk8tOOT7mDaHloJxkrxM6EJ1kM0Oo9FWlJ5FjvsyrlG8mxyL90GOW9HjaM4xCjGhkg4pKshnE3rHtazrHnRUweBSLFzKs8adptCb3m2FPo29IQ2bpiem2fvp/JaJAwS35T1GCZyfdFtOH/fon6etBOREbl+6ItOmUNqI9AO46MhIfJ70RXCryBpmWtBFuQ+8mOR0OzYalGi63ocD9Kocy732p7HnXCueikU3YihnTiui07B+EfISOiZ55/y+TuBfG/gn2H05Gg+6/17ch98mozFfkWnk8FUVvDsuHtydHZoHx53jppy/xgKx8+FdsI3PoPQADod5xIN0KgP5PmFH/XZqTxrd9RLThvNeWTNkkhXeNppNKGf5Xm+BKIdzkSLiO4KobfBd9B/Du0j78szBdCvlvBOftuz8blooSRKT85Af7mGFY1nCegnesMnDU5V/mUVOtTzdjMVOsjvx3LPXC+pPH8ov/fJzzFo+CW7D0fWQhpBgzVoYyDrcv2jk0j0s7k/O+qMB/r92IkmPT6LaLN5B+eX60eN8zDq1C3IV6cRW/KeI9exW43zs7A5EPtwI+frpQGfs+eDdmGK5xNaprgO5E/4RfrJeps0qcWiET4P5Lmac7GhCbVmB9do+i3YUKEztdQItA3t7kjOI7zamsdW9CnjcST2RO4jpA7B/YkNU15jDR8ErY7wjtdRHQKtKzIvGrINuie6zmSddPB8cSI8h821w9EkaDUOzoSG89PG2I1V1m3qI1mDQjNL5CUNQWOx1aewCMLDSDS0sem2nEsszgloNsf3oX9E98jvYyNzbT866okMtr1wSx3STjIdgnU7EH8B/8ozHfK+RSa7smZieZamKzohCSHrnf480PU1D4Wn4pPJ/YsOggzAAqVX+p5YvwCyCt9C/JwQA7Ghq8WCiH8ESzGPOjw/dP28he90QpvHHXlOWB0et2GNPP1OBB3n8LcjfMccq8Xib6PG5IRrk8eQlVBtRHolv+3N9R4O5HisVrkrlo62I4DNEB8XekO+O4IsR/jXhux1IW/CT/nXk/UuPkkTPJnFHdJL9CTWWiDWXGn5AzomyeRPLChtXDgRb4T3AdszcHnPHdCtnuiz3IBWafG8bU+P4QH0Ej0GrQaOfh+6WWkVdsCLgPZVvBDR07hmX+47sGgvcR0M7xZ/G5+L9ZfPQ7cjz4bn7TRgH6lDEtEDoidOZN2eiB4cG/3RtDL9EdK/pP4Q/VU3+iNwW92e0YX0UVO1B4HoK/iTcu6mrAX63LHoF/n90OjOdGx1R9SH4tUcwXam5vxWqxMb/Rk7oheoPyPKM79vY/2p/ozncn75PTyeOKGPLLQG7akvxXcQ/ZlEozF4kcozyxqPcey0YFdT2i34D/BzYW8Qk1ikefpFaBcqD6Eb1G/Hfcl78G/JS8ocPLFjyLs8A2RO7GO2Fvxw0psiZhD/1uoOIa9ftvGo4mDK8bzqTSW4U7nzudE2ok3whE1QBB6A3G0ICtndRjO34LAqatV7Fiy93KkjkujiCfj9tO2fNvrQFCLtfVhmF38R/cI2PYZuYywU6dui9eXOu9CimdcwpxWh1xFg1YivCGmD1o0t0e4pKB0xigghjSJ9IrkpvJYBPAmP0QlW70hXuK6EGNIq2gkrnL62WCPRuB367ZkH4dIjEMmWZ/P0O90Y3DHehmM8Cay4Oc8n3ppoEl1lI3CnnxjfXT6TZ6XHQ8/D3lLjzk8aV82w8NxcWIwWvArhAzwnkcQ5LJzwRTy3YEaLAhqMmvh8TqmipPeE/k1Ea1ZEjRrCUqfyOQZUwvPCc3B1tCQ6azG+gEUXj7YDjT0GzUXSRA5G8HLbwrdArQy0bwpt2KPn0GIME4rVatshrHoHKxlRauxIDGLT+sq5ZOUhRpCVBc8TniI8i3aK84snIysthNVKGCnTs5EV1IT1hlVpw3tyNS7p8bXcOyworNAJPEloFl3Z0AwD0Vhteh6i2YNIaNkdBbCaNn9PPsawStCc8nt4PtBQohEbJ+qR2L1E5NumhQYfhEaQA8RiGusgbqo79GBxHWrOgUYWXXgnsp5SrIMQsmNFnRtYZ4uxkqy3cERaI6o21v6LesCg+1DORw01UItCOYAFpBx44dHL8tQRC35ymltw8GsW81/wZmzTKk+gsWAVAlgqhxE1PMPOYIbPo6M2MjSQZZHBMbxDeFrw+OciN/JbocFRmxG6xIYuPa4t7k1lvc0x9+b+NGo6Ek9iCE9mPNDoCnJOS+q0OtAVY5HZHiI349X1Z7RMpyb6YhYkdkk/xqZYz4aHI/w+AN/hieGexRu7iaNUo7QziTDVMn0J1bNr05MO4WWKrJyCT5MevFbxpikj4m2KNzeCxxMaz0c8fvH4EDPDGoq3NTMev0R8PeEprDG8xRDyJDIEjwiyzygthT6CVyc8wW9TWDO5nhVNtqdplNOUa0T0ZjMxa1BeB3OukQ69YLGWIXIAfEa5D3goKfMD0JsjsVqyroSvYp0H0LvIRSCisiSGj3Hfqj8QHfTM+dvwkHh+iXwcWfN4LnmN5wp5PqxBel5p3wk0EpDfHyCaYHaK64w06IvO6M2pw/B7Ri+iT0RGkE3S6w0QOVnwMFp6fdyfp55v34dFpZ1IJcqQSKs7Ei/tiDz0hc/goR+m0UkL3qHwUOjPyCNingV6DWtFnmUkdpB87FviRc/J/xHtnKXR0wD2D+eQyL0tsiprKB3j/vl8uobqLj9HNgwe6Wgsay6mjqJd7YDe8KR5f044inB/ItfiPXXoVQ1UTyEygueCyAd6UiNz4+WLzlq8v3ZC+jaUf+b+hN5Nc38hPHXen+ou3l9CncD7w5B7vT9ETwv3l4YNswawLkfUcwtroM01IL8VOQ7dbA2IRyiyCE+rnkLmcW2TAbVoD+y2T18kPRce9LkOgobJMjQlKrXhUSKyPNjK4/EWPR5obngiIX0u8URixmHiieD9Q81xyIoOkUmT98V/RUxD66dW/BRWElqeOVlbc0SIP2DNAk9zlvBYEIMhtyJxEHy39Dz3tdXvbmdxjBw3U3oTo6sYUgNvJIJ/n6g3EjT4u4QrCn60rFT5jk3viN+BRxUbj+Uqxsph3uxIVhk9Glj+nqPXAKcCzVQewm+lByi/l8/xvF3+xoVvzdhS4m1IKDw55reUfj+UvYsz6qdqa9spMhKImOBvRpp7tOmBd6n7mbkFV/Q7X8Rv5HdSocqc66pTn3NNiwfeEr8R2TW1j+KvcX0EEunKGqNXLfpd/TLh1CSONLKU70Yn5thBhJMdgzsRfFccc931bT2Gz1X39Bj+Rq94v9FL8+836Gs6kOlIueKQK2KTeK0GfA0eQ/+bdQjfqjlj9rsBP2rg0vZSZ2iOV2xtTr8tIsdOBG5bzVme
uej0snw1pDbVnYE21tzc5GRPNHLpafaOkUmQcIXIvUYpfZ0Zc9YjRPADI1GwX1jzTegNK1rIDZtjh/p7FOR5ZeoGzTfb6mswD20jw5S9H+mOCr5vM/JT3yQ7Rm7bbpnMC+wJ9FOL0bDYfq7kL8JHZORENnBfyFYiLtCMsOiuiHIlz4tVDr8F9GBcojyT34+aOc22sb9x2sZq809y+xsj9pm2+GzITo/ht0NWxC7FWZYa96kZCNKmx+gQWTbN9uI9aIXBwMgWdDr9GdhC+qGjK+jTmXlfZCxQ2RN/L4pnpL3ZKXK4q6S2OPO3bNoGZrFjm/4b76mf6udt3rv+PvY0C47ngMZjfEA+YN9CbGsSkRdh9qy4X5NNnX0//9S97QsNz5OD9Orzl+8XTlwLbiO7f/dlcnl30rh05t/7oN3t5Nul0K/ndpP+bffxqhl9v/xUQxYk+930ypmMRffUglF9GHw+SXpnfnp+W0suO9bw8uzI6t9OvL9hsw51ByBAZupwPJR1cxd8OhG91Zxentnfjs98uS/chw1d9tznyaXbf+7z78HIq179G02Cz7qb2bq9Sc7PerVgGG1en+nkUJ5J7vXose+cf+/fWrXgTp/lUuTs/FS+LzpC6O38fco1b/edeHD9af5wefutfPH5y+R8ZA0D65l9rBXaCP1uLu+ib6I7Jn836t+4FiSGMefH7u2goO1A4s9gcPGp+3Du3Fj/nIplAt+HB74cI7MhfncwuMaurMSsV7eTyZX15bu+pi6zjkWeW9TlkJNg+OPPEK49Q9+Jbi4/xffh6Qy64FEstK33P/ved8/vKGsqZ2N5zpvjM3t6eWg/XjonE9GF06tPR0P5/Qu0wbPPXpIZC/Jqfnd76X6ZyvkNP+fyfbnHwxkysLOXznN++MJ5xAM5Hg2+iRw/QM6eeLb4vHtzcPWpNoXeN5//f9eHcv5/b0YiUyl28Dv2l+Yz55heJtwN/9xNz08PvMtPk8eL9OFeVuKtcH30xF7T95eo3ne7wxekHZH7S084vTj9wbtDTCT27sU7XKTdc3cp+q//TbPKcqefRWOfzbGWHiVmcP9GvC3f653a04u7gcha1+sJD/tuKD5Q7fTkUCsN+vm3Ls9m08vb2vicmsvbfoUgmxe33bbVo2U6dvo482HtRfkX/j4va514Hh56juie2d8/Sm3sGexCFpB1cI5H4b7kYfghD1vLQyp+16nn/Zw8tBZW3vb3dPU5eNl2NQKxMc9Lx9Wnl2wgckdLz/YEL7qfur32vyffL1/J0eqPcXQ0WOTmU2f9V+LiFyyRREPuy095eTb+UYmzJG7Y4h4X6ffsfY7quM8p6n3+Pj1Ie87RN3pCo/4sShDLZxb0wTsWDl5/AiejVM7zTzxuP2dlH3/AQzN7vxIRaKy7ILMv8EI4/LzEIQMt2mGYexfbU3sc7kgakMuNhl66L4kIPiTiByQiRKz/2Dr8CYkYjZ/Q9i/JyEmyRezhvl6/Yr+muSjtu4ifWsedL0eLfHjmrC/6nO3HKPlRynuyFuNt1uKLvB94YlvTH7etZndno219XbwiV3l1vBI1guQjXvnz45WfsA2jYOfxSihe5i+PV8SuH4+C/3i88i7kYdTzo+QdxiusGXnR3/r3ZPySrEWN5vx41HtR1n5MM65alJ+lP6q4xDfL7+7NbbrXaowfw9fb9N3xPY224/uHpfywlAuacfeZPaHfqWf9UkuJWhP3eNT8sJQ7k4dXx5/ypBKjnf6ExvzZ+PPFe2p70Sj+8XtarDt4MX8Yzlqd3kNWzbCdnOkub3QQvrRjw0gqnIeN4grspMw7HLkvNjy4Pz+b3F18bteCke6RBZ/HYh9Rr9pGd4l2N652O6JeIJXIdVj3tDMDe7M/sl4Wd6tXniPLsMyPsYt36s1ajfght8zYI9zMa/MO9+AGaFHF/+1i+JlvO6XqMvKD63mlyjrwRs0rZadfHoJmlXaC67AFPNpHg++WDb6J54ado8n14dIWMxxbK3I2N/iy6amD5haPBVqtQy/ZUYOvJ4GIUWtjqtuouVC6hGaXztVJmKI0WRta2HTWYHm6lll2mylKaLTJIXS1AfALmuJQtpiEQ22jRGoEZXksWxxpExzLFlmmMkZZHhq/LG0I0yY9lmWiVBqlSqMbU/7XTlkWh/voyvXZIHGOpgcUq7nmmijRt9g8lpUqoaGAZSVjNlCw2ZDP90V+y7Irm+WrLK+JUzQMoazDNESkKMdjuW8D5VF1lAOnLB8nb5poPPZYjmub5+mgCQ7ltCgLZzOjlzUkh1ommZrylxQlT1r+EqLs0O4iNYpnGM/SkKWxaBoZs3mKZV7ynawIj80tozFKUh22NCTm/Fr+NWfpOxt5mlrmI+dGay2eN7IfIjRKyXsJy1nZeMVyVUtLFFE6FJDebHpBgVyjjuYUi3zVRj7ct4uS6Uib61DiiFIrtgMIbVCijobGORq5wo42rWlDM9oUmpQrlLmF2lg6yMqQu0I3NJpEppSX5bosz43R9uCzlDhreEXZHMpGRrwnFw2xIcvNb54u3FuX/k4vk/45GzdGbGXyWECEAq4RWkl7aMpIUHAvzhEaSVC85fDOWRDas7RRuY6n8dkyfohCd9x9X1v17bYWWHditL3NtZUQxV9jDw0E8nl+/TM0rbBYFgVouMaYXEQRG5oYWIg7msQsHG8EMxaOsyHgCwrHtVURheEjFCw1QTmbbXIdtqThPGxACFMUQ9fZ0gVJb3VQvglqo+29iZXpaEvmWCWSbZrkJlojwaU5i9nYZD1gaz2aNXAvaMrWZnAUWPfArVTbibDa9Dxoj9emDhRZodhNpIiFb1h9IdveIxYBo0i5mWp7J+EFUHLqsIgLTSg851HR1gbJ07alOR0IFFsetbX0FUXVh3UU37qUyAaeOcAqS7ByKNEpWlr5jKm2y/ZVA+FzNP80UMTLElJZEScBaIb2NxaMozEeUp/2ha49bce12Uwh/AT/WYBssZQVjTzdtk1apijahNbte6JZQB9XeG1axm4CLUZs8rfYENbzPlWsJ8HHsEh2LLQ3ovDZBjQBiua6hFwQ+thocOizlTfWwkS2TYcs0Byz8UPu36PcsjkIK1OBAEI0TrC5qInGCBa7yrGtTRN8H5ok00h+RO0U6jGLp9rZd3y2kYkWi7UlkQ05RWsj+BQqf9nizeJStFiTJ8ed6KwVz+ZocmiRT7IW2Q4ua5bNPfI8vD+RLzR9oVDO3AvbPNnY8TI9T08ZftntvOEk40UTVkjlkw0xaI1k26xleCxW7iZusUA9QLMZClCxZk2zSztlQ1WDjV9s3WYbOFpDRwAeEI2LJg0W0vb8vJmNvGC7oKOt1iiW7wuvQm1IwHpKCaWBBgnTlNROaInZNIVWfbY7p7RkI1ivAMXGrgIlHKHYGPoHxbkWny8d2JBRoa9YahQS1xUaQHQT128aux00aVFPXZ1okSV1kmPap4XH/Xl2f2LtHd5Pt40ABxaJkBm6ZqBDcL9NrAcX8oIWeuGvrU06fXooQmNX9TUK6mk18ZnL5peJWGqCPKDMe4DGEOq
0FtdREzABBtokFr03YLEp1mILBcwNA2tCXd1mSythN1I01bVt0/Lr0SMCLQB+Qf7159p6f4BSdt/AMzjamnoVR2yj78v1eolcj7ALqhvHgA6ZkXZo+KKlawN2xInoVdWt00ZP7ln0D5sEoYvoqaDYN5TnYmF4yIJYejJoWEtZSA59xTVF74lrTKEG2NYJGAEUpk8JjoEGLvFEaVUTwiPAhszVa6DVN3Iluk2uxQLntI5roYkyVY+PnoRF8I1RpjdDlx4h22X7gGDxWkZvGjmfsZUdhdSj8bzTaHpsfoPt6sSOsT827RI8CBYcq55vcfP86KTVic17bDOFvpPfCM1TeCcijynXJ+0Im7y6KMCnJ2Sbc6dqS9kYZOShbdZd01Y9D3kKFJqjmzUbQibhmYWw9w7lUvSWNsEMZtpiELh83i30+rGT+/Gf+vDxk0tnisTO6OrfL8n5mW+1ndq3SxcAPecPl59mG8uMs98d/3v1cP755N6E2csJIZQFf/7ycH04MCXEQslTSN7zqSuW5j/zOdNkr0x9relea4vk/bapL1tk7NelvjZ6o7vYbJc1t11CaeEOvG3LbuS5nr96L9lyU2L9+ePm82m2/aVeKrWStwK6mQH0L+RdHK9kORvyLk7J2gHmbG0LKPSPtMuWaZeh50ej801pFyd8Nu0SoirYZ9f/6S7TLvWPtMtH2uU/m3apf6RdPtIuf37a5fAj7bLTtMvhctolymlKFE9Z68ClwNpjGJ2qZoNGC7GGHOJ5pKS7o6EukcXm7KVGD+mIyHWwOj7xC5heAAoU16qtVhlyGtt6fliJHtIz2fUvOhrCspdfsQXaMyLB0foaLZ6FjCPsaQIbok18G2BLAJlPNShQwNpEQZUQWGVjBGwIXYOR4pfQ+raIKYSQuD7PO/KJXQA8mOZM1ywRwJiCYsqDaanxnHqpQKbKkKMSTf0AlTVwTWrqRFEg+Xmq3kWo+DVMDaEnGnpLaNltayoqbXvdkdg2WFTicIYp9Tb7rGlZLMUOknXbYA8wcBuIbnbMPmimndhzKb+zFc+oPSBKQGeQdiGHimjoxqfZMS0uenGBKEtc0RA99kQkAB4nkLsG8pnKdyieiEkRWK0MXUB4anrAFQeIuKdNp3WUIVQ1HVrBlPrN0vWH8/YtIPDxHCnoRR01J/6I6DraL6JtjiEzCP1F10I3E4GQmCTs+0/7RFYwcnSR70KPz8/OU9aIfY4kkK19RQi2oWP49uJs/m1jaiD73ald+Mbre/oI5ZOLfwf3JuzHU7Kr+KWuCazZ5+u92KXx6kB53XveYeeFaJM9dV68WBkoUpRVBn5RHLzY2Ul1oNjQeKvaxIU7QG1iY6urP7xYU7TQ0/iDz+8+2Xmy+I4rcWXq7SwBULbckr+Mp+6XrbUEgF3dFP3vIPTfYvTWR+i/dcXFPPrEEH+h6Kj5CLhwwqx/Pn84//fq8NIlsIKoOoWelVDaPR4BmC/wjkfIqh4lF2fN6cWnyfj8U/eRauOuO1GAhsk3qBhmNw3gwwYHZt46XMtdGgcmCw2bmpunsSRglgY1jbqBcQxTBbcj0JM6DxNAAmuobyCVJdRHeM18tKYCUuyzAMDQhPoEbWn7JqQX402YSThKhGBXcCuEudzfmavD2oYTj4BDAbPh1PGaIcA5AExocw+JgUA4B/CG5uqbBMpgcCG/VcATOhqepgKiQMJpF6BZdAABIqOhvgUgFE0FhF4BXAdgGdxTLzXPM5egxWIgBceKTu4NHLlsnyVtqbMrQSUhY00qAOEuAL1wrdDpINhBaoEQ1wi2uOcAsC4AQsFRhoOQEN6aAGgKNYp9EDi8ev7mPDKpAD5nilCeqQ1XAkoT6gfYj9MwmXsP2O+S1wC7GWX7aYGroIGALuL+3EzB9nDffYtOIu+FcLx0MBSGiaE+nEQN9RXw3Gfoj2BA5crh54caWIYEMSPIHgNCBBiR7g8lBCsitBMC5CbTJgp3C6ioMNF7uoqLdMaTzvuq7NsFYJsJ9An4iMCqlxCmiUBkgMUn9LM67wRIpPOuaRECVzLQd0KFOZ/ReR8aaNVOP4O9Vud9BGeMzvucgTud9/z6F0v7t3SaY90bSpsSEHL/B3CnPoNrgl8igOReKuFydS81SBQ8HnRjoG8z6CbvALZDcDGhI4JayJcEYRMN2lqEV8a6JJQWZNvmfhScQvKy5yiPCJJJuGEFzMe1AJnMcQUIEBC40omncyxON9eansc2+5xJywT63B82a88E+laRCAhtAxs/J/jnqIcAat7V55wzkCNoEPYMm4muLwI44Twzhf7F3h7+BcAm5IxjIOa6r2dAGJlGappn5D45g0qAoeF5SVPsC3LfT9aDBD2RQiJbZk/UZ5o0PYo1KQSdgqSH8BMgSkzmnMTUrSMEANw/dIWftoLl3cRKHwb/WWCV5LLAPUk971NgVC2RpWXnyQBRIVhLu7GCCh3ECp9GuORUwYyQ2CFfcK05YOiZCGKaD3DSzRmhzhIFyDzuIHHQ9BSWGWm3pgFWir2ugc5FMqjbyPQR0rzNeWiOSf/8OybQT/NASIMbDchs8okgWSLDlNO+gUYmT6ah3fM7DYKYeeTTIQD/mgjynCyYxf1BvrqQq0Mm93AvgIeG/G9Dz8wd9HLoea7VniX8UVj/EffnEyYFoXcJ/trzQlv3hSWQwxiAGUE1NegCGJQfZSldhbfzaf+o3/G6p6lQBJ2NvsgTEyaJAoPx+64CTSJpNMjWvpwPugrw7eGMctdhHYQmIpiaB5haPTFJQov3xzqItoFpho1BMk/sN8BkFfTQN8CSqd4PwRETDKdQ0MB6QvBX1p2gTiNkcqtF8NjBIE9G8HPRF+nYylLQlMeUNtnUaUxi1WXiXzRnlkKYI8XctiKTgpbz67qLBxbqH+Rznl/HICiIYVfpyX1vWee4H5vAm9CpTCkHBtCYesUlNDrg7ElPjCkIEyZJR2KfWPcRQG8wdQ+wVtCTCUwkPwgKSdlPVe9B/gjzr7D5BHruGX8GOpTbIqyD0PO14cdospVAdtRTdmgNLALnsZbCbFfo9zmqgHaW/hVlOSWgWV5zELpZHYL4AWlWY6H1AqjlQeKagJOuguuSTzPqAern0NxXoL6SJk181inYClMOCPOuQrSrLe4YOWyiPRM2FfqaYG9zfU6Mn2haHBVE6O7M/sNfZCIbPiN0CkfpiHz7+gxInoRGz/cc8yw29Uu3rUmlCZMoCQv0QUdCQNJGumE8mxOOfNR0uI4AqsZtIchlLyXoMhNyoXuqvt+cIIWEcuTWhqNw9dD3PSZddN233T8EbC29OgtemT5Y1Zcn2zQXTk4mZ80j61IC0Of38jG8YUarIE86yCHJDj03bPQfXtOkt1XtxyYf8tWNmyJj9ratYcUddLZsDbt4EeYj2BoGYf35FxsVn6mSMM0rO6qN8GulWnU5NeJjwPl6dqSW51CW21J2Ux6xxbTsjxzJdjmSaOiJTZuslkckxyOJG243lke4Wvc3EL/Pc7iB1ajvIkcifmmxwopxTjn0sPEnxhl4stow5iIGOtYIGzWwOx
xh1eMmKP2l0TiLq13GS4xzOJBBztcNxMf1mV9J6vpssNcSt2ipRUhYYGz+EioYsd+oz1i02FRGTMFY0lK4Y/oztm4OII6+0VFTSGBzW99scthtbqZg7BNzANxIC/LrGthjbqDwOL/fetLB83AzkDGhiQc4biRVv/9EYw2JQ5/2sevWss3o2QWALuKzAfxO5IoUjLdBH8ph/JdvDMX5hrfJRXEElsZjkxMFy8183Xru60ZZzS83Qdt4fpsxYYoN/pglJRyZBRBi0B41x9zYGtPXZT6mcWMGIZjaUfpIY7+rNc0oRdD6Vs1tWaiTxcYXgWPNZjFqsOnL0lcOzOYyfWX5HGU18KFCT8szsprkwIzuiwead0CdtRYdkF+kE4bXtY2/SV9Q625NXsWUyFgZCC62HFBS0jKb8hoDZD7o5ERHSdEHtTUWhm8YI5abU8Y7ZpNezkc+sHSkzRIXDtRLjC866mW+qBnVhKIFMxaHG1uaLzttENzW+LDMpwzoy7OoAD5tPNDxarGOSLLVH440j6F5BOYdAgJYo6ABcaLmnVCowfFskB1HN8Lb8w5zBG3xifEv+MpxXQ79UcRW9Fm1/Eh920BzKABu4jgm1Gr3WY7CciGsNeZIMFYx0M1V5M1GJs8AnjAu4NgbR2vzGZ/MNb/aNoMEsAnZNHk50p61vfK5rzHfwQlHV3Fwy0ALC1CjPtTiAQzCMOvJMiVF3tNj7Qo9+JEv+8iX/bn5stz2fOTLdpIvW7PlTjGqMeMx1mw4Yykn7WtXh1Q1eP3FIja/KGJrrxSxcV/CM6WTLMpDCaMZ05jqkC0dP9niMB3sO4zpH7EQjAOwmH9LAi18s3TsGmQNeqdnyko53CrlIACOFcXgK9oEC74aRwmeaj4MexJdM2AGvTc6hChAflCHuqYsypubfpMTM6Y3zYD6MdZSfALtc2JpLQdFJfQ5WPKJgjcO/ZHfMx/k0D7kw5fQqxNyVDAKicRnUBsLP0DHr2aj/XSIAGnQ1KFa7GtikQnyOpbKXZO9OhxJd0qfQvh7EOsINsjtgOPouK9D2YMv0OT3dMRfPDNj29DPQHvOtaM9MgmHIJnCFx6zgKWJkYu+6qS2xwE9PEZukHtGKI6ZGflkocwpZJbFXlokQ1uha0GH7GiZrZ3JCdeFrYWasi5RdmtGvLZVn40wkrPJvinoWebXaF97KCKjnTpmoVc8y32YNNenOn6O+jSemRGFWLsebI76BmPjszTnzA3rAJeZKS0WG/AlKzZ0UT6b6dRA16/4BtA9/NzXvb+QuVGsXZWt3M4t6NxwbnTunHut3OPjMDBL5IvDiUMMX/jJ3qAX46h8nxn6vW+14pljRr+cRNzjjvm+iYe8aDyb00dNuR+IYlJHR6/CJ4rhhydZXGH2Ruc6qpA9YHP6XLRNsH+BlRUJGrmwWASofWYYWmKKEQPIha/FjSirZr9Wor15vSz2cTmUbqgjYAIdQoIxMbRTHCNjcwAaYD4wxkXXGn0fjbF08JMOuQi0r1J/jxguDWz+BuX+up/thhzRGpu+REO/7p/Sk7UNONZaTL4LQCQUX+4EDGnNU5bVtLuOMJGIX9kR1mg/1xG2E3CswN66Oyy/m627w7YCZe75K0CWu6TYaLseupe62KCXnsl8v5iXX/XLlsv6Xgv9iEqa7aFA12zDEznkPfTVVSvLiWPXX8sa+/bmprpynmJ+Vda48nLWmKlfJHEbtjz27GY4vT59uOjj09nXC2R7b6a3E/Pxt5uLq/uZSfEOJhffvpnjJ1LJd/d3yER/G19P+zfmq9+mX+/H12eGBkK/g//d300Xks/l8oFzdJR/c+GTZhn/wy+Gk8mm93fAtYprLXMtK3FcbIW0rU2ZfrtUre6AadU1Hl1fDa5Pzcv7r9Ob+8H93cWkWbx7sMzF0ePtw+kwxddBleInx/fI3+t3rqfTxHzp4nF6v8zo6/lw+u/CcQ/MK/nmVWNueMkXSfbiTp7038UXC7/Cy+JnfLX0u3+uvw6FVNdfcym5+Dqtf/1KacvECO8dDUG54jvmEfREV9kv+pDNYV/fND+x9dXCD9YEzOJ/a0Lq5qIFTiwJ1rf7x69981a2ZOW2Btd5gay7tQR+vZ5cTIffly/wKlGqfewa7ayydhQ/SpTgsIr2083k4uzq/grvd4JHZHs3V9cGvtirR42E0DzoSVQeY1TnI9p5ev9+eRDrfC/XGJ+fefTHrs5iNi5ci+8lVgw+QNYIsT4OUCLWlXGA8zwDkjZ1TGGje2bGLzrR6PwMGeiQw9OjM46+i7HD8szAR4yyWAJebHfCon2X8W6PA1MlDrQxDDe0e3Jnvdkp9osmPQ4hPEUj4ZP5X+ClrEAv5vnfEDkMDNidYUgu7lr3hLa644+G44+G4/9aw/GC9EvkkjYzyWfdXtTpex2tn5JoL9J9MPHPtUmODX5pVxtY7ahzHrRkvcn7HOyLnJLWLHFfzmOzLmqwRsAC6lm6z4t9mV7KGi82IeLpBrp3PMlqD5nDnynuDwYF9+asR0dTIlYe995Qo8eB9wk4ixXG5lSuovNAVp7bHWEANeqrQuZFWW+XcOgw8aMCHbgqemjgdUdt5xTUZJ0VVi5XQKJ7cbrPxYZo0gj73WguxLWDGXMj2PfGZ6OjM9DwyQGthUb+yPHvIMe/TM8wKeph6+z3kBXocdUir8ch7tj35L6yD5woyjkH07Kpnrk87ueKDJrB2CqjKRqPOQyVmg51AUZzG80eaP+I3aYmh2x2dV8h0WZS8I7NyolqzzZoAo3iE9NJtIBakH6Gk+RxQL2NetSADdzs4cB+46Qn/J8E6FnpjvAaWh97CszRQ5v6tBJsMIadjWcYet4dIaevg7QD7l9BtrG+B3P2alA7hUYTsTl+rrlz1O6SbyIDGH1dZ86Vje60ktBagdlXZS2xxVpf7FOwkRfDjVm7ylparSUxjbG0FOGcTcVYY6MToQGsXaxNyw0dmMuG76HZp2VvRm+udQEEKECjMF7bPD95H9u0Avx92zea0hYrjvwzmpatvB+ng34ePh/rCjT/zUG5ZkA1ZKapQ+ZH2/kVOkjAaFaX3SVHrBzAbrFPShPdDpxomjZ/SCFhnT0dnYoq1DGfFBWiLd2h0swybQh8CWiKnqKmqeb4Ea8nXtD9ObgD+3ygK8lDDoo3oAPAWJN1gL0Wj3tBpvYA+9bUkdwrQh0vBxPPtRaIQ+ZRa2TqX85D3QvDXngzw+zD3pQOnR5dyedt3W9gf01o9sVDU0sEz6Ve1M/Q04F8tbXxGjhzzEuzVtxSYGzse7Q9NrF3uE+bWXcL++6y5lLoBJVP7PHW2ccVWTNbvRY0QMdujsk3in2zb+9S5/P++loL3qEOV68GQ6VZOzHg+HTukWOfOFVculjkLFKuuiG9QaFxCrpjn6Fvs1ZCPotgj0TXt1i308RYd097lzgaPglNfYx4ca4BrTCYfPDgDGZfo3uSY/ZBCnWdyz0MLJVG1jNjfx0Ye1qvbIu32OmZXq6ek3lxEfc6YVMILOCGAJMYwR4FtO3YwwzxW+IVYh+OmH1aWzTK9
98deHgRbRr3gtTrTOtpp8G6GdTth9yz1V5AS3UNvWpcHzXr83x/C9iCCrSRmv4zyhhsGL1pYBbyX3iJ8KqhB1Vnc52PcC0CEMx4LfaesYcw0R4D9CgEqbnXNGKNO/AG4QWiUX1g6qRUzoE3KGspMXiDc91nA9CK2fsbqu4B3iC1gO7dseZd9+7GGQahR4CAbht4ja7WQ2AvsG8pAEsTenGu+3D0atPI6DX0CBhMvtTIg6lBaRq9Vqc/oft02H9l7RFk0udaTK+4vyprVOxErPvPGBZuABd0D6zvnYqvFWe1L7Bvqkt+RBN9QN58QN78cZA361mLKK/gZUeOG2pWBFpEogtmQuxTeFJ2D6iZ4rltZc/zvEu+ishhuQMnzPMu+RXkagdn/Az1SaPoh64QL11Bzu6IHxOIpmG0hUrKP6T7JLl0+68bVbk59/bSuE1UjsyOR+2tRpJsyIu9tDum+3bwSx6jQx3cgp2t4HO+2y4atb+SwRzb2NuTu/fl8ynqAuQOhRMZR5azmuhfPx5RS7usZ8Ko2y121jdn+Z4ZTGIdjwaiG834GQW9eHno6U3vdj55CaIjGr08rGrbAdURhrS8cpgmwIK2Gfe2C1TZgg+/G6rsi6PS13Jfu0CUReSwVc3ASny4HaKs98LV++6WQ4Ofe/YfGRr80mhC+MIfsvohqzuU1afGg75aVsX7b3vb1NpUf5BeuxkMLZGR2N1tK1mKtbCrMZ6ykv3jLcGqVjLbR9tX9Lw8QLzvtTr9x/D0xQHir+4ZXniK36xn+HKL0W9LErLN2MyXB7126rOfk8+d9Aun/a1H/K5ZCswpeblf+GndkmzhzbsSgT6Gww/f7/3Z0+A/bE8/fL8PWf1dZPXD9/vw/T58v537fjvAivnw/Z71/aaYT/aCbtnGonaajwszg3aRj15boz8NpGxFjfZjtN1Eo71oyYHXOvW2xgx6Wku+2pqkGNW9qC9f02O2e20ufLKOF3Laby4pXqsxftxy9tVzkvL8XbwYS0XEqPP8Lez+LsDZN1Ya/xng7OvW8q3B2ddrKd8SnP2l51+3LT8vtUCyax161oe3+jbe6m7sfzhC7a6XbOEDfETKu9Ow8W52SgY/F6/uKFpGhVbvJ6Q5nO9LBwHhbSF/8BGF7WYlLkUiP6/9fiYW2jYSu3yxZx1VWB+5nf+qtXxFbseNRuFj9KFV/oNa5aX8zs9plR+blfw6rBAiXf149iOvpXxpdaB6rOmKF5VX6v1gvaGzlZfa6Xsip9bflJg5+qEHRb1lz0Ef8uKQ4JbEmseoEPw8lmtbLvqeUdd6nQ1Pu51Mrqwv3/U1OmpAI9Rps85aKwl/vHbSXn+WzNNsCp3aj61Gb/63kXpidbzZeKyK65Yy/OUcysMued46LoTllKwNU7IyHI7X9PLblvVyM/9GBIRF/I4tAQ+8NaiEJ+APtqDiItrJBtiM7L2twQ/MFf65H8qFCyZVl2E7PM8v+V6t+M9fPqPiNpiTFFx48byuZy2fSMEe1k5EduZU2JLD9geHf4TDbqnsW/l/9s8xuOqtnLayRwY7Hwz+MQZbCyu49nMMXgX08cq1ZzXDLvntbsHvyWT48O36ZeylNfiUFRCko6MlOdj0yQ6MYdVaoaZTLrnlSs2xzN+aUayUrLJbdcp2zS87NTszm4sC9MRXXmcuvTekvWUdHpbLm1en+eTr/fTCQOnY/o4ck5oskGXHxC3LW37Zdqv6t+6j/Cp2bDHkc4fsqBxtRhDLP9kF+df9Qs+ulVzLc/yK+VtHfPtV9N9igMhvpooqZXHClyH2PM8qedX3qI22QOL7/ckvVCtVKwvkfz/qp/rn0d8DduSa/7tkCN7jStgCk+5340S1XLKsJzlR8UtV+x1ywt4mofCbcWLV10cIsUB7u/JuiL9NrP+b+UPlasl3l+kvWsj336M/lKG6/kH0r1og1JNqqCrcqS2w4t1w4g8Mkh3rGYPguX6ptmCa7fK7YcVbxsxvxArbF7F/khXUTwuscN8NK94yXn4rN8n9PVfFu5q9+VuBJkdDzw070YZRm6ETOs+N2hw7UeIRwhMl6AAKeS1YcqvRWyh0CPLR6sVoF44IVxBXjFhKOeISEGgWR6hzhEqM0S5JSJg4jlOzOEI9H2/dTzjaB5+n8QywxwZg+cSM2NRxiRyx2Z5nIzg5CiptYhQLxkImkUJU5uPpWgRIAihRW8fJcOxjPDPj6WyMcwNI7DHHcpoxmRxfxxGeGJk5y0cijvLrmrEyoY46Gi3cb+NLKM9jcZwMRw8OCLxDSDCMmsfxUVvHw3eeATHyotHK1v0ohxiydYAmBj0CyikkjJQOJOQA9cSA8CVRMTzPIxwUB6QT3A1Di9JIoZp1wFUOItjLILrMQCtQQIdsYlhgRDiqEKCENgeRpxioRbjjpNX5EkcGOooD5dNxNlCeAKwtDrPkkM1EB84DBJMwzo6CEMr5ORCQYHIWOKMDFdtzhRjWAWQEycSw9tFgoNDSoZGOczOskKCdCnmW3mQDDBXEbqiDwhSk7RzAomY4ZJtAmCKFloEBm3H4KAHpAN7T882AP6ulg7MIC90CWKoO0FNY6EMFFTRDOC0F8OPAPzyHR+BCAiYCyiuK84Hv3aYBpyJ0NehlcwCigskSokxBaA+CsANgub5nBnEqkCEG3p9yuGWqwIWhggF3AlltHEqP4YNmyGZbB4pyyNqRGcI54EBYDH0kcC3g5ESeVAaOQgytjBpmeKXwVQEiAU/WTBVarZmGGewa+ELYNWiGPiDpAFLsEYSXsHIYLh9YBMxrAAgZUGQhBm2ZIZsc6KgQZwB/VeBjj0M49fqAWJvrkE0Mzuq72eAsPl/al88J1uljGLDC1QHIkUMKMTwxMYMIRQM0zXrCYFQdZPv04Lzeamm7WwzOA7Rd7CtYM0CeAD9ODeFEgCxN8Yw9R4EYB4TqwzMBxk6HuBLKLwkBskjeEVbPywcVkyYxAB9TAut2Yp5fgXpBw+z6D5GuudDlvSgAI+DvHF1DHMo3M4CGoqXq2WAyAoHqYDPeG4FMOTRvNE47CriccFCvDq90CXY8ysAOmwCIJGCmGfA1U7pzeCvh9wj7rfwlUGVEeHhA7MWJgY9PIgUO5TBc1Un4vJ/rNA54U3j5OXVaCnBgvG6D1qkBcARwLoCUZZ3fnEFnd5QX8zDlUFeXEP94nhEGLPcJY06N38GQXw6KhSUQ3da3dfjeyYlaigByRk3eakQxwJ25DtOrk/wY+gLD6sBXDi3DGujTSgO4EkPxYJkMaPRcnsuPzDA9M6xM1tDViQ7xI6i0q2DUPY8A0Kd6TNBT/d3MDE2cRulJiCGDco4U9FLdBJ4Aeh3rIrAJlE0Acug+AhInBOJMAd4ceApIGwwKOXpygJyzUt5mYPx4XhuAqRiGpkN7mwDNnWPImtAY0L2EWNThpACRHjtdBYL2CEFJywprXQdsoqMDsjmgEACsBOpEWSEHG3LYX9Mj8K7C2/sELCftyUt6JuY7htb9JAMTpb4CjD+L9SGvoUI1cvBw7GaD6SIOPI7OWvFsDvq0CLlI
XtOz4HcILo37E/px/cj6MvcSAuIY9uTpgXxOuDqmLB/osDREN+U6I0Qm6DxQm0mQ8YEZnE1oZALzcrge1geA5gncG1PeW7SZPQ49JkDqUTMb0jkn8DDXLzw4wICKHu22DcwyYP8pbw5BVrkm4ckBtjnQwbjQ7YnqXR1wjREOGHBrBnICxJh2hEDYHAiogLTgBcB2Q5frs3GVDeb2qe8V8Bu2NNVB4bBLPfVhMNKBwMYcpOhxuC2Ai83Q6HACYGPcK8DuOaQzpY/QIJC3T5DyDgDbAwzvdcz4BIdjFrSdzO4qtOYUYM75WiSwcz/XCRg1oXLKwdZTPKMO14Tt6WG8COQW62wakb6UPSvqZkM42zqCgsM8r07MkE7a5BwUl3q6qfJLyOx6osdXYUtHSFitI9XHLL/loGGC6HItQT45fNMaiMweqA3lWsA1dMh3NIKt4DiUOUGuISe8Nu3NjDoUXjvXGQDMKaMYSjBXoOQmgcop+yMA1sM/pR7EeArbgPDrEFgDjdrSgY9z8/wchEzYUsLHwkYOOOhYnycy7ykIuw6G7SUGYF+HymfwqPkQToK665BOHY2hQzoZHVC2csjaBfhUDoA1ujQxINkefWIMP+aweICt7xAMeCEC8ETCxpn3D6voRo1JCMuFWCnuwDNlfOSacZ2BWN0Qo26h8TkwA17AiFYV2t8OO4Crx6oI8uEhOmqXdzfXMdscl50wlmLbNrShkQyRcoyVp8TjXPACGfNcxTqyVazppEmIZpEawHE7ZgRnIBbqxIzZtDli3MRTBJ3lKM+rOKJlDADpbVYbILx7OqaTUhnQKpCS9EDw+x7jQFlFNqHDMaCEIzk54tM2UOQZ/S7yMvcs8r1DRNy/rX1Fs8IG2NXbi7P5t41jO7PfndrF2KP1tiGM2Uwu/h3cm5GckHNCs77UjAEr8cp2jRVpGuxqLGM62KZleIuRjJvi3V2MZIzdcPiakYybPP6djWTkgPjt2/TXvK1djFgFDKu7Lx6+j7Gae+XhKJY46Gd5+JZjNTeVXVleyamu5YvdDQli2y557i6SwlsU9HwkhTcmhUXMEIZuSAqLu337TFI47SXRUJPCYaMujsRRcnHWnF58mozPP3UfKdJ33Ykii0++YVGy88oglW8IU8TALs16NiFK7lD4nL7FCV9Ig4Ue3TiESJgppaG/pjVGQM/vEZE/4oQBzkRxOT+DM3HwOeeNy/m6AabzZclffTYmh83kuZDG3qQG5PeYR9XXGVr5hAMmp83MFDgxnLFia3iJFBAmAgRwbDydy9VWN9mGm6uzwUPM72EoGOTXNc5Mqi52sHC/9aSTJZZHmF+SzZ5qM92mYf1JnCXTn049xc6yEuvZ+Ywsk7YSJ8dnavZUZ1AhNQrnDWESpy+M4nz6AtH+G4GF+UMa7iPR3i/mx6R1E0a2nSib76JhFJ7f5jycFNMmNDXMeTedutL+CClSToYQ57ENVxph5IlO+Qs8utYM7cd+t8FZJZjMloV5LnlDl3os5+dECZP6DXQmClPHgZl00J7ppAOkgZASCT1NG1I2HDPtYs5UM9PAfS+bgEF+kU4ITzXlJLKFtFaiM2sgD0hZ9zRdeoSJgZx/xjlILTMhQmRawzVrYGlaXicwhEjhMmTArJcsNYxzmokRcj7ygZP02pyNk282cMqfpoI4PQ9rB6lQ44BruMMwc3aKzRabU5R0Dg1TrEgZwvk9wkwXM8cLqfUm5v/4vHdMr2Dql2GqSQ3XER5BBjzKEeSAaRDOQHG6JpTuNMaQFWz+IBUnfEX6McTcMswnw2YOZyDpJkjPpNipF5CiTpDixKw2TOdjGgtrjRMEMVOK6YOU4Z2mfjkRRFPDccIJJ9zuwIYXZ7v4mIAScrsDqQGdS2don3AS5UScCG5BmBT2COnugU65OGq7TF2nN4Gs/xOzniwzYdF7ev5moQcX+kvX0zVDMxkEE1tSzHQL9Zl0ukaKFIumaQPbpNEtDemhu9qYbJlgXljE2UIB0z+6caZbOdF4ls2L8jn1kWlc0BbzvEIJ8WcJUhLY6og7nBeoaaHUbIxNmmY+ILeCsB7l84NY06yi6zAvivMLsJXQ1nlWudyHPiaB8PlSzMRr+0y7Yt5UB2GOXB/6gteL3U4DHbGcZ3USMfSmvnEipnWgb/rzfJ5U2tZtmW5b50VxPfc9nVYC2absYhKJS/0gvI1SpGJA3z6nkwqNXZ2Ucy6fkZ/4DDODMJ/Po17oIEAcMD2MaTItTjDhVo+n6co4jTtmXhTmJWXzorrZPCmkrwaaYklFvhpM38yY6kV6CrRIqSMwp0g3FnGvI6aC5txy4Zw6zBXCtkFfrscZUr7OesR2FFL0fZ3Fhe0lTrnklocTaUrLOhWbgNl9kdk2M1NKOXMp4hYQZy5lOsxlCi0xuo4p0HPVwapz0iwVA50YDrN5Uj1sblqcqJnUva5uvc11Yig3oI1cNR1cS7cX6rgW50WZmYApp4p2dZ6U3mvo6rylkwBbgkxDG71v5HzGeYA6h030D+do6kTaDmZnUb8wtc3poekgn7CjevEom9GIdY80FtLT8htMiIOeEHlMuT5NikfsNNNxnIJqm3OnOsWI86KMPLTNumsaOwV5wv0Kb7Dd0eib7RfOLsSkJYdyKfqZsjMazDQ5we2Un5yos+yPfaThX52GX9XrH2n4jzT8Rxr+v5mG7/Q+0vAfafhdpeGXpOkdpuHz+3tfafjV7Ef3YJdp+J7oFW9rTJ81b2sXaXjRDX3neFQgfL4G92ij//J6ZOnGePYcquoPy9YGNKKfR0eFbtoWu2tBnlDS6j+DDYjsrRztKAVfQVH9cgrel7fWe0q8DXARWevN6/LvWzR0fuTfNxdlJ94sGn35ifw7YgXP3mX+PfzIv791/j35yL9/5N8/8u/vK/8eruTfC13I/IwTMW5HvNdjvo4tBeKvdHXtOy3yADqrhzxFgjg5GkFHBsyrSFR1ohFaj00bLIcfDSjXzL2yxQF5k7aL8mfVI33f0FqeCeueJfJeNvUc+UuWGyMfirh8xNygzbwL86NNkdf2TPUGXuP+NE+PHJnJ03vMw4+YZ0/N/Ho7ZJ6f98O8CiLBCDkR5E8nnCjP68u6Twu90PRyvdDoGb0Qqg0RvSB6gPlO5nF0Pxl6AzIqsftBqC0fiN3l/qjX6rpvwfN1A7l/kdPQ1/NTb7k6sbytEW/apJ7kuu8gqsQ0+cAm/fA8o5gT1TUPBZvF0n62cTC/PKIe1anyDa47n/lb0hNrr+0ijxSZ9STXn7FlBi0T5CfaQLA2A20zOGUbg8VcNmxWg+djvpp5RbZ6tF3VP5E834B5LH2NfLa2W1C/0AZjfZIf3B+Q8+U59sjkqfPGIuab69Rnxg6jhSGJmK+t63ph2T0n06fmvoSP2COgLaVeRP5L6IhnmmnjVZ1r3ZSJixyijQS5Z5aXz3EfIe0XdcEcukCbp7CX0rbQ0NWCHkSeFzKKHDJzdF9kPWe59kHWyuJEZr8gYh6YOtrsL2hriLbQ9J0sxyN6EmXr8BWYv2d+kHl/IPzXnTBFbg0ZjF7
KXBnbPvB8vZTrGvlHiQDYeMb9FPE9dN1bz+V5w4+8+U7z5uFH3vwjb/6RN//Im3vCoY+8+UfefGd58+Cd582D95k3X8laiIzusnxdfOCl6UsvZKXD3efNYdGt41FzR3nzDf7LO8ibr8hWZ7C7vLlo+TBZ3HfYUp7eNG9etb1Slv3O0eEqJdtfQCNeRyhzy3ssY98Cje/t0uhrODW/V1596IlfO/nxvPpoINrRc3aYV7c+8upvnVdvf+TVP/LqH3n195VXt1bz6kUehzrGCRuIuXGuXsIoBpAlo3ZCWBdOtSGfqaO0phWQKG3AvviMmgDspLA8yGMkzENJDJ8BN7Gud4Q4OmbeHjnGiMBM+fUvVnJKieZiuN8qupg1uo7QX/PO0JmdOMujz1sFNFKieW1EjIQIAsSKyTMRhibVGmrkQsfK64nm73TvgDlJ6DyrhcgUOZDG2MDf9Bxde02X8TRyTZQFXAs6hUBYhO0BBAdq7OFTM/+U1LPz2KYWPdHcFWrR0TaquWa1I4T2SQ3MEKFWNL/wBVGwp3krPifzEcjxac6ZOV3NYejaBTQOom6RNfyLNcC9DObmtPY6YM6YOoa5z17Wy5DlJ2YmP2HkNCZcU9QcyP0iZxtk8E6+6mfRM1rnLM+NvDyhquwW8wsnMfNpI9g21ni7wk+bNeui25Q+fa+rcDGA80lyWWDduJ53C9tj5uQau0NQsW6sMD0HasM66Ntoaq4Zst0hXxJCqch6b0EvjQDTBLip5oyZAO4ngdY9hWfRfJHLHJzGA15XIZfkGL0TzbnazbHF3Jw5Jv3z75icb/olq2dn/j5UmCibfErJ30TlFPlk5tldns/u+R0cD5GnZt7JJRSPrFn4L9BPuD/IVxdyJbGduZcZctzIVf2ALXcKeKSMx1iz4Yx7H7Sv3bhlYL3yvQDmyrCHojYd/RtiQ31C1xDihxBGHnO+0B2a87XyPbXU5OQIQ9am7pDzzc3ex5wQSMwWIe/ahP6x2GPD/Bv0To/yRB16qnBhtMvYc0lpEyz4aswDsyeAeTX2ROheCmC9sOcm92NnMGewI+256XGAz6A5X+S4uecXwifQ3hrmhLG3IzqBvTV8/qSre0jy+0h+D16EvkIk9U1/SOhrPjeCz6A2VvfzLM2BdmOuR8IZgQbNuR7HtNXYX0Cvj9mHYn+IfI5nNLncg1ghiSC3A+RVLe5fUPbgC2jON1J4NKwB6GaTExYZTupZX0YSQk5Vfi0eN5jPtSJr5htII8jnTKGOsN/WVLA9XUu29qP03FPILPcx+urPwlboWnCwLxZqLtnO5ITrwm4nmuXr2QD6Y44beWDK6FEo9GevDvQsQQ1pX3vYH6GdOu4AZs7ktOHDpLk+9aJcn8aEoevq2vVgc9Q3GBufpQm4M74XAhoKvgJ9AfZ9pQaiL9epgckJtwjXZnK+eU6Y8F2OylZu5xZ0LvcTZpoZDUyfFuQ/tES+CL0nfuYu+1GW46i8Jw76vW+14pljMr2EuEIWOFC7g/jCi8azOX3UFLYzmCmEWtP4RDH88KQAn+Tehu7rKOyY7hPRNsH+BVa2/2XkQoEgNU/vR5Bf5v8DyIWv+3ZtuS57hBLtB+tlsY8rvq3ubdJX4BpDVph2illju21TBkbM2upao++jMRZlkvYXoJjs5dPfI4ZLA5u/afRhZ2zC+aXw22LTC2foZ/y2Y+dkcn4Xmvxi3z1JLp3pZGOG99P5w+WnWS24jez+3ZfJ5d1J49KZf5f4Pf/d8b9XD+efT+4ZYa/mjG7tm6vPXx6uD+Fd1gekkmaBn8250UI88/llskVObjUm30XWUNbReDcZw1VPWVbTDjKGnX7yqoxhYzVj2NtdxhA1NPbWOV9rHzlf0Rr+8ai3Lw6+h5zvPjmYwLtYpN/zOd/2L8n5VmrVkm2vTbHL0r++nY/wehm5xN1FytfZNPajPJkCFvyeg/OK3G/5/3u8zz74P984SrAuX7CrD/PiQzka4N/w+J/sRHITei79ZC2lzLwwMrzM6b6AXr4819DdBF9e3RlIuW25VslbztETJX6NR7a7obbd8UtubRdMstfR3q8G16fm5f3X6c394P7uYtIs3j0oqAqyjR5vH8z0R8yfK35yfI9ku37nejpNzJcuHqf3y4S/ng+n/4JHJd+86i180pgb9vFFsvDin+uvQ3nk66/Ze3fy+Asnwsve4mfFqfgqO9e36cXXaTYM8+7+7jp772gIyhXfWRhyuXF8prxpfmL/tToVc9t5mu5fT07Q1EmUGefMGtUxkitrbgs53HqI5vaytGFywO8vS/aSJBWC9YQs8dXqyf4cAXOruxcw/lSe9SJZ+MID5qJ+WzjzyrRV28rmqRYj+Raldf0H9trY1x/8QdlQpFgPetM/PbbV2WIizYuboJt3OBfNmtlWvZ0PxOTdlP43uZ/1b0TYShCUrcXGWbGHRox3YgqrXqm6PmE5n3Xplaz1AX+OlbspS4bR2pH3smlEzS68l5Pr48df47406+UDjojdl/tSrZWst/ZfNo2v2QWbGl/vH+4fp7+GUwf83w45JdHBauGHYZovq6u8Hhjsl2mbBt18RAZbMsyzNjXA7pdhld/Vs/vJiOAP8uKqb+TFbS9Mmzqqd2RZ4z/EstruU4kcv1wu2evDF99ME2xRx4dqvIcnKdG/v7u77k8vLrOvbx+yZtWNlWW/3M2a/xeJYG0CBMgo8yoSuFuMs/0h8VoRJld4v3mIZ/7JdmKmvFon4xKZNlDJqZVqi/9VdkG0LcbQvmuimZ+4m0TtrWjo/Bk0LP9KGm4Rcv8ONKz9ShpuMb31d6ChvcGLfjMabjF29XegofcrabjFvNTfgYaVX0nD3zay+4H9nzyF/yft/7ibAjtvD/s/u0nPl73qomCu/8C1nv3B67Pt7hZIXvvMtn97vL0d3g3+7/8e7/p62veRe/dLy/n2su1sSjBh1Hx1XSstvP06RbQpsNxFVuAgsq9+TVagXD5wfsBKPM8nx3s2K/ALkoLepjh4Fxw7fbz9NQy78q+rV96uGOYum+vqMsN+bRrHe79lH7ml/6GE7ksm/v0ndAu5e97uZ5tuy3bf273d316YNuUldqEIOhd3N39AQjcHJ8233Pw333LLmrhfseDfaoU/69gvrvmXi3E2LN61lfmjfne21JbXn/9e/G7XWt4udKv+C373yoy7H/6BV7N366h7ry+9cLxN+uT4tBNuq0/WlfNWTv5Elsb/nQxvh9MntNCqoinjf7v0Eb1l5rhuDuCwpGvcjdUzvl/KoqLXqZsfSm/ZW2jsm4srGl0s3AEMrzl+AgMis+bj62n/ZuN6d4zUbHLY12x2zqZ9ss9b4Z272al3ahtwOlxrF1tb3hYZtf8USsdt13kGoQMokfNj9wadDpMroEd9Dl+D8DHs/RtN4s9fvp/fKvrG+V332+Uyysc8TOPHqBPby39tue75pH8XPVyKQg5GzccQHRfOZHz1aYDXVph4QKhY/h++czuZXFlfvl8TxQo9Sk99t/7ktf8Z9kbXn5qV4LA+CBpzImwdp95t4N7ctBJ577
Be7d92R1eHOapWcu58ufm7UZ8cL/z9fbrts+a9V/OoET6u3WvalO/guevy75j/rt4PulvOT4F08uX7hRPXgrsvd73hYPbldGb+xhuRviLhx3WjKefso1vXC0fyXtrDdea8Z97fOOFxI9zzdcPiuo0wu64foqdqmSeQscki4ln/9mr49wrtN/S2WbxWJ7vWAOd30KF43cG9jOfyuc972QuNe37+rKN+9qwO0NauO4F8HlvaX/NlGHm9zjdcdfAPe4TixQ4/kbbgPjyMv5m/B8W44V1Ni1UCzg14hSjBHeAK7Mn2SYGCs3Z0uAcKj3o5hcMkozCeMKPwPqSpl+YU7oQFhZOMwoNkLxS2w5zCbS+jcKsRLKydfcgTr6VP2+gvrJ16cd1OsI/rugWV4wVdEez5urG9Yf2sXLe/D6lyN+vG/sJ1x3vWjYOF6473fV3v5evuhc4L/M3pbEWnuX6c70k/DrPV258X+rGZ68f9WKBmph/dqNCPhQVqDPbB2VySW6olSOFWboHa6V4o7NHGkcKDWU7hwsanrT1YIH1CpXBhgUIvo3CY7kNH9QsLNMrXDrwNQ+FwPxbIaeUU7lkZhfGEGYX3Y+PbhY0f5hS2CgrvRUskBYWDnMJRbuPbs/1QOByuUzgqbK21FwqnuQzDDzYU7nmFF7UPLUE/mBQmSkz2rCrPidHJO39Wc63suoWn2oj3fV2nuG6QbLpuay/XDZKF69rFdceF5Un2cl174boLEUGuj509Pa+z8bqj/Hn9/fA32MjfVicu/JnGPjQVpr9nOkOOD7P1CzSkTFthQvVennm2sJbyZ+YUkUw7A4loH5rSeOQDQ9mFCGEhqt+HtsRsh+Kp61bB6faeJaxuLVz3CQlr7kPCZgsSNluUsPC0kLB9+K/ynLmEGfrOs+fMab0H7aXXomx5Ue5zjN1MqkHz3dN5sCkS84vIBNffi8+Re3XtBX2ZryJ7HxSOGnnmJi28ujinMOzi7ikcFrZwY+wXenuhcFLEfs0Fvzn3dNJ9UDgsZHherNkwp3CY7iOqb+brtVg3oV94knvx1fP4oJDe/l71w0IsX6zT3NqO9yNFbqH986yjX0TVe7rqvMg6Ft5jq5PLrr8X73EUZzxNirzFOC307z4ikgUKM2+ySmHavt0/a5GjSYtdgszOBpn93cN1x9l1rSWbPtzvdaMiY2IVmn/xumN/L5JsFV5M2y8keSHfuJf8eZx7bQu5mvkCd/e0H1Pkavq5XlzcadtPJiHXxW6rkKkiVyPX328meZxTOCoovJ8IxOVqVQr7BYXDQp72QuHxJgrPcs24lyzyYNM+Zh7H70l+c+oarzCzu+Rpbx8rdWHHKY81nJyy++Bm4aPZUc7NwrbuZx8xyjVDXOwjFpHccu64euxGznnqof4E9Y3TC1Ot8n/8XRXxeOXVGiwXLRbrBd6Oi4k8G0o+nWLC/euqeV7X22W9ruYzO9ZKTScrAH2prHu5wNN5ucJztYZzucZzi4rODfWczp6qOdeKKT1nuTTYsyulFdbrrZrfPVOX6VVePJU+4tqpfqpGs/p+ZKvyc7JV+cNlq/yyQGwrW5UNYup4a81I+5Cz2q+Qs5/k/P55WvUqzzQVeVa55C+y5SfZXS3/2Hl3yG9/X01lf2Eten/5h2Lbp39VDk61Mrgi57HkXf3/txfTm/7FBB+vfGJ+Jb6CYx1fTKdDWeGO9QnV30+e40g++b/y78R8H9/jTWxZgs5rbpDLlRrnDY2hF6aOti/iiCaKtQLb2+HVFZfKprLr5eVzdfHtJq/S3oWH5C+3D5Q9t1StrnlHdmVTN8wuipz9TVA0ypFvDxd3uxax4wXuPylpflOOjjdJiN7Sf0pCvKpT8pb7W73qhuZW3y9lfZO7F5Kne9p2KyRrKuSTnPP/7f7l4Lj5//uQBzDfLa1gwj6lNPYqEpugg/ZhmupXI6HuXR8kCC+mX4fzRfmofxgRQKZUVgfcPikSe7Mj+0IIXpWH5u1l6UUn4/r28sPBUNmwV4IVUR+1dfzoPQrGvjCJVwWjI7y6k99cQSjWBeTyf/Kd7/93+CEXJnaq/mK52Bfs8WafQtyHu/791fBuwJglEX58CAI/tRxglz+FN++KIdkwKmePcvEuMLj+i0jJy7NT3K0F6ZVZnNoqgG21vHyKJxI1PwoG8NR1nmztXwXtWvnB61v7/S0wuF6F3WGEbXl1XHztG7mp/LXcPb7cH77YY/6QiTTuBDp0oeE4b0HGye/u2fm8SVetSehOcf2dVT8nawFfyqJs2mDyqyV7F5hd/tOYXbsNkhW4wWpciz3bbMb+g2GxKCy7VN1kuwrL9qZ2rPxWudtP13fXXy+m9HtfyuP+Px9Oj37qVZeBYcqbYEH2JxtPJ113Kxt/f/BbP7UtP9fymXaoVEr2m2qEt8qiPhgF8GEWHM9ZTp27NZSZrMMGv1XatPx02nQnMkCF/8+34QfvN/mENa/kbcpx7Izh8vLrPfhShATIPoTiqOEb/38=</diagram></mxfile>
2108.11636/paper_text/intro_method.md ADDED
@@ -0,0 +1,56 @@
1
+ # Introduction
2
+
3
+ ![ (a) Given lattice points sampled on input sketches (Left), our proposed Lattice-GCN-LSTM network can *recreate* a corresponding vector sketch (Right). (b) Given a corrupted sketch, the resulting lattice points are used to reconstruct a *similar* sketch accordingly. (c) The abstraction level of generated sketches is controllable by varying the density of latticed points. (d) Image-to-sketch synthesis by dropping a few lattice points along the edge of an object.](opening-v6.pdf){#fig:intro width="\\linewidth"}
4
+
5
+ Research on freehand human sketches has become increasingly popular in recent years. Due to their ubiquitous ability to record visual objects [@eitz2012humans], sketches form a natural medium for human-computer interaction. Deriving a tailor-made representation for sketches sits at the core of sketch research, and has direct impact on a series of downstream applications such as sketch recognition [@yu2015sketch; @xu2019multigraph; @kipf2016semi], sketch-based image retrieval [@cao2010mindfinder; @yu2016sketch; @pang2017cross; @pang2019generalising], sketch-3D reconstruction [@lun20173d; @jiang2019disentangled; @wang20203d; @ranjan2018generating; @shih20203d], and sketch synthesis [@liuunsupervised; @song2018learning]. Albeit a pivotal component, designing an effective representation is challenging since sketches are typically abstract and iconic.
6
+
7
+ Prior works predominantly relied on encoding sketches in a pixelative format (i.e., an image) [@eitz2012humans; @yu2015sketch; @sangkloy2017scribbler]. Although it provides the convenience of using off-the-shelf convolutional neural networks effortlessly re-purposed for sketches, the pixelative format lacks the intuitive exploitation of structural information. The presence of structural information is vital for sketch abstraction modeling [@eitz2012humans; @riaz2018learning], which in turn is essential for downstream tasks that dictate structural manipulation such as sketch generation [@ha2017neural; @chen2017sketch; @karras2019style] and sketch synthesis [@liuunsupervised; @song2018learning].
8
+
9
+ RNN-based approaches have consequently emerged as a means to fully explore the sequential nature of sketches [@ha2017neural]. The research convention is to use the *QuickDraw* [@quickdraw] vector format, where each sketch is represented as a list of offsets in $x$ and $y$. Thanks to the stroke-level modeling, these approaches do offer a degree of flexibility in generation and synthesis tasks, yet they do so by imposing a strong assumption -- all sketches have sequential stroke data available. This assumption largely prevents RNN-based methods from being applied to sketches without stroke data, such as those drawn on a piece of paper. A natural question is therefore -- is there a way to remove the bottleneck of requiring vector data, but at the same time preserve the structural cues that vector data provides?
10
+
11
+ To answer this question, we propose an alternative sketch representation inspired by the concept of lattice structures -- SketchLattice. We define SketchLattice as a set of points sampled from the original 2D sketch image using a lattice graph as shown in Figure [1](#fig:intro){reference-type="ref" reference="fig:intro"} (a). Such a latticed sketch representation, although seemingly simplistic, is remarkably amenable to structural deformations, thereby providing vital benefits for sketch abstraction modeling and subsequent downstream sketch generation tasks. Our proposed latticed representation can be easily and effectively encoded using a simple off-the-shelf graph convolutional network (GCN) [@kipf2016semi; @chen2019multi; @yang2018graph], resulting in considerably fewer model parameters ($13.5$ times fewer) as compared to recent state-of-the-art techniques. This not only makes our proposed sketch representation easily deployable, thus making further progress towards practicality, but also reduces the difficulty of optimization and training, giving a competitive performance.
12
+
13
+ Specifically, each point in SketchLattice is regarded as a graph node. Geometric proximity between nodes serves as guiding principle for constructing the adjacency matrix to form graph links. Intuitively, the proposed GCN-based feature extractor learns the topology of points in a sketch object. Despite being simple, our novel sketch representation is surprisingly effective for sketch generation. In particular, using the proposed latticed representation, we show how to recover a corrupted sketch using our Lattice-GCN-LSTM network, as represented in Figure [1](#fig:intro){reference-type="ref" reference="fig:intro"}(b). Additionally, we present a novel aspect in sketch representation, where the abstraction level in the generated sketch is controllable as shown in Figure [1](#fig:intro){reference-type="ref" reference="fig:intro"}(c), subject to the density of points sampled by the lattice graph. Furthermore, our method is also applicable to the problem of image-to-sketch synthesis by simply dropping a few key points along the edge of a target object as depicted in Figure [1](#fig:intro){reference-type="ref" reference="fig:intro"}(d).
14
+
15
+ Our contributions are summarized as follows: (i) we propose SketchLattice, a novel latticed representation for sketches using an extremely simple formulation, i.e., a set of points sampled from a sketch image using a lattice graph. (ii) Our latticed representation can be easily and effectively encoded using a simple graph model that uses fewer model parameters, thereby making important progress towards efficiency. (iii) We show how the abstraction level of generated sketches is controllable by varying the density of points sampled from an image using our lattice graph.
16
+
17
+ # Method
18
+
19
+ ![A schematic representation of Lattice-GCN-LSTM architecture. An input sketch image or the edge map of an image object is given to our lattice graph to sample lattice points. All overlapping points between the *dark pixel* in sketch map and uniformly spread lines in lattice graph are sampled. Given the lattice points, we construct a graph using proximity principles. A graph model is used to encode SketchLattice into a latent vector. Finally, a generative LSTM decoder recreates a vector sketch which resembles the original sketch image. ](model-structure-v4.pdf){#fig:lats-gcn-structure width="\\linewidth"}
20
+
21
+ **Overview** We describe the Lattice-GCN-LSTM network, where the central idea is a novel sketch representation technique that (i) transforms an input 2D sketch image $\mathcal{S}$ into a set of points $\mathcal{S}^{L} = \{p_1, p_2, \dots, p_m \}$, using a lattice graph $\mathcal{F}_{lattice}$. Each point $p_i = (x, y)$ in $\mathcal{S}^{L}$ represents the absolute coordinates $x$ and $y$ in $\mathcal{S}$. We call $\mathcal{S}^{L}$ the lattice format representation of $\mathcal{S}$. (ii) Our novel lattice format $\mathcal{S}^{L}$ can be seamlessly transformed into a graphical form $G=(V, E)$ that is encoded into a $d$-dimensional sketch-level embedding vector $\Psi \in \mathbb{R}^{d}$ using a simple off-the-shelf GCN-based model. (iii) We observe how this sketch-level embedding vector $\Psi$ could help in downstream tasks such as sketch generation by using existing LSTM-based decoding models. Figure [2](#fig:lats-gcn-structure){reference-type="ref" reference="fig:lats-gcn-structure"} offers a schematic illustration.
22
+
23
+ The input to our proposed latticed sketch representation is a sketch image $\mathcal{S} \in \mathbb{R}^{w \times h}$ where $w$ and $h$ represent the width and height of $\mathcal{S}$, respectively. We extract the latticed sketch $\mathcal{S}^{L}$ from $\mathcal{S}$ using the lattice graph $\mathcal{F}_{lattice}$. Our lattice graph $\mathcal{F}_{lattice}$ is a grid consisting of $2n$ uniformly distributed horizontal and vertical lines, arranged in a criss-cross manner. The optimal value of $n$ for any given sketch image $\mathcal{S}$ can be empirically determined during inference without further training. As shown in Figure [2](#fig:lats-gcn-structure){reference-type="ref" reference="fig:lats-gcn-structure"}, we construct $\mathcal{S}^{L}$ by sampling the set of all overlapping points $\mathcal{S}^{L} = \{p_1, p_2, \dots, p_m\}$ between a *black pixel* in the sketch image $\mathcal{S}$, representing a stroke region, and the $2n$ horizontal or vertical lines in $\mathcal{F}_{lattice}$. Formally, we define $\mathcal{S}^{L}$ as: $$\begin{equation}
24
+ \mathcal{S}^{L} = \mathcal{F}_{lattice}(\mathcal{S})
25
+ \end{equation}$$
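
To make the sampling operation $\mathcal{F}_{lattice}(\mathcal{S})$ concrete, here is a minimal Python sketch, assuming a grayscale sketch image in which strokes are dark pixels and a grid of `n` horizontal and `n` vertical lines (the paper's $2n$ lines in total); the function name, grid size, and stroke threshold are illustrative choices rather than values prescribed by the paper.

```python
import numpy as np

def sample_lattice_points(sketch, n=16, stroke_threshold=128):
    """Sample SketchLattice points: intersections of an n+n line grid with
    dark (stroke) pixels of a grayscale sketch image of shape (h, w)."""
    h, w = sketch.shape
    rows = np.linspace(0, h - 1, n).round().astype(int)   # horizontal grid lines
    cols = np.linspace(0, w - 1, n).round().astype(int)   # vertical grid lines
    points = set()
    for r in rows:                                         # walk each horizontal line
        for x in range(w):
            if sketch[r, x] < stroke_threshold:
                points.add((x, int(r)))
    for c in cols:                                         # walk each vertical line
        for y in range(h):
            if sketch[y, c] < stroke_threshold:
                points.add((int(c), y))
    return sorted(points)                                  # list of (x, y) lattice points
```
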
26
+
27
+ Although extremely simple, this novel latticed sketch representation $\mathcal{S}^{L}$ is very informative since it can express the topology (i.e., overall structure and shape) of the original sketch image $\mathcal{S}$, without the need for vector data. Additionally, our latticed sketch representation is very flexible because (i) there is no constraint on the size of the input sketch image $(w \times h)$ and the original aspect ratio is maintained, and (ii) the abstraction level of the generated sketches can be modulated by changing the sampling density of the lattice graph $\mathcal{F}_{lattice}$, i.e., by varying the value of $n$. Increasing the value of $n$ would result in more detailed sketches, whereas decreasing it would lead to highly abstract sketches. Figure [1](#fig:intro){reference-type="ref" reference="fig:intro"}(c) and [4](#fig:abstract){reference-type="ref" reference="fig:abstract"} demonstrate how adding more sample points $p_i$ changes the abstraction level of generated sketches.
28
+
29
+ **Graph Nodes $V$** The SketchLattice $\mathcal{S}^{L}$ can be effectively encoded by a simple graph model which not only consumes fewer model parameters, thereby increasing efficiency, but also allows easier optimization, resulting in a better-trained model that gives state-of-the-art performance. For each point $p_i \in \mathcal{S}^{L}$ we compute a node feature $\mathbf{v}_i \in V$, where $V$ denotes the set of graph nodes. To ensure that the encoding process is amenable to structural changes, each point $p_i$ is tokenized by a learnable embedding function $\mathcal{F}_{emb}(\cdot): \mathbb{R}^{2} \mapsto \mathbb{R}^{d}$ that maps the absolute point location $p_i=(x,y)$ to a $d$-dimensional vector space. Formally, $$\begin{equation}
30
+ \mathbf{v}_i = \mathcal{F}_{emb}(p_i)
31
+ \end{equation}$$ where $\mathbf{v}_i \in \mathbb{R}^{d}$ is the resulting tokenized vector representation. Maintaining the original aspect ratio, we resize and pad the input sketch image $\mathcal{S} \in \mathbb{R}^{w \times h}$ to a size of (256, 256) before applying the lattice graph $\mathcal{F}_{lattice}$. Hence, the vocabulary size of the learned embedding function $\mathcal{F}_{emb}$ is $256^2$. Our intuition is that, by using an embedding function $\mathcal{F}_{emb}$ that tokenizes each point location $(x, y)$ to a $d$-dimensional vector $\mathbf{v}_i$, the model learns similar embedding features for nearby points. Hence the resulting representation is more robust to the frequently observed shape deformations, which largely benefits sketch abstraction modeling in the generation tasks.
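
A plausible realization of $\mathcal{F}_{emb}$ is a learnable lookup table over the $256^2$ possible point locations. The following PyTorch sketch makes that concrete; the class name and embedding dimensionality are illustrative assumptions.

```python
import torch
import torch.nn as nn

class PointEmbedding(nn.Module):
    """Tokenize an absolute point location (x, y) on the 256x256 canvas into a
    d-dimensional node feature, as a rough analogue of F_emb."""
    def __init__(self, canvas_size=256, d=128):
        super().__init__()
        # one token per (x, y) cell -> vocabulary of 256^2 entries
        self.embed = nn.Embedding(canvas_size * canvas_size, d)
        self.canvas_size = canvas_size

    def forward(self, points):
        # points: (m, 2) integer tensor of (x, y) coordinates in [0, canvas_size)
        tokens = (points[:, 1] * self.canvas_size + points[:, 0]).long()
        return self.embed(tokens)  # (m, d) node features v_i
```
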
32
+
33
+ **Graph Edges $E$** A straightforward yet efficacious approach based on geometric proximity principles is adopted to construct the graph edge links among nodes from the corresponding latticed points' locations $p_i=(x,y)$. Specifically, we first compute the Euclidean distance between every pair of nodes $(\mathbf{v}_i, \mathbf{v}_j)$ by $d_{i,j} = ||p_i - p_j||_{2}$. Then we follow either of two options: (i) each node $\mathbf{v}_i \in V$ is connected to its *nearest* neighbor, or (ii) each node $\mathbf{v}_i \in V$ is connected to its *nearby* neighbors that are *"close enough"*, i.e., $norm(d_{i,j})<d_{T}$, where $norm(d_{i,j})$ is a normalized distance in (0,1). $d_{T}$ is a pre-defined distance threshold whose value is empirically found to be $0.2$ in our case. An adjacency matrix $\mathbf{A} \in \mathbb{R}^{m \times m}$ is constructed by setting the link strength to $a_{i,j} = 1 - norm(d_{i,j})$ for a pair of linked nodes ($v_i, v_j$), such that a smaller distance results in a larger score. For all disconnected node pairs, $a_{i,j}$ is set to $0$.
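
The second ("close enough") option can be sketched as follows; normalizing $d_{i,j}$ by the maximum pairwise distance and zeroing the diagonal are assumptions of this sketch, since the normalization is not pinned down above.

```python
import numpy as np

def build_adjacency(points, d_threshold=0.2):
    """Build the adjacency matrix A from lattice point locations: connect pairs
    whose normalized distance is below d_threshold with strength 1 - norm(d)."""
    pts = np.asarray(points, dtype=float)            # (m, 2)
    dist = np.linalg.norm(pts[:, None, :] - pts[None, :, :], axis=-1)
    norm_dist = dist / (dist.max() + 1e-8)           # normalized distance in (0, 1)
    adj = np.where(norm_dist < d_threshold, 1.0 - norm_dist, 0.0)
    np.fill_diagonal(adj, 0.0)                       # no self-links (a choice of this sketch)
    return adj                                       # (m, m) matrix of link strengths a_ij
```
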
34
+
35
+ Given the graph nodes $V=\{\mathbf{v}_1, \mathbf{v}_2, \dots, \mathbf{v}_m\}$ and their corresponding adjacency matrix $\mathbf{A}$, we employ a simple graph model to compute our final sketch-level latent vector $\Psi \in \mathbb{R}^{d}$. The resulting vector $\Psi$ allows downstream applications including sketch healing, and image-to-sketch translation. We employ a stack of $K$ identical graph encoding layers, followed by a fully connected (FC) layer, batch normalization and non-linear activation function $Tanh$.
36
+
37
+ For each $i^{th}$ node $\mathbf{v}_i^k$ in graph encoding layer $k \in [1, K]$, a feature propagation step is executed to produce the updated node feature $\hat{\mathbf{v}}_i^{k}$, where each node $\mathbf{v}_i^k$ attends to all its linked neighbors with non-zero link strength, defined in the adjacency matrix $\mathbf{A}$. We compute $\hat{\mathbf{v}}_i^k$ as: $$\begin{equation}
38
+ \hat{\mathbf{v}}_i^{k} = \sum_{j=1}^{m} a_{i, j} \mathbf{v}_j^{k}
39
+ \end{equation}$$ Such a mechanism incorporating spatial awareness not only facilitates message passing among connected nodes, but also adds robustness to missing parts in a lattice sketch while encoding. This greatly benefits downstream tasks such as sketch healing [@su2020sketchhealer]. A graph convolution is applied to the resulting rich spatially dependent feature $\hat{v}_i^k$ as: $$\begin{equation}
40
+ \mathbf{v}_i^{k+1} = [ReLU(MLP_{\Theta}(\hat{\mathbf{v}}_i^k))]_{\times 2}
41
+ \end{equation}$$ where each encoding layer consists of two multi-layer perceptron (MLP) units, each of which is followed by a rectified linear unit (ReLU). We employ dropout and a residual connection in each encoding layer as shown in Figure [2](#fig:lats-gcn-structure){reference-type="ref" reference="fig:lats-gcn-structure"}. The final feature vectors of nodes from the $K^{th}$ graph encoding layer are integrated into a single vector, which is further fed into a sequence of an FC layer, batch normalization, and $Tanh$ to compute our sketch-level latent representation $\Psi \in \mathbb{R}^{d}$.
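
Putting the two equations together, one graph encoding layer might look like the following PyTorch sketch (feature propagation through $\mathbf{A}$, two MLP+ReLU units, dropout, and a residual connection); hidden sizes are illustrative.

```python
import torch
import torch.nn as nn

class GraphEncodingLayer(nn.Module):
    """One graph encoding layer: v_hat = A @ v, then two MLP+ReLU units,
    with dropout and a residual connection."""
    def __init__(self, d=128, dropout=0.1):
        super().__init__()
        self.mlp1 = nn.Linear(d, d)
        self.mlp2 = nn.Linear(d, d)
        self.drop = nn.Dropout(dropout)

    def forward(self, v, adj):
        # v: (m, d) node features, adj: (m, m) link strengths a_ij
        v_hat = adj @ v                        # spatially aware feature propagation
        out = torch.relu(self.mlp1(v_hat))     # first MLP + ReLU
        out = torch.relu(self.mlp2(out))       # second MLP + ReLU
        return v + self.drop(out)              # residual connection
```
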
42
+
43
+ Following [@ha2017neural; @chen2017sketch; @su2020sketchhealer], we design a generative LSTM decoder that generates the sequential sketch strokes in vector format. Accordingly, the sketch-level latent vector $\Psi$ is projected into two vectors $\mu \in \mathbb{R}^{d}$ and $\sigma \in \mathbb{R}^{d}$, then from which we can sample a random vector $z \in \mathbb{R}^{d}$ by using the reparameterization trick [@kingma2014vae] to introduce stochasticity in the generation process via an IID Gaussian variable $\mathcal{N}(0,I)$: $$\begin{equation}
44
+ \label{z}
45
+ \begin{aligned}
46
+ & z=\mu + \sigma \odot \mathcal{N}(0, I) \\
47
+ & \hspace{-0.5cm} \mu=W_\mu \Psi, \; \; \;
48
+ \sigma = exp(\cfrac{W_\sigma \Psi}{2})
49
+ \end{aligned}
50
+ \end{equation}$$ where $W_{\mu}$ and $W_{\sigma}$ are learned through backpropagation [@ha2017neural]. The latent vector $z$ is used as a condition for the LSTM decoder to sequentially predict sketch strokes. Specifically, the output stroke representation $s_{t-1}$ from the previous time step, together with latent vector $z$ serve as inputs to update the LSTM hidden state $h_{t-1}$ by: $$\begin{equation}
51
+ h_{t} = LSTM_{forward}(h_{t-1}; [s_{t-1}, z])
52
+ \end{equation}$$ where $[\cdot]$ represents the concatenation operation. Next, a linear layer is used to predict an output stroke representation for the current time step, i.e., $s_{t} = W_s h_t + b_s$, where $W_s$ and $b_s$ are the learnable weight and bias. The final stroke coordinates are derived from $s_t$ with the help of Gaussian mixture models to generate the vector sketch format, represented by $\mathcal{S'}$. We refer readers to [@ha2017neural; @quickdraw] for more details.
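
The reparameterization and a single decoding step can be sketched as below; the stroke dimensionality (5, in Sketch-RNN style), the hidden size, and the omission of the Gaussian-mixture sampling of final coordinates are simplifying assumptions of this sketch.

```python
import torch
import torch.nn as nn

class LatticeSketchDecoder(nn.Module):
    """Reparameterize the sketch embedding Psi into z, then decode strokes with
    an LSTM conditioned on z at every step."""
    def __init__(self, d=128, stroke_dim=5, hidden=512):
        super().__init__()
        self.w_mu = nn.Linear(d, d)
        self.w_sigma = nn.Linear(d, d)
        self.lstm = nn.LSTMCell(stroke_dim + d, hidden)
        self.out = nn.Linear(hidden, stroke_dim)

    def reparameterize(self, psi):
        mu = self.w_mu(psi)
        sigma = torch.exp(self.w_sigma(psi) / 2)
        return mu + sigma * torch.randn_like(sigma)       # z = mu + sigma * N(0, I)

    def step(self, z, s_prev, state=None):
        h, c = self.lstm(torch.cat([s_prev, z], dim=-1), state)
        s_t = self.out(h)                                  # s_t = W_s h_t + b_s
        return s_t, (h, c)
```
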
53
+
54
+ Our proposed graphical sketch encoder and the generative LSTM decoder are trained end-to-end for sketch generation. Note that, although we require vector sketches to train the LSTM decoder for the purpose of vector sketch generation, our model works entirely on image sketch input, rather than vector data, during inference. Following [@ha2017neural], the goal is to minimize the negative log-likelihood of the generated probability distribution in explaining the training data $\mathcal{S}$, which can be defined as: $$\begin{equation}
55
+ \min E_{q_{\phi}(z|\mathcal{S})}[- \log p_{\theta}(\mathcal{S}|z)]
56
+ \end{equation}$$ which seeks to reconstruct the vector sketch representation $\mathcal{S}$ from the predicted latent vector $z$. Upon training, the decoder generates a vector sketch conditioned on the graphically encoded latent vector $z$, obtained from our lattice sketch $\mathcal{S}^{L}$ given any image sketch, thus being more effective for practical applications.
2108.13493/main_diagram/main_diagram.drawio ADDED
@@ -0,0 +1 @@
 
 
1
+ <mxfile host="app.diagrams.net" modified="2021-05-15T16:49:15.605Z" agent="5.0 (Macintosh; Intel Mac OS X 10_15_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.128 Safari/537.36" version="14.4.2" etag="z-WugH55xxoDMyrUL0yf" type="google"><diagram id="9cJfTQ3g73yOOs9PvBAK">7V3bcqM4EP0a1z5Fhe7wOHZmdh92qqYq2drdpynGxjYVYrw2mSTz9SsZARISvoLjG+OqGCGQ6dPqPt1qmB4ePL/9vgjn06/pKEp6yBu99fB9DyHocyT+yJb3vIUjmjdMFvFIdaoaHuJfkWr0VOtLPIqWRscsTZMsnpuNw3Q2i4aZ0RYuFumr2W2cJuao83ASWQ0PwzCxW/+OR9lU3YXnVe1/RPFkqkYmxYHnsOirrrCchqP0NW9a9cGfe3iwSNMs//b8NogSKbtCLPmFvjQcLX/XIppl25ygcPgZJi/q1tTvyt6Le12kL7NRJPvDHu6/TuMsepiHQ3n0VaAr2qbZc6IOj9NZpuBCTO7HSTJIk3QhGmbpTLT3R+FyWl5O9i+O9xD2PMYGA9G+zBbpU9R8pJC6EFhf3UK0yKK3RjHAUrhCKaP0OcoW76KLOsEHkPjaJ8gvoLRTydB7raDGXgAYCqpP3mOqgc58dVqolG1SDlsBIr4oTNz44K7xqctfblvgZuEzGKgzO8EHEwoCvAkURAjweEdIEAcSoo18+97j/ecev+/Rz+KbJ7+hwc+yddXpQNA0MISoR2Hkj4cuENjQj36MHTB3AkqAADUnCuWA+xYsAQcE2qhwCig7HBe6DpfQiUvYDS7j8RgNnbiM2A9G2cfhArXpczRgWAMw9xumhrjxzJRzmMSTmfg+FAKIhEj7Ujyx8Maf1IHneDSSp/cX0TL+Ff5YXUoao3kaz7LVPdB+j97La71k6TJHwGUPWxD/HUalWEt6AwKHt9CggNSzgSAtmC2+DoU1E+HsUUBCuT1d5qb7gAwD7sFqQzYmpCNM/M1OPZqNPkmeKuWdhMtlPFzvx4VUFu//KHmvdv6VO4AWu/dv+sH7d7WXDxyNanQ3CxeTqLjj3UXvbfLULm2naySrxvomFakaiBFAtA0TY9hAmDF9I+bll+nLYhipK+oMuTYIRB7gxKs+ph5haWyR9qHmMLkkrWFWKlKKbiutCdrXmgb0Tx9q35zLPgbEr6Y69feDOjCx9MlOo7SHdBHlbuKa8Jq5pkAIINuRdstpIFyHTOhE5urYJiYVBsdDxpXJuBq6ac4MbhpLckS6CV0JixvfFJOCUeB5lXNhdjjWGd+ErtzFBxHOJhu0loJCb3c4ToKDSqPnYV59+H7MZAMJFSYXePrx7qiJK91yoCptrRKnD7/JS7FPQMCrEHM/7Nez0vVjtAh8UzqnxkmfrpmTUggBPDrzaUrx1Djp0zVzUhp8RLTgSvRcJyeliAEHOToKJXUlTm6UVPguKMlJ5byojUlXjBS5MhzHohEGs2TnQi1qzJISH/jaFuxHLjYQS+JTYGQ3u6IXyJVYufFKN69kmAEf1Wd1u7xy/RgtAm/nbQAAF2eLoccB57q8zXmGaxUoti3uzBQ3ZWx6dCDLlYREhZv8Ktzk93IZ3ekuxWXj+TLaTBrD5TyvyRrHb5JoHocTYmZOXehtFDmvrSA5inwauhwESFOliRMQeDGANJRUfTweTRUmTjyeLgYPyiA4yfnhykQcFMTWpSt6xrOJkqza66dZlj4rD6LaHlNxpfs7VyBM5b+eVV+Xb1aArCrsWgCRcIeVs5ELfECRg6fgNhyKKx9xIJWriZF8kv9kbvktzv5R58jvWg5a7FUpaLlTZKCPmrkuvOsJUkzsyQqYKjlNuKk5xKT+/p6pa+rVIgxm3p5cxtV5Ua1Mo0Wm2ZSN+etC43/CCUDMWhQo8IWyKA1XG7GtRFeUsykjY3nU387TlTKEQRBYXk9bH5eLtCUyJ+JY8V5JmcdpLDTfrA7fo+D8mJZc3MSXOCl+4dZ2Pbd4pl03bH1wsraeUOHwoV99asWQXK5b6Fsntr6B4bdv6vFe2aQr1WRyXpqMPaAvqNdWQ7mZ1Kpp2NnpsZ0cu+lxkx7Tmx6frB7bOcZvnx8tVf5Qwl2bBuXzbC3QQYIl1da8b835uh6a4h1x74Lma1B8fbw7dTTKpz9bQEOm4gPWuOTlhgPCrvDooKDqrLIlJ2ibN2RGVvXNnhW/7WqcMQ1EkFhsPt9pkBZt8xbp1JsCnpQCroqZNHqwJztYr4D1QTqrLMRb5IvbfMa9zME35ey7ys2XZcxltaAjMS+AtzUI8jY8jZ3/fJi+jMdCxifj+h2vhCiNRysIUMBxUz0UhUTMh+qonZfzHbO7jSUTvNuzfa4XMmyv/yWzPcgaw46ssR7SFcVZekhXWIsTtNqQmY95M5Pnt1SxtSGm49QZV7Zvtgvya+gsSzKlcD358p1C59h/L/KFNv0qvaA1aUpeNMoL3OUm4pPogOD8bdW1OolN8r9q1SBM5tOwXlSb/xhxH/nvKU6pzyuhgX+GP6KkO4O3wcKVEzKRP6MfDp8mK3+3rQdSLzlSP6FXKrg+RXIL0zxHpNIQZQ4XURJm8U9zXu5AXiAQhlLbzGlwhwWv1TdkDpCOx8voYOXcLQF8NQaVX5JBZdh8uKqbRYvjGVTk0Nm6fVtTrv4BhqxDg0XW66AHEAtaMVfC9DHTIJn4Qyjh17baCO3YqyO/5+tkYiDEkPNZ8e7CIGInQG9hkAmJzDx9RBhEdsuFXrLXNl7Ac3AN2Z1wY4D6VbIn4F257MAcGXvCfDaTvZZ8NPUQ4L7moztbySKubOkt6jnRqCc3KMeKekwaYS7o3lFQfzNUO8Rht2Lbm8k8A5PpB4BQO34+X5vZVHh7nYENX6uHlxbYbJHZvsjAhnkgQLpwHS8f6C7IoXZy9hbkGPAIfwy1g7Zx7yrIobfUpNNjH1yqd0dQADy7dKcbLx0EwIfa9O7m6RgZPqFAc9KdvV20eBzsFticQ2BDN0yN4wY2tZR4K8SBbpERvZnJkzeT0F8VW1rac752cs3T+dcXzOSz9EKDGbFb/Uc8effqfzPCn/8H</diagram></mxfile>
2108.13493/paper_text/intro_method.md ADDED
@@ -0,0 +1,182 @@
1
+ # Method
2
+
3
+ We first provide a formal definition of the problem of scientific exaggeration detection, which guides the approach described in §[3](#sec:approach){reference-type="ref" reference="sec:approach"}. We start with a set of document pairs $\{(t,s) \in \mathcal{D}\}$, where $s$ is a source document (e.g. a scientific paper abstract) and $t$ is a document written about the source document $s$ (e.g. a press release for the paper). The goal is to predict a label $l \in \{0,1,2\}$ for a given document pair $(t,s)$, where $0$ implies the target document *undersells* the source document, $1$ implies the target document accurately reflects the source document, and $2$ implies the target document *exaggerates* the source document.
4
+
5
+ Two realizations of this formulation are investigated in this work. The first (defined as **T1**) is an *inference* task consisting of labeled document pairs used to learn to predict $l$ directly. In other words, we are given training data of the form $(t, s, l)$ and can directly train a model to predict $l$ from both $t$ and $s$. The second (defined as **T2**) is a *classification* task consisting of a training set of documents $d \in \mathcal{D}'$ from **both** the source and the target domain, and a classifier is trained to predict the *claim strength* $l'$ of sentences from these documents. In other words, we don't require **paired** documents $(t,s)$ at train time. At test time, these classifiers are then applied to document pairs $(t,s)$ and the predicted claim strengths $(l'_{s}, l'_{t})$ are compared to get the final label $l$. Previous work has used this formulation to estimate the prevalence of *correlation to causation* exaggeration in press releases [@yu2020measuring], but has not evaluated this on paired labeled instances.
6
+
7
+ Following previous work [@yu2020measuring], we simplify the problem by focusing on detecting when the *main finding* of a paper is exaggerated. The first step is then to identify the main finding from $s$, and the sentence describing the main finding in $s$ from $t$. In our semi-supervised approach, we do this as an intermediate step to acquire unlabeled data, but for all labeled training and test data, we assume the sentences are already identified and evaluate on the sentence-level exaggeration detection task.
8
+
9
+ <figure id="fig:mt-pet" data-latex-placement="t">
10
+ <img src="figures/mt-pet.png" />
11
+ <figcaption>MT-PET design. We define pairs of complementary pattern-verbalizer pairs for a main task and auxiliary task. These PVPs are then used to train PET on data from both tasks.</figcaption>
12
+ </figure>
13
+
14
+ One of the primary challenges for scientific exaggeration detection is a lack of labeled training data. Given this, we develop a semi-supervised approach for few-shot exaggeration detection based on pattern exploiting training (PET, @schick2020exploiting [@schick2020small]). Our method, multi-task PET ([MT-PET]{.smallcaps}, see [2](#fig:mt-pet){reference-type="ref+label" reference="fig:mt-pet"}), improves on PET by using multiple complementary cloze-style QA tasks derived from different source tasks during training. We first describe PET, followed by [MT-PET]{.smallcaps}.
15
+
16
+ PET [@schick2020exploiting] uses the masked language modeling objective of pretrained language models to transform a task into one or more cloze-style question answering tasks. The two primary components of PET are *patterns* and *verbalizers*. *Patterns* are cloze-style sentences which mask a single token e.g. in sentiment classification with the sentence "We liked the dinner" a possible pattern is: "We liked the dinner. It was `[MASK]`." *Verbalizers* are single tokens which capture the meaning of the task's labels in natural language, and which the model should predict to fill in the masked slots in the provided patterns (e.g. in the sentiment analysis example, the verbalizer could be `Good`).
17
+
18
+ Given a set of *pattern-verbalizer pairs (PVPs)*, an ensemble of models is trained on a small labeled seed dataset to predict the appropriate verbalizations of the labels in the masked slots. These models are then applied on unlabeled data, and the raw logits are combined as a weighted average to provide soft-labels for the unlabeled data. A final classifier is then trained on the soft labeled data using a distillation loss based on KL-divergence.
19
+
20
+ We adopt the notation in the original PET paper [@schick2020exploiting] to describe [MT-PET]{.smallcaps}. In this, we have a masked language model $\mathcal{M}$ with a vocabulary $V$ and mask token `[MASK]` $\in V$. A pattern is defined as a function $P(x)$ which transforms a sequence of input sentences $\mathbf{x} = (s_{0},...,s_{k-1}), s_{i} \in V^{*}$ to a phrase or sentence which contains exactly one mask token. Verbalizers $v(x)$ map a label in the task's label space $\mathcal{L}$ to a set of tokens in the vocabulary $V$ which $\mathcal{M}$ is trained to predict.
21
+
22
+ For a given sample $\mathbf{z} \in V^{*}$ containing exactly one mask token and $w \in V$ corresponding to a word in the language model's vocabulary, $M(w|\mathbf{z})$ is defined as the unnormalized score that the language model gives to word $w$ at the masked position in $\mathbf{z}$. The score for a particular label as given in @schick2020exploiting is then $$\begin{equation}
23
+ s_{\mathbf{p}}(l|\mathbf{x}) = M(v(l) | P(\mathbf{x}))
24
+ \end{equation}$$ For a given sample, PET then assigns a score $s$ for each label based on all of the verbalizations of that label. When applied to unlabeled data, this produces soft labels from which a final model $\mathcal{M}'$ can be trained via distillation using KL-divergence.
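
As a concrete and deliberately simplified illustration, the snippet below scores the verbalizers of one exaggeration-detection PVP with a masked language model; the backbone, the example claims, and the particular verbalizer words are illustrative assumptions, not the exact configuration used in our experiments.

```python
import torch
from transformers import AutoTokenizer, AutoModelForMaskedLM

tokenizer = AutoTokenizer.from_pretrained("roberta-base")   # illustrative backbone
model = AutoModelForMaskedLM.from_pretrained("roberta-base")

def pvp_scores(abstract_claim, press_claim, verbalizers):
    """Return the unnormalized score M(v(l) | P(x)) for every label l."""
    pattern = (f"Scientists claim {abstract_claim} || Reporters claim "
               f"{press_claim}. The reporters claims are {tokenizer.mask_token}")
    inputs = tokenizer(pattern, return_tensors="pt", truncation=True)
    with torch.no_grad():
        logits = model(**inputs).logits                      # (1, seq_len, vocab)
    mask_pos = (inputs["input_ids"][0] == tokenizer.mask_token_id).nonzero()[0, 0]
    scores = {}
    for label, word in verbalizers.items():
        # first sub-token is used as an approximation if the verbalizer splits
        token_id = tokenizer.encode(" " + word, add_special_tokens=False)[0]
        scores[label] = logits[0, mask_pos, token_id].item()
    return scores

# illustrative claims and verbalizers, not data from the benchmark
scores = pvp_scores("coffee intake is correlated with longevity",
                    "coffee makes you live longer",
                    {"downplays": "preliminary", "same": "identical",
                     "exaggerates": "mistaken"})
```
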
25
+
26
+ ::: table*
27
+ Name Pattern
28
+ -------------------- ----------------------------------------------------------------------------------------------
29
+ $P_{T_{1}}^{0}(x)$   Scientists claim $a$. \|\| Reporters claim $b$. The reporters claims are `[MASK]`
30
+ $P_{T_{1}}^{1}(x)$ Academic literature claims $a$. \|\| Popular media claims $b$. The media claims are `[MASK]`
31
+ $P_{T_{2}}^{0}(x)$ \[Reporters\|Scientists\] say $a$. The claim strength is `[MASK]`
32
+ $P_{T_{2}}^{1}(x)$ \[Academic literature\|Popular media\] says $a$. The claim strength is `[MASK]`
33
+ :::
34
+
35
+ In the original PET implementation, PVPs are defined for a single target task. [MT-PET]{.smallcaps} extends this by allowing for auxiliary PVPs from related tasks, adding complementary cloze-style QA tasks during training. The motivation for the multi-task approach is two-fold: 1) complementary cloze-style tasks can potentially help the model to learn different aspects of the main task; in our case, the similar tasks of exaggeration detection and claim strength prediction; 2) data on related tasks can be utilized during training, which is important in situations where data for the main task is limited.
36
+
37
+ Concretely, we start with a main task $T_{m}$ with a small labeled dataset $(x_{m},y_{m}) \in D_{m}$, where $y_{m} \in \mathcal{L}_{m}$ is a label for the instance, as well as an auxiliary task $T_{a}$ with labeled data $(x_{a},y_{a}) \in D_{a}, y_{a} \in \mathcal{L}_{a}$. Each pattern $P_{m}^{i}(x)$ for the main task has a corresponding complementary pattern $P_{a}^{i}(x)$ for the auxiliary task. Additionally, the labels in $\mathcal{L}_{a}$ have their own verbalizers $v_{a}(x)$. Thus, with $k$ patterns, the full set of PVP tuples is given as $$\begin{equation*}
38
+ \mathcal{P} = \{((P_{m}^{i}, v_{m}), (P_{a}^{i}, v_{a})) | 0 \le i < k\}
39
+ \end{equation*}$$ Finally, a large set of unlabeled data $U$ for the *main task only* is available. [MT-PET]{.smallcaps} then trains the ensemble of $k$ masked language models using the pairs defined for the main and auxiliary task. In other words, for each individual model both the main PVP $(P_{m},v_{m})$ and auxiliary PVP $(P_{a},v_{a})$ are used during training.
40
+
41
+ For a given model $\mathcal{M}_{i}$ in the ensemble, on each batch we randomly select one task $T_{c}, c \in \{m,a\}$ on which to train. The PVP for that task is then selected as $(P_{c}^{i}, v_{c})$. Inputs $(x_{c}, y_{c})$ from that dataset are passed through the model, producing raw scores for each label in the task's label space. $$\begin{equation}
42
+ s_{\mathbf{p}_{c}^{i}}(\cdot|\mathbf{x}_{c}) = \{\mathcal{M}_{i}(v_{c}(l)|P_{c}^{i}(\mathbf{x}_{c})) | \forall~ l \in \mathcal{L}_{c}\}
43
+ \end{equation}$$ The loss is calculated as the cross-entropy between the task label $y_{c}$ and the softmax of the score $s$ normalized over the scores for all label verbalizations [@schick2020exploiting], weighted by a term $\alpha_{c}$. $$\begin{equation}
44
+ q_{\mathbf{p}_{c}^{i}} = \frac{e^{s_{\mathbf{p}_{c}^{i}}(\cdot|\mathbf{x}_{c})}}{\sum_{l\in\mathcal{L}_{c}}e^{s_{\mathbf{p}_{c}^{i}}(l|\mathbf{x}_{c})}}
45
+ \end{equation}$$ $$\begin{equation}
46
+ L_{c} = \alpha_{c} * \frac{1}{N}\sum_{n} H(y_{c}^{(n)},q^{(n)}_{\mathbf{p}_{c}^{i}})
47
+ \end{equation}$$ where $N$ is the batch size, $n$ is a sample in the batch, $H$ is the cross-entropy, and $\alpha_{c}$ is a hyperparameter weight given to task $c$.
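
A minimal sketch of one [MT-PET]{.smallcaps} training step for a single ensemble member is given below; the `pvp_label_scores` helper, the batch layout, and the task-sampling scheme are assumptions of this sketch rather than details fixed by the method description.

```python
import random
import torch.nn.functional as F

def mt_pet_batch_loss(model_i, batches, pvps, alphas):
    """Randomly pick the main or auxiliary task, score the batch through that
    task's PVP, and return the alpha-weighted cross-entropy loss."""
    task = random.choice(["main", "aux"])              # T_m or T_a for this batch
    texts, labels = batches[task]                      # (x_c, y_c) drawn from D_c
    # assumed helper: raw scores s_p(. | x_c) of shape (N, |L_c|)
    scores = model_i.pvp_label_scores(texts, pvps[task])
    loss = F.cross_entropy(scores, labels)             # (1/N) * sum_n H(y_c, q)
    return alphas[task] * loss
```
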
48
+
49
+ [MT-PET]{.smallcaps} then proceeds in the same fashion as standard PET. Different models are trained for each PVP tuple in $\mathcal{P}$, and each model produces raw scores $s_{\mathbf{p}_{m}^{i}}$ for all samples in the unlabeled data. The final score for a sample is then a weighted combination of the scores of individual models. $$\begin{equation}
50
+ s(l|\mathbf{x}_{u}^{j}) = \sum_{i}w_{i}*s_{\mathbf{p}_{m}^{i}}(l|\mathbf{x}_{u}^{j})
51
+ \end{equation}$$ where the weights $w_{i}$ are calculated as the accuracy of model $\mathcal{M}_{i}$ on the train set $D_{m}$ before training. The final classification model is then trained using KL-divergence between the predictions of the model and the scores $s$ as target logits.
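
The soft-labeling and distillation step can be sketched as follows; the softmax temperature is an illustrative choice, and the weights are assumed to be the train-set accuracies described above.

```python
import torch.nn.functional as F

def distillation_loss(ensemble_scores, weights, student_logits, temperature=2.0):
    """Combine per-model PVP scores into soft labels with weights w_i, then
    train the final classifier with a KL-divergence distillation loss."""
    combined = sum(w * s for w, s in zip(weights, ensemble_scores))  # sum_i w_i * s_i
    target = F.softmax(combined / temperature, dim=-1)               # soft labels
    log_student = F.log_softmax(student_logits / temperature, dim=-1)
    return F.kl_div(log_student, target, reduction="batchmean")
```
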
52
+
53
+ We use [MT-PET]{.smallcaps} to learn from data labeled for both of our formulations of the problem (**T1**, **T2**). In this, the first step is to define PVPs for exaggeration detection (**T1**) and claim strength prediction (**T2**).
54
+
55
+ To do this, we develop an initial set of PVPs and use PETAL [@DBLP:conf/coling/SchickSS20] to automatically find verbalizers which adequately represent the labels for each task. We then update the patterns manually and re-run PETAL, iterating as such until we find a satisfactory combination of verbalizers and patterns which adequately reflect the task. Additionally, we ensure that the patterns between **T1** and **T2** are roughly equivalent. This yields 2 patterns for each task, provided in [\[tab:patterns\]](#tab:patterns){reference-type="ref+label" reference="tab:patterns"}, and verbalizers given in [1](#tab:verbalizers1){reference-type="ref+label" reference="tab:verbalizers1"}. The verbalizers found by PETAL capture multiple aspects of the task labels, selecting words such as "mistaken," "wrong," and "artificial" for exaggeration, "preliminary" and "conditional" for downplaying, and multiple levels of strength for strength detection such as "estimated" (correlational), "cautious" (conditional causal), and "proven" (direct causal).
56
+
57
+ For unlabeled data, we start with unlabeled pairs of full text press releases and abstracts. As we are concerned with detecting exaggeration in the primary conclusions, we first train a classifier based on single task PET for conclusion detection using a set of seed data. The patterns and verbalizers we use for conclusion detection are given in [2](#tab:conc_patterns){reference-type="ref+label" reference="tab:conc_patterns"} and [3](#tab:verbalizers2){reference-type="ref+label" reference="tab:verbalizers2"}. After training the conclusion detection model, we apply it to the press releases and abstracts, choosing the sentence from each with the maximum score $s_{\mathbf{p}}(1|\mathbf{x})$.
58
+
59
+ ::: {#tab:verbalizers1}
60
+ +-----------------+---------------+-------------------------------------------------------------------------------+
61
+ | Pattern | Label | Verbalizers |
62
+ +:===============:+:=============:+:==============================================================================+
63
+ | | Downplays | preliminary, competing, uncertainties |
64
+ +-----------------+---------------+-------------------------------------------------------------------------------+
65
+ | $P_{T_{1}}^{0}$ | Same | following, explicit |
66
+ +-----------------+---------------+-------------------------------------------------------------------------------+
67
+ | | Exaggerates | mistaken, wrong, hollow, naive, false, lies |
68
+ +-----------------+---------------+-------------------------------------------------------------------------------+
69
+ | | Downplays | hypothetical, theoretical, conditional |
70
+ +-----------------+---------------+-------------------------------------------------------------------------------+
71
+ | $P_{T_{1}}^{1}$ | Same | identical |
72
+ +-----------------+---------------+-------------------------------------------------------------------------------+
73
+ | | Exaggerates | mistaken, wrong, premature, fantasy, noisy, artifical |
74
+ +-----------------+---------------+-------------------------------------------------------------------------------+
75
+ | $P_{T_{2}}^{*}$ | NA | sufficient, enough, authentic, medium |
76
+ | +---------------+-------------------------------------------------------------------------------+
77
+ | | Correlational | inferred, estimated, calculated, borderline, approximately, variable, roughly |
78
+ | +---------------+-------------------------------------------------------------------------------+
79
+ | | Cond. Causal | cautious, premature, uncertain, conflicting, limited |
80
+ | +---------------+-------------------------------------------------------------------------------+
81
+ | | Causal | touted, proven, replicated, promoted, distorted |
82
+ +-----------------+---------------+-------------------------------------------------------------------------------+
83
+
84
+ : Verbalizers for PVPs from both **T1** and **T2**. Verbalizers are obtained using PETAL [@DBLP:conf/coling/SchickSS20], starting with the top 10 verbalizers per label and then manually filtering out words which do not make sense with the given labels.
85
+ :::
86
+
87
+ []{#tab:verbalizers1 label="tab:verbalizers1"}
88
+
89
+ One of the main contributions of this work is a curated benchmark dataset for scientific exaggeration detection. Labeled datasets exist for the related task of claim strength detection in scientific abstracts and press releases [@yu2020measuring; @yu2019detecting], but these data are from press releases and abstracts which are unrelated (i.e. the given press releases are not written about the given abstracts), making them unsuitable for benchmarking exaggeration detection. Given this, we curate a dataset of paired sentences from abstracts and associated press releases, labeled by experts for exaggeration based on their claim strength. We then collect a large set of unlabeled press release/abstract pairs useful for semi-supervised learning.
90
+
91
+ The gold test data used in this work are from @sumner2014association and @bratton2019association, who annotate scientific papers, their abstracts, and associated press releases along several dimensions to characterize how press releases exaggerate papers. The original data consists of 823 pairs of abstracts and press releases. The 462 pairs from @sumner2014association have been used in previous work to test claim strength prediction [@DBLP:conf/emnlp/LiZY17], but the data, which contain press release and abstract conclusion sentences that are mostly paraphrases of the originals, are used as is.
92
+
93
+ We focus on the annotations provided for claim strength. The annotations consist of six labels which we map to the four labels defined in @DBLP:conf/emnlp/LiZY17. The labels and their meaning are given in [\[tab:all_labels\]](#tab:all_labels){reference-type="ref+label" reference="tab:all_labels"}. This gives a claim strength label $l_{\rho}$ for the press release and $l_{\gamma}$ for the abstract. The final exaggeration label is then defined as follows: $$\begin{equation*}
94
+ l_{e} = \begin{cases}
95
+ 0 & l_{\rho} < l_{\gamma}\\
96
+ 1 & l_{\rho} = l_{\gamma}\\
97
+ 2 & l_{\rho} > l_{\gamma}
98
+ \end{cases}
99
+ \end{equation*}$$
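
In code, this label mapping is simply:

```python
def exaggeration_label(press_strength: int, abstract_strength: int) -> int:
    """Map claim-strength labels (l_rho, l_gamma) to the exaggeration label:
    0 = downplays, 1 = same, 2 = exaggerates."""
    if press_strength < abstract_strength:
        return 0
    if press_strength == abstract_strength:
        return 1
    return 2
```
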
100
+
101
+ ::: {#tab:conc_patterns}
102
+ Name Pattern
103
+ ------------ --------------------------- --
104
+ $P_{0}(x)$ `[MASK]`: $a$
105
+ $P_{1}(x)$ `[MASK]` - $a$
106
+ $P_{2}(x)$ "`[MASK]`" statement: $a$
107
+ $P_{3}(x)$ $a$ (`[MASK]`)
108
+ $P_{4}(x)$ (`[MASK]`) $a$
109
+ $P_{5}(x)$ \[Type: `[MASK]`\] $a$
110
+
111
+ : Patterns for conclusion detection.
112
+ :::
113
+
114
+ ::: {#tab:verbalizers2}
115
+ Label Verbalizers
116
+ ------- -------------
117
+ 0 Text
118
+ 1 Conclusion
119
+
120
+ : Verbalizers for PVPs for conclusion detection.
121
+ :::
122
+
123
+ As the original abstracts in the study are not provided, we automatically collect them using the Semantic Scholar API.[^3] We perform a manual inspection of abstracts to ensure the correct ones are collected, discarding missing and incorrect abstracts. Gold conclusion sentences are obtained by sentence tokenizing abstracts using SciSpaCy [@neumann-etal-2019-scispacy] and finding the best matching sentence to the provided paraphrase in the data using ROUGE score [@lin2004rouge]. We then manually fix sentences which do not correspond to a single sentence from the abstract. Gold press release sentences are gathered in the same way from the provided press releases.
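
A minimal sketch of the sentence-matching step, assuming the SciSpaCy `en_core_sci_sm` model and the `rouge_score` package, with ROUGE-L F1 as the (assumed) matching criterion:

```python
import spacy
from rouge_score import rouge_scorer

nlp = spacy.load("en_core_sci_sm")                      # SciSpaCy sentence tokenizer
scorer = rouge_scorer.RougeScorer(["rougeL"], use_stemmer=True)

def best_matching_sentence(abstract_text, paraphrase):
    """Return the abstract sentence closest (by ROUGE) to the annotated
    paraphrase of the main conclusion."""
    sentences = [s.text for s in nlp(abstract_text).sents]
    return max(sentences,
               key=lambda s: scorer.score(paraphrase, s)["rougeL"].fmeasure)
```
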
124
+
125
+ This results in a dataset of 663 press release/abstract pairs labeled for claim strength and exaggeration. The label distribution is given in [4](#tab:gold_statistics){reference-type="ref+label" reference="tab:gold_statistics"}. We randomly sample 100 of these instances as training data for few-shot learning (**T1**), leaving 553 instances for testing. Additionally, we create a small training set of 1,138 sentences labeled for whether or not they are the main conclusion sentence of the press release or abstract. This data is used in the first step of [MT-PET]{.smallcaps} to identify conclusion sentences in the unlabeled pairs.
126
+
127
+ ::: table*
128
+ +------------------------+-------------------------------------+-------------------------+------------------------------------+
129
+ | @sumner2014association | Description | @DBLP:conf/emnlp/LiZY17 | Description |
130
+ +:======================:+:====================================+:=======================:+:===================================+
131
+ | 0 | No relationship mentioned | \- | \- |
132
+ +------------------------+-------------------------------------+-------------------------+------------------------------------+
133
+ | 1 | Statement of no relationship | 0 | Statement of no relationship |
134
+ +------------------------+-------------------------------------+-------------------------+------------------------------------+
135
+ | 2 | Statements of correlation | 1 | Statement of correlation |
136
+ +------------------------+-------------------------------------+ | |
137
+ | 3 | Ambiguous statement of relationship | | |
138
+ +------------------------+-------------------------------------+-------------------------+------------------------------------+
139
+ | 4 | Conditional statement of causation | 2 | Conditional statement of causation |
140
+ +------------------------+-------------------------------------+ | |
141
+ | 5 | Statement of "can" | | |
142
+ +------------------------+-------------------------------------+-------------------------+------------------------------------+
143
+ | 6 | Statements of causation | 3 | Statement of causation |
144
+ +------------------------+-------------------------------------+-------------------------+------------------------------------+
145
+ :::
146
+
147
+ ::: {#tab:gold_statistics}
148
+ Label Count
149
+ ------------- -------
150
+ Downplays 113
151
+ Same 406
152
+ Exaggerates 144
153
+
154
+ : Number of labels per class for benchmark exaggeration detection data.
155
+ :::
156
+
157
+ ::: {#tab:t1_base_results}
158
+ Method P R F1
159
+ ------------ ------------------ ------------------ ------------------
160
+ Supervised $28.06$ $33.10$ $29.05$
161
+ PET $41.90$ $39.87$ $39.12$
162
+ MT-PET $\mathbf{47.80}$ $\mathbf{47.99}$ $\mathbf{47.35}$
163
+
164
+ : Results for exaggeration detection with paired conclusion sentences from abstracts and press releases (**T1**). MT-PET uses 200 sentences for strength classification, 100 each from press releases and abstracts.
165
+ :::
166
+
167
+ ::: table*
168
+ Method $|$**T2**$|$,$|$**T1**$|$ P R F1 Press F1 Abstract F1
169
+ ------------------- --------------------------- ------------------ ------------------ ------------------ ------------------ ------------------
170
+ Supervised 200,0 $49.28$ $51.07$ $49.03$ $54.78$ $59.41$
171
+ PET 200,0 $55.76$ $58.58$ $56.57$ $63.56$ $62.76$
172
+ MT-PET 200,100 $\mathbf{56.68}$ $\mathbf{60.13}$ $\mathbf{57.44}$ $\mathbf{64.72}$ $\mathbf{63.27}$
173
+ Supervised 4500,0 $58.20$ $59.99$ $58.66$ $63.26$ $\textbf{67.26}$
174
+ PET 4500,0 $59.53$ $61.84$ $60.45$ $\mathbf{64.20}$ $64.92$
175
+ MT-PET 4500,100 $\mathbf{60.09}$ $\mathbf{62.68}$ $\mathbf{61.11}$ $63.93$ $64.69$
176
+ PET+in domain MLM 200,100 $\textit{57.18}$ $\textit{60.12}$ $\textit{58.06}$ $\textit{64.29}$ $\textit{62.69}$
177
+ PET+in domain MLM 4500,100 $\textit{59.87}$ $\textit{62.33}$ $\textit{60.85}$ $\textit{64.10}$ $\textit{64.73}$
178
+ :::
179
+
180
+ For **T2** we use the data from @yu2020measuring [@yu2019detecting]. @yu2019detecting create a dataset of 3,061 conclusion sentences labeled for claim strength from structured PubMed abstracts of health observational studies with conclusion sections of 3 sentences or less. @yu2020measuring then annotate statements from press releases from EurekAlert. The selected data are from the title and first two sentences of the press releases, as @sumner2014association note that most press releases contain their main conclusion statements in these sentences, following an inverted pyramid structure common in journalism [@po2003news]. Both studies use the labeling scheme from @DBLP:conf/emnlp/LiZY17 (see [\[tab:all_labels\]](#tab:all_labels){reference-type="ref+label" reference="tab:all_labels"}). The final data contains 2,076 labeled conclusion statements. From these two datasets, we select a random stratified sample of 4,500 instances for training in our full-data experiments, and subsample 200 for few-shot learning (100 from abstracts and 100 from press releases).
181
+
182
+ We collect unlabeled data from ScienceDaily,[^4] a science reporting website which aggregates and re-releases press releases from a variety of sources. To do this, we crawl press releases from ScienceDaily via the Internet Archive Wayback Machine[^5] between January 1st 2016 and January 1st 2020 using Scrapy.[^6] We discard press releases without paper DOIs and then pair each press release with a paper abstract by querying for each DOI using the Semantic Scholar API. This results in an unlabeled set of 7,741 press release/abstract pairs. Additionally, we use only the title, lead sentence, and first three sentences of each press release.
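+ As an illustration of the pairing step, a hedged sketch of the DOI lookup is shown below; the endpoint and field names follow the public Semantic Scholar Graph API and are meant as an example rather than the exact pipeline used.
+
+ ```python
+ import requests
+
+ def fetch_abstract(doi: str):
+     """Query the Semantic Scholar Graph API for a paper's abstract given its DOI.
+     Returns None if the paper or its abstract is unavailable."""
+     url = f"https://api.semanticscholar.org/graph/v1/paper/DOI:{doi}"
+     resp = requests.get(url, params={"fields": "title,abstract"}, timeout=30)
+     if resp.status_code != 200:
+         return None
+     return resp.json().get("abstract")
+ ```
+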
2110.04176/main_diagram/main_diagram.drawio ADDED
@@ -0,0 +1 @@
 
 
1
+ <mxfile host="app.diagrams.net" modified="2021-10-02T14:29:46.685Z" agent="5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/94.0.4606.61 Safari/537.36" etag="dVhzzfq6E33-vKYpI8o7" version="15.3.5" type="google"><diagram id="bOToXViQuoNUxYJ9y57v" name="Page-1">7V1bb9s2FP41fowhiro+Nk7TYli3ohkwbC+DbNG2MMl0ZaVJ9usn2pJsU3RrV6R4LDMIEpvUhfr48eM5hxeN8CR7/ZBH6+UnGpN0ZFvx6wg/jGwbISco/7GUt12KF6JdwiJP4uqgfcJT8h+pEq0q9TmJyebowILStEjWx4kzulqRWXGUFuU5fTk+bE7T47uuo0V1R2uf8DSLUtI67M8kLpb1c3nhPuMjSRbL6taB7e8ysqg5eJewWUYxfTm4F34/wpOc0mL3KXudkJSBV+OyK9DjidymYDlZFeec8MvX+Pdl8fblb2f2+XP2K/prNn+4w87uMt+i9Ll64qq0xVsNQU6fVzFhV0EjfP+yTArytI5mLPelrPQybVlkaZU9T9J0QlOab8/FsUuC2CnTN0VO/yUHOYE9xZ5X5tDyWknB6OFa5deqPCQvyOvJJ0UNfiXxCM1Ikb+Vh1QnhGFVlxXncFB9f9nXoFMlLQ/qrk6LKs4smivvYS0/VMhegDKykGKYIxLMZyKYvVlApnMlMAc+OJhtAcxeWjDAaPlQh3h7X59pnXG32erOu/KAEoDXfSZTjF0V7NP+SLJSkGzrN/JS/v1Cs2h1eIK3qP5vb7tZb3O3aePxuE4uH+8w5yB5V846maNIWVfFMQ+iNFmsys+zss5IWeX3rEaTUr7eVRlZEsfsdCGh9pRjlFhGs+VzTj6wrAfWZtc0YVd9/628+KY6iKNRTouoSCi7053jVnA+VSW2ZJAsOCaZg9yxG4T7nxblsIByyBk7SBHr6iKoatvzOfFmwrYd++HUElSKiraN2kD33LYtAcymbQ+pbWMXXNv2B2ge8W3b9jT322FgmvbAm7YdetCatqu42w5mRNxtTwPXcfvptm1Lc9O2VRtHOlBGlgvM88Ei4+ja+6k2zLqNUKzYjddi6rdg1m4PYJEbf+3RkjbMurUZ4wFqcyv2p10zVNsZOjSDR1k/l70BSkYLZe3CrNgd1qMYGBrKInf42o25FsraFWOIYVseZe2OSX3hYekyj7JuG8MZYiwD8SEkP9CMssjG0BSmdLa/1sid0KI6o0782Zjkcd2u6IpwRKiSZMYuZdDE4WgiiC3aAprYymgCKJptaFLTBIOjicj+0EwT+/52CdLUNBSCuKJgo9ERzTRxoemIa8OjyS3riANOR0RTPI2OaKaJz48GaKeJyIc0OqKNIB44HQHo/rJTb5UgAR9R1U4Q0ejArmqaKus3KCUDZV6na/h0RaFcUXjh2lEOEYeyrzmi6oq886tHmZ/U7VhjVy/O9VjboHBGFifNju7BLk8US2Do3ikA+YzxRRWSoVuYPZEjro7K/aDc6v50C7MnmrR03UxuyUUTq9YGsihkcO1U5ns//aIs8rhVUfmM6QgKMNYvyiKnVaF90QvKLcHQr8o9u369wMz3ffpl+ZTrd8WCgaCJ8inHT4l98ePZS0rkQrcq173E6VBil8BgU1PnBQThV2CrV9Ut9/4pd1KRfdgPygiY3Ps9O5S9oMx3qtoF39e/CkaGxnscebXD2n3UGXnHo0VHgz/uZEoWyWrk30+zqMiT15H/UN9gmjcHu5PteI9/X8LnRRmrqtV0s96NPvFJrYSfPoaVhY1ys9/vFOqci8kruMRCySt4z4U6r+AnqERWMce36xt4PGu3AAmCxE+TqgcHD/So2dLlaMm/5ahSpO4THKQqkoiKd7239qvVxZ895g7BFUKjOd2m8AIUHVFgDJjonNlqrqR9i3vzISmh5E7CiE43Qweg6HRfWgJCdGB25/1L4Q0YhUaEuk0WbomQI5jZhZBQhkJFMhSI4peD3tYPLMX63v4PtXbhwcC2/wtE8UmZ0XQdm2xgaEvmA8UbE/QzcwhDC64HgJZimA1TpSgmhq+Yqjd/0rFhjgtOMRVv/tTPtB4XnGIC2m3BKKYUxXTBK2ZdwEFtfuXzmxLqVsz6hVIaUZYBK782S7dihjfnvQ9eMQMLvGLenGNjQkan3hiBuZfB2NrZiazuo7lXRs/B0y7kIufw3i+GLEAbChrWKWEdvDdfNcNChnWDZR28lzI1I5S3wzpjAJ5iJ0ADEBmneGi0Q5bVtgD9A9rVI8VaeSdapGR4NyjeMRsQHO+6r+IyvAPOO2YFguMdoM1IjRmomZ8Yni7aN+em3ADvPPh2YP1YhnfD5R1EO9A2fu/geQfRDrRvzu81duBJfkK0AyUs7w2+u9Iui4rldD7y79+N/Id/0GAXQbHneIyyJGW1LWqRSuaycT2vb43ri/xg2pWyTfSR3X209yJK2YZSMikFk1MSFgRfwilsOCWTUw5ETuHuIZCLOOUYTsnklFdvhwSKU93DG+dy6tGYU/Lf2AxSp7q7kBdxythTcpc0g+SUhM0tL+GUsafkLvoEySkJ2xNewiljT8kNJQQQOdWf3/dxyDv0aGFUyO+UXcdD5dOp/JpTVsFN3ocSq+UnGhN2xP8=</diagram></mxfile>
2110.04176/main_diagram/main_diagram.pdf ADDED
Binary file (31.7 kB). View file
 
2110.04176/paper_text/intro_method.md ADDED
@@ -0,0 +1,372 @@
1
+ # Introduction
2
+
3
+ State-of-the-art convolutional models have achieved astonishing results in various fields of application by scaling up the overall number of parameters [@karras2020analyzing; @dascoli2021convit; @dosovitskiy2021image; @Real2019ImgClass]. Simultaneously, applications of hypercomplex algebra are gaining increasing attention in diverse spheres of research such as signal processing [@NAVARROMORENO2021108022; @NAVARROMORENO202010100; @Sanei2018ICASSP; @XIANG2018193] and deep learning [@Kamayashi2021TNNLS; @Lin2021TNNLS; @Liu2021TNNLS; @Valle2018TNNLS; @Liu2018TNNLS; @VALLE2020136; @DECASTRO202054; @PaulTNNLS2015; @Hirose2014SSTNNLS]. Indeed, hypercomplex and quaternion neural networks (QNNs) have been shown to significantly reduce the number of parameters while still obtaining comparable performance [@Muppidi2021ICASSP; @ParcolletICLR2019; @GrassucciQGAN2021; @Tay2019QTRansformer; @Cariow2021Oct; @WU2020179; @VALLE2021111]. These models exploit hypercomplex algebra properties, including the Hamilton product, to carefully design the interactions among the imaginary units, thus involving $1/4$ or $1/8$ of the free parameters of their real-valued counterparts. Furthermore, thanks to the modelled interactions, hypercomplex networks capture internal latent relations in multidimensional inputs and preserve pre-existing correlations among input dimensions [@Chen2021QFM; @GrassucciICASSP2021; @Grassucci2021Entropy; @Gai2021TCS; @Vieira2020IJCNN]. Therefore, the quaternion domain is particularly appropriate for processing $3$D or $4$D data, such as color images or (up to) $4$-channel signals [@Took2019ICASSP], while the octonion one is suitable for $8$D inputs. Unfortunately, most common color image datasets contain RGB images, and some tricks are required to process this data type with QNNs. The most common are padding a zero channel to the input so that the image fits the four quaternion components, or remodelling the QNN layer with the help of vector maps [@Gaudet2021RemDim]. Additionally, while quaternion neural operations are widespread and easy to integrate into pre-existing models, very few attempts have been made to extend such models to different domain orders. Accordingly, developing hypercomplex convolutional models for larger multidimensional inputs, such as the magnitudes and phases of multichannel audio signals or $16$-band satellite images, remains cumbersome. Moreover, despite the significantly lower number of parameters, these models are often slightly slower than real-valued baselines [@hoffmann2020algebranets], and ad-hoc algorithms may be necessary to improve efficiency [@Cariow2021Quat; @Cariow2021Oct].
4
+
5
+ Recently, a novel branch of the literature aims at compressing neural networks by leveraging Kronecker product decomposition [@Huang2020StochasticNN; @Tang2021SKFAC], obtaining considerable results in terms of model efficiency [@Wang2021KroneckerCD]. Lately, a parameterization of hypercomplex multiplications has been proposed to generalize hypercomplex fully connected layers as sums of Kronecker products [@Zhang2021PHM]. The latter method obtains high performance in various natural language processing tasks while also reducing the overall number of parameters. Other works extended this approach to graph neural networks [@le2021parameterized] and transfer learning [@mahabadi2021compacter], proving the effectiveness of Kronecker product decomposition for hypercomplex operations. However, no such solution exists yet for convolutional layers, which remain the most employed layers when dealing with multidimensional inputs such as images and audio signals [@Wu2021CvTIC; @Hersheyicassp2017].
6
+
7
+ In this paper, we devise the family of parameterized hypercomplex neural networks (PHNNs), lightweight large-scale hypercomplex neural models that admit any multidimensional input, regardless of the number of dimensions. At the core of this novel set of models, we propose the parameterized hypercomplex convolutional (PHC) layer. Our method is flexible enough to operate in domains from $1$D to $n$D, where $n$ can be arbitrarily chosen by the user or tuned so that model performance leads to the most appropriate domain for the given input data. Such malleability comes from the ability of the proposed approach to subsume the algebra rules needed to perform convolution, regardless of whether these rules are predefined or not. Thus, neural models endowed with our approach adopt $1/n$ of the free parameters of their real-valued counterparts, and the amount of parameter reduction is a user choice. This makes PHNNs suitable for a plethora of applications in which saving storage memory is a crucial aspect. Additionally, the versatility of PHNNs allows processing multidimensional data in its natural domain by simply setting the dimensional hyperparameter $n$. For instance, color images can be analyzed in their RGB domain by setting $n=3$ without adding any useless information, contrary to standard quaternion-network processing with a padded zero channel. Indeed, PHC layers are able to grasp the proper algebra from input data, while capturing internal correlations among the image channels and saving $66\%$ of the free parameters.
8
+
9
+ In a thorough empirical evaluation on multiple benchmarks, we demonstrate the flexibility of our method, which can be adopted in different domains of application, from images to audio signals. We devise a set of PHNNs for large-scale image classification and sound event detection tasks, letting them operate in different hypercomplex domains and with various input dimensionalities, with $n$ ranging from $2$ to $16$.
10
+
11
+ The contribution of this paper is three-fold.
12
+
13
+ - We introduce a parameterized hypercomplex convolutional (PHC) layer which grasps the convolution rules directly from data via backpropagation exploiting the Kronecker product properties, thus reducing the number of free parameters to $1/n$.
14
+
15
+ - We devise the family of parameterized hypercomplex neural networks (PHNNs), lightweight and more efficient large-scale hypercomplex models. Thanks to the proposed PHC layer and to the method in [@Zhang2021PHM] for fully connected layers, PHNNs can be employed with any kind of input and pre-existing neural models. To show the latter, we redefine common ResNets, VGGs and Sound Event Detection networks (SEDnets), operating in any user-defined domain just by choosing the hyperparameter $n$, which also drives the number of convolutional filters.
16
+
17
+ - We show how the proposed approach can be employed with any kind of multidimensional data by simply changing the hyperparameter $n$. Indeed, by setting $n=3$ a PHNN can process RGB images in their natural domain, while leveraging the properties of hypercomplex algebras, allowing parameter sharing inside the layers and leading to a parameter reduction to $1/3$. To the best of our knowledge, this is the first approach that processes color images with hypercomplex-based neural models without adding any padding channel. Likewise, multichannel audio signals can be analyzed by simply considering $n=4$ for standard first-order ambisonics (which has $4$ microphone capsules), $n=8$ for an array of two ambisonics microphones, or even $n=16$ if we also want to include the phase information of each channel.
18
+
19
+ The rest of the paper is organized as follows. In Section [2](#subsec:real_layers){reference-type="ref" reference="subsec:real_layers"}, we introduce concepts of hypercomplex algebra and we recapitulate real and quaternion-valued convolutional layers. Section [3](#sec:phc){reference-type="ref" reference="sec:phc"} rigorously introduces the theoretical aspects of the proposed method. Sections [4](#sec:phc_rgb){reference-type="ref" reference="sec:phc_rgb"} and [5](#sec:phc_audio){reference-type="ref" reference="sec:phc_audio"} reveal how the approach can be adopted in different neural models and in two different domains, the images and audio one, expounding how to process RGB images with $n=3$ and multichannel audio with $n$ up to $8$. The experimental evaluation is presented in Section [6](#sec:img_class){reference-type="ref" reference="sec:img_class"} for image classification and in Section [7](#sec:sed){reference-type="ref" reference="sec:sed"} for sound event detection. Finally, Section [8](#sec:abl){reference-type="ref" reference="sec:abl"} reports the ablation studies we conduct and in Section [9](#sec:conc){reference-type="ref" reference="sec:conc"} we draw conclusions.
20
+
21
+ <figure id="fig:hprod" data-latex-placement="t">
22
+ <embed src="Figures/octonions_mult_table.pdf" />
23
+ <figcaption>Example of hypercomplex multiplication table for <span class="math inline"><em>n</em> = 2</span> i.e., complex, among others (green line), <span class="math inline"><em>n</em> = 4</span> i.e., quaternions, tessarines, (blue line) and <span class="math inline"><em>n</em> = 8</span>, i.e., octonions, bi-quaternions, and so on (red line). While for these domains algebra rules exist and are predefined, no regulations are set for other domains such as <span class="math inline"><em>n</em> = 3, 5, 6, 7</span> (dashed grey lines). The parameterized hypercomplex approaches are able to learn these missing algebra rules from data, thus defining hypercomplex multiplication and convolution for any desired domain.</figcaption>
24
+ </figure>
25
+
26
+ Hypercomplex neural networks rely on a hypercomplex number system based on the set of hypercomplex numbers $\mathbb{H}$ and the corresponding algebra rules that shape additions and multiplications [@VALLE2021111]. These operations have to be carefully modelled due to the interactions among imaginary units, which may not behave as real-valued numbers do. For instance, Figure [1](#fig:hprod){reference-type="ref" reference="fig:hprod"} reports an example of a multiplication table for complex (green), quaternion (blue) and octonion (red) numbers. However, these are just a small subset of the hypercomplex domains that exist. Indeed, for $n=4$ there exist quaternions and tessarines, among others, while for $n=8$ octonions, dual-quaternions, and so on. Each of these domains has different multiplication rules due to dissimilar interactions among the imaginary units. A generic hypercomplex number is defined as
27
+
28
+ $$\begin{equation}
29
+ h = h_0 + h_1 {\hat{\imath}}_1 + \ldots + h_n {\hat{\imath}}_n,
30
+ \label{eq:hyp_num}
31
+ \end{equation}$$
32
+
33
+ where $h_0, \ldots, h_n \in \mathbb{R}$ are real coefficients and ${\hat{\imath}}_1, \ldots, {\hat{\imath}}_n$ are imaginary units. Different subsets of the hypercomplex domain exist, including the complex, quaternion, and octonion ones, among others. They are identified by the number of imaginary units they employ and by the properties of their vector multiplication. The quaternion domain is one of the most popular for neural networks thanks to the properties of the Hamilton product. This domain has its foundations in the quaternion number $q = q_0 + q_1 {\hat{\imath}}+ q_2 {\hat{\jmath}}+ q_3 {\hat{\kappa}}$, in which $q_c, \; c \in \{0,1,2,3\}$ are real coefficients and ${\hat{\imath}}, {\hat{\jmath}}, {\hat{\kappa}}$ are the imaginary units. A quaternion whose real part $q_0$ is equal to $0$ is named a *pure quaternion*. The imaginary units comply with the property ${\hat{\imath}}^2 = {\hat{\jmath}}^2 = {\hat{\kappa}}^2 = -1$ and with the non-commutative products ${\hat{\imath}}{\hat{\jmath}}= - {\hat{\jmath}}{\hat{\imath}}; \; {\hat{\jmath}}{\hat{\kappa}}= - {\hat{\kappa}}{\hat{\jmath}}; \; {\hat{\kappa}}{\hat{\imath}}= - {\hat{\imath}}{\hat{\kappa}}$. Due to the non-commutativity of the vector multiplication, the Hamilton product has been introduced to properly model the multiplication between two quaternions.
34
+
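+ For illustration, a minimal sketch of the Hamilton product between two quaternions represented as $4$-vectors of real coefficients is given below; it is a direct transcription of the rules above, not part of the proposed method.
+
+ ```python
+ import numpy as np
+
+ def hamilton_product(q, p):
+     """Hamilton product of quaternions q = (q0, q1, q2, q3) and p = (p0, p1, p2, p3)."""
+     q0, q1, q2, q3 = q
+     p0, p1, p2, p3 = p
+     return np.array([
+         q0*p0 - q1*p1 - q2*p2 - q3*p3,   # real part
+         q0*p1 + q1*p0 + q2*p3 - q3*p2,   # i component
+         q0*p2 - q1*p3 + q2*p0 + q3*p1,   # j component
+         q0*p3 + q1*p2 - q2*p1 + q3*p0,   # k component
+     ])
+
+ # Non-commutativity: i * j = k, while j * i = -k
+ i, j = np.array([0, 1, 0, 0]), np.array([0, 0, 1, 0])
+ print(hamilton_product(i, j))   # [0, 0, 0, 1]  -> k
+ print(hamilton_product(j, i))   # [0, 0, 0, -1] -> -k
+ ```
+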
35
+ A generic convolutional layer can be described by
36
+
37
+ $$\begin{equation}
38
+ {\mathbf y}= \text{Conv}({\mathbf x}) = {\mathbf W}* {\mathbf x}+ \mathbf{b},
39
+ \label{eq:conv}
40
+ \end{equation}$$ where the input ${\mathbf x}\in \mathbb{R}^{t \times s}$ is convolved ($*$) with the filters tensor ${\mathbf W}\in \mathbb{R}^{s \times d \times k \times k}$ to produce the output ${\mathbf y}\in \mathbb{R}^{d \times t}$, where $s$ is the input channels dimension, $d$ the output one, $k$ is the filter size, and $t$ is the input and output dimension. The bias term $\mathbf{b}$ does not heavily influence the number of parameters, thus the degrees of freedom for this operation are essentially $\mathcal{O}(sdk^2)$.
41
+
42
+ Quaternion convolutional layers, instead, build the weight tensor ${\mathbf W}\in \mathbb{R}^{s \times d \times k \times k}$ by following the Hamilton product rule and organize filters according to it:
43
+
44
+ $$\begin{equation}
45
+ {\bf{W}} * {\bf{x}} = \left[ {\begin{array}{*{20}c}
46
+ \hfill {{\bf{W}}_0 } & \hfill { - {\bf{W}}_1 } & \hfill { - {\bf{W}}_2 } & \hfill { - {\bf{W}}_3 } \\
47
+ \hfill {{\bf{W}}_1 } & \hfill {{\bf{W}}_0 } & \hfill { - {\bf{W}}_3 } & \hfill {{\bf{W}}_2 } \\
48
+ \hfill {{\bf{W}}_2 } & \hfill {{\bf{W}}_3 } & \hfill {{\bf{W}}_0 } & \hfill { - {\bf{W}}_1 } \\
49
+ \hfill {{\bf{W}}_3 } & \hfill { - {\bf{W}}_2 } & \hfill {{\bf{W}}_1 } & \hfill {{\bf{W}}_0 } \\
50
+ \end{array}} \right] * \left[ {\begin{array}{*{20}c}
51
+ {{\bf{x}}_0 } \hfill \\
52
+ {{\bf{x}}_1 } \hfill \\
53
+ {{\bf{x}}_2 } \hfill \\
54
+ {{\bf{x}}_3 } \hfill \\
55
+ \end{array}} \right]
56
+ \label{eq:qprod}
57
+ \end{equation}$$
58
+
59
+ where ${\mathbf W}_0, {\mathbf W}_1, {\mathbf W}_2, {\mathbf W}_3 \in \mathbb{R}^{\frac{s}{4} \times \frac{d}{4} \times k \times k}$ are the real coefficients of the quaternion weight matrix ${\mathbf W}= {\mathbf W}_0 + {\mathbf W}_1 {\hat{\imath}}+ {\mathbf W}_2 {\hat{\jmath}}+ {\mathbf W}_3 {\hat{\kappa}}$ and ${\mathbf x}_0, {\mathbf x}_1, {\mathbf x}_2, {\mathbf x}_3$ are the coefficients of the quaternion input ${\mathbf x}$ with the same structure.
60
+
61
+ As done for real-valued layers, the bias can be ignored, and the degrees of freedom of the quaternion convolutional layer can be approximated by $\mathcal{O}(sdk^2/4)$. The lower number of parameters with respect to the real-valued operation is due to the reuse of filters performed by the Hamilton product in Eq. [\[eq:qprod\]](#eq:qprod){reference-type="ref" reference="eq:qprod"}. Moreover, sharing the parameter submatrices forces the layer to consider and exploit the correlation between the input components [@ParcolletAIR2019; @Tay2019QTRansformer; @GaudetIJCNN2018].
62
+
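+ A small PyTorch sketch of this filter arrangement is shown below; the function name and the $(d, s, k, k)$ weight layout follow PyTorch's convention and are our choice. Only $sdk^2/4$ distinct parameters are stored in the four banks, yet the assembled weight has the full $(d, s, k, k)$ shape and can be passed to `torch.nn.functional.conv2d`.
+
+ ```python
+ import torch
+
+ def quaternion_conv_weight(W0, W1, W2, W3):
+     """Arrange four real filter banks of shape (d/4, s/4, k, k) into a full
+     (d, s, k, k) convolution weight following the Hamilton product rule."""
+     rows = [
+         torch.cat([W0, -W1, -W2, -W3], dim=1),
+         torch.cat([W1,  W0, -W3,  W2], dim=1),
+         torch.cat([W2,  W3,  W0, -W1], dim=1),
+         torch.cat([W3, -W2,  W1,  W0], dim=1),
+     ]
+     return torch.cat(rows, dim=0)
+ ```
+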
63
+ <figure id="fig:ham_prod" data-latex-placement="t">
64
+ <div class="center">
65
+ <embed src="Figures/Hamilton_prod.pdf" style="width:90.0%" />
66
+ </div>
67
+ <figcaption>The quaternion convolution rule can be expressed as sum of Kronecker products between the matrices <span class="math inline"><strong>A</strong><sub><em>i</em></sub></span> that subsume the algebra rules and the matrices <span class="math inline"><strong>F</strong><sub><em>i</em></sub></span> that contain the convolution filters, with <span class="math inline"><em>i</em> = 1, 2, 3, 4</span>. In this example, the parameters of <span class="math inline"><strong>A</strong><sub><em>i</em></sub></span> are fixed for visualization purposes, but in PHC layers they are learnable parameters.</figcaption>
68
+ </figure>
69
+
70
+ In the following, we delineate the formulation for the proposed parameterized hypercomplex convolutional (PHC) layer. We also show that this approach is capable of learning the Hamilton product rule when two quaternions are convolved.
71
+
72
+ The PHC layer is based on the construction, by sum of Kronecker products, of the weight tensor $\mathbf{H}$ which encapsulates and organizes the filters of the convolution. The proposed method is formally defined as: $$\begin{equation}
73
+ {\mathbf y}= \text{PHC}({\mathbf x}) = \mathbf{H}*{\mathbf x}+ \mathbf{b},
74
+ \end{equation}$$
75
+
76
+ whereby, $\mathbf{H} \in \mathbb{R}^{s \times d \times k \times k}$ is built by sum of Kronecker products between two learnable groups of matrices. Here, $s$ is the input dimensionality to the layer, $d$ is the output one, and $k$ is the filter size. More concretely, $$\begin{equation}
77
+ \mathbf{H} = \sum_{i=1}^n \mathbf{A}_i \otimes \mathbf{F}_i,
78
+ \end{equation}$$
79
+
80
+ in which $\mathbf{A}_i \in \mathbb{R}^{n \times n}$ with $i=1, ..., n$ are the matrices that describe the algebra rules and $\mathbf{F}_i \in \mathbb{R}^{\frac{s}{n} \times \frac{d}{n} \times k \times k}$ represents the $i$-th batch of filters that are arranged by following the algebra rules to compose the final weight matrix. It is worth noting that $\frac{s}{n} \times \frac{d}{n} \times k \times k$ holds for squared kernels, while $\frac{s}{n} \times \frac{d}{n} \times k$ should be considered instead for 1D kernels. The core element of this approach is the Kronecker product [@KroneckerBook], which is a generalization of the vector outer product that can be parameterized by $n$. The hyperparameter $n$ can be set by the user who wants to operate in a pre-defined real or hypercomplex domain (e.g., by setting $n=2$ the PHC layer is defined in the complex domain, or in the quaternion one if $n$ is set equal to $4$, as Figure [2](#fig:ham_prod){reference-type="ref" reference="fig:ham_prod"} illustrates), or tuned to obtain the best performance from the model. The matrices $\mathbf{A}_i$ and $\mathbf{F}_i$ are learnt during training and their values are reused to build the definitive tensor $\mathbf{H}$.
81
+
82
+ The degrees of freedom of $\mathbf{A}_i$ and $\mathbf{F}_i$ are $n^3$ and $sdk^2/n$, respectively. Usually, real-world applications employ a large number of filters per layer ($s, d = 256, 512, ...$) and small values of $k$. Therefore, $sdk^2 \gg n^3$ frequently holds, and the degrees of freedom of the PHC weight matrix can be approximated by $\mathcal{O}(sdk^2/n)$. Hence, in real-world problems the PHC layer reduces the number of parameters to $1/n$ of that of a standard convolutional layer.
83
+
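+ A minimal PyTorch sketch of such a layer is given below; class and argument names are ours, and initialization, bias, stride and grouping details are simplified, so this is an illustration of the construction rather than the reference implementation.
+
+ ```python
+ import torch
+ import torch.nn as nn
+ import torch.nn.functional as F
+
+ class PHConv2d(nn.Module):
+     """Parameterized hypercomplex convolution: H = sum_i kron(A_i, F_i)."""
+     def __init__(self, n, in_channels, out_channels, kernel_size, padding=0):
+         super().__init__()
+         assert in_channels % n == 0 and out_channels % n == 0
+         self.n, self.padding = n, padding
+         # Learnable algebra matrices A_i (n x n) and reduced filter banks F_i.
+         self.A = nn.Parameter(torch.randn(n, n, n) * 0.1)
+         self.filters = nn.Parameter(torch.randn(n, out_channels // n, in_channels // n,
+                                                 kernel_size, kernel_size) * 0.1)
+
+     def weight(self):
+         # Kronecker product of each A_i with its filter bank F_i, summed over i:
+         # the result has the full (out_channels, in_channels, k, k) shape.
+         kron = torch.einsum('nab,noikl->naobikl', self.A, self.filters)
+         n, a, o, b, i, k, l = kron.shape
+         return kron.reshape(n, a * o, b * i, k, l).sum(dim=0)
+
+     def forward(self, x):
+         return F.conv2d(x, self.weight(), padding=self.padding)
+ ```
+
+ With large $s$ and $d$, the stored parameters ($n^3 + sdk^2/n$) amount to roughly $1/n$ of those of a real-valued convolution with the same input and output channels, while the assembled weight keeps the full shape.
+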
84
+ <figure id="fig:toy" data-latex-placement="t">
85
+ <div class="center">
86
+ <embed src="Figures/toy_examples.pdf" />
87
+ </div>
88
+ <figcaption>Loss plots for toy examples. The PHC layer is able to learn the matrix <span class="math inline"><strong>A</strong></span> describing the convolution rule for pure (left) and full quaternions (right).</figcaption>
89
+ </figure>
90
+
91
+ Moreover, when processing multidimensional data with correlated channels, such as color images, multichannel audio, or multisensor signals, PHC layers bring benefits due to the weight sharing among different channels. This allows capturing latent intra-channel relations that standard convolutional networks ignore because of the rigid structure of their weights [@GrassucciQGAN2021; @ParcolletICASSP2019a]. The PHC layer is able to subsume hypercomplex convolution rules, and the desired domain is specified by the hyperparameter $n$. Interestingly, by setting $n=1$ a real-valued convolutional layer can be represented too. Indeed, standard real layers do not involve parameter sharing, therefore the algebra rules are solely described by the single scalar $\mathbf{A} \in \mathbb{R}^{1\times1}$ and the complete set of filters is included in $\mathbf{F} \in \mathbb{R}^{s \times d \times k \times k}$.
92
+
93
+ Therefore, the PHC layer fills the gaps left by pre-existing hypercomplex algebras in Fig. [1](#fig:hprod){reference-type="ref" reference="fig:hprod"} and subsumes the missing algebra rules directly from data, i.e., the dashed grey lines in Fig. [1](#fig:hprod){reference-type="ref" reference="fig:hprod"}. Thus, a neural model equipped with PHC layers can grasp the filter organization also for $n=3,5,6,7$ and so on. Moreover, any convolutional model can be endowed with our approach, since PHC layers easily replace standard convolution / transposed convolution operations and the hyperparameter $n$ gives high flexibility to adapt the layer to any kind of input, such as color images, multichannel audio or multisensor signals.
94
+
95
+ We test the receptive ability of the PHC layer in two toy problems building an artificial dataset. We highly encourage the reader to take a look at the section `tutorials` of the GitHub repository <https://github.com/eleGAN23/HyperNets> for more insights and results on toy examples, including the learned matrices $\mathbf{A}_i$. The first task aims at learning the right matrix $\mathbf{A}$ to build a quaternion convolutional layer which properly follows the Hamilton rule in Eq. [\[eq:qprod\]](#eq:qprod){reference-type="ref" reference="eq:qprod"}. That is, we set $n=4$ and the objective is to learn the four matrices $\mathbf{A}_i$ as they are in the quaternion product in Fig. [2](#fig:ham_prod){reference-type="ref" reference="fig:ham_prod"}. We build the dataset by performing a convolution with a matrix of filters ${\mathbf W}\in \mathbb{H}$, which are arranged following the regulation in Eq. [\[eq:qprod\]](#eq:qprod){reference-type="ref" reference="eq:qprod"}, and a quaternion ${\mathbf x}\in \mathbb{H}$ in input. The target is still a quaternion, named $\mathbf{y} \in \mathbb{H}$. As shown in Fig. [3](#fig:toy){reference-type="ref" reference="fig:toy"} (right), the MSE loss of the PHC layer converges very fast, meaning that the layer properly learns the matrix $\mathbf{A}$ and the Hamilton convolution.
96
+
97
+ The second toy example is a modification of the previous dataset target. Here, we want to learn the matrix $\mathbf{A}$ which describes the convolution between two pure quaternions. Therefore, when setting $n=4$, the matrix $\mathbf{A}_1$ of a pure quaternion should be completely null. Pure quaternions arise, for example, for an input RGB image (and for the weights of a hypercomplex convolutional layer), since the padded first channel of an RGB image is zero. Figure [3](#fig:toy){reference-type="ref" reference="fig:toy"} (left) displays the convergence of the PHC layer loss during training, proving that the proposed method is able to subsume hypercomplex convolution rules when dealing with pure quaternions too.
98
+
99
+ ::: strip
+ $$\begin{gather}
+ \label{eq:visualphc}
+ \underbrace{\left[ A \right]}_{(1 \times 1)} \otimes \underbrace{\left[\, {\bf F} \,\right]}_{(s \times d \times k \times k)} = \underbrace{\left[\, {\bf H} \,\right]}_{(s \times d \times k \times k)} \\
+ \underbrace{\left[ {\bf A}_1 \right]}_{(2 \times 2)} \otimes \underbrace{\left[\, {\bf F}_1 \,\right]}_{\left(\frac{s}{2} \times \frac{d}{2} \times k \times k\right)} + \underbrace{\left[ {\bf A}_2 \right]}_{(2 \times 2)} \otimes \underbrace{\left[\, {\bf F}_2 \,\right]}_{\left(\frac{s}{2} \times \frac{d}{2} \times k \times k\right)} = \underbrace{\left[\, {\bf H} \,\right]}_{(s \times d \times k \times k)} \\
+ \vdots \\
+ \underbrace{\left[ {\bf A}_1 \right]}_{(n \times n)} \otimes \underbrace{\left[\, {\bf F}_1 \,\right]}_{\left(\frac{s}{n} \times \frac{d}{n} \times k \times k\right)} + \underbrace{\left[ {\bf A}_2 \right]}_{(n \times n)} \otimes \underbrace{\left[\, {\bf F}_2 \,\right]}_{\left(\frac{s}{n} \times \frac{d}{n} \times k \times k\right)} + \ldots + \underbrace{\left[ {\bf A}_n \right]}_{(n \times n)} \otimes \underbrace{\left[\, {\bf F}_n \,\right]}_{\left(\frac{s}{n} \times \frac{d}{n} \times k \times k\right)} = \underbrace{\left[\, {\bf H} \,\right]}_{(s \times d \times k \times k)}.
+ \end{gather}$$
+ :::
333
+
334
+ We provide a formal explanation of the PHC layer to better understand the Kronecker product and how it organizes convolution filters to reduce the overall number of parameters to $1/n$. In Eq. [\[eq:visualphc\]](#eq:visualphc){reference-type="ref" reference="eq:visualphc"}, we show how the PHC layer generalizes from $1$D to $n$D domains. When subsuming real-valued convolutions in the first line of Eq. [\[eq:visualphc\]](#eq:visualphc){reference-type="ref" reference="eq:visualphc"}, the Kronecker product is performed between a scalar $A$ and the filter matrix $\mathbf{F}$, whose dimension is the same as the final weight matrix $\mathbf{H}$, which is $s \times d \times k \times k$.
335
+
336
+ Considering the complex case with $n=2$ in the second line of Eq. [\[eq:visualphc\]](#eq:visualphc){reference-type="ref" reference="eq:visualphc"}, the algebra is defined in $\mathbf{A}_1$ and $\mathbf{A}_2$ while the filters are contained in $\mathbf{F}_1$ and $\mathbf{F}_2$, each of dimension $1/2$ the final matrix $\mathbf{H}$. Therefore, while the size of the weight matrix $\mathbf{H}$ remains unchanged, the parameter size is approximately $1/2$ the real one. In the last line of Eq. [\[eq:visualphc\]](#eq:visualphc){reference-type="ref" reference="eq:visualphc"}, we can see the generalization of this process, in which the size of matrices $\mathbf{F}_i$, $i=1, ..., n$ is reduced proportionally to $n$. It is worth noting that, while the parameter size is reduced with growing values of $n$, the dimension of $\mathbf{H}$ remains the same.
337
+
338
+ In this section, we describe how PHNNs can be applied to process color images in hypercomplex domains without adding any extra information to the input, and we propose examples of parameterized hypercomplex versions of common computer vision models such as VGGs and ResNets. To be consistent with the literature, we perform each experiment with a real-valued baseline model, and then compare it with its complex and quaternion counterparts and with the proposed PHNN. Furthermore, we assess the malleability of the proposed approach by testing different values of the hyperparameter $n$, thereby defining parameterized hypercomplex models in multiple domains.
339
+
340
+ Different encodings exist for processing color images; however, the most common computer vision datasets are composed of three-channel images in $\mathbb{R}^3$. In the quaternion domain, RGB images are enclosed into a quaternion and processed as single elements [@ParcolletAIR2019]. The encapsulation is performed by considering the RGB channels as the real coefficients of the imaginary units and by padding a zero channel as the first real component of the quaternion.
341
+
342
+ Here, we propose to leverage the high malleability of PHC layers to deal with RGB images in hypercomplex domains without embedding useless information into the input. Indeed, the PHC layer can directly operate in $\mathbb{R}^3$ by simply setting $n=3$ and process RGB images in their natural domain while exploiting hypercomplex network properties such as parameter sharing. The great flexibility of PHC layers allows the user to choose whether to process images in $\mathbb{R}^4$ or in $\mathbb{R}^3$. On one hand, by setting $n=4$, a zero channel is added to the input, but the layer saves $75\%$ of the free parameters. On the other hand, by choosing $n=3$ the network does not handle any useless information, although it reduces the number of parameters by only $66\%$. This is a trade-off which may depend on the application or on the hardware available to the user. Furthermore, the domain in which images are processed can be tuned by letting the performance of the network indicate the best choice for $n$.
343
+
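+ For example, reusing the `PHConv2d` sketch above, the two options differ only in the value of $n$ and in whether a zero channel is padded (tensor sizes below are illustrative):
+
+ ```python
+ import torch
+
+ x_rgb = torch.randn(8, 3, 32, 32)                              # a batch of RGB images
+ phc3 = PHConv2d(n=3, in_channels=3, out_channels=60,
+                 kernel_size=3, padding=1)                      # natural RGB domain, ~66% fewer parameters
+ y3 = phc3(x_rgb)
+
+ x_pad = torch.cat([torch.zeros(8, 1, 32, 32), x_rgb], dim=1)   # zero-padded, quaternion-like input
+ phc4 = PHConv2d(n=4, in_channels=4, out_channels=60,
+                 kernel_size=3, padding=1)                      # ~75% fewer parameters
+ y4 = phc4(x_pad)
+ ```
+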
344
+ A popular family of image processing methods is based on VGG networks [@VGG2015], which stack several convolutional layers followed by a closing fully connected classifier. To completely define the models in the desired hypercomplex domain, we propose to endow the network with PHC layers as convolutional components and with Parameterized Hypercomplex Multiplication (PHM) layers [@Zhang2021PHM] as the linear classifier. The backbone of our PHVGG is then $$\begin{equation}
345
+ \begin{split}
346
+ \mathbf{h}_t &= \text{ReLU} \left( \text{PHC}_t \left( \mathbf{h}_{t-1} \right) \right) \qquad t=1,...,j \\
347
+ {\mathbf y}&= \text{ReLU} \left( \text{PHM}(\mathbf{h}_j) \right).
348
+ \end{split}
349
+ \end{equation}$$
350
+
351
+ In the recent literature, a copious set of high-performance image classification results has been obtained with models having a residual structure. ResNets [@Resnet2016] pile up several residual blocks composed of convolutional layers and identity mappings. A generic PHResNet residual block is defined by $$\begin{equation}
352
+ {\mathbf y}= \mathcal{F}({\mathbf x}, \{ \mathbf{H}_j \}) + {\mathbf x},
353
+ \end{equation}$$
354
+
355
+ whereby $\mathbf{H}_j$ are the PHC weights of layer $j = 1, 2$ in the block, and $\mathcal{F}$ is $$\begin{equation}
356
+ \mathcal{F}({\mathbf x}, \{ \mathbf{H}_j \}) = \text{PHC} \left( \text{ReLU} \left( \text{PHC}({\mathbf x}) \right) \right),
357
+ \end{equation}$$
358
+
359
+ in which we omit batch normalization to simplify notation. The backward phase of a PHNN reduces to a backpropagation similar to that of quaternion neural networks, which has already been developed in [@NittaQBack1995; @ParcolletAIR2019; @ParcolletICLR2019].
360
+
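+ A compact sketch of such a block, again reusing the `PHConv2d` layer sketched earlier, is shown below; batch normalization and downsampling shortcuts are omitted, as in the equations above.
+
+ ```python
+ import torch.nn as nn
+
+ class PHBasicBlock(nn.Module):
+     """Residual block y = F(x) + x with F(x) = PHC(ReLU(PHC(x)))."""
+     def __init__(self, n, channels, kernel_size=3):
+         super().__init__()
+         pad = kernel_size // 2   # keep the spatial size so the skip connection matches
+         self.conv1 = PHConv2d(n, channels, channels, kernel_size, padding=pad)
+         self.conv2 = PHConv2d(n, channels, channels, kernel_size, padding=pad)
+         self.relu = nn.ReLU()
+
+     def forward(self, x):
+         return self.conv2(self.relu(self.conv1(x))) + x
+ ```
+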
361
+ In the following, we expound how PHNNs can be employed to deal with multichannel audio signals and we introduce, as an example, the parameterized hypercomplex Sound Event Detection networks (PHSEDnets).
362
+
363
+ A first-order Ambisonics (FOA) signal is composed of $4$ microphone capsules, whose magnitude representations can be enclosed in a quaternion [@ComminielloICASSP2019a; @RicciardiMLSP2020]. However, the quaternion algebra may be restrictive if more than one microphone is employed for the recording, or if the phase information has to be included too. Indeed, quaternion neural networks fit badly with multidimensional inputs having more than $4$ channels [@Grassucci2022DualQ].
364
+
365
+ Conversely, the proposed method can be easily adapted to deal with these additional dimensions by simply setting the hyperparameter $n$, thus fully leveraging all the information in the $n$-dimensional input.
366
+
367
+ Sound Event Detection networks (SEDnets) [@Adavanne2019SoundEL] are comprised of a core convolutional component which extracts features from the input spectrogram. The information is then passed to a gated recurrent unit (GRU) module and to a stack of fully connected (FC) layers with a closing sigmoid $\sigma$, which outputs the probability that a sound event is present in the audio frame. Formally, the PHSEDnet is described by $$\begin{equation}
368
+ \begin{split}
369
+ \mathbf{h}_t &= \text{PHC}_t(\mathbf{h}_{t-1}) \qquad t=1,...,j\\
370
+ {\mathbf y}&= \sigma \left( \text{FC} \left( \text{GRU} \left( \mathbf{h}_j \right) \right) \right).
371
+ \end{split}
372
+ \end{equation}$$ After the GRU module, we employ standard fully connected layers, which can also be implemented as PHM layers with $n=1$, since the processed signal has lost its original multidimensional structure.
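+ A rough structural sketch of such a network is given below, reusing the `PHConv2d` layer from earlier; the pooling strategy, layer sizes and number of event classes are illustrative rather than the exact configuration used in the experiments.
+
+ ```python
+ import torch
+ import torch.nn as nn
+
+ class PHSEDNetSketch(nn.Module):
+     """PHC feature extractor -> GRU -> FC -> sigmoid, per audio frame."""
+     def __init__(self, n, in_channels, hidden=64, num_events=11):
+         super().__init__()
+         self.phc1 = PHConv2d(n, in_channels, 64, kernel_size=3, padding=1)
+         self.phc2 = PHConv2d(n, 64, 64, kernel_size=3, padding=1)
+         self.gru = nn.GRU(input_size=64, hidden_size=hidden, batch_first=True)
+         self.fc = nn.Linear(hidden, num_events)
+
+     def forward(self, x):                    # x: (batch, channels, time, freq)
+         h = self.phc2(self.phc1(x))          # (batch, 64, time, freq)
+         h = h.mean(dim=-1).transpose(1, 2)   # pool over frequency -> (batch, time, 64)
+         h, _ = self.gru(h)
+         return torch.sigmoid(self.fc(h))     # per-frame event probabilities
+ ```
+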
2111.04670/main_diagram/main_diagram.drawio ADDED
@@ -0,0 +1 @@
 
 
1
+ <mxfile modified="2021-03-16T15:40:48.480Z" host="app.diagrams.net" agent="5.0 (Macintosh; Intel Mac OS X 10_14_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.82 Safari/537.36" etag="hakWtmANt0Np3td4jCrs" version="14.4.8" type="google" pages="4"><diagram id="TrKhs7d_Uj7syuPoxLAD" name="Page-1">7VpNc5swEP01PqYDEh/OsXHi9NDMtPVM2+SmGgVosUWEsE1+fYURBoFD/FEjeeqT0dOCpN2n1WPxAI5mq3uK4uCBeDgaAMNbDeDtAADTAg7/yZFMIK5rFohPQ09gFTAJX7EADYGmoYcTyZARErEwlsEpmc/xlEkYopQsZbNnEsmjxsjHLWAyRVEb/RF6LCjQoW1U+Ccc+kE5smmInhkqjQWQBMgjyxoE7wZwRAlhxdVsNcJR7r3SL8V94zd6NxOjeM52ucF+/f4KXm5eHv5kT2D5dTxOvz1eDcXcWFYuGHt8/aJJKAuIT+YouqvQG0rSuYfzpxq8Vdl8JiTmoMnB35ixTAQTpYxwKGCzSPTyCdPsZ37/B7tsPorHrRu3K6mVidY0pYv1uPlD2ssXHklISqe4Y80ljRD1Meuwg4Vd7pDaAMK595jMMJ8cN6A4QixcyIRBgnf+xk7c+pFSlNUMYhLOWVJ78pcc4AZiDzmCPmIDXcFGlBvm0HK77PlFMYGyVVtJBa2ZsweLrg9gkRRMZZQ6PYssLVlkdbPIuu60Pw2LypR/odEWO/scaeRAqIBG5oVGb9o550gj13UV0KgYcoGiVHhhMgBOxJ17k6S/+KWfX4a5XYHyUWodLQ4ug5DhSYzWYVxytSxTBiVxIV+fw1VOvbe5sMCU4VVn9EQvkN1cNpc1HSugoCZhS2xbtGsuP8CjUNuNaZ5sY4KzkgnAkBO2ae+nExr2J9qZ1oVHmuuEfXnUFAr98Mi+8EhzobAvj5pKoR8euRrxSJKbFamU8eifF1GOkiBgN1Gnr6QzHXlDKNd0hxQQz537UJWmO477QKNQ9XXc7RoqPWRTs3jbKum/I5sa9qc57sD/+Bq3K4/0kE0tHr3z0aApm0AfHw1g6yzeUriLojBOcgIpOGuh0fCK6rNWp88sfZ21lqoEftxZq++r5elq0LuGSo8c2frENdyzCD3sIUdarRy5RcSpzJGWrVeNefNnCx02Xl850la18Y4Kld3i9hZhqZLb9rVe79pOy2FbCuEqHebCvgQTb1Z/nCqSbfX/M3j3Fw==</diagram><diagram name="arch_example" id="JBdPQSLxVuS15KBWi9wy">7V1Zc9o6FP41eYzHkrw+ZuvtdJK5STNz2/TNAQNuCKLGJKG//trBBlsSRsTWxoSHBAtvfGfVd47FCbp4fvsnjeaTGzyMpyfQHr6doMsTCIEDvfxfMbIqR3wfrEfGaTIsx7YD98nfuBy0y9FlMowXjR0zjKdZMm8ODvBsFg+yxliUpvi1udsIT5tXnUfjmBq4H0RTevRHMswm69HAtbfjX+NkPKmuDOzyk+eo2rkcWEyiIX6tDaGrE3SRYpyt3z2/XcTTAr0Klx83k9HL6fAKzW7T2a/BAwod+3R9si+HHLL5Cmk8yz586ufo139nyVM4mU289Ony2/xx8HjqOutzv0TTZQlY+WWzVYXgOMXL+Qk6jxbztYBGyVucn/Sc87bK23+J0yx+Y8k8eqwutYU1V8gYP8dZusr3K49yK6GVungKg3LgdStZ6JVjk5pUkW+B8ptGpT6NN6ffYpa/KWE7AMLKQtogzBGcDQvMLu0ct9dJksX382hQfPqa210+Nsme86tegvztKJlOL/AUp+/HIv/qzLuymXC3i5TEeyeuvmPlBuHnxr7+G1KgvuNX38eTjHEgFuMvX8IQIZEYAyewahAHbqAdyA4QC7JARdYJRWgsim/tCOoEMjIdZACA1UAQ6QcyR26gt9PNX1Y9sAUaarJrvCYHrlUPbD7UDmSXA2RtstygUgmtslxHtyzX3YG3wVmu45uOsQlZruCphECQdUIx1Cw3OFhVDchyK7rDXJANyHJd3Wa9B4NsQJbr6jYpPlyTDchyOSLbOsuVnNFC17Vsu4ZNCUxrguuGVhhS2RqJJQq3r0rv+2cfZWkvx9yjXfIfjn4+W5vlKrAsUkcezOScY0c6LNlPSMoq5MFMTTt2REC5OMuazcnDmcw0dkyh5cIMjg9n3mRDLtBQVqYsAWjFSAqOdCpcMDnTc3RQ2YCmKQCFdH6eZL6I96PcNzW8G0zoNDsdAKQz5IDBAFdjvQMZ0lSE0yOQIzzLytYhEIgEFvgEuQ4ATUnIRZb2qq6JyCKbLFsA1TpLO9V4OI7vy02cZhM8xrNoerUdPW+63e0+1xjPS4x/x1m2KjGNlhluSiDHNV39LI9/33goNiy32rx8q394uSq3FlmKnzYtamgzwsdytAlxgZfpIOYw7ixKx3FbWKh0tYDxY+YGUYu0389zlqbRqrbDHCezbFG7zG0xUPeVgDRp3yZ0Z33OrSZt7rSDctEhvM/IIs1s9Ys0dNcBw471B5ZSfZpikYsrI6qI94YcXq2lCtHJq1W5336v5nT2am7/Xi1o2mXo1TVgvxds7i/IB3pqA+w2pj7UP9MwwDq8qmgLVcVuDoTmWZCJjplKVFU75uDTinp26P4ROHTkKHDooaY5gjDHrKOmIEQ6KLQn9pMF1eYBTV3hP3xzx2uDKw8jvF0PWrd54upoPKColLZSVxkTdWF5BLAZE2WTeBkyU9EwjRTr1jqJ31PX5NoXZQKJqj4BHaObRXVrkKeu6bUv0MNmid83AHR1PbJ9cVg8D9yqhtkX3CUrXrdB0ITQAMzVNc325U+8Zr0aGQA6NB105Df6h0zAXFanhjjMUbOXKNAfdGBzaPpiEs2Lt4NlOl2dp9HgqUiJ98G/lVWbMFKcRVmCZ/lwQcmSkwAoUF4BJDJx5NECqqihukAA8MTN2GgruC1WR4H2Y7TI0YT2IMriMU6TqDhZPBvgYTIbUzLLMchY3E8lgxmexYRYyqFomowLcQxyuON8/LxANBlE07Pyg+dkOJzu4p2bFlqbv0EgUJSuHVjkvAq5tDQ9l0E3Q3HC5HisW5JxybYtACzSvAJ/K6S9FpaHL1eYWOi2bLpi20t4f9f/8pyA4DMc4fDvmsJBhmWoD0T0U8+C4r8yoTiBlR+5Iz3QUyg0mfHvPE7XTgXawyT3KMnjstz8DEZrOYdbkWyEW1AqtHxBnqQzZNpHRLoZ3Dt/v/35ev7be7q7u/4zOr2/O6Xnkib06JL1OZmNU0wY6fhhYocuWSaV2qDLxJUOASb25zqApPIktueyzV5x1a9rGUhE2afNsOtVnzZF5S36UJbWf28ugqQx99yay1Ys2mJNbM3VLsDQpAydC+sPK6X28tq/mKgyeuiOp
eWmLdvb681gV2/WRwWbeWdKmqT07lZpk+BeSbvaSppmykzseKVSP8Uezz8y+1HtKQ98fEGi/Xx2NvNJmtHXJSbDFyZpoFjUXSdzMnr6OgVJLYyczceqFT2wbICa4ne2AzsU4H3rNk6THIKCwRWcKvFK7TT/Mo69fUHyURVgQb/2+aZQ3tuM3UEhccWyW5evl9xBgRUS3eSeVazNhZDjQdt3ACRueW0hVHM5zSW8LwhWeznUjVoQ1cFrXqe/Jvbvo0FqvyzD+fUIZYvV9cPd90g06SAqN/QUPv7LxpFuDzaRxnZVrjPBBpaumpnIY/sql5lgA6uYD+idx+6BD2g17Xqu06qqvGGTMrb+mWxP9CITTCAqZ2w4k61dkGGsZGYilU3pvTxihw0rA8Rj4bJbk769Hg0eOBGgPVof0zf2rSkpQOhN0rTKcH/4OvDpO5nCPg5Gm0oCVTs+JWvrGGhD/A7zQL5Lpg2pXfXGBL5rvy9lMN4KfOmHpgI+uTxDIGMm8Fk061uzOk8yxXmYYy+m2HYY2ipTND3CC/up8c/VMTrVUD4QAWi2x22eAxEL4vDWLMg8lTxPfzUJds2Q+2cPu+Xph//EoW/vkFH9F2AAI4V3hFWwET01ElvAaRcZP7lGqJgvj1tj40iveiG3gNMV14qkRIrbpBya/JVbsOlJQQFqGrujWEEZP3Mul1Q/UsNn/Ca3XFK9J8OHjB8plOtBdaXRW9L2Vuz3NkEh3ga4SjR7k8E0nkZZ8tK07/4ldWy838VF0TkjQMKV4PZKuEofNJGwo5owFxSE1Tu5Yys7CTMdfufIS2pIco7Htjq8sPDncEuYd3l4Sc5RSaKiB08pKRyqs2k2b0pFQxOaXEOiYqO8/4ieKZnY40qxeMp7XOlkzcQWV2DbJD2qusdV8Tynq6OXVn1kuPU2ReWtadB8ee8NriH5kKiUBtfjWKlBu/hCP0Mhl4kTFF8Ud3kpqcqrbKXgdGYHtujsLv71LrHPlRr4JM37YAYv+ypf0lX0MryxlU79FPs8oLgbxRgT4nWWgJfCUWBDaukcqY2twhqcbR1c6YdmAMAmagRSGlvBsdUBlGtW57mlOAfDYLlMohFkLBLSKT9TElzyzRTjrO4W8oRqcoOHcbHH/w==</diagram><diagram name="Copy of arch_example" id="iZ4cdDQ_QTdzxB7f9_Cl">7V1dc5tGF/41vjSz3x+Xie28vWin6WSmaS6JhCXeykKDcG3113exFgS7gMBi0dLUF4lYLYv0nK/nnLOgG3z39Pq/NNytf0mW0eYGgeXrDb6/QQgSxNR/+chBj3AOjyOrNF7qsdPAl/jvSA8CPfocL6N9bWKWJJss3tUHF8l2Gy2y2liYpslLfdpjsqlfdReuImvgyyLc2KNf42W2Po4KCk7jP0Xxal1cGQL9zlNYTNYD+3W4TF4qQ/jhBt+lSZIdXz293kWbHL0CF/ANgu1dBlbr8O+vPx3Qb7+/fr09LvZpyCnlV0ijbTbu0ui49F/h5lnjpb9rdigAXKXJ8+4Gfwz3u6N8HuPXSK35seen0p/+ryjNotcmkYffi0udUFX6GCVPUZYe1Dx9FmXkeJ5WxVsktGReToJFTI+tK0LFPID63FCr06pc/gSZeqFRG4AgPo+gAnC7zCG7Bwq2l3WcRV924SJ/90VZnRpbZ0/qovdQvXyMN5u7ZJOkb+di/vCBPYD+aKMWtFtR5SRQ1sCVpR//lRakb+hV57BpESZuEf70SUqMHSIMiQgqAAsqvIOYzRziNvw8gpjP3E9ACIMaftg7iMXMIVZ/QdUVC/+0WM4cYihoUHXFHHkHcUGcZ8HJREHBvOJkEJ6HcEo1LUX6L2JlsEfq4DfGM+Bl0HF2MZSYDQDZJxQdZxDuVXUG/BbSuYM8A4YLHWcRDv3BjAhYjzziSMAmJlsYgQCACjasB/eiMpDSIhImllie/oq0Y3xgx84eLPp7qTqLFlH1dsy8WZunVWDUI4MYxRW/O/0YjLPJh1uo2sQ4j51mnNPnwTFwMM4WJ24hGhMDPVWRfTqF7hsHJwZ6KqY8AdBXRtIxHS5D24Qqa/Jj4oPKkh69i/063OUvF8/p5vAxDRd/Rtl5xE/i6cI/TbIwi5OtGr6VuRz3WZr8Wfaa0eVFuVaBMAkDBg0OSOxSEaG2SAR0xfKInaN8zlvpCHwP9wpORXvDLFolaRzma0XbRbKMtytLaAqErC6QI7KFELbJNjLkoofCTbzK5bFQeEdq/GMOabwINx/0G0/xcplfplED6lb5mGwzvakBu5QlAUYhFRdSqwiS0YZCasH2xhdkjzxoIsua2rCECLCoy4MDECC7ItBsWzQowBtfKnYSRdzE6DfV12vCKU0B19NcjmuSgEKFIf9KM9ROuhxF/KvJheXwVcrlZBZysZO0X3dRevQsCCxj5Vbi78/68L9odBS1rEcjKO3KEkVTBqOSbfZpi44M0/BCHTSCB8I2flg0dUXpm+8TgDFIOS/YnYNqfY9K8qAiZymf96NXLVuCa4LTkFMACx21ULzbt9ltZ1XnnepZyqw/kaE1PVR2HWACMJNYYoilsMuVHAbK0QuoQo0guZLaYmiZ4kAMto7CeYmhcJ8IBkJKjpn6TpxyZFN7r3AXNsrLVVRwjiTN1skq2Yabh9OoEahOc35Okp2WwP+jLDtoxMPnLGmKpwW9x1aErVLT6DXO/tCXyl9/y18HVB/dv1beuj/og6Hi3SfP6SLqxEm7iSxMV1G3c9Rr5jD2UZhbigKGBVUWizghgGF3vXF5DWErGaSHP05Syw+/Vd87yfDtqBDiECVxIHDeV+AUOxT42yof0jQ8VCbsknib7SsX+ZwPVAsMdUaHUQCoAIQTmYcEXtWki05WL46f7aSU5fe9hLD0ya+8DgZlGdUAc17BgTaE4GsEhzIElG7/W9XrN4aAy7zOJY1lF0GlsAh3PsbyA+U19ZIY0UAKSQiVECAJEK+vePy6epEO/2KuCyULuGL6hElGmZSFqRfrHsGx1h3qEyEw+DEGgeCAQ+Xw1HuUDPrQnWe78ooNPnDK6F21oopR9Y7eFbercmZvo7lLS7vQHdutcTyvsOhrnLOr6vOLc/CiKOfQ9gqLGpFJay27RQIGwChNc674qsSMcUoY5EWZejRyDQkyOB3qDh3GfF3KaowV9snGPReQKJuQSHCqQ/H7YrBR8lXBrKD3ulzUKwSPF9moH/Y3Hvfrq8FGfnBLcFCt2ZFip/UA2oYNniPo+3SEI0NH3NCylo/b9qmQ0TsfOh/Xbpx3xdTsmvLv4SZQIw9prlky4MzSeO+6Tkg4zY2RoV8YBVAqMg25gJADOzNWgzZHgDx4m04EJkwRcelsQzP1das4M7eTNHWg2posztCyty540VYZvBPuTFuF27uTGQ+YYh8SYZUZSsVHbOBbpowvBmZX0rxoq/QXQ1vlDPmNuyeVM2NfyeNjxBaLTqbjtN1SNJvO5wzafZzPGdil+Tpq14FmSmGWgqThRFsI1lBmZDoeaTzyx5iOSdd0R0SnAO8
Hbij1VmnWOw1mzLFKj9xPYqJTMwed7UpR7dJZg+r6HAf7dpQ8j4ueVNp6bDeYoNd01bh5add6cNw811AytniN1U8yb36YqJ+E8aBP3X26K694lbqb1x0lF+HcoaVd6I3tOpUXDaV33fjnVZjj1zCsmTeU+pseHp1JD2gojb5bq9xOrS9IOwixJw0i0C+mjheoPNmlerInKUStQMK6bUodfI7SWAGRF+jH2mg0mBaO1ZYaTv3qZAljGpw2TGCE36elvLG3MzbTs1pOZyoxV2k5Mbvebrac7JLkzFtOg5NnZAhGuTWKASYYAC7LXSBnW04iKPeqAUG6Nhhc+ACo3k+H7IlYC14jPAnSg45Tod+edZxKIY52I4+tphyr3FEwxCUTGHPe8FiGlikOxIAsMfjQcRoghrbKWsMNVD7h3pC5eVFZG0xreO/MutC1CXdFt3Rlrt384V3THVGOQpd/3OZPoannCXihqT10WrjW6R/tbiJuP57J5lFeR6S+vR/PI5S9582PCOV172eAjyl+cum8j6GTx83/7iby7G4ifpUStc+9HyfR3KWlXeiO7f2sPjR/BoRFX+PcVe6yH5KJTdcWcmB7dHQmPeRuopG5tRkJGAoK/cQgDwntkWB4K8gIhWPdLGREbte9INFQ1fPCvCwaCWsdou4ttBUaKXmNSCq4ATtjZm9HZoPpqgxzoO05uA/qTMvpnXdFGcZAQC/lv7jlpJ/T71XLSdgFd7PlZLdt591yGp6zWy0nWGs52Y9c79dy6jCHFvKiDk8/X3vUgdOvAOOHfwA=</diagram><diagram name="Copy of Copy of Page-1" id="AEppd9Ga2jvaG4ghRlaq">7V1bl6I4EP41PvYcbkJ47HZ6Z/Z2zpwzD7P7yEha2UXiYGx1f/2GJgEh2MbWUIHphz4tBUSpy5fKlyqduLPV/lMerZd/khinE8eK9xP348RxbM/x2b9CcuCSILBLySJPYi6rBV+T/zAXWly6TWK8aVxICUlpsm4K5yTL8Jw2ZFGek13zsieSNt91HS2wJPg6j1JZ+i2J6bKUoqlVyz/jZLEU72xb/MwqEhdzwWYZxWR3JHIfJ+4sJ4SWr1b7GU4L7Qm9TLPPT7/9+u33H/79903yuHu8D9y7crBfLrmleoQcZ/S2QwdBOfZzlG65wvjD0oPQYE62WYyLUayJ+7BbJhR/XUfz4uyOOQ2TLekqZUc2e/mUpOmMpCRnxxnJ2EUPcbRZvtxenFd8FP7IzzineH9kSP5onzBZYZof2CX8rCMMyv30zvG5YFdbvZItjyw+FQaPuKctqsFrbbIXXKGXKNcfq3KDAFy5ri0p15a0y8ZhOIPPazbarEvweUr2hTq1qdL1pg1V2o6sSdShSKRNj46kR++GenwiGeWTgo10uihquajturCKDaayGmM2NfFDktMlWZAsSh9r6UMTDupr/iBkzVX8D6b0wFUabSlpGoCpNT/8Vdz/YSoO/z4+93HPBy+PDvzoMrtsyDaf49e8iiufRvkCvzagzzGyUM2rds5xGtHkuRnqt48GV4qGDjMaHw1tlLmrEBwMZjpApc9oOI6Fo9A4EQ0bmpN/q3zRrSRi8p04rmWFoaUjchzFyBGuejZyTiFk5RNdxn4Z5j7Po8PRBWuSZHRz9C5fCsGR2zntt7CtlueUQ9Z+VH3OK1xrqjUTgIpZ+MxATl/l/NV8vUp+HwCrNYBAQkhEE8v+s4immgu8CdGuMxoaUjKnYOzZzLK0GNtXNbZjrrFDCfjcAQKf156NgYHPs2QlDjqG4AEzNDaGvI4UrEdj2x8s223m/F4tOGHyl6MvOE+YCnBuAJaGRmDpm5YCXpsyFLy61qWA15Ggjju30ooAJ0zrnljlVWOUDstvq21+sQ/5EMtJr2OuHxJT0U4v+vNF5dTP3GnLl9nwIe4qsEUmMHngy0TqKLYVfAd4W8H3JMWOgqEOPGiPBd6vuTlDrSst9ZX3drwrcb7yCX0MddBHWurLTOoYGGoDZhm5fGUUFLUXAnM1PgjfCbiQEonfeVALdILadUYLYWewwZDUwobnjX3tHqs+YwvoHRtJDQ59ASxzOaAoUoVM4aomRhEIYTgumlqYFxhNb0NTT8Me1gMBCMUImF3pRQBFmjpo1yLroql7WVMGAy+oA6OplZM/k6ctJCV/kjcMorPAlyoGDWjbkMs/RqJcA9o2kLxsGcIGS2AbVpyJ5PaXIe6v+FKZAHTbBoJdEcC2bSBVaj9EinNjP20baBxtG22UgW/bQAPPMnvLKgUen40cdOEiWELI29dq+SFEnQ0aR9uGeZnBONo2JL8HpoWRqW0bumg9ocnziHZh28ZliHad0d7bNhSNrVq7J+Y5E409jraNoD0bAwOf+DijiSFtKaAyYF5Y/9pjDIU/UduGNj9QbdvQi6VvWgqgNmXYS9tGCMK5GOAiGi0vm9Y6scq79X4YOtUfoteHxt62AZ/6GTxtyUXxQ9xVgC+oDWVSZhTbCuBtG6HMyoyCoQZv2whBeJkBMtQitM/v7VzL59y+TEtiqHspsQnlKosxMNQGzDIyUzMKihq8drn68tqfZiUlMr/zqHZt9qqvmsu23kvOFa2tSq2Iyc5IazsS+o2CpzYA/YA5gOHEkSpqVs5qZCDBFqeMonmjMjAwpN6Gre6le8O2QL4pADDJ0owCiny1rv4Nia/uZXFpWz7sZKWdsNaW8qsmgWZPXl2/vOGnlGu24Rj+jy0RJ+42Lzq/Zxcwne7rk+zVovjP3MrKmYs41h37Q5Pih1kwzsTg7NOW45dXSz7Isj/aBTytBoaOnoYoTRYZO5wzMxdT20ORSybzKL3nJ1ZJHKenUtqmZzf6I449baoxo3VaGa3TwZPYQUdG6+jzkq5Gn3cvgfQSH5nnJV0dS7fwkuL7FKcNT5miUvLuLYrfb9BqDnVd2Vsqz7jSW9hh/WtUZZ5S/6iX+/g/</diagram></mxfile>
2111.04670/main_diagram/main_diagram.pdf ADDED
Binary file (11.8 kB). View file
 
2111.04670/paper_text/intro_method.md ADDED
@@ -0,0 +1,27 @@
1
+ # Introduction
2
+
3
+ Neural architecture search ([NAS]{acronym-label="NAS" acronym-form="singular+short"}) is an extremely challenging problem, and in the last few years a huge number of solutions have been proposed in the literature [@elsken2018neural; @NEURIPS2020_NAGO; @Liu2019_DARTS; @ZophLe17_NAS; @Pham2018_ENAS]. From genetic algorithms to Bayesian optimisation, many approaches have been evaluated, each presenting different strengths and weaknesses, with no clear winner overall. At its core, [NAS]{acronym-label="NAS" acronym-form="singular+short"} is a combinatorial problem for which no algorithm can guarantee to find the global optimum in polynomial time. For example, the commonly used [DARTS]{acronym-label="DARTS" acronym-form="singular+short"} [@Liu2019_DARTS] search space, even with simplifying constraints, contains $8^{14} \approx 4.3 \times 10^{12}$ architectures, an unreasonably large number to explore. Given the sheer size of the search space, it is highly unlikely that [NAS]{acronym-label="NAS" acronym-form="singular+short"} algorithms are able to reliably find the global optimum, and in fact recent research has shown that local search algorithms can perform comparably to the state of the art [@white2020local; @ottelander2020local]. Furthermore, thanks to a number of benchmarks, we know that many architectures perform similarly to one another [@ying2019bench; @dong2019NASbench201; @siems2020bench; @Yang2020NASEFH].
4
+
5
+ ![Three architecture cells from [NAS-Bench-201]{acronym-label="NAS-Bench-201" acronym-form="singular+short"} [@dong2019NASbench201] giving very similar validation performance. Each edge represents an operation: either [`conv1`$\times$`1`]{style="color: orange"} or [`conv3`$\times$`3`]{style="color: blue"}. From the viewpoint of existing encodings, they represent three distinct architectures which all need to be evaluated. The [ANASOD]{acronym-label="ANASOD" acronym-form="singular+short"} framework assumes that they all belong to the same operation distribution ($4\times$[`conv3`$\times$`3`]{style="color: blue"} and $2\times$[`conv1`$\times$`1`]{style="color: orange"} in 6 operations) and does not repeatedly re-evaluate each one.](Figs/DistNas-arch_example2.png){#fig:example_archs width="45%"}
6
+
7
+ <figure id="fig:nasbench101_dist" data-latex-placement="t">
8
+ <figure>
9
+ <embed src="Figs/n201_cifar10-valid_variability.pdf" style="width:100.0%" />
10
+ </figure>
11
+ <figure>
12
+ <embed src="Figs/n201_cifar100_variability.pdf" style="width:100.0%" />
13
+ </figure>
14
+ <figure>
15
+ <embed src="Figs/n201_ImageNet16-120_variability.pdf" style="width:100.0%" />
16
+ </figure>
17
+ <figure>
18
+ <embed src="Figs/n301_variability.pdf" style="width:100.0%" />
19
+ </figure>
20
+ <figcaption>To what extent does the <span data-acronym-label="ANASOD" data-acronym-form="singular+short">ANASOD</span> encoding determine performance? We randomly draw <span class="math inline">200</span> <span data-acronym-label="ANASOD" data-acronym-form="singular+short">ANASOD</span> encodings in 4 tasks. Within each, we draw <span class="math inline">5</span> architectures <em>for each encoding</em> and show the mean <span class="math inline">±</span> 1 standard deviation (black and <span style="color: gray">gray</span>, respectively) of the top-100 encodings vs those of <em>all 1,000 sampled architectures</em> (<span style="color: red">red</span>). Architectures sampled from the same encoding usually perform similarly and encodings that on average perform better also have smaller variability. </figcaption>
21
+ </figure>
22
+
23
+ Knowing that most architectures perform similarly and that the true global optimum is very unlikely to be found, one can ask whether the current paradigm of searching for a specific architecture is the way forward. Differently from prior work, we argue that tackling [NAS]{acronym-label="NAS" acronym-form="singular+short"} with an approximate algorithm that learns a distribution rather than a specific architecture allows resources to be used more efficiently. An intuitive understanding can be had from Figure [1](#fig:example_archs){reference-type="ref" reference="fig:example_archs"}. Indeed, the accuracy distribution for each encoding is shown to have small standard deviation in Figure [2](#fig:nasbench101_dist){reference-type="ref" reference="fig:nasbench101_dist"}, which motivates the optimization over encodings rather than architectures. The key issue is that comparing every single architecture is intractable and unnecessary: intractable due to the sheer size of the considered search spaces, and unnecessary because we experimentally know that small differences in the architecture have little to no effect on the final result [@xie2019exploring; @NEURIPS2020_NAGO; @Yang2020NASEFH].
24
+
25
+ Rather than searching for a specific architecture, we propose to find an approximate solution by instead searching for the optimal operation distribution, defined as the relative proportion of each operation (e.g. `conv3`$\times$`3`, `conv5`$\times$`5`, `maxpool`, \...) in the whole architecture. Once the operation distribution is defined, we can sample from it, seamlessly generating architectures of variable length. As we experimentally show, this simple re-framing of the [NAS]{acronym-label="NAS" acronym-form="singular+short"} problem enables us to significantly improve the sample efficiency of all exploitative methods. Indeed, [ANASOD]{acronym-label="ANASOD" acronym-form="singular+short"} is orthogonal to existing [NAS]{acronym-label="NAS" acronym-form="singular+short"} solutions, and we experimentally show how it improves over Bayesian optimisation, local search and random search. Not only is this approach more sample-efficient, but it has less potential for overfitting, as we show by successfully transferring to new datasets.
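+ As a minimal sketch of this sampling step (illustrative only: the operation names, the 6-edge cell size and the multinomial draw of operation counts below are placeholder choices, not necessarily the exact sampler), one concrete architecture can be drawn from an encoding as follows:
+
+ ```python
+ import numpy as np
+
+ # Candidate operations and an example encoding: the relative proportion of
+ # each operation in a cell (the probabilities sum to 1).
+ OPS = ["conv1x1", "conv3x3", "maxpool"]
+ encoding = np.array([2 / 6, 4 / 6, 0.0])
+
+ def sample_architecture(encoding, n_edges=6, seed=None):
+     """Draw one concrete cell (one operation per edge) from the encoding."""
+     rng = np.random.default_rng(seed)
+     counts = rng.multinomial(n_edges, encoding)        # how many ops of each type
+     ops = [op for op, c in zip(OPS, counts) for _ in range(c)]
+     rng.shuffle(ops)                                   # random assignment to edges
+     return ops
+
+ print(sample_architecture(encoding, seed=0))
+ ```
+
+ Because only the proportions of the operations matter, the three cells of Figure [1](#fig:example_archs){reference-type="ref" reference="fig:example_archs"} are simply different draws from the same encoding.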
26
+
27
+ To summarise, we propose searching for an approximate rather than exact solution to the [NAS]{acronym-label="NAS" acronym-form="singular+short"} problem. This is easily applicable to most existing approaches and enables a substantial speed-up (from sample-efficiency) without a sacrifice in performance. We empirically show this both on the [NAS]{acronym-label="NAS" acronym-form="singular+short"} benchmarks [@dong2019NASbench201; @siems2020bench] and with open-domain experiments on [CIFAR-10]{acronym-label="CIFAR-10" acronym-form="singular+short"} and [CIFAR-100]{acronym-label="CIFAR-100" acronym-form="singular+short"}.
2111.15000/main_diagram/main_diagram.drawio ADDED
The diff for this file is too large to render. See raw diff
 
2111.15000/paper_text/intro_method.md ADDED
@@ -0,0 +1,191 @@
1
+ # Introduction
2
+
3
+ Machine learning has been adopted in many domains, including high-stakes applications such as healthcare [2, 29], finance [46], and criminal justice [3]. In these critical domains, interpretability is essential in determining whether we can trust predictions made by machine learning models. In computer vision, there is a growing stream of research that aims to produce accurate yet interpretable image classifiers by integrating the power of deep learning and the interpretability of case-based reasoning [2, 4, 28, 45]. These models learn a set of *prototypes* from training images, and make predictions by comparing parts of the input image with prototypes learned during training. This enables explanations of the form "this is an image of a painted bunting, because *this* part of the image looks like *that* prototypical part of a painted bunting," as in Figure 1(a). However, existing prototype-based models for computer vision use
4
+
5
+ ![](_page_0_Figure_12.jpeg)
6
+
7
+ (b) An explanation using a deformable prototype
8
+
9
+ Figure 1. How an input image of a painted bunting is compared with (a) a regular (non-deformable) prototype and (b) a deformable prototype of the painted bunting class (overlaid on its source image).
10
+
11
+ spatially rigid prototypes, which cannot explicitly account for geometric transformations or pose variations of objects.
12
+
13
+ Inspired by recent work on modeling geometric transformations in convolutional neural networks [5, 16, 17, 53], we propose a *deformable prototypical part network* (*Deformable ProtoPNet*), a case-based interpretable neural network that provides spatially flexible *deformable prototypes*. In a Deformable ProtoPNet, each prototype is made up of several prototypical parts that *adaptively change their relative spatial positions* depending on the input image. This enables each prototype to detect object features with a higher tolerance to spatial transformations, as the parts within a prototype are allowed to move. Figure 1(b) illustrates the idea of a deformable prototype; when an input image is compared with a deformable prototype, the prototypical parts within the deformable prototype adaptively change their relative spatial positions to detect similar parts of the input image. Consequently, a Deformable ProtoPNet can explicitly capture pose variations, and improve both model accuracy and the richness of explanations provided.
14
+
15
+ The main contributions of our paper are as follows: (1) We developed the first prototypical case-based interpretable neural network that provides spatially flexible deformable prototypes. (2) We improved the accuracy of case-based interpretable neural networks by introducing angular margins to the training algorithm. (3) We showed that Deformable ProtoPNet can achieve state-of-the-art accuracy on the CUB-200-2011 bird recognition dataset [47] and the Stanford Dogs [18] dataset.
16
+
17
+ # Method
18
+
19
+ We will first discuss the general formulation of a non-deformable prototype, as defined in previous work (e.g., [4]). Let $\mathbf{p}^{(c,l)}$ denote the *l*-th prototype of class *c*, represented as a tensor of the shape $\rho_1 \times \rho_2 \times d$ with $\rho = \rho_1 \rho_2$ spatial positions, and let $\mathbf{p}_{m,n}^{(c,l)}$ denote the d-dimensional vector at the spatial location (m, n) of the prototype tensor $\mathbf{p}^{(c,l)}$, with $m \in \{-\lfloor\rho_1/2\rfloor,...,\lfloor\rho_1/2\rfloor\}$ and $n \in \{-\lfloor\rho_2/2\rfloor,...,\lfloor\rho_2/2\rfloor\}$. (A $3\times 3$ prototype has $\rho_1=\rho_2=3$ and $m, n \in \{-1, 0, 1\}$.) Let **z** denote a tensor of image features with shape $\eta_1 \times \eta_2 \times d$, produced by passing an input image through some feature extractor (e.g., a CNN), and let $\mathbf{z}_{a,b}$ denote the d-dimensional vector at the spatial location (a,b) of the image-feature tensor **z**. In the previous work [4], the prototype's height and width satisfy $\rho_1 \leq \eta_1$ and $\rho_2 \leq \eta_2$, and its depth is the same as that of z. We can interpret each prototype as representing a patch in the input image, and we can compare a prototype with each $\rho_1 \times \rho_2$ patch of an image-feature tensor using an $L^2$-based similarity function. Mathematically, for each spatial position (a, b) in an image-feature tensor z, a regular non-deformable prototype computes its similarity with a $\rho_1 \times \rho_2$ patch of z centered at (a, b) as:
20
+
21
+ $$g(\mathbf{z})_{a,b}^{(c,l)} = \operatorname{sim}\left(\sum_{m} \sum_{n} \|\mathbf{p}_{m,n}^{(c,l)} - \mathbf{z}_{a+m,b+n}\|_{2}^{2}\right), \quad (1)$$
22
+
23
+ where sim is a function that inverts an $L^2$ -distance (in the latent space of image features) into a similarity measure. In a ProtoPNet [4] and a ProtoTree [28], an $L^2$ -based similarity was used to compare a prototype and an image patch in the latent space, presumably because it is intuitive to think about similarity as "closeness" in a Euclidean space.
24
+
25
+ In a ProtoPNet [4], a prototype (prototypical part) is a spatially contiguous patch, regardless of the number of its spatial positions $\rho$ . For example, Figure 1(a)(right) illustrates a $3\times 3$ non-deformable prototype that can be used in a ProtoPNet. In a Deformable ProtoPNet, we define a prototypical part within a (deformable) prototype to be a $1\times 1$ patch (of shape $1\times 1\times d$ ) within a prototype tensor (of shape $\rho_1\times \rho_2\times d$ ) (see Figure 2). In particular, we
26
+
27
+ use $\hat{\mathbf{p}}^{(c,l)}$ to denote the l-th deformable prototype of class c, again represented as a tensor of the shape $\rho_1 \times \rho_2 \times d$ with $\rho = \rho_1 \rho_2$ spatial positions, and we use $\hat{\mathbf{p}}^{(c,l)}_{m,n}$ to denote the (m,n)-th prototypical part within the deformable prototype $\hat{\mathbf{p}}^{(c,l)}$ . Figure 1(b)(right) illustrates a deformable prototype of 9 spatial positions (represented as a $3 \times 3 \times d$ tensor), where each spatial position is viewed as an individual prototypical part that can move around, and represents a semantic concept that is *spatially decoupled* from other prototypical parts. For notational consistency, we use $\hat{\mathbf{z}}$ to denote a tensor of image features that will be compared with a deformable prototype $\hat{\mathbf{p}}^{(c,l)}$ , and we use $\hat{\mathbf{z}}_{a,b}$ to denote the d-dimensional vector at the spatial location (a,b) of the image-feature tensor $\hat{\mathbf{z}}$ .
28
+
29
+ In a Deformable ProtoPNet, we require all prototypical parts $\hat{\mathbf{p}}_{m,n}^{(c,l)}$ (a d-dimensional vector) of all deformable prototypes $\hat{\mathbf{p}}^{(c,l)}$ to have the same $L^2$ length:
30
+
31
+ $$\|\hat{\mathbf{p}}_{m,n}^{(c,l)}\|_2 = r = 1/\sqrt{\rho},$$
32
+ (2)
33
+
34
+ so that when we represent a deformable prototype $\hat{\mathbf{p}}^{(c,l)}$ as a stacked vector of its constituent prototypical parts $\hat{\mathbf{p}}_{m,n}^{(c,l)}$ , all deformable prototypes have the same $L^2$ length, which is equal to $\|\hat{\mathbf{p}}^{(c,l)}\|_2 = \sqrt{\rho r^2} = 1$ (i.e., all deformable prototypes are unit vectors). We also require every spatial location (a,b) of every image-feature tensor $\hat{\mathbf{z}}$ to have the same $L^2$ length:
35
+
36
+ $$\|\hat{\mathbf{z}}_{a,b}\|_2 = r = 1/\sqrt{\rho}.$$
37
+ (3)
38
+
39
+ With equations (2) and (3), we can rewrite the sum of squared $L^2$ distances in equation (1) as: $\sum_m \sum_n \|\hat{\mathbf{p}}_{m,n}^{(c,l)} - \hat{\mathbf{z}}_{a+m,b+n}\|_2^2 = \sum_m \sum_n (2r^2 - 2\hat{\mathbf{p}}_{m,n}^{(c,l)} \cdot \hat{\mathbf{z}}_{a+m,b+n})$. With similarity function $\operatorname{sim}(\kappa) = -(\kappa/2 - 1)$, the similarity (defined in equation (1)) between a deformable prototype $\hat{\mathbf{p}}^{(c,l)}$ of shape $\rho_1 \times \rho_2 \times d$ and a $\rho_1 \times \rho_2$ patch, centered at (a,b), of the image-feature tensor $\hat{\mathbf{z}}$ becomes:
40
+
41
+ $$g(\hat{\mathbf{z}})_{a,b}^{(c,l)} = \sum_{m} \sum_{n} \hat{\mathbf{p}}_{m,n}^{(c,l)} \cdot \hat{\mathbf{z}}_{a+m,b+n}$$
42
+ (4)
43
+
44
+ *before* we allow the prototype to deform. Note that equation (4) is equivalent to a convolution between $\hat{\mathbf{p}}^{(c,l)}$ and $\hat{\mathbf{z}}$ , but with the added constraints given by equations (2) and (3).
45
+
46
+ To allow a deformable prototype $\hat{\mathbf{p}}^{(c,l)}$ to deform, we introduce offsets to enable each prototypical part $\hat{\mathbf{p}}_{m,n}^{(c,l)}$ to move around when the prototype is applied at a spatial location (a,b) on the image-feature tensor $\hat{\mathbf{z}}$ . Mathematically, equation (4) becomes:
47
+
48
+ $$g(\hat{\mathbf{z}})_{a,b}^{(c,l)} = \sum_{m} \sum_{n} \hat{\mathbf{p}}_{m,n}^{(c,l)} \cdot \hat{\mathbf{z}}_{a+m+\Delta_1,b+n+\Delta_2},$$
49
+ (5)
50
+
51
+ where $\Delta_1 = \Delta_1(\hat{\mathbf{z}}, a, b, m, n)$ and $\Delta_2 = \Delta_2(\hat{\mathbf{z}}, a, b, m, n)$ are functions depending on $\hat{\mathbf{z}}$ , a, b, m, and n (further explained in Section 3.2). These offsets allow us to evaluate the similarity between a prototypical part $\hat{\mathbf{p}}_{m,n}^{(c,l)}$ and
52
+
53
+ the image feature $\hat{\mathbf{z}}_{a+m+\Delta_1,b+n+\Delta_2}$ at a deformed position $(a+m+\Delta_1,b+n+\Delta_2)$ rather than the regular grid position (a+m,b+n). Since $\Delta_1$ and $\Delta_2$ are typically fractional, we use feature interpolation to define image features at fractional positions (discussed in Section 3.2). We further require interpolated image features to have the same $L^2$ length of r as those image features at regular grid positions, namely:
54
+
55
+ $$\|\hat{\mathbf{z}}_{a+m+\Delta_1,b+n+\Delta_2}\|_2 = r = 1/\sqrt{\rho}.$$
56
+ (6)
57
+
58
+ Note that equation (5) is equivalent to a deformable convolution [5, 53] between $\hat{\mathbf{p}}^{(c,l)}$ and $\hat{\mathbf{z}}$ , but with the added constraints given by equations (2) and (6).
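+ A rough NumPy sketch of this computation is given below. It is illustrative only: it assumes the offsets for the current center have already been predicted and, for brevity, rounds fractional positions to the nearest grid point, whereas the model itself uses the norm-preserving interpolation introduced in Theorem 3.1.
+
+ ```python
+ import numpy as np
+
+ def deformable_similarity(z_hat, proto, offsets, a, b):
+     """Sketch of Eq. (5): sum over prototypical parts of the dot product with
+     the image feature at the part's deformed position.
+
+     z_hat   : (H, W, d) image features, each position already scaled to length r
+     proto   : (rho1, rho2, d) prototype, each part already scaled to length r
+     offsets : (rho1, rho2, 2) predicted (delta1, delta2) for this center (a, b)
+     """
+     H, W, _ = z_hat.shape
+     rho1, rho2, _ = proto.shape
+     score = 0.0
+     for i in range(rho1):
+         for j in range(rho2):
+             m, n = i - rho1 // 2, j - rho2 // 2               # relative part position
+             d1, d2 = offsets[i, j]
+             y = int(np.clip(np.rint(a + m + d1), 0, H - 1))   # nearest grid point
+             x = int(np.clip(np.rint(b + n + d2), 0, W - 1))
+             score += proto[i, j] @ z_hat[y, x]                # one part's contribution
+     return float(score)
+ ```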
59
+
60
+ It is worth noting that similarity defined in equation (5) has a simple geometric interpretation. Let $\theta(\mathbf{v}, \mathbf{w})$ denote the angle between two vectors, and let
61
+
62
+ $$g(\hat{\mathbf{z}})_{a,b,m,n}^{(c,l)} = \hat{\mathbf{p}}_{m,n}^{(c,l)} \cdot \hat{\mathbf{z}}_{a+m+\Delta_1,b+n+\Delta_2}$$
63
+ (7)
64
+
65
+ denote the contribution of the prototypical part $\hat{\mathbf{p}}_{m,n}^{(c,l)}$ to the similarity score of the prototype. Note that equation (7) is exactly equal to $r^2\cos\left(\theta(\hat{\mathbf{p}}_{m,n}^{(c,l)},\hat{\mathbf{z}}_{a+m+\Delta_1,b+n+\Delta_2})\right)$, i.e., proportional to the cosine similarity between $\hat{\mathbf{p}}_{m,n}^{(c,l)}$ and $\hat{\mathbf{z}}_{a+m+\Delta_1,b+n+\Delta_2}$. Since both $\hat{\mathbf{p}}_{m,n}^{(c,l)}$ and $\hat{\mathbf{z}}_{a+m+\Delta_1,b+n+\Delta_2}$ have the same $L^2$ length r (equations (2) and (6)), all prototypical parts and all (interpolated) image features live on a d-dimensional hypersphere of radius r. This means that an interpolated image feature vector $\hat{\mathbf{z}}_{a+m+\Delta_1,b+n+\Delta_2}$ is considered similar (has a large cosine similarity) to a prototypical part $\hat{\mathbf{p}}_{m,n}^{(c,l)}$ only when the angle between them is small on the hypersphere.
66
+
67
+ A similar geometric interpretation also holds between an entire deformable prototype and image features at deformed positions. Let $\hat{\mathbf{z}}_{a,b}^{\Delta}$ denote the interpolated image features $\hat{\mathbf{z}}_{a-\lfloor \rho_1/2 \rfloor + \Delta_1, b-\lfloor \rho_2/2 \rfloor + \Delta_2}, \; ..., \; \hat{\mathbf{z}}_{a+\lfloor \rho_1/2 \rfloor + \Delta_1, b+\lfloor \rho_2/2 \rfloor + \Delta_2}$ at $\rho$ deformed positions, stacked into a column vector. Note that $\hat{\mathbf{z}}_{a,b}^{\Delta}$ has $L^2$ length $\|\hat{\mathbf{z}}_{a,b}^{\Delta}\|_2 = 1$ . We can then rewrite equation (5) as:
68
+
69
+ $$g(\hat{\mathbf{z}})_{a,b}^{(c,l)} = \hat{\mathbf{p}}^{(c,l)} \cdot \hat{\mathbf{z}}_{a,b}^{\Delta} = \cos(\theta(\hat{\mathbf{p}}^{(c,l)}, \hat{\mathbf{z}}_{a,b}^{\Delta})),$$
70
+
71
+ which is exactly the cosine similarity between $\hat{\mathbf{p}}^{(c,l)}$ and $\hat{\mathbf{z}}_{a,b}^{\Delta}$ . Since both $\hat{\mathbf{p}}^{(c,l)}$ and $\hat{\mathbf{z}}_{a,b}^{\Delta}$ are unit vectors, all deformable prototypes and all collections of interpolated image features at $\rho$ deformed positions live on a $\rho d$ -dimensional hypersphere of radius 1. This means that a collection of interpolated image features $\hat{\mathbf{z}}_{a,b}^{\Delta}$ is considered similar to an *entire* deformable prototype $\hat{\mathbf{p}}^{(c,l)}$ only when the angle between them is small on the hypersphere.
72
+
73
+ With the similarity between a deformable prototype and a collection of image features at deformed locations defined in equation (5), we now define the similarity score between a deformable prototype $\hat{\mathbf{p}}^{(c,l)}$ and an *entire* image-feature
74
+
75
+ ![](_page_3_Picture_10.jpeg)
76
+
77
+ Figure 2. How a deformable prototype is applied to the latent representation of an input image of a painted bunting. (a) The latent input $\hat{\mathbf{z}}$ is fed into the offset prediction function $\delta$ to produce (b) a field of offsets. These offsets are used to (c) alter the spatial position of each prototypical part, which are (d) compared to the input to (e) compute prototype similarity according to equation (5).
78
+
79
+ tensor $\hat{\mathbf{z}}$ to be its maximum similarity to any set of positions:
80
+
81
+ $$g(\hat{\mathbf{z}})^{(c,l)} = \max_{a,b} g(\hat{\mathbf{z}})_{a,b}^{(c,l)}$$
82
+ (8)
83
+
84
+ In our experiments, we trained Deformable ProtoPNets using both $3 \times 3$ and $2 \times 2$ deformable prototypes. A $2 \times 2$ deformable prototype $\hat{\mathbf{p}}^{(c,l)}$ can be implemented as a tensor of the shape $2 \times 2 \times d$ ( $\rho_1 = \rho_2 = 2$ ) with dilation 2 and with $\rho = \rho_1 \rho_2 = 4$ prototypical parts at $(m,n) \in \{(-1,-1),(-1,1),(1,-1),(1,1)\}$ .
85
+
86
+ As in Figure 2, the offsets used for deformable prototypes are computed using an offset prediction function $\delta$ that maps fixed length input features $\hat{\mathbf{z}}$ to an offset field with the same spatial size as $\hat{\mathbf{z}}$ . At each spatial center location, this field contains $2\rho$ components, corresponding to a $(\Delta_1, \Delta_2)$ pair of offsets for each of the $\rho$ prototypical parts.
87
+
88
+ The offsets $(\Delta_1, \Delta_2)$ produced by $\delta$ may be integer or fractional. Prior work [5, 16, 17, 53] uses bilinear interpolation to compute the value of these fractional locations. In contrast, we do not use bilinear interpolation because it is not feasible for a Deformable ProtoPNet, as the similarity function specified in equation (5) relies on the assumption that the image feature vector $\hat{\mathbf{z}}_{a+m+\Delta_1,b+n+\Delta_2}$ is of $L^2$ length r; without this assumption, similarities will no longer be dependent only on the angle between a prototype and image features. Bilinear interpolation breaks this assumption, because when interpolating between two vectors that have the same $L^2$ norm, bilinear interpolation does
89
+
90
+ not preserve the $L^2$ norm for the interpolated vector. This can be informally explained geometrically: bilinear interpolation chooses a point on the hyperplane that intersects the four interpolated points, meaning that it will never fall on the hypersphere for a fractional location. We use an $L^2$ norm-preserving interpolation function, introduced in Theorem 3.1, to solve this problem. A proof of Theorem 3.1 can be found in the supplement.
91
+
92
+ Theorem 3.1. Let $\hat{\mathbf{z}}_1, \hat{\mathbf{z}}_2, \hat{\mathbf{z}}_3, \hat{\mathbf{z}}_4 \in \mathbb{R}^n$ be vectors such that $\|\hat{\mathbf{z}}_i\| = r$ for all $i \in 1, 2, 3, 4$ for some constant r, and let $\hat{\mathbf{z}}^2$ denote the element-wise square of a vector. For some constants $\alpha \in [0,1]$ and $\beta \in [0,1]$ , the bilinear interpolation operation $\mathbf{z}_{\text{interp}} = (1-\alpha)(1-\beta)\hat{\mathbf{z}}_1 + (1-\alpha)\beta\hat{\mathbf{z}}_2 + \alpha(1-\beta)\hat{\mathbf{z}}_3 + \alpha\beta\hat{\mathbf{z}}_4$ does not guarantee that $\|\mathbf{z}_{\text{interp}}\|_2 = r$ . However, the $L^2$ norm-preserving interpolation operation $\mathbf{z}_{\text{interp}} = \sqrt{(1-\alpha)(1-\beta)\hat{\mathbf{z}}_1^2 + (1-\alpha)\beta\hat{\mathbf{z}}_2^2 + \alpha(1-\beta)\hat{\mathbf{z}}_3^2 + \alpha\beta\hat{\mathbf{z}}_4^2}$ guarantees that $\|\mathbf{z}_{\text{interp}}\|_2 = r$ .
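+ The following small numerical check (with randomly generated vectors of length r, purely for illustration) contrasts the two interpolation rules of Theorem 3.1:
+
+ ```python
+ import numpy as np
+
+ def bilinear(z1, z2, z3, z4, alpha, beta):
+     """Standard bilinear interpolation: does NOT preserve the L2 norm."""
+     return ((1 - alpha) * (1 - beta) * z1 + (1 - alpha) * beta * z2
+             + alpha * (1 - beta) * z3 + alpha * beta * z4)
+
+ def norm_preserving(z1, z2, z3, z4, alpha, beta):
+     """Theorem 3.1: interpolate the element-wise squares, then take the root."""
+     return np.sqrt((1 - alpha) * (1 - beta) * z1**2 + (1 - alpha) * beta * z2**2
+                    + alpha * (1 - beta) * z3**2 + alpha * beta * z4**2)
+
+ r = 0.5
+ rng = np.random.default_rng(0)
+ zs = [r * v / np.linalg.norm(v) for v in rng.standard_normal((4, 8))]
+
+ print(np.linalg.norm(bilinear(*zs, 0.3, 0.7)))         # generally != r
+ print(np.linalg.norm(norm_preserving(*zs, 0.3, 0.7)))  # equals r up to float error
+ ```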
93
+
94
+ Finally, the theoretical framework of a deformable prototype requires that every spatial location $\hat{\mathbf{z}}_{a,b}$ and $\hat{\mathbf{p}}_{m,n}^{(c,l)}$ of $\hat{\mathbf{z}}$ and $\hat{\mathbf{p}}^{(c,l)}$ have $L^2$ length r. In our implementation, we guarantee this by always normalizing and scaling both the image features extracted by a CNN at every spatial location (a, b) of a convolutional output z, as well as every prototypical part of a deformable prototype, to length r, before they are used in computation. Specifically, we compute $\hat{\mathbf{z}}_{a,b} = r\mathbf{z}_{a,b}/\|\mathbf{z}_{a,b}\|_2$ for every spatial location (a,b) of the convolutional output $\mathbf{z}$ and $\hat{\mathbf{p}}_{m,n}^{(c,l)} = r\mathbf{p}_{m,n}^{(c,l)}/\|\mathbf{p}_{m,n}^{(c,l)}\|_2$ for every (m, n)-th part of a deformable prototype. However, this normalization is undefined when $\|\mathbf{p}_{m,n}^{(c,l)}\|_2 = 0$ or $\|\mathbf{z}_{a,b}\|_2 = 0$. This is a problem because zero padding and the ReLU activation function can both create a feature vector z with $L^2$ norm 0. We address this problem by appending a uniform channel of a small value $\epsilon = 10^{-5}$ to $\mathbf{p}^{(c,l)}$ and z prior to normalization. In particular, an all-0 feature vector $\mathbf{z}_{a,b}$ produced by a CNN will become $[0 \dots 0 \epsilon]$, which has an $L^2$ norm of $\epsilon$.
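+ A per-vector sketch of this normalization (in the network the $\epsilon$ channel is appended along the channel dimension of the whole tensor; here it is shown for a single feature vector) might look like:
+
+ ```python
+ import numpy as np
+
+ def normalize_with_eps(v, r, eps=1e-5):
+     """Scale a feature vector to L2 length r after appending an epsilon
+     channel, so that all-zero vectors (e.g. produced by zero padding
+     followed by ReLU) remain well defined."""
+     v = np.append(v, eps)
+     return r * v / np.linalg.norm(v)
+
+ print(normalize_with_eps(np.zeros(4), r=0.5))   # -> [0. 0. 0. 0. 0.5]
+ ```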
95
+
96
+ Figure 3 gives an overview of the architecture of a Deformable ProtoPNet. A Deformable ProtoPNet consists of a CNN backbone f that maps an image $\mathbf x$ to latent image features $\mathbf z$ , which are normalized to length r at each spatial location into $\hat{\mathbf z}$ , followed by a deformable prototype layer g that contains deformable prototypes as defined in Section 3.2, and a fully connected last layer h, which combines the similarity scores produced by deformable prototypes into a class score for each class.
97
+
98
+ Similar to [4], the training of a Deformable ProtoPNet proceeds in three stages.
99
+
100
+ **Stage 1: Stochastic gradient descent (SGD) of layers before last layer.** We perform stochastic gradient descent over the parameters of f and g while keeping h fixed. By doing so we aim to learn a useful feature space where the image features $\hat{\mathbf{z}}_{a,b}^{\Delta}$ of inputs of class c are clustered around prototypes $\hat{\mathbf{p}}^{(c,l)}$ of the same class, but separated from those of other classes on a hypersphere. To achieve this, we use the cluster and separation losses as in [4] and adapted for the angular space in [45]. The cluster and separation losses are defined as:
101
+
102
+ $$\ell_{\text{clst}} = -\frac{1}{N} \sum_{i=1}^{N} \max_{\hat{\mathbf{p}}^{(c,l)}: c = y^{(i)}} g(\hat{\mathbf{z}}^{(i)})^{(c,l)}$$
103
+ (9)
104
+
105
+ and
106
+
107
+ $$\ell_{\text{sep}} = \frac{1}{N} \sum_{i=1}^{N} \max_{\hat{\mathbf{p}}^{(c,l)}: c \neq y^{(i)}} g(\hat{\mathbf{z}}^{(i)})^{(c,l)}$$
108
+ (10)
109
+
110
+ respectively, where N is the total number of inputs, $\hat{\mathbf{z}}^{(i)}$ is the image feature tensor normalized and scaled at each spatial location for input i, $y^{(i)}$ is the label of $\mathbf{x}^{(i)}$ , and all other values are as defined previously.
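+ Assuming the per-prototype similarity scores of equation (8) have already been collected into an $N \times P$ matrix, the two losses reduce to a masked maximum per image; a small NumPy sketch (not the training code) is:
+
+ ```python
+ import numpy as np
+
+ def cluster_and_separation_losses(scores, proto_class, labels):
+     """scores      : (N, P) similarity of every prototype on every image (Eq. (8))
+     proto_class : (P,)   class index of each deformable prototype
+     labels      : (N,)   ground-truth class of each image
+     Returns (l_clst, l_sep) as in Eqs. (9) and (10); every class is assumed
+     to own at least one prototype."""
+     same = proto_class[None, :] == labels[:, None]                 # (N, P) mask
+     l_clst = -np.mean(np.max(np.where(same, scores, -np.inf), axis=1))
+     l_sep = np.mean(np.max(np.where(~same, scores, -np.inf), axis=1))
+     return l_clst, l_sep
+ ```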
111
+
112
+ We were inspired by recent work in margin-based softmax losses [8, 23, 24, 43, 44] to further encourage this clustering structure by modifying traditional cross entropy loss. Specifically, we use a new form of cross entropy: *subtractive margin cross entropy*. This is defined as:
113
+
114
+ $$CE^{(-)} = \sum_{i=1}^{N} -\log \frac{\exp(\sum_{c,l} w_h^{((c,l),y^{(i)})} g^{(-)}(i)^{(c,l)})}{\sum_{c'} \exp(\sum_{c,l} w_h^{((c,l),c')} g^{(-)}(i)^{(c,l)})},$$
115
+ (11)
116
+
117
+ where $w_h^{((c,l),c')}$ denotes the last layer connection between the similarity of prototype $\hat{\mathbf{p}}^{(c,l)}$ and class c',
118
+
119
+ $$g^{(-)}(i)^{(c,l)} = \begin{cases} g(\hat{\mathbf{z}}^{(i)})^{(c,l)} & \text{if } c = y^{(i)} \\ \max_{a,b} \cos(\lfloor \theta(\hat{\mathbf{p}}^{(c,l)}, \hat{\mathbf{z}}_{a,b}^{\Delta,(i)}) - \phi \rfloor_{+}) & \text{else} \end{cases}$$
120
+ (12)
121
+
122
+ for a fixed margin $\phi=0.1$ , and $\lfloor \ \rfloor_+$ denotes the ReLU function. Subtractive margin cross entropy encourages a well separated feature space by artificially decreasing the angle between a deformable prototype $\hat{\mathbf{p}}^{(c,l)}$ of class c and the collection of deformed image features $\hat{\mathbf{z}}_{a,b}^{\Delta,(i)}$ from the i-th training image with $y^{(i)} \neq c$ , thereby inflating the cosine similarity between the two and increasing the class score of the incorrect class c. In order to reduce the value of this loss, the network has to try harder to counter the introduced margin $\phi$ by further increasing the angle between a deformable prototype $\hat{\mathbf{p}}^{(c,l)}$ and an image feature $\hat{\mathbf{z}}_{a,b}^{\Delta,(i)}$ of an incorrect class, resulting in a stronger separation between classes on the latent hypersphere.
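+ In code, the margin only modifies the wrong-class similarities before they enter the softmax; a small sketch (again assuming per-prototype similarities that have already been maximised over spatial positions) is:
+
+ ```python
+ import numpy as np
+
+ def subtractive_margin(cosine, same_class, phi=0.1):
+     """Sketch of Eq. (12): inflate wrong-class similarities by subtracting the
+     margin phi from the angle (clamped at zero); same-class scores unchanged.
+     Applying the margin after the spatial max is equivalent because the map
+     theta -> cos(max(theta - phi, 0)) is monotone in the similarity.
+
+     cosine     : (N, P) per-prototype similarity scores
+     same_class : (N, P) True where the prototype's class matches the label
+     """
+     theta = np.arccos(np.clip(cosine, -1.0, 1.0))
+     inflated = np.cos(np.maximum(theta - phi, 0.0))
+     return np.where(same_class, cosine, inflated)
+ ```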
123
+
124
+ While the subtractive margin encourages separation between classes, it does not encourage diversity between prototypes within a class and between prototypical parts within
125
+
126
+ ![](_page_5_Figure_0.jpeg)
127
+
128
+ Figure 3. The architecture for Deformable ProtoPNet.
129
+
130
+ a prototype. In particular, we have observed that deformations without further regularization often result in duplications of prototypical parts within a prototype. Inspired by [45], we discourage this behavior by introducing an orthogonality loss between prototypical parts. This is formulated as:
131
+
132
+ $$\ell_{\text{ortho}} = \sum_{c} \|\mathbf{P}^{(c)}\mathbf{P}^{(c)\top} - r^2 \mathbf{I}^{(\rho L)}\|_F^2, \tag{13}$$
133
+
134
+ where L is the number of deformable prototypes in class c, $\rho L$ is the total number of prototypical parts from all prototypes of class $c, \mathbf{P}^{(c)} \in \mathbb{R}^{\rho L \times d}$ is a matrix with every prototypical part of every prototype from class c arranged as a row in the matrix, and $\mathbf{I}^{(\rho L)}$ is the $\rho L \times \rho L$ identity matrix. The matrix multiplication $\mathbf{P}^{(c)}\mathbf{P}^{(c)\top}$ in equation (13) contains an inner product between every pair of prototypical parts in class c; by encouraging this to be close to the scaled identity matrix $r^2\mathbf{I}^{(\rho L)}$ , we encourage the prototypical parts to be orthogonal to one another and thereby increase the diversity of semantic concepts represented by prototypical parts. This loss differs from [45] because it encourages orthogonality at both the prototype and the prototypical part level. Whereas the orthogonality loss in [45] encourages orthogonality between each pair of prototypes within a class, equation (13) encourages orthogonality between all prototypical parts within a class. A visualization of the space created by these terms can be seen in Figure 4.
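+ A compact sketch of equation (13) (illustrative; the `parts_per_class` layout is an assumption of this sketch, not the actual implementation):
+
+ ```python
+ import numpy as np
+
+ def orthogonality_loss(parts_per_class, r):
+     """parts_per_class: list with one (rho*L, d) matrix per class, whose rows
+     are that class's prototypical parts (each of L2 length r)."""
+     loss = 0.0
+     for P in parts_per_class:
+         gram = P @ P.T                                   # pairwise inner products
+         target = (r ** 2) * np.eye(P.shape[0])
+         loss += np.linalg.norm(gram - target, "fro") ** 2
+     return loss
+ ```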
135
+
136
+ With these loss terms defined, our overall loss term during the first stage of training is:
137
+
138
+ $$\ell = CE^{(-)} + \lambda_1 \ell_{\text{sep}} + \lambda_2 \ell_{\text{clst}} + \lambda_3 \ell_{\text{ortho}}$$
139
+ (14)
140
+
141
+ where $\lambda_1=0.01, \lambda_2=0.1$ and $\lambda_3=0.1$ are hyperparameters chosen empirically. As in [4], the last layer connection between each deformable prototype and its class is set to 1; all other connections are set to -0.5.
142
+
143
+ **Stage 2: Projection of prototypes.** We project each deformable prototype $\hat{\mathbf{p}}^{(c,l)}$ onto the most similar collection of interpolated image features $\hat{\mathbf{z}}_{a,b}^{(\Delta)}$ from some training image $\mathbf{x}$ . Mathematically, this is formulated as:
144
+
145
+ $$\mathbf{p}^{(c,l)} \leftarrow \underset{\hat{\mathbf{z}}_{a,b}^{(\Delta)}}{\operatorname{argmax}} \cos (\theta(\hat{\mathbf{p}}^{(c,l)}, \hat{\mathbf{z}}_{a,b}^{(\Delta)})). \tag{15}$$
146
+
147
+ ![](_page_5_Figure_10.jpeg)
148
+
149
+ ![](_page_5_Figure_11.jpeg)
150
+
151
+ (a) Within the same class, prototypes are encouraged to be orthogonal to one another.
152
+
153
+ (b) Within each prototype, prototypical parts are encouraged to be orthogonal to one another.
154
+
155
+ Figure 4. A representation of the latent space learned by Deformable ProtoPNet.
156
+
157
+ In this projection scheme, we allow projection onto fractional locations and we project all prototypical parts within each prototype onto the *same* training image, which promotes cohesion among parts of a single prototype.
158
+
159
+ **Stage 3: Optimization of the last layer.** In this stage, we fix all other model parameters and optimize over the last layer connections h. Let $w_h^{((c,l),c')}$ be defined as previously described. For this stage, we use the loss function:
160
+
161
+ $$\ell_{\text{last}} = \text{CE} + \lambda_1 \sum_{c,l} \sum_{c' \neq c} |w_h^{((c,l),c')}|,$$
162
+ (16)
163
+
164
+ where $\lambda_1 = 10^{-3}$ and CE is standard cross entropy loss. The second term on the right-hand side of equation (16) discourages negative reasoning processes as explained in [4].
165
+
166
+ With prototype projection, we can associate each deformable prototype $\hat{\mathbf{p}}^{(c,l)}$ with a training image $\mathbf{x}$ . Before we describe how we map a prototypical part to an image patch, we first define a downsampling factor $\gamma$ as the ratio of spatial downsampling between the original image and the image-feature tensor. For images of spatial size $224 \times 224$ with latent representations of spatial size of $14 \times 14$ , we have $\gamma = \frac{224}{14} = 16$ .
167
+
168
+ In order to produce a visualization of a deformable prototype on an input image x, we pass the image x through the network. This enables us to obtain the center location
169
+
170
+ | Model | VGG16 | VGG19 | Res34 | Res50 | Res152 | Dense121 | Dense161 |
171
+ |-----------------------------------|-------|-------|-------|-------|--------|----------|----------|
172
+ | Baseline | 70.9 | 71.3 | 76.0 | 78.7 | 79.2 | 78.2 | 80.0 |
173
+ | ProtoPNet [4] | 70.3* | 72.6* | 72.4* | 81.1* | 74.3* | 74.0* | 75.4* |
174
+ | Def. ProtoPNet $(3 \times 3, nd)$ | 67.9 | 71.1 | 76.7 | 85.9 | 78.2 | 76.5 | 79.6 |
175
+ | Def. ProtoPNet $(3 \times 3)$ | 73.8 | 75.4 | 76.7 | 86.1 | 78.8 | 76.4 | 79.7 |
176
+ | Def. ProtoPNet $(2 \times 2,nd)$ | 76.0 | 76.1 | 76.8 | 86.4 | 79.2 | 78.9 | 80.8 |
177
+ | Def. ProtoPNet $(2 \times 2)$ | 75.7 | 76.0 | 76.8 | 86.4 | 79.6 | 79.0 | 81.2 |
178
+
179
+ Table 1. Accuracy of Deformable ProtoPNets with $3 \times 3$ and $2 \times 2$ deformable prototypes, compared to that of the baseline models, ProtoPNets, and Deformable ProtoPNets without deformations (denoted (nd)) across different base architectures. \*We retrained ProtoPNets on full images for direct comparison, and report the accuracy numbers on full images here, so the numbers differ from those reported in [4].
180
+
181
+ (a',b') that produced the best similarity for prototype $\hat{\mathbf{p}}^{(c,l)}$ :
182
+
183
+ $$(a',b') = \operatorname*{argmax}_{a,b} g(\hat{\mathbf{z}})_{a,b}^{(c,l)}.$$
184
+
185
+ We can then retrieve the $(\Delta_1, \Delta_2)$ offset pair for each prototypical part $\hat{\mathbf{p}}_{m,n}^{(c,l)}$ from the location (a',b') of the offset field $\delta(\hat{\mathbf{z}})$ . These values tell us that the prototypical part $\hat{\mathbf{p}}_{m,n}^{(c,l)}$ is compared to the image features at spatial location $(a'+m+\Delta_1,b'+n+\Delta_2)$ . To find the corresponding patch in the original image, we create a square bounding box in the original image centered at $(\gamma(a'+m+\Delta_1),\gamma(b'+n+\Delta_2))$ of height and width $\gamma$ for each prototypical part $\hat{\mathbf{p}}_{m,n}^{(c,l)}$ . Since all parts of a deformable prototype must be projected onto (interpolated) image features from the same image, this allows us to view all parts of a prototype on the same image.
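+ The coordinate arithmetic for a single prototypical part is simple; a sketch (with the downsampling factor $\gamma = 16$ from above as the default) is:
+
+ ```python
+ def part_bounding_box(a, b, m, n, delta1, delta2, gamma=16):
+     """Map a prototypical part applied at latent center (a, b), with relative
+     position (m, n) and offsets (delta1, delta2), to a gamma x gamma box
+     (top, left, bottom, right) in the input image."""
+     cy = gamma * (a + m + delta1)                       # box center, row
+     cx = gamma * (b + n + delta2)                       # box center, column
+     half = gamma / 2
+     return (cy - half, cx - half, cy + half, cx + half)
+
+ # e.g. the part at (m, n) = (1, -1) applied at latent center (7, 7), no offset
+ print(part_bounding_box(7, 7, 1, -1, 0.0, 0.0))         # (120.0, 88.0, 136.0, 104.0)
+ ```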
186
+
187
+ Figure 5 shows the reasoning process of a Deformable ProtoPNet in classifying a test image x. In particular, for a given image x and for every class c, a Deformable ProtoPNet tries to find evidence for x belonging to class c, by comparing the latent features $\hat{\mathbf{z}}$ with every learned deformable prototype $\hat{\mathbf{p}}^{(c,l)}$ of class c. In Figure 5, our Deformable ProtoPNet tries to find evidence for the test image being a vermilion flycatcher by comparing the image's latent features with each deformable prototype (whose constituent prototypical parts are visualized in the "Prototypical parts" column) of that class. As shown in the figure, the prototypical parts within a deformable prototype, which can be visualized as patches from some training image, can adaptively change their relative spatial positions as the deformable prototype is scanned across the input image to compute a prototype similarity score at each center location according to equation (5). The maximum score across all spatial locations is taken according to equation (8), producing a single "similarity score" for the prototype, which is multiplied by a class connection score from the fully connected layer h to produce a prototype contribution score. These are summed across all prototypes, yielding a final score for the class.
188
+
189
+ ![](_page_6_Figure_7.jpeg)
190
+
191
+ Figure 5. The reasoning process of a Deformable ProtoPNet with $2\times 2$ deformable prototypes.
2112.05125/main_diagram/main_diagram.drawio ADDED
The diff for this file is too large to render. See raw diff
 
2112.05125/paper_text/intro_method.md ADDED
@@ -0,0 +1,82 @@
1
+ # Introduction
2
+
3
+ Identifying the author of a text is one of the most versatile NLP tasks, with applications ranging from plagiarism detection to forensics and monitoring the activity of cyber-criminals. The task spans several decades and was tackled using statistical linguistics [@mendenhall1887; @zipf1932selected; @MostellerWallace64], and, more recently, machine learning [@svmmail; @dt; @Koppel-JMLR-2007; @Stamatatos2009]. Due to the typically small data setup of authorship analysis tasks, deep learning methods had a slow start in this domain. Nevertheless, inspired by the impressive performance of pre-trained language models, such as BERT [@Devlin2019], these methods gained traction in authorship analysis as well. showed that Convolutional Siamese Networks are more robust than a BERT-based method over large-scale authorship attribution tasks.
4
+
5
+ investigated pre-trained language models for cross-topic and cross-domain authorship attribution and showed that BERT and ELMo [@Peters:2018] achieve the best results while being the most stable approaches. introduced BERT for Authorship Attribution (BertAA) in which they combine BERT with stylometric features for authorship attribution. The authors remarked that their model is unable to perform text similarity evaluation in the context of the more difficult authorship verification problem, which we tackle.
6
+
7
+ One of the main contributors to the active developments in authorship analysis is the PAN organizing team, who proposed annual shared tasks since 2009. While the recent PAN 2020 and 2021 contests increased the difficulty of the authorship verification task and enabled large-scale model training [@Kestemont2020OverviewOT; @kestemont2021], there are still possible generalization issues due to the dataset splits. For instance, models from 2020 trained on the *closed-set* data surprisingly performed better on the *open-set* test data (which is arguably more difficult) than on the *closed-set* test data [@kestemont2021]. We therefore argue that in order to better assess the generalization capabilities of authorship verification systems, a more fine-grained approach to dataset splitting may be needed. To address these issues, we introduce a set of five carefully designed splits of the publicly available PAN dataset, ranging from the easiest setup (*closed-set*) to the most difficult (*open-set*). Our splits progressively alleviate information leaks in the test data, enabling a more confident evaluation.
8
+
9
+ Furthermore, we release our splits publicly[^2] to allow other members of the community to evaluate models on any computing infrastructure, enabling the evaluation of large-scale models. Along with the new splits, we introduce a set of BERT-based models [@Devlin2019] to serve as baselines for future research. We show that these language models are competitive with the top scoring O2D2 (*out-of-distribution detector*) system at PAN 2021 [@boenninghoff:2021].
10
+
11
+ We also qualitatively inspect the models' predictions and find that they often rely on named entities to verify authorship. We show that by replacing the named entities in the dataset with placeholders, we are able to obtain significant performance gains and better generalization capabilities.
12
+
13
+ :::: center
14
+ ::: {#tab: baselines}
15
+ ------------ ------ ---------- ---------- ------ ------
16
+ Closed 93.5 **96.4** 95.6 75.6 72.2
17
+ Clopen 94.0 96.0 **97.4** 74.1 71.1
18
+ Open UA 92.6 **92.6** 90.2 78.6 68.5
19
+ Open UF 91.4 **95.1** 91.6 79.9 79.0
20
+ Open All 80.6 67.5 **88.7** 75.6 76.9
21
+ PAN Closed 93.3 **93.5** \- 74.7 74.2
22
+ PAN Open 93.3 **94.4** \- 75.3 74.5
23
+ ------------ ------ ---------- ---------- ------ ------
24
+
25
+ : Overall scores of several models evaluated on our public test splits. We also list the reported results of the models on the private PAN splits. BERT is competitive with the top-scoring O2D2 model of the PAN 2021 competition and both methods greatly outperform the PAN baselines (Naive and Compression). O2D2 performs poorly on our most difficult split Open All. However, performance on the development set is much closer to the BERT results on the test set. The neural models were trained on the large training splits. $^\dagger$Models trained on the small datasets. $^\ast$Models evaluated on the validation set.
26
+ :::
27
+ ::::
28
+
29
+ In summary, **our contributions** are threefold:
30
+
31
+ **1.** We **introduce five splits**, based on the PAN dataset, with a decreasing degree of shared information between train and test sets. These configurations enable benchmarking large models, providing a robust evaluation environment, on which we run several BERT-based baselines.
32
+
33
+ **2.** Using explainable AI (XAI) methods, we find that **BERT-like models focus on named entities** to determine authorship. We replace them with placeholders and retrain our models, which brings a significant performance boost.
34
+
35
+ **3.** We introduce the DarkReddit dataset for authorship verification, which is significantly different in style to the fanfictions in PAN. We test the **generalization capabilities** of the models trained on PAN, by evaluating them on DarkReddit. Our previous finding is further confirmed by our model trained without named entities, which generalizes better and improves the overall metric by 5.6%.
36
+
37
+ :::: center
38
+ ::: {#tab: splits}
39
+ Split
40
+ --------------------- -- -- -- --
41
+ Closed
42
+ Clopen$^{\diamond}$
43
+ Open Unseen Authors
44
+ Open Unseen Fandoms
45
+ Open All
46
+
47
+ : Dataset splits sorted from the easiest (train authors and fandoms are seen in the validation and test sets) to the most difficult (train authors and fandoms are not found in the test set). $^{\diamond}$Some of the authors of the Different Authors train pairs in Clopen may be unknown at test time, making it a mix between Closed and Open.
48
+ :::
49
+ ::::
50
+
51
+ :::: table*
52
+ ::: center
53
+ +-----------------------------------------------------+------------------------------+------------------------------+----------------------------+----------------------------+------------------------------+
54
+ | | Closed$_{XL}$ | Clopen$_{XL}$ | Open UA$_{XL}$ | Open UF$_{XL}$ | Open All$_{XL}$ |
55
+ +:====================================================+:========:+:======:+:========:+:========:+:======:+:========:+:========:+:======:+:======:+:========:+:======:+:======:+:========:+:======:+:========:+
56
+ | 2-4 (lr)5-7 (lr)8-10 (lr)11-13 (lr)14-16 **Metric** | **O2D2** | **cB** | **B** | **O2D2** | **cB** | **B** | **O2D2** | **cB** | **B** | **O2D2** | **cB** | **B** | **O2D2** | **cB** | **B** |
57
+ +-----------------------------------------------------+----------+--------+----------+----------+--------+----------+----------+--------+--------+----------+--------+--------+----------+--------+----------+
58
+ | *F1* | **96.6** | 93.8 | 95.0 | 96.1 | 95.6 | **96.8** | **93.6** | 85.4 | 89.2 | **95.2** | 88.6 | 90.8 | 45.0 | 74.8 | **86.9** |
59
+ +-----------------------------------------------------+----------+--------+----------+----------+--------+----------+----------+--------+--------+----------+--------+--------+----------+--------+----------+
60
+ | *F0.5* | 94.3 | 93.4 | **94.5** | 94.0 | 96.5 | **97.0** | **89.6** | 87.0 | 88.1 | **94.5** | 92.3 | 88.8 | 70.1 | 84.6 | **87.2** |
61
+ +-----------------------------------------------------+----------+--------+----------+----------+--------+----------+----------+--------+--------+----------+--------+--------+----------+--------+----------+
62
+ | *c@1* | **95.9** | 93.3 | 94.5 | 95.4 | 95.4 | **96.6** | **91.5** | 84.8 | 88.2 | **93.1** | 88.8 | 90.0 | 65.2 | 78.9 | **87.0** |
63
+ +-----------------------------------------------------+----------+--------+----------+----------+--------+----------+----------+--------+--------+----------+--------+--------+----------+--------+----------+
64
+ | *AUC* | **98.7** | 98.0 | 98.6 | 98.4 | 99.1 | **99.4** | **95.8** | 92.4 | 95.3 | **97.6** | 96.5 | 96.9 | 89.5 | 91.1 | **94.0** |
65
+ +-----------------------------------------------------+----------+--------+----------+----------+--------+----------+----------+--------+--------+----------+--------+--------+----------+--------+----------+
66
+ | *overall* | **96.4** | 94.7 | 95.6 | 96.0 | 96.7 | **97.4** | **92.6** | 87.4 | 90.2 | **95.1** | 91.5 | 91.6 | 67.5 | 82.3 | **88.7** |
67
+ +-----------------------------------------------------+----------+--------+----------+----------+--------+----------+----------+--------+--------+----------+--------+--------+----------+--------+----------+
68
+ :::
69
+ ::::
70
+
71
+ We use the PAN 2020 authorship verification dataset[^3]. A document $d_i$ belongs to a fandom (topic) $f_i$ and is written by an author $a_i$. Author verification is a classification task which asks whether documents $d_i$ and $d_j$ are written by the same author (SA) or by different authors (DA). The dataset comes in two sizes: small (52k examples) and large (275k examples). The latter one is better suited for deep learning models.
72
+
73
+ The PAN 2020 competition is a *closed-set* verification setup, meaning that the unseen test set contains documents whose authors and fandoms were seen at training time. The PAN 2021 competition has a more difficult *open-set* setup, in which the training data is the same as in 2020, but the submitted solutions are privately tested against document pairs from previously unseen authors and fandoms. The PAN testing infrastructure makes it difficult to evaluate large models quickly. To this end, we release several dataset splits, ranging from the easier *closed-set* setup to the more difficult *open-set* variants. We summarize the splits in Tab. [2](#tab: splits){reference-type="ref" reference="tab: splits"} and provide a more detailed description in the Supplementary Material [6](#sec: apx_pan_splits){reference-type="ref" reference="sec: apx_pan_splits"}. For each split, we propose a small (XS) and a large (XL) version.
74
+
75
+ To test an even more difficult scenario than our *open-set* splits, we created a small authorship verification dataset. This dataset could be used to benchmark the generalization capabilities of AV models, while also being useful for cybersecurity applications. The dataset was constructed by crawling $1026$ samples from $\texttt{/r/darknet}$[^4], a subreddit dedicated to discussions about the Darknet. There is an equal number of same-author and different-author pairs, resulting in a balanced dataset. A document has 2,500 words on average, $9$ times fewer than in the PAN 2020 splits. The two datasets also differ in other aspects (e.g., topics, authors, text purpose, self-contained messages). We illustrate the differences between PAN and DarkReddit examples in Figure [1](#fig:pan_reddit_samples){reference-type="ref" reference="fig:pan_reddit_samples"}.
76
+
77
+ <figure id="fig:pan_reddit_samples" data-latex-placement="t!">
78
+ <div class="center">
79
+ <embed src="figs/pan_vs_reddit_wide1.pdf" style="width:99.0%" />
80
+ </div>
81
+ <figcaption>A PAN-2020 sample compared to a DarkReddit one. Note the contrasting style, topics, vocabulary and size between the two samples.</figcaption>
82
+ </figure>
2112.11909/main_diagram/main_diagram.drawio ADDED
@@ -0,0 +1 @@
 
 
1
+ <mxfile host="Electron" modified="2021-12-11T04:55:15.083Z" agent="5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) draw.io/14.9.6 Chrome/89.0.4389.128 Electron/12.0.16 Safari/537.36" etag="qQVSABNaZ1c_OU4KBPBd" version="14.9.6" type="device"><diagram id="BhiHeWdFNuaIeRZcwZBa" name="第 1 页">7V1ZdyJHsv41OufeB+XJLXJ5lJDk8dg9Y0/73B7Piw+SkBobCTWit/n1NxKogspMqCqogkqENCM3WwH5RXyxZGTEmeg9ffth0n/5+G58PxidcXr/7UxcnXHOpBL4H3fP9/k9nEo1v+dxMrxfPGt5x/vhfweLO+ni3s/D+8Fr4YnT8Xg0Hb4U77wbPz8P7qaF+/qTyfhr8WkP41HxXV/6j4t3pMs73t/1R9nnICtP/DC8n36c32+4Xt7/t8Hw8WP23kzZ+SNP/ezJi0u/fuzfj7+u3CWuz0RvMh5P5/96+tYbjNz6ZSszf93NmkfzDzYZPE+rvOCv6Y+PP50/2Isef/z9yj79+Ul/OF/A8Tr9nn3lwT2uwOLmeDL9OH4cP/dH18t7Lyfjz8/3A3dVireWz/l5PH7BOxne+edgOv2+gLP/eTrGuz5On0aLRwffhtN/u5cTWNz6feWRq2+LK89ufM9uPE8n3+cvMgDZHb+vPrp84eyWe6UkjBlKBUj8Y6ni2aO/DCbDp8F0MFm85GH8PF18Xibx9ut0Mv5r0BuPxpPZsgilLvnNTf5IJgtidsH7m+FolL95JseLxy6cHOLN5/GzW70QtgWSr+PPk7sMhH//8fTL/c3v5+L67x/+ffVkfrx+Ptcg5890EBV1oj95HEw3vlTnEofaOhjjd598x1dOBqP+dPileLn+Qmse8+flL/1lPMSPzelCxQ0Dwi2AzX7mF1io+zm3lGjOrFUSn6KoKV5//rEXl1xKLi5Y//vK017cE143fAguim8rJF+9Hv5jfsns1soKLO+aaUtcczaj8aU/+rxYv0CZlqripPvrx+F08P6lP0P5K1JmUS1WZdDJTn9yt7gJgdyxmIw6AOhMmlEcV+6/wZ9eb5PsfRlMpoNvK3eFkrJ4VIPxFtssvvbXJT0yseC8jyvUKOl66VqBagskMvxbQyJY6cvZbwWMfCS4+20GCaEVoUJSI43ShmlexAWc8dIoEAK0wWfpECWpCTOCciWR3hhQHoLGqCScaQMW34lZoNASiErbOiDSchDv+68fc8Dj/L5Rm25uBP54GJaReA34rCAUkBQFp5ZxaT0Sk1YTxMVQYNxorlSAn+aEcys108Ao5caG8AlGrNEKrLZaGlAZxI3DJ1gd9I5FBRXzdE5KEqFDrlHRWES5uMif3jwrSnlQ327Fs1v6eSW+HSv4dSR38yKeXcxpW3G27kb919fhXcE7YwXvjEd9udCmihu4gd38tsw+lbppmRZ5Dt5a+ZOB/FnU9xInrq57hc4zUcYJsWBGSaQqj6iUJdJKyylQdAW5kKuyW/9yhhOLjEip5FJxZVX7vhzP2GqFvn79PJgt9S8ujuP0h8HzYIKu8vg50Coki2lRCYpStLAZETPSHw0fn520oky5UOTSUc8Qw8+LxQNPw/v7mV7G2LKoq74yzG4vPmQkVqzNdRZFi2pjuAKJVoepGPVRTTHQQmMlQnOFXiEBy41SUjm7FnM3OCVKaAwYjEKHAwA2CPNO7AhMH4IdI4y1EixWoKM8DF2JhwvRMCvhzGXsLXBxlxx9TolDZCNN4w0/bt6eFYFWJMUsZVSVFLN8Sy1SDGiKKy/O4cxzfeffMIhegysx4IRqhf6XkMiV1HqEhzaGCEuZZBb1Bv3AdqJkTln8C6373OEL+B6oWAEc1G0ppKToRn2oq8zbqux6EtiD9ikJh1A/jLw2egkYmBJnh7QVlktQWfRbVzmBSwLoKgPaLAraD6L3pZvgWVNP1YIXoLd1AN3E1QjcpEBZDxTl3Q8ng7uZeyauEHx3f5uRH2eWZEy1zLhI4rLN2Q8LXCBGIzG65SRLzjTu5rDQrw3ZdTQavryuczJXwOq/vsz3WB6G3xzAl0WofSy91b/t35n7KMYYMUi49zB8diTeEFZcEU0Z+p4C3VMtrC7ihu4oYVZKdG8pQwrQMuK7onML6NmCsMgENKOjgu+65jnNg1oh3dkWqKsQfR28TmN4Xiv32xB2iA1ogV4BcEoNsCD4TAs7DH66495U3HHbISsj1rhFK9Iy58oop2/v3mS8V+reZLpU1bvRgG4JuvCGCyNy3yi3ABjiordiMZLCOFeCaIsBKmw9tcUAAYC3BiREd50U/sQ5owFmQMEhlFH0DRmj1GovWEmPGA4b9+w5XXswYqiais1ULC1iyMpS0vP3miOGUnfPKoKfgBsjuLbUiG4TAzcnri/1AtOCVGkaQnj0XB/dmtu7AcgYstQAZHpX1QCAl5Epy3vtRgrZzl8HMzEtZl1KjOws/yKAUqaEYCaSf+GUGGosUA0M3AZTyAIuHaPQaliJz4a2yl14lZql7ljqBrAzxmFHBTXojCOhe+oirStXMlpQIQ06Sjy2gdghDue8QgzW6H5huKnAAg7dgRc5VCVGXjNmdpGYdnGaFJobppivt4YSyQSTigurLGsrGZp/xU64Uk1myWaFRFqiTKLKUqH9gligQATlSItUzwrGuq5b6kSOxfDRoAoxJbUwVKiOpzJ4lSj0SKmxZq1Cd6gxqSizCcpUkqCrKNAPNKh50i/3SI4ya5dPH43O1QzUOqNzi1Mqb8fKMQ649EA5KItWzK8vmqfsBWhUSQbcdD0CkJtLCBc1no2lbbJ/10rb5De2rqjef4ZGVs3R5xqUWJJenmL/iOIrZrVBxo3kbTqm90ntvjblK0ntTiqhzRTBSaUZfGCpokJwZjvP2+bE263wNq/M22lurmanWguKr0bTBQAFkVKfPo+zB85fZ1hc4BOYfvm2fBD/9bj47+wqt9kdhJDsPvyot/7z8L75G2Z3e+LcvRMpYvbotL/YGrQbzw7UYCYhCHDFhGXK3fKIyQgitQKFUiHQ1ecBMTGODik6+hwkQ7dUZudNCsQEBJg7qcm1ACQm1d4R2KQCgpaqNYU17sSxNUYYjSYs3OsVRDDhWg5QjajbSDYMYVdCGsAAnmqV9wcooLrmOS2gekB3Yd/lmugeEIPWGylZKjTkJnXsWAjVqVqzac8hJ77y0yi6pufABRCNUqIsY5IZqj02AamJNtadAFEYe2SRSQuSdMDKrAgLFJojtEsJGPyhzyYZGlEDChXW5wTXYYK5/XrKpLYyUrzTLU7YHD4cWfFO7XP1UdoQqFz4j+jBnVVJ3IVDZGUO0fU4BDjhfsghLcmOOO8iYdGeXMkegNyuaVYoPusOSm7q9LXbYWSdtefYprWWjEtPvdZadY8MKuufswSv/ZvfJQt2fY
GkJS/wyw68FxQPJUZe7nUPE35vlDWnLRvr6JUJQctFdavsFyumixXdtVhUBzI4scuIDpOwgkaaerXbwiZy0u3VJSaecb3XA1OhN1Qpc1nb60WYK1LfeCWu4cYsrrlq8vAasysUG3E0Axmg+aFUKbf+GEb5+XRpiJFaoieE/9eZFK9giT6Zdv0QAUMwamxWtlBwqDg6ztQy65w2QxlrDWVxkCCrIScnfsC/nr/VxgF/Udl+MVvPHdqq6VB48N7vnYWuP9F25cdLszV11J75H7/EDIKoaTeDF0izwQxuYbwunr88vuv/dPfvv54+fPnh7h0dfxocqJHr0lHT9brQ5JoUjT/X+I08orZrNaRUnHdsfaqln9xC1vX1oGobCu3SEFxppuyM171wQyqXp7ZUCAbgWiPt2S8K62bHk+HjEEUF7/30efAa7Yv1liyyskB80JwVhsD0MmZI5NiBM7etuVGR3BNcvr/+5Qyu2kBtAza9i6vL66uN+O4TNenS1cCV85Y4U7X9KHy9ohY9aKGt0SBiB0r250ixCkWcj4jvi4ec41V/n2/qTEDlbcPqWOS92vu32UeiGzFyvi7GKJZpsJoGIaY1hCJvclfLpxSLJA9BE+tK+SxjVOdFW4UuqUCMMdRS0FwjUK2VHuS+1gpCl9f/+m2DEtaMMmUhpmTzEBF9A8GcNi+cI3wLuMT/4cL3qMttORro5Xcsb7ub3oN69ihchar98HCHP3HVXrtp4Cm7Uotnlit7iQqUc8BuMiLb2gXWLFTajub/Vtw/ZvMAaOb/ESp4qQ84Le/DX3JutK73uL23mO1OB20vtwl+FAdCJXPFBK7cifuepHLrRxWTKJhudIHHR2v8vcjbSKI0RWHVGklSRBzWVfPmucBrfNfGvEoeVtO83o0nmzI7W5Bh2NabXsOlDsloJvc+pdkHPdtf82iK3rjflnwS5CIhXVaOC2mMH2WAUEQiS0m3m4ZsFBo8FB7X7Vuj60Styo4lFzwSRZQRxlqJZkH5Pe0a7Mvfod3zykxWK449FF1V2NComdVppm2jMoZwybhWbuCK0j5bUothrmZIfSCN9Duqth3HZu7wCuP85irlOH3Xn959DIS1e2V0MuCcBpLJQhMtkEmAMuB5bWZ1wkG43bEOqRB2BkZHq3v31dhZ84Nun9Yp9q1JHrwmeRQ21t2NX/pTFMbnudjQYv6uXsJ6B17iVbfpdd3WbE3lpf3srfDyMq2zVFgv2Oam14ZkTe7+BGJ3dX0lLy6acXiEkcRQaajCpQbDIl4qQ8oQEm2KwfgwPInZqd2syOHny0H/Ce95P8DI/K1aGWEUhjyCcrAgBBN+rTYwizGPUEK66NqqyLiiLpkZWaFDwfGVCgR7dpSRrCd0Yd6NJTF08gRrC+McInDUPJkhX77Ns2Xe4YzL4c/vf3tX9ejFbpk7v64znjBbjTg9tC/hmt1E5QPZz83nC/Pr1+rKNjUcTrkzXJJqhjSulN8gWzA3Q0QxhkpsQOvIQTFgKDmKublmQI0WkXw6uqtSSjfsTqGStzchJAwWGhOp3r9uOiNPwahA2YsPQsqHC/oSWJQnow1Tej/yBEj3DKMSSVEOorn/cnHibkQaU9Sg4wBUGd1aL3a733OHB8p/7D3VkXF/aUSRaXTlgGI2gkKgdEjXC06BFxlLyolQSEjcMmQ101q/dxXrW6X6T077n29fX2K80T2vUoX8QzYOVqvBEwbdPtfQw2pQkinPtxTophh0G92xACOYDYlCIk9gCGGEM0uudWzEebEE3R18CycQXPPWWgMyFTsqcoJ7CTdzU4dR4SiGhK6RiK+YqeEd27Y/4b2CN0giNcfQXrhJPNIvjyrHG31GpdAboFxTpFOtI47AHgGP9Wo6Ab4CuHHTdSSyOVjXEMLPCSWm4DraN+CEd9CyHT11BczBnjbe0WzECe9CP3cj0a9WINxuoN9RJjW8o6mCE95LvI0h1kotOKWWIrJJ6zenIZ9HGvq3cJg7smsTLVt5uLubpfqCALzXcxV2QRKnkQiMUiQ+0Gi8MRLzATYUHTB04DSd9VyMtogSkgmlDSiDbpqNZffXPKcFhEONjtSyvCmEEVQMstGTVsICCOk3cUsO4vBs4VtX4tmkYqEsl1wpMNYvBUkO4nAn/c1DjJHVbEaG1WhKhfArtJKDOEyNvnWIXTAl3HYfcEalNjR1og632t88xGrW2A4XHh1mmY8iSRbhMOH91hF2EREHrtAGU8OsCiLkxCBmmyeenfY+dzqVUr5TSYmwnClq3WZ6/cPOxtVtC+CaCXdeRPv5WMC1M0Y5ZQDtergX36DqMWjjKjaZNBaEayHIvVZhQlH0TzVKu7VcUe19jZYLM/POAScZTlKGGfpFRrvST7yEkNqvQGxKiN37KPx8QJH1eNiu+NBSzE9SnLIUK0mokx2DfgG3YaYNpVgbjuZcWHQOqJRbSnHZ+xxYisVJihOWYo7uLVOCGwEg0LHQvqVvSIo5flB3ZsmVjOA3yUrFuiLE8iTEKQuxMERQd9SAomQZf5xRYzKMb0OBWdfuTVGMCDvmFW8+b3cS4o4LsQYiwdXQSYuvl34+uCkhxrdh1kjLUWEoE0Z2S4g3t087CXG3hVgwThQVTkaRjJXRkdCuCSl27yMkUK0B5RSyxqfdEOK8v8fBegB2pXXCjqMns3ZpFeYY1VWEbY4o1+5w6W/4yAW7rQ31/KY23gt2bli5RlztQcW1pZ74XRbXbMXTFlfuz/DYi7jySBPF38YvCDun18/T4dR9538N7saPz8NoN8zuFfy1cczbSqKV5c4GolU3fktMDPupcuMgpEZbLyKFfshGBKzr5SeVcEMjF9JxkEo/GT0E6gGbdVQM9h2rL2ftpogl5Xa4JkTguhjjWjwpCHcH50uInhnMTq6JWMsETbjRRhpr0W8zbc1TkTJ2um3NGnsbs/f9gXmIthxUd2Zw+xAFoQTp7bHZsLwmJsXoFaMMcwy8EEbJwLa2wrWaFWzdSSS2/KUHjnMIqwKVi0utDo9VAHGHjZlVrqurcXOK2sOjwiyoY8Tj+8Z17ixadbrqdpmfvq/hna2Wf4/sFTstl6y2bNWftlP6ALHTbG8AjzTZK9oZp3m01mGyCGGioc7O1FdLZw7o4UIFe3+IKEK43dENx7KSCiOgG2Za7wxOZw0xHJMhhvQNsTomQ1wDjzQNsapgiJPgp0TDiKz34FFoi6qemO2sPoTnL98EHomyV3iUsg20Wg0j1lFfKmGEqpDGPUQYUXIaLakoItp7be9WOkf6CKOIaLezZHm/EU45LLMfU1RXA4807XC0lViK/JRoFBFt7ZWqtuTClC57RVtvvQE8EmWv/cR8bUYRa6kvlShCV4jkDhJFuLYlQLVwFYGSZm5JklGE7kRJU470EUYR+phKmnT6JU36mEqaauCRqB3uxF5pA/yUahRxTDG3Tn8n1RzTTmoNPNJkL5N+SdNa6kslisgEoGtRBLOUcAYCrBDSauWdlU8qijAVouX2rXSO9BFGEWY/u5r74f1cXBK2wxXi5mPEI1E7XGM/utP8lGgUY
Y4p5jaN5FUPqw/HtLNdA49E2Ws/MXibUcRa6kslirA1amL3GkUAJ0opARw4lcymfC4iG5VzWCttd8ems3bYHlMlq22EUw7K7PaYoroaeKRph20n9kob4KdEowh7TDupNv2dVHtMUV0NPBJlr/3EfG1GEWupL5kookIkd5Aowp+Yd5ZsFAGRkZeHsNJ2Z2y6aoeB7mdXcy+8vxSXZO0wREaAvgk8krTDEJnmmSY/pRlFQGTUZsLakvxOKkTmYr4JPBJlr/3E4K1GEeuoL5EoAk4TClsdAcCJQlKSBqRWWgdjhzknhikhmNCG2+yQTY0BhYYoy/DKIKlVlAaxliAoZ0YoKYWgwmw7oNAQKy3Gc1ZSqqn05iByw4lUTAgtBWWWae9tWp4AAKcJhSnLMONADBoihbLDLZUi2HZsRojd+4ChgiLnCm2F6JoUh8bvJMXpSLHixKK5zn+DElxBOFUGqBGam9z3qT+gsOR9Di3FpwmFCUuxGxzIBEOetaC0P327MRm2lhjtZtECaDciSXVMhk8DClOWYYGhkzYoOBIjKIG/7Uixex/K3dBkih+TK38+y8Gl+DShMGUp1pJILhWVFh1WwwLpakqK8X2YUkpyoAyyPi+dkeHTgMKEZVgwRlCkmKUoosiQvnA1JMLubfAzoihbLZD2mB9AHlaItdycnmhp4lvVyW03w9FySFwmijwQOLGqJqtKkqtMXE0CgbbiBm5gk/iWzn5T2fCVVie6hXKm0OArKTWXGiyA9Sh5No4T3JBvoBx41nm3/vBuz+2W1FYS2Loj6IR/TFTMd+fWKxrAji/Ivkq7IxYjg4YuvvSnKIGBHo5Gw5dXp28lOwtBAc1SP31li2z93PbvzL2IaQMXUoK73v1wgm/gJujh2w9ep9lF8yF189srLxXXSNx6kyJVn1+n0Qvwy/2Rk0nmD6xOqottVmwwDTttT+hsCMahhruuWvZ60zLFBs5dPEcELBt1AejsZyfG1FnKvnRaZq48VblVU7c3sPzxOAUsJZS3JR5VhlS1oeNRXb5FPwpoqP03N+hfq7iON6C7Bnwa74TihpVz86//x/ghgKh7Q0J5wL8NAEUNhnQ0/2E+bJpkPsMKbNJNkI8gR0nWnLwF8EK1+nv/aYC+BO3hfyeROa9FXam5T9+f3C1uQpQSA4W6uen1os4l/lDaAnbKAvHsI7OK6Ej1qqAhWlmnneahKomQ2zKQWwTMLY6S3hgNlxtHVtPk+QHmduGE8pMtGKMGl2k7NuUdEh+2UXzW5VuWXljlfEs3xE5lhVXlPlk2W6CqgAIn/lAzJ6OiAWv/1/THx5/OH+xFjz/+fmWf/vykP5wzcZAUx1KM3CjoKjwkCWOGUgES/1iq+OLBXwaTIS6F8yCaTu6VJV2qCpsIQocdGC9zGVdFb/Tppyv118UPX67eX//x54+vj/95/3DOqqYUJ4NRfzr8Mih8kLVkWDdVEXBlWeZBGT85U3zBzpmH6Grpw2qBgWp0el5VDcqrXzfKajG1WBYJNyzOmz3bA8uz4f7Weok86yB53rA8b17YlUDgp8tAyF8/9l/cP+++j4YoyxNRHgHczqX+59v8jv7dX48zXfjn5yleJrfFmfgRFzkx/NqaCsZhY3heXvuLsvtgH/QszA+k+ppe6YtemHEziJvSbUQXknpsxVgsFpydbGBheIEBP2+gQpj+2B/9yqCv1ad3v/52Nf31b48/nDfd5aGIy9y+NrCEzEiiPZ/HWAyS2UpuKlhQMJpAZEEbCdiiy8lDjZrnRcaTWGakjd3OIFldSZpXWTj6zSDk4PgKqKYpeDf5DhNVL5PB03AwGdz/cd+fDg5q0ytH17nHmruo9VPW7qV3nydfipy6R8mwnZKMyAzxpWS8jBzB1ReNwvqmrspZkrcrgAV4uW2//iSEaestgVKvosJWX9Peg9BAmOdPor9YZvoiaUpoy+qx8KQXStYf/a/9yf3BlSgjW7ZKtiVlDU3sEDaosJH4J45D4+H8bmIROkMnsTiEWPBOiQUPDe8rfod9+cfblDmtcb+6DXvVKqr9oB5ywfv+E97xwUE4fH6cRnYdGzXkDZtlaRnR3ta84apTZjnc5z3xb4OKGCm6ieNQdX9nT2bZnsDfI/jdomEeRlGbOeHn/u1g5LFt5aqcyeB1+N9FdxiHxSJxjheHyzO4qo/ORoHevbkZJYJlGdAFqS90t96GQZDgP/c2/scPD6+DXff84wDzI/Gu6mzGHkCvu6XWoaX/z3iAd7zvj+77z/3kPSvNu5XwCEOY9/3pZ+fPXsyZlD64enR66epA8c67WHn61kV14UZLbAPs4Q5/YqkqoYQVbaSqlBDEP4xhZNkujQWSNX1YBU+0Bl7IkBcXvd8u3KeKYPfzoH//tiHUpRtte4cwbMx2/fQynAw2KCD6Iq9vFD9RFpbuHb/QC3X8OXk+4RfBT5mu4Se3qSttP4SsWmLagSRetk1e6mdWPgy5H0czciTndjx5/mMYyd6lET1msrx79HhOSTF2zDfGdoweiye3vTLf9kJJuU3B40nLu6HlWxUucg3Ek7ZzpWTE+jRVmBhfjbD3/XPfnaxD7ph+T5Zosi2QBtJUrodTAaYs179rnso/nL4vruFhCUHZmbQWyzwO4PhxwQnTK2rm9S/sWMkHD1WUU2oJw28R9pxt102/ux/cmtv9osUk8VTFsLIoGd34rPnDXrz07NjO4fZcPZzuBvDwsFecEtilFWE6ao+5204i1LFsrwyzFT0E5n6/0Oyf5AQqCl+FwYNJ6U7BFOlJ8PP4+X7PHHcAmCIFokZ3iuOgzWGiER9htuEY6Sdx33/9uKb8PYLYzY0p6cxSx2GwhMpld4isacrSvSsDLDsSVhzm0BpiFUZtZAfDpgjP4L9jd73Ll5Uzhvn9KwcPS4F1irZyWDtErD7qk/G0v+jLcm4dnotTZ6CJXYVkdr6yglQI/GlGKqSQxHisakWZHznr7BdpGiI29fDbTRrC6Gz6dezefvziZOLz7SMi+RH/6coM6Jq6/G70gVkrUY0dVdMhHZfuoDEro41gWgsOImc/d6PkPZ78s6Vkue7kn25NQ8J4+NPnweyLvPSnTjMeB8+DyZyH3qhiaMSE+0haYu1GxdBxJFtTjKzvUsey3mVFVH57q2K3z22LJ9fCXpr6zjyT0tR3dhC6IxtcKowifj0TF2dc9Z+czjzfvr7MFkaNnM6+vvSfC8KiPn0eZ+0dzx/6T8PR99nLaX6FxROmw3l683nw1X258ZO7UPEps/ehsyvNXRZ3HaZevs0/QPYsrh7dfz98dBZ5ftwYpS83w+Im+7C4IPPPO39BajykzoIz+7mL3nRQFbPiVpSSFRqrLEoutnNrS1o3d4FPk6x2IB1WlXREt0gnzNQmQzp9t300qx93F/7Yd3+nHwezz9efvK7GBLOKnq8uEHt7lNSQM2yINSsM5G0SWVbGUIIZImOtQltjqDC9nWHvQsdZu6CCDL97Hf9jPHnqj1aFbZ2sV5DQakpT40Lu+U5D+nfF6/y20K1/zHTrXwvdauIr/M9MYXq5dV80
E+nlNv5/V9Rp/vGKHznQsvzul9Q1b0MLbUp7vabyd4piBF/bHVCaqL0qWyzPelK2eso2nWtWfmpmqWbEmba5kctOTKF43Zy0r23tA0GDqthyW8eFiZbFNqF+F89fHt/1f7r7919PH778cPeOjj8NIqdwXh0Iz5FWMi0OPsa1vxLXcGMCoZm1UO71Zi39/HED+Eh+f8Nei8CoyFBpqJJCg2Ge0yKlwVBLgpBKUoNEqwMk0e3RrpMhcItPsdFe55xoSlEoDChuKMtOeTTfDTWdnswrfU4XLVSrHK6L9490D5c1kFxpA7kgo2WbSBqTxmaboUbhqtyFF6q2iW63BBN0zd6RQfPUpntHblrWarsHNYu+4oMzPIobzH7O4t3lV553OftthsikYcTY4mqv6xNvOInRlEH93dBLfDdmomHuMkDFdeF0ax+UNlRfn9r1qqAtrhuSCTWaa6685r1Mu6iWKamFoSJ7dGUxdSSlpijxRzA1tpCmwt5YW3NHKjccWzg9MeBKpKNc3hfIcEYEBa5AU4vmG2QIDdME8WWWUSmBUhT7EKp1z2ketwrzYhptyhdnqjLTtRmeUotmIiZtoxhX7St/Pissd4P2LBeaM+npqXTlvUqBYFQJA9mWZ/MwVjAuexv7c63cbztqlp+6INqCZkI4egThNTdniApoN8JRaUkZ1abbWmgrWKGjZs/OAhPuMrxleszkNDV6tLGM+mH0K599FuhXz0IPTKu0CRYdb26tsWCQNalNnDZr18get3byRLUzbE3xtqxf5tQYYrigihkrcLXBGz8tMbSwQlljFKdaRQYadks5N9dZNN09a9998WSg+OvHlIaU0ThJ2Mix6Y3KVpkk0CggCUjBpKYodtIb1OKE0nLtnqEU+uTQwNiNjd/wzbLEKg0whbbcUKSMjpNAd9JCh3e8qNCau/WmihbLs+eYGlDIGVKIrieNshqaE7XvhdplVWqvm7zqCrWzSHOyQMLmWfhgcRvLuEuriGAMnC+MqxEk3DWuhuTGCPSXqcl6Saym3CP75uCKW6URlCnF8E8D1ZBrpKmD1nF1uvez0/9GLGYuK5XJ95wXsBSWCGO1xUhYMXS04yYUOZpKpGyrpM72V3yyjT2nBWgPaEMjA9p3T/fWR5BrQpVBDdXGSuafBE0O0M3ms4sjhvPih/rWNKxMaMOaZjJVak1zpiw1p7niVbenaDIpGG2UsYYbIYtyip+ScIPxPbNoYFwk35qEdWk7KHev1znkEZJpj0oAiLZodQCVnArwXJ7UqCTSOOhQw/i2mweTNpVE5spsVsj0qCTSt7tTjuU6r2T/fiUzhMcD9w7xBT8Zhq18zCTAFW/QGJQNUT+shcjYs9xCVJ44l1sI9FU019btGBkjpfTO2gggwpXIMKU5tdlgwRZ8zbDtzctkcD94GD7PjlS/3n0cPIV9v7pxWsNP+zXeqs2IQhmn8EBi+LhUeR2nsKHDaSIlsa5vYgM0Mvr005X66+KHL1fvr//488fXx/+8fzjvQCpOCYL8KwCXhKKP46XiXCNJpalkViglONiQl0XsDAtKKnK41fg6ga/kDSxgtCye68o1xcESbkRk+3UtWxrputZJ4xaG4jqxBrzJ6NJE+s1dXv/rt2B1tq6Ddzq8rG5n86PxqECChU2KL/HD9Cjh4PoV815+x/K2u+k9qGePzhscb2yAu2pXIs4pjR949o4YKUXXtVMrEb1aJXdVZIK3pS3R2kf/EH8SxqMFkLJHGXFHtET24+U/8VHh6rUMY1paakIjwoGowo+J8KMkoI0RVFMBaIuacBnWQB6LKU+Q+zvGCsP/HHIvT0WJoUCV254DjTdDG9gxyGNuxQnyopZTTZhz2D3EksU81hDzhHkRc0Cn1Kocc/AwZwRjBqo4/iqMEUTnMY/tQ58wL2aaKCdaW5kfwxeJYx7boD5hXsQcKLFU5ph7LWzT4/bYucET5kXMrSFixYcrHjpPT81jm8onyIt9OzBqBraM1Fgx34dqTrViXGvJhAWpwr4dncJcsGh3pBPmFYLz7mJ6CsVKMd0cfZ9TornbJQQBTCvDVFgN2jHMT6FYuR774bdKHfRTLFYO+ub4mxJGOQVmmLRcailF2LemY6CfgrFyx9wPwFPH/BSMlWO+OQBPkN1P4Vg56Bsj8PQUnZ/2SksxLwnB01N0WqUCt/06v5onJNf0qeRR7DeVvawW2cWLSiLnIzeXGpTW2LWQPONvGsTSFf++8uiG1qBBK0/NCQD674wqAC2y/oaLy7lm4gwM01ZZbplf8jUXrcUVlwjXfBPkG2GEGzFgXA2Gf+p6LpbBm2zRVXSNZFVognOSrJNkbWF4TpLVimQx9DypMEpzJhhzjmcbolXyLoeWrSo9n45XtkqdGp71tK7g1cChvBoRqf0/oVhAsbpvqg6HYpXeEceLYms8784xaORxhVGstEx7KaeGeL7kXQ7N81WaVxyvbFVgiEg7gs0lN4dgiCr9iY8XxdYYQoAlwnLUT+ASjFXFtmANMUTJu0ggCqQFd4bQiqBrWMsMAaEj2Pv5fSBdWw//4QH++TPWn7tZjvYpnvHpXVxdXl/FZC0fElRPsqqnNS2SPMh8n1IWKxJm072XkhKZB+Sai2qgy5/IUWVhiMKvTplwY4eYaKDfVxzziNt4bc+Q4C5vzq7hDE3YBTu7xpvX7n/dlIUKA6PakgUwhBqxTE/zhGUhS/DvqZdgnVlQ60furhm34wuSd0AQ3G8oYJ6Z2l2QmsuYQ1UDuBBMQ/QKRylQVQSzhQO4kWT7tT6z8uwSHL9cXp1dskDs3jytuAPSoNS6Ublp8UqeQPGF4OIsGxZywn4FexBEWbaEXiaNfSS7iHp/cXN22Tu7Vu6vPQlBpC6KEaZhTdVrajIQczKVczJRDE7Q+9BLjBEpXRax66Sxj2Q0XThBz8zJ7keqpBQGl8t4gkPS2IcZx/fXv7SN+sfxZPhffFZ/tJsYHDDDIKQklppcDBJh/+iXr7Av1XYTI1eHRpWUmksNFsCbtXQurCRcSdecSykjuY0sKA9X0DlqmWnbZdne/e374Oqff799+s+HkRxe/uPP//tT1+z9VHmNKkgrbGYob14vIyheYJXhVOD/dWQMBs96DYWLt/rKllhIyQoS2PlpvxtlpPLAEuqDpVXWTagaXi1xRIVdzzUcUWtdahOHIcAUl44UBFCW1XlnR0gJjyyeYEQLqpXrBuj+xtqfOe7VGOMDcE0NzzrKNp/djR5HiK5spZnKG8HbebUZnVULG+7qxA0vkjRjjAimKShqAV3kSKs5pomxkisNLi+rlYxYPQ6Eofi7FoAUGF6ptf5q0UMBa5be44v7/sA83MUYQt2Zwe1DFJsSAWikY52/wiom3JyAkUwKJYCD1bq1rYtoCX5TvqXcuPyllJ9DWBkouwaonQFRhFvkejYrTHAj61rDo/aE1uPA4/vGde4sWvxI+CkzEB7vqHIDcWD6qj0ytcPqkktTwvRVIeQ6RjwSpa9aUd3WaK3DZLEZHj2/tzP31dKZw3q5vHLo1oEAA6NeYpQVVmIo5/4mHmL
wChWd7Ztwvjs+3bXRFeLndGyCXgNUQja6QlB9jHikaaNF5e77HeenVEMMcUwRuajef6CzClEh5j5GPBKlr/0E6G2GGGu5L6EQQ1TeCu1AiME0xhBCKwbUuL+sWMCRXIghKm/ft2nCxe47et210RUi6HRsAqwBKiEbXSGoPkY8ErXRNTa4O81PyYYYxxSRi0YysgdVCHlMm+I18EiTvuR+AvQ2Q4y13JdQiJEVYKURYghLmATD9fzv4qOmGmHIClF2+xY8l4AjjDDkMW1s5+KSsInez9Z15/BI1ETX2OHuND+lGmHIYwrIZSMJ2cMqxDHtidfAI1H62k983maEsZb7EoowIuPW11qRw0cYVBHLDAeqlPurE9/EgApRdvsmHNbtlx5BiAEVAuhkbEIuLunaaDim2vIaeKRpoyON99Lkp1RDjFgXvHTVpZGE7GEV4pj2xGvgkSh97SdAbzPEWMt9KYUYKZ311pIIsEpQ15xDULb4qMlGGJ047A1HfNhbHdO+di4u6ZpodUyl5TXwSNNEZ32Jk+enVCMMdUwBeS5NCdPXMe2J18AjUfpK/7D3Wu5b6yJTB4BrzKcV41aYrF9EZ51gldJZcMkJx2Wj2lLq/hZbLCYXgKhOHAVXu8PTXQt+TNveKv2j4OqYCs9r4JGmBdc19r87zU+pBiD6mOL1GqPoO6sQFSLyY8QjUfpK/yj4Wu5LZ4tDZeeBOjihCld/8n3lRe7m79n13I3ly2a3YpOt5NnqZCsWVeKdBykuxKB8MlXWYa10NJXK2oxUnU11jmZPS4sqbbR0f6FYYHYuCGdWUuFa3OP/8yEyLUiUPUnU3iSq8iBtlTVKSVCisgOYJ4nag0Sp6hwl0pWocFDOSaLakqismKWCRNWcyNgliQq915NEtSVR2QmMChIF6UqUOEnU/iSKVZYola5EhVMhTxLVlkSJ6p65Tleiwv3JiGc1Gg1fXgflGZxgB7M4Q6okwTMTHH8qtX3QsysFknBDr+FSnwWj6+iN+83eLR9xV0s4qm9Ia0EMEwggVbO/xbmUnBPQ2k1QA00xChKR7WpLlBHKSgTZSPRCIsmkdc9pQRzCLdNI6HYSh3Xi4KZYcWCMCT37ayBpedBV2n1mW1TxWaOrIyIlfmRWm6l335gCQ5TFVcOVMpIZDeHGVDCiTITLLgRRyoJlYHD5dXspXRZbdTWaLpa0sPzq0+dx9sD560ybLvAJTL58m6109jj+69H996aXXQo/2/xq8wcCXLee/1eeu3f6etO7tjcxTbZW9igNNDYQJ6WEmD0vFKcSYa6V7Q8kw1SQjAaGAm5OKxdFo//k4Hi+fX2JAYlfdlpEK7ppEtlc6Y+Gj8948w5XdoD3X7qlG971RxeLB56G9/ejdZag6IeuSoiKSkgLOGaPEkDGRa6UFP8Ky4qVTZRI62ZLGzBUUc1E1o1mRQwEEG0MCFj8tSoiBhx52Vg3qw+vAtkw5xakILZRepKCsgnCgghuqBYWhPubuhBErcRJCDYLgdIoBJoZboG7v7K+ECDA6L4JY+nsb2QPfn8yECvZPslAiQxYjeG3lsKZavy7hQh0igdiheAnGShzCbglFjTjFAzeoCpxIYjVmZ+EoEwIFCdMKUO5tgz/WpO4FMQq3k9SUCYFVhKhgVurGOVKqaJByDJgIeiUWiGtMAwioDNBWFsZAp1x/pqNgGXO/3p5bwP7As3l+C8mk/FXJzuj/uvr8O5sf4n/8nS+POQG0eJdfhkPZ4mexVsYQdDXBFCUIX/ILAuQ5bcsoRLw8tpozqX1rz/f61hc0hO9/OvsIo3qINJ42F2qw0lw6dZV7hJWkPV0t64iDXF+/Tx4nQ7Hz3hvbwbLA5qj+R2dt3oyyHgGQvHub98HV//8++3Tfz6M5PDyH3/+35/6vHonnSwNwogjCiutVMYadEoC+8YMEHSM1ax6Gf+aWIUzJehvMGvd9FgJ+WHOXYCOfsHNVRRtk8sqteREU0IubJVaKOFQk13ClLi4gRsIqEPMSedmOFpa6IyClrTiqXZRpLLN3hVKiVIPq1xeMfeRQ+mbDEaoil+KH2WtBUR27X9fecKLM16v6w2kyibv5Ty0oIebLV+A/5h/hm1NZVSWWSx9H91Piyv1uq2vCKjrM2BZfXf+zY0lkY1IqtArNkwDcIyTZRZbrHKAbGCfI75Oe+9sY22vF7HOkZ2rK3ENN+Ys2I8SeI2bcKc5U8MIlqXczsrCVwLSLn8gDE8RWsOXIEaPqiiCkY8QLpkpjKG2AVD/mv74+NP5g73o8cffr+zTn5/0B/w2ez+e2jqqFSGsFY5Kt1stjQKrLZOGFpMSnFCMOzUIdMMkRR8rPCbWKdD58alyG6BzohT6ZJQjXBI8ipZEIkiaaRQGRXm2Bd1ZyPd+rjZJyNGBlvi2biMJ9dl6Q4sU0aBddQ+jxjDJO87tGcQnzDdhbohixkjH3q6RvCqWC1rCLGiJmCpDGRVdh3zv/btShBx9NCsFZcxI6vYbi8yOJGDcY8qZeoGqHpamdQvzvfcISxFzSUAhpUsjXPrEeOlaJgh+LYp3Wy2QBmzHId97I/AUIecYigFH/XX1vQa4Km4oMyAAhqJF51poZrqO+d6bjaeIOSWIuFLovAvFLZK3NxNGI1xgQeJnw4el7UyktjHFt4L53WTQnyLEnN73p/1AAo4niV4D+3xXmEhY/Q0zaJZG0JSk8KrsaG0NNPHmZOxqupeJSVyWj+/G9wP3jP8H</diagram></mxfile>
2112.11909/paper_text/intro_method.md ADDED
@@ -0,0 +1,140 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Introduction
2
+
3
+ Due to the proliferation of Artificial Intelligence (AI), smart systems[@ref1] have made significant achievements in communication[@ref2][@ref3][@ref4][@ref5][@ref6] and information extraction[@ref7][@ref8]. Since a sophisticated smart system brings considerable convenience and efficiency, research in this field has attracted extensive attention from both academia and industry.
4
+
5
+ A KBQA system aims to answer questions (QA) by understanding their semantic structure and extracting the answers from a large Knowledge Base (KB). Recently, numerous KBQA models have been proposed to effectively utilize a KB to answer 'simple' questions. Here 'simple' refers to questions that can be answered with a single predicate or a predicate sequence in the KB. For instance, "Who directed Avatar?" is a simple question because its answer can be obtained by a single triplet fact query (?, director_of, Avatar). To answer such questions, plenty of Rule-based[@ref9], Keyword-based[@ref10] and Synonym-based methods[@ref11][@ref13][@ref13][@ref14] have been proposed. However, questions in real life are usually more complex and can only be answered correctly by a multi-hop query path with constraints. As shown in Figure[1](#fig:two-question){reference-type="ref" reference="fig:two-question"}, answering a complex question requires generating a sequence of operations, including multi-hop queries and answer combination. Recently, using a KB to Answer such Complex Questions (KBCQA) has attracted growing interest[@ref15]. Previous state-of-the-art KBCQA models fall into two main branches, namely Information Retrieval-based (IR-based) and Neural Semantic Parsing-based (SP-based). IR-based models[@ref16][@ref17][@ref18][@ref19][@ref20] first recognize topic entities in the natural language question and link them to node entities in the Knowledge Base. All nodes surrounding the topic nodes are then regarded as candidate answers, and a score function is used to model their semantic relevance and predict the final answers. SP-based methods[@ref21][@ref22][@ref23][@ref7][@ref24] usually include a Seq2Seq module that converts natural language into an executable query language and an Executor module that executes the generated logical sequence on the KB to obtain the final answers.
6
+
7
+ <figure id="fig:two-question" data-latex-placement="!htb">
8
+ <embed src="sub_graph_and_path_generation.pdf" style="width:75.0%" />
9
+ <figcaption>The subgraph of KB and two query paths corresponding to questions of different types</figcaption>
10
+ </figure>
11
+
12
+ However, although state-of-the-art models have made great achievements, several challenges remain. Firstly, the dependency on annotated data is a thorny problem for SP-based models, which is usually addressed by using breadth-first search (BFS) to produce pseudo-gold action sequences and adopting a Reinforcement Learning (RL) algorithm([@ref25; @ref26]). Yet since BFS inevitably ignores many other plausible annotations and RL usually suffers from several issues, such as sparse rewards and data inefficiency, research on SP-based models is immensely hindered. Secondly, both IR-based and SP-based methods suffer from a large search space. For better performance on the KBCQA task, large KBs such as Wikidata[@ref27] or FreeBase[@ref28] are usually needed. Although these KBs contain comprehensive knowledge, they also bring a vast search space when searching for a query path with more than 3 hops. We record the average number of relations in one-hop and multi-hop subgraphs of a topic entity in our training dataset: in one-hop subgraphs the average number is 514.908, while in 2-hop and 3-hop subgraphs it grows to 1919.446 and 6408.070 respectively. This exponential growth of candidate tuples makes computation expensive and difficult. Thirdly, most previous work requires large KBCQA datasets to train the model, such as ComplexWebQuestions[@ref29] and QALD[@ref30]. However, these large datasets are usually in English, hindering research in more realistic settings and in languages other than English.
13
+
14
+ To solve the three problems above, we propose a template-based model consisting of Question Classification, Named Entity Recognition, Query Path Generation and Path Ranking modules. Our contributions can be summarized in three aspects:
15
+
16
+ 1\. We propose a data-efficient model equipped with the pre-trained language model BERT, which achieves high performance while using only a tiny amount of data. Thus, our model can be applied to the KBQA task in languages that lack large KBQA datasets.
17
+
18
+ 2\. By adopting a Beam Search algorithm and using BERT to score each search branch, the space and time complexity are greatly reduced while the path-generation accuracy remains competitive.
19
+
20
+ 3\. We put forward a method to construct artificial data from pre-defined schemas of query graphs, allowing our model to process questions of novel categories that are absent from the training set.
21
+
22
+ By utilizing the pre-trained language model BERT and pre-defined schemas of query graphs, our model can effectively extract and filter the query tuples for a complex question. Also, we adopt a Beam Search algorithm to relieve the exponential growth of candidates with increasing hops, which makes it possible to handle multi-hop questions.
23
+
24
+ This paper is organized as follows: In Section [\[sec:related-word\]](#sec:related-word){reference-type="ref" reference="sec:related-word"}, we review work on NER and Beam Search, which forms the basis of our experiments. In Section [3](#sec:our-method){reference-type="ref" reference="sec:our-method"} we present the overall architecture and then introduce each key component in detail. In Section [3](#sec:our-method){reference-type="ref" reference="sec:our-method"}, we demonstrate the evaluated models and the methodology used to generate the sentence embeddings. In Section [4](#sec:experiments){reference-type="ref" reference="sec:experiments"}, we describe the experimental setup and evaluation of the proposed model. Finally, we summarize the contributions of this work in Section [\[sec:conclusion\]](#sec:conclusion){reference-type="ref" reference="sec:conclusion"}.
25
+
26
+ # Method
27
+
28
+ In this section, we present the overall architecture (shown in Figure[2](#fig:architecture){reference-type="ref" reference="fig:architecture"}) and then introduce each key component of the proposed model in detail.
29
+
30
+ The general idea behind our method is to process the question step by step. Given a question, we first encode it with a BERT layer; the representations are then passed to an Entity Linking module (Sec.[\[sub:node-extractor\]](#sub:node-extractor){reference-type="ref" reference="sub:node-extractor"}) built on a BERT-BiLSTM-CRF layer, and to a Question Classification module (Sec.[\[sub:question-classify\]](#sub:question-classify){reference-type="ref" reference="sub:question-classify"}) trained with extra manually constructed samples (Sec.[\[subsec:data-construction\]](#subsec:data-construction){reference-type="ref" reference="subsec:data-construction"}). With the recognized topic entities and the specific category the question belongs to, we can refer to a more precise schema (Sec.[\[subsec:predefine-query-schema\]](#subsec:predefine-query-schema){reference-type="ref" reference="subsec:predefine-query-schema"}) to generate the query path in a narrower search space. However, since the query graph of a complex question may involve multiple relations, such a naive generation procedure would incur intolerable time and space complexity and place a heavy computational burden on the Candidate Tuple Ranking module. To solve this, we adopt a heuristic graph-search algorithm (Sec.[\[sec:beam-search\]](#sec:beam-search){reference-type="ref" reference="sec:beam-search"}) based on a pre-trained text-matching model, which greatly decreases the number of candidate query paths. Afterwards, a Candidate Tuple Ranking module sifts out the final path using the same PTM-TextMatch model. By executing the golden query tuple, we can retrieve the answer from the Knowledge Base.
31
+
32
+ Besides, we do not search aimlessly in the KB when generating the query subgraph. Instead, we refer to a set of pre-defined schemas covering all possible query graphs in complex question answering. This policy not only narrows the search space significantly but also provides a semantic framework for reference when constructing artificial questions.
33
+
34
+ The main goal of this module is to identify topic entities in the question. This module includes Tokenization with dictionaries, Named Entity Recognition(NER) and Entity Linking.
35
+
36
+ *1) Tokenization*: Different from English tokenization, Chinese tokenization usually uses dictionaries as a supplement to segment Chinese question text into Chinese words. In this paper, we use a dictionary provided by CCKS consisting of all subjects in the KB and all entities and their mentions in the Mention Dictionary.
37
+
38
+ *2) Named Entity Recognition*: In the NER module, we encode the question with a BERT layer, then pass it through a BiLSTM to capture contextual information and a CRF layer to predict the label of each token. Let $Q=(t_1,t_2,t_3,...,t_n)$ represent a tokenized question. We feed Q into a BERT layer to encode representations with semantic knowledge. Next, the representations $Q=\{X_i\}_{i=1}^{\mid Q \mid}$ are passed through a BiLSTM layer and a CRF layer[@ref28].
39
+
40
+ For each input token, the context information is captured by two LSTMs, where one captures information from left to right and the other from right to left. At each time step $t$, a hidden vector $\overrightarrow{h_t}$ (from left to right) is computed based on the previous hidden state $\overrightarrow{h_{t-1}}$ and the input at the current step $x_t$. The forward and backward context representations, generated by $\overrightarrow{h_t}$ and $\overleftarrow{h_t}$ respectively, are concatenated into a long vector which we represent as $h_t=[\overrightarrow{h_t}:\overleftarrow{h_t}]$. The basic LSTM function is defined as follows: $$\begin{equation}
41
+ \left[ \begin{array}{c}\tilde{c}_t \\ f_t \\ o_t \\ i_t\end{array}\right] =\left[\begin{array}{c}\tanh \\ \sigma \\ \sigma \\ \sigma \end{array} \right] \Big ( W^T \left[\begin{array}{c} x_t \\ h_{t-1} \end{array}\right] + b\Big )
42
+ \end{equation}$$ $$\begin{equation}
43
+ c_t=i_t\odot \tilde{c}_t+f_t\odot c_{t-1}
44
+ \end{equation}$$ $$\begin{equation}
45
+ h_t=o_t \odot \tanh(c_t)
46
+ \end{equation}$$ where $W^T$ and $b$ are trainable parameters; $\sigma(\cdot)$ is the sigmoid function; $i_t, o_t, f_t$ denote the input, output and forget gates respectively; $\odot$ represents element-wise multiplication; $x_t$ is the input vector at the current time step.
47
+
48
+ The output vectors of the BiLSTM contain the bidirectional contextual information of the words in a question. We then adopt a CRF to predict the label of each word while considering the dependencies of adjacent labels. A CRF is a Markov random field of Y conditioned on a random variable X, defined over an undirected graph G whose nodes Y are connected by undirected edges indicating dependencies. Formally, we are given observation variables $H=\{h_i\}_{i=1}^{\mid Q \mid}$ and a set of output values $y\in \{0,1\}$, where $y=1$ means the corresponding token is a topic entity and $y=0$ means it is not. The CRF defines potential functions as below: $$p(y\mid h)=\frac{1}{Z_h}\prod_{s\in S(y,h)}\phi_s(y_s, h_s)$$ where $Z_h$ is a normalization factor over all output values, $S(y,h)$ is the set of cliques of $G$, and $\phi_s(y_s, h_s)$ is the clique potential on clique $s$.
49
+
50
+ Afterwards, in the BiLSTM-CRF model, a softmax over all possible tag sequences yields a probability for the sequence $y$. The prediction of the output sequence is computed as follows: $$y_*=\mathop{argmax}_{y\in\{0,1\}^n}\sigma(H,y)$$ $$\sigma(H,y)=\sum_{i=0}^n A_{y_i,y_{i+1}} + \sum_{i=0}^n P_{i,y_{i}}$$
51
+
52
+ where $A$ is a matrix of transition scores, $A_{y_i,y_{i+1}}$ represents the score of a transition from the tag $y_i$ to $y_{i+1}$. $n$ is the length of a sentence, $P$ is the matrix of scores output by the BiLSTM network, $P_{i,y_{i}}$ is the score of the $y_i^{th}$ tag of the $i^{th}$ word in a sentence.
53
+
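+ To make the scoring concrete, the following is a minimal NumPy sketch (ours, not the authors' code) of $\sigma(H,y)$, combining the transition matrix $A$ with the BiLSTM emission scores $P$ and brute-forcing the argmax over a toy tag space:
+
+ ```python
+ import numpy as np
+
+ def sequence_score(A, P, y):
+     """sigma(H, y): emission scores P[i, y_i] plus transition scores A[y_i, y_{i+1}]."""
+     emission = sum(P[i, y[i]] for i in range(len(y)))
+     transition = sum(A[y[i], y[i + 1]] for i in range(len(y) - 1))
+     return emission + transition
+
+ # Toy example: 2 tags (0 = not a topic entity, 1 = topic entity), 4 tokens.
+ A = np.array([[0.5, 0.1],
+               [0.2, 0.8]])            # transition scores between adjacent tags
+ P = np.random.randn(4, 2)             # BiLSTM emission scores, one row per token
+ y_star = max((y for y in np.ndindex(2, 2, 2, 2)),
+              key=lambda y: sequence_score(A, P, y))
+ print(y_star, sequence_score(A, P, y_star))
+ ```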
54
+ *3) Entity Linking*: In this module, we link each recognized named entity to an entity in the KB and select a set of candidate topic entities with a Mention Dictionary. The Mention Dictionary is provided by the CCKS sponsors and describes the mapping from mentions to node entities. After obtaining the entity mentions in a question, we map them to the relevant node entities. Then we extract helpful features from the mentions and entities to select the potential candidate entities. In this work, we extract five features as below: the Length of the Entity Mention ($f_1$), the TF value of the Entity Mention ($f_2$), the Distance Between the Entity Mention and the Interrogative Word ($f_3$), the Word Overlap Between the Question and Triplet Paths ($f_4$), and the Popularity of the Candidate Entity ($f_5$). The popularity is calculated as $\sqrt{k}$, where $k$ represents the number of relation paths the candidate entity has within its 2-hop graph. We assume that an entity with larger $f_1$, $f_2$, $f_4$, $f_5$ and smaller $f_3$ is more likely to be a topic entity.
55
+
56
+ These five features are calculated and fed into a linear weighting layer to output relative scores. Entities with the $Top\ k$ scores form the candidate entity set.
57
+
58
+ The score is calculated using the following function: $$s=w_1\cdot f_1 + w_2\cdot f_2 + w_3\cdot f_3 + w_4\cdot f_4 + w_5\cdot f_5$$ where $f_i$ represents the $i^{th}$ feature and $w_i$ represents the corresponding weight.
59
+
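+ As an illustration (the feature values and weights below are made up, not taken from the paper), the weighted scoring and $Top\ k$ selection can be sketched as:
+
+ ```python
+ import math
+
+ def entity_score(features, weights):
+     """Linear weighting s = sum_i w_i * f_i over the five entity-linking features."""
+     return sum(w * f for w, f in zip(weights, features))
+
+ # Hypothetical candidates: (mention length f1, TF f2, distance to the wh-word f3,
+ #                           word overlap f4, popularity f5 = sqrt(k)).
+ candidates = {
+     "Avatar (film)": (6, 0.03, 2, 3, math.sqrt(16)),
+     "Avatar (album)": (6, 0.01, 2, 1, math.sqrt(4)),
+ }
+ weights = (0.3, 0.2, -0.4, 0.5, 0.1)   # negative weight: smaller f3 is preferred
+ top_k = sorted(candidates, key=lambda e: entity_score(candidates[e], weights),
+                reverse=True)[:1]
+ print(top_k)
+ ```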
60
+ In order to improve the efficiency of our model, we use the pre-trained language model BERT to classify complex questions into two categories, one-entity questions and multi-entity questions, and process each of them separately. For a one-entity question, the predicted paths usually extend from the topic entity with one relation or a sequence of relation hops. In multi-entity questions, correct answers can only be obtained accurately by executing query paths extended from several topic entities in the question. For instance, the question "Whose husband is the director of Avatar?" is a one-entity question because its query path (?, wife_of, t. t, director_of, Avatar) can be extracted from "Avatar" through the relations "director_of" and "wife_of" and the transitional entity t. Meanwhile, "Which actors in Avatar born in British?" is a multi-entity question because the correct query paths can only be generated from the entities "Avatar" and "British" respectively through the relations "actor_of" and "born_in". In addition, we generate artificial questions in a semantically structured form to improve the performance of our classification model. The detailed implementation is presented in subsection[\[subsec:data-construction\]](#subsec:data-construction){reference-type="ref" reference="subsec:data-construction"}.
61
+
62
+ Given a question, we encode it with word, position and segment embeddings, and attach a special token $[CLS]$ at the beginning of the question. The semantic information is then captured with multi-head attention, and a dense layer is attached on top to obtain the prediction.
63
+
64
+ The key to solving the KBCQA task is to map the entities of a question into a specific query graph. A Semantic Parsing-based model transforms the KBQA task into a Seq2Seq task. By being fed numerous annotated examples, an SP-based model can understand the semantic framework of a question and derive the corresponding query graph. An Information Retrieval-based model adopts a different approach: it searches all query graphs surrounding the extracted topic entities and then uses a Candidate Tuple Ranking module to sift out the final query graphs. However, with limited data, it is challenging to learn the query structure of questions, let alone to transform it into an executable action sequence. In this work, we relieve this problem by predefining the schemas of query graphs and adopting Beam Search to prune the search space of multi-hop query paths.
65
+
66
+ Inspired by Aqqu[@ref43], we propose an inverse solution: we first take a close look at numerous Chinese multi-hop questions and propose eight search schemas for complex questions, as shown in Figure[3](#fig:predefined-schema){reference-type="ref" reference="fig:predefined-schema"}. By predefining the schemas of query graphs, our model benefits in three aspects:
67
+
68
+ <figure id="fig:predefined-schema" data-latex-placement="!htb">
69
+ <embed src="schema.pdf" style="width:65.0%" />
70
+ <figcaption>Predefined schemas of query paths. The grey nodes represent topic entities we already know, the white node represents a transitional entity we need not record, and the red node represents the answer we query.</figcaption>
71
+ </figure>
72
+
73
+ a\) Predefining the schema introduces prior knowledge, which stipulates the semantic structure of the queried question and greatly prunes the search space.
74
+
75
+ b\) Since the patterns of query tuples are specified, we can easily turn each query tuple into its semantic form and calculate the similarity between the artificial question and the real question with a pre-trained language model, which we define as the score of the generated query path.
76
+
77
+ c\) Extra data can be constructed on the enumerated query schema to train the classification model, which allows the model to learn the basic semantic knowledge of classifying questions.
78
+
79
+ We assume that the diversity of candidate tuples will degrade the performance of the candidate query path ranking module. Thus, we divide the query schemas into two groups according to the number of topic entities a query pattern has, and use two separate modules to generate candidate query paths. For one-entity questions, we simply search the subgraph of the topic entity within two relation hops. For questions with multiple entities, we generate query paths on the search schemas shown in Figure[3](#fig:predefined-schema){reference-type="ref" reference="fig:predefined-schema"}. Let $n$ represent the number of candidate topic entities and $m$ the number of true topic entities in a given question. Since the combinatorial number $C_n^m$ grows too large when $m$ is greater than 3, we only consider questions containing three or fewer topic entities.
80
+
81
+ To better predict which class a question belongs to and to alleviate the need for labeled training data, we generate substantial artificial questions on the predefined query schemas. In our method, we randomly select a node entity in the KB and extend a query path from it. When generating a query path, we do not consider all branches of a random search schema. Instead, we run the algorithm on the predefined query schemas introduced in subsection[\[subsec:predefine-query-schema\]](#subsec:predefine-query-schema){reference-type="ref" reference="subsec:predefine-query-schema"}. For instance, for the above question "Whose husband is the director of Avatar?", the corresponding query schema is ($x$, $r_1$, $t$. $t$, $r_2$, $e$), where $x$ represents the answer and $r_1$ and $r_2$ represent any relations in a two-hop query path extended from the topic entity $e$ through an intermediate entity $t$. We generate an artificial question by replacing mentions of topic entities (in this example "Avatar") and relations ("wife_of", "director_of") with mentions of randomly selected node entities and their correlated relations. In addition, if a query schema does not appear in the training samples, we only need to manually construct a fake question corresponding to that schema and then execute the above steps.
82
+
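+ The sketch below (toy KB and toy template, not the authors' implementation) illustrates how one artificial question can be instantiated from the two-hop schema ($x$, $r_1$, $t$. $t$, $r_2$, $e$) by sampling a topic entity and relations from the KB:
+
+ ```python
+ import random
+
+ # Toy KB as (head, relation, tail) triples.
+ triples = [
+     ("James_Cameron", "director_of", "Avatar"),
+     ("Suzy_Amis", "wife_of", "James_Cameron"),
+     ("James_Cameron", "director_of", "Titanic"),
+ ]
+ # Verbalization template for the schema (x, r1, t. t, r2, e).
+ template = "Who is the {r1} of the {r2} of {e}?"
+
+ def make_artificial_question(triples, template):
+     t, r2, e = random.choice(triples)                 # sample the hop (t, r2, e); e is the topic entity
+     incoming = [tr for tr in triples if tr[2] == t]   # triples (x, r1, t) ending at t
+     if not incoming:
+         return None                                   # no two-hop path from this sample
+     x, r1, _ = random.choice(incoming)
+     question = template.format(r1=r1, r2=r2, e=e)
+     return question, ((x, r1, t), (t, r2, e))
+
+ print(make_artificial_question(triples, template))
+ ```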
83
+ Since our predefined query schemas contain semantic structures for both one-entity and multi-entity questions, our constructed samples lead the pre-trained language model to converge in a direction more compatible with our specific classification task. Besides, the ratio of questions of different query patterns should be carefully controlled in order to improve the generalization of the created data.
84
+
85
+ Although our constructed questions differ somewhat from real questions in semantic expression, our model can still learn additional semantic structure for the two classes of questions. In our experiments, we constructed 5k artificial questions and used them to train our classification model. With the help of the pre-trained language model, our model can handle questions that never appear in the training set. As the results in Sec.[4](#sec:experiments){reference-type="ref" reference="sec:experiments"} show, given only 10% of the training data, our model achieves good performance in classifying questions.
86
+
87
+ It is worth noting that when extending multi-hop relations for the two types of questions above, the Query Path Generation module often suffers from the vast search space. To solve this, we adopt a heuristic Beam Search algorithm equipped with the pretrained language model BERT to score each branch of relations, thus avoiding an exhaustive search over irrelevant relations. When extending a new relation path at step $n$, we try to add the relation $r_n$ to the previously generated query path $R_{n-1}$ and use the strategy introduced in Artificial Data Construction (Sec.[\[subsec:data-construction\]](#subsec:data-construction){reference-type="ref" reference="subsec:data-construction"}) to transfer the graph into a semantic form $S_n$. Then $S_n$ and the original question $Q$ are tokenized and concatenated with a special token $[SEP]$ as below: $$input=[CLS]S_n[SEP]Q$$ These two sentences are fed into a pretrained language model fine-tuned for the downstream task to calculate the semantic similarity, which represents the score for $r_n$ given a sub query path $R_{n-1}$. The formulation is defined as: $$Sco(r_n|R_{n-1})=BERTLayer(input)$$ At each extension step, we only consider relations with the $Top_k$ scores for further search, which significantly excludes irrelevant query branches. The results in Sec.[\[subsec:exp-beam-search\]](#subsec:exp-beam-search){reference-type="ref" reference="subsec:exp-beam-search"} show that, by adopting the Beam Search algorithm, the accuracy of query path generation remains competitive while the number of candidate paths decreases by more than 80%. The detailed description is given in Algorithm[\[algorithm1\]](#algorithm1){reference-type="ref" reference="algorithm1"}.
88
+
89
+ :::: algorithm
90
+ ::: algorithmic
91
+ **Input:** $KB$, question $q$, topic entity set $E$, number of hops $T$. **Output:** candidate path set $P^{(T)}$
+ **Initialize:** $P^{(0)}\leftarrow \{e_0\in E\}$
+ **for** $t = 1,\dots,T$ **do**
+   $\tilde{P}^{(t)}\leftarrow \phi$, $\tilde{S}^{(t)}\leftarrow \phi$
+   **for** each path $p\in P^{(t-1)}$ **do**
+     $e_{t-1}\leftarrow tail(p)$
+     **for** each relation $r$ (with neighbouring entity $e_t$) adjacent to $e_{t-1}$ in $KB$ **do**
+       $p'\leftarrow p\oplus(r,e_t)$ or $p'\leftarrow p\oplus(r)$
+       $\tilde{P}^{(t)}\leftarrow \tilde{P}^{(t)}\cup \{p'\}$
+       $\tilde{S}^{(t)}\leftarrow \tilde{S}^{(t)}\cup \{Sentence(p')\}$
+   score all elements in $\tilde{S}^{(t)}$ against $q$ and rank all corresponding elements in $\tilde{P}^{(t)}$
+   $P^{(t)}\leftarrow$ the $Top_k$ paths of $\tilde{P}^{(t)}$
139
+ :::
140
+ ::::
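+ The beam search in Algorithm 1 can be sketched in a few lines of Python (illustrative only; the real scorer is the fine-tuned BERT text-matching model, replaced here by a dummy lexical-overlap score):
+
+ ```python
+ def verbalize(path):
+     """Turn a query path (e0, r1, e1, ...) into a pseudo-question string S_n."""
+     return " ".join(str(x) for x in path)
+
+ def overlap_score(question, sentence):
+     """Stand-in for Sco(r_n | R_{n-1}) = BERTLayer([CLS] S_n [SEP] Q)."""
+     q, s = set(question.lower().split()), set(sentence.lower().split())
+     return len(q & s) / (len(s) + 1e-6)
+
+ def beam_search_paths(kb, question, topic_entity, hops=2, beam_size=3):
+     beams = [(topic_entity,)]                              # P^(0)
+     for _ in range(hops):
+         candidates = []
+         for path in beams:
+             for relation, entity in kb.get(path[-1], []):  # extend p' = p ⊕ (r, e_t)
+                 candidates.append(path + (relation, entity))
+         # Keep only the Top_k extensions whose verbalization best matches the question.
+         candidates.sort(key=lambda p: overlap_score(question, verbalize(p)), reverse=True)
+         beams = candidates[:beam_size] or beams
+     return beams
+
+ kb = {"Avatar": [("director_of", "James_Cameron")],
+       "James_Cameron": [("wife_of", "Suzy_Amis"), ("born_in", "Canada")]}
+ print(beam_search_paths(kb, "Whose husband is the director of Avatar?", "Avatar"))
+ ```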
2201.07788/main_diagram/main_diagram.drawio ADDED
The diff for this file is too large to render. See raw diff
 
2201.07788/paper_text/intro_method.md ADDED
@@ -0,0 +1,84 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Introduction
2
+
3
+ Humans have the ability to recognize 3D objects in a wide variety of positions and orientations (poses) [\[40\]](#page-9-0), even if objects are occluded. We also seem to prefer certain *canonical views* [\[10\]](#page-8-0), with evidence indicating that an object in a new pose is *mentally rotated* to a canonical pose [\[47\]](#page-9-1) to aid recognition. Inspired by this, we aim to build scene understanding methods that reason about objects in different poses by learning to map them to a canonical pose without explicit supervision.
4
+
5
+ Given a 3D object shape, the goal of instance-level canonicalization is to find an *equivariant frame* of reference that is consistent relative to the geometry of the shape under different 3D poses. This problem can be solved if we have shape correspondences and a way to find a distinctive equivariant frame (*e.g*., PCA). However, it becomes significantly harder if we want to operate on different 3D poses of different object instances that lack correspondences. This category-level canonicalization problem has received much less attention despite tremendous interest in category-level 3D object understanding [\[8,](#page-8-1) [11,](#page-8-2) [14,](#page-8-3) [25,](#page-8-4) [26,](#page-8-5) [31,](#page-9-2) [56\]](#page-10-0). Most methods rely on data augmentation [\[23\]](#page-8-6), or manually annotated datasets [\[3,](#page-8-7) [56\]](#page-10-0) containing instances that are consistently positioned and oriented within each category [\[44,](#page-9-3) [48,](#page-9-4) [52\]](#page-9-5). This has prevented broader application of these methods to un-canonicalized data sources, such as online model collections [\[1\]](#page-8-8). The problem is further exacerbated by the difficulty of canonicalizing partial shape observations (*e.g*., from depth maps [\[36\]](#page-9-6)), or symmetric objects that require an understanding of inter-instance part relationships. Recent work addresses these limitations using weakly-supervised [\[15,](#page-8-9) [38\]](#page-9-7) or self-supervised learning [\[13,](#page-8-10) [29,](#page-9-8) [43,](#page-9-9) [46\]](#page-9-10), but cannot handle partial 3D shapes, or is limited to canonicalizing only orientation.
6
+
7
+ <span id="page-1-0"></span>We introduce ConDor, a method for self-supervised category-level Canonicalization of the 3D pose of partial shapes. It consists of a neural network that is trained on an un-canonicalized collection of 3D point clouds with inconsistent 3D poses. During inference, our method takes a full or partial 3D point cloud of an object at an arbitrary pose, and outputs a canonical rotation frame and translation vector. To enable operation on instances from different categories, we build upon Tensor Field Networks (TFNs) [\[49\]](#page-9-11), a 3D point cloud architecture that is equivariant to 3D rotation and point permutation, and invariant to translation. To handle partial shapes, we use a two-branch (Siamese) network with training data that simulates partiality through shape slicing or camera projection. We introduce several losses to help our method learn to canonicalize 3D pose via selfsupervision. A surprising feature of our method is the (optional) ability to learn consistent part co-segmentation [\[6\]](#page-8-11) across instances without any supervision (see Figure [1\)](#page-0-0).
8
+
9
+ Given only the recent interest, standardized metrics for evaluation of canonicalization methods have not yet emerged. We therefore propose four new metrics that are designed to evaluate the consistency of instance- and category-level canonicalization, as well as consistency with manually pre-canonicalized datasets. We extensively evaluate the performance of our method using these metrics by comparing with baselines and other methods [\[43,](#page-9-9) [46\]](#page-9-10). Quantitative and qualitative results on common shape categories show that we outperform existing methods and produce consistent pose canonicalizations for both full and partial 3D point clouds. We also demonstrate previously difficult applications enabled by our method such as operation on partial point clouds from depth maps, keypoint annotation transfer, and expanding the size of existing datasets. To sum up, our contributions include:
10
+
11
+ - A self-supervised method to canonicalize the 3D pose of full point clouds from a variety of object categories.
12
+ - A method that can also handle partial 3D point clouds.
13
+ - New metrics to evaluate canonicalization methods, extensive experiments, and new applications.
14
+
15
+ # Method
16
+
17
+ Given a point cloud $X \in \mathbb{R}^{3 \times K}$ denoting a full or partial shape from a set of non-aligned shapes, our goal is to estimate its rotation $\mathcal{R}(X)$ (canonical frame) sending X to a canonical pose. For a partial shape $Y \subset X$ we also learn a translation $\mathcal{T}(Y)$ aligning Y with X in the canonical frame. We achieve this by training a neural network on 3D shapes in a self-supervised manner (see Figure 3).
18
+
19
+ We first discuss the case of canonicalizing 3D rotation for full shapes. Given a point cloud X, our approach estimates a rotation-invariant point cloud $X^c$ , and an equivariant rotation E that rotates $X^c$ to X. Note that for full shapes, translation can be canonicalized using mean centering [29], but this does not hold for partial shapes.
20
+
21
+ **Rotation Invariant Point Cloud/Embedding:** To estimate a rotation-invariant point cloud, we build on top of a permutation-, rotation-equivariant and translation-invariant neural network architecture: Tensor Field Networks (TFNs) [49] with equivariant non-linearities for TFNs [32]. Given X, we use a TFN [32] to produce global **equivariant features** $F^{\ell}$ , with columns $F^{\ell}_{:,j}$ as described in Section 3.
22
+
23
+ The central observation of [32] is that the features F have the same rotation equivariance property as coefficients of spherical functions in the spherical harmonics basis, and can therefore be treated as such. We exploit this property by embedding the shape using the spherical harmonics basis and using the global TFN features F as coefficients of this embedding. Since the input to the spherical harmonics embedding and the coefficients rotate together with the input shape, they can be used to define a rotation and translation **invariant embedding** of the shape. Formally, let $Y^{\ell}(x) \in \mathbb{R}^{2\ell+1}$ be the vector of degree $\ell$ spherical harmonics which are homogeneous polynomials defined over $\mathbb{R}^3$ . We define a rotation invariant embedding of the shape as the dot products
24
+
25
+ <span id="page-2-3"></span>
26
+ $$H_{ij}^{\ell} := \langle F_{:,j}^{\ell}, Y^{\ell}(X_i) \rangle, \tag{1}$$
27
+
28
+ where i is an index to a single point on the point cloud, and j is the channel index as in Section 3. Both sides of the dot product are rotated by the same Wigner rotation matrix when rotating the input pointcloud X making H invariant to rotations of X. The input point cloud is mean-centered to achieve invariance to translation. Note that we can use any functional basis of the form : $x \mapsto (\varphi^r(||x||)Y^\ell(x))_{r\ell}$ , where $(\varphi^r)_r$ are real valued functions to define H.
29
+
30
+ We use the rotation invariant embedding corresponding to $\ell=1$ (degree 1) to produce a 3D **invariant shape** through a linear layer on top of H. Note that degree 1 spherical harmonics are the x,y,z coordinates of the input point cloud since $Y^1(x)=x$ . As we show in Appendix B, other choices for $\ell$ enable us to learn consistent co-segmentation without supervision. The 3D rotation invariant shape is given by:
31
+
32
+ <span id="page-2-1"></span>
33
+ $$X_i^c := \sum_{j} W_{:,j} H_{ij}^1 = W(F^1)^\top X_i.$$
34
+ (2)
35
+
36
+ We obtain our canonical frame as described in Section 3 as $\mathcal{R}(X) = W(F^1)^{\top}$ where W is the learnable weights matrix of the linear layer.
37
+
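+ As a sanity check on this construction, here is a small NumPy sketch (ours, not the authors' code) of Equation (2) for degree $\ell=1$, verifying that $X^c$ is unchanged when the point cloud and the degree-1 equivariant features are rotated together:
+
+ ```python
+ import numpy as np
+
+ K, C = 1024, 8                        # number of points, degree-1 feature channels
+ X = np.random.randn(3, K)             # input point cloud (3 x K)
+ F1 = np.random.randn(3, C)            # degree-1 TFN features, columns F^1_{:,j}
+ W = np.random.randn(3, C)             # learnable weights of the linear layer
+
+ def invariant_shape(X, F1, W):
+     # H^1_{ij} = <F^1_{:,j}, Y^1(X_i)> with Y^1(x) = x, hence X^c = W (F^1)^T X.
+     return W @ F1.T @ X
+
+ theta = 0.7                           # rotate everything by an arbitrary rotation R
+ R = np.array([[np.cos(theta), -np.sin(theta), 0.0],
+               [np.sin(theta),  np.cos(theta), 0.0],
+               [0.0, 0.0, 1.0]])
+ print(np.allclose(invariant_shape(X, F1, W),
+                   invariant_shape(R @ X, R @ F1, W)))   # True: X^c is rotation invariant
+ ```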
38
+ **Rotation Equivariant Embedding**: Next, we seek to find an equivariant rotation that transforms $X^c$ to X. In addition to the equivariant features F, our TFN also outputs a 3D equivariant frame E which we optimise to be a rotation matrix. E satisfies the equivariance relation E(R.X) = RE(X) so that the point cloud $E(X)X^c$ is rotation equivariant. Note that we could have chosen $E(X) = R(X)^{\top}$ but we instead choose to learn E(X) independently as this approach generalizes to the case of non-linear embeddings
39
+
40
+ <span id="page-3-2"></span><span id="page-3-0"></span>![](_page_3_Figure_0.jpeg)
41
+
42
+ Figure 3. ConDor. (*left*) Our method learns to canonicalize rotation by estimating an equivariant pose E(X) and an invariant point cloud $X^c$ of an input shape X. A self-supervision loss ensures that the input and transformed canonical shapes match. (*right*) To handle translation in partial shapes, we train a two-branch (Siamese) architecture, one taking the full shape and the other taking an occluded (*e.g*., via slicing) version of the full shape as input. Various losses ensure that the feature embeddings of the full and partial shapes match. We predict the amodal barycenter of the full shape T(O(X)) from the partial shape to canonicalize for position.
43
+
44
+ (*e.g*., with values other than ℓ = 1 in Equation [\(2\)](#page-2-1)) which we use for unsupervised segmentation in Appendix [B.](#page-11-0)
45
+
46
+ Using E, we can transform our 3D invariant embedding $X^c$ back to the input equivariant embedding and compare it to the input point cloud. To handle situations with high occlusion and symmetric objects we estimate P equivariant rotations and choose the frame that minimizes the $L^2$ norm between corresponding points in the input and the predicted invariant shape.
47
+
48
+ Next, we discuss canonicalizing 3D translation for partial point clouds, *e.g*., acquired from depth sensors or LiDAR. As noted, translation canonicalization for full shapes is achieved using mean centering [\[29\]](#page-9-8). Thus, our approach in Section [4.1](#page-2-2) is sufficient for 3D pose canonicalization of full shapes. However, partial shapes can have different centroids depending on how the shape was occluded. To address this issue, we extend our approach to additionally find a rotation-equivariant translation $T \in \mathbb{R}^3$ that estimates, from the mean-centered partial point cloud, the difference between the barycenters of the full and partial shapes, and translates the partial shape to align with the full shape in the input frame.
49
+
50
+ In practice, we operationalize the above idea in a two-branch Siamese architecture as shown in Figure [3.](#page-3-0) We slice the input point cloud to introduce synthetic occlusion. We penalize the network by ensuring semantic consistency between the full and the partial point cloud. Furthermore, our network predicts an amodal translation vector that captures the barycenter of the full shape from the partial input shape.
51
+
52
+ A surprising finding is that our method can be used for unsupervised part co-segmentation [\[6\]](#page-8-11) of full and partial shapes with little modification. This result is enabled by finding the rotation invariant embedding H in Equation [(1)](#page-2-3) corresponding to all ℓ ⩾ 0 to produce a non-linear invariant embedding. To obtain a consistent rotation invariant part segmentation, we segment the input shape into N parts by learning an MLP on top of the rotation invariant embedding. The part label of each point in the input point cloud is given by $S_i := \mathrm{softmax}[\mathrm{MLP}(H)_i]$. Results visualized in the paper include these segmentations as colored labels. Please see the supplementary material for more details.
53
+
54
+ A key contribution of our work is to demonstrate that 3D pose canonicalization can be achieved through self-supervised learning as opposed to supervised learning from labeled datasets [\[4,](#page-8-14) [56\]](#page-10-0). We now list the loss functions that enable this. Additionally, we describe losses that prevent degenerate results, handle symmetric shapes, and enable unsupervised segmentation. We begin with full shapes.
55
+
56
+ Canonical Shape Loss: Our primary self-supervision signal comes from the canonical shape loss, which minimizes the $L^2$ loss between the rotation invariant point cloud $X^c$ transformed by the equivariant rotation E and the input point cloud X. It is worth noting that $X^c$ and X are in correspondence because our method is permutation equivariant and we extract point-wise embeddings. For each point i in a point cloud of size K, we define the canonical shape loss to be
57
+
58
+ $$\mathcal{L}_{canon} = \frac{1}{K} \sum_{i} \|EX_{i}^{c} - X_{i}\|_{2}.$$
59
+ (3)
60
+
61
+ We empirically observe that our estimate of E can be flipped by $180^\circ$ or $X^c$ can become a degenerate shape when the object class has symmetry or heavy occlusions. To mitigate this issue, we estimate P equivariant rotations $E_p$ and choose the one that minimizes the above loss.
62
+
63
+ <span id="page-4-2"></span>Orthonormality Loss: The equivariant rotation E estimated by our method must be a valid rotation in SO(3), but this cannot be guaranteed by the TFN. We therefore add a loss to constrain E to be orthonormal by minimizing its difference to its closest orthonormal matrix. We achieve this using the SVD decomposition of E = UΣV <sup>⊤</sup> and enforcing unit eigenvalues with the loss
64
+
65
+ $$\mathcal{L}_{ortho} = \|UV^{\top} - E\|_2. \tag{4}$$
66
+
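+ A minimal NumPy illustration of this loss (assuming E is the predicted 3×3 matrix); $UV^{\top}$ is the closest orthonormal matrix to E in the Frobenius sense:
+
+ ```python
+ import numpy as np
+
+ def orthonormality_loss(E):
+     """|| U V^T - E || with U, V from the SVD of E (Equation 4)."""
+     U, _, Vt = np.linalg.svd(E)
+     return np.linalg.norm(U @ Vt - E)
+
+ E = np.eye(3) + 0.1 * np.random.randn(3, 3)   # a nearly-orthonormal prediction
+ print(orthonormality_loss(E))                  # small but non-zero penalty
+ ```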
67
+ Separation Loss: When estimating P equivariant rotations $E_p$, our method could learn a degenerate solution where all $E_p$ are similar. To avoid this problem, we introduce a separation loss that encourages the network to estimate different equivariant rotations as
68
+
69
+ $$\mathcal{L}_{sep} = -\frac{1}{9P} \sum_{i \neq j} ||E_i - E_j||_2.$$
70
+ (5)
71
+
72
+ Restriction Loss: We next turn our attention to partial shapes. Similar to full shapes, we compute the canonical shape, orthonormality and separation losses. We assume that a partial shape is a result of a cropping operator O that acts on a full point cloud X to select points corresponding to a partial version O(X) ⊆ X. In practice, our cropping operator is slicing or image projection (see Section [5.2\)](#page-4-1). During training, we train two branches of our method, one with the full shape and the other with a partial shape generated using a random sampling of O. We then enforce that the invariant embedding for partial shapes is a restriction of the invariant embedding of the full shape X using the loss
73
+
74
+ $$\mathcal{L}_{rest} = \frac{1}{|\mathcal{S}|} \sum_{i \in \mathcal{S}} \|\widehat{\mathcal{O}[X^c]}_i - \left(\widehat{\mathcal{O}[X]}^c\right)_i\|_2^2, \tag{6}$$
75
+
76
+ where S is the set of valid indices of points in both X and $\mathcal{O}(X)$, and the hat indicates mean-centered point clouds. During inference, we do not require the full shape and can operate only with partial shapes. Empirically, we observe that our method generalizes to different cropping operations between training and inference (see Section [6.3\)](#page-6-0).
77
+
78
+ Amodal Translation Loss: Finally, to align the mean-centered partial shape with the full shape, we estimate the barycenter of the full shape after the occlusion operation O[X] from the partial shape alone, using a rotation-equivariant translation vector $\mathcal{T}(\widehat{\mathcal{O}[X]})$, by minimizing
79
+
80
+ $$\mathcal{L}_{amod} = \|\mathcal{T}(\widehat{\mathcal{O}[X]}) - \overline{\mathcal{O}(X)}\|_{2}^{2}. \tag{7}$$
81
+
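+ One possible reading of the two partial-shape losses, with the network outputs replaced by placeholders (a rough sketch under our own assumptions, not the authors' code):
+
+ ```python
+ import numpy as np
+
+ X = np.random.randn(3, 1024)
+ X -= X.mean(axis=1, keepdims=True)               # full cloud, mean-centered
+ mask = X[0] > 0.0                                # toy cropping operator O: slice along x
+ partial = X[:, mask]                             # O(X)
+
+ # Amodal translation loss (Eq. 7): predict, from the mean-centered partial cloud,
+ # the barycenter of O(X) expressed in the full shape's centered frame.
+ target = partial.mean(axis=1)
+ t_pred = np.zeros(3)                             # placeholder for the predicted T
+ amodal_loss = np.sum((t_pred - target) ** 2)
+
+ # Restriction loss (Eq. 6): per-point invariant embeddings of the partial branch
+ # should match the full-branch embeddings restricted to the visible points
+ # (both mean-centered over the visible set).
+ emb_full = np.random.randn(3, 1024)              # placeholder for X^c from the full branch
+ emb_partial = np.random.randn(3, int(mask.sum()))  # placeholder from the partial branch
+ restricted = emb_full[:, mask] - emb_full[:, mask].mean(axis=1, keepdims=True)
+ emb_partial -= emb_partial.mean(axis=1, keepdims=True)
+ restriction_loss = np.mean(np.sum((restricted - emb_partial) ** 2, axis=0))
+ print(amodal_loss, restriction_loss)
+ ```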
82
+ Unsupervised Part Segmentation Losses: A surprising finding in our method is that we can segment objects into parts consistently across instances without any supervision (see Figure [1\)](#page-0-0). This is enabled by interpreting the higher-degree invariant embedding $H^{\ell}$ as a feature for unsupervised segmentation. Our losses are based on the localization and equilibrium losses of [\[46\]](#page-9-10). We refer the reader to [\[46\]](#page-9-10) and the supplementary document for details on these losses. Note that [\[46\]](#page-9-10) need to perform segmentation to enable rotation canonicalization, while it is optional for us.
83
+
84
+ Our method is trained on a collection of un-canonicalized shapes X, and partial shapes randomly generated using a suitable operator O. We report two kinds of partiality: slicing and image projection (*i.e*., depth maps). We borrow our TFN architecture from [\[32\]](#page-9-20) and use the ReLU non-linearity in all layers. We use 1024 and 512 points for full and partial point clouds, respectively. Our method predicts 5 canonical frames for every category. Our models are trained for 45,000 iterations for each category with the Adam [\[19\]](#page-8-26) optimizer with an initial learning rate of $6 \times 10^{-4}$. We set a step learning rate scheduler that decays our learning rate by a factor of $10^{-1}$ every 15,000 steps. Our models are trained on Linux with Nvidia Titan V GPUs – more details in the supplementary document.
2203.05843/main_diagram/main_diagram.drawio ADDED
@@ -0,0 +1 @@
 
 
1
+ <mxfile host="app.diagrams.net" modified="2021-11-01T04:31:56.609Z" agent="5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/95.0.4638.54 Safari/537.36" etag="DYm9qX8xzqpVRC-ILzlb" version="15.6.2" type="google"><diagram id="xcwLMkDALMh61FPQWAOv" name="Page-1">7Vxbc5s4FP41nmkf7AFxsx9jO2ln2u10Nnt/ycig2GwBsSDXdn99JUCAhHyLDU67+MGBgzgIne/oXJ2BMQu37xIYr37BHgoGQPO2A2M+AGAyNug3I+xygmVPcsIy8b2cpFeER/8bKohaQV37HkqFgQTjgPixSHRxFCGXCDSYJHgjDnvGgfjUGC6LJ2oV4dGFAWoM+9P3yCqnjoFT0d8jf7niT9b5+4WQDy4Ypyvo4U2NZNwPjFmCMcmPwu0MBWzt+Lrk9z3suVpOLEEROeWG+yc4f/62Xj5E77R/4im0nof/Ds1xzuYrDNbFGxezJTu+BMsEr+NiGEoI2qoWHi4CeR2rienl61KYIBwikuzoEM6Iy7qAiKNb+fmmWnCb42hVW2zDKWYPCyEvS97VOtCDYinOWBZwfFVQ5N0xfNGzCEeUOF2RkD5krtNDumKRh9gTNHr2jCPyAEM/YC/4mx9SPAPtE9rQ719xCCM6JCUwIQX6bXpOuRdnZnY1wV9K/BmMpR8EMxzgpHp8PkPkLWXkVnI4AIH90tEawrC0piw4TSWKgvdn7NOJVENSvE5cVFDrwOUDixnYjsSbrtUSkcaNmaTLF3i58I3XLHy9U+FPBMW0mnp5IRQKxjqQFPlsaOxj1DJUdPM1Y6XbjULECtCMlsDS2PVfCpYGo7bBotpY7IAuxHRBD5Ykk01OYEgQYGT/t8b8wjDNJH5HBwAz3lYXZS4+J9xvCUoiyKb4IcKbIBM+0KYwZX/efJi+5bfQN/NlNpSWz6dBrs1bQj11E4gEdUSnXbgJDOxwTXD+ItllGPhLCvC5S0GIKFqnzNfwqSN2V1wIfc9jN09jJqBMZNZ0YM3VIM81QSKK2hbABQqm0P2yzOh87AAYD9nnNIXc6xhdqk+WAFa7oU2GoVAncECdLvKIjE4cxRc5KOJCGQrn0R43V2rc1krp+l41P1mrtZO0euaTHY0fQnoOw5gSokUaZwO1z4nvoqcERkzPG1dZlJZAgs5S+h9PwVvVXuMwKoc6aOAQTLrUWD6BDnBII9uUMAk0ofYRuxRo3pMfqS4i312tYOKRHomtIdEAktvTDOgMrUtgqixJ1/vjJ4qZJ4JVOycMwqePcBP1kGwRkorN0ekSg5OuMPgOR1+pfNDTe0yyzKhgmbX7bYyi1P/am+IWTbEYmdqK/a9TV9rqCnv1rewsu1w36D0q23IQ9aNmWe8SlvbNtsQ6FmlsghO4WPi9/W3T/orYu/mWaKjs8evILtjCSuk8D1JPL1iK2lSpz9eP644q6jN0RT1Vwaump5emGcss4O8p1R1VdjBjMmNP1XZ4Tb8T5OIwRJFHj0PmCEG2fsV+EOT7AVsIdkdjA+GENM44Nme/KJN6QzfXQfb8ZLl4k9UwKEe69Czm4Udvmy+njE3zJ5azkOymYr8yHn6iLeuC/Whyli3k9de6knVqC3Xn1erY4y4lKNyrZQ1V0a6kLfVjXQMKjamF23v0RWHPNT8ttD+icRH9k3mdGlmhBI163TlFd4CgO0BrWihj3KnynFAqSFcwZofPAdoW1dFprVDqBjBNfXdwWa2UMizL5mBki5+BUDsdjcf1i85AVXeXa69XrK0eEbJUDNIbEr6wtMohaZoilnSRw6mVVpmPJfFpudDKkX0Db/KIHB1xXXTTaUrSmShEabakq0DVwHCVqM86WCeYSWFfdn64GmDNfyJz0Gr0V2svVFoMyxG1vGkvOnW2QGsJMRUGSyelh+DtIAjGCtB1WoQCqihaEmLNIVnQmPTLxe4Infhf9PJQG2m6zSl/Mw6j8nS+LTjmZ7v62WeUUM4ZPgqi9+AHfEJiK9hLPZF9kjvR2UhQAAmrZ9QferoHAkwxWa8bI+eFTggYW6OJrVUf8xjnlt0S8wTI9X7xeaZMd0TPeDjmrufVnWOpe5BvIOfCUmJj2Z1iENyujeuIayz9CEDhGdumQo66abRkIQxVFHFpu+ZpFRJOWJclExRlDVssJ7raxZisUMp+FXNX80fW8t3/o1bOdn2ZwxWCZroFdOrJGPv7Da+Y8qPHipTfKPvsTfj9eEA63BOcWcdbYk3eJXVnpCs6gzt1pFtrpemjt1e64wGtaZw7jt7273md2+Y/aFD2TGVNfBz1Frmb9oYjrqQYE0yaFloHnaIV3GyLLPtc2cnh1tZqe6xhdHYWdHuYXgBTXZGI7fYXA2D/D9Suj9MaGFW2/HALYo/VjrEKJvpRsHbbMgb2t2j0Yc+PFvYcQ58EPlvR4tAl9kxVzCNJvVpgJtLNyifoMc6bhjYJjEVAeDBdlWPZShfZVGAeEinPwYJWrZT8o+mRKSSET8vSWWBkGfVPW5JR1RL7HPslHbDDsZRit62m+l2p/8SWcuy8t+nsBhSZkSkxenGanZ5W/7InH1793yPj/js=</diagram></mxfile>
2203.05843/main_diagram/main_diagram.pdf ADDED
Binary file (34 kB). View file
 
2203.05843/paper_text/intro_method.md ADDED
@@ -0,0 +1,161 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Introduction
2
+
3
+ Neural task-oriented dialogue systems have enjoyed a rapid progress recently [\(Peng et al.,](#page-10-0) [2020;](#page-10-0) [Hosseini-Asl et al.,](#page-9-0) [2020;](#page-9-0) [Wu et al.,](#page-10-1) [2020\)](#page-10-1), achieving strong empirical results on various benchmark datasets such as SMD [\(Eric et al.,](#page-9-1) [2017\)](#page-9-1) and Multi-WOZ [\(Budzianowski et al.,](#page-9-2) [2018\)](#page-9-2). However, most existing approaches suffer from the lack of explainability due to the black-box nature of neural networks [\(Doshi-Velez and Kim,](#page-9-3) [2017;](#page-9-3) [Lipton,](#page-9-4) [2018;](#page-9-4) [Bommasani et al.,](#page-8-0) [2021\)](#page-8-0), which may hurt the trustworthiness between the users and the system. For
4
+
5
+ <span id="page-0-0"></span>![](_page_0_Figure_8.jpeg)
6
+
7
+ Figure 1: An example dialogue that incorporates external KB. The context entity (i.e., *Leichhardt*) and answer entity (i.e., *Cityroom*) are marked as Red and Yellow, respectively. The triple containing the context entity and answer entity is not directly stored in KB and should be derived by a reasoning chain formed by multiple KB triplets.
8
+
9
+ instance, in Figure [1,](#page-0-0) a user is asking for a hotel recommendation at a given location. The system performs reasoning on a knowledge base (KB) and incorporates the correct entity in the response. However, when the system fails to provide the correct entities, it would be difficult for humans to trace back the issues and debug the errors due to its intrinsic *implicit* reasoning nature. As a result, such system cannot be sufficiently trusted to be deployed in real-world products.
10
+
11
+ To achieve trustworthy dialogue reasoning, we aim to develop an interpretable KB reasoning as it's crucial for not only providing useful information (e.g., locations in Figure [1\)](#page-0-0) to users, but also essential for communicating options and selecting target entities. Without interpretability, it's difficult for users to readily trust the reasoning process and the returned entities.
12
+
13
+ To tackle this challenge, we present a novel Neuro-Symbolic Dialogue framework (NS-Dial) which combines representation capacities of neural networks and explicit reasoning nature of symbolic approaches (e.g., rule-based expert systems). Existing neuro-symbolic approaches [\(Vedantam et al.,](#page-10-2)
14
+
15
+ [2019;](#page-10-2) [Chen et al.,](#page-9-5) [2020\)](#page-9-5) mostly employ a onephase procedure where a tree-structured program composed of pre-defined human interpretable neural modules (e.g., attention and classification modules in Neural Module Networks [\(Andreas et al.,](#page-8-1) [2016\)](#page-8-1)) is generated to execute to obtain the final predictions. However, since the KB reasoning task involves a reasoning process spanning over multiple triplets in a diverse and large-scale KB, only generating and following a single program (i.e., a reasoning chain formed by KB triplets) is prone to error propagation where a mistake in one step could lead to a failure of the subsequent reasoning process and may result in sub-optimal performances.
16
+
17
+ To address this, we propose a two-phase procedure to alleviate the effects of error propagation by first generating and then verifying multiple hypotheses. Here, a hypothesis is in the form of a triplet containing an entity mentioned in dialogue context and an entity within KB, and their corresponding relation. The valid (i.e., correct) hypothesis is the one that contains the entity mentioned in the ground-truth response. Once we obtain multiple hypothesis candidates during the generation phase, we employ a reasoning engine for verifying those hypotheses. For instance in Figure [1,](#page-0-0) given the user query "*Can you recommend me a hotel located in Leichhardt?*", in order to find the valid hypothesis, the hypothesis generator obtains multiple candidates e.g., *[Cityroom, Located\_in, Leichhardt]* and *[Gonville\_Hotel, Located\_in, Leichhardt]*. The reasoning engine will then construct proof trees to verify them, e.g., for the first hypothesis *[Cityroom, Located\_in, Leichhardt]*, it can be verified with the following reasoning chain in the KB: *[Cityroom, Next\_to, Palm\_Lawn]* → *[Palm\_Lawn, Located\_in, Chadstone]* → *[Chadstone, Located\_in, Leichhardt]*. The whole framework is trained end-to-end using raw dialogues and thus does not require additional intermediate labels for either the hypothesis generation or verification modules.
18
+
19
+ To summarize, our contributions are as follows:
20
+
21
+ - We introduce a novel neuro-symbolic framework for interpretable KB reasoning in taskoriented dialogue systems.
22
+ - We propose a two-phase "generating-andverifying" approach which generates multiple hypotheses and verifies them via reasoning chains to mitigate the error-propagation issue.
23
+ - We conduct extensive experimental studies on
24
+
25
+ two benchmark datasets to verify the effectiveness of our proposed model. By analyzing the generated hypotheses and the verifications, we demonstrate our model's interpretability.
26
+
27
+ # Method
28
+
29
+ In this work, we focus on the problem of taskoriented dialogue response generation with KBs. Formally, given the dialogue history X and knowledge base B, our goal is to generate the system responses Y word-by-word. The probability of the generated responses can be written as:
30
+
31
+ $$p(Y|X,B) = \prod_{t=1}^{n} p(y_t|X,B,y_1,y_2,...,y_{t-1})$$
32
+ (1)
33
+
34
+ where $y_t$ is the t-th token in the response Y. The overall architecture is shown in Figure 2. We start by introducing the standard modules in our system and then explain the two novel modules afterward.
35
+
36
+ We employ pre-trained language model BERT (Devlin et al., 2019) as the backbone to obtain the distributed representations for each token in the dialogue history. Specifically, we add a [CLS] token at the start of the dialogue history to represent the overall semantics of the dialogue. The hidden states $H_{enc} = (h_{CLS}, h_1, ..., h_M)$ for all the input tokens $X = ([CLS], x_1, ..., x_M)$ are computed using:
37
+
38
+ $$H_{enc} = \text{BERT}_{enc}(\phi^{emb}(X))$$
39
+ (2)
40
+
41
+ where M is the number of tokens in the dialogue history, $\phi^{emb}$ is the embedding layer of BERT.
42
+
43
+ To generate the system response, we first utilize a linear layer to project $H_{enc}$ to $H_{enc}' = (h_{CLS}', h_1', ..., h_M')$ that are in the same space of the decoder. We initialize the decoder with $h_{CLS}'$ . During decoding timestep t, the model utilizes the hidden state $h_{dec,t}$ to attend $H_{enc}'$ to obtain an attentive representation $h_{dec,t}'$ via standard attention mechanism. We then concatenate $h_{dec,t}$ and $h_{dec,t}'$ to form a context vector C and project it into the vocabulary space V:
44
+
45
+ <span id="page-2-0"></span>
46
+ $$C = [h_{dec,t}, h'_{dec,t}]$$
47
+ (3)
48
+
49
+ $$P_{vocab,t} = \text{Softmax}(U_1C) \tag{4}$$
50
+
51
+ where $U_1$ is a learnable linear layer, $P_{vocab,t}$ is the vocabulary distribution for generating the token $y_t$ .
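+
+ To make this step concrete, the following is a minimal PyTorch sketch of Equations (3)-(4); all tensor shapes and variable names are illustrative assumptions rather than the implementation used in our experiments.
+
+ ```python
+ import torch
+ import torch.nn.functional as F
+
+ hidden, vocab = 256, 30000
+ M = 20                                    # number of tokens in the dialogue history
+
+ H_enc_proj = torch.randn(M, hidden)       # projected encoder states H'_enc
+ h_dec_t = torch.randn(hidden)             # decoder hidden state at timestep t
+ U1 = torch.nn.Linear(2 * hidden, vocab)   # learnable projection U_1
+
+ # standard attention over the projected encoder states
+ attn = F.softmax(H_enc_proj @ h_dec_t, dim=0)
+ h_dec_t_attn = attn @ H_enc_proj          # attentive representation h'_{dec,t}
+
+ # Eq. (3)-(4): context vector and vocabulary distribution
+ C = torch.cat([h_dec_t, h_dec_t_attn], dim=-1)
+ P_vocab_t = F.softmax(U1(C), dim=-1)
+ ```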
52
+
53
+ Next, we aim to estimate the KB distribution $P_{kb,t}$ , i.e., the probability distribution of entities in the KB, in an interpretable way and fuse $P_{vocab,t}$ and $P_{kb,t}$ for generating the final output tokens. We follow See et al. (2017) and employ a soft-switch mechanism to fuse $P_{vocab,t}$ and $P_{kb,t}$ to generate output token $y_t$ . Specifically, the generation probability $p_{gen} \in [0,1]$ is computed from the attentive representation $h_{dec,t}'$ and the hidden state $h_{dec,t}$ :
54
+
55
+ $$p_{gen} = \sigma(U_2([h'_{dec,t}, h_{dec,t}])) \tag{5}$$
56
+
57
+ where $\sigma$ is the sigmoid function and $U_2$ is a linear layer. The output token $y_t$ is generated by greedy sampling from the probability distribution P(w):
58
+
59
+ $$P(w) = p_{gen} P_{vocab,t} + (1 - p_{gen}) P_{kb,t}$$
60
+ (6)
61
+
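+ As a concrete illustration of the soft-switch fusion in Equations (5)-(6), the sketch below uses random stand-ins for $P_{vocab,t}$ and $P_{kb,t}$ and assumes both distributions are defined over a shared output index space; it is illustrative only.
+
+ ```python
+ import torch
+
+ hidden, out_dim = 256, 30000
+ h_dec_t = torch.randn(hidden)
+ h_dec_t_attn = torch.randn(hidden)
+ U2 = torch.nn.Linear(2 * hidden, 1)
+
+ P_vocab_t = torch.softmax(torch.randn(out_dim), dim=-1)  # stand-in for Eq. (4)
+ P_kb_t = torch.softmax(torch.randn(out_dim), dim=-1)     # stand-in KB distribution
+
+ # Eq. (5): generation probability from the soft switch
+ p_gen = torch.sigmoid(U2(torch.cat([h_dec_t_attn, h_dec_t], dim=-1)))
+
+ # Eq. (6): fused distribution; greedy decoding picks the argmax token
+ P_w = p_gen * P_vocab_t + (1 - p_gen) * P_kb_t
+ y_t = int(torch.argmax(P_w))
+ ```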
62
+ We next describe how to obtain the KB distribution $P_{kb,t}$ in detail using the two novel modules we proposed, i.e., the hypothesis generator and the hierarchical reasoning engine.
63
+
64
+ To compute the KB distribution $P_{kb,t}$, we present two novel modules: the hypothesis generator (HG) and the hierarchical reasoning engine (HRE). We take the context vector C (Equation 3) as the input of the HG module and generate K hypotheses $\mathbb{H}$, each of which is then fed into the HRE module to generate the logical reasoning chains and their belief scores. The estimated belief scores then serve as $P_{kb,t}$, giving us a distribution over the entities in the KB. Next, we describe how each component works in detail and explain how they interact with each other for generating $P_{kb,t}$.
65
+
66
+ <span id="page-3-0"></span>![](_page_3_Figure_0.jpeg)
67
+
68
+ Figure 2: Illustration of the overall architecture: (a) hypothesis generator generating a set of synthesized hypotheses; (b) reasoning engine used to verify the generated hypotheses; (c) dialogue encoding; (d) response generation.
69
+
70
+ Let a hypothesis be a 3-tuple of the form "[H,R,T]", where H and T are the head and tail entities, and R is the relation between entities. In this paper, we are interested in three types of hypotheses including the H-Hypothesis, T-Hypothesis, and R-Hypothesis. The H-Hypothesis is the structure where the tail entity T and relation R are inferred from the context and the head entity H is unknown (which needs to be answered using the KB), and it takes the form " $[\triangleright, R, T]$ ". In a similar vein, the T-Hypothesis and R-Hypothesis have unknown tail entity T and relation R, respectively. The goal of the Hypothesis Generator module is to generate hypotheses in this triple format which will later be verified by the Hierarchical Reasoning Engine.
71
+
72
+ Intuitively, a hypothesis can be determined by its content and structure. The structure indicates the template form of the hypothesis while the content fills in the template. For instance, the H-Hypothesis has the template form "$[\triangleright, R, T]$", and the content that needs to be realised includes candidate entities (i.e., "$\triangleright$") and query states (i.e., the tail entity "T" and relation "R"). To this end, we employ a divide-and-conquer strategy to jointly learn three sub-components: structure prediction, query states prediction, and candidates prediction. Next, we describe each sub-component in detail.
73
+
74
+ **Structure Prediction (SP)** The goal of the structure prediction module is to determine the structure of the hypothesis (i.e., H/T/R-Hypothesis) based on the context. For example in Figure 1, one might expect an H-Hypothesis at timestep 0. Specifically, SP uses a shared-private architecture to predict the hypothesis type. It first takes the context vector C (Equation 3) as input and utilizes a transformation layer shared across all three sub-components to learn the task-agnostic feature $h_{share}$:
77
+
78
+ $$h_{share} = W_2(\text{LeakyReLU}(W_1C))$$
79
+ (7)
80
+
81
+ where $W_1$ and $W_2$ are learnable parameters (shared by the structure prediction, query states prediction and candidate prediction components) and LeakyReLU is the activation function.
82
+
83
+ The shared layer can be parameterised with complicated neural architectures. However, to keep our model simple, we use linear layers which we found to perform well in our experiments. SP next uses a private layer on top of the shared layer to learn task-specific features for structure prediction:
84
+
85
+ $$h_{private}^{sp} = W_4(\text{LeakyReLU}(W_3 h_{share}))$$
86
+ (8)
87
+
88
+ where $W_3$ and $W_4$ are learnable parameters. For ease of presentation, we define the private feature transformation function as:
89
+
90
+ $$\mathcal{F}^{\star}: h_{share} \to h_{private}^{\star}$$
91
+ (9)
92
+
93
+ where $\star$ denotes any of the three sub-components. To obtain the predicted hypothesis structure, a straightforward approach is to apply softmax on $h_{private}^{sp}$ . However, this will break the differentiability of the overall architecture since we perform sampling on the outcome and pass it to the neural networks. To avoid this, we utilize the Gumbel-Softmax trick (Jang et al., 2017) over $h_{private}^{sp}$ to get the sampled structure type:
94
+
95
+ $$I_{sp} = \text{Gumbel-Softmax}(h_{private}^{sp}) \in \mathbb{R}^3$$
96
+ (10)
97
+
98
+ where $I_{sp}$ is a one-hot vector, and the index of its non-zero element can be viewed as the predicted structure. In this paper, we define 0 as H-Hypothesis, 1 as T-Hypothesis and 2 as R-Hypothesis.
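+
+ A minimal PyTorch sketch of the shared-private structure predictor (Equations 7, 8 and 10) is given below; the hidden sizes are assumptions made for the example, and `F.gumbel_softmax` with `hard=True` returns a one-hot sample while keeping the operation differentiable via the straight-through estimator.
+
+ ```python
+ import torch
+ import torch.nn.functional as F
+
+ hidden = 256
+ C = torch.randn(2 * hidden)                # context vector from Eq. (3)
+
+ W1 = torch.nn.Linear(2 * hidden, hidden)   # shared layer (Eq. 7)
+ W2 = torch.nn.Linear(hidden, hidden)
+ W3 = torch.nn.Linear(hidden, hidden)       # private SP layer (Eq. 8)
+ W4 = torch.nn.Linear(hidden, 3)            # 3 structure types: H/T/R-Hypothesis
+
+ h_share = W2(F.leaky_relu(W1(C)))
+ h_private_sp = W4(F.leaky_relu(W3(h_share)))
+
+ # Eq. (10): differentiable sampling of a one-hot structure indicator
+ I_sp = F.gumbel_softmax(h_private_sp, tau=1.0, hard=True)
+ structure_type = int(I_sp.argmax())        # 0: H-, 1: T-, 2: R-Hypothesis
+ ```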
99
+
100
+ **Query States Prediction (QSP)** Query states are the tokens in the hypothesis that need to be inferred from the dialogue history. For example, one might want to infer relation R=*Located\_in* and tail T=*Leichhardt* based on the history in Figure [1.](#page-0-0) Therefore, the goal of query states prediction is to estimate the state information (e.g., T and R in an H-Hypothesis) of the hypothesis. Specifically, QSP takes the shared feature $h_{share}$ as input and then applies the private feature transformation function followed by Gumbel-Softmax to obtain the state tokens of the hypothesis using:
103
+
104
+ $$h_{private}^{qsp,k} = \mathcal{F}^{qsp,k}(h_{share}) \tag{11}$$
105
+
106
+ $$I_{qsp}^{k} = \text{Gumbel-Softmax}(h_{private}^{qsp,k}) \in \mathbb{R}^{n}$$
107
+ (12)
108
+
109
+ where n is the number of tokens (entities and relations) in the KB, $k \in \{0,1\}$, and $I_{qsp}^{0}$ and $I_{qsp}^{1}$ are two one-hot vectors whose corresponding tokens in the KB serve as the state tokens of the hypothesis.
+
+ **Candidates Prediction (CP)** To generate the final hypotheses, we need multiple candidates to instantiate the structure of the hypothesis except for the state tokens, e.g., *Cityroom* or *Gonville\_Hotel* as candidate head entities H in Figure [1.](#page-0-0) To this end, we utilize an embedding layer $\phi_{cp}^{emb}$ to convert all the tokens in the KB to vector representations. We then compute a probability distribution over all the KB tokens using:
110
+
111
+ $$P_i = \operatorname{Sigmoid}(\phi_{cp}^{emb}(K_i) \odot h_{share}) \tag{13}$$
112
+
113
+ <span id="page-4-1"></span>where $K_i$ is the i-th token in the KB, $\phi_{cp}^{emb}$ is the embedding layer of CP, $P_i$ is the probability of the i-th token being a candidate, and $\odot$ denotes the inner product. We use sigmoid instead of softmax because we find that the softmax distribution is too "sharp", making the probabilities of different tokens hard to differentiate when sampling multiple reasonable candidates.
+
+ **Hypothesis Synthesizing** The final hypotheses $\mathbb{H}$ are composed by combining the outputs of the three sub-components as follows: (i) We generate the hypothesis template according to the predicted structure type. For example, if SP predicts structure type 0, which denotes an H-Hypothesis, the model forms the template "$[\triangleright, R, T]$"; (ii) We next instantiate the state tokens in the hypothesis sequentially using the outputs of the QSP module. For example, if the output tokens of QSP are "*Located\_in*" (k=0) and "*Leichhardt*" (k=1), the hypothesis becomes [$\triangleright$, *Located\_in*, *Leichhardt*]; (iii) Finally, we instantiate the candidate (i.e., $\triangleright$) with the top-K (K=5 in our best-performing version) entities selected from P. If the top-2 highest probability tokens are *Cityroom* and *Gonville\_Hotel*, the model will instantiate two hypotheses: *[Cityroom, Located\_in, Leichhardt]* and *[Gonville\_Hotel, Located\_in, Leichhardt]*.
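+
+ The following simplified sketch shows how the three sub-component outputs could be combined into top-K hypotheses for the running example; the slot-filling rules for T- and R-Hypotheses are assumptions for illustration, and the probabilities are made up.
+
+ ```python
+ import torch
+
+ structure_type = 0                                   # 0 denotes an H-Hypothesis
+ state_tokens = ["Located_in", "Leichhardt"]          # QSP outputs (k=0, k=1)
+ kb_tokens = ["Cityroom", "Gonville_Hotel", "Palm_Lawn", "Chadstone"]
+ P = torch.tensor([0.91, 0.87, 0.10, 0.05])           # candidate probabilities (Eq. 13)
+
+ K = 2
+ hypotheses = []
+ for i in torch.topk(P, K).indices:
+     cand = kb_tokens[int(i)]
+     if structure_type == 0:      # [?, R, T]: fill the head slot
+         hypotheses.append([cand, state_tokens[0], state_tokens[1]])
+     elif structure_type == 1:    # [H, R, ?]: fill the tail slot
+         hypotheses.append([state_tokens[0], state_tokens[1], cand])
+     else:                        # [H, ?, T]: fill the relation slot
+         hypotheses.append([state_tokens[0], cand, state_tokens[1]])
+
+ # [['Cityroom', 'Located_in', 'Leichhardt'], ['Gonville_Hotel', 'Located_in', 'Leichhardt']]
+ print(hypotheses)
+ ```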
117
+
118
+ With the hypotheses generated by the HG module, we next aim to verify them via logical reasoning chains. Inspired by Neural Theorem Provers [\(Rocktäschel](#page-10-13) [and Riedel,](#page-10-13) [2017\)](#page-10-13), we develop chain-like logical reasoning with the following format:
119
+
120
+ <span id="page-4-0"></span>
121
+ $$\alpha, (H, R, T) \leftarrow (H, R_n, Z_n) \wedge \cdots \wedge (Z_1, R_1, T)$$
122
+ (14)
123
+
124
+ where α is a weight indicating the *belief* of the model in the target hypothesis [H, R, T], the right part of the arrow is the reasoning chain used to prove that hypothesis, and $R_i$ and $Z_i$ are relations and entities from the KB. The goal is to find the proof chain and the confidence α for a given hypothesis. To this end, we introduce a neural-network based hierarchical reasoning engine (HRE) that learns to conduct chain-like logical reasoning. At a high level, HRE recursively generates multiple levels of sub-hypotheses using neural networks that form a tree structure as shown in Figure [2.](#page-3-0) Next, we describe how this module works in detail.
125
+
126
+ The module takes the output hypotheses from the HG module as input. Each hypothesis serves as one target hypothesis. To generate the reasoning chain in Equation [14,](#page-4-0) the module first finds sub-hypotheses of the same format as the target in the hypothesis space. The sub-hypotheses can be viewed as the intermediate reasoning results used to prove the target. One straightforward approach is to use neural networks to predict all the tokens in the sub-hypotheses (2 heads, 2 tails and 2 relations). However, this leads to an extremely large search space of triples and is inefficient. Intuitively, sub-hypotheses inherit from the target hypothesis, and the sub-hypotheses themselves are connected by bridge entities. For example, *[Uber,office\_in,USA]* can be verified by the two sub-hypotheses *[Uber,office\_in,Seattle]* and *[Seattle,a\_city\_of,USA]*: *Uber* and *USA* are inherited from the target and *Seattle* is the bridge entity between the sub-hypotheses. Motivated by this, we propose to reduce the triple search complexity by constraining the sub-hypotheses. Specifically, given the target [H, R, T], we generate sub-hypotheses of the format $[H, R_1, Z], [Z, R_2, T]$, where Z is the bridge entity and $R_1$ and $R_2$ are relations to be predicted. Therefore, the goal of the neural networks is reduced to predicting three tokens (2 relations and 1 bridge entity). Formally, HRE predicts the vector representation of the bridge entity as follows:
129
+
130
+ $$h_H, h_R, h_T = \phi_{cp}^{emb}(H), \phi_{cp}^{emb}(R), \phi_{cp}^{emb}(T)$$
131
+ (15)
132
$$h_Z = W_6(\text{LeakyReLU}(W_5[h_H, h_R, h_T])) \tag{16}$$
133
+
134
+ where $[h_H, h_R, h_T]$ is the concatenation of the representations of the tokens in the target hypothesis and $h_Z$ is the vector representation of the bridge entity Z. The prediction of $h_{R_1}$ and $h_{R_2}$ uses the same architecture as in Equation 16, except with different linear layers for the feature transformation. Note that $h_Z$ denotes a KB token in the embedding space. We can decode the token by finding the nearest KB token to $h_Z$ in vector space. More details on the token decoding can be found in Appendix A. Upon obtaining $h_Z$, $h_{R_1}$, $h_{R_2}$, the module generates the two sub-hypotheses in vector representation. Next, the module iteratively takes each of the generated sub-hypotheses as input and extends the proof process by generating next-level sub-hypotheses in a depth-first manner until the maximum depth D has been reached.
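+
+ A minimal sketch of one level of this expansion is shown below; the relation predictions are stubbed out with placeholders (in the model they use the same architecture as Equation 16 with separate linear layers), the nearest-token decoding is omitted, and all dimensions are assumptions.
+
+ ```python
+ import torch
+ import torch.nn.functional as F
+
+ emb = 128
+ W5 = torch.nn.Linear(3 * emb, emb)
+ W6 = torch.nn.Linear(emb, emb)
+
+ def bridge(h_H, h_R, h_T):
+     """Eq. (15)-(16): predict the bridge entity embedding from the target triple."""
+     return W6(F.leaky_relu(W5(torch.cat([h_H, h_R, h_T], dim=-1))))
+
+ def prove(h_H, h_R, h_T, depth, max_depth):
+     """Depth-first expansion into sub-hypotheses [H, R1, Z] and [Z, R2, T]."""
+     if depth == max_depth:
+         return [(h_H, h_R, h_T)]                      # leaf triple of the proof tree
+     h_Z = bridge(h_H, h_R, h_T)
+     h_R1, h_R2 = torch.randn(emb), torch.randn(emb)   # placeholders for predicted relations
+     return (prove(h_H, h_R1, h_Z, depth + 1, max_depth) +
+             prove(h_Z, h_R2, h_T, depth + 1, max_depth))
+
+ leaves = prove(torch.randn(emb), torch.randn(emb), torch.randn(emb), 0, max_depth=2)
+ print(len(leaves))  # 4 leaf triples at depth D=2
+ ```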
135
+
136
+ **Belief Score** To model confidence in different reasoning chains, we further measure the semantic similarities between each leaf-node triple and the triples in the KB, and compute the belief score $\alpha_m$ of the m-th hypothesis $\mathbb{H}_m$:
137
+
138
+ $$\alpha_m = \min_{\forall i \in U} \max_{\forall j \in V} e^{-d(Leaf_i, KB_j)}$$
139
+ (17)
140
+
141
+ where $Leaf_i$ is the representation (concatenation of H, R, T) of the i-th leaf node in the proof tree (DFS order), $KB_j$ is the representation of the j-th triple in the KB, U=[0,...,u-1] and V=[0,...,v-1] where u and v are the numbers of leaf nodes and KB triples respectively, and d is the distance metric. In general, any distance function can be applied; we adopt Euclidean distance in our implementation since we found that it worked well in our experiments. All the triples in the leaf nodes form the reasoning chain for the input hypothesis as in Equation 14. The hypotheses $\mathbb{H}$ coupled with the beliefs $\alpha$ form our KB distribution $P_{kb,t}$. More details can be found in Appendix B. Intuitively, the belief score can be viewed as the likelihood that the hypothesis contains the correct entity. If the hypothesis is valid (i.e., contains the correct answer entity), it should receive a high likelihood, which in turn encourages the model to generate proper reasoning chains based on the triples stored in the KB.
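+
+ Equation (17) amounts to a min-over-leaves of a max-over-KB-triples similarity; a short sketch with assumed tensor shapes is given below.
+
+ ```python
+ import torch
+
+ emb = 3 * 128                    # a triple representation concatenates H, R and T
+ leaves = torch.randn(4, emb)     # u leaf-node triples from the proof tree
+ kb = torch.randn(50, emb)        # v triples stored in the KB
+
+ # Eq. (17): best KB match per leaf, then the weakest leaf sets the belief score
+ dist = torch.cdist(leaves, kb)                       # (u, v) Euclidean distances
+ alpha_m = torch.exp(-dist).max(dim=1).values.min()
+ ```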
142
+
143
+ <span id="page-5-1"></span>
144
+
145
+ | Dataset | Domains | Train | Dev | Test |
146
+ |--------------|-------------------------------|-------|-----|------|
147
+ | SMD | Navigate, Weather, Schedule | 2425 | 302 | 304 |
148
+ | MultiWOZ 2.1 | Restaurant, Attraction, Hotel | 1839 | 117 | 141 |
149
+
150
+ <span id="page-5-2"></span>Table 1: Statistics of SMD and MultiWOZ 2.1.
151
+
152
+ <span id="page-5-0"></span>**Training** We apply two loss functions to train the whole architecture end-to-end. The first loss function $\mathcal{L}_{gen}$ is for the final output. We use a cross-entropy loss over the ground-truth token and the generated token from the final distribution P(w). The second loss $\mathcal{L}_{cp}$ is for the candidates prediction (CP) module in the hypotheses generator. We apply binary cross-entropy loss over the output distribution for each KB token (Equation 13) and their corresponding labels. The labels for each KB token are computed as follows:
153
+
154
+ $$Label_i = \begin{cases} 1, & K_i = y_t \\ 0, & K_i \neq y_t \end{cases}$$
155
+ (18)
156
+
157
+ where $K_i$ is the *i*-th token in the KB and $y_t$ is the ground-truth output at timestep t. The final loss $\mathcal{L}$ is calculated by:
158
+
159
+ $$\mathcal{L} = \gamma_g * \mathcal{L}_{gen} + \gamma_c * \mathcal{L}_{cp} \tag{19}$$
160
+
161
+ where $\gamma_g$ and $\gamma_c$ are hyper-parameters and we set them to 1 in our experiments.
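+
+ A sketch of how the two losses are combined (Equations 18-19) is shown below with illustrative tensors; the fused distribution and candidate probabilities are random stand-ins.
+
+ ```python
+ import torch
+ import torch.nn.functional as F
+
+ out_dim, n_kb = 30000, 200
+ gamma_g, gamma_c = 1.0, 1.0
+
+ # L_gen: cross-entropy between the fused distribution P(w) and the gold token y_t
+ P_w = torch.softmax(torch.randn(out_dim), dim=-1)
+ gold = 42
+ loss_gen = -torch.log(P_w[gold])
+
+ # L_cp: binary cross-entropy over the CP outputs (Eq. 13) and the labels of Eq. (18)
+ P_cand = torch.sigmoid(torch.randn(n_kb))
+ labels = torch.zeros(n_kb)
+ labels[17] = 1.0                 # the KB token that equals the gold output y_t
+ loss_cp = F.binary_cross_entropy(P_cand, labels)
+
+ # Eq. (19): final training objective
+ loss = gamma_g * loss_gen + gamma_c * loss_cp
+ ```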
2203.11284/main_diagram/main_diagram.drawio ADDED
The diff for this file is too large to render. See raw diff
 
2203.11284/paper_text/intro_method.md ADDED
@@ -0,0 +1,90 @@
1
+ # Introduction
2
+
3
+ Representing the attributes of an image that are independent of its domain (e.g. imaging modality, geographic location, sensitive attribute or object identity) is key for many computer vision tasks. For instance, consider the following toy example: assume that we observe images of faces, each image is specified by the identity and pose but only labels of the identity are provided. The goal is to learn a representation that captures the unlabeled pose attribute, and carry no information about the identity attribute. This task has many other applications, including: learning to make fair decisions, cross domain matching, model anonymization, image translation etc. It is a part of the fundamental machine learning problem of representation disentanglement. We note that the most ambitious disentanglement setting, i.e. unsupervised disentanglement where no labels are provided, was proven by Locatello et al. [@locatello2019challenging] to be impossible without inductive biases. Luckily, our setting is easier than unsupervised disentanglement as the domain label is provided for all training images. This setting has attracted much research e.g. DRNET [@denton2017drnet], ML-VAE [@bouchacourt2018mlvae] and LORD [@gabbay2020lord].
4
+
5
+ We begin by defining the desired properties for domain disentanglement. This task has two objectives: i) *Invariance*: the learned representation should be invariant to the domain ii) *Informativeness*: the learnt representation should include the information about all of the attributes which are independent of the domain. The invariance requirement is challenging, but it can *in-principle* be directly optimized as the domain label is provided, e.g. using an adversarial discriminator. The informativeness requirement, however, is not generally possible to directly optimize without additional inductive biases as the attributes are unlabeled. This was theoretically demonstrated by [@Johansson2019SupportAI; @invforda19zhao]. Nonetheless, recent methods have been able to achieve meaningful representations in many cases, by enforcing a reconstruction term, which optimizes a related objective.
6
+
7
+ <figure id="fig:model_diagram" data-latex-placement="t!">
8
+ <img src="images/model_diagram.png" />
9
+ <figcaption><strong><em>An illustration of our method.</em></strong> The representations are domain invariant as the representations of each domain follow a spherically uniform distribution (encouraged by our domain-wise contrastive objective). Image augmentations (here Gaussian blurring) are used to assign similar images to nearby representations which indirectly improves informativeness. The reconstruction objective and encoder pre-trained weights initialization are not shown in this diagram.</figcaption>
10
+ </figure>
11
+
12
+ We present a new method, **DCoDR**: **D**omain-wise **Co**ntrastive **D**isentangled **R**epresentations, that significantly improves both representation domain invariance and informativeness. To enforce the domain invariance, we propose a per-domain contrastive loss, that requires the representations of each domain to be uniformly distributed across the unit sphere. Differently from standard contrastive losses [@chen2020simclr], our objective only considers negative examples from the *same* domain. As shown in Sec. [5.2.1](#sec:ablations){reference-type="ref" reference="sec:ablations"}, this seemingly simple change is crucial for learning domain invariant representations. Unfortunately, we find that encoders which satisfy this invariance constraint alone, are often uninformative over the desired attributes. This is a case of the documented phenomenon of *feature suppression* [@featsupprunsup20li; @intriguingcl20chen; @avoidshort21robinson]. In line with previous methods [@gabbay2020lord; @denton2017drnet; @bouchacourt2018mlvae], we optimize the informativeness of the representations indirectly by a reconstruction constraint. As we find this may be insufficient for learning informative representations in some cases, we propose two other techniques: i) Similarly to several self-supervised objectives (e.g. the one in SimCLR [@chen2020simclr]), we enforce representations of images to be similar to those of their augmentations. Despite being common among self-supervised methods, we show that standard choices of augmentations (specifically, those used by SimSiam [@chen2020simsiam]) can harm the domain invariance of the representation. We analyse the effectiveness of different augmentations for domain invariant representation learning. ii) Initializing the image encoder using weights pre-trained with self-supervision on an external dataset, which we empirically find to learn both more informative and invariant representations.
13
+
14
+ We evaluate our method on five popular benchmarks. Our method significantly exceeds the state-of-the-art in terms of invariance and informativeness. We investigate a fully discriminative version and find that in many cases it is competitive with the previous state-of-the-art while being much faster.
15
+
16
+ A summary of our contributions:
17
+
18
+ 1. A non-adversarial and non-generative, domain invariance objective.
19
+
20
+ 2. Analysing the benefits and pitfalls of image augmentations for informativeness and domain invariance of the learned representations.
21
+
22
+ 3. A new approach, DCoDR, which significantly outperforms the state-of-the-art in domain invariant representation learning.
23
+
24
+ 4. A discriminative only variant, which is 5X faster than existing approaches.
25
+
26
+ 5. An extensive evaluation on five datasets.
27
+
28
+ # Method
29
+
30
+ We receive as input a set of training samples $\mathcal{X}_t = \{x_1,x_2,..,x_N\}$. Each training sample $x \in \mathcal{X}_t$ has a labeled domain $d$ and unlabelled attributes $y$ which are uncorrelated to $d$. We assume that the labeled domain $d$ is a single categorical variable. The objective is to learn an encoder $E$, which encodes each image $x$ as code $z = E(x)$ satisfying the criteria in Sec. [3.2](#sec:crit){reference-type="ref" reference="sec:crit"}.
31
+
32
+ The domain disentanglement task requires satisfying the following two criteria:
33
+
34
+ **Invariance**: We require that the representation $z$ should not be predictive of the domain $d$. This can be written as: $$\begin{equation}
35
+ \label{eq:disentanglement_metric}
36
+ P(d|z) = P(d)
37
+ \end{equation}$$
38
+
39
+ **Informativeness**: We require that the representation $z$ should encapsulate as much information on attributes $y$ as possible. Note that $z$ cannot hold more information about $y$ than the original image $x$, as there exists a deterministic encoder $E$ which maps $x$ to $z$. It therefore follows by the data processing inequality, that the maximally informative representation $z$ should be as informative as the original image about the attributes $y$: $$\begin{equation}
40
+ \label{eq:completeness_metric}
41
+ I(y,z) = I(y,x)
42
+ \end{equation}$$
43
+
44
+ In our setting, only the domain labels $d$ are provided but not the attribute labels $y$. The objective in Eq. [\[eq:completeness_metric\]](#eq:completeness_metric){reference-type="ref" reference="eq:completeness_metric"} therefore cannot be optimized directly. That said, in line with previous methods, we optimize informativeness by training a conditional generator through a reconstruction objective. Unlike previous methods, we use additional techniques which increase informativeness significantly. Our proposed approach will be detailed in Sec. [4.3](#sec:augmentations){reference-type="ref" reference="sec:augmentations"}.
45
+
46
+ Current methods optimize the invariance criterion using two main approaches:
47
+
48
+ **Adversarial methods [@denton2017drnet].** Many disentanglement methods rely on adversarial domain confusion constraints to ensure representation invariance. They are often written in the following form:
49
+
50
+ $$\begin{equation}
51
+ \label{eq:invariance_gan}
52
+ L_{adv} = \max_D \ell_{CE}(D(E(x)), d)
53
+ \end{equation}$$
54
+
55
+ where $\ell_{CE}$ is the cross-entropy loss. The discriminator $D$ measures how informative the representation $z=E(x)$ is about the original domain $d$. An encoder that satisfies this constraint will indeed be domain invariant: $P(d|z) = P(d)$. Unfortunately, adversarial training is challenging and the optimization often fails to minimize this loss perfectly.
56
+
57
+ **Variational-autoencoders (VAE) [@bouchacourt2018mlvae; @gabbay2020lord].** Given the weaknesses of adversarial methods, variational methods were proposed that ensure the representations are normally distributed $P(z|d) = N(0; I)$. The encoder in this case outputs the parameters of a Gaussian distribution of the posterior $p(z|x)$. Using the ELBO criterion, the objective becomes: $$\begin{equation}
58
+ \label{eq:invariance_VAE}
59
+ L_{vae} = \ell_{KL}(E(x), N(0, I))
60
+ \end{equation}$$
61
+
62
+ However, LORD [@gabbay2020lord] found that simply optimizing this criterion does not converge to disentangled representations. Furthermore, they showed that randomly initialized encoders are highly entangled and that variational losses are insufficient for removing this entanglement. Instead, they suggested first using latent optimization rather than deep encoders, directly learning the representation $z$ of each training image $x$. This indeed improves the domain invariance of the representations, but is more sensitive to hyper-parameter choices. It also requires an inconvenient and time-consuming second stage for learning an image-to-representation encoder.
63
+
64
+ DCoDR optimizes the combination of the $3$ objectives presented in this section:
65
+
66
+ $$\begin{equation}
67
+ \label{eq:total_objective}
68
+ \min_{E,G} L_{DCoDR} = L_{inv} + L_{rec} + L_{aug}
69
+ \end{equation}$$
70
+
71
+ We use the augmentations from Sec. [4.3](#sec:augmentations){reference-type="ref" reference="sec:augmentations"}. We initialize the encoder $E$ with the weights of an MoCo-V2 encoder pre-trained on ImageNet (without labels).
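+
+ For illustration, a minimal PyTorch sketch of what the domain-wise contrastive term $L_{inv}$ could look like is given below: an InfoNCE-style loss where, for each anchor, negatives are drawn only from samples of the *same* domain. The exact form, temperature and batching used by DCoDR are not reproduced here; the function is a simplified stand-in with assumed shapes.
+
+ ```python
+ import torch
+ import torch.nn.functional as F
+
+ def domain_wise_contrastive(z1, z2, domains, tau=0.1):
+     """InfoNCE computed separately per domain; z1/z2 are two augmented views."""
+     z1, z2 = F.normalize(z1, dim=1), F.normalize(z2, dim=1)
+     loss, count = 0.0, 0
+     for d in domains.unique():
+         idx = (domains == d).nonzero(as_tuple=True)[0]
+         if len(idx) < 2:
+             continue
+         logits = z1[idx] @ z2[idx].t() / tau     # negatives come from domain d only
+         targets = torch.arange(len(idx))         # positives lie on the diagonal
+         loss = loss + F.cross_entropy(logits, targets)
+         count += 1
+     return loss / max(count, 1)
+
+ # usage with random stand-ins for a batch of encoded images
+ z1, z2 = torch.randn(8, 128), torch.randn(8, 128)
+ domains = torch.tensor([0, 0, 0, 1, 1, 1, 1, 0])
+ print(domain_wise_contrastive(z1, z2, domains))
+ ```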
72
+
73
+ We present a discriminative variant of our method, by simply dropping the reconstruction constraint:
74
+
75
+ $$\begin{equation}
76
+ \label{eq:abcd_total_objective}
77
+ \min_{E} L_{DCoDR-norec} = L_{inv} + L_{aug}
78
+ \end{equation}$$
79
+
80
+ The lack of a reconstruction constraint makes this variant typically learn less informative representations than DCoDR. However, as this variant does not train a generator, it is several times faster than DCoDR, which is itself considerably faster than the previous state-of-the-art LORD.
81
+
82
+ Although a part of our method is motivated by the SimCLR [@chen2020simclr] objective, it is significantly different. In Tab. [3](#tab:ablations){reference-type="ref" reference="tab:ablations"} we show that although the differences from SimCLR might look superficially simple, each of them is essential for the success of our method (the first $3$ apply for DCoDR-norec as well):
83
+
84
+ - **Domain-wise Loss.** DCoDR learns a contrastive loss over each domain separately whereas SimCLR learns a single loss over all the data.
85
+
86
+ - **Choice of Augmentations.** DCoDR learns a reduced set of augmentations rather than the standard set used in SimCLR.
87
+
88
+ - **Pre-Training.** DCoDR initializes the encoder's weights by *unsupervised* pre-training on ImageNet using MoCo-V2 [@chen2020mocov2], which does *not* use any labels.
89
+
90
+ - **Reconstruction.** DCoDR uses a reconstruction term for increasing the informativeness of its representations, which does not exist in SimCLR.
2204.01613/main_diagram/main_diagram.drawio ADDED
@@ -0,0 +1 @@
 
 
1
+ <mxfile host="Electron" modified="2022-01-27T18:02:29.770Z" agent="5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) draw.io/16.4.0 Chrome/96.0.4664.110 Electron/16.0.7 Safari/537.36" version="16.4.0" etag="QtjcYygL5Wwuu2hI4cXP" type="device"><diagram id="lvQ-EjV8rCWYBtpJWgsn">7V1bc5s4FP41ntl9aAZJ3PyYS9t9aGcy28ls+5TBRrFpMXgxaZL++hUgbkKAAIFxl+Qh5ljcdD595+icI2WFbg+vHwPruP/s29hdQcV+XaG7FYQIwjX5E0neEgkAyEgku8CxqSwXfHF+YSpUqPTZsfGp1DD0fTd0jmXh1vc8vA1LMisI/JdysyffLd/1aO3oHZVc8GVrubjS7B/HDveJ1IRGLv8LO7t9emeg0zc+WGljeonT3rL9l8K90PsVug18P0w+HV5vsRv1XtovyQN9qPk2e7AAe6HICTA54aflPtN3o88VvqUvG/jPno2j9mCFbl72Toi/HK1t9O0L0S+R7cODS79+clz31nf9ID4XPT09we2WyE9h4P/AhW9sfaNrenSG74VUw8Akx/SBcBDi19qXAllXEZBh/4DD4I00SU/QqdremOOXXFlIobJ9QVGZ0KIA2WXXzvuQfKDdyO9S9Dt2qWaYV4bW2qtgDa6gNlLHqu0di20yTukhdjf+y/tccBMLyBd7P3B+kS6y3Ejo2dcRIxD51rVOJ2db7v1cVQqvz5X4J7rOqxN+pedEn79FZ1xp9OjulV4gPngrXS1lEFirueityqzhPwdbKkrxFlrBDtPuNoTVW9CdxhkQqSzArhU6P8tPwdMlvcO975AbZ+h5B3JYUPRAFZWvkrwTPbHIWMy1oF5BIjIZdCV9UbkWUbT1Vmh2jBqc6h8bGWUeIepm8JpcMUdv1rNCgNYa8esH4d7f+Z7lFkFcpo68zSffP1LhdxyGbxRF1nPolwHdB6gERcHb1+JBchbU0uP8vPhIKsKNKsBNYYB3Q25XhKgqgxDAmNu29npze4QGtl9rUhGrD0Kscn7EtgPWCsLUIHi+h1PZByfqFHoGx2QQIW3Cs9la9DvieACa/AEhapYNjlnW3ZC+Wwkt+r/PfvrFu1P81tekATCPr/Grp9+TT7v4r3b7R+TuP66M6LNrHTa2tTLIlRRy+Gd6H/KEya3oWYPcLSvYUn1AvapJDGwNGzxPYK0byIrO2AWW7RAt3DkBmXs4vhf5ItYprFX2IMcMsb6uWfXKdI5l1yU4ZOY0mn84q8ZtC5tPXH9b35p483R2jcMJNb6u1fjpaHnDND4GdK4FoJOJk1eYAFEaNm2VhygTbpB+fg5BEyIqDenUuBTUBJ/F65XkCzTMGTMnJfdLvqUPObZXDdLAWcGN6DBxHDgjNM3qLI6NEUiaxRmqXrqPCuTO4tKOvARStH83UnzBY5Girp6RFJsjW+ckxTNPrOrJVA4pcoJpF0GKfXiLF6ieM2+JzAOm561+04PpeGvK6QFQL4C3yr5WzlQXzVvahfLWUGcOynbmtAsjxS5hscnJsWe0bDpynDJaBvQRoZUgIsEDMTrGza8ID8bdY46PGByOl/yNChE2G9Lu76id9p58Ask3oXPAZAAqP2JA6dYh0ri3OR3j2ykVURlwjcgiegvL8CnjgtJrEURUZLnOLkLGlugZE/lNhAJna7nX9IuDY9txtoGH13IGYgxYMagyjCutgivArziQACxeBJ41w6PmvnvOCfrY8e/Ph2PqTxDykWVA0+KCfknGqbLobBIdqQx8RJPoYE0strrOfjQmOc/CUuKsw+zmIk6YM7wMV29Ct45JJiMghorKhdZKu39Yg9w+CKtPUgz2vO4DbDuJmyKYiZqHzRNF3iAraLDlMmbVCJrwilMeltLaECsImxMJSzUNLlm1oqFbj8QoQ6tpUMdqGtRWHTO0veRqmkw9sy+nkWQa+wePxwF+GgId35YirWoCWXMq6rxpnGsJOmxdR6SmdBuRLe2Hj5jmxMiMSH7kQrLqNGh2NZpA3IEYOLji2UzLDEZ4cK3XpQupgk5v55G17lY5yraXbovqs0Gy4pVRcCnyho2bz5/u49hUHq56KceqlhK/gl8NGVLlBS3XnLCDjKAld9HFxLB4SO+4CYbdbykjbEAVL084GqrqsyyiqCIMy9fyR+zhwApJ717UDL2g4UhrUjSM0FVaSZJSB6oqGWgos56yQ9OwPucxWNF3zmkbOAfHW5Sd+C1X6SraVNmQk4TgxaelaJqThKD8Dm9kEXhM3ZXMhuscT3X9XuTn0zFZC/zkvEaaqJAqR+kSFGOgLBmUxT7zQH5RNQYvNJZLB2mnOeo+o3lT3TTG6D6NEZle9Yi/0/nBHANoCJZtutoyqai0h5InFSOG4kW9R5Fq1xHdvrnV+rNuH6+sdSy3L7UzjYnqlpx03xCixLxxv5E/TY64sgDU6BkHURlqyKhC9hprNsmodVvhKruaHw2Lyfe0lFJTz2UbmJU9KmhdHjIFQV2BRnR0j4mXjWOft6F+pnGUTBcK1AADp75Bdr3GlMrGv17zwLXPxbY3oVz8w+Ec3aUCoi+f90AhFM/2TEPWJsPVfbEKYQvpyyvfQQK721wQPOYKBaSse0KBde/YC0mEgsh+PAsUhkKhYnj6QkFV1mwoQSIYLqhuHj+mE0R5KyFnF1eUVtSlqGz4yuRVN6eGoRS7khC5QmNWzcuPRcSwKuYxH3+s2gvv2ar7BZFNkW6FCXNzygxHQ2P9ZjezReODGAq9BYVDUAhNnRfUHw2I9XvvzBaIsbVNMFhdVlSDydKSJG+BZVdYojSWOwUmL2h3oJq6owVcTeCCTA0amBBcaSn2xYHrYQFVJ1BNbEjVC9q9h8lrLrjqgqtpp60qL4w+U1jhYs6czhYWR6tzXIS33G00eF3Q3j0leDGhkQVmbTADZ3S56mu/R9yw4kEgUrFsVNF7elimLP5GFYCT3JBRI6qOmSSoBdT1AqjJAMWrJhsPTrzUQMcEJNslMhKSrSvukqdsrpiZW61CZb0aWxAovMCOsWhZ8cLYC+wkL5hTRXbdmSD93VoYbVunfVabFh3cWyGhEi+WEEatJYbmYpq5VT4CtjCQ3VSidzWNOlrWXL2gKL63eOqNlhAo5emgCiacDvIi7x25qFzD2oWZeGvRRXZ9OBdzzc20Ak0Sc6ls6dBI/xcLGPwHrt1tjN0YSPIyE42XG1jgXwN/dW7wZ9CRpSw7G27Ws0Rihnso/JFsOPNSEv8fz7LDYoEL8yw1Zp9QVbBOuzNAawxKLT+z7Y2WvUhq3kO4vewBA2fG/z3WvfZYicpZiDq7scNuCKVqPcldYzE63j9G0CSscSjDSYAoh3ocPSm8Dz/PzX/W2TUPvfd+Ytc8smCVteaLfWDZfChhZca5APxOuVKAwaAYmi0wlrZQUf
z/aJ4H2/19jzNhu833YNun85OeY4EcBn4UvsqbB9Zx/9m3cdTiPw==</diagram></mxfile>
2204.01613/main_diagram/main_diagram.pdf ADDED
Binary file (45.4 kB). View file
 
2204.01613/paper_text/intro_method.md ADDED
@@ -0,0 +1,78 @@
 
1
+ # Introduction
2
+
3
+ The ability to generate new samples from a distribution is a central problem in machine learning. Most of the work has focused on data with a regular structure, such as images and audio [@brock2018large; @oord2016wavenet]. For such data, Generative Adversarial Networks (GANs) [@goodfellow2014generative; @arjovsky2017wasserstein] have emerged as a powerful paradigm, managing to balance generation novelty and fidelity in a manner previously thought impossible.
4
+
5
+ The present work considers the use of GANs for graph data. The generation of novel graphs is relevant for numerous applications in molecule [@jin2018junction; @de2018molgan; @liu2018constrained], protein [@huang2016coming], network, and circuit design [@mirhoseini2021graph]. Yet, despite recent efforts, the problem remains largely unresolved: current state-of-the-art approaches are constrained to small graphs and often fail to strike a beneficial trade-off between capturing the essential properties of the training distribution and exhibiting high novelty. We argue that the one-shot generators used in current GAN models face expressivity issues that hinder them from capturing the global graph properties. We refer to a model as one-shot if it generates all edges between nodes at once. In contrast, auto-regressive models build a graph by progressively adding new nodes and edges.
6
+
7
+ <figure id="fig:bait" data-latex-placement="t!">
8
+ <div class="center">
9
+ <embed src="figures/bait.pdf" />
10
+ </div>
11
+ <figcaption>Generating the spectrum first allows to control the global graph structure prior to the local connectivity. <strong>Top</strong>: Conditioning spectral embedding. <strong>Bottom</strong>: The generated graph, plotted using its final spectral embedding. </figcaption>
12
+ </figure>
13
+
14
+ One-shot generators are typically confronted with the demanding task of controlling global graph structure emergent from a large number of local node interactions. As a thought experiment, consider the toy problem of generating a tree by sampling node embeddings and then connecting them using some similarity kernel [@krawczuk2020gg; @serviansky2020set2graph; @vignac2021top]. Even if the extended neighborhood of each node is locally tree-like, the overall graph will not be valid unless *all* nodes are positioned appropriately w.r.t. each other. Misplacing even a few nodes can completely alter the global properties by introducing cycles and rendering the graph disconnected. Sadly, the aforementioned expressivity issue becomes increasingly pronounced as the size of the graph grows and can manifest in terms of non-convergent GAN training.
15
+
16
+ This work puts forth SPECTRE, an equivariant generator that aims to overcome the expressivity issues of one-shot approaches. SPECTRE decomposes the graph generation problem into two parts that are learned jointly: (*i*) modeling the dominant components of the graph spectrum and (*ii*) generating a graph conditioned on a set of eigenvalues and eigenvectors. Modeling the distribution of the top-$k$ eigenvalues and eigenvectors is a simpler problem than one-shot graph generation. Crucially, a direct inspection of the graph spectrum conveys many pertinent global graph properties (e.g., connectivity, cluster structure, diameter) and can be utilized to construct embeddings that approximate the geodesic distance between nodes [@belkin2003laplacian; @coifman2005geometric; @mohar1997some]. As shown in Figure [1](#fig:bait){reference-type="ref" reference="fig:bait"}, the (normalized) graph Laplacian eigenvectors (associated with low eigenvalues) capture well the coarse graph structure, making them ideal to model non-local dependencies. Thus, by generating the spectrum first, SPECTRE can control the global properties of the generated graphs. The learned eigenvectors and eigenvalues are then used to initialize the node embeddings of a second generator (inspired by GG-GAN [@krawczuk2020gg]) that acts as a local refinement procedure. Both steps are permutation equivariant, differentiable, and are optimized jointly in an end-to-end fashion.
17
+
18
+ Our experiments with synthetic and real-world graphs provide evidence that spectral conditioning helps to overcome the limitations of one-shot generators, managing to faithfully capture the graph statistics even for graphs with hundreds of nodes. Interestingly, SPECTRE can outperform the state-of-the-art by a non-negligible margin, striking a compelling trade-off between the ability to generate graphs not in the training distribution (novelty) and modeling fidelity. We also find that conditioning on real spectra yields further improvement without sacrificing novelty, indicating that additional gains may be attainable by using a better spectrum generator.
19
+
20
+ # Method
21
+
22
+ In this contribution, we consider unweighted undirected graphs $G=(\cV, \cE)$ where $\cV$ is a set of $n$ nodes connected (or not) by a set of edges $\cE$. We index each node $v_i \in \cV$ with $i=1,\ldots, n$ and define the adjacency matrix $\bA$ as $\bA_{i,j}=1$ if $v_i$ and $v_j$ are connected and $\bA_{i,j}=0$, otherwise.
23
+
24
+ Spectral graph theory studies the connections between the spectrum of the graph Laplacian and the general properties of the graph. The normalized graph Laplacian[^1] is defined as $\bL= \bI - \bD^{-\frac{1}{2}} \bA \bD^{-\frac{1}{2}},$ where $\bD = \diag(d_1, \cdots, d_n)$ is the diagonal degree matrix defined as $d_{i}=\sum_{j=1}^n{\bA_{i,j}}$. Being a symmetric positive semi-definite matrix, the graph Laplacian can always be diagonalized as $\bL = \bU \bLambda \bU^T,$ where the orthogonal matrix $\bU = [\bu_1, \cdots, \bu_n]$ and the diagonal matrix $\bLambda=\text{diag}(\lambda_1, \cdots, \lambda_n)$ contain the graph Laplacian eigenvectors and eigenvalues, respectively. We follow the convention of sorting eigenvalues in non-decreasing order as $0 = \lambda_1 \leq \lambda_2 \leq \cdots \leq \lambda_n \leq 2$.
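+
+ As a small concrete example (not part of the method itself), the normalized Laplacian and its spectrum for a toy path graph can be computed as follows; the graph is assumed to be connected with no isolated nodes.
+
+ ```python
+ import torch
+
+ # adjacency matrix of the path graph 0-1-2-3
+ A = torch.tensor([[0., 1., 0., 0.],
+                   [1., 0., 1., 0.],
+                   [0., 1., 0., 1.],
+                   [0., 0., 1., 0.]])
+
+ deg = A.sum(dim=1)
+ D_inv_sqrt = torch.diag(deg.rsqrt())
+ L = torch.eye(4) - D_inv_sqrt @ A @ D_inv_sqrt   # normalized graph Laplacian
+
+ # L = U diag(lambda) U^T with eigenvalues in non-decreasing order
+ lam, U = torch.linalg.eigh(L)
+ print(lam)   # lam[0] == 0 for a connected graph; all eigenvalues lie in [0, 2]
+ ```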
25
+
26
+ Our approach is motivated by the well known fact that the first few eigenvalues $\blambda_k = (\lambda_1, \cdots, \lambda_k)$ and eigenvectors $\bU_k = [\bu_1, \cdots, \bu_k]$ of the graph Laplacian describe global properties of the graph structure such as its connectivity, clusterability, diameter, and node distance (see Appendix [11](#app:spectrum){reference-type="ref" reference="app:spectrum"}).
27
+
28
+ We assume the graph to be connected. Therefore the first eigenvector (associated with $\lambda_1=0$) only contains information about the node degree (a local feature) and we start conditioning graph generation with the *second* eigenvector.
29
+
30
+ In the following we describe some useful facts about the geometric, algebraic, and group structure of orthogonal matrices as eigenvectors of undirected graphs are such matrices.
31
+
32
+ *The special orthogonal group $\SO{n}$.* Being orthogonal matrices ($\bU \bU^\top = \bU^\top \bU = \bI$), eigenvectors belong to the orthogonal matrix group $\GO{n}$. The latter contains two connected components: one comprising all of the matrices with determinant $-1$ and another comprising all of the matrices with determinant $+1$ (rotation matrices), also known as the special orthogonal group $\SO{n}$. Graph eigenvectors are phase-independent, meaning that $-\bU$ corresponds to the same eigenbasis as $\bU$, and there always exists an $\SO{n}$ matrix that corresponds to a given graph's eigenbasis. We can thus generate all possible graph eigenspaces while being restricted to $\SO{n}$, as we do in the following.
33
+
34
+ The Lie algebra of $\SO{n}$ is formed by skew-symmetric matrices ($\bS^T = -\bS$). The matrix exponential $\bU=\exp(\bS)$ can then be used as a surjective map from skew-symmetric matrices onto $\SO{n}$ [@shepard2015representation]. A simple counting argument reveals that orthogonal matrices can be defined using ${n(n-1)}/{2}$ parameters.
35
+
36
+ *The Stiefel manifold $\V{k}{n}$.* In this work, we mostly focus on the first $k$ eigenvectors $\bU_{k} = [\bu_1, \cdots, \bu_k] \in \bbR^{n \times k}$, which form a Stiefel manifold $V_k(n)$ and satisfy $\bU_{k}^\top \bU_{k} = \bI_k,$ where $\bI_k$ is the $k \times k$ identity matrix. Note that $\bU_{k} \bU_{k}^\top = \bI_n$ iff $n=k$. One point in the Stiefel manifold can be transformed into any other with a rotation, i.e., using multiplication with orthogonal matrices $\bR_L \in \SO{n}, \bR_R \in \SO{k}$: $$\begin{equation}
37
+ \label{eq:linear-transoform-rotation}
38
+ \bU \in \V{k}{n} \Rightarrow \bR_L \bU \bR_R \in \V{k}{n}.
39
+ \end{equation}$$ We use this property to build the layers of the eigenvector generator (see Sec. [4.2](#subsec:architecture_eigenvector){reference-type="ref" reference="subsec:architecture_eigenvector"}). To generate a random initial point on the Stiefel manifold, one can create a random skew-symmetric matrix with $nk - {k(k+1)}/{2}$ non-zero parameters, compute its exponential, and select the first $k$ columns.
40
+
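+ The sketch below illustrates these two facts numerically: a random Stiefel point is obtained from the exponential of a skew-symmetric matrix, and left/right rotations keep it on the manifold. For simplicity it parameterizes a full $n \times n$ skew-symmetric matrix rather than the minimal $nk - k(k+1)/2$ parameters.
+
+ ```python
+ import torch
+
+ n, k = 6, 3
+
+ def random_rotation(m):
+     """Random SO(m) matrix via the exponential map of a skew-symmetric matrix."""
+     X = torch.randn(m, m)
+     S = torch.tril(X, diagonal=-1)
+     return torch.matrix_exp(S - S.t())
+
+ # random point on the Stiefel manifold V_k(n): first k columns of an SO(n) matrix
+ U = random_rotation(n)[:, :k]
+ print(torch.allclose(U.t() @ U, torch.eye(k), atol=1e-5))           # True
+
+ # left/right rotations keep the matrix on the Stiefel manifold
+ U_rot = random_rotation(n) @ U @ random_rotation(k)
+ print(torch.allclose(U_rot.t() @ U_rot, torch.eye(k), atol=1e-5))   # True
+ ```
+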
41
+ <figure id="fig:architecture" data-latex-placement="ht">
42
+ <p><embed src="figures/architecture-general.pdf" style="width:50.0%" /> <embed src="figures/eigenvector-generator.pdf" style="width:49.0%" /></p>
43
+ <figcaption><strong>Left:</strong> General architecture. Generation is performed sequentially with 3 GANs generating the eigenvalues (purple), the eigenvectors (blue) and finally the graph (green). Each generation step is conditioned on the previous generated variable. The input latent variable <span class="math inline">$\bw$</span> of each sub-generator is obtained using an MLP. <strong>Right:</strong> Eigenvector generator. The initial eigenvectors <span class="math inline">$\bU^{(0)}$</span> are selected from a learned Bank of Stiefel Manifolds. They are then transformed with <span class="math inline">3</span> rotation layers (orange box) that each perform one left and one right rotation. </figcaption>
44
+ </figure>
45
+
46
+ SPECTRE aims to overcome a key difficulty that one-shot graph generators face: controlling the global graph structure by manipulating local interactions. To this end, SPECTRE generates graphs by first modeling the distribution of the top-$k$ eigenvalues and eigenvectors $\blambda_k \in \bbLambda_k$ and $\bU_k \in \V{k}{n}$, where $\bbLambda_k$ is the set of $k$ strictly positive non-decreasing eigenvalues of the graph Laplacian $$\bbLambda_k := \{\blambda_k \in (0,2]^k \ \text{ s.t. } \ \lambda_1 \leq \cdots \leq \lambda_k\},$$ whereas $\V{k}{n}$ is the Stiefel manifold.
47
+
48
+ The graph is then generated conditioned on $(\bU_k, \blambda_k)$. As explained in the background section, the dominant Laplacian spectra succinctly summarize the global structural properties of a graph and provide a rough embedding for the nodes. The latter can be used to bootstrap the graph generator module, simplifying its job.
49
+
50
+ We cover the architecture of SPECTRE (Figure [2](#fig:architecture){reference-type="ref" reference="fig:architecture"}, left) by first presenting the conditional graph generator and then showing how the top-$k$ eigenvalue and eigenvector generation works. Each latent variable $\bz_\lambda \in \bbR^{1\times k}$, $\bz_U \in \bbR^{n\times k}$, $\bz_A\in \bbR^{n\times k}$ is obtained by sampling a point uniformly from a hypersphere. Then, they are transformed using four-layer Multi-Layer Perceptrons ($\text{MLP}_{\bw_\lambda}$, $\text{MLP}_{\bw_U}$, $\text{MLP}_{\bw_A}$) to obtain $\bw_\lambda\in \bbR^{1\times k}$, $\bw_U\in \bbR^{n\times k}$, $\bw_A\in \bbR^{n\times k}$. Further implementation details of the architecture in SPECTRE can be found in Appendix [7](#appx:architecture){reference-type="ref" reference="appx:architecture"}.
51
+
52
+ The graph generator $g_A$ aims to construct graphs with a given dominant spectrum $(\blambda_k, \bU_k)$: $$\bA = g_A(\blambda_k, \bU_k, \bw_A).$$ In a first step, we build $\bL^{(0)}\in \bbR^{n \times n}$, a rough approximation of the Laplacian matrix $$\bL^{(0)} = \bU_k \, \diag(\blambda_k) \, \bU_k^\top \;.$$ Though $\bL^{(0)}$ does not look like a graph at this stage, as it lacks the local connectivity information, it internally encodes the global graph structure. This initial approximation is then progressively refined by a Provably Powerful Graph Network (PPGN) [@maron2019provably]: $$\bL^{(l)} = \text{PPGN}_l(\bL^{(l-1)}) \quad \text{for layer} \quad l = 1, \cdots, L-1 .$$ To avoid a complicated manual conversion from the Laplacian to an adjacency matrix, the final layer instead produces the adjacency matrix directly: $$\bA =\sigma(\text{PPGN}_L(\bL^{(L-1)})),$$ where $\sigma$ is the sigmoid activation function. PPGN's expressive power matches that of the 3-WL test. The network is thus better suited to modeling and distinguishing graphs than typical Graph Neural Networks (GNNs), which are usually only as discriminative as the 2-WL test [@xu2018how; @morris2019weisfeiler]. More recent GNNs are either less expressive than PPGN [@zhang2021nested; @sandfelder2021ego; @papp2021dropgnn; @bevilacqua2021equivariant], or are computationally slower in the non-asymptotic regime [@vignac2020building; @NEURIPS2020_f81dee42], or hinge on the computation of pre-defined features [@bouritsas2020improving]. We stress that the list of mentioned references provides an incomplete sample of all known GNN architectures whose expressive power surpasses the 2-WL test.
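+
+ The initial approximation and the final read-out can be sketched as follows; the sampled spectrum and the PPGN refinement are replaced by simple stand-ins (a QR-orthonormalized random matrix and an identity map), since only the construction of $\bL^{(0)}$ and the sigmoid read-out are illustrated here.
+
+ ```python
+ import torch
+
+ n, k = 8, 3
+
+ # stand-ins for the generated dominant spectrum (outputs of the spectrum generators)
+ lam_k = torch.sort(torch.rand(k) * 2).values
+ U_k, _ = torch.linalg.qr(torch.randn(n, k))      # orthonormal columns: U_k^T U_k = I_k
+
+ # rough Laplacian approximation encoding the global structure
+ L0 = U_k @ torch.diag(lam_k) @ U_k.t()
+
+ # PPGN layers would progressively refine L0; an identity placeholder is used here
+ L_refined = L0
+ A = torch.sigmoid(L_refined)                     # final layer: edge probabilities
+ ```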
53
+
54
+ We should note that the set $\bb{S}_k(n) \subset \bbLambda_k \times \V{k}{n}$ of *valid graph Laplacian spectra*, i.e., those $(\blambda_k,\bU_k)$ that lead to a valid graph $G$, can be much smaller than those that are modeled above. Unfortunately, the problem of determining whether $(\blambda_k,\bU_k) \in \bb{S}_k(n)$ appears to be computationally hard when $k \ll n$, meaning that we cannot expect that the spectrum generator always returns a valid sample. We thus do not enforce exact conditioning on $(\blambda_k,\bU_k)$ but instead motivate the graph generator to generate graphs whose first $k$ eigenvalues and eigenvectors are close to those sampled during the first stages.
55
+
56
+ The graph discriminator takes the adjacency matrix and corresponding spectral features as input: $$e_{\bA} = d_A(\bA, \blambda_k, \bU_k, n) .$$ Due to its conditioning, the discriminator also helps to ensure that the generated graph and eigenvectors are consistent. To encourage the discriminator to focus on the relation between the graph and the eigenvectors, we sometimes pass true (perturbed) eigenvalues and eigenvectors to the generator. The discriminator architecture is analogous to the generator, with an additional global pooling layer before the output. Note that having spectral features strengthens the discriminator. Consider, for instance, the task of determining whether a graph is connected---a task that a good discriminator needs to solve. Whereas in normal architectures the GNN depth needs to exceed the graph diameter to determine connectivity, when spectral features are available, connectivity can be checked locally. To guarantee global consistency, we then just need to ensure that the spectral features we condition on are well posed.
57
+
58
+ The eigenvector generator (Figure [2](#fig:architecture){reference-type="ref" reference="fig:architecture"} right), aims to construct eigenvectors $\bU_k$ matching given eigenvalues $\blambda_k$: $$\bU_k = g_U(\blambda_k, \bw_U),$$ Akin to the graph generator, $g_U$ operates by iteratively refining a starting eigenvector matrix. First, a starting Stiefel matrix $\bU_{k}^{(0)}$ is selected from a bank of learned matrices $\{\bB_0,...,\bB_m \}$ with $\bB_i \in \V{k}{n}$ for all $i = 1, \cdots, m$. Using a bank helps to make the generation easier, as orthogonal matrices which correspond to valid graph Laplacian eigenvectors form a potentially small subset of all orthogonal matrices. The selection is done by generating a query matrix $\bQ = \text{MLP}(\blambda_k)$ of size ${n \times k}$ that is compared to matrices in the bank using the canonical Stiefel manifold metric [@edelman1998geometry]: $$m(\bQ, \bB_i) := \text{tr}\big(\bQ^T\big(\bI-\frac{1}{2}\bB_{i}\bB_{i}^\top\big)\bB_{i} \big),$$ which we normalize such that the distance from $\bB_{i}$ to itself is equal to one. The starting matrix $\bU_{k}^{(0)}$ is then sampled using Gumbel-softmax [@jang2016categorical; @maddison2016concrete].
59
+
60
+ The generator proceeds to refine $\bU_{k}^{(0)}$ by repeatedly multiplying it with left and right orthogonal matrices: $$\bU_{k}^{(\ell)} = \bR_{L}^{(\ell)} \, \bU_{k}^{(\ell-1)} \bR_{R}^{(\ell)} \quad \text{for layer} \quad \ell = 1, \cdots, L.$$ As described in [\[eq:linear-transoform-rotation\]](#eq:linear-transoform-rotation){reference-type="eqref" reference="eq:linear-transoform-rotation"} this transformation ensures that the matrix stays on the Stiefel manifold. The left refinement matrix is constructed by processing inputs with a PointNetST layer [@segol2019universal] and projecting the result onto $\SO{n}$ by constructing a skew-symmetric matrix and finally using the matrix exponential: $$\bR_L^{(\ell)} = \text{proj} \big( \text{outer} \big(\text{PointNetST}(\bU_{k}^{(\ell-1)}, \blambda_k, \bz_U)
61
+ \big)\big),$$ where we define $\text{proj}(\bX) := \exp( \text{tril}(\bX) - \text{tril}(\bX)^\top)$ and $\text{outer}(\bX) := \bX \bX^\top$. The right rotation matrix is constructed similarly by using a second PointNetST layer (no parameter-sharing) and mean pooling over the set of nodes: $$\bR_{R}^{(\ell)} = \text{proj} \big(
62
+ \text{MLP}_{\bR_R}(\frac{1}{n}\sum_{i=1}^n \text{PointNetST}(\bU_{k}^{(\ell-1)}, \blambda_k, \bz_U))
63
+ \big).$$ The eigenvector discriminator takes the eigenvectors and the corresponding eigenvalues as input: $$e_{\bU_k} = d_U(\bU_k, \blambda_k, n) .$$ Since spectral node embeddings induce a clustering, for the discriminator we use an architecture based on PointNet [@qi2017pointnet], which achieves good results on point cloud segmentation and classification. This architecture comprises of a right rotation, a point-wise transformation with an $\text{MLP}$, another right rotation, and a PointNetST layer followed by mean pooling and an $\text{MLP}$. The right rotations are constructed in the same way as done in the generator.
64
+
65
+ We use PointNetST layers as they are efficient and can approximate any equivariant set function [@segol2019universal]. Model weights are initialized such that all rotation matrices are close to the identity.
66
+
67
+ <figure id="fig:graph_samples" data-latex-placement="t!">
68
+ <embed src="figures/sample_graphs/main_samples.pdf" style="width:99.0%" />
69
+ <figcaption>A set of sample graphs produced by the models. Each row is conditioned on the same number of nodes.</figcaption>
70
+ </figure>
71
+
72
+ The final piece of the puzzle entails generating $$\blambda_k = g_{\lambda}( \bw_{\lambda}).$$ As eigenvalues are just an increasing sequence, the eigenvalue generator is a simple 4-layer 1D CNN with up-sampling [@donahue2018adversarial].
73
+
74
+ Likewise, the discriminator: $$e_{\blambda_k} = d_{\lambda}(\blambda_k, n) ,$$ is a strided 4-layer 1D CNN with a linear final read-out layer. Both networks employ gated activation units $z = \tanh(W_{1}X) \cdot \sigma(W_{2}X)$ as used in WaveNet [@oord2016wavenet] and PixelCNN [@oord2016conditional].
75
+
76
+ To train our model we use the WGAN-LP loss ($\lambda_{\text{LP}} = 5$) [@petzka2018regularization], ADAM optimizer ($\beta_1=0.5$, $\beta_2=0.9$) [@kingma2014adam], and learning rate of $1e-4$ for both the generator and the discriminator.
77
+
78
+ During training we utilize a form of teacher forcing, where initially each model is trained separately for 26k training steps, using actual spectral features from the training set for conditioning. We then gradually anneal the mixing temperature $\tau$, which defines how many real inputs each model receives, over the next 26k steps from $1.0$ to $0.8$ using a cosine schedule. All of our models are trained for 150k steps in total.
2204.10211/main_diagram/main_diagram.drawio ADDED
@@ -0,0 +1 @@
 
 
1
+ <mxfile host="app.diagrams.net" modified="2021-11-14T11:26:18.733Z" agent="5.0 (X11)" version="15.7.3" etag="bl37-1Wk_T_n7g1K6Hzf" type="google"><diagram id="NxV5hAxd4UNKsg048TIy">7VnJbtswEP0aXQPtyzG2kzSHAEXToseCkBiJKCUaFL316zs0KVtbHLV2ZGfxwSAfRxzyvRl6RBvONF/fcTTPHliCqWGbydpwZoZtR44L3xLYKMDzLAWknCQKqgGP5A/WoKnRBUlw2TAUjFFB5k0wZkWBY9HAEOds1TR7YrTpdY5S3AEeY0S76E+SiEyhoR3s8S+YpFnl2fIjNZKjylhPUWYoYSsFbTfn3BjOlDMmVCtfTzGV3FW8KAZunxndLYzjQgx5wFYPLBFd6L3pdYlNtVnOFkWCpb1pOJNVRgR+nKNYjq5AXcAykVPoWdB8IpROGWUc+gUrwGiiPWAu8PrZVVq7vUPMYJZjwTdgoh/wXS28jhfX0f1VjX0NZTXiPY0hrXe6m3lPCTQ0K/0MOW+CodA8H0Pum2DIDc/HkPcyQ/GCL7cESQZwkVzLY2pPQI2eJpdgekuk421PTYuTzvH1Iks1FrwDLHBMkSDL5vR91GgPXxkBxzURoquo9gmbkgTmlVn7tKYv2YLHWM9YP9NaTnz3CCcC8RSLjhOQA21qZnNpUD6/Uc9vRVtkHl6zc9AeGmoF+3DbaTcoAv1OBN4//OgEIWSXaAZbKTj7jVvZ2JOgiJK0gG4McYYBn8hcJfCLea0HcpIk0k1v8jdD+gTJ7rWPQ7eb7H5PmDsnSPagQ/UMz8EplCMof7+Uh+0I7qG872Q5BeVhz/nqU8lrQpbQTGXz292kQmG+2kCP7Vapftv3qp8fnE+/aIh+hiw0XMO7MYLJvRHMvv+CxhQaeuCD6eW19fLHO+Kq97Dhgk0rwWYfVrCgfUCOKZjVI9iHrECPKg6HVqChef4KNLD/rQINotetQK0BVw21qIspKksSHwo8dQ5UFzD2UcG3bm760EvioeC8sDchzzp9HP6P9APuUD6lP630Lzhxx5J+wOXQ2aX3vbGkH+XXxw8uI+sH3Hp9Sj/q1ddoWd+9bro86UNrLOmPKgcH15zOZWR99/rrU/rzvm68UtZDd/8/pTLf/9nr3PwF</diagram></mxfile>